\\n' % (embedded_css, report_html)\n\n # Save report if it's too large to display or if user asks\n target_dir, target_filename = os.path.split(bv.file.filename)\n html_file = os.path.join(target_dir, 'coverage-report-%s.html' % target_filename)\n choices = [\"Cancel Report\", \"Save Report to File\", \"Save Report and Open in Browser\"]\n choice = 0\n save_file = 1\n save_and_open = 2\n if len(report_html) > 1307673: # if Qt eats even one little wafer more, it bursts\n choice = interaction.get_choice_input(\n \"Qt can't display a report this large. Select an action.\",\n \"Generated report too large\",\n choices)\n if choice in [save_file, save_and_open]:\n save_output = True\n else:\n bv.show_html_report(title, report_html, plaintext=report)\n\n if save_output:\n with open(html_file, 'w') as f:\n f.write(report_html)\n log.log_info(\"[*] Saved HTML report to %s\" % html_file)\n if choice == save_file:\n interaction.show_message_box(\"Report Saved\",\n \"Saved HTML report to: %s\" % html_file,\n enums.MessageBoxButtonSet.OKButtonSet,\n enums.MessageBoxIcon.InformationIcon)\n if choice == save_and_open:\n open_new_browser_tab(\"file://\" + html_file)","def cov():\n tests = unittest.TestLoader().discover('project/tests')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n return 0\n return 1","def get_project_total_test_coverage(self) -> str:\n number_not_documented_columns = 0\n number_of_columns = 0\n\n for description in self.dbt_definitions.values():\n if description == COLUMN_NOT_DOCUMENTED:\n number_not_documented_columns += 1\n number_of_columns += 1\n\n return self.calculate_coverage_percentage(\n misses=number_not_documented_columns,\n total=number_of_columns,\n )","def percent_covered(self):\n out = self.coverage\n return out and out.cover","def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0","def test(coverage):\n print('success')\n pass","def cov():\n cov = coverage.coverage(branch=True, include='project/*')\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print('Coverage Summary:')\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()","def coverage(session):\n session.install(\"coverage[toml]\", \"codecov\")\n session.run(\"coverage\", \"xml\", \"--fail-under=0\")\n session.run(\"codecov\", *session.posargs)","def calculate_coverage_percentage(self, misses: int, total: int) -> str:\n if total == 0:\n return \"0.0\"\n\n percentage_failure = round((1 - (misses / total)) * 100, 1)\n return str(percentage_failure)","def cover(ctx, html=False):\n header(cover.__doc__)\n extra = \"--cov-report html\" if html else \"\"\n with ctx.cd(ROOT):\n ctx.run(\n \"pytest --benchmark-skip --cov flask_restx --cov-report term --cov-report xml {0}\".format(
extra\n ),\n pty=True,\n )","def get_project_column_description_coverage(self) -> None:\n print_statistics = {}\n for model_name, path in self.all_dbt_models.items():\n schema_content = open_yaml(path)\n\n number_documented_columns = len(\n self.get_documented_columns(\n schema_content=schema_content,\n model_name=model_name,\n )\n )\n\n number_not_documented_columns = len(\n self.get_not_documented_columns(\n schema_content=schema_content,\n model_name=model_name,\n )\n )\n\n print_statistics[model_name] = self.calculate_coverage_percentage(\n misses=number_not_documented_columns,\n total=(number_documented_columns + number_not_documented_columns),\n )\n\n print_statistics[\"\"] = \"\"\n print_statistics[\"Total\"] = self.get_project_total_test_coverage()\n\n self.create_table(\n title=\"Documentation Coverage\",\n columns=[\"Model Name\", r\"% coverage\"],\n data=print_statistics,\n )","def get_coverage_report(details=False)->str:\n model = get_coverage_report_model()\n\n out = StringIO() \n formatter = TextCoverageReportFormatter(model, out)\n formatter.details = details\n formatter.report()\n \n return out.getvalue()","def cov():\n cov = coverage.coverage(\n branch=True,\n include='project/*',\n omit=\"*/__init__.py\"\n )\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print 'Coverage Summary:'\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()","def derive_model_coverage(self) -> None:\n self.get_model_column_description_coverage()\n self.get_model_test_coverage()","def cov(test_class):\n if test_class == 'all':\n tests = unittest.TestLoader().discover('project/tests')\n else:\n # note, test module must be imported above, doing lazily for now\n test_module = globals()[test_class]\n tests = unittest.TestLoader().loadTestsFromTestCase(test_module)\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n return 0\n return 1","def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns, periods=252*6.5*60)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats","def current_nbc_coverage():\n covered = 0\n total = 0\n for layer in layer_to_compute:\n covered = covered + np.count_nonzero(nbc_cov_dict[layer.name])\n total = total + np.size(nbc_cov_dict[layer.name])\n return covered / float(total)","def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns) #, periods=252*6.5*60) 
\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats","def get_test_cases_coverage(session_id):\n tc_stats={}\n tc_stats_list=[]\n total_executed=0\n sql='SELECT DISTINCT(test_id) FROM stats WHERE session_id=:sid AND test_id!=\"null\"'\n params={\"sid\":session_id}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n tests=c.fetchall()\n conn.close()\n if len(tests)>0:\n for t in tests:\n total_executed=0\n sql=\"SELECT DISTINCT(file_id) FROM stats WHERE session_id=:sid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n files=c.fetchall()\n conn.close()\n for f in files:\n line_count=get_executable_lines_count_for_file(f[0])\n # get executions\n sql=\"SELECT COUNT(DISTINCT line_guid) FROM stats WHERE session_id= :sid AND file_id= :fid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0],\"fid\":f[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n executed=c.fetchone()\n conn.close()\n total_executed+=executed[0]\n # save test case and its executions\n tc_stats={}\n tc_stats[\"test_id\"]=t[0]\n tc_stats[\"total_executed\"]=total_executed\n \n tc_stats_list.append(tc_stats)\n return tc_stats_list","def main(plot):\n plot_coverage(plot)\n return 0","def coverage(context):\n context.run(\" \".join([\n \"python -m pytest\",\n \"--cov=%s\" % PACKAGE_NAME,\n \"--cov-report html\",\n \"--cov-branch\",\n \"--cov-fail-under=75\"\n ]))","def overallCoverage(dataset, embeddings_col):\n from sparknlp.internal import _EmbeddingsOverallCoverage\n from sparknlp.common import CoverageResult\n return CoverageResult(_EmbeddingsOverallCoverage(dataset, embeddings_col).apply())","def coverage(ctx):\n ctx.run(\"coverage run --source {PROJECT_NAME} -m pytest\".format(PROJECT_NAME=PROJECT_NAME))\n ctx.run(\"coverage report -m\")\n ctx.run(\"coverage html\")","def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())","def output_summary_stats(self, filename):\r\n\r\n total_return = self.equity_curve['equity_curve'][-1]\r\n returns = self.equity_curve['returns']\r\n pnl = self.equity_curve['equity_curve']\r\n\r\n sharpe_ratio = create_sharpe_ratio(returns, periods=252)\r\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\r\n self.equity_curve['drawdown'] = drawdown\r\n\r\n stats = [(\"Total Return\", \"%0.2f%%\" % \\\r\n ((total_return - 1.0) * 100.0)),\r\n (\"Sharpe Ratio\", \"%0.2f%%\" % sharpe_ratio),\r\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\r\n (\"Drawdown
Duration\", \"%f\" % dd_duration)]\r\n self.equity_curve.to_csv(filename)\r\n return stats","def run_test_summary1c():\n print()\n print('--------------------------------------------------')\n print('Testing the summary1c function:')\n print('--------------------------------------------------')\n\n format_string = ' summary1c( {} )'\n test_results = [0, 0] # Number of tests passed, failed.\n\n # Test 1:\n expected = 1 + 2 + 5 + 7 # which is 15\n sequence = (20, 23, 29, 30, 33, 29, 100, 2, 4)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 2:\n expected = 1 + 4 + 6 # which is 11\n sequence = (23, 29, 30, 33, 29, 100, 2)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 3:\n expected = 16\n sequence = (20, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30, 2)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 4:\n expected = 5\n sequence = (29, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 5:\n expected = 5\n sequence = (30, 33, 29, 17, 100, 99, 40, 30, 30)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 6:\n expected = 2\n sequence = (30, 33, 13, 100, 99, 40, 30, 30)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 7:\n expected = 0\n sequence = (30, 33, 4, 10, 21, 100, 99, 40, 30, 30)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 8:\n expected = 3\n sequence = (5, 3, 3)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 9:\n expected = 1\n sequence = (5, 3)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 10:\n expected = 0\n sequence = (5,)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 11:\n expected = 0\n sequence = ()\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 12:\n expected = 0\n sequence = (4,)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n print_summary_of_test_results(test_results)","def get_model_test_coverage(self) -> None:\n # Init variables\n model_number_columns = 0\n 
model_columns_without_tests = 0\n untested_columns = []\n\n columns = self.dbt_tests.get(self.model_name)\n\n if not columns:\n logger.info(\n f\"There is no documentation entry for '{self.model_name}' in your schema.yml files. \"\n \"You might need to run `dbt-sugar doc` first.\"\n )\n return\n\n for column in columns:\n model_number_columns += 1\n if len(column[\"tests\"]) == 0:\n model_columns_without_tests += 1\n untested_columns.append(column[\"name\"])\n\n percentage_not_tested_columns = self.calculate_coverage_percentage(\n misses=model_columns_without_tests, total=model_number_columns\n )\n\n data = self.print_nicely_the_data(\n data=untested_columns, total=percentage_not_tested_columns\n )\n\n self.create_table(\n title=\"Test Coverage\", columns=[\"Untested Columns\", r\"% coverage\"], data=data\n )","def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n\n sharpe_ratio = create_sharpe_ratio(returns)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n if len(dd_duration) == 1:\n dd_duration = dd_duration[0]\n\n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)),\n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio),\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\n (\"Drawdown Duration\", \"%s\" % dd_duration)]\n\n self.equity_curve.to_csv('equity.csv')\n self.positions.to_csv('positions.csv')\n self.prices.to_csv('prices.csv')\n\n return stats","def testViewCoverageData(self):\n try:\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n covRefDbList = []\n covSampleList = []\n entryCountD = {}\n for entryId in entryD:\n for _, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n\n analD = eD[\"anal_instances\"] if \"anal_instances\" in eD else {}\n\n for _, aD in analD.items():\n entryCountD[entryId] = True\n covRefDb = aD[\"coverage_inst_refdb\"]\n covSample = aD[\"coverage_inst_entity\"]\n if covRefDb is not None:\n covRefDb = 0.0 if covRefDb < 0.0 else covRefDb\n covRefDb = 1.0 if covRefDb > 1.0 else covRefDb\n covRefDbList.append(covRefDb)\n if covSample is not None:\n covSample = 0.0 if covSample < 0.0 else covSample\n covSample = 1.0 if covSample > 1.0 else covSample\n covSampleList.append(covSample)\n #\n logger.info(\"covRefDbList %d covSampleList %d\", len(covRefDbList), len(covSampleList))\n #\n cu = DisorderChartUtils()\n cu.doHistogramChart(\n covRefDbList,\n plotPath=self.__plotCoverageRefDb,\n yPlotScale=\"log\",\n yPlotMax=100000,\n yPlotMin=1000,\n xPlotMin=0.0,\n xPlotMax=1.001,\n xPlotIncr=0.1,\n # yPlotMax=100000,\n xPlotLabel=\"Coverage Fraction\",\n yPlotLabel=\"Protein Instances\",\n plotTitle=\"Reference Sequence Coverage\",\n )\n self.__writeLegend(\n self.__plotCoverageRefDb,\n \"UniProt reference sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \"\n % (len(covRefDbList), len(entryCountD)),\n )\n cu.doHistogramChart(\n covSampleList,\n plotPath=self.__plotCoverageSample1,\n yPlotScale=\"log\",\n xPlotMin=0.0,\n xPlotMax=1.001,\n xPlotIncr=0.1,\n yPlotMax=100000,\n yPlotMin=1000,\n # yPlotMax=100000,\n xPlotLabel=\"Coverage Fraction\",\n yPlotLabel=\"Protein Instances\",\n plotTitle=\"Sample Sequence Coverage\",\n )\n self.__writeLegend(\n self.__plotCoverageSample1,\n \"Sample sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) 
\" % (len(covSampleList), len(entryCountD)),\n )\n #\n cu.doHistogramChart(\n covSampleList,\n plotPath=self.__plotCoverageSample2,\n yPlotScale=\"log\",\n yPlotMax=100000,\n yPlotMin=1000,\n xPlotMin=0.8,\n xPlotMax=1.001,\n xPlotIncr=0.1,\n # yPlotMax=100000,\n xPlotLabel=\"Coverage Fraction\",\n yPlotLabel=\"Protein Instances\",\n plotTitle=\"Sample Sequence Coverage\",\n )\n self.__writeLegend(\n self.__plotCoverageSample1,\n \"Sample sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \" % (len(covSampleList), len(entryCountD)),\n )\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()","def test_coverage(self) -> None:\n coverage = self.portfolio_coverage_tvp.get_portfolio_coverage(self.data, PortfolioAggregationMethod.WATS)\n self.assertAlmostEqual(coverage, 32.0663, places=4,\n msg=\"The portfolio coverage was not correct\")","def to_html(self) -> str:\n source_name = escape(self.source_name)\n (covered, lines) = self.coverage_stats()\n lines_stats = \"{} / {} ({} lines of code)\".format(covered, lines, len(self.source_code))\n (br_covered, br_count, calls_covered, calls_count) = self.branch_stats()\n branch_stats = \"{} / {}\".format(br_covered, br_count)\n call_stats = \"{} / {}\".format(calls_covered, calls_count)\n (fn_covered, fn_count) = self.function_stats()\n fn_stats = \"{} / {}\".format(fn_covered, fn_count)\n\n self.decode_cpp_function_names()\n\n result = [\"\"\"\n \n \n \n \n Coverage report of file \"\"\" + source_name + \"\"\"\n \n \n \n \n

&lArr; Back | Go to line #

\n

\"\"\" + source_name + \"\"\"

\n
\n \n \n \n \n \n \n \n
Summary
Lines\"\"\" + lines_stats + \"\"\"
Branches\"\"\" + branch_stats + \"\"\"
Calls\"\"\" + call_stats + \"\"\"
Functions\"\"\" + fn_stats + \"\"\"
\n \n \n \n \"\"\"]\n result.extend(line.to_html() for line in self.source_code)\n result.append(\"\"\"\n \n
BranchesCovLineSource
\n
\n

Functions

\n
\n \n \n \"\"\")\n result.extend(func.to_html() for func in self.source_functions)\n result.append(\"\"\"\n \n
FunctionCallsRet.Blk. Exec.
\n
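The report snippets above and below attach a health class to each covered/total pair. `single_summary` further down calls a `to_percentage(covered, total, hi, lo)` helper with thresholds such as (90, 75), but the helper itself is not part of this collection; the sketch below is an assumption about its behavior (the rounding and the class names are guesses), included only so the report snippets read end to end.

def to_percentage(covered: int, total: int, hi: float, lo: float):
    # Hypothetical reconstruction: returns (percent, health_class).
    # An empty denominator is treated as fully covered, mirroring the
    # coverage() snippet above that returns 100.0 on ZeroDivisionError.
    percent = round(covered * 100.0 / total, 1) if total else 100.0
    if percent >= hi:
        health = "good"
    elif percent >= lo:
        health = "mediocre"
    else:
        health = "bad"
    return (percent, health)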
\n \n \n \n \"\"\")\n return '\\n'.join(result)","def get_model_column_description_coverage(self) -> None:\n not_documented_columns = self.get_not_documented_columns(\n schema_content=self.model_content,\n model_name=self.model_name,\n ).keys()\n\n number_not_documented_columns = len(not_documented_columns)\n number_documented_columns = len(\n self.get_documented_columns(\n schema_content=self.model_content,\n model_name=self.model_name,\n )\n )\n number_columns = number_documented_columns + number_not_documented_columns\n\n # This means that they are not columns, and we want to skip the printing.\n if number_columns == 0:\n return\n\n percentage_not_documented_columns = self.calculate_coverage_percentage(\n misses=number_not_documented_columns,\n total=number_columns,\n )\n logger.debug(\n f\"percentage_not_documented_columns for '{self.model_name}': {percentage_not_documented_columns}\"\n )\n\n data = self.print_nicely_the_data(\n data=list(not_documented_columns), total=percentage_not_documented_columns\n )\n\n self.create_table(\n title=\"Documentation Coverage\",\n columns=[\"Undocumented Columns\", r\"% coverage\"],\n data=data,\n )","def main():\n coverage = calculate_code_coverage()\n platform = os.uname()[0]\n if coverage < CODE_COVERAGE_GOAL[platform]:\n data = {\n 'expected': CODE_COVERAGE_GOAL[platform],\n 'observed': coverage,\n }\n print '\\033[91mFAIL: %(observed).2f%% does not meet goal of %(expected).2f%%\\033[0m' % data\n sys.exit(1)","def current_knc_coverage():\n covered = 0\n total = 0\n for layer in layer_to_compute:\n covered = covered + np.count_nonzero(knc_cov_dict[layer.name])\n total = total + np.size(knc_cov_dict[layer.name])\n return covered / float(total)","def get_testcase_summary(output):\n print(\"Inside Test Summary\")\n re_tc_summary = re.compile(r\"^\\[(\\d+\\.\\d+)\\][^\\]+\\{\\{(__testcase_summary);(\\d+);(\\d+)\\}\\}\")\n #re_tc_summary = re.compile(r\"^\\[(\\d+\\.\\d+)\\][^\\{]+\\{\\{(__testcase_summary);(\\d+);(\\d+)\\}\\}\")\n print(\"re_tc_summary =\",re_tc_summary.pattern)\n #print(dir(re_tc_summary))\n \n for line in output.splitlines():\n print \"line=\",line\n m = re_tc_summary.search(line)\n print (\"m=\",m.groups())\n if m:\n _, _, passes, failures = m.groups()\n return int(passes), int(failures)\n return None","def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))","def report_coverage(fp=None, details=False):\n if fp is None:\n fp = sys.stdout\n fp.write(get_coverage_report(details))","def define_coverage(self, id=None, units=None, standard_name=None, coverage_dimensions=None):","def pytest_report_header(config):\n circle_node_total, circle_node_index = read_circleci_env_variables()\n return \"CircleCI total nodes: {}, this node index: {}\".format(circle_node_total, circle_node_index)","def coverage(session) -> None:\n session.install(\".[test]\", \"pytest-cov\")\n session.run(\n \"pytest\", \"-n\", \"auto\", \"--cov=./\", \"--cov-report=xml\", *session.posargs\n )","def to_html(self) -> str:\n coverage_class = 'zero' if self.called == 0 else 'all'\n return '''\n {}\n {}{}%{}%\n \\n'''.format(\n self.name, coverage_class, self.linenum, self.pretty_name, self.called,\n self.returned, self.blocks\n )","def query_coverage(query):\n length = query_length(query)\n coverage = (int(query['alignment length'])/length) * 100\n return coverage","def test_summary_report(self):\n self.driver.get('http://psl-outbreak.herokuapp.com/report')\n 
self.driver.find_element_by_id('summary_report_cases').click()","def test_coverage_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-2.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_1(inst2)","def summary_string(self) -> str:","def get_coverage_stats(\n contig_depth_file, contig_fasta, contig_read_counts_file, contig_stats_out\n):\n print(\"getting coverage stats\")\n # add other files if requested\n # read counts\n logger.info(\"Parsing read count file: {}\".format(contig_read_counts_file))\n read_count_table = pandas.read_csv(\n contig_read_counts_file, delim_whitespace=True, names=[\"ReadCount\", \"Contig\"]\n ).set_index(\"Contig\")\n\n # convert base by base depth data into coverage\n logger.info(\"Parsing read depth file: {}\".format(contig_depth_file))\n mapping_depth_table = get_samtool_depth_table(contig_depth_file, contig_fasta,)\n contig_stats = mapping_depth_table.join(read_count_table, how=\"left\").fillna(0)\n\n for col in [\"Length\", \"ReadCount\", \"MaxCov\", \"MinCov\", \"CumuLength\"]:\n if col in contig_stats.columns:\n contig_stats[col] = contig_stats[col].astype(int)\n\n logger.info(\"Writing coverage table to: {}\".format(contig_stats_out))\n contig_stats.to_csv(contig_stats_out, sep=\"\\t\", float_format=\"%0.2f\")","def analyze_coverage(results, outcomes, allow_list, full_coverage):\n available = check_test_cases.collect_available_test_cases()\n for key in available:\n hits = outcomes[key].hits() if key in outcomes else 0\n if hits == 0 and key not in allow_list:\n if full_coverage:\n results.error('Test case not executed: {}', key)\n else:\n results.warning('Test case not executed: {}', key)\n elif hits != 0 and key in allow_list:\n # Test Case should be removed from the allow list.\n if full_coverage:\n results.error('Allow listed test case was executed: {}', key)\n else:\n results.warning('Allow listed test case was executed: {}', key)","def generate_cobertura_xml(self, coverage_data):\n\n dom_impl = minidom.getDOMImplementation()\n doctype = dom_impl.createDocumentType(\"coverage\", None,\n \"http://cobertura.sourceforge.net/xml/coverage-03.dtd\")\n document = dom_impl.createDocument(None, \"coverage\", doctype)\n root = document.documentElement\n summary = coverage_data['summary']\n self._attrs(root, {\n 'branch-rate': self._percent(summary['branches-total'],\n summary['branches-covered']),\n 'branches-covered': str(summary['branches-covered']),\n 'branches-valid': str(summary['branches-total']),\n 'complexity': '0',\n 'line-rate': self._percent(summary['lines-total'],\n summary['lines-covered']),\n 'lines-valid': str(summary['lines-total']),\n 'timestamp': coverage_data['timestamp'],\n 'version': '1.9'\n })\n\n sources = self._el(document, 'sources', {})\n source = self._el(document, 'source', {})\n source.appendChild(document.createTextNode(self.base_dir))\n sources.appendChild(source)\n\n root.appendChild(sources)\n\n packages_el = self._el(document, 'packages', {})\n\n packages = coverage_data['packages']\n for package_name, package_data in list(packages.items()):\n package_el = self._el(document, 'package', {\n 'line-rate': package_data['line-rate'],\n 'branch-rate': 
package_data['branch-rate'],\n 'name': package_name\n })\n classes_el = self._el(document, 'classes', {})\n for class_name, class_data in list(package_data['classes'].items()):\n class_el = self._el(document, 'class', {\n 'branch-rate': self._percent(class_data['branches-total'],\n class_data['branches-covered']),\n 'complexity': '0',\n 'filename': class_name,\n 'line-rate': self._percent(class_data['lines-total'],\n class_data['lines-covered']),\n 'name': class_data['name']\n })\n\n # Process methods\n methods_el = self._el(document, 'methods', {})\n for method_name, hits in list(class_data['methods'].items()):\n method_el = self._el(document, 'method', {\n 'name': method_name,\n 'signature' : '',\n 'hits': hits\n })\n methods_el.appendChild(method_el)\n\n # Process lines\n lines_el = self._el(document, 'lines', {})\n lines = list(class_data['lines'].keys())\n lines.sort()\n for line_number in lines:\n line_el = self._el(document, 'line', {\n 'branch': class_data['lines'][line_number]['branch'],\n 'hits': str(class_data['lines'][line_number]['hits']),\n 'number': str(line_number)\n })\n if class_data['lines'][line_number]['branch'] == 'true':\n total = int(class_data['lines'][line_number]['branches-total'])\n covered = int(class_data['lines'][line_number]['branches-covered'])\n percentage = int((covered * 100.0) / total)\n line_el.setAttribute('condition-coverage',\n '{0}% ({1}/{2})'.format(\n percentage, covered, total))\n lines_el.appendChild(line_el)\n\n class_el.appendChild(methods_el)\n class_el.appendChild(lines_el)\n classes_el.appendChild(class_el)\n package_el.appendChild(classes_el)\n packages_el.appendChild(package_el)\n root.appendChild(packages_el)\n\n return document.toprettyxml()","def trivial_cover(regions_count, clinics_count, clinics):\n clinics_built = [0]*clinics_count\n covered = set()\n\n for clinic in clinics:\n clinics_built[clinic.index] = 1\n covered |= set(clinic.regions)\n if len(covered) >= regions_count:\n break # We are done, we cover all the regions\n\n # Compute the total construction cost\n total_cost = sum([clinic.cost*clinics_built[clinic.index] for clinic in clinics])\n \n # Convert the solution into the expected format\n output_data = str(total_cost) + '\\n'\n output_data += ' '.join(map(str, clinics_built))\n\n return output_data","def summary(self) -> str:\n pass","def calculate_coverage(path, alignment, number_of_fastas):\n\n path_to_alignment = path + 'Modeling/fasta_alns_and_identities/' + alignment\n fastas_iterator = parse_multifasta_file(path_to_alignment, number_of_fastas)\n fastas = []\n target_name, target_seq = next(fastas_iterator)\n fastas.append(target_seq)\n length_of_target = 0\n for i in target_seq:\n if i != '-':\n length_of_target += 1\n for i in range(1, number_of_fastas):\n name, seq = next(fastas_iterator)\n fastas.append(seq)\n coverage = 0\n for i in range(len(fastas[0])):\n for j in range(1, len(fastas)):\n if fastas[0][i] != '-' and fastas[j][i] != '-':\n coverage += 1\n break\n coverage_percent = round(coverage / length_of_target * 100, 2)\n return coverage_percent","def summary(self):\n raise NotImplementedError","def test_coverage_4(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_4(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert
\"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_4(inst2)","def compute_filecoverage(self):\n result = dict()\n for filename, fns in self.point_symbol_info.items():\n file_points = []\n for fn, points in fns.items():\n file_points.extend(points.keys())\n covered_points = self.covered_points & set(file_points)\n result[filename] = int(math.ceil(\n len(covered_points) * 100 / len(file_points)))\n return result","def recordCoverage( options, data ):\n for c in data.chrNames:\n data.mafWigDict[ c ]['columnsInBlocks'] = 0\n for m in data.mafBlocksByChrom[ c ]:\n if m.refEnd > m.refStart:\n data.mafWigDict[ c ]['columnsInBlocks'] += ( m.refEnd + 1 ) - m.refStart\n else:\n data.mafWigDict[ c ]['columnsInBlocks'] += ( m.refStart + 1 ) - m.refEnd","def test_coverage_2(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-selfpay.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_2(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_2(inst2)","def html_it():\n import coverage\n cov = coverage.coverage()\n cov.start()\n import here # pragma: nested\n cov.stop() # pragma: nested\n cov.html_report(directory=\"../html_other\")","def _cmd_coverage(args):\n pset = coverage.do_coverage(\n args.interval,\n args.bam_file,\n args.count,\n args.min_mapq,\n args.processes,\n args.fasta,\n )\n if not args.output:\n # Create an informative but unique name for the coverage output file\n bambase = core.fbase(args.bam_file)\n bedbase = core.fbase(args.interval)\n tgtbase = (\n \"antitargetcoverage\" if \"anti\" in bedbase.lower() else \"targetcoverage\"\n )\n args.output = f\"{bambase}.{tgtbase}.cnn\"\n if os.path.exists(args.output):\n args.output = f\"{bambase}.{bedbase}.cnn\"\n core.ensure_path(args.output)\n tabio.write(pset, args.output)","def summary(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")","def summarize(self):\n\n def increment_summary(summary_obj, case_obj):\n \"\"\"increment ReportSummary count was ReportCase status\n\n Whatever the status of the case object, the corresponding property\n will be incremented by 1 in the summary object\n\n Args:\n summary_obj (ReportSummary): summary object to increment\n case_obj (ReportCase): case object\n \"\"\"\n summary_obj.increment(case_obj.get_status())\n\n summary = ReportSummary()\n [increment_summary(summary, case) for case in self.cases]\n self.summary = summary","def test_get_vulnerability_occurrences_summary(self):\n pass","def header_summary(\n self, \n router_context,\n tests_by_status\n ):\n raise MissingOverload","def test():\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py -v2')\n local('virtenv/bin/coverage report -m')","def print_coverage(lengths):\n\n primerAlen = 1330\n primerBlen = 1353\n primerClen = 1237\n\n print(\"SRR ID\\tPrimer A\\tPrimer B\\tPrimer C\")\n for s in lengths:\n sys.stdout.write(s)\n sys.stdout.write(\"\\t{}\".format(1.0 * lengths[s][\"PrimerA\"]/primerAlen))\n sys.stdout.write(\"\\t{}\".format(1.0 * lengths[s][\"PrimerB\"]/primerBlen))\n sys.stdout.write(\"\\t{}\\n\".format(1.0 * lengths[s][\"PrimerC\"]/primerClen))","def 
pytest_terminal_summary(self, terminalreporter, exitstatus):\n # pylint: disable=unused-argument\n terminalreporter.section(\"Test Information\")\n for test, info in self._info.items():\n for datum in info:\n terminalreporter.write(\"{}: {}\\n\".format(test, datum))","def test_coverage_3(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-ehic.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_3(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_3(inst2)","def summarise(self):\n self.summary = az.summary(self.trace, var_names=[\"~chol\"], round_to=2)\n print(self.summary)\n return self.summary","def summarize(self):\n \n print self._num_tests, \"tests ran with\", len(self._failed_tests), \"failures:\", sorted(list(self._failed_tests))\n\n self._num_tests = 0\n self._failed_tests = set()","def get_coverage(self):\n if len(self) == 1:\n return self.subacqs[0].get_coverage()\n return np.array([self.subacqs[i].get_coverage() for i in range(len(self))])","def _calc_coverage(self, cds_aln):\n # Aligned region is part of a read that intersects with cds.\n coverage = 0\n for aln_reg in cds_aln.aligned_regions.values(): # aln_reg is of type CdsAlnSublocation\n location = aln_reg.location # location is of type Location\n coverage += location.length()\n coverage = coverage / float(Location.from_location_str(cds_aln.cds.location).length())\n return coverage","def summary_stats(tile_summary):\n return \"Original Dimensions: %dx%d\\n\" % (tile_summary.orig_w, tile_summary.orig_h) + \\\n \"Original Tile Size: %dx%d\\n\" % (tile_summary.orig_tile_w, tile_summary.orig_tile_h) + \\\n \"Scale Factor: 1/%dx\\n\" % tile_summary.scale_factor + \\\n \"Scaled Dimensions: %dx%d\\n\" % (tile_summary.scaled_w, tile_summary.scaled_h) + \\\n \"Scaled Tile Size: %dx%d\\n\" % (tile_summary.scaled_tile_w, tile_summary.scaled_tile_w) + \\\n \"Total Mask: %3.2f%%, Total Tissue: %3.2f%%\\n\" % (\n tile_summary.mask_percentage(), tile_summary.tissue_percentage) + \\\n \"Tiles: %dx%d = %d\\n\" % (tile_summary.num_col_tiles, tile_summary.num_row_tiles, tile_summary.count) + \\\n \" %5d (%5.2f%%) tiles >=%d%% tissue\\n\" % (\n tile_summary.high, tile_summary.high / tile_summary.count * 100, TISSUE_HIGH_THRESH) + \\\n \" %5d (%5.2f%%) tiles >=%d%% and <%d%% tissue\\n\" % (\n tile_summary.medium, tile_summary.medium / tile_summary.count * 100, TISSUE_LOW_THRESH,\n TISSUE_HIGH_THRESH) + \\\n \" %5d (%5.2f%%) tiles >0%% and <%d%% tissue\\n\" % (\n tile_summary.low, tile_summary.low / tile_summary.count * 100, TISSUE_LOW_THRESH) + \\\n \" %5d (%5.2f%%) tiles =0%% tissue\" % (tile_summary.none, tile_summary.none / tile_summary.count * 100)","def calculate_coverage(length_total, length_query, length_subject, option_cov=\"mean\"):\n if option_cov == \"mean\":\n cov = length_total / ((length_query + length_subject) / 2.0)\n elif option_cov == \"subject\":\n cov = length_total / length_subject\n elif option_cov == \"query\":\n cov = length_total / length_query\n elif option_cov == \"shortest\":\n cov = length_total / min(length_query, length_subject)\n elif option_cov == \"longest\":\n cov = length_total / max(length_query, length_subject)\n\n return cov","def html_index(source_files: iter([SourceFile]), compile_root: str) -> str:\n 
def single_summary(source_file: SourceFile) -> str:\n (covered, lines) = source_file.coverage_stats()\n (br_covered, br_count, _, _) = source_file.branch_stats()\n (fn_covered, fn_count) = source_file.function_stats()\n (coverage_percent, coverage_health) = to_percentage(covered, lines, 90, 75)\n (branch_percent, branch_health) = to_percentage(br_covered, br_count, 75, 50)\n (fn_percent, fn_health) = to_percentage(fn_covered, fn_count, 90, 75)\n\n # NOTE: row markup reconstructed; the original tags were stripped during extraction.\n return '''\n <tr>\n <td><a href=\"{}\">{}</a></td>\n <td class=\"{}\">{} / {} ({}%)</td>\n <td class=\"{}\">{} / {} ({}%)</td>\n <td class=\"{}\">{} / {} ({}%)</td>\n </tr>'''.format(\n to_html_filename(source_file.source_name),\n escape(source_file.source_name),\n coverage_health, covered, lines, coverage_percent,\n branch_health, br_covered, br_count, branch_percent,\n fn_health, fn_covered, fn_count, fn_percent\n )\n\n title = escape(compile_root)\n\n # NOTE: page markup reconstructed; the original tags were stripped during extraction.\n html_res = [\"\"\"<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"utf-8\"/>\n<title>Coverage report for \"\"\" + title + \"\"\"</title>\n</head>\n<body>\n<h1>Coverage report for \"\"\" + title + \"\"\"</h1>\n<table>\n<tbody>\n\"\"\"]\n\n html_res.extend(single_summary(s) for s in source_files)\n html_res.append('</tbody><tfoot><tr><th>File</th><th>Lines</th><th>Branch</th><th>Functions</th></tr></tfoot></table></body></html>
')\n\n return '\\n'.join(html_res)","def summary(self):\n return ''","def test_summaryRsrcsNoHeader(self):\n self.summaryRsrcsNoHeader(\"alert\")\n self.summaryRsrcsNoHeader(\"dashboard\")","def summary_stats(self):\n capital_gains = self.df['values'].iloc[-1].sum() - self.tc.starting_cash\n total_return = capital_gains / self.tc.starting_cash\n days_invested = (self.df.index[-1] - self.df.index[0]).days\n annualized_returns = (total_return + 1) ** (365 / days_invested) - 1\n annualized_volatility = self.df['returns'].std() * (252 ** 0.5)\n sharpe = annualized_returns / annualized_volatility\n num_trades = self.trades.shape[0]\n stats = pd.Series(\n data=[capital_gains, total_return, annualized_returns, annualized_volatility, sharpe, num_trades],\n index=['Capital Gains', 'Total Return', 'Annualized Return', 'Annualized Volatility', 'Sharpe Ratio',\n 'Number of Trades']\n )\n return stats","def report_totals(output):\n groups = (STATS_PATC.match(line) for line in output.splitlines())\n tuples = (g.groups() for g in groups if g)\n\n results = [0,0,0,0,0]\n for t in tuples:\n results[0] += int(t[0]) # total\n results[1] += int(t[1]) # failures\n results[2] += int(t[2]) # errors\n results[3] += int(t[3]) # skipped\n results[4] += float(t[4]) # elapsed time\n\n print 'Tests run: %d, Failures: %d, Errors: %d, Skipped: %d, '\\\n 'Time elapsed: %.2f' % tuple(results)","def get_coverage_report_model()->CoverageReport:\n covergroups = CoverageRegistry.inst().covergroup_types()\n\n db = MemFactory.create() \n save_visitor = CoverageSaveVisitor(db)\n now = datetime.now\n save_visitor.save(TestData(\n UCIS_TESTSTATUS_OK,\n \"UCIS:simulator\",\n ucis.ucis_Time()), covergroups)\n\n return CoverageReportBuilder.build(db)","def hit_coverage(self):\n s = self.hit_aln.replace(\"=\", \"\")\n return len(s)","def coverage(text: str) -> float:\n words = set(text.split(' '))\n return len([w for w in words if frequency(w) != 0]) / len(words) * 100","def metadata_reporter(self):\n logging.info('Creating summary report')\n header = '{}\\n'.format(','.join(self.headers))\n # Create a string to store all the results\n data = str()\n for sample in self.metadata:\n # Add the value of the appropriate attribute to the results string\n data += GenObject.returnattr(sample, 'name')\n # SampleName\n data += GenObject.returnattr(sample.run, 'SamplePlate')\n # Genus\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\n # SamplePurity\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\n # N50\n n50 = GenObject.returnattr(sample.quast, 'N50',\n number=True)\n if n50 != '-,':\n data += n50\n else:\n data += '0,'\n # NumContigs\n data += GenObject.returnattr(sample.quast, 'num_contigs',\n number=True)\n # TotalLength\n data += GenObject.returnattr(sample.quast, 'Total_length',\n number=True)\n # MeanInsertSize\n data += GenObject.returnattr(sample.quast, 'mean_insert',\n number=True)\n # InsertSizeSTD\n data += GenObject.returnattr(sample.quast, 'std_insert',\n number=True)\n # AverageCoverageDepth\n data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\n number=True)\n # CoverageDepthSTD\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\n number=True)\n # PercentGC\n data += GenObject.returnattr(sample.quast, 'GC',\n number=True)\n # MASH_ReferenceGenome\n data += GenObject.returnattr(sample.mash, 'closestrefseq')\n # MASH_NumMatchingHashes\n data += GenObject.returnattr(sample.mash, 'nummatches')\n # 16S_result\n data += 
GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\n # 16S PercentID\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\n # CoreGenesPresent\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\n # rMLST_Result\n try:\n # If the number of matches to the closest reference profile is 53, return the profile number\n if sample.rmlst.matches == 53:\n if type(sample.rmlst.sequencetype) is list:\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\n else:\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\n data += rmlst_seq_type\n else:\n # Otherwise the profile is set to new\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_Result\n try:\n if sample.mlst.matches == 7:\n if type(sample.mlst.sequencetype) is list:\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\n else:\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\n data += mlst_seq_type\n else:\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_gene_X_alleles\n try:\n # Create a set of all the genes present in the results (gene name split from allele)\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\n for gene in sorted(gene_set):\n allele_list = list()\n # Determine all the alleles that are present for each gene\n for allele in sample.mlst.combined_metadata_results:\n if gene in allele:\n allele_list.append(allele.replace(' ', '_'))\n # If there is more than one allele in the sample, add both to the string separated by a ';'\n if len(allele_list) > 1:\n data += '{},'.format(';'.join(allele_list))\n # Otherwise add the only allele\n else:\n data += allele_list[0] + ','\n # If there are fewer than seven matching alleles, add a ND for each missing result\n if len(gene_set) < 7:\n data += (7 - len(gene_set)) * 'ND,'\n except AttributeError:\n # data += '-,-,-,-,-,-,-,'\n data += 'ND,ND,ND,ND,ND,ND,ND,'\n # E_coli_Serotype\n try:\n # If no O-type was found, set the output to be O-untypeable\n if ';'.join(sample.ectyper.o_type) == '-':\n otype = 'O-untypeable'\n else:\n otype = sample.ectyper.o_type\n # Same as above for the H-type\n if ';'.join(sample.ectyper.h_type) == '-':\n htype = 'H-untypeable'\n\n else:\n htype = sample.ectyper.h_type\n serotype = '{otype}:{htype},'.format(otype=otype,\n htype=htype)\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\n except AttributeError:\n data += 'ND,'\n # SISTR_serovar_antigen\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\n # SISTR_serovar_cgMLST\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\n # SISTR_serogroup\n data += GenObject.returnattr(sample.sistr, 'serogroup')\n # SISTR_h1\n data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\n # SISTR_h2\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\n # SISTR_serovar\n data += GenObject.returnattr(sample.sistr, 'serovar')\n # GeneSeekr_Profile\n try:\n if sample.genesippr.report_output:\n data += ';'.join(sample.genesippr.report_output) + ','\n else:\n data += 'ND,'\n except AttributeError:\n data += 'ND,'\n # Vtyper_Profile\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\n # AMR_Profile and 
resistant/sensitive status\n if sample.resfinder_assembled.pipelineresults:\n # Profile\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\n r_set=';'.join(sorted(list(resistance_set))))\n data += ','\n # Resistant/Sensitive\n data += 'Resistant,'\n else:\n # Profile\n data += 'ND,'\n # Resistant/Sensitive\n data += 'Sensitive,'\n # Plasmid Result'\n if sample.mobrecon.pipelineresults:\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\n data += '{plasmid}({details});'.format(plasmid=plasmid,\n details=details)\n data += ','\n else:\n data += 'ND,'\n # TotalPredictedGenes\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\n number=True)\n # PredictedGenesOver3000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\n number=True)\n # PredictedGenesOver1000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\n number=True)\n # PredictedGenesOver500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\n number=True)\n # PredictedGenesUnder500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\n number=True)\n # AssemblyDate\n data += datetime.now().strftime('%Y-%m-%d') + ','\n # PipelineVersion\n data += self.commit + ','\n # Name of the database used in the analyses\n data += os.path.split(self.reffilepath)[-1] + ','\n # Database download date\n data += self.download_date\n # Append a new line to the end of the results for this sample\n data += '\\n'\n # Replace any NA values with ND\n cleandata = data.replace('NA', 'ND')\n with open(os.path.join(self.reportpath, 'combinedMetadata.csv'), 'w') as metadatareport:\n metadatareport.write(header)\n metadatareport.write(cleandata)","def get_summary_stats(self, output_csv=None):\n\n contig_size_list = []\n\n self.summary_info[\"ncontigs\"] = len(self.contigs)\n\n for contig_id, sequence in self.contigs.items():\n\n logger.debug(\"Processing contig: {}\".format(contig_id))\n\n # Get contig sequence size\n contig_len = len(sequence)\n\n # Add size for average contig size\n contig_size_list.append(contig_len)\n\n # Add to total assembly length\n self.summary_info[\"total_len\"] += contig_len\n\n # Add to average gc\n self.summary_info[\"avg_gc\"].append(\n sum(map(sequence.count, [\"G\", \"C\"])) / contig_len\n )\n\n # Add to missing data\n self.summary_info[\"missing_data\"] += sequence.count(\"N\")\n\n # Get average contig size\n logger.debug(\"Getting average contig size\")\n self.summary_info[\"avg_contig_size\"] = \\\n sum(contig_size_list) / len(contig_size_list)\n\n # Get average gc content\n logger.debug(\"Getting average GC content\")\n self.summary_info[\"avg_gc\"] = \\\n sum(self.summary_info[\"avg_gc\"]) / len(self.summary_info[\"avg_gc\"])\n\n # Get N50\n logger.debug(\"Getting N50\")\n cum_size = 0\n for l in sorted(contig_size_list, reverse=True):\n cum_size += l\n if cum_size >= self.summary_info[\"total_len\"] / 2:\n self.summary_info[\"n50\"] = l\n break\n\n if output_csv:\n logger.debug(\"Writing report to csv\")\n # Write summary info to CSV\n with open(output_csv, \"w\") as fh:\n summary_line = \"{}, {}\\\\n\".format(\n self.sample, \",\".join(\n [str(x) for x in self.summary_info.values()]))\n fh.write(summary_line)","def test_concentration_profile(self):\n # TODO: add an output for average particle concentration","def cowreport():\n central = 
pytz.timezone(\"America/Chicago\")\n yesterday = (utc() - datetime.timedelta(days=1)).astimezone(central)\n midnight = yesterday.replace(hour=0, minute=0)\n midutc = midnight.astimezone(pytz.UTC)\n begints = midutc.strftime(\"%Y-%m-%dT%H:%M\")\n endts = (midutc + datetime.timedelta(hours=24)).strftime(\"%Y-%m-%dT%H:%M\")\n api = (\n f\"http://iem.local/api/1/cow.json?begints={begints}&endts={endts}&\"\n \"phenomena=SV&phenomena=TO&lsrtype=SV&lsrtype=TO\"\n )\n data = requests.get(api, timeout=60).json()\n st = data[\"stats\"]\n if st[\"events_total\"] == 0:\n text = \"No SVR+TOR Warnings Issued.\"\n html = f\"
<h3>IEM Cow Report</h3><pre>{text}</pre>
\"\n txt = f\"> IEM Cow Report\\n{text}\\n\"\n return txt, html\n\n vp = st[\"events_verified\"] / float(st[\"events_total\"]) * 100.0\n text = (\n f\"SVR+TOR Warnings Issued: {st['events_total']:3.0f} \"\n f\"Verified: {st['events_verified']:3.0f} [{vp:.1f}%]\\n\"\n \"Polygon Size Versus County Size \"\n f\"[{st['size_poly_vs_county[%]']:.1f}%]\\n\"\n \"Average Perimeter Ratio \"\n f\"[{st['shared_border[%]']:.1f}%]\\n\"\n \"Percentage of Warned Area Verified (15km) \"\n f\"[{st['area_verify[%]']:.1f}%]\\n\"\n \"Average Storm Based Warning Size \"\n f\"[{st['avg_size[sq km]']:.0f} sq km]\\n\"\n f\"Probability of Detection(higher is better) [{st['POD[1]']:.2f}]\\n\"\n f\"False Alarm Ratio (lower is better) [{st['FAR[1]']:.2f}]\\n\"\n f\"Critical Success Index (higher is better) [{st['CSI[1]']:.2f}]\\n\"\n )\n\n html = f\"
<h3>IEM Cow Report</h3><pre>{text}</pre>
\"\n txt = f\"> IEM Cow Report\\n{text}\\n\"\n\n return txt, html","def run(self):\n cmd = 'coverage run setup.py test && coverage report -m'\n check_call(cmd, shell=True)","def final_report(self):\n print('Final Count for', self.reason, self.successes, 'of', self.tests, 'tests passed')","def parse_coverage(depth_filename, allow_missing=True):\n\n delims = [ 0, 10, 100, 1000, 2000, 10000]\n nbins = len(delims)+1\n\n bin_labels = ['0'] + [f\"{delims[i-1]+1}x-{delims[i]}x\" for i in range(1,nbins-1)] + [f\"> {delims[-1]}x\"]\n bin_labels = [ f\"Fraction with {l} coverage\" for l in bin_labels ]\n\n ret = {\n 'bin_labels': bin_labels,\n 'bin_fractions': [ None for b in range(nbins) ],\n 'mean_coverage': None,\n 'qc_meancov': 'FAIL',\n 'qc_cov100': 'FAIL',\n 'qc_cov1000': 'FAIL',\n 'cov100': 0\n }\n\n if file_is_missing(depth_filename, allow_missing):\n return ret\n\n coverage = []\n for line in open(depth_filename):\n t = line.split('\\t')\n assert len(t) == 3\n coverage.append(int(float(t[2].strip(\"\\n\"))))\n\n coverage = np.array(coverage)\n bin_assignments = np.searchsorted(np.array(delims), coverage, side='left')\n bin_fractions = np.bincount(bin_assignments, minlength=nbins) / float(len(coverage))\n assert bin_fractions.shape == (nbins,)\n\n\n ret['cov100'] = np.mean(coverage >= 100)\n ret['bin_fractions'] = [ xround(f,3) for f in bin_fractions ]\n ret['mean_coverage'] = xround(np.mean(coverage), 1)\n ret['qc_meancov'] = \"PASS\" if (np.mean(coverage) >= 2000) else \"FAIL\"\n ret['qc_cov100'] = \"PASS\" if (np.mean(coverage >= 100) >= 0.9) else \"FAIL\"\n ret['qc_cov1000'] = \"PASS\" if (np.mean(coverage >= 1000) >= 0.9) else \"WARN\"\n\n return ret","def do(self, callback_name, *args):\n value_dict = self._evaluator.evaluate(self.data_stream)\n print(\"Train test coverage:{0}\".format(self.coverage))\n for key, value in value_dict.items():\n print(\"{0}:{1}\".format(key, value * self.coverage))","def test_run_coverage(self):\n cmd = GreenTestCommand(Distribution())\n cmd.coverage = True\n cmd.ensure_finalized()\n cmd.run()\n self.assertThat(_subprocess_call_args(), Contains(\"-r\"))","def cuv(ctx, coverage_fname, exclude, branch):\n if coverage_fname is None:\n coverage_fname = find_coverage_data('.')\n # coverage_fname still could be None\n\n cfg = Config()\n ctx.obj = cfg\n\n cfg.nice_width = min(80, shutil.get_terminal_size()[0])\n cfg.exclude = exclude\n\n cfg.branch = branch\n if coverage_fname is not None:\n cfg.data = coverage.Coverage(data_file=coverage_fname)\n cfg.data.load()\n else:\n raise click.UsageError(\n \"No coverage data. 
Do you have a .coverage file?\"\n )","def test_summary_success(self):\n\n summary_data_key = 'summary_data'\n\n response = self.send_request(view_name='upload_summary_view', params={'upload_id': 1})\n context_data = response.context_data\n self.assertTrue(summary_data_key in context_data)\n\n summary_data = context_data[summary_data_key]\n self.assertEquals(3, len(summary_data))\n\n self.assertEqual(Decimal('100.0'), summary_data[0].pre_tax_amount)","def _coverage(self, chr, limit, nbins):\n\n c = np.zeros(nbins, dtype=np.int)\n chr_start, chr_stop = self.refs[chr][1:]\n bin_size = float((limit[1] - limit[0]) / nbins)\n\n for i in range(chr_start, chr_stop):\n read_start = self.lines[i][3]\n read_len = len(self.lines[i][9])\n\n start_bin = int((read_start - limit[0]) / bin_size)\n stop_bin = int((read_start + read_len - limit[0]) / bin_size)\n\n # print start_bin, stop_bin\n c[start_bin:stop_bin + 1] += 1\n \n return c","def getTotalCaseAndControlCounts(genotypesFilename):\r\n\r\n\tcomphetSuffix = \"\"\r\n\tif \"comphet\" in genotypesFilename:\r\n\t\tcomphetSuffix = \" (#1)\"\r\n\r\n\t# We read through the whole file. Might take a while, but easier than dealing with all edge cases.\r\n\tmaxCoveredCasePercentage = 0\r\n\tmaxCoveredControlPercentage = 0\r\n\treader = csv.reader(open(genotypesFilename, \"r\"))\r\n\theader = next(reader)\r\n\r\n\tfor variant in reader:\r\n\r\n\t\tvariant = dict(zip(header, variant))\r\n\t\tcasePercentage = float(variant[\"Covered Case Percentage\" + comphetSuffix])/100.0\r\n\t\tif casePercentage > maxCoveredCasePercentage:\r\n\t\t\tmaxCoveredCasePercentage = casePercentage\r\n\t\t\tcoveredCases = int(variant[\"Covered Case\" + comphetSuffix])\r\n\t\t\ttotalCases = int(round(coveredCases/casePercentage))\r\n\r\n\t\tcontrolPercentage = float(variant[\"Covered Ctrl Percentage\" + comphetSuffix])/100.0\r\n\t\tif controlPercentage > maxCoveredControlPercentage:\r\n\t\t\tmaxCoveredControlPercentage = controlPercentage\r\n\t\t\tcoveredControls = int(variant[\"Covered Ctrl\" + comphetSuffix])\r\n\t\t\ttotalControls = int(round(coveredControls/controlPercentage))\r\n\treturn totalCases, totalControls","def generate_report():\n if os.path.isdir(\"build/coverage\"):\n shutil.rmtree(\"build/coverage\")\n commands = '''\nscons -uij32 --optimization=coverage controller/cplusplus_test\nlcov --base-directory build/coverage --directory build/coverage -c -o build/coverage/controller_test.info\ngenhtml -o build/coverage/controller/test_coverage -t test --num-spaces 4 build/coverage/controller_test.info\n'''\n for cmd in commands.splitlines():\n cmd_args = cmd.split()\n if (len(cmd_args) == 0):\n continue\n cmd = cmd_args[0]\n cmd_path = find_executable(cmd)\n if not cmd_path:\n continue\n pid = os.fork()\n if pid == 0:\n # Avoid stdout buffering by execing command into child process.\n os.execv(cmd_path, cmd_args)\n os.waitpid(pid, 0)"],"string":"[\n \"def get_project_test_coverage(self) -> None:\\n print_statistics = {}\\n total_number_columns = 0\\n number_columns_without_tests = 0\\n\\n for model_name in self.dbt_tests.keys():\\n columns = self.dbt_tests[model_name]\\n\\n model_number_columns = 0\\n model_columns_without_tests = 0\\n\\n for column in columns:\\n total_number_columns += 1\\n model_number_columns += 1\\n\\n if len(column[\\\"tests\\\"]) == 0:\\n number_columns_without_tests += 1\\n model_columns_without_tests += 1\\n\\n print_statistics[model_name] = self.calculate_coverage_percentage(\\n misses=model_columns_without_tests, total=model_number_columns\\n )\\n\\n 
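# the blank key below renders as an empty spacer row before the project-wide total\\n        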
print_statistics[\\\"\\\"] = \\\"\\\"\\n print_statistics[\\\"Total\\\"] = self.calculate_coverage_percentage(\\n misses=number_columns_without_tests, total=total_number_columns\\n )\\n\\n self.create_table(\\n title=\\\"Test Coverage\\\",\\n columns=[\\\"Model Name\\\", r\\\"% coverage\\\"],\\n data=print_statistics,\\n )\",\n \"def cov():\\n tests = unittest.TestLoader().discover('project/tests')\\n result = unittest.TextTestRunner(verbosity=2).run(tests)\\n if result.wasSuccessful():\\n COV.stop()\\n COV.save()\\n print('Coverage Summary:')\\n COV.report()\\n COV.html_report()\\n COV.erase()\\n return 0\\n return 1\",\n \"def task_coverage():\\n return {\\n 'actions': ['py.test --cov nikola --cov-report term-missing tests/'],\\n 'verbosity': 2,\\n }\",\n \"def coverage_stats(self) -> (int, int):\\n covered = sum(1 for line in self.source_code if line.coverage > 0)\\n lines = sum(1 for line in self.source_code if line.coverage >= 0)\\n return (covered, lines)\",\n \"def derive_project_coverage(self) -> None:\\n self.get_project_column_description_coverage()\\n self.get_project_test_coverage()\",\n \"def cov():\\n tests = unittest.TestLoader().discover('tests')\\n result = unittest.TextTestRunner(verbosity=1).run(tests)\\n if result.wasSuccessful():\\n COV.stop()\\n COV.save()\\n print('Coverage Summary:')\\n COV.report()\\n basedir = os.path.abspath(os.path.dirname(__file__))\\n covdir = os.path.join(basedir, 'tmp/coverage')\\n COV.html_report(directory=covdir)\\n print('HTML version: file://%s/index.html' % covdir)\\n COV.erase()\\n return 0\\n return 1\",\n \"def show_coverage_report(bv, save_output=False):\\n if no_coverage_warn():\\n return\\n num_functions, blocks_covered, blocks_total = covdb.get_overall_function_coverage()\\n title = \\\"Coverage Report for %s\\\" % covdb.module_name\\n report = \\\"%d Functions, %d blocks covered of %d total\\\\n\\\" % (num_functions, blocks_covered, blocks_total)\\n embedded_css = '''\\\\n'''\\n report_html = (\\\"
<h2>%d Functions, %d blocks covered of %d total</h2>\\\\n\\\" %\\n                   (num_functions, blocks_covered, blocks_total))\\n    column_titles = ['Start Address', 'Function Name', 'Coverage Percent', 'Blocks Covered / Total', 'Complexity']\\n    report_html += (\\\"<table>\\\\n<tr>%s</tr>\\\\n\\\" % ''.join('<th>%s</th>' % title for title in column_titles))\\n    function_dict = {f.name: f for f in bv.functions}\\n    name_dict = {}\\n    for f in bv.functions:\\n        name_dict[f.name] = f.symbol.short_name\\n    max_name_length = max([len(name) for name in name_dict.values()])\\n    for mangled_name, stats in sorted(covdb.function_stats.items(), key=lambda x: x[1].coverage_percent, reverse=True):\\n        name = name_dict[mangled_name]\\n        pad = \\\" \\\" * (max_name_length - len(name))\\n        function_addr = function_dict[mangled_name].start\\n        report += \\\"  0x%08x  %s%s : %.2f%% coverage\\\\t( %-3d / %3d blocks)\\\\n\\\" % \\\\\\n                  (function_addr, name, pad, stats.coverage_percent, stats.blocks_covered, stats.blocks_total)\\n        # build the html table row one item at a time, then combine them\\n        function_link = '<a href=\\\"#0x%08x\\\">0x%08x</a>' % (function_addr, function_addr)\\n        function_name = html_escape(name)\\n        coverage_percent = '%.2f%% coverage' % stats.coverage_percent\\n        blocks_covered = '%d / %d blocks' % (stats.blocks_covered, stats.blocks_total)\\n        row_data = [function_link, function_name, coverage_percent, blocks_covered, str(stats.complexity)]\\n        table_row = '<tr>' + ''.join('<td>%s</td>' % item for item in row_data) + '</tr>'\\n        report_html += table_row\\n\\n    report_html += \\\"</table>
\\\\n\\\"\\n report_html = '\\\\n\\\\n%s\\\\n\\\\n\\\\n%s\\\\n\\\\n' % (embedded_css, report_html)\\n\\n # Save report if it's too large to display or if user asks\\n target_dir, target_filename = os.path.split(bv.file.filename)\\n html_file = os.path.join(target_dir, 'coverage-report-%s.html' % target_filename)\\n choices = [\\\"Cancel Report\\\", \\\"Save Report to File\\\", \\\"Save Report and Open in Browser\\\"]\\n choice = 0\\n save_file = 1\\n save_and_open = 2\\n if len(report_html) > 1307673: # if Qt eats even one little wafer more, it bursts\\n choice = interaction.get_choice_input(\\n \\\"Qt can't display a report this large. Select an action.\\\",\\n \\\"Generated report too large\\\",\\n choices)\\n if choice in [save_file, save_and_open]:\\n save_output = True\\n else:\\n bv.show_html_report(title, report_html, plaintext=report)\\n\\n if save_output:\\n with open(html_file, 'w') as f:\\n f.write(report_html)\\n log.log_info(\\\"[*] Saved HTML report to %s\\\" % html_file)\\n if choice == save_file:\\n interaction.show_message_box(\\\"Report Saved\\\",\\n \\\"Saved HTML report to: %s\\\" % html_file,\\n enums.MessageBoxButtonSet.OKButtonSet,\\n enums.MessageBoxIcon.InformationIcon)\\n if choice == save_and_open:\\n open_new_browser_tab(\\\"file://\\\" + html_file)\",\n \"def cov():\\n tests = unittest.TestLoader().discover('project/tests')\\n result = unittest.TextTestRunner(verbosity=2).run(tests)\\n if result.wasSuccessful():\\n COV.stop()\\n COV.save()\\n print('Coverage Summary:')\\n COV.report()\\n basedir = os.path.abspath(os.path.dirname(__file__))\\n covdir = os.path.join(basedir, 'tmp/coverage')\\n COV.html_report(directory=covdir)\\n print('HTML version: file://%s/index.html' % covdir)\\n COV.erase()\\n return 0\\n return 1\",\n \"def get_project_total_test_coverage(self) -> str:\\n number_not_documented_columns = 0\\n number_of_columns = 0\\n\\n for description in self.dbt_definitions.values():\\n if description == COLUMN_NOT_DOCUMENTED:\\n number_not_documented_columns += 1\\n number_of_columns += 1\\n\\n return self.calculate_coverage_percentage(\\n misses=number_not_documented_columns,\\n total=number_of_columns,\\n )\",\n \"def percent_covered(self):\\n out = self.coverage\\n return out and out.cover\",\n \"def coverage(self):\\n try:\\n return self.found * 100 / self.needed\\n except ZeroDivisionError:\\n return 100.0\",\n \"def test(coverage):\\n print('success')\\n pass\",\n \"def cov():\\n cov = coverage.coverage(branch=True, include='project/*')\\n cov.start()\\n tests = unittest.TestLoader().discover('tests')\\n unittest.TextTestRunner(verbosity=2).run(tests)\\n cov.stop()\\n cov.save()\\n print('Coverage Summary:')\\n cov.report()\\n basedir = os.path.abspath(os.path.dirname(__file__))\\n covdir = os.path.join(basedir, 'tmp/coverage')\\n cov.html_report(directory=covdir)\\n print('HTML version: file://%s/index.html' % covdir)\\n cov.erase()\",\n \"def coverage(session):\\n session.install(\\\"coverage[toml]\\\", \\\"codecov\\\")\\n session.run(\\\"coverage\\\", \\\"xml\\\", \\\"--fail-under=0\\\")\\n session.run(\\\"codecov\\\", *session.posargs)\",\n \"def calculate_coverage_percentage(self, misses: int, total: int) -> str:\\n if total == 0:\\n return \\\"0.0\\\"\\n\\n percentage_failure = round((1 - (misses / total)) * 100, 1)\\n return str(percentage_failure)\",\n \"def cover(ctx, html=False):\\n header(cover.__doc__)\\n extra = \\\"--cov-report html\\\" if html else \\\"\\\"\\n with ctx.cd(ROOT):\\n ctx.run(\\n \\\"pytest --benchmark-skip --cov 
flask_restx --cov-report term --cov-report xml {0}\\\".format(\\n extra\\n ),\\n pty=True,\\n )\",\n \"def get_project_column_description_coverage(self) -> None:\\n print_statistics = {}\\n for model_name, path in self.all_dbt_models.items():\\n schema_content = open_yaml(path)\\n\\n number_documented_columns = len(\\n self.get_documented_columns(\\n schema_content=schema_content,\\n model_name=model_name,\\n )\\n )\\n\\n number_not_documented_columns = len(\\n self.get_not_documented_columns(\\n schema_content=schema_content,\\n model_name=model_name,\\n )\\n )\\n\\n print_statistics[model_name] = self.calculate_coverage_percentage(\\n misses=number_not_documented_columns,\\n total=(number_documented_columns + number_not_documented_columns),\\n )\\n\\n print_statistics[\\\"\\\"] = \\\"\\\"\\n print_statistics[\\\"Total\\\"] = self.get_project_total_test_coverage()\\n\\n self.create_table(\\n title=\\\"Documentation Coverage\\\",\\n columns=[\\\"Model Name\\\", r\\\"% coverage\\\"],\\n data=print_statistics,\\n )\",\n \"def get_coverage_report(details=False)->str:\\n model = get_coverage_report_model()\\n\\n out = StringIO() \\n formatter = TextCoverageReportFormatter(model, out)\\n formatter.details = details\\n formatter.report()\\n \\n return out.getvalue()\",\n \"def cov():\\n cov = coverage.coverage(\\n branch=True,\\n include='project/*',\\n omit=\\\"*/__init__.py\\\"\\n )\\n cov.start()\\n tests = unittest.TestLoader().discover('tests')\\n unittest.TextTestRunner(verbosity=2).run(tests)\\n cov.stop()\\n cov.save()\\n print 'Coverage Summary:'\\n cov.report()\\n basedir = os.path.abspath(os.path.dirname(__file__))\\n covdir = os.path.join(basedir, 'tmp/coverage')\\n cov.html_report(directory=covdir)\\n print('HTML version: file://%s/index.html' % covdir)\\n cov.erase()\",\n \"def derive_model_coverage(self) -> None:\\n self.get_model_column_description_coverage()\\n self.get_model_test_coverage()\",\n \"def cov(test_class):\\n if test_class == 'all':\\n tests = unittest.TestLoader().discover('project/tests')\\n else:\\n # note, test module must be imported above, doing lazily for now\\n test_module = globals()[test_class]\\n tests = unittest.TestLoader().loadTestsFromTestCase(test_module)\\n result = unittest.TextTestRunner(verbosity=2).run(tests)\\n if result.wasSuccessful():\\n COV.stop()\\n COV.save()\\n print('Coverage Summary:')\\n COV.report()\\n basedir = os.path.abspath(os.path.dirname(__file__))\\n covdir = os.path.join(basedir, 'tmp/coverage')\\n COV.html_report(directory=covdir)\\n print('HTML version: file://%s/index.html' % covdir)\\n COV.erase()\\n return 0\\n return 1\",\n \"def output_summary_stats(self):\\n total_return = self.equity_curve['equity_curve'][-1]\\n returns = self.equity_curve['returns']\\n pnl = self.equity_curve['equity_curve']\\n \\n sharpe_ratio = create_sharpe_ratio(returns, periods=252*6.5*60)\\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\\n self.equity_curve['drawdown'] = drawdown\\n \\n stats = [(\\\"Total Return\\\", \\\"%0.2f%%\\\" % ((total_return - 1.0) * 100.0)), \\n (\\\"Sharpe Ratio\\\", \\\"%0.2f\\\" % sharpe_ratio), \\n (\\\"Max Drawdown\\\", \\\"%0.2f%%\\\" % (max_dd * 100.0)), \\n (\\\"Drawdown Duration\\\", \\\"%d\\\" % dd_duration)]\\n \\n self.equity_curve.to_csv('equity.csv')\\n \\n return stats\",\n \"def current_nbc_coverage():\\n covered = 0\\n total = 0\\n for layer in layer_to_compute:\\n covered = covered + np.count_nonzero(nbc_cov_dict[layer.name])\\n total = total + np.size(nbc_cov_dict[layer.name])\\n return 
covered / float(total)\",\n \"def output_summary_stats(self):\\n total_return = self.equity_curve['equity_curve'][-1]\\n returns = self.equity_curve['returns']\\n pnl = self.equity_curve['equity_curve']\\n \\n sharpe_ratio = create_sharpe_ratio(returns) #, periods=252*6.5*60) ??? \\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\\n self.equity_curve['drawdown'] = drawdown\\n \\n stats = [(\\\"Total Return\\\", \\\"%0.2f%%\\\" % ((total_return - 1.0) * 100.0)), \\n (\\\"Sharpe Ratio\\\", \\\"%0.2f\\\" % sharpe_ratio), \\n (\\\"Max Drawdown\\\", \\\"%0.2f%%\\\" % (max_dd * 100.0)), \\n (\\\"Drawdown Duration\\\", \\\"%d\\\" % dd_duration)]\\n \\n self.equity_curve.to_csv('equity.csv')\\n \\n return stats\",\n \"def get_test_cases_coverage(session_id):\\n tc_stats={}\\n tc_stats_list=[]\\n total_executed=0\\n sql='SELECT DISTINCT(test_id) FROM stats WHERE session_id=:sid AND test_id!=\\\"null\\\"'\\n params={\\\"sid\\\":session_id}\\n conn=sqlite3.connect(CONNECTION_STRING)\\n c=conn.cursor()\\n c.execute(sql,params)\\n tests=c.fetchall()\\n conn.close()\\n if len(tests)>0:\\n for t in tests:\\n total_executed=0\\n sql=\\\"SELECT DISTINCT(file_id) FROM stats WHERE session_id=:sid AND test_id=:tid\\\"\\n params={\\\"sid\\\":session_id,\\\"tid\\\":t[0]}\\n conn=sqlite3.connect(CONNECTION_STRING)\\n c=conn.cursor()\\n c.execute(sql,params)\\n files=c.fetchall()\\n conn.close()\\n for f in files:\\n line_count=get_executable_lines_count_for_file(f[0])\\n # get executions\\n sql=\\\"SELECT COUNT(DISTINCT line_guid) FROM stats WHERE session_id= :sid AND file_id= :fid AND test_id=:tid\\\"\\n params={\\\"sid\\\":session_id,\\\"tid\\\":t[0],\\\"fid\\\":f[0]}\\n conn=sqlite3.connect(CONNECTION_STRING)\\n c=conn.cursor()\\n c.execute(sql,params)\\n executed=c.fetchone()\\n conn.close()\\n total_executed+=executed[0]\\n # save test case and it's executions\\n tc_stats={}\\n tc_stats[\\\"test_id\\\"]=t[0]\\n tc_stats[\\\"total_executed\\\"]=total_executed\\n tc_stats[\\\"total_executed\\\"]\\n \\n tc_stats_list.append(tc_stats)\\n return tc_stats_list\",\n \"def main(plot):\\n plot_coverage(plot)\\n return 0\",\n \"def coverage(context):\\n context.run(\\\" \\\".join([\\n \\\"python -m pytest\\\",\\n \\\"--cov=%s\\\" % PACKAGE_NAME,\\n \\\"--cov-report html\\\",\\n \\\"--cov-branch\\\",\\n \\\"--cov-fail-under=75\\\"\\n ]))\",\n \"def overallCoverage(dataset, embeddings_col):\\n from sparknlp.internal import _EmbeddingsOverallCoverage\\n from sparknlp.common import CoverageResult\\n return CoverageResult(_EmbeddingsOverallCoverage(dataset, embeddings_col).apply())\",\n \"def coverage(ctx):\\n ctx.run(\\\"coverage run --source {PROJECT_NAME} -m pytest\\\".format(PROJECT_NAME=PROJECT_NAME))\\n ctx.run(\\\"coverage report -m\\\")\\n ctx.run(\\\"coverage html\\\")\",\n \"def print_summary(self):\\n #outcomes = self.get_outcomes()\\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\\n print('')\\n print ('Passes: %i' % self.get_pass_count())\\n print ('Fails: %i' % self.get_fail_count())\\n print ('Errors: %i' % self.get_error_count())\\n print ('Untested: %i' % self.get_untested_count())\\n print ('Skipped: %i' % self.get_skipped_count())\",\n \"def output_summary_stats(self, filename):\\r\\n\\r\\n total_return 
= self.equity_curve['equity_curve'][-1]\\r\\n returns = self.equity_curve['returns']\\r\\n pnl = self.equity_curve['equity_curve']\\r\\n\\r\\n sharpe_ratio = create_sharpe_ratio(returns, periods=252)\\r\\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\\r\\n self.equity_curve['drawdown'] = drawdown\\r\\n\\r\\n stats = [(\\\"Total Return\\\", \\\"%0.2f%%\\\" % \\\\\\r\\n ((total_return - 1.0) * 100.0)),\\r\\n (\\\"Sharpe Ratio\\\", \\\"%0.2f%%\\\" % sharpe_ratio),\\r\\n (\\\"Max Drawdown\\\", \\\"%0.2f%%\\\" % (max_dd * 100.0)),\\r\\n (\\\"Drawdown Duration\\\", \\\"%f\\\" % dd_duration)]\\r\\n self.equity_curve.to_csv(filename)\\r\\n return stats\",\n \"def run_test_summary1c():\\n print()\\n print('--------------------------------------------------')\\n print('Testing the summary1c function:')\\n print('--------------------------------------------------')\\n\\n format_string = ' summary1c( {} )'\\n test_results = [0, 0] # Number of tests passed, failed.\\n\\n # Test 1:\\n expected = 1 + 2 + 5 + 7 # which is 15\\n sequence = (20, 23, 29, 30, 33, 29, 100, 2, 4)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 2:\\n expected = 1 + 4 + 6 # which is 11\\n sequence = (23, 29, 30, 33, 29, 100, 2)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 3:\\n expected = 16\\n sequence = (20, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30, 2)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 4:\\n expected = 5\\n sequence = (29, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 5:\\n expected = 5\\n sequence = (30, 33, 29, 17, 100, 99, 40, 30, 30)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 6:\\n expected = 2\\n sequence = (30, 33, 13, 100, 99, 40, 30, 30)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 7:\\n expected = 0\\n sequence = (30, 33, 4, 10, 21, 100, 99, 40, 30, 30)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 8:\\n expected = 3\\n sequence = (5, 3, 3)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 9:\\n expected = 1\\n sequence = (5, 3)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 10:\\n expected = 0\\n sequence = (5,)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n 
print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 11:\\n expected = 0\\n sequence = ()\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n # Test 12:\\n expected = 0\\n sequence = (4,)\\n print_expected_result_of_test([sequence], expected, test_results,\\n format_string)\\n actual = summary1c(sequence)\\n print_actual_result_of_test(expected, actual, test_results)\\n\\n print_summary_of_test_results(test_results)\",\n \"def get_model_test_coverage(self) -> None:\\n # Init variables\\n model_number_columns = 0\\n model_columns_without_tests = 0\\n untested_columns = []\\n\\n columns = self.dbt_tests.get(self.model_name)\\n\\n if not columns:\\n logger.info(\\n f\\\"There is no documentation entry for '{self.model_name}' in your schema.yml files. \\\"\\n \\\"You might need to run `dbt-sugar doc` first.\\\"\\n )\\n return\\n\\n for column in columns:\\n model_number_columns += 1\\n if len(column[\\\"tests\\\"]) == 0:\\n model_columns_without_tests += 1\\n untested_columns.append(column[\\\"name\\\"])\\n\\n percentage_not_tested_columns = self.calculate_coverage_percentage(\\n misses=model_columns_without_tests, total=model_number_columns\\n )\\n\\n data = self.print_nicely_the_data(\\n data=untested_columns, total=percentage_not_tested_columns\\n )\\n\\n self.create_table(\\n title=\\\"Test Coverage\\\", columns=[\\\"Untested Columns\\\", r\\\"% coverage\\\"], data=data\\n )\",\n \"def output_summary_stats(self):\\n total_return = self.equity_curve['equity_curve'][-1]\\n returns = self.equity_curve['returns']\\n pnl = self.equity_curve['equity_curve']\\n\\n sharpe_ratio = create_sharpe_ratio(returns)\\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\\n self.equity_curve['drawdown'] = drawdown\\n if len(dd_duration) == 1:\\n dd_duration = dd_duration[0]\\n\\n stats = [(\\\"Total Return\\\", \\\"%0.2f%%\\\" % ((total_return - 1.0) * 100.0)),\\n (\\\"Sharpe Ratio\\\", \\\"%0.2f\\\" % sharpe_ratio),\\n (\\\"Max Drawdown\\\", \\\"%0.2f%%\\\" % (max_dd * 100.0)),\\n (\\\"Drawdown Duration\\\", \\\"%s\\\" % dd_duration)]\\n\\n self.equity_curve.to_csv('equity.csv')\\n self.positions.to_csv('positions.csv')\\n self.prices.to_csv('prices.csv')\\n\\n return stats\",\n \"def testViewCoverageData(self):\\n try:\\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\\\"pickle\\\")\\n covRefDbList = []\\n covSampleList = []\\n entryCountD = {}\\n for entryId in entryD:\\n for _, eD in entryD[entryId][\\\"selected_polymer_entities\\\"].items():\\n\\n analD = eD[\\\"anal_instances\\\"] if \\\"anal_instances\\\" in eD else {}\\n\\n for _, aD in analD.items():\\n entryCountD[entryId] = True\\n covRefDb = aD[\\\"coverage_inst_refdb\\\"]\\n covSample = aD[\\\"coverage_inst_entity\\\"]\\n if covRefDb is not None:\\n covRefDb = 0.0 if covRefDb < 0.0 else covRefDb\\n covRefDb = 1.0 if covRefDb > 1.0 else covRefDb\\n covRefDbList.append(covRefDb)\\n if covSample is not None:\\n covSample = 0.0 if covSample < 0.0 else covSample\\n covSample = 1.0 if covSample > 1.0 else covSample\\n covSampleList.append(covSample)\\n #\\n logger.info(\\\"covRefDbList %d covSampleList %d\\\", len(covRefDbList), len(covSampleList))\\n #\\n cu = DisorderChartUtils()\\n cu.doHistogramChart(\\n covRefDbList,\\n plotPath=self.__plotCoverageRefDb,\\n yPlotScale=\\\"log\\\",\\n yPlotMax=100000,\\n yPlotMin=1000,\\n xPlotMin=0.0,\\n xPlotMax=1.001,\\n xPlotIncr=0.1,\\n # 
yPlotMax=100000,\\n xPlotLabel=\\\"Coverage Fraction\\\",\\n yPlotLabel=\\\"Protein Instances\\\",\\n plotTitle=\\\"Reference Sequence Coverage\\\",\\n )\\n self.__writeLegend(\\n self.__plotCoverageRefDb,\\n \\\"UniProt reference sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \\\"\\n % (len(covRefDbList), len(entryCountD)),\\n )\\n cu.doHistogramChart(\\n covSampleList,\\n plotPath=self.__plotCoverageSample1,\\n yPlotScale=\\\"log\\\",\\n xPlotMin=0.0,\\n xPlotMax=1.001,\\n xPlotIncr=0.1,\\n yPlotMax=100000,\\n yPlotMin=1000,\\n # yPlotMax=100000,\\n xPlotLabel=\\\"Coverage Fraction\\\",\\n yPlotLabel=\\\"Protein Instances\\\",\\n plotTitle=\\\"Sample Sequence Coverage\\\",\\n )\\n self.__writeLegend(\\n self.__plotCoverageSample1,\\n \\\"Sample sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \\\" % (len(covSampleList), len(entryCountD)),\\n )\\n #\\n cu.doHistogramChart(\\n covSampleList,\\n plotPath=self.__plotCoverageSample2,\\n yPlotScale=\\\"log\\\",\\n yPlotMax=100000,\\n yPlotMin=1000,\\n xPlotMin=0.8,\\n xPlotMax=1.001,\\n xPlotIncr=0.1,\\n # yPlotMax=100000,\\n xPlotLabel=\\\"Coverage Fraction\\\",\\n yPlotLabel=\\\"Protein Instances\\\",\\n plotTitle=\\\"Sample Sequence Coverage\\\",\\n )\\n self.__writeLegend(\\n self.__plotCoverageSample1,\\n \\\"Sample sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \\\" % (len(covSampleList), len(entryCountD)),\\n )\\n\\n except Exception as e:\\n logger.exception(\\\"Failing with %s\\\", str(e))\\n self.fail()\",\n \"def test_coverage(self) -> None:\\n coverage = self.portfolio_coverage_tvp.get_portfolio_coverage(self.data, PortfolioAggregationMethod.WATS)\\n self.assertAlmostEqual(coverage, 32.0663, places=4,\\n msg=\\\"The portfolio coverage was not correct\\\")\",\n \"def to_html(self) -> str:\\n source_name = escape(self.source_name)\\n (covered, lines) = self.coverage_stats()\\n lines_stats = \\\"{} / {} ({} lines of code)\\\".format(covered, lines, len(self.source_code))\\n (br_covered, br_count, calls_covered, calls_count) = self.branch_stats()\\n branch_stats = \\\"{} / {}\\\".format(br_covered, br_count)\\n call_stats = \\\"{} / {}\\\".format(calls_covered, calls_count)\\n (fn_covered, fn_count) = self.function_stats()\\n fn_stats = \\\"{} / {}\\\".format(fn_covered, fn_count)\\n\\n self.decode_cpp_function_names()\\n\\n result = [\\\"\\\"\\\"\\n \\n \\n \\n \\n Coverage report of file \\\"\\\"\\\" + source_name + \\\"\\\"\\\"\\n \\n \\n \\n \\n
<body>\\n    <p><a href=\\\"index.html\\\">&lArr; Back</a> | Go to line #\\n    <input type=\\\"text\\\" size=\\\"6\\\"></p>\\n    <h1>\\\"\\\"\\\" + source_name + \\\"\\\"\\\"</h1>\\n    <table>\\n        <tr><th colspan=\\\"2\\\">Summary</th></tr>\\n        <tr><td>Lines</td><td>\\\"\\\"\\\" + lines_stats + \\\"\\\"\\\"</td></tr>\\n        <tr><td>Branches</td><td>\\\"\\\"\\\" + branch_stats + \\\"\\\"\\\"</td></tr>\\n        <tr><td>Calls</td><td>\\\"\\\"\\\" + call_stats + \\\"\\\"\\\"</td></tr>\\n        <tr><td>Functions</td><td>\\\"\\\"\\\" + fn_stats + \\\"\\\"\\\"</td></tr>\\n    </table>\\n    <table>\\n    <tbody>\\n    \\\"\\\"\\\"]\\n        result.extend(line.to_html() for line in self.source_code)\\n        result.append(\\\"\\\"\\\"\\n    </tbody>\\n    <tfoot>\\n        <tr><th>Branches</th><th>Cov</th><th>Line</th><th>Source</th></tr>\\n    </tfoot>\\n    </table>\\n    <h2>Functions</h2>\\n    <table>\\n    <tbody>\\n    \\\"\\\"\\\")\\n        result.extend(func.to_html() for func in self.source_functions)\\n        result.append(\\\"\\\"\\\"\\n    </tbody>\\n    <tfoot>\\n        <tr><th>Function</th><th>Calls</th><th>Ret.</th><th>Blk. Exec.</th></tr>\\n    </tfoot>\\n    </table>\\n    </body>\\n    </html>
\\n \\n \\n \\n \\\"\\\"\\\")\\n return '\\\\n'.join(result)\",\n \"def get_model_column_description_coverage(self) -> None:\\n not_documented_columns = self.get_not_documented_columns(\\n schema_content=self.model_content,\\n model_name=self.model_name,\\n ).keys()\\n\\n number_not_documented_columns = len(not_documented_columns)\\n number_documented_columns = len(\\n self.get_documented_columns(\\n schema_content=self.model_content,\\n model_name=self.model_name,\\n )\\n )\\n number_columns = number_documented_columns + number_not_documented_columns\\n\\n # This means that they are not columns, and we want to skip the printing.\\n if number_columns == 0:\\n return\\n\\n percentage_not_documented_columns = self.calculate_coverage_percentage(\\n misses=number_not_documented_columns,\\n total=number_columns,\\n )\\n logger.debug(\\n f\\\"percentage_not_documented_columns for '{self.model_name}': {percentage_not_documented_columns}\\\"\\n )\\n\\n data = self.print_nicely_the_data(\\n data=list(not_documented_columns), total=percentage_not_documented_columns\\n )\\n\\n self.create_table(\\n title=\\\"Documentation Coverage\\\",\\n columns=[\\\"Undocumented Columns\\\", r\\\"% coverage\\\"],\\n data=data,\\n )\",\n \"def main():\\n coverage = calculate_code_coverage()\\n platform = os.uname()[0]\\n if coverage < CODE_COVERAGE_GOAL[platform]:\\n data = {\\n 'expected': CODE_COVERAGE_GOAL[platform],\\n 'observed': coverage,\\n }\\n print '\\\\033[91mFAIL: %(observed).2f%% does not meet goal of %(expected).2f%%\\\\033[0m' % data\\n sys.exit(1)\",\n \"def current_knc_coverage():\\n covered = 0\\n total = 0\\n for layer in layer_to_compute:\\n covered = covered + np.count_nonzero(knc_cov_dict[layer.name])\\n total = total + np.size(knc_cov_dict[layer.name])\\n return covered / float(total)\",\n \"def get_testcase_summary(output):\\n print(\\\"Inside Test Summary\\\")\\n re_tc_summary = re.compile(r\\\"^\\\\[(\\\\d+\\\\.\\\\d+)\\\\][^\\\\]+\\\\{\\\\{(__testcase_summary);(\\\\d+);(\\\\d+)\\\\}\\\\}\\\")\\n #re_tc_summary = re.compile(r\\\"^\\\\[(\\\\d+\\\\.\\\\d+)\\\\][^\\\\{]+\\\\{\\\\{(__testcase_summary);(\\\\d+);(\\\\d+)\\\\}\\\\}\\\")\\n print(\\\"re_tc_summary =\\\",re_tc_summary.pattern)\\n #print(dir(re_tc_summary))\\n \\n for line in output.splitlines():\\n print \\\"line=\\\",line\\n m = re_tc_summary.search(line)\\n print (\\\"m=\\\",m.groups())\\n if m:\\n _, _, passes, failures = m.groups()\\n return int(passes), int(failures)\\n return None\",\n \"def _percent(self, lines_total, lines_covered):\\n\\n if lines_total == 0:\\n return '0.0'\\n return str(float(float(lines_covered) / float(lines_total)))\",\n \"def report_coverage(fp=None, details=False):\\n if fp is None:\\n fp = sys.stdout\\n fp.write(get_coverage_report(details))\",\n \"def define_coverage(self, id=None, units=None, standard_name=None, coverage_dimensions=None):\",\n \"def pytest_report_header(config):\\n circle_node_total, circle_node_index = read_circleci_env_variables()\\n return \\\"CircleCI total nodes: {}, this node index: {}\\\".format(circle_node_total, circle_node_index)\",\n \"def coverage(session) -> None:\\n session.install(\\\".[test]\\\", \\\"pytest-cov\\\")\\n session.run(\\n \\\"pytest\\\", \\\"-n\\\", \\\"auto\\\", \\\"--cov=./\\\", \\\"--cov-report=xml\\\", *session.posargs\\n )\",\n \"def to_html(self) -> str:\\n coverage_class = 'zero' if self.called == 0 else 'all'\\n return '''\\n {}\\n {}{}%{}%\\n \\\\n'''.format(\\n self.name, coverage_class, self.linenum, self.pretty_name, self.called,\\n 
self.returned, self.blocks\\n )\",\n \"def query_coverage(query):\\n length = query_length(query)\\n coverage = (int(query['alignment length'])/length) * 100\\n return coverage\",\n \"def test_summary_report(self):\\n self.driver.get('http://psl-outbreak.herokuapp.com/report')\\n self.driver.find_element_by_id('summary_report_cases').click()\",\n \"def test_coverage_1(base_settings):\\n filename = base_settings[\\\"unittest_data_dir\\\"] / \\\"coverage-example-2.json\\\"\\n inst = coverage.Coverage.parse_file(\\n filename, content_type=\\\"application/json\\\", encoding=\\\"utf-8\\\"\\n )\\n assert \\\"Coverage\\\" == inst.resource_type\\n\\n impl_coverage_1(inst)\\n\\n # testing reverse by generating data from itself and create again.\\n data = inst.dict()\\n assert \\\"Coverage\\\" == data[\\\"resourceType\\\"]\\n\\n inst2 = coverage.Coverage(**data)\\n impl_coverage_1(inst2)\",\n \"def summary_string(self) -> str:\",\n \"def get_coverage_stats(\\n contig_depth_file, contig_fasta, contig_read_counts_file, contig_stats_out\\n):\\n print(\\\"getting coverage stats\\\")\\n # add other files if requested\\n # read counts\\n logger.info(\\\"Parsing read count file: {}\\\".format(contig_read_counts_file))\\n read_count_table = pandas.read_csv(\\n contig_read_counts_file, delim_whitespace=True, names=[\\\"ReadCount\\\", \\\"Contig\\\"]\\n ).set_index(\\\"Contig\\\")\\n\\n # convert base by base depth data into coverage\\n logger.info(\\\"Parsing read depth file: {}\\\".format(contig_depth_file))\\n mapping_depth_table = get_samtool_depth_table(contig_depth_file, contig_fasta,)\\n contig_stats = mapping_depth_table.join(read_count_table, how=\\\"left\\\").fillna(0)\\n\\n for col in [\\\"Length\\\", \\\"ReadCount\\\", \\\"MaxCov\\\", \\\"MinCov\\\", \\\"CumuLength\\\"]:\\n if col in contig_stats.columns:\\n contig_stats[col] = contig_stats[col].astype(int)\\n\\n logger.info(\\\"Writing coverage table to: {}\\\".format(contig_stats_out))\\n contig_stats.to_csv(contig_stats_out, sep=\\\"\\\\t\\\", float_format=\\\"%0.2f\\\")\",\n \"def analyze_coverage(results, outcomes, allow_list, full_coverage):\\n available = check_test_cases.collect_available_test_cases()\\n for key in available:\\n hits = outcomes[key].hits() if key in outcomes else 0\\n if hits == 0 and key not in allow_list:\\n if full_coverage:\\n results.error('Test case not executed: {}', key)\\n else:\\n results.warning('Test case not executed: {}', key)\\n elif hits != 0 and key in allow_list:\\n # Test Case should be removed from the allow list.\\n if full_coverage:\\n results.error('Allow listed test case was executed: {}', key)\\n else:\\n results.warning('Allow listed test case was executed: {}', key)\",\n \"def generate_cobertura_xml(self, coverage_data):\\n\\n dom_impl = minidom.getDOMImplementation()\\n doctype = dom_impl.createDocumentType(\\\"coverage\\\", None,\\n \\\"http://cobertura.sourceforge.net/xml/coverage-03.dtd\\\")\\n document = dom_impl.createDocument(None, \\\"coverage\\\", doctype)\\n root = document.documentElement\\n summary = coverage_data['summary']\\n self._attrs(root, {\\n 'branch-rate': self._percent(summary['branches-total'],\\n summary['branches-covered']),\\n 'branches-covered': str(summary['branches-covered']),\\n 'branches-valid': str(summary['branches-total']),\\n 'complexity': '0',\\n 'line-rate': self._percent(summary['lines-total'],\\n summary['lines-covered']),\\n 'lines-valid': str(summary['lines-total']),\\n 'timestamp': coverage_data['timestamp'],\\n 'version': '1.9'\\n })\\n\\n sources = 
self._el(document, 'sources', {})\\n source = self._el(document, 'source', {})\\n source.appendChild(document.createTextNode(self.base_dir))\\n sources.appendChild(source)\\n\\n root.appendChild(sources)\\n\\n packages_el = self._el(document, 'packages', {})\\n\\n packages = coverage_data['packages']\\n for package_name, package_data in list(packages.items()):\\n package_el = self._el(document, 'package', {\\n 'line-rate': package_data['line-rate'],\\n 'branch-rate': package_data['branch-rate'],\\n 'name': package_name\\n })\\n classes_el = self._el(document, 'classes', {})\\n for class_name, class_data in list(package_data['classes'].items()):\\n class_el = self._el(document, 'class', {\\n 'branch-rate': self._percent(class_data['branches-total'],\\n class_data['branches-covered']),\\n 'complexity': '0',\\n 'filename': class_name,\\n 'line-rate': self._percent(class_data['lines-total'],\\n class_data['lines-covered']),\\n 'name': class_data['name']\\n })\\n\\n # Process methods\\n methods_el = self._el(document, 'methods', {})\\n for method_name, hits in list(class_data['methods'].items()):\\n method_el = self._el(document, 'method', {\\n 'name': method_name,\\n 'signature' : '',\\n 'hits': hits\\n })\\n methods_el.appendChild(method_el)\\n\\n # Process lines\\n lines_el = self._el(document, 'lines', {})\\n lines = list(class_data['lines'].keys())\\n lines.sort()\\n for line_number in lines:\\n line_el = self._el(document, 'line', {\\n 'branch': class_data['lines'][line_number]['branch'],\\n 'hits': str(class_data['lines'][line_number]['hits']),\\n 'number': str(line_number)\\n })\\n if class_data['lines'][line_number]['branch'] == 'true':\\n total = int(class_data['lines'][line_number]['branches-total'])\\n covered = int(class_data['lines'][line_number]['branches-covered'])\\n percentage = int((covered * 100.0) / total)\\n line_el.setAttribute('condition-coverage',\\n '{0}% ({1}/{2})'.format(\\n percentage, covered, total))\\n lines_el.appendChild(line_el)\\n\\n class_el.appendChild(methods_el)\\n class_el.appendChild(lines_el)\\n classes_el.appendChild(class_el)\\n package_el.appendChild(classes_el)\\n packages_el.appendChild(package_el)\\n root.appendChild(packages_el)\\n\\n return document.toprettyxml()\",\n \"def trivial_cover(regions_count, clinics_count, clinics):\\n clinics_built = [0]*range(0, clinics_count)\\n coverted = set()\\n\\n for clinic in clinics:\\n clinics_built[clinic.index] = 1\\n coverted |= set(clinic.regions)\\n if len(coverted) >= regions_count:\\n break # We are done, we cover all the regions\\n\\n # Calculamos el costo total de construcción\\n total_costs = sum([clinic.cost*clinics_built[clinic.index] for clinic in clinics])\\n \\n # Convertimos la solución en el formato esperado\\n output_data = str(total_cost) + '\\\\n'\\n output_data += ' '.join(map(str, clinics_built))\\n\\n return output_data\",\n \"def summary(self) -> str:\\n pass\",\n \"def calculate_coverage(path, alignment, number_of_fastas):\\n\\n path_to_alignment = path + 'Modeling/fasta_alns_and_identities/' + alignment\\n fastas_iterator = parse_multifasta_file(path_to_alignment, number_of_fastas)\\n fastas = []\\n targer_name, target_seq = next(fastas_iterator)\\n fastas.append(target_seq)\\n length_of_target = 0\\n for i in target_seq:\\n if i != '-':\\n length_of_target += 1\\n for i in range(1, number_of_fastas):\\n name, seq = next(fastas_iterator)\\n fastas.append(seq)\\n coverage = 0\\n for i in range(len(fastas[0])):\\n for j in range(1, len(fastas)):\\n if fastas[0][i] != '-' and 
fastas[j][i] != '-':\\n coverage += 1\\n break\\n coverage_percent = round(coverage / length_of_target * 100, 2)\\n return coverage_percent\",\n \"def summary(self):\\n raise NotImplementedError\",\n \"def test_coverage_4(base_settings):\\n filename = base_settings[\\\"unittest_data_dir\\\"] / \\\"coverage-example.json\\\"\\n inst = coverage.Coverage.parse_file(\\n filename, content_type=\\\"application/json\\\", encoding=\\\"utf-8\\\"\\n )\\n assert \\\"Coverage\\\" == inst.resource_type\\n\\n impl_coverage_4(inst)\\n\\n # testing reverse by generating data from itself and create again.\\n data = inst.dict()\\n assert \\\"Coverage\\\" == data[\\\"resourceType\\\"]\\n\\n inst2 = coverage.Coverage(**data)\\n impl_coverage_4(inst2)\",\n \"def compute_filecoverage(self):\\n result = dict()\\n for filename, fns in self.point_symbol_info.items():\\n file_points = []\\n for fn, points in fns.items():\\n file_points.extend(points.keys())\\n covered_points = self.covered_points & set(file_points)\\n result[filename] = int(math.ceil(\\n len(covered_points) * 100 / len(file_points)))\\n return result\",\n \"def recordCoverage( options, data ):\\n for c in data.chrNames:\\n data.mafWigDict[ c ]['columnsInBlocks'] = 0\\n for m in data.mafBlocksByChrom[ c ]:\\n if m.refEnd > m.refStart:\\n data.mafWigDict[ c ]['columnsInBlocks'] += ( m.refEnd + 1 ) - m.refStart\\n else:\\n data.mafWigDict[ c ]['columnsInBlocks'] += ( m.refStart + 1 ) - m.refEnd\",\n \"def test_coverage_2(base_settings):\\n filename = base_settings[\\\"unittest_data_dir\\\"] / \\\"coverage-example-selfpay.json\\\"\\n inst = coverage.Coverage.parse_file(\\n filename, content_type=\\\"application/json\\\", encoding=\\\"utf-8\\\"\\n )\\n assert \\\"Coverage\\\" == inst.resource_type\\n\\n impl_coverage_2(inst)\\n\\n # testing reverse by generating data from itself and create again.\\n data = inst.dict()\\n assert \\\"Coverage\\\" == data[\\\"resourceType\\\"]\\n\\n inst2 = coverage.Coverage(**data)\\n impl_coverage_2(inst2)\",\n \"def html_it():\\n import coverage\\n cov = coverage.coverage()\\n cov.start()\\n import here # pragma: nested\\n cov.stop() # pragma: nested\\n cov.html_report(directory=\\\"../html_other\\\")\",\n \"def _cmd_coverage(args):\\n pset = coverage.do_coverage(\\n args.interval,\\n args.bam_file,\\n args.count,\\n args.min_mapq,\\n args.processes,\\n args.fasta,\\n )\\n if not args.output:\\n # Create an informative but unique name for the coverage output file\\n bambase = core.fbase(args.bam_file)\\n bedbase = core.fbase(args.interval)\\n tgtbase = (\\n \\\"antitargetcoverage\\\" if \\\"anti\\\" in bedbase.lower() else \\\"targetcoverage\\\"\\n )\\n args.output = f\\\"{bambase}.{tgtbase}.cnn\\\"\\n if os.path.exists(args.output):\\n args.output = f\\\"{bambase}.{bedbase}.cnn\\\"\\n core.ensure_path(args.output)\\n tabio.write(pset, args.output)\",\n \"def summary(self, **kwargs):\\n raise ValueError(\\\"This function is not available in lazy results evaluation as it would \\\"\\n \\\"require all pairwise tests to be performed.\\\")\",\n \"def summarize(self):\\n\\n def increment_summary(summary_obj, case_obj):\\n \\\"\\\"\\\"increment ReportSummary count was ReportCase status\\n\\n Whatever the status of the case object, the corresponding property\\n will be incremented by 1 in the summary object\\n\\n Args:\\n summary_obj (ReportSummary): summary object to increment\\n case_obj (ReportCase): case object\\n \\\"\\\"\\\"\\n summary_obj.increment(case_obj.get_status())\\n\\n summary = ReportSummary()\\n 
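# evaluated purely for its side effect: tallies each case's status into the summary\\n        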
[increment_summary(summary, case) for case in self.cases]\\n self.summary = summary\",\n \"def test_get_vulnerability_occurrences_summary(self):\\n pass\",\n \"def header_summary(\\n self, \\n router_context,\\n tests_by_status\\n ):\\n raise MissingOverload\",\n \"def test():\\n with lcd(BASEDIR):\\n local('virtenv/bin/coverage run runtests.py -v2')\\n local('virtenv/bin/coverage report -m')\",\n \"def print_coverage(lengths):\\n\\n primerAlen = 1330\\n primerBlen = 1353\\n primerClen = 1237\\n\\n print(\\\"SRR ID\\\\tPrimer A\\\\tPrimer B\\\\tPrimer C\\\")\\n for s in lengths:\\n sys.stdout.write(s)\\n sys.stdout.write(\\\"\\\\t{}\\\".format(1.0 * lengths[s][\\\"PrimerA\\\"]/primerAlen))\\n sys.stdout.write(\\\"\\\\t{}\\\".format(1.0 * lengths[s][\\\"PrimerB\\\"]/primerBlen))\\n sys.stdout.write(\\\"\\\\t{}\\\\n\\\".format(1.0 * lengths[s][\\\"PrimerC\\\"]/primerClen))\",\n \"def pytest_terminal_summary(self, terminalreporter, exitstatus):\\n # pylint: disable=unused-argument\\n terminalreporter.section(\\\"Test Information\\\")\\n for test, info in self._info.items():\\n for datum in info:\\n terminalreporter.write(\\\"{}: {}\\\\n\\\".format(test, datum))\",\n \"def test_coverage_3(base_settings):\\n filename = base_settings[\\\"unittest_data_dir\\\"] / \\\"coverage-example-ehic.json\\\"\\n inst = coverage.Coverage.parse_file(\\n filename, content_type=\\\"application/json\\\", encoding=\\\"utf-8\\\"\\n )\\n assert \\\"Coverage\\\" == inst.resource_type\\n\\n impl_coverage_3(inst)\\n\\n # testing reverse by generating data from itself and create again.\\n data = inst.dict()\\n assert \\\"Coverage\\\" == data[\\\"resourceType\\\"]\\n\\n inst2 = coverage.Coverage(**data)\\n impl_coverage_3(inst2)\",\n \"def summarise(self):\\n self.summary = az.summary(self.trace, var_names=[\\\"~chol\\\"], round_to=2)\\n print(self.summary)\\n return self.summary\",\n \"def summarize(self):\\n \\n print self._num_tests, \\\"tests ran with\\\", len(self._failed_tests), \\\"failures:\\\", sorted(list(self._failed_tests))\\n\\n self._num_tests = 0\\n self._failed_tests = set()\",\n \"def get_coverage(self):\\n if len(self) == 1:\\n return self.subacqs[0].get_coverage()\\n return np.array([self.subacqs[i].get_coverage() for i in range(len(self))])\",\n \"def _calc_coverage(self, cds_aln):\\n # Aligned region is part of a read that intersects with cds.\\n coverage = 0\\n for aln_reg in cds_aln.aligned_regions.values(): # aln_reg is of type CdsAlnSublocation\\n location = aln_reg.location # location is of type Location\\n coverage += location.length()\\n coverage = coverage / float(Location.from_location_str(cds_aln.cds.location).length())\\n return coverage\",\n \"def summary_stats(tile_summary):\\n return \\\"Original Dimensions: %dx%d\\\\n\\\" % (tile_summary.orig_w, tile_summary.orig_h) + \\\\\\n \\\"Original Tile Size: %dx%d\\\\n\\\" % (tile_summary.orig_tile_w, tile_summary.orig_tile_h) + \\\\\\n \\\"Scale Factor: 1/%dx\\\\n\\\" % tile_summary.scale_factor + \\\\\\n \\\"Scaled Dimensions: %dx%d\\\\n\\\" % (tile_summary.scaled_w, tile_summary.scaled_h) + \\\\\\n \\\"Scaled Tile Size: %dx%d\\\\n\\\" % (tile_summary.scaled_tile_w, tile_summary.scaled_tile_w) + \\\\\\n \\\"Total Mask: %3.2f%%, Total Tissue: %3.2f%%\\\\n\\\" % (\\n tile_summary.mask_percentage(), tile_summary.tissue_percentage) + \\\\\\n \\\"Tiles: %dx%d = %d\\\\n\\\" % (tile_summary.num_col_tiles, tile_summary.num_row_tiles, tile_summary.count) + \\\\\\n \\\" %5d (%5.2f%%) tiles >=%d%% tissue\\\\n\\\" % (\\n tile_summary.high, 
tile_summary.high / tile_summary.count * 100, TISSUE_HIGH_THRESH) + \\\\\\n \\\" %5d (%5.2f%%) tiles >=%d%% and <%d%% tissue\\\\n\\\" % (\\n tile_summary.medium, tile_summary.medium / tile_summary.count * 100, TISSUE_LOW_THRESH,\\n TISSUE_HIGH_THRESH) + \\\\\\n \\\" %5d (%5.2f%%) tiles >0%% and <%d%% tissue\\\\n\\\" % (\\n tile_summary.low, tile_summary.low / tile_summary.count * 100, TISSUE_LOW_THRESH) + \\\\\\n \\\" %5d (%5.2f%%) tiles =0%% tissue\\\" % (tile_summary.none, tile_summary.none / tile_summary.count * 100)\",\n \"def calculate_coverage(length_total, length_query, length_subject, option_cov=\\\"mean\\\"):\\n if option_cov == \\\"mean\\\":\\n cov = length_total / ((length_query + length_subject) / 2.0)\\n elif option_cov == \\\"subject\\\":\\n cov = length_total / length_subject\\n elif option_cov == \\\"query\\\":\\n cov = length_total / length_query\\n elif option_cov == \\\"shortest\\\":\\n cov = length_total / min(length_query, length_subject)\\n elif option_cov == \\\"longest\\\":\\n cov = length_total / max(length_query, length_subject)\\n\\n return cov\",\n \"def html_index(source_files: iter([SourceFile]), compile_root: str) -> str:\\n def single_summary(source_file: SourceFile) -> str:\\n (covered, lines) = source_file.coverage_stats()\\n (br_covered, br_count, _, _) = source_file.branch_stats()\\n (fn_covered, fn_count) = source_file.function_stats()\\n (coverage_percent, coverage_health) = to_percentage(covered, lines, 90, 75)\\n (branch_percent, branch_health) = to_percentage(br_covered, br_count, 75, 50)\\n (fn_percent, fn_health) = to_percentage(fn_covered, fn_count, 90, 75)\\n\\n\\n return '''\\n {}\\n {}%\\n {}%\\n {}%\\n '''.format(\\n to_html_filename(source_file.source_name),\\n escape(source_file.source_name),\\n coverage_health, covered, lines, coverage_percent,\\n branch_health, br_covered, br_count, branch_percent,\\n fn_health, fn_covered, fn_count, fn_percent\\n )\\n\\n title = escape(compile_root)\\n\\n html_res = [\\\"\\\"\\\"\\n \\n \\n \\n Coverage report for \\\"\\\"\\\" + title + \\\"\\\"\\\"\\n \\n \\n \\n \\n
<body>\\n    <h1>Coverage report for \\\"\\\"\\\" + title + \\\"\\\"\\\"</h1>\\n    <table>\\n    <thead>\\n        <tr><th>File</th><th>Lines</th><th>Branch</th><th>Functions</th></tr>\\n    </thead>\\n    <tbody>
\\n \\n \\n \\\"\\\"\\\"]\\n\\n html_res.extend(single_summary(s) for s in source_files)\\n html_res.append('
</tbody></table></body></html>
')\\n\\n return '\\\\n'.join(html_res)\",\n \"def summary(self):\\n return ''\",\n \"def test_summaryRsrcsNoHeader(self):\\n self.summaryRsrcsNoHeader(\\\"alert\\\")\\n self.summaryRsrcsNoHeader(\\\"dashboard\\\")\",\n \"def summary_stats(self):\\n capital_gains = self.df['values'].iloc[-1].sum() - self.tc.starting_cash\\n total_return = capital_gains / self.tc.starting_cash\\n days_invested = (self.df.index[-1] - self.df.index[0]).days\\n annualized_returns = (total_return + 1) ** (365 / days_invested) - 1\\n annualized_volatility = self.df['returns'].std() * (252 ** 0.5)\\n sharpe = annualized_returns / annualized_volatility\\n num_trades = self.trades.shape[0]\\n stats = pd.Series(\\n data=[capital_gains, total_return, annualized_returns, annualized_volatility, sharpe, num_trades],\\n index=['Capital Gains', 'Total Return', 'Annualized Return', 'Annualized Volatility', 'Sharpe Ratio',\\n 'Number of Trades']\\n )\\n return stats\",\n \"def report_totals(output):\\n groups = (STATS_PATC.match(line) for line in output.splitlines())\\n tuples = (g.groups() for g in groups if g)\\n\\n results = [0,0,0,0,0]\\n for t in tuples:\\n results[0] += int(t[0]) # total\\n results[1] += int(t[1]) # failures\\n results[2] += int(t[2]) # errors\\n results[3] += int(t[3]) # skipped\\n results[4] += float(t[4]) # elapsed time\\n\\n print 'Tests run: %d, Failures: %d, Errors: %d, Skipped: %d, '\\\\\\n 'Time elapsed: %.2f' % tuple(results)\",\n \"def get_coverage_report_model()->CoverageReport:\\n covergroups = CoverageRegistry.inst().covergroup_types()\\n\\n db = MemFactory.create() \\n save_visitor = CoverageSaveVisitor(db)\\n now = datetime.now\\n save_visitor.save(TestData(\\n UCIS_TESTSTATUS_OK,\\n \\\"UCIS:simulator\\\",\\n ucis.ucis_Time()), covergroups)\\n\\n return CoverageReportBuilder.build(db)\",\n \"def hit_coverage(self):\\n s = self.hit_aln.replace(\\\"=\\\", \\\"\\\")\\n return len(s)\",\n \"def coverage(text: str) -> float:\\n words = set(text.split(' '))\\n return len([w for w in words if frequency(w) != 0]) / len(words) * 100\",\n \"def metadata_reporter(self):\\n logging.info('Creating summary report')\\n header = '{}\\\\n'.format(','.join(self.headers))\\n # Create a string to store all the results\\n data = str()\\n for sample in self.metadata:\\n # Add the value of the appropriate attribute to the results string\\n data += GenObject.returnattr(sample, 'name')\\n # SampleName\\n data += GenObject.returnattr(sample.run, 'SamplePlate')\\n # Genus\\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\\n # SamplePurity\\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\\n # N50\\n n50 = GenObject.returnattr(sample.quast, 'N50',\\n number=True)\\n if n50 != '-,':\\n data += n50\\n else:\\n data += '0,'\\n # NumContigs\\n data += GenObject.returnattr(sample.quast, 'num_contigs',\\n number=True)\\n # TotalLength\\n data += GenObject.returnattr(sample.quast, 'Total_length',\\n number=True)\\n # MeanInsertSize\\n data += GenObject.returnattr(sample.quast, 'mean_insert',\\n number=True)\\n # InsertSizeSTD\\n data += GenObject.returnattr(sample.quast, 'std_insert',\\n number=True)\\n # AverageCoverageDepth\\n data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\\n number=True)\\n # CoverageDepthSTD\\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\\n number=True)\\n # PercentGC\\n data += GenObject.returnattr(sample.quast, 'GC',\\n number=True)\\n # MASH_ReferenceGenome\\n data += GenObject.returnattr(sample.mash, 
'closestrefseq')\\n # MASH_NumMatchingHashes\\n data += GenObject.returnattr(sample.mash, 'nummatches')\\n # 16S_result\\n data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\\n # 16S PercentID\\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\\n # CoreGenesPresent\\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\\n # rMLST_Result\\n try:\\n # If the number of matches to the closest reference profile is 53, return the profile number\\n if sample.rmlst.matches == 53:\\n if type(sample.rmlst.sequencetype) is list:\\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\\n else:\\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\\n data += rmlst_seq_type\\n else:\\n # Otherwise the profile is set to new\\n data += 'new,'\\n except AttributeError:\\n data += 'new,'\\n # MLST_Result\\n try:\\n if sample.mlst.matches == 7:\\n if type(sample.mlst.sequencetype) is list:\\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\\n else:\\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\\n data += mlst_seq_type\\n else:\\n data += 'new,'\\n except AttributeError:\\n data += 'new,'\\n # MLST_gene_X_alleles\\n try:\\n # Create a set of all the genes present in the results (gene name split from allele)\\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\\n for gene in sorted(gene_set):\\n allele_list = list()\\n # Determine all the alleles that are present for each gene\\n for allele in sample.mlst.combined_metadata_results:\\n if gene in allele:\\n allele_list.append(allele.replace(' ', '_'))\\n # If there is more than one allele in the sample, add both to the string separated by a ';'\\n if len(allele_list) > 1:\\n data += '{},'.format(';'.join(allele_list))\\n # Otherwise add the only allele\\n else:\\n data += allele_list[0] + ','\\n # If there are fewer than seven matching alleles, add a ND for each missing result\\n if len(gene_set) < 7:\\n data += (7 - len(gene_set)) * 'ND,'\\n except AttributeError:\\n # data += '-,-,-,-,-,-,-,'\\n data += 'ND,ND,ND,ND,ND,ND,ND,'\\n # E_coli_Serotype\\n try:\\n # If no O-type was found, set the output to be O-untypeable\\n if ';'.join(sample.ectyper.o_type) == '-':\\n otype = 'O-untypeable'\\n else:\\n otype = sample.ectyper.o_type\\n # Same as above for the H-type\\n if ';'.join(sample.ectyper.h_type) == '-':\\n htype = 'H-untypeable'\\n\\n else:\\n htype = sample.ectyper.h_type\\n serotype = '{otype}:{htype},'.format(otype=otype,\\n htype=htype)\\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\\n except AttributeError:\\n data += 'ND,'\\n # SISTR_serovar_antigen\\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\\n # SISTR_serovar_cgMLST\\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\\n # SISTR_serogroup\\n data += GenObject.returnattr(sample.sistr, 'serogroup')\\n # SISTR_h1\\n data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\\n # SISTR_h2\\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\\n # SISTR_serovar\\n data += GenObject.returnattr(sample.sistr, 'serovar')\\n # GeneSeekr_Profile\\n try:\\n if sample.genesippr.report_output:\\n data += 
';'.join(sample.genesippr.report_output) + ','\\n else:\\n data += 'ND,'\\n except AttributeError:\\n data += 'ND,'\\n # Vtyper_Profile\\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\\n # AMR_Profile and resistant/sensitive status\\n if sample.resfinder_assembled.pipelineresults:\\n # Profile\\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\\n r_set=';'.join(sorted(list(resistance_set))))\\n data += ','\\n # Resistant/Sensitive\\n data += 'Resistant,'\\n else:\\n # Profile\\n data += 'ND,'\\n # Resistant/Sensitive\\n data += 'Sensitive,'\\n # Plasmid Result'\\n if sample.mobrecon.pipelineresults:\\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\\n data += '{plasmid}({details});'.format(plasmid=plasmid,\\n details=details)\\n data += ','\\n else:\\n data += 'ND,'\\n # TotalPredictedGenes\\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\\n number=True)\\n # PredictedGenesOver3000bp\\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\\n number=True)\\n # PredictedGenesOver1000bp\\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\\n number=True)\\n # PredictedGenesOver500bp\\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\\n number=True)\\n # PredictedGenesUnder500bp\\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\\n number=True)\\n # AssemblyDate\\n data += datetime.now().strftime('%Y-%m-%d') + ','\\n # PipelineVersion\\n data += self.commit + ','\\n # Name of the database used in the analyses\\n data += os.path.split(self.reffilepath)[-1] + ','\\n # Database download date\\n data += self.download_date\\n # Append a new line to the end of the results for this sample\\n data += '\\\\n'\\n # Replace any NA values with ND\\n cleandata = data.replace('NA', 'ND')\\n with open(os.path.join(self.reportpath, 'combinedMetadata.csv'), 'w') as metadatareport:\\n metadatareport.write(header)\\n metadatareport.write(cleandata)\",\n \"def get_summary_stats(self, output_csv=None):\\n\\n contig_size_list = []\\n\\n self.summary_info[\\\"ncontigs\\\"] = len(self.contigs)\\n\\n for contig_id, sequence in self.contigs.items():\\n\\n logger.debug(\\\"Processing contig: {}\\\".format(contig_id))\\n\\n # Get contig sequence size\\n contig_len = len(sequence)\\n\\n # Add size for average contig size\\n contig_size_list.append(contig_len)\\n\\n # Add to total assembly length\\n self.summary_info[\\\"total_len\\\"] += contig_len\\n\\n # Add to average gc\\n self.summary_info[\\\"avg_gc\\\"].append(\\n sum(map(sequence.count, [\\\"G\\\", \\\"C\\\"])) / contig_len\\n )\\n\\n # Add to missing data\\n self.summary_info[\\\"missing_data\\\"] += sequence.count(\\\"N\\\")\\n\\n # Get average contig size\\n logger.debug(\\\"Getting average contig size\\\")\\n self.summary_info[\\\"avg_contig_size\\\"] = \\\\\\n sum(contig_size_list) / len(contig_size_list)\\n\\n # Get average gc content\\n logger.debug(\\\"Getting average GC content\\\")\\n self.summary_info[\\\"avg_gc\\\"] = \\\\\\n sum(self.summary_info[\\\"avg_gc\\\"]) / len(self.summary_info[\\\"avg_gc\\\"])\\n\\n # Get N50\\n logger.debug(\\\"Getting N50\\\")\\n cum_size = 0\\n for l in sorted(contig_size_list, reverse=True):\\n cum_size += l\\n if cum_size >= self.summary_info[\\\"total_len\\\"] / 2:\\n self.summary_info[\\\"n50\\\"] = l\\n break\\n\\n if output_csv:\\n 
logger.debug(\\\"Writing report to csv\\\")\\n # Write summary info to CSV\\n with open(output_csv, \\\"w\\\") as fh:\\n summary_line = \\\"{}, {}\\\\\\\\n\\\".format(\\n self.sample, \\\",\\\".join(\\n [str(x) for x in self.summary_info.values()]))\\n fh.write(summary_line)\",\n \"def test_concentration_profile(self):\\n # TODO: add an output for average particle concentration\",\n \"def cowreport():\\n central = pytz.timezone(\\\"America/Chicago\\\")\\n yesterday = (utc() - datetime.timedelta(days=1)).astimezone(central)\\n midnight = yesterday.replace(hour=0, minute=0)\\n midutc = midnight.astimezone(pytz.UTC)\\n begints = midutc.strftime(\\\"%Y-%m-%dT%H:%M\\\")\\n endts = (midutc + datetime.timedelta(hours=24)).strftime(\\\"%Y-%m-%dT%H:%M\\\")\\n api = (\\n f\\\"http://iem.local/api/1/cow.json?begints={begints}&endts={endts}&\\\"\\n \\\"phenomena=SV&phenomena=TO&lsrtype=SV&lsrtype=TO\\\"\\n )\\n data = requests.get(api, timeout=60).json()\\n st = data[\\\"stats\\\"]\\n if st[\\\"events_total\\\"] == 0:\\n text = \\\"No SVR+TOR Warnings Issued.\\\"\\n html = f\\\"

<h3>IEM Cow Report</h3><pre>{text}</pre>
\\\"\\n txt = f\\\"> IEM Cow Report\\\\n{text}\\\\n\\\"\\n return txt, html\\n\\n vp = st[\\\"events_verified\\\"] / float(st[\\\"events_total\\\"]) * 100.0\\n text = (\\n f\\\"SVR+TOR Warnings Issued: {st['events_total']:3.0f} \\\"\\n f\\\"Verified: {st['events_verified']:3.0f} [{vp:.1f}%]\\\\n\\\"\\n \\\"Polygon Size Versus County Size \\\"\\n f\\\"[{st['size_poly_vs_county[%]']:.1f}%]\\\\n\\\"\\n \\\"Average Perimeter Ratio \\\"\\n f\\\"[{st['shared_border[%]']:.1f}%]\\\\n\\\"\\n \\\"Percentage of Warned Area Verified (15km) \\\"\\n f\\\"[{st['area_verify[%]']:.1f}%]\\\\n\\\"\\n \\\"Average Storm Based Warning Size \\\"\\n f\\\"[{st['avg_size[sq km]']:.0f} sq km]\\\\n\\\"\\n f\\\"Probability of Detection(higher is better) [{st['POD[1]']:.2f}]\\\\n\\\"\\n f\\\"False Alarm Ratio (lower is better) [{st['FAR[1]']:.2f}]\\\\n\\\"\\n f\\\"Critical Success Index (higher is better) [{st['CSI[1]']:.2f}]\\\\n\\\"\\n )\\n\\n html = f\\\"

<h3>IEM Cow Report</h3><pre>{text}</pre>
\\\"\\n txt = f\\\"> IEM Cow Report\\\\n{text}\\\\n\\\"\\n\\n return txt, html\",\n \"def run(self):\\n cmd = 'coverage run setup.py test && coverage report -m'\\n check_call(cmd, shell=True)\",\n \"def final_report(self):\\n print('Final Count for', self.reason, self.successes, 'of', self.tests, 'tests passed')\",\n \"def parse_coverage(depth_filename, allow_missing=True):\\n\\n delims = [ 0, 10, 100, 1000, 2000, 10000]\\n nbins = len(delims)+1\\n\\n bin_labels = ['0'] + [f\\\"{delims[i-1]+1}x-{delims[i]}x\\\" for i in range(1,nbins-1)] + [f\\\"> {delims[-1]}x\\\"]\\n bin_labels = [ f\\\"Fraction with {l} coverage\\\" for l in bin_labels ]\\n\\n ret = {\\n 'bin_labels': bin_labels,\\n 'bin_fractions': [ None for b in range(nbins) ],\\n 'mean_coverage': None,\\n 'qc_meancov': 'FAIL',\\n 'qc_cov100': 'FAIL',\\n 'qc_cov1000': 'FAIL',\\n 'cov100': 0\\n }\\n\\n if file_is_missing(depth_filename, allow_missing):\\n return ret\\n\\n coverage = []\\n for line in open(depth_filename):\\n t = line.split('\\\\t')\\n assert len(t) == 3\\n coverage.append(int(float(t[2].strip(\\\"\\\\n\\\"))))\\n\\n coverage = np.array(coverage)\\n bin_assignments = np.searchsorted(np.array(delims), coverage, side='left')\\n bin_fractions = np.bincount(bin_assignments, minlength=nbins) / float(len(coverage))\\n assert bin_fractions.shape == (nbins,)\\n\\n\\n ret['cov100'] = np.mean(coverage >= 100)\\n ret['bin_fractions'] = [ xround(f,3) for f in bin_fractions ]\\n ret['mean_coverage'] = xround(np.mean(coverage), 1)\\n ret['qc_meancov'] = \\\"PASS\\\" if (np.mean(coverage) >= 2000) else \\\"FAIL\\\"\\n ret['qc_cov100'] = \\\"PASS\\\" if (np.mean(coverage >= 100) >= 0.9) else \\\"FAIL\\\"\\n ret['qc_cov1000'] = \\\"PASS\\\" if (np.mean(coverage >= 1000) >= 0.9) else \\\"WARN\\\"\\n\\n return ret\",\n \"def do(self, callback_name, *args):\\n value_dict = self._evaluator.evaluate(self.data_stream)\\n print(\\\"Train test coverage:{0}\\\".format(self.coverage))\\n for key, value in value_dict.items():\\n print(\\\"{0}:{1}\\\".format(key, value * self.coverage))\",\n \"def test_run_coverage(self):\\n cmd = GreenTestCommand(Distribution())\\n cmd.coverage = True\\n cmd.ensure_finalized()\\n cmd.run()\\n self.assertThat(_subprocess_call_args(), Contains(\\\"-r\\\"))\",\n \"def cuv(ctx, coverage_fname, exclude, branch):\\n if coverage_fname is None:\\n coverage_fname = find_coverage_data('.')\\n # coverage_fname still could be None\\n\\n cfg = Config()\\n ctx.obj = cfg\\n\\n cfg.nice_width = min(80, shutil.get_terminal_size()[0])\\n cfg.exclude = exclude\\n\\n cfg.branch = branch\\n if coverage_fname is not None:\\n cfg.data = coverage.Coverage(data_file=coverage_fname)\\n cfg.data.load()\\n else:\\n raise click.UsageError(\\n \\\"No coverage data. 
Do you have a .coverage file?\\\"\\n )\",\n \"def test_summary_success(self):\\n\\n summary_data_key = 'summary_data'\\n\\n response = self.send_request(view_name='upload_summary_view', params={'upload_id': 1})\\n context_data = response.context_data\\n self.assertTrue(summary_data_key in context_data)\\n\\n summary_data = context_data[summary_data_key]\\n self.assertEquals(3, len(summary_data))\\n\\n self.assertEqual(Decimal('100.0'), summary_data[0].pre_tax_amount)\",\n \"def _coverage(self, chr, limit, nbins):\\n\\n c = np.zeros(nbins, dtype=np.int)\\n chr_start, chr_stop = self.refs[chr][1:]\\n bin_size = float((limit[1] - limit[0]) / nbins)\\n\\n for i in range(chr_start, chr_stop):\\n read_start = self.lines[i][3]\\n read_len = len(self.lines[i][9])\\n\\n start_bin = int((read_start - limit[0]) / bin_size)\\n stop_bin = int((read_start + read_len - limit[0]) / bin_size)\\n\\n # print start_bin, stop_bin\\n c[start_bin:stop_bin + 1] += 1\\n \\n return c\",\n \"def getTotalCaseAndControlCounts(genotypesFilename):\\r\\n\\r\\n\\tcomphetSuffix = \\\"\\\"\\r\\n\\tif \\\"comphet\\\" in genotypesFilename:\\r\\n\\t\\tcomphetSuffix = \\\" (#1)\\\"\\r\\n\\r\\n\\t# We read through the whole file. Might take a while, but easier than dealing with all edge cases.\\r\\n\\tmaxCoveredCasePercentage = 0\\r\\n\\tmaxCoveredControlPercentage = 0\\r\\n\\treader = csv.reader(open(genotypesFilename, \\\"r\\\"))\\r\\n\\theader = next(reader)\\r\\n\\r\\n\\tfor variant in reader:\\r\\n\\r\\n\\t\\tvariant = dict(zip(header, variant))\\r\\n\\t\\tcasePercentage = float(variant[\\\"Covered Case Percentage\\\" + comphetSuffix])/100.0\\r\\n\\t\\tif casePercentage > maxCoveredCasePercentage:\\r\\n\\t\\t\\tmaxCoveredCasePercentage = casePercentage\\r\\n\\t\\t\\tcoveredCases = int(variant[\\\"Covered Case\\\" + comphetSuffix])\\r\\n\\t\\t\\ttotalCases = int(round(coveredCases/casePercentage))\\r\\n\\r\\n\\t\\tcontrolPercentage = float(variant[\\\"Covered Ctrl Percentage\\\" + comphetSuffix])/100.0\\r\\n\\t\\tif controlPercentage > maxCoveredControlPercentage:\\r\\n\\t\\t\\tmaxCoveredControlPercentage = controlPercentage\\r\\n\\t\\t\\tcoveredControls = int(variant[\\\"Covered Ctrl\\\" + comphetSuffix])\\r\\n\\t\\t\\ttotalControls = int(round(coveredControls/controlPercentage))\\r\\n\\treturn totalCases, totalControls\",\n \"def generate_report():\\n if os.path.isdir(\\\"build/coverage\\\"):\\n shutil.rmtree(\\\"build/coverage\\\")\\n commands = '''\\nscons -uij32 --optimization=coverage controller/cplusplus_test\\nlcov --base-directory build/coverage --directory build/coverage -c -o build/coverage/controller_test.info\\ngenhtml -o build/coverage/controller/test_coverage -t test --num-spaces 4 build/coverage/controller_test.info\\n'''\\n for cmd in commands.splitlines():\\n cmd_args = cmd.split()\\n if (len(cmd_args) == 0):\\n continue\\n cmd = cmd_args[0]\\n cmd_path = find_executable(cmd)\\n if not cmd_path:\\n continue\\n pid = os.fork()\\n if pid == 0:\\n # Avoid stdout buffering by execing command into child process.\\n os.execv(cmd_path, cmd_args)\\n os.waitpid(pid, 0)\"\n]"},"negative_scores":{"kind":"list 
like","value":["0.71685416","0.68137187","0.6776906","0.6677319","0.6650848","0.6639017","0.6626381","0.65998113","0.6589576","0.65476686","0.6533306","0.6496852","0.6492893","0.64699256","0.64314204","0.64311635","0.64148486","0.63928735","0.6356816","0.63496864","0.63363206","0.6288406","0.6266407","0.62500393","0.62024015","0.61881036","0.61556363","0.6153905","0.61179703","0.6105436","0.61032206","0.6089348","0.6077586","0.6036977","0.6015071","0.60113275","0.60109013","0.59781253","0.5977678","0.5970312","0.5964489","0.5956227","0.59155035","0.5900403","0.5882995","0.5879575","0.587582","0.58629966","0.5821904","0.5814581","0.57977504","0.577273","0.575227","0.5749255","0.5747918","0.5735202","0.5731492","0.5707693","0.5705943","0.57026005","0.5685508","0.5683569","0.5677856","0.5675753","0.56738245","0.5668328","0.5667812","0.5660833","0.56594783","0.564732","0.56437427","0.5641916","0.56372786","0.5635899","0.56292456","0.5608846","0.55929166","0.55740994","0.55711234","0.5544603","0.55395234","0.55232507","0.55186677","0.55178505","0.55129856","0.55111057","0.5505072","0.55043805","0.54955375","0.5492022","0.54883343","0.5482901","0.54788476","0.5476473","0.54708874","0.54668874","0.5465019","0.5464465","0.54550874","0.54434067"],"string":"[\n \"0.71685416\",\n \"0.68137187\",\n \"0.6776906\",\n \"0.6677319\",\n \"0.6650848\",\n \"0.6639017\",\n \"0.6626381\",\n \"0.65998113\",\n \"0.6589576\",\n \"0.65476686\",\n \"0.6533306\",\n \"0.6496852\",\n \"0.6492893\",\n \"0.64699256\",\n \"0.64314204\",\n \"0.64311635\",\n \"0.64148486\",\n \"0.63928735\",\n \"0.6356816\",\n \"0.63496864\",\n \"0.63363206\",\n \"0.6288406\",\n \"0.6266407\",\n \"0.62500393\",\n \"0.62024015\",\n \"0.61881036\",\n \"0.61556363\",\n \"0.6153905\",\n \"0.61179703\",\n \"0.6105436\",\n \"0.61032206\",\n \"0.6089348\",\n \"0.6077586\",\n \"0.6036977\",\n \"0.6015071\",\n \"0.60113275\",\n \"0.60109013\",\n \"0.59781253\",\n \"0.5977678\",\n \"0.5970312\",\n \"0.5964489\",\n \"0.5956227\",\n \"0.59155035\",\n \"0.5900403\",\n \"0.5882995\",\n \"0.5879575\",\n \"0.587582\",\n \"0.58629966\",\n \"0.5821904\",\n \"0.5814581\",\n \"0.57977504\",\n \"0.577273\",\n \"0.575227\",\n \"0.5749255\",\n \"0.5747918\",\n \"0.5735202\",\n \"0.5731492\",\n \"0.5707693\",\n \"0.5705943\",\n \"0.57026005\",\n \"0.5685508\",\n \"0.5683569\",\n \"0.5677856\",\n \"0.5675753\",\n \"0.56738245\",\n \"0.5668328\",\n \"0.5667812\",\n \"0.5660833\",\n \"0.56594783\",\n \"0.564732\",\n \"0.56437427\",\n \"0.5641916\",\n \"0.56372786\",\n \"0.5635899\",\n \"0.56292456\",\n \"0.5608846\",\n \"0.55929166\",\n \"0.55740994\",\n \"0.55711234\",\n \"0.5544603\",\n \"0.55395234\",\n \"0.55232507\",\n \"0.55186677\",\n \"0.55178505\",\n \"0.55129856\",\n \"0.55111057\",\n \"0.5505072\",\n \"0.55043805\",\n \"0.54955375\",\n \"0.5492022\",\n \"0.54883343\",\n \"0.5482901\",\n \"0.54788476\",\n \"0.5476473\",\n \"0.54708874\",\n \"0.54668874\",\n \"0.5465019\",\n \"0.5464465\",\n \"0.54550874\",\n \"0.54434067\"\n]"},"document_score":{"kind":"string","value":"0.7372552"},"document_rank":{"kind":"string","value":"0"}}},{"rowIdx":94849,"cells":{"query":{"kind":"string","value":"Coverage handler for incoming messages"},"document":{"kind":"string","value":"def receive_message(self, context, message):\r\n pass"},"metadata":{"kind":"string","value":"{\n \"objective\": {\n \"self\": [],\n \"paired\": [],\n \"triplet\": [\n [\n \"query\",\n \"document\",\n \"negatives\"\n ]\n ]\n }\n}"},"negatives":{"kind":"list like","value":["def 
sample_handler(controller, msg, pkt):\n pass","def handle(self, message):","def handle_message(self, message):","def processMessage(self, *args, **kwargs):\r\n pass","def handle_message(self, message):\n\n try:\n controller_func = get_controller_func(message.code)\n\n if controller_func:\n response = get_controller_func(message.code)(message.payload)\n self.send_message(response)\n else:\n self.send_bad_request()\n except Exception as e:\n Logger.log_error(e)\n self.send_server_error()","def handle_message(self, msg):\n pass","def _report_message(message, level, request, extra_data, payload_data):\n if not _check_config():\n return\n\n filtered_message = events.on_message(message,\n request=request,\n extra_data=extra_data,\n payload_data=payload_data,\n level=level)\n\n if filtered_message is False:\n return\n\n data = _build_base_data(request, level=level)\n\n # message\n data['body'] = {\n 'message': {\n 'body': filtered_message\n }\n }\n\n if extra_data:\n extra_data = extra_data\n data['body']['message'].update(extra_data)\n\n request = _get_actual_request(request)\n _add_request_data(data, request)\n _add_person_data(data, request)\n _add_lambda_context_data(data)\n data['server'] = _build_server_data()\n\n if payload_data:\n data = dict_merge(data, payload_data, silence_errors=True)\n\n payload = _build_payload(data)\n send_payload(payload, payload.get('access_token'))\n\n return data['uuid']","def process_messages(self):\n pass","def process_message(self, msg, src):","def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg","def test_messages(self):\n pass","def handleMessage(msg):","def __data_handler__(self, msg):\n print(msg)","def test_sendimmessages(self):\n pass","def _incoming_handler(self, context, message, fake_reply):\r\n return self._map[message.method](context, fake_reply, *message.args, **message.kwargs)","def test_handle_request_get(self):\n # setup\n incoming_message = cast(\n HttpMessage,\n self.build_incoming_message(\n message_type=HttpMessage,\n performative=HttpMessage.Performative.REQUEST,\n to=self.skill_id,\n sender=self.sender,\n method=self.get_method,\n url=self.url,\n version=self.version,\n headers=self.headers,\n body=self.body,\n ),\n )\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.http_handler.handle(incoming_message)\n\n # after\n self.assert_quantity_in_outbox(1)\n\n mock_logger.assert_any_call(\n logging.INFO,\n \"received http request with method={}, url={} and body={!r}\".format(\n incoming_message.method, incoming_message.url, incoming_message.body\n ),\n )\n\n # _handle_get\n message = self.get_message_from_outbox()\n has_attributes, error_str = self.message_has_attributes(\n actual_message=message,\n message_type=HttpMessage,\n performative=HttpMessage.Performative.RESPONSE,\n to=incoming_message.sender,\n sender=incoming_message.to,\n version=incoming_message.version,\n status_code=200,\n status_text=\"Success\",\n headers=incoming_message.headers,\n body=json.dumps({\"tom\": {\"type\": \"cat\", \"age\": 10}}).encode(\"utf-8\"),\n )\n assert has_attributes, error_str\n\n mock_logger.assert_any_call(\n logging.INFO,\n f\"responding with: {message}\",\n )","def test(coverage):\n print('success')\n pass","def process(self, message: Message, **kwargs: Any) -> None:","def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_\" (eg: 
handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))","def message_handler(self, dest, source, message):\n pass","def handle_message(self, data, channel):\n pass","def process(self, payload, status_code=0):","def on_message(data):\n pass","def _handleIncomingDataAnalysis(self, msg: str):\n\t\tlogging.info(\"[CDA_CALLBACK]----->>>The _handleIncomingDataAnalysis method is being called\")\n\t\tad = DataUtil.jsonToActuatorData(self, msg)\n\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)","def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif 
request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n 
#Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == 
\"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))","def handle_delivery(channel, method, header, body):\n print(body)","def ceilometer_callback(self, ch, method, properties, body):\n payload = json.loads(body)\n try:\n message_body = json.loads(payload['oslo.message'])\n samples = message_body['args']['data']\n #print \"--------------------------------------------------\"\n self.pool.spawn_n(self.zabbix_sender.consume_samples,samples)\n except Exception,e:\n log.warn(str(e))","def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()","def _handle_message(self, msg):\n self.event('message', msg)","def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to 
do something with the message!","def handler(event, context):\n if event and \"Records\" in event:\n for record in event[\"Records\"]:\n time_str = time.ctime()\n if \"body\" in record:\n try:\n hasura_request(record[\"body\"])\n except Exception as e:\n print(f\"Start Time: {time_str}\", str(e))\n time_str = time.ctime()\n print(\"Done executing: \", time_str)\n raise_critical_error(\n message=f\"Could not process record: {str(e)}\",\n data=record,\n exception_type=Exception\n )","def MessageHandlerMethod(**kwargs):\n data: dict = kwargs['data']\n bus: AbstractPikaBus = kwargs['bus']\n payload: dict = kwargs['payload']\n print(payload)\n if payload['reply']:\n payload['reply'] = False\n bus.Reply(payload=payload)","def test_send(self):\n # Required to get useful test names\n super(TestCisPlyOutput_local, self).test_send()","def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])","def handle_message(self, message):\n print \"[WARNING] No message handling implemented!\"","def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)","def get_message():\n # Only run xray in the AWS Lambda environment\n if runs_on_aws_lambda():\n xray_subsegment = xray_recorder.current_subsegment()\n xray_subsegment.put_annotation(\"key\", \"value\")\n # Sample metadata\n # subsegment.put_metadata(\"operation\", \"metadata\", \"python object/json\")\n xray_recorder.end_subsegment()","def handleMessage(self, message):\n\n if 'started' in message.tags:\n self.handleMessage_started(message)\n\n elif 'deployment_computed' in message.tags:\n self.handleMessage_computed(message)\n\n elif 'deployment_end' in message.tags:\n self.handleMessage_end(message)","def agent_message(self, in_message):\n\n logging.debug(\"Received %s\" % in_message)\n\n if in_message.startswith(\"start_testing\"):\n self._start_testing()\n\n elif in_message.startswith(\"finish_testing\"):\n epoch = int(in_message.split(\" \")[1]) \n self._finish_testing(epoch)\n else:\n return \"I don't know how to respond to your message\"","def test_handle_request_post(self):\n # setup\n incoming_message = cast(\n HttpMessage,\n self.build_incoming_message(\n message_type=HttpMessage,\n performative=HttpMessage.Performative.REQUEST,\n to=self.skill_id,\n sender=self.sender,\n method=self.post_method,\n url=self.url,\n version=self.version,\n headers=self.headers,\n body=self.body,\n ),\n )\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.http_handler.handle(incoming_message)\n\n # after\n self.assert_quantity_in_outbox(1)\n\n mock_logger.assert_any_call(\n logging.INFO,\n \"received http request with method={}, url={} and body={!r}\".format(\n incoming_message.method, incoming_message.url, incoming_message.body\n ),\n )\n\n # _handle_post\n message = self.get_message_from_outbox()\n has_attributes, error_str = self.message_has_attributes(\n actual_message=message,\n message_type=HttpMessage,\n performative=HttpMessage.Performative.RESPONSE,\n to=incoming_message.sender,\n sender=incoming_message.to,\n version=incoming_message.version,\n status_code=200,\n status_text=\"Success\",\n headers=incoming_message.headers,\n body=self.body,\n )\n assert has_attributes, error_str\n\n mock_logger.assert_any_call(\n logging.INFO,\n f\"responding with: {message}\",\n )","def test_handle_weather_message_calls_current(self):\n pass","def 
test_dispatch_inbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_inbound(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [msg])","def consume(self, handler) -> None:\n pass # pragma: no cover","def event_handler(self, response):\n pass","def test_handler(self):\n mock_sqr: SequenceRun = SequenceRunFactory()\n\n workflow: dict = bcl_convert.handler({\n 'gds_volume_name': mock_sqr.gds_volume_name,\n 'gds_folder_path': mock_sqr.gds_folder_path,\n 'seq_run_id': mock_sqr.run_id,\n 'seq_name': mock_sqr.name,\n }, None)\n\n logger.info(\"-\" * 32)\n logger.info(\"Example bcl_convert.handler lambda output:\")\n logger.info(json.dumps(workflow))\n\n # assert bcl convert workflow launch success and save workflow run in db\n workflows = Workflow.objects.all()\n self.assertEqual(1, workflows.count())","def test_filter(self, logger: Logger, mocker: MockerFixture) -> None:\n task = OctaveTask()\n task.session_id = \"123\"\n handler = OutputHandler(task)\n logger.addHandler(handler)\n\n send_func = mocker.patch(\"matl_online.tasks.OutputHandler.send\")\n\n logger.warning(\"warning\")\n logger.error(\"error\")\n logger.debug(\"debug\")\n\n assert len(handler.contents) == 0\n send_func.assert_not_called()","def test_dispatch_raw(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_raw('fooconn.foo', msg)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [msg])","def on_message(channel, method_frame, header_frame, body):\n channel.basic_ack(delivery_tag=method_frame.delivery_tag)\n publisher = Publisher()\n\n message = body.decode(\"utf8\")\n print(message)\n logging.info(message)\n\n response = format_response(code=\"ERR400\", status=\"error\", message=\"\", files_ids=[], action=\"\")\n\n try:\n data = json.loads(message)\n action = data[\"action\"]\n if action in actions:\n threading.Thread(target=actions[action], args=(data, )).start()\n else:\n response[\"action\"] = action\n response[\"message\"] = \"This action does not exist on server.\"\n publisher.send_message(json.dumps(response))\n\n except json.JSONDecodeError:\n response[\"code\"] = \"ERR500\"\n response[\"message\"] = error = \"Invalid JSON file\"\n print(error)\n publisher.send_message(json.dumps(response))","def handle_messages():\n print(\"Handling Messages\")\n payload = request.get_data()\n for sender, incoming_message, payload in messaging_events(payload):\n # The following statements check which options the user selected\n # Response handler contains \"templates\" for the various messages\n user_name = get_full_name(sender, PAT)\n if \"hei\" in incoming_message.lower() or \"hallo\" in incoming_message.lower() or \"yo\" in incoming_message.lower()\\\n or \"hi\" in incoming_message.lower():\n send_message(PAT, send_message(PAT, response_handler.greeting_message(sender, user_name)))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n elif payload == \"change subject\" or \"change subject\" in incoming_message.lower():\n 
send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"help\" in incoming_message.lower():\n\n send_message(PAT, response_handler.text_message(sender, \"Are you lost ...? \"))\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form: [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.text_message(sender, \"If you want to see your currently selected course \"\n \"and other information type 'Status'.\"))\n send_message(PAT, response_handler.text_message(sender, \"You can also type 'Hei' or 'Hallo' at any time \"\n \"to receive a greeting that shows your options.\"))\n send_message(PAT, response_handler.text_message(sender, \"Here is a list of commands you can use. This is \"\n \"recommended for the experienced user:\\n\"\n \"Change subject\\n\"\n \"Give feedback\\n\"\n \"How did today's lecture go?\\n\"\n \"Get schedule\\n\"\n \"Get info\\n\"\n \"All lectures\\n\"\n \"A specific lecture\\n\"\n \"You can type most of the commands in chat. Just \"\n \"give it a try!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"status\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n user = get_full_name(sender, PAT)\n lecture_id_current = lecture_methods.get_lecture_from_date(year, week, day, subject)\n lecture = feedback_methods.get_lecture_object(lecture_id_current)\n\n if user_methods.has_user(user_name):\n sub = user_methods.get_subject_from_user(user_name) + \" : \" + \\\n subject_info.course_name(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.user_info(sender, user_name, sub))\n if feedback_methods.user_has_feedback_for_lecture(user, lecture):\n send_message(PAT, response_handler.text_message(sender, \"You have given feedback for \"\n + subject + \"today. Well done! Be proud of \"\n \"yourself and remember to check in \"\n \"tomorrow.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please press 'Give Feedback' or write it in the \"\n \"chat to do so.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"We seem to not be able to detect you in the database. 
\"\n \"Please report this to the staff!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n # Checks if the subject has lectures in the database, adds them if not.\n\n elif payload == \"give feedback\" or \"give feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.give_feedback_choice(sender))\n\n elif payload == \"lecture speed\" or \"lecture speed\" in incoming_message.lower():\n\n subject = user_methods.get_subject_from_user(user_name)\n\n if lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added.\"))\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" does not exist. Likely due to the subject having \"\n \"no lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif payload == \"evaluation_questions\" or \"lecture questions\" in incoming_message.lower():\n # User wants to give feedback for a lecture.\n subject = user_methods.get_subject_from_user(user_name)\n payload = \"evaluation_questions\" # if user typed 'lecture questions' the payload will be None\n\n if lecture_methods.check_lecture_in_db(subject):\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because there \"\n \"is no lecture today, or because you have already \"\n \"given feedback for this lecture.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added\"))\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(\n user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because \"\n \"there is no lecture today, or because you\"\n \" have already given feedback for this lecture.\"\n \"\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \"does not exist. 
Likely due to the subject having \"\n \"no \"\n \"lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif \"too slow\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '0'\n message_response = \"too slow\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"it's all right\" in incoming_message.lower() or \"its all right\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '1'\n message_response = \"It's all right\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"too fast\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '2'\n message_response = \"too fast\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif (\"today\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"todays\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"today's\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()):\n # Gathers the correct information about the date.\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n subject = user_methods.get_subject_from_user(user_name)\n # Gathers the feedback from today's lecture:\n if lecture_methods.check_lecture_in_db(subject):\n feedback_list = feedback_methods.get_single_lecture_feed(year, week, day, 
subject)\n if feedback_list[0] is not None:\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please try again at a later date.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender, \"No lecture present in the database. \"\n \"Please provide some feedback and try again.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get schedule\" or \"get schedule\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n schedule = subject_info.printable_schedule(subject_info.get_schedule(subject))\n if len(schedule) > 640:\n msg_list = message_split.message_split(schedule)\n for msg in msg_list:\n print(msg)\n send_message(PAT, response_handler.text_message(sender, msg))\n else:\n send_message(PAT, response_handler.text_message(sender, schedule))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get info\" or \"get info\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n send_message(PAT, response_handler.text_message(sender,\n subject_info.printable_course_info(\n subject_info.get_course_json(subject))))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get feedback\" or \"get feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.get_feedback_specific_or_all(sender))\n\n elif payload == \"all_lectures\" or \"all lectures\" in incoming_message.lower():\n # The user wants to see feedback for all lectures in the selected subject\n subject = user_methods.get_subject_from_user(user_name)\n if not lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.text_message(sender, \"Course has no feedback.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n feedback, feedbackevaluation = feedback_methods.get_all_subject_feed(subject)\n if len(feedback) > 0:\n percent_list = bot_feedback.generate_percent_for_speed(feedback)\n send_message(PAT, response_handler.all_feedback_speed(sender, subject, percent_list))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture speed.\"))\n if len(feedbackevaluation) > 0:\n percent_list_questions = bot_feedback.generate_percent_for_questions(feedbackevaluation)\n\n send_message(PAT, response_handler.all_feedback_questions(sender, subject, percent_list_questions))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture questions.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"a_specific_lecture\" or \"a specific lecture\" in incoming_message.lower():\n # Let the user choose what year to get feedback from.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n if len(years) > 0:\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n else:\n 
send_message(PAT, response_handler.text_message(sender, 'No feedback for the selected subject.'))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload is not None:\n # Underneath are check that use .split() on the payload.\n if \"evaluation_questions\" in payload.split()[0]:\n payload_split = payload.split()\n if len(payload_split) == 1:\n # 1st question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 2:\n # 2nd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 3:\n # 3rd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 4:\n # 4th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 5:\n # 5th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 6:\n # 6th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 7:\n # 7th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 8:\n # store feedback.\n subject = user_methods.get_subject_from_user(user_name)\n if feedback_methods.add_feedback_evaluation(user_name, subject, int(payload_split[1]),\n int(payload_split[2]), int(payload_split[3]),\n int(payload_split[4]), int(payload_split[5]),\n int(payload_split[6]), int(payload_split[7])):\n # Storing the feedback succeeded.\n send_message(PAT, response_handler.text_message(sender, 'Feedback received!'))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n # Storing the feedback failed.\n send_message(PAT, response_handler.text_message(sender,\n \"There is either no lecture active in the \"\n \"selected subject, or you have already given \"\n \"feedback to the active lecture.\\n Feedback \"\n \"denied!\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n pass\n\n elif \"get_lecture_feedback_year\" in payload.split()[0]:\n # Let the user choose what semester to get feedback from.\n semesters = []\n if lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 1, 17, int(payload.split()[1])):\n semesters.append('Spring')\n elif lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 32, 49, int(payload.split()[1])):\n semesters.append('Fall')\n if len(semesters) > 0:\n send_message(PAT, response_handler.get_feedback_semester(sender, payload.split()[1], semesters))\n else:\n # Take the user one step up to choose a different year.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n\n elif \"get_lecture_feedback_semester\" in payload.split()[0]:\n # Let the user choose what weeks to get feedback from.\n\n week_list = lecture_feedback_db_methods.get_lecture_weeks(user_methods.get_subject_from_user(user_name),\n int(payload.split()[1]), payload.split()[2])\n if len(week_list) > 8:\n send_message(PAT, response_handler.get_feedback_month(sender, payload.split()[1], week_list))\n else:\n send_message(PAT, response_handler.get_feedback_week(sender, payload.split()[1], week_list))\n\n elif \"get_lecture_feedback_month\" in payload.split()[0]:\n 
# Let the user select week\n week_list = []\n payload_split = payload.split()\n for i in range(2, len(payload_split)):\n week_list.append(int(payload_split[i].rstrip(',')))\n\n send_message(PAT, response_handler.get_feedback_week(sender, payload_split[1], week_list))\n\n elif \"get_lecture_feedback_week\" in payload.split()[0]:\n # Let the user select a day.\n lecture_days = lecture_feedback_db_methods.get_day_of_lecture_in_week(\n user_methods.get_subject_from_user(user_name), payload.split()[1], payload.split()[2])\n\n send_message(PAT, response_handler.get_feedback_day(sender, payload.split()[1], lecture_days,\n payload.split()[2]))\n\n elif \"get_lecture_feedback_day\" in payload.split()[0]:\n\n subject = user_methods.get_subject_from_user(user_name)\n # Gives the user feedback from the selected day.\n feedback_list = feedback_methods.get_single_lecture_feed(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n feedback_questions_list = feedback_methods.get_single_lecture_feedback_questions(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n\n if len(feedback_list[1]) > 0: # Checks if there is feedback in the variable.\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture speed.\"))\n if len(feedback_questions_list) > 0: # Checks if there is feedback in the variable.\n feedback_questions = bot_feedback.generate_percent_for_questions(feedback_questions_list)\n send_message(PAT,\n response_handler.present_single_lecture_feedback_questions(sender, feedback_questions))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture \"\n \"questions.\"))\n\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif ime_data_fetch.subject_exists_boolean(incoming_message.upper().split()[0]):\n if user_methods.has_user(user_name):\n user_methods.add_subject(user_name, incoming_message.split()[0])\n else:\n user_methods.add_user(user_name, incoming_message.split()[0])\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Type 'help' to see what you can do with L.I.M.B.O.\\n If \"\n \"you tried to enter a subject-code and got this message,\"\n \" you either misspelled it or the subject you are looking \"\n \"for is not a subject at NTNU.\"))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n return \"ok\"","def message_received_handler(pdu, **kwargs):\n\n logging.warning('Message received handler (Override me)')","def _process_message(self, obj):\n pass",
"def callback(ch, method, properties, body):\n requestParams = json.loads(body.decode('utf-8'))\n # print(\"inside the callback\")\n arg1 = int(requestParams[0])\n arg2 = int(requestParams[1])\n result = whaleClassifier.test(arg1, arg2)\n # What this does is publish the RESULT to the exchange. (Producers cannot\n # send messages directly to queues; they publish to exchanges, and exchanges\n # then route to queues. Note that exchange='' is the default exchange, which\n # routes to the queue named by the routing_key argument.)\n ch.basic_publish(exchange='',\n routing_key=results_queue,\n body=json.dumps(result),\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n ))\n # ch.basic_ack(delivery_tag=method.delivery_tag) # Needed so that we don't\n # redeliver this same message the next time this script starts up, which\n # eventually clogs up memory.","def test_message_user():","def handle(self, data):\n pass","def onMessage(self, payload, isBinary):",
"def testIgnoreMessage(self):\n\n self.logger.accept('c',self.logger.foo)\n self.logger.accept('c',self.logger.bar)\n self.logger.ignore('c')\n messager.send('c')\n # No methods should have been called.\n self.assertEqual(self.logger.log,[])",
"def __call__(self, test_case, response, **assertions):\n self.assert_x_sendfile_response(test_case, response)\n for key, value in iteritems(assertions):\n assert_func = getattr(self, 'assert_%s' % key)\n assert_func(test_case, response, value)",
"def callback():\n signature = request.headers['X-Line-Signature']\n body = request.get_data(as_text=True)\n logger.info('Request body: %s', body)\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n logger.exception(\n 'Invalid signature. Please check your channel access token/channel secret.')\n abort(400)\n\n return 'OK'",
"def messageHandler(self, source, message, messageId):\n try:\n type, params, data = message.split(':', 2)\n except ValueError:\n # Not a real message\n return\n\n try:\n getattr(self, \"thive_%s\" % type)(messageId, params.split(), data)\n except AttributeError as c:\n raise c\n print(\"[HIVE] No method bound for command '%s'\" % type)",
"def handle_request(self):\n try:\n content_type = self.headers.get('content-type')\n\n if content_type != 'application/json':\n self.write_empty_response(400)\n return\n\n content_len = int(self.headers.get('content-length', 0))\n\n # If content was provided, then parse it\n if content_len > 0:\n message = json.loads(self.rfile.read(content_len))\n else:\n self.write_empty_response(400)\n return\n\n helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')\n\n aspect_type = message['aspect_type']\n object_id = message['object_id']\n object_type = message['object_type']\n # make owner_id a str to avoid issues with athlete_checkpoint dict\n owner_id = str(message['owner_id'])\n\n athlete_checkpoint = helper.get_check_point(\"webhook_updates\") or {}\n\n # We only care about activity updates. 
New activities are pulled in automatically as strava_api input restarts.\n if aspect_type == 'update' and object_type == 'activity':\n if owner_id not in athlete_checkpoint:\n athlete_checkpoint[owner_id] = []\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n else:\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n helper.log_debug(f'webhooks_updates checkpoint: {helper.get_check_point(\"webhook_updates\")}')\n\n # Send data to Splunk\n data = json.dumps(message)\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n\n # Strava API expects a 200 response\n self.write_empty_response(200)\n\n # Restart strava_api inputs to pull in the data unless it's a delete, as the input doesn't do anything with that anyway.\n if aspect_type != 'delete':\n self.restart_input('strava_api', self.SESSION_KEY)\n helper.log_info(f'Reloading Strava API input to retrieve updated activity {object_id} for athlete {owner_id}.')\n\n except Exception as ex:\n helper.log_error(f'Something went wrong in handle request: {ex}')",
"def __call__(self, event, payload):\n\n logging.info('\\n\\nReceived Event: ' + str(event) + '\\nPayload: ' + str(payload))\n\n try:\n\n if event == 'AlertHandler:StartDebug':\n logging.getLogger().setLevel(logging.DEBUG)\n logging.info('Logging level changed to DEBUG Mode')\n\n elif event == 'AlertHandler:EndDebug':\n logging.getLogger().setLevel(logging.INFO)\n logging.info('Logging level changed to INFO Mode')\n\n elif event in self.args['AlertEvent'].keys():\n handler = retrieveHandler(self.args['AlertEvent'][event], 'AlertHandler')\n handler(payload)\n\n except Exception as ex:\n logging.error('Exception caught while handling the event: ' + str(event) + ' payload: ' + str(payload))\n logging.error(str(ex))\n\n return",
"def handler(event, context):\n pub_sub_message = base64.b64decode(event['data']).decode('utf-8')\n\n if pub_sub_message == 'executor':\n LOGGER.debug('POST: %s', EVENTS_EXECUTION_ENDPOINT)\n response = requests.post(EVENTS_EXECUTION_ENDPOINT, json={'type': 'POLICY'},\n headers=utils.get_auth_header())\n LOGGER.debug('Response: %s', response.text)\n\n elif pub_sub_message == 'validator':\n LOGGER.debug('POST: %s', EVENTS_VALIDATION_ENDPOINT)\n response = requests.post(EVENTS_VALIDATION_ENDPOINT,\n headers=utils.get_auth_header())\n LOGGER.debug('Response: %s', response.text)\n\n else:\n LOGGER.warning('Unexpected message from PubSub: %s', pub_sub_message)\n return",
"def test_dispatch_outbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.outbound'), [])\n msg = msg_helper.make_outbound('message')\n yield worker_helper.dispatch_outbound(msg, 'fooconn')\n self.assertEqual(\n broker.get_messages('vumi', 'fooconn.outbound'), [msg])","def on_delivered(self, frame):\n pass",
"def test_create_message_with_success(self, mock_client):\n\n event = {\n 'operation': 'createMessage',\n 'arguments': {\n 'template': 'my-sample-geofence-id',\n 'input': {\n 'service': 'APNS',\n 'action': 'OPEN_APP',\n 'title': 'Sample Title',\n 'body': 'This is a sample body'\n }\n }\n }\n\n response = {\n \"Arn\": f'arn:aws:mobiletargeting:eus-east-1:SOME_ACCOUNT_ID:templates/my-sample-geofence-id/PUSH',\n \"RequestID\": 
\"some-request-id\",\n \"Message\": 'some message' \n }\n\n mock_client().create_push_template.return_value = response\n response = manageMessages.handler(event, None)\n\n self.assertTrue(response)\n self.assertEqual(response['status'], 'MESSAGE_CREATED')","def process(self, message: Message, **kwargs: Any) -> None:\n pass","def handle_send_message(self, message_header, message):\n pass","def verify_as_target(self, message_handler):","def handle(self, rsm_ctx):\n pass","def handle_message(self, msg, identity=None):\n\n if (self._supervisor and\n not isinstance(msg, mplane.model.Envelope)):\n self._exporter.put_nowait([msg, identity])\n\n if isinstance(msg, mplane.model.Capability):\n self._add_capability(msg, identity)\n elif isinstance(msg, mplane.model.Withdrawal):\n self._withdraw_capability(msg, identity)\n elif isinstance(msg, mplane.model.Receipt):\n self._handle_receipt(msg, identity)\n elif isinstance(msg, mplane.model.Result):\n self._handle_result(msg, identity)\n elif isinstance(msg, mplane.model.Exception):\n self._handle_exception(msg, identity)\n elif isinstance(msg, mplane.model.Envelope):\n if msg.get_token() in self._receipts:\n self._handle_result(msg, identity)\n else:\n for imsg in msg.messages():\n self.handle_message(imsg, identity)\n else:\n raise ValueError(\"Internal error: unknown message \"+repr(msg))","def test_sample_status_custom(self):\n self.app = self.make_app(argv = ['report', 'sample_status', self.examples[\"project\"], self.examples[\"flowcell\"], '--debug', '--customer_reference', 'MyCustomerReference', '--uppnex_id', 'MyUppnexID', '--ordered_million_reads', '10', '--phix', '{1:0.1, 2:0.2}'],extensions=['scilifelab.pm.ext.ext_couchdb'])\n handler.register(DeliveryReportController)\n self._run_app()\n data = ast.literal_eval(self.app._output_data['debug'].getvalue())\n s_param_map = {x[\"scilifelab_name\"]:x for x in data[\"s_param\"]}\n self.assertEqual(s_param_map['P001_101_index3']['uppnex_project_id'], 'MyUppnexID')\n self.assertEqual(s_param_map['P001_101_index3']['customer_reference'], 'MyCustomerReference')\n self.assertEqual(s_param_map['P001_101_index3']['ordered_amount'], 10)","def testWholeRequest(self):\n body = self.protocol.encode_message(self.request_message)\n self.Reinitialize(input=body,\n content_type=self.content_type)\n self.factory.add_request_mapper(self.mapper())\n self.service_handler.handle('POST', '/my_service', 'method1')\n VerifyResponse(self,\n self.service_handler.response,\n '200',\n 'OK',\n self.protocol.encode_message(self.response_message),\n self.content_type)","def commands_coverage_server():\n try:\n coverage()\n coverage_server()\n except KeyboardInterrupt:\n logger.info(\"Command canceled\")","def test_send(self):\n # Required to get useful test names\n super(TestCisObjOutput_local, self).test_send()","def on_message(self, data):\n req = json.loads(data)\n self.serve(req)","def on_message(self, data):\n req = json.loads(data)\n self.serve(req)","async def testsay(self, ctx, *, message):\n await ctx.send(message)","def test_message_group():","def obj_received(self, obj):\n\n # TODO do something like handler registry\n\n if isinstance(obj, pb.Ping):\n self.handle_ping(obj)\n\n elif isinstance(obj, pb.Pong):\n self.handle_pong(obj)\n\n elif isinstance(obj, pb.ACS):\n if self.factory.config.failure != 'omission':\n res = self.factory.acs.handle(obj, self.remote_vk)\n self.process_acs_res(res, obj)\n\n elif isinstance(obj, pb.TxReq):\n self.factory.tc_runner.handle_tx_req(obj, self.remote_vk)\n\n elif isinstance(obj, 
pb.TxResp):\n self.factory.tc_runner.handle_tx_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationReq):\n self.factory.tc_runner.handle_validation_req(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationResp):\n self.factory.tc_runner.handle_validation_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.SigWithRound):\n self.factory.tc_runner.handle_sig(obj, self.remote_vk)\n\n elif isinstance(obj, pb.CpBlock):\n self.factory.tc_runner.handle_cp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Cons):\n self.factory.tc_runner.handle_cons(obj, self.remote_vk)\n\n elif isinstance(obj, pb.AskCons):\n self.factory.tc_runner.handle_ask_cons(obj, self.remote_vk)\n\n # NOTE messages below are for testing, bracha/mo14 is normally handled by acs\n\n elif isinstance(obj, pb.Bracha):\n if self.factory.config.failure != 'omission':\n self.factory.bracha.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Mo14):\n if self.factory.config.failure != 'omission':\n self.factory.mo14.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Dummy):\n logging.info(\"NODE: got dummy message from {}\".format(b64encode(self.remote_vk)))\n\n else:\n raise AssertionError(\"invalid message type {}\".format(obj))\n\n self.factory.recv_message_log[obj.__class__.__name__] += obj.ByteSize()","def setUp(self):\n self.hex_data = \"0251112233445566778899a1a2a3a4a5a6a7a8a9aaabacadae\"\n self.message_id = 0x51\n self.bytes_data = bytearray(unhexlify(self.hex_data))\n self.address = Address(\"112233\")\n self.target = Address(\"445566\")\n self.flags = MessageFlags(0x77)\n self.cmd1 = int(0x88)\n self.cmd2 = int(0x99)\n self.user_data = UserData(unhexlify(\"a1a2a3a4a5a6a7a8a9aaabacadae\"))\n\n self.msg, self.msg_bytes = hex_to_inbound_message(self.hex_data)\n set_log_levels(\n logger=\"info\",\n logger_pyinsteon=\"info\",\n logger_messages=\"info\",\n logger_topics=False,\n )","def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return","def receive_message(self, message):","def handle_inbound_message():\n data = json.loads(request.data)\n\n if data[0][\"type\"] == \"message-received\":\n if \"call me\" in data[0][\"message\"][\"text\"]:\n handle_inbound_sms_call_me(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n elif \"media\" in data[0][\"message\"]:\n handle_inbound_media_mms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"], data[0][\"message\"][\"media\"])\n else:\n handle_inbound_sms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n else:\n print(data)\n return \"\"","def callback(ch, method, properties, body):\n print(f\" [x] Received {str(body)} kW.\")\n\n try:\n timestamp = properties.timestamp\n current_time = datetime.utcfromtimestamp(timestamp).replace(\n tzinfo=timezone.utc\n )\n except AttributeError:\n # If we don't get a timestamp from the broker, add a timestamp here.\n current_time = datetime.now().replace(tzinfo=timezone.utc)\n\n pv_photovoltaic = generate_pv_output(current_time)\n\n report_item = PVMeterReportItem(\n timestamp=current_time.isoformat(),\n pv_meter=int(body),\n pv_photovoltaic=pv_photovoltaic,\n )\n generate_report(report_item)\n\n ch.basic_ack(delivery_tag=method.delivery_tag)","def incoming(self, msg):\n hdr = 
msg.header\n\n # Signals:\n if hdr.message_type is MessageType.signal:\n key = (hdr.fields.get(HeaderFields.path, None),\n hdr.fields.get(HeaderFields.interface, None),\n hdr.fields.get(HeaderFields.member, None)\n )\n cb = self.signal_callbacks.get(key, None)\n if cb is not None:\n cb(msg.body)\n return\n\n # Method returns & errors\n reply_serial = hdr.fields.get(HeaderFields.reply_serial, -1)\n reply_handle = self.awaiting_reply.pop(reply_serial, None)\n if reply_handle is not None:\n if hdr.message_type is MessageType.method_return:\n reply_handle.set_result(msg.body)\n return\n elif hdr.message_type is MessageType.error:\n reply_handle.set_exception(DBusErrorResponse(msg))\n return\n\n if self.on_unhandled:\n self.on_unhandled(msg)",
"def test_send(self, logger: Logger, mocker: MockerFixture) -> None:\n identifier = \"123\"\n task = OctaveTask()\n task.session_id = identifier\n handler = OutputHandler(task)\n logger.addHandler(handler)\n\n emit = mocker.patch(\"matl_online.tasks.socket.emit\")\n\n logger.info(\"test1\")\n logger.info(\"[STDERR]error\")\n handler.send()\n\n assert emit.called == 1\n assert len(emit.call_args) == 2\n\n event, payload = emit.call_args[0]\n\n expected_data = {\n \"session\": identifier,\n \"data\": [\n {\"type\": \"stdout\", \"value\": \"test1\"},\n {\"type\": \"stderr\", \"value\": \"error\"},\n ],\n }\n\n assert payload == expected_data\n assert event == \"status\"\n assert emit.call_args[1].get(\"room\") == identifier","def _process_msg(cls, msg):\n raise NotImplementedError","def test_get_request_output(self):\n pass",
"def on_message(self, unused_channel, basic_deliver, properties, body):\n\n start = time.time()\n self.invocations += 1\n\n logger.info(\n u\"[{}] received message #{} from exchange {}: {}\".format(self.bot_id,\n basic_deliver.delivery_tag, self.exchange,\n body.decode('utf-8')))\n\n self.statsd.incr(self.statsd_prefix + \"message.receive\")\n\n # Ack the message before processing to tell rabbit we got it.\n # TODO before sending ack we should persist the message in a local queue to avoid the possibility of losing it\n self.acknowledge_message(basic_deliver.delivery_tag)\n\n try:\n\n try:\n json_body = json.loads(body)\n\n except ValueError as ve:\n logger.exception(\n \"[{}] Invalid JSON received from exchange: {} error: {} msg body: {!r}\".format(self.bot_id,\n self.exchange,\n ve, body))\n raise\n\n else:\n response_messages = self.callback_func(json_body)\n\n if response_messages is None:\n response_messages = []\n\n logger.info(\"[{}] Sending {} response messages\".format(self.bot_id, len(response_messages)))\n\n for message in response_messages:\n self._channel.basic_publish(exchange=message.get('exchange', self.exchange),\n routing_key=message.get('queue', self.queue_name),\n body=message.get('body'))\n logger.info(\"[{}] published message {}\".format(self.bot_id, message))\n self.statsd.incr(self.statsd_prefix + \"message.publish\")\n\n except Exception as e:\n msg = \"[{}] Unexpected error - {}, message {}, from exchange {}. Sending to error queue {}\"\n self.statsd.incr(self.statsd_prefix + \"message.error\")\n logger.exception(msg.format(self.bot_id, e, body, self.exchange, self.error_queue_name))\n self._channel.basic_publish(exchange='',\n routing_key=self.error_queue_name,\n body=body)\n\n exec_time_millis = int((time.time() - start) * 1000)\n self.total_execution_time += exec_time_millis\n\n logger.debug(\"Consumer {0} message handling time: {1}ms\".format(self.consumer_id, exec_time_millis))\n\n # if we have processed 100 messages, log out the average execution time at INFO then reset the total\n if self.invocations % 100 == 0:\n average_execution_time = self.total_execution_time / 100\n logger.info(\"Consumer {0} Avg message handling time (last 100): {1}ms\".format(self.consumer_id, average_execution_time))\n self.total_execution_time = 0\n\n self.statsd.timing(self.statsd_prefix + 'message.process.time', int((time.time() - start) * 1000))",
"def test_base_logging(self):\n\n n = nodes.BaseNode(log_output=True)\n n.channel = FakeChannel(self.loop)\n\n m = generate_msg(message_content='test')\n\n ret = self.loop.run_until_complete(n.handle(m))\n\n # Check return\n self.assertTrue(isinstance(ret, message.Message))\n self.assertEqual(ret.payload, 'test', \"Base node not working !\")\n self.assertEqual(n.processed, 1, \"Processed msg count broken\")\n\n n.channel.logger.log.assert_any_call(10, 'Payload: %r', 'test')\n n.channel.logger.log.assert_called_with(10, 'Meta: %r', {'question': 'unknown'})",
"def handle(self):\n self.app.logger.info('==== handle github event: %s', self.event)\n # self.app.logger.info('data send: %s', json.dumps(self.data, indent=2))\n if self.event == 'ping':\n return {'msg': 'Hi!'}\n else:\n task_match = []\n repo_config = self.get_repo_config()\n if repo_config:\n for task_config in repo_config['tasks']:\n event_hit = False\n if self.event == 'push':\n event_hit = self._is_task_push(task_config)\n elif self.event == 'pull_request':\n event_hit = self._is_task_pull_request(task_config)\n if event_hit:\n task_match.append(task_config)\n # work starts executing here...\n for task in task_match:\n self.app.logger.info(\"event hit, start tasks under %s/%s...\", self.repo_meta['owner'], self.repo_meta['name'])\n self._jenkins_build(task)\n return \"OK\"","def test_filter_messages(self):\n pass",
"def setUp(self):\n h = self.MyTestHandler()\n h.request = Request.blank('/rpc/')\n h.response = Response()\n self.handler = h","def _handle_custom_msg(self, content, buffers):\n self._msg_callbacks(self, content, buffers)",
"def test_basic_asgi_call(self):\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs)","def test_basic_asgi_call(self):\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs)","def handle_msg(self, state_id, msg):\n pass",
"def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception while receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))","async def _response_handler(self):",
"def _on_message(self, message):\n print(\"RECEIVED on \" + self.session_name + \":\")\n message_json = json.loads(message)\n print(json.dumps(message_json, sort_keys=True, 
indent=2, separators=(',', ':')))\n\n for singleMsg in message_json:\n self._process_message(singleMsg)"]
payload == \\\"a_specific_lecture\\\" or \\\"a specific lecture\\\" in incoming_message.lower():\\n # Let the user choose what year to get feedback from.\\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\\n if len(years) > 0:\\n send_message(PAT, response_handler.get_feedback_year(sender, years))\\n else:\\n send_message(PAT, response_handler.text_message(sender, 'No feedback for the selected subject.'))\\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\\n\\n elif payload is not None:\\n # Underneath are check that use .split() on the payload.\\n if \\\"evaluation_questions\\\" in payload.split()[0]:\\n payload_split = payload.split()\\n if len(payload_split) == 1:\\n # 1st question\\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\\n elif len(payload_split) == 2:\\n # 2nd question\\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\\n elif len(payload_split) == 3:\\n # 3rd question\\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\\n elif len(payload_split) == 4:\\n # 4th question\\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\\n elif len(payload_split) == 5:\\n # 5th question\\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\\n elif len(payload_split) == 6:\\n # 6th question\\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\\n elif len(payload_split) == 7:\\n # 7th question\\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\\n elif len(payload_split) == 8:\\n # store feedback.\\n subject = user_methods.get_subject_from_user(user_name)\\n if feedback_methods.add_feedback_evaluation(user_name, subject, int(payload_split[1]),\\n int(payload_split[2]), int(payload_split[3]),\\n int(payload_split[4]), int(payload_split[5]),\\n int(payload_split[6]), int(payload_split[7])):\\n # Storing the feedback succeeded.\\n send_message(PAT, response_handler.text_message(sender, 'Feedback received!'))\\n send_message(PAT, response_handler.has_course(sender, subject))\\n else:\\n # Storing the feedback failed.\\n send_message(PAT, response_handler.text_message(sender,\\n \\\"There is either no lecture active in the \\\"\\n \\\"selected subject, or you have already given \\\"\\n \\\"feedback to the active lecture.\\\\n Feedback \\\"\\n \\\"denied!\\\"))\\n send_message(PAT, response_handler.has_course(sender, subject))\\n pass\\n\\n elif \\\"get_lecture_feedback_year\\\" in payload.split()[0]:\\n # Let the user choose what semester to get feedback from.\\n semesters = []\\n if lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\\n 1, 17, int(payload.split()[1])):\\n semesters.append('Spring')\\n elif lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\\n 32, 49, int(payload.split()[1])):\\n semesters.append('Fall')\\n if len(semesters) > 0:\\n send_message(PAT, response_handler.get_feedback_semester(sender, payload.split()[1], semesters))\\n else:\\n # Take the user one step up to choose a different year.\\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\\n send_message(PAT, response_handler.get_feedback_year(sender, years))\\n\\n elif \\\"get_lecture_feedback_semester\\\" in payload.split()[0]:\\n # Let the user choose what weeks to get feedback 
from.\\n\\n week_list = lecture_feedback_db_methods.get_lecture_weeks(user_methods.get_subject_from_user(user_name),\\n int(payload.split()[1]), payload.split()[2])\\n if len(week_list) > 8:\\n send_message(PAT, response_handler.get_feedback_month(sender, payload.split()[1], week_list))\\n else:\\n send_message(PAT, response_handler.get_feedback_week(sender, payload.split()[1], week_list))\\n\\n elif \\\"get_lecture_feedback_month\\\" in payload.split()[0]:\\n # Let the user select week\\n week_list = []\\n payload_split = payload.split()\\n for i in range(2, len(payload_split)):\\n week_list.append(int(payload_split[i].rstrip(',')))\\n\\n send_message(PAT, response_handler.get_feedback_week(sender, payload_split[1], week_list))\\n\\n elif \\\"get_lecture_feedback_week\\\" in payload.split()[0]:\\n # Lets the user select day\\n lecture_days = lecture_feedback_db_methods.get_day_of_lecture_in_week(\\n user_methods.get_subject_from_user(user_name), payload.split()[1], payload.split()[2])\\n\\n send_message(PAT, response_handler.get_feedback_day(sender, payload.split()[1], lecture_days,\\n payload.split()[2]))\\n\\n elif \\\"get_lecture_feedback_day\\\" in payload.split()[0]:\\n\\n subject = user_methods.get_subject_from_user(user_name)\\n # Gives the user feedback from the selected day.\\n feedback_list = feedback_methods.get_single_lecture_feed(payload.split()[1],\\n payload.split()[2],\\n payload.split()[3],\\n subject)\\n feedback_questions_list = feedback_methods.get_single_lecture_feedback_questions(payload.split()[1],\\n payload.split()[2],\\n payload.split()[3],\\n subject)\\n\\n if len(feedback_list[1]) > 0: # Checks if there is feedback in the variable.\\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\\n else:\\n send_message(PAT, response_handler.text_message(sender,\\n \\\"This lecture has no feedback for lecture speed.\\\"))\\n if len(feedback_questions_list) > 0: # Checks if there is feedback in the variable.\\n feedback_questions = bot_feedback.generate_percent_for_questions(feedback_questions_list)\\n send_message(PAT,\\n response_handler.present_single_lecture_feedback_questions(sender, feedback_questions))\\n else:\\n send_message(PAT, response_handler.text_message(sender,\\n \\\"This lecture has no feedback for lecture \\\"\\n \\\"questions.\\\"))\\n\\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\\n\\n elif ime_data_fetch.subject_exists_boolean(incoming_message.upper().split()[0]):\\n if user_methods.has_user(user_name):\\n user_methods.add_subject(user_name, incoming_message.split()[0])\\n else:\\n user_methods.add_user(user_name, incoming_message.split()[0])\\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\\n\\n else:\\n send_message(PAT, response_handler.text_message(sender,\\n \\\"Type 'help' to see what you can do with L.I.M.B.O.\\\\n If \\\"\\n \\\"you tried to enter a subject-code and got this message,\\\"\\n \\\" you either misspelled it or the subject you are looking \\\"\\n \\\"for is not a subject at NTNU.\\\"))\\n if user_methods.has_user(user_name):\\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\\n else:\\n send_message(PAT, response_handler.no_course(sender))\\n\\n return \\\"ok\\\"\",\n \"def message_received_handler(pdu, **kwargs):\\n\\n logging.warning('Message received handler (Override me)')\",\n \"def _process_message(self, obj):\\n 
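
The L.I.M.B.O. handler above is one long `elif` chain keyed on button payloads and message keywords. A dispatch-table layout is one common way to keep such a chain manageable; the sketch below is illustrative only — the handler names and return values are invented, not part of the bot.

def handle_give_feedback(sender, incoming_message):
    # Hypothetical handler; stands in for the 'give feedback' branch above.
    return "give_feedback_choice"


def handle_lecture_speed(sender, incoming_message):
    # Hypothetical handler; stands in for the 'lecture speed' branch above.
    return "lec_feed"


KEYWORD_HANDLERS = {
    "give feedback": handle_give_feedback,
    "lecture speed": handle_lecture_speed,
}


def dispatch(sender, payload, incoming_message):
    # Exact payload match first, then keyword containment, mirroring the elif chain.
    for keyword, handler in KEYWORD_HANDLERS.items():
        if payload == keyword or keyword in incoming_message.lower():
            return handler(sender, incoming_message)
    return None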
pass\",\n \"def callback(ch, method, properties, body):\\n requestParams = json.loads(body.decode('utf-8'))\\n # print(\\\"inside the callback\\\")\\n arg1 = int(requestParams[0])\\n arg2 = int(requestParams[1])\\n result = whaleClassifier.test(arg1, arg2)\\n # what this does it publish the RESULT to the exchange (as producers of content \\n # cannot send stuff directly to queues, they send to exchanges and then exchanges \\n # send to queues. Note Exchange='' is default exchange which then sends to the\\n # queue that is listed on the ROUTING_KEY argument.)\\n ch.basic_publish(exchange='', \\n routing_key=results_queue, \\n body=json.dumps(result),\\n properties=pika.BasicProperties(\\n delivery_mode = 2, # make message persistent\\n ))\\n # ch.basic_ack(delivery_tag=method.delivery_tag) #need this line so that we don't resend this same message again the next time\\n # we start up this script. Which eventually clogs up memory\",\n \"def test_message_user():\",\n \"def handle(self, data):\\n pass\",\n \"def onMessage(self, payload, isBinary):\",\n \"def testIgnoreMessage(self):\\n\\n self.logger.accept('c',self.logger.foo)\\n self.logger.accept('c',self.logger.bar)\\n self.logger.ignore('c')\\n messager.send('c')\\n # No methods should have been called.\\n self.assertEqual(self.logger.log,[])\",\n \"def __call__(self, test_case, response, **assertions):\\n self.assert_x_sendfile_response(test_case, response)\\n for key, value in iteritems(assertions):\\n assert_func = getattr(self, 'assert_%s' % key)\\n assert_func(test_case, response, value)\",\n \"def callback():\\n signature = request.headers['X-Line-Signature']\\n body = request.get_data(as_text=True)\\n logger.info('Request body: %s', body)\\n try:\\n handler.handle(body, signature)\\n except InvalidSignatureError:\\n logger.exception(\\n 'Invalid signature. Please check your channel access token/channel secret.')\\n abort(400)\\n\\n return 'OK'\",\n \"def messageHandler(self, source, message, messageId):\\n try:\\n type, params, data = message.split(':',2)\\n except:\\n # Not a real message\\n return\\n \\n try:\\n getattr(self, \\\"thive_%s\\\" % type)(messageId, params.split(), data)\\n except exceptions.AttributeError, c:\\n raise c\\n print \\\"[HIVE] No method bound for command '%s'\\\" % type\",\n \"def handle_request(self):\\n try:\\n content_type = self.headers.get('content-type')\\n\\n if content_type != 'application/json':\\n self.write_empty_response(400)\\n return\\n\\n content_len = int(self.headers.get('content-length', 0))\\n\\n # If content was provided, then parse it\\n if content_len > 0:\\n message = json.loads(self.rfile.read(content_len))\\n else:\\n self.write_empty_response(400)\\n return\\n\\n helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')\\n\\n aspect_type = message['aspect_type']\\n object_id = message['object_id']\\n object_type = message['object_type']\\n # make owner_id a str to avoid issues with athlete_checkpoint dict\\n owner_id = str(message['owner_id'])\\n\\n athlete_checkpoint = helper.get_check_point(\\\"webhook_updates\\\") or {}\\n\\n # We only care about activity updates. 

def handle_request(self):
    try:
        content_type = self.headers.get('content-type')

        if content_type != 'application/json':
            self.write_empty_response(400)
            return

        content_len = int(self.headers.get('content-length', 0))

        # If content was provided, then parse it
        if content_len > 0:
            message = json.loads(self.rfile.read(content_len))
        else:
            self.write_empty_response(400)
            return

        helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')

        aspect_type = message['aspect_type']
        object_id = message['object_id']
        object_type = message['object_type']
        # make owner_id a str to avoid issues with athlete_checkpoint dict
        owner_id = str(message['owner_id'])

        athlete_checkpoint = helper.get_check_point("webhook_updates") or {}

        # We only care about activity updates. New activities are pulled in
        # automatically as strava_api input restarts.
        if aspect_type == 'update' and object_type == 'activity':
            if owner_id not in athlete_checkpoint:
                athlete_checkpoint[owner_id] = []
            athlete_checkpoint[owner_id].append(object_id)
            helper.save_check_point("webhook_updates", athlete_checkpoint)
            helper.log_debug(f'webhooks_updates checkpoint: {helper.get_check_point("webhook_updates")}')

        # Send data to Splunk
        data = json.dumps(message)
        event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(),
                                 sourcetype=helper.get_sourcetype(), data=data)
        ew.write_event(event)

        # Strava API expects a 200 response
        self.write_empty_response(200)

        # Restart strava_api inputs to pull in the data unless it's a delete,
        # as the input doesn't do anything with that anyway.
        if aspect_type != 'delete':
            self.restart_input('strava_api', self.SESSION_KEY)
            helper.log_info(f'Reloading Strava API input to retrieve updated activity {object_id} '
                            f'for athlete {owner_id}.')

    except Exception as ex:
        helper.log_error(f'Something went wrong in handle request: {ex}')


def __call__(self, event, payload):

    logging.info('\n\nReceived Event: ' + str(event) + '\nPayload: ' + str(payload))

    try:

        if event == 'AlertHandler:StartDebug':
            logging.getLogger().setLevel(logging.DEBUG)
            logging.info('Logging level changed to DEBUG Mode')

        elif event == 'AlertHandler:EndDebug':
            logging.getLogger().setLevel(logging.INFO)
            logging.info('Logging level changed to INFO Mode')

        elif event in self.args['AlertEvent'].keys():
            handler = retrieveHandler(self.args['AlertEvent'][event], 'AlertHandler')
            handler(payload)

    except Exception as ex:

        logging.error('Exception Caught while handling the event: ' + str(event) + ' payload: ' + str(payload))
        logging.error(str(ex))

    return


def handler(event, context):
    pub_sub_message = base64.b64decode(event['data']).decode('utf-8')

    if pub_sub_message == 'executor':
        LOGGER.debug('POST: %s', EVENTS_EXECUTION_ENDPOINT)
        response = requests.post(EVENTS_EXECUTION_ENDPOINT, json={'type': 'POLICY'},
                                 headers=utils.get_auth_header())
        LOGGER.debug('Response: %s', response.text)

    elif pub_sub_message == 'validator':
        LOGGER.debug('POST: %s', EVENTS_VALIDATION_ENDPOINT)
        response = requests.post(EVENTS_VALIDATION_ENDPOINT,
                                 headers=utils.get_auth_header())
        LOGGER.debug('Response: %s', response.text)

    else:
        LOGGER.warning('Unexpected message from PubSub: %s', pub_sub_message)
    return


def test_dispatch_outbound(self):
    msg_helper = MessageHelper()
    worker_helper = WorkerHelper()
    broker = self.setup_broker(worker_helper)
    self.assertEqual(broker.get_messages('vumi', 'fooconn.outbound'), [])
    msg = msg_helper.make_outbound('message')
    yield worker_helper.dispatch_outbound(msg, 'fooconn')
    self.assertEqual(
        broker.get_messages('vumi', 'fooconn.outbound'), [msg])


def on_delivered(self, frame):
    pass
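
Background Pub/Sub functions receive their payload base64-encoded under `event['data']`, so the `handler` above can be exercised locally by constructing that shape by hand. A minimal sketch:

import base64

# Simulate the event a Pub/Sub-triggered function receives for the 'executor' message.
fake_event = {'data': base64.b64encode(b'executor').decode('utf-8')}
handler(fake_event, None)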
\\\"Arn\\\": f'arn:aws:mobiletargeting:eus-east-1:SOME_ACCOUNT_ID:templates/my-sample-geofence-id/PUSH',\\n \\\"RequestID\\\": \\\"some-request-id\\\",\\n \\\"Message\\\": 'some message' \\n }\\n\\n mock_client().create_push_template.return_value = response\\n response = manageMessages.handler(event, None)\\n\\n self.assertTrue(response)\\n self.assertEqual(response['status'], 'MESSAGE_CREATED')\",\n \"def process(self, message: Message, **kwargs: Any) -> None:\\n pass\",\n \"def handle_send_message(self, message_header, message):\\n pass\",\n \"def verify_as_target(self, message_handler):\",\n \"def handle(self, rsm_ctx):\\n pass\",\n \"def handle_message(self, msg, identity=None):\\n\\n if (self._supervisor and\\n not isinstance(msg, mplane.model.Envelope)):\\n self._exporter.put_nowait([msg, identity])\\n\\n if isinstance(msg, mplane.model.Capability):\\n self._add_capability(msg, identity)\\n elif isinstance(msg, mplane.model.Withdrawal):\\n self._withdraw_capability(msg, identity)\\n elif isinstance(msg, mplane.model.Receipt):\\n self._handle_receipt(msg, identity)\\n elif isinstance(msg, mplane.model.Result):\\n self._handle_result(msg, identity)\\n elif isinstance(msg, mplane.model.Exception):\\n self._handle_exception(msg, identity)\\n elif isinstance(msg, mplane.model.Envelope):\\n if msg.get_token() in self._receipts:\\n self._handle_result(msg, identity)\\n else:\\n for imsg in msg.messages():\\n self.handle_message(imsg, identity)\\n else:\\n raise ValueError(\\\"Internal error: unknown message \\\"+repr(msg))\",\n \"def test_sample_status_custom(self):\\n self.app = self.make_app(argv = ['report', 'sample_status', self.examples[\\\"project\\\"], self.examples[\\\"flowcell\\\"], '--debug', '--customer_reference', 'MyCustomerReference', '--uppnex_id', 'MyUppnexID', '--ordered_million_reads', '10', '--phix', '{1:0.1, 2:0.2}'],extensions=['scilifelab.pm.ext.ext_couchdb'])\\n handler.register(DeliveryReportController)\\n self._run_app()\\n data = ast.literal_eval(self.app._output_data['debug'].getvalue())\\n s_param_map = {x[\\\"scilifelab_name\\\"]:x for x in data[\\\"s_param\\\"]}\\n self.assertEqual(s_param_map['P001_101_index3']['uppnex_project_id'], 'MyUppnexID')\\n self.assertEqual(s_param_map['P001_101_index3']['customer_reference'], 'MyCustomerReference')\\n self.assertEqual(s_param_map['P001_101_index3']['ordered_amount'], 10)\",\n \"def testWholeRequest(self):\\n body = self.protocol.encode_message(self.request_message)\\n self.Reinitialize(input=body,\\n content_type=self.content_type)\\n self.factory.add_request_mapper(self.mapper())\\n self.service_handler.handle('POST', '/my_service', 'method1')\\n VerifyResponse(self,\\n self.service_handler.response,\\n '200',\\n 'OK',\\n self.protocol.encode_message(self.response_message),\\n self.content_type)\",\n \"def commands_coverage_server():\\n try:\\n coverage()\\n coverage_server()\\n except KeyboardInterrupt:\\n logger.info(\\\"Command canceled\\\")\",\n \"def test_send(self):\\n # Required to get useful test names\\n super(TestCisObjOutput_local, self).test_send()\",\n \"def on_message(self, data):\\n req = json.loads(data)\\n self.serve(req)\",\n \"def on_message(self, data):\\n req = json.loads(data)\\n self.serve(req)\",\n \"async def testsay(self, ctx, *, message):\\n await ctx.send(message)\",\n \"def test_message_group():\",\n \"def obj_received(self, obj):\\n\\n # TODO do something like handler registry\\n\\n if isinstance(obj, pb.Ping):\\n self.handle_ping(obj)\\n\\n elif isinstance(obj, pb.Pong):\\n 

def obj_received(self, obj):

    # TODO do something like handler registry

    if isinstance(obj, pb.Ping):
        self.handle_ping(obj)

    elif isinstance(obj, pb.Pong):
        self.handle_pong(obj)

    elif isinstance(obj, pb.ACS):
        if self.factory.config.failure != 'omission':
            res = self.factory.acs.handle(obj, self.remote_vk)
            self.process_acs_res(res, obj)

    elif isinstance(obj, pb.TxReq):
        self.factory.tc_runner.handle_tx_req(obj, self.remote_vk)

    elif isinstance(obj, pb.TxResp):
        self.factory.tc_runner.handle_tx_resp(obj, self.remote_vk)

    elif isinstance(obj, pb.ValidationReq):
        self.factory.tc_runner.handle_validation_req(obj, self.remote_vk)

    elif isinstance(obj, pb.ValidationResp):
        self.factory.tc_runner.handle_validation_resp(obj, self.remote_vk)

    elif isinstance(obj, pb.SigWithRound):
        self.factory.tc_runner.handle_sig(obj, self.remote_vk)

    elif isinstance(obj, pb.CpBlock):
        self.factory.tc_runner.handle_cp(obj, self.remote_vk)

    elif isinstance(obj, pb.Cons):
        self.factory.tc_runner.handle_cons(obj, self.remote_vk)

    elif isinstance(obj, pb.AskCons):
        self.factory.tc_runner.handle_ask_cons(obj, self.remote_vk)

    # NOTE messages below are for testing, bracha/mo14 is normally handled by acs

    elif isinstance(obj, pb.Bracha):
        if self.factory.config.failure != 'omission':
            self.factory.bracha.handle(obj, self.remote_vk)

    elif isinstance(obj, pb.Mo14):
        if self.factory.config.failure != 'omission':
            self.factory.mo14.handle(obj, self.remote_vk)

    elif isinstance(obj, pb.Dummy):
        logging.info("NODE: got dummy message from {}".format(b64encode(self.remote_vk)))

    else:
        raise AssertionError("invalid message type {}".format(obj))

    self.factory.recv_message_log[obj.__class__.__name__] += obj.ByteSize()


def setUp(self):
    self.hex_data = "0251112233445566778899a1a2a3a4a5a6a7a8a9aaabacadae"
    self.message_id = 0x51
    self.bytes_data = bytearray(unhexlify(self.hex_data))
    self.address = Address("112233")
    self.target = Address("445566")
    self.flags = MessageFlags(0x77)
    self.cmd1 = int(0x88)
    self.cmd2 = int(0x99)
    self.user_data = UserData(unhexlify("a1a2a3a4a5a6a7a8a9aaabacadae"))

    self.msg, self.msg_bytes = hex_to_inbound_message(self.hex_data)
    set_log_levels(
        logger="info",
        logger_pyinsteon="info",
        logger_messages="info",
        logger_topics=False,
    )


def test(self):
    self.info("LOGGING: Testing log messages")
    self.debug("This is a debugging message")
    self.info("This is an informational message")
    self.warning("This is a warning message")
    self.error("This is an error message")
    self.critical("This is a critical message")
    self.info("LOGGING: Testing log messages COMPLETE")
    return


def receive_message(self, message):
    pass


def handle_inbound_message():
    data = json.loads(request.data)

    if data[0]["type"] == "message-received":
        if "call me" in data[0]["message"]["text"]:
            handle_inbound_sms_call_me(data[0]["message"]["to"][0], data[0]["message"]["from"])
        elif "media" in data[0]["message"]:
            handle_inbound_media_mms(data[0]["message"]["to"][0], data[0]["message"]["from"],
                                     data[0]["message"]["media"])
        else:
            handle_inbound_sms(data[0]["message"]["to"][0], data[0]["message"]["from"])
    else:
        print(data)
    return ""
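
The TODO in `obj_received` above hints at replacing the isinstance chain with a handler registry. One minimal sketch of that idea, with illustrative registry contents rather than the project's actual wiring:

# Map message class names to bound handler method names.
HANDLER_REGISTRY = {
    "Ping": "handle_ping",
    "Pong": "handle_pong",
}


def obj_received_via_registry(self, obj):
    handler_name = HANDLER_REGISTRY.get(obj.__class__.__name__)
    if handler_name is None:
        raise AssertionError("invalid message type {}".format(obj))
    # Look the handler up on the instance and dispatch.
    getattr(self, handler_name)(obj)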

def callback(ch, method, properties, body):
    print(f" [x] Received {str(body)} kW.")

    try:
        timestamp = properties.timestamp
        current_time = datetime.utcfromtimestamp(timestamp).replace(
            tzinfo=timezone.utc
        )
    except AttributeError:
        # If we don't get a timestamp from the broker, add a timestamp here.
        current_time = datetime.now().replace(tzinfo=timezone.utc)

    pv_photovoltaic = generate_pv_output(current_time)

    report_item = PVMeterReportItem(
        timestamp=current_time.isoformat(),
        pv_meter=int(body),
        pv_photovoltaic=pv_photovoltaic,
    )
    generate_report(report_item)

    ch.basic_ack(delivery_tag=method.delivery_tag)


def incoming(self, msg):
    hdr = msg.header

    # Signals:
    if hdr.message_type is MessageType.signal:
        key = (hdr.fields.get(HeaderFields.path, None),
               hdr.fields.get(HeaderFields.interface, None),
               hdr.fields.get(HeaderFields.member, None)
               )
        cb = self.signal_callbacks.get(key, None)
        if cb is not None:
            cb(msg.body)
        return

    # Method returns & errors
    reply_serial = hdr.fields.get(HeaderFields.reply_serial, -1)
    reply_handle = self.awaiting_reply.pop(reply_serial, None)
    if reply_handle is not None:
        if hdr.message_type is MessageType.method_return:
            reply_handle.set_result(msg.body)
            return
        elif hdr.message_type is MessageType.error:
            reply_handle.set_exception(DBusErrorResponse(msg))
            return

    if self.on_unhandled:
        self.on_unhandled(msg)


def test_send(self, logger: Logger, mocker: MockerFixture) -> None:
    identifier = "123"
    task = OctaveTask()
    task.session_id = identifier
    handler = OutputHandler(task)
    logger.addHandler(handler)

    emit = mocker.patch("matl_online.tasks.socket.emit")

    logger.info("test1")
    logger.info("[STDERR]error")
    handler.send()

    assert emit.called == 1
    assert len(emit.call_args) == 2

    event, payload = emit.call_args[0]

    expected_data = {
        "session": identifier,
        "data": [
            {"type": "stdout", "value": "test1"},
            {"type": "stderr", "value": "error"},
        ],
    }

    assert payload == expected_data
    assert event == "status"
    assert emit.call_args[1].get("room") == identifier


def _process_msg(cls, msg):
    raise NotImplementedError


def test_get_request_output(self):
    pass
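
The PV-meter callback above prefers a broker-supplied timestamp and only falls back to local time when none is present. A publisher can set that timestamp through `pika.BasicProperties`; a minimal sketch (the queue name is an assumption):

import time

import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
channel = connection.channel()
channel.basic_publish(
    exchange="",
    routing_key="pv_readings",  # hypothetical queue the callback consumes from
    body=b"42",
    # Broker-side timestamp, so the callback's first branch is taken.
    properties=pika.BasicProperties(timestamp=int(time.time())),
)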

def on_message(self, unused_channel, basic_deliver, properties, body):

    start = time.time()
    self.invocations += 1

    logger.info(
        u"[{}] received message #{} from exchange {}: {}".format(self.bot_id,
                                                                 basic_deliver.delivery_tag, self.exchange,
                                                                 body.decode('utf-8')))

    self.statsd.incr(self.statsd_prefix + "message.receive")

    # Ack the message before processing to tell rabbit we got it.
    # TODO before sending ack we should persist the message in a local queue
    # to avoid the possibility of losing it
    self.acknowledge_message(basic_deliver.delivery_tag)

    try:

        try:
            json_body = json.loads(body)

        except ValueError as ve:
            logger.exception(
                "[{}] Invalid JSON received from exchange: {} error: {} msg body: {}".format(
                    self.bot_id, self.exchange, ve, body))
            raise

        else:
            response_messages = self.callback_func(json_body)

            if response_messages is None:
                response_messages = []

            logger.info("[{}] Sending {} response messages".format(self.bot_id, len(response_messages)))

            for message in response_messages:
                self._channel.basic_publish(exchange=message.get('exchange', self.exchange),
                                            routing_key=message.get('queue', self.queue_name),
                                            body=message.get('body'))
                logger.info("[{}] published message {}".format(self.bot_id, message))
                self.statsd.incr(self.statsd_prefix + "message.publish")

    except Exception as e:
        msg = "[{}] Unexpected error - {}, message {}, from exchange {}. sending to error queue {}"
        self.statsd.incr(self.statsd_prefix + "message.error")
        logger.exception(msg.format(self.bot_id, e, body, self.exchange, self.error_queue_name))
        self._channel.basic_publish(exchange='',
                                    routing_key=self.error_queue_name,
                                    body=body)

    exec_time_millis = int((time.time() - start) * 1000)
    self.total_execution_time += exec_time_millis

    logger.debug("Consumer {0} message handling time: {1}ms".format(self.consumer_id, exec_time_millis))

    # if we have processed 100 messages, log out the average execution time at INFO then reset the total
    if self.invocations % 100 == 0:
        average_execution_time = self.total_execution_time / 100
        logger.info("Consumer {0} Avg message handling time (last 100): {1}ms".format(
            self.consumer_id, average_execution_time))
        self.total_execution_time = 0

    self.statsd.timing(self.statsd_prefix + 'message.process.time', int((time.time() - start) * 1000))


def test_base_logging(self):

    n = nodes.BaseNode(log_output=True)
    n.channel = FakeChannel(self.loop)

    m = generate_msg(message_content='test')

    ret = self.loop.run_until_complete(n.handle(m))

    # Check return
    self.assertTrue(isinstance(ret, message.Message))
    self.assertEqual(ret.payload, 'test', "Base node not working !")
    self.assertEqual(n.processed, 1, "Processed msg count broken")

    n.channel.logger.log.assert_any_call(10, 'Payload: %r', 'test')
    n.channel.logger.log.assert_called_with(10, 'Meta: %r', {'question': 'unknown'})


def handle(self):
    self.app.logger.info('==== handle github event: %s', self.event)
    # self.app.logger.info('data send: %s', json.dumps(self.data, indent=2))
    if self.event == 'ping':
        return {'msg': 'Hi!'}
    else:
        task_match = []
        repo_config = self.get_repo_config()
        if repo_config:
            for task_config in repo_config['tasks']:
                event_hit = False
                if self.event == 'push':
                    event_hit = self._is_task_push(task_config)
                elif self.event == 'pull_request':
                    event_hit = self._is_task_pull_request(task_config)
                if event_hit:
                    task_match.append(task_config)
        # work start execute here...
        for task in task_match:
            self.app.logger.info("event hit, start tasks under %s/%s...",
                                 self.repo_meta['owner'], self.repo_meta['name'])
            self._jenkins_build(task)
        return "OK"


def test_filter_messages(self):
    pass


def setUp(self):
    h = self.MyTestHandler()
    h.request = Request.blank('/rpc/')
    h.response = Response()
    self.handler = h


def _handle_custom_msg(self, content, buffers):
    self._msg_callbacks(self, content, buffers)


def test_basic_asgi_call(self):
    app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)
    self.seed_app(app)
    self.send_default_request()
    outputs = self.get_all_output()
    self.validate_outputs(outputs)


def test_basic_asgi_call(self):
    app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)
    self.seed_app(app)
    self.send_default_request()
    outputs = self.get_all_output()
    self.validate_outputs(outputs)


def handle_msg(self, state_id, msg):
    pass
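
The consumer's `on_message` above keeps a running total and logs an average latency every 100 messages. That bookkeeping can be pulled out into a tiny stand-alone helper; this is a sketch with illustrative names, not code from the source.

import time


class LatencyTracker:
    def __init__(self, window=100):
        self.window = window
        self.count = 0
        self.total_ms = 0

    def record(self, start):
        # Accumulate elapsed milliseconds; report and reset once per window.
        self.count += 1
        self.total_ms += int((time.time() - start) * 1000)
        if self.count % self.window == 0:
            avg = self.total_ms / self.window
            print("Avg message handling time (last {}): {}ms".format(self.window, avg))
            self.total_ms = 0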

def handle(self):
    try:
        # Wait for data
        data = json.loads(self.request.recv(1024).decode('UTF-8').strip())

        # Process data
        self.process_data(data)

    except Exception as e:
        print("Exception while receiving message: ", e)
        self.request.sendall(
            bytes(json.dumps({'return': 'error'}), 'UTF-8'))


async def _response_handler(self):
    pass


def _on_message(self, message):
    print("RECEIVED on " + self.session_name + ":")
    message_json = json.loads(message)
    print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))

    for singleMsg in message_json:
        self._process_message(singleMsg)
\"0.52374893\",\n \"0.52315503\",\n \"0.52315503\",\n \"0.5227989\",\n \"0.52258396\",\n \"0.52220947\",\n \"0.52177423\"\n]"},"document_score":{"kind":"string","value":"0.5589948"},"document_rank":{"kind":"string","value":"28"}}},{"rowIdx":94850,"cells":{"query":{"kind":"string","value":"Encodes the input sequence and returns the hidden state from the last step of the encoder RNN."},"document":{"kind":"string","value":"def encode(self, x):\n _, hid = self.encoder(x) #All RNN classes output a tuple of 2 objects: the output of the RNN first and the hidden state from the last item in\n return hid #the input sequence second. We're only interested in the hidden state"},"metadata":{"kind":"string","value":"{\n \"objective\": {\n \"self\": [],\n \"paired\": [],\n \"triplet\": [\n [\n \"query\",\n \"document\",\n \"negatives\"\n ]\n ]\n }\n}"},"negatives":{"kind":"list like","value":["def _encode(self):\n with tf.variable_scope('passage_encoding'):\n self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)\n with tf.variable_scope('question_encoding'):\n self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)","def encode(self):\n with tf.name_scope(\"encode\"):\n self.encoder_inputs = tf.layers.dense(\n inputs=self.encoder_inputs,\n units=self.options['hidden_size'], activation=None, use_bias=True,\n kernel_initializer=tf.keras.initializers.he_normal(seed=None),\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, trainable=True,\n name=None, reuse=None)\n self.encoder_inputs = tf.layers.batch_normalization(self.encoder_inputs,\n axis=-1,\n momentum=0.99,\n epsilon=0.001,\n center=True,\n scale=True,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n training=self.is_training,\n trainable=True,\n renorm=False,\n renorm_momentum=0.99)\n # Prepare inputs to the layer stack by adding positional encodings and\n # applying dropout.\n # embedded_inputs = self.embedding_softmax_layer(inputs)\n #\n inputs_padding = transformer_model_utils.get_padding(tf.cast(\n tf.reduce_max(100*self.encoder_inputs, [-1]),\n dtype=tf.int32))\n\n with tf.name_scope(\"add_pos_encoding\"):\n length = tf.shape(self.encoder_inputs)[1]\n pos_encoding = transformer_model_utils.get_position_encoding(\n length, self.options[\"hidden_size\"])\n encoder_inputs = self.encoder_inputs + pos_encoding\n\n if self.is_training:\n encoder_inputs = tf.nn.dropout(\n encoder_inputs, 1 - self.options[\"layer_postprocess_dropout\"])\n\n return self.encoder_stack(encoder_inputs, self.attention_bias, inputs_padding)","def _encode_back(self):\n with tf.variable_scope('passage_encoding'):\n self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)\n with tf.variable_scope('question_encoding'):\n self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)","def encode(self,x,x_len):\n\n ## Check to see if batch_size parameter is 
fixed or base on input batch\n cur_batch_size = x.size()[1]\n encode_init_state = self.encoder.initialize_hidden_state(cur_batch_size)\n encoder_state, encoder_outputs = self.encoder.forward(x, encode_init_state, x_len)\n\n return encoder_outputs, encoder_state","def _encode(self):\n with tf.variable_scope('encoding'):\n self.sep_p_encodes, _ = bilstm_layer(self.p_emb, self.p_length, self.hidden_size)\n tf.get_variable_scope().reuse_variables()\n self.sep_q_encodes, _ = bilstm_layer(self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, 1-self.dropout)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, 1-self.dropout)","def _add_input_encoder(self, inputs, seq_len):\n with tf.variable_scope(\"encoder\"):\n cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)\n\n return fw_states, bw_states, final_fw, final_bw","def encode_input(self, x_tensor, inp_lens_tensor):\r\n input_emb = self.input_emb.forward(x_tensor)\r\n enc_output_each_word, enc_context_mask, enc_final_states = self.encoder(input_emb, inp_lens_tensor)\r\n enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))\r\n # print('lest go', enc_final_states_reshaped[1].shape)\r\n return enc_output_each_word, enc_context_mask, enc_final_states_reshaped","def encode(self, inputs, masks):\n with tf.variable_scope(\"encoder\") as scope_encoder:\n #compute sequence length\n sequence_lengths = tf.reduce_sum(masks, axis = 1) \n #create a forward cell\n fw_cell = tf.contrib.rnn.LSTMCell(self.size)\n\n #pass the cells to bilstm and create the bilstm\n bw_cell = tf.contrib.rnn.LSTMCell(self.size)\n output, final_state = tf.nn.bidirectional_dynamic_rnn(fw_cell, \\\n bw_cell, inputs, \\\n sequence_length = sequence_lengths, \\\n dtype = tf.float32, \\\n parallel_iterations = 256)\n output_lstm = tf.concat([output[0], output[1]], axis = -1)\n final_state_lstm = tf.concat([final_state[0], final_state[1]], axis = -1)\n return output_lstm, final_state_lstm","def _add_encoder(self, encoder_inputs, seq_len):\n with tf.variable_scope('encoder'):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n (encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, encoder_inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)\n encoder_outputs = tf.concat(axis=2, values=encoder_outputs) # concatenate the forwards and backwards states\n return encoder_outputs, fw_st, bw_st","def encode(self, state):\n raise NotImplementedError","def build_encoder(self):\n with tf.variable_scope(\"encoder\") as scope:\n length1 = tf.to_int32(tf.reduce_sum(self.encode_mask1, 1), name=\"length1\")\n\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units) # Backward encoder\n 
_, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=self.encode_emb1,\n sequence_length=length1,\n dtype=tf.float32,\n scope=scope)\n thought_vectors1 = tf.concat(states, 1, name=\"thought_vectors1\")\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim)\n _, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=self.encode_emb1,\n sequence_length=length1,\n dtype=tf.float32,\n scope=scope)\n # Use an identity operation to name the Tensor in the Graph.\n thought_vectors1 = tf.identity(state, name=\"thought_vectors1\")\n \n scope.reuse_variables()\n\n length2 = tf.to_int32(tf.reduce_sum(self.encode_mask2, 1), name=\"length2\")\n\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units) # Backward encoder\n _, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=self.encode_emb2,\n sequence_length=length2,\n dtype=tf.float32,\n scope=scope)\n thought_vectors2 = tf.concat(states, 1, name=\"thought_vectors2\")\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim)\n _, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=self.encode_emb2,\n sequence_length=length2,\n dtype=tf.float32,\n scope=scope)\n # Use an identity operation to name the Tensor in the Graph.\n thought_vectors2 = tf.identity(state, name=\"thought_vectors2\")\n\n self.thought_vectors1 = thought_vectors1\n self.thought_vectors2 = thought_vectors2","def encoder_decoder_archi_gan(inputs, is_train):\n\n encoder_layers = []\n\n encoded = inputs\n\n encoder_layers.append(encoded)\n\n for i in range(config.encoder_layers):\n encoded = encoder_conv_block_gan(encoded, i, is_train)\n encoder_layers.append(encoded)\n \n encoder_layers.reverse()\n\n\n\n decoded = encoder_layers[0]\n\n for i in range(config.encoder_layers):\n decoded = decoder_conv_block_gan(decoded, encoder_layers[i+1], i, is_train)\n\n return decoded","def encoder(self, tensor):\n with tf.variable_scope(\"encoder\"):\n tensor = tf.nn.embedding_lookup(self.embedding, tensor)\n cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units)\n outputs, state = tf.nn.dynamic_rnn(cell, tensor, sequence_length=self.seq_len, dtype=tf.float32)\n output = outputs[:,-1,:]\n output = tf.nn.l2_normalize(output, -1)\n\n return output","def forward(self, input, hidden, give_gates=False, debug=False):\n\n emb = self.encoder(input)\n if emb.dim()<3:\n emb = emb.unsqueeze(0)\n\n if give_gates:\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\n else:\n output, hidden = self.rnn(emb, hidden)\n\n # decoded = self.softmax(self.decoder(output))\n decoded = self.decoder(output)\n\n if give_gates:\n if debug:\n return decoded, hidden, extras, emb\n else:\n return decoded, hidden, extras\n else:\n if debug:\n return decoded, hidden, emb\n else:\n return decoded, hidden","def decode(self):\n decoder_input = Input(shape=self.input_decoder_shape, batch_shape=self.input_batch_decoder_shape)\n ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)\n\n if self.hparams.Masking is True:\n mask_decoder_input = Masking(mask_value=0)(decoder_input)\n mask_ppg_input = Masking(mask_value=0)(ppg_input)\n prenet_output = self.PreNet(mask_decoder_input)\n encoder_input = self.Encoder(mask_ppg_input)\n decoder_mask = None\n 
else:\n decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)\n prenet_output = self.PreNet(decoder_input)\n encoder_input = self.Encoder(ppg_input, decoder_mask)\n\n rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])\n # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n for i in range(self.hparams.Tacotron_decoder_layers):\n rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)\n\n # feed by self.states is unhelpful in training, since we don't stop rnn during epochs\n # but it is important in generating since each fit states will be set to zeros.!!!!!!\n rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])\n decoder_output = self.Linear_projection(rnn_output)\n if self.hparams.Tacotron_postnet is True:\n residual_output = decoder_output\n for i in range(self.hparams.PostNet_layers):\n residual_output = self.PostNet_Conv1D[i](residual_output)\n residual_output = self.PostNet_BatchNorm[i](residual_output)\n residual_output = self.PostNet_dropout_list[i](residual_output)\n decoder_output = Add()([decoder_output, residual_output])\n return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)","def encode_sequence(sequence, rnns, embedder, dropout_amount=0.):\n\n batch_size = 1\n layer_states = []\n for rnn in rnns:\n hidden_size = rnn.weight_hh.size()[1]\n \n # h_0 of shape (batch, hidden_size)\n # c_0 of shape (batch, hidden_size)\n if rnn.weight_hh.is_cuda:\n h_0 = torch.cuda.FloatTensor(batch_size,hidden_size).fill_(0)\n c_0 = torch.cuda.FloatTensor(batch_size,hidden_size).fill_(0)\n else:\n h_0 = torch.zeros(batch_size,hidden_size)\n c_0 = torch.zeros(batch_size,hidden_size)\n\n layer_states.append((h_0, c_0))\n\n outputs = []\n for token in sequence:\n rnn_input = embedder(token)\n (cell_states, hidden_states), output, layer_states = forward_one_multilayer(rnns,rnn_input,layer_states,dropout_amount)\n\n outputs.append(output)\n\n return (cell_states, hidden_states), outputs","def _encode(self, data: mx.sym.Symbol, data_length: mx.sym.Symbol, seq_len: int) -> mx.sym.Symbol:\n # (seq_len, batch_size, num_embed)\n data_reverse = mx.sym.SequenceReverse(data=data, sequence_length=data_length,\n use_sequence_length=True)\n # (seq_length, batch, cell_num_hidden)\n hidden_forward, _, _ = self.forward_rnn.encode(data, data_length, seq_len)\n # (seq_length, batch, cell_num_hidden)\n hidden_reverse, _, _ = self.reverse_rnn.encode(data_reverse, data_length, seq_len)\n # (seq_length, batch, cell_num_hidden)\n hidden_reverse = mx.sym.SequenceReverse(data=hidden_reverse, sequence_length=data_length,\n use_sequence_length=True)\n # (seq_length, batch, 2 * cell_num_hidden)\n hidden_concat = mx.sym.concat(hidden_forward, hidden_reverse, dim=2, name=\"%s_rnn\" % self.prefix)\n\n return hidden_concat","def encode_input_for_decoder(x_tensor, inp_lens_tensor, model_input_emb: EmbeddingLayer, model_enc: RNNEncoder):\n input_emb = model_input_emb.forward(x_tensor)\n (enc_output_each_word, enc_context_mask, enc_final_states) = model_enc.forward(input_emb, inp_lens_tensor)\n enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))\n return (enc_output_each_word, enc_context_mask, enc_final_states_reshaped)","def run_encoder(self, sess, batch):\n feed_dict = self._make_feed_dict(batch, just_enc=True) \n (enc_states, dec_in_state, global_step) = sess.run(\n 
[self._enc_states, self._dec_in_state, self.global_step], feed_dict) # run the encoder\n\n # dec_in_state is LSTMStateTuple shape ([batch_size,hidden_dim],[batch_size,hidden_dim])\n # Given that the batch is a single example repeated, dec_in_state is identical across the batch so we just take the top row.\n dec_in_state = tf.contrib.rnn.LSTMStateTuple(dec_in_state.c[0], dec_in_state.h[0])\n return enc_states, dec_in_state","def encoder_one_way(self, cell, x, seq_len, init_state=None):\n # Output is the outputs at all time steps, state is the last state\n with tf.variable_scope(\"dynamic_rnn\"):\n outputs, state = tf.nn.dynamic_rnn(\\\n cell, x, sequence_length=seq_len, initial_state=init_state,\n dtype=self.floatX)\n # state is a StateTuple class with properties StateTuple.c and StateTuple.h\n return outputs, state","def encode(self, input):\n h = np.zeros(self.hidden_size) \n \n preactivation = np.dot(self.W.T, input) + self.b\n sigmoid(preactivation, h)\n \n return h","def encode(self, sequence):\n fwd_states, bwd_states = self.encode_fwd_bwd(sequence)\n bwd_states = bwd_states[::-1]\n return [dy.concatenate([fwd_states[i], bwd_states[i]]) for i in range(len(fwd_states))]","def forward(self, inp, state):\n emb = self.drop(self.encoder(inp))\n y, state_next = self.rnn(emb, state)\n y = self.drop(y)\n y = self.decoder(y)\n return y, state_next","def forward_step(self, prev_embed, encoder_hidden, src_mask, proj_key, hidden):\n\n # compute context vector using attention mechanism\n #we only want the hidden, not the cell state of the lstm CZW, hence the hidden[0]\n query = hidden[0][-1].unsqueeze(1) # [#layers, B, D] -> [B, 1, D]\n context, attn_probs = self.attention(\n query=query, proj_key=proj_key,\n value=encoder_hidden, mask=src_mask)\n\n # update rnn hidden state\n rnn_input = torch.cat([prev_embed, context], dim=2)\n output, hidden = self.rnn(rnn_input, hidden)\n \n pre_output = torch.cat([prev_embed, output, context], dim=2)\n pre_output = self.dropout_layer(pre_output)\n pre_output = self.pre_output_layer(pre_output)\n\n return output, hidden, pre_output","def encoder(enc_input, attn_bias, n_layer, n_head,\n d_key, d_value, d_model, d_inner_hid, pos_enc,\n preporstprocess_dropout, attention_dropout,\n relu_dropout, preprocess_cmd='n',\n postprocess_cmd='da'):\n for i in range(n_layer):\n enc_output = encoder_layer(enc_input, attn_bias, n_head,\n d_key, d_value, d_model,d_inner_hid, pos_enc,\n prepostprocess_dropout, attention_dropout,relu_dropout,\n preprocess_cmd, postprocess_cmd\n )\n enc_input = enc_output\n enc_output = pre_process_layer(enc_output,\n preprocess_cmd, preporstprocess_dropout)\n return enc_output","def build_sentence_encoder(self, raw_encoder_input, input_seq_len):\n with tf.variable_scope('text_encoder'):\n self.embedding = \\\n tf.get_variable(\n \"embedding\", initializer=tf.random_uniform(\n [self.config.word_voc_size,\n self.config.word_embedding_space_size],\n -self.config.TRAIN.SENCODER.none_rnn_para_initial_max,\n self.config.TRAIN.SENCODER.none_rnn_para_initial_max))\n inputs = tf.nn.embedding_lookup(self.embedding, raw_encoder_input)\n\n # now it is [MAX_SEQ_LENGTH, batch_size, embedding_length]\n input_batch_order = tf.transpose(inputs, [1, 0, 2])\n\n # now it is [MAX_SEQ_LENGTH * batch_size, embedding_length]\n input_batch_order = tf.reshape(\n input_batch_order, [-1, self.config.word_embedding_space_size])\n\n # now it is LIST OF [BATCH_SIZE, embedding_length]\n encoder_input = tf.split(0, self.config.seq_max_len,\n input_batch_order)\n\n # the 
encoder part\n encode_gru_cell = tf.nn.rnn_cell.GRUCell(\n self.config.encoder_dimension)\n # big news: The state is final state, output is a list of tensor.\n # We don't to do that\n _, sentence_rep = tf.nn.rnn(encode_gru_cell, encoder_input,\n dtype=tf.float32,\n sequence_length=input_seq_len)\n self.sentence_rep = sentence_rep\n self.sentence_rep = tf.nn.l2_normalize(self.sentence_rep, 1)\n return","def encode(self, src_seq, src_lens):\n src_embed = self.word_embedding(src_seq)\n src_encodings, final_states = self.encoder_lstm(src_embed, src_lens)\n\n return src_encodings, final_states, src_embed","def _add_seq2seq(self):\n mode = self._mode\n vsize = self._vocab.size() # size of the vocabulary\n\n with tf.variable_scope('seq2seq'):\n # Some initializers\n self.rand_unif_init = tf.random_uniform_initializer(-config.rand_unif_init_mag, config.rand_unif_init_mag, seed=123)\n self.trunc_norm_init = tf.truncated_normal_initializer(stddev=config.trunc_norm_init_std)\n\n # Add embedding matrix (shared by the encoder and decoder inputs)\n with tf.variable_scope('embedding'):\n embedding = tf.get_variable('embedding', [vsize, config.emb_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n if mode==\"train\": self._add_emb_vis(embedding) # add to tensorboard\n emb_enc_inputs = tf.nn.embedding_lookup(embedding, self._enc_batch) # tensor with shape (batch_size, max_enc_steps, emb_size)\n emb_dec_inputs = tf.nn.embedding_lookup(embedding, self._dec_batch) # tensor with shape (batch_size, max_dec_steps, emb_size)\n #emb_dec_inputs = [tf.nn.embedding_lookup(embedding, x) for x in tf.unstack(self._dec_batch, axis=1)] # list length max_dec_steps containing shape (batch_size, emb_size)\n\n # Add the encoder.\n enc_fw_states, enc_bw_states, enc_fw, enc_bw = self._add_input_encoder(emb_enc_inputs, self._enc_lens)\n\n print(\"Encoder FW\", enc_fw_states.shape)\n print(\"Encoder BW\", enc_bw_states.shape)\n raise Exception(\"testing mode\")\n\n #reshape encoder states from [batch_size, input_size, hidden_dim] to [batch_size, input_size * hidden_dim]\n enc_fw_states = tf.reshape(enc_fw_states, [config.batch_size, config.hidden_dim * tf.shape(enc_fw_states)[1]])\n enc_bw_states = tf.reshape(enc_bw_states, [config.batch_size, config.hidden_dim * tf.shape(enc_bw_states)[1]])\n\n\n # python run.py --mode=decode --data_path=data/chunked/train_1/train_1_*.bin --vocab_path=data/vocab_1 --exp_name=full1isto1\n\n # Add the decoder.\n dec_fw_states, dec_bw_states = self._add_input_decoder(emb_dec_inputs, self._dec_lens, enc_fw, enc_bw)\n\n #reshape decoder states from [batch_size, input_size, hidden_dim] to [batch_size, input_size * hidden_dim]\n dec_fw_states = tf.reshape(dec_fw_states, [config.batch_size, config.hidden_dim * tf.shape(dec_fw_states)[1]])\n dec_bw_states = tf.reshape(dec_bw_states, [config.batch_size, config.hidden_dim * tf.shape(dec_bw_states)[1]])\n #print(\"Decoder FW\", dec_fw_states.shape)\n #print(\"Decoder BW\", dec_bw_states.shape)\n\n\n #enc_c = tf.concat(axis=1, values=[enc_fw.c, enc_bw.c])\n #enc_h = tf.concat(axis=1, values=[enc_fw.h, enc_bw.h])\n #dec_c = tf.concat(axis=1, values=[dec_fw.c, dec_bw.c])\n #dec_h = tf.concat(axis=1, values=[dec_fw.h, dec_bw.h])\n\n final_encoding = tf.concat(axis=1, values=[enc_fw_states, enc_bw_states, dec_fw_states, dec_bw_states])\n #print(\"Final encoding\", final_encoding.shape)\n #raise Exception(\"Test\")\n dims_final_enc = tf.shape(final_encoding)\n\n \"\"\"\n #convo_input = tf.concat(axis=1, values=[enc_c, enc_h, dec_c, dec_h])\n input_layer 
= tf.reshape(final_encoding, [config.batch_size, dims_final_enc[1], 1])\n print(\"Convolution input shape\", input_layer.shape)\n\n conv1 = tf.layers.conv1d(\n inputs=input_layer,\n filters=8,\n kernel_size=5,\n padding=\"same\",\n activation=tf.nn.relu)\n conv1 = tf.layers.batch_normalization(conv1)\n print(\"Convolution1 output shape\", conv1.shape)\n\n pool1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=2, strides=2)\n print(\"Pool1 output shape\", pool1.shape)\n\n conv2 = tf.layers.conv1d(\n inputs=pool1,\n filters=16,\n kernel_size=5,\n padding=\"same\",\n activation=tf.nn.relu)\n\n\n conv2 = tf.layers.batch_normalization(conv2)\n print(\"Convolution2 output shape\", conv2.shape)\n\n pool2 = tf.layers.max_pooling1d(inputs=conv2, pool_size=2, strides=2)\n print(\"Pool2 output shape\", pool2.shape)\n\n dims_pool2 = tf.shape(pool2)\n\n pool2_flat = tf.reshape(pool2, [config.batch_size, dims_pool2[1] * 16])\n print(\"Pool2_flat output shape\", pool2_flat.shape)\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n \"\"\"\n #raise Exception(\"testing mode\")\n\n #dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode==\"train\")\n #print(\"Dense output shape\", dense.shape)\n\n #raise Exception(\"Just testing\")\n # Add the output projection to obtain the vocabulary distribution\n with tf.variable_scope('output_projection'):\n w = tf.get_variable('w', [dims_final_enc[1], 2], dtype=tf.float32, initializer=self.trunc_norm_init)\n bias_output = tf.get_variable('bias_output', [2], dtype=tf.float32, initializer=self.trunc_norm_init)\n #concatenate abstract and article outputs [batch_size, hidden_dim*4]\n\n\n #get classification output [batch_size, 1] default on last axis\n self._logits = tf.matmul(final_encoding, w) + bias_output\n #self._logits = tf.layers.dense(final_encoding, 2, kernel_initializer=self.trunc_norm_init, bias_initializer=self.trunc_norm_init)\n #self._prob = tf.nn.softmax(logits, \"class_prob\")\n\n if mode in ['train', 'eval']:\n # Calculate the loss\n with tf.variable_scope('loss'):\n #self._prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self._targets)\n #class_weights = tf.constant([0.1, 5.])\n self._loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._targets, logits=self._logits))\n #self._loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=self._targets, logits=self._logits, pos_weight=class_weights))\n tf.summary.scalar('loss', self._loss)\n\n\n\n #if mode == \"decode\":","def encode(self, input_):\n return self.encoder(input_)","def encode(self, seq):","def forward(self, x):\n # Get results of encoder network\n q = self.encode_nn(x)\n\n return q","def pretrain_forward(self, inp):\n return self.encoder(inp)","def build_encoder(self):\n \n # some general variables concerning the current processed batch\n batch_size=self.image_embeddings.get_shape()[0]\n sentence_length = self.config.sentence_length # == self.seq_embeddings.get_shape()[2]\n max_text_length = tf.shape(self.seq_embeddings)[1] # maximum text length for this batch\n \n # This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the\n # modified LSTM in the \"Show and Tell\" paper has no biases and outputs\n # new_c * sigmoid(o).\n \n # create an lstm cell that will process a sentence (a sequence of tokens)\n lstm_cell_sentences = tf.nn.rnn_cell.BasicLSTMCell(\n num_units=self.config.sentence_embedding_size, state_is_tuple=True) # num_units describes the size of the internal memory cell 
(but it is also the output size)\n \n # we also need an lstm cell that will process a sequence of sentences (a text)\n lstm_cell_text = tf.nn.rnn_cell.BasicLSTMCell(\n num_units=self.config.article_embedding_size, state_is_tuple=True)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all lstm cells\n lstm_cell_sentences = tf.nn.rnn_cell.DropoutWrapper(\n lstm_cell_sentences,\n input_keep_prob=self.config.dropout_keep_prob_encoder,\n output_keep_prob=self.config.dropout_keep_prob_encoder)\n lstm_cell_text = tf.nn.rnn_cell.DropoutWrapper(\n lstm_cell_text,\n input_keep_prob=self.config.dropout_keep_prob_encoder,\n output_keep_prob=self.config.dropout_keep_prob_encoder)\n\n with tf.variable_scope(\"lstm_sentence_encode\", initializer=self.initializer) as lstm_scope:\n # we use the image embedding only to feed the text lstm with image information\n # The sentences are initialized with a zero state\n \n # Set the initial LSTM state.\n initial_state_sentences = lstm_cell_sentences.zero_state(\n batch_size=batch_size, dtype=tf.float32)\n\n # At first, generate a mask for all sentences. \n # This will allow us to specify the individual length of each sentence \n # This lengths are fed into tf.nn.dynamic_rnn, which will produce zero outputs for \n # all padded tokens.\n # Note, that self.input_seqs contains a zero for each padded token (zero is not in the vocabulary)\n zeros = tf.zeros_like(self.input_seqs)\n self.sentence_mask = tf.select(tf.greater(self.input_seqs, zeros) , tf.ones_like(self.input_seqs), zeros) # type int64\n\n #self.sentence_mask = tf.cast(self.sentence_mask, tf.int32)\n \n # In the following, we run a hierarchical approach:\n # Tokens of a sentence are mapped onto an embedding vector through lstm_cell_sentences\n # The resulting sentence embeddings are passed though lstm_cell_text to gather text embeddings\n \n # Since we have to generate an embedding for each sentence in a text, we need a loop somehow.\n # But the number of sentences in a text is dynamically determined for each batch (max_text_length).\n # Therefore, we cannot use unpack and a python loop. 
Instead we use the while_loop control method of TF.\n \n \n # The output of lstm_cell_sentences will be stored in this matrix, but only \n # the lstm output of the last not padded word in a sentence\n lstm_outputs_sentences = tf.zeros(tf.pack([batch_size, max_text_length, self.config.sentence_embedding_size])) # tf.pack is a hotfix, since a normal array passing would not work as max_text_length is a tensor\n #lstm_outputs_sentences = tf.zeros([batch_size, max_text_length, self.config.embedding_size])\n \n # Allow the LSTM variables to be reused.\n #lstm_scope.reuse_variables()\n\n # now we compute the lstm outputs for each token sequence (sentence) in the while loop body\n def body(i,n,los):\n \"\"\"Compute lstm outputs for sentences i (sentences with index i in text) of current batch.\n\n Inputs:\n i: control variable of loop (runs from 0 to n-1)\n n: max_text_length\n los: lstm_outputs_sentences\n\n Outputs:\n i: incremented\n n: unchanged\n los: input with updated values in index i of second dimension\n \"\"\"\n # extract correct lstm input (i-th sentence from each batch)\n #es = tf.slice(self.seq_embeddings,[0,i,0,0],[batch_size, 1, sentence_length, self.config.word_embedding_size])\n es = tf.slice(self.seq_embeddings,tf.pack([0,i,0,0]),tf.pack([batch_size, 1, sentence_length, self.config.word_embedding_size]))\n es = tf.squeeze(es, axis=1) # get rid of sentence index dimension\n es = tf.reshape(es, tf.pack([batch_size, sentence_length, self.config.word_embedding_size])) # dirty hack, to ensure that shape is known (needed by further methods)\n\n # extract masks of sentences i\n sm = tf.slice(self.sentence_mask,tf.pack([0,i,0]),tf.pack([batch_size, 1, sentence_length]))\n sm = tf.squeeze(sm, axis=1)\n # compute sentence lengths\n sm = tf.reduce_sum(sm, 1)\n sm = tf.reshape(sm, tf.pack([batch_size])) # dirty hack, to ensure that shape is known\n\n # feed i-th sentences through lstm\n lstm_outputs_sentences_tmp, _ = tf.nn.dynamic_rnn(cell=lstm_cell_sentences,\n inputs=es,\n sequence_length=sm,\n initial_state=initial_state_sentences,\n dtype=tf.float32,\n scope=lstm_scope)\n # lstm_outputs_sentences_tmp has shape (batch_size, sentence_length, sentence_embedding_size\n # lstm_outputs_sentences_tmp contains an output for each token in the sentences, but we are only interested in the \n # output of the last token of a sentence\n \n # Now we extract only those outputs (output of last token, which is not a padded token) from lstm_outputs_sentences_tmp\n\n # sm contains the length of each sentence, meaning we can access the right output with the index (length - 1)\n # Note, that the actual masks where reduced to lengths in the above statements.\n sm = tf.sub(sm, 1) # sentence mask contains now the index of the last token in each sentence\n # Those sentence, that have zero tokens (padded sentences) have now an index of -1. 
We have to set them back to 0\n # which are simply zero outputs of the lstm\n zeros = tf.zeros_like(sm)\n sm = tf.select(tf.less(sm, zeros) , zeros, sm)\n\n # We use tf.gather_nd to extract the desired outputs from lstm_outputs_sentences_tmp.\n # Therefore, we have to produce the \"indices\" parameter of this method first.\n # The elements of the last dimension in this matrix determine the indices for gathering slices from lstm_outputs_sentences\n # Hence the innermost dimension must be a 2D vector: (batch, token) <- index of desired embedding in lstm_outputs_sentences\n # for sentence with index (batch, i) in self.seq_embeddings\n\n # We generate for each of the two indices a seperate matrix and concatenate them at the end\n sm = tf.expand_dims(sm, 1)\n sm = tf.cast(sm, dtype=tf.int32)\n\n # use tf.range to generate the equivalence of sm for batch indices\n #batch_indices = tf.range(0, batch_size)\n batch_indices = tf.constant(np.arange(int(batch_size)), dtype=tf.int32)\n batch_indices = tf.expand_dims(batch_indices, 1) \n\n # then use tf.concat to generate the actual tensor, that can be used to gather the right outputs from lstm_outputs_sentences_tmp\n gather_indices = tf.concat(1, [batch_indices, sm])\n\n # now we can consider the elements (of the last dimension) of gather_indices as indices for the correct ouput\n lstm_outputs_sentences_tmp = tf.gather_nd(lstm_outputs_sentences_tmp, gather_indices)\n lstm_outputs_sentences_tmp = tf.expand_dims(lstm_outputs_sentences_tmp, 1) \n\n # add the current output to our list of outputs\n los = tf.concat(1, [tf.slice(los, tf.pack([0,0,0]), tf.pack([batch_size, i, self.config.sentence_embedding_size])),\n lstm_outputs_sentences_tmp,\n tf.slice(los, tf.pack([0,i+1,0]), tf.pack([batch_size,n-i-1,self.config.sentence_embedding_size]))])\n \n return i+1,n,los\n\n def condition(i,n,los):\n \"\"\"Break condition for while loop\n\n Inputs:\n i: control variable of loop (runs from 0 to n-1)\n n: max_text_length\n los: lstm_outputs_sentences\n\n Outputs:\n Ture, if body should be run.\n \"\"\"\n\n return i < n\n\n result = tf.while_loop(condition, body, loop_vars=[0, max_text_length, lstm_outputs_sentences])\n lstm_outputs_sentences = result[2] \n \n with tf.variable_scope(\"lstm_text_encode\", initializer=self.initializer) as lstm_scope: \n \n # Feed the image embeddings to set the initial LSTM state.\n zero_state_text = lstm_cell_text.zero_state(\n batch_size=batch_size, dtype=tf.float32)\n _, initial_state_text = lstm_cell_text(self.image_embeddings, zero_state_text)\n \n # Allow the LSTM variables to be reused.\n lstm_scope.reuse_variables()\n \n # lstm_outputs_sentences has now the last lstm output for each sentence in the batch (output of last unpadded token)\n # Its shape is (batch_size, max_text_length, sentence_embedding_size)\n \n # Now we use the sentence embeddings to generate text embeddings\n # Run the batch of sentence embeddings through the LSTM.\n self.sentence_sequence_length = tf.reduce_sum(self.input_mask, 1)\n lstm_outputs_text, _ = tf.nn.dynamic_rnn(cell=lstm_cell_text,\n inputs=lstm_outputs_sentences,\n sequence_length=self.sentence_sequence_length,\n initial_state=initial_state_text,\n dtype=tf.float32,\n scope=lstm_scope)\n # lstm_outputs_text has now the lstm output of each sentence_embedding,\n # where the output of the last unpadded sentence_embedding is considered as the text embedding.\n # Note, that we could also call it article embedding, since it comprises the information of the \n # text and the image.\n # Its shape is 
(batch_size, max_text_length, article_embedding_size)\n\n # extract the text embedding from lstm_outputs_text\n \n # sequence_length contains the length of each text, meaning we can access the right output with the index (length - 1)\n last_sentence = tf.sub(self.sentence_sequence_length, 1) # sentence mask contains now the index of the last unpadded sentence in each text\n\n # We use tf.gather_nd to extract the desired outputs from lstm_outputs_text.\n # Therefore, we have to produce the \"indices\" parameter of this method first.\n # The elements of the last dimension in this matrix determine the indices for gathering slices from lstm_outputs_text\n # Hence the innermost dimension must be a 2D vector: (batch, sentence)\n\n # We generate for each of the two indices a seperate matrix and concatenate them at the end\n last_sentence = tf.expand_dims(last_sentence, 1)\n\n # use tf.range to generate the equivalence of sm for batch indices\n batch_indices = tf.range(0, batch_size)\n batch_indices = tf.expand_dims(batch_indices, 1) \n\n # then use tf.concat to generate the actual tensor, that can be used to gather the right outputs from lstm_outputs_text\n gather_indices = tf.concat(1, [batch_indices, last_sentence])\n \n # now we can consider the elements (of the last dimension) of gather_indices as indices for the correct ouput\n self.article_embeddings = tf.gather_nd(lstm_outputs_text, gather_indices)\n \n # As the image information might have gone lost in the hierarchical rnn, the reader might reconsider it.\n if self.config.reconsider_image:\n with tf.variable_scope(\"reconsider_image\", initializer=self.initializer, reuse=None) as reconsider_image_scope: \n # concat current article embedding with image_embedding and map them through an fully connected layer onto a new embedding\n article_image_concat = tf.concat(1, [self.article_embeddings, self.image_embeddings])\n \n self.article_embeddings = tf.contrib.layers.fully_connected(\n inputs=article_image_concat,\n num_outputs=self.config.article_embedding_size,\n activation_fn=tf.nn.relu, #None, # linear activation \n weights_initializer=self.initializer,\n scope=reconsider_image_scope)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n self.article_embeddings = tf.nn.dropout(self.article_embeddings, self.config.dropout_keep_prob_encoder)\n \n # self.article_embeddings contains now the text/article embedding for each article in the batch\n # Its shape is (batch_size, article_embedding_size)\n \n # All variables up until this point are shared with the autoencoder. 
So these are the variables\n # (the whole encoder network) that we want to restore/share.\n self.autoencoder_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)","def encoder_inference(self, features, states):\n with tf.name_scope(f\"{self.name}_encoder\"):\n outputs = tf.expand_dims(features, axis=0)\n outputs, new_states = self.encoder.recognize(outputs, states)\n return tf.squeeze(outputs, axis=0), new_states","def encode(data, encoder):\n # Get the list of hidden depths\n\thd = encoder.hidden_depths\n # Find the middle hidden layer\n\tmiddle_layer_index = (len(hd)-1)/2\n # Initialize empty container for the encoded data\n\tdata_encoded = np.zeros((data.shape[0],hd[middle_layer_index]))\n\tfor i, d_ in enumerate(data):\n # feed forward, get all the activations, and just keep\n # the middle layer, which is the encoding\n\t\tx, z_container, x_container = encoder.ff(d_,True,True)\n\t\tx_encoded = x_container[1+middle_layer_index]\n\t\tdata_encoded[i] = x_encoded\n\t#\n\treturn data_encoded","def dis_encoder_seq2seq(hparams):\n assert FLAGS.discriminator_model == 'seq2seq_vd'\n assert hparams.dis_num_layers == 2\n\n ## Encoder forward variables.\n encoder_lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n encoder_lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n if FLAGS.data_set == 'ptb':\n model_str = 'Model'\n else:\n model_str = 'model'\n\n variable_mapping = {\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n encoder_lstm_w_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n encoder_lstm_b_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n encoder_lstm_w_1,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n encoder_lstm_b_1\n }\n return variable_mapping","def encode(self, n_dimension=2, learning_rate=0.01, training_epochs=10, batch_size=400):\n X = tf.placeholder(tf.float32,[None, self.n_input])\n tf.set_random_seed(50)\n \n \n n_hidden_layer1 = int(math.pow(2, int(2*math.log(self.n_input,2)/3+math.log(n_dimension,2)/3)))\n n_hidden_layer2 = int(math.pow(2, int(math.log(self.n_input,2)/3+2*math.log(n_dimension,2)/3)))\n n_hidden_layer3 = n_dimension\n \n weights = {\n 'encoder_w1':tf.Variable(tf.random_normal([self.n_input, n_hidden_layer1])),\n 'encoder_w2':tf.Variable(tf.random_normal([n_hidden_layer1, n_hidden_layer2])),\n 'encoder_w3':tf.Variable(tf.random_normal([n_hidden_layer2, n_hidden_layer3])),\n \n 'decoder_w1':tf.Variable(tf.random_normal([n_hidden_layer3, n_hidden_layer2])),\n 'decoder_w2':tf.Variable(tf.random_normal([n_hidden_layer2, n_hidden_layer1])),\n 'decoder_w3':tf.Variable(tf.random_normal([n_hidden_layer1, self.n_input])),\n }\n \n biases = {\n 'encoder_b1':tf.Variable(tf.random_normal([n_hidden_layer1])),\n 'encoder_b2':tf.Variable(tf.random_normal([n_hidden_layer2])),\n 'encoder_b3':tf.Variable(tf.random_normal([n_hidden_layer3])),\n \n 'decoder_b1':tf.Variable(tf.random_normal([n_hidden_layer2])),\n 'decoder_b2':tf.Variable(tf.random_normal([n_hidden_layer1])),\n 
'decoder_b3':tf.Variable(tf.random_normal([self.n_input])),\n }\n \n \n def encoder(x):\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_w1']), biases['encoder_b1']))\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_w2']), biases['encoder_b2']))\n layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_w3']), biases['encoder_b3']))\n \n return layer_3\n\n def decoder(x):\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_w1']), biases['decoder_b1']))\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_w2']), biases['decoder_b2']))\n layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_w3']), biases['decoder_b3']))\n \n return layer_3\n \n encoder_op = encoder(X)\n decoder_op = decoder(encoder_op)\n\n y_pred = decoder_op\n y_true = X\n\n cost = tf.reduce_mean(tf.pow(y_pred - y_true, 2))\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n \n \n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n n_batch = int(self.data.shape[0]/batch_size)\n for epoch in tqdm(range(training_epochs)):\n for batch_idx in range(n_batch):\n start = batch_idx * batch_size\n stop = start + batch_size\n _, encoder_result = sess.run([optimizer, encoder_op], feed_dict={X: self.data[start:stop]})\n self.X_test = sess.run(encoder_op, feed_dict={X:self.data})\n self.X_cost = sess.run(cost, feed_dict={X:self.data})\n \n return self.X_test, self.X_cost","def encode(self, X):\r\n return self._encoder.predict(X)","def forward(self, input, last_hidden, last_context, encoder_outputs):\r\n # input: B x 1 x d, last_hidden: (num_layers * num_directions) x B x h\r\n # last_context: B x 1 x h, encoder_outputs: B x S x h\r\n\r\n # output = embedded\r\n rnn_input = torch.cat((input, last_context), 2) # B x 1 x (d + h)\r\n output, hidden = self.rnn(rnn_input, last_hidden) # output: B x 1 x h\r\n\r\n # calculate attention from current RNN state and all encoder outputs; apply to encoder outputs\r\n attn_weights = self.attn(output, encoder_outputs) # B x S\r\n context = attn_weights.unsqueeze(1).bmm(encoder_outputs) # B x 1 x h\r\n\r\n # final output layer (next word prediction) using the RNN hidden state and context vector\r\n output = f.log_softmax(self.out(torch.cat((context.squeeze(1), output.squeeze(1)), 1)), 1)\r\n\r\n # Return final output, hidden state, and attention weights (for visualization)\r\n return output, hidden, context, attn_weights","def encode(self, game_state: ssm.SnakeStateMachine) -> int:\n state = [e.encode(game_state) for e in self._encoders]\n return self._state2id[tuple(state)]","def _define_encoder(self):\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 128, bias=False), nn.SELU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\n )","def forward(self,\n state,\n encoder_out=None,\n encoder_padding_mask=None,\n incremental_state=None,\n prev_self_attn_state=None,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n need_attn=False,\n need_head_weights=False):\n\n # need_attn must be True if need_head_weights\n need_attn = True if need_head_weights else need_attn\n print('encoder padding {}, self padding {}'.format(encoder_padding_mask, self_attn_padding_mask.size()))\n residual = 
state.clone()\n # print('self attention')\n state, _ = self.self_attn(query=state,\n key=state,\n value=state,\n key_padding_mask=self_attn_padding_mask,\n need_weights=False,\n attn_mask=self_attn_mask)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n '''\n ___QUESTION-6-DESCRIBE-E-START___\n How does encoder attention differ from self attention? What is the difference between key_padding_mask \n and attn_mask? If you understand this difference, then why don't we need to give attn_mask here?\n '''\n '''\n The encoder attention is making the target input word pay attention to the source sequence from encoder, while the self attention is making the input word pay attention to the words in other positions of the input sequence.\n The key_padding mask masks padded tokens ⟨pad⟩ so the model does not attend to these positions, while the attn mask masks the following tokens at each position to ensure the decoder do not look forward into the sequence.\n In encoder attention, we want the decoder to pay attention to the entire source sequence. The attn mask is not needed to mask the subsequent positions because it is not paying attention to itself.\n\n '''\n # print('encoder attention')\n state, attn = self.encoder_attn(query=state,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n need_weights=need_attn or (not self.training and self.need_attn))\n '''\n ___QUESTION-6-DESCRIBE-E-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.encoder_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state, attn","def _build_encoder(self, hparams):\n\t\tnum_layers = self.num_encoder_layers\n\t\tnum_redisual_layers = self.num_encoder_residual_layers\n\n\t\twith tf.variable_scope('encoder') as _:\n\t\t\tself.encoder_emb_inp = tf.nn.embedding_lookup(self.embedding_encoder, self.encoder_input_data)\n\n\t\t\tif hparams.encoder_type == 'uni':\n\t\t\t\t_info('num_layers = {} num_residual_layers = {}'.format(num_layers, num_redisual_layers))\n\t\t\t\t# 1. build a list of cells\n\t\t\t\tcell = self._build_encoder_cell(hparams, num_layers, num_redisual_layers)\n\t\t\t\t# 2. 
forward\n\t\t\t\t# encoder_outputs: [batch, time, hidden]\n\t\t\t\t# encoder_state: ([batch, hidden] for _ in range(layers))\n\t\t\t\tencoder_outputs, encoder_state = tf.nn.dynamic_rnn(\n\t\t\t\t\tcell,\n\t\t\t\t\tself.encoder_emb_inp,\n\t\t\t\t\tdtype=self.dtype,\n\t\t\t\t\tsequence_length=self.seq_length_encoder_input_data,\n\t\t\t\t\tswap_memory=True)\n\t\t\telif hparams.encoder_type == 'bi':\n\t\t\t\tif not num_layers % 2 == 0:\n\t\t\t\t\t_error('Bi-directional requires num_layers={} should be divided by 2'.format(num_layers))\n\t\t\t\t\traise ValueError\n\t\t\t\tnum_bi_layers = int(num_layers / 2)\n\t\t\t\tnum_bi_residual_layers = num_bi_layers - 1\n\t\t\t\t_info(' num_bi_layers={} num_bi_residual_layers={}'.format(num_bi_layers, num_bi_residual_layers))\n\n\t\t\t\tcell_fw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\n\t\t\t\tcell_bw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\n\n\t\t\t\t# bi_outputs: (fw, bw): fw: [batch, seq, hidden]\n\t\t\t\t# bi_state: (fw, bw): fw : [[batch, hidden] for _ in range(layers)]\n\t\t\t\tbi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(\n\t\t\t\t\tcell_fw,\n\t\t\t\t\tcell_bw,\n\t\t\t\t\tself.encoder_emb_inp,\n\t\t\t\t\tdtype=self.dtype,\n\t\t\t\t\tsequence_length=self.seq_length_encoder_input_data,\n\t\t\t\t\tswap_memory=True)\n\n\t\t\t\tif num_bi_layers == 1:\n\t\t\t\t\tencoder_state = bi_state\n\t\t\t\telse:\n\t\t\t\t\tencoder_state = []\n\t\t\t\t\tfor layer_id in range(num_bi_layers):\n\t\t\t\t\t\tencoder_state.append(bi_state[0][layer_id])\t\t# fw state in layer id\n\t\t\t\t\t\tencoder_state.append(bi_state[1][layer_id])\t\t# bw state in layer id\n\t\t\t\t\tencoder_state = tuple(encoder_state)\n\t\t\t\tencoder_outputs = tf.concat(bi_outputs, -1)\t\t# [batch, seq, hidden * 2]\n\t\t\telse:\n\t\t\t\t_error('Unknow encoder type: {}'.format(hparams.encoder_type))\n\t\t\t\traise ValueError\n\t\t\n\t\treturn encoder_outputs, encoder_state","def encode(self, x):\n self.eval()\n x = torch.as_tensor(x).unsqueeze(0)\n if self.do_mt:\n enc_output, _ = self.encoder_mt(x, None)\n else:\n enc_output, _ = self.encoder(x, None)\n return enc_output.squeeze(0)","def encoder_layer(enc_input, attn_bias, n_head, d_key,\n d_value, d_model, d_inner_hid, pos_enc, prepostprocess_dropout,\n attention_dropout, relu_dropout, preprocess_cmd='n',\n postprocess_cmd='da'):\n attn_output = multi_head_attention(\n pre_process_layer(enc_input, preprocess_cmd, prepostprocess_dropout),\n None, None, attn_bias, d_key, d_value, d_model, pos_enc,\n n_head, attention_dropout\n )\n attn_output = post_process_layer(enc_input, attn_output,\n postprocess_cmd, prepostprocess_dropout)\n ffd_output = positionwise_feed_forward(\n pre_process_layer(attn_output, preprocess_cmd, prepostprocess_dropout),\n d_inner_hid, d_model, relu_dropout\n )\n return post_process_layer(attn_output, ffd_output,\n postprocess_cmd, prepostprocess_dropout)","def transparent_forward(self, input, hidden, give_gates=False, debug=False):\n\n lseq, nseq = input.shape\n ispad = (input == self.padding)\n\n H = torch.zeros(lseq, self.nhid, nseq)\n if give_gates:\n Z = torch.zeros(lseq, self.nhid, nseq)\n R = torch.zeros(lseq, self.nhid, nseq)\n \n # because pytorch only returns hidden activity in the last time step,\n # we need to unroll it manually. 
    O = torch.zeros(lseq, nseq, self.decoder.out_features)
    emb = self.encoder(input)
    for t in range(lseq):
        if give_gates:
            out, hidden, ZR = self.rnn(emb[t:t+1, ...], hidden, give_gates=True)
            Z[t, :, :] = ZR[0].squeeze(0).T
            R[t, :, :] = ZR[1].squeeze(0).T
        else:
            out, hidden = self.rnn(emb[t:t+1, ...], hidden)
        dec = self.decoder(out)
        # naan = torch.ones(hidden.squeeze(0).shape)*np.nan
        # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T
        H[t, :, :] = hidden.squeeze(0).T
        O[t, :, :] = dec.squeeze(0)

    if give_gates:
        if debug:
            return O, H, Z, R, emb
        else:
            return O, H, Z, R
    else:
        if debug:
            return O, H, emb
        else:
            return O, H

def __encoder_lstm(self, x, x_lengths):
    embedded_x = self.input_embedding.forward(x)  # (input_seq_len x batch x embed_dim)
    embedded_x = self.embedding_dropout.forward(embedded_x)

    # pack and unpack the padded batch for the encoder
    packed_x = nn.utils.rnn.pack_padded_sequence(embedded_x, x_lengths)
    h, _ = self.encoder.forward(packed_x)  # (input_seq_len x batch x 2*encoder_state_dim)
    unpacked_h, _ = nn.utils.rnn.pad_packed_sequence(h)

    return unpacked_h
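A small runnable sketch of the pack/unpack pattern used in __encoder_lstm, so the LSTM skips pad positions (illustrative shapes only; seq-major layout, lengths sorted descending as pack_padded_sequence expects by default):

import torch
import torch.nn as nn

rnn = nn.LSTM(input_size=8, hidden_size=16, bidirectional=True)
padded = torch.randn(5, 3, 8)        # (seq_len, batch, embed_dim)
lengths = torch.tensor([5, 4, 2])    # true length of each batch element

packed = nn.utils.rnn.pack_padded_sequence(padded, lengths)
h, _ = rnn(packed)
unpacked, out_lengths = nn.utils.rnn.pad_packed_sequence(h)
print(unpacked.shape)                # torch.Size([5, 3, 32])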
def forward(self,
            state,
            encoder_out=None,
            encoder_padding_mask=None,
            incremental_state=None,
            prev_self_attn_state=None,
            self_attn_mask=None,
            self_attn_padding_mask=None,
            need_attn=False,
            need_head_weights=False):

    # need_attn must be True if need_head_weights
    need_attn = True if need_head_weights else need_attn

    residual = state.clone()
    state, _ = self.self_attn(query=state,
                              key=state,
                              value=state,
                              key_padding_mask=self_attn_padding_mask,
                              need_weights=False,
                              attn_mask=self_attn_mask)
    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.self_attn_layer_norm(state)

    residual = state.clone()
    '''
    ___QUESTION-6-DESCRIBE-E-START___
    How does encoder attention differ from self attention? What is the difference between key_padding_mask
    and attn_mask? If you understand this difference, then why don't we need to give attn_mask here?

    Encoder attention differs from self-attention in that it attends to the
    output embeddings of the encoder instead of the embeddings in the decoder.
    key_padding_mask marks padding positions so they are ignored, whereas
    attn_mask prevents the decoder from attending to future positions.
    We do not pass attn_mask when attending to the encoder because we want
    every decoder position to have access to all of the encoder output
    embeddings.
    '''
    state, attn = self.encoder_attn(query=state,
                                    key=encoder_out,
                                    value=encoder_out,
                                    key_padding_mask=encoder_padding_mask,
                                    need_weights=need_attn or (not self.training and self.need_attn))
    '''
    ___QUESTION-6-DESCRIBE-E-END___
    '''

    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.encoder_attn_layer_norm(state)

    residual = state.clone()
    state = F.relu(self.fc1(state))
    state = F.dropout(state, p=self.activation_dropout, training=self.training)
    state = self.fc2(state)
    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.final_layer_norm(state)

    return state, attn

def _init_rnn_state(self, encoder_hidden):
    if encoder_hidden is None:
        return None
    if isinstance(encoder_hidden, tuple):
        encoder_hidden = tuple(
            [self._cat_directions(h) for h in encoder_hidden])
    else:
        encoder_hidden = self._cat_directions(encoder_hidden)
    return encoder_hidden

def build_model(self):
    # Define model inputs for the encoder/decoder stack
    x_enc = Input(shape=(self.seq_len_in, self.input_feature_amount), name="x_enc")
    x_dec = Input(shape=(self.seq_len_out, self.output_feature_amount), name="x_dec")

    # Add noise
    x_dec_t = GaussianNoise(0.2)(x_dec)

    input_conv2 = Conv1D(filters=64, kernel_size=5, strides=2, activation='relu', padding='same')
    input_conv1 = Conv1D(filters=64, kernel_size=3, strides=2, activation='relu', padding='same', name="last_conv_layer")

    input_conv2_out = input_conv2(x_enc)
    input_conv1_out = input_conv1(input_conv2_out)

    # Define the encoder GRU, which only has to return a state
    encoder_gru = GRU(self.state_size, return_sequences=True, return_state=True, name="encoder_gru")
    encoder_out, encoder_state = encoder_gru(input_conv1_out)

    # Decoder GRU
    decoder_gru = GRU(self.state_size, return_state=True, return_sequences=True,
                      name="decoder_gru")
    # Use these definitions to calculate the outputs of our encoder/decoder stack
    dec_intermediates, decoder_state = decoder_gru(x_dec_t, initial_state=encoder_state)

    # Define the attention layer
    attn_layer = AttentionLayer(name="attention_layer")
    attn_out, attn_states = attn_layer([encoder_out, dec_intermediates])

    # Concatenate decoder and attn out
    decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([dec_intermediates, attn_out])

    # Define the dense layer
    dense = Dense(self.output_feature_amount, activation='linear', name='output_layer')
    dense_time = TimeDistributed(dense, name='time_distributed_layer')
    decoder_pred = dense_time(decoder_concat_input)

    # Define the encoder/decoder stack model
    encdecmodel = tsModel(inputs=[x_enc, x_dec], outputs=decoder_pred)

    # Define the separate encoder model for inferencing
    encoder_inf_inputs = Input(shape=(self.seq_len_in, self.input_feature_amount), name="encoder_inf_inputs")

    input_conv2_inf = input_conv2(encoder_inf_inputs)
    input_conv1_inf_out =
input_conv1(input_conv2_inf)\n\n encoder_inf_out, encoder_inf_state = encoder_gru(input_conv1_inf_out)\n encoder_model = tsModel(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n # Define the separate encoder model for inferencing\n decoder_inf_inputs = Input(shape=(1, self.output_feature_amount), name=\"decoder_inputs\")\n encoder_inf_states = Input(shape=(encdecmodel.get_layer('last_conv_layer').output_shape[1], self.state_size), name=\"decoder_inf_states\")\n decoder_init_state = Input(shape=(self.state_size,), name=\"decoder_init\")\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = tsModel(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return encoder_model, decoder_model, encdecmodel","def make_encoder(self, input_size: int, latent_size: int) -> nn.Module:\n pass","def _encode_event_idx(self, event_idx, step_idx):\n enc_dec = self.trans_model._config.encoder_decoder\n input_ = np.zeros(enc_dec.input_size)\n input_[event_idx] = 1.0\n\n offset = enc_dec._one_hot_encoding.num_classes\n n = step_idx + 1\n for i in range(enc_dec._binary_counter_bits):\n input_[offset] = 1.0 if (n // 2 ** i) % 2 else -1.0\n offset += 1\n\n return np.expand_dims(input_, 0)","def forward(self, trg_embed, encoder_hidden, encoder_final, \n src_mask, trg_mask, hidden=None, max_len=None):\n \n # the maximum number of steps to unroll the RNN\n #print(\"czw src mask\", src_mask.size())\n #print(\"czw trg embed\", trg_embed.size())\n #print(\"czw encoder_hidden\", encoder_hidden.size())\n #print(\"czw encoder_final\", encoder_final[0].size())\n if max_len is None:\n max_len = trg_embed.size(1)\n\n # initialize decoder hidden state\n if hidden is None:\n hidden = self.init_hidden(encoder_final)\n \n # pre-compute projected encoder hidden states\n # (the \"keys\" for the attention mechanism)\n # this is only done for efficiency\n proj_key = self.attention.key_layer(encoder_hidden)\n \n # here we store all intermediate hidden states and pre-output vectors\n decoder_states = []\n pre_output_vectors = []\n \n # unroll the decoder RNN for max_len steps\n for i in range(max_len):\n prev_embed = trg_embed[:, i].unsqueeze(1)\n output, hidden, pre_output = self.forward_step(\n prev_embed, encoder_hidden, src_mask, proj_key, hidden)\n decoder_states.append(output)\n pre_output_vectors.append(pre_output)\n\n decoder_states = torch.cat(decoder_states, dim=1)\n pre_output_vectors = torch.cat(pre_output_vectors, dim=1)\n return decoder_states, hidden, pre_output_vectors # [B, N, D]","def encode_decode_TD(self, n_step, idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ ):\r\n\r\n actor_embedding = embed_seq(input_seq=self.input_, from_=self.dimension, to_= self.input_embed, is_training=self.is_training, BN=True, initializer=self.initializer)\r\n actor_encoding = encode_seq(input_seq=actor_embedding, input_dim=self.input_embed, num_stacks=self.num_stacks, num_heads=self.num_heads, num_neurons=self.num_neurons, is_training=self.is_training)\r\n \r\n if self.is_training == False:\r\n actor_encoding = 
tf.tile(actor_encoding,[self.batch_size,1,1])\r\n \r\n idx_list = copy(idx_list_previous)\r\n log_probs = copy(log_probs_previous)\r\n entropies = copy(entropies_previous)\r\n \r\n\r\n mask = copy(mask_previous)\r\n \r\n n_hidden = actor_encoding.get_shape().as_list()[2] # input_embed\r\n W_ref = tf.get_variable(\"W_ref\",[1, n_hidden, self.num_units],initializer=self.initializer)\r\n W_q = tf.get_variable(\"W_q\",[self.query_dim, self.num_units],initializer=self.initializer)\r\n v = tf.get_variable(\"v\",[self.num_units],initializer=self.initializer)\r\n \r\n encoded_ref = tf.nn.conv1d(actor_encoding, W_ref, 1, \"VALID\") # actor_encoding is the ref for actions [Batch size, seq_length, n_hidden]\r\n \r\n query1 = copy( query1_previous)\r\n query2 = copy( query2_previous)\r\n query3 = copy( query3_previous)\r\n idx_copy = copy(idx_)\r\n \r\n W_1 =tf.get_variable(\"W_1\",[n_hidden, self.query_dim],initializer=self.initializer) # update trajectory (state)\r\n W_2 =tf.get_variable(\"W_2\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n W_3 =tf.get_variable(\"W_3\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n \r\n \r\n \"\"\"\r\n # sample from POINTER from the perspective of the Actor\r\n \"\"\"\r\n for step in range(n_step + 1 ): \r\n query = tf.nn.relu(tf.matmul(query1, W_1) + tf.matmul(query2, W_2) + tf.matmul(query3, W_3))\r\n logits = pointer(encoded_ref=encoded_ref, query=query, mask=mask, W_ref=W_ref, W_q=W_q, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n idx_list_previous.append(idx)\r\n \r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n log_probs_previous.append(prob.log_prob(idx))\r\n \r\n entropies.append(prob.entropy()) # entropies\r\n entropies_previous.append(prob.entropy())\r\n \r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n mask_previous = mask_previous + tf.one_hot(idx, self.max_length)\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_) # update trajectory (state)\r\n \r\n query3_previous = query2_previous\r\n query2_previous = query1_previous\r\n query1_previous = tf.gather_nd(actor_encoding, idx_) # update trajectory (state) \r\n\r\n if (len(idx_list) >= self.max_length): break #leave the loop if reach the end of the episode\r\n\r\n \"\"\"\r\n # sample from POINTER from the perspective of the Critic\r\n make q_t vector = 0\r\n \"\"\"\r\n while(len(idx_list) < self.max_length): \r\n \r\n logits = pointer_critic(encoded_ref=encoded_ref, mask=mask, W_ref=W_ref, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n entropies.append(prob.entropy()) # entropies\r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n #idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_copy) # update trajectory (state)\r\n \r\n idx_list.append(idx_list[0]) # return to start\r\n self.tour 
= tf.stack(idx_list, axis=1)  # permutations
        self.log_prob = tf.add_n(log_probs)  # corresponding log-probability for backprop
        self.entropies = tf.add_n(entropies)
        tf.summary.scalar('log_prob_mean', tf.reduce_mean(self.log_prob))
        tf.summary.scalar('entropies_mean', tf.reduce_mean(self.entropies))

        return idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_  # returns variables necessary for the next loop

def _decode_train(self):

    # The basic idea: we use the gold sketch during training, and in order to copy
    # from the source we give the decoder's true mask so it generates the right copy weights.
    state = {'encoder': self.concated_encoder_output}

    def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,
                                              reuse=False):
        return transformer_decoder(inputs, memory, bias, mem_bias, params, state, scope, reuse)

    self.final_logits = self._decode_func(
        self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,
        self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,
        expand_source_ids_oo=self.concat_src_ids_oo,
        max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,
        decoder_fn=transformer_concated_decoder_internal,
        scope='final_decoder')

def decode(self, input_size):
    output = np.zeros(input_size)

    preactivation = np.dot(self.W, self.h) + self.c
    sigmoid(preactivation, output)

    return output

def make_encoder(opt, embeddings, intent_size, output_size, use_history=False, hidden_depth=1, identity=None,
                 hidden_size=None):
    # encoder = StateEncoder(intent_size=intent_size, output_size=output_size,
    #                        state_length=opt.state_length, extra_size=3 if opt.dia_num>0 else 0)

    # intent + price
    diaact_size = (intent_size + 1)
    extra_size = 3 + 2
    if hidden_size is None:
        hidden_size = opt.hidden_size
    if not opt.use_utterance:
        embeddings = None
    if use_history:
        extra_size = 3
        # + pmask
        diaact_size += 1
        if identity is None:
            encoder = HistoryIDEncoder(None, diaact_size * 2, extra_size, embeddings, output_size,
                                       hidden_depth=hidden_depth, rnn_state=True)
        else:
            # encoder = HistoryIDEncoder(identity, diaact_size*2+extra_size, embeddings, output_size,
            #                            hidden_depth=hidden_depth)
            encoder = HistoryIDEncoder(identity, diaact_size * 2, extra_size, embeddings, output_size,
                                       hidden_depth=hidden_depth, rnn_state=True)
    else:
        if identity is None:
            encoder = CurrentEncoder(diaact_size * opt.state_length + extra_size, embeddings, output_size,
                                     hidden_depth=hidden_depth)
        else:
            extra_size = 3
            # + pmask
            diaact_size += 1
            encoder = HistoryIDEncoder(identity, diaact_size * opt.state_length, extra_size, embeddings, output_size,
                                       hidden_depth=hidden_depth)

    return encoder
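make_encoder above is a configuration-driven factory. A reduced illustration of the same dispatch pattern (Opt and its fields are hypothetical stand-ins, not the original opt):

from dataclasses import dataclass
import torch.nn as nn

@dataclass
class Opt:
    hidden_size: int = 64
    use_history: bool = False

def make_encoder_sketch(opt: Opt, input_size: int) -> nn.Module:
    # history-aware encoders get a recurrent core; current-state encoders a plain MLP
    if opt.use_history:
        return nn.GRU(input_size, opt.hidden_size, batch_first=True)
    return nn.Sequential(nn.Linear(input_size, opt.hidden_size), nn.ReLU())

enc = make_encoder_sketch(Opt(use_history=False), input_size=32)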
LSTM cell, then \"state\" is not a tuple of Tensors but an\n # LSTMStateTuple of \"c\" and \"h\". Need to concat separately then new\n if \"LSTMStateTuple\" in str(type(state[0])):\n c = tf.concat([state[0][0],state[1][0]],axis=1)\n h = tf.concat([state[0][1],state[1][1]],axis=1)\n state = tf.contrib.rnn.LSTMStateTuple(c,h)\n else:\n state = tf.concat(state,1)\n # Manually set shape to Tensor or all hell breaks loose\n state.set_shape([None, self.bi_encoder_hidden])\n return outputs, state","def forward(self, inp, hidden=None, give_gates=False, debug=False, readout_time=None):\n\n if self.recoder is None:\n emb = inp\n else:\n emb = self.recoder(inp)\n\n if hidden is None:\n hidden = self.init_hidden(inp.shape[1])\n # if emb.dim()<3:\n # emb = emb.unsqueeze(0)\n\n if give_gates:\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\n else:\n output, hidden = self.rnn(emb, hidden)\n # print(output.shape)\n\n # decoded = self.softmax(self.decoder(output))\n decoded = self.decoder(output)\n if readout_time is None:\n decoded = decoded[-1,...] # assume only final timestep matters\n\n if give_gates:\n return decoded, hidden, extras\n else:\n return decoded, hidden","def get_final_encoder_states(encoder_outputs: torch.Tensor,\n mask: torch.Tensor,\n bidirectional: bool = False) -> torch.Tensor:\n # These are the indices of the last words in the sequences (i.e. length sans padding - 1). We\n # are assuming sequences are right padded.\n # Shape: (batch_size,)\n last_word_indices = mask.sum(1).long() - 1\n\n # handle -1 cases\n ll_ = (last_word_indices != -1).long()\n last_word_indices = last_word_indices * ll_\n\n batch_size, _, encoder_output_dim = encoder_outputs.size()\n expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)\n # Shape: (batch_size, 1, encoder_output_dim)\n final_encoder_output = encoder_outputs.gather(1, expanded_indices)\n final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)\n if bidirectional:\n final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]\n final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]\n final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)\n return final_encoder_output","def make_prediction(self, previous_timesteps_x, previous_y):\n # Get the state from the Encoder using the previous timesteps for x\n # Expand the previous timesteps, we must make the input a batch (going from shape (100, 149) to (1, 100, 149))\n enc_outs, enc_last_state = self.encoder.predict(np.expand_dims(previous_timesteps_x, axis=0))\n dec_state = enc_last_state\n\n # Initialize the outputs on the previous y so we have something to feed the net\n # It might be neater to feed a start symbol instead\n dec_out = np.expand_dims(previous_y, axis=0)\n outputs = []\n attention_weights = []\n for i in range(self.seq_len_out):\n dec_out, attention, dec_state = self.decoder.predict([enc_outs, dec_state, dec_out])\n outputs.append(dec_out)\n\n # Add attention weights\n attention_weights.append(attention)\n\n # Reshape and transpose attention weights so they make more sense\n attention_weights = np.reshape(np.stack(attention_weights), newshape=(self.seq_len_out,\n self.encoder.get_layer(\"last_conv_layer\")\n .output_shape[1])).transpose()\n\n # Concatenate the outputs, as they are batches\n # For example, going from a list of (1,1,1) to one unit of (1,100,1)\n # So we take the 0th element from the batch which are our outputs\n return 
def make_prediction(self, previous_timesteps_x, previous_y):
    # Get the state from the Encoder using the previous timesteps for x
    # Expand the previous timesteps; we must make the input a batch (going from shape (100, 149) to (1, 100, 149))
    enc_outs, enc_last_state = self.encoder.predict(np.expand_dims(previous_timesteps_x, axis=0))
    dec_state = enc_last_state

    # Initialize the outputs on the previous y so we have something to feed the net
    # It might be neater to feed a start symbol instead
    dec_out = np.expand_dims(previous_y, axis=0)
    outputs = []
    attention_weights = []
    for i in range(self.seq_len_out):
        dec_out, attention, dec_state = self.decoder.predict([enc_outs, dec_state, dec_out])
        outputs.append(dec_out)

        # Add attention weights
        attention_weights.append(attention)

    # Reshape and transpose attention weights so they make more sense
    attention_weights = np.reshape(np.stack(attention_weights),
                                   newshape=(self.seq_len_out,
                                             self.encoder.get_layer("last_conv_layer").output_shape[1])).transpose()

    # Concatenate the outputs, as they are batches
    # For example, going from a list of (1,1,1) to one unit of (1,100,1)
    # So we take the 0th element from the batch, which is our output
    return np.concatenate(outputs, axis=1)[0], attention_weights

def encoder(self, inputs):
    pass

def _define_encoder(self):
    self.encoder = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, padding=1),  # B, 32, 32, 32
                                 nn.SELU(),
                                 nn.Conv2d(32, 32, 4, 2, 1),  # B, 32, 16, 16
                                 nn.SELU(),
                                 nn.Conv2d(32, 64, 4, 2, 1),  # B, 64, 8, 8
                                 nn.SELU(),
                                 nn.Conv2d(64, 64, 4, 2, 1),  # B, 64, 4, 4
                                 nn.SELU(),
                                 nn.Conv2d(64, 256, 4, 1),  # B, 256, 1, 1
                                 nn.SELU(),
                                 View((-1, 256 * 1 * 1)),  # B, 256
                                 nn.Linear(256, self.encoding_shape * 2),  # B, z_dim*2
                                 )

def forward(self,
            input,
            hidden,
            encoder_outputs):
    embedded = self.embedding(input).view(1, 1, -1)
    embedded = self.dropout(embedded)

    # attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
    attn_state = hidden[0] if isinstance(hidden, tuple) else hidden
    attn_weights = F.softmax(attn_state[0] @ encoder_outputs.squeeze().t(), dim=1)
    attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.permute(1, 0, 2))

    output = torch.cat((embedded[0], attn_applied[0]), 1)
    output = self.attn_combine(output).unsqueeze(0)

    output = F.relu(output)
    output, hidden = self.rnn(output, hidden)

    output = F.log_softmax(self.out(output[0]), dim=1)
    return output, hidden, attn_weights

def add_model(self):

    b_sz = tf.shape(self.encoder_input)[0]
    tstp_en = tf.shape(self.encoder_input)[1]
    tstp_de = tf.shape(self.decoder_input)[1]

    encoder_dropout_input = tf.nn.dropout(self.encoder_input, self.ph_dropout, name='encoder_Dropout')
    decoder_dropout_input = tf.nn.dropout(self.decoder_input, self.ph_dropout, name='decoder_Dropout')
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.config.hidden_size)
    # (batch_size, num_sentence, hidden_size)
    encoder_outputs, state = tf.nn.dynamic_rnn(lstm_cell, encoder_dropout_input, self.encoder_tstps,
                                               dtype=tf.float32, swap_memory=True, time_major=False, scope='rnn_encode')
    self.state = state
    with tf.variable_scope('decoder') as vscope:
        decoder_outputs, _ = tf.nn.dynamic_rnn(lstm_cell, decoder_dropout_input, self.decoder_tstps,  # (batch_size, time_steps, hidden_size)
                                               initial_state=state, dtype=tf.float32, swap_memory=True, time_major=False, scope='rnn_decode')

    with tf.variable_scope('rnn_decode'):
        # tf.reshape(self.ph_decoder_label, shape=(-1, 1))  # (batch_size*time_steps, 1)
        encoder_outputs_reshape = tf.reshape(encoder_outputs, shape=(-1, self.config.hidden_size), name='add_model_reshape_0')  # (batch_size*time_steps, hidden_size)
        decoder_outputs_reshape = tf.reshape(decoder_outputs, shape=(-1, self.config.hidden_size), name='add_model_reshape_1')  # (batch_size*time_steps_1, hidden_size)
        encoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(encoder_outputs_reshape, output_size=self.config.hidden_size,  # (batch_size*time_steps, hidden_size)
                                                                bias=False, scope='Ptr_W1')
        decoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(decoder_outputs_reshape, output_size=self.config.hidden_size,  # (batch_size*time_steps, hidden_size)
                                                                bias=False, scope='Ptr_W2')
        encoder_outputs_linear = tf.reshape(encoder_outputs_linear_reshape, tf.shape(encoder_outputs), name='add_model_reshape_2')
        decoder_outputs_linear = tf.reshape(decoder_outputs_linear_reshape, tf.shape(decoder_outputs), name='add_model_reshape_3')

        encoder_outputs_linear_expand = tf.expand_dims(encoder_outputs_linear, 1)  # (b_sz, 1, tstp_en, h_sz)
        decoder_outputs_linear_expand = tf.expand_dims(decoder_outputs_linear, 2)  # (b_sz, tstp_de,
1, h_sz)\n \n after_add = tf.tanh(encoder_outputs_linear_expand + decoder_outputs_linear_expand) #(b_sz, tstp_de, tstp_en, h_sz)\n \n after_add_reshape = tf.reshape(after_add, shape=(-1, self.config.hidden_size), name='add_model_reshape_4')\n \n after_add_linear_reshape = tf.nn.rnn_cell._linear(after_add_reshape, output_size=1, #(b_sz*tstp_de*tstp_en, 1)\n bias=False, scope='Ptr_v')\n after_add_linear = tf.reshape(after_add_linear_reshape, shape=tf.shape(after_add)[:3], name='add_model_reshape_5') #(b_sz, tstp_de, tstp_en)\n\n en_length_mask = tf.sequence_mask(self.encoder_tstps, #(b_sz, tstp_en)\n maxlen=tf.shape(after_add_linear)[-1], dtype=tf.bool)\n en_length_mask = tf.expand_dims(en_length_mask, 1) #(b_sz, 1, tstp_en)\n en_length_mask = tf.tile(en_length_mask, [1, tstp_de, 1])\n\n logits = tf.select(en_length_mask, after_add_linear,\n tf.ones_like(after_add_linear) * (-np.Inf)) # shape(b_sz, tstp_de, tstp_en)\n \n flat_logits = tf.reshape(logits, shape=[b_sz * tstp_de, tstp_en])\n\n vscope.reuse_variables()\n outputs_ta, _, _ = self.decoder(lstm_cell, state, encoder_outputs, encoder_dropout_input, scope='rnn_decode')\n outputs = outputs_ta.pack() #(time_steps, batch_size)\n outputs = tf.transpose(outputs, [1, 0]) #(batch_size, time_steps)\n \n state = tf.concat(1, state)\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.config.hidden_size, state_is_tuple=False)\n beam_outputs, beam_seq, beam_prob = self.beam_decoder(lstm_cell, state, encoder_outputs, \n encoder_dropout_input, beam_size=self.config.beam_size, scope='rnn_decode')\n \n self.logits = logits\n self.encoder_outputs = encoder_outputs\n self.beam_seq = beam_seq\n self.beam_prob = beam_prob\n return flat_logits, outputs, beam_outputs","def __call__(self, sequence):\n fwd_states, bwd_states = self.encode_fwd_bwd(sequence)\n return dy.concatenate([fwd_states[-1], bwd_states[-1]])","def __init__(self, input_size, hidden_size, bidirection, config):\r\n super(Encoder, self).__init__()\r\n\r\n self.config = config\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n self.bidirection = bidirection\r\n\r\n if self.config.model in ['LSTM', 'GRU']:\r\n self.rnn = getattr(nn, self.config.model)(self.input_size, self.hidden_size, self.config.nlayer_enc,\r\n batch_first=True, dropout=self.config.dropout,\r\n bidirectional=self.bidirection)\r\n else:\r\n try:\r\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[self.config.model]\r\n except KeyError:\r\n raise ValueError(\"\"\"An invalid option for `--model` was supplied,\r\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\")\r\n self.rnn = nn.RNN(self.input_size, self.hidden_size, self.config.nlayers, nonlinearity=nonlinearity,\r\n batch_first=True, dropout=self.config.dropout, bidirectional=self.bidirection)","def init_hidden_state(self, encoder_out: torch.Tensor):\n pass","def forward(self, *args): # noqa: R0914\r\n encoder_out, (hn, cn) = self.unified_encoder(*args)\r\n device = hn.device\r\n non_sequential_cont_decoded = self.mlp_non_seq_cont(hn)\r\n non_sequential_cat_decoded = []\r\n for mlp_non_seq_cat in self.mlp_non_seq_cat_list:\r\n non_sequential_cat_decoded.append(mlp_non_seq_cat(hn))\r\n\r\n hn = torch.unsqueeze(hn, 0)\r\n cn = torch.unsqueeze(cn, 0)\r\n # decoded is the output prediction of timestep i-1 of the decoder\r\n decoded = torch.zeros(encoder_out.shape[0], int(\r\n self.unified_encoder.seq_cont_count + self.unified_encoder.no_of_embs_seq), device=device)\r\n seq_cont_decoded = torch.Tensor(device=device)\r\n seq_cat_decoded = []\r\n 
for _ in range(self.unified_encoder.seq_cat_count):\r\n seq_cat_decoded.append(torch.Tensor(device=device))\r\n\r\n for _ in range(encoder_out.shape[1]):\r\n decoded, (hn, cn), out_cont, out_cat = self.decoder(decoded, (hn, cn))\r\n # Predict all categorical columns\r\n out_cat_onehot = []\r\n if self.unified_encoder.seq_cat_count != 0:\r\n for idx, out in enumerate(out_cat):\r\n out_cat_onehot.append(torch.argmax(out, dim=1).unsqueeze(-1))\r\n seq_cat_decoded[idx] = torch.cat(\r\n [seq_cat_decoded[idx], out.view(out.shape[0], 1, -1)], dim=1)\r\n out_cat_onehot = torch.cat(out_cat_onehot, -1)\r\n out_cat_embedding = self.unified_encoder.seq_emb_layers(out_cat_onehot)\r\n decoded = torch.cat([out_cat_embedding, out_cont], dim=-1)\r\n else:\r\n decoded = out_cont\r\n seq_cont_decoded = torch.cat(\r\n [seq_cont_decoded, out_cont.view(out_cont.shape[0], 1, -1)], dim=1)\r\n\r\n return non_sequential_cont_decoded, non_sequential_cat_decoded, seq_cont_decoded, seq_cat_decoded","def encode(input):\n return ModelEncoder().encode(input)","def generate_encoder(input_shape: Tuple[int]=(100,1), lstm_units:int = 100, latent_dim:int=20)->tf.keras.Model:\n\n input = tf.keras.layers.Input(shape=input_shape , name=\"encoder_input\")\n #create a bi-directional LSTM layer\n encoded = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=lstm_units, return_sequences=True))(input)\n encoded = tf.keras.layers.Flatten()(encoded)\n encoded = tf.keras.layers.Dense(units=latent_dim, name=\"latent_encoding\")(encoded)\n encoded = tf.keras.layers.Reshape(target_shape=(latent_dim, 1) , name=\"output_encoder\")(encoded)\n\n model = tf.keras.Model(inputs=input, outputs=encoded, name=\"encoder\")\n\n return model","def forward(self, inputs, decode_len=None):\n\n batch_size = inputs.size(0)\n input_dim = inputs.size(1)\n assert input_dim == self.input_dim, 'input dim should be {:d} but now: {:d}'.format(self.input_dim, input_dim)\n\n sourceL = inputs.size(2)\n\n if self.embed_input:\n # repeat embeddings across batch_size\n # result is [batch_size x input_dim x embedding_dim]\n # TODO: repeat or expand?\n embedding = self.embedding.repeat(batch_size, 1, 1)\n embedded_inputs = []\n # result is [batch_size, 1, input_dim, sourceL]\n ips = inputs.unsqueeze(1)\n\n for i in range(sourceL):\n # [batch_size x 1 x input_dim] * [batch_size x input_dim x embedding_dim]\n # result is [batch_size, embedding_dim]\n embedded_inputs.append(torch.bmm(\n ips[:, :, :, i].float(),\n embedding).squeeze(1))\n\n # Result is [sourceL x batch_size x embedding_dim]\n embedded_inputs = torch.cat(embedded_inputs).view(\n sourceL,\n batch_size,\n embedding.size(2))\n else:\n embedded_inputs = inputs.permute(2, 0, 1)\n\n (encoder_hx, encoder_cx) = init_zero_hidden(self.hidden_dim, inputs.is_cuda)\n encoder_hx = encoder_hx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n encoder_cx = encoder_cx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n \n # encoder forward pass\n enc_h, (enc_h_t, enc_c_t) = self.encoder(embedded_inputs, (encoder_hx, encoder_cx))\n\n enc_h_linear = enc_h.view(-1, self.hidden_dim)\n # enc_h_linear_2d = enc_h_linear.view(self.hidden_dim, -1)\n enc_action_scores = self.EncodeScore(enc_h_linear)\n enc_action_scores = enc_action_scores.view(-1, batch_size).permute(1, 0)\n dec_init_state = (enc_h_t[-1], enc_c_t[-1])\n \n # repeat decoder_in_0 across batch\n decoder_input = self.decoder_in_0.unsqueeze(0).repeat(embedded_inputs.size(1), 1)\n\n (head_pointer_probs, head_positions, tail_pointer_probs, 
tail_positions, cls_scores), dec_hidden_t = self.decoder(decoder_input,\n embedded_inputs,\n dec_init_state,\n enc_h, max_len=decode_len)\n #TODO: added conversion to tensors\n head_pointer_probs = torch.stack(head_pointer_probs)\n head_pointer_probs = head_pointer_probs.permute(1, 0, 2)\n tail_pointer_probs = torch.stack(tail_pointer_probs)\n tail_pointer_probs = tail_pointer_probs.permute(1, 0, 2)\n cls_scores = torch.stack(cls_scores)\n cls_scores = cls_scores.permute(1, 0, 2)\n head_positions = torch.stack(head_positions)\n head_positions = head_positions.permute(1, 0)\n tail_positions = torch.stack(tail_positions)\n tail_positions = tail_positions.permute(1, 0)\n\n\n\n return head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, enc_action_scores","def _basic_rnn_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n feed_previous,\n dtype=dtypes.float32,\n scope=None):\n with variable_scope.variable_scope(scope or \"basic_rnn_seq2seq\"):\n enc_cell = copy.deepcopy(cell)\n _, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)\n if feed_previous:\n return _rnn_decoder(decoder_inputs, enc_state, cell, _loop_function)\n else:\n return _rnn_decoder(decoder_inputs, enc_state, cell)","def get_rnn_init_state(combiner_outputs: Dict[str, torch.Tensor], sequence_reducer: SequenceReducer, num_layers: int) ->torch.Tensor:\n if ENCODER_OUTPUT_STATE not in combiner_outputs:\n encoder_output_state = combiner_outputs[HIDDEN]\n else:\n encoder_output_state = combiner_outputs[ENCODER_OUTPUT_STATE]\n if isinstance(encoder_output_state, tuple):\n if len(encoder_output_state) == 2:\n encoder_output_state = encoder_output_state[0]\n elif len(encoder_output_state) == 4:\n encoder_output_state = torch.mean([encoder_output_state[0], encoder_output_state[2]])\n else:\n raise ValueError(f'Invalid sequence decoder inputs with keys: {combiner_outputs.keys()} with extracted encoder ' + f'state: {encoder_output_state.size()} that was invalid. 
Please double check the compatibility ' + 'of your encoder and decoder.')\n if len(encoder_output_state.size()) > 3:\n raise ValueError('Init state for RNN decoders only works for 1d or 2d tensors (encoder_output).')\n if len(encoder_output_state.size()) == 3:\n encoder_output_state = sequence_reducer(encoder_output_state)\n return repeat_2D_tensor(encoder_output_state, num_layers)","def rnn_with_embedding(self,cell,init_state,input_seq,\n input_seq_len,reuse=None,\n scope=\"RNN\"): \n with tf.variable_scope(scope,reuse=reuse) as vs:\n log(vs.name+\"/Encoding sequences\")\n with tf.device('/cpu:0'):\n emb = tf.get_variable(\"emb\",\n [self.vocab_size,self.hidden_size],\n dtype=tf.float32)\n un_emb = tf.get_variable(\"unemb\",\n [self.hidden_size,self.vocab_size],\n tf.float32)\n # We need a bias\n un_emb_b = tf.get_variable(\"unemb_b\",\n [self.vocab_size],\n dtype=tf.float32)\n \n assert scope+\"/emb:0\" in emb.name,\\\n \"Making sure the reusing is working\"\n emb_input_seq = tf.nn.embedding_lookup(\n emb,input_seq)\n emb_input_list = tf.unpack(\n tf.transpose(emb_input_seq,[1,0,2]))\n \n # RNN pass\n if init_state is None:\n init_state = cell.zero_state(\n tf.shape(emb_input_list[0])[0],tf.float32)\n \n emb_output_list, final_state = tf.nn.rnn(\n cell,emb_input_list,initial_state=init_state,\n sequence_length=input_seq_len)\n\n # We shift the predicted outputs, because at\n # each word we're trying to predict the next.\n emb_output_list = emb_output_list[:-1]\n \n # Unembedding\n output_list = [tf.matmul(t,un_emb) + un_emb_b\n for t in emb_output_list]\n outputs = tf.transpose(tf.pack(output_list),[1,0,2])\n\n return outputs, final_state","def transparent_forward(self, inp, hidden=None, give_gates=False, debug=False):\n\n lseq = inp.shape[0]\n nseq = inp.shape[1]\n # ispad = (input == self.padding)\n\n if hidden is None:\n hidden = self.init_hidden(nseq)\n\n H = torch.zeros(lseq, self.nhid, nseq)\n if give_gates:\n Z = torch.zeros(lseq, self.nhid, nseq)\n R = torch.zeros(lseq, self.nhid, nseq)\n \n # because pytorch only returns hidden activity in the last time step,\n # we need to unroll it manually. 
def transparent_forward(self, inp, hidden=None, give_gates=False, debug=False):
    lseq = inp.shape[0]
    nseq = inp.shape[1]
    # ispad = (input == self.padding)

    if hidden is None:
        hidden = self.init_hidden(nseq)

    H = torch.zeros(lseq, self.nhid, nseq)
    if give_gates:
        Z = torch.zeros(lseq, self.nhid, nseq)
        R = torch.zeros(lseq, self.nhid, nseq)

    # because pytorch only returns hidden activity in the last time step,
    # we need to unroll it manually
    O = torch.zeros(lseq, nseq, self.decoder.out_features)
    if self.recoder is None:
        emb = inp
    else:
        emb = self.recoder(inp)
    for t in range(lseq):
        if give_gates:
            out, hidden, ZR = self.rnn(emb[t:t+1, ...], hidden, give_gates=True)
            Z[t, :, :] = ZR[0].squeeze(0).T
            R[t, :, :] = ZR[1].squeeze(0).T
        else:
            out, hidden = self.rnn(emb[t:t+1, ...], hidden)
        dec = self.decoder(out)
        # naan = torch.ones(hidden.squeeze(0).shape)*np.nan
        # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T
        H[t, :, :] = hidden.squeeze(0).T
        O[t, :, :] = dec.squeeze(0)

    if give_gates:
        if debug:
            return O, H, Z, R, emb
        else:
            return O, H, Z, R
    else:
        if debug:
            return O, H, emb
        else:
            return O, H

def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
                         target_sequence_length, max_summary_length,
                         output_layer, keep_prob):
    # TODO: Implement Function
    training_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)
    basic_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, training_helper, encoder_state, output_layer)
    f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(basic_decoder, maximum_iterations=max_summary_length)
    return f_output

def _add_input_decoder(self, inputs, seq_len, enc_fw, enc_bw):
    with tf.variable_scope("decoder"):
        cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
        cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
        ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(
            cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len,
            swap_memory=True, initial_state_fw=enc_fw, initial_state_bw=enc_bw)

    return fw_states, bw_states

def forward(self, source, out_seq_len=None):
    # source seems to be a (1, 1, 2)
    batch_size = source.shape[0]
    seq_len = source.shape[1]
    if out_seq_len is None:
        out_seq_len = seq_len

    #############################################################################
    # TODO: Implement the forward pass of the Seq2Seq model:                    #
    #   1) Get the last hidden representation from the encoder. Use it as      #
    #      the first hidden state of the decoder.                              #
    #   2) The first input for the decoder should be the start token, which    #
    #      is the first in the source sequence.                                #
    #   3) Feed this first input and hidden state into the decoder one step    #
    #      at a time in the sequence, adding the output to the final outputs.  #
    #   4) Update the input and hidden weights being fed into the decoder      #
    #      at each time step. The decoder output at the previous time step     #
    #      will have to be manipulated before being fed in as the decoder      #
    #      input at the next time step.                                        #
    #############################################################################
    output, hidden = self.encoder(source)
    outputs = torch.zeros(batch_size, out_seq_len, self.decoder.output_size, device=self.device)
    # initialize -- batch size = 128, seq_len = 20.
    output, hidden = self.decoder(source[:, 0], hidden)
    # output of shape -- batch size,
    # outputs.size() = [20, 5893]
    # output.size()  = [128, 5893]

    # simple:
    # output.size()  = (8)
    # outputs.size() = (2, 8)
    outputs[:, 0, :] = output
    output_idx = outputs[:, 0, :].argmax(1)
    output_idx = output_idx.unsqueeze(1)
    for i in range(1, out_seq_len):
        output, hidden = self.decoder(output_idx, hidden)
        outputs[:, i, :] = output
        output_idx = outputs[:, i, :].argmax(1)
        output_idx = output_idx.unsqueeze(1)
    #############################################################################
    #                            END OF YOUR CODE                              #
    #############################################################################
    return outputs
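# The loop above feeds the decoder greedily: at each step the previous
# distribution is collapsed to a token id before re-entering the decoder.
# A minimal sketch of that manipulation, assuming a hypothetical
# [batch, vocab] logits tensor:
import torch

logits = torch.randn(4, 100)            # [batch=4, vocab=100]
next_token = logits.argmax(dim=1)       # [batch] token ids
next_input = next_token.unsqueeze(1)    # [batch, 1], shaped for the next decoder step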
def get_rnn_init_state(
    combiner_outputs: Dict[str, torch.Tensor], sequence_reducer: SequenceReducer, num_layers: int
) -> torch.Tensor:
    if ENCODER_OUTPUT_STATE not in combiner_outputs:
        # Use the combiner's hidden state.
        encoder_output_state = combiner_outputs[HIDDEN]
    else:
        # Use the encoder's output state.
        encoder_output_state = combiner_outputs[ENCODER_OUTPUT_STATE]
    if isinstance(encoder_output_state, tuple):
        if len(encoder_output_state) == 2:
            # LSTM encoder. Use the hidden state and ignore the cell state.
            encoder_output_state = encoder_output_state[0]
        elif len(encoder_output_state) == 4:
            # Bi-directional LSTM encoder. Use the average of hidden states and ignore cell state.
            # torch.mean needs a tensor, not a list, so stack first.
            encoder_output_state = torch.mean(
                torch.stack([encoder_output_state[0], encoder_output_state[2]]), dim=0)
        else:
            raise ValueError(
                f"Invalid sequence decoder inputs with keys: {combiner_outputs.keys()} with extracted encoder "
                f"state: {encoder_output_state.size()} that was invalid. Please double check the compatibility "
                "of your encoder and decoder."
            )

    if len(encoder_output_state.size()) > 3:
        raise ValueError("Init state for RNN decoders only works for 1d or 2d tensors (encoder_output).")

    if len(encoder_output_state.size()) == 3:
        # Reduce to [batch_size, hidden_size].
        encoder_output_state = sequence_reducer(encoder_output_state)

    return repeat_2D_tensor(encoder_output_state, num_layers)

def _define_decoder(self):
    self.decoder = nn.Sequential(
        nn.Linear(self.encoding_shape, 256),    # B, 256
        View((-1, 256, 1, 1)),                  # B, 256, 1, 1
        nn.SELU(),
        nn.ConvTranspose2d(256, 64, 4),         # B, 64, 4, 4
        nn.SELU(),
        nn.ConvTranspose2d(64, 64, 4, 2, 1),    # B, 64, 8, 8
        nn.SELU(),
        nn.ConvTranspose2d(64, 32, 4, 2, 1),    # B, 32, 16, 16
        nn.SELU(),
        nn.ConvTranspose2d(32, 32, 4, 2, 1),    # B, 32, 32, 32
        nn.SELU(),
        nn.ConvTranspose2d(32, 3, 4, 2, 1),     # B, nc, 64, 64
        nn.ReLU()
    )

def encode(self, x: Tensor) -> Tensor:
    return self.encoder(x)[0]

def encode(self) -> str:
    return Activation._encoder.encode(self)

def encoder(list_of_str, key):
    tokenized = self.tokenizer.encode_commands(list_of_str)
    hidden = self.tokenizer.tokenize(tokenized)
    hidden = hidden.permute(1, 0, 2).reshape(hidden.size(1), -1)  # correct for bidirectional
    return hidden

def forward(self, input, hidden):
    embedded = self.embedding(input).view(1, 1, -1)
    output = F.relu(embedded)
    output, hidden = self.rnn(output, hidden)
    output = self.softmax(self.out(output[0]))
    return output, hidden

def encode(self,
           data: mx.sym.Symbol,
           data_length: mx.sym.Symbol,
           seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
    # data: (batch_size, seq_len, num_hidden)
    data = mx.sym.FullyConnected(data=data,
                                 num_hidden=self.config.cnn_config.num_hidden,
                                 no_bias=True,
                                 flatten=False,
                                 weight=self.i2h_weight)

    # Multiple layers with residual connections:
    for layer in self.layers:
        data = data + layer(data, data_length, seq_len)
    return data, data_length, seq_len
def __call__(self, encoder_hidden_states):
    params = self.dec_params
    search_params = self.search_params

    lm_params = self.lm_params
    get_top_k_fn = self.top_k_setup_with_lm(encoder_hidden_states)

    x = params.embedding[data_utils.GO_ID]
    x_lm = lm_params.embedding[data_utils.GO_ID]

    # Initialize decoder states (integer division: each LSTM kernel packs 4 gates)
    h_size = params.dec_lstm_w.shape[1] // 4
    zero_dec_state = (np.zeros(h_size), np.zeros(h_size))

    dec_lm_h_size = params.lm_lstm_w.shape[1] // 4
    zero_dec_lm_state = (np.zeros(dec_lm_h_size), np.zeros(dec_lm_h_size))

    # Initialize LM state
    lm_h_size = lm_params.lstm_w.shape[1] // 4
    zero_lm_state = (np.zeros(lm_h_size), np.zeros(lm_h_size))

    zero_attn = np.zeros(encoder_hidden_states.shape[1])

    # Maintain a tuple of (output_indices, score, encountered EOS?)
    output_list = []
    final_output_list = []
    k = search_params.beam_size  # Represents the current beam size
    step_count = 0

    # Run step 0 separately
    top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec = \
        get_top_k_fn(x, x_lm, [zero_dec_state, zero_dec_lm_state, zero_lm_state],
                     zero_attn, beam_size=k)
    for idx in range(top_k_indices.shape[0]):
        output_tuple = (BeamEntry([top_k_indices[idx]], state_list, context_vec),
                        top_k_model_scores[idx])
        if top_k_indices[idx] == data_utils.EOS_ID:
            final_output_list.append(output_tuple)
            # Decrease the beam size once EOS is encountered
            k -= 1
        else:
            output_list.append(output_tuple)

    step_count += 1
    while step_count < 120 and k > 0:
        # These lists store the states obtained by running the decoder
        # for 1 more step with the previous outputs of the beam
        next_dec_states = []
        next_context_vecs = []

        score_list = []
        model_score_list = []
        index_list = []
        for candidate, cand_score in output_list:
            x = params.embedding[candidate.get_last_output()]
            x_lm = lm_params.embedding[candidate.get_last_output()]

            top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec = \
                get_top_k_fn(x, x_lm, candidate.get_dec_state(),
                             candidate.get_context_vec(), beam_size=k)

            next_dec_states.append(state_list)
            next_context_vecs.append(context_vec)

            index_list.append(top_k_indices)
            score_list.append(top_k_scores + cand_score)
            model_score_list.append(top_k_model_scores + cand_score)

        # Scores of all k**2 continuations
        all_scores = np.concatenate(score_list, axis=0)
        all_model_scores = np.concatenate(model_score_list, axis=0)
        # All k**2 continuations
        all_indices = np.concatenate(index_list, axis=0)

        # Find the top indices among the k**2 entries
        top_k_indices = np.argpartition(all_scores, -k)[-k:]
        next_k_indices = all_indices[top_k_indices]
        top_k_scores = all_model_scores[top_k_indices]
        # The original candidate indices can be found by integer-dividing by k,
        # because the flat indices have the form i * k + j, where i is the ith
        # candidate and j is its jth top continuation
        orig_cand_indices = top_k_indices // k

        new_output_list = []

        for idx in range(k):
            orig_cand_idx = int(orig_cand_indices[idx])
            # BeamEntry of the original candidate
            orig_cand = output_list[orig_cand_idx][0]
            next_elem = next_k_indices[idx]
            # Add the next index to the original sequence
            new_index_seq = orig_cand.get_index_seq() + [next_elem]
            dec_state = next_dec_states[orig_cand_idx]
            context_vec = next_context_vecs[orig_cand_idx]

            output_tuple = (BeamEntry(new_index_seq, dec_state, context_vec),
                            top_k_scores[idx] +
                            search_params.word_ins_penalty * len(new_index_seq))
            if next_elem == data_utils.EOS_ID:
                # This sequence is finished. Put the output on the final list
                # and reduce beam size
                final_output_list.append(output_tuple)
                k -= 1
            else:
                new_output_list.append(output_tuple)

        output_list = new_output_list
        step_count += 1

    final_output_list += output_list

    best_output = max(final_output_list, key=lambda output_tuple: output_tuple[1])
    output_seq = best_output[0].get_index_seq()
    return np.stack(output_seq, axis=0)
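# The candidate selection above hinges on np.argpartition: it finds the k best
# of the k**2 continuations without a full sort. A minimal sketch, assuming a
# hypothetical flat score vector:
import numpy as np

all_scores = np.array([0.1, 2.3, 0.7, 1.9, 0.2, 1.1])
k = 2
top_k = np.argpartition(all_scores, -k)[-k:]   # indices of the k largest, unordered
orig_candidate = top_k // k                     # row (candidate) each flat index came from
print(sorted(all_scores[top_k]))                # [1.9, 2.3]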
def forward(self, input, hidden):
    output, hidden = self.rnn(input, hidden)
    output = f.log_softmax(self.out(output.squeeze(1)), 1)
    return output, hidden

def forward(self, state, encoder_padding_mask):
    residual = state.clone()

    '''
    ___QUESTION-6-DESCRIBE-D-START___
    What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor
    be after multi-head attention? HINT: formulate your answer in terms of
    constituent variables like batch_size, embed_dim etc...
    '''
    '''
    The encoder padding mask masks the ⟨pad⟩ tokens that are appended to the
    input sequences so that all sequences in a batch have the same length.
    With the mask, the words of the input sequence do not attend to these
    padded positions.
    The shape of state is (tgt_time_steps * batch_size * embed_dim).
    '''
    state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)
    '''
    ___QUESTION-6-DESCRIBE-D-END___
    '''

    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.self_attn_layer_norm(state)

    residual = state.clone()
    state = F.relu(self.fc1(state))
    state = F.dropout(state, p=self.activation_dropout, training=self.training)
    state = self.fc2(state)
    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.final_layer_norm(state)

    return state
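# A minimal, self-contained illustration of key_padding_mask with PyTorch's
# nn.MultiheadAttention (shapes follow the [seq_len, batch, embed_dim]
# convention used in the snippet above; all sizes are made up):
import torch
import torch.nn as nn

seq_len, batch, embed_dim = 5, 2, 16
attn = nn.MultiheadAttention(embed_dim, num_heads=4)
state = torch.randn(seq_len, batch, embed_dim)
# True marks padded positions; here the last two steps of sample 0 are padding.
key_padding_mask = torch.zeros(batch, seq_len, dtype=torch.bool)
key_padding_mask[0, 3:] = True
out, weights = attn(state, state, state, key_padding_mask=key_padding_mask)
print(out.shape)                  # torch.Size([5, 2, 16])
print(weights[0, :, 3:].sum())    # ~0: no attention mass on padded keys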
def forward(self, state, encoder_padding_mask):
    residual = state.clone()

    '''
    ___QUESTION-6-DESCRIBE-D-START___
    What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor
    be after multi-head attention? HINT: formulate your answer in terms of
    constituent variables like batch_size, embed_dim etc...

    The purpose of encoder_padding_mask is to account for the fact that the
    source sentences in the batch are of different length. The output shape
    of the state tensor will be [src_time_steps, batch_size, embed_dim].
    '''
    state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)
    '''
    ___QUESTION-6-DESCRIBE-D-END___
    '''

    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.self_attn_layer_norm(state)

    residual = state.clone()
    state = F.relu(self.fc1(state))
    state = F.dropout(state, p=self.activation_dropout, training=self.training)
    state = self.fc2(state)
    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.final_layer_norm(state)

    return state

def get_recurrent_encoder(config: RecurrentEncoderConfig, prefix: str) -> 'Encoder':
    # TODO give more control on encoder architecture
    encoder_seq = EncoderSequence([], config.dtype)

    if config.conv_config is not None:
        encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,
                           prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)
        if config.conv_config.add_positional_encoding:
            # If specified, add positional encodings to segment embeddings
            encoder_seq.append(AddSinCosPositionalEmbeddings,
                               num_embed=config.conv_config.num_embed,
                               scale_up_input=False,
                               scale_down_positions=False,
                               prefix="%s%sadd_positional_encodings" % (prefix, C.CHAR_SEQ_ENCODER_PREFIX))
        encoder_seq.append(ConvertLayout, infer_hidden=True, target_layout=C.TIME_MAJOR)
    else:
        encoder_seq.append(ConvertLayout, target_layout=C.TIME_MAJOR, num_hidden=0)

    if config.reverse_input:
        encoder_seq.append(ReverseSequence, infer_hidden=True)

    if config.rnn_config.residual:
        utils.check_condition(config.rnn_config.first_residual_layer >= 2,
                              "Residual connections on the first encoder layer are not supported")

    # One layer bi-directional RNN:
    encoder_seq.append(BiDirectionalRNNEncoder,
                       rnn_config=config.rnn_config.copy(num_layers=1),
                       prefix=prefix + C.BIDIRECTIONALRNN_PREFIX,
                       layout=C.TIME_MAJOR)

    if config.rnn_config.num_layers > 1:
        # Stacked uni-directional RNN:
        # Because we already have a one layer bi-rnn we reduce the num_layers as well as the first_residual_layer.
        remaining_rnn_config = config.rnn_config.copy(num_layers=config.rnn_config.num_layers - 1,
                                                      first_residual_layer=config.rnn_config.first_residual_layer - 1)
        encoder_seq.append(RecurrentEncoder,
                           rnn_config=remaining_rnn_config,
                           prefix=prefix + C.STACKEDRNN_PREFIX,
                           layout=C.TIME_MAJOR)

    encoder_seq.append(ConvertLayout, infer_hidden=True, target_layout=C.BATCH_MAJOR)

    return encoder_seq

def getEncode(self, img):
    img_ = self.preprocess(img)
    fv = self.model_.predict(img_)
    fv = fv.reshape(-1, 1)
    return fv

def forward(self, *args):
    enc_src, _, _ = self.unified_encoder(*args)
    enc_src = enc_src.view(enc_src.shape[0], -1)
    y_pred = self.mlp(enc_src)
    return y_pred

def gru_seq2seq_internal_bid_encoder(inputs, targets, hparams, train):
    with tf.variable_scope("gru_seq2seq_bid_encoder"):
        if inputs is not None:
            inputs_length = common_layers.length_from_embedding(inputs)
            # Flatten inputs.
            inputs = common_layers.flatten4d3d(inputs)
            # GRU encoder.
            _, final_encoder_state = gru_bid_encoder(
                inputs, inputs_length, hparams, train, "encoder")
        else:
            inputs_length = None
            final_encoder_state = None
        # GRU decoder.
        shifted_targets = common_layers.shift_right(targets)
        # Add 1 to account for the padding added to the left from shift_right
        targets_length = common_layers.length_from_embedding(shifted_targets) + 1
        hparams_decoder = copy.copy(hparams)
        hparams_decoder.hidden_size = 2 * hparams.hidden_size
        decoder_outputs, _ = gru(
            common_layers.flatten4d3d(shifted_targets),
            targets_length,
            hparams_decoder,
            train,
            "decoder",
            initial_state=final_encoder_state)
        return tf.expand_dims(decoder_outputs, axis=2)

def forward(self, x):
    x = self.encoder(x)
    x = self.decoder(x)
    return x

def _encode(self, src_token_ids, padding_mask, training=False):
    src_seq_len = tf.shape(src_token_ids)[1]

    # [batch_size, src_seq_len, hidden_size]
    src_token_embeddings = self._embedding_logits_layer(
        src_token_ids, 'embedding')

    # [src_seq_len, hidden_size]
    positional_encoding = utils.get_positional_encoding(
        src_seq_len, self._hidden_size)
    src_token_embeddings += positional_encoding
    src_token_embeddings = self._encoder_dropout_layer(
        src_token_embeddings, training)

    encoder_outputs = self._encoder(
        src_token_embeddings, padding_mask, training)
    return encoder_outputs

def gru_bid_encoder(inputs, sequence_length, hparams, train, name):
    with tf.variable_scope(name):
        cell_fw = tf.nn.rnn_cell.MultiRNNCell(
            [_dropout_gru_cell(hparams, train)
             for _ in range(hparams.num_hidden_layers)])

        cell_bw = tf.nn.rnn_cell.MultiRNNCell(
            [_dropout_gru_cell(hparams, train)
             for _ in range(hparams.num_hidden_layers)])

        ((encoder_fw_outputs, encoder_bw_outputs),
         (encoder_fw_state, encoder_bw_state)) = tf.nn.bidirectional_dynamic_rnn(
            cell_fw,
            cell_bw,
            inputs,
            sequence_length,
            dtype=tf.float32,
            time_major=False)

        encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
        encoder_states = []

        for i in range(hparams.num_hidden_layers):
            encoder_state = tf.concat(
                values=(encoder_fw_state[i], encoder_bw_state[i]),
                axis=1,
                name="bidirectional_concat")

            encoder_states.append(encoder_state)

        encoder_states = tuple(encoder_states)
        return encoder_outputs, encoder_states
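# gru_bid_encoder above doubles the feature dimension by concatenating the
# forward and backward passes. A minimal sketch of the same concatenation on
# dummy tensors (shapes assumed for illustration):
import torch

fw = torch.randn(8, 10, 32)   # [batch, time, hidden] forward outputs
bw = torch.randn(8, 10, 32)   # backward outputs
outputs = torch.cat((fw, bw), dim=2)
print(outputs.shape)          # torch.Size([8, 10, 64])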
def lstm_encoder(sequence, lstm,
                 seq_lens=None, init_states=None, embedding=None):
    # transpose batch tensor to fit lstm format
    # sequence size [batch_size, max_seq_len]
    batch_size = sequence.size(0)
    max_seq_len = sequence.size(1)
    batch_first = lstm.batch_first

    if not batch_first:  # embedding and transpose input sequence tensor
        sequence = sequence.transpose(0, 1)

    # emb_sequence size [batch_size, max_seq_len, emb_dim]
    emb_sequence = (embedding(sequence) if embedding is not None
                    else sequence)
    # reorder batch tensor along batch dim
    if seq_lens is not None:  # reorder input sequence tensor along batch dim
        # (max_sen_len, batch_size, lstm_input_size): sort along the batch
        # dimension in descending order of true sequence length (number of sentences)
        assert batch_size == len(seq_lens)
        sort_ind = sorted(range(len(seq_lens)),
                          key=lambda i: seq_lens[i], reverse=True)  # determine the sort indices
        seq_lens = [seq_lens[i] for i in sort_ind]  # sort the true lengths by the sort indices
        emb_sequence = reorder_sequence(emb_sequence, sort_ind,
                                        lstm.batch_first)  # reorder the tensor's batch dim by the sort indices

    # init hidden state and cell state for lstm
    if init_states is None:  # initialize the LSTM hidden and cell states
        device = sequence.device
        init_states = init_lstm_states(lstm, batch_size, device)
    else:
        init_states = (init_states[0].contiguous(),
                       init_states[1].contiguous())

    if seq_lens is not None:  # Encode & Reorder Back
        packed_seq = nn.utils.rnn.pack_padded_sequence(emb_sequence,  # pack the (reordered) input so the LSTM skips padded steps
                                                       seq_lens,
                                                       batch_first=batch_first)  # https://www.cnblogs.com/sbj123456789/p/9834018.html
        packed_out, final_states = lstm(packed_seq.to(init_states[0].dtype), init_states)  # encode
        lstm_out, _ = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=batch_first,
                                                       total_length=max_seq_len)
        # (max_sent_len, batch_size, emb_dim)

        back_map = {ind: i for i, ind in enumerate(sort_ind)}  # maps {original index: sorted position} so encoded results can be restored to input order
        reorder_ind = [back_map[i] for i in range(len(seq_lens))]  # inverse permutation of sort_ind
        lstm_out = reorder_sequence(lstm_out, reorder_ind,
                                    batch_first)  # restore the original batch order: (max_sent_len, batch_size, lstm_size)
        # final_states = reorder_lstm_states(final_states, reorder_ind)
    else:
        lstm_out, final_states = lstm(sequence, init_states)

    # (seq_len, batch, embedding), (hidden_layers * num_directions, batch, hidden_size)
    return lstm_out, final_states
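# lstm_encoder above relies on pack_padded_sequence / pad_packed_sequence to
# keep the LSTM from processing pad steps. A minimal self-contained round trip
# (toy sizes, lengths already descending as the function arranges them):
import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=3, hidden_size=5, batch_first=True)
x = torch.randn(2, 4, 3)                  # [batch, max_len, features]
lengths = [4, 2]                          # true lengths, descending
packed = nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True)
packed_out, (h, c) = lstm(packed)
out, out_lens = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=True, total_length=4)
print(out.shape, out_lens.tolist())       # torch.Size([2, 4, 5]) [4, 2]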
def forward(self, src, mask):
    bs = src.shape[0]
    src = src.permute(2, 0, 1)
    m = src
    enc_embed = self.enc_embed.weight.unsqueeze(1).repeat(1, bs, 1)
    for layer in self.encoder_layers:
        m = layer(m,
                  pos=enc_embed,
                  src_mask=mask)
    return m.permute(1, 2, 0), enc_embed.permute(1, 2, 0)

def _encode(self):
    with tf.variable_scope('passage_encoding'):
        self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)
    with tf.variable_scope('question_encoding'):
        self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)
    if self.use_dropout:
        self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)
        self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)

def encode(self):
    with tf.name_scope("encode"):
        self.encoder_inputs = tf.layers.dense(
            inputs=self.encoder_inputs,
            units=self.options['hidden_size'], activation=None, use_bias=True,
            kernel_initializer=tf.keras.initializers.he_normal(seed=None),
            bias_initializer=tf.zeros_initializer(),
            kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
            kernel_constraint=None, bias_constraint=None, trainable=True,
            name=None, reuse=None)
        self.encoder_inputs = tf.layers.batch_normalization(self.encoder_inputs,
                                                            axis=-1,
                                                            momentum=0.99,
                                                            epsilon=0.001,
                                                            center=True,
                                                            scale=True,
                                                            beta_initializer=tf.zeros_initializer(),
                                                            gamma_initializer=tf.ones_initializer(),
                                                            moving_mean_initializer=tf.zeros_initializer(),
                                                            moving_variance_initializer=tf.ones_initializer(),
                                                            training=self.is_training,
                                                            trainable=True,
                                                            renorm=False,
                                                            renorm_momentum=0.99)
        # Prepare inputs to the layer stack by adding positional encodings and
        # applying dropout.
        # embedded_inputs = self.embedding_softmax_layer(inputs)
        inputs_padding = transformer_model_utils.get_padding(tf.cast(
            tf.reduce_max(100 * self.encoder_inputs, [-1]),
            dtype=tf.int32))

        with tf.name_scope("add_pos_encoding"):
            length = tf.shape(self.encoder_inputs)[1]
            pos_encoding = transformer_model_utils.get_position_encoding(
                length, self.options["hidden_size"])
            encoder_inputs = self.encoder_inputs + pos_encoding

        if self.is_training:
            encoder_inputs = tf.nn.dropout(
                encoder_inputs, 1 - self.options["layer_postprocess_dropout"])

        return self.encoder_stack(encoder_inputs, self.attention_bias, inputs_padding)

def _encode_back(self):
    with tf.variable_scope('passage_encoding'):
        self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)
    with tf.variable_scope('question_encoding'):
        self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)
    if self.use_dropout:
        self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)
        self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)

def encode(self, x, x_len):
    # Check to see if batch_size parameter is fixed or based on input batch
    cur_batch_size = x.size()[1]
    encode_init_state = self.encoder.initialize_hidden_state(cur_batch_size)
    encoder_state, encoder_outputs = self.encoder.forward(x, encode_init_state, x_len)

    return encoder_outputs, encoder_state

def _encode(self):
    with tf.variable_scope('encoding'):
        self.sep_p_encodes, _ = bilstm_layer(self.p_emb, self.p_length, self.hidden_size)
        tf.get_variable_scope().reuse_variables()
        self.sep_q_encodes, _ = bilstm_layer(self.q_emb, self.q_length, self.hidden_size)
    if self.use_dropout:
        self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, 1 - self.dropout)
        self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, 1 - self.dropout)

def _add_input_encoder(self, inputs, seq_len):
    with tf.variable_scope("encoder"):
        cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
        cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
        ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(
            cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)

    return fw_states, bw_states, final_fw, final_bw

def encode_input(self, x_tensor, inp_lens_tensor):
    input_emb = self.input_emb.forward(x_tensor)
    enc_output_each_word, enc_context_mask, enc_final_states = self.encoder(input_emb, inp_lens_tensor)
    enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))
    # print('lest go', enc_final_states_reshaped[1].shape)
    return enc_output_each_word, enc_context_mask, enc_final_states_reshaped
def encode(self, inputs, masks):
    with tf.variable_scope("encoder") as scope_encoder:
        # compute sequence length
        sequence_lengths = tf.reduce_sum(masks, axis=1)
        # create a forward cell
        fw_cell = tf.contrib.rnn.LSTMCell(self.size)

        # pass the cells to bilstm and create the bilstm
        bw_cell = tf.contrib.rnn.LSTMCell(self.size)
        output, final_state = tf.nn.bidirectional_dynamic_rnn(fw_cell,
                                                              bw_cell, inputs,
                                                              sequence_length=sequence_lengths,
                                                              dtype=tf.float32,
                                                              parallel_iterations=256)
        output_lstm = tf.concat([output[0], output[1]], axis=-1)
        final_state_lstm = tf.concat([final_state[0], final_state[1]], axis=-1)
        return output_lstm, final_state_lstm

def _add_encoder(self, encoder_inputs, seq_len):
    with tf.variable_scope('encoder'):
        cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
        cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
        (encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(
            cell_fw, cell_bw, encoder_inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)
        encoder_outputs = tf.concat(axis=2, values=encoder_outputs)  # concatenate the forwards and backwards states
    return encoder_outputs, fw_st, bw_st

def encode(self, state):
    raise NotImplementedError
def build_encoder(self):
    with tf.variable_scope("encoder") as scope:
        length1 = tf.to_int32(tf.reduce_sum(self.encode_mask1, 1), name="length1")

        if self.config.bidirectional_encoder:
            if self.config.encoder_dim % 2:
                raise ValueError(
                    "encoder_dim must be even when using a bidirectional encoder.")
            num_units = self.config.encoder_dim // 2
            cell_fw = self._initialize_gru_cell(num_units)  # Forward encoder
            cell_bw = self._initialize_gru_cell(num_units)  # Backward encoder
            _, states = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=cell_fw,
                cell_bw=cell_bw,
                inputs=self.encode_emb1,
                sequence_length=length1,
                dtype=tf.float32,
                scope=scope)
            thought_vectors1 = tf.concat(states, 1, name="thought_vectors1")
        else:
            cell = self._initialize_gru_cell(self.config.encoder_dim)
            _, state = tf.nn.dynamic_rnn(
                cell=cell,
                inputs=self.encode_emb1,
                sequence_length=length1,
                dtype=tf.float32,
                scope=scope)
            # Use an identity operation to name the Tensor in the Graph.
            thought_vectors1 = tf.identity(state, name="thought_vectors1")

        scope.reuse_variables()

        length2 = tf.to_int32(tf.reduce_sum(self.encode_mask2, 1), name="length2")

        if self.config.bidirectional_encoder:
            if self.config.encoder_dim % 2:
                raise ValueError(
                    "encoder_dim must be even when using a bidirectional encoder.")
            num_units = self.config.encoder_dim // 2
            cell_fw = self._initialize_gru_cell(num_units)  # Forward encoder
            cell_bw = self._initialize_gru_cell(num_units)  # Backward encoder
            _, states = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=cell_fw,
                cell_bw=cell_bw,
                inputs=self.encode_emb2,
                sequence_length=length2,
                dtype=tf.float32,
                scope=scope)
            thought_vectors2 = tf.concat(states, 1, name="thought_vectors2")
        else:
            cell = self._initialize_gru_cell(self.config.encoder_dim)
            _, state = tf.nn.dynamic_rnn(
                cell=cell,
                inputs=self.encode_emb2,
                sequence_length=length2,
                dtype=tf.float32,
                scope=scope)
            # Use an identity operation to name the Tensor in the Graph.
            thought_vectors2 = tf.identity(state, name="thought_vectors2")

    self.thought_vectors1 = thought_vectors1
    self.thought_vectors2 = thought_vectors2

def encoder_decoder_archi_gan(inputs, is_train):
    encoder_layers = []
    encoded = inputs
    encoder_layers.append(encoded)

    for i in range(config.encoder_layers):
        encoded = encoder_conv_block_gan(encoded, i, is_train)
        encoder_layers.append(encoded)

    encoder_layers.reverse()

    decoded = encoder_layers[0]

    for i in range(config.encoder_layers):
        decoded = decoder_conv_block_gan(decoded, encoder_layers[i + 1], i, is_train)

    return decoded

def encoder(self, tensor):
    with tf.variable_scope("encoder"):
        tensor = tf.nn.embedding_lookup(self.embedding, tensor)
        cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units)
        outputs, state = tf.nn.dynamic_rnn(cell, tensor, sequence_length=self.seq_len, dtype=tf.float32)
        output = outputs[:, -1, :]
        output = tf.nn.l2_normalize(output, -1)

    return output

def forward(self, input, hidden, give_gates=False, debug=False):
    emb = self.encoder(input)
    if emb.dim() < 3:
        emb = emb.unsqueeze(0)

    if give_gates:
        output, hidden, extras = self.rnn(emb, hidden, give_gates)
    else:
        output, hidden = self.rnn(emb, hidden)

    # decoded = self.softmax(self.decoder(output))
    decoded = self.decoder(output)

    if give_gates:
        if debug:
            return decoded, hidden, extras, emb
        else:
            return decoded, hidden, extras
    else:
        if debug:
            return decoded, hidden, emb
        else:
            return decoded, hidden

def decode(self):
    decoder_input = Input(shape=self.input_decoder_shape, batch_shape=self.input_batch_decoder_shape)
    ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)

    if self.hparams.Masking is True:
        mask_decoder_input = Masking(mask_value=0)(decoder_input)
        mask_ppg_input = Masking(mask_value=0)(ppg_input)
        prenet_output = self.PreNet(mask_decoder_input)
        encoder_input = self.Encoder(mask_ppg_input)
        decoder_mask = None
    else:
        decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)
        prenet_output = self.PreNet(decoder_input)
        encoder_input = self.Encoder(ppg_input, decoder_mask)

    rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])
    # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))
    # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))
    for i in range(self.hparams.Tacotron_decoder_layers):
        rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)

    # feeding by self.states is unhelpful in training, since we don't stop the rnn during epochs,
    # but it is important in generation since each fit's states will be reset to zeros
    rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])
    decoder_output = self.Linear_projection(rnn_output)
    if self.hparams.Tacotron_postnet is True:
        residual_output = decoder_output
        for i in range(self.hparams.PostNet_layers):
            residual_output = self.PostNet_Conv1D[i](residual_output)
            residual_output = self.PostNet_BatchNorm[i](residual_output)
            residual_output = self.PostNet_dropout_list[i](residual_output)
        decoder_output = Add()([decoder_output, residual_output])
    return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)
def encode_sequence(sequence, rnns, embedder, dropout_amount=0.):
    batch_size = 1
    layer_states = []
    for rnn in rnns:
        hidden_size = rnn.weight_hh.size()[1]

        # h_0 of shape (batch, hidden_size)
        # c_0 of shape (batch, hidden_size)
        if rnn.weight_hh.is_cuda:
            h_0 = torch.cuda.FloatTensor(batch_size, hidden_size).fill_(0)
            c_0 = torch.cuda.FloatTensor(batch_size, hidden_size).fill_(0)
        else:
            h_0 = torch.zeros(batch_size, hidden_size)
            c_0 = torch.zeros(batch_size, hidden_size)

        layer_states.append((h_0, c_0))

    outputs = []
    for token in sequence:
        rnn_input = embedder(token)
        (cell_states, hidden_states), output, layer_states = forward_one_multilayer(
            rnns, rnn_input, layer_states, dropout_amount)

        outputs.append(output)

    return (cell_states, hidden_states), outputs

def _encode(self, data: mx.sym.Symbol, data_length: mx.sym.Symbol, seq_len: int) -> mx.sym.Symbol:
    # (seq_len, batch_size, num_embed)
    data_reverse = mx.sym.SequenceReverse(data=data, sequence_length=data_length,
                                          use_sequence_length=True)
    # (seq_length, batch, cell_num_hidden)
    hidden_forward, _, _ = self.forward_rnn.encode(data, data_length, seq_len)
    # (seq_length, batch, cell_num_hidden)
    hidden_reverse, _, _ = self.reverse_rnn.encode(data_reverse, data_length, seq_len)
    # (seq_length, batch, cell_num_hidden)
    hidden_reverse = mx.sym.SequenceReverse(data=hidden_reverse, sequence_length=data_length,
                                            use_sequence_length=True)
    # (seq_length, batch, 2 * cell_num_hidden)
    hidden_concat = mx.sym.concat(hidden_forward, hidden_reverse, dim=2, name="%s_rnn" % self.prefix)

    return hidden_concat

def encode_input_for_decoder(x_tensor, inp_lens_tensor, model_input_emb: EmbeddingLayer, model_enc: RNNEncoder):
    input_emb = model_input_emb.forward(x_tensor)
    (enc_output_each_word, enc_context_mask, enc_final_states) = model_enc.forward(input_emb, inp_lens_tensor)
    enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))
    return (enc_output_each_word, enc_context_mask, enc_final_states_reshaped)

def run_encoder(self, sess, batch):
    feed_dict = self._make_feed_dict(batch, just_enc=True)
    (enc_states, dec_in_state, global_step) = sess.run(
        [self._enc_states, self._dec_in_state, self.global_step], feed_dict)  # run the encoder

    # dec_in_state is an LSTMStateTuple of shape ([batch_size, hidden_dim], [batch_size, hidden_dim]).
    # Given that the batch is a single example repeated, dec_in_state is identical across the batch,
    # so we just take the top row.
    dec_in_state = tf.contrib.rnn.LSTMStateTuple(dec_in_state.c[0], dec_in_state.h[0])
    return enc_states, dec_in_state

def encoder_one_way(self, cell, x, seq_len, init_state=None):
    # Output is the outputs at all time steps, state is the last state
    with tf.variable_scope("dynamic_rnn"):
        outputs, state = tf.nn.dynamic_rnn(
            cell, x, sequence_length=seq_len, initial_state=init_state,
            dtype=self.floatX)
    # state is a StateTuple class with properties StateTuple.c and StateTuple.h
    return outputs, state

def encode(self, input):
    h = np.zeros(self.hidden_size)

    preactivation = np.dot(self.W.T, input) + self.b
    sigmoid(preactivation, h)

    return h

def encode(self, sequence):
    fwd_states, bwd_states = self.encode_fwd_bwd(sequence)
    bwd_states = bwd_states[::-1]
    return [dy.concatenate([fwd_states[i], bwd_states[i]]) for i in range(len(fwd_states))]
def forward(self, inp, state):
    emb = self.drop(self.encoder(inp))
    y, state_next = self.rnn(emb, state)
    y = self.drop(y)
    y = self.decoder(y)
    return y, state_next

def forward_step(self, prev_embed, encoder_hidden, src_mask, proj_key, hidden):
    # compute context vector using attention mechanism;
    # we only want the hidden state, not the cell state of the lstm (CZW), hence hidden[0]
    query = hidden[0][-1].unsqueeze(1)  # [#layers, B, D] -> [B, 1, D]
    context, attn_probs = self.attention(
        query=query, proj_key=proj_key,
        value=encoder_hidden, mask=src_mask)

    # update rnn hidden state
    rnn_input = torch.cat([prev_embed, context], dim=2)
    output, hidden = self.rnn(rnn_input, hidden)

    pre_output = torch.cat([prev_embed, output, context], dim=2)
    pre_output = self.dropout_layer(pre_output)
    pre_output = self.pre_output_layer(pre_output)

    return output, hidden, pre_output

def encoder(enc_input, attn_bias, n_layer, n_head,
            d_key, d_value, d_model, d_inner_hid, pos_enc,
            prepostprocess_dropout, attention_dropout,
            relu_dropout, preprocess_cmd='n',
            postprocess_cmd='da'):
    for i in range(n_layer):
        enc_output = encoder_layer(enc_input, attn_bias, n_head,
                                   d_key, d_value, d_model, d_inner_hid, pos_enc,
                                   prepostprocess_dropout, attention_dropout, relu_dropout,
                                   preprocess_cmd, postprocess_cmd)
        enc_input = enc_output
    enc_output = pre_process_layer(enc_output,
                                   preprocess_cmd, prepostprocess_dropout)
    return enc_output

def build_sentence_encoder(self, raw_encoder_input, input_seq_len):
    with tf.variable_scope('text_encoder'):
        self.embedding = tf.get_variable(
            "embedding", initializer=tf.random_uniform(
                [self.config.word_voc_size,
                 self.config.word_embedding_space_size],
                -self.config.TRAIN.SENCODER.none_rnn_para_initial_max,
                self.config.TRAIN.SENCODER.none_rnn_para_initial_max))
        inputs = tf.nn.embedding_lookup(self.embedding, raw_encoder_input)

        # now it is [MAX_SEQ_LENGTH, batch_size, embedding_length]
        input_batch_order = tf.transpose(inputs, [1, 0, 2])

        # now it is [MAX_SEQ_LENGTH * batch_size, embedding_length]
        input_batch_order = tf.reshape(
            input_batch_order, [-1, self.config.word_embedding_space_size])

        # now it is a LIST OF [BATCH_SIZE, embedding_length]
        encoder_input = tf.split(0, self.config.seq_max_len, input_batch_order)

        # the encoder part
        encode_gru_cell = tf.nn.rnn_cell.GRUCell(self.config.encoder_dimension)
        # The state is the final state; the output is a list of tensors, which we do not need.
        _, sentence_rep = tf.nn.rnn(encode_gru_cell, encoder_input,
                                    dtype=tf.float32,
                                    sequence_length=input_seq_len)
        self.sentence_rep = sentence_rep
        self.sentence_rep = tf.nn.l2_normalize(self.sentence_rep, 1)
        return

def encode(self, src_seq, src_lens):
    src_embed = self.word_embedding(src_seq)
    src_encodings, final_states = self.encoder_lstm(src_embed, src_lens)

    return src_encodings, final_states, src_embed
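# forward_step above delegates to an attention module that turns a decoder
# query and the encoder states into a context vector. A minimal dot-product
# version of that contract, with made-up shapes:
import torch
import torch.nn.functional as F

B, S, D = 2, 6, 8
query = torch.randn(B, 1, D)            # one decoder step
encoder_hidden = torch.randn(B, S, D)   # all encoder steps
scores = query @ encoder_hidden.transpose(1, 2)   # [B, 1, S]
attn_probs = F.softmax(scores, dim=-1)
context = attn_probs @ encoder_hidden             # [B, 1, D]
print(context.shape)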
def _add_seq2seq(self):
    mode = self._mode
    vsize = self._vocab.size()  # size of the vocabulary

    with tf.variable_scope('seq2seq'):
        # Some initializers
        self.rand_unif_init = tf.random_uniform_initializer(-config.rand_unif_init_mag, config.rand_unif_init_mag, seed=123)
        self.trunc_norm_init = tf.truncated_normal_initializer(stddev=config.trunc_norm_init_std)

        # Add embedding matrix (shared by the encoder and decoder inputs)
        with tf.variable_scope('embedding'):
            embedding = tf.get_variable('embedding', [vsize, config.emb_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
            if mode == "train":
                self._add_emb_vis(embedding)  # add to tensorboard
            emb_enc_inputs = tf.nn.embedding_lookup(embedding, self._enc_batch)  # tensor with shape (batch_size, max_enc_steps, emb_size)
            emb_dec_inputs = tf.nn.embedding_lookup(embedding, self._dec_batch)  # tensor with shape (batch_size, max_dec_steps, emb_size)
            # emb_dec_inputs = [tf.nn.embedding_lookup(embedding, x) for x in tf.unstack(self._dec_batch, axis=1)]  # list length max_dec_steps containing shape (batch_size, emb_size)

        # Add the encoder.
        enc_fw_states, enc_bw_states, enc_fw, enc_bw = self._add_input_encoder(emb_enc_inputs, self._enc_lens)

        print("Encoder FW", enc_fw_states.shape)
        print("Encoder BW", enc_bw_states.shape)
        raise Exception("testing mode")

        # reshape encoder states from [batch_size, input_size, hidden_dim] to [batch_size, input_size * hidden_dim]
        enc_fw_states = tf.reshape(enc_fw_states, [config.batch_size, config.hidden_dim * tf.shape(enc_fw_states)[1]])
        enc_bw_states = tf.reshape(enc_bw_states, [config.batch_size, config.hidden_dim * tf.shape(enc_bw_states)[1]])

        # python run.py --mode=decode --data_path=data/chunked/train_1/train_1_*.bin --vocab_path=data/vocab_1 --exp_name=full1isto1

        # Add the decoder.
        dec_fw_states, dec_bw_states = self._add_input_decoder(emb_dec_inputs, self._dec_lens, enc_fw, enc_bw)

        # reshape decoder states from [batch_size, input_size, hidden_dim] to [batch_size, input_size * hidden_dim]
        dec_fw_states = tf.reshape(dec_fw_states, [config.batch_size, config.hidden_dim * tf.shape(dec_fw_states)[1]])
        dec_bw_states = tf.reshape(dec_bw_states, [config.batch_size, config.hidden_dim * tf.shape(dec_bw_states)[1]])
        # print("Decoder FW", dec_fw_states.shape)
        # print("Decoder BW", dec_bw_states.shape)

        # enc_c = tf.concat(axis=1, values=[enc_fw.c, enc_bw.c])
        # enc_h = tf.concat(axis=1, values=[enc_fw.h, enc_bw.h])
        # dec_c = tf.concat(axis=1, values=[dec_fw.c, dec_bw.c])
        # dec_h = tf.concat(axis=1, values=[dec_fw.h, dec_bw.h])

        final_encoding = tf.concat(axis=1, values=[enc_fw_states, enc_bw_states, dec_fw_states, dec_bw_states])
        # print("Final encoding", final_encoding.shape)
        # raise Exception("Test")
        dims_final_enc = tf.shape(final_encoding)

        """
        # convo_input = tf.concat(axis=1, values=[enc_c, enc_h, dec_c, dec_h])
        input_layer = tf.reshape(final_encoding, [config.batch_size, dims_final_enc[1], 1])
        print("Convolution input shape", input_layer.shape)

        conv1 = tf.layers.conv1d(
            inputs=input_layer,
            filters=8,
            kernel_size=5,
            padding="same",
            activation=tf.nn.relu)
        conv1 = tf.layers.batch_normalization(conv1)
        print("Convolution1 output shape", conv1.shape)

        pool1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=2, strides=2)
        print("Pool1 output shape", pool1.shape)

        conv2 = tf.layers.conv1d(
            inputs=pool1,
            filters=16,
            kernel_size=5,
            padding="same",
            activation=tf.nn.relu)

        conv2 = tf.layers.batch_normalization(conv2)
        print("Convolution2 output shape", conv2.shape)

        pool2 = tf.layers.max_pooling1d(inputs=conv2, pool_size=2, strides=2)
        print("Pool2 output shape", pool2.shape)

        dims_pool2 = tf.shape(pool2)

        pool2_flat = tf.reshape(pool2, [config.batch_size, dims_pool2[1] * 16])
        print("Pool2_flat output shape", pool2_flat.shape)
        dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
        """
        # raise Exception("testing mode")

        # dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode == "train")
        # print("Dense output shape", dense.shape)

        # raise Exception("Just testing")
        # Add the output projection to obtain the vocabulary distribution
        with tf.variable_scope('output_projection'):
            w = tf.get_variable('w', [dims_final_enc[1], 2], dtype=tf.float32, initializer=self.trunc_norm_init)
            bias_output = tf.get_variable('bias_output', [2], dtype=tf.float32, initializer=self.trunc_norm_init)
            # concatenate abstract and article outputs [batch_size, hidden_dim*4]

            # get classification output [batch_size, 1]; matmul acts on the last axis by default
            self._logits = tf.matmul(final_encoding, w) + bias_output
            # self._logits = tf.layers.dense(final_encoding, 2, kernel_initializer=self.trunc_norm_init, bias_initializer=self.trunc_norm_init)
            # self._prob = tf.nn.softmax(logits, "class_prob")

        if mode in ['train', 'eval']:
            # Calculate the loss
            with tf.variable_scope('loss'):
                # self._prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self._targets)
                # class_weights = tf.constant([0.1, 5.])
                self._loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._targets, logits=self._logits))
                # self._loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=self._targets, logits=self._logits, pos_weight=class_weights))
                tf.summary.scalar('loss', self._loss)

        # if mode == "decode":

def encode(self, input_):
    return self.encoder(input_)

def encode(self, seq):

def forward(self, x):
    # Get results of encoder network
    q = self.encode_nn(x)

    return q

def pretrain_forward(self, inp):
    return self.encoder(inp)
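# pretrain_forward above only runs the encoder; pretraining usually pairs it
# with a decoder and a reconstruction loss. A minimal hypothetical sketch:
import torch
import torch.nn as nn

encoder = nn.Linear(16, 4)
decoder = nn.Linear(4, 16)
opt = torch.optim.Adam(list(encoder.parameters()) + list(decoder.parameters()))
x = torch.randn(32, 16)
loss = nn.functional.mse_loss(decoder(encoder(x)), x)  # reconstruction objective
opt.zero_grad()
loss.backward()
opt.step()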
def build_encoder(self):
    # some general variables concerning the current processed batch
    batch_size = self.image_embeddings.get_shape()[0]
    sentence_length = self.config.sentence_length  # == self.seq_embeddings.get_shape()[2]
    max_text_length = tf.shape(self.seq_embeddings)[1]  # maximum text length for this batch

    # This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the
    # modified LSTM in the "Show and Tell" paper has no biases and outputs
    # new_c * sigmoid(o).

    # create an lstm cell that will process a sentence (a sequence of tokens)
    lstm_cell_sentences = tf.nn.rnn_cell.BasicLSTMCell(
        num_units=self.config.sentence_embedding_size, state_is_tuple=True)  # num_units describes the size of the internal memory cell (but it is also the output size)

    # we also need an lstm cell that will process a sequence of sentences (a text)
    lstm_cell_text = tf.nn.rnn_cell.BasicLSTMCell(
        num_units=self.config.article_embedding_size, state_is_tuple=True)

    if self.mode == "train":
        # to avoid overfitting we use dropout for all lstm cells
        lstm_cell_sentences = tf.nn.rnn_cell.DropoutWrapper(
            lstm_cell_sentences,
            input_keep_prob=self.config.dropout_keep_prob_encoder,
            output_keep_prob=self.config.dropout_keep_prob_encoder)
        lstm_cell_text = tf.nn.rnn_cell.DropoutWrapper(
            lstm_cell_text,
            input_keep_prob=self.config.dropout_keep_prob_encoder,
            output_keep_prob=self.config.dropout_keep_prob_encoder)

    with tf.variable_scope("lstm_sentence_encode", initializer=self.initializer) as lstm_scope:
        # we use the image embedding only to feed the text lstm with image information;
        # the sentences are initialized with a zero state

        # Set the initial LSTM state.
        initial_state_sentences = lstm_cell_sentences.zero_state(
            batch_size=batch_size, dtype=tf.float32)

        # At first, generate a mask for all sentences.
        # This will allow us to specify the individual length of each sentence.
        # These lengths are fed into tf.nn.dynamic_rnn, which will produce zero outputs for
        # all padded tokens.
        # Note that self.input_seqs contains a zero for each padded token (zero is not in the vocabulary)
        zeros = tf.zeros_like(self.input_seqs)
        self.sentence_mask = tf.select(tf.greater(self.input_seqs, zeros), tf.ones_like(self.input_seqs), zeros)  # type int64

        # self.sentence_mask = tf.cast(self.sentence_mask, tf.int32)

        # In the following, we run a hierarchical approach:
        # Tokens of a sentence are mapped onto an embedding vector through lstm_cell_sentences.
        # The resulting sentence embeddings are passed through lstm_cell_text to gather text embeddings.

        # Since we have to generate an embedding for each sentence in a text, we need a loop somehow.
        # But the number of sentences in a text is dynamically determined for each batch (max_text_length).
        # Therefore, we cannot use unpack and a python loop. Instead we use the while_loop control method of TF.

        # The output of lstm_cell_sentences will be stored in this matrix, but only
        # the lstm output of the last not-padded word in a sentence
        lstm_outputs_sentences = tf.zeros(tf.pack([batch_size, max_text_length, self.config.sentence_embedding_size]))  # tf.pack is a hotfix, since a normal array passing would not work as max_text_length is a tensor
        # lstm_outputs_sentences = tf.zeros([batch_size, max_text_length, self.config.embedding_size])

        # Allow the LSTM variables to be reused.
        # lstm_scope.reuse_variables()

        # now we compute the lstm outputs for each token sequence (sentence) in the while loop body
        def body(i, n, los):
            """Compute lstm outputs for sentences i (sentences with index i in text) of current batch.

            Inputs:
              i: control variable of loop (runs from 0 to n-1)
              n: max_text_length
              los: lstm_outputs_sentences

            Outputs:
              i: incremented
              n: unchanged
              los: input with updated values in index i of second dimension
            """
            # extract correct lstm input (i-th sentence from each batch)
            # es = tf.slice(self.seq_embeddings, [0, i, 0, 0], [batch_size, 1, sentence_length, self.config.word_embedding_size])
            es = tf.slice(self.seq_embeddings, tf.pack([0, i, 0, 0]), tf.pack([batch_size, 1, sentence_length, self.config.word_embedding_size]))
            es = tf.squeeze(es, axis=1)  # get rid of sentence index dimension
            es = tf.reshape(es, tf.pack([batch_size, sentence_length, self.config.word_embedding_size]))  # dirty hack, to ensure that shape is known (needed by further methods)

            # extract masks of sentences i
            sm = tf.slice(self.sentence_mask, tf.pack([0, i, 0]), tf.pack([batch_size, 1, sentence_length]))
            sm = tf.squeeze(sm, axis=1)
            # compute sentence lengths
            sm = tf.reduce_sum(sm, 1)
            sm = tf.reshape(sm, tf.pack([batch_size]))  # dirty hack, to ensure that shape is known

            # feed i-th sentences through lstm
            lstm_outputs_sentences_tmp, _ = tf.nn.dynamic_rnn(cell=lstm_cell_sentences,
                                                              inputs=es,
                                                              sequence_length=sm,
                                                              initial_state=initial_state_sentences,
                                                              dtype=tf.float32,
                                                              scope=lstm_scope)
            # lstm_outputs_sentences_tmp has shape (batch_size, sentence_length, sentence_embedding_size)
            # lstm_outputs_sentences_tmp contains an output for each token in the sentences, but we are only
            # interested in the output of the last token of a sentence.

            # Now we extract only those outputs (output of the last token that is not a padded token) from lstm_outputs_sentences_tmp

            # sm contains the length of each sentence, meaning we can access the right output with the index (length - 1).
            # Note that the actual masks were reduced to lengths in the above statements.
            sm = tf.sub(sm, 1)  # the sentence mask now contains the index of the last token in each sentence
            # Sentences that have zero tokens (padded sentences) now have an index of -1. We have to set them back to 0,
            # which are simply zero outputs of the lstm
            zeros = tf.zeros_like(sm)
            sm = tf.select(tf.less(sm, zeros), zeros, sm)

            # We use tf.gather_nd to extract the desired outputs from lstm_outputs_sentences_tmp.
            # Therefore, we have to produce the "indices" parameter of this method first.
            # The elements of the last dimension in this matrix determine the indices for gathering slices from lstm_outputs_sentences.
            # Hence the innermost dimension must be a 2D vector: (batch, token) <- index of desired embedding in lstm_outputs_sentences
            # for sentence with index (batch, i) in self.seq_embeddings

            # We generate a separate matrix for each of the two indices and concatenate them at the end
            sm = tf.expand_dims(sm, 1)
            sm = tf.cast(sm, dtype=tf.int32)

            # use tf.range to generate the equivalent of sm for batch indices
            # batch_indices = tf.range(0, batch_size)
            batch_indices = tf.constant(np.arange(int(batch_size)), dtype=tf.int32)
            batch_indices = tf.expand_dims(batch_indices, 1)

            # then use tf.concat to generate the actual tensor that can be used to gather the right outputs from lstm_outputs_sentences_tmp
            gather_indices = tf.concat(1, [batch_indices, sm])

            # now we can consider the elements (of the last dimension) of gather_indices as indices for the correct output
            lstm_outputs_sentences_tmp = tf.gather_nd(lstm_outputs_sentences_tmp, gather_indices)
            lstm_outputs_sentences_tmp = tf.expand_dims(lstm_outputs_sentences_tmp, 1)

            # add the current output to our list of outputs
            los = tf.concat(1, [tf.slice(los, tf.pack([0, 0, 0]), tf.pack([batch_size, i, self.config.sentence_embedding_size])),
                                lstm_outputs_sentences_tmp,
                                tf.slice(los, tf.pack([0, i + 1, 0]), tf.pack([batch_size, n - i - 1, self.config.sentence_embedding_size]))])

            return i + 1, n, los

        def condition(i, n, los):
            """Break condition for while loop

            Inputs:
              i: control variable of loop (runs from 0 to n-1)
              n: max_text_length
              los: lstm_outputs_sentences

            Outputs:
              True, if body should be run.
            """
            return i < n

        result = tf.while_loop(condition, body, loop_vars=[0, max_text_length, lstm_outputs_sentences])
        lstm_outputs_sentences = result[2]

    with tf.variable_scope("lstm_text_encode", initializer=self.initializer) as lstm_scope:

        # Feed the image embeddings to set the initial LSTM state.
        zero_state_text = lstm_cell_text.zero_state(
            batch_size=batch_size, dtype=tf.float32)
        _, initial_state_text = lstm_cell_text(self.image_embeddings, zero_state_text)

        # Allow the LSTM variables to be reused.
        lstm_scope.reuse_variables()

        # lstm_outputs_sentences has now the last lstm output for each sentence in the batch (output of last unpadded token).
        # Its shape is (batch_size, max_text_length, sentence_embedding_size)
        # Now we use the sentence embeddings to generate text embeddings.
        # Run the batch of sentence embeddings through the LSTM.
        self.sentence_sequence_length = tf.reduce_sum(self.input_mask, 1)
        lstm_outputs_text, _ = tf.nn.dynamic_rnn(cell=lstm_cell_text,
                                                 inputs=lstm_outputs_sentences,
                                                 sequence_length=self.sentence_sequence_length,
                                                 initial_state=initial_state_text,
                                                 dtype=tf.float32,
                                                 scope=lstm_scope)
        # lstm_outputs_text has now the lstm output of each sentence_embedding,
        # where the output of the last unpadded sentence_embedding is considered as the text embedding.
        # Note that we could also call it article embedding, since it comprises the information of the
        # text and the image.
        # Its shape is (batch_size, max_text_length, article_embedding_size)

        # extract the text embedding from lstm_outputs_text

        # sequence_length contains the length of each text, meaning we can access the right output with the index (length - 1)
        last_sentence = tf.sub(self.sentence_sequence_length, 1)  # now contains the index of the last unpadded sentence in each text

        # We use tf.gather_nd to extract the desired outputs from lstm_outputs_text.
        # Therefore, we have to produce the "indices" parameter of this method first.
        # The elements of the last dimension in this matrix determine the indices for gathering slices from lstm_outputs_text.
        # Hence the innermost dimension must be a 2D vector: (batch, sentence)

        # We generate a separate matrix for each of the two indices and concatenate them at the end
        last_sentence = tf.expand_dims(last_sentence, 1)

        # use tf.range to generate the equivalent of sm for batch indices
        batch_indices = tf.range(0, batch_size)
        batch_indices = tf.expand_dims(batch_indices, 1)

        # then use tf.concat to generate the actual tensor that can be used to gather the right outputs from lstm_outputs_text
        gather_indices = tf.concat(1, [batch_indices, last_sentence])

        # now we can consider the elements (of the last dimension) of gather_indices as indices for the correct output
        self.article_embeddings = tf.gather_nd(lstm_outputs_text, gather_indices)

    # As the image information might have gotten lost in the hierarchical rnn, the reader might reconsider it.
    if self.config.reconsider_image:
        with tf.variable_scope("reconsider_image", initializer=self.initializer, reuse=None) as reconsider_image_scope:
            # concat the current article embedding with image_embedding and map them through a fully connected layer onto a new embedding
            article_image_concat = tf.concat(1, [self.article_embeddings, self.image_embeddings])

            self.article_embeddings = tf.contrib.layers.fully_connected(
                inputs=article_image_concat,
                num_outputs=self.config.article_embedding_size,
                activation_fn=tf.nn.relu,  # None, # linear activation
                weights_initializer=self.initializer,
                scope=reconsider_image_scope)

    if self.mode == "train":
        # to avoid overfitting we use dropout for all fully connected layers
        self.article_embeddings = tf.nn.dropout(self.article_embeddings, self.config.dropout_keep_prob_encoder)

    # self.article_embeddings now contains the text/article embedding for each article in the batch.
    # Its shape is (batch_size, article_embedding_size)
So these are the variables\\n # (the whole encoder network) that we want to restore/share.\\n self.autoencoder_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\",\n \"def encoder_inference(self, features, states):\\n with tf.name_scope(f\\\"{self.name}_encoder\\\"):\\n outputs = tf.expand_dims(features, axis=0)\\n outputs, new_states = self.encoder.recognize(outputs, states)\\n return tf.squeeze(outputs, axis=0), new_states\",\n \"def encode(data, encoder):\\n # Get the list of hidden depths\\n\\thd = encoder.hidden_depths\\n # Find the middle hidden layer\\n\\tmiddle_layer_index = (len(hd)-1)/2\\n # Initialize empty container for the encoded data\\n\\tdata_encoded = np.zeros((data.shape[0],hd[middle_layer_index]))\\n\\tfor i, d_ in enumerate(data):\\n # feed forward, get all the activations, and just keep\\n # the middle layer, which is the encoding\\n\\t\\tx, z_container, x_container = encoder.ff(d_,True,True)\\n\\t\\tx_encoded = x_container[1+middle_layer_index]\\n\\t\\tdata_encoded[i] = x_encoded\\n\\t#\\n\\treturn data_encoded\",\n \"def dis_encoder_seq2seq(hparams):\\n assert FLAGS.discriminator_model == 'seq2seq_vd'\\n assert hparams.dis_num_layers == 2\\n\\n ## Encoder forward variables.\\n encoder_lstm_w_0 = [\\n v for v in tf.trainable_variables() if v.op.name ==\\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\\n ][0]\\n encoder_lstm_b_0 = [\\n v for v in tf.trainable_variables() if v.op.name ==\\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\\n ][0]\\n encoder_lstm_w_1 = [\\n v for v in tf.trainable_variables() if v.op.name ==\\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\\n ][0]\\n encoder_lstm_b_1 = [\\n v for v in tf.trainable_variables() if v.op.name ==\\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\\n ][0]\\n\\n if FLAGS.data_set == 'ptb':\\n model_str = 'Model'\\n else:\\n model_str = 'model'\\n\\n variable_mapping = {\\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\\n encoder_lstm_w_0,\\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\\n encoder_lstm_b_0,\\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\\n encoder_lstm_w_1,\\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\\n encoder_lstm_b_1\\n }\\n return variable_mapping\",\n \"def encode(self, n_dimension=2, learning_rate=0.01, training_epochs=10, batch_size=400):\\n X = tf.placeholder(tf.float32,[None, self.n_input])\\n tf.set_random_seed(50)\\n \\n \\n n_hidden_layer1 = int(math.pow(2, int(2*math.log(self.n_input,2)/3+math.log(n_dimension,2)/3)))\\n n_hidden_layer2 = int(math.pow(2, int(math.log(self.n_input,2)/3+2*math.log(n_dimension,2)/3)))\\n n_hidden_layer3 = n_dimension\\n \\n weights = {\\n 'encoder_w1':tf.Variable(tf.random_normal([self.n_input, n_hidden_layer1])),\\n 'encoder_w2':tf.Variable(tf.random_normal([n_hidden_layer1, n_hidden_layer2])),\\n 'encoder_w3':tf.Variable(tf.random_normal([n_hidden_layer2, n_hidden_layer3])),\\n \\n 'decoder_w1':tf.Variable(tf.random_normal([n_hidden_layer3, n_hidden_layer2])),\\n 'decoder_w2':tf.Variable(tf.random_normal([n_hidden_layer2, n_hidden_layer1])),\\n 'decoder_w3':tf.Variable(tf.random_normal([n_hidden_layer1, self.n_input])),\\n }\\n \\n biases = {\\n 'encoder_b1':tf.Variable(tf.random_normal([n_hidden_layer1])),\\n 'encoder_b2':tf.Variable(tf.random_normal([n_hidden_layer2])),\\n 'encoder_b3':tf.Variable(tf.random_normal([n_hidden_layer3])),\\n \\n 
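# The hierarchical encoder above builds (batch, index) pairs by hand for
# tf.gather_nd, using TF<=0.12-era ops (tf.sub, tf.select, tf.pack). A minimal
# sketch of the same "take the output of the last unpadded step" trick with
# modern TF2 equivalents (tf.subtract/tf.where/tf.stack); all shapes and
# values here are invented for illustration, not taken from the original:
import tensorflow as tf

outputs = tf.random.normal([4, 7, 16])                      # (batch, time, hidden)
lengths = tf.constant([7, 3, 5, 1])                         # valid steps per sequence
last_idx = tf.maximum(lengths - 1, 0)                       # clamp empty sequences to index 0
gather_indices = tf.stack([tf.range(4), last_idx], axis=1)  # (batch, 2) -> (batch, time) pairs
last_outputs = tf.gather_nd(outputs, gather_indices)        # (batch, hidden)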
'decoder_b1':tf.Variable(tf.random_normal([n_hidden_layer2])),\\n 'decoder_b2':tf.Variable(tf.random_normal([n_hidden_layer1])),\\n 'decoder_b3':tf.Variable(tf.random_normal([self.n_input])),\\n }\\n \\n \\n def encoder(x):\\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_w1']), biases['encoder_b1']))\\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_w2']), biases['encoder_b2']))\\n layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_w3']), biases['encoder_b3']))\\n \\n return layer_3\\n\\n def decoder(x):\\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_w1']), biases['decoder_b1']))\\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_w2']), biases['decoder_b2']))\\n layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_w3']), biases['decoder_b3']))\\n \\n return layer_3\\n \\n encoder_op = encoder(X)\\n decoder_op = decoder(encoder_op)\\n\\n y_pred = decoder_op\\n y_true = X\\n\\n cost = tf.reduce_mean(tf.pow(y_pred - y_true, 2))\\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)\\n \\n \\n with tf.Session() as sess:\\n init = tf.global_variables_initializer()\\n sess.run(init)\\n n_batch = int(self.data.shape[0]/batch_size)\\n for epoch in tqdm(range(training_epochs)):\\n for batch_idx in range(n_batch):\\n start = batch_idx * batch_size\\n stop = start + batch_size\\n _, encoder_result = sess.run([optimizer, encoder_op], feed_dict={X: self.data[start:stop]})\\n self.X_test = sess.run(encoder_op, feed_dict={X:self.data})\\n self.X_cost = sess.run(cost, feed_dict={X:self.data})\\n \\n return self.X_test, self.X_cost\",\n \"def encode(self, X):\\r\\n return self._encoder.predict(X)\",\n \"def forward(self, input, last_hidden, last_context, encoder_outputs):\\r\\n # input: B x 1 x d, last_hidden: (num_layers * num_directions) x B x h\\r\\n # last_context: B x 1 x h, encoder_outputs: B x S x h\\r\\n\\r\\n # output = embedded\\r\\n rnn_input = torch.cat((input, last_context), 2) # B x 1 x (d + h)\\r\\n output, hidden = self.rnn(rnn_input, last_hidden) # output: B x 1 x h\\r\\n\\r\\n # calculate attention from current RNN state and all encoder outputs; apply to encoder outputs\\r\\n attn_weights = self.attn(output, encoder_outputs) # B x S\\r\\n context = attn_weights.unsqueeze(1).bmm(encoder_outputs) # B x 1 x h\\r\\n\\r\\n # final output layer (next word prediction) using the RNN hidden state and context vector\\r\\n output = f.log_softmax(self.out(torch.cat((context.squeeze(1), output.squeeze(1)), 1)), 1)\\r\\n\\r\\n # Return final output, hidden state, and attention weights (for visualization)\\r\\n return output, hidden, context, attn_weights\",\n \"def encode(self, game_state: ssm.SnakeStateMachine) -> int:\\n state = [e.encode(game_state) for e in self._encoders]\\n return self._state2id[tuple(state)]\",\n \"def _define_encoder(self):\\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\\n nn.BatchNorm1d(5120),\\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\\n nn.BatchNorm1d(2560),\\n nn.Linear(2560, 512, bias=False), nn.SELU(),\\n nn.BatchNorm1d(512),\\n nn.Linear(512, 128, bias=False), nn.SELU(),\\n nn.BatchNorm1d(128),\\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\\n )\",\n \"def forward(self,\\n state,\\n encoder_out=None,\\n encoder_padding_mask=None,\\n incremental_state=None,\\n prev_self_attn_state=None,\\n self_attn_mask=None,\\n self_attn_padding_mask=None,\\n need_attn=False,\\n 
need_head_weights=False):\\n\\n # need_attn must be True if need_head_weights\\n need_attn = True if need_head_weights else need_attn\\n print('encoder padding {}, self padding {}'.format(encoder_padding_mask, self_attn_padding_mask.size()))\\n residual = state.clone()\\n # print('self attention')\\n state, _ = self.self_attn(query=state,\\n key=state,\\n value=state,\\n key_padding_mask=self_attn_padding_mask,\\n need_weights=False,\\n attn_mask=self_attn_mask)\\n state = F.dropout(state, p=self.dropout, training=self.training)\\n state += residual\\n state = self.self_attn_layer_norm(state)\\n\\n residual = state.clone()\\n '''\\n ___QUESTION-6-DESCRIBE-E-START___\\n How does encoder attention differ from self attention? What is the difference between key_padding_mask \\n and attn_mask? If you understand this difference, then why don't we need to give attn_mask here?\\n '''\\n '''\\n The encoder attention is making the target input word pay attention to the source sequence from encoder, while the self attention is making the input word pay attention to the words in other positions of the input sequence.\\n The key_padding mask masks padded tokens ⟨pad⟩ so the model does not attend to these positions, while the attn mask masks the following tokens at each position to ensure the decoder do not look forward into the sequence.\\n In encoder attention, we want the decoder to pay attention to the entire source sequence. The attn mask is not needed to mask the subsequent positions because it is not paying attention to itself.\\n\\n '''\\n # print('encoder attention')\\n state, attn = self.encoder_attn(query=state,\\n key=encoder_out,\\n value=encoder_out,\\n key_padding_mask=encoder_padding_mask,\\n need_weights=need_attn or (not self.training and self.need_attn))\\n '''\\n ___QUESTION-6-DESCRIBE-E-END___\\n '''\\n\\n state = F.dropout(state, p=self.dropout, training=self.training)\\n state += residual\\n state = self.encoder_attn_layer_norm(state)\\n\\n residual = state.clone()\\n state = F.relu(self.fc1(state))\\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\\n state = self.fc2(state)\\n state = F.dropout(state, p=self.dropout, training=self.training)\\n state += residual\\n state = self.final_layer_norm(state)\\n\\n return state, attn\",\n \"def _build_encoder(self, hparams):\\n\\t\\tnum_layers = self.num_encoder_layers\\n\\t\\tnum_redisual_layers = self.num_encoder_residual_layers\\n\\n\\t\\twith tf.variable_scope('encoder') as _:\\n\\t\\t\\tself.encoder_emb_inp = tf.nn.embedding_lookup(self.embedding_encoder, self.encoder_input_data)\\n\\n\\t\\t\\tif hparams.encoder_type == 'uni':\\n\\t\\t\\t\\t_info('num_layers = {} num_residual_layers = {}'.format(num_layers, num_redisual_layers))\\n\\t\\t\\t\\t# 1. build a list of cells\\n\\t\\t\\t\\tcell = self._build_encoder_cell(hparams, num_layers, num_redisual_layers)\\n\\t\\t\\t\\t# 2. 
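# The Q&A above distinguishes key_padding_mask (hide <pad> keys) from
# attn_mask (hide future positions). A small, self-contained PyTorch
# illustration of both masks with torch.nn.MultiheadAttention; sizes and
# lengths are made up for the example:
import torch
import torch.nn as nn

mha = nn.MultiheadAttention(embed_dim=8, num_heads=2)
x = torch.randn(5, 3, 8)                                   # (time, batch, embed)
key_padding_mask = torch.tensor([[False] * 5,
                                 [False] * 3 + [True] * 2,
                                 [False] * 4 + [True] * 1])  # (batch, time), True = pad
attn_mask = torch.triu(torch.ones(5, 5, dtype=torch.bool), diagonal=1)  # causal mask
out, _ = mha(x, x, x, key_padding_mask=key_padding_mask, attn_mask=attn_mask)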
forward\\n\\t\\t\\t\\t# encoder_outputs: [batch, time, hidden]\\n\\t\\t\\t\\t# encoder_state: ([batch, hidden] for _ in range(layers))\\n\\t\\t\\t\\tencoder_outputs, encoder_state = tf.nn.dynamic_rnn(\\n\\t\\t\\t\\t\\tcell,\\n\\t\\t\\t\\t\\tself.encoder_emb_inp,\\n\\t\\t\\t\\t\\tdtype=self.dtype,\\n\\t\\t\\t\\t\\tsequence_length=self.seq_length_encoder_input_data,\\n\\t\\t\\t\\t\\tswap_memory=True)\\n\\t\\t\\telif hparams.encoder_type == 'bi':\\n\\t\\t\\t\\tif not num_layers % 2 == 0:\\n\\t\\t\\t\\t\\t_error('Bi-directional requires num_layers={} should be divided by 2'.format(num_layers))\\n\\t\\t\\t\\t\\traise ValueError\\n\\t\\t\\t\\tnum_bi_layers = int(num_layers / 2)\\n\\t\\t\\t\\tnum_bi_residual_layers = num_bi_layers - 1\\n\\t\\t\\t\\t_info(' num_bi_layers={} num_bi_residual_layers={}'.format(num_bi_layers, num_bi_residual_layers))\\n\\n\\t\\t\\t\\tcell_fw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\\n\\t\\t\\t\\tcell_bw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\\n\\n\\t\\t\\t\\t# bi_outputs: (fw, bw): fw: [batch, seq, hidden]\\n\\t\\t\\t\\t# bi_state: (fw, bw): fw : [[batch, hidden] for _ in range(layers)]\\n\\t\\t\\t\\tbi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(\\n\\t\\t\\t\\t\\tcell_fw,\\n\\t\\t\\t\\t\\tcell_bw,\\n\\t\\t\\t\\t\\tself.encoder_emb_inp,\\n\\t\\t\\t\\t\\tdtype=self.dtype,\\n\\t\\t\\t\\t\\tsequence_length=self.seq_length_encoder_input_data,\\n\\t\\t\\t\\t\\tswap_memory=True)\\n\\n\\t\\t\\t\\tif num_bi_layers == 1:\\n\\t\\t\\t\\t\\tencoder_state = bi_state\\n\\t\\t\\t\\telse:\\n\\t\\t\\t\\t\\tencoder_state = []\\n\\t\\t\\t\\t\\tfor layer_id in range(num_bi_layers):\\n\\t\\t\\t\\t\\t\\tencoder_state.append(bi_state[0][layer_id])\\t\\t# fw state in layer id\\n\\t\\t\\t\\t\\t\\tencoder_state.append(bi_state[1][layer_id])\\t\\t# bw state in layer id\\n\\t\\t\\t\\t\\tencoder_state = tuple(encoder_state)\\n\\t\\t\\t\\tencoder_outputs = tf.concat(bi_outputs, -1)\\t\\t# [batch, seq, hidden * 2]\\n\\t\\t\\telse:\\n\\t\\t\\t\\t_error('Unknow encoder type: {}'.format(hparams.encoder_type))\\n\\t\\t\\t\\traise ValueError\\n\\t\\t\\n\\t\\treturn encoder_outputs, encoder_state\",\n \"def encode(self, x):\\n self.eval()\\n x = torch.as_tensor(x).unsqueeze(0)\\n if self.do_mt:\\n enc_output, _ = self.encoder_mt(x, None)\\n else:\\n enc_output, _ = self.encoder(x, None)\\n return enc_output.squeeze(0)\",\n \"def encoder_layer(enc_input, attn_bias, n_head, d_key,\\n d_value, d_model, d_inner_hid, pos_enc, prepostprocess_dropout,\\n attention_dropout, relu_dropout, preprocess_cmd='n',\\n postprocess_cmd='da'):\\n attn_output = multi_head_attention(\\n pre_process_layer(enc_input, preprocess_cmd, prepostprocess_dropout),\\n None, None, attn_bias, d_key, d_value, d_model, pos_enc,\\n n_head, attention_dropout\\n )\\n attn_output = post_process_layer(enc_input, attn_output,\\n postprocess_cmd, prepostprocess_dropout)\\n ffd_output = positionwise_feed_forward(\\n pre_process_layer(attn_output, preprocess_cmd, prepostprocess_dropout),\\n d_inner_hid, d_model, relu_dropout\\n )\\n return post_process_layer(attn_output, ffd_output,\\n postprocess_cmd, prepostprocess_dropout)\",\n \"def transparent_forward(self, input, hidden, give_gates=False, debug=False):\\n\\n lseq, nseq = input.shape\\n ispad = (input == self.padding)\\n\\n H = torch.zeros(lseq, self.nhid, nseq)\\n if give_gates:\\n Z = torch.zeros(lseq, self.nhid, nseq)\\n R = torch.zeros(lseq, self.nhid, nseq)\\n \\n # because pytorch only returns hidden activity 
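# The bi-directional branch of _build_encoder above interleaves forward and
# backward states per layer and concatenates outputs on the feature axis. For
# comparison, a PyTorch bi-GRU already returns exactly that layout; sizes are
# illustrative, not from the original:
import torch
import torch.nn as nn

gru = nn.GRU(input_size=8, hidden_size=16, num_layers=2, bidirectional=True)
x = torch.randn(7, 4, 8)             # (time, batch, features)
outputs, h_n = gru(x)                # outputs: (7, 4, 32) = fw || bw per step
# h_n is (num_layers * 2, batch, 16), ordered [l0_fw, l0_bw, l1_fw, l1_bw] --
# the same interleaving the TF code assembles by hand.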
in the last time step,\\n # we need to unroll it manually. \\n O = torch.zeros(lseq, nseq, self.decoder.out_features)\\n emb = self.encoder(input)\\n for t in range(lseq):\\n if give_gates:\\n out, hidden, ZR = self.rnn(emb[t:t+1,...], hidden, give_gates=True)\\n Z[t,:,:] = ZR[0].squeeze(0).T\\n R[t,:,:] = ZR[1].squeeze(0).T\\n else:\\n out, hidden = self.rnn(emb[t:t+1,...], hidden)\\n dec = self.decoder(out)\\n # naan = torch.ones(hidden.squeeze(0).shape)*np.nan\\n # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T\\n H[t,:,:] = hidden.squeeze(0).T\\n O[t,:,:] = dec.squeeze(0)\\n\\n if give_gates:\\n if debug:\\n return O, H, Z, R, emb\\n else:\\n return O, H, Z, R\\n else:\\n if debug:\\n return O, H, emb\\n else:\\n return O, H\",\n \"def __encoder_lstm(self, x, x_lengths):\\n embedded_x = self.input_embedding.forward(x) # (input_seq_len x batch x embed_dim)\\n embedded_x = self.embedding_dropout.forward(embedded_x)\\n\\n # pack and unpack the padded batch for the encoder\\n packed_x = nn.utils.rnn.pack_padded_sequence(embedded_x, x_lengths)\\n h, _ = self.encoder.forward(packed_x) # (input_seq_len x batch x 2*encoder_state_dim)\\n unpacked_h, _ = nn.utils.rnn.pad_packed_sequence(h)\\n\\n return unpacked_h\",\n \"def forward(self,\\n state,\\n encoder_out=None,\\n encoder_padding_mask=None,\\n incremental_state=None,\\n prev_self_attn_state=None,\\n self_attn_mask=None,\\n self_attn_padding_mask=None,\\n need_attn=False,\\n need_head_weights=False):\\n\\n # need_attn must be True if need_head_weights\\n need_attn = True if need_head_weights else need_attn\\n\\n residual = state.clone()\\n state, _ = self.self_attn(query=state,\\n key=state,\\n value=state,\\n key_padding_mask=self_attn_padding_mask,\\n need_weights=False,\\n attn_mask=self_attn_mask)\\n state = F.dropout(state, p=self.dropout, training=self.training)\\n state += residual\\n state = self.self_attn_layer_norm(state)\\n\\n residual = state.clone()\\n '''\\n ___QUESTION-6-DESCRIBE-E-START___\\n How does encoder attention differ from self attention? What is the difference between key_padding_mask\\n and attn_mask? 
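# __encoder_lstm above packs the padded batch before the RNN so padded steps
# are skipped. A self-contained sketch of the pack/unpack round trip (sizes
# invented; lengths sorted descending as pack_padded_sequence expects by
# default):
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

lstm = nn.LSTM(input_size=8, hidden_size=16)
x = torch.randn(6, 3, 8)                        # (time, batch, features), right-padded
lengths = torch.tensor([6, 4, 2])               # valid steps per sequence
packed = pack_padded_sequence(x, lengths)       # padded steps removed
h_packed, _ = lstm(packed)
h, out_lengths = pad_packed_sequence(h_packed)  # back to (time, batch, 16)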
If you understand this difference, then why don't we need to give attn_mask here?\\n\\n Encoder attention differs from self-attention in that it attends to the\\n output embeddings of the encoder instead of the embeddings in the decoder.\\n key_padding_mask is used to adjust the length of the sentences, whereas\\n attn_mask prevents the decoder from attending to future positions.\\n We do not use attn_mask while attending to the decoder since we want all\\n the embeddings in the decoder to have access to all the encoder output\\n embeddings.\\n '''\\n state, attn = self.encoder_attn(query=state,\\n key=encoder_out,\\n value=encoder_out,\\n key_padding_mask=encoder_padding_mask,\\n need_weights=need_attn or (not self.training and self.need_attn))\\n '''\\n ___QUESTION-6-DESCRIBE-E-END___\\n '''\\n\\n state = F.dropout(state, p=self.dropout, training=self.training)\\n state += residual\\n state = self.encoder_attn_layer_norm(state)\\n\\n residual = state.clone()\\n state = F.relu(self.fc1(state))\\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\\n state = self.fc2(state)\\n state = F.dropout(state, p=self.dropout, training=self.training)\\n state += residual\\n state = self.final_layer_norm(state)\\n\\n return state, attn\",\n \"def _init_rnn_state(self, encoder_hidden):\\n if encoder_hidden is None:\\n return None\\n if isinstance(encoder_hidden, tuple):\\n encoder_hidden = tuple(\\n [self._cat_directions(h) for h in encoder_hidden])\\n else:\\n encoder_hidden = self._cat_directions(encoder_hidden)\\n return encoder_hidden\",\n \"def build_model(self):\\n # Define model inputs for the encoder/decoder stack\\n x_enc = Input(shape=(self.seq_len_in, self.input_feature_amount), name=\\\"x_enc\\\")\\n x_dec = Input(shape=(self.seq_len_out, self.output_feature_amount), name=\\\"x_dec\\\")\\n\\n # Add noise\\n x_dec_t = GaussianNoise(0.2)(x_dec)\\n\\n input_conv2 = Conv1D(filters=64, kernel_size=5, strides=2, activation='relu', padding='same')\\n input_conv1 = Conv1D(filters=64, kernel_size=3, strides=2, activation='relu', padding='same', name=\\\"last_conv_layer\\\")\\n\\n input_conv2_out = input_conv2(x_enc)\\n input_conv1_out = input_conv1(input_conv2_out)\\n\\n # Define the encoder GRU, which only has to return a state\\n encoder_gru = GRU(self.state_size, return_sequences=True, return_state=True, name=\\\"encoder_gru\\\")\\n encoder_out, encoder_state = encoder_gru(input_conv1_out)\\n\\n # Decoder GRU\\n decoder_gru = GRU(self.state_size, return_state=True, return_sequences=True,\\n name=\\\"decoder_gru\\\")\\n # Use these definitions to calculate the outputs of out encoder/decoder stack\\n dec_intermediates, decoder_state = decoder_gru(x_dec_t, initial_state=encoder_state)\\n\\n # Define the attention layer\\n attn_layer = AttentionLayer(name=\\\"attention_layer\\\")\\n attn_out, attn_states = attn_layer([encoder_out, dec_intermediates])\\n\\n # Concatenate decoder and attn out\\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([dec_intermediates, attn_out])\\n\\n # Define the dense layer\\n dense = Dense(self.output_feature_amount, activation='linear', name='output_layer')\\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\\n decoder_pred = dense_time(decoder_concat_input)\\n\\n # Define the encoder/decoder stack model\\n encdecmodel = tsModel(inputs=[x_enc, x_dec], outputs=decoder_pred)\\n\\n # Define the separate encoder model for inferencing\\n encoder_inf_inputs = Input(shape=(self.seq_len_in, self.input_feature_amount), 
name=\\\"encoder_inf_inputs\\\")\\n\\n input_conv2_inf = input_conv2(encoder_inf_inputs)\\n input_conv1_inf_out = input_conv1(input_conv2_inf)\\n\\n encoder_inf_out, encoder_inf_state = encoder_gru(input_conv1_inf_out)\\n encoder_model = tsModel(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\\n\\n # Define the separate encoder model for inferencing\\n decoder_inf_inputs = Input(shape=(1, self.output_feature_amount), name=\\\"decoder_inputs\\\")\\n encoder_inf_states = Input(shape=(encdecmodel.get_layer('last_conv_layer').output_shape[1], self.state_size), name=\\\"decoder_inf_states\\\")\\n decoder_init_state = Input(shape=(self.state_size,), name=\\\"decoder_init\\\")\\n\\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\\n decoder_model = tsModel(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\\n\\n return encoder_model, decoder_model, encdecmodel\",\n \"def make_encoder(self, input_size: int, latent_size: int) -> nn.Module:\\n pass\",\n \"def _encode_event_idx(self, event_idx, step_idx):\\n enc_dec = self.trans_model._config.encoder_decoder\\n input_ = np.zeros(enc_dec.input_size)\\n input_[event_idx] = 1.0\\n\\n offset = enc_dec._one_hot_encoding.num_classes\\n n = step_idx + 1\\n for i in range(enc_dec._binary_counter_bits):\\n input_[offset] = 1.0 if (n // 2 ** i) % 2 else -1.0\\n offset += 1\\n\\n return np.expand_dims(input_, 0)\",\n \"def forward(self, trg_embed, encoder_hidden, encoder_final, \\n src_mask, trg_mask, hidden=None, max_len=None):\\n \\n # the maximum number of steps to unroll the RNN\\n #print(\\\"czw src mask\\\", src_mask.size())\\n #print(\\\"czw trg embed\\\", trg_embed.size())\\n #print(\\\"czw encoder_hidden\\\", encoder_hidden.size())\\n #print(\\\"czw encoder_final\\\", encoder_final[0].size())\\n if max_len is None:\\n max_len = trg_embed.size(1)\\n\\n # initialize decoder hidden state\\n if hidden is None:\\n hidden = self.init_hidden(encoder_final)\\n \\n # pre-compute projected encoder hidden states\\n # (the \\\"keys\\\" for the attention mechanism)\\n # this is only done for efficiency\\n proj_key = self.attention.key_layer(encoder_hidden)\\n \\n # here we store all intermediate hidden states and pre-output vectors\\n decoder_states = []\\n pre_output_vectors = []\\n \\n # unroll the decoder RNN for max_len steps\\n for i in range(max_len):\\n prev_embed = trg_embed[:, i].unsqueeze(1)\\n output, hidden, pre_output = self.forward_step(\\n prev_embed, encoder_hidden, src_mask, proj_key, hidden)\\n decoder_states.append(output)\\n pre_output_vectors.append(pre_output)\\n\\n decoder_states = torch.cat(decoder_states, dim=1)\\n pre_output_vectors = torch.cat(pre_output_vectors, dim=1)\\n return decoder_states, hidden, pre_output_vectors # [B, N, D]\",\n \"def encode_decode_TD(self, n_step, idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ ):\\r\\n\\r\\n actor_embedding = embed_seq(input_seq=self.input_, from_=self.dimension, to_= self.input_embed, is_training=self.is_training, BN=True, initializer=self.initializer)\\r\\n actor_encoding = encode_seq(input_seq=actor_embedding, 
input_dim=self.input_embed, num_stacks=self.num_stacks, num_heads=self.num_heads, num_neurons=self.num_neurons, is_training=self.is_training)\\r\\n \\r\\n if self.is_training == False:\\r\\n actor_encoding = tf.tile(actor_encoding,[self.batch_size,1,1])\\r\\n \\r\\n idx_list = copy(idx_list_previous)\\r\\n log_probs = copy(log_probs_previous)\\r\\n entropies = copy(entropies_previous)\\r\\n \\r\\n\\r\\n mask = copy(mask_previous)\\r\\n \\r\\n n_hidden = actor_encoding.get_shape().as_list()[2] # input_embed\\r\\n W_ref = tf.get_variable(\\\"W_ref\\\",[1, n_hidden, self.num_units],initializer=self.initializer)\\r\\n W_q = tf.get_variable(\\\"W_q\\\",[self.query_dim, self.num_units],initializer=self.initializer)\\r\\n v = tf.get_variable(\\\"v\\\",[self.num_units],initializer=self.initializer)\\r\\n \\r\\n encoded_ref = tf.nn.conv1d(actor_encoding, W_ref, 1, \\\"VALID\\\") # actor_encoding is the ref for actions [Batch size, seq_length, n_hidden]\\r\\n \\r\\n query1 = copy( query1_previous)\\r\\n query2 = copy( query2_previous)\\r\\n query3 = copy( query3_previous)\\r\\n idx_copy = copy(idx_)\\r\\n \\r\\n W_1 =tf.get_variable(\\\"W_1\\\",[n_hidden, self.query_dim],initializer=self.initializer) # update trajectory (state)\\r\\n W_2 =tf.get_variable(\\\"W_2\\\",[n_hidden, self.query_dim],initializer=self.initializer)\\r\\n W_3 =tf.get_variable(\\\"W_3\\\",[n_hidden, self.query_dim],initializer=self.initializer)\\r\\n \\r\\n \\r\\n \\\"\\\"\\\"\\r\\n # sample from POINTER from the perspective of the Actor\\r\\n \\\"\\\"\\\"\\r\\n for step in range(n_step + 1 ): \\r\\n query = tf.nn.relu(tf.matmul(query1, W_1) + tf.matmul(query2, W_2) + tf.matmul(query3, W_3))\\r\\n logits = pointer(encoded_ref=encoded_ref, query=query, mask=mask, W_ref=W_ref, W_q=W_q, v=v, C=config.C, temperature=config.temperature)\\r\\n prob = distr.Categorical(logits) # logits = masked_scores\\r\\n idx = prob.sample()\\r\\n\\r\\n idx_list.append(idx) # tour index\\r\\n idx_list_previous.append(idx)\\r\\n \\r\\n log_probs.append(prob.log_prob(idx)) # log prob\\r\\n log_probs_previous.append(prob.log_prob(idx))\\r\\n \\r\\n entropies.append(prob.entropy()) # entropies\\r\\n entropies_previous.append(prob.entropy())\\r\\n \\r\\n mask = mask + tf.one_hot(idx, self.max_length) # mask\\r\\n mask_previous = mask_previous + tf.one_hot(idx, self.max_length)\\r\\n\\r\\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \\r\\n idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \\r\\n query3 = query2\\r\\n query2 = query1\\r\\n query1 = tf.gather_nd(actor_encoding, idx_) # update trajectory (state)\\r\\n \\r\\n query3_previous = query2_previous\\r\\n query2_previous = query1_previous\\r\\n query1_previous = tf.gather_nd(actor_encoding, idx_) # update trajectory (state) \\r\\n\\r\\n if (len(idx_list) >= self.max_length): break #leave the loop if reach the end of the episode\\r\\n\\r\\n \\\"\\\"\\\"\\r\\n # sample from POINTER from the perspective of the Critic\\r\\n make q_t vector = 0\\r\\n \\\"\\\"\\\"\\r\\n while(len(idx_list) < self.max_length): \\r\\n \\r\\n logits = pointer_critic(encoded_ref=encoded_ref, mask=mask, W_ref=W_ref, v=v, C=config.C, temperature=config.temperature)\\r\\n prob = distr.Categorical(logits) # logits = masked_scores\\r\\n idx = prob.sample()\\r\\n\\r\\n idx_list.append(idx) # tour index\\r\\n log_probs.append(prob.log_prob(idx)) # log prob\\r\\n entropies.append(prob.entropy()) # entropies\\r\\n mask = mask + tf.one_hot(idx, 
self.max_length) # mask\\r\\n\\r\\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \\r\\n #idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \\r\\n query3 = query2\\r\\n query2 = query1\\r\\n query1 = tf.gather_nd(actor_encoding, idx_copy) # update trajectory (state)\\r\\n \\r\\n idx_list.append(idx_list[0]) # return to start\\r\\n self.tour =tf.stack(idx_list, axis=1) # permutations\\r\\n self.log_prob = tf.add_n(log_probs) # corresponding log-probability for backprop\\r\\n self.entropies = tf.add_n(entropies)\\r\\n tf.summary.scalar('log_prob_mean', tf.reduce_mean(self.log_prob))\\r\\n tf.summary.scalar('entropies_mean', tf.reduce_mean(self.entropies))\\r\\n \\r\\n return idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ #returns variables necessary for the next loop\\r\",\n \"def _decode_train(self):\\n\\n # the basic idea is, we use golden sketch during train and in order to copy from source\\n # we given true mask of decoder to generate right copy weights\\n state = {'encoder': self.concated_encoder_output}\\n\\n def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,\\n reuse=False):\\n return transformer_decoder(inputs, memory, bias, mem_bias, params, state, scope, reuse)\\n\\n self.final_logits = self._decode_func(\\n self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,\\n self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,\\n expand_source_ids_oo=self.concat_src_ids_oo,\\n max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,\\n decoder_fn=transformer_concated_decoder_internal,\\n scope='final_decoder')\",\n \"def decode(self, input_size):\\n output = np.zeros(input_size)\\n \\n preactivation = np.dot(self.W, self.h) + self.c\\n sigmoid(preactivation, output)\\n \\n return output\",\n \"def make_encoder(opt, embeddings, intent_size, output_size, use_history=False, hidden_depth=1, identity=None,\\n hidden_size=None):\\n # encoder = StateEncoder(intent_size=intent_size, output_size=output_size,\\n # state_length=opt.state_length, extra_size=3 if opt.dia_num>0 else 0 )\\n\\n # intent + price\\n diaact_size = (intent_size+1)\\n extra_size = 3 + 2\\n if hidden_size is None:\\n hidden_size = opt.hidden_size\\n if not opt.use_utterance:\\n embeddings = None\\n if use_history:\\n extra_size = 3\\n # + pmask\\n diaact_size += 1\\n if identity is None:\\n encoder = HistoryIDEncoder(None, diaact_size * 2, extra_size, embeddings, output_size,\\n hidden_depth=hidden_depth, rnn_state=True)\\n else:\\n # encoder = HistoryIDEncoder(identity, diaact_size*2+extra_size, embeddings, output_size,\\n # hidden_depth=hidden_depth)\\n encoder = HistoryIDEncoder(identity, diaact_size * 2, extra_size, embeddings, output_size,\\n hidden_depth=hidden_depth, rnn_state=True)\\n else:\\n if identity is None:\\n encoder = CurrentEncoder(diaact_size*opt.state_length+extra_size, embeddings, output_size,\\n hidden_depth=hidden_depth)\\n else:\\n extra_size = 3\\n # + pmask\\n diaact_size += 1\\n encoder = HistoryIDEncoder(identity, diaact_size * opt.state_length, extra_size, embeddings, output_size,\\n hidden_depth=hidden_depth)\\n\\n return encoder\",\n \"def encoder_bi(self, cell_fw, cell_bw, x, seq_len, init_state_fw=None,\\n init_state_bw=None):\\n # Output is the outputs at all time steps, state is the last state\\n with 
tf.variable_scope(\\\"bidirectional_dynamic_rnn\\\"):\\n outputs, state = tf.nn.bidirectional_dynamic_rnn(\\\\\\n cell_fw=cell_fw,\\n cell_bw=cell_bw,\\n inputs=x,\\n sequence_length=seq_len,\\n initial_state_fw=init_state_fw,\\n initial_state_bw=init_state_bw,\\n dtype=self.floatX)\\n # outputs: a tuple(output_fw, output_bw), all sequence hidden states,\\n # each as tensor of shape [batch,time,units]\\n # Since we don't need the outputs separate, we concat here\\n outputs = tf.concat(outputs,2)\\n outputs.set_shape([None, None, self.bi_encoder_hidden])\\n # If LSTM cell, then \\\"state\\\" is not a tuple of Tensors but an\\n # LSTMStateTuple of \\\"c\\\" and \\\"h\\\". Need to concat separately then new\\n if \\\"LSTMStateTuple\\\" in str(type(state[0])):\\n c = tf.concat([state[0][0],state[1][0]],axis=1)\\n h = tf.concat([state[0][1],state[1][1]],axis=1)\\n state = tf.contrib.rnn.LSTMStateTuple(c,h)\\n else:\\n state = tf.concat(state,1)\\n # Manually set shape to Tensor or all hell breaks loose\\n state.set_shape([None, self.bi_encoder_hidden])\\n return outputs, state\",\n \"def forward(self, inp, hidden=None, give_gates=False, debug=False, readout_time=None):\\n\\n if self.recoder is None:\\n emb = inp\\n else:\\n emb = self.recoder(inp)\\n\\n if hidden is None:\\n hidden = self.init_hidden(inp.shape[1])\\n # if emb.dim()<3:\\n # emb = emb.unsqueeze(0)\\n\\n if give_gates:\\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\\n else:\\n output, hidden = self.rnn(emb, hidden)\\n # print(output.shape)\\n\\n # decoded = self.softmax(self.decoder(output))\\n decoded = self.decoder(output)\\n if readout_time is None:\\n decoded = decoded[-1,...] # assume only final timestep matters\\n\\n if give_gates:\\n return decoded, hidden, extras\\n else:\\n return decoded, hidden\",\n \"def get_final_encoder_states(encoder_outputs: torch.Tensor,\\n mask: torch.Tensor,\\n bidirectional: bool = False) -> torch.Tensor:\\n # These are the indices of the last words in the sequences (i.e. length sans padding - 1). 
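# encoder_bi above concatenates the forward/backward halves of an
# LSTMStateTuple into a single (c, h) pair. The equivalent post-processing of
# a PyTorch bidirectional LSTM's final state, with made-up sizes:
import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=8, hidden_size=16, bidirectional=True)
x = torch.randn(5, 4, 8)                      # (time, batch, features)
outputs, (h_n, c_n) = lstm(x)                 # h_n, c_n: (2, batch, 16)
h_cat = torch.cat([h_n[0], h_n[1]], dim=1)    # (batch, 32), fw || bw
c_cat = torch.cat([c_n[0], c_n[1]], dim=1)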
We\\n # are assuming sequences are right padded.\\n # Shape: (batch_size,)\\n last_word_indices = mask.sum(1).long() - 1\\n\\n # handle -1 cases\\n ll_ = (last_word_indices != -1).long()\\n last_word_indices = last_word_indices * ll_\\n\\n batch_size, _, encoder_output_dim = encoder_outputs.size()\\n expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)\\n # Shape: (batch_size, 1, encoder_output_dim)\\n final_encoder_output = encoder_outputs.gather(1, expanded_indices)\\n final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)\\n if bidirectional:\\n final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]\\n final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]\\n final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)\\n return final_encoder_output\",\n \"def make_prediction(self, previous_timesteps_x, previous_y):\\n # Get the state from the Encoder using the previous timesteps for x\\n # Expand the previous timesteps, we must make the input a batch (going from shape (100, 149) to (1, 100, 149))\\n enc_outs, enc_last_state = self.encoder.predict(np.expand_dims(previous_timesteps_x, axis=0))\\n dec_state = enc_last_state\\n\\n # Initialize the outputs on the previous y so we have something to feed the net\\n # It might be neater to feed a start symbol instead\\n dec_out = np.expand_dims(previous_y, axis=0)\\n outputs = []\\n attention_weights = []\\n for i in range(self.seq_len_out):\\n dec_out, attention, dec_state = self.decoder.predict([enc_outs, dec_state, dec_out])\\n outputs.append(dec_out)\\n\\n # Add attention weights\\n attention_weights.append(attention)\\n\\n # Reshape and transpose attention weights so they make more sense\\n attention_weights = np.reshape(np.stack(attention_weights), newshape=(self.seq_len_out,\\n self.encoder.get_layer(\\\"last_conv_layer\\\")\\n .output_shape[1])).transpose()\\n\\n # Concatenate the outputs, as they are batches\\n # For example, going from a list of (1,1,1) to one unit of (1,100,1)\\n # So we take the 0th element from the batch which are our outputs\\n return np.concatenate(outputs, axis=1)[0], attention_weights\",\n \"def encoder(self, inputs):\\n pass\",\n \"def _define_encoder(self):\\n self.encoder = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, padding=1), # B, 32, 32, 32\\n nn.SELU(),\\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\\n nn.SELU(),\\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\\n nn.SELU(),\\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\\n nn.SELU(),\\n nn.Conv2d(64, 256, 4, 1), # B, 256, 1, 1\\n nn.SELU(),\\n View((-1, 256 * 1 * 1)), # B, 256\\n nn.Linear(256, self.encoding_shape * 2), # B, z_dim*2\\n )\",\n \"def forward(self,\\n input,\\n hidden,\\n encoder_outputs):\\n embedded = self.embedding(input).view(1, 1, -1)\\n embedded = self.dropout(embedded)\\n\\n # attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)\\n attn_state = hidden[0] if isinstance(hidden, tuple) else hidden\\n attn_weights = F.softmax(attn_state[0] @ encoder_outputs.squeeze().t(), dim=1)\\n attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.permute(1, 0, 2))\\n\\n output = torch.cat((embedded[0], attn_applied[0]), 1)\\n output = self.attn_combine(output).unsqueeze(0)\\n\\n output = F.relu(output)\\n output, hidden = self.rnn(output, hidden)\\n\\n output = F.log_softmax(self.out(output[0]), dim=1)\\n return output, hidden, 
attn_weights\",\n \"def add_model(self):\\n\\n b_sz = tf.shape(self.encoder_input)[0]\\n tstp_en = tf.shape(self.encoder_input)[1]\\n tstp_de = tf.shape(self.decoder_input)[1]\\n\\n encoder_dropout_input = tf.nn.dropout(self.encoder_input, self.ph_dropout, name='encoder_Dropout')\\n decoder_dropout_input = tf.nn.dropout(self.decoder_input, self.ph_dropout, name='decoder_Dropout')\\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.config.hidden_size)\\n \\\"\\\"\\\"#(batch_size, num_sentence, hidden_size)\\\"\\\"\\\"\\n encoder_outputs, state = tf.nn.dynamic_rnn(lstm_cell, encoder_dropout_input, self.encoder_tstps, \\n dtype=tf.float32, swap_memory=True, time_major=False, scope = 'rnn_encode')\\n self.state=state\\n with tf.variable_scope('decoder') as vscope:\\n decoder_outputs, _ = tf.nn.dynamic_rnn(lstm_cell, decoder_dropout_input, self.decoder_tstps, #(batch_size, time_steps, hidden_size)\\n initial_state=state, dtype=tf.float32, swap_memory=True, time_major=False, scope='rnn_decode')\\n \\n with tf.variable_scope('rnn_decode'):\\n #tf.reshape(self.ph_decoder_label, shape=(-1, 1)) #(batch_size*time_steps, 1)\\n encoder_outputs_reshape = tf.reshape(encoder_outputs, shape=(-1, self.config.hidden_size), name='add_model_reshape_0') #(batch_size*time_steps, hidden_size)\\n decoder_outputs_reshape = tf.reshape(decoder_outputs, shape=(-1, self.config.hidden_size), name='add_model_reshape_1') #(batch_size*time_steps_1, hidden_size)\\n encoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(encoder_outputs_reshape, output_size=self.config.hidden_size, #(#(batch_size*time_steps, hidden_size))\\n bias=False, scope='Ptr_W1')\\n decoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(decoder_outputs_reshape, output_size=self.config.hidden_size, #(#(batch_size*time_steps, hidden_size))\\n bias=False, scope='Ptr_W2')\\n encoder_outputs_linear = tf.reshape(encoder_outputs_linear_reshape, tf.shape(encoder_outputs), name='add_model_reshape_2')\\n decoder_outputs_linear = tf.reshape(decoder_outputs_linear_reshape, tf.shape(decoder_outputs), name='add_model_reshape_3')\\n \\n encoder_outputs_linear_expand = tf.expand_dims(encoder_outputs_linear, 1) #(b_sz, 1, tstp_en, h_sz)\\n decoder_outputs_linear_expand = tf.expand_dims(decoder_outputs_linear, 2) #(b_sz, tstp_de, 1, h_sz)\\n \\n after_add = tf.tanh(encoder_outputs_linear_expand + decoder_outputs_linear_expand) #(b_sz, tstp_de, tstp_en, h_sz)\\n \\n after_add_reshape = tf.reshape(after_add, shape=(-1, self.config.hidden_size), name='add_model_reshape_4')\\n \\n after_add_linear_reshape = tf.nn.rnn_cell._linear(after_add_reshape, output_size=1, #(b_sz*tstp_de*tstp_en, 1)\\n bias=False, scope='Ptr_v')\\n after_add_linear = tf.reshape(after_add_linear_reshape, shape=tf.shape(after_add)[:3], name='add_model_reshape_5') #(b_sz, tstp_de, tstp_en)\\n\\n en_length_mask = tf.sequence_mask(self.encoder_tstps, #(b_sz, tstp_en)\\n maxlen=tf.shape(after_add_linear)[-1], dtype=tf.bool)\\n en_length_mask = tf.expand_dims(en_length_mask, 1) #(b_sz, 1, tstp_en)\\n en_length_mask = tf.tile(en_length_mask, [1, tstp_de, 1])\\n\\n logits = tf.select(en_length_mask, after_add_linear,\\n tf.ones_like(after_add_linear) * (-np.Inf)) # shape(b_sz, tstp_de, tstp_en)\\n \\n flat_logits = tf.reshape(logits, shape=[b_sz * tstp_de, tstp_en])\\n\\n vscope.reuse_variables()\\n outputs_ta, _, _ = self.decoder(lstm_cell, state, encoder_outputs, encoder_dropout_input, scope='rnn_decode')\\n outputs = outputs_ta.pack() #(time_steps, batch_size)\\n outputs = tf.transpose(outputs, [1, 0]) 
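# add_model above implements additive (Bahdanau-style) attention:
# v . tanh(W1*enc + W2*dec), with invalid encoder steps masked to -inf before
# the softmax. A compact NumPy sketch of just that scoring step; all shapes
# and the length values are invented for illustration:
import numpy as np

b, t_en, t_de, h = 2, 5, 3, 4
enc = np.random.randn(b, t_en, h)
dec = np.random.randn(b, t_de, h)
W1, W2, v = np.random.randn(h, h), np.random.randn(h, h), np.random.randn(h)

# broadcast to (b, t_de, t_en, h), then contract with v -> (b, t_de, t_en)
scores = np.tanh(enc[:, None, :, :] @ W1 + dec[:, :, None, :] @ W2) @ v
lengths = np.array([5, 2])
mask = np.arange(t_en)[None, None, :] < lengths[:, None, None]
scores = np.where(mask, scores, -np.inf)      # padded steps get zero weight
attn = np.exp(scores - scores.max(-1, keepdims=True))
attn /= attn.sum(-1, keepdims=True)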
#(batch_size, time_steps)\\n \\n state = tf.concat(1, state)\\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.config.hidden_size, state_is_tuple=False)\\n beam_outputs, beam_seq, beam_prob = self.beam_decoder(lstm_cell, state, encoder_outputs, \\n encoder_dropout_input, beam_size=self.config.beam_size, scope='rnn_decode')\\n \\n self.logits = logits\\n self.encoder_outputs = encoder_outputs\\n self.beam_seq = beam_seq\\n self.beam_prob = beam_prob\\n return flat_logits, outputs, beam_outputs\",\n \"def __call__(self, sequence):\\n fwd_states, bwd_states = self.encode_fwd_bwd(sequence)\\n return dy.concatenate([fwd_states[-1], bwd_states[-1]])\",\n \"def __init__(self, input_size, hidden_size, bidirection, config):\\r\\n super(Encoder, self).__init__()\\r\\n\\r\\n self.config = config\\r\\n self.input_size = input_size\\r\\n self.hidden_size = hidden_size\\r\\n self.bidirection = bidirection\\r\\n\\r\\n if self.config.model in ['LSTM', 'GRU']:\\r\\n self.rnn = getattr(nn, self.config.model)(self.input_size, self.hidden_size, self.config.nlayer_enc,\\r\\n batch_first=True, dropout=self.config.dropout,\\r\\n bidirectional=self.bidirection)\\r\\n else:\\r\\n try:\\r\\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[self.config.model]\\r\\n except KeyError:\\r\\n raise ValueError(\\\"\\\"\\\"An invalid option for `--model` was supplied,\\r\\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\\\"\\\"\\\")\\r\\n self.rnn = nn.RNN(self.input_size, self.hidden_size, self.config.nlayers, nonlinearity=nonlinearity,\\r\\n batch_first=True, dropout=self.config.dropout, bidirectional=self.bidirection)\",\n \"def init_hidden_state(self, encoder_out: torch.Tensor):\\n pass\",\n \"def forward(self, *args): # noqa: R0914\\r\\n encoder_out, (hn, cn) = self.unified_encoder(*args)\\r\\n device = hn.device\\r\\n non_sequential_cont_decoded = self.mlp_non_seq_cont(hn)\\r\\n non_sequential_cat_decoded = []\\r\\n for mlp_non_seq_cat in self.mlp_non_seq_cat_list:\\r\\n non_sequential_cat_decoded.append(mlp_non_seq_cat(hn))\\r\\n\\r\\n hn = torch.unsqueeze(hn, 0)\\r\\n cn = torch.unsqueeze(cn, 0)\\r\\n # decoded is the output prediction of timestep i-1 of the decoder\\r\\n decoded = torch.zeros(encoder_out.shape[0], int(\\r\\n self.unified_encoder.seq_cont_count + self.unified_encoder.no_of_embs_seq), device=device)\\r\\n seq_cont_decoded = torch.Tensor(device=device)\\r\\n seq_cat_decoded = []\\r\\n for _ in range(self.unified_encoder.seq_cat_count):\\r\\n seq_cat_decoded.append(torch.Tensor(device=device))\\r\\n\\r\\n for _ in range(encoder_out.shape[1]):\\r\\n decoded, (hn, cn), out_cont, out_cat = self.decoder(decoded, (hn, cn))\\r\\n # Predict all categorical columns\\r\\n out_cat_onehot = []\\r\\n if self.unified_encoder.seq_cat_count != 0:\\r\\n for idx, out in enumerate(out_cat):\\r\\n out_cat_onehot.append(torch.argmax(out, dim=1).unsqueeze(-1))\\r\\n seq_cat_decoded[idx] = torch.cat(\\r\\n [seq_cat_decoded[idx], out.view(out.shape[0], 1, -1)], dim=1)\\r\\n out_cat_onehot = torch.cat(out_cat_onehot, -1)\\r\\n out_cat_embedding = self.unified_encoder.seq_emb_layers(out_cat_onehot)\\r\\n decoded = torch.cat([out_cat_embedding, out_cont], dim=-1)\\r\\n else:\\r\\n decoded = out_cont\\r\\n seq_cont_decoded = torch.cat(\\r\\n [seq_cont_decoded, out_cont.view(out_cont.shape[0], 1, -1)], dim=1)\\r\\n\\r\\n return non_sequential_cont_decoded, non_sequential_cat_decoded, seq_cont_decoded, seq_cat_decoded\",\n \"def encode(input):\\n return ModelEncoder().encode(input)\",\n \"def 
generate_encoder(input_shape: Tuple[int]=(100,1), lstm_units:int = 100, latent_dim:int=20)->tf.keras.Model:\\n\\n input = tf.keras.layers.Input(shape=input_shape , name=\\\"encoder_input\\\")\\n #create a bi-directional LSTM layer\\n encoded = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=lstm_units, return_sequences=True))(input)\\n encoded = tf.keras.layers.Flatten()(encoded)\\n encoded = tf.keras.layers.Dense(units=latent_dim, name=\\\"latent_encoding\\\")(encoded)\\n encoded = tf.keras.layers.Reshape(target_shape=(latent_dim, 1) , name=\\\"output_encoder\\\")(encoded)\\n\\n model = tf.keras.Model(inputs=input, outputs=encoded, name=\\\"encoder\\\")\\n\\n return model\",\n \"def forward(self, inputs, decode_len=None):\\n\\n batch_size = inputs.size(0)\\n input_dim = inputs.size(1)\\n assert input_dim == self.input_dim, 'input dim should be {:d} but now: {:d}'.format(self.input_dim, input_dim)\\n\\n sourceL = inputs.size(2)\\n\\n if self.embed_input:\\n # repeat embeddings across batch_size\\n # result is [batch_size x input_dim x embedding_dim]\\n # TODO: repeat or expand?\\n embedding = self.embedding.repeat(batch_size, 1, 1)\\n embedded_inputs = []\\n # result is [batch_size, 1, input_dim, sourceL]\\n ips = inputs.unsqueeze(1)\\n\\n for i in range(sourceL):\\n # [batch_size x 1 x input_dim] * [batch_size x input_dim x embedding_dim]\\n # result is [batch_size, embedding_dim]\\n embedded_inputs.append(torch.bmm(\\n ips[:, :, :, i].float(),\\n embedding).squeeze(1))\\n\\n # Result is [sourceL x batch_size x embedding_dim]\\n embedded_inputs = torch.cat(embedded_inputs).view(\\n sourceL,\\n batch_size,\\n embedding.size(2))\\n else:\\n embedded_inputs = inputs.permute(2, 0, 1)\\n\\n (encoder_hx, encoder_cx) = init_zero_hidden(self.hidden_dim, inputs.is_cuda)\\n encoder_hx = encoder_hx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\\n encoder_cx = encoder_cx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\\n \\n # encoder forward pass\\n enc_h, (enc_h_t, enc_c_t) = self.encoder(embedded_inputs, (encoder_hx, encoder_cx))\\n\\n enc_h_linear = enc_h.view(-1, self.hidden_dim)\\n # enc_h_linear_2d = enc_h_linear.view(self.hidden_dim, -1)\\n enc_action_scores = self.EncodeScore(enc_h_linear)\\n enc_action_scores = enc_action_scores.view(-1, batch_size).permute(1, 0)\\n dec_init_state = (enc_h_t[-1], enc_c_t[-1])\\n \\n # repeat decoder_in_0 across batch\\n decoder_input = self.decoder_in_0.unsqueeze(0).repeat(embedded_inputs.size(1), 1)\\n\\n (head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores), dec_hidden_t = self.decoder(decoder_input,\\n embedded_inputs,\\n dec_init_state,\\n enc_h, max_len=decode_len)\\n #TODO: added conversion to tensors\\n head_pointer_probs = torch.stack(head_pointer_probs)\\n head_pointer_probs = head_pointer_probs.permute(1, 0, 2)\\n tail_pointer_probs = torch.stack(tail_pointer_probs)\\n tail_pointer_probs = tail_pointer_probs.permute(1, 0, 2)\\n cls_scores = torch.stack(cls_scores)\\n cls_scores = cls_scores.permute(1, 0, 2)\\n head_positions = torch.stack(head_positions)\\n head_positions = head_positions.permute(1, 0)\\n tail_positions = torch.stack(tail_positions)\\n tail_positions = tail_positions.permute(1, 0)\\n\\n\\n\\n return head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, enc_action_scores\",\n \"def _basic_rnn_seq2seq(encoder_inputs,\\n decoder_inputs,\\n cell,\\n feed_previous,\\n dtype=dtypes.float32,\\n scope=None):\\n with 
variable_scope.variable_scope(scope or \\\"basic_rnn_seq2seq\\\"):\\n enc_cell = copy.deepcopy(cell)\\n _, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)\\n if feed_previous:\\n return _rnn_decoder(decoder_inputs, enc_state, cell, _loop_function)\\n else:\\n return _rnn_decoder(decoder_inputs, enc_state, cell)\",\n \"def get_rnn_init_state(combiner_outputs: Dict[str, torch.Tensor], sequence_reducer: SequenceReducer, num_layers: int) ->torch.Tensor:\\n if ENCODER_OUTPUT_STATE not in combiner_outputs:\\n encoder_output_state = combiner_outputs[HIDDEN]\\n else:\\n encoder_output_state = combiner_outputs[ENCODER_OUTPUT_STATE]\\n if isinstance(encoder_output_state, tuple):\\n if len(encoder_output_state) == 2:\\n encoder_output_state = encoder_output_state[0]\\n elif len(encoder_output_state) == 4:\\n encoder_output_state = torch.mean([encoder_output_state[0], encoder_output_state[2]])\\n else:\\n raise ValueError(f'Invalid sequence decoder inputs with keys: {combiner_outputs.keys()} with extracted encoder ' + f'state: {encoder_output_state.size()} that was invalid. Please double check the compatibility ' + 'of your encoder and decoder.')\\n if len(encoder_output_state.size()) > 3:\\n raise ValueError('Init state for RNN decoders only works for 1d or 2d tensors (encoder_output).')\\n if len(encoder_output_state.size()) == 3:\\n encoder_output_state = sequence_reducer(encoder_output_state)\\n return repeat_2D_tensor(encoder_output_state, num_layers)\",\n \"def rnn_with_embedding(self,cell,init_state,input_seq,\\n input_seq_len,reuse=None,\\n scope=\\\"RNN\\\"): \\n with tf.variable_scope(scope,reuse=reuse) as vs:\\n log(vs.name+\\\"/Encoding sequences\\\")\\n with tf.device('/cpu:0'):\\n emb = tf.get_variable(\\\"emb\\\",\\n [self.vocab_size,self.hidden_size],\\n dtype=tf.float32)\\n un_emb = tf.get_variable(\\\"unemb\\\",\\n [self.hidden_size,self.vocab_size],\\n tf.float32)\\n # We need a bias\\n un_emb_b = tf.get_variable(\\\"unemb_b\\\",\\n [self.vocab_size],\\n dtype=tf.float32)\\n \\n assert scope+\\\"/emb:0\\\" in emb.name,\\\\\\n \\\"Making sure the reusing is working\\\"\\n emb_input_seq = tf.nn.embedding_lookup(\\n emb,input_seq)\\n emb_input_list = tf.unpack(\\n tf.transpose(emb_input_seq,[1,0,2]))\\n \\n # RNN pass\\n if init_state is None:\\n init_state = cell.zero_state(\\n tf.shape(emb_input_list[0])[0],tf.float32)\\n \\n emb_output_list, final_state = tf.nn.rnn(\\n cell,emb_input_list,initial_state=init_state,\\n sequence_length=input_seq_len)\\n\\n # We shift the predicted outputs, because at\\n # each word we're trying to predict the next.\\n emb_output_list = emb_output_list[:-1]\\n \\n # Unembedding\\n output_list = [tf.matmul(t,un_emb) + un_emb_b\\n for t in emb_output_list]\\n outputs = tf.transpose(tf.pack(output_list),[1,0,2])\\n\\n return outputs, final_state\",\n \"def transparent_forward(self, inp, hidden=None, give_gates=False, debug=False):\\n\\n lseq = inp.shape[0]\\n nseq = inp.shape[1]\\n # ispad = (input == self.padding)\\n\\n if hidden is None:\\n hidden = self.init_hidden(nseq)\\n\\n H = torch.zeros(lseq, self.nhid, nseq)\\n if give_gates:\\n Z = torch.zeros(lseq, self.nhid, nseq)\\n R = torch.zeros(lseq, self.nhid, nseq)\\n \\n # because pytorch only returns hidden activity in the last time step,\\n # we need to unroll it manually. 
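# rnn_with_embedding above drops the last RNN output because the state at
# position t is used to predict token t+1. The same shift in a tiny PyTorch
# language-model training step; vocabulary and sizes are invented:
import torch
import torch.nn as nn

vocab, hidden = 10, 16
emb = nn.Embedding(vocab, hidden)
rnn = nn.GRU(hidden, hidden, batch_first=True)
unemb = nn.Linear(hidden, vocab)

tokens = torch.randint(0, vocab, (4, 9))      # (batch, time)
h, _ = rnn(emb(tokens))
logits = unemb(h[:, :-1])                     # position t predicts tokens[:, t+1]
loss = nn.functional.cross_entropy(
    logits.reshape(-1, vocab), tokens[:, 1:].reshape(-1))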
\\n O = torch.zeros(lseq, nseq, self.decoder.out_features)\\n if self.recoder is None:\\n emb = inp\\n else:\\n emb = self.recoder(inp)\\n for t in range(lseq):\\n if give_gates:\\n out, hidden, ZR = self.rnn(emb[t:t+1,...], hidden, give_gates=True)\\n Z[t,:,:] = ZR[0].squeeze(0).T\\n R[t,:,:] = ZR[1].squeeze(0).T\\n else:\\n out, hidden = self.rnn(emb[t:t+1,...], hidden)\\n dec = self.decoder(out)\\n # naan = torch.ones(hidden.squeeze(0).shape)*np.nan\\n # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T\\n H[t,:,:] = hidden.squeeze(0).T\\n O[t,:,:] = dec.squeeze(0)\\n\\n if give_gates:\\n if debug:\\n return O, H, Z, R, emb\\n else:\\n return O, H, Z, R\\n else:\\n if debug:\\n return O, H, emb\\n else:\\n return O, H\",\n \"def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \\n target_sequence_length, max_summary_length, \\n output_layer, keep_prob):\\n # TODO: Implement Function\\n trainig_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)\\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, trainig_helper, encoder_state, output_layer)\\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(basic_decoder,maximum_iterations=max_summary_length)\\n return f_output\",\n \"def _basic_rnn_seq2seq(encoder_inputs,\\n decoder_inputs,\\n cell,\\n feed_previous,\\n dtype=dtypes.float32,\\n scope=None):\\n with variable_scope.variable_scope(scope or \\\"basic_rnn_seq2seq\\\"):\\n enc_cell = copy.deepcopy(cell)\\n _, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)\\n if feed_previous:\\n return _rnn_decoder(decoder_inputs, enc_state, cell, _loop_function)\\n else:\\n return _rnn_decoder(decoder_inputs, enc_state, cell)\",\n \"def _add_input_decoder(self, inputs, seq_len, enc_fw, enc_bw):\\n with tf.variable_scope(\\\"decoder\\\"):\\n cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\\n cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\\n ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True, initial_state_fw=enc_fw, initial_state_bw=enc_bw)\\n\\n return fw_states, bw_states\",\n \"def forward(self, source, out_seq_len = None):\\n # source seems to be a (1,1,2)\\n\\n batch_size = source.shape[0]\\n seq_len = source.shape[1]\\n if out_seq_len is None:\\n out_seq_len = seq_len\\n\\n \\n #############################################################################\\n # TODO: #\\n # Implement the forward pass of the Seq2Seq model. Please refer to the #\\n # following steps: #\\n # 1) Get the last hidden representation from the encoder. Use it as #\\n # the first hidden state of the decoder #\\n # 2) The first input for the decoder should be the token, which #\\n # is the first in the source sequence. #\\n # 3) Feed this first input and hidden state into the decoder # \\n # one step at a time in the sequence, adding the output to the #\\n # final outputs. #\\n # 4) Update the input and hidden weights being fed into the decoder #\\n # at each time step. The decoder output at the previous time step # \\n # will have to be manipulated before being fed in as the decoder #\\n # input at the next time step. 
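# decoding_layer_train above relies on tf.contrib.seq2seq.TrainingHelper,
# i.e. teacher forcing: the gold previous token is fed at every decoding
# step. A sketch of the same idea without the long-removed contrib API; all
# names and sizes are invented, and the encoder state is stubbed with zeros:
import torch
import torch.nn as nn

vocab, hidden = 12, 16
embed = nn.Embedding(vocab, hidden)
cell = nn.GRUCell(hidden, hidden)
out_layer = nn.Linear(hidden, vocab)

gold = torch.randint(0, vocab, (4, 7))        # (batch, time) target tokens
h = torch.zeros(4, hidden)                    # would come from the encoder
logits = []
for t in range(gold.shape[1] - 1):
    h = cell(embed(gold[:, t]), h)            # feed the *gold* token, not the argmax
    logits.append(out_layer(h))
logits = torch.stack(logits, dim=1)           # predicts gold[:, 1:]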
#\\n #############################################################################\\n output, hidden = self.encoder(source)\\n outputs = torch.zeros(batch_size, out_seq_len, self.decoder.output_size, device=self.device)\\n # initialize -- batch size = 128, seq_len = 20.\\n output, hidden = self.decoder(source[:, 0], hidden)\\n # output of shape -- batch size,\\n #outputs.size() = [20 , 5893]\\n #output.size() = [ 128, 5893]\\n\\n\\n #simple:\\n # output.size() = (8)\\n # outputs.size() = (2,8)\\n outputs[:, 0, :] = output\\n output_idx = outputs[:,0,:].argmax(1)\\n output_idx = output_idx.unsqueeze(1)\\n for i in range(1, out_seq_len):\\n output, hidden = self.decoder(output_idx , hidden)\\n outputs[:,i,:] = output\\n output_idx = outputs[:,i,:].argmax(1)\\n output_idx = output_idx.unsqueeze(1)\\n #############################################################################\\n # END OF YOUR CODE #\\n #############################################################################\\n return outputs\",\n \"def get_rnn_init_state(\\n combiner_outputs: Dict[str, torch.Tensor], sequence_reducer: SequenceReducer, num_layers: int\\n) -> torch.Tensor:\\n if ENCODER_OUTPUT_STATE not in combiner_outputs:\\n # Use the combiner's hidden state.\\n encoder_output_state = combiner_outputs[HIDDEN]\\n else:\\n # Use the encoder's output state.\\n encoder_output_state = combiner_outputs[ENCODER_OUTPUT_STATE]\\n if isinstance(encoder_output_state, tuple):\\n if len(encoder_output_state) == 2:\\n # LSTM encoder. Use the hidden state and ignore the cell state.\\n encoder_output_state = encoder_output_state[0]\\n elif len(encoder_output_state) == 4:\\n # Bi-directional LSTM encoder. Use the average of hidden states and ignore cell state.\\n encoder_output_state = torch.mean([encoder_output_state[0], encoder_output_state[2]])\\n else:\\n raise ValueError(\\n f\\\"Invalid sequence decoder inputs with keys: {combiner_outputs.keys()} with extracted encoder \\\"\\n + f\\\"state: {encoder_output_state.size()} that was invalid. 
                     "Please double check the compatibility "
                     + "of your encoder and decoder."
    )

    if len(encoder_output_state.size()) > 3:
        raise ValueError("Init state for RNN decoders only works for 1d or 2d tensors (encoder_output).")

    if len(encoder_output_state.size()) == 3:
        # Reduce to [batch_size, hidden_size].
        encoder_output_state = sequence_reducer(encoder_output_state)

    return repeat_2D_tensor(encoder_output_state, num_layers)

def _define_decoder(self):
    self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 256),  # B, 256
                                 View((-1, 256, 1, 1)),                # B, 256, 1, 1
                                 nn.SELU(),
                                 nn.ConvTranspose2d(256, 64, 4),       # B, 64, 4, 4
                                 nn.SELU(),
                                 nn.ConvTranspose2d(64, 64, 4, 2, 1),  # B, 64, 8, 8
                                 nn.SELU(),
                                 nn.ConvTranspose2d(64, 32, 4, 2, 1),  # B, 32, 16, 16
                                 nn.SELU(),
                                 nn.ConvTranspose2d(32, 32, 4, 2, 1),  # B, 32, 32, 32
                                 nn.SELU(),
                                 nn.ConvTranspose2d(32, 3, 4, 2, 1),   # B, nc, 64, 64
                                 nn.ReLU()
                                 )

def encode(self, x: Tensor) -> Tensor:
    return self.encoder(x)[0]

def encode(self) -> str:
    return Activation._encoder.encode(self)

def encoder(list_of_str, key):
    tokenized = self.tokenizer.encode_commands(list_of_str)
    hidden = self.tokenizer.tokenize(tokenized)
    hidden = hidden.permute(1, 0, 2).reshape(hidden.size(1), -1)  # correct for bidirectional
    return hidden

def forward(self,
            input,
            hidden):
    embedded = self.embedding(input).view(1, 1, -1)
    output = F.relu(embedded)
    output, hidden = self.rnn(output, hidden)
    output = self.softmax(self.out(output[0]))
    return output, hidden

def encode(self,
           data: mx.sym.Symbol,
           data_length: mx.sym.Symbol,
           seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
    # data: (batch_size, seq_len, num_hidden)
    data = mx.sym.FullyConnected(data=data,
                                 num_hidden=self.config.cnn_config.num_hidden,
                                 no_bias=True,
                                 flatten=False,
                                 weight=self.i2h_weight)

    # Multiple layers with residual connections:
    for layer in self.layers:
        data = data + layer(data, data_length, seq_len)
    return data, data_length, seq_len

def __call__(self, encoder_hidden_states):
    params = self.dec_params
    search_params = self.search_params

    lm_params = self.lm_params
    get_top_k_fn = self.top_k_setup_with_lm(encoder_hidden_states)

    x = params.embedding[data_utils.GO_ID]
    x_lm = lm_params.embedding[data_utils.GO_ID]

    # Initialize decoder states
    h_size = params.dec_lstm_w.shape[1]/4
    zero_dec_state = (np.zeros(h_size), np.zeros(h_size))

    dec_lm_h_size = params.lm_lstm_w.shape[1]/4
    zero_dec_lm_state = (np.zeros(dec_lm_h_size), np.zeros(dec_lm_h_size))

    # Initialize LM state
    lm_h_size = lm_params.lstm_w.shape[1]/4
    zero_lm_state = (np.zeros(lm_h_size), np.zeros(lm_h_size))

    zero_attn = np.zeros(encoder_hidden_states.shape[1])

    # Maintain a tuple of (output_indices, score, encountered EOS?)
    output_list = []
    final_output_list = []
    k = search_params.beam_size  # Represents the current beam size
    step_count = 0

    # Run step 0 separately
    top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec = \
        get_top_k_fn(x, x_lm, [zero_dec_state, zero_dec_lm_state, zero_lm_state],
                     zero_attn, beam_size=k)
    for idx in xrange(top_k_indices.shape[0]):
        output_tuple = (BeamEntry([top_k_indices[idx]], state_list, context_vec),
                        top_k_model_scores[idx])
        if top_k_indices[idx] == data_utils.EOS_ID:
            final_output_list.append(output_tuple)
            # Decrease the beam size once EOS is encountered
            k -= 1
        else:
            output_list.append(output_tuple)

    step_count += 1
    while step_count < 120 and k > 0:
        # These lists store the states obtained by running the decoder
        # for 1 more step with the previous outputs of the beam
        next_dec_states = []
        next_context_vecs = []

        score_list = []
        model_score_list = []
        index_list = []
        for candidate, cand_score in output_list:
            x = params.embedding[candidate.get_last_output()]
            x_lm = lm_params.embedding[candidate.get_last_output()]

            top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec = \
                get_top_k_fn(x, x_lm, candidate.get_dec_state(),
                             candidate.get_context_vec(), beam_size=k)

            next_dec_states.append(state_list)
            next_context_vecs.append(context_vec)

            index_list.append(top_k_indices)
            score_list.append(top_k_scores + cand_score)
            model_score_list.append(top_k_model_scores + cand_score)

        # Scores of all k**2 continuations
        all_scores = np.concatenate(score_list, axis=0)
        all_model_scores = np.concatenate(model_score_list, axis=0)
        # All k**2 continuations
        all_indices = np.concatenate(index_list, axis=0)

        # Find the top indices among the k**2 entries
        top_k_indices = np.argpartition(all_scores, -k)[-k:]
        next_k_indices = all_indices[top_k_indices]
        top_k_scores = all_model_scores[top_k_indices]
        # The original candidate indices can be recovered by integer division
        # by k, because the flat indices are of the form i * k + j, where i is
        # the ith candidate and j is the jth top continuation of candidate i
        # (a worked example of this arithmetic follows after this list).
        orig_cand_indices = np.divide(top_k_indices, k, dtype=np.int32)

        new_output_list = []

        for idx in xrange(k):
            orig_cand_idx = int(orig_cand_indices[idx])
            # BeamEntry of the original candidate
            orig_cand = output_list[orig_cand_idx][0]
            next_elem = next_k_indices[idx]
            # Add the next index to the original sequence
            new_index_seq = orig_cand.get_index_seq() + [next_elem]
            dec_state = next_dec_states[orig_cand_idx]
            context_vec = next_context_vecs[orig_cand_idx]

            output_tuple = (BeamEntry(new_index_seq, dec_state, context_vec),
                            top_k_scores[idx] +
                            search_params.word_ins_penalty * len(new_index_seq))
            if next_elem == data_utils.EOS_ID:
                # This sequence is finished. Put the output on the final list
                # and reduce the beam size
                final_output_list.append(output_tuple)
                k -= 1
            else:
                new_output_list.append(output_tuple)

        output_list = new_output_list
        step_count += 1

    final_output_list += output_list

    best_output = max(final_output_list, key=lambda output_tuple: output_tuple[1])
    output_seq = best_output[0].get_index_seq()
    return np.stack(output_seq, axis=0)

def forward(self, input, hidden):
    output, hidden = self.rnn(input, hidden)
    output = f.log_softmax(self.out(output.squeeze(1)), 1)
    return output, hidden

def forward(self, state, encoder_padding_mask):
    residual = state.clone()

    '''
    ___QUESTION-6-DESCRIBE-D-START___
    What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor
    be after multi-head attention? HINT: formulate your answer in terms of
    constituent variables like batch_size, embed_dim etc...
    '''
    '''
    The encoder padding mask masks the ⟨pad⟩ tokens that are appended to the input
    sequences so that all sequences in a batch have the same length; the words of
    the input sequence therefore do not attend to these padded positions.
    The shape of state is (tgt_time_steps, batch_size, embed_dim).
    '''
    state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)
    '''
    ___QUESTION-6-DESCRIBE-D-END___
    '''

    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.self_attn_layer_norm(state)

    residual = state.clone()
    state = F.relu(self.fc1(state))
    state = F.dropout(state, p=self.activation_dropout, training=self.training)
    state = self.fc2(state)
    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.final_layer_norm(state)

    return state

def get_recurrent_encoder(config: RecurrentEncoderConfig, prefix: str) -> 'Encoder':
    # TODO give more control on encoder architecture
    encoder_seq = EncoderSequence([], config.dtype)

    if config.conv_config is not None:
        encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,
                           prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)
        if config.conv_config.add_positional_encoding:
            # If specified, add positional encodings to segment embeddings
            encoder_seq.append(AddSinCosPositionalEmbeddings,
                               num_embed=config.conv_config.num_embed,
                               scale_up_input=False,
                               scale_down_positions=False,
                               prefix="%s%sadd_positional_encodings" % (prefix, C.CHAR_SEQ_ENCODER_PREFIX))
        encoder_seq.append(ConvertLayout, infer_hidden=True, target_layout=C.TIME_MAJOR)
    else:
        encoder_seq.append(ConvertLayout, target_layout=C.TIME_MAJOR, num_hidden=0)

    if config.reverse_input:
        encoder_seq.append(ReverseSequence, infer_hidden=True)

    if config.rnn_config.residual:
        utils.check_condition(config.rnn_config.first_residual_layer >= 2,
                              "Residual connections on the first encoder layer are not supported")

    # One layer bi-directional RNN:
    encoder_seq.append(BiDirectionalRNNEncoder,
                       rnn_config=config.rnn_config.copy(num_layers=1),
                       prefix=prefix + C.BIDIRECTIONALRNN_PREFIX,
                       layout=C.TIME_MAJOR)

    if config.rnn_config.num_layers > 1:
        # Stacked uni-directional RNN:
        # Because we already have a one layer bi-rnn we reduce the num_layers as well as the first_residual_layer.
        remaining_rnn_config = config.rnn_config.copy(num_layers=config.rnn_config.num_layers - 1,
                                                      first_residual_layer=config.rnn_config.first_residual_layer - 1)
        encoder_seq.append(RecurrentEncoder,
                           rnn_config=remaining_rnn_config,
                           prefix=prefix + C.STACKEDRNN_PREFIX,
                           layout=C.TIME_MAJOR)

    encoder_seq.append(ConvertLayout, infer_hidden=True, target_layout=C.BATCH_MAJOR)

    return encoder_seq

def getEncode(self, img):
    img_ = self.preprocess(img)
    fv = self.model_.predict(img_)
    fv = fv.reshape(-1, 1)
    return fv

def forward(self, *args):
    enc_src, _, _ = self.unified_encoder(*args)
    enc_src = enc_src.view(enc_src.shape[0], -1)
    y_pred = self.mlp(enc_src)
    return y_pred

def forward(self, state, encoder_padding_mask):
    residual = state.clone()

    '''
    ___QUESTION-6-DESCRIBE-D-START___
    What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor
    be after multi-head attention? HINT: formulate your answer in terms of
    constituent variables like batch_size, embed_dim etc...

    The purpose of encoder_padding_mask is to account for the fact that the
    source sentences in the batch are of different lengths. The output shape
    of the state tensor will be [src_time_steps, batch_size, embed_dim]
    (a runnable shape check follows after this list).
    '''
    state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)
    '''
    ___QUESTION-6-DESCRIBE-D-END___
    '''

    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.self_attn_layer_norm(state)

    residual = state.clone()
    state = F.relu(self.fc1(state))
    state = F.dropout(state, p=self.activation_dropout, training=self.training)
    state = self.fc2(state)
    state = F.dropout(state, p=self.dropout, training=self.training)
    state += residual
    state = self.final_layer_norm(state)

    return state

def gru_seq2seq_internal_bid_encoder(inputs, targets, hparams, train):
    with tf.variable_scope("gru_seq2seq_bid_encoder"):
        if inputs is not None:
            inputs_length = common_layers.length_from_embedding(inputs)
            # Flatten inputs.
            inputs = common_layers.flatten4d3d(inputs)
            # GRU encoder.
            _, final_encoder_state = gru_bid_encoder(
                inputs, inputs_length, hparams, train, "encoder")
        else:
            inputs_length = None
            final_encoder_state = None
        # GRU decoder.
        shifted_targets = common_layers.shift_right(targets)
        # Add 1 to account for the padding added to the left from shift_right
        targets_length = common_layers.length_from_embedding(shifted_targets) + 1
        hparams_decoder = copy.copy(hparams)
        hparams_decoder.hidden_size = 2 * hparams.hidden_size
        decoder_outputs, _ = gru(
            common_layers.flatten4d3d(shifted_targets),
            targets_length,
            hparams_decoder,
            train,
            "decoder",
            initial_state=final_encoder_state)
        return tf.expand_dims(decoder_outputs, axis=2)

def forward(self, x):
    x = self.encoder(x)
    x = self.decoder(x)
    return x

def _encode(self, src_token_ids, padding_mask, training=False):
    src_seq_len = tf.shape(src_token_ids)[1]

    # [batch_size, src_seq_len, hidden_size]
    src_token_embeddings = self._embedding_logits_layer(
        src_token_ids, 'embedding')

    # [src_seq_len, hidden_size]
    positional_encoding = utils.get_positional_encoding(
        src_seq_len, self._hidden_size)
    src_token_embeddings += positional_encoding
    src_token_embeddings = self._encoder_dropout_layer(
        src_token_embeddings, training)

    encoder_outputs = self._encoder(
        src_token_embeddings, padding_mask, training)
    return encoder_outputs

def gru_bid_encoder(inputs, sequence_length, hparams, train, name):

    with tf.variable_scope(name):
        cell_fw = tf.nn.rnn_cell.MultiRNNCell(
            [_dropout_gru_cell(hparams, train)
             for _ in range(hparams.num_hidden_layers)])

        cell_bw = tf.nn.rnn_cell.MultiRNNCell(
            [_dropout_gru_cell(hparams, train)
             for _ in range(hparams.num_hidden_layers)])

        ((encoder_fw_outputs, encoder_bw_outputs),
         (encoder_fw_state, encoder_bw_state)) = tf.nn.bidirectional_dynamic_rnn(
            cell_fw,
            cell_bw,
            inputs,
            sequence_length,
            dtype=tf.float32,
            time_major=False)

        encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
        encoder_states = []

        for i in range(hparams.num_hidden_layers):
            encoder_state = tf.concat(
                values=(encoder_fw_state[i], encoder_bw_state[i]),
                axis=1,
                name="bidirectional_concat")

            encoder_states.append(encoder_state)

        encoder_states = tuple(encoder_states)
        return encoder_outputs, encoder_states

def lstm_encoder(sequence, lstm,
                 seq_lens=None, init_states=None, embedding=None):
    # Transpose the batch tensor to fit the LSTM's expected layout.
    # (A modern alternative using enforce_sorted=False is sketched after this list.)
    # sequence size [batch_size, max_seq_len]
    batch_size = sequence.size(0)
    max_seq_len = sequence.size(1)
    batch_first = lstm.batch_first

    if not batch_first:  # embed and transpose the input sequence tensor
        sequence = sequence.transpose(0, 1)

    # emb_sequence size [batch_size, max_seq_len, emb_dim]
    emb_sequence = (embedding(sequence) if embedding is not None
                    else sequence)
    # reorder the batch tensor along the batch dim
    if not seq_lens is None:  # reorder the input sequence tensor along the batch dim
        # (max_sen_len, batch_size, lstm_input_size): sort along the batch dim
        # by true sequence length, in descending order
        assert batch_size == len(seq_lens)
        sort_ind = sorted(range(len(seq_lens)),
                          key=lambda i: seq_lens[i], reverse=True)  # determine the sort indices
        seq_lens = [seq_lens[i] for i in sort_ind]  # sort the true lengths by the sort indices
        sequence = reorder_sequence(emb_sequence, sort_ind,
                                    lstm.batch_first)  # reorder the tensor's batch dim by the sort indices

    # init hidden state and cell state for the lstm
    if init_states is None:  # initialize the LSTM hidden state and cell state
        device = sequence.device
        init_states = init_lstm_states(lstm, batch_size, device)
    else:
        init_states = (init_states[0].contiguous(),
                       init_states[1].contiguous())

    if not seq_lens is None:  # Encode & Reorder Back
        packed_seq = nn.utils.rnn.pack_padded_sequence(sequence,  # pack the input so only valid timesteps are processed
                                                       seq_lens,
                                                       batch_first=batch_first)  # https://www.cnblogs.com/sbj123456789/p/9834018.html
        packed_out, final_states = lstm(packed_seq.to(init_states[0].dtype), init_states)  # encode
        lstm_out, _ = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=batch_first,
                                                       total_length=max_seq_len)
        # (max_sent_len, batch_size, emb_dim)

        sort_ind = sorted(range(len(seq_lens)),
                          key=lambda i: seq_lens[i], reverse=True)  # determine the sort indices
        back_map = {ind: i for i, ind in enumerate(sort_ind)}  # {original index: sorted position}, maps encoded results back to the input order
        reorder_ind = [back_map[i] for i in range(len(seq_lens))]  # the inverse permutation of sort_ind
        lstm_out = reorder_sequence(lstm_out, reorder_ind,
                                    batch_first)  # reorder the tensor's batch dim back to the original order (max_sent_len, batch_size, lstm_size)
        # final_states = reorder_lstm_states(final_states, reorder_ind)
    else:
        lstm_out, final_states = lstm(emb_sequence, init_states)

    # transpose
    return lstm_out, final_states  # (seq_len, batch, embedding), (hidden_layers * direction_num, batch, hidden_size)

def forward(self, src, mask):
    bs = src.shape[0]
    src = src.permute(2, 0, 1)
    m = src
    enc_embed = self.enc_embed.weight.unsqueeze(1).repeat(1, bs, 1)
    for layer in self.encoder_layers:
        m = layer(m,
                  pos=enc_embed,
                  src_mask=mask)
    return m.permute(1, 2, 0), enc_embed.permute(1, 2, 0)
]
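The index arithmetic in the beam-search `__call__` above is easy to get wrong, so here is a minimal self-contained sketch of just the parent-recovery step; all values are hypothetical and only NumPy is assumed:

import numpy as np

k = 3
# Hypothetical flattened scores: entry (i, j) -- the jth continuation of
# beam candidate i -- sits at flat index i * k + j.
all_scores = np.array([0.1, 0.4, 0.2,   # candidate 0
                       0.9, 0.3, 0.8,   # candidate 1
                       0.5, 0.7, 0.6])  # candidate 2

top_k = np.argpartition(all_scores, -k)[-k:]  # flat indices of the k best
parents = top_k // k                          # integer-divide to recover i
# top_k might come back as [5 7 3] (argpartition makes no order guarantee),
# whose parents are [1 2 1]: two of the survivors extend candidate 1.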
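The shape claims in the two QUESTION-6 answers can also be checked mechanically. This is a small sketch against torch.nn.MultiheadAttention with arbitrarily chosen sizes; it shows that key_padding_mask is a [batch_size, seq_len] boolean mask (True marks positions to ignore) and that self-attention leaves the [seq_len, batch_size, embed_dim] shape of state unchanged:

import torch
import torch.nn as nn

seq_len, batch_size, embed_dim = 5, 2, 16  # arbitrary
attn = nn.MultiheadAttention(embed_dim, num_heads=4)

state = torch.randn(seq_len, batch_size, embed_dim)
key_padding_mask = torch.zeros(batch_size, seq_len, dtype=torch.bool)
key_padding_mask[1, 3:] = True  # pretend sequence 1 ends with two pad tokens

out, _ = attn(state, state, state, key_padding_mask=key_padding_mask)
assert out.shape == (seq_len, batch_size, embed_dim)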
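Finally, the manual sort/unsort bookkeeping in lstm_encoder reflects the era when pack_padded_sequence required lengths to arrive pre-sorted; more recent PyTorch (1.1+, as far as I recall) can do the same reordering internally via enforce_sorted=False. A sketch under that assumption, with made-up sizes:

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

lstm = nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
batch = torch.randn(3, 7, 8)           # (batch, max_seq_len, emb_dim)
lengths = torch.tensor([7, 3, 5])      # true lengths, deliberately unsorted

packed = pack_padded_sequence(batch, lengths, batch_first=True,
                              enforce_sorted=False)  # sorts (and later unsorts) internally
out_packed, (h, c) = lstm(packed)
out, _ = pad_packed_sequence(out_packed, batch_first=True, total_length=7)
assert out.shape == (3, 7, 16)  # rows come back in the original batch order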
like","value":["0.683423","0.6820723","0.6807832","0.675225","0.66331315","0.66327035","0.65157163","0.6451546","0.643727","0.6415194","0.6381192","0.6375881","0.6340776","0.6330177","0.6329643","0.63056594","0.62932295","0.6283715","0.62812126","0.6278949","0.6253341","0.62327033","0.6228094","0.614302","0.6141615","0.6141152","0.61335343","0.61076015","0.6090194","0.60574174","0.60550815","0.60222626","0.60119456","0.60115296","0.6004813","0.59613115","0.59505916","0.59314317","0.59203756","0.5897832","0.58709806","0.58677506","0.5821649","0.5816332","0.5806003","0.57888347","0.5775479","0.57695806","0.5765","0.5749095","0.5738066","0.5732439","0.5729389","0.5712179","0.5696801","0.56803876","0.5679941","0.56747496","0.5670142","0.56660837","0.5665208","0.5663571","0.56617135","0.5639985","0.56144094","0.5610349","0.5599126","0.5596373","0.559421","0.5585892","0.5558495","0.55540025","0.55429024","0.55340666","0.5529411","0.5523737","0.55168015","0.5512574","0.5509761","0.5500576","0.5495655","0.54809475","0.54686475","0.546436","0.5464311","0.54629815","0.5455897","0.5452167","0.54379433","0.54273444","0.5425739","0.54247963","0.54190356","0.54174435","0.5415367","0.541156","0.54029465","0.5391497","0.53867686","0.5386355"],"string":"[\n \"0.683423\",\n \"0.6820723\",\n \"0.6807832\",\n \"0.675225\",\n \"0.66331315\",\n \"0.66327035\",\n \"0.65157163\",\n \"0.6451546\",\n \"0.643727\",\n \"0.6415194\",\n \"0.6381192\",\n \"0.6375881\",\n \"0.6340776\",\n \"0.6330177\",\n \"0.6329643\",\n \"0.63056594\",\n \"0.62932295\",\n \"0.6283715\",\n \"0.62812126\",\n \"0.6278949\",\n \"0.6253341\",\n \"0.62327033\",\n \"0.6228094\",\n \"0.614302\",\n \"0.6141615\",\n \"0.6141152\",\n \"0.61335343\",\n \"0.61076015\",\n \"0.6090194\",\n \"0.60574174\",\n \"0.60550815\",\n \"0.60222626\",\n \"0.60119456\",\n \"0.60115296\",\n \"0.6004813\",\n \"0.59613115\",\n \"0.59505916\",\n \"0.59314317\",\n \"0.59203756\",\n \"0.5897832\",\n \"0.58709806\",\n \"0.58677506\",\n \"0.5821649\",\n \"0.5816332\",\n \"0.5806003\",\n \"0.57888347\",\n \"0.5775479\",\n \"0.57695806\",\n \"0.5765\",\n \"0.5749095\",\n \"0.5738066\",\n \"0.5732439\",\n \"0.5729389\",\n \"0.5712179\",\n \"0.5696801\",\n \"0.56803876\",\n \"0.5679941\",\n \"0.56747496\",\n \"0.5670142\",\n \"0.56660837\",\n \"0.5665208\",\n \"0.5663571\",\n \"0.56617135\",\n \"0.5639985\",\n \"0.56144094\",\n \"0.5610349\",\n \"0.5599126\",\n \"0.5596373\",\n \"0.559421\",\n \"0.5585892\",\n \"0.5558495\",\n \"0.55540025\",\n \"0.55429024\",\n \"0.55340666\",\n \"0.5529411\",\n \"0.5523737\",\n \"0.55168015\",\n \"0.5512574\",\n \"0.5509761\",\n \"0.5500576\",\n \"0.5495655\",\n \"0.54809475\",\n \"0.54686475\",\n \"0.546436\",\n \"0.5464311\",\n \"0.54629815\",\n \"0.5455897\",\n \"0.5452167\",\n \"0.54379433\",\n \"0.54273444\",\n \"0.5425739\",\n \"0.54247963\",\n \"0.54190356\",\n \"0.54174435\",\n \"0.5415367\",\n \"0.541156\",\n \"0.54029465\",\n \"0.5391497\",\n \"0.53867686\",\n \"0.5386355\"\n]"},"document_score":{"kind":"string","value":"0.7965193"},"document_rank":{"kind":"string","value":"0"}}},{"rowIdx":94851,"cells":{"query":{"kind":"string","value":"Gives access to the hidden state of the individual components of the input batch. 
Since encode() encodes the whole batch of sequences in one call, but decoding is performed for every batch sequence individually, this method becomes necessary."},"document":{"kind":"string","value":"def get_encoded_item(self, encoded, index):\n\n #for vanilla RNN and GRU, since they have a hidden state represented as a single tensor\n ##return encoded[:, index:index+1]\n\n #for LSTM, since it has a hidden state represented as a tuple of two tensors: the cell state and the hidden state\n return encoded[0][:, index:index+1].contiguous(), encoded[1][:, index:index+1].contiguous()"},"metadata":{"kind":"string","value":"{\n \"objective\": {\n \"self\": [],\n \"paired\": [],\n \"triplet\": [\n [\n \"query\",\n \"document\",\n \"negatives\"\n ]\n ]\n }\n}"},"negatives":{"kind":"list like","value":["def encode(self, x):\n _, hid = self.encoder(x) #All RNN classes output a tuple of 2 objects: the output of the RNN first and the hidden state from the last item in\n return hid #the input sequence second. We're only interested in the hidden state","def _encode(self):\n with tf.variable_scope('encoding'):\n self.sep_p_encodes, _ = bilstm_layer(self.p_emb, self.p_length, self.hidden_size)\n tf.get_variable_scope().reuse_variables()\n self.sep_q_encodes, _ = bilstm_layer(self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, 1-self.dropout)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, 1-self.dropout)","def init_hidden_state(self, encoder_out: torch.Tensor):\n pass","def initialize_hidden_state(self):\n return tf.zeros(shape=(self.batch_size, self.enc_units))","def forward(\r\n self,\r\n input_ids,\r\n encoder_hidden_states,\r\n encoder_padding_mask,\r\n decoder_padding_mask,\r\n decoder_causal_mask,\r\n past_key_values=None,\r\n use_cache=False,\r\n output_attentions=False,\r\n output_hidden_states=False,\r\n return_dict=False,\r\n **unused,\r\n ):\r\n\r\n if \"decoder_cached_states\" in unused:\r\n warnings.warn(\r\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\r\n FutureWarning,\r\n )\r\n past_key_values = unused.pop(\"decoder_cached_states\")\r\n if \"decoder_past_key_values\" in unused:\r\n warnings.warn(\r\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\r\n FutureWarning,\r\n )\r\n past_key_values = unused.pop(\"decoder_past_key_values\")\r\n\r\n # check attention mask and invert\r\n if encoder_padding_mask is not None:\r\n encoder_padding_mask = invert_mask(encoder_padding_mask)\r\n\r\n # embed positions\r\n positions = self.embed_positions(input_ids, use_cache=use_cache)\r\n\r\n if use_cache:\r\n input_ids = input_ids[:, -1:]\r\n positions = positions[:, -1:]\r\n\r\n x = self.embed_tokens(input_ids) * self.embed_scale\r\n if self.do_blenderbot_90_layernorm:\r\n x = self.layernorm_embedding(x)\r\n x += positions\r\n else:\r\n x += positions\r\n x = self.layernorm_embedding(x)\r\n\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n\r\n # Convert to Bart output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\r\n x = x.transpose(0, 1)\r\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\r\n\r\n # decoder layers\r\n all_hidden_states = () if output_hidden_states else None\r\n all_self_attns = () if output_attentions else None\r\n enc_dec_all_attn = () if output_attentions else None\r\n next_decoder_cache = []\r\n for 
idx, decoder_layer in enumerate(self.layers):\r\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\r\n if output_hidden_states:\r\n all_hidden_states += (x,)\r\n dropout_probability = random.uniform(0, 1)\r\n if self.training and (dropout_probability < self.layerdrop):\r\n continue\r\n\r\n layer_state = past_key_values[idx] if past_key_values is not None else None\r\n\r\n #isidora - in comment\r\n \"\"\"\r\n x, layer_self_attn, layer_past,_ = decoder_layer(\r\n x,\r\n encoder_hidden_states,\r\n encoder_attn_mask=encoder_padding_mask,\r\n decoder_padding_mask=decoder_padding_mask,\r\n layer_state=layer_state,\r\n causal_mask=decoder_causal_mask,\r\n output_attentions=output_attentions,\r\n )\r\n \"\"\"\r\n\r\n #isidora - start - replace _ with enc_dec_attn to get the encoder-decoder attn weights\r\n x, layer_self_attn, layer_past, enc_dec_attn = decoder_layer(\r\n x,\r\n encoder_hidden_states,\r\n encoder_attn_mask=encoder_padding_mask,\r\n decoder_padding_mask=decoder_padding_mask,\r\n layer_state=layer_state,\r\n causal_mask=decoder_causal_mask,\r\n output_attentions=output_attentions,\r\n )\r\n #isidora - end\r\n\r\n\r\n if use_cache:\r\n next_decoder_cache.append(layer_past.copy())\r\n\r\n if output_attentions:\r\n all_self_attns += (layer_self_attn,)\r\n enc_dec_all_attn += (enc_dec_attn,)\r\n\r\n if self.layer_norm: # if config.add_final_layer_norm (mBART)\r\n x = self.layer_norm(x)\r\n\r\n # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\r\n if output_hidden_states:\r\n all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states)\r\n x = x.transpose(0, 1)\r\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\r\n\r\n next_cache = next_decoder_cache if use_cache else None\r\n\r\n #isidora - start - return enc_dec_all_attn instead of decoder outputs\r\n return enc_dec_all_attn\r\n #isidora - end\r\n\r\n #isidora - in comment\r\n \"\"\"\r\n if not return_dict:\r\n return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None)\r\n return BaseModelOutputWithPast(\r\n last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns\r\n )\r\n \"\"\"","def init_hidden(self, batch_size):\r\n \r\n self.hidden_state = (\r\n torch.zeros(((1+self.bidirectional)*self.num_layers,\r\n batch_size,\r\n self.hidden_size)).to(self.device),\r\n torch.zeros(((1+self.bidirectional)*self.num_layers, \r\n batch_size, \r\n self.hidden_size)).to(self.device))","def _prepare_attended_output(self,\n decoder_hidden_state: torch.Tensor,\n state: Dict[str, torch.Tensor]) -> torch.Tensor:\n # Ensure mask is also a FloatTensor. 
Or else the multiplication within\n # attention will complain.\n # shape: (batch_size, max_input_sequence_length)\n\n encoder_outputs = state[\"encoder_outputs\"]\n source_mask = state[\"source_mask\"]\n prev_attention = state[\"attention\"]\n att_keys = state[\"att_keys\"]\n att_values = state[\"att_values\"]\n\n # shape: (batch_size, max_input_sequence_length)\n mode = \"soft\" if self.training else \"hard\"\n if isinstance(self._attention, MonotonicAttention):\n encoder_outs: Dict[str, torch.Tensor] = {\n \"value\": state[\"encoder_outputs\"],\n \"mask\": state[\"source_mask\"]\n }\n\n monotonic_attention, chunk_attention = self._attention(\n encoder_outs, decoder_hidden_state, prev_attention, mode=mode)\n # shape: (batch_size, encoder_output_dim)\n attended_output = util.weighted_sum(\n encoder_outputs, chunk_attention)\n attention = monotonic_attention\n elif isinstance(self._attention, StatefulAttention):\n attended_output, attention = self._attention(decoder_hidden_state,\n att_keys, att_values, source_mask)\n else:\n attention = self._attention(\n decoder_hidden_state, source_mask)\n attended_output = util.weighted_sum(\n encoder_outputs, attention)\n\n return attended_output, attention","def call(self, inputs, output_hidden_states = False, training = False):\n if isinstance(inputs, (list, tuple)):\n input_ids = inputs[0]\n token_type_ids = inputs[1] if len(inputs) > 1 else None\n attention_mask = inputs[2] if len(inputs) > 2 else None\n \n elif isinstance(inputs, dict):\n input_ids = inputs['input_ids']\n token_type_ids = inputs.get('token_type_ids', None)\n attention_mask = inputs.get('attention_mask', None)\n else:\n raise ValueError('The type of inputs should be list or dictionary.')\n \n input_shape = shape_list(input_ids)\n \n# last_hidden_state = tf.ones(input_shape + (self.config.hidden_size))\n# output = tf.ones(input_shape + (self.config.hidden_size,))\n# logits = tf.ones(input_shape + (self.config.vocab_size,))\n# pooler_output = tf.ones((input_shape[0], self.config.hidden_size))\n \n hidden_states = [] if output_hidden_states else None\n output = self.embeddings(input_ids, token_type_ids, training = training)\n \n if output_hidden_states:\n hidden_states.append(output)\n\n if self.causal_attention:\n attention_mask = tf.constant(lower_triangle_matrix(input_shape[-1]))\n attention_mask = tf.reshape(attention_mask, shape = (1, 1, input_shape[-1], input_shape[-1]))\n \n else:\n if attention_mask is None:\n attention_mask = tf.constant(1.0, shape = input_shape, dtype = 'float32')\n # attention_mask now has shape (batches, sequence_len),\n # we need to covert it to (batches, 1, 1, sequence_len)\n # so that it will broadcast to (batches, num_attention_heads, sequence_len, sequence_len)\n attention_mask = tf.reshape(attention_mask, shape = (-1, 1, 1, input_shape[-1]))\n\n \n \n last_hidden_state, layer_outputs = self.encoder(output, attention_mask, output_hidden_states = output_hidden_states, training = training)\n if output_hidden_states:\n hidden_states.extend(layer_outputs)\n \n pooler_output = self.pooler(tf.gather(last_hidden_state, indices = 0, axis = 1)) if self.pooler else None\n logits = self.lm_head(last_hidden_state) if self.lm_head else None\n\n res = {'sequence_output': last_hidden_state,\n 'pooler_output': pooler_output,\n 'logits': logits,\n 'hidden_states': hidden_states}\n\n self.built = True\n\n return {k : v for k, v in res.items() if v is not None}","def _bridge_bidirectional_hidden(self, hidden):\n num_layers = hidden.size(0) // 2\n _, batch_size, 
hidden_size = hidden.size()\n return hidden.view(num_layers, 2, batch_size, hidden_size)\\\n .transpose(1, 2).contiguous().view(num_layers, batch_size, hidden_size * 2)","def _init_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden","def _init_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden","def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c","def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c","def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c","def forward_step(self, prev_embed, encoder_hidden, src_mask, proj_key, hidden):\n\n # compute context vector using attention mechanism\n #we only want the hidden, not the cell state of the lstm CZW, hence the hidden[0]\n query = hidden[0][-1].unsqueeze(1) # [#layers, B, D] -> [B, 1, D]\n context, attn_probs = self.attention(\n query=query, proj_key=proj_key,\n value=encoder_hidden, mask=src_mask)\n\n # update rnn hidden state\n rnn_input = torch.cat([prev_embed, context], dim=2)\n output, hidden = self.rnn(rnn_input, hidden)\n \n pre_output = torch.cat([prev_embed, output, context], dim=2)\n pre_output = self.dropout_layer(pre_output)\n pre_output = self.pre_output_layer(pre_output)\n\n return output, hidden, pre_output","def _fix_enc_hidden(self, h):\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h","def decode(self):\n decoder_input = Input(shape=self.input_decoder_shape, batch_shape=self.input_batch_decoder_shape)\n ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)\n\n if self.hparams.Masking is True:\n mask_decoder_input = Masking(mask_value=0)(decoder_input)\n mask_ppg_input = Masking(mask_value=0)(ppg_input)\n prenet_output = self.PreNet(mask_decoder_input)\n encoder_input = self.Encoder(mask_ppg_input)\n decoder_mask = None\n else:\n decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)\n prenet_output = self.PreNet(decoder_input)\n encoder_input = self.Encoder(ppg_input, decoder_mask)\n\n rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])\n # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n for i in range(self.hparams.Tacotron_decoder_layers):\n rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)\n\n # feed by self.states is unhelpful in training, since we don't stop rnn during epochs\n # but it is important in generating since each fit states will be set to zeros.!!!!!!\n rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])\n decoder_output = 
self.Linear_projection(rnn_output)\n if self.hparams.Tacotron_postnet is True:\n residual_output = decoder_output\n for i in range(self.hparams.PostNet_layers):\n residual_output = self.PostNet_Conv1D[i](residual_output)\n residual_output = self.PostNet_BatchNorm[i](residual_output)\n residual_output = self.PostNet_dropout_list[i](residual_output)\n decoder_output = Add()([decoder_output, residual_output])\n return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)","def init_hidden_state(self,batch_size):\n h = torch.zeros(batch_size,self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size,self.decoder_dim).to(device)\n return h, c","def _TransformHidden(self, _):\n raise NotImplementedError()","def process_state_batch(self, batch):\n return np.squeeze(batch, axis=1)","def _encode_back(self):\n with tf.variable_scope('passage_encoding'):\n self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)\n with tf.variable_scope('question_encoding'):\n self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)","def init_hidden_state(self, encoder_out):\n init_internal_state = []\n mean_encoder_out = encoder_out.mean(dim=1)\n h = self.init_h(mean_encoder_out)\n c = self.init_c(mean_encoder_out)\n init_internal_state.append((h, c))\n\n for i in range(1, self.decoder_number_layers):\n init_internal_state.append((\n Variable(torch.zeros(1, self.decoder_rnn_channels[i])).to(device),\n Variable(torch.zeros(1, self.decoder_rnn_channels[i])).to(device)\n ))\n return init_internal_state","def _decode_train(self):\n\n # the basic idea is, we use golden sketch during train and in order to copy from source\n # we given true mask of decoder to generate right copy weights\n state = {'encoder': self.concated_encoder_output}\n\n def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,\n reuse=False):\n return transformer_decoder(inputs, memory, bias, mem_bias, params, state, scope, reuse)\n\n self.final_logits = self._decode_func(\n self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,\n self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,\n expand_source_ids_oo=self.concat_src_ids_oo,\n max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,\n decoder_fn=transformer_concated_decoder_internal,\n scope='final_decoder')","def run_encoder(self, sess, batch):\n feed_dict = self._make_feed_dict(batch, just_enc=True) \n (enc_states, dec_in_state, global_step) = sess.run(\n [self._enc_states, self._dec_in_state, self.global_step], feed_dict) # run the encoder\n\n # dec_in_state is LSTMStateTuple shape ([batch_size,hidden_dim],[batch_size,hidden_dim])\n # Given that the batch is a single example repeated, dec_in_state is identical across the batch so we just take the top row.\n dec_in_state = tf.contrib.rnn.LSTMStateTuple(dec_in_state.c[0], dec_in_state.h[0])\n return enc_states, dec_in_state","def _make_hidden(self, batch_size):\n hidden = torch.zeros(self.nb_layers, batch_size, self.hidden_size).to(device)\n return hidden","def process_hidden_layers(self, x, training):\n restricted_to_final_seq = False\n for layer_ix, layer in enumerate(self.hidden_layers):\n if type(layer) == Dense:\n if self.return_final_seq_only and not 
restricted_to_final_seq:\n x = x[:, -1, :]\n restricted_to_final_seq = True\n x = layer(x)\n else:\n x = layer(x)\n if self.batch_norm:\n x = self.batch_norm_layers[layer_ix](x, training=False)\n if self.dropout != 0.0 and training: x = self.dropout_layer(x)\n return x, restricted_to_final_seq","def _make_hidden(self, batch_size):\n hidden = (torch.zeros(self.nb_layers, batch_size, self.hidden_size).to(device),\n torch.zeros(self.nb_layers, batch_size, self.hidden_size).to(device))\n return hidden","def forward(self, hidden_states):\n # Common transformations (dense layer, layer norm + activation function) performed on text, KG and protein data\n # transform is initialized in the parent BigBirdLMPredictionHead class\n hidden_states = self.transform(hidden_states)\n\n # The first part is processed with the text decoder, the second with the entity decoder, and the third with the\n # protein decoder to map to the text, kg, and protein vocab size, respectively\n text_hidden_states_to_vocab = self.text_decoder(hidden_states[:, : self.kg_start_idx])\n ent_hidden_states_to_kg_vocab = self.entity_decoder(\n hidden_states[:, self.kg_start_idx : self.prot_start_idx]\n )\n prot_hidden_states_to_prot_vocab = self.prot_decoder(\n hidden_states[:, self.prot_start_idx :]\n )\n\n return (\n text_hidden_states_to_vocab,\n ent_hidden_states_to_kg_vocab,\n prot_hidden_states_to_prot_vocab,\n )","def initialize_hidden_state(self, batch_size):\n return tf.zeros((batch_size, self.enc_units))","def init_hidden_state(self, encoder_out):\r\n mean_encoder_out = encoder_out.mean(dim=1)\r\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\r\n c = self.init_c(mean_encoder_out)\r\n return h, c","def _decode(self):\n with tf.variable_scope('same_question_concat'):\n batch_size = tf.shape(self.start_label)[0]\n concat_passage_encodes = tf.reshape(\n self.fuse_p_encodes,\n [batch_size, -1, 2 * self.hidden_size]\n )\n no_dup_question_encodes = tf.reshape(\n self.sep_q_encodes,\n [batch_size, -1, tf.shape(self.sep_q_encodes)[1], 2 * self.hidden_size]\n )[0:, 0, 0:, 0:]\n decoder = PointerNetDecoder(self.hidden_size)\n self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,\n no_dup_question_encodes)","def forward(self, input, dec_hidden=None):\n ### YOUR CODE HERE for part 2b\n ### TODO - Implement the forward pass of the character decoder.\n # print(\"=====input.size\",input.size())\n char_embedded= self.decoderCharEmb(input)\n # print(\"=====char_embedded.size\",char_embedded.size())\n out, dec_hidden = self.charDecoder(char_embedded,dec_hidden)\n # print(\"=====out.size\",out.size()) #dimensions (seq_length, batch, hidden_size)\n \n out_batch_first = out.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n o_proj = self.char_output_projection(out_batch_first)\n scores = o_proj.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n return scores,dec_hidden\n ### END YOUR CODE ","def encoder(list_of_str, key):\n tokenized = self.tokenizer.encode_commands(list_of_str)\n hidden = self.tokenizer.tokenize(tokenized)\n hidden = hidden.permute(1, 0, 2).reshape(hidden.size(1), -1) # correct for bididrectional\n return hidden","def _encode(self):\n with tf.variable_scope('passage_encoding'):\n self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)\n with tf.variable_scope('question_encoding'):\n self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = 
tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)","def forward(self, input, hidden, give_gates=False, debug=False):\n\n emb = self.encoder(input)\n if emb.dim()<3:\n emb = emb.unsqueeze(0)\n\n if give_gates:\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\n else:\n output, hidden = self.rnn(emb, hidden)\n\n # decoded = self.softmax(self.decoder(output))\n decoded = self.decoder(output)\n\n if give_gates:\n if debug:\n return decoded, hidden, extras, emb\n else:\n return decoded, hidden, extras\n else:\n if debug:\n return decoded, hidden, emb\n else:\n return decoded, hidden","def get_num_hidden(self) -> int:\n return self.encoders[-1].get_num_hidden()","def init_hidden(self, batch_size, device):\n if self.mode == 'LSTM':\n return (torch.ones((1+self.bidirectional , batch_size, self.hidden_dim), device=device),\n torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device))\n else:\n return torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device)","def get_representation(output):\n\n # shape: (seq_len, vocab_size)\n hidden_states = output[1]\n\n token_embeddings = torch.stack(hidden_states, dim=0)\n # remove dimension 1 (batches)\n token_embeddings = torch.squeeze(token_embeddings, dim=1)\n # swap dimension 0 and 1\n token_embeddings = token_embeddings.permute(1, 0, 2)\n # the last hidden layer output (2+seq_len, 768)\n hidden_states = [token[-1] for token in token_embeddings]\n\n return hidden_states","def _bert_encoder(self, sentence, attn_mask):\n output = self.bert(sentence, attn_mask)\n embedding = output['hidden_states'][-1]\n\n feats = self.hidden2tag(embedding)\n return feats","def transparent_forward(self, input, hidden, give_gates=False, debug=False):\n\n lseq, nseq = input.shape\n ispad = (input == self.padding)\n\n H = torch.zeros(lseq, self.nhid, nseq)\n if give_gates:\n Z = torch.zeros(lseq, self.nhid, nseq)\n R = torch.zeros(lseq, self.nhid, nseq)\n \n # because pytorch only returns hidden activity in the last time step,\n # we need to unroll it manually. 
\n O = torch.zeros(lseq, nseq, self.decoder.out_features)\n emb = self.encoder(input)\n for t in range(lseq):\n if give_gates:\n out, hidden, ZR = self.rnn(emb[t:t+1,...], hidden, give_gates=True)\n Z[t,:,:] = ZR[0].squeeze(0).T\n R[t,:,:] = ZR[1].squeeze(0).T\n else:\n out, hidden = self.rnn(emb[t:t+1,...], hidden)\n dec = self.decoder(out)\n # naan = torch.ones(hidden.squeeze(0).shape)*np.nan\n # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T\n H[t,:,:] = hidden.squeeze(0).T\n O[t,:,:] = dec.squeeze(0)\n\n if give_gates:\n if debug:\n return O, H, Z, R, emb\n else:\n return O, H, Z, R\n else:\n if debug:\n return O, H, emb\n else:\n return O, H","def reset_hidden(self, batch_size):\n\n hidden = {}\n hidden[\"h\"] = torch.Tensor(np.zeros((batch_size, self._hidden_size))).to(self._device)\n hidden[\"c\"] = torch.Tensor(np.zeros((batch_size, self._hidden_size))).to(self._device)\n return hidden","def process_state_batch(self, batch):\n # batch = np.squeeze(batch, axis=1)\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\n return batch","def process_state_batch(self, batch):\n return batch","def process_state_batch(self, batch):\n return batch","def get_reconstructed_input(self, hidden):\r\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)","def get_reconstructed_input(self, hidden):\r\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)","def forward(self, encoding, encoding_lens, hidden, y_labels=None, y_lens=None):\n # split into keys and values\n # keys [B,T,K], values [B,T,V]\n keys, values = torch.split(encoding, [self.key_dim, self.value_dim], dim=-1)\n\n hidden = hidden.unsqueeze(0)\n\n if y_labels is not None and y_lens is not None:\n return self.__forward_train(keys, values, hidden, encoding_lens, y_labels, y_lens)\n else:\n return self.__forward_inference(keys, values, hidden)","def __call__(self, encoder_hidden_states):\n params = self.dec_params\n search_params = self.search_params\n\n lm_params = self.lm_params\n get_top_k_fn = self.top_k_setup_with_lm(encoder_hidden_states)\n\n x = params.embedding[data_utils.GO_ID]\n x_lm = lm_params.embedding[data_utils.GO_ID]\n\n # Initialize Decoder states\n h_size = params.dec_lstm_w.shape[1]/4\n zero_dec_state = (np.zeros(h_size), np.zeros(h_size))\n\n dec_lm_h_size = params.lm_lstm_w.shape[1]/4\n zero_dec_lm_state = (np.zeros(dec_lm_h_size), np.zeros(dec_lm_h_size))\n\n # Initialize LM state\n lm_h_size = lm_params.lstm_w.shape[1]/4\n zero_lm_state = (np.zeros(lm_h_size), np.zeros(lm_h_size))\n\n zero_attn = np.zeros(encoder_hidden_states.shape[1])\n\n # Maintain a tuple of (output_indices, score, encountered EOS?)\n output_list = []\n final_output_list = []\n k = search_params.beam_size # Represents the current beam size\n step_count = 0\n\n # Run step 0 separately\n top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec =\\\n get_top_k_fn(x, x_lm, [zero_dec_state, zero_dec_lm_state, zero_lm_state],\n zero_attn, beam_size=k)\n for idx in xrange(top_k_indices.shape[0]):\n output_tuple = (BeamEntry([top_k_indices[idx]], state_list, context_vec),\n top_k_model_scores[idx])\n if top_k_indices[idx] == data_utils.EOS_ID:\n final_output_list.append(output_tuple)\n # Decrease the beam size once EOS is encountered\n k -= 1\n else:\n output_list.append(output_tuple)\n\n step_count += 1\n while step_count < 120 and k > 0:\n # These lists store the states obtained by running the decoder\n # for 1 more step with the previous outputs of the beam\n 
next_dec_states = []\n next_context_vecs = []\n\n score_list = []\n model_score_list = []\n index_list = []\n for candidate, cand_score in output_list:\n x = params.embedding[candidate.get_last_output()]\n x_lm = lm_params.embedding[candidate.get_last_output()]\n\n top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec =\\\n get_top_k_fn(x, x_lm, candidate.get_dec_state(),\n candidate.get_context_vec(), beam_size=k)\n\n next_dec_states.append(state_list)\n next_context_vecs.append(context_vec)\n\n index_list.append(top_k_indices)\n score_list.append(top_k_scores + cand_score)\n model_score_list.append(top_k_model_scores + cand_score)\n\n # Score of all k**2 continuations\n all_scores = np.concatenate(score_list, axis=0)\n all_model_scores = np.concatenate(model_score_list, axis=0)\n # All k**2 continuations\n all_indices = np.concatenate(index_list, axis=0)\n\n # Find the top indices among the k^^2 entries\n top_k_indices = np.argpartition(all_scores, -k)[-k:]\n next_k_indices = all_indices[top_k_indices]\n top_k_scores = all_model_scores[top_k_indices]\n # The original candidate indices can be found by dividing by k.\n # Because the indices are of the form - i * k + j, where i\n # represents the ith output and j represents the jth top index for i\n orig_cand_indices = np.divide(top_k_indices, k, dtype=np.int32)\n\n new_output_list = []\n\n for idx in xrange(k):\n orig_cand_idx = int(orig_cand_indices[idx])\n # BeamEntry of the original candidate\n orig_cand = output_list[orig_cand_idx][0]\n next_elem = next_k_indices[idx]\n # Add the next index to the original sequence\n new_index_seq = orig_cand.get_index_seq() + [next_elem]\n dec_state = next_dec_states[orig_cand_idx]\n context_vec = next_context_vecs[orig_cand_idx]\n\n output_tuple = (BeamEntry(new_index_seq, dec_state, context_vec),\n top_k_scores[idx] +\n search_params.word_ins_penalty*len(new_index_seq))\n if next_elem == data_utils.EOS_ID:\n # This sequence is finished. 
Put the output on the final list\n # and reduce beam size\n final_output_list.append(output_tuple)\n k -= 1\n else:\n new_output_list.append(output_tuple)\n\n output_list = new_output_list\n step_count += 1\n\n final_output_list += output_list\n\n best_output = max(final_output_list, key=lambda output_tuple: output_tuple[1])\n output_seq = best_output[0].get_index_seq()\n return np.stack(output_seq, axis=0)","def _decode_back(self):\n with tf.variable_scope('same_question_concat'):\n batch_size = tf.shape(self.start_label)[0]\n concat_passage_encodes = tf.reshape(\n self.fuse_p_encodes,\n [batch_size, -1, self.hidden_size]\n )\n no_dup_question_encodes = tf.reshape(\n self.sep_q_encodes,\n [batch_size, -1, tf.shape(self.sep_q_encodes)[1], self.hidden_size]\n )[0:, 0, 0:, 0:]\n decoder = PointerNetDecoder(self.hidden_size)\n self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,\n no_dup_question_encodes)\n\n outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.start_probs), axis=2),\n tf.expand_dims(tf.nn.softmax(self.end_probs), axis=1))\n outer = tf.matrix_band_part(outer, 0, -1)\n self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)\n self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)","def get_state(self):\n state = super().get_state()\n state.update({\n 'num_of_fields': self.num_of_fields,\n 'embedding_dim': self.embedding_dim})\n return state","def get_reconstructed_input(self, hidden):\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)","def forward(self, inputs, decode_len=None):\n\n batch_size = inputs.size(0)\n input_dim = inputs.size(1)\n assert input_dim == self.input_dim, 'input dim should be {:d} but now: {:d}'.format(self.input_dim, input_dim)\n\n sourceL = inputs.size(2)\n\n if self.embed_input:\n # repeat embeddings across batch_size\n # result is [batch_size x input_dim x embedding_dim]\n # TODO: repeat or expand?\n embedding = self.embedding.repeat(batch_size, 1, 1)\n embedded_inputs = []\n # result is [batch_size, 1, input_dim, sourceL]\n ips = inputs.unsqueeze(1)\n\n for i in range(sourceL):\n # [batch_size x 1 x input_dim] * [batch_size x input_dim x embedding_dim]\n # result is [batch_size, embedding_dim]\n embedded_inputs.append(torch.bmm(\n ips[:, :, :, i].float(),\n embedding).squeeze(1))\n\n # Result is [sourceL x batch_size x embedding_dim]\n embedded_inputs = torch.cat(embedded_inputs).view(\n sourceL,\n batch_size,\n embedding.size(2))\n else:\n embedded_inputs = inputs.permute(2, 0, 1)\n\n (encoder_hx, encoder_cx) = init_zero_hidden(self.hidden_dim, inputs.is_cuda)\n encoder_hx = encoder_hx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n encoder_cx = encoder_cx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n \n # encoder forward pass\n enc_h, (enc_h_t, enc_c_t) = self.encoder(embedded_inputs, (encoder_hx, encoder_cx))\n\n enc_h_linear = enc_h.view(-1, self.hidden_dim)\n # enc_h_linear_2d = enc_h_linear.view(self.hidden_dim, -1)\n enc_action_scores = self.EncodeScore(enc_h_linear)\n enc_action_scores = enc_action_scores.view(-1, batch_size).permute(1, 0)\n dec_init_state = (enc_h_t[-1], enc_c_t[-1])\n \n # repeat decoder_in_0 across batch\n decoder_input = self.decoder_in_0.unsqueeze(0).repeat(embedded_inputs.size(1), 1)\n\n (head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores), dec_hidden_t = self.decoder(decoder_input,\n embedded_inputs,\n dec_init_state,\n enc_h, max_len=decode_len)\n #TODO: added conversion to tensors\n head_pointer_probs = 
torch.stack(head_pointer_probs)\n head_pointer_probs = head_pointer_probs.permute(1, 0, 2)\n tail_pointer_probs = torch.stack(tail_pointer_probs)\n tail_pointer_probs = tail_pointer_probs.permute(1, 0, 2)\n cls_scores = torch.stack(cls_scores)\n cls_scores = cls_scores.permute(1, 0, 2)\n head_positions = torch.stack(head_positions)\n head_positions = head_positions.permute(1, 0)\n tail_positions = torch.stack(tail_positions)\n tail_positions = tail_positions.permute(1, 0)\n\n\n\n return head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, enc_action_scores","def get_reconstructed_input(self, hidden):\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)","def get_reconstructed_input(self, hidden):\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)","def encode_decode_TD(self, n_step, idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ ):\r\n\r\n actor_embedding = embed_seq(input_seq=self.input_, from_=self.dimension, to_= self.input_embed, is_training=self.is_training, BN=True, initializer=self.initializer)\r\n actor_encoding = encode_seq(input_seq=actor_embedding, input_dim=self.input_embed, num_stacks=self.num_stacks, num_heads=self.num_heads, num_neurons=self.num_neurons, is_training=self.is_training)\r\n \r\n if self.is_training == False:\r\n actor_encoding = tf.tile(actor_encoding,[self.batch_size,1,1])\r\n \r\n idx_list = copy(idx_list_previous)\r\n log_probs = copy(log_probs_previous)\r\n entropies = copy(entropies_previous)\r\n \r\n\r\n mask = copy(mask_previous)\r\n \r\n n_hidden = actor_encoding.get_shape().as_list()[2] # input_embed\r\n W_ref = tf.get_variable(\"W_ref\",[1, n_hidden, self.num_units],initializer=self.initializer)\r\n W_q = tf.get_variable(\"W_q\",[self.query_dim, self.num_units],initializer=self.initializer)\r\n v = tf.get_variable(\"v\",[self.num_units],initializer=self.initializer)\r\n \r\n encoded_ref = tf.nn.conv1d(actor_encoding, W_ref, 1, \"VALID\") # actor_encoding is the ref for actions [Batch size, seq_length, n_hidden]\r\n \r\n query1 = copy( query1_previous)\r\n query2 = copy( query2_previous)\r\n query3 = copy( query3_previous)\r\n idx_copy = copy(idx_)\r\n \r\n W_1 =tf.get_variable(\"W_1\",[n_hidden, self.query_dim],initializer=self.initializer) # update trajectory (state)\r\n W_2 =tf.get_variable(\"W_2\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n W_3 =tf.get_variable(\"W_3\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n \r\n \r\n \"\"\"\r\n # sample from POINTER from the perspective of the Actor\r\n \"\"\"\r\n for step in range(n_step + 1 ): \r\n query = tf.nn.relu(tf.matmul(query1, W_1) + tf.matmul(query2, W_2) + tf.matmul(query3, W_3))\r\n logits = pointer(encoded_ref=encoded_ref, query=query, mask=mask, W_ref=W_ref, W_q=W_q, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n idx_list_previous.append(idx)\r\n \r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n log_probs_previous.append(prob.log_prob(idx))\r\n \r\n entropies.append(prob.entropy()) # entropies\r\n entropies_previous.append(prob.entropy())\r\n \r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n mask_previous = mask_previous + tf.one_hot(idx, self.max_length)\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n idx_ = 
tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_) # update trajectory (state)\r\n \r\n query3_previous = query2_previous\r\n query2_previous = query1_previous\r\n query1_previous = tf.gather_nd(actor_encoding, idx_) # update trajectory (state) \r\n\r\n if (len(idx_list) >= self.max_length): break #leave the loop if reach the end of the episode\r\n\r\n \"\"\"\r\n # sample from POINTER from the perspective of the Critic\r\n make q_t vector = 0\r\n \"\"\"\r\n while(len(idx_list) < self.max_length): \r\n \r\n logits = pointer_critic(encoded_ref=encoded_ref, mask=mask, W_ref=W_ref, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n entropies.append(prob.entropy()) # entropies\r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n #idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_copy) # update trajectory (state)\r\n \r\n idx_list.append(idx_list[0]) # return to start\r\n self.tour =tf.stack(idx_list, axis=1) # permutations\r\n self.log_prob = tf.add_n(log_probs) # corresponding log-probability for backprop\r\n self.entropies = tf.add_n(entropies)\r\n tf.summary.scalar('log_prob_mean', tf.reduce_mean(self.log_prob))\r\n tf.summary.scalar('entropies_mean', tf.reduce_mean(self.entropies))\r\n \r\n return idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ #returns variables necessary for the next loop\r","def _replace_appropriate_hidden_state_rows(hidden_state: Union[Tuple[torch.Tensor, torch.Tensor]],\n new_hidden_state: Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor],\n packed_sequence_info: PackedSequenceInfo, iteration: int,\n num_batches: int) -> Union[Tuple[torch.Tensor, torch.Tensor]]:\n if packed_sequence_info:\n # In the case of PackedSequence, certain inputs in the batch need to be ignored, depending on\n # sequence length for that input and which timestep we are in.\n # In our implementation, we still feed the full batch into the rnn_impl_map function, but\n # instead of replacing all rows of cell_hx (each row corresponds to an output for an item in the\n # batch), we replace only rows which correspond to valid batch inputs. This is the same as how\n # hx behaves in actual Pytorch implementation when using PackedSequence.\n current_batch_size = packed_sequence_info.batch_sizes[iteration]\n if current_batch_size == num_batches:\n # All items in the input batch are valid, so we can replace the entire hidden state.\n hidden_state = new_hidden_state\n else:\n # Not all items in the input batch are valid. 
Replace the first number of rows in the hidden\n # state corresponding to the number of valid items, and keep the remaining rows unchanged.\n if isinstance(hidden_state, tuple):\n hidden_state = (torch.cat((new_hidden_state[0][:current_batch_size - num_batches],\n hidden_state[0][current_batch_size - num_batches:])),\n torch.cat((new_hidden_state[1][:current_batch_size - num_batches],\n hidden_state[1][current_batch_size - num_batches:])))\n else:\n hidden_state = torch.cat((new_hidden_state[:current_batch_size - num_batches],\n hidden_state[current_batch_size - num_batches:]))\n else:\n hidden_state = new_hidden_state\n return hidden_state","def hidden(self):\n return self._hidden","def transparent_forward(self, inp, hidden=None, give_gates=False, debug=False):\n\n lseq = inp.shape[0]\n nseq = inp.shape[1]\n # ispad = (input == self.padding)\n\n if hidden is None:\n hidden = self.init_hidden(nseq)\n\n H = torch.zeros(lseq, self.nhid, nseq)\n if give_gates:\n Z = torch.zeros(lseq, self.nhid, nseq)\n R = torch.zeros(lseq, self.nhid, nseq)\n \n # because pytorch only returns hidden activity in the last time step,\n # we need to unroll it manually. \n O = torch.zeros(lseq, nseq, self.decoder.out_features)\n if self.recoder is None:\n emb = inp\n else:\n emb = self.recoder(inp)\n for t in range(lseq):\n if give_gates:\n out, hidden, ZR = self.rnn(emb[t:t+1,...], hidden, give_gates=True)\n Z[t,:,:] = ZR[0].squeeze(0).T\n R[t,:,:] = ZR[1].squeeze(0).T\n else:\n out, hidden = self.rnn(emb[t:t+1,...], hidden)\n dec = self.decoder(out)\n # naan = torch.ones(hidden.squeeze(0).shape)*np.nan\n # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T\n H[t,:,:] = hidden.squeeze(0).T\n O[t,:,:] = dec.squeeze(0)\n\n if give_gates:\n if debug:\n return O, H, Z, R, emb\n else:\n return O, H, Z, R\n else:\n if debug:\n return O, H, emb\n else:\n return O, H","def beamSearchDecoder(self, enc_states, hidden, test=False, sentence=None, st=\"\", ed=\"\", k=3):\n\t\tbatch_size = enc_states.shape[0]\n\t\thidden = F.tanh(self.init_decoder_hidden(hidden[1])).view(1, batch_size, self.hid_dim)\n\t\tif test:\n\t\t\tbeams = [Beam(k, self.vocab, hidden[:,i,:], self.device) for i in range(batch_size)]\n\n\t\t\tfor i in range(self.max_trg_len):\n\t\t\t\tfor j in range(batch_size):\n\t\t\t\t\tlogits, hidden = self.decoderStep(enc_states[j].view(1, -1, self.hid_dim).expand(k, -1, -1),\n\t\t\t\t\t\t\t\t\t\t\t\t\t beams[j].get_hidden_state(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t beams[j].get_current_word())\n\t\t\t\t\tlogLikelihood = torch.log(F.softmax(logits, dim=-1))\n\t\t\t\t\tbeams[j].advance(logLikelihood, hidden)\n\n\t\t\tallHyp, allScores = [], []\n\t\t\tn_best = 1\n\t\t\tfor b in range(batch_size):\n\t\t\t\tscores, ks = beams[b].sort_best()\n\n\t\t\t\tallScores += [scores[:n_best]]\n\t\t\t\thyps = [beams[b].get_hyp(k) for k in ks[:n_best]]\n\t\t\t\tallHyp.append(hyps)\n\n\t\t\treturn allHyp\n\t\t\t# return sentences\n\t\telse:\n\t\t\tmax_seq_len = sentence.shape[1]\n\t\t\tlogits = torch.zeros(batch_size, max_seq_len - 1, self.vocab_size, device=self.device)\n\t\t\tfor i in range(max_seq_len - 1):\n\t\t\t\t# logit: [batch, 1, vocab_size]\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, sentence[:, i])\n\t\t\t\tlogits[:, i, :] = logit.squeeze()\n\t\t\treturn logits","def _hidden_activation(self, inputs):\n if self.act_enc is None:\n act_enc = lambda x: x\n else:\n act_enc = self.act_enc\n return act_enc(self._mappings(inputs))","def get_output_for(self, inputs, **kwargs):\n # Retrieve the layer 
input\n input = inputs[0]\n # Retrieve the mask when it is supplied\n mask = None\n hid_init = None\n if self.mask_incoming_index > 0:\n mask = inputs[self.mask_incoming_index]\n if self.hid_init_incoming_index > 0:\n hid_init = inputs[self.hid_init_incoming_index]\n\n # Input should be provided as (n_batch, n_time_steps, n_features)\n # but scan requires the iterable dimension to be first\n # So, we need to dimshuffle to (n_time_steps, n_batch, n_features)\n input = input.dimshuffle(1, 0, *range(2, input.ndim))\n seq_len, num_batch = input.shape[0], input.shape[1]\n\n # When we are not precomputing the input, we also need to pass the\n # input-to-hidden parameters to step\n non_seqs = L.get_all_params(self.input_to_hidden)\n\n # Create single recurrent computation step function\n def step(input_n, hid_previous, *args):\n hid_pre = L.get_output(\n self.input_to_hidden,{self.input_to_hidden_input : input_n,\n self.input_to_hidden_hidden : hid_previous}, **kwargs)\n\n # Clip gradients\n if self.grad_clipping:\n hid_pre = theano.gradient.grad_clip(\n hid_pre, -self.grad_clipping, self.grad_clipping)\n\n return hid_pre\n\n def step_masked(input_n, mask_n, hid_previous, *args):\n # Skip over any input with mask 0 by copying the previous\n # hidden state; proceed normally for any input with mask 1.\n hid = step(input_n, hid_previous, *args)\n hid_out = T.switch(mask_n, hid, hid_previous)\n return [hid_out]\n\n if mask is not None:\n mask = mask.dimshuffle(1, 0, 'x')\n sequences = [input, mask]\n step_fun = step_masked\n else:\n sequences = input\n step_fun = step\n\n if not isinstance(self.hid_init, L.Layer):\n # The code below simply repeats self.hid_init num_batch times in\n # its first dimension. Turns out using a dot product and a\n # dimshuffle is faster than T.repeat.\n dot_dims = (list(range(1, self.hid_init.ndim - 1)) +\n [0, self.hid_init.ndim - 1])\n hid_init = T.dot(T.ones((num_batch, 1)),\n self.hid_init.dimshuffle(dot_dims))\n\n if self.unroll_scan:\n # Retrieve the dimensionality of the incoming layer\n input_shape = self.input_shapes[0]\n # Explicitly unroll the recurrence instead of using scan\n hid_out = unroll_scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[hid_init],\n go_backwards=self.backwards,\n non_sequences=non_seqs,\n n_steps=input_shape[1])[0]\n else:\n # Scan op iterates over first dimension of input and repeatedly\n # applies the step function\n hid_out = theano.scan(\n fn=step_fun,\n sequences=sequences,\n go_backwards=self.backwards,\n outputs_info=[hid_init],\n non_sequences=non_seqs,\n truncate_gradient=self.gradient_steps,\n strict=True)[0]\n\n # When it is requested that we only return the final sequence step,\n # we need to slice it out immediately after scan is applied\n if self.only_return_final:\n hid_out = hid_out[-1]\n else:\n # dimshuffle back to (n_batch, n_time_steps, n_features))\n hid_out = hid_out.dimshuffle(1, 0, *range(2, hid_out.ndim))\n\n # if scan is backward reverse the output\n if self.backwards:\n hid_out = hid_out[:, ::-1]\n\n return hid_out","def _init_rnn_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple(\n [self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden","def gru_encoder(cell, embedding, init_state, batch_input, batch_mask):\n #batch_size = batch_input.get_shape()[0]\n #state = tf.zeros([batch_size, options['state_size']], tf.float32) # initialize the 
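_init_rnn_state above calls self._cat_directions without showing it. A common implementation, sketched here (and mirroring the _fix_enc_hidden snippet that appears later in this collection), merges the interleaved forward/backward states of a bidirectional encoder:

import torch

def cat_directions(h):
    # h: (num_layers * num_directions, batch, hidden_size) from a
    # bidirectional RNN. The layout is [fw_0, bw_0, fw_1, bw_1, ...], so
    # concatenating even and odd rows along the feature dim yields
    # (num_layers, batch, hidden_size * 2).
    return torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], dim=2)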
state\n outputs = []\n #split_inputs = tf.split(1, batch_input.get_shape()[0], batch_input)\n \n with tf.device(\"/cpu:0\"):\n embedded_list = tf.nn.embedding_lookup(embedding, batch_input)\n #embedded_list = batch_mask * tf.transpose(embedded_list, [2, 0, 1]) # Add mask to change embedding into zeros\n #embedded_list = tf.transpose(embedded_list, [2, 1, 0])\n embedded_list = tf.transpose(embedded_list, [1, 0, 2])\n embedded_list = tf.unpack(embedded_list) # list of embedding\n \n # min_sequence_length = tf.reduce_min(seq_len)\n #max_sequence_length = tf.reduce_max(seq_len)\n\n state = init_state\n for time, (embedded, i_mask) in enumerate(zip(embedded_list, tf.unpack(tf.transpose(batch_mask)))):\n #embedded = tf.nn.embedding_lookup(embedding, tf.reshape(inputs, [-1])) # deprecated\n #embedded = embedded * tf.reshape(tf.convert_to_tensor(batch_mask[:, time], tf.float32), [batch_size, 1]) # deprecated\n #copy_cond = (time >= seq_len)\n #new_output, new_state = cell(embedded, state)\n output, state = cell(embedded, state)#tf.select(copy_cond, zero_output, new_output), tf.select(copy_cond, state, new_state)\n output = tf.expand_dims(i_mask, 1) * output\n outputs.append(output)\n #outputs = batch_mask * tf.transpose(tf.pack(outputs), [2, 0, 1])\n #outputs = tf.unpack(tf.transpose(outputs, [2, 1, 0]))\n return outputs, state","def get_reconstructed_input(self, hidden):\n return T.nnet.sigmoid(T.dot(hidden, self.W.T) + self.bv)","def forward(self, hidden: Union[torch.Tensor, Tuple[torch.Tensor, ...]]) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:\n # First, map the non-tuple version to a 1-tuple for easier processing.\n # We will undo this at the end\n if not isinstance(hidden, tuple):\n hidden = (hidden,)\n\n batch_size, hidden_size = hidden[0].size()\n\n # If we are going to share parameters across the forward and backward\n # directions, then we need to separate them in the tensors\n if self.share_bidirectional_parameters:\n # shape: (batch_size, 2, encoder_hidden_size // 2)\n hidden = tuple(h.view(batch_size, 2, -1) for h in hidden)\n\n # Apply the bridge\n output = tuple(layer(h) for layer, h in zip(self.layers, hidden))\n\n # Reshape the tensors if the parameters are shared\n if self.share_bidirectional_parameters:\n # shape: (batch_size, decoder_hidden_size)\n output = tuple(h.view(batch_size, -1) for h in output)\n\n # Undo the tuple if there's only 1 element\n if len(output) == 1:\n output = output[0]\n return output","def encode(self, state):\n raise NotImplementedError","def init_hidden(self, encoder_final):\n\n #print(\"encoder final shape\")\n #print(encoder_final[0].size())\n if encoder_final is None:\n return None # start with zeros\n\n return (torch.tanh(self.bridge_hidden(encoder_final[0])),\n torch.tanh(self.bridge_cell(encoder_final[1])))","def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \n target_sequence_length, max_summary_length, \n output_layer, keep_prob):\n # TODO: Implement Function\n trainig_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, trainig_helper, encoder_state, output_layer)\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(basic_decoder,maximum_iterations=max_summary_length)\n return f_output","def build(self, unused_input_shapes):\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n TransformerDecoderBlock(\n hidden_size=self.hidden_size,\n num_attention_heads=self.num_attention_heads,\n 
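gru_encoder above zeroes the output at padded timesteps (tf.expand_dims(i_mask, 1) * output) but still advances the state on those steps; note that step_masked in the Lasagne snippet earlier instead carries the previous hidden state forward, which is often preferable. A compact PyTorch sketch of the masking pattern (masked_gru_encode is hypothetical; cell is assumed to be a torch.nn.GRUCell):

import torch

def masked_gru_encode(cell, embedded, mask, state):
    # embedded: (seq_len, batch, emb_dim); mask: (seq_len, batch) of 0/1.
    outputs = []
    for x_t, m_t in zip(embedded, mask):
        state = cell(x_t, state)                # (batch, hidden)
        outputs.append(m_t.unsqueeze(1) * state)  # zero padded positions
    return torch.stack(outputs), state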
intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=(\"layer_%d\" % i)))\n super(TransformerDecoder, self).build(unused_input_shapes)","def encode(data, encoder):\n # Get the list of hidden depths\n\thd = encoder.hidden_depths\n # Find the middle hidden layer\n\tmiddle_layer_index = (len(hd)-1)/2\n # Initialize empty container for the encoded data\n\tdata_encoded = np.zeros((data.shape[0],hd[middle_layer_index]))\n\tfor i, d_ in enumerate(data):\n # feed forward, get all the activations, and just keep\n # the middle layer, which is the encoding\n\t\tx, z_container, x_container = encoder.ff(d_,True,True)\n\t\tx_encoded = x_container[1+middle_layer_index]\n\t\tdata_encoded[i] = x_encoded\n\t#\n\treturn data_encoded","def forward(self, *args): # noqa: R0914\r\n encoder_out, (hn, cn) = self.unified_encoder(*args)\r\n device = hn.device\r\n non_sequential_cont_decoded = self.mlp_non_seq_cont(hn)\r\n non_sequential_cat_decoded = []\r\n for mlp_non_seq_cat in self.mlp_non_seq_cat_list:\r\n non_sequential_cat_decoded.append(mlp_non_seq_cat(hn))\r\n\r\n hn = torch.unsqueeze(hn, 0)\r\n cn = torch.unsqueeze(cn, 0)\r\n # decoded is the output prediction of timestep i-1 of the decoder\r\n decoded = torch.zeros(encoder_out.shape[0], int(\r\n self.unified_encoder.seq_cont_count + self.unified_encoder.no_of_embs_seq), device=device)\r\n seq_cont_decoded = torch.Tensor(device=device)\r\n seq_cat_decoded = []\r\n for _ in range(self.unified_encoder.seq_cat_count):\r\n seq_cat_decoded.append(torch.Tensor(device=device))\r\n\r\n for _ in range(encoder_out.shape[1]):\r\n decoded, (hn, cn), out_cont, out_cat = self.decoder(decoded, (hn, cn))\r\n # Predict all categorical columns\r\n out_cat_onehot = []\r\n if self.unified_encoder.seq_cat_count != 0:\r\n for idx, out in enumerate(out_cat):\r\n out_cat_onehot.append(torch.argmax(out, dim=1).unsqueeze(-1))\r\n seq_cat_decoded[idx] = torch.cat(\r\n [seq_cat_decoded[idx], out.view(out.shape[0], 1, -1)], dim=1)\r\n out_cat_onehot = torch.cat(out_cat_onehot, -1)\r\n out_cat_embedding = self.unified_encoder.seq_emb_layers(out_cat_onehot)\r\n decoded = torch.cat([out_cat_embedding, out_cont], dim=-1)\r\n else:\r\n decoded = out_cont\r\n seq_cont_decoded = torch.cat(\r\n [seq_cont_decoded, out_cont.view(out_cont.shape[0], 1, -1)], dim=1)\r\n\r\n return non_sequential_cont_decoded, non_sequential_cat_decoded, seq_cont_decoded, seq_cat_decoded","def init_hidden_states(self, encoder_out):\n mean_encoder_out = encoder_out.mean(dim=1)\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\n c = self.init_c(mean_encoder_out)\n\n return h, c","def get_final_encoder_states(encoder_outputs: torch.Tensor,\n mask: torch.Tensor,\n bidirectional: bool = False) -> torch.Tensor:\n # These are the indices of the last words in the sequences (i.e. length sans padding - 1). 
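Several snippets in this collection initialise the decoder from the mean-pooled encoder output, as init_hidden_states does above. A self-contained sketch of that pattern (class and attribute names are illustrative):

import torch
import torch.nn as nn

class MeanInitDecoder(nn.Module):
    # Project the time-averaged encoder output to the decoder's h and c.
    def __init__(self, encoder_dim, decoder_dim):
        super().__init__()
        self.init_h = nn.Linear(encoder_dim, decoder_dim)
        self.init_c = nn.Linear(encoder_dim, decoder_dim)

    def init_hidden_states(self, encoder_out):
        # encoder_out: (batch, seq_len, encoder_dim)
        mean = encoder_out.mean(dim=1)
        return self.init_h(mean), self.init_c(mean)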
We\n # are assuming sequences are right padded.\n # Shape: (batch_size,)\n last_word_indices = mask.sum(1).long() - 1\n\n # handle -1 cases\n ll_ = (last_word_indices != -1).long()\n last_word_indices = last_word_indices * ll_\n\n batch_size, _, encoder_output_dim = encoder_outputs.size()\n expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)\n # Shape: (batch_size, 1, encoder_output_dim)\n final_encoder_output = encoder_outputs.gather(1, expanded_indices)\n final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)\n if bidirectional:\n final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]\n final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]\n final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)\n return final_encoder_output","def get_state(self):\n state = super().get_state()\n state.update({\n 'num_of_fields': self.num_of_fields,\n 'hash_size': self.hash_size,\n 'embedding_dim': self.embedding_dim})\n return state","def decode(self):\n for layer in self.layers:\n layer.decode()","def decode(self):\n for layer in self.layers:\n layer.decode()","def forward(self,\n input,\n hidden,\n encoder_outputs):\n embedded = self.embedding(input).view(1, 1, -1)\n embedded = self.dropout(embedded)\n\n # attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)\n attn_state = hidden[0] if isinstance(hidden, tuple) else hidden\n attn_weights = F.softmax(attn_state[0] @ encoder_outputs.squeeze().t(), dim=1)\n attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.permute(1, 0, 2))\n\n output = torch.cat((embedded[0], attn_applied[0]), 1)\n output = self.attn_combine(output).unsqueeze(0)\n\n output = F.relu(output)\n output, hidden = self.rnn(output, hidden)\n\n output = F.log_softmax(self.out(output[0]), dim=1)\n return output, hidden, attn_weights","def zero_state(self, batch_size):\n del batch_size\n p = self.params\n if p.left_context != 1 or p.right_context != 0:\n msg = ('Streaming implementation of chunkwise attention with left context'\n 'or right context is not supported yet')\n raise NotImplementedError(msg)\n return py_utils.NestedMap()","def initialize_hidden_state(self):\n initializer = tf.keras.initializers.Zeros()\n rnnten = initializer(shape=(self.batch, self.units))\n return rnnten","def extract_hidden_states(self, output):\n \n # Extracting the forward and backward hidden states from the last BiLSTM layer\n # output (batch_size, sequence length, 2 * hidden dim)\n output_fw = output[:,:,0:self._hidden_size]\n output_bw = output[:,:,self._hidden_size:]\n \n hidden_states = torch.cat((output_fw, output_bw),-1)\n \n return hidden_states","def dev_network(self):\n freeze_model(self.eval_net)\n for data_set_name, data_set in self.data_to_dev.items():\n #print(data_set_name)\n valid_iter = make_data_iter(\n dataset=data_set, batch_size=1, batch_type=self.batch_type,\n shuffle=False, train=False)\n valid_sources_raw = data_set.src\n\n \n # don't track gradients during validation\n r_total = 0\n roptimal_total = 0\n all_outputs = []\n i_sample = 0\n\n for valid_batch in iter(valid_iter):\n # run as during training to get validation loss (e.g. 
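A toy invocation of get_final_encoder_states above, to make the shapes concrete (values are illustrative):

import torch

# Batch of 2 right-padded sequences, lengths 3 and 1, hidden size 4.
encoder_outputs = torch.randn(2, 3, 4)
mask = torch.tensor([[1, 1, 1], [1, 0, 0]])
final = get_final_encoder_states(encoder_outputs, mask)  # shape (2, 4)
# final[0] == encoder_outputs[0, 2]; final[1] == encoder_outputs[1, 0]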
xent)\n\n batch = Batch(valid_batch, self.pad_index, use_cuda=self.use_cuda)\n\n encoder_output, encoder_hidden = self.model.encode(\n batch.src, batch.src_lengths,\n batch.src_mask)\n\n # if maximum output length is \n # not globally specified, adapt to src len\n if self.max_output_length is None:\n self.max_output_length = int(max(batch.src_lengths.cpu().numpy()) * 1.5)\n\n batch_size = batch.src_mask.size(0)\n prev_y = batch.src_mask.new_full(size=[batch_size, 1], fill_value=self.bos_index,\n dtype=torch.long)\n output = []\n hidden = self.model.decoder._init_hidden(encoder_hidden)\n prev_att_vector = None\n finished = batch.src_mask.new_zeros((batch_size, 1)).byte()\n\n # pylint: disable=unused-variable\n for t in range(self.max_output_length):\n \n\n # if i_sample == 0 or i_sample == 3 or i_sample == 6:\n # print(\"state on t = \", t, \" : \" , state)\n\n # decode one single step\n logits, hidden, att_probs, prev_att_vector = self.model.decoder(\n encoder_output=encoder_output,\n encoder_hidden=encoder_hidden,\n src_mask=batch.src_mask,\n trg_embed=self.model.trg_embed(prev_y),\n hidden=hidden,\n prev_att_vector=prev_att_vector,\n unroll_steps=1)\n # greedy decoding: choose arg max over vocabulary in each step with egreedy porbability\n \n if self.state_type == 'hidden':\n state = torch.cat(hidden, dim=2).squeeze(1).detach().cpu()[0]\n else:\n state = torch.FloatTensor(prev_att_vector.squeeze(1).detach().cpu().numpy()[0])\n\n logits = self.eval_net(state)\n logits = logits.reshape([1,1,-1]) \n #print(type(logits), logits.shape, logits)\n next_word = torch.argmax(logits, dim=-1) \n a = next_word.squeeze(1).detach().cpu().numpy()[0]\n prev_y = next_word\n \n output.append(next_word.squeeze(1).detach().cpu().numpy())\n prev_y = next_word\n \n # check if previous symbol was \n is_eos = torch.eq(next_word, self.eos_index)\n finished += is_eos\n # stop predicting if reached for all elements in batch\n if (finished >= 1).sum() == batch_size:\n break\n stacked_output = np.stack(output, axis=1) # batch, time\n\n #decode back to symbols\n decoded_valid_in = self.model.trg_vocab.arrays_to_sentences(arrays=batch.src,\n cut_at_eos=True)\n decoded_valid_out_trg = self.model.trg_vocab.arrays_to_sentences(arrays=batch.trg,\n cut_at_eos=True)\n decoded_valid_out = self.model.trg_vocab.arrays_to_sentences(arrays=stacked_output,\n cut_at_eos=True)\n \n \n\n hyp = stacked_output\n\n r = self.Reward(batch.trg, hyp , show = False)\n \n if i_sample == 0 or i_sample == 3 or i_sample == 6:\n print(\"\\n Sample \", i_sample, \"-------------Target vs Eval_net prediction:--Raw---and---Decoded-----\")\n print(\"Target: \", batch.trg, decoded_valid_out_trg)\n print(\"Eval : \", stacked_output, decoded_valid_out, \"\\n\")\n print(\"Reward: \", r)\n\n #r = self.Reward1(batch.trg, hyp , show = False)\n r_total += sum(r[np.where(r > 0)])\n if i_sample ==0:\n roptimal = self.Reward(batch.trg, batch.trg , show = False)\n roptimal_total += sum(roptimal[np.where(roptimal > 0)])\n \n all_outputs.extend(stacked_output)\n i_sample += 1\n\n assert len(all_outputs) == len(data_set)\n\n # decode back to symbols\n decoded_valid = self.model.trg_vocab.arrays_to_sentences(arrays=all_outputs,\n cut_at_eos=True)\n\n # evaluate with metric on full dataset\n join_char = \" \" if self.level in [\"word\", \"bpe\"] else \"\"\n valid_sources = [join_char.join(s) for s in data_set.src]\n valid_references = [join_char.join(t) for t in data_set.trg]\n valid_hypotheses = [join_char.join(t) for t in decoded_valid]\n\n # post-process\n if 
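The decoding loop above tracks which sequences have emitted the end-of-sequence symbol and stops once all have. Distilled to its core (greedy_decode and step_fn are hypothetical stand-ins for the decoder call):

import torch

def greedy_decode(step_fn, bos, eos, batch_size, max_len):
    # step_fn(prev_tokens) -> logits of shape (batch, vocab) is assumed.
    prev = torch.full((batch_size,), bos, dtype=torch.long)
    finished = torch.zeros(batch_size, dtype=torch.bool)
    outputs = []
    for _ in range(max_len):
        logits = step_fn(prev)
        prev = logits.argmax(dim=-1)      # greedy choice per sequence
        outputs.append(prev)
        finished |= prev.eq(eos)          # mark sequences that hit <eos>
        if finished.all():
            break
    return torch.stack(outputs, dim=1)    # (batch, <=max_len)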
self.level == \"bpe\":\n valid_sources = [bpe_postprocess(s) for s in valid_sources]\n valid_references = [bpe_postprocess(v)\n for v in valid_references]\n valid_hypotheses = [bpe_postprocess(v) for\n v in valid_hypotheses]\n\n # if references are given, evaluate against them\n if valid_references:\n assert len(valid_hypotheses) == len(valid_references)\n\n current_valid_score = 0\n if self.eval_metric.lower() == 'bleu':\n # this version does not use any tokenization\n current_valid_score = bleu(valid_hypotheses, valid_references)\n elif self.eval_metric.lower() == 'chrf':\n current_valid_score = chrf(valid_hypotheses, valid_references)\n elif self.eval_metric.lower() == 'token_accuracy':\n current_valid_score = token_accuracy(\n valid_hypotheses, valid_references, level=self.level)\n elif self.eval_metric.lower() == 'sequence_accuracy':\n current_valid_score = sequence_accuracy(\n valid_hypotheses, valid_references)\n else:\n current_valid_score = -1\n\n self.dev_network_count += 1\n self.tb_writer.add_scalar(\"dev/dev_reward\",\n r_total, self.dev_network_count)\n self.tb_writer.add_scalar(\"dev/dev_bleu\",\n current_valid_score, self.dev_network_count)\n \n print(self.dev_network_count ,' r_total and score: ', r_total , current_valid_score)\n\n \n unfreeze_model(self.eval_net)\n return current_valid_score","def encode_sequence(sequence, rnns, embedder, dropout_amount=0.):\n\n batch_size = 1\n layer_states = []\n for rnn in rnns:\n hidden_size = rnn.weight_hh.size()[1]\n \n # h_0 of shape (batch, hidden_size)\n # c_0 of shape (batch, hidden_size)\n if rnn.weight_hh.is_cuda:\n h_0 = torch.cuda.FloatTensor(batch_size,hidden_size).fill_(0)\n c_0 = torch.cuda.FloatTensor(batch_size,hidden_size).fill_(0)\n else:\n h_0 = torch.zeros(batch_size,hidden_size)\n c_0 = torch.zeros(batch_size,hidden_size)\n\n layer_states.append((h_0, c_0))\n\n outputs = []\n for token in sequence:\n rnn_input = embedder(token)\n (cell_states, hidden_states), output, layer_states = forward_one_multilayer(rnns,rnn_input,layer_states,dropout_amount)\n\n outputs.append(output)\n\n return (cell_states, hidden_states), outputs","def init_hidden(self, batch_size):\n return torch.zeros(()), torch.zeros(())","def _decode_train(self, decoder, _encoder_output, _features, labels):\r\n target_embedded = tf.nn.embedding_lookup(decoder.target_embedding,\r\n labels[\"target_ids\"])\r\n\r\n return decoder(_encoder_output, labels=target_embedded[:,:-1], sequence_length=labels[\"target_len\"]-1)","def __call__(self, batch):\n # Right zero-pad all one-hot text sequences to max input length\n input_lengths, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([len(x[0]) for x in batch]),\n dim=0, descending=True)\n max_input_len = input_lengths[0]\n\n inputs_padded = torch.LongTensor(len(batch), max_input_len)\n inputs_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n input_id = batch[ids_sorted_decreasing[i]][0]\n inputs_padded[i, :input_id.shape[0]] = input_id\n\n phonemes_padded = torch.LongTensor(len(batch), max_input_len)\n phonemes_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n phoneme_id = batch[ids_sorted_decreasing[i]][1]\n phonemes_padded[i, :phoneme_id.shape[0]] = phoneme_id\n\n # Right zero-pad mel-spec\n num_mels = batch[0][2].size(0)\n max_target_len = max([x[2].size(1) for x in batch])\n if max_target_len % self.n_frames_per_step != 0:\n max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step\n assert max_target_len % self.n_frames_per_step == 0\n\n # include mel 
padded and gate padded\n mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)\n mel_padded.zero_()\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\n gate_padded.zero_()\n output_lengths = torch.LongTensor(len(batch))\n for i in range(len(ids_sorted_decreasing)):\n mel = batch[ids_sorted_decreasing[i]][2]\n mel_padded[i, :, :mel.size(1)] = mel\n gate_padded[i, mel.size(1)-1:] = 1\n output_lengths[i] = mel.size(1)\n\n return input_lengths, inputs_padded, phonemes_padded, mel_padded, gate_padded, output_lengths","def encoder_decoder_archi_gan(inputs, is_train):\n\n encoder_layers = []\n\n encoded = inputs\n\n encoder_layers.append(encoded)\n\n for i in range(config.encoder_layers):\n encoded = encoder_conv_block_gan(encoded, i, is_train)\n encoder_layers.append(encoded)\n \n encoder_layers.reverse()\n\n\n\n decoded = encoder_layers[0]\n\n for i in range(config.encoder_layers):\n decoded = decoder_conv_block_gan(decoded, encoder_layers[i+1], i, is_train)\n\n return decoded","def init_hidden(self):\n # TODO ========================\n # initialize the hidden states to zero\n\n initial_hidden = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)\n return initial_hidden # a parameter tensor of shape (self.num_layers, self.batch_size, self.hidden_size)","def hidden(self, value):\n if value is not None:\n value.get_shape().assert_is_compatible_with(self._output_shape)\n self._hidden = value","def forward(self, batch: torch.LongTensor,\n hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:\n\n # max_len = x.size(1)\n # x,label = batch\n # batch_size x max_len x embedding_dim\n x_embedded = self.embedding(batch)\n # x_drop = self.dropout\n x_drop = self.dropout(x_embedded)\n\n # compute hidden states and logits for each time step\n # hidden_states_list = []\n # prev_hidden = hidden_start\n hidden_state = self.rnn(x_drop)[0]\n # print(hidden_state)\n # print(hidden_state[0].shape)\n # print(hidden_state[1].shape)\n\n # hidden_state = hidden_state.permute(2,1,0)\n # hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])\n # hidden_state_maxPooled = hidden_state.permute(2,1,0)\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\n\n output = self.get_logits(hidden_state_pooled)\n\n # Loss = self.loss(output, y)\n\n # hidden_state = softmax(logits(hidden_state))\n\n # batch_size x max_len x rnn_size\n # hidden_states = torch.stack(hidden_states_list, dim=1)\n\n return output","def unbucketed_next(self):\n # Initialize batch containers\n label_batch = list()\n enc_input_batch = list()\n dec_input_batch = list()\n # Fill individual batches by iterating over the entire data source\n if self.sent_id < self.get_length():\n while len(enc_input_batch) < self.opt.batch_size:\n try:\n indexed_sent = self.data[self.sent_id]\n label_item = indexed_sent[1:]\n enc_input_item = indexed_sent[1:]\n # Reverse the input to the encoder, see arxiv.org/pdf/1703.03906.pdf\n enc_input_item.reverse()\n dec_input_item = indexed_sent[:-1]\n label_batch.append(label_item)\n enc_input_batch.append(enc_input_item)\n dec_input_batch.append(dec_input_item)\n self.sent_id += 1\n except IndexError:\n break\n else:\n raise IndexError\n return label_batch, enc_input_batch, dec_input_batch","def encode(self, images):\n\n i = 0\n N = len(images)\n embs = None\n\n while True:\n end = min(N, i + self.batch_size)\n batch = images[i: end]\n\n size = end - i\n if size < self.batch_size:\n batch += self._input_padding[:self.batch_size - size]\n\n if embs is None:\n embs = 
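encoder_decoder_archi_gan above wires encoder activations into the decoder in reverse order, U-Net style. The same wiring in a framework-neutral Python sketch (unet_forward is hypothetical; each decoder block is assumed to take the matching skip tensor as its second argument):

def unet_forward(encoder_blocks, decoder_blocks, x):
    # Keep every encoder activation, then give decoder block i the
    # matching encoder activation (deepest first) alongside its input.
    skips = [x]
    for enc in encoder_blocks:
        x = enc(x)
        skips.append(x)
    skips.reverse()
    y = skips[0]
    for i, dec in enumerate(decoder_blocks):
        y = dec(y, skips[i + 1])
    return y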
self.sess.run(self.embed_layer, feed_dict={self.x: batch})\n else:\n _embs = self.sess.run(self.embed_layer, feed_dict={self.x: batch})\n embs = np.vstack((embs, _embs))\n\n i += self.batch_size\n\n if i >= N - 1:\n break\n\n return embs","def __call__(self, batch):\r\n # Right zero-pad all one-hot text sequences to max input length\r\n input_lengths, ids_sorted_decreasing = torch.sort(\r\n torch.LongTensor([len(x[0]) for x in batch]),\r\n dim=0, descending=True)\r\n max_input_len = input_lengths[0]\r\n\r\n text_padded = torch.LongTensor(len(batch), max_input_len)\r\n text_padded.zero_()\r\n for i in range(len(ids_sorted_decreasing)):\r\n text = batch[ids_sorted_decreasing[i]][0]\r\n text_padded[i, :text.size(0)] = text\r\n\r\n # Right zero-pad mel-spec\r\n num_mels = batch[0][1].size(0)\r\n max_target_len = max([x[1].size(1) for x in batch])\r\n if max_target_len % self.n_frames_per_step != 0:\r\n max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step\r\n assert max_target_len % self.n_frames_per_step == 0\r\n\r\n # include mel padded and gate padded\r\n mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)\r\n mel_padded.zero_()\r\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\r\n gate_padded.zero_()\r\n output_lengths = torch.LongTensor(len(batch))\r\n for i in range(len(ids_sorted_decreasing)):\r\n mel = batch[ids_sorted_decreasing[i]][1]\r\n mel_padded[i, :, :mel.size(1)] = mel\r\n gate_padded[i, mel.size(1)-1:] = 1\r\n output_lengths[i] = mel.size(1)\r\n\r\n return text_padded, input_lengths, mel_padded, gate_padded, \\\r\n output_lengths","def forward(self, state, encoder_padding_mask):\n residual = state.clone()\n\n '''\n ___QUESTION-6-DESCRIBE-D-START___\n What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor \n be after multi-head attention? HINT: formulate your answer in terms of \n constituent variables like batch_size, embed_dim etc...\n '''\n '''\n The encoder padding mask is used to mask the ⟨pad⟩ token which is padded to the input sequences to make the sequences in the same lengths each batch. 
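The batched-inference loop above has two subtle issues worth noting: stopping at i >= N - 1 can skip the last item (e.g. N=5, batch_size=2 never encodes item 4), and rows produced by padding the final partial batch are never trimmed from embs. A corrected sketch (run_batch and pad_item are hypothetical stand-ins for the session call and the padding input):

import numpy as np

def encode_in_batches(run_batch, images, batch_size, pad_item):
    N = len(images)
    embs, i = [], 0
    while i < N:
        batch = images[i:i + batch_size]
        batch = batch + [pad_item] * (batch_size - len(batch))
        embs.append(run_batch(batch))   # (batch_size, emb_dim) per call
        i += batch_size
    return np.vstack(embs)[:N]          # drop rows that came from padding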
Thus the word of input sequence will not pay attention to these padded tokens.\n The shape of state is (tgt_time_steps * batch_size * embed_dim)\n '''\n state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)\n '''\n ___QUESTION-6-DESCRIBE-D-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state","def get_reconstructed_input(self, hidden):\n return T.nnet.sigmoid(T.dot(hidden, self.w2) + self.b2)","def _inference_initial_state(self, encoder_outputs, encoder_decoder_attention_bias):\n\n with tf.variable_scope(\"inference_initial_state\"):\n n_layers = self.attention_layers\n n_heads = self.attention_heads\n batch_size = tf.shape(encoder_outputs)[0]\n n_features = self.num_mels + self.num_freq\n\n state = {\n \"iteration\": tf.constant(0),\n \"inputs\": tf.zeros([batch_size, 1, n_features * self.reduction_factor]),\n \"finished\": tf.cast(tf.zeros([batch_size]), tf.bool),\n \"alignment_positions\": tf.zeros([n_layers, batch_size, n_heads, 1],\n dtype=tf.int32),\n \"outputs\": {\n \"spec\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\n \"post_net_spec\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\n \"alignments\": [\n tf.zeros([0, 0, 0, 0, 0])\n ],\n \"stop_token_logits\": tf.zeros([batch_size, 0, 1 * self.reduction_factor]),\n \"lengths\": tf.zeros([batch_size], dtype=tf.int32),\n \"mag_spec\": tf.zeros([batch_size, 0, self.num_freq * self.reduction_factor])\n },\n \"encoder_outputs\": encoder_outputs,\n \"encoder_decoder_attention_bias\": encoder_decoder_attention_bias\n }\n\n state_shape_invariants = {\n \"iteration\": tf.TensorShape([]),\n \"inputs\": tf.TensorShape([None, None, n_features * self.reduction_factor]),\n \"finished\": tf.TensorShape([None]),\n \"alignment_positions\": tf.TensorShape([n_layers, None, n_heads, None]),\n \"outputs\": {\n \"spec\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\n \"post_net_spec\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\n \"alignments\": [\n tf.TensorShape([None, None, None, None, None]),\n ],\n \"stop_token_logits\": tf.TensorShape([None, None, 1 * self.reduction_factor]),\n \"lengths\": tf.TensorShape([None]),\n \"mag_spec\": tf.TensorShape([None, None, None])\n },\n \"encoder_outputs\": encoder_outputs.shape,\n \"encoder_decoder_attention_bias\": encoder_decoder_attention_bias.shape\n }\n\n return state, state_shape_invariants","def _inference_step(self, state):\n\n decoder_inputs = state[\"inputs\"]\n encoder_outputs = state[\"encoder_outputs\"]\n attention_bias = state[\"encoder_decoder_attention_bias\"]\n alignment_positions = state[\"alignment_positions\"]\n\n outputs = self._decode_pass(\n decoder_inputs=decoder_inputs,\n encoder_outputs=encoder_outputs,\n enc_dec_attention_bias=attention_bias,\n alignment_positions=alignment_positions\n )\n\n with tf.variable_scope(\"inference_step\"):\n next_inputs_mel = outputs[\"post_net_spec\"][:, -1:, :]\n next_inputs_mel = self._expand(next_inputs_mel, self.reduction_factor)\n next_inputs_mag = outputs[\"mag_spec\"][:, -1:, :]\n next_inputs_mag = 
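To make the QUESTION-6 answer above concrete: key_padding_mask has shape (batch_size, src_time_steps) with True at pad positions, and the attention output keeps the (tgt_time_steps, batch_size, embed_dim) shape of state. A runnable illustration (dimensions are illustrative):

import torch
import torch.nn as nn

attn = nn.MultiheadAttention(embed_dim=8, num_heads=2)
state = torch.randn(5, 3, 8)  # (seq_len, batch, embed_dim)
pad_mask = torch.tensor([[False, False, False, False, False],
                         [False, False, False, True,  True],
                         [False, True,  True,  True,  True]])  # (batch, seq_len)
out, _ = attn(state, state, state, key_padding_mask=pad_mask)
# out keeps shape (5, 3, 8); masked key positions receive no attention.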
self._expand(next_inputs_mag, self.reduction_factor)\n next_inputs = tf.concat([next_inputs_mel, next_inputs_mag], axis=-1)\n\n n_features = self.num_mels + self.num_freq\n next_inputs = self._shrink(next_inputs, n_features, self.reduction_factor)\n\n # Set zero if sequence is finished\n next_inputs = tf.where(\n state[\"finished\"],\n tf.zeros_like(next_inputs),\n next_inputs\n )\n next_inputs = tf.concat([decoder_inputs, next_inputs], 1)\n\n # Update lengths\n lengths = state[\"outputs\"][\"lengths\"]\n lengths = tf.where(\n state[\"finished\"],\n lengths,\n lengths + 1 * self.reduction_factor\n )\n outputs[\"lengths\"] = lengths\n\n # Update spec, post_net_spec and mag_spec\n for key in [\"spec\", \"post_net_spec\", \"mag_spec\"]:\n output = outputs[key][:, -1:, :]\n output = tf.where(state[\"finished\"], tf.zeros_like(output), output)\n outputs[key] = tf.concat([state[\"outputs\"][key], output], 1)\n\n # Update stop token logits\n stop_token_logits = outputs[\"stop_token_logits\"][:, -1:, :]\n stop_token_logits = tf.where(\n state[\"finished\"],\n tf.zeros_like(stop_token_logits) + 1e9,\n stop_token_logits\n )\n stop_prediction = tf.sigmoid(stop_token_logits)\n stop_prediction = tf.reduce_max(stop_prediction, axis=-1)\n\n # Uncomment next line if you want to use stop token predictions\n finished = tf.reshape(tf.cast(tf.round(stop_prediction), tf.bool), [-1])\n finished = tf.reshape(finished, [-1])\n\n stop_token_logits = tf.concat(\n [state[\"outputs\"][\"stop_token_logits\"], stop_token_logits],\n axis=1\n )\n outputs[\"stop_token_logits\"] = stop_token_logits\n\n with tf.variable_scope(\"alignments\"):\n weights = []\n for index, attention in enumerate(self.attentions):\n if isinstance(attention, AttentionBlock):\n weights.append(attention.multiheaded_attention.attention_weights)\n\n weights = tf.stack(weights)\n outputs[\"alignments\"] = [weights]\n\n alignment_positions = tf.argmax(\n weights,\n axis=-1,\n output_type=tf.int32\n )[:, :, :, -1:]\n state[\"alignment_positions\"] = tf.concat(\n [state[\"alignment_positions\"], alignment_positions],\n axis=-1\n )\n\n state[\"iteration\"] = state[\"iteration\"] + 1\n state[\"inputs\"] = next_inputs\n state[\"finished\"] = finished\n state[\"outputs\"] = outputs\n\n return state","def decoder(self, embedded_inputs, decoder_input0,\n decoder_hidden0, encoder_outputs):\n pass","def encode_input(self, x_tensor, inp_lens_tensor):\r\n input_emb = self.input_emb.forward(x_tensor)\r\n enc_output_each_word, enc_context_mask, enc_final_states = self.encoder(input_emb, inp_lens_tensor)\r\n enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))\r\n # print('lest go', enc_final_states_reshaped[1].shape)\r\n return enc_output_each_word, enc_context_mask, enc_final_states_reshaped","def forward(\r\n self,\r\n input_ids,\r\n attention_mask: torch.Tensor,\r\n token_type_ids: torch.Tensor\r\n ):\r\n ### YOUR CODE HERE\r\n output = self.bert(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n )\r\n\r\n sequence_output = output[0] # the last hidden state (batch, sequence_length, hidden_size)\r\n logits = self.qa_outputs(sequence_output)\r\n start_logits, end_logits = logits.split(1, dim=-1)\r\n start_logits = start_logits.squeeze(-1)\r\n end_logits = end_logits.squeeze(-1)\r\n\r\n outputs = (start_logits, end_logits) # + output[2:]\r\n\r\n return outputs\r\n ### END YOUR CODE","def build_graph(self):\n with vs.variable_scope(\"context\"):\n context_encoder = 
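A distilled sketch of the finished-sequence bookkeeping in _inference_step above (function and argument names are illustrative; frames and logits are assumed to be (batch, 1, features) tensors):

import torch

def apply_finished_mask(finished, new_frames, stop_logits):
    # Zero the frames of finished sequences and force their stop logits
    # high so they stay finished on subsequent steps.
    new_frames = torch.where(finished.view(-1, 1, 1),
                             torch.zeros_like(new_frames), new_frames)
    stop_logits = torch.where(finished.view(-1, 1, 1),
                              torch.full_like(stop_logits, 1e9), stop_logits)
    finished = finished | (torch.sigmoid(stop_logits).amax(dim=(-2, -1)) > 0.5)
    return finished, new_frames, stop_logits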
RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)\n context_hiddens = context_encoder.build_graph(self.context_embs,\n self.context_mask) # (batch_size, context_len, hidden_size*2)\n\n with vs.variable_scope(\"question\"):\n question_encoder = RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)\n question_hiddens = question_encoder.build_graph(self.qn_embs,\n self.qn_mask) # (batch_size, question_len, hidden_size*2)\n question_last_hidden = tf.reshape(question_hiddens[:, -1, :], (-1, 2 * self.FLAGS.hidden_size))\n question_last_hidden = tf.contrib.layers.fully_connected(question_last_hidden,\n num_outputs=self.FLAGS.hidden_size)\n # Use context hidden states to attend to question hidden states\n\n # attn_output is shape (batch_size, context_len, hidden_size*2)\n # The following is BiDAF attention\n if self.FLAGS.use_bidaf:\n attn_layer = BiDAF(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)\n attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens,\n self.context_mask) # (batch_size, context_len, hidden_size * 6)\n else: # otherwise, basic attention\n attn_layer = BasicAttn(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)\n _, attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens)\n # Concat attn_output to context_hiddens to get blended_reps\n blended_reps = tf.concat([context_hiddens, attn_output], axis=2) # (batch_size, context_len, hidden_size*4)\n\n blended_reps_final = tf.contrib.layers.fully_connected(blended_reps, num_outputs=self.FLAGS.hidden_size)\n\n decoder = RNNDecoder(self.FLAGS.batch_size, self.FLAGS.hidden_size, self.ans_vocab_size, self.FLAGS.answer_len,\n self.ans_embedding_matrix, self.keep_prob, sampling_prob=self.sampling_prob,\n schedule_embed=self.FLAGS.schedule_embed, pred_method=self.FLAGS.pred_method)\n (self.train_logits, self.train_translations, _), \\\n (self.dev_logits, self.dev_translations, self.attention_results) = decoder.build_graph(blended_reps_final, question_last_hidden,\n self.ans_embs, self.ans_mask, self.ans_ids,\n self.context_mask)","def sample_n(self, method, batch, max_sample_length, sample_num):\r\n inp = batch.text\r\n inp_len_np = batch.length.cpu().numpy()\r\n\r\n pad_inp1 = torch.LongTensor([self.fw_start_token] * inp.size(1)).view(1,-1)\r\n pad_inp2 = torch.LongTensor([self.pad_token] * inp.size(1)).view(1,-1)\r\n\r\n if self.gpu >= 0:\r\n inp = inp.to(self.gpu)\r\n pad_inp1 = pad_inp1.to(self.gpu)\r\n pad_inp2 = pad_inp2.to(self.gpu)\r\n\r\n padded_inp = torch.cat([pad_inp1, inp, pad_inp2], 0)\r\n padded_inp[inp_len_np + 1] = self.bw_start_token\r\n\r\n assert padded_inp.max().item() < self.n_vocab + 2\r\n assert inp_len_np[0] + 2 <= padded_inp.size(0)\r\n padded_enc_out = self.encoder(padded_inp, inp_len_np + 2) # [T+2,B,H]\r\n\r\n # extract forward hidden state\r\n assert 0 <= batch.fw_pos.item() - 1 <= padded_enc_out.size(0) - 1\r\n assert 0 <= batch.fw_pos.item() <= padded_enc_out.size(0) - 1\r\n fw_hidden = padded_enc_out.index_select(0,batch.fw_pos - 1)\r\n fw_hidden = torch.cat([fw_hidden[:,:,:self.hidden_size],fw_hidden[:,:,self.hidden_size:]], 0)\r\n fw_next_token = padded_inp.index_select(0,batch.fw_pos).view(1,-1)\r\n\r\n # extract backward hidden state\r\n assert 0 <= batch.bw_pos.item() + 3 <= padded_enc_out.size(0) - 1\r\n assert 0 <= batch.bw_pos.item() + 2 <= padded_enc_out.size(0) - 1\r\n bw_hidden = padded_enc_out.index_select(0,batch.bw_pos + 3)\r\n bw_hidden = torch.cat([bw_hidden[:,:,:self.hidden_size], 
bw_hidden[:,:,self.hidden_size:]], 0)\r\n bw_next_token = padded_inp.index_select(0,batch.bw_pos + 2).view(1,-1)\r\n\r\n fw_sample_outputs = self.sample_n_sequences(method, 'fw', fw_next_token, fw_hidden, max_sample_length, sample_num)\r\n bw_sample_outputs = self.sample_n_sequences(method, 'bw', bw_next_token, bw_hidden, max_sample_length, sample_num)\r\n\r\n self.filter_special_tokens(fw_sample_outputs)\r\n self.filter_special_tokens(bw_sample_outputs)\r\n\r\n return fw_sample_outputs, bw_sample_outputs"],"string":"[\n \"def encode(self, x):\\n _, hid = self.encoder(x) #All RNN classes output a tuple of 2 objects: the output of the RNN first and the hidden state from the last item in\\n return hid #the input sequence second. We're only interested in the hidden state\",\n \"def _encode(self):\\n with tf.variable_scope('encoding'):\\n self.sep_p_encodes, _ = bilstm_layer(self.p_emb, self.p_length, self.hidden_size)\\n tf.get_variable_scope().reuse_variables()\\n self.sep_q_encodes, _ = bilstm_layer(self.q_emb, self.q_length, self.hidden_size)\\n if self.use_dropout:\\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, 1-self.dropout)\\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, 1-self.dropout)\",\n \"def init_hidden_state(self, encoder_out: torch.Tensor):\\n pass\",\n \"def initialize_hidden_state(self):\\n return tf.zeros(shape=(self.batch_size, self.enc_units))\",\n \"def forward(\\r\\n self,\\r\\n input_ids,\\r\\n encoder_hidden_states,\\r\\n encoder_padding_mask,\\r\\n decoder_padding_mask,\\r\\n decoder_causal_mask,\\r\\n past_key_values=None,\\r\\n use_cache=False,\\r\\n output_attentions=False,\\r\\n output_hidden_states=False,\\r\\n return_dict=False,\\r\\n **unused,\\r\\n ):\\r\\n\\r\\n if \\\"decoder_cached_states\\\" in unused:\\r\\n warnings.warn(\\r\\n \\\"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\\\",\\r\\n FutureWarning,\\r\\n )\\r\\n past_key_values = unused.pop(\\\"decoder_cached_states\\\")\\r\\n if \\\"decoder_past_key_values\\\" in unused:\\r\\n warnings.warn(\\r\\n \\\"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\\\",\\r\\n FutureWarning,\\r\\n )\\r\\n past_key_values = unused.pop(\\\"decoder_past_key_values\\\")\\r\\n\\r\\n # check attention mask and invert\\r\\n if encoder_padding_mask is not None:\\r\\n encoder_padding_mask = invert_mask(encoder_padding_mask)\\r\\n\\r\\n # embed positions\\r\\n positions = self.embed_positions(input_ids, use_cache=use_cache)\\r\\n\\r\\n if use_cache:\\r\\n input_ids = input_ids[:, -1:]\\r\\n positions = positions[:, -1:]\\r\\n\\r\\n x = self.embed_tokens(input_ids) * self.embed_scale\\r\\n if self.do_blenderbot_90_layernorm:\\r\\n x = self.layernorm_embedding(x)\\r\\n x += positions\\r\\n else:\\r\\n x += positions\\r\\n x = self.layernorm_embedding(x)\\r\\n\\r\\n x = F.dropout(x, p=self.dropout, training=self.training)\\r\\n\\r\\n # Convert to Bart output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\\r\\n x = x.transpose(0, 1)\\r\\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\\r\\n\\r\\n # decoder layers\\r\\n all_hidden_states = () if output_hidden_states else None\\r\\n all_self_attns = () if output_attentions else None\\r\\n enc_dec_all_attn = () if output_attentions else None\\r\\n next_decoder_cache = []\\r\\n for idx, decoder_layer in enumerate(self.layers):\\r\\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 
for description)\\r\\n if output_hidden_states:\\r\\n all_hidden_states += (x,)\\r\\n dropout_probability = random.uniform(0, 1)\\r\\n if self.training and (dropout_probability < self.layerdrop):\\r\\n continue\\r\\n\\r\\n layer_state = past_key_values[idx] if past_key_values is not None else None\\r\\n\\r\\n #isidora - in comment\\r\\n \\\"\\\"\\\"\\r\\n x, layer_self_attn, layer_past,_ = decoder_layer(\\r\\n x,\\r\\n encoder_hidden_states,\\r\\n encoder_attn_mask=encoder_padding_mask,\\r\\n decoder_padding_mask=decoder_padding_mask,\\r\\n layer_state=layer_state,\\r\\n causal_mask=decoder_causal_mask,\\r\\n output_attentions=output_attentions,\\r\\n )\\r\\n \\\"\\\"\\\"\\r\\n\\r\\n #isidora - start - replace _ with enc_dec_attn to get the encoder-decoder attn weights\\r\\n x, layer_self_attn, layer_past, enc_dec_attn = decoder_layer(\\r\\n x,\\r\\n encoder_hidden_states,\\r\\n encoder_attn_mask=encoder_padding_mask,\\r\\n decoder_padding_mask=decoder_padding_mask,\\r\\n layer_state=layer_state,\\r\\n causal_mask=decoder_causal_mask,\\r\\n output_attentions=output_attentions,\\r\\n )\\r\\n #isidora - end\\r\\n\\r\\n\\r\\n if use_cache:\\r\\n next_decoder_cache.append(layer_past.copy())\\r\\n\\r\\n if output_attentions:\\r\\n all_self_attns += (layer_self_attn,)\\r\\n enc_dec_all_attn += (enc_dec_attn,)\\r\\n\\r\\n if self.layer_norm: # if config.add_final_layer_norm (mBART)\\r\\n x = self.layer_norm(x)\\r\\n\\r\\n # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\\r\\n if output_hidden_states:\\r\\n all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states)\\r\\n x = x.transpose(0, 1)\\r\\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\\r\\n\\r\\n next_cache = next_decoder_cache if use_cache else None\\r\\n\\r\\n #isidora - start - return enc_dec_all_attn instead of decoder outputs\\r\\n return enc_dec_all_attn\\r\\n #isidora - end\\r\\n\\r\\n #isidora - in comment\\r\\n \\\"\\\"\\\"\\r\\n if not return_dict:\\r\\n return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None)\\r\\n return BaseModelOutputWithPast(\\r\\n last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns\\r\\n )\\r\\n \\\"\\\"\\\"\",\n \"def init_hidden(self, batch_size):\\r\\n \\r\\n self.hidden_state = (\\r\\n torch.zeros(((1+self.bidirectional)*self.num_layers,\\r\\n batch_size,\\r\\n self.hidden_size)).to(self.device),\\r\\n torch.zeros(((1+self.bidirectional)*self.num_layers, \\r\\n batch_size, \\r\\n self.hidden_size)).to(self.device))\",\n \"def _prepare_attended_output(self,\\n decoder_hidden_state: torch.Tensor,\\n state: Dict[str, torch.Tensor]) -> torch.Tensor:\\n # Ensure mask is also a FloatTensor. 
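The layer loop above implements LayerDrop (see the arxiv link in the comment): during training, each decoder layer is skipped independently with probability layerdrop. Reduced to its essence (run_layers_with_layerdrop is a hypothetical helper):

import random

def run_layers_with_layerdrop(layers, x, layerdrop, training):
    for layer in layers:
        if training and random.uniform(0, 1) < layerdrop:
            continue  # drop this layer for the current forward pass
        x = layer(x)
    return x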
Or else the multiplication within\\n # attention will complain.\\n # shape: (batch_size, max_input_sequence_length)\\n\\n encoder_outputs = state[\\\"encoder_outputs\\\"]\\n source_mask = state[\\\"source_mask\\\"]\\n prev_attention = state[\\\"attention\\\"]\\n att_keys = state[\\\"att_keys\\\"]\\n att_values = state[\\\"att_values\\\"]\\n\\n # shape: (batch_size, max_input_sequence_length)\\n mode = \\\"soft\\\" if self.training else \\\"hard\\\"\\n if isinstance(self._attention, MonotonicAttention):\\n encoder_outs: Dict[str, torch.Tensor] = {\\n \\\"value\\\": state[\\\"encoder_outputs\\\"],\\n \\\"mask\\\": state[\\\"source_mask\\\"]\\n }\\n\\n monotonic_attention, chunk_attention = self._attention(\\n encoder_outs, decoder_hidden_state, prev_attention, mode=mode)\\n # shape: (batch_size, encoder_output_dim)\\n attended_output = util.weighted_sum(\\n encoder_outputs, chunk_attention)\\n attention = monotonic_attention\\n elif isinstance(self._attention, StatefulAttention):\\n attended_output, attention = self._attention(decoder_hidden_state,\\n att_keys, att_values, source_mask)\\n else:\\n attention = self._attention(\\n decoder_hidden_state, source_mask)\\n attended_output = util.weighted_sum(\\n encoder_outputs, attention)\\n\\n return attended_output, attention\",\n \"def call(self, inputs, output_hidden_states = False, training = False):\\n if isinstance(inputs, (list, tuple)):\\n input_ids = inputs[0]\\n token_type_ids = inputs[1] if len(inputs) > 1 else None\\n attention_mask = inputs[2] if len(inputs) > 2 else None\\n \\n elif isinstance(inputs, dict):\\n input_ids = inputs['input_ids']\\n token_type_ids = inputs.get('token_type_ids', None)\\n attention_mask = inputs.get('attention_mask', None)\\n else:\\n raise ValueError('The type of inputs should be list or dictionary.')\\n \\n input_shape = shape_list(input_ids)\\n \\n# last_hidden_state = tf.ones(input_shape + (self.config.hidden_size))\\n# output = tf.ones(input_shape + (self.config.hidden_size,))\\n# logits = tf.ones(input_shape + (self.config.vocab_size,))\\n# pooler_output = tf.ones((input_shape[0], self.config.hidden_size))\\n \\n hidden_states = [] if output_hidden_states else None\\n output = self.embeddings(input_ids, token_type_ids, training = training)\\n \\n if output_hidden_states:\\n hidden_states.append(output)\\n\\n if self.causal_attention:\\n attention_mask = tf.constant(lower_triangle_matrix(input_shape[-1]))\\n attention_mask = tf.reshape(attention_mask, shape = (1, 1, input_shape[-1], input_shape[-1]))\\n \\n else:\\n if attention_mask is None:\\n attention_mask = tf.constant(1.0, shape = input_shape, dtype = 'float32')\\n # attention_mask now has shape (batches, sequence_len),\\n # we need to covert it to (batches, 1, 1, sequence_len)\\n # so that it will broadcast to (batches, num_attention_heads, sequence_len, sequence_len)\\n attention_mask = tf.reshape(attention_mask, shape = (-1, 1, 1, input_shape[-1]))\\n\\n \\n \\n last_hidden_state, layer_outputs = self.encoder(output, attention_mask, output_hidden_states = output_hidden_states, training = training)\\n if output_hidden_states:\\n hidden_states.extend(layer_outputs)\\n \\n pooler_output = self.pooler(tf.gather(last_hidden_state, indices = 0, axis = 1)) if self.pooler else None\\n logits = self.lm_head(last_hidden_state) if self.lm_head else None\\n\\n res = {'sequence_output': last_hidden_state,\\n 'pooler_output': pooler_output,\\n 'logits': logits,\\n 'hidden_states': hidden_states}\\n\\n self.built = True\\n\\n return {k : v for k, v in 
res.items() if v is not None}\",\n \"def _bridge_bidirectional_hidden(self, hidden):\\n num_layers = hidden.size(0) // 2\\n _, batch_size, hidden_size = hidden.size()\\n return hidden.view(num_layers, 2, batch_size, hidden_size)\\\\\\n .transpose(1, 2).contiguous().view(num_layers, batch_size, hidden_size * 2)\",\n \"def _init_state(self, encoder_hidden):\\n if encoder_hidden is None:\\n return None\\n if isinstance(encoder_hidden, tuple):\\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\\n else:\\n encoder_hidden = self._cat_directions(encoder_hidden)\\n return encoder_hidden\",\n \"def _init_state(self, encoder_hidden):\\n if encoder_hidden is None:\\n return None\\n if isinstance(encoder_hidden, tuple):\\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\\n else:\\n encoder_hidden = self._cat_directions(encoder_hidden)\\n return encoder_hidden\",\n \"def init_hidden_state(self, batch_size):\\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\\n return h, c\",\n \"def init_hidden_state(self, batch_size):\\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\\n return h, c\",\n \"def init_hidden_state(self, batch_size):\\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\\n return h, c\",\n \"def forward_step(self, prev_embed, encoder_hidden, src_mask, proj_key, hidden):\\n\\n # compute context vector using attention mechanism\\n #we only want the hidden, not the cell state of the lstm CZW, hence the hidden[0]\\n query = hidden[0][-1].unsqueeze(1) # [#layers, B, D] -> [B, 1, D]\\n context, attn_probs = self.attention(\\n query=query, proj_key=proj_key,\\n value=encoder_hidden, mask=src_mask)\\n\\n # update rnn hidden state\\n rnn_input = torch.cat([prev_embed, context], dim=2)\\n output, hidden = self.rnn(rnn_input, hidden)\\n \\n pre_output = torch.cat([prev_embed, output, context], dim=2)\\n pre_output = self.dropout_layer(pre_output)\\n pre_output = self.pre_output_layer(pre_output)\\n\\n return output, hidden, pre_output\",\n \"def _fix_enc_hidden(self, h):\\n if self.bidirectional_encoder:\\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\\n return h\",\n \"def decode(self):\\n decoder_input = Input(shape=self.input_decoder_shape, batch_shape=self.input_batch_decoder_shape)\\n ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)\\n\\n if self.hparams.Masking is True:\\n mask_decoder_input = Masking(mask_value=0)(decoder_input)\\n mask_ppg_input = Masking(mask_value=0)(ppg_input)\\n prenet_output = self.PreNet(mask_decoder_input)\\n encoder_input = self.Encoder(mask_ppg_input)\\n decoder_mask = None\\n else:\\n decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)\\n prenet_output = self.PreNet(decoder_input)\\n encoder_input = self.Encoder(ppg_input, decoder_mask)\\n\\n rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])\\n # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\\n # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\\n for i in range(self.hparams.Tacotron_decoder_layers):\\n rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)\\n\\n # feed by self.states is unhelpful 
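A shape walk-through of _bridge_bidirectional_hidden above, with num_layers=2, batch_size=3, hidden_size=4 (values are illustrative):

import torch

h = torch.randn(2 * 2, 3, 4)              # (num_layers * 2, batch, hidden)
bridged = (h.view(2, 2, 3, 4)             # (layers, directions, batch, hidden)
            .transpose(1, 2).contiguous() # (layers, batch, directions, hidden)
            .view(2, 3, 8))               # (layers, batch, hidden * 2)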
in training, since we don't stop rnn during epochs\\n # but it is important in generating since each fit states will be set to zeros.!!!!!!\\n rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])\\n decoder_output = self.Linear_projection(rnn_output)\\n if self.hparams.Tacotron_postnet is True:\\n residual_output = decoder_output\\n for i in range(self.hparams.PostNet_layers):\\n residual_output = self.PostNet_Conv1D[i](residual_output)\\n residual_output = self.PostNet_BatchNorm[i](residual_output)\\n residual_output = self.PostNet_dropout_list[i](residual_output)\\n decoder_output = Add()([decoder_output, residual_output])\\n return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)\",\n \"def init_hidden_state(self,batch_size):\\n h = torch.zeros(batch_size,self.decoder_dim).to(device) # (batch_size, decoder_dim)\\n c = torch.zeros(batch_size,self.decoder_dim).to(device)\\n return h, c\",\n \"def _TransformHidden(self, _):\\n raise NotImplementedError()\",\n \"def process_state_batch(self, batch):\\n return np.squeeze(batch, axis=1)\",\n \"def _encode_back(self):\\n with tf.variable_scope('passage_encoding'):\\n self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)\\n with tf.variable_scope('question_encoding'):\\n self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)\\n if self.use_dropout:\\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)\\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)\",\n \"def init_hidden_state(self, encoder_out):\\n init_internal_state = []\\n mean_encoder_out = encoder_out.mean(dim=1)\\n h = self.init_h(mean_encoder_out)\\n c = self.init_c(mean_encoder_out)\\n init_internal_state.append((h, c))\\n\\n for i in range(1, self.decoder_number_layers):\\n init_internal_state.append((\\n Variable(torch.zeros(1, self.decoder_rnn_channels[i])).to(device),\\n Variable(torch.zeros(1, self.decoder_rnn_channels[i])).to(device)\\n ))\\n return init_internal_state\",\n \"def _decode_train(self):\\n\\n # the basic idea is, we use golden sketch during train and in order to copy from source\\n # we given true mask of decoder to generate right copy weights\\n state = {'encoder': self.concated_encoder_output}\\n\\n def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,\\n reuse=False):\\n return transformer_decoder(inputs, memory, bias, mem_bias, params, state, scope, reuse)\\n\\n self.final_logits = self._decode_func(\\n self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,\\n self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,\\n expand_source_ids_oo=self.concat_src_ids_oo,\\n max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,\\n decoder_fn=transformer_concated_decoder_internal,\\n scope='final_decoder')\",\n \"def run_encoder(self, sess, batch):\\n feed_dict = self._make_feed_dict(batch, just_enc=True) \\n (enc_states, dec_in_state, global_step) = sess.run(\\n [self._enc_states, self._dec_in_state, self.global_step], feed_dict) # run the encoder\\n\\n # dec_in_state is LSTMStateTuple shape ([batch_size,hidden_dim],[batch_size,hidden_dim])\\n # Given that the batch is a single example repeated, dec_in_state is identical across the batch so we just take the top row.\\n dec_in_state = tf.contrib.rnn.LSTMStateTuple(dec_in_state.c[0], dec_in_state.h[0])\\n return enc_states, dec_in_state\",\n \"def _make_hidden(self, batch_size):\\n 
hidden = torch.zeros(self.nb_layers, batch_size, self.hidden_size).to(device)\\n return hidden\",\n \"def process_hidden_layers(self, x, training):\\n restricted_to_final_seq = False\\n for layer_ix, layer in enumerate(self.hidden_layers):\\n if type(layer) == Dense:\\n if self.return_final_seq_only and not restricted_to_final_seq:\\n x = x[:, -1, :]\\n restricted_to_final_seq = True\\n x = layer(x)\\n else:\\n x = layer(x)\\n if self.batch_norm:\\n x = self.batch_norm_layers[layer_ix](x, training=False)\\n if self.dropout != 0.0 and training: x = self.dropout_layer(x)\\n return x, restricted_to_final_seq\",\n \"def _make_hidden(self, batch_size):\\n hidden = (torch.zeros(self.nb_layers, batch_size, self.hidden_size).to(device),\\n torch.zeros(self.nb_layers, batch_size, self.hidden_size).to(device))\\n return hidden\",\n \"def forward(self, hidden_states):\\n # Common transformations (dense layer, layer norm + activation function) performed on text, KG and protein data\\n # transform is initialized in the parent BigBirdLMPredictionHead class\\n hidden_states = self.transform(hidden_states)\\n\\n # The first part is processed with the text decoder, the second with the entity decoder, and the third with the\\n # protein decoder to map to the text, kg, and protein vocab size, respectively\\n text_hidden_states_to_vocab = self.text_decoder(hidden_states[:, : self.kg_start_idx])\\n ent_hidden_states_to_kg_vocab = self.entity_decoder(\\n hidden_states[:, self.kg_start_idx : self.prot_start_idx]\\n )\\n prot_hidden_states_to_prot_vocab = self.prot_decoder(\\n hidden_states[:, self.prot_start_idx :]\\n )\\n\\n return (\\n text_hidden_states_to_vocab,\\n ent_hidden_states_to_kg_vocab,\\n prot_hidden_states_to_prot_vocab,\\n )\",\n \"def initialize_hidden_state(self, batch_size):\\n return tf.zeros((batch_size, self.enc_units))\",\n \"def init_hidden_state(self, encoder_out):\\r\\n mean_encoder_out = encoder_out.mean(dim=1)\\r\\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\\r\\n c = self.init_c(mean_encoder_out)\\r\\n return h, c\",\n \"def _decode(self):\\n with tf.variable_scope('same_question_concat'):\\n batch_size = tf.shape(self.start_label)[0]\\n concat_passage_encodes = tf.reshape(\\n self.fuse_p_encodes,\\n [batch_size, -1, 2 * self.hidden_size]\\n )\\n no_dup_question_encodes = tf.reshape(\\n self.sep_q_encodes,\\n [batch_size, -1, tf.shape(self.sep_q_encodes)[1], 2 * self.hidden_size]\\n )[0:, 0, 0:, 0:]\\n decoder = PointerNetDecoder(self.hidden_size)\\n self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,\\n no_dup_question_encodes)\",\n \"def forward(self, input, dec_hidden=None):\\n ### YOUR CODE HERE for part 2b\\n ### TODO - Implement the forward pass of the character decoder.\\n # print(\\\"=====input.size\\\",input.size())\\n char_embedded= self.decoderCharEmb(input)\\n # print(\\\"=====char_embedded.size\\\",char_embedded.size())\\n out, dec_hidden = self.charDecoder(char_embedded,dec_hidden)\\n # print(\\\"=====out.size\\\",out.size()) #dimensions (seq_length, batch, hidden_size)\\n \\n out_batch_first = out.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\\n o_proj = self.char_output_projection(out_batch_first)\\n scores = o_proj.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\\n return scores,dec_hidden\\n ### END YOUR CODE \",\n \"def encoder(list_of_str, key):\\n tokenized = self.tokenizer.encode_commands(list_of_str)\\n hidden = self.tokenizer.tokenize(tokenized)\\n hidden = hidden.permute(1, 0, 
2).reshape(hidden.size(1), -1) # correct for bididrectional\\n return hidden\",\n \"def _encode(self):\\n with tf.variable_scope('passage_encoding'):\\n self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)\\n with tf.variable_scope('question_encoding'):\\n self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)\\n if self.use_dropout:\\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)\\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)\",\n \"def forward(self, input, hidden, give_gates=False, debug=False):\\n\\n emb = self.encoder(input)\\n if emb.dim()<3:\\n emb = emb.unsqueeze(0)\\n\\n if give_gates:\\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\\n else:\\n output, hidden = self.rnn(emb, hidden)\\n\\n # decoded = self.softmax(self.decoder(output))\\n decoded = self.decoder(output)\\n\\n if give_gates:\\n if debug:\\n return decoded, hidden, extras, emb\\n else:\\n return decoded, hidden, extras\\n else:\\n if debug:\\n return decoded, hidden, emb\\n else:\\n return decoded, hidden\",\n \"def get_num_hidden(self) -> int:\\n return self.encoders[-1].get_num_hidden()\",\n \"def init_hidden(self, batch_size, device):\\n if self.mode == 'LSTM':\\n return (torch.ones((1+self.bidirectional , batch_size, self.hidden_dim), device=device),\\n torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device))\\n else:\\n return torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device)\",\n \"def get_representation(output):\\n\\n # shape: (seq_len, vocab_size)\\n hidden_states = output[1]\\n\\n token_embeddings = torch.stack(hidden_states, dim=0)\\n # remove dimension 1 (batches)\\n token_embeddings = torch.squeeze(token_embeddings, dim=1)\\n # swap dimension 0 and 1\\n token_embeddings = token_embeddings.permute(1, 0, 2)\\n # the last hidden layer output (2+seq_len, 768)\\n hidden_states = [token[-1] for token in token_embeddings]\\n\\n return hidden_states\",\n \"def _bert_encoder(self, sentence, attn_mask):\\n output = self.bert(sentence, attn_mask)\\n embedding = output['hidden_states'][-1]\\n\\n feats = self.hidden2tag(embedding)\\n return feats\",\n \"def transparent_forward(self, input, hidden, give_gates=False, debug=False):\\n\\n lseq, nseq = input.shape\\n ispad = (input == self.padding)\\n\\n H = torch.zeros(lseq, self.nhid, nseq)\\n if give_gates:\\n Z = torch.zeros(lseq, self.nhid, nseq)\\n R = torch.zeros(lseq, self.nhid, nseq)\\n \\n # because pytorch only returns hidden activity in the last time step,\\n # we need to unroll it manually. 
\\n O = torch.zeros(lseq, nseq, self.decoder.out_features)\\n emb = self.encoder(input)\\n for t in range(lseq):\\n if give_gates:\\n out, hidden, ZR = self.rnn(emb[t:t+1,...], hidden, give_gates=True)\\n Z[t,:,:] = ZR[0].squeeze(0).T\\n R[t,:,:] = ZR[1].squeeze(0).T\\n else:\\n out, hidden = self.rnn(emb[t:t+1,...], hidden)\\n dec = self.decoder(out)\\n # naan = torch.ones(hidden.squeeze(0).shape)*np.nan\\n # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T\\n H[t,:,:] = hidden.squeeze(0).T\\n O[t,:,:] = dec.squeeze(0)\\n\\n if give_gates:\\n if debug:\\n return O, H, Z, R, emb\\n else:\\n return O, H, Z, R\\n else:\\n if debug:\\n return O, H, emb\\n else:\\n return O, H\",\n \"def reset_hidden(self, batch_size):\\n\\n hidden = {}\\n hidden[\\\"h\\\"] = torch.Tensor(np.zeros((batch_size, self._hidden_size))).to(self._device)\\n hidden[\\\"c\\\"] = torch.Tensor(np.zeros((batch_size, self._hidden_size))).to(self._device)\\n return hidden\",\n \"def process_state_batch(self, batch):\\n # batch = np.squeeze(batch, axis=1)\\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\\n return batch\",\n \"def process_state_batch(self, batch):\\n return batch\",\n \"def process_state_batch(self, batch):\\n return batch\",\n \"def get_reconstructed_input(self, hidden):\\r\\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)\",\n \"def get_reconstructed_input(self, hidden):\\r\\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)\",\n \"def forward(self, encoding, encoding_lens, hidden, y_labels=None, y_lens=None):\\n # split into keys and values\\n # keys [B,T,K], values [B,T,V]\\n keys, values = torch.split(encoding, [self.key_dim, self.value_dim], dim=-1)\\n\\n hidden = hidden.unsqueeze(0)\\n\\n if y_labels is not None and y_lens is not None:\\n return self.__forward_train(keys, values, hidden, encoding_lens, y_labels, y_lens)\\n else:\\n return self.__forward_inference(keys, values, hidden)\",\n \"def __call__(self, encoder_hidden_states):\\n params = self.dec_params\\n search_params = self.search_params\\n\\n lm_params = self.lm_params\\n get_top_k_fn = self.top_k_setup_with_lm(encoder_hidden_states)\\n\\n x = params.embedding[data_utils.GO_ID]\\n x_lm = lm_params.embedding[data_utils.GO_ID]\\n\\n # Initialize Decoder states\\n h_size = params.dec_lstm_w.shape[1]/4\\n zero_dec_state = (np.zeros(h_size), np.zeros(h_size))\\n\\n dec_lm_h_size = params.lm_lstm_w.shape[1]/4\\n zero_dec_lm_state = (np.zeros(dec_lm_h_size), np.zeros(dec_lm_h_size))\\n\\n # Initialize LM state\\n lm_h_size = lm_params.lstm_w.shape[1]/4\\n zero_lm_state = (np.zeros(lm_h_size), np.zeros(lm_h_size))\\n\\n zero_attn = np.zeros(encoder_hidden_states.shape[1])\\n\\n # Maintain a tuple of (output_indices, score, encountered EOS?)\\n output_list = []\\n final_output_list = []\\n k = search_params.beam_size # Represents the current beam size\\n step_count = 0\\n\\n # Run step 0 separately\\n top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec =\\\\\\n get_top_k_fn(x, x_lm, [zero_dec_state, zero_dec_lm_state, zero_lm_state],\\n zero_attn, beam_size=k)\\n for idx in xrange(top_k_indices.shape[0]):\\n output_tuple = (BeamEntry([top_k_indices[idx]], state_list, context_vec),\\n top_k_model_scores[idx])\\n if top_k_indices[idx] == data_utils.EOS_ID:\\n final_output_list.append(output_tuple)\\n # Decrease the beam size once EOS is encountered\\n k -= 1\\n else:\\n output_list.append(output_tuple)\\n\\n step_count += 1\\n while step_count < 120 and k 
> 0:\\n # These lists store the states obtained by running the decoder\\n # for 1 more step with the previous outputs of the beam\\n next_dec_states = []\\n next_context_vecs = []\\n\\n score_list = []\\n model_score_list = []\\n index_list = []\\n for candidate, cand_score in output_list:\\n x = params.embedding[candidate.get_last_output()]\\n x_lm = lm_params.embedding[candidate.get_last_output()]\\n\\n top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec =\\\\\\n get_top_k_fn(x, x_lm, candidate.get_dec_state(),\\n candidate.get_context_vec(), beam_size=k)\\n\\n next_dec_states.append(state_list)\\n next_context_vecs.append(context_vec)\\n\\n index_list.append(top_k_indices)\\n score_list.append(top_k_scores + cand_score)\\n model_score_list.append(top_k_model_scores + cand_score)\\n\\n # Score of all k**2 continuations\\n all_scores = np.concatenate(score_list, axis=0)\\n all_model_scores = np.concatenate(model_score_list, axis=0)\\n # All k**2 continuations\\n all_indices = np.concatenate(index_list, axis=0)\\n\\n # Find the top indices among the k^^2 entries\\n top_k_indices = np.argpartition(all_scores, -k)[-k:]\\n next_k_indices = all_indices[top_k_indices]\\n top_k_scores = all_model_scores[top_k_indices]\\n # The original candidate indices can be found by dividing by k.\\n # Because the indices are of the form - i * k + j, where i\\n # represents the ith output and j represents the jth top index for i\\n orig_cand_indices = np.divide(top_k_indices, k, dtype=np.int32)\\n\\n new_output_list = []\\n\\n for idx in xrange(k):\\n orig_cand_idx = int(orig_cand_indices[idx])\\n # BeamEntry of the original candidate\\n orig_cand = output_list[orig_cand_idx][0]\\n next_elem = next_k_indices[idx]\\n # Add the next index to the original sequence\\n new_index_seq = orig_cand.get_index_seq() + [next_elem]\\n dec_state = next_dec_states[orig_cand_idx]\\n context_vec = next_context_vecs[orig_cand_idx]\\n\\n output_tuple = (BeamEntry(new_index_seq, dec_state, context_vec),\\n top_k_scores[idx] +\\n search_params.word_ins_penalty*len(new_index_seq))\\n if next_elem == data_utils.EOS_ID:\\n # This sequence is finished. 
Put the output on the final list\\n # and reduce beam size\\n final_output_list.append(output_tuple)\\n k -= 1\\n else:\\n new_output_list.append(output_tuple)\\n\\n output_list = new_output_list\\n step_count += 1\\n\\n final_output_list += output_list\\n\\n best_output = max(final_output_list, key=lambda output_tuple: output_tuple[1])\\n output_seq = best_output[0].get_index_seq()\\n return np.stack(output_seq, axis=0)\",\n \"def _decode_back(self):\\n with tf.variable_scope('same_question_concat'):\\n batch_size = tf.shape(self.start_label)[0]\\n concat_passage_encodes = tf.reshape(\\n self.fuse_p_encodes,\\n [batch_size, -1, self.hidden_size]\\n )\\n no_dup_question_encodes = tf.reshape(\\n self.sep_q_encodes,\\n [batch_size, -1, tf.shape(self.sep_q_encodes)[1], self.hidden_size]\\n )[0:, 0, 0:, 0:]\\n decoder = PointerNetDecoder(self.hidden_size)\\n self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,\\n no_dup_question_encodes)\\n\\n outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.start_probs), axis=2),\\n tf.expand_dims(tf.nn.softmax(self.end_probs), axis=1))\\n outer = tf.matrix_band_part(outer, 0, -1)\\n self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)\\n self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)\",\n \"def get_state(self):\\n state = super().get_state()\\n state.update({\\n 'num_of_fields': self.num_of_fields,\\n 'embedding_dim': self.embedding_dim})\\n return state\",\n \"def get_reconstructed_input(self, hidden):\\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)\",\n \"def forward(self, inputs, decode_len=None):\\n\\n batch_size = inputs.size(0)\\n input_dim = inputs.size(1)\\n assert input_dim == self.input_dim, 'input dim should be {:d} but now: {:d}'.format(self.input_dim, input_dim)\\n\\n sourceL = inputs.size(2)\\n\\n if self.embed_input:\\n # repeat embeddings across batch_size\\n # result is [batch_size x input_dim x embedding_dim]\\n # TODO: repeat or expand?\\n embedding = self.embedding.repeat(batch_size, 1, 1)\\n embedded_inputs = []\\n # result is [batch_size, 1, input_dim, sourceL]\\n ips = inputs.unsqueeze(1)\\n\\n for i in range(sourceL):\\n # [batch_size x 1 x input_dim] * [batch_size x input_dim x embedding_dim]\\n # result is [batch_size, embedding_dim]\\n embedded_inputs.append(torch.bmm(\\n ips[:, :, :, i].float(),\\n embedding).squeeze(1))\\n\\n # Result is [sourceL x batch_size x embedding_dim]\\n embedded_inputs = torch.cat(embedded_inputs).view(\\n sourceL,\\n batch_size,\\n embedding.size(2))\\n else:\\n embedded_inputs = inputs.permute(2, 0, 1)\\n\\n (encoder_hx, encoder_cx) = init_zero_hidden(self.hidden_dim, inputs.is_cuda)\\n encoder_hx = encoder_hx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\\n encoder_cx = encoder_cx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\\n \\n # encoder forward pass\\n enc_h, (enc_h_t, enc_c_t) = self.encoder(embedded_inputs, (encoder_hx, encoder_cx))\\n\\n enc_h_linear = enc_h.view(-1, self.hidden_dim)\\n # enc_h_linear_2d = enc_h_linear.view(self.hidden_dim, -1)\\n enc_action_scores = self.EncodeScore(enc_h_linear)\\n enc_action_scores = enc_action_scores.view(-1, batch_size).permute(1, 0)\\n dec_init_state = (enc_h_t[-1], enc_c_t[-1])\\n \\n # repeat decoder_in_0 across batch\\n decoder_input = self.decoder_in_0.unsqueeze(0).repeat(embedded_inputs.size(1), 1)\\n\\n (head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores), dec_hidden_t = self.decoder(decoder_input,\\n embedded_inputs,\\n 
dec_init_state,\\n enc_h, max_len=decode_len)\\n #TODO: added conversion to tensors\\n head_pointer_probs = torch.stack(head_pointer_probs)\\n head_pointer_probs = head_pointer_probs.permute(1, 0, 2)\\n tail_pointer_probs = torch.stack(tail_pointer_probs)\\n tail_pointer_probs = tail_pointer_probs.permute(1, 0, 2)\\n cls_scores = torch.stack(cls_scores)\\n cls_scores = cls_scores.permute(1, 0, 2)\\n head_positions = torch.stack(head_positions)\\n head_positions = head_positions.permute(1, 0)\\n tail_positions = torch.stack(tail_positions)\\n tail_positions = tail_positions.permute(1, 0)\\n\\n\\n\\n return head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, enc_action_scores\",\n \"def get_reconstructed_input(self, hidden):\\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)\",\n \"def get_reconstructed_input(self, hidden):\\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)\",\n \"def encode_decode_TD(self, n_step, idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ ):\\r\\n\\r\\n actor_embedding = embed_seq(input_seq=self.input_, from_=self.dimension, to_= self.input_embed, is_training=self.is_training, BN=True, initializer=self.initializer)\\r\\n actor_encoding = encode_seq(input_seq=actor_embedding, input_dim=self.input_embed, num_stacks=self.num_stacks, num_heads=self.num_heads, num_neurons=self.num_neurons, is_training=self.is_training)\\r\\n \\r\\n if self.is_training == False:\\r\\n actor_encoding = tf.tile(actor_encoding,[self.batch_size,1,1])\\r\\n \\r\\n idx_list = copy(idx_list_previous)\\r\\n log_probs = copy(log_probs_previous)\\r\\n entropies = copy(entropies_previous)\\r\\n \\r\\n\\r\\n mask = copy(mask_previous)\\r\\n \\r\\n n_hidden = actor_encoding.get_shape().as_list()[2] # input_embed\\r\\n W_ref = tf.get_variable(\\\"W_ref\\\",[1, n_hidden, self.num_units],initializer=self.initializer)\\r\\n W_q = tf.get_variable(\\\"W_q\\\",[self.query_dim, self.num_units],initializer=self.initializer)\\r\\n v = tf.get_variable(\\\"v\\\",[self.num_units],initializer=self.initializer)\\r\\n \\r\\n encoded_ref = tf.nn.conv1d(actor_encoding, W_ref, 1, \\\"VALID\\\") # actor_encoding is the ref for actions [Batch size, seq_length, n_hidden]\\r\\n \\r\\n query1 = copy( query1_previous)\\r\\n query2 = copy( query2_previous)\\r\\n query3 = copy( query3_previous)\\r\\n idx_copy = copy(idx_)\\r\\n \\r\\n W_1 =tf.get_variable(\\\"W_1\\\",[n_hidden, self.query_dim],initializer=self.initializer) # update trajectory (state)\\r\\n W_2 =tf.get_variable(\\\"W_2\\\",[n_hidden, self.query_dim],initializer=self.initializer)\\r\\n W_3 =tf.get_variable(\\\"W_3\\\",[n_hidden, self.query_dim],initializer=self.initializer)\\r\\n \\r\\n \\r\\n \\\"\\\"\\\"\\r\\n # sample from POINTER from the perspective of the Actor\\r\\n \\\"\\\"\\\"\\r\\n for step in range(n_step + 1 ): \\r\\n query = tf.nn.relu(tf.matmul(query1, W_1) + tf.matmul(query2, W_2) + tf.matmul(query3, W_3))\\r\\n logits = pointer(encoded_ref=encoded_ref, query=query, mask=mask, W_ref=W_ref, W_q=W_q, v=v, C=config.C, temperature=config.temperature)\\r\\n prob = distr.Categorical(logits) # logits = masked_scores\\r\\n idx = prob.sample()\\r\\n\\r\\n idx_list.append(idx) # tour index\\r\\n idx_list_previous.append(idx)\\r\\n \\r\\n log_probs.append(prob.log_prob(idx)) # log prob\\r\\n log_probs_previous.append(prob.log_prob(idx))\\r\\n \\r\\n entropies.append(prob.entropy()) # entropies\\r\\n 
entropies_previous.append(prob.entropy())\\r\\n \\r\\n mask = mask + tf.one_hot(idx, self.max_length) # mask\\r\\n mask_previous = mask_previous + tf.one_hot(idx, self.max_length)\\r\\n\\r\\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \\r\\n idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \\r\\n query3 = query2\\r\\n query2 = query1\\r\\n query1 = tf.gather_nd(actor_encoding, idx_) # update trajectory (state)\\r\\n \\r\\n query3_previous = query2_previous\\r\\n query2_previous = query1_previous\\r\\n query1_previous = tf.gather_nd(actor_encoding, idx_) # update trajectory (state) \\r\\n\\r\\n if (len(idx_list) >= self.max_length): break #leave the loop if reach the end of the episode\\r\\n\\r\\n \\\"\\\"\\\"\\r\\n # sample from POINTER from the perspective of the Critic\\r\\n make q_t vector = 0\\r\\n \\\"\\\"\\\"\\r\\n while(len(idx_list) < self.max_length): \\r\\n \\r\\n logits = pointer_critic(encoded_ref=encoded_ref, mask=mask, W_ref=W_ref, v=v, C=config.C, temperature=config.temperature)\\r\\n prob = distr.Categorical(logits) # logits = masked_scores\\r\\n idx = prob.sample()\\r\\n\\r\\n idx_list.append(idx) # tour index\\r\\n log_probs.append(prob.log_prob(idx)) # log prob\\r\\n entropies.append(prob.entropy()) # entropies\\r\\n mask = mask + tf.one_hot(idx, self.max_length) # mask\\r\\n\\r\\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \\r\\n #idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \\r\\n query3 = query2\\r\\n query2 = query1\\r\\n query1 = tf.gather_nd(actor_encoding, idx_copy) # update trajectory (state)\\r\\n \\r\\n idx_list.append(idx_list[0]) # return to start\\r\\n self.tour =tf.stack(idx_list, axis=1) # permutations\\r\\n self.log_prob = tf.add_n(log_probs) # corresponding log-probability for backprop\\r\\n self.entropies = tf.add_n(entropies)\\r\\n tf.summary.scalar('log_prob_mean', tf.reduce_mean(self.log_prob))\\r\\n tf.summary.scalar('entropies_mean', tf.reduce_mean(self.entropies))\\r\\n \\r\\n return idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ #returns variables necessary for the next loop\\r\",\n \"def _replace_appropriate_hidden_state_rows(hidden_state: Union[Tuple[torch.Tensor, torch.Tensor]],\\n new_hidden_state: Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor],\\n packed_sequence_info: PackedSequenceInfo, iteration: int,\\n num_batches: int) -> Union[Tuple[torch.Tensor, torch.Tensor]]:\\n if packed_sequence_info:\\n # In the case of PackedSequence, certain inputs in the batch need to be ignored, depending on\\n # sequence length for that input and which timestep we are in.\\n # In our implementation, we still feed the full batch into the rnn_impl_map function, but\\n # instead of replacing all rows of cell_hx (each row corresponds to an output for an item in the\\n # batch), we replace only rows which correspond to valid batch inputs. This is the same as how\\n # hx behaves in actual Pytorch implementation when using PackedSequence.\\n current_batch_size = packed_sequence_info.batch_sizes[iteration]\\n if current_batch_size == num_batches:\\n # All items in the input batch are valid, so we can replace the entire hidden state.\\n hidden_state = new_hidden_state\\n else:\\n # Not all items in the input batch are valid. 
Replace the first number of rows in the hidden\\n # state corresponding to the number of valid items, and keep the remaining rows unchanged.\\n if isinstance(hidden_state, tuple):\\n hidden_state = (torch.cat((new_hidden_state[0][:current_batch_size - num_batches],\\n hidden_state[0][current_batch_size - num_batches:])),\\n torch.cat((new_hidden_state[1][:current_batch_size - num_batches],\\n hidden_state[1][current_batch_size - num_batches:])))\\n else:\\n hidden_state = torch.cat((new_hidden_state[:current_batch_size - num_batches],\\n hidden_state[current_batch_size - num_batches:]))\\n else:\\n hidden_state = new_hidden_state\\n return hidden_state\",\n \"def hidden(self):\\n return self._hidden\",\n \"def transparent_forward(self, inp, hidden=None, give_gates=False, debug=False):\\n\\n lseq = inp.shape[0]\\n nseq = inp.shape[1]\\n # ispad = (input == self.padding)\\n\\n if hidden is None:\\n hidden = self.init_hidden(nseq)\\n\\n H = torch.zeros(lseq, self.nhid, nseq)\\n if give_gates:\\n Z = torch.zeros(lseq, self.nhid, nseq)\\n R = torch.zeros(lseq, self.nhid, nseq)\\n \\n # because pytorch only returns hidden activity in the last time step,\\n # we need to unroll it manually. \\n O = torch.zeros(lseq, nseq, self.decoder.out_features)\\n if self.recoder is None:\\n emb = inp\\n else:\\n emb = self.recoder(inp)\\n for t in range(lseq):\\n if give_gates:\\n out, hidden, ZR = self.rnn(emb[t:t+1,...], hidden, give_gates=True)\\n Z[t,:,:] = ZR[0].squeeze(0).T\\n R[t,:,:] = ZR[1].squeeze(0).T\\n else:\\n out, hidden = self.rnn(emb[t:t+1,...], hidden)\\n dec = self.decoder(out)\\n # naan = torch.ones(hidden.squeeze(0).shape)*np.nan\\n # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T\\n H[t,:,:] = hidden.squeeze(0).T\\n O[t,:,:] = dec.squeeze(0)\\n\\n if give_gates:\\n if debug:\\n return O, H, Z, R, emb\\n else:\\n return O, H, Z, R\\n else:\\n if debug:\\n return O, H, emb\\n else:\\n return O, H\",\n \"def beamSearchDecoder(self, enc_states, hidden, test=False, sentence=None, st=\\\"\\\", ed=\\\"\\\", k=3):\\n\\t\\tbatch_size = enc_states.shape[0]\\n\\t\\thidden = F.tanh(self.init_decoder_hidden(hidden[1])).view(1, batch_size, self.hid_dim)\\n\\t\\tif test:\\n\\t\\t\\tbeams = [Beam(k, self.vocab, hidden[:,i,:], self.device) for i in range(batch_size)]\\n\\n\\t\\t\\tfor i in range(self.max_trg_len):\\n\\t\\t\\t\\tfor j in range(batch_size):\\n\\t\\t\\t\\t\\tlogits, hidden = self.decoderStep(enc_states[j].view(1, -1, self.hid_dim).expand(k, -1, -1),\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t beams[j].get_hidden_state(),\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t beams[j].get_current_word())\\n\\t\\t\\t\\t\\tlogLikelihood = torch.log(F.softmax(logits, dim=-1))\\n\\t\\t\\t\\t\\tbeams[j].advance(logLikelihood, hidden)\\n\\n\\t\\t\\tallHyp, allScores = [], []\\n\\t\\t\\tn_best = 1\\n\\t\\t\\tfor b in range(batch_size):\\n\\t\\t\\t\\tscores, ks = beams[b].sort_best()\\n\\n\\t\\t\\t\\tallScores += [scores[:n_best]]\\n\\t\\t\\t\\thyps = [beams[b].get_hyp(k) for k in ks[:n_best]]\\n\\t\\t\\t\\tallHyp.append(hyps)\\n\\n\\t\\t\\treturn allHyp\\n\\t\\t\\t# return sentences\\n\\t\\telse:\\n\\t\\t\\tmax_seq_len = sentence.shape[1]\\n\\t\\t\\tlogits = torch.zeros(batch_size, max_seq_len - 1, self.vocab_size, device=self.device)\\n\\t\\t\\tfor i in range(max_seq_len - 1):\\n\\t\\t\\t\\t# logit: [batch, 1, vocab_size]\\n\\t\\t\\t\\tlogit, hidden = self.decoderStep(enc_states, hidden, sentence[:, i])\\n\\t\\t\\t\\tlogits[:, i, :] = logit.squeeze()\\n\\t\\t\\treturn logits\",\n \"def 
_hidden_activation(self, inputs):\\n if self.act_enc is None:\\n act_enc = lambda x: x\\n else:\\n act_enc = self.act_enc\\n return act_enc(self._mappings(inputs))\",\n \"def get_output_for(self, inputs, **kwargs):\\n # Retrieve the layer input\\n input = inputs[0]\\n # Retrieve the mask when it is supplied\\n mask = None\\n hid_init = None\\n if self.mask_incoming_index > 0:\\n mask = inputs[self.mask_incoming_index]\\n if self.hid_init_incoming_index > 0:\\n hid_init = inputs[self.hid_init_incoming_index]\\n\\n # Input should be provided as (n_batch, n_time_steps, n_features)\\n # but scan requires the iterable dimension to be first\\n # So, we need to dimshuffle to (n_time_steps, n_batch, n_features)\\n input = input.dimshuffle(1, 0, *range(2, input.ndim))\\n seq_len, num_batch = input.shape[0], input.shape[1]\\n\\n # When we are not precomputing the input, we also need to pass the\\n # input-to-hidden parameters to step\\n non_seqs = L.get_all_params(self.input_to_hidden)\\n\\n # Create single recurrent computation step function\\n def step(input_n, hid_previous, *args):\\n hid_pre = L.get_output(\\n self.input_to_hidden,{self.input_to_hidden_input : input_n,\\n self.input_to_hidden_hidden : hid_previous}, **kwargs)\\n\\n # Clip gradients\\n if self.grad_clipping:\\n hid_pre = theano.gradient.grad_clip(\\n hid_pre, -self.grad_clipping, self.grad_clipping)\\n\\n return hid_pre\\n\\n def step_masked(input_n, mask_n, hid_previous, *args):\\n # Skip over any input with mask 0 by copying the previous\\n # hidden state; proceed normally for any input with mask 1.\\n hid = step(input_n, hid_previous, *args)\\n hid_out = T.switch(mask_n, hid, hid_previous)\\n return [hid_out]\\n\\n if mask is not None:\\n mask = mask.dimshuffle(1, 0, 'x')\\n sequences = [input, mask]\\n step_fun = step_masked\\n else:\\n sequences = input\\n step_fun = step\\n\\n if not isinstance(self.hid_init, L.Layer):\\n # The code below simply repeats self.hid_init num_batch times in\\n # its first dimension. 
Turns out using a dot product and a\\n # dimshuffle is faster than T.repeat.\\n dot_dims = (list(range(1, self.hid_init.ndim - 1)) +\\n [0, self.hid_init.ndim - 1])\\n hid_init = T.dot(T.ones((num_batch, 1)),\\n self.hid_init.dimshuffle(dot_dims))\\n\\n if self.unroll_scan:\\n # Retrieve the dimensionality of the incoming layer\\n input_shape = self.input_shapes[0]\\n # Explicitly unroll the recurrence instead of using scan\\n hid_out = unroll_scan(\\n fn=step_fun,\\n sequences=sequences,\\n outputs_info=[hid_init],\\n go_backwards=self.backwards,\\n non_sequences=non_seqs,\\n n_steps=input_shape[1])[0]\\n else:\\n # Scan op iterates over first dimension of input and repeatedly\\n # applies the step function\\n hid_out = theano.scan(\\n fn=step_fun,\\n sequences=sequences,\\n go_backwards=self.backwards,\\n outputs_info=[hid_init],\\n non_sequences=non_seqs,\\n truncate_gradient=self.gradient_steps,\\n strict=True)[0]\\n\\n # When it is requested that we only return the final sequence step,\\n # we need to slice it out immediately after scan is applied\\n if self.only_return_final:\\n hid_out = hid_out[-1]\\n else:\\n # dimshuffle back to (n_batch, n_time_steps, n_features))\\n hid_out = hid_out.dimshuffle(1, 0, *range(2, hid_out.ndim))\\n\\n # if scan is backward reverse the output\\n if self.backwards:\\n hid_out = hid_out[:, ::-1]\\n\\n return hid_out\",\n \"def _init_rnn_state(self, encoder_hidden):\\n if encoder_hidden is None:\\n return None\\n if isinstance(encoder_hidden, tuple):\\n encoder_hidden = tuple(\\n [self._cat_directions(h) for h in encoder_hidden])\\n else:\\n encoder_hidden = self._cat_directions(encoder_hidden)\\n return encoder_hidden\",\n \"def gru_encoder(cell, embedding, init_state, batch_input, batch_mask):\\n #batch_size = batch_input.get_shape()[0]\\n #state = tf.zeros([batch_size, options['state_size']], tf.float32) # initialize the state\\n outputs = []\\n #split_inputs = tf.split(1, batch_input.get_shape()[0], batch_input)\\n \\n with tf.device(\\\"/cpu:0\\\"):\\n embedded_list = tf.nn.embedding_lookup(embedding, batch_input)\\n #embedded_list = batch_mask * tf.transpose(embedded_list, [2, 0, 1]) # Add mask to change embedding into zeros\\n #embedded_list = tf.transpose(embedded_list, [2, 1, 0])\\n embedded_list = tf.transpose(embedded_list, [1, 0, 2])\\n embedded_list = tf.unpack(embedded_list) # list of embedding\\n \\n # min_sequence_length = tf.reduce_min(seq_len)\\n #max_sequence_length = tf.reduce_max(seq_len)\\n\\n state = init_state\\n for time, (embedded, i_mask) in enumerate(zip(embedded_list, tf.unpack(tf.transpose(batch_mask)))):\\n #embedded = tf.nn.embedding_lookup(embedding, tf.reshape(inputs, [-1])) # deprecated\\n #embedded = embedded * tf.reshape(tf.convert_to_tensor(batch_mask[:, time], tf.float32), [batch_size, 1]) # deprecated\\n #copy_cond = (time >= seq_len)\\n #new_output, new_state = cell(embedded, state)\\n output, state = cell(embedded, state)#tf.select(copy_cond, zero_output, new_output), tf.select(copy_cond, state, new_state)\\n output = tf.expand_dims(i_mask, 1) * output\\n outputs.append(output)\\n #outputs = batch_mask * tf.transpose(tf.pack(outputs), [2, 0, 1])\\n #outputs = tf.unpack(tf.transpose(outputs, [2, 1, 0]))\\n return outputs, state\",\n \"def get_reconstructed_input(self, hidden):\\n return T.nnet.sigmoid(T.dot(hidden, self.W.T) + self.bv)\",\n \"def forward(self, hidden: Union[torch.Tensor, Tuple[torch.Tensor, ...]]) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:\\n # First, map the non-tuple version to a 
1-tuple for easier processing.\\n # We will undo this at the end\\n if not isinstance(hidden, tuple):\\n hidden = (hidden,)\\n\\n batch_size, hidden_size = hidden[0].size()\\n\\n # If we are going to share parameters across the forward and backward\\n # directions, then we need to separate them in the tensors\\n if self.share_bidirectional_parameters:\\n # shape: (batch_size, 2, encoder_hidden_size // 2)\\n hidden = tuple(h.view(batch_size, 2, -1) for h in hidden)\\n\\n # Apply the bridge\\n output = tuple(layer(h) for layer, h in zip(self.layers, hidden))\\n\\n # Reshape the tensors if the parameters are shared\\n if self.share_bidirectional_parameters:\\n # shape: (batch_size, decoder_hidden_size)\\n output = tuple(h.view(batch_size, -1) for h in output)\\n\\n # Undo the tuple if there's only 1 element\\n if len(output) == 1:\\n output = output[0]\\n return output\",\n \"def encode(self, state):\\n raise NotImplementedError\",\n \"def init_hidden(self, encoder_final):\\n\\n #print(\\\"encoder final shape\\\")\\n #print(encoder_final[0].size())\\n if encoder_final is None:\\n return None # start with zeros\\n\\n return (torch.tanh(self.bridge_hidden(encoder_final[0])),\\n torch.tanh(self.bridge_cell(encoder_final[1])))\",\n \"def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \\n target_sequence_length, max_summary_length, \\n output_layer, keep_prob):\\n # TODO: Implement Function\\n trainig_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)\\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, trainig_helper, encoder_state, output_layer)\\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(basic_decoder,maximum_iterations=max_summary_length)\\n return f_output\",\n \"def build(self, unused_input_shapes):\\n self.layers = []\\n for i in range(self.num_hidden_layers):\\n self.layers.append(\\n TransformerDecoderBlock(\\n hidden_size=self.hidden_size,\\n num_attention_heads=self.num_attention_heads,\\n intermediate_size=self.intermediate_size,\\n intermediate_activation=self.intermediate_activation,\\n hidden_dropout_prob=self.hidden_dropout_prob,\\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\\n initializer_range=self.initializer_range,\\n multi_channel_cross_attention=self.multi_channel_cross_attention,\\n name=(\\\"layer_%d\\\" % i)))\\n super(TransformerDecoder, self).build(unused_input_shapes)\",\n \"def encode(data, encoder):\\n # Get the list of hidden depths\\n\\thd = encoder.hidden_depths\\n # Find the middle hidden layer\\n\\tmiddle_layer_index = (len(hd)-1)/2\\n # Initialize empty container for the encoded data\\n\\tdata_encoded = np.zeros((data.shape[0],hd[middle_layer_index]))\\n\\tfor i, d_ in enumerate(data):\\n # feed forward, get all the activations, and just keep\\n # the middle layer, which is the encoding\\n\\t\\tx, z_container, x_container = encoder.ff(d_,True,True)\\n\\t\\tx_encoded = x_container[1+middle_layer_index]\\n\\t\\tdata_encoded[i] = x_encoded\\n\\t#\\n\\treturn data_encoded\",\n \"def forward(self, *args): # noqa: R0914\\r\\n encoder_out, (hn, cn) = self.unified_encoder(*args)\\r\\n device = hn.device\\r\\n non_sequential_cont_decoded = self.mlp_non_seq_cont(hn)\\r\\n non_sequential_cat_decoded = []\\r\\n for mlp_non_seq_cat in self.mlp_non_seq_cat_list:\\r\\n non_sequential_cat_decoded.append(mlp_non_seq_cat(hn))\\r\\n\\r\\n hn = torch.unsqueeze(hn, 0)\\r\\n cn = torch.unsqueeze(cn, 0)\\r\\n # decoded is the output prediction of timestep i-1 of the decoder\\r\\n decoded = 
torch.zeros(encoder_out.shape[0], int(\\r\\n self.unified_encoder.seq_cont_count + self.unified_encoder.no_of_embs_seq), device=device)\\r\\n seq_cont_decoded = torch.Tensor(device=device)\\r\\n seq_cat_decoded = []\\r\\n for _ in range(self.unified_encoder.seq_cat_count):\\r\\n seq_cat_decoded.append(torch.Tensor(device=device))\\r\\n\\r\\n for _ in range(encoder_out.shape[1]):\\r\\n decoded, (hn, cn), out_cont, out_cat = self.decoder(decoded, (hn, cn))\\r\\n # Predict all categorical columns\\r\\n out_cat_onehot = []\\r\\n if self.unified_encoder.seq_cat_count != 0:\\r\\n for idx, out in enumerate(out_cat):\\r\\n out_cat_onehot.append(torch.argmax(out, dim=1).unsqueeze(-1))\\r\\n seq_cat_decoded[idx] = torch.cat(\\r\\n [seq_cat_decoded[idx], out.view(out.shape[0], 1, -1)], dim=1)\\r\\n out_cat_onehot = torch.cat(out_cat_onehot, -1)\\r\\n out_cat_embedding = self.unified_encoder.seq_emb_layers(out_cat_onehot)\\r\\n decoded = torch.cat([out_cat_embedding, out_cont], dim=-1)\\r\\n else:\\r\\n decoded = out_cont\\r\\n seq_cont_decoded = torch.cat(\\r\\n [seq_cont_decoded, out_cont.view(out_cont.shape[0], 1, -1)], dim=1)\\r\\n\\r\\n return non_sequential_cont_decoded, non_sequential_cat_decoded, seq_cont_decoded, seq_cat_decoded\",\n \"def init_hidden_states(self, encoder_out):\\n mean_encoder_out = encoder_out.mean(dim=1)\\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\\n c = self.init_c(mean_encoder_out)\\n\\n return h, c\",\n \"def get_final_encoder_states(encoder_outputs: torch.Tensor,\\n mask: torch.Tensor,\\n bidirectional: bool = False) -> torch.Tensor:\\n # These are the indices of the last words in the sequences (i.e. length sans padding - 1). We\\n # are assuming sequences are right padded.\\n # Shape: (batch_size,)\\n last_word_indices = mask.sum(1).long() - 1\\n\\n # handle -1 cases\\n ll_ = (last_word_indices != -1).long()\\n last_word_indices = last_word_indices * ll_\\n\\n batch_size, _, encoder_output_dim = encoder_outputs.size()\\n expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)\\n # Shape: (batch_size, 1, encoder_output_dim)\\n final_encoder_output = encoder_outputs.gather(1, expanded_indices)\\n final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)\\n if bidirectional:\\n final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]\\n final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]\\n final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)\\n return final_encoder_output\",\n \"def get_state(self):\\n state = super().get_state()\\n state.update({\\n 'num_of_fields': self.num_of_fields,\\n 'hash_size': self.hash_size,\\n 'embedding_dim': self.embedding_dim})\\n return state\",\n \"def decode(self):\\n for layer in self.layers:\\n layer.decode()\",\n \"def decode(self):\\n for layer in self.layers:\\n layer.decode()\",\n \"def forward(self,\\n input,\\n hidden,\\n encoder_outputs):\\n embedded = self.embedding(input).view(1, 1, -1)\\n embedded = self.dropout(embedded)\\n\\n # attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)\\n attn_state = hidden[0] if isinstance(hidden, tuple) else hidden\\n attn_weights = F.softmax(attn_state[0] @ encoder_outputs.squeeze().t(), dim=1)\\n attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.permute(1, 0, 2))\\n\\n output = torch.cat((embedded[0], attn_applied[0]), 1)\\n output = self.attn_combine(output).unsqueeze(0)\\n\\n 
output = F.relu(output)\\n output, hidden = self.rnn(output, hidden)\\n\\n output = F.log_softmax(self.out(output[0]), dim=1)\\n return output, hidden, attn_weights\",\n \"def zero_state(self, batch_size):\\n del batch_size\\n p = self.params\\n if p.left_context != 1 or p.right_context != 0:\\n msg = ('Streaming implementation of chunkwise attention with left context'\\n 'or right context is not supported yet')\\n raise NotImplementedError(msg)\\n return py_utils.NestedMap()\",\n \"def initialize_hidden_state(self):\\n initializer = tf.keras.initializers.Zeros()\\n rnnten = initializer(shape=(self.batch, self.units))\\n return rnnten\",\n \"def extract_hidden_states(self, output):\\n \\n # Extracting the forward and backward hidden states from the last BiLSTM layer\\n # output (batch_size, sequence length, 2 * hidden dim)\\n output_fw = output[:,:,0:self._hidden_size]\\n output_bw = output[:,:,self._hidden_size:]\\n \\n hidden_states = torch.cat((output_fw, output_bw),-1)\\n \\n return hidden_states\",\n \"def dev_network(self):\\n freeze_model(self.eval_net)\\n for data_set_name, data_set in self.data_to_dev.items():\\n #print(data_set_name)\\n valid_iter = make_data_iter(\\n dataset=data_set, batch_size=1, batch_type=self.batch_type,\\n shuffle=False, train=False)\\n valid_sources_raw = data_set.src\\n\\n \\n # don't track gradients during validation\\n r_total = 0\\n roptimal_total = 0\\n all_outputs = []\\n i_sample = 0\\n\\n for valid_batch in iter(valid_iter):\\n # run as during training to get validation loss (e.g. xent)\\n\\n batch = Batch(valid_batch, self.pad_index, use_cuda=self.use_cuda)\\n\\n encoder_output, encoder_hidden = self.model.encode(\\n batch.src, batch.src_lengths,\\n batch.src_mask)\\n\\n # if maximum output length is \\n # not globally specified, adapt to src len\\n if self.max_output_length is None:\\n self.max_output_length = int(max(batch.src_lengths.cpu().numpy()) * 1.5)\\n\\n batch_size = batch.src_mask.size(0)\\n prev_y = batch.src_mask.new_full(size=[batch_size, 1], fill_value=self.bos_index,\\n dtype=torch.long)\\n output = []\\n hidden = self.model.decoder._init_hidden(encoder_hidden)\\n prev_att_vector = None\\n finished = batch.src_mask.new_zeros((batch_size, 1)).byte()\\n\\n # pylint: disable=unused-variable\\n for t in range(self.max_output_length):\\n \\n\\n # if i_sample == 0 or i_sample == 3 or i_sample == 6:\\n # print(\\\"state on t = \\\", t, \\\" : \\\" , state)\\n\\n # decode one single step\\n logits, hidden, att_probs, prev_att_vector = self.model.decoder(\\n encoder_output=encoder_output,\\n encoder_hidden=encoder_hidden,\\n src_mask=batch.src_mask,\\n trg_embed=self.model.trg_embed(prev_y),\\n hidden=hidden,\\n prev_att_vector=prev_att_vector,\\n unroll_steps=1)\\n # greedy decoding: choose arg max over vocabulary in each step with egreedy porbability\\n \\n if self.state_type == 'hidden':\\n state = torch.cat(hidden, dim=2).squeeze(1).detach().cpu()[0]\\n else:\\n state = torch.FloatTensor(prev_att_vector.squeeze(1).detach().cpu().numpy()[0])\\n\\n logits = self.eval_net(state)\\n logits = logits.reshape([1,1,-1]) \\n #print(type(logits), logits.shape, logits)\\n next_word = torch.argmax(logits, dim=-1) \\n a = next_word.squeeze(1).detach().cpu().numpy()[0]\\n prev_y = next_word\\n \\n output.append(next_word.squeeze(1).detach().cpu().numpy())\\n prev_y = next_word\\n \\n # check if previous symbol was \\n is_eos = torch.eq(next_word, self.eos_index)\\n finished += is_eos\\n # stop predicting if reached for all elements in batch\\n if 
(finished >= 1).sum() == batch_size:\\n break\\n stacked_output = np.stack(output, axis=1) # batch, time\\n\\n #decode back to symbols\\n decoded_valid_in = self.model.trg_vocab.arrays_to_sentences(arrays=batch.src,\\n cut_at_eos=True)\\n decoded_valid_out_trg = self.model.trg_vocab.arrays_to_sentences(arrays=batch.trg,\\n cut_at_eos=True)\\n decoded_valid_out = self.model.trg_vocab.arrays_to_sentences(arrays=stacked_output,\\n cut_at_eos=True)\\n \\n \\n\\n hyp = stacked_output\\n\\n r = self.Reward(batch.trg, hyp , show = False)\\n \\n if i_sample == 0 or i_sample == 3 or i_sample == 6:\\n print(\\\"\\\\n Sample \\\", i_sample, \\\"-------------Target vs Eval_net prediction:--Raw---and---Decoded-----\\\")\\n print(\\\"Target: \\\", batch.trg, decoded_valid_out_trg)\\n print(\\\"Eval : \\\", stacked_output, decoded_valid_out, \\\"\\\\n\\\")\\n print(\\\"Reward: \\\", r)\\n\\n #r = self.Reward1(batch.trg, hyp , show = False)\\n r_total += sum(r[np.where(r > 0)])\\n if i_sample ==0:\\n roptimal = self.Reward(batch.trg, batch.trg , show = False)\\n roptimal_total += sum(roptimal[np.where(roptimal > 0)])\\n \\n all_outputs.extend(stacked_output)\\n i_sample += 1\\n\\n assert len(all_outputs) == len(data_set)\\n\\n # decode back to symbols\\n decoded_valid = self.model.trg_vocab.arrays_to_sentences(arrays=all_outputs,\\n cut_at_eos=True)\\n\\n # evaluate with metric on full dataset\\n join_char = \\\" \\\" if self.level in [\\\"word\\\", \\\"bpe\\\"] else \\\"\\\"\\n valid_sources = [join_char.join(s) for s in data_set.src]\\n valid_references = [join_char.join(t) for t in data_set.trg]\\n valid_hypotheses = [join_char.join(t) for t in decoded_valid]\\n\\n # post-process\\n if self.level == \\\"bpe\\\":\\n valid_sources = [bpe_postprocess(s) for s in valid_sources]\\n valid_references = [bpe_postprocess(v)\\n for v in valid_references]\\n valid_hypotheses = [bpe_postprocess(v) for\\n v in valid_hypotheses]\\n\\n # if references are given, evaluate against them\\n if valid_references:\\n assert len(valid_hypotheses) == len(valid_references)\\n\\n current_valid_score = 0\\n if self.eval_metric.lower() == 'bleu':\\n # this version does not use any tokenization\\n current_valid_score = bleu(valid_hypotheses, valid_references)\\n elif self.eval_metric.lower() == 'chrf':\\n current_valid_score = chrf(valid_hypotheses, valid_references)\\n elif self.eval_metric.lower() == 'token_accuracy':\\n current_valid_score = token_accuracy(\\n valid_hypotheses, valid_references, level=self.level)\\n elif self.eval_metric.lower() == 'sequence_accuracy':\\n current_valid_score = sequence_accuracy(\\n valid_hypotheses, valid_references)\\n else:\\n current_valid_score = -1\\n\\n self.dev_network_count += 1\\n self.tb_writer.add_scalar(\\\"dev/dev_reward\\\",\\n r_total, self.dev_network_count)\\n self.tb_writer.add_scalar(\\\"dev/dev_bleu\\\",\\n current_valid_score, self.dev_network_count)\\n \\n print(self.dev_network_count ,' r_total and score: ', r_total , current_valid_score)\\n\\n \\n unfreeze_model(self.eval_net)\\n return current_valid_score\",\n \"def encode_sequence(sequence, rnns, embedder, dropout_amount=0.):\\n\\n batch_size = 1\\n layer_states = []\\n for rnn in rnns:\\n hidden_size = rnn.weight_hh.size()[1]\\n \\n # h_0 of shape (batch, hidden_size)\\n # c_0 of shape (batch, hidden_size)\\n if rnn.weight_hh.is_cuda:\\n h_0 = torch.cuda.FloatTensor(batch_size,hidden_size).fill_(0)\\n c_0 = torch.cuda.FloatTensor(batch_size,hidden_size).fill_(0)\\n else:\\n h_0 = torch.zeros(batch_size,hidden_size)\\n 
c_0 = torch.zeros(batch_size,hidden_size)\\n\\n layer_states.append((h_0, c_0))\\n\\n outputs = []\\n for token in sequence:\\n rnn_input = embedder(token)\\n (cell_states, hidden_states), output, layer_states = forward_one_multilayer(rnns,rnn_input,layer_states,dropout_amount)\\n\\n outputs.append(output)\\n\\n return (cell_states, hidden_states), outputs\",\n \"def init_hidden(self, batch_size):\\n return torch.zeros(()), torch.zeros(())\",\n \"def _decode_train(self, decoder, _encoder_output, _features, labels):\\r\\n target_embedded = tf.nn.embedding_lookup(decoder.target_embedding,\\r\\n labels[\\\"target_ids\\\"])\\r\\n\\r\\n return decoder(_encoder_output, labels=target_embedded[:,:-1], sequence_length=labels[\\\"target_len\\\"]-1)\",\n \"def __call__(self, batch):\\n # Right zero-pad all one-hot text sequences to max input length\\n input_lengths, ids_sorted_decreasing = torch.sort(\\n torch.LongTensor([len(x[0]) for x in batch]),\\n dim=0, descending=True)\\n max_input_len = input_lengths[0]\\n\\n inputs_padded = torch.LongTensor(len(batch), max_input_len)\\n inputs_padded.zero_()\\n for i in range(len(ids_sorted_decreasing)):\\n input_id = batch[ids_sorted_decreasing[i]][0]\\n inputs_padded[i, :input_id.shape[0]] = input_id\\n\\n phonemes_padded = torch.LongTensor(len(batch), max_input_len)\\n phonemes_padded.zero_()\\n for i in range(len(ids_sorted_decreasing)):\\n phoneme_id = batch[ids_sorted_decreasing[i]][1]\\n phonemes_padded[i, :phoneme_id.shape[0]] = phoneme_id\\n\\n # Right zero-pad mel-spec\\n num_mels = batch[0][2].size(0)\\n max_target_len = max([x[2].size(1) for x in batch])\\n if max_target_len % self.n_frames_per_step != 0:\\n max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step\\n assert max_target_len % self.n_frames_per_step == 0\\n\\n # include mel padded and gate padded\\n mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)\\n mel_padded.zero_()\\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\\n gate_padded.zero_()\\n output_lengths = torch.LongTensor(len(batch))\\n for i in range(len(ids_sorted_decreasing)):\\n mel = batch[ids_sorted_decreasing[i]][2]\\n mel_padded[i, :, :mel.size(1)] = mel\\n gate_padded[i, mel.size(1)-1:] = 1\\n output_lengths[i] = mel.size(1)\\n\\n return input_lengths, inputs_padded, phonemes_padded, mel_padded, gate_padded, output_lengths\",\n \"def encoder_decoder_archi_gan(inputs, is_train):\\n\\n encoder_layers = []\\n\\n encoded = inputs\\n\\n encoder_layers.append(encoded)\\n\\n for i in range(config.encoder_layers):\\n encoded = encoder_conv_block_gan(encoded, i, is_train)\\n encoder_layers.append(encoded)\\n \\n encoder_layers.reverse()\\n\\n\\n\\n decoded = encoder_layers[0]\\n\\n for i in range(config.encoder_layers):\\n decoded = decoder_conv_block_gan(decoded, encoder_layers[i+1], i, is_train)\\n\\n return decoded\",\n \"def init_hidden(self):\\n # TODO ========================\\n # initialize the hidden states to zero\\n\\n initial_hidden = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)\\n return initial_hidden # a parameter tensor of shape (self.num_layers, self.batch_size, self.hidden_size)\",\n \"def hidden(self, value):\\n if value is not None:\\n value.get_shape().assert_is_compatible_with(self._output_shape)\\n self._hidden = value\",\n \"def forward(self, batch: torch.LongTensor,\\n hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:\\n\\n # max_len = x.size(1)\\n # x,label = batch\\n # batch_size x max_len x embedding_dim\\n 
x_embedded = self.embedding(batch)\\n # x_drop = self.dropout\\n x_drop = self.dropout(x_embedded)\\n\\n # compute hidden states and logits for each time step\\n # hidden_states_list = []\\n # prev_hidden = hidden_start\\n hidden_state = self.rnn(x_drop)[0]\\n # print(hidden_state)\\n # print(hidden_state[0].shape)\\n # print(hidden_state[1].shape)\\n\\n # hidden_state = hidden_state.permute(2,1,0)\\n # hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])\\n # hidden_state_maxPooled = hidden_state.permute(2,1,0)\\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\\n\\n output = self.get_logits(hidden_state_pooled)\\n\\n # Loss = self.loss(output, y)\\n\\n # hidden_state = softmax(logits(hidden_state))\\n\\n # batch_size x max_len x rnn_size\\n # hidden_states = torch.stack(hidden_states_list, dim=1)\\n\\n return output\",\n \"def unbucketed_next(self):\\n # Initialize batch containers\\n label_batch = list()\\n enc_input_batch = list()\\n dec_input_batch = list()\\n # Fill individual batches by iterating over the entire data source\\n if self.sent_id < self.get_length():\\n while len(enc_input_batch) < self.opt.batch_size:\\n try:\\n indexed_sent = self.data[self.sent_id]\\n label_item = indexed_sent[1:]\\n enc_input_item = indexed_sent[1:]\\n # Reverse the input to the encoder, see arxiv.org/pdf/1703.03906.pdf\\n enc_input_item.reverse()\\n dec_input_item = indexed_sent[:-1]\\n label_batch.append(label_item)\\n enc_input_batch.append(enc_input_item)\\n dec_input_batch.append(dec_input_item)\\n self.sent_id += 1\\n except IndexError:\\n break\\n else:\\n raise IndexError\\n return label_batch, enc_input_batch, dec_input_batch\",\n \"def encode(self, images):\\n\\n i = 0\\n N = len(images)\\n embs = None\\n\\n while True:\\n end = min(N, i + self.batch_size)\\n batch = images[i: end]\\n\\n size = end - i\\n if size < self.batch_size:\\n batch += self._input_padding[:self.batch_size - size]\\n\\n if embs is None:\\n embs = self.sess.run(self.embed_layer, feed_dict={self.x: batch})\\n else:\\n _embs = self.sess.run(self.embed_layer, feed_dict={self.x: batch})\\n embs = np.vstack((embs, _embs))\\n\\n i += self.batch_size\\n\\n if i >= N - 1:\\n break\\n\\n return embs\",\n \"def __call__(self, batch):\\r\\n # Right zero-pad all one-hot text sequences to max input length\\r\\n input_lengths, ids_sorted_decreasing = torch.sort(\\r\\n torch.LongTensor([len(x[0]) for x in batch]),\\r\\n dim=0, descending=True)\\r\\n max_input_len = input_lengths[0]\\r\\n\\r\\n text_padded = torch.LongTensor(len(batch), max_input_len)\\r\\n text_padded.zero_()\\r\\n for i in range(len(ids_sorted_decreasing)):\\r\\n text = batch[ids_sorted_decreasing[i]][0]\\r\\n text_padded[i, :text.size(0)] = text\\r\\n\\r\\n # Right zero-pad mel-spec\\r\\n num_mels = batch[0][1].size(0)\\r\\n max_target_len = max([x[1].size(1) for x in batch])\\r\\n if max_target_len % self.n_frames_per_step != 0:\\r\\n max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step\\r\\n assert max_target_len % self.n_frames_per_step == 0\\r\\n\\r\\n # include mel padded and gate padded\\r\\n mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)\\r\\n mel_padded.zero_()\\r\\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\\r\\n gate_padded.zero_()\\r\\n output_lengths = torch.LongTensor(len(batch))\\r\\n for i in range(len(ids_sorted_decreasing)):\\r\\n mel = batch[ids_sorted_decreasing[i]][1]\\r\\n mel_padded[i, :, :mel.size(1)] = mel\\r\\n gate_padded[i, mel.size(1)-1:] 
= 1\\r\\n output_lengths[i] = mel.size(1)\\r\\n\\r\\n return text_padded, input_lengths, mel_padded, gate_padded, \\\\\\r\\n output_lengths\",\n \"def forward(self, state, encoder_padding_mask):\\n residual = state.clone()\\n\\n '''\\n ___QUESTION-6-DESCRIBE-D-START___\\n What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor \\n be after multi-head attention? HINT: formulate your answer in terms of \\n constituent variables like batch_size, embed_dim etc...\\n '''\\n '''\\n The encoder padding mask is used to mask the ⟨pad⟩ token which is padded to the input sequences to make the sequences in the same lengths each batch. Thus the word of input sequence will not pay attention to these padded tokens.\\n The shape of state is (tgt_time_steps * batch_size * embed_dim)\\n '''\\n state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)\\n '''\\n ___QUESTION-6-DESCRIBE-D-END___\\n '''\\n\\n state = F.dropout(state, p=self.dropout, training=self.training)\\n state += residual\\n state = self.self_attn_layer_norm(state)\\n\\n residual = state.clone()\\n state = F.relu(self.fc1(state))\\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\\n state = self.fc2(state)\\n state = F.dropout(state, p=self.dropout, training=self.training)\\n state += residual\\n state = self.final_layer_norm(state)\\n\\n return state\",\n \"def get_reconstructed_input(self, hidden):\\n return T.nnet.sigmoid(T.dot(hidden, self.w2) + self.b2)\",\n \"def _inference_initial_state(self, encoder_outputs, encoder_decoder_attention_bias):\\n\\n with tf.variable_scope(\\\"inference_initial_state\\\"):\\n n_layers = self.attention_layers\\n n_heads = self.attention_heads\\n batch_size = tf.shape(encoder_outputs)[0]\\n n_features = self.num_mels + self.num_freq\\n\\n state = {\\n \\\"iteration\\\": tf.constant(0),\\n \\\"inputs\\\": tf.zeros([batch_size, 1, n_features * self.reduction_factor]),\\n \\\"finished\\\": tf.cast(tf.zeros([batch_size]), tf.bool),\\n \\\"alignment_positions\\\": tf.zeros([n_layers, batch_size, n_heads, 1],\\n dtype=tf.int32),\\n \\\"outputs\\\": {\\n \\\"spec\\\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\\n \\\"post_net_spec\\\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\\n \\\"alignments\\\": [\\n tf.zeros([0, 0, 0, 0, 0])\\n ],\\n \\\"stop_token_logits\\\": tf.zeros([batch_size, 0, 1 * self.reduction_factor]),\\n \\\"lengths\\\": tf.zeros([batch_size], dtype=tf.int32),\\n \\\"mag_spec\\\": tf.zeros([batch_size, 0, self.num_freq * self.reduction_factor])\\n },\\n \\\"encoder_outputs\\\": encoder_outputs,\\n \\\"encoder_decoder_attention_bias\\\": encoder_decoder_attention_bias\\n }\\n\\n state_shape_invariants = {\\n \\\"iteration\\\": tf.TensorShape([]),\\n \\\"inputs\\\": tf.TensorShape([None, None, n_features * self.reduction_factor]),\\n \\\"finished\\\": tf.TensorShape([None]),\\n \\\"alignment_positions\\\": tf.TensorShape([n_layers, None, n_heads, None]),\\n \\\"outputs\\\": {\\n \\\"spec\\\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\\n \\\"post_net_spec\\\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\\n \\\"alignments\\\": [\\n tf.TensorShape([None, None, None, None, None]),\\n ],\\n \\\"stop_token_logits\\\": tf.TensorShape([None, None, 1 * self.reduction_factor]),\\n \\\"lengths\\\": tf.TensorShape([None]),\\n \\\"mag_spec\\\": tf.TensorShape([None, None, None])\\n },\\n 
\\\"encoder_outputs\\\": encoder_outputs.shape,\\n \\\"encoder_decoder_attention_bias\\\": encoder_decoder_attention_bias.shape\\n }\\n\\n return state, state_shape_invariants\",\n \"def _inference_step(self, state):\\n\\n decoder_inputs = state[\\\"inputs\\\"]\\n encoder_outputs = state[\\\"encoder_outputs\\\"]\\n attention_bias = state[\\\"encoder_decoder_attention_bias\\\"]\\n alignment_positions = state[\\\"alignment_positions\\\"]\\n\\n outputs = self._decode_pass(\\n decoder_inputs=decoder_inputs,\\n encoder_outputs=encoder_outputs,\\n enc_dec_attention_bias=attention_bias,\\n alignment_positions=alignment_positions\\n )\\n\\n with tf.variable_scope(\\\"inference_step\\\"):\\n next_inputs_mel = outputs[\\\"post_net_spec\\\"][:, -1:, :]\\n next_inputs_mel = self._expand(next_inputs_mel, self.reduction_factor)\\n next_inputs_mag = outputs[\\\"mag_spec\\\"][:, -1:, :]\\n next_inputs_mag = self._expand(next_inputs_mag, self.reduction_factor)\\n next_inputs = tf.concat([next_inputs_mel, next_inputs_mag], axis=-1)\\n\\n n_features = self.num_mels + self.num_freq\\n next_inputs = self._shrink(next_inputs, n_features, self.reduction_factor)\\n\\n # Set zero if sequence is finished\\n next_inputs = tf.where(\\n state[\\\"finished\\\"],\\n tf.zeros_like(next_inputs),\\n next_inputs\\n )\\n next_inputs = tf.concat([decoder_inputs, next_inputs], 1)\\n\\n # Update lengths\\n lengths = state[\\\"outputs\\\"][\\\"lengths\\\"]\\n lengths = tf.where(\\n state[\\\"finished\\\"],\\n lengths,\\n lengths + 1 * self.reduction_factor\\n )\\n outputs[\\\"lengths\\\"] = lengths\\n\\n # Update spec, post_net_spec and mag_spec\\n for key in [\\\"spec\\\", \\\"post_net_spec\\\", \\\"mag_spec\\\"]:\\n output = outputs[key][:, -1:, :]\\n output = tf.where(state[\\\"finished\\\"], tf.zeros_like(output), output)\\n outputs[key] = tf.concat([state[\\\"outputs\\\"][key], output], 1)\\n\\n # Update stop token logits\\n stop_token_logits = outputs[\\\"stop_token_logits\\\"][:, -1:, :]\\n stop_token_logits = tf.where(\\n state[\\\"finished\\\"],\\n tf.zeros_like(stop_token_logits) + 1e9,\\n stop_token_logits\\n )\\n stop_prediction = tf.sigmoid(stop_token_logits)\\n stop_prediction = tf.reduce_max(stop_prediction, axis=-1)\\n\\n # Uncomment next line if you want to use stop token predictions\\n finished = tf.reshape(tf.cast(tf.round(stop_prediction), tf.bool), [-1])\\n finished = tf.reshape(finished, [-1])\\n\\n stop_token_logits = tf.concat(\\n [state[\\\"outputs\\\"][\\\"stop_token_logits\\\"], stop_token_logits],\\n axis=1\\n )\\n outputs[\\\"stop_token_logits\\\"] = stop_token_logits\\n\\n with tf.variable_scope(\\\"alignments\\\"):\\n weights = []\\n for index, attention in enumerate(self.attentions):\\n if isinstance(attention, AttentionBlock):\\n weights.append(attention.multiheaded_attention.attention_weights)\\n\\n weights = tf.stack(weights)\\n outputs[\\\"alignments\\\"] = [weights]\\n\\n alignment_positions = tf.argmax(\\n weights,\\n axis=-1,\\n output_type=tf.int32\\n )[:, :, :, -1:]\\n state[\\\"alignment_positions\\\"] = tf.concat(\\n [state[\\\"alignment_positions\\\"], alignment_positions],\\n axis=-1\\n )\\n\\n state[\\\"iteration\\\"] = state[\\\"iteration\\\"] + 1\\n state[\\\"inputs\\\"] = next_inputs\\n state[\\\"finished\\\"] = finished\\n state[\\\"outputs\\\"] = outputs\\n\\n return state\",\n \"def decoder(self, embedded_inputs, decoder_input0,\\n decoder_hidden0, encoder_outputs):\\n pass\",\n \"def encode_input(self, x_tensor, inp_lens_tensor):\\r\\n input_emb = 
self.input_emb.forward(x_tensor)\\r\\n enc_output_each_word, enc_context_mask, enc_final_states = self.encoder(input_emb, inp_lens_tensor)\\r\\n enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))\\r\\n # print('lest go', enc_final_states_reshaped[1].shape)\\r\\n return enc_output_each_word, enc_context_mask, enc_final_states_reshaped\",\n \"def forward(\\r\\n self,\\r\\n input_ids,\\r\\n attention_mask: torch.Tensor,\\r\\n token_type_ids: torch.Tensor\\r\\n ):\\r\\n ### YOUR CODE HERE\\r\\n output = self.bert(\\r\\n input_ids=input_ids,\\r\\n attention_mask=attention_mask,\\r\\n token_type_ids=token_type_ids,\\r\\n )\\r\\n\\r\\n sequence_output = output[0] # the last hidden state (batch, sequence_length, hidden_size)\\r\\n logits = self.qa_outputs(sequence_output)\\r\\n start_logits, end_logits = logits.split(1, dim=-1)\\r\\n start_logits = start_logits.squeeze(-1)\\r\\n end_logits = end_logits.squeeze(-1)\\r\\n\\r\\n outputs = (start_logits, end_logits) # + output[2:]\\r\\n\\r\\n return outputs\\r\\n ### END YOUR CODE\",\n \"def build_graph(self):\\n with vs.variable_scope(\\\"context\\\"):\\n context_encoder = RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)\\n context_hiddens = context_encoder.build_graph(self.context_embs,\\n self.context_mask) # (batch_size, context_len, hidden_size*2)\\n\\n with vs.variable_scope(\\\"question\\\"):\\n question_encoder = RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)\\n question_hiddens = question_encoder.build_graph(self.qn_embs,\\n self.qn_mask) # (batch_size, question_len, hidden_size*2)\\n question_last_hidden = tf.reshape(question_hiddens[:, -1, :], (-1, 2 * self.FLAGS.hidden_size))\\n question_last_hidden = tf.contrib.layers.fully_connected(question_last_hidden,\\n num_outputs=self.FLAGS.hidden_size)\\n # Use context hidden states to attend to question hidden states\\n\\n # attn_output is shape (batch_size, context_len, hidden_size*2)\\n # The following is BiDAF attention\\n if self.FLAGS.use_bidaf:\\n attn_layer = BiDAF(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)\\n attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens,\\n self.context_mask) # (batch_size, context_len, hidden_size * 6)\\n else: # otherwise, basic attention\\n attn_layer = BasicAttn(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)\\n _, attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens)\\n # Concat attn_output to context_hiddens to get blended_reps\\n blended_reps = tf.concat([context_hiddens, attn_output], axis=2) # (batch_size, context_len, hidden_size*4)\\n\\n blended_reps_final = tf.contrib.layers.fully_connected(blended_reps, num_outputs=self.FLAGS.hidden_size)\\n\\n decoder = RNNDecoder(self.FLAGS.batch_size, self.FLAGS.hidden_size, self.ans_vocab_size, self.FLAGS.answer_len,\\n self.ans_embedding_matrix, self.keep_prob, sampling_prob=self.sampling_prob,\\n schedule_embed=self.FLAGS.schedule_embed, pred_method=self.FLAGS.pred_method)\\n (self.train_logits, self.train_translations, _), \\\\\\n (self.dev_logits, self.dev_translations, self.attention_results) = decoder.build_graph(blended_reps_final, question_last_hidden,\\n self.ans_embs, self.ans_mask, self.ans_ids,\\n self.context_mask)\",\n \"def sample_n(self, method, batch, max_sample_length, sample_num):\\r\\n inp = batch.text\\r\\n inp_len_np = batch.length.cpu().numpy()\\r\\n\\r\\n pad_inp1 = torch.LongTensor([self.fw_start_token] * 
inp.size(1)).view(1,-1)\\r\\n pad_inp2 = torch.LongTensor([self.pad_token] * inp.size(1)).view(1,-1)\\r\\n\\r\\n if self.gpu >= 0:\\r\\n inp = inp.to(self.gpu)\\r\\n pad_inp1 = pad_inp1.to(self.gpu)\\r\\n pad_inp2 = pad_inp2.to(self.gpu)\\r\\n\\r\\n padded_inp = torch.cat([pad_inp1, inp, pad_inp2], 0)\\r\\n padded_inp[inp_len_np + 1] = self.bw_start_token\\r\\n\\r\\n assert padded_inp.max().item() < self.n_vocab + 2\\r\\n assert inp_len_np[0] + 2 <= padded_inp.size(0)\\r\\n padded_enc_out = self.encoder(padded_inp, inp_len_np + 2) # [T+2,B,H]\\r\\n\\r\\n # extract forward hidden state\\r\\n assert 0 <= batch.fw_pos.item() - 1 <= padded_enc_out.size(0) - 1\\r\\n assert 0 <= batch.fw_pos.item() <= padded_enc_out.size(0) - 1\\r\\n fw_hidden = padded_enc_out.index_select(0,batch.fw_pos - 1)\\r\\n fw_hidden = torch.cat([fw_hidden[:,:,:self.hidden_size],fw_hidden[:,:,self.hidden_size:]], 0)\\r\\n fw_next_token = padded_inp.index_select(0,batch.fw_pos).view(1,-1)\\r\\n\\r\\n # extract backward hidden state\\r\\n assert 0 <= batch.bw_pos.item() + 3 <= padded_enc_out.size(0) - 1\\r\\n assert 0 <= batch.bw_pos.item() + 2 <= padded_enc_out.size(0) - 1\\r\\n bw_hidden = padded_enc_out.index_select(0,batch.bw_pos + 3)\\r\\n bw_hidden = torch.cat([bw_hidden[:,:,:self.hidden_size], bw_hidden[:,:,self.hidden_size:]], 0)\\r\\n bw_next_token = padded_inp.index_select(0,batch.bw_pos + 2).view(1,-1)\\r\\n\\r\\n fw_sample_outputs = self.sample_n_sequences(method, 'fw', fw_next_token, fw_hidden, max_sample_length, sample_num)\\r\\n bw_sample_outputs = self.sample_n_sequences(method, 'bw', bw_next_token, bw_hidden, max_sample_length, sample_num)\\r\\n\\r\\n self.filter_special_tokens(fw_sample_outputs)\\r\\n self.filter_special_tokens(bw_sample_outputs)\\r\\n\\r\\n return fw_sample_outputs, bw_sample_outputs\"\n]"},"negative_scores":{"kind":"list like","value":["0.6531696","0.6165337","0.61568475","0.59570867","0.59406155","0.5908961","0.58463246","0.5821808","0.5770365","0.57613736","0.57613736","0.5760088","0.5760088","0.5760088","0.5757003","0.57509893","0.5750539","0.57451725","0.570995","0.5698639","0.5691191","0.5663548","0.5658067","0.563007","0.5612284","0.56113315","0.5608394","0.5575342","0.5572906","0.555832","0.55566925","0.55418396","0.5522725","0.55199784","0.5515714","0.55051684","0.5504612","0.54856265","0.5482307","0.5471251","0.546024","0.54584426","0.54535824","0.54535824","0.5449207","0.5449207","0.54463977","0.54393405","0.54261863","0.54207176","0.54178464","0.54089063","0.5385386","0.5385386","0.5374253","0.53654444","0.536061","0.5335899","0.5332261","0.5322773","0.5317808","0.53157264","0.5314026","0.53129804","0.53081757","0.5292727","0.52859914","0.5280081","0.52565163","0.52455044","0.5240575","0.5234109","0.5234089","0.52310425","0.52274734","0.52274734","0.52236813","0.5212277","0.5212023","0.5211154","0.521001","0.5199415","0.518788","0.5187849","0.51868826","0.5184745","0.51797944","0.5174779","0.5173124","0.51716137","0.51612455","0.51600647","0.5158575","0.5141562","0.5122168","0.5121686","0.5120307","0.5110036","0.5109149","0.5105331","0.510459"],"string":"[\n \"0.6531696\",\n \"0.6165337\",\n \"0.61568475\",\n \"0.59570867\",\n \"0.59406155\",\n \"0.5908961\",\n \"0.58463246\",\n \"0.5821808\",\n \"0.5770365\",\n \"0.57613736\",\n \"0.57613736\",\n \"0.5760088\",\n \"0.5760088\",\n \"0.5760088\",\n \"0.5757003\",\n \"0.57509893\",\n \"0.5750539\",\n \"0.57451725\",\n \"0.570995\",\n \"0.5698639\",\n \"0.5691191\",\n \"0.5663548\",\n \"0.5658067\",\n \"0.563007\",\n 
\"0.5612284\",\n \"0.56113315\",\n \"0.5608394\",\n \"0.5575342\",\n \"0.5572906\",\n \"0.555832\",\n \"0.55566925\",\n \"0.55418396\",\n \"0.5522725\",\n \"0.55199784\",\n \"0.5515714\",\n \"0.55051684\",\n \"0.5504612\",\n \"0.54856265\",\n \"0.5482307\",\n \"0.5471251\",\n \"0.546024\",\n \"0.54584426\",\n \"0.54535824\",\n \"0.54535824\",\n \"0.5449207\",\n \"0.5449207\",\n \"0.54463977\",\n \"0.54393405\",\n \"0.54261863\",\n \"0.54207176\",\n \"0.54178464\",\n \"0.54089063\",\n \"0.5385386\",\n \"0.5385386\",\n \"0.5374253\",\n \"0.53654444\",\n \"0.536061\",\n \"0.5335899\",\n \"0.5332261\",\n \"0.5322773\",\n \"0.5317808\",\n \"0.53157264\",\n \"0.5314026\",\n \"0.53129804\",\n \"0.53081757\",\n \"0.5292727\",\n \"0.52859914\",\n \"0.5280081\",\n \"0.52565163\",\n \"0.52455044\",\n \"0.5240575\",\n \"0.5234109\",\n \"0.5234089\",\n \"0.52310425\",\n \"0.52274734\",\n \"0.52274734\",\n \"0.52236813\",\n \"0.5212277\",\n \"0.5212023\",\n \"0.5211154\",\n \"0.521001\",\n \"0.5199415\",\n \"0.518788\",\n \"0.5187849\",\n \"0.51868826\",\n \"0.5184745\",\n \"0.51797944\",\n \"0.5174779\",\n \"0.5173124\",\n \"0.51716137\",\n \"0.51612455\",\n \"0.51600647\",\n \"0.5158575\",\n \"0.5141562\",\n \"0.5122168\",\n \"0.5121686\",\n \"0.5120307\",\n \"0.5110036\",\n \"0.5109149\",\n \"0.5105331\",\n \"0.510459\"\n]"},"document_score":{"kind":"string","value":"0.0"},"document_rank":{"kind":"string","value":"-1"}}},{"rowIdx":94852,"cells":{"query":{"kind":"string","value":"Performs one single decoding step for one example. It passes the hidden state for the decoder and input the tensor with the embeddings vector for the input token. The result of the decoder is passed to the output net to obtain the logits for every item in the dictionary. It outputs those logits and the new hidden state returned by the decoder."},"document":{"kind":"string","value":"def decode_one(self, hid, input_x):\n out, new_hid = self.decoder(input_x.unsqueeze(0), hid)\n out = self.output(out)\n return out.squeeze(dim=0), new_hid"},"metadata":{"kind":"string","value":"{\n \"objective\": {\n \"self\": [],\n \"paired\": [],\n \"triplet\": [\n [\n \"query\",\n \"document\",\n \"negatives\"\n ]\n ]\n }\n}"},"negatives":{"kind":"list like","value":["def decode():\n with tf.Session() as sess:\n # Create model and load parameters.\n model = create_model(True)\n model.batch_size = 1 # We decode one sentence at a time.\n init_model(sess, model)\n\n # Load vocabularies.\n vocab, rev_vocab = data_utils.get_vocabulary(FLAGS.data_dir, FLAGS.words,\n FLAGS.word_embeddings, FLAGS.vocab_size)\n\n # Decode from standard input.\n sys.stdout.write(\"> \")\n sys.stdout.flush()\n sentence = sys.stdin.readline()\n while sentence:\n # Get token-ids for the input sentence.\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), vocab,\n data_utils.basic_word_tokenizer)\n # Which bucket does it belong to?\n bucket_id = min([b for b in xrange(len(buckets))\n if buckets[b][0] > len(token_ids)])\n # Get a 1-element batch to feed the sentence to the model.\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n {bucket_id: [(token_ids, [])]}, bucket_id)\n # Get output logits for the sentence.\n _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, True)\n # This is a greedy decoder - outputs are just argmaxes of output_logits.\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]\n # If there is an EOS symbol in outputs, cut them at that point.\n if 
data_utils.EOS_ID in outputs:\n outputs = outputs[:outputs.index(data_utils.EOS_ID)]\n # Print out the network's response to the input.\n join = \" \" if FLAGS.words else \"\"\n print(join.join([tf.compat.as_str(rev_vocab[output]) for output in outputs]))\n print(\"> \", end=\"\")\n sys.stdout.flush()\n sentence = sys.stdin.readline()","def decoding_fn(decoder_input, cache, **kwargs):\n index = kwargs['index']\n # [batch_size * beam_width, 1, hidden_size]\n decoder_input = self._embedding_logits_layer(decoder_input, 'embedding')\n decoder_input += timing_signal[index:index + 1]\n\n # [batch_size * beam_width, 1, hidden_size]\n decoder_outputs = self._decoder(decoder_input,\n cache['encoder_outputs'],\n tf.zeros((1, 1, 1, index + 1), \n dtype='float32'),\n cache['padding_mask'],\n training=False,\n cache=cache)\n\n # [[batch_size * beam_width, 1, vocab_size]\n logits = self._embedding_logits_layer(decoder_outputs, mode='logits')\n logits = tf.squeeze(logits, axis=1)\n return logits, cache","def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \n target_sequence_length, max_summary_length, \n output_layer, keep_prob):\n # TODO: Implement Function\n trainig_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, trainig_helper, encoder_state, output_layer)\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(basic_decoder,maximum_iterations=max_summary_length)\n return f_output","def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,\n end_of_sequence_id, max_target_sequence_length,\n vocab_size, output_layer, batch_size, keep_prob):\n # Convert the start_ids to be a vector with batch size (the go id repeated batch size times)\n start_ids = tf.tile([start_of_sequence_id], [batch_size])\n # Create the embedding helper.\n embedding_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n dec_embeddings, start_ids, end_of_sequence_id)\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(\n dec_cell, embedding_helper, encoder_state, output_layer)\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n basic_decoder,maximum_iterations=max_target_sequence_length)\n return f_output","def decoder(self, tensor, reuse=False):\n\n outputs, predictions = [], []\n\n with tf.variable_scope(\"decoder\", reuse=reuse) as scope:\n\n\n # add gausian noise\n decoder_input = gaussian_noise_layer(tensor, 0.2)\n encoder_dim = tensor.get_shape().as_list()[-1]\n W = tf.get_variable(\"decoder_last_weight\", [self.num_units + encoder_dim, self.voca_size])\n b = tf.get_variable(\"decoder_last_bias\", [self.voca_size])\n # time-major: [batch_size, max_len, num_units] --> [max_len, batch_size, num_units]\n # decoder_input = tf.transpose(decoder_input, [1,0,2])\n cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units, state_is_tuple=False)\n # initial_state = state = decoder_input\n initial_state = tf.zeros([self.batch_size, self.num_units])\n initial_state = tf.concat([initial_state, decoder_input], 1)\n\n\n for i in range(self.max_len):\n if i == 0:\n # start of sequence\n input_ = tf.nn.embedding_lookup(self.embedding, tf.ones([self.batch_size], dtype=tf.int32))\n state = initial_state\n\n else:\n scope.reuse_variables()\n input_ = tf.nn.embedding_lookup(self.embedding, prediction)\n\n output, state = cell(input_, state)\n output = tf.concat([output, tensor], -1)\n output = tf.nn.xw_plus_b(output, W, b)\n\n prediction = tf.argmax(output, axis=1)\n\n outputs.append(output)\n 
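# --- Editor's sketch (framework-free, assumptions labelled) ---
# The decode() snippet above is a greedy decoder: each output token is just
# the argmax of that step's logits, and the sequence is truncated at the
# first EOS id. The same post-processing in NumPy; EOS_ID and the
# [num_steps, vocab_size] shape are assumptions for the example.
import numpy as np

EOS_ID = 2  # hypothetical end-of-sequence id

def greedy_ids(output_logits):
    """output_logits: [num_steps, vocab_size] -> token ids up to first EOS."""
    outputs = [int(np.argmax(step)) for step in output_logits]
    if EOS_ID in outputs:
        outputs = outputs[:outputs.index(EOS_ID)]
    return outputs

# e.g. greedy_ids(np.random.randn(10, 50)) returns at most 10 ids.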
predictions.append(prediction)\n\n predictions = tf.transpose(tf.stack(predictions), [1,0])\n outputs = tf.stack(outputs)\n\n return predictions, outputs","def _build_decoder(self, encoder_outputs, encoder_state, hparams):\n\t\ttgt_sos_id = tf.cast(tf.constant(hparams.sos_id), tf.int32)\n\t\ttgt_eos_id = tf.cast(tf.constant(hparams.eos_id), tf.int32)\n\n\t\tmaximum_iterations = self._get_infer_maximum_iterations(hparams)\n\n\t\t# Decoder\n\t\twith tf.variable_scope('decoder') as decoder_scope:\n\t\t\tcell, decoder_initial_state = self._build_decoder_cell(hparams, encoder_state)\n\t\t\t\n\t\t\tlogits = tf.no_op()\n\t\t\tdecoder_outputs = None\n\n\t\t\t# Train or Eval\n\t\t\tif self.mode != 'infer':\n\t\t\t\tdecoder_emb_input = tf.nn.embedding_lookup(self.embedding_decoder, self.decoder_input_data)\n\n\t\t\t\t# helper\n\t\t\t\thelper = tf.contrib.seq2seq.TrainingHelper(\n\t\t\t\t\tdecoder_emb_input, self.seq_length_decoder_input_data)\n\t\t\t\t\n\t\t\t\t# decoder\n\t\t\t\tmy_decoder = tf.contrib.seq2seq.BasicDecoder(\n\t\t\t\t\tcell,\n\t\t\t\t\thelper,\n\t\t\t\t\tdecoder_initial_state)\n\t\t\t\t\n\t\t\t\t# dynamic decoding\n\t\t\t\toutputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n\t\t\t\t\tmy_decoder,\n\t\t\t\t\tswap_memory=True,\n\t\t\t\t\tscope=decoder_scope)\n\t\t\t\t\n\t\t\t\tsample_id = outputs.sample_id\n\t\t\t\tlogits = self.output_layer(outputs.rnn_output)\n\t\t\telse:\n\t\t\t\tinfer_mode = hparams.infer_mode\n\t\t\t\tstart_tokens = tf.fill([self.batch_size], tgt_sos_id)\n\t\t\t\tend_token = tgt_eos_id\n\t\t\t\t_info(' decoder by infer_mode={} beam_width={}'.format(infer_mode, hparams.beam_width))\n\n\t\t\t\tif infer_mode == 'greedy':\n\t\t\t\t\thelper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n\t\t\t\t\t\tself.embedding_decoder, start_tokens, end_token)\n\t\t\t\telif infer_mode == 'beam_search':\n\t\t\t\t\tbeam_width = hparams.beam_width\n\t\t\t\t\tlength_penalty_weight = hparams.length_penalty_weight\n\t\t\t\t\tcoverage_penalty_weight = hparams.coverage_penalty_weight\n\n\t\t\t\t\t# beam search do not require helper\n\t\t\t\t\tmy_decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n\t\t\t\t\t\tcell=cell,\n\t\t\t\t\t\tembedding=self.embedding_decoder,\n\t\t\t\t\t\tstart_tokens=start_tokens,\n\t\t\t\t\t\tend_token=end_token,\n\t\t\t\t\t\tinitial_state=decoder_initial_state,\n\t\t\t\t\t\tbeam_width=beam_width,\n\t\t\t\t\t\toutput_layer=self.output_layer,\n\t\t\t\t\t\tlength_penalty_weight=length_penalty_weight,\n\t\t\t\t\t\tcoverage_penalty_weight=coverage_penalty_weight)\n\t\t\t\telse:\n\t\t\t\t\t_error('Unknown infer_mode {}'.format(infer_mode))\n\t\t\t\t\traise ValueError\n\t\t\t\t\n\t\t\t\tif infer_mode != 'beam_search':\n\t\t\t\t\tmy_decoder = tf.contrib.seq2seq.BasicDecoder(\n\t\t\t\t\t\tcell,\n\t\t\t\t\t\thelper,\n\t\t\t\t\t\tdecoder_initial_state,\n\t\t\t\t\t\toutput_layer=self.output_layer)\t\t# apply to the RNN output prior to storing the result or sampling\n\t\t\t\t\n\t\t\t\toutputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n\t\t\t\t\tmy_decoder,\n\t\t\t\t\tmaximum_iterations=maximum_iterations,\n\t\t\t\t\tswap_memory=True,\n\t\t\t\t\tscope=decoder_scope)\n\t\t\t\n\t\t\t\tif infer_mode == 'beam_search':\n\t\t\t\t\tsample_id = outputs.predicted_ids\n\t\t\t\telse:\n\t\t\t\t\tlogits = outputs.rnn_output\n\t\t\t\t\tsample_id = outputs.sample_id\n\n\t\treturn logits, sample_id, final_context_state","def _decode_train(self, decoder, _encoder_output, _features, labels):\r\n target_embedded = tf.nn.embedding_lookup(decoder.target_embedding,\r\n 
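# --- Editor's sketch: one beam-search expansion step (illustrative) ---
# _build_decoder above switches between a greedy helper and
# tf.contrib.seq2seq.BeamSearchDecoder. The core of one beam step, shown here
# without TF: add the running beam scores to the next-token log-probs and
# keep the best beam_width continuations. Shapes are assumptions.
import numpy as np

def beam_step(beam_scores, log_probs, beam_width):
    """beam_scores: [beam]; log_probs: [beam, vocab] ->
    (new scores, parent beam indices, token indices), each [beam_width]."""
    vocab = log_probs.shape[1]
    total = beam_scores[:, None] + log_probs        # [beam, vocab]
    flat = total.reshape(-1)
    top = np.argsort(flat)[::-1][:beam_width]       # best beam*vocab entries
    return flat[top], top // vocab, top % vocab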
labels[\"target_ids\"])\r\n\r\n return decoder(_encoder_output, labels=target_embedded[:,:-1], sequence_length=labels[\"target_len\"]-1)","def _decode(self, tgt_token_ids, encoder_outputs, padding_mask):\n tgt_seq_len = tf.shape(tgt_token_ids)[1]\n\n # [batch_size, tgt_seq_len, hidden_size]\n tgt_token_embeddings = self._embedding_logits_layer(\n tgt_token_ids, 'embedding')\n\n # [tgt_seq_len, hidden_size]\n positional_encoding = utils.get_positional_encoding(\n tgt_seq_len, self._hidden_size)\n tgt_token_embeddings += positional_encoding\n tgt_token_embeddings = self._decoder_dropout_layer(\n tgt_token_embeddings, training=True) \n\n look_ahead_mask = utils.get_look_ahead_mask(tgt_seq_len)\n\n # [batch_size, tgt_seq_len, hidden_size]\n decoder_outputs = self._decoder(tgt_token_embeddings, \n encoder_outputs, \n look_ahead_mask, \n padding_mask, \n training=True)\n\n # [batch_size, tgt_seq_len, vocab_size]\n logits = self._embedding_logits_layer(decoder_outputs, 'logits')\n return logits","def _decode(self, input_dict):\n encoder_outputs = input_dict['encoder_output']['outputs']\n enc_src_lengths = input_dict['encoder_output']['src_length']\n if self._mode == 'train':\n spec = (\n input_dict['target_tensors'][0]\n if 'target_tensors' in input_dict\n else None\n )\n spec_length = (\n input_dict['target_tensors'][1]\n if 'target_tensors' in input_dict\n else None\n )\n\n _batch_size = tf.shape(encoder_outputs)[0]\n\n training = self._mode == 'train'\n regularizer = self.params.get('regularizer', None)\n\n if self.params.get('enable_postnet', True):\n if 'postnet_conv_layers' not in self.params:\n raise ValueError(\n 'postnet_conv_layers must be passed from config file if postnet is'\n 'enabled'\n )\n\n num_audio_features = self._n_feats\n\n output_projection_layer = tf.layers.Dense(\n name='output_proj', units=num_audio_features, use_bias=True\n )\n stop_token_projection_layer = tf.layers.Dense(\n name='stop_token_proj', units=1, use_bias=True\n )\n\n prenet = None\n if self.params.get('enable_prenet', True):\n prenet = Prenet(\n self.params.get('prenet_units', 256),\n self.params.get('prenet_layers', 2),\n self.params.get('prenet_dropout', 0.5),\n self.params.get('prenet_enable_dropout', True),\n self.params.get('prenet_activation', tf.nn.relu),\n self.params['dtype'],\n )\n\n cell_params = {}\n cell_params['num_units'] = self.params['decoder_cell_units']\n decoder_cells = [\n single_cell(\n cell_class=self.params['decoder_cell_type'],\n cell_params=cell_params,\n zoneout_prob=self.params.get('zoneout_prob', 0.0),\n dp_output_keep_prob=1.0\n - self.params.get('dropout_prob', 0.1),\n training=training,\n )\n for _ in range(self.params['decoder_layers'])\n ]\n\n if self.params['attention_type'] is not None:\n attention_mechanism = self._build_attention(\n encoder_outputs,\n enc_src_lengths,\n self.params.get('attention_bias', False),\n )\n\n attention_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)\n\n attentive_cell = AttentionWrapper(\n cell=attention_cell,\n attention_mechanism=attention_mechanism,\n alignment_history=True,\n output_attention='both',\n )\n\n decoder_cell = attentive_cell\n\n if self.params['attention_type'] is None:\n decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)\n\n if self._mode == 'train':\n train_and_not_sampling = True\n helper = TacotronTrainingHelper(\n inputs=spec,\n sequence_length=spec_length,\n prenet=None,\n model_dtype=self.params['dtype'],\n mask_decoder_sequence=self.params.get(\n 'mask_decoder_sequence', True\n ),\n )\n elif self._mode == 
'eval' or self._mode == 'infer':\n train_and_not_sampling = False\n inputs = tf.zeros(\n (_batch_size, 1, num_audio_features),\n dtype=self.params['dtype'],\n )\n helper = TacotronHelper(\n inputs=inputs,\n prenet=None,\n mask_decoder_sequence=self.params.get(\n 'mask_decoder_sequence', True\n ),\n )\n else:\n raise ValueError('Unknown mode for decoder: {}'.format(self._mode))\n decoder = TacotronDecoder(\n decoder_cell=decoder_cell,\n helper=helper,\n initial_decoder_state=decoder_cell.zero_state(\n _batch_size, self.params['dtype']\n ),\n attention_type=self.params['attention_type'],\n spec_layer=output_projection_layer,\n stop_token_layer=stop_token_projection_layer,\n prenet=prenet,\n dtype=self.params['dtype'],\n train=train_and_not_sampling,\n )\n\n if self._mode == 'train':\n maximum_iterations = tf.reduce_max(spec_length)\n else:\n maximum_iterations = tf.reduce_max(enc_src_lengths) * 10\n\n outputs, final_state, sequence_lengths = tf.contrib.seq2seq.dynamic_decode(\n # outputs, final_state, sequence_lengths, final_inputs = dynamic_decode(\n decoder=decoder,\n impute_finished=False,\n maximum_iterations=maximum_iterations,\n swap_memory=self.params.get('use_swap_memory', False),\n output_time_major=self.params.get('time_major', False),\n parallel_iterations=self.params.get('parallel_iterations', 32),\n )\n\n decoder_output = outputs.rnn_output\n stop_token_logits = outputs.stop_token_output\n\n with tf.variable_scope('decoder'):\n # If we are in train and doing sampling, we need to do the projections\n if train_and_not_sampling:\n decoder_spec_output = output_projection_layer(decoder_output)\n stop_token_logits = stop_token_projection_layer(\n decoder_spec_output\n )\n decoder_output = decoder_spec_output\n\n ## Add the post net ##\n if self.params.get('enable_postnet', True):\n dropout_keep_prob = self.params.get(\n 'postnet_keep_dropout_prob', 0.5\n )\n\n top_layer = decoder_output\n for i, conv_params in enumerate(self.params['postnet_conv_layers']):\n ch_out = conv_params['num_channels']\n kernel_size = conv_params['kernel_size'] # [time, freq]\n strides = conv_params['stride']\n padding = conv_params['padding']\n activation_fn = conv_params['activation_fn']\n\n if ch_out == -1:\n ch_out = self._n_feats\n\n top_layer = conv_bn_actv(\n layer_type='conv1d',\n name='conv{}'.format(i + 1),\n inputs=top_layer,\n filters=ch_out,\n kernel_size=kernel_size,\n activation_fn=activation_fn,\n strides=strides,\n padding=padding,\n regularizer=regularizer,\n training=training,\n data_format=self.params.get(\n 'postnet_data_format', 'channels_last'\n ),\n bn_momentum=self.params.get('postnet_bn_momentum', 0.1),\n bn_epsilon=self.params.get('postnet_bn_epsilon', 1e-5),\n )\n top_layer = tf.layers.dropout(\n top_layer,\n rate=1.0 - dropout_keep_prob,\n training=training,\n )\n\n else:\n top_layer = tf.zeros(\n [\n _batch_size,\n maximum_iterations,\n outputs.rnn_output.get_shape()[-1],\n ],\n dtype=self.params['dtype'],\n )\n\n if regularizer and training:\n vars_to_regularize = []\n vars_to_regularize += attentive_cell.trainable_variables\n vars_to_regularize += (\n attention_mechanism.memory_layer.trainable_variables\n )\n vars_to_regularize += output_projection_layer.trainable_variables\n vars_to_regularize += (\n stop_token_projection_layer.trainable_variables\n )\n\n for weights in vars_to_regularize:\n if 'bias' not in weights.name:\n # print(\"Added regularizer to {}\".format(weights.name))\n if weights.dtype.base_dtype == tf.float16:\n tf.add_to_collection(\n 
'REGULARIZATION_FUNCTIONS', (weights, regularizer)\n )\n else:\n tf.add_to_collection(\n ops.GraphKeys.REGULARIZATION_LOSSES,\n regularizer(weights),\n )\n\n if self.params.get('enable_prenet', True):\n prenet.add_regularization(regularizer)\n\n if self.params['attention_type'] is not None:\n alignments = tf.transpose(\n final_state.alignment_history.stack(), [1, 2, 0]\n )\n else:\n alignments = tf.zeros([_batch_size, _batch_size, _batch_size])\n\n spectrogram_prediction = decoder_output + top_layer\n\n mag_spec_prediction = tf.zeros([_batch_size, _batch_size, _batch_size])\n\n stop_token_prediction = tf.sigmoid(stop_token_logits)\n outputs = [\n decoder_output,\n spectrogram_prediction,\n alignments,\n stop_token_prediction,\n sequence_lengths,\n mag_spec_prediction,\n ]\n\n return {'outputs': outputs, 'stop_token_prediction': stop_token_logits}","def decoder(self, embedded_inputs, decoder_input0,\n decoder_hidden0, encoder_outputs):\n pass","def decode(self, targets, encoder_outputs, encoder_attn_bias, input_shape,\n training):\n with tf.name_scope('decode'):\n length = tf.shape(targets)[1]\n decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(\n length)\n encoder_outputs = tf.reshape(\n encoder_outputs, [input_shape[0], -1, self._hparams['hidden_size']])\n decoder_inputs = tf.pad(\n targets, [[0, 0], [1, 0]], constant_values=input_utils.START)\n\n # Remove last element.\n decoder_inputs = decoder_inputs[:, :-1]\n decoder_inputs = self._word_embedding_layer(decoder_inputs)\n\n with tf.name_scope('add_pos_encoding'):\n pos_encoding = self._position_embedding_layer(decoder_inputs)\n decoder_inputs += pos_encoding\n\n if training:\n decoder_inputs = tf.nn.dropout(\n decoder_inputs, rate=self._hparams['layer_postprocess_dropout'])\n\n decoder_outputs = self._decoder(\n decoder_inputs,\n encoder_outputs,\n decoder_self_attention_bias,\n encoder_attn_bias,\n training=training)\n logits = self._word_layer(decoder_outputs)\n return logits","def forward(\r\n self,\r\n input_ids,\r\n encoder_hidden_states,\r\n encoder_padding_mask,\r\n decoder_padding_mask,\r\n decoder_causal_mask,\r\n past_key_values=None,\r\n use_cache=False,\r\n output_attentions=False,\r\n output_hidden_states=False,\r\n return_dict=False,\r\n **unused,\r\n ):\r\n\r\n if \"decoder_cached_states\" in unused:\r\n warnings.warn(\r\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\r\n FutureWarning,\r\n )\r\n past_key_values = unused.pop(\"decoder_cached_states\")\r\n if \"decoder_past_key_values\" in unused:\r\n warnings.warn(\r\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\r\n FutureWarning,\r\n )\r\n past_key_values = unused.pop(\"decoder_past_key_values\")\r\n\r\n # check attention mask and invert\r\n if encoder_padding_mask is not None:\r\n encoder_padding_mask = invert_mask(encoder_padding_mask)\r\n\r\n # embed positions\r\n positions = self.embed_positions(input_ids, use_cache=use_cache)\r\n\r\n if use_cache:\r\n input_ids = input_ids[:, -1:]\r\n positions = positions[:, -1:]\r\n\r\n x = self.embed_tokens(input_ids) * self.embed_scale\r\n if self.do_blenderbot_90_layernorm:\r\n x = self.layernorm_embedding(x)\r\n x += positions\r\n else:\r\n x += positions\r\n x = self.layernorm_embedding(x)\r\n\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n\r\n # Convert to Bart output format: (seq_len, BS, model_dim) -> (BS, seq_len, 
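# --- Editor's sketch: shifting targets right for teacher forcing ---
# The decode(self, targets, ...) method above builds decoder inputs by
# left-padding the targets with a START id and dropping the final position,
# so the model predicts token t from tokens < t. A NumPy version; the
# start_id default is an assumption.
import numpy as np

def shift_right(targets, start_id=1):
    """targets: [batch, length] int array -> decoder inputs, same shape."""
    padded = np.pad(targets, [[0, 0], [1, 0]], constant_values=start_id)
    return padded[:, :-1]  # remove last element, keep length unchanged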
model_dim)\r\n x = x.transpose(0, 1)\r\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\r\n\r\n # decoder layers\r\n all_hidden_states = () if output_hidden_states else None\r\n all_self_attns = () if output_attentions else None\r\n enc_dec_all_attn = () if output_attentions else None\r\n next_decoder_cache = []\r\n for idx, decoder_layer in enumerate(self.layers):\r\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\r\n if output_hidden_states:\r\n all_hidden_states += (x,)\r\n dropout_probability = random.uniform(0, 1)\r\n if self.training and (dropout_probability < self.layerdrop):\r\n continue\r\n\r\n layer_state = past_key_values[idx] if past_key_values is not None else None\r\n\r\n #isidora - in comment\r\n \"\"\"\r\n x, layer_self_attn, layer_past,_ = decoder_layer(\r\n x,\r\n encoder_hidden_states,\r\n encoder_attn_mask=encoder_padding_mask,\r\n decoder_padding_mask=decoder_padding_mask,\r\n layer_state=layer_state,\r\n causal_mask=decoder_causal_mask,\r\n output_attentions=output_attentions,\r\n )\r\n \"\"\"\r\n\r\n #isidora - start - replace _ with enc_dec_attn to get the encoder-decoder attn weights\r\n x, layer_self_attn, layer_past, enc_dec_attn = decoder_layer(\r\n x,\r\n encoder_hidden_states,\r\n encoder_attn_mask=encoder_padding_mask,\r\n decoder_padding_mask=decoder_padding_mask,\r\n layer_state=layer_state,\r\n causal_mask=decoder_causal_mask,\r\n output_attentions=output_attentions,\r\n )\r\n #isidora - end\r\n\r\n\r\n if use_cache:\r\n next_decoder_cache.append(layer_past.copy())\r\n\r\n if output_attentions:\r\n all_self_attns += (layer_self_attn,)\r\n enc_dec_all_attn += (enc_dec_attn,)\r\n\r\n if self.layer_norm: # if config.add_final_layer_norm (mBART)\r\n x = self.layer_norm(x)\r\n\r\n # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\r\n if output_hidden_states:\r\n all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states)\r\n x = x.transpose(0, 1)\r\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\r\n\r\n next_cache = next_decoder_cache if use_cache else None\r\n\r\n #isidora - start - return enc_dec_all_attn instead of decoder outputs\r\n return enc_dec_all_attn\r\n #isidora - end\r\n\r\n #isidora - in comment\r\n \"\"\"\r\n if not return_dict:\r\n return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None)\r\n return BaseModelOutputWithPast(\r\n last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns\r\n )\r\n \"\"\"","def call(self,\n inputs,\n cache=None,\n decode_loop_step=None,\n padded_decode=False):\n attention_bias = inputs[\"attention_bias\"]\n target_ids = inputs[\"target_ids\"]\n all_encoder_outputs = inputs[\"all_encoder_outputs\"]\n self_attention_bias = inputs[\"self_attention_bias\"]\n if not isinstance(all_encoder_outputs, list):\n all_encoder_outputs = [all_encoder_outputs]\n\n target_embeds = self.embedding_lookup(target_ids)\n if decode_loop_step is None:\n target_embeds = self.embedding_postprocessor(target_embeds)\n else:\n target_embeds = self._decoding_step_time_signal(target_embeds,\n decode_loop_step)\n decoder_inputs = dict(\n decoder_inputs=target_embeds,\n encoder_outputs=all_encoder_outputs,\n self_attention_mask=self_attention_bias,\n attention_mask=attention_bias)\n if self.multi_channel_cross_attention:\n decoder_inputs[\"doc_attention_probs\"] = inputs[\"doc_attention_probs\"]\n decode_outputs, cache = self.decoder(\n 
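# --- Editor's sketch: growing a key/value cache during incremental decoding ---
# Both decoder variants above thread a cache (past key/values per layer)
# through each step so earlier positions are not recomputed. The update
# itself is a concat along the time axis; the 'layer_*' dict layout mirrors
# the convention used elsewhere in the dump, the shapes are assumptions.
import numpy as np

def update_cache(cache, layer, new_k, new_v):
    """new_k/new_v: [batch, 1, heads, head_dim] arrays for the current step."""
    entry = cache.setdefault("layer_%d" % layer, {"k": None, "v": None})
    for name, new in (("k", new_k), ("v", new_v)):
        old = entry[name]
        entry[name] = new if old is None else np.concatenate([old, new], axis=1)
    return cache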
decoder_inputs, cache, decode_loop_step if padded_decode else None)\n return decode_outputs","def decode(self,\n decoder_input,\n encoder_output,\n encoder_decoder_attention_bias,\n decoder_self_attention_bias,\n hparams,\n cache=None,\n nonpadding=None,\n losses=None):\n del losses\n # TODO(dehghani): enable caching.\n del cache\n\n decoder_input = tf.nn.dropout(decoder_input,\n 1.0 - hparams.layer_prepostprocess_dropout)\n\n # No caching in Universal Transformers!\n (decoder_output, dec_extra_output) = (\n my_universal_transformer_util.universal_transformer_decoder(\n decoder_input,\n encoder_output,\n decoder_self_attention_bias,\n encoder_decoder_attention_bias,\n hparams,\n nonpadding=nonpadding,\n save_weights_to=self.attention_weights))\n\n # Expand since t2t expects 4d tensors.\n return tf.expand_dims(decoder_output, axis=2), dec_extra_output","def attention_decoder(decoder_inputs,\n attention_states,\n cell,\n output_size=None,\n dtype=None,\n scope=None):\n if not decoder_inputs:\n raise ValueError(\"Must provide at least 1 input to attention decoder.\")\n if output_size is None:\n output_size = cell.output_size\n \n # ==================================scope=================================================\n with variable_scope.variable_scope(scope or \"TemporalAttn\", dtype=dtype) as scope:\n \n dtype = scope.dtype\n batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.\n attn_length = attention_states.get_shape()[1].value\n attn_size = attention_states.get_shape()[2].value\n \n # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.\n hidden = array_ops.reshape(attention_states, [-1, attn_length, 1, attn_size])\n # U_d * h_i for i in range(T) (filter)\n u = variable_scope.get_variable(\"AttnDecoderU\", [1, 1, attn_size, attn_size], dtype=dtype)\n hidden_features = nn_ops.conv2d(hidden, u, [1, 1, 1, 1], \"SAME\")\n \n v = variable_scope.get_variable(\"AttnDecoderV\", [attn_size], dtype=dtype)\n \n # how to get the initial_state\n initial_state_size = array_ops.stack([batch_size, cell.output_size])\n initial_state = [array_ops.zeros(initial_state_size, dtype=dtype) for _ in xrange(2)]\n state = initial_state\n \n w = variable_scope.get_variable(\"AttnDecoderW\", [2*cell.output_size, attn_size], dtype=dtype)\n b = variable_scope.get_variable(\"AttnDecoderb\", [attn_size], dtype=dtype)\n \n # beta_scalar = variable_scope.get_variable(\"BetaScalar\", [attn_length])\n \n def attention(query, step):\n \"\"\"\n Put attention masks on hidden using hidden_features and query.\n \"\"\"\n \n if nest.is_sequence(query): # If the query is a tuple, flatten it.\n query_list = nest.flatten(query)\n query = array_ops.concat(query_list, 1)\n _tmp = math_ops.matmul(query, w) + b\n _tmp = array_ops.reshape(_tmp, [-1, 1, 1, attn_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(v * math_ops.tanh(hidden_features + _tmp), [2, 3])\n # beta = math_ops.multiply(nn_ops.softmax(s, name=\"beta_%d\" % step), beta_scalar)\n beta = nn_ops.softmax(s, name=\"beta_%d\" % step)\n # Now calculate the attention-weighted vector d.\n \n hidden_attn = math_ops.reduce_sum(array_ops.reshape(beta, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n return hidden_attn, beta\n\n outputs = []\n attns = []\n with variable_scope.variable_scope(\"Attn\"):\n h_t, attn_t = attention(state, 0)\n attns.append(attn_t)\n # =============================recurrent===========================\n for i, inp in enumerate(decoder_inputs):\n if i > 0:\n 
variable_scope.get_variable_scope().reuse_variables()\n \n # LSTM_d([\\tilde{\\mathbf{h}}_{t}; \\mathbf{y}_t], \\hat{\\mathbf{y}}_{t}, \\mathbf{s}^d_{t})\n with variable_scope.variable_scope(\"DecoderOutput\"):\n x = tf.concat([inp, h_t], 1)\n cell_output, state = cell(x, state)\n outputs.append(cell_output)\n\n with variable_scope.variable_scope(\"Attn\"):\n h_t, attn_t = attention(state, i+1)\n attns.append(attn_t)\n \n with variable_scope.variable_scope(\"AttnDecoderOutput\"):\n inputs = tf.concat([cell_output, h_t], 1)\n output = Linear(inputs, output_size, True)(inputs)\n outputs.append(output)\n \n return outputs, state, attns","def decode(prev_hidden: torch.tensor, source_hiddens: torch.tensor, prev_context: torch.tensor,\n input: int, model: Seq2SeqAttentionModel) -> (\n torch.tensor, torch.tensor, torch.tensor, torch.tensor):\n\n decode_in = torch.cat((model.target_embedding_matrix[input], prev_context))\n hidden_out = model.decoder_gru.forward(decode_in, prev_hidden)\n # passing the top layer of encoder and decoder hidden dims\n attention_weights = model.attention.forward(source_hiddens[:,-1,:], hidden_out[-1])\n context = torch.mm(attention_weights.unsqueeze(dim=0),source_hiddens[:,-1,:]).squeeze()\n log_probs = model.output_layer.forward(torch.cat((hidden_out[-1].squeeze(),context)))\n return log_probs, hidden_out, context, attention_weights","def _inference_step(self, state):\n\n decoder_inputs = state[\"inputs\"]\n encoder_outputs = state[\"encoder_outputs\"]\n attention_bias = state[\"encoder_decoder_attention_bias\"]\n alignment_positions = state[\"alignment_positions\"]\n\n outputs = self._decode_pass(\n decoder_inputs=decoder_inputs,\n encoder_outputs=encoder_outputs,\n enc_dec_attention_bias=attention_bias,\n alignment_positions=alignment_positions\n )\n\n with tf.variable_scope(\"inference_step\"):\n next_inputs_mel = outputs[\"post_net_spec\"][:, -1:, :]\n next_inputs_mel = self._expand(next_inputs_mel, self.reduction_factor)\n next_inputs_mag = outputs[\"mag_spec\"][:, -1:, :]\n next_inputs_mag = self._expand(next_inputs_mag, self.reduction_factor)\n next_inputs = tf.concat([next_inputs_mel, next_inputs_mag], axis=-1)\n\n n_features = self.num_mels + self.num_freq\n next_inputs = self._shrink(next_inputs, n_features, self.reduction_factor)\n\n # Set zero if sequence is finished\n next_inputs = tf.where(\n state[\"finished\"],\n tf.zeros_like(next_inputs),\n next_inputs\n )\n next_inputs = tf.concat([decoder_inputs, next_inputs], 1)\n\n # Update lengths\n lengths = state[\"outputs\"][\"lengths\"]\n lengths = tf.where(\n state[\"finished\"],\n lengths,\n lengths + 1 * self.reduction_factor\n )\n outputs[\"lengths\"] = lengths\n\n # Update spec, post_net_spec and mag_spec\n for key in [\"spec\", \"post_net_spec\", \"mag_spec\"]:\n output = outputs[key][:, -1:, :]\n output = tf.where(state[\"finished\"], tf.zeros_like(output), output)\n outputs[key] = tf.concat([state[\"outputs\"][key], output], 1)\n\n # Update stop token logits\n stop_token_logits = outputs[\"stop_token_logits\"][:, -1:, :]\n stop_token_logits = tf.where(\n state[\"finished\"],\n tf.zeros_like(stop_token_logits) + 1e9,\n stop_token_logits\n )\n stop_prediction = tf.sigmoid(stop_token_logits)\n stop_prediction = tf.reduce_max(stop_prediction, axis=-1)\n\n # Uncomment next line if you want to use stop token predictions\n finished = tf.reshape(tf.cast(tf.round(stop_prediction), tf.bool), [-1])\n finished = tf.reshape(finished, [-1])\n\n stop_token_logits = tf.concat(\n 
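# --- Editor's sketch: the additive (Bahdanau-style) score used above ---
# attention_decoder computes s_i = v . tanh(U h_i + W q + b), softmaxes over
# the encoder steps, and takes the weighted sum of the hidden states as the
# context. The same math in NumPy for a single query; parameter shapes are
# assumptions (W: [attn, q_dim], U: [attn, k_dim], v/b: [attn]).
import numpy as np

def additive_attention(query, keys, W, U, v, b):
    """query: [q_dim]; keys: [T, k_dim] -> (context [k_dim], weights [T])."""
    scores = np.tanh(keys @ U.T + query @ W.T + b) @ v   # [T]
    weights = np.exp(scores - scores.max())
    weights = weights / weights.sum()                    # softmax over steps
    context = weights @ keys                             # [k_dim]
    return context, weights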
[state[\"outputs\"][\"stop_token_logits\"], stop_token_logits],\n axis=1\n )\n outputs[\"stop_token_logits\"] = stop_token_logits\n\n with tf.variable_scope(\"alignments\"):\n weights = []\n for index, attention in enumerate(self.attentions):\n if isinstance(attention, AttentionBlock):\n weights.append(attention.multiheaded_attention.attention_weights)\n\n weights = tf.stack(weights)\n outputs[\"alignments\"] = [weights]\n\n alignment_positions = tf.argmax(\n weights,\n axis=-1,\n output_type=tf.int32\n )[:, :, :, -1:]\n state[\"alignment_positions\"] = tf.concat(\n [state[\"alignment_positions\"], alignment_positions],\n axis=-1\n )\n\n state[\"iteration\"] = state[\"iteration\"] + 1\n state[\"inputs\"] = next_inputs\n state[\"finished\"] = finished\n state[\"outputs\"] = outputs\n\n return state","def _build_decoding_fn(self, max_decode_length):\n # [max_decode_length, hidden_size]\n timing_signal = utils.get_positional_encoding(\n max_decode_length, self._hidden_size)\n timing_signal = tf.cast(timing_signal, 'float32')\n\n def decoding_fn(decoder_input, cache, **kwargs):\n \"\"\"Computes the logits of the next decoded token ids.\n\n Args:\n decoder_input: int tensor of shape [batch_size * beam_width, 1], the \n decoded tokens at index `i`.\n cache: dict of entries\n 'encoder_outputs': tensor of shape \n [batch_size * beam_width, src_seq_len, hidden_size],\n 'padding_mask': tensor of shape\n [batch_size * beam_width, 1, 1, src_seq_len],\n\n and entries with keys 'layer_0',...,'layer_[decoder_num_layers - 1]'\n where the value associated with key 'layer_*' is a dict with entries\n 'k': tensor of shape [batch_size * beam_width, seq_len, num_heads, \n size_per_head],\n 'v': tensor of shape [batch_size * beam_width, seq_len, num_heads, \n size_per_head],\n 'tgt_tgt_attention': tensor of shape [batch_size * beam_width, \n num_heads, seq_len, seq_len],\n 'tgt_src_attention': tensor of shape [batch_size * beam_width, \n num_heads, seq_len, src_seq_len].\n Note `seq_len` is the running length of the growing decode sequence.\n kwargs: dict, storing the following additional keyword arguments.\n index -> int scalar tensor, the index of the `decoder_input` in the \n decoded sequence.\n\n Returns:\n logits: float tensor of shape [batch_size * beam_width, vocab_size].\n cache: a dict with the same structure as the input `cache`, except that\n the shapes of the values of key `k`, `v`, `tgt_tgt_attention`, \n `tgt_src_attention` are\n [batch_size * beam_width, seq_len + 1, num_heads, size_per_head],\n [batch_size * beam_width, seq_len + 1, num_heads, size_per_head],\n [batch_size * beam_width, num_heads, seq_len + 1, seq_len + 1],\n [batch_size * beam_width, num_heads, seq_len + 1, src_seq_len].\n \"\"\"\n index = kwargs['index']\n # [batch_size * beam_width, 1, hidden_size]\n decoder_input = self._embedding_logits_layer(decoder_input, 'embedding')\n decoder_input += timing_signal[index:index + 1]\n\n # [batch_size * beam_width, 1, hidden_size]\n decoder_outputs = self._decoder(decoder_input,\n cache['encoder_outputs'],\n tf.zeros((1, 1, 1, index + 1), \n dtype='float32'),\n cache['padding_mask'],\n training=False,\n cache=cache)\n\n # [[batch_size * beam_width, 1, vocab_size]\n logits = self._embedding_logits_layer(decoder_outputs, mode='logits')\n logits = tf.squeeze(logits, axis=1)\n return logits, cache\n\n return decoding_fn","def decode(\n self,\n encoded,\n encoder_input_tokens, # only needed for masks\n decoder_input_tokens,\n decoder_target_tokens,\n encoder_segment_ids=None,\n 
decoder_segment_ids=None,\n decoder_positions=None,\n enable_dropout=True,\n decode=False,\n max_decode_length=None):\n cfg = self.config\n\n # Make padding attention masks.\n if decode:\n # fast autoregressive decoding uses only a special encoder-decoder mask\n decoder_mask = None\n encoder_decoder_mask = layers.make_attention_mask(\n jnp.ones_like(decoder_target_tokens) > 0,\n encoder_input_tokens > 0,\n dtype=cfg.dtype)\n else:\n decoder_mask = layers.make_decoder_mask(\n decoder_target_tokens=decoder_target_tokens,\n dtype=cfg.dtype,\n decoder_segment_ids=decoder_segment_ids)\n encoder_decoder_mask = layers.make_attention_mask(\n decoder_target_tokens > 0, encoder_input_tokens > 0, dtype=cfg.dtype)\n\n # Add segmentation block-diagonal attention masks if using segmented data.\n if encoder_segment_ids is not None:\n if decode:\n raise ValueError(\n 'During decoding, packing should not be used but '\n '`encoder_segment_ids` was passed to `Transformer.decode`.')\n\n encoder_decoder_mask = layers.combine_masks(\n encoder_decoder_mask,\n layers.make_attention_mask(\n decoder_segment_ids,\n encoder_segment_ids,\n jnp.equal,\n dtype=cfg.dtype))\n\n logits = self.decoder(\n encoded,\n decoder_input_tokens=decoder_input_tokens,\n decoder_positions=decoder_positions,\n decoder_mask=decoder_mask,\n encoder_decoder_mask=encoder_decoder_mask,\n deterministic=not enable_dropout,\n decode=decode,\n max_decode_length=max_decode_length)\n return logits.astype(self.config.dtype)","def decode(self, dec_state, words, **kwargs):\n with tf.name_scope(self.decoder2.name):\n (enc_out, enc_attn_mask, dec1_out, dec1_rdo, dec1_attn_mask,\n attnP, prev_out_seq, rdo) = dec_state\n\n out_seq = tf.concat([prev_out_seq, tf.expand_dims(words, 1)], 1)\n return self._decode_impl((enc_out, enc_attn_mask, dec1_out, dec1_rdo, dec1_attn_mask,\n attnP, out_seq, rdo), **kwargs)","def _decode_infer(self, decoder, _encoder_output, features, labels):\r\n\r\n return decoder(_encoder_output, labels)","def decode():\n\n with tf.device('/cpu:0'):\n dataset_test = SequenceDataset(\n subset=\"test\",\n config_dir=FLAGS.config_dir,\n data_dir=FLAGS.data_dir,\n batch_size=1,\n input_size=FLAGS.input_dim,\n output_size=FLAGS.output_dim,\n infer=True,\n name=\"dataset_test\")()\n\n model = TfModel(\n rnn_cell=FLAGS.rnn_cell,\n dnn_depth=FLAGS.dnn_depth,\n dnn_num_hidden=FLAGS.dnn_num_hidden,\n rnn_depth=FLAGS.rnn_depth,\n rnn_num_hidden=FLAGS.rnn_num_hidden,\n output_size=FLAGS.output_dim,\n bidirectional=FLAGS.bidirectional,\n rnn_output=FLAGS.rnn_output,\n cnn_output=FLAGS.cnn_output,\n look_ahead=FLAGS.look_ahead,\n mdn_output=FLAGS.mdn_output,\n mix_num=FLAGS.mix_num,\n name=\"tf_model\")\n\n # Build the testing model and get test output sequence.\n test_iterator = dataset_test.batched_dataset.make_one_shot_iterator()\n input_sequence, input_sequence_length = test_iterator.get_next()\n test_output_sequence_logits, test_final_state = model(\n input_sequence, input_sequence_length)\n\n show_all_variables()\n\n saver = tf.train.Saver()\n\n # Decode.\n with tf.Session() as sess:\n # Run init\n sess.run(tf.global_variables_initializer())\n\n if not restore_from_ckpt(sess, saver): sys.exit(-1)\n\n # Read cmvn to do reverse mean variance normalization\n cmvn = np.load(os.path.join(FLAGS.data_dir, \"train_cmvn.npz\"))\n\n num_batches = 0\n used_time_sum = frames_sum = 0.0\n while True:\n try:\n time_start = time.time()\n logits = sess.run(test_output_sequence_logits)\n time_end = time.time()\n\n used_time = time_end - time_start\n 
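# --- Editor's sketch: the encoder-decoder padding mask built above ---
# layers.make_attention_mask pairs a query-side validity vector with a
# key-side one; the outer product is 1 where both positions hold real
# tokens. Minimal NumPy stand-in (the real helper may add a head axis and a
# dtype argument; those details are not shown in the dump).
import numpy as np

def make_attention_mask(query_valid, key_valid):
    """query_valid: [batch, tgt_len] bool; key_valid: [batch, src_len] bool."""
    return (query_valid[:, :, None] & key_valid[:, None, :]).astype(np.float32)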
used_time_sum += used_time\n frame_num = logits.shape[1]\n frames_sum += frame_num\n\n # Squeeze batch dimension.\n logits = logits.squeeze(axis=0)\n\n if FLAGS.mdn_output:\n out_pi = logits[:, : FLAGS.mix_num]\n out_mu = logits[:, FLAGS.mix_num : (FLAGS.mix_num + FLAGS.mix_num * FLAGS.output_dim)]\n out_sigma = logits[:, (FLAGS.mix_num + FLAGS.mix_num * FLAGS.output_dim) :]\n\n max_index_pi = out_pi.argmax(axis=1)\n result_mu = []\n for i in xrange(out_mu.shape[0]):\n beg_index = max_index_pi[i] * FLAGS.output_dim\n end_index = (max_index_pi[i] + 1) * FLAGS.output_dim\n result_mu.append(out_mu[i, beg_index:end_index])\n logits = np.vstack(result_mu)\n\n sequence = logits * cmvn[\"stddev_labels\"] + cmvn[\"mean_labels\"]\n\n out_dir_name = os.path.join(FLAGS.save_dir, \"test\", \"cmp\")\n out_file_name =os.path.basename(\n dataset_test.tfrecords_lst[num_batches]).split('.')[0] + \".cmp\"\n out_path = os.path.join(out_dir_name, out_file_name)\n write_binary_file(sequence, out_path, with_dim=False)\n #np.savetxt(out_path, sequence, fmt=\"%f\")\n\n tf.logging.info(\n \"writing inferred cmp to %s (%d frames in %.4f seconds)\" % (\n out_path, frame_num, used_time))\n num_batches += 1\n except tf.errors.OutOfRangeError:\n break\n\n tf.logging.info(\"Done decoding -- epoch limit reached (%d \"\n \"frames per second)\" % int(frames_sum / used_time_sum))","def decode(self, decoder_input, sampler_output):\n\n self.attention_hidden, self.attention_cell = self.attention_lstm(\n decoder_input, (self.attention_hidden, self.attention_cell))\n self.attention_hidden = F.dropout(\n self.attention_hidden, self.p_attention_dropout, self.training)\n\n self.decoder_hidden, self.decoder_cell = self.decoder_lstm(\n self.attention_hidden, (self.decoder_hidden, self.decoder_cell))\n self.decoder_hidden = F.dropout(\n self.decoder_hidden, self.p_decoder_dropout, self.training)\n\n # print(self.decoder_hidden.size())\n # print(self.decoder_hidden.size(), sampler_output.size())\n proj_input = torch.cat(\n (self.decoder_hidden, sampler_output), 1) # [B, 1024 + 1280]\n\n decoder_output = self.linear_projection(proj_input)\n\n return decoder_output","def decoder(x, reuse=False):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n with tf.name_scope('Decoder'):\n d_dense_1 = tf.nn.relu(dense(x, z_dim, n_l2, 'd_dense_1'))\n d_dense_2 = tf.nn.relu(dense(d_dense_1, n_l2, n_l1, 'd_dense_2'))\n output = tf.nn.sigmoid(dense(d_dense_2, n_l1, input_dim, 'd_output'))\n return output","def forward(self, input_token, target_token, timestep, *inputs):\n log_probs_per_model = []\n state_outputs = []\n next_state_input = len(self.models)\n vocab_reduction_module = self.models[0].decoder.vocab_reduction_module\n if vocab_reduction_module is not None:\n possible_translation_tokens = inputs[len(self.models)]\n next_state_input += 1\n else:\n possible_translation_tokens = None\n for i, model in enumerate(self.models):\n encoder_output = inputs[i]\n prev_hiddens = []\n prev_cells = []\n for _ in range(len(model.decoder.layers)):\n prev_hiddens.append(inputs[next_state_input])\n prev_cells.append(inputs[next_state_input + 1])\n next_state_input += 2\n prev_input_feed = inputs[next_state_input].view(1, -1)\n next_state_input += 1\n if self.enable_precompute_reduced_weights and hasattr(model.decoder, '_precompute_reduced_weights') and possible_translation_tokens is not None:\n reduced_output_weights = inputs[next_state_input:next_state_input + 2]\n next_state_input += 2\n else:\n reduced_output_weights = None\n src_length_int = 
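# --- Editor's sketch: picking the mode of a mixture-density (MDN) output ---
# The mdn_output branch above splits each frame's logits into mixture
# weights (pi), means (mu) and scales (sigma), then keeps the mu of the most
# probable component. The same slicing in NumPy; mix_num/output_dim mirror
# the FLAGS of the snippet.
import numpy as np

def mdn_mode(logits, mix_num, output_dim):
    """logits: [frames, mix_num + 2*mix_num*output_dim] -> [frames, output_dim]."""
    out_pi = logits[:, :mix_num]
    out_mu = logits[:, mix_num:mix_num + mix_num * output_dim]
    best = out_pi.argmax(axis=1)                      # [frames]
    mus = out_mu.reshape(-1, mix_num, output_dim)     # [frames, mix, dim]
    return mus[np.arange(len(best)), best]            # mu of the best component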
int(encoder_output.size()[0])\n src_length = torch.LongTensor(np.array([src_length_int]))\n src_tokens = torch.LongTensor(np.array([[0] * src_length_int]))\n src_embeddings = encoder_output.new_zeros(encoder_output.shape)\n encoder_out = encoder_output, prev_hiddens, prev_cells, src_length, src_tokens, src_embeddings\n model.decoder._is_incremental_eval = True\n model.eval()\n incremental_state = {}\n utils.set_incremental_state(model.decoder, incremental_state, 'cached_state', (prev_hiddens, prev_cells, prev_input_feed))\n decoder_output = model.decoder(input_token.view(1, 1), encoder_out, incremental_state=incremental_state, possible_translation_tokens=possible_translation_tokens)\n logits, _, _ = decoder_output\n log_probs = F.log_softmax(logits, dim=2)\n log_probs_per_model.append(log_probs)\n next_hiddens, next_cells, next_input_feed = utils.get_incremental_state(model.decoder, incremental_state, 'cached_state')\n for h, c in zip(next_hiddens, next_cells):\n state_outputs.extend([h, c])\n state_outputs.append(next_input_feed)\n if reduced_output_weights is not None:\n state_outputs.extend(reduced_output_weights)\n average_log_probs = torch.mean(torch.cat(log_probs_per_model, dim=0), dim=0, keepdim=True)\n if possible_translation_tokens is not None:\n reduced_indices = torch.zeros(self.vocab_size).long().fill_(self.unk_token)\n possible_translation_token_range = torch._dim_arange(like=possible_translation_tokens, dim=0)\n reduced_indices[possible_translation_tokens] = possible_translation_token_range\n reduced_index = reduced_indices.index_select(dim=0, index=target_token)\n score = average_log_probs.view((-1,)).index_select(dim=0, index=reduced_index)\n else:\n score = average_log_probs.view((-1,)).index_select(dim=0, index=target_token)\n word_reward = self.word_rewards.index_select(0, target_token)\n score += word_reward\n self.input_names = ['prev_token', 'target_token', 'timestep']\n for i in range(len(self.models)):\n self.input_names.append(f'fixed_input_{i}')\n if possible_translation_tokens is not None:\n self.input_names.append('possible_translation_tokens')\n outputs = [score]\n self.output_names = ['score']\n for i in range(len(self.models)):\n self.output_names.append(f'fixed_input_{i}')\n outputs.append(inputs[i])\n if possible_translation_tokens is not None:\n self.output_names.append('possible_translation_tokens')\n outputs.append(possible_translation_tokens)\n for i, state in enumerate(state_outputs):\n outputs.append(state)\n self.output_names.append(f'state_output_{i}')\n self.input_names.append(f'state_input_{i}')\n return tuple(outputs)","def test_inference(self):\n model = self.create_model()\n ex = self._create_example()\n\n embeddings = tf.get_variable(\n \"W_embed\", [model.target_vocab_info.total_size, self.input_depth])\n\n def make_input_fn(step_output):\n \"\"\"Looks up the predictions in the embeddings.\n \"\"\"\n return tf.nn.embedding_lookup(embeddings, step_output.predictions)\n\n decoder_input_fn = DynamicDecoderInputs(\n initial_inputs=tf.zeros(\n [self.batch_size, self.input_depth], dtype=tf.float32),\n make_input_fn=make_input_fn)\n\n decoder_output = model.encode_decode(\n source=tf.convert_to_tensor(\n ex.source, dtype=tf.float32),\n source_len=tf.convert_to_tensor(\n ex.source_len, dtype=tf.int32),\n decoder_input_fn=decoder_input_fn,\n target_len=self.max_decode_length)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n decoder_output_ = sess.run(decoder_output)\n\n # Assert shapes are correct\n 
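# --- Editor's sketch: ensemble scoring as in the forward() above ---
# The ensemble decoder step averages per-model log-probabilities and then
# adds a per-token word reward before scoring the target token. A NumPy
# equivalent for a single step; shapes are assumptions.
import numpy as np

def ensemble_score(log_probs_per_model, target_token, word_rewards):
    """log_probs_per_model: list of [vocab] arrays -> scalar token score."""
    avg = np.mean(np.stack(log_probs_per_model, axis=0), axis=0)  # [vocab]
    return avg[target_token] + word_rewards[target_token]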
np.testing.assert_array_equal(decoder_output_.logits.shape, [\n self.batch_size, self.max_decode_length,\n model.target_vocab_info.total_size\n ])\n np.testing.assert_array_equal(decoder_output_.predictions.shape,\n [self.batch_size, self.max_decode_length])","def compute_decoding(self, dec_inp, initial_h, initial_c, forward_encoder_states, backward_encoder_states, denseLayer, embedding):\r\n\t\toutputs = []\r\n\t\tdecoder_state={}\r\n\t\tdecoder_state[\"lstm\"] = self.lstm_cell.zero_state()\r\n\r\n\t\tattention_vector, alpha = self.null_attention_vectors(forward_encoder_states)\r\n\t\tdecoder_state['a'] = attention_vector\r\n\t\tdecoder_state['alpha'] = alpha\r\n\t\tdecoder_state[\"lstm\"]['h'] = initial_h\r\n\t\tdecoder_state[\"lstm\"]['c'] = initial_c\r\n\r\n\t\tfor i in range(self.max_decoding_length):\r\n\r\n\t\t\t#To keep track of the input for LRP when we transmit the relevance from the input\r\n\t\t\tdecoder_state[\"input_int\"] = dec_inp[i].reshape((len(dec_inp[i]),1))\r\n\r\n\t\t\t#Concatenating the input vector with the attention vector\r\n\t\t\tinput_ = np.concatenate([dec_inp[i].reshape((len(dec_inp[i]),1)), decoder_state['a']])\r\n\r\n\t\t\t#We compute the LSTM step\r\n\t\t\tdecoder_state[\"lstm\"] = self.lstm_cell.forward(decoder_state[\"lstm\"], input_)\r\n\r\n\t\t\t#We compute the output after the output layer\r\n\t\t\tdecoder_state[\"output\"] = denseLayer.compute_forward(decoder_state[\"lstm\"])\r\n\r\n\t\t\t# The decoder state stores the attention vector is had as an input\r\n\t\t\toutputs.append(decoder_state.copy())\r\n\r\n\t\t\t#We compute the attention vector used for the next step\r\n\t\t\tattention_vector, alpha = self.compute_attention(decoder_state[\"lstm\"], forward_encoder_states, backward_encoder_states)\r\n\t\t\tdecoder_state['a'] = attention_vector\r\n\t\t\tdecoder_state['alpha'] = alpha\r\n\r\n\r\n\t\tself.outputs = outputs\r\n\t\treturn outputs","def call(self, inputs, output_hidden_states = False, training = False):\n if isinstance(inputs, (list, tuple)):\n input_ids = inputs[0]\n token_type_ids = inputs[1] if len(inputs) > 1 else None\n attention_mask = inputs[2] if len(inputs) > 2 else None\n \n elif isinstance(inputs, dict):\n input_ids = inputs['input_ids']\n token_type_ids = inputs.get('token_type_ids', None)\n attention_mask = inputs.get('attention_mask', None)\n else:\n raise ValueError('The type of inputs should be list or dictionary.')\n \n input_shape = shape_list(input_ids)\n \n# last_hidden_state = tf.ones(input_shape + (self.config.hidden_size))\n# output = tf.ones(input_shape + (self.config.hidden_size,))\n# logits = tf.ones(input_shape + (self.config.vocab_size,))\n# pooler_output = tf.ones((input_shape[0], self.config.hidden_size))\n \n hidden_states = [] if output_hidden_states else None\n output = self.embeddings(input_ids, token_type_ids, training = training)\n \n if output_hidden_states:\n hidden_states.append(output)\n\n if self.causal_attention:\n attention_mask = tf.constant(lower_triangle_matrix(input_shape[-1]))\n attention_mask = tf.reshape(attention_mask, shape = (1, 1, input_shape[-1], input_shape[-1]))\n \n else:\n if attention_mask is None:\n attention_mask = tf.constant(1.0, shape = input_shape, dtype = 'float32')\n # attention_mask now has shape (batches, sequence_len),\n # we need to covert it to (batches, 1, 1, sequence_len)\n # so that it will broadcast to (batches, num_attention_heads, sequence_len, sequence_len)\n attention_mask = tf.reshape(attention_mask, shape = (-1, 1, 1, input_shape[-1]))\n\n \n \n 
last_hidden_state, layer_outputs = self.encoder(output, attention_mask, output_hidden_states = output_hidden_states, training = training)\n if output_hidden_states:\n hidden_states.extend(layer_outputs)\n \n pooler_output = self.pooler(tf.gather(last_hidden_state, indices = 0, axis = 1)) if self.pooler else None\n logits = self.lm_head(last_hidden_state) if self.lm_head else None\n\n res = {'sequence_output': last_hidden_state,\n 'pooler_output': pooler_output,\n 'logits': logits,\n 'hidden_states': hidden_states}\n\n self.built = True\n\n return {k : v for k, v in res.items() if v is not None}","def build(self, unused_input_shapes):\n if self.embedding_lookup is None:\n self.embedding_lookup = layers.OnDeviceEmbedding(\n vocab_size=self.config.vocab_size,\n embedding_width=self.config.hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.config.initializer_range),\n name=\"target_embeddings\")\n self.embedding_postprocessor = EmbeddingPostprocessor(\n use_type_embeddings=False,\n use_position_embeddings=True,\n max_position_embeddings=self.config.max_position_embeddings,\n dropout_prob=self.config.hidden_dropout_prob,\n initializer=tf.keras.initializers.VarianceScaling(\n scale=self.config.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"uniform\"),\n name=\"embedding_postprocessor\")\n # Decoder can use a different intermediate size.\n self.multi_channel_cross_attention = self.config.get(\n \"multi_channel_cross_attention\", False)\n self.decoder = TransformerDecoder(\n num_hidden_layers=self.config.num_decoder_layers,\n hidden_size=self.config.hidden_size,\n num_attention_heads=self.config.num_decoder_attn_heads,\n intermediate_size=self.config.decoder_intermediate_size,\n intermediate_activation=self.config.hidden_act,\n hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=\"decoder\")\n super(Decoder, self).build(unused_input_shapes)","def call(self, inputs, cache=None, decode_loop_step=None):\n decoder_inputs = inputs[\"decoder_inputs\"]\n encoder_outputs = inputs[\"encoder_outputs\"]\n self_attention_mask = inputs[\"self_attention_mask\"]\n attention_mask = inputs[\"attention_mask\"]\n decoder_shape = tf_utils.get_shape_list(decoder_inputs, expected_rank=3)\n batch_size = decoder_shape[0]\n decoder_length = decoder_shape[1]\n\n def _to_bert_self_attention_mask(matrix):\n \"\"\"[1, 1, target_len, target_len] -> [bs, target_len, target_len].\"\"\"\n matrix = tf.squeeze(matrix, axis=[1])\n matrix = tf.tile(matrix, [batch_size, 1, 1])\n return matrix\n\n def _to_bert_encdec_attention_mask(matrix):\n \"\"\"[bs, 1, 1, input_len] -> [bs, target_len, input_len].\"\"\"\n if self.multi_channel_cross_attention:\n matrix = tf.expand_dims(matrix, axis=2)\n matrix = tf.tile(matrix, [1, 1, decoder_length, 1])\n else:\n matrix = tf.squeeze(matrix, axis=[1])\n matrix = tf.tile(matrix, [1, decoder_length, 1])\n return matrix\n\n attention_mask = _to_bert_encdec_attention_mask(attention_mask)\n self_attention_mask = _to_bert_self_attention_mask(self_attention_mask)\n\n output_tensor = decoder_inputs\n for layer_idx in range(self.num_hidden_layers):\n if self.attend_to_last_layer:\n memory = encoder_outputs[-1]\n else:\n memory = encoder_outputs[layer_idx]\n if self.multi_channel_cross_attention:\n transformer_inputs = [\n output_tensor, memory, attention_mask, 
self_attention_mask,\n inputs[\"doc_attention_probs\"]\n ]\n else:\n transformer_inputs = [\n output_tensor, memory, attention_mask, self_attention_mask\n ]\n # Gets the cache for decoding.\n if cache is None:\n output_tensor, _ = self.layers[layer_idx](transformer_inputs)\n else:\n cache_layer_idx = str(layer_idx)\n output_tensor, cache[cache_layer_idx] = self.layers[layer_idx](\n transformer_inputs,\n cache=cache[cache_layer_idx],\n decode_loop_step=decode_loop_step)\n return output_tensor, cache","def run_decoder(self, model_state: 'ModelState') -> Tuple[mx.nd.NDArray, mx.nd.NDArray, 'ModelState']:\n batch = mx.io.DataBatch(\n data=[model_state.prev_target_word_id.as_in_context(self.context)] + model_state.decoder_states,\n label=None,\n bucket_key=model_state.bucket_key,\n provide_data=self._get_decoder_data_shapes(model_state.bucket_key))\n self.decoder_module.forward(data_batch=batch, is_train=False)\n probs, attention_probs, *model_state.decoder_states = self.decoder_module.get_outputs()\n return probs, attention_probs, model_state","def gru_training_decoder(cell, attention, sftm, embedding, states, in_mask, in_bool_mask, batch_output, out_mask):\n # batch wise\n l_state = generate_final_state(tf.pack(states), in_bool_mask) # initialize the state, the encoder's last state\n outputs, loss, possib, symbol = [], [], [], []\n sstates, _ = attention.shortcut(states)\n\n with tf.device(\"/cpu:0\"):\n embedded_list = tf.nn.embedding_lookup(embedding, batch_output)\n #embedded_list = out_mask * tf.transpose(embedded_list, [2, 0, 1]) # Add mask to change embedding into zeros\n #embedded_list = tf.transpose(embedded_list, [2, 1, 0])\n embedded_list = tf.transpose(embedded_list, [1, 0, 2])\n embedded_list = tf.unpack(embedded_list) # list of embedding\n\n for time, (embedded, target, t_mask) in enumerate(zip(embedded_list[:-1], tf.unpack(tf.transpose(batch_output))[1:], tf.unpack(tf.transpose(out_mask))[1:])):\n eij = []\n #embedded = tf.nn.embedding_lookup(embedding, tf.reshape(i, [-1])) # deprecated\n #embedded = embedded * tf.reshape(tf.convert_to_tensor(out_mask[:, time], tf.float32), [batch_size, 1]) # deprecated\n for h in sstates:\n eij.append(attention(l_state, h))\n eij = tf.concat(1, eij)\n alphaij = softmax_wt_mask(eij, in_mask) # Add mask to change embedding into zeros\n #alphaij = tf.nn.softmax(eij) # PROBLEM!!!!\n #alphaij = alphaij * in_mask\n ##### Debug\n #print sess.run(alphaij) #print in_mask #print sess.run(alphaij) #print states\n t_ = alphaij * tf.transpose(tf.pack(states)) # broadcastable\n t_ = tf.transpose(t_)\n ci = tf.reduce_sum(t_, 0)\n output, l_state = cell(embedded, l_state, ci)\n output = output * tf.expand_dims(t_mask, 1) # Add mask\n outputs.append(output)\n res = sftm(output, tf.cast(target, tf.int64), t_mask)\n symbol.append(res[0])\n possib.append(res[1])\n loss.append(res[2])\n #cost = tf.reduce_mean(tf.add_n(loss))\n total_size = tf.add_n(tf.unpack(tf.transpose(out_mask))[1:])\n total_size += 1e-12\n cost = tf.add_n(loss) / total_size\n cost = tf.reduce_mean(cost)\n return outputs, symbol, possib, cost, loss","def decode():\n with io.open(FLAGS.predict_input_file, encoding='utf-8') as test_file:\n lines = test_file.readlines()\n # Get the largest sentence length to set an upper bound to the decoder.\n max_length = FLAGS.max_sentence_length\n # max_length = max([len(line) for line in lines])\n \n print(\"Building dynamic character-level ALLDATASET data...\", flush=True)\n dataset = ALLDATASET(\n train_input=FLAGS.train_input, 
train_output=FLAGS.train_output,\n dev_input=FLAGS.dev_input, dev_output=FLAGS.dev_output,\n predict_input_file=FLAGS.predict_input_file, \n parse_repeated=FLAGS.parse_repeated,\n max_input_length=max_length, max_label_length=max_length)\n \n print(\"Building computational graph...\", flush=True)\n graph = tf.Graph()\n with graph.as_default():\n \n tf.set_random_seed(1)\n random.seed(1)\n np.random.seed(1)\n\n m = Seq2Seq(\n num_types=dataset.num_types(),\n max_encoder_length=max_length, max_decoder_length=max_length,\n pad_id=dataset.type_to_ix['_PAD'],\n eos_id=dataset.type_to_ix['_EOS'],\n go_id=dataset.type_to_ix['_GO'],\n space_id=dataset.type_to_ix[(' ',)],\n ix_to_type=dataset.ix_to_type,\n batch_size=1, embedding_size=FLAGS.embedding_size,\n hidden_size=FLAGS.hidden_size, rnn_layers=FLAGS.rnn_layers,\n bidirectional_encoder=FLAGS.bidirectional_encoder,\n bidirectional_mode=FLAGS.bidirectional_mode,\n use_lstm=FLAGS.use_lstm, attention=FLAGS.attention,\n beam_size=FLAGS.beam_size, restore=True, model_output_dir=FLAGS.model_output_dir)\n \n with tf.Session(graph=graph) as sess:\n print(\"Restoring model...\", flush=True)\n m.start()\n print(\n \"Restored model (global step {})\".format(m.global_step.eval()),\n flush=True)\n with io.open(FLAGS.output_path, 'w', encoding='utf-8') as output_file:\n for line in lines:\n # if len(line) > max_length:\n # continue\n number_of_chars = len(line)\n completely_divisble = number_of_chars % FLAGS.max_sentence_length == 0\n\n if number_of_chars < FLAGS.max_sentence_length:\n parts = [line]\n else:\n parts = []\n count = 0\n last_word_end_index = 0\n\n line_copy = line\n while len(line_copy) != 0 and count < len(line_copy):\n if count == FLAGS.max_sentence_length:\n if last_word_end_index == 0:\n parts.append(line_copy[: count])\n line_copy = line_copy[count:]\n else:\n parts.append(line_copy[: last_word_end_index])\n line_copy = line_copy[last_word_end_index:]\n \n last_word_end_index = 0\n count = 0\n\n if line_copy[count] == \" \":\n last_word_end_index = count\n\n count += 1\n\n if not completely_divisble:\n parts.append(line_copy)\n \n result = \"\"\n for part in parts:\n ids = dataset.tokenize(part)\n while len(ids) < max_length:\n ids.append(dataset.type_to_ix['_PAD'])\n outputs = sess.run(m.generative_output, feed_dict={m.inputs: [ids]})\n top_line = untokenize_batch(dataset, outputs)[0]\n # Sequences of text will only be repeated up to 5 times.\n top_line = re.sub(r'(.+?)\\1{5,}', lambda m: m.group(1) * 5, top_line)\n result += top_line\n output_file.write(result + '\\n')\n print(\"PREDICTION:\", top_line, flush=True)\n print()","def forward(self, inputs, decode_len=None):\n\n batch_size = inputs.size(0)\n input_dim = inputs.size(1)\n assert input_dim == self.input_dim, 'input dim should be {:d} but now: {:d}'.format(self.input_dim, input_dim)\n\n sourceL = inputs.size(2)\n\n if self.embed_input:\n # repeat embeddings across batch_size\n # result is [batch_size x input_dim x embedding_dim]\n # TODO: repeat or expand?\n embedding = self.embedding.repeat(batch_size, 1, 1)\n embedded_inputs = []\n # result is [batch_size, 1, input_dim, sourceL]\n ips = inputs.unsqueeze(1)\n\n for i in range(sourceL):\n # [batch_size x 1 x input_dim] * [batch_size x input_dim x embedding_dim]\n # result is [batch_size, embedding_dim]\n embedded_inputs.append(torch.bmm(\n ips[:, :, :, i].float(),\n embedding).squeeze(1))\n\n # Result is [sourceL x batch_size x embedding_dim]\n embedded_inputs = torch.cat(embedded_inputs).view(\n sourceL,\n batch_size,\n 
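# A simplified pure-Python rendering of the line-chunking loop in decode()
# above: cut each input into pieces of at most max_len characters, preferring
# the last space seen. Edge-case behavior is assumed, not copied verbatim.
def chunk_line(line, max_len):
    parts = []
    while len(line) > max_len:
        cut = line.rfind(" ", 0, max_len)
        if cut <= 0:              # no space found: hard cut at max_len
            cut = max_len
        parts.append(line[:cut])
        line = line[cut:]
    if line:
        parts.append(line)
    return parts

print(chunk_line("the quick brown fox jumps", 10))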
embedding.size(2))\n else:\n embedded_inputs = inputs.permute(2, 0, 1)\n\n (encoder_hx, encoder_cx) = init_zero_hidden(self.hidden_dim, inputs.is_cuda)\n encoder_hx = encoder_hx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n encoder_cx = encoder_cx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n \n # encoder forward pass\n enc_h, (enc_h_t, enc_c_t) = self.encoder(embedded_inputs, (encoder_hx, encoder_cx))\n\n enc_h_linear = enc_h.view(-1, self.hidden_dim)\n # enc_h_linear_2d = enc_h_linear.view(self.hidden_dim, -1)\n enc_action_scores = self.EncodeScore(enc_h_linear)\n enc_action_scores = enc_action_scores.view(-1, batch_size).permute(1, 0)\n dec_init_state = (enc_h_t[-1], enc_c_t[-1])\n \n # repeat decoder_in_0 across batch\n decoder_input = self.decoder_in_0.unsqueeze(0).repeat(embedded_inputs.size(1), 1)\n\n (head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores), dec_hidden_t = self.decoder(decoder_input,\n embedded_inputs,\n dec_init_state,\n enc_h, max_len=decode_len)\n #TODO: added conversion to tensors\n head_pointer_probs = torch.stack(head_pointer_probs)\n head_pointer_probs = head_pointer_probs.permute(1, 0, 2)\n tail_pointer_probs = torch.stack(tail_pointer_probs)\n tail_pointer_probs = tail_pointer_probs.permute(1, 0, 2)\n cls_scores = torch.stack(cls_scores)\n cls_scores = cls_scores.permute(1, 0, 2)\n head_positions = torch.stack(head_positions)\n head_positions = head_positions.permute(1, 0)\n tail_positions = torch.stack(tail_positions)\n tail_positions = tail_positions.permute(1, 0)\n\n\n\n return head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, enc_action_scores","def _build_decoding_cache(self, src_token_ids, batch_size):\n padding_mask = utils.get_padding_mask(src_token_ids, SOS_ID)\n encoder_outputs = self._encode(src_token_ids, padding_mask, training=False)\n size_per_head = self._hidden_size // self._num_heads\n src_seq_len = padding_mask.shape[-1] \n\n decoding_cache = {'layer_%d' % layer:\n {'k':\n tf.zeros([\n batch_size, 0, self._num_heads, size_per_head\n ], 'float32'),\n 'v':\n tf.zeros([\n batch_size, 0, self._num_heads, size_per_head\n ], 'float32'),\n 'tgt_tgt_attention':\n tf.zeros([\n batch_size, self._num_heads, 0, 0], 'float32'), \n 'tgt_src_attention':\n tf.zeros([\n batch_size, self._num_heads, 0, src_seq_len], 'float32')\n\n } for layer in range(self._decoder._stack_size)\n }\n decoding_cache['encoder_outputs'] = encoder_outputs\n decoding_cache['padding_mask'] = padding_mask\n return decoding_cache","def build_decoder(opt, embeddings):\n return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size, opt.heads, opt.transformer_ff, opt.dropout, embeddings)","def decode(self, x, y):\n y = self.embedding(y)\n b, t, h = y.shape\n start = torch.zeros((b, 1, h))\n if self.is_cuda:\n start = start\n y = torch.cat([start, y], dim=1)\n y, _ = self.dec_rnn(y)\n x = x.unsqueeze(dim=2)\n y = y.unsqueeze(dim=1)\n out = self.fc1(x) + self.fc1(y)\n out = nn.functional.relu(out)\n out = self.fc2(out)\n out = nn.functional.log_softmax(out, dim=3)\n return out","def _decode_train(self):\n\n # the basic idea is, we use golden sketch during train and in order to copy from source\n # we given true mask of decoder to generate right copy weights\n state = {'encoder': self.concated_encoder_output}\n\n def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,\n reuse=False):\n return transformer_decoder(inputs, memory, bias, mem_bias, 
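# The zero-length key/value cache idiom from _build_decoding_cache above,
# restated as a runnable PyTorch toy (head count and sizes are made up):
# start each layer's k/v with a 0-length time axis and concatenate per step.
import torch

cache = {"k": torch.zeros(2, 0, 4, 16), "v": torch.zeros(2, 0, 4, 16)}
for step in range(3):
    new_k = torch.randn(2, 1, 4, 16)   # this step's projected keys
    new_v = torch.randn(2, 1, 4, 16)   # this step's projected values
    cache["k"] = torch.cat([cache["k"], new_k], dim=1)
    cache["v"] = torch.cat([cache["v"], new_v], dim=1)
print(cache["k"].shape)  # torch.Size([2, 3, 4, 16])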
params, state, scope, reuse)\n\n self.final_logits = self._decode_func(\n self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,\n self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,\n expand_source_ids_oo=self.concat_src_ids_oo,\n max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,\n decoder_fn=transformer_concated_decoder_internal,\n scope='final_decoder')","def forward(self, encoder_output, encoded_captions, caption_lengths):\n \n batch_size = encoder_output.size(0)\n encoder_dim = encoder_output.size(-1)\n \n # Flatten image\n encoder_output = encoder_output.view(batch_size, -1, encoder_dim)\n \n #print(encoder_output.size())\n \n num_pixels = encoder_output.size(1)\n \n # Embedding\n embeddings = self.embedding(encoded_captions).type(torch.FloatTensor).to(device)\n #print(embeddings.size())\n \n #initial_states\n h, c = self.init_hidden_states(encoder_output)\n #print(h.size(), c.size())\n\n # Create tensors to hold word predicion scores and alphas\n predictions = torch.zeros(batch_size, max(caption_lengths), self.vocab_size) #.to(device)\n \n #print('prediction_length', predictions.size())\n alphas = torch.zeros(batch_size, max(caption_lengths), num_pixels) #.to(device)\n #print('alphas', alphas.size())\n # At each time-step, decode by\n # attention-weighing the encoder's output based on the decoder's previous hidden state output\n # then generate a new word in the decoder with the previous word and the attention weighted encoding\n for t in range(max(caption_lengths)):\n batch_size_t = sum([l > t for l in caption_lengths])\n att, alpha = self.attention(encoder_output[:batch_size_t],\n h[:batch_size_t])\n gate = self.sigmoid(self.f_beta(h[:batch_size_t])) # gating scalar, (batch_size_t, encoder_dim)\n att = gate * att\n h, c = self.decode_step(\n torch.cat([embeddings[:batch_size_t, t, :], att], dim=1),\n (h[:batch_size_t], c[:batch_size_t])) # (batch_size_t, decoder_dim)\n #preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n \n h_embedded = self.linear_h(h)\n att_embedded = self.linear_z(att)\n preds = self.linear_o(self.dropout(embeddings[:batch_size_t, t, :] + h_embedded + att_embedded))\n \n predictions[:batch_size_t, t, :] = preds\n alphas[:batch_size_t, t, :] = alpha\n \n #print(predictions.size()) \n return predictions, alphas","def train_output(self, decoder_output, Y, enc_output, vocab_size, scope, reuse):\n logits = top(body_output=decoder_output,\n vocab_size = vocab_size,\n dense_size = self.config.hidden_units,\n scope=scope,\n shared_embedding = self.config.train.shared_embedding,\n reuse=reuse)\n\n with tf.variable_scope(scope, initializer=self._initializer, reuse=reuse):\n #logits = tf.layers.dense(decoder_output, self.config.dst_vocab_size)\n preds = tf.to_int32(tf.arg_max(logits, dimension=-1))\n mask = tf.to_float(tf.not_equal(Y, 0))\n acc = tf.reduce_sum(tf.to_float(tf.equal(preds, Y)) * mask) / tf.reduce_sum(mask)\n\n # Smoothed loss\n loss = smoothing_cross_entropy(logits=logits, labels=Y, vocab_size=vocab_size,\n confidence=1-self.config.train.label_smoothing)\n mean_loss = tf.reduce_sum(loss * mask) / (tf.reduce_sum(mask))\n\n kl_loss = tf.reduce_mean(tf.reduce_sum(tf.pow(enc_output, 2), axis=-1))\n\n mean_loss = mean_loss + self.config.kl_weight * kl_loss\n\n return acc, mean_loss","def decoder(latent_samples, input_dim, output_dim=500, input_channels=1, output_channels=4, deconv_filters=[4,8,16], \n kernel_sizes=[3,3,3], deconv_strides=[1,1,1], act=tf.nn.relu, \n 
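# A small PyTorch sketch of the masked token accuracy computed in
# train_output above (pad id 0 is an assumption carried over from the
# tf.not_equal(Y, 0) mask in the snippet).
import torch

def masked_accuracy(logits, targets, pad_id=0):
    preds = logits.argmax(dim=-1)
    mask = (targets != pad_id).float()
    correct = (preds == targets).float() * mask
    return correct.sum() / mask.sum().clamp(min=1.0)

print(float(masked_accuracy(torch.randn(2, 5, 10), torch.randint(0, 10, (2, 5)))))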
initializer=tf.contrib.layers.xavier_initializer()):\n x = tf.layers.dense(inputs=latent_samples, units=input_dim*input_dim, activation=act, \n kernel_initializer=initializer)\n x = tf.reshape(x, [-1, input_dim, input_dim, input_channels])\n x = tf.layers.conv2d_transpose(inputs=x, filters=deconv_filters[0], kernel_size=kernel_sizes[0], \n strides=deconv_strides[0], padding='same', activation=act, \n kernel_initializer=initializer)\n x = tf.layers.conv2d_transpose(inputs=x, filters=deconv_filters[1], kernel_size=kernel_sizes[1], \n strides=deconv_strides[1], padding='same', activation=act, \n kernel_initializer=initializer)\n x = tf.layers.conv2d_transpose(inputs=x, filters=deconv_filters[2], kernel_size=kernel_sizes[2], \n strides=deconv_strides[2], padding='same', activation=act, \n kernel_initializer=initializer)\n x = tf.contrib.layers.flatten(x)\n x = tf.layers.dense(inputs=x, units=output_dim*output_dim*output_channels, activation=act, \n kernel_initializer=initializer)\n color_imgs = tf.reshape(x, [-1, output_dim, output_dim, output_channels])\n return color_imgs","def _decoding_step(current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories):\n # Propagate through decoder\n step_logits, memories = decoding_function(next_ids, current_time_step, memories)\n # Calculate log probabilities for token prediction at current time-step\n step_scores = tf.nn.log_softmax(step_logits)\n # Determine next token to be generated, next_ids has shape [batch_size]\n if do_sample:\n next_ids = tf.squeeze(tf.multinomial(step_scores, num_samples=1, output_dtype=int_dtype), axis=1)\n else:\n # Greedy decoding\n next_ids = tf.argmax(step_scores, -1, output_type=int_dtype)\n # Collect scores associated with the selected tokens\n score_coordinates = tf.stack([tf.range(batch_size, dtype=int_dtype), next_ids], axis=1)\n decoded_score += tf.gather_nd(step_scores, score_coordinates)\n # Concatenate newly decoded token ID with the previously decoded ones\n decoded_ids = tf.concat([decoded_ids, tf.expand_dims(next_ids, 1)], 1)\n # Extend next_id's dimensions to be compatible with input dimensionality for the subsequent step\n next_ids = tf.expand_dims(next_ids, time_dim)\n # Check if generation has concluded with \n # all_finished |= tf.equal(tf.squeeze(next_ids, axis=time_dim), eos_id)\n all_finished |= tf.equal(tf.reduce_prod(decoded_ids - eos_id, axis=time_dim), eos_id)\n\n return current_time_step + 1, all_finished, next_ids, decoded_ids, decoded_score, memories","def forward(self, input, dec_hidden=None):\n ### YOUR CODE HERE for part 2a\n embeddings = self.decoderCharEmb(input)\n out, new_hidden = self.charDecoder(embeddings, dec_hidden)\n scores = self.char_output_projection(out)\n return scores, new_hidden\n\n ### END YOUR CODE","def call(self,\n decoder_inputs,\n encoder_outputs,\n decoder_self_attention_bias,\n attention_bias,\n training,\n cache=None):\n # Run values\n outputs = self._transformer_decoder(\n decoder_inputs,\n encoder_outputs,\n decoder_self_attention_bias,\n attention_bias,\n training=training,\n cache=cache)\n return outputs","def _decoder(self, inputs, z_dimension, mcd):\n \n latent_inputs = Input(shape=(z_dimension,), name=\"z_sampling\")\n x = latent_inputs\n x = Dense(\n self.hidden_size // 4,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n\n x = Dense(\n self.hidden_size // 3,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = 
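# The greedy-versus-sampling branch of _decoding_step above, reduced to a
# minimal PyTorch helper (shapes assumed: step_logits is [batch, vocab]).
import torch

def next_token(step_logits, do_sample=False):
    if do_sample:
        probs = torch.softmax(step_logits, dim=-1)
        return torch.multinomial(probs, num_samples=1).squeeze(1)
    return step_logits.argmax(dim=-1)   # greedy decoding

print(next_token(torch.randn(3, 7)).shape)  # torch.Size([3])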
Dropout(self.dropout_probability)(x, training=mcd)\n\n        x = Dense(\n            self.hidden_size // 2,\n            activation=self.activation_func,\n            kernel_initializer=self.weight_init,\n        )(x)\n        x = Dropout(self.dropout_probability)(x, training=mcd)\n\n        x = Dense(\n            self.hidden_size,\n            activation=self.activation_func,\n            kernel_initializer=self.weight_init,\n        )(x)\n        x = Dropout(self.dropout_probability)(x, training=mcd)\n        outputs = Dense(\n            self.n_dims,\n            activation=self.output_activation,\n            kernel_initializer=self.weight_init,\n        )(x)\n        \n        self.decoder = Model(latent_inputs, outputs, name=\"decoder\")\n        \n        outputs = self.decoder(self.encoder(inputs)[0])\n        \n        return self.decoder, outputs","def greedyDecoder(self, enc_states, hidden, test=False, sentence=None, st='', ed=''):\n\t\tbatch_size = hidden.shape[1]\n\t\t# according to paper\n\t\thidden = F.tanh(self.init_decoder_hidden(hidden[1])).view(1, batch_size, self.hid_dim)\n\t\tif test:\n\t\t\tword = torch.ones(batch_size, dtype=torch.long, device=self.device) * self.vocab[st]\n\t\t\twords = torch.zeros(batch_size, self.max_trg_len, dtype=torch.long, device=self.device)\n\t\t\tfor i in range(self.max_trg_len-1):\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, word)\n\t\t\t\tprobs = F.softmax(logit, dim=-1)\n\t\t\t\tword = torch.argmax(probs, dim=-1).squeeze()\n\t\t\t\twords[:,i] = word\n\t\t\twords[:,-1] = torch.ones(batch_size, dtype=torch.long, device=self.device) * self.vocab[ed]\n\t\t\treturn words\n\t\telse:\n\t\t\tmax_seq_len = sentence.shape[1]\n\t\t\tlogits = torch.zeros(batch_size, max_seq_len-1, self.vocab_size, device=self.device)\n\t\t\tfor i in range(max_seq_len - 1):\n\t\t\t\t# logit: [batch, 1, vocab_size]\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, sentence[:,i])\n\t\t\t\tlogits[:,i,:] = logit.squeeze()\n\t\t\treturn logits","def forward(self, input, last_hidden, last_context, encoder_outputs):\r\n        # input: B x 1 x d, last_hidden: (num_layers * num_directions) x B x h\r\n        # last_context: B x 1 x h, encoder_outputs: B x S x h\r\n\r\n        # output = embedded\r\n        rnn_input = torch.cat((input, last_context), 2) # B x 1 x (d + h)\r\n        output, hidden = self.rnn(rnn_input, last_hidden) # output: B x 1 x h\r\n\r\n        # calculate attention from current RNN state and all encoder outputs; apply to encoder outputs\r\n        attn_weights = self.attn(output, encoder_outputs) # B x S\r\n        context = attn_weights.unsqueeze(1).bmm(encoder_outputs) # B x 1 x h\r\n\r\n        # final output layer (next word prediction) using the RNN hidden state and context vector\r\n        output = f.log_softmax(self.out(torch.cat((context.squeeze(1), output.squeeze(1)), 1)), 1)\r\n\r\n        # Return final output, hidden state, and attention weights (for visualization)\r\n        return output, hidden, context, attn_weights","def load_decoder(checkpoint, decoder_cls,\n                 attn_model, embedding, HIDDEN_SIZE, VOC_SIZE, DECODER_N_LAYERS, DROPOUT, decoder_name):\n    model = decoder_cls(attn_model, embedding, HIDDEN_SIZE, VOC_SIZE, DECODER_N_LAYERS, DROPOUT, gate=decoder_name)\n    model.load_state_dict(checkpoint['de'])\n    model.eval()\n    return model","def lfads_decode(params, lfads_hps, key, ib_mean, ib_logvar, ic_mean, ic_logvar,\n                 xenc_t, keep_rate):\n\n  keys = random.split(key, 3)\n\n  # Since the factors feed back to the controller,\n  #    factors_{t-1} -> controller_t -> sample_t -> generator_t -> factors_t\n  # is really one big loop and therefore one RNN.\n  ii0 = params['ii0']\n  ii0 = np.where(lfads_hps['do_tanh_latents'], np.tanh(ii0), ii0)\n  # ii_t tanh'd at sampling time in the decode loop. 
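# An illustrative unrolling of the feed-back loop in greedyDecoder above;
# step_fn is a stand-in for one decoder step and is assumed, not original.
import torch

def greedy_decode(step_fn, hidden, start_id, max_len, batch=2):
    word = torch.full((batch,), start_id, dtype=torch.long)
    out = []
    for _ in range(max_len):
        logits, hidden = step_fn(word, hidden)   # logits: [batch, vocab]
        word = logits.argmax(dim=-1)             # argmax fed back as next input
        out.append(word)
    return torch.stack(out, dim=1)               # [batch, max_len]

fake_step = lambda w, h: (torch.randn(w.size(0), 11), h)
print(greedy_decode(fake_step, None, start_id=1, max_len=4).shape)  # [2, 4]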
\n c0 = params['con']['h0']\n ib = dists.diag_gaussian_sample(keys[0], ib_mean, ib_logvar,\n lfads_hps['var_min'])\n ib = np.where(lfads_hps['do_tanh_latents'], np.tanh(ib), ib) \n g0 = dists.diag_gaussian_sample(keys[1], ic_mean, ic_logvar,\n lfads_hps['var_min'])\n g0 = np.where(lfads_hps['do_tanh_latents'], np.tanh(g0), g0)\n f0 = params['f0']\n\n # Make all the randomness for all T steps at once, it's more efficient.\n # The random keys get passed into scan along with the input, so the input\n # becomes of a 2-tuple (keys, actual input).\n T = xenc_t.shape[0]\n keys_t = random.split(keys[2], T)\n\n state0 = (c0, ii0, ib, g0, f0)\n decoder = partial(lfads_decode_one_step_scan, *(params, lfads_hps, keep_rate))\n _, state_and_returns_t = lax.scan(decoder, state0, (keys_t, xenc_t))\n return state_and_returns_t","def inference(self, inputs):\n # test_2\n memory = self.get_go_frame(inputs)\n memory = self._update_memory(memory)\n\n self._init_states(inputs, mask=None)\n self.attention.init_states(inputs)\n\n outputs, stop_tokens, alignments, t = [], [], [], 0\n while True:\n memory = self.prenet(memory)\n decoder_output, alignment, stop_token = self.decode(memory)\n stop_token = torch.sigmoid(stop_token.data)\n outputs += [decoder_output.squeeze(1)]\n stop_tokens += [stop_token]\n alignments += [alignment]\n\n if stop_token > self.stop_threshold and t > inputs.shape[0] // 2:\n break\n if len(outputs) == self.max_decoder_steps:\n print(\" | > Decoder stopped with 'max_decoder_steps\")\n break\n\n memory = self._update_memory(decoder_output)\n t += 1\n\n outputs, stop_tokens, alignments = self._parse_outputs(\n outputs, stop_tokens, alignments)\n\n return outputs, alignments, stop_tokens","def predict_step(batch, state, cache, eos_idx, config):\n\n logging.info('predict_step(batch=%s)', batch)\n variables = {'params': state.optimizer.target}\n model = models.Model(config)\n encoded, encoded_mask = model.apply(\n variables, batch, method=models.Model.encode)\n\n encoded_inputs = decode.flat_batch_beam_expand(encoded, config.beam_size)\n encoded_mask = decode.flat_batch_beam_expand(encoded_mask, config.beam_size)\n\n def tokens_ids_to_logits(flat_ids, flat_cache):\n \"\"\"Token slice to logits from decoder model.\"\"\"\n # --> [batch * beam, 1, vocab]\n flat_logits, new_vars = model.apply(\n {\n 'params': state.optimizer.target,\n 'cache': flat_cache\n },\n flat_ids,\n encoded_inputs,\n flat_ids > 0,\n encoded_mask,\n mutable=['cache'],\n method=models.Model.decode)\n new_flat_cache = new_vars['cache']\n # Remove singleton sequence-length dimension:\n # [batch * beam, 1, vocab] --> [batch * beam, vocab]\n flat_logits = flat_logits.squeeze(axis=1)\n return flat_logits, new_flat_cache\n\n # Using the above-defined single-step decoder function, run a\n # beam search over possible sequences given input encoding.\n beam_seqs, _, = decode.beam_search(\n batch['token'],\n cache,\n tokens_ids_to_logits,\n beam_size=config.beam_size,\n alpha=0.6,\n eos_id=eos_idx,\n max_decode_len=config.max_decode_step)\n # Beam search returns [n_batch, n_beam, n_length + 1] with beam dimension\n # sorted in increasing order of log-probability.\n # Return the highest scoring beam sequence.\n return beam_seqs[:, -1]","def long_answer_prepare_decoder(inputs, targets, hparams):\n decoder_input = tf.concat([\n length_embedding(targets, hparams), inputs,\n common_layers.shift_left_3d(targets)], 1)\n if hparams.pos == \"timing\":\n decoder_input = common_attention.add_timing_signal_1d(decoder_input)\n return 
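# The flat_batch_beam_expand idiom relied on by the beam-search predict_step
# above, shown with NumPy (a sketch of the idea, not the decode library call):
# each batch item is expanded in place beam_size times along axis 0.
import numpy as np

def flat_batch_beam_expand(x, beam_size):
    return np.repeat(x, beam_size, axis=0)   # [batch, ...] -> [batch*beam, ...]

print(flat_batch_beam_expand(np.arange(6).reshape(2, 3), beam_size=2))
# [[0 1 2]
#  [0 1 2]
#  [3 4 5]
#  [3 4 5]]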
decoder_input","def decode(self, embeddings):\n def denormalize(img):\n _img = img + 1.0\n _img = _img * (255.0 / 2.0)\n return _img.astype(np.uint8)\n\n i = 0\n N = len(embeddings)\n imgs = []\n while True:\n end = min(N, i + self.batch_size)\n batch = embeddings[i: end]\n\n size = end - i\n if size < self.batch_size:\n batch += self._embed_padding[: self.batch_size - size]\n\n _imgs = self.sess.run(self.output_layer, feed_dict={self.embed_layer: batch})\n imgs += [denormalize(_imgs[i]) for i in range(size)]\n\n i += self.batch_size\n if i >= N - 1:\n break\n\n return imgs","def _compute_output(self, decoder_output, attention_weights):\n # Compute attention weights & context vector\n attention_weights, context_vector = self.attention_layer(\n encoder_outputs=self.encoder_outputs,\n decoder_output=decoder_output,\n encoder_outputs_length=self.encoder_outputs_seq_len,\n attention_weights=attention_weights)\n\n # Input-feeding approach, this is used as inputs for the decoder\n attentional_vector = tf.contrib.layers.fully_connected(\n tf.concat([decoder_output, context_vector], axis=1),\n num_outputs=self.rnn_cell.output_size,\n activation_fn=tf.nn.tanh,\n weights_initializer=tf.truncated_normal_initializer(\n stddev=self.parameter_init),\n biases_initializer=None, # no bias\n scope=\"attentional_vector\")\n # NOTE: This makes the softmax smaller and allows us to synthesize\n # information between decoder state and attention context\n # see https://arxiv.org/abs/1508.04025v5\n\n # Softmax computation\n logits = tf.contrib.layers.fully_connected(\n attentional_vector,\n num_outputs=self.num_classes,\n activation_fn=None,\n weights_initializer=tf.truncated_normal_initializer(\n stddev=self.parameter_init),\n biases_initializer=tf.zeros_initializer(),\n scope=\"output_layer\")\n\n return attentional_vector, logits, attention_weights, context_vector","def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n\n def _decode_predictions(input_key: str, output_key: str, beam=False):\n if input_key in output_dict:\n if beam:\n all_predicted_tokens = [list(map(self._indices_to_tokens, beams)) \n for beams in sanitize(output_dict[input_key])]\n else:\n all_predicted_tokens = list(map(self._indices_to_tokens, sanitize(output_dict[input_key])))\n output_dict[output_key] = all_predicted_tokens\n\n _decode_predictions(\"predictions\", \"predicted_tokens\", beam=True)\n _decode_predictions(\"ctc_predictions\", \"ctc_predicted_tokens\")\n _decode_predictions(\"rnnt_predictions\", \"rnnt_predicted_tokens\")\n _decode_predictions(\"target_tokens\", \"targets\")\n\n return output_dict","def sequence_output_logits(self, decoded_outputs, num_units, vocab_size):\n # We need to get the sequence length for *this* batch, this will not be\n # equal for each batch since the decoder is dynamic. 
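# An assumed, simplified version of the fixed-batch inference loop in
# decode(self, embeddings) above: pad the last partial batch up to batch_size,
# then keep only the real outputs.
def batched(items, batch_size, pad):
    for i in range(0, len(items), batch_size):
        batch = items[i:i + batch_size]
        n = len(batch)                      # how many entries are real
        yield batch + [pad] * (batch_size - n), n

for batch, n in batched(list(range(5)), 2, pad=0):
    print(batch[:n])   # [0, 1] / [2, 3] / [4]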
Meaning length is\n # equal to the longest sequence in the batch, not the max over data\n max_seq_len = tf.shape(decoded_outputs)[1]\n\n # Reshape to rank 2 tensor so timestep is no longer a dimension\n output = tf.reshape(decoded_outputs, [-1, num_units])\n\n # Get the logits\n logits = self.output_logits(output, num_units, vocab_size, \"seq_softmax\")\n\n # Reshape back to the original tensor shape\n logits = tf.reshape(logits, [-1, max_seq_len, vocab_size])\n return logits","def forward(self, input, hidden, give_gates=False, debug=False):\n\n emb = self.encoder(input)\n if emb.dim()<3:\n emb = emb.unsqueeze(0)\n\n if give_gates:\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\n else:\n output, hidden = self.rnn(emb, hidden)\n\n # decoded = self.softmax(self.decoder(output))\n decoded = self.decoder(output)\n\n if give_gates:\n if debug:\n return decoded, hidden, extras, emb\n else:\n return decoded, hidden, extras\n else:\n if debug:\n return decoded, hidden, emb\n else:\n return decoded, hidden","def build_prediction_model(self):\n batch_size=self.image_embeddings.get_shape()[0]\n \n \n with tf.variable_scope(\"pred_layer_1\", initializer=self.initializer) as pred_scope_1:\n # We use a simple three layer fully-connected network for the actual prediction task\n # Hidden neuron sizes are randomly chosen \n \n first_pred = tf.contrib.layers.fully_connected(\n inputs=self.article_embeddings,\n num_outputs=32,\n activation_fn=tf.nn.relu,\n weights_initializer=self.initializer,\n scope=pred_scope_1)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n first_pred = tf.nn.dropout(first_pred, self.config.dropout_keep_prob_classifier)\n ''' \n \n with tf.variable_scope(\"pred_layer_2\", initializer=self.initializer) as pred_scope_2:\n second_pred = tf.contrib.layers.fully_connected(\n inputs=first_pred,\n num_outputs=16,\n activation_fn=tf.nn.relu,\n weights_initializer=self.initializer,\n scope=pred_scope_2)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n second_pred = tf.nn.dropout(second_pred, self.config.dropout_keep_prob_classifier)\n '''\n \n second_pred = first_pred\n \n ################################################\n # Predict Mutual Information\n ################################################\n \n with tf.variable_scope(\"predict_mi\", initializer=self.initializer) as mi_scope:\n mi_logits = None\n mi_prediction = None\n \n if self.config.mi_is_multiclass_problem:\n mi_logits = tf.contrib.layers.fully_connected(\n inputs=second_pred,\n num_outputs=self.config.num_mi_labels,\n activation_fn=tf.nn.relu, #None, # linear activation \n weights_initializer=self.initializer,\n scope=mi_scope)\n \n if self.mode != \"prediction\": \n # Compute loss\n mi_loss = 0.0\n \n # Do not punish all misclassifications equally.\n # Use the label distances defined in the config instead.\n if self.config.use_distance_aware_loss:\n # compute softmax to get probability of label l: p(l)\n mi_logits = tf.nn.softmax(mi_logits)\n \n # This part computes the Distance Aware loss function from section 4.4.1 of the thesis.\n # The loss allows to treat misclassifications differently based on a predefined\n # similarity metric defined between labels.\n # D is the symmetric matrix that defines the similarity d(l,t) for a label l\n # and a prediction t. The correct t is not known, only a softmax evidence for\n # all possible t. 
Therefore, we consider a whole column of D (corresponding to the correct\n # label l) and multiply this column by the softmax output to compute the loss value. \n D = tf.constant(self.config.mi_label_distances)\n indices = tf.expand_dims(self.mi_labels, 1)\n d = tf.gather_nd(D, indices) # contains the l-th column of D for each batch\n \n mi_loss = tf.reduce_sum(d*mi_logits) # d(l,t) * p(t) for all t\n \n # use cross entropy loss\n else:\n # Applies softmax to the unscaled inputs (logits) and then computes the soft-entropy loss: H(p,q) = - sum p(x) * log q(x)\n mi_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(mi_logits, self.mi_labels) \n\n mi_loss = tf.reduce_mean(mi_losses, name=\"mi_loss\") \n \n tf.contrib.losses.add_loss(mi_loss)\n \n else:\n \n # Consider the task as a regression problem and reduce its quadratic loss\n mi_prediction = tf.contrib.layers.fully_connected(\n inputs=second_pred,\n num_outputs=1,\n activation_fn=None, # linear activation \n weights_initializer=self.initializer,\n scope=mi_scope)\n \n if self.mode != \"prediction\": \n mi_loss = tf.reduce_mean(tf.pow(mi_prediction - tf.to_float(self.mi_labels), 2))\n tf.contrib.losses.add_loss(mi_loss)\n \n ################################################\n # Predict Semantic Correlation\n ################################################\n \n with tf.variable_scope(\"predict_sc\", initializer=self.initializer) as sc_scope:\n sc_logits = None\n sc_prediction = None\n \n if self.config.sc_is_multiclass_problem:\n \n # Consider prediction of semantic correlation as a multiclass problem\n sc_logits = tf.contrib.layers.fully_connected(\n inputs=second_pred,\n num_outputs=self.config.num_sc_labels,\n activation_fn=tf.nn.relu, #None, # linear activation \n weights_initializer=self.initializer,\n scope=sc_scope)\n \n if self.mode != \"prediction\": \n # compute sc multiclass labels\n # scale to [0,1,2,3,4]\n multiclass_labels = tf.to_int64(self.sc_labels * 2 + 2)\n\n # Compute loss\n sc_loss = 0.0\n \n # Do not punish all misclassifications equally.\n # Use the label distances defined in the config instead.\n if self.config.use_distance_aware_loss:\n # compute softmax to get probability of label l: p(l)\n sc_logits = tf.nn.softmax(sc_logits)\n \n # see comment above for distance aware MI loss\n D = tf.constant(self.config.sc_label_distances)\n indices = tf.expand_dims(multiclass_labels, 1)\n d = tf.gather_nd(D, indices) # contains the l-th column of D for each batch\n \n sc_loss = tf.reduce_sum(d*sc_logits) # d(l,t) * p(t) for all t\n \n # use cross entropy loss\n else:\n # Applies softmax to the unscaled inputs (logits) and then computes the soft-entropy loss: H(p,q) = - sum p(x) * log q(x)\n sc_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(sc_logits, multiclass_labels) \n\n sc_loss = tf.reduce_mean(sc_losses, name=\"sc_loss\") \n \n tf.contrib.losses.add_loss(sc_loss)\n \n else:\n \n # Consider the task as a regression problem and reduce its quadratic loss\n \n sc_prediction = tf.contrib.layers.fully_connected(\n inputs=second_pred,\n num_outputs=1,\n activation_fn=None, # linear activation \n weights_initializer=self.initializer,\n scope=sc_scope)\n \n if self.mode != \"prediction\":\n sc_loss = tf.reduce_mean(tf.pow(sc_prediction - self.sc_labels, 2))\n tf.contrib.losses.add_loss(sc_loss)\n\n \n if self.mode != \"prediction\":\n \n self.total_loss = tf.contrib.losses.get_total_loss() \n \n self.mi_loss = mi_loss # used in evaluation\n self.sc_loss = sc_loss # used in evaluation\n \n # Add summaries.\n 
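# A toy PyTorch restatement of the distance-aware loss described above:
# gather the distance column d(l, .) for each true label l and weight the
# softmax output with it, sum_t d(l, t) * p(t). D and the labels are made up.
import torch

D = torch.tensor([[0., 1., 2.],
                  [1., 0., 1.],
                  [2., 1., 0.]])        # symmetric label-distance matrix
logits = torch.randn(4, 3)
labels = torch.tensor([0, 2, 1, 1])

p = torch.softmax(logits, dim=-1)       # p(t) for every candidate label t
d = D[labels]                           # row d(l, .) for each example's true l
print(float((d * p).sum(dim=-1).mean()))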
tf.summary.scalar(\"mi_loss\", mi_loss)\n tf.summary.scalar(\"sc_loss\", sc_loss)\n tf.summary.scalar(\"total_loss\", self.total_loss)\n \n \n if self.config.mi_is_multiclass_problem:\n self.mi_logits = mi_logits # used in evaluation\n else:\n self.mi_logits = mi_prediction # used in evaluation\n \n if self.config.sc_is_multiclass_problem:\n self.sc_logits = sc_logits # used in evaluation\n else:\n self.sc_logits = sc_prediction # used in evaluation\n \n for var in tf.trainable_variables():\n #print(var)\n #print(var.name)\n tf.summary.histogram(var.op.name, var)","def step(self,\n session,\n encoder_inputs,\n decoder_inputs,\n targets,\n target_weights,\n encoder_sequence_length,\n decoder_sequence_length,\n input_keep_prob=1.0,\n output_keep_prob=1.0,\n mode='train'):\n input_feed = {}\n input_feed[self.encoder_inputs] = encoder_inputs\n input_feed[self.decoder_inputs] = decoder_inputs\n input_feed[self.targets] = targets\n input_feed[self.target_weights] = target_weights\n input_feed[self.encoder_sequence_length] = encoder_sequence_length\n input_feed[self.decoder_sequence_length] = decoder_sequence_length\n input_feed[self.input_keep_prob] = input_keep_prob\n input_feed[self.output_keep_prob] = output_keep_prob\n\n if mode == 'train':\n # training\n output_feed = [self.update, # SGD\n self.gradient_norm, # Gradient norm\n self.loss] # Loss for this batch\n outputs = session.run(output_feed, input_feed)\n # Gradient norm, loss, no outputs\n return outputs[1], outputs[2], None\n elif mode == 'test_rank':\n # testing as a ranker\n # TODO(ysu): separate mode for validation\n output_feed = [self.loss, # Loss for this batch\n self.outputs] # Output logits\n outputs = session.run(output_feed, input_feed)\n # No gradient norm, loss, outputs\n return None, outputs[0], outputs[1]\n elif mode == 'summarize':\n output_feed = self.merged_summary\n outputs = session.run(output_feed, input_feed)\n return outputs","def __init__(self, num_mels=80, num_freq=513, prenet_hidden_size=512, decoder_hidden_size=512,\n attention_dropout=0.1,\n layer_postprocess_dropout=0.1, prenet_activation_fn=None, conv_layers_num=4,\n mag_conv_layers_num=4, prenet_layers=2,\n prenet_dropout=0.5,\n prenet_use_inference_dropout=False,\n cnn_dropout_prob=0.1,\n bn_momentum=0.95,\n bn_epsilon=-1e8,\n reduction_factor=2,\n attention_layers=4,\n self_attention_conv_params=None,\n attention_heads=1,\n attention_cnn_dropout_prob=0.5,\n window_size=4,\n back_step_size=0, kernel_size=5, regularizer=None,\n force_layers=None, dtype=tf.float32, name=\"centaur_decoder\", is_prediction=False, is_training=False,\n is_validation=False):\n self.kernel_size = kernel_size\n\n if force_layers is None:\n force_layers = [1, 3]\n self.is_validation = is_validation\n self.is_prediction = is_prediction\n self.name = name\n self.is_training = is_training\n self.prenet = None\n self.linear_projection = None\n self.attentions = []\n self.output_normalization = None\n self.conv_layers = []\n self.mag_conv_layers = []\n self.conv_layers_num = conv_layers_num\n self.mag_conv_layers_num = mag_conv_layers_num\n self.stop_token_projection_layer = None\n self.mel_projection_layer = None\n self.mag_projection_layer = None\n self.regularizer = regularizer\n self.num_mels = num_mels\n self.num_freq = num_freq\n self.reduction_factor = reduction_factor\n self.prenet_layers = prenet_layers\n self.prenet_hidden_size = prenet_hidden_size\n self.prenet_activation_fn = prenet_activation_fn if prenet_activation_fn else tf.nn.relu\n self.prenet_use_inference_dropout = 
prenet_use_inference_dropout\n    self.prenet_dropout = prenet_dropout\n    self.cnn_dropout_prob = cnn_dropout_prob\n    self.dtype = dtype\n    self.bn_momentum = bn_momentum\n    self.bn_epsilon = bn_epsilon\n    self.decoder_hidden_size = decoder_hidden_size\n    self.attention_layers = attention_layers\n    self.force_layers = force_layers\n\n    self.window_size = window_size\n    self.attention_heads = attention_heads\n    self.attention_dropout = attention_dropout\n    self.layer_postprocess_dropout = layer_postprocess_dropout\n    self.attention_cnn_dropout_prob = attention_cnn_dropout_prob\n    self.back_step_size = back_step_size\n    if self_attention_conv_params is None:\n      self_attention_conv_params = {\n          \"kernel_size\": [self.kernel_size],\n          \"stride\": [1],\n          \"num_channels\": self.decoder_hidden_size,\n          \"padding\": \"VALID\",\n          \"is_causal\": True,\n          \"activation_fn\": tf.nn.relu\n      }\n    self.self_attention_conv_params = self_attention_conv_params","def forward(self, input, dec_hidden=None):\n        ### YOUR CODE HERE for part 2b\n        ### TODO - Implement the forward pass of the character decoder.\n        # print(\"=====input.size\",input.size())\n        char_embedded= self.decoderCharEmb(input)\n        # print(\"=====char_embedded.size\",char_embedded.size())\n        out, dec_hidden = self.charDecoder(char_embedded,dec_hidden)\n        # print(\"=====out.size\",out.size()) #dimensions (seq_length, batch, hidden_size)\n        \n        out_batch_first = out.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n        o_proj = self.char_output_projection(out_batch_first)\n        scores = o_proj.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n        return scores,dec_hidden\n        ### END YOUR CODE ","def forward(self, input, dec_hidden=None):\n        ### TODO - Implement the forward pass of the character decoder.\n\n        # Get the embedding matrix of the given input\n        char_embedding = self.decoderCharEmb(input)\n\n        # Apply the LSTM to the input\n        dec_state = self.charDecoder(char_embedding, dec_hidden)\n\n        # Split the hidden states and cell states\n        (dec_hiddens, dec_hidden) = dec_state\n\n        # Apply the output projection to get the scores\n        scores = self.char_output_projection(dec_hiddens)\n\n        # Return the scores and dec_state (after the LSTM)\n        return (scores, dec_hidden)","def decode(self, passage_vectors, question_vectors, init_with_question=True):\n        with tf.variable_scope('pn_decoder'):\n            fake_inputs = tf.zeros(\n                [tf.shape(passage_vectors)[0], 2, 1])  # not used\n            sequence_len = tf.tile([2], [tf.shape(passage_vectors)[0]])\n            if init_with_question:\n                random_attn_vector = tf.Variable(tf.random_normal([1, self.hidden_size]),\n                                                 trainable=True, name=\"random_attn_vector\")\n                pooled_question_rep = tc.layers.fully_connected(\n                    attend_pooling(question_vectors,\n                                   random_attn_vector, self.hidden_size),\n                    num_outputs=self.hidden_size, activation_fn=None\n                )\n                init_state = tc.rnn.LSTMStateTuple(\n                    pooled_question_rep, pooled_question_rep)\n            else:\n                init_state = None\n            with tf.variable_scope('fw'):\n                fw_cell = PointerNetLSTMCell(self.hidden_size, passage_vectors)\n                fw_outputs, _ = custom_dynamic_rnn(\n                    fw_cell, fake_inputs, sequence_len, init_state)\n            with tf.variable_scope('bw'):\n                bw_cell = PointerNetLSTMCell(self.hidden_size, passage_vectors)\n                bw_outputs, _ = custom_dynamic_rnn(\n                    bw_cell, fake_inputs, sequence_len, init_state)\n            start_prob = (fw_outputs[0:, 0, 0:] + bw_outputs[0:, 1, 0:]) / 2\n            end_prob = (fw_outputs[0:, 1, 0:] + bw_outputs[0:, 0, 0:]) / 2\n            return start_prob, end_prob","def decode(self):\n        decoder_input = Input(shape=self.input_decoder_shape, 
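# A stand-alone PyTorch rendering of the character-decoder forward passes
# above (all sizes are assumptions): embed characters, run an LSTM, then
# project hidden states to per-character vocabulary scores.
import torch
import torch.nn as nn

emb, lstm, proj = nn.Embedding(30, 16), nn.LSTM(16, 32), nn.Linear(32, 30)

chars = torch.randint(0, 30, (5, 2))   # [seq_len, batch]
out, dec_hidden = lstm(emb(chars))     # out: [seq_len, batch, 32]
scores = proj(out)                     # [seq_len, batch, 30]
print(scores.shape)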
batch_shape=self.input_batch_decoder_shape)\n ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)\n\n if self.hparams.Masking is True:\n mask_decoder_input = Masking(mask_value=0)(decoder_input)\n mask_ppg_input = Masking(mask_value=0)(ppg_input)\n prenet_output = self.PreNet(mask_decoder_input)\n encoder_input = self.Encoder(mask_ppg_input)\n decoder_mask = None\n else:\n decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)\n prenet_output = self.PreNet(decoder_input)\n encoder_input = self.Encoder(ppg_input, decoder_mask)\n\n rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])\n # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n for i in range(self.hparams.Tacotron_decoder_layers):\n rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)\n\n # feed by self.states is unhelpful in training, since we don't stop rnn during epochs\n # but it is important in generating since each fit states will be set to zeros.!!!!!!\n rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])\n decoder_output = self.Linear_projection(rnn_output)\n if self.hparams.Tacotron_postnet is True:\n residual_output = decoder_output\n for i in range(self.hparams.PostNet_layers):\n residual_output = self.PostNet_Conv1D[i](residual_output)\n residual_output = self.PostNet_BatchNorm[i](residual_output)\n residual_output = self.PostNet_dropout_list[i](residual_output)\n decoder_output = Add()([decoder_output, residual_output])\n return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)","def _build_decoder(self, hparams, inputs, initial_state, is_training):\n ## Decoder.\n with tf.variable_scope(\"trajectory_decoder\"):\n if hparams.decoder_type == \"fc\":\n regression = self._build_fc_decoder(hparams, inputs, is_training)\n final_states = None\n \n elif hparams.decoder_type == \"rnn\":\n list_dummy_input = []\n with tf.name_scope(\"dummy_input\"):\n for gpu_idx in range(self.num_gpu):\n with tf.device(tf.DeviceSpec(device_type=\"GPU\", device_index=gpu_idx)), tf.name_scope(\"tower_{:d}\".format(gpu_idx)):\n list_dummy_input.append(tf.zeros(tf.stack([self.target_length, self.batch_size[gpu_idx], 1])))\n \n with tf.variable_scope(\"rnn\"):\n if hparams.encoder_type == \"cnn\":\n with tf.variable_scope(\"rnn_initial_state\"):\n initial_state = self._make_initial_states(hparams, inputs)\n\n net, final_states = self._build_rnn_decoder(hparams, list_dummy_input, initial_state, is_training)\n\n with tf.name_scope(\"time_batch_transpose\"):\n net = list_ops.list_transpose(net, perm=[1, 0, 2])\n \n with tf.variable_scope(\"projection\"):\n regression = self._build_output_projection(hparams, net, is_training)\n\n else:\n raise ValueError(\"Unknown decoder type {:s}.\".format(hparams.decoder_type))\n\n return regression, final_states","def _inference_initial_state(self, encoder_outputs, encoder_decoder_attention_bias):\n\n with tf.variable_scope(\"inference_initial_state\"):\n n_layers = self.attention_layers\n n_heads = self.attention_heads\n batch_size = tf.shape(encoder_outputs)[0]\n n_features = self.num_mels + self.num_freq\n\n state = {\n \"iteration\": tf.constant(0),\n \"inputs\": tf.zeros([batch_size, 1, n_features * self.reduction_factor]),\n \"finished\": tf.cast(tf.zeros([batch_size]), tf.bool),\n \"alignment_positions\": tf.zeros([n_layers, batch_size, n_heads, 1],\n 
dtype=tf.int32),\n \"outputs\": {\n \"spec\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\n \"post_net_spec\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\n \"alignments\": [\n tf.zeros([0, 0, 0, 0, 0])\n ],\n \"stop_token_logits\": tf.zeros([batch_size, 0, 1 * self.reduction_factor]),\n \"lengths\": tf.zeros([batch_size], dtype=tf.int32),\n \"mag_spec\": tf.zeros([batch_size, 0, self.num_freq * self.reduction_factor])\n },\n \"encoder_outputs\": encoder_outputs,\n \"encoder_decoder_attention_bias\": encoder_decoder_attention_bias\n }\n\n state_shape_invariants = {\n \"iteration\": tf.TensorShape([]),\n \"inputs\": tf.TensorShape([None, None, n_features * self.reduction_factor]),\n \"finished\": tf.TensorShape([None]),\n \"alignment_positions\": tf.TensorShape([n_layers, None, n_heads, None]),\n \"outputs\": {\n \"spec\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\n \"post_net_spec\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\n \"alignments\": [\n tf.TensorShape([None, None, None, None, None]),\n ],\n \"stop_token_logits\": tf.TensorShape([None, None, 1 * self.reduction_factor]),\n \"lengths\": tf.TensorShape([None]),\n \"mag_spec\": tf.TensorShape([None, None, None])\n },\n \"encoder_outputs\": encoder_outputs.shape,\n \"encoder_decoder_attention_bias\": encoder_decoder_attention_bias.shape\n }\n\n return state, state_shape_invariants","def make_decoder(latent_tensor,\n output_shape,\n is_training=True,\n decoder_fn=gin.REQUIRED):\n with tf.variable_scope(\"decoder\"):\n return decoder_fn(\n latent_tensor=latent_tensor,\n output_shape=output_shape,\n is_training=is_training)","def build_decoder(opt, embeddings):\n dec_type = \"ifrnn\" if opt.decoder_type == \"rnn\" and opt.input_feed \\\n else opt.decoder_type\n return str2dec[dec_type].from_opt(opt, embeddings)","def _DecodeFn():\n _, decode_dict = self._model.ConstructDecodeGraph(\n input_batch=inp_instance.TpuDequeueBatch())\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return self.decode_nm.Flatten()","def predict_step(params,\n inputs,\n outputs,\n cache,\n eos_token,\n max_decode_len,\n beam_size,\n config):\n # Prepare transformer fast-decoder call for beam search: for beam search, we\n # need to set up our decoder model to handle a batch size equal to\n # batch_size * beam_size, where each batch item's data is expanded in-place\n # rather than tiled.\n flat_encoded = decode.flat_batch_beam_expand(\n models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n method=models.ProgramTransformer.encode),\n beam_size)\n\n encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)\n flat_encoded_padding_mask = decode.flat_batch_beam_expand(\n encoded_padding_mask, beam_size)\n\n def tokens_ids_to_logits(flat_ids, flat_cache):\n \"\"\"Token slice to logits from decoder model.\"\"\"\n # --> [batch * beam, 1, vocab]\n flat_logits, new_vars = models.ProgramTransformer(config).apply(\n {'params': params, 'cache': flat_cache},\n flat_ids,\n flat_encoded,\n flat_encoded_padding_mask,\n mutable=['cache'],\n method=models.ProgramTransformer.decode)\n new_flat_cache = new_vars['cache']\n # Remove singleton sequence-length dimension:\n # [batch * beam, 1, vocab] --> [batch * beam, vocab]\n flat_logits = flat_logits.squeeze(axis=1)\n return flat_logits, new_flat_cache\n\n # Using the above-defined single-step decoder function, run a\n # beam search over possible sequences given input encoding.\n beam_seqs, _ 
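# The grow-from-zero-length output idiom behind _inference_initial_state
# above, as a PyTorch toy (the TF version declares the time axis as None in
# its shape invariants so tf.while_loop accepts the growing tensor).
import torch

spec = torch.zeros(2, 0, 80)           # [batch, t, num_mels], t starts at 0
for _ in range(3):
    frame = torch.randn(2, 1, 80)      # one decoder step of output
    spec = torch.cat([spec, frame], dim=1)
print(spec.shape)                      # torch.Size([2, 3, 80])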
= decode.beam_search(\n inputs,\n cache,\n tokens_ids_to_logits,\n beam_size=beam_size,\n alpha=0.6,\n bos_token=config.bos_token,\n eos_token=eos_token,\n max_decode_len=max_decode_len)\n\n # Beam search returns [n_batch, n_beam, n_length] with beam dimension\n # sorted in increasing order of log-probability.\n return beam_seqs","def decode_one(*args, **kwargs):\n decoded_batch, out = decode_batch(*args, **kwargs)\n decoded_doc = decoded_batch[0]\n if out.enc_attn_weights is not None:\n out.enc_attn_weights = out.enc_attn_weights[:len(decoded_doc), 0, :]\n if out.ptr_probs is not None:\n out.ptr_probs = out.ptr_probs[:len(decoded_doc), 0]\n return decoded_doc, out","def _add_input_decoder(self, inputs, seq_len, enc_fw, enc_bw):\n with tf.variable_scope(\"decoder\"):\n cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True, initial_state_fw=enc_fw, initial_state_bw=enc_bw)\n\n return fw_states, bw_states","def step(self, session, encoder_inputs, decoder_inputs, target_weights,\n bucket_id, forward_only):\n # Check if the sizes match.\n encoder_size, decoder_size = self.buckets[bucket_id]\n if len(encoder_inputs) != encoder_size:\n raise ValueError(\"Encoder length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(encoder_inputs), encoder_size))\n if len(decoder_inputs) != decoder_size:\n raise ValueError(\"Decoder length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(decoder_inputs), decoder_size))\n if len(target_weights) != decoder_size:\n raise ValueError(\"Weights length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(target_weights), decoder_size))\n # print('in model.step()')\n # print('a',bucket_id, encoder_size, decoder_size)\n\n # Input feed: encoder inputs, decoder inputs, target_weights, as provided.\n input_feed = {}\n for l in xrange(encoder_size):\n input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]\n for l in xrange(decoder_size):\n input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]\n input_feed[self.target_weights[l].name] = target_weights[l]\n # print(self.encoder_inputs[l].name)\n # print(self.decoder_inputs[l].name)\n # print(self.target_weights[l].name)\n\n # Since our targets are decoder inputs shifted by one, we need one more.\n last_target = self.decoder_inputs[decoder_size].name\n input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)\n # print('last_target', last_target)\n\n # Output feed: depends on whether we do a backward step or not.\n if not forward_only:\n output_feed = [self.updates[bucket_id], # Update Op that does SGD.\n self.gradient_norms[bucket_id], # Gradient norm.\n self.losses[bucket_id]] # Loss for this batch.\n else:\n output_feed = [self.losses[bucket_id]] # Loss for this batch.\n for l in xrange(decoder_size): # Output logits.\n output_feed.append(self.outputs[bucket_id][l])\n\n outputs = session.run(output_feed, input_feed)\n if not forward_only:\n return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.\n else:\n return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.","def conv_decoder(encoder_output):\n namescope = 'conv_decoder'\n with tf.variable_scope(namescope):\n net = tf.layers.conv2d(encoder_output,\n 
filters=256,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.0005),\n activation=tf.nn.elu)\n net = tf.layers.conv2d(net,\n filters=C,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.0005),\n activation=None)\n return net","def forward(self, combiner_outputs: Dict[str, torch.Tensor], target: torch.Tensor) ->torch.Tensor:\n decoder_hidden, decoder_cell_state = get_lstm_init_state(combiner_outputs, self.reduce_sequence, self.num_layers)\n batch_size = decoder_hidden.size()[1]\n decoder_input = self.decoder_input.repeat(batch_size)\n logits = self.logits.unsqueeze(0).repeat(batch_size, 1, 1)\n for di in range(self.max_sequence_length):\n decoder_output, decoder_hidden, decoder_cell_state = self.lstm_decoder(decoder_input, decoder_hidden, decoder_cell_state)\n logits[:, di, :] = decoder_output.squeeze(1)\n if target is None:\n _, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze(1).squeeze(1).detach()\n else:\n decoder_input = target[:, di]\n return logits","def output_logits(self, decoded_outputs, num_units, vocab_size, scope):\n with tf.variable_scope(scope):\n w = tf.get_variable(\"weights\", [num_units, vocab_size],\n dtype=self.floatX, initializer=glorot())\n b = tf.get_variable(\"biases\", [vocab_size],\n dtype=self.floatX, initializer=tf.constant_initializer(0.0))\n\n logits = tf.matmul(decoded_outputs, w) + b\n return logits","def decode(conv_output, i=0):\n\n conv_shape = tf.shape(conv_output)\n batch_size = conv_shape[0]\n output_size = conv_shape[1]\n\n conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS))\n\n conv_raw_dxdy = conv_output[:, :, :, :, 0:2]\n conv_raw_dwdh = conv_output[:, :, :, :, 2:4]\n conv_raw_conf = conv_output[:, :, :, :, 4:5]\n conv_raw_prob = conv_output[:, :, :, :, 5: ]\n\n y = tf.tile(tf.range(output_size, dtype=tf.int32)[:, tf.newaxis], [1, output_size])\n x = tf.tile(tf.range(output_size, dtype=tf.int32)[tf.newaxis, :], [output_size, 1])\n\n xy_grid = tf.concat([x[:, :, tf.newaxis], y[:, :, tf.newaxis]], axis=-1)\n xy_grid = tf.tile(xy_grid[tf.newaxis, :, :, tf.newaxis, :], [batch_size, 1, 1, 3, 1])\n xy_grid = tf.cast(xy_grid, tf.float32)\n\n pred_xy = (tf.sigmoid(conv_raw_dxdy) + xy_grid) * STRIDES[i]\n pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i]) * STRIDES[i]\n pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)\n\n pred_conf = tf.sigmoid(conv_raw_conf)\n pred_prob = tf.sigmoid(conv_raw_prob)\n\n return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)","def _define_decoder(self):\n self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 256), # B, 256\n View((-1, 256, 1, 1)), # B, 256, 1, 1\n nn.SELU(),\n nn.ConvTranspose2d(256, 64, 4), # B, 64, 4, 4\n nn.SELU(),\n nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32\n nn.SELU(),\n nn.ConvTranspose2d(32, 3, 4, 2, 1), # B, nc, 64, 64\n nn.ReLU()\n )","def forward(self,\n input,\n hidden):\n embedded = self.embedding(input.squeeze())[:, None, :]\n semantics = self.semantic_embedding(input.squeeze())\n output = embedded\n output, hidden = self.rnn(output, hidden)\n return output, hidden, semantics","def beamSearchDecoder(self, enc_states, hidden, test=False, sentence=None, st=\"\", 
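# An assumed NumPy sketch of the cell-offset grid built in decode(conv_output)
# above: xy_grid[i, j] holds the (x, y) coordinates of grid cell (row i,
# column j), which are added to the sigmoid box offsets.
import numpy as np

size = 4
y = np.tile(np.arange(size)[:, None], (1, size))   # y[i, j] = i
x = np.tile(np.arange(size)[None, :], (size, 1))   # x[i, j] = j
xy_grid = np.stack([x, y], axis=-1)                # [size, size, 2]
print(xy_grid[1, 2])                               # [2 1]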
ed=\"\", k=3):\n\t\tbatch_size = enc_states.shape[0]\n\t\thidden = F.tanh(self.init_decoder_hidden(hidden[1])).view(1, batch_size, self.hid_dim)\n\t\tif test:\n\t\t\tbeams = [Beam(k, self.vocab, hidden[:,i,:], self.device) for i in range(batch_size)]\n\n\t\t\tfor i in range(self.max_trg_len):\n\t\t\t\tfor j in range(batch_size):\n\t\t\t\t\tlogits, hidden = self.decoderStep(enc_states[j].view(1, -1, self.hid_dim).expand(k, -1, -1),\n\t\t\t\t\t\t\t\t\t\t\t\t\t beams[j].get_hidden_state(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t beams[j].get_current_word())\n\t\t\t\t\tlogLikelihood = torch.log(F.softmax(logits, dim=-1))\n\t\t\t\t\tbeams[j].advance(logLikelihood, hidden)\n\n\t\t\tallHyp, allScores = [], []\n\t\t\tn_best = 1\n\t\t\tfor b in range(batch_size):\n\t\t\t\tscores, ks = beams[b].sort_best()\n\n\t\t\t\tallScores += [scores[:n_best]]\n\t\t\t\thyps = [beams[b].get_hyp(k) for k in ks[:n_best]]\n\t\t\t\tallHyp.append(hyps)\n\n\t\t\treturn allHyp\n\t\t\t# return sentences\n\t\telse:\n\t\t\tmax_seq_len = sentence.shape[1]\n\t\t\tlogits = torch.zeros(batch_size, max_seq_len - 1, self.vocab_size, device=self.device)\n\t\t\tfor i in range(max_seq_len - 1):\n\t\t\t\t# logit: [batch, 1, vocab_size]\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, sentence[:, i])\n\t\t\t\tlogits[:, i, :] = logit.squeeze()\n\t\t\treturn logits","def encode_decode_TD(self, n_step, idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ ):\r\n\r\n actor_embedding = embed_seq(input_seq=self.input_, from_=self.dimension, to_= self.input_embed, is_training=self.is_training, BN=True, initializer=self.initializer)\r\n actor_encoding = encode_seq(input_seq=actor_embedding, input_dim=self.input_embed, num_stacks=self.num_stacks, num_heads=self.num_heads, num_neurons=self.num_neurons, is_training=self.is_training)\r\n \r\n if self.is_training == False:\r\n actor_encoding = tf.tile(actor_encoding,[self.batch_size,1,1])\r\n \r\n idx_list = copy(idx_list_previous)\r\n log_probs = copy(log_probs_previous)\r\n entropies = copy(entropies_previous)\r\n \r\n\r\n mask = copy(mask_previous)\r\n \r\n n_hidden = actor_encoding.get_shape().as_list()[2] # input_embed\r\n W_ref = tf.get_variable(\"W_ref\",[1, n_hidden, self.num_units],initializer=self.initializer)\r\n W_q = tf.get_variable(\"W_q\",[self.query_dim, self.num_units],initializer=self.initializer)\r\n v = tf.get_variable(\"v\",[self.num_units],initializer=self.initializer)\r\n \r\n encoded_ref = tf.nn.conv1d(actor_encoding, W_ref, 1, \"VALID\") # actor_encoding is the ref for actions [Batch size, seq_length, n_hidden]\r\n \r\n query1 = copy( query1_previous)\r\n query2 = copy( query2_previous)\r\n query3 = copy( query3_previous)\r\n idx_copy = copy(idx_)\r\n \r\n W_1 =tf.get_variable(\"W_1\",[n_hidden, self.query_dim],initializer=self.initializer) # update trajectory (state)\r\n W_2 =tf.get_variable(\"W_2\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n W_3 =tf.get_variable(\"W_3\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n \r\n \r\n \"\"\"\r\n # sample from POINTER from the perspective of the Actor\r\n \"\"\"\r\n for step in range(n_step + 1 ): \r\n query = tf.nn.relu(tf.matmul(query1, W_1) + tf.matmul(query2, W_2) + tf.matmul(query3, W_3))\r\n logits = pointer(encoded_ref=encoded_ref, query=query, mask=mask, W_ref=W_ref, W_q=W_q, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n 
idx_list.append(idx) # tour index\r\n idx_list_previous.append(idx)\r\n \r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n log_probs_previous.append(prob.log_prob(idx))\r\n \r\n entropies.append(prob.entropy()) # entropies\r\n entropies_previous.append(prob.entropy())\r\n \r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n mask_previous = mask_previous + tf.one_hot(idx, self.max_length)\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_) # update trajectory (state)\r\n \r\n query3_previous = query2_previous\r\n query2_previous = query1_previous\r\n query1_previous = tf.gather_nd(actor_encoding, idx_) # update trajectory (state) \r\n\r\n if (len(idx_list) >= self.max_length): break #leave the loop if reach the end of the episode\r\n\r\n \"\"\"\r\n # sample from POINTER from the perspective of the Critic\r\n make q_t vector = 0\r\n \"\"\"\r\n while(len(idx_list) < self.max_length): \r\n \r\n logits = pointer_critic(encoded_ref=encoded_ref, mask=mask, W_ref=W_ref, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n entropies.append(prob.entropy()) # entropies\r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n #idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_copy) # update trajectory (state)\r\n \r\n idx_list.append(idx_list[0]) # return to start\r\n self.tour =tf.stack(idx_list, axis=1) # permutations\r\n self.log_prob = tf.add_n(log_probs) # corresponding log-probability for backprop\r\n self.entropies = tf.add_n(entropies)\r\n tf.summary.scalar('log_prob_mean', tf.reduce_mean(self.log_prob))\r\n tf.summary.scalar('entropies_mean', tf.reduce_mean(self.entropies))\r\n \r\n return idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ #returns variables necessary for the next loop\r","def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n class_probabilities = F.softmax(output_dict['logits'], dim=-1) # softmax over the rows, dim=0 softmaxes over the columns\n output_dict['class_probabilities'] = class_probabilities\n\n predictions = class_probabilities.cpu().data.numpy()\n argmax_indices = numpy.argmax(predictions, axis=-1)\n labels = [self.vocab.get_token_from_index(x, namespace=\"labels\")\n for x in argmax_indices]\n output_dict['label'] = labels\n return output_dict","def translate(\n self, x: tf.Tensor, encoder_inputs: TextEncoder, encoder_targets: TextEncoder\n ) -> tf.Tensor:\n batch_size = x.shape[0]\n max_seq_length = tf.reduce_max(\n base.translation_max_seq_lenght(x, encoder_inputs)\n )\n\n encoder_hidden = self.encoder.initialize_hidden_state(batch_size)\n encoder_output, encoder_hidden = self.encoder(x, encoder_hidden, False)\n decoder_hidden = encoder_hidden\n\n # The first words of each sentence in the batch is the start of sample token.\n words = (\n tf.zeros([batch_size, 1], dtype=tf.int64)\n + encoder_targets.start_of_sample_index\n 
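# A compact PyTorch sketch of the visited-index masking used by the pointer
# decoder in encode_decode_TD above: after each pick, add a one-hot of the
# chosen index to the mask so it cannot be selected again. Names are assumed.
import torch
import torch.nn.functional as F

def masked_pick(logits, mask):
    logits = logits.masked_fill(mask.bool(), float("-inf"))
    idx = logits.argmax(dim=-1)
    return idx, mask + F.one_hot(idx, logits.size(-1)).float()

mask = torch.zeros(2, 5)
for _ in range(3):
    idx, mask = masked_pick(torch.randn(2, 5), mask)
print(mask.sum(dim=-1))   # tensor([3., 3.]) -- three distinct picks per row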
# --- Stacked-RNN language-model forward pass (PyTorch) ---
# Assumes: from torch import Tensor; from typing import Dict, Optional;
# and the project's HiddenDict and RecurrentOutput type aliases.
def forward(self, input_var: Tensor, hidden: Optional[HiddenDict] = None, **additional: Dict) -> RecurrentOutput:

    if hidden is None:
        batch_size = input_var.shape[0]
        hidden = {l: self.init_hidden(batch_size, self.device) for l in range(self.num_layers)}

    embed = self.embeddings(input_var)  # batch_size x seq_len x embedding_dim
    embed = self.dropout_layer(embed)

    input_ = embed.squeeze(1)
    for l in range(self.num_layers):
        new_hidden = self.forward_step(l, hidden[l], input_)
        input_ = new_hidden[0]  # the new hidden state becomes the input of the next layer
        hidden[l] = new_hidden  # store it for the next step

    out = self.dropout_layer(input_)
    output = self.predict_distribution(out)

    return output, hidden

# --- Step-by-step caption generation at inference time (TensorFlow 1.x) ---
# Note: the special-token string literals (start/stop words) were lost when this
# snippet was scraped; the empty strings below stand in for them.
def online_inference(self, sess, picture_ids, in_pictures, image_f_inputs,
                     stop_word='', c_v=None):
    # get the stop-word index from the dictionary
    stop_word_idx = self.data_dict.word2idx['']
    cap_list = [None] * in_pictures.shape[0]
    with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
        _, states = self.decoder(gen_mode=True)
        init_state, out_state, sample = states
    cap_raw = []
    for i in range(len(in_pictures)):
        state = None
        cap_list[i] = {'image_id': picture_ids[i], 'caption': ' '}
        sentence = ['']
        cur_it = 0
        gen_word_idx = 0
        cap_raw.append([])
        while cur_it < self.params.gen_max_len:
            input_seq = [self.data_dict.word2idx[word] for word in sentence]
            feed = {self.captions: np.array(input_seq)[-1].reshape([1, 1]),
                    self.lengths: [len(input_seq)],
                    image_f_inputs: np.expand_dims(in_pictures[i], 0)}
            if self.c_i is not None:
                feed.update({self.c_i_ph: np.expand_dims(c_v[i], 0)})
            # for the first decoder step, the state is None
            if state is not None:
                feed.update({init_state: state})
            next_word_probs, state = sess.run([sample, out_state], feed)
            if self.params.sample_gen == 'greedy':
                # note: argmax is unaffected by this temperature rescaling, since
                # p -> p**(1/t) is monotone; see the sampling sketch below
                next_word_probs = next_word_probs.ravel()
                t = self.params.temperature
                next_word_probs = next_word_probs**(1/t) / np.sum(next_word_probs**(1/t))
                gen_word_idx = np.argmax(next_word_probs)
            elif self.params.sample_gen == 'sample':
                gen_word_idx = next_word_probs
            gen_word = self.data_dict.idx2word[gen_word_idx]
            sentence += [gen_word]
            cap_raw[i].append(gen_word_idx)
            cur_it += 1
            if gen_word_idx == stop_word_idx:
                break
        cap_list[i]['caption'] = ' '.join([word for word in sentence
                                           if word not in ['', '']])
    return cap_list, cap_raw

# --- MLP decoder head (PyTorch); View is a project helper module that reshapes its input ---
def _define_decoder(self):
    self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 512, bias=False), nn.SELU(),
                                 nn.BatchNorm1d(512),
                                 nn.Linear(512, 2560, bias=False), nn.SELU(),
                                 nn.BatchNorm1d(2560),
                                 nn.Linear(2560, 5120, bias=False), nn.SELU(),
                                 nn.BatchNorm1d(5120),
                                 nn.Linear(5120, 64*64*3, bias=False), nn.ReLU(),
                                 View((-1, 3, 64, 64)),
                                 )
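As noted in `online_inference` above, temperature rescaling changes nothing under `np.argmax`; it only matters when the next word is actually sampled. A minimal, self-contained sketch of temperature sampling over a NumPy probability vector follows (function name and example values assumed, not from the repository):

import numpy as np

def sample_with_temperature(probs: np.ndarray, t: float = 1.0, rng=None) -> int:
    """Sample a token id after sharpening/flattening probs by temperature t."""
    rng = rng or np.random.default_rng()
    scaled = probs ** (1.0 / t)   # t < 1 sharpens toward greedy; t > 1 flattens
    scaled /= scaled.sum()        # renormalize to a valid distribution
    return int(rng.choice(len(scaled), p=scaled))

# Example: with t = 0.5 the most likely token is picked even more often.
probs = np.array([0.6, 0.3, 0.1])
print(sample_with_temperature(probs, t=0.5))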
# --- Transposed-convolution image decoder (TensorFlow 1.x, tf.layers API) ---
def deconv_decoder(latent_tensor, output_shape, is_training=True):
    del is_training  # unused; kept for a uniform decoder signature
    d1 = tf.layers.dense(latent_tensor, 256, activation=tf.nn.relu)
    d2 = tf.layers.dense(d1, 1024, activation=tf.nn.relu)
    d2_reshaped = tf.reshape(d2, shape=[-1, 4, 4, 64])
    d3 = tf.layers.conv2d_transpose(
        inputs=d2_reshaped,
        filters=64,
        kernel_size=4,
        strides=2,
        activation=tf.nn.relu,
        padding="same",
    )
    d4 = tf.layers.conv2d_transpose(
        inputs=d3,
        filters=32,
        kernel_size=4,
        strides=2,
        activation=tf.nn.relu,
        padding="same",
    )
    d5 = tf.layers.conv2d_transpose(
        inputs=d4,
        filters=32,
        kernel_size=4,
        strides=2,
        activation=tf.nn.relu,
        padding="same",
    )
    d6 = tf.layers.conv2d_transpose(
        inputs=d5,
        filters=output_shape[2],
        kernel_size=4,
        strides=2,
        padding="same",
    )
    return tf.reshape(d6, [-1] + output_shape)

# --- Log-likelihood of a mixture-of-decoders sequence model (TensorFlow 1.x) ---
# Uses module-level tensors/params (enc_in, dec_in, dec_out, batch_idx, S, K, T, params,
# LSTM_encoder_params, LSTM_decoder_params) and project helpers BiLSTM/MultiLSTM.
def loglik():
    enc_in_ = tf.one_hot(tf.gather(enc_in, batch_idx), S)
    dec_in_ = tf.one_hot(tf.gather(dec_in, batch_idx), S)
    dec_out_ = tf.one_hot(tf.gather(dec_out, batch_idx), S)
    # mask for the decoder input
    dec_mask = tf.expand_dims(tf.expand_dims(tf.reduce_sum(dec_in_, -1), -1), 1)
    # read the input with the encoder
    h_enc = BiLSTM(enc_in_, LSTM_encoder_params['enc_fwd'], LSTM_encoder_params['enc_bkwd'])
    h_enc_last_state = h_enc[:, -1, :]
    # log probability of component membership, from the last LSTM state
    log_p_z = tf.nn.log_softmax(tf.einsum('nd,dk->nk', h_enc_last_state, params['W']), -1)
    # concatenate the K component-level embeddings to the decoder input
    group_embedding = tf.tile(tf.expand_dims(tf.expand_dims(params['U'], 1), 0),
                              [tf.shape(batch_idx)[0], 1, T-1, 1])
    dec_in_embedded = tf.concat([group_embedding * dec_mask,
                                 tf.tile(tf.expand_dims(dec_in_, 1), [1, K, 1, 1])], -1)
    # read the decoder input into the K-component decoder
    h_dec = MultiLSTM(dec_in_embedded, LSTM_decoder_params['dec_fwd'])
    log_p_output = tf.nn.log_softmax(tf.einsum('nktd,ds->nkts', h_dec, params['V']), -1)
    # loss for the decoder output under all K components
    llik_z = tf.reduce_sum(tf.expand_dims(log_p_z, -1)
                           + tf.reduce_sum(log_p_output * tf.expand_dims(dec_out_, 1), -1), -1)
    # marginalize out the discrete parameter with log-sum-exp
    lliks = tf.reduce_logsumexp(llik_z, -1)
    return tf.reduce_sum(lliks)

# --- Model construction with pre-trained word embeddings (TensorFlow 1.x) ---
def build_model(options, worddicts):
    opt_ret = dict()
    params = dict()
    word_xr1_mask = tf.reverse(word_x1_mask, [1])
    word_xr2_mask = tf.reverse(word_x2_mask, [1])

    # embedding layer
    word_embedding = norm_weight(options['n_words'], options['dim_word'])
    if options['embedding']:
        with open(options['embedding'], 'r', encoding='iso-8859-1') as f:
            for line in f:
                temp = line.split()
                word = temp[0]
                vector = temp[1:]
                if word in worddicts and worddicts[word]