 \n \n \n \"\"\"\n\n tree = html_tag_tree(html)\n\n self.assertEqual(tree.children[0].token.kind, 'html_start')\n self.assertEqual(tree.children[0].token.tag, 'html')\n\n self.assertEqual(tree.children[0].children[0].token.kind, 'html_start')\n self.assertEqual(tree.children[0].children[0].token.tag, 'head')\n\n self.assertEqual(tree.children[0].children[0].children[0].token.kind, 'html_start')\n self.assertEqual(tree.children[0].children[0].children[0].token.tag, 'title')\n\n self.assertEqual(tree.children[0].children[1].token.kind, 'html_start')\n self.assertEqual(tree.children[0].children[1].token.tag, 'body')\n\n self.assertEqual(tree.children[0].children[1].children[0].token.kind, 'html_start')\n self.assertEqual(tree.children[0].children[1].children[0].token.tag, 'p')\n\n self.assertEqual(tree.children[0].children[1].children[0].children[0].token.kind, 'html_singleton')\n self.assertEqual(tree.children[0].children[1].children[0].children[0].token.tag, 'br')\n\n self.assertEqual(tree.children[0].children[1].children[1].token.kind, 'html_start')\n self.assertEqual(tree.children[0].children[1].children[1].token.tag, 'p')\n\n def test_html_branches(self) -> None:\n html = \"\"\"\n
<html>\n <head>\n <title>Test</title>\n </head>\n <body>\n <p>Hello<br />world!</p>\n <p>Goodbye<!-- test -->world!</p>\n </body>\n </html>
\n \n \n \n \"\"\"\n\n branches = html_branches(html)\n\n self.assertEqual(branches[0].text(), 'html head title')\n self.assertEqual(branches[1].text(), 'html body p br')\n self.assertEqual(branches[2].text(), 'html body p')\n\n self.assertEqual(branches[0].staircase_text(), '\\n html\\n head\\n title\\n')\n self.assertEqual(branches[1].staircase_text(), '\\n html\\n body\\n p\\n br\\n')\n self.assertEqual(branches[2].staircase_text(), '\\n html\\n body\\n p\\n')\n\n def test_build_id_dict(self) -> None:\n templates = [\"test_template1.html\", \"test_template2.html\"]\n templates = [os.path.join(TEST_TEMPLATES_DIR, fn) for fn in templates]\n\n template_id_dict = build_id_dict(templates)\n\n self.assertEqual(set(template_id_dict.keys()), {'below_navbar', 'hello_{{ message }}', 'intro'})\n self.assertEqual(template_id_dict['hello_{{ message }}'], [\n 'Line 12:%s/tools/tests/test_template_data/test_template1.html' % (ZULIP_PATH),\n 'Line 12:%s/tools/tests/test_template_data/test_template2.html' % (ZULIP_PATH)])\n self.assertEqual(template_id_dict['intro'], [\n 'Line 10:%s/tools/tests/test_template_data/test_template1.html' % (ZULIP_PATH),\n 'Line 11:%s/tools/tests/test_template_data/test_template1.html' % (ZULIP_PATH),\n 'Line 11:%s/tools/tests/test_template_data/test_template2.html' % (ZULIP_PATH)])\n self.assertEqual(template_id_dict['below_navbar'], [\n 'Line 10:%s/tools/tests/test_template_data/test_template2.html' % (ZULIP_PATH)])\n\n def test_split_for_id_and_class(self) -> None:\n id1 = \"{{ red|blue }}\"\n id2 = \"search_box_{{ page }}\"\n\n class1 = \"chat_box message\"\n class2 = \"stream_{{ topic }}\"\n class3 = \"foo {{ a|b|c }} bar\"\n\n self.assertEqual(split_for_id_and_class(id1), ['{{ red|blue }}'])\n self.assertEqual(split_for_id_and_class(id2), ['search_box_{{ page }}'])\n\n self.assertEqual(split_for_id_and_class(class1), ['chat_box', 'message'])\n self.assertEqual(split_for_id_and_class(class2), ['stream_{{ topic }}'])\n self.assertEqual(split_for_id_and_class(class3), ['foo', '{{ a|b|c }}', 'bar'])\n"},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1315,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"tools/tests/test_linter_custom_check.py"},"contents":{"kind":"string","value":"import os\n\nfrom mock import patch\nfrom unittest import TestCase\n\nfrom typing import Any, Dict, List\n\nfrom tools.linter_lib.custom_check import build_custom_checkers\nfrom tools.linter_lib.custom_check import custom_check_file\n\nROOT_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..'))\nCHECK_MESSAGE = \"Fix the corresponding rule in `tools/linter_lib/custom_check.py`.\"\n\nclass TestCustomRules(TestCase):\n\n def setUp(self) -> None:\n self.all_rules = [] # type: List[Dict[str, Any]]\n with patch('tools.linter_lib.custom_check.custom_check_file', return_value=False) as mock_custom_check_file:\n by_lang = dict.fromkeys(['py', 'js', 'sh', 'css', 'handlebars', 'html',\n 'json', 'md', 'txt', 'text', 'yaml', 'rst'],\n ['foo/bar.baz'])\n check_custom_checks_py, check_custom_checks_nonpy = build_custom_checkers(by_lang)\n check_custom_checks_py()\n check_custom_checks_nonpy()\n for call_args in mock_custom_check_file.call_args_list:\n rule_set = call_args[0][2]\n self.all_rules.extend(rule_set)\n\n def test_paths_in_rules(self) -> None:\n 
\"\"\"Verifies that the paths mentioned in linter rules actually exist\"\"\"\n for rule in self.all_rules:\n for path in rule.get('exclude', {}):\n abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))\n self.assertTrue(os.path.exists(abs_path),\n \"'{}' is neither an existing file, nor a directory. {}\".format(path, CHECK_MESSAGE))\n\n for line_tuple in rule.get('exclude_line', {}):\n path = line_tuple[0]\n abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))\n self.assertTrue(os.path.isfile(abs_path),\n \"The file '{}' doesn't exist. {}\".format(path, CHECK_MESSAGE))\n\n for path in rule.get('include_only', {}):\n if not os.path.splitext(path)[1]:\n self.assertTrue(path.endswith('/'),\n \"The path '{}' should end with '/'. {}\".format(path, CHECK_MESSAGE))\n\n def test_rule_patterns(self) -> None:\n \"\"\"Verifies that the search regex specified in a custom rule actually matches\n the expectation and doesn't throw false positives.\"\"\"\n for rule in self.all_rules:\n pattern = rule['pattern']\n for line in rule.get('good_lines', []):\n # create=True is superfluous when mocking built-ins in Python >= 3.5\n with patch('builtins.open', return_value=iter((line+'\\n\\n').splitlines()), create=True, autospec=True):\n self.assertFalse(custom_check_file('foo.bar', 'baz', [rule], ''),\n \"The pattern '{}' matched the line '{}' while it shouldn't.\".format(pattern, line))\n\n for line in rule.get('bad_lines', []):\n # create=True is superfluous when mocking built-ins in Python >= 3.5\n with patch('builtins.open',\n return_value=iter((line+'\\n\\n').splitlines()), create=True, autospec=True), patch('builtins.print'):\n filename = list(rule.get('include_only', {'foo.bar'}))[0]\n self.assertTrue(custom_check_file(filename, 'baz', [rule], ''),\n \"The pattern '{}' didn't match the line '{}' while it should.\".format(pattern, line))\n"},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1316,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"tools/tests/test_pretty_print.py"},"contents":{"kind":"string","value":"import unittest\n\nfrom tools.lib.pretty_print import pretty_print_html\n\n# Note that GOOD_HTML isn't necessarily beautiful HTML. Apart\n# from adjusting indentation, we mostly leave things alone to\n# respect whatever line-wrapping styles were in place before.\n\nBAD_HTML = \"\"\"\n\n\n\n\n\n\n \n \n Test\n \n \n \n \n

Hello
world!

\n

Goodbyeworld!

\n \n \n \n \n
5
\n
\n            print 'hello world'\n    
\n
{{ bla }}
\n \n\n\n\"\"\"\n\nGOOD_HTML = \"\"\"\n\n\n\n\n\n\n \n \n Test\n \n \n \n \n

Hello
world!

\n

Goodbyeworld!

\n \n \n \n \n
5
\n
\n            print 'hello world'\n    
\n
{{ bla }}
\n \n\n\n\"\"\"\n\nBAD_HTML1 = \"\"\"\n\n \n foobarfoobarfoobar\n \n\n\"\"\"\n\nGOOD_HTML1 = \"\"\"\n\n \n foobarfoobarfoobar\n \n\n\"\"\"\n\nBAD_HTML2 = \"\"\"\n\n \n {{# foobar area}}\n foobarfoobarfoobar\n {{/ foobar area}}\n \n\n\"\"\"\n\nGOOD_HTML2 = \"\"\"\n\n \n {{# foobar area}}\n foobarfoobarfoobar\n {{/ foobar area}}\n \n\n\"\"\"\n\nBAD_HTML3 = \"\"\"\n\n \n {{# foobar area}}\n foobarfoobar
\n

\n FOOBAR\n

\n
\n {{/ foobar area}}\n \n\n\"\"\"\n\nGOOD_HTML3 = \"\"\"\n\n \n {{# foobar area}}\n foobarfoobar
\n

\n FOOBAR\n

\n
\n {{/ foobar area}}\n \n\n\"\"\"\n\nBAD_HTML4 = \"\"\"\n
\n foo\n

hello

\n bar\n
\n\"\"\"\n\nGOOD_HTML4 = \"\"\"\n
\n foo\n

hello

\n bar\n
\n\"\"\"\n\nBAD_HTML5 = \"\"\"\n
\n foo\n {{#if foobar}}\n hello\n {{else}}\n bye\n {{/if}}\n bar\n
\n\"\"\"\n\nGOOD_HTML5 = \"\"\"\n
\n foo\n {{#if foobar}}\n hello\n {{else}}\n bye\n {{/if}}\n bar\n
\n\"\"\"\n\nBAD_HTML6 = \"\"\"\n
\n

foobar

\n
\n\"\"\"\n\nGOOD_HTML6 = \"\"\"\n
\n

foobar

\n
\n\"\"\"\n\nBAD_HTML7 = \"\"\"\n
\n {{dyn_name}}\n{{#if invite_only}}{{/if}}\n
\n\"\"\"\n\nGOOD_HTML7 = \"\"\"\n
\n {{dyn_name}}\n {{#if invite_only}}{{/if}}\n
\n\"\"\"\n\nBAD_HTML8 = \"\"\"\n{{#each test}}\n {{#with this}}\n {{#if foobar}}\n
{{{test}}}
\n {{/if}}\n {{#if foobar2}}\n {{partial \"teststuff\"}}\n {{/if}}\n {{/with}}\n{{/each}}\n\"\"\"\n\nGOOD_HTML8 = \"\"\"\n{{#each test}}\n {{#with this}}\n {{#if foobar}}\n
{{{test}}}
\n {{/if}}\n {{#if foobar2}}\n {{partial \"teststuff\"}}\n {{/if}}\n {{/with}}\n{{/each}}\n\"\"\"\n\nBAD_HTML9 = \"\"\"\n
\n {{!
}}\n \n \n \n {{!
}}\n
\n
\n\"\"\"\n\nGOOD_HTML9 = \"\"\"\n
\n {{!
}}\n \n \n \n {{!
}}\n
\n
\n\"\"\"\n\nBAD_HTML10 = \"\"\"\n{% block portico_content %}\n
\n foobar\n
\n
\n{% for row in data %}\n
\n {% for group in (row[0:2], row[2:4]) %}\n
\n
\n {% endfor %}\n
\n{% endfor %}\n
\n{% endblock %}\n\"\"\"\n\nGOOD_HTML10 = \"\"\"\n{% block portico_content %}\n
\n foobar\n
\n
\n {% for row in data %}\n
\n {% for group in (row[0:2], row[2:4]) %}\n
\n
\n {% endfor %}\n
\n {% endfor %}\n
\n{% endblock %}\n\"\"\"\n\nBAD_HTML11 = \"\"\"\n
\n
\n foobar\n
\n
\n
\n
\n\"\"\"\n\nGOOD_HTML11 = \"\"\"\n
\n
\n foobar\n
\n
\n
\n
\n\"\"\"\n\nBAD_HTML12 = \"\"\"\n
\n
\n  
\n foobar\n
\n
\n
\n
\n
\n\"\"\"\n\nGOOD_HTML12 = \"\"\"\n
\n
\n  
\n foobar\n
\n
\n
\n
\n
\n\"\"\"\n\nBAD_HTML13 = \"\"\"\n
\n {{#if this.code}}\n
&nbsp:{{this.name}}:
\n {{else}}\n {{#if this.is_realm_emoji}}\n \n {{else}}\n
\n {{/if}}\n {{/if}}\n
{{this.count}}
\n
\n\"\"\"\n\nGOOD_HTML13 = \"\"\"\n
\n {{#if this.code}}\n
&nbsp:{{this.name}}:
\n {{else}}\n {{#if this.is_realm_emoji}}\n \n {{else}}\n
\n {{/if}}\n {{/if}}\n
{{this.count}}
\n
\n\"\"\"\n\nBAD_HTML14 = \"\"\"\n
\n {{#if this.code}}\n
Here goes some cool code.
\n {{else}}\n
\n content of first div\n
\n content of second div.\n
\n
\n {{/if}}\n
\n\"\"\"\n\nGOOD_HTML14 = \"\"\"\n
\n {{#if this.code}}\n
Here goes some cool code.
\n {{else}}\n
\n content of first div\n
\n content of second div.\n
\n
\n {{/if}}\n
\n\"\"\"\n\nBAD_HTML15 = \"\"\"\n
\n \":thumbs_up:\"\n\n \":thumbs_up:\"\n\n \":thumbs_up:\"\n\n
\n\"\"\"\n\nGOOD_HTML15 = \"\"\"\n
\n \":thumbs_up:\"\n\n \":thumbs_up:\"\n\n \":thumbs_up:\"\n\n
\n\"\"\"\n\nBAD_HTML16 = \"\"\"\n
\n {{partial \"settings_checkbox\"\n \"setting_name\" \"realm_name_in_notifications\"\n \"is_checked\" page_params.realm_name_in_notifications\n \"label\" settings_label.realm_name_in_notifications}}\n
\n\"\"\"\n\nGOOD_HTML16 = \"\"\"\n
\n {{partial \"settings_checkbox\"\n \"setting_name\" \"realm_name_in_notifications\"\n \"is_checked\" page_params.realm_name_in_notifications\n \"label\" settings_label.realm_name_in_notifications}}\n
\n\"\"\"\n\nBAD_HTML17 = \"\"\"\n
\n \n\n
\n {{ bla }}\n
\n
\n\"\"\"\n\nGOOD_HTML17 = \"\"\"\n
\n \n \n
\n {{ bla }}\n
\n
\n\"\"\"\n\nclass TestPrettyPrinter(unittest.TestCase):\n def compare(self, a: str, b: str) -> None:\n self.assertEqual(a.split('\\n'), b.split('\\n'))\n\n def test_pretty_print(self) -> None:\n self.compare(pretty_print_html(GOOD_HTML), GOOD_HTML)\n self.compare(pretty_print_html(BAD_HTML), GOOD_HTML)\n self.compare(pretty_print_html(BAD_HTML1), GOOD_HTML1)\n self.compare(pretty_print_html(BAD_HTML2), GOOD_HTML2)\n self.compare(pretty_print_html(BAD_HTML3), GOOD_HTML3)\n self.compare(pretty_print_html(BAD_HTML4), GOOD_HTML4)\n self.compare(pretty_print_html(BAD_HTML5), GOOD_HTML5)\n self.compare(pretty_print_html(BAD_HTML6), GOOD_HTML6)\n self.compare(pretty_print_html(BAD_HTML7), GOOD_HTML7)\n self.compare(pretty_print_html(BAD_HTML8), GOOD_HTML8)\n self.compare(pretty_print_html(BAD_HTML9), GOOD_HTML9)\n self.compare(pretty_print_html(BAD_HTML10), GOOD_HTML10)\n self.compare(pretty_print_html(BAD_HTML11), GOOD_HTML11)\n self.compare(pretty_print_html(BAD_HTML12), GOOD_HTML12)\n self.compare(pretty_print_html(BAD_HTML13), GOOD_HTML13)\n self.compare(pretty_print_html(BAD_HTML14), GOOD_HTML14)\n self.compare(pretty_print_html(BAD_HTML15), GOOD_HTML15)\n self.compare(pretty_print_html(BAD_HTML16), GOOD_HTML16)\n self.compare(pretty_print_html(BAD_HTML17), GOOD_HTML17)\n"},"type_annotations":{"kind":"list like","value":["str","str"],"string":"[\n \"str\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[8328,8336],"string":"[\n 8328,\n 8336\n]"},"type_annotation_ends":{"kind":"list like","value":[8331,8339],"string":"[\n 8331,\n 8339\n]"}}},{"rowIdx":1317,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"tools/tests/test_template_parser.py"},"contents":{"kind":"string","value":"from typing import Optional, Any\n\nimport sys\nimport unittest\n\ntry:\n from tools.lib.template_parser import (\n TemplateParserException,\n is_django_block_tag,\n tokenize,\n validate,\n )\nexcept ImportError:\n print('ERROR!!! You need to run this via tools/test-tools.')\n sys.exit(1)\n\nclass ParserTest(unittest.TestCase):\n def _assert_validate_error(self, error: str, fn: Optional[str]=None,\n text: Optional[str]=None, check_indent: bool=True) -> None:\n with self.assertRaisesRegex(TemplateParserException, error):\n validate(fn=fn, text=text, check_indent=check_indent)\n\n def test_is_django_block_tag(self) -> None:\n self.assertTrue(is_django_block_tag('block'))\n self.assertFalse(is_django_block_tag('not a django tag'))\n\n def test_validate_vanilla_html(self) -> None:\n '''\n Verify that validate() does not raise errors for\n well-formed HTML.\n '''\n my_html = '''\n \n \n \n \n
foo
'''\n validate(text=my_html)\n\n def test_validate_handlebars(self) -> None:\n my_html = '''\n {{#with stream}}\n
<p>{{stream}}</p>
\n {{/with}}\n '''\n validate(text=my_html)\n\n def test_validate_comment(self) -> None:\n my_html = '''\n '''\n validate(text=my_html)\n\n def test_validate_django(self) -> None:\n my_html = '''\n {% include \"some_other.html\" %}\n {% if foo %}\n
<p>bar</p>
\n {% endif %}\n '''\n validate(text=my_html)\n\n my_html = '''\n {% block \"content\" %}\n {% with className=\"class\" %}\n {% include 'foobar' %}\n {% endwith %}\n {% endblock %}\n '''\n validate(text=my_html)\n\n def test_validate_no_start_tag(self) -> None:\n my_html = '''\n foo
</p>
\n '''\n self._assert_validate_error('No start tag', text=my_html)\n\n def test_validate_mismatched_tag(self) -> None:\n my_html = '''\n foo\n '''\n self._assert_validate_error('Mismatched tag.', text=my_html)\n\n def test_validate_bad_indentation(self) -> None:\n my_html = '''\n
<p>\n foo\n </p>
\n '''\n self._assert_validate_error('Bad indentation.', text=my_html, check_indent=True)\n\n def test_validate_state_depth(self) -> None:\n my_html = '''\n \n '''\n self._assert_validate_error('Missing end tag', text=my_html)\n\n def test_validate_incomplete_handlebars_tag_1(self) -> None:\n my_html = '''\n {{# foo\n '''\n self._assert_validate_error('''Tag missing \"}}\" at Line 2 Col 13:\"{{# foo\n \"''', text=my_html)\n\n def test_validate_incomplete_handlebars_tag_2(self) -> None:\n my_html = '''\n {{# foo }\n '''\n self._assert_validate_error('Tag missing \"}}\" at Line 2 Col 13:\"{{# foo }\\n\"', text=my_html)\n\n def test_validate_incomplete_django_tag_1(self) -> None:\n my_html = '''\n {% foo\n '''\n self._assert_validate_error('''Tag missing \"%}\" at Line 2 Col 13:\"{% foo\n \"''', text=my_html)\n\n def test_validate_incomplete_django_tag_2(self) -> None:\n my_html = '''\n {% foo %\n '''\n self._assert_validate_error('Tag missing \"%}\" at Line 2 Col 13:\"{% foo %\\n\"', text=my_html)\n\n def test_validate_incomplete_html_tag_1(self) -> None:\n my_html = '''\n \" at Line 2 Col 13:\" None:\n my_html = '''\n \" at Line 2 Col 13:\" None:\n my_html = '''\n < >\n '''\n self._assert_validate_error('Tag name missing', text=my_html)\n\n def test_code_blocks(self) -> None:\n\n # This is fine.\n my_html = '''\n \n x = 5\n y = x + 1\n '''\n validate(text=my_html)\n\n # This is also fine.\n my_html = \"process_widgets()\"\n validate(text=my_html)\n\n # This is illegal.\n my_html = '''\n x =\n 5\n '''\n self._assert_validate_error('Code tag is split across two lines.', text=my_html)\n\n def test_anchor_blocks(self) -> None:\n\n # This is allowed, although strange.\n my_html = '''\n \n Click here\n for more info.\n '''\n validate(text=my_html)\n\n # This is fine.\n my_html = 'click here'\n validate(text=my_html)\n\n # Even this is fine.\n my_html = '''\n @ZulipStatus on Twitter.\n '''\n validate(text=my_html)\n\n def test_tokenize(self) -> None:\n tag = 'bla'\n token = tokenize(tag)[0]\n self.assertEqual(token.kind, 'html_special')\n\n tag = 'bla'\n token = tokenize(tag)[0]\n self.assertEqual(token.kind, 'html_start')\n self.assertEqual(token.tag, 'a')\n\n tag = '
<br />bla'\n token = tokenize(tag)[0]\n self.assertEqual(token.kind, 'html_singleton')\n self.assertEqual(token.tag, 'br')\n\n tag = '<input>bla'\n token = tokenize(tag)[0]\n self.assertEqual(token.kind, 'html_singleton')\n self.assertEqual(token.tag, 'input')\n\n tag = '<input />bla'\n token = tokenize(tag)[0]\n self.assertEqual(token.kind, 'html_singleton')\n self.assertEqual(token.tag, 'input')\n\n tag = '</a>
bla'\n token = tokenize(tag)[0]\n self.assertEqual(token.kind, 'html_end')\n self.assertEqual(token.tag, 'a')\n\n tag = '{{#with foo}}bla'\n token = tokenize(tag)[0]\n self.assertEqual(token.kind, 'handlebars_start')\n self.assertEqual(token.tag, 'with')\n\n tag = '{{/with}}bla'\n token = tokenize(tag)[0]\n self.assertEqual(token.kind, 'handlebars_end')\n self.assertEqual(token.tag, 'with')\n\n tag = '{% if foo %}bla'\n token = tokenize(tag)[0]\n self.assertEqual(token.kind, 'django_start')\n self.assertEqual(token.tag, 'if')\n\n tag = '{% endif %}bla'\n token = tokenize(tag)[0]\n self.assertEqual(token.kind, 'django_end')\n self.assertEqual(token.tag, 'if')\n"},"type_annotations":{"kind":"list like","value":["str"],"string":"[\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[398],"string":"[\n 398\n]"},"type_annotation_ends":{"kind":"list like","value":[401],"string":"[\n 401\n]"}}},{"rowIdx":1318,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"tools/zulint/__init__.py"},"contents":{"kind":"string","value":""},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1319,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"tools/zulint/command.py"},"contents":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport argparse\nimport logging\nimport os\nimport subprocess\nimport sys\nfrom typing import Any, Callable, Dict, List, Optional\n\nfrom zulint.printer import print_err, colors\n\ndef add_default_linter_arguments(parser):\n # type: (argparse.ArgumentParser) -> None\n parser.add_argument('--modified', '-m',\n action='store_true',\n help='Only check modified files')\n parser.add_argument('--verbose', '-v',\n action='store_true',\n help='Print verbose timing output')\n parser.add_argument('targets',\n nargs='*',\n help='Specify directories to check')\n\ndef run_parallel(lint_functions):\n # type: (Dict[str, Callable[[], int]]) -> bool\n pids = []\n for name, func in lint_functions.items():\n pid = os.fork()\n if pid == 0:\n logging.info(\"start \" + name)\n result = func()\n logging.info(\"finish \" + name)\n sys.stdout.flush()\n sys.stderr.flush()\n os._exit(result)\n pids.append(pid)\n failed = False\n\n for pid in pids:\n (_, status) = os.waitpid(pid, 0)\n if status != 0:\n failed = True\n return failed\n\nclass LinterConfig:\n lint_functions = {} # type: Dict[str, Callable[[], int]]\n\n def __init__(self, by_lang):\n # type: (Any) -> None\n self.by_lang = by_lang\n\n def lint(self, func):\n # type: (Callable[[], int]) -> Callable[[], int]\n self.lint_functions[func.__name__] = func\n return func\n\n def external_linter(self, name, command, target_langs=[]):\n # type: (str, List[str], List[str]) -> None\n \"\"\"Registers an external linter program to be run as part of the\n linter. This program will be passed the subset of files being\n linted that have extensions in target_langs. 
If there are no\n such files, exits without doing anything.\n\n If target_langs is empty, just runs the linter unconditionally.\n \"\"\"\n color = next(colors)\n\n def run_linter():\n # type: () -> int\n targets = [] # type: List[str]\n if len(target_langs) != 0:\n targets = [target for lang in target_langs for target in self.by_lang[lang]]\n if len(targets) == 0:\n # If this linter has a list of languages, and\n # no files in those languages are to be checked,\n # then we can safely return success without\n # invoking the external linter.\n return 0\n\n p = subprocess.Popen(command + targets,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n assert p.stdout # use of subprocess.PIPE indicates non-None\n for line in iter(p.stdout.readline, b''):\n print_err(name, color, line)\n\n return p.wait() # Linter exit code\n\n self.lint_functions[name] = run_linter\n\n def do_lint(self):\n # type: () -> None\n failed = run_parallel(self.lint_functions)\n sys.exit(1 if failed else 0)\n"},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1320,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"tools/zulint/linters.py"},"contents":{"kind":"string","value":"from __future__ import print_function\nfrom __future__ import absolute_import\n\nimport subprocess\nfrom typing import List\n\nfrom zulint.printer import print_err, colors\n\ndef run_pycodestyle(files, ignored_rules):\n # type: (List[str], List[str]) -> bool\n if len(files) == 0:\n return False\n\n failed = False\n color = next(colors)\n pep8 = subprocess.Popen(\n ['pycodestyle'] + files + ['--ignore={rules}'.format(rules=','.join(ignored_rules))],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n assert pep8.stdout is not None # Implied by use of subprocess.PIPE\n for line in iter(pep8.stdout.readline, b''):\n print_err('pep8', color, line)\n failed = True\n return failed\n"},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1321,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"tools/zulint/lister.py"},"contents":{"kind":"string","value":"#!/usr/bin/env python3\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport subprocess\nimport re\nfrom collections import defaultdict\nimport argparse\nfrom six.moves import filter\n\nfrom typing import Union, List, Dict\n\ndef get_ftype(fpath, use_shebang):\n # type: (str, bool) -> str\n ext = os.path.splitext(fpath)[1]\n if ext:\n return ext[1:]\n elif use_shebang:\n # opening a file may throw an OSError\n with open(fpath) as f:\n first_line = f.readline()\n if re.search(r'^#!.*\\bpython', first_line):\n return 'py'\n elif re.search(r'^#!.*sh', first_line):\n return 'sh'\n elif re.search(r'^#!.*\\bperl', first_line):\n return 'pl'\n elif re.search(r'^#!.*\\bnode', first_line):\n return 'js'\n elif re.search(r'^#!.*\\bruby', first_line):\n return 'rb'\n elif re.search(r'^#!', first_line):\n print('Error: Unknown shebang in file \"%s\":\\n%s' % (fpath, first_line), file=sys.stderr)\n return ''\n else:\n return ''\n else:\n return ''\n\ndef list_files(targets: List[str]=[], 
ftypes: List[str]=[], use_shebang: bool=True,\n modified_only: bool=False, exclude: List[str]=[], group_by_ftype: bool=False,\n extless_only: bool=False) -> Union[Dict[str, List[str]], List[str]]:\n \"\"\"\n List files tracked by git.\n\n Returns a list of files which are either in targets or in directories in targets.\n If targets is [], list of all tracked files in current directory is returned.\n\n Other arguments:\n ftypes - List of file types on which to filter the search.\n If ftypes is [], all files are included.\n use_shebang - Determine file type of extensionless files from their shebang.\n modified_only - Only include files which have been modified.\n exclude - List of files or directories to be excluded, relative to repository root.\n group_by_ftype - If True, returns a dict of lists keyed by file type.\n If False, returns a flat list of files.\n extless_only - Only include extensionless files in output.\n \"\"\"\n ftypes = [x.strip('.') for x in ftypes]\n ftypes_set = set(ftypes)\n\n # Really this is all bytes -- it's a file path -- but we get paths in\n # sys.argv as str, so that battle is already lost. Settle for hoping\n # everything is UTF-8.\n repository_root = subprocess.check_output(['git', 'rev-parse',\n '--show-toplevel']).strip().decode('utf-8')\n exclude_abspaths = [os.path.abspath(os.path.join(repository_root, fpath)) for fpath in exclude]\n\n cmdline = ['git', 'ls-files'] + targets\n if modified_only:\n cmdline.append('-m')\n\n files_gen = (x.strip() for x in subprocess.check_output(cmdline, universal_newlines=True).split('\\n'))\n # throw away empty lines and non-files (like symlinks)\n files = list(filter(os.path.isfile, files_gen))\n\n result_dict = defaultdict(list) # type: Dict[str, List[str]]\n result_list = [] # type: List[str]\n\n for fpath in files:\n # this will take a long time if exclude is very large\n ext = os.path.splitext(fpath)[1]\n if extless_only and ext:\n continue\n absfpath = os.path.abspath(fpath)\n if any(absfpath == expath or absfpath.startswith(os.path.abspath(expath) + os.sep)\n for expath in exclude_abspaths):\n continue\n\n if ftypes or group_by_ftype:\n try:\n filetype = get_ftype(fpath, use_shebang)\n except (OSError, UnicodeDecodeError) as e:\n etype = e.__class__.__name__\n print('Error: %s while determining type of file \"%s\":' % (etype, fpath), file=sys.stderr)\n print(e, file=sys.stderr)\n filetype = ''\n if ftypes and filetype not in ftypes_set:\n continue\n\n if group_by_ftype:\n result_dict[filetype].append(fpath)\n else:\n result_list.append(fpath)\n\n if group_by_ftype:\n return result_dict\n else:\n return result_list\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"List files tracked by git and optionally filter by type\")\n parser.add_argument('targets', nargs='*', default=[],\n help='''files and directories to include in the result.\n If this is not specified, the current directory is used''')\n parser.add_argument('-m', '--modified', action='store_true', default=False,\n help='list only modified files')\n parser.add_argument('-f', '--ftypes', nargs='+', default=[],\n help=\"list of file types to filter on. 
\"\n \"All files are included if this option is absent\")\n parser.add_argument('--ext-only', dest='extonly', action='store_true', default=False,\n help='only use extension to determine file type')\n parser.add_argument('--exclude', nargs='+', default=[],\n help='list of files and directories to exclude from results, relative to repo root')\n parser.add_argument('--extless-only', dest='extless_only', action='store_true', default=False,\n help='only include extensionless files in output')\n args = parser.parse_args()\n listing = list_files(targets=args.targets, ftypes=args.ftypes, use_shebang=not args.extonly,\n modified_only=args.modified, exclude=args.exclude, extless_only=args.extless_only)\n for l in listing:\n print(l)\n"},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1322,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"tools/zulint/printer.py"},"contents":{"kind":"string","value":"from __future__ import print_function\nfrom __future__ import absolute_import\n\nimport sys\nimport os\nfrom itertools import cycle\nfrom typing import Union, Text\n\n# Terminal Color codes for use in differentiatng linters\nBOLDRED = '\\x1B[1;31m'\nGREEN = '\\x1b[32m'\nYELLOW = '\\x1b[33m'\nBLUE = '\\x1b[34m'\nMAGENTA = '\\x1b[35m'\nCYAN = '\\x1b[36m'\nENDC = '\\033[0m'\n\ncolors = cycle([GREEN, YELLOW, BLUE, MAGENTA, CYAN])\n\ndef print_err(name, color, line):\n # type: (str, str, Union[Text, bytes]) -> None\n\n # Decode with UTF-8 if in Python 3 and `line` is of bytes type.\n # (Python 2 does this automatically)\n if sys.version_info[0] == 3 and isinstance(line, bytes):\n line = line.decode('utf-8')\n\n print('{}{}{}|{end} {}{}{end}'.format(\n color,\n name,\n ' ' * max(0, 10 - len(name)),\n BOLDRED,\n line.rstrip(),\n end=ENDC)\n )\n\n # Python 2's print function does not have a `flush` option.\n sys.stdout.flush()\n"},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1323,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"version.py"},"contents":{"kind":"string","value":"ZULIP_VERSION = \"1.9.0+git\"\nLATEST_MAJOR_VERSION = \"1.9\"\nLATEST_RELEASE_VERSION = \"1.9.0\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2018/11/07/zulip-1-9-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. 
Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically, adding a dependency only requires a minor version bump, and\n# removing a dependency requires a major version bump.\n\nPROVISION_VERSION = '26.14'\n"},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1324,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/__init__.py"},"contents":{"kind":"string","value":"# Load AppConfig app subclass by default on django applications initialization\ndefault_app_config = 'zerver.apps.ZerverConfig'\n"},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1325,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/apps.py"},"contents":{"kind":"string","value":"\nimport logging\nfrom typing import Any, Dict\n\nfrom django.apps import AppConfig\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.db.models.signals import post_migrate\n\ndef flush_cache(sender: AppConfig, **kwargs: Any) -> None:\n logging.info(\"Clearing memcached cache after migrations\")\n cache.clear()\n\n\nclass ZerverConfig(AppConfig):\n name = \"zerver\" # type: str\n\n def ready(self) -> None:\n import zerver.signals\n\n if settings.POST_MIGRATION_CACHE_FLUSHING:\n post_migrate.connect(flush_cache, sender=self)\n"},"type_annotations":{"kind":"list like","value":["AppConfig","Any"],"string":"[\n \"AppConfig\",\n \"Any\"\n]"},"type_annotation_starts":{"kind":"list like","value":[224,245],"string":"[\n 224,\n 245\n]"},"type_annotation_ends":{"kind":"list like","value":[233,248],"string":"[\n 233,\n 248\n]"}}},{"rowIdx":1326,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/context_processors.py"},"contents":{"kind":"string","value":"\nfrom typing import Any, Dict, List, Optional\nfrom django.http import HttpRequest\nfrom django.conf import settings\n\nfrom zerver.models import UserProfile, get_realm, Realm\nfrom zproject.backends import (\n any_oauth_backend_enabled,\n dev_auth_enabled,\n github_auth_enabled,\n google_auth_enabled,\n password_auth_enabled,\n email_auth_enabled,\n require_email_format_usernames,\n auth_enabled_helper,\n AUTH_BACKEND_NAME_MAP\n)\nfrom zerver.lib.bugdown import convert as bugdown_convert\nfrom zerver.lib.send_email import FromAddress\nfrom zerver.lib.subdomains import get_subdomain\nfrom zerver.lib.realm_icon import get_realm_icon_url\n\nfrom version import ZULIP_VERSION, LATEST_RELEASE_VERSION, \\\n LATEST_RELEASE_ANNOUNCEMENT, LATEST_MAJOR_VERSION\n\ndef common_context(user: UserProfile) -> Dict[str, Any]:\n \"\"\"Common context used for things like outgoing emails that don't\n have a request.\n \"\"\"\n return {\n 'realm_uri': user.realm.uri,\n 'root_domain_uri': settings.ROOT_DOMAIN_URI,\n 'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,\n 'external_host': settings.EXTERNAL_HOST,\n }\n\ndef get_realm_from_request(request: HttpRequest) -> Optional[Realm]:\n if hasattr(request, \"user\") and hasattr(request.user, \"realm\"):\n return request.user.realm\n subdomain = 
get_subdomain(request)\n return get_realm(subdomain)\n\ndef zulip_default_context(request: HttpRequest) -> Dict[str, Any]:\n \"\"\"Context available to all Zulip Jinja2 templates that have a request\n passed in. Designed to provide the long list of variables at the\n bottom of this function in a wide range of situations: logged-in\n or logged-out, subdomains or not, etc.\n\n The main variable in the below is whether we know what realm the\n user is trying to interact with.\n \"\"\"\n realm = get_realm_from_request(request)\n\n if realm is None:\n realm_uri = settings.ROOT_DOMAIN_URI\n realm_name = None\n realm_icon = None\n realm_description = None\n realm_invite_required = False\n realm_plan_type = 0\n else:\n realm_uri = realm.uri\n realm_name = realm.name\n realm_icon = get_realm_icon_url(realm)\n realm_description_raw = realm.description or \"The coolest place in the universe.\"\n realm_description = bugdown_convert(realm_description_raw, message_realm=realm)\n realm_invite_required = realm.invite_required\n realm_plan_type = realm.plan_type\n\n register_link_disabled = settings.REGISTER_LINK_DISABLED\n login_link_disabled = settings.LOGIN_LINK_DISABLED\n find_team_link_disabled = settings.FIND_TEAM_LINK_DISABLED\n allow_search_engine_indexing = False\n\n if (settings.ROOT_DOMAIN_LANDING_PAGE\n and get_subdomain(request) == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN):\n register_link_disabled = True\n login_link_disabled = True\n find_team_link_disabled = False\n allow_search_engine_indexing = True\n\n apps_page_url = 'https://zulipchat.com/apps/'\n if settings.ZILENCER_ENABLED:\n apps_page_url = '/apps/'\n\n user_is_authenticated = False\n if hasattr(request, 'user') and hasattr(request.user, 'is_authenticated'):\n user_is_authenticated = request.user.is_authenticated.value\n\n if settings.DEVELOPMENT:\n secrets_path = \"zproject/dev-secrets.conf\"\n settings_path = \"zproject/dev_settings.py\"\n settings_comments_path = \"zproject/prod_settings_template.py\"\n else:\n secrets_path = \"/etc/zulip/zulip-secrets.conf\"\n settings_path = \"/etc/zulip/settings.py\"\n settings_comments_path = \"/etc/zulip/settings.py\"\n\n if hasattr(request, \"client\") and request.client.name == \"ZulipElectron\":\n platform = \"ZulipElectron\" # nocoverage\n else:\n platform = \"ZulipWeb\"\n\n return {\n 'root_domain_landing_page': settings.ROOT_DOMAIN_LANDING_PAGE,\n 'custom_logo_url': settings.CUSTOM_LOGO_URL,\n 'register_link_disabled': register_link_disabled,\n 'login_link_disabled': login_link_disabled,\n 'terms_of_service': settings.TERMS_OF_SERVICE,\n 'privacy_policy': settings.PRIVACY_POLICY,\n 'login_url': settings.HOME_NOT_LOGGED_IN,\n 'only_sso': settings.ONLY_SSO,\n 'external_host': settings.EXTERNAL_HOST,\n 'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,\n 'realm_invite_required': realm_invite_required,\n 'realm_uri': realm_uri,\n 'realm_name': realm_name,\n 'realm_icon': realm_icon,\n 'realm_description': realm_description,\n 'realm_plan_type': realm_plan_type,\n 'root_domain_uri': settings.ROOT_DOMAIN_URI,\n 'apps_page_url': apps_page_url,\n 'open_realm_creation': settings.OPEN_REALM_CREATION,\n 'password_auth_enabled': password_auth_enabled(realm),\n 'dev_auth_enabled': dev_auth_enabled(realm),\n 'google_auth_enabled': google_auth_enabled(realm),\n 'github_auth_enabled': github_auth_enabled(realm),\n 'email_auth_enabled': email_auth_enabled(realm),\n 'require_email_format_usernames': require_email_format_usernames(realm),\n 'any_oauth_backend_enabled': any_oauth_backend_enabled(realm),\n 
'no_auth_enabled': not auth_enabled_helper(list(AUTH_BACKEND_NAME_MAP.keys()), realm),\n 'development_environment': settings.DEVELOPMENT,\n 'support_email': FromAddress.SUPPORT,\n 'find_team_link_disabled': find_team_link_disabled,\n 'password_min_length': settings.PASSWORD_MIN_LENGTH,\n 'password_min_guesses': settings.PASSWORD_MIN_GUESSES,\n 'jitsi_server_url': settings.JITSI_SERVER_URL,\n 'two_factor_authentication_enabled': settings.TWO_FACTOR_AUTHENTICATION_ENABLED,\n 'zulip_version': ZULIP_VERSION,\n 'latest_release_version': LATEST_RELEASE_VERSION,\n 'latest_major_version': LATEST_MAJOR_VERSION,\n 'latest_release_announcement': LATEST_RELEASE_ANNOUNCEMENT,\n 'user_is_authenticated': user_is_authenticated,\n 'settings_path': settings_path,\n 'secrets_path': secrets_path,\n 'settings_comments_path': settings_comments_path,\n 'platform': platform,\n 'allow_search_engine_indexing': allow_search_engine_indexing,\n }\n"},"type_annotations":{"kind":"list like","value":["UserProfile","HttpRequest","HttpRequest"],"string":"[\n \"UserProfile\",\n \"HttpRequest\",\n \"HttpRequest\"\n]"},"type_annotation_starts":{"kind":"list like","value":[794,1180,1422],"string":"[\n 794,\n 1180,\n 1422\n]"},"type_annotation_ends":{"kind":"list like","value":[805,1191,1433],"string":"[\n 805,\n 1191,\n 1433\n]"}}},{"rowIdx":1327,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/data_import/__init__.py"},"contents":{"kind":"string","value":""},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1328,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/data_import/gitter.py"},"contents":{"kind":"string","value":"import os\nimport dateutil.parser\nimport random\nimport requests\nimport logging\nimport shutil\nimport subprocess\nimport ujson\n\nfrom django.conf import settings\nfrom django.forms.models import model_to_dict\nfrom django.utils.timezone import now as timezone_now\nfrom typing import Any, Dict, List, Set, Tuple\n\nfrom zerver.models import Realm, UserProfile, Recipient\nfrom zerver.lib.export import MESSAGE_BATCH_CHUNK_SIZE\nfrom zerver.data_import.import_util import ZerverFieldsT, build_zerver_realm, \\\n build_avatar, build_subscription, build_recipient, build_usermessages, \\\n build_defaultstream, process_avatars, build_realm, build_stream, \\\n build_message, create_converted_data_files, make_subscriber_map\n\n# stubs\nGitterDataT = List[Dict[str, Any]]\n\nrealm_id = 0\n\ndef gitter_workspace_to_realm(domain_name: str, gitter_data: GitterDataT,\n realm_subdomain: str) -> Tuple[ZerverFieldsT,\n List[ZerverFieldsT],\n Dict[str, int]]:\n \"\"\"\n Returns:\n 1. realm, Converted Realm data\n 2. avatars, which is list to map avatars to zulip avatar records.json\n 3. 
user_map, which is a dictionary to map from gitter user id to zulip user id\n \"\"\"\n NOW = float(timezone_now().timestamp())\n zerver_realm = build_zerver_realm(realm_id, realm_subdomain, NOW, 'Gitter') # type: List[ZerverFieldsT]\n realm = build_realm(zerver_realm, realm_id, domain_name)\n\n zerver_userprofile, avatars, user_map = build_userprofile(int(NOW), domain_name, gitter_data)\n zerver_stream, zerver_defaultstream = build_stream_and_defaultstream(int(NOW))\n zerver_recipient, zerver_subscription = build_recipient_and_subscription(\n zerver_userprofile, zerver_stream)\n\n realm['zerver_userprofile'] = zerver_userprofile\n realm['zerver_stream'] = zerver_stream\n realm['zerver_defaultstream'] = zerver_defaultstream\n realm['zerver_recipient'] = zerver_recipient\n realm['zerver_subscription'] = zerver_subscription\n\n return realm, avatars, user_map\n\ndef build_userprofile(timestamp: Any, domain_name: str,\n gitter_data: GitterDataT) -> Tuple[List[ZerverFieldsT],\n List[ZerverFieldsT],\n Dict[str, int]]:\n \"\"\"\n Returns:\n 1. zerver_userprofile, which is a list of user profile\n 2. avatar_list, which is list to map avatars to zulip avatard records.json\n 3. added_users, which is a dictionary to map from gitter user id to zulip id\n \"\"\"\n logging.info('######### IMPORTING USERS STARTED #########\\n')\n zerver_userprofile = []\n avatar_list = [] # type: List[ZerverFieldsT]\n user_map = {} # type: Dict[str, int]\n user_id = 0\n\n for data in gitter_data:\n if data['fromUser']['id'] not in user_map:\n user_data = data['fromUser']\n user_map[user_data['id']] = user_id\n\n email = get_user_email(user_data, domain_name)\n build_avatar(user_id, realm_id, email, user_data['avatarUrl'],\n timestamp, avatar_list)\n\n # Build userprofile object\n userprofile = UserProfile(\n full_name=user_data['displayName'],\n short_name=user_data['username'],\n id=user_id,\n email=email,\n delivery_email=email,\n avatar_source='U',\n pointer=-1,\n date_joined=timestamp,\n last_login=timestamp)\n userprofile_dict = model_to_dict(userprofile)\n # Set realm id separately as the corresponding realm is not yet a Realm model\n # instance\n userprofile_dict['realm'] = realm_id\n zerver_userprofile.append(userprofile_dict)\n user_id += 1\n logging.info('######### IMPORTING USERS FINISHED #########\\n')\n return zerver_userprofile, avatar_list, user_map\n\ndef get_user_email(user_data: ZerverFieldsT, domain_name: str) -> str:\n # TODO Get user email from github\n email = (\"%s@users.noreply.github.com\" % user_data['username'])\n return email\n\ndef build_stream_and_defaultstream(timestamp: Any) -> Tuple[List[ZerverFieldsT],\n List[ZerverFieldsT]]:\n logging.info('######### IMPORTING STREAM STARTED #########\\n')\n # We have only one stream for gitter export\n stream_name = 'from gitter'\n stream_description = \"Imported from gitter\"\n stream_id = 0\n stream = build_stream(timestamp, realm_id, stream_name, stream_description,\n stream_id)\n\n defaultstream = build_defaultstream(realm_id=realm_id, stream_id=stream_id,\n defaultstream_id=0)\n logging.info('######### IMPORTING STREAMS FINISHED #########\\n')\n return [stream], [defaultstream]\n\ndef build_recipient_and_subscription(\n zerver_userprofile: List[ZerverFieldsT],\n zerver_stream: List[ZerverFieldsT]) -> Tuple[List[ZerverFieldsT],\n List[ZerverFieldsT]]:\n \"\"\"\n Returns:\n 1. zerver_recipient, which is a list of mapped recipient\n 2. 
zerver_subscription, which is a list of mapped subscription\n \"\"\"\n zerver_recipient = []\n zerver_subscription = []\n recipient_id = subscription_id = 0\n\n # For stream\n\n # We have only one recipient, because we have only one stream\n # Hence 'recipient_id'=0 corresponds to 'stream_id'=0\n recipient = build_recipient(0, recipient_id, Recipient.STREAM)\n zerver_recipient.append(recipient)\n\n for user in zerver_userprofile:\n subscription = build_subscription(recipient_id, user['id'], subscription_id)\n zerver_subscription.append(subscription)\n subscription_id += 1\n recipient_id += 1\n\n # For users\n for user in zerver_userprofile:\n recipient = build_recipient(user['id'], recipient_id, Recipient.PERSONAL)\n subscription = build_subscription(recipient_id, user['id'], subscription_id)\n zerver_recipient.append(recipient)\n zerver_subscription.append(subscription)\n recipient_id += 1\n subscription_id += 1\n\n return zerver_recipient, zerver_subscription\n\ndef convert_gitter_workspace_messages(gitter_data: GitterDataT, output_dir: str,\n subscriber_map: Dict[int, Set[int]],\n user_map: Dict[str, int],\n user_short_name_to_full_name: Dict[str, str],\n chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> None:\n \"\"\"\n Messages are stored in batches\n \"\"\"\n logging.info('######### IMPORTING MESSAGES STARTED #########\\n')\n message_id = 0\n recipient_id = 0 # Corresponding to stream \"gitter\"\n\n low_index = 0\n upper_index = low_index + chunk_size\n dump_file_id = 1\n\n while True:\n message_json = {}\n zerver_message = []\n zerver_usermessage = [] # type: List[ZerverFieldsT]\n message_data = gitter_data[low_index: upper_index]\n if len(message_data) == 0:\n break\n for message in message_data:\n message_time = dateutil.parser.parse(message['sent']).timestamp()\n mentioned_user_ids = get_usermentions(message, user_map,\n user_short_name_to_full_name)\n rendered_content = None\n topic_name = 'imported from gitter'\n user_id = user_map[message['fromUser']['id']]\n\n zulip_message = build_message(topic_name, float(message_time), message_id, message['text'],\n rendered_content, user_id, recipient_id)\n zerver_message.append(zulip_message)\n\n build_usermessages(\n zerver_usermessage=zerver_usermessage,\n subscriber_map=subscriber_map,\n recipient_id=recipient_id,\n mentioned_user_ids=mentioned_user_ids,\n message_id=message_id,\n )\n\n message_id += 1\n\n message_json['zerver_message'] = zerver_message\n message_json['zerver_usermessage'] = zerver_usermessage\n message_filename = os.path.join(output_dir, \"messages-%06d.json\" % (dump_file_id,))\n logging.info(\"Writing Messages to %s\\n\" % (message_filename,))\n write_data_to_file(os.path.join(message_filename), message_json)\n\n low_index = upper_index\n upper_index = chunk_size + low_index\n dump_file_id += 1\n\n logging.info('######### IMPORTING MESSAGES FINISHED #########\\n')\n\ndef get_usermentions(message: Dict[str, Any], user_map: Dict[str, int],\n user_short_name_to_full_name: Dict[str, str]) -> List[int]:\n mentioned_user_ids = []\n if 'mentions' in message:\n for mention in message['mentions']:\n if mention.get('userId') in user_map:\n gitter_mention = '@%s' % (mention['screenName'])\n zulip_mention = ('@**%s**' %\n (user_short_name_to_full_name[mention['screenName']]))\n message['text'] = message['text'].replace(gitter_mention, zulip_mention)\n\n mentioned_user_ids.append(user_map[mention['userId']])\n return mentioned_user_ids\n\ndef do_convert_data(gitter_data_file: str, output_dir: str, threads: int=6) -> None:\n # 
Subdomain is set by the user while running the import commands\n realm_subdomain = \"\"\n domain_name = settings.EXTERNAL_HOST\n\n os.makedirs(output_dir, exist_ok=True)\n # output directory should be empty initially\n if os.listdir(output_dir):\n raise Exception(\"Output directory should be empty!\")\n\n # Read data from the gitter file\n with open(gitter_data_file, \"r\") as fp:\n gitter_data = ujson.load(fp)\n\n realm, avatar_list, user_map = gitter_workspace_to_realm(\n domain_name, gitter_data, realm_subdomain)\n\n subscriber_map = make_subscriber_map(\n zerver_subscription=realm['zerver_subscription'],\n )\n\n # For user mentions\n user_short_name_to_full_name = {}\n for userprofile in realm['zerver_userprofile']:\n user_short_name_to_full_name[userprofile['short_name']] = userprofile['full_name']\n\n convert_gitter_workspace_messages(\n gitter_data, output_dir, subscriber_map, user_map,\n user_short_name_to_full_name)\n\n avatar_folder = os.path.join(output_dir, 'avatars')\n avatar_realm_folder = os.path.join(avatar_folder, str(realm_id))\n os.makedirs(avatar_realm_folder, exist_ok=True)\n avatar_records = process_avatars(avatar_list, avatar_folder, realm_id, threads)\n\n attachment = {\"zerver_attachment\": []} # type: Dict[str, List[Any]]\n\n # IO realm.json\n create_converted_data_files(realm, output_dir, '/realm.json')\n # IO emoji records\n create_converted_data_files([], output_dir, '/emoji/records.json')\n # IO avatar records\n create_converted_data_files(avatar_records, output_dir, '/avatars/records.json')\n # IO uploads records\n create_converted_data_files([], output_dir, '/uploads/records.json')\n # IO attachments records\n create_converted_data_files(attachment, output_dir, '/attachment.json')\n\n subprocess.check_call([\"tar\", \"-czf\", output_dir + '.tar.gz', output_dir, '-P'])\n\n logging.info('######### DATA CONVERSION FINISHED #########\\n')\n logging.info(\"Zulip data dump created at %s\" % (output_dir))\n\ndef write_data_to_file(output_file: str, data: Any) -> None:\n with open(output_file, \"w\") as f:\n f.write(ujson.dumps(data, indent=4))\n"},"type_annotations":{"kind":"list like","value":["str","GitterDataT","str","Any","str","GitterDataT","ZerverFieldsT","str","Any","List[ZerverFieldsT]","List[ZerverFieldsT]","GitterDataT","str","Dict[int, Set[int]]","Dict[str, int]","Dict[str, str]","Dict[str, Any]","Dict[str, int]","Dict[str, str]","str","str","str","Any"],"string":"[\n \"str\",\n \"GitterDataT\",\n \"str\",\n \"Any\",\n \"str\",\n \"GitterDataT\",\n \"ZerverFieldsT\",\n \"str\",\n \"Any\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"GitterDataT\",\n \"str\",\n \"Dict[int, Set[int]]\",\n \"Dict[str, int]\",\n \"Dict[str, str]\",\n \"Dict[str, Any]\",\n \"Dict[str, int]\",\n \"Dict[str, str]\",\n \"str\",\n \"str\",\n \"str\",\n \"Any\"\n]"},"type_annotation_starts":{"kind":"list like","value":[814,832,892,2148,2166,2206,4073,4101,4284,5041,5081,6394,6419,6478,6547,6631,8778,8804,8871,9481,9498,11617,11628],"string":"[\n 814,\n 832,\n 892,\n 2148,\n 2166,\n 2206,\n 4073,\n 4101,\n 4284,\n 5041,\n 5081,\n 6394,\n 6419,\n 6478,\n 6547,\n 6631,\n 8778,\n 8804,\n 8871,\n 9481,\n 9498,\n 11617,\n 11628\n]"},"type_annotation_ends":{"kind":"list like","value":[817,843,895,2151,2169,2217,4086,4104,4287,5060,5100,6405,6422,6497,6561,6645,8792,8818,8885,9484,9501,11620,11631],"string":"[\n 817,\n 843,\n 895,\n 2151,\n 2169,\n 2217,\n 4086,\n 4104,\n 4287,\n 5060,\n 5100,\n 6405,\n 6422,\n 6497,\n 6561,\n 6645,\n 8792,\n 8818,\n 8885,\n 9484,\n 9501,\n 
11620,\n 11631\n]"}}},{"rowIdx":1329,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/data_import/hipchat.py"},"contents":{"kind":"string","value":"import base64\nimport dateutil\nimport glob\nimport logging\nimport os\nimport re\nimport shutil\nimport subprocess\nimport ujson\n\nfrom typing import Any, Callable, Dict, List, Optional, Set\n\nfrom django.conf import settings\nfrom django.forms.models import model_to_dict\nfrom django.utils.timezone import now as timezone_now\n\nfrom zerver.lib.utils import (\n process_list_in_batches,\n)\n\nfrom zerver.models import (\n RealmEmoji,\n Recipient,\n UserProfile,\n)\n\nfrom zerver.data_import.import_util import (\n build_message,\n build_realm,\n build_realm_emoji,\n build_recipients,\n build_stream,\n build_personal_subscriptions,\n build_public_stream_subscriptions,\n build_private_stream_subscriptions,\n build_user_message,\n build_user_profile,\n build_zerver_realm,\n create_converted_data_files,\n make_subscriber_map,\n write_avatar_png,\n)\n\nfrom zerver.data_import.hipchat_attachment import AttachmentHandler\nfrom zerver.data_import.hipchat_user import UserHandler\nfrom zerver.data_import.hipchat_subscriber import SubscriberHandler\nfrom zerver.data_import.sequencer import NEXT_ID, IdMapper\n\n# stubs\nZerverFieldsT = Dict[str, Any]\n\ndef str_date_to_float(date_str: str) -> float:\n '''\n Dates look like this:\n\n \"2018-08-08T14:23:54Z 626267\"\n '''\n\n parts = date_str.split(' ')\n time_str = parts[0].replace('T', ' ')\n date_time = dateutil.parser.parse(time_str)\n timestamp = date_time.timestamp()\n if len(parts) == 2:\n microseconds = int(parts[1])\n timestamp += microseconds / 1000000.0\n return timestamp\n\ndef untar_input_file(tar_file: str) -> str:\n data_dir = tar_file.replace('.tar', '')\n data_dir = os.path.abspath(data_dir)\n\n if os.path.exists(data_dir):\n logging.info('input data was already untarred to %s, we will use it' % (data_dir,))\n return data_dir\n\n os.makedirs(data_dir)\n\n subprocess.check_call(['tar', '-xf', tar_file, '-C', data_dir])\n\n logging.info('input data was untarred to %s' % (data_dir,))\n\n return data_dir\n\ndef read_user_data(data_dir: str) -> List[ZerverFieldsT]:\n fn = 'users.json'\n data_file = os.path.join(data_dir, fn)\n with open(data_file, \"r\") as fp:\n return ujson.load(fp)\n\ndef convert_user_data(user_handler: UserHandler,\n user_id_mapper: IdMapper,\n raw_data: List[ZerverFieldsT],\n realm_id: int) -> None:\n flat_data = [\n d['User']\n for d in raw_data\n ]\n\n def process(in_dict: ZerverFieldsT) -> ZerverFieldsT:\n delivery_email = in_dict['email']\n email = in_dict['email']\n full_name = in_dict['name']\n id = user_id_mapper.get(in_dict['id'])\n is_realm_admin = in_dict['account_type'] == 'admin'\n is_guest = in_dict['account_type'] == 'guest'\n is_mirror_dummy = False\n short_name = in_dict['mention_name']\n timezone = in_dict['timezone']\n\n date_joined = int(timezone_now().timestamp())\n is_active = not in_dict['is_deleted']\n\n if not email:\n if is_guest:\n # Hipchat guest users don't have emails, so\n # we just fake them.\n email = 'guest-{id}@example.com'.format(id=id)\n delivery_email = email\n else:\n # Hipchat sometimes doesn't export an email for deactivated users.\n assert not is_active\n email = delivery_email = \"deactivated-{id}@example.com\".format(id=id)\n\n # unmapped fields:\n # title - Developer, Project Manager, etc.\n # rooms - no good sample data\n # created - we just use 
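The fake-email fallback above is easy to restate as a runnable sketch; the ids are placeholders, and the example.com addresses mirror the fakes the converter itself generates:

def fallback_email(email, user_id, is_guest, is_active):
    # Guests never have emails in a HipChat export; deactivated users
    # sometimes lack one too. Everyone else must keep their real address.
    if email:
        return email
    if is_guest:
        return 'guest-{id}@example.com'.format(id=user_id)
    assert not is_active   # only deactivated users may lack an email
    return 'deactivated-{id}@example.com'.format(id=user_id)

assert fallback_email('', 7, True, True) == 'guest-7@example.com'
assert fallback_email('', 9, False, False) == 'deactivated-9@example.com'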
\"now\"\n # roles - we just use account_type\n\n if in_dict.get('avatar'):\n avatar_source = 'U'\n else:\n avatar_source = 'G'\n\n return build_user_profile(\n avatar_source=avatar_source,\n date_joined=date_joined,\n delivery_email=delivery_email,\n email=email,\n full_name=full_name,\n id=id,\n is_active=is_active,\n is_realm_admin=is_realm_admin,\n is_guest=is_guest,\n is_mirror_dummy=is_mirror_dummy,\n realm_id=realm_id,\n short_name=short_name,\n timezone=timezone,\n )\n\n for raw_item in flat_data:\n user = process(raw_item)\n user_handler.add_user(user)\n\ndef convert_avatar_data(avatar_folder: str,\n raw_data: List[ZerverFieldsT],\n realm_id: int) -> List[ZerverFieldsT]:\n '''\n This code is pretty specific to how Hipchat sends us data.\n They give us the avatar payloads in base64 in users.json.\n\n We process avatars in our own pass of that data, rather\n than doing it while we're getting other user data. I\n chose to keep this separate, as otherwise you have a lot\n of extraneous data getting passed around.\n\n This code has MAJOR SIDE EFFECTS--namely writing a bunch\n of files to the avatars directory.\n '''\n\n flat_data = [\n d['User']\n for d in raw_data\n if d.get('avatar')\n ]\n\n def process(raw_user: ZerverFieldsT) -> ZerverFieldsT:\n avatar_payload = raw_user['avatar']\n bits = base64.b64decode(avatar_payload)\n user_id = raw_user['id']\n\n metadata = write_avatar_png(\n avatar_folder=avatar_folder,\n realm_id=realm_id,\n user_id=user_id,\n bits=bits,\n )\n return metadata\n\n avatar_records = list(map(process, flat_data))\n return avatar_records\n\ndef read_room_data(data_dir: str) -> List[ZerverFieldsT]:\n fn = 'rooms.json'\n data_file = os.path.join(data_dir, fn)\n with open(data_file) as f:\n data = ujson.load(f)\n return data\n\ndef convert_room_data(raw_data: List[ZerverFieldsT],\n subscriber_handler: SubscriberHandler,\n stream_id_mapper: IdMapper,\n user_id_mapper: IdMapper,\n realm_id: int) -> List[ZerverFieldsT]:\n flat_data = [\n d['Room']\n for d in raw_data\n ]\n\n def invite_only(v: str) -> bool:\n if v == 'public':\n return False\n elif v == 'private':\n return True\n else:\n raise Exception('unexpected value')\n\n def process(in_dict: ZerverFieldsT) -> ZerverFieldsT:\n now = int(timezone_now().timestamp())\n stream_id = stream_id_mapper.get(in_dict['id'])\n\n out_dict = build_stream(\n date_created=now,\n realm_id=realm_id,\n name=in_dict['name'],\n description=in_dict['topic'],\n stream_id=stream_id,\n deactivated=in_dict['is_archived'],\n invite_only=invite_only(in_dict['privacy']),\n )\n\n if not user_id_mapper.has(in_dict['owner']):\n raise Exception('bad owner')\n\n owner = user_id_mapper.get(in_dict['owner'])\n members = {\n user_id_mapper.get(key)\n for key in in_dict['members']\n if user_id_mapper.has(key)\n }\n\n subscriber_handler.set_info(\n stream_id=stream_id,\n owner=owner,\n members=members,\n )\n\n # unmapped fields:\n # guest_access_url: no Zulip equivalent\n # created: we just use \"now\"\n # members: no good sample data\n # owners: no good sample data\n # participants: no good sample data\n return out_dict\n\n return list(map(process, flat_data))\n\ndef make_realm(realm_id: int) -> ZerverFieldsT:\n NOW = float(timezone_now().timestamp())\n domain_name = settings.EXTERNAL_HOST\n realm_subdomain = \"\"\n zerver_realm = build_zerver_realm(realm_id, realm_subdomain, NOW, 'HipChat')\n realm = build_realm(zerver_realm, realm_id, domain_name)\n\n # We may override these later.\n realm['zerver_defaultstream'] = []\n\n return realm\n\ndef 
write_avatar_data(raw_user_data: List[ZerverFieldsT],\n output_dir: str,\n realm_id: int) -> None:\n avatar_folder = os.path.join(output_dir, 'avatars')\n avatar_realm_folder = os.path.join(avatar_folder, str(realm_id))\n os.makedirs(avatar_realm_folder, exist_ok=True)\n\n avatar_records = convert_avatar_data(\n avatar_folder=avatar_folder,\n raw_data=raw_user_data,\n realm_id=realm_id,\n )\n\n create_converted_data_files(avatar_records, output_dir, '/avatars/records.json')\n\ndef write_emoticon_data(realm_id: int,\n data_dir: str,\n output_dir: str) -> List[ZerverFieldsT]:\n '''\n This function does most of the work for processing emoticons, the bulk\n of which is copying files. We also write a json file with metadata.\n Finally, we return a list of RealmEmoji dicts to our caller.\n\n In our data_dir we have a pretty simple setup:\n\n emoticons.json - has very simple metadata on emojis:\n\n {\n \"Emoticon\": {\n \"id\": 9875487,\n \"path\": \"emoticons/yasss.jpg\",\n \"shortcut\": \"yasss\"\n }\n },\n {\n \"Emoticon\": {\n \"id\": 718017,\n \"path\": \"emoticons/yayyyyy.gif\",\n \"shortcut\": \"yayyyyy\"\n }\n }\n\n emoticons/ - contains a bunch of image files:\n\n slytherinsnake.gif\n spanishinquisition.jpg\n sparkle.png\n spiderman.gif\n stableparrot.gif\n stalkerparrot.gif\n supergirl.png\n superman.png\n\n We move all the relevant files to Zulip's more nested\n directory structure.\n '''\n\n logging.info('Starting to process emoticons')\n\n fn = 'emoticons.json'\n data_file = os.path.join(data_dir, fn)\n with open(data_file) as f:\n data = ujson.load(f)\n\n flat_data = [\n dict(\n path=d['Emoticon']['path'],\n name=d['Emoticon']['shortcut'],\n )\n for d in data\n ]\n\n emoji_folder = os.path.join(output_dir, 'emoji')\n os.makedirs(emoji_folder, exist_ok=True)\n\n def process(data: ZerverFieldsT) -> ZerverFieldsT:\n source_sub_path = data['path']\n source_fn = os.path.basename(source_sub_path)\n source_path = os.path.join(data_dir, source_sub_path)\n\n # Use our template from RealmEmoji\n # PATH_ID_TEMPLATE = \"{realm_id}/emoji/images/{emoji_file_name}\"\n target_fn = source_fn\n target_sub_path = RealmEmoji.PATH_ID_TEMPLATE.format(\n realm_id=realm_id,\n emoji_file_name=target_fn,\n )\n target_path = os.path.join(emoji_folder, target_sub_path)\n\n os.makedirs(os.path.dirname(target_path), exist_ok=True)\n\n source_path = os.path.abspath(source_path)\n target_path = os.path.abspath(target_path)\n\n shutil.copyfile(source_path, target_path)\n\n return dict(\n path=target_path,\n s3_path=target_path,\n file_name=target_fn,\n realm_id=realm_id,\n name=data['name'],\n )\n\n emoji_records = list(map(process, flat_data))\n create_converted_data_files(emoji_records, output_dir, '/emoji/records.json')\n\n realmemoji = [\n build_realm_emoji(\n realm_id=realm_id,\n name=rec['name'],\n id=NEXT_ID('realmemoji'),\n file_name=rec['file_name'],\n )\n for rec in emoji_records\n ]\n logging.info('Done processing emoticons')\n\n return realmemoji\n\ndef write_message_data(realm_id: int,\n message_key: str,\n zerver_recipient: List[ZerverFieldsT],\n subscriber_map: Dict[int, Set[int]],\n data_dir: str,\n output_dir: str,\n masking_content: bool,\n stream_id_mapper: IdMapper,\n user_id_mapper: IdMapper,\n user_handler: UserHandler,\n attachment_handler: AttachmentHandler) -> None:\n\n stream_id_to_recipient_id = {\n d['type_id']: d['id']\n for d in zerver_recipient\n if d['type'] == Recipient.STREAM\n }\n\n user_id_to_recipient_id = {\n d['type_id']: d['id']\n for d in zerver_recipient\n if d['type'] == 
Recipient.PERSONAL\n }\n\n def get_stream_recipient_id(raw_message: ZerverFieldsT) -> int:\n fn_id = raw_message['fn_id']\n stream_id = stream_id_mapper.get(fn_id)\n recipient_id = stream_id_to_recipient_id[stream_id]\n return recipient_id\n\n def get_pm_recipient_id(raw_message: ZerverFieldsT) -> int:\n raw_user_id = raw_message['receiver_id']\n assert(raw_user_id)\n user_id = user_id_mapper.get(raw_user_id)\n recipient_id = user_id_to_recipient_id[user_id]\n return recipient_id\n\n if message_key in ['UserMessage', 'NotificationMessage']:\n is_pm_data = False\n dir_glob = os.path.join(data_dir, 'rooms', '*', 'history.json')\n get_recipient_id = get_stream_recipient_id\n get_files_dir = lambda fn_id: os.path.join(data_dir, 'rooms', str(fn_id), 'files')\n\n elif message_key == 'PrivateUserMessage':\n is_pm_data = True\n dir_glob = os.path.join(data_dir, 'users', '*', 'history.json')\n get_recipient_id = get_pm_recipient_id\n get_files_dir = lambda fn_id: os.path.join(data_dir, 'users', 'files')\n\n else:\n raise Exception('programming error: invalid message_key: ' + message_key)\n\n history_files = glob.glob(dir_glob)\n for fn in history_files:\n dir = os.path.dirname(fn)\n fn_id = os.path.basename(dir)\n files_dir = get_files_dir(fn_id)\n\n process_message_file(\n realm_id=realm_id,\n fn=fn,\n fn_id=fn_id,\n files_dir=files_dir,\n get_recipient_id=get_recipient_id,\n message_key=message_key,\n subscriber_map=subscriber_map,\n data_dir=data_dir,\n output_dir=output_dir,\n is_pm_data=is_pm_data,\n masking_content=masking_content,\n user_id_mapper=user_id_mapper,\n user_handler=user_handler,\n attachment_handler=attachment_handler,\n )\n\ndef get_hipchat_sender_id(realm_id: int,\n message_dict: Dict[str, Any],\n user_id_mapper: IdMapper,\n user_handler: UserHandler) -> int:\n '''\n The HipChat export is inconsistent in how it renders\n senders, and sometimes we don't even get an id.\n '''\n if isinstance(message_dict['sender'], str):\n # Some Hipchat instances just give us a person's\n # name in the sender field for NotificationMessage.\n # We turn them into a mirror user.\n mirror_user = user_handler.get_mirror_user(\n realm_id=realm_id,\n name=message_dict['sender'],\n )\n sender_id = mirror_user['id']\n return sender_id\n\n raw_sender_id = message_dict['sender']['id']\n\n if raw_sender_id == 0:\n mirror_user = user_handler.get_mirror_user(\n realm_id=realm_id,\n name=message_dict['sender']['name']\n )\n sender_id = mirror_user['id']\n return sender_id\n\n if not user_id_mapper.has(raw_sender_id):\n mirror_user = user_handler.get_mirror_user(\n realm_id=realm_id,\n name=message_dict['sender']['id']\n )\n sender_id = mirror_user['id']\n return sender_id\n\n # HAPPY PATH: Hipchat just gave us an ordinary\n # sender_id.\n sender_id = user_id_mapper.get(raw_sender_id)\n return sender_id\n\ndef process_message_file(realm_id: int,\n fn: str,\n fn_id: str,\n files_dir: str,\n get_recipient_id: Callable[[ZerverFieldsT], int],\n message_key: str,\n subscriber_map: Dict[int, Set[int]],\n data_dir: str,\n output_dir: str,\n is_pm_data: bool,\n masking_content: bool,\n user_id_mapper: IdMapper,\n user_handler: UserHandler,\n attachment_handler: AttachmentHandler) -> None:\n\n def get_raw_messages(fn: str) -> List[ZerverFieldsT]:\n with open(fn) as f:\n data = ujson.load(f)\n\n flat_data = [\n d[message_key]\n for d in data\n if message_key in d\n ]\n\n def get_raw_message(d: Dict[str, Any]) -> Optional[ZerverFieldsT]:\n sender_id = get_hipchat_sender_id(\n realm_id=realm_id,\n message_dict=d,\n 
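The two lookup tables built from zerver_recipient drive everything below; a small sketch with invented rows (the constants match Zulip's Recipient model, where PERSONAL is 1 and STREAM is 2, but treat them here as assumptions):

STREAM, PERSONAL = 2, 1   # Recipient type codes, as on Zulip's Recipient model

zerver_recipient = [
    {'id': 10, 'type': STREAM, 'type_id': 1},    # stream 1 -> recipient 10
    {'id': 11, 'type': PERSONAL, 'type_id': 5},  # user 5 -> recipient 11
]
stream_id_to_recipient_id = {d['type_id']: d['id']
                             for d in zerver_recipient if d['type'] == STREAM}
user_id_to_recipient_id = {d['type_id']: d['id']
                           for d in zerver_recipient if d['type'] == PERSONAL}

assert stream_id_to_recipient_id[1] == 10
assert user_id_to_recipient_id[5] == 11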
user_id_mapper=user_id_mapper,\n user_handler=user_handler,\n )\n\n if is_pm_data:\n if sender_id != fn_id:\n # PMs are in multiple places in the Hipchat export,\n # and we only use the copy from the sender\n return None\n\n content = d['message']\n\n if masking_content:\n content = re.sub('[a-z]', 'x', content)\n content = re.sub('[A-Z]', 'X', content)\n\n return dict(\n fn_id=fn_id,\n sender_id=sender_id,\n receiver_id=d.get('receiver', {}).get('id'),\n content=content,\n mention_user_ids=d.get('mentions', []),\n pub_date=str_date_to_float(d['timestamp']),\n attachment=d.get('attachment'),\n files_dir=files_dir,\n )\n\n raw_messages = []\n\n for d in flat_data:\n raw_message = get_raw_message(d)\n if raw_message is not None:\n raw_messages.append(raw_message)\n\n return raw_messages\n\n raw_messages = get_raw_messages(fn)\n\n def process_batch(lst: List[Any]) -> None:\n process_raw_message_batch(\n realm_id=realm_id,\n raw_messages=lst,\n subscriber_map=subscriber_map,\n user_id_mapper=user_id_mapper,\n user_handler=user_handler,\n attachment_handler=attachment_handler,\n get_recipient_id=get_recipient_id,\n is_pm_data=is_pm_data,\n output_dir=output_dir,\n )\n\n chunk_size = 1000\n\n process_list_in_batches(\n lst=raw_messages,\n chunk_size=chunk_size,\n process_batch=process_batch,\n )\n\ndef process_raw_message_batch(realm_id: int,\n raw_messages: List[Dict[str, Any]],\n subscriber_map: Dict[int, Set[int]],\n user_id_mapper: IdMapper,\n user_handler: UserHandler,\n attachment_handler: AttachmentHandler,\n get_recipient_id: Callable[[ZerverFieldsT], int],\n is_pm_data: bool,\n output_dir: str) -> None:\n\n def fix_mentions(content: str,\n mention_user_ids: Set[int]) -> str:\n for user_id in mention_user_ids:\n user = user_handler.get_user(user_id=user_id)\n hipchat_mention = '@{short_name}'.format(**user)\n zulip_mention = '@**{full_name}**'.format(**user)\n content = content.replace(hipchat_mention, zulip_mention)\n\n content = content.replace('@here', '@**all**')\n return content\n\n mention_map = dict() # type: Dict[int, Set[int]]\n\n def make_message(message_id: int, raw_message: ZerverFieldsT) -> ZerverFieldsT:\n # One side effect here:\n mention_user_ids = {\n user_id_mapper.get(id)\n for id in set(raw_message['mention_user_ids'])\n if user_id_mapper.has(id)\n }\n mention_map[message_id] = mention_user_ids\n\n content = fix_mentions(\n content=raw_message['content'],\n mention_user_ids=mention_user_ids,\n )\n pub_date = raw_message['pub_date']\n recipient_id = get_recipient_id(raw_message)\n rendered_content = None\n\n if is_pm_data:\n topic_name = ''\n else:\n topic_name = 'imported from hipchat'\n user_id = raw_message['sender_id']\n\n # Another side effect:\n extra_content = attachment_handler.handle_message_data(\n realm_id=realm_id,\n message_id=message_id,\n sender_id=user_id,\n attachment=raw_message['attachment'],\n files_dir=raw_message['files_dir'],\n )\n\n if extra_content:\n has_attachment = True\n content += '\\n' + extra_content\n else:\n has_attachment = False\n\n return build_message(\n content=content,\n message_id=message_id,\n pub_date=pub_date,\n recipient_id=recipient_id,\n rendered_content=rendered_content,\n topic_name=topic_name,\n user_id=user_id,\n has_attachment=has_attachment,\n )\n\n zerver_message = [\n make_message(\n message_id=NEXT_ID('message'),\n raw_message=raw_message\n )\n for raw_message in raw_messages\n ]\n\n zerver_usermessage = make_user_messages(\n zerver_message=zerver_message,\n subscriber_map=subscriber_map,\n 
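The masking_content branch is just two character-class substitutions; digits, whitespace, and punctuation pass through, which anonymizes the text while preserving message shape:

import re

def mask(content):
    # Lowercase letters become 'x', uppercase become 'X'.
    content = re.sub('[a-z]', 'x', content)
    content = re.sub('[A-Z]', 'X', content)
    return content

assert mask('Deploy v2 at 16:00!') == 'Xxxxxx x2 xx 16:00!'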
is_pm_data=is_pm_data,\n mention_map=mention_map,\n )\n\n message_json = dict(\n zerver_message=zerver_message,\n zerver_usermessage=zerver_usermessage,\n )\n\n dump_file_id = NEXT_ID('dump_file_id')\n message_file = \"/messages-%06d.json\" % (dump_file_id,)\n create_converted_data_files(message_json, output_dir, message_file)\n\ndef make_user_messages(zerver_message: List[ZerverFieldsT],\n subscriber_map: Dict[int, Set[int]],\n is_pm_data: bool,\n mention_map: Dict[int, Set[int]]) -> List[ZerverFieldsT]:\n\n zerver_usermessage = []\n\n for message in zerver_message:\n message_id = message['id']\n recipient_id = message['recipient']\n sender_id = message['sender']\n mention_user_ids = mention_map[message_id]\n subscriber_ids = subscriber_map.get(recipient_id, set())\n user_ids = subscriber_ids | {sender_id}\n\n for user_id in user_ids:\n is_mentioned = user_id in mention_user_ids\n user_message = build_user_message(\n user_id=user_id,\n message_id=message_id,\n is_private=is_pm_data,\n is_mentioned=is_mentioned,\n )\n zerver_usermessage.append(user_message)\n\n return zerver_usermessage\n\ndef do_convert_data(input_tar_file: str,\n output_dir: str,\n masking_content: bool) -> None:\n input_data_dir = untar_input_file(input_tar_file)\n\n attachment_handler = AttachmentHandler()\n user_handler = UserHandler()\n subscriber_handler = SubscriberHandler()\n user_id_mapper = IdMapper()\n stream_id_mapper = IdMapper()\n\n realm_id = 0\n realm = make_realm(realm_id=realm_id)\n\n # users.json -> UserProfile\n raw_user_data = read_user_data(data_dir=input_data_dir)\n convert_user_data(\n user_handler=user_handler,\n user_id_mapper=user_id_mapper,\n raw_data=raw_user_data,\n realm_id=realm_id,\n )\n normal_users = user_handler.get_normal_users()\n # Don't write zerver_userprofile here, because we\n # may add more users later.\n\n # streams.json -> Stream\n raw_stream_data = read_room_data(data_dir=input_data_dir)\n zerver_stream = convert_room_data(\n raw_data=raw_stream_data,\n subscriber_handler=subscriber_handler,\n stream_id_mapper=stream_id_mapper,\n user_id_mapper=user_id_mapper,\n realm_id=realm_id,\n )\n realm['zerver_stream'] = zerver_stream\n\n zerver_recipient = build_recipients(\n zerver_userprofile=normal_users,\n zerver_stream=zerver_stream,\n )\n realm['zerver_recipient'] = zerver_recipient\n\n public_stream_subscriptions = build_public_stream_subscriptions(\n zerver_userprofile=normal_users,\n zerver_recipient=zerver_recipient,\n zerver_stream=zerver_stream,\n )\n\n private_stream_subscriptions = build_private_stream_subscriptions(\n get_users=subscriber_handler.get_users,\n zerver_recipient=zerver_recipient,\n zerver_stream=zerver_stream,\n )\n\n personal_subscriptions = build_personal_subscriptions(\n zerver_recipient=zerver_recipient,\n )\n\n zerver_subscription = \\\n public_stream_subscriptions + \\\n personal_subscriptions + \\\n private_stream_subscriptions\n\n realm['zerver_subscription'] = zerver_subscription\n\n zerver_realmemoji = write_emoticon_data(\n realm_id=realm_id,\n data_dir=input_data_dir,\n output_dir=output_dir,\n )\n realm['zerver_realmemoji'] = zerver_realmemoji\n\n subscriber_map = make_subscriber_map(\n zerver_subscription=zerver_subscription,\n )\n\n logging.info('Start importing message data')\n for message_key in ['UserMessage',\n 'NotificationMessage',\n 'PrivateUserMessage']:\n write_message_data(\n realm_id=realm_id,\n message_key=message_key,\n zerver_recipient=zerver_recipient,\n subscriber_map=subscriber_map,\n data_dir=input_data_dir,\n 
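make_user_messages above fans each message out to its audience; the same logic in miniature, with made-up ids:

def fan_out(message, subscriber_map, mentioned):
    # Every subscriber of the message's recipient, plus the sender,
    # gets a UserMessage row, flagged if they were mentioned.
    user_ids = subscriber_map.get(message['recipient'], set()) | {message['sender']}
    return [
        dict(user_id=user_id,
             message_id=message['id'],
             is_mentioned=user_id in mentioned)
        for user_id in sorted(user_ids)
    ]

rows = fan_out({'id': 1, 'recipient': 10, 'sender': 3},
               subscriber_map={10: {3, 4}}, mentioned={4})
assert [r['user_id'] for r in rows] == [3, 4]
assert rows[1]['is_mentioned']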
output_dir=output_dir,\n masking_content=masking_content,\n stream_id_mapper=stream_id_mapper,\n user_id_mapper=user_id_mapper,\n user_handler=user_handler,\n attachment_handler=attachment_handler,\n )\n\n # Order is important here...don't write users until\n # we process everything else, since we may introduce\n # mirror users when processing messages.\n realm['zerver_userprofile'] = user_handler.get_all_users()\n realm['sort_by_date'] = True\n\n create_converted_data_files(realm, output_dir, '/realm.json')\n\n logging.info('Start importing avatar data')\n write_avatar_data(\n raw_user_data=raw_user_data,\n output_dir=output_dir,\n realm_id=realm_id,\n )\n\n attachment_handler.write_info(\n output_dir=output_dir,\n realm_id=realm_id,\n )\n\n logging.info('Start making tarball')\n subprocess.check_call([\"tar\", \"-czf\", output_dir + '.tar.gz', output_dir, '-P'])\n logging.info('Done making tarball')\n"},"type_annotations":{"kind":"list like","value":["str","str","str","UserHandler","IdMapper","List[ZerverFieldsT]","int","ZerverFieldsT","str","List[ZerverFieldsT]","int","ZerverFieldsT","str","List[ZerverFieldsT]","SubscriberHandler","IdMapper","IdMapper","int","str","ZerverFieldsT","int","List[ZerverFieldsT]","str","int","int","str","str","ZerverFieldsT","int","str","List[ZerverFieldsT]","Dict[int, Set[int]]","str","str","bool","IdMapper","IdMapper","UserHandler","AttachmentHandler","ZerverFieldsT","ZerverFieldsT","int","Dict[str, Any]","IdMapper","UserHandler","int","str","str","str","Callable[[ZerverFieldsT], int]","str","Dict[int, Set[int]]","str","str","bool","bool","IdMapper","UserHandler","AttachmentHandler","str","Dict[str, Any]","List[Any]","int","List[Dict[str, Any]]","Dict[int, Set[int]]","IdMapper","UserHandler","AttachmentHandler","Callable[[ZerverFieldsT], int]","bool","str","str","Set[int]","int","ZerverFieldsT","List[ZerverFieldsT]","Dict[int, Set[int]]","bool","Dict[int, Set[int]]","str","str","bool"],"string":"[\n \"str\",\n \"str\",\n \"str\",\n \"UserHandler\",\n \"IdMapper\",\n \"List[ZerverFieldsT]\",\n \"int\",\n \"ZerverFieldsT\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"int\",\n \"ZerverFieldsT\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"SubscriberHandler\",\n \"IdMapper\",\n \"IdMapper\",\n \"int\",\n \"str\",\n \"ZerverFieldsT\",\n \"int\",\n \"List[ZerverFieldsT]\",\n \"str\",\n \"int\",\n \"int\",\n \"str\",\n \"str\",\n \"ZerverFieldsT\",\n \"int\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"Dict[int, Set[int]]\",\n \"str\",\n \"str\",\n \"bool\",\n \"IdMapper\",\n \"IdMapper\",\n \"UserHandler\",\n \"AttachmentHandler\",\n \"ZerverFieldsT\",\n \"ZerverFieldsT\",\n \"int\",\n \"Dict[str, Any]\",\n \"IdMapper\",\n \"UserHandler\",\n \"int\",\n \"str\",\n \"str\",\n \"str\",\n \"Callable[[ZerverFieldsT], int]\",\n \"str\",\n \"Dict[int, Set[int]]\",\n \"str\",\n \"str\",\n \"bool\",\n \"bool\",\n \"IdMapper\",\n \"UserHandler\",\n \"AttachmentHandler\",\n \"str\",\n \"Dict[str, Any]\",\n \"List[Any]\",\n \"int\",\n \"List[Dict[str, Any]]\",\n \"Dict[int, Set[int]]\",\n \"IdMapper\",\n \"UserHandler\",\n \"AttachmentHandler\",\n \"Callable[[ZerverFieldsT], int]\",\n \"bool\",\n \"str\",\n \"str\",\n \"Set[int]\",\n \"int\",\n \"ZerverFieldsT\",\n \"List[ZerverFieldsT]\",\n \"Dict[int, Set[int]]\",\n \"bool\",\n \"Dict[int, Set[int]]\",\n \"str\",\n \"str\",\n \"bool\"\n]"},"type_annotation_starts":{"kind":"list 
like","value":[1196,1617,2077,2275,2326,2368,2421,2529,4479,4518,4573,5193,5655,5858,5921,5980,6028,6070,6191,6397,7623,8029,8084,8121,8578,8617,8658,10203,11627,11668,11714,11774,11828,11868,11913,11960,12009,12056,12112,12478,12716,14393,14438,14496,14546,15804,15838,15875,15916,15964,16034,16080,16136,16178,16220,16268,16315,16364,16422,16480,16720,18086,18699,18748,18816,18883,18937,19000,19067,19141,19189,19234,19278,19756,19774,21973,22033,22089,22131,22946,22983,23025],"string":"[\n 1196,\n 1617,\n 2077,\n 2275,\n 2326,\n 2368,\n 2421,\n 2529,\n 4479,\n 4518,\n 4573,\n 5193,\n 5655,\n 5858,\n 5921,\n 5980,\n 6028,\n 6070,\n 6191,\n 6397,\n 7623,\n 8029,\n 8084,\n 8121,\n 8578,\n 8617,\n 8658,\n 10203,\n 11627,\n 11668,\n 11714,\n 11774,\n 11828,\n 11868,\n 11913,\n 11960,\n 12009,\n 12056,\n 12112,\n 12478,\n 12716,\n 14393,\n 14438,\n 14496,\n 14546,\n 15804,\n 15838,\n 15875,\n 15916,\n 15964,\n 16034,\n 16080,\n 16136,\n 16178,\n 16220,\n 16268,\n 16315,\n 16364,\n 16422,\n 16480,\n 16720,\n 18086,\n 18699,\n 18748,\n 18816,\n 18883,\n 18937,\n 19000,\n 19067,\n 19141,\n 19189,\n 19234,\n 19278,\n 19756,\n 19774,\n 21973,\n 22033,\n 22089,\n 22131,\n 22946,\n 22983,\n 23025\n]"},"type_annotation_ends":{"kind":"list like","value":[1199,1620,2080,2286,2334,2387,2424,2542,4482,4537,4576,5206,5658,5877,5938,5988,6036,6073,6194,6410,7626,8048,8087,8124,8581,8620,8661,10216,11630,11671,11733,11793,11831,11871,11917,11968,12017,12067,12129,12491,12729,14396,14452,14504,14557,15807,15841,15878,15919,15994,16037,16099,16139,16181,16224,16272,16323,16375,16439,16483,16734,18095,18702,18768,18835,18891,18948,19017,19097,19145,19192,19237,19286,19759,19787,21992,22052,22093,22150,22949,22986,23029],"string":"[\n 1199,\n 1620,\n 2080,\n 2286,\n 2334,\n 2387,\n 2424,\n 2542,\n 4482,\n 4537,\n 4576,\n 5206,\n 5658,\n 5877,\n 5938,\n 5988,\n 6036,\n 6073,\n 6194,\n 6410,\n 7626,\n 8048,\n 8087,\n 8124,\n 8581,\n 8620,\n 8661,\n 10216,\n 11630,\n 11671,\n 11733,\n 11793,\n 11831,\n 11871,\n 11917,\n 11968,\n 12017,\n 12067,\n 12129,\n 12491,\n 12729,\n 14396,\n 14452,\n 14504,\n 14557,\n 15807,\n 15841,\n 15878,\n 15919,\n 15994,\n 16037,\n 16099,\n 16139,\n 16181,\n 16224,\n 16272,\n 16323,\n 16375,\n 16439,\n 16483,\n 16734,\n 18095,\n 18702,\n 18768,\n 18835,\n 18891,\n 18948,\n 19017,\n 19097,\n 19145,\n 19192,\n 19237,\n 19286,\n 19759,\n 19787,\n 21992,\n 22052,\n 22093,\n 22150,\n 22949,\n 22986,\n 23029\n]"}}},{"rowIdx":1330,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/data_import/hipchat_attachment.py"},"contents":{"kind":"string","value":"import logging\nimport shutil\nimport os\n\nfrom zerver.data_import.import_util import (\n build_attachment,\n create_converted_data_files,\n)\n\nfrom typing import Any, Dict, List, Optional\n\nclass AttachmentHandler:\n def __init__(self) -> None:\n self.info_dict = dict() # type: Dict[str, Dict[str, Any]]\n\n def handle_message_data(self,\n realm_id: int,\n message_id: int,\n sender_id: int,\n attachment: Dict[str, Any],\n files_dir: str) -> Optional[str]:\n if not attachment:\n return None\n\n name = attachment['name']\n\n if 'path' not in attachment:\n logging.info('Skipping HipChat attachment with missing path data: ' + name)\n return None\n\n size = attachment['size']\n path = attachment['path']\n\n local_fn = os.path.join(files_dir, path)\n\n target_path = os.path.join(\n str(realm_id),\n 'HipChatImportAttachment',\n path\n )\n\n if target_path in 
self.info_dict:\n logging.info(\"file used multiple times: \" + path)\n info = self.info_dict[target_path]\n info['message_ids'].add(message_id)\n return info['content']\n\n # HipChat provides size info, but it's not\n # completely trustworthy, so we just\n # ask the OS for file details.\n size = os.path.getsize(local_fn)\n mtime = os.path.getmtime(local_fn)\n\n content = '[{name}](/user_uploads/{path})'.format(\n name=name,\n path=target_path,\n )\n\n info = dict(\n message_ids={message_id},\n sender_id=sender_id,\n local_fn=local_fn,\n target_path=target_path,\n name=name,\n size=size,\n mtime=mtime,\n content=content,\n )\n self.info_dict[target_path] = info\n\n return content\n\n def write_info(self, output_dir: str, realm_id: int) -> None:\n attachments = [] # type: List[Dict[str, Any]]\n uploads_records = [] # type: List[Dict[str, Any]]\n\n def add_attachment(info: Dict[str, Any]) -> None:\n build_attachment(\n realm_id=realm_id,\n message_ids=info['message_ids'],\n user_id=info['sender_id'],\n fileinfo=dict(\n created=info['mtime'], # minor lie\n size=info['size'],\n name=info['name'],\n ),\n s3_path=info['target_path'],\n zerver_attachment=attachments,\n )\n\n def add_upload(info: Dict[str, Any]) -> None:\n target_path = info['target_path']\n upload_rec = dict(\n size=info['size'],\n user_profile_id=info['sender_id'],\n realm_id=realm_id,\n s3_path=target_path,\n path=target_path,\n content_type=None,\n )\n uploads_records.append(upload_rec)\n\n def make_full_target_path(info: Dict[str, Any]) -> str:\n target_path = info['target_path']\n full_target_path = os.path.join(\n output_dir,\n 'uploads',\n target_path,\n )\n full_target_path = os.path.abspath(full_target_path)\n os.makedirs(os.path.dirname(full_target_path), exist_ok=True)\n return full_target_path\n\n def copy_file(info: Dict[str, Any]) -> None:\n source_path = info['local_fn']\n target_path = make_full_target_path(info)\n shutil.copyfile(source_path, target_path)\n\n logging.info('Start processing attachment files')\n\n for info in self.info_dict.values():\n add_attachment(info)\n add_upload(info)\n copy_file(info)\n\n uploads_folder = os.path.join(output_dir, 'uploads')\n os.makedirs(os.path.join(uploads_folder, str(realm_id)), exist_ok=True)\n\n attachment = dict(\n zerver_attachment=attachments\n )\n\n create_converted_data_files(uploads_records, output_dir, '/uploads/records.json')\n create_converted_data_files(attachment, output_dir, '/attachment.json')\n\n logging.info('Done processing attachment files')\n"},"type_annotations":{"kind":"list like","value":["int","int","int","Dict[str, Any]","str","str","int","Dict[str, Any]","Dict[str, Any]","Dict[str, Any]","Dict[str, Any]"],"string":"[\n \"int\",\n \"int\",\n \"int\",\n \"Dict[str, Any]\",\n \"str\",\n \"str\",\n \"int\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\"\n]"},"type_annotation_starts":{"kind":"list like","value":[386,431,475,520,575,2040,2055,2217,2719,3150,3567],"string":"[\n 386,\n 431,\n 475,\n 520,\n 575,\n 2040,\n 2055,\n 2217,\n 2719,\n 3150,\n 3567\n]"},"type_annotation_ends":{"kind":"list like","value":[389,434,478,534,578,2043,2058,2231,2733,3164,3581],"string":"[\n 389,\n 434,\n 478,\n 534,\n 578,\n 2043,\n 2058,\n 2231,\n 2733,\n 3164,\n 3581\n]"}}},{"rowIdx":1331,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/data_import/hipchat_subscriber.py"},"contents":{"kind":"string","value":"from typing import Any, Dict, 
Set\n\nclass SubscriberHandler:\n '''\n A note on ids here: we borrow Hipchat ids as Zulip\n ids during the conversion phase. (They get re-mapped\n during import, but that doesn't concern us here.)\n\n So these are all synonymous:\n\n HipChat room_id == Zulip stream_id\n member ids = hipchat user ids = Zulip user_id\n owner id = hipchat user id = Zulip user_id\n\n In this class, when it's somewhat arbitrary whether\n to call something a \"room\" or a \"stream\", we use\n the Zulip naming.\n '''\n def __init__(self) -> None:\n self.stream_info = dict() # type: Dict[int, Dict[str, Any]]\n\n def set_info(self,\n stream_id: int,\n owner: int,\n members: Set[int]) -> None:\n # Our callers are basically giving us\n # data straight out of rooms.json.\n self.stream_info[stream_id] = dict(\n owner=owner,\n members=members,\n )\n\n def get_users(self,\n stream_id: int) -> Set[int]:\n info = self.stream_info[stream_id]\n users = info['members'] | {info['owner']}\n return users\n"},"type_annotations":{"kind":"list like","value":["int","int","Set[int]","int"],"string":"[\n \"int\",\n \"int\",\n \"Set[int]\",\n \"int\"\n]"},"type_annotation_starts":{"kind":"list like","value":[712,741,772,1042],"string":"[\n 712,\n 741,\n 772,\n 1042\n]"},"type_annotation_ends":{"kind":"list like","value":[715,744,780,1045],"string":"[\n 715,\n 744,\n 780,\n 1045\n]"}}},{"rowIdx":1332,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/data_import/hipchat_user.py"},"contents":{"kind":"string","value":"from typing import Any, Dict, List\n\nfrom django.utils.timezone import now as timezone_now\n\nfrom zerver.data_import.import_util import (\n build_user_profile,\n)\n\nclass UserHandler:\n '''\n Our UserHandler class is a glorified wrapper\n around the data that eventually goes into\n zerver_userprofile.\n\n The class helps us do things like map ids\n to names for mentions.\n\n We also sometimes need to build mirror\n users on the fly.\n '''\n def __init__(self) -> None:\n self.id_to_user_map = dict() # type: Dict[int, Dict[str, Any]]\n self.name_to_mirror_user_map = dict() # type: Dict[str, Dict[str, Any]]\n self.mirror_user_id = 1\n\n def add_user(self, user: Dict[str, Any]) -> None:\n user_id = user['id']\n self.id_to_user_map[user_id] = user\n\n def get_user(self, user_id: int) -> Dict[str, Any]:\n user = self.id_to_user_map[user_id]\n return user\n\n def get_mirror_user(self,\n realm_id: int,\n name: str) -> Dict[str, Any]:\n if name in self.name_to_mirror_user_map:\n user = self.name_to_mirror_user_map[name]\n return user\n\n user_id = self._new_mirror_user_id()\n short_name = name\n full_name = name\n email = 'mirror-{user_id}@example.com'.format(user_id=user_id)\n delivery_email = email\n avatar_source = 'G'\n date_joined = int(timezone_now().timestamp())\n timezone = 'UTC'\n\n user = build_user_profile(\n avatar_source=avatar_source,\n date_joined=date_joined,\n delivery_email=delivery_email,\n email=email,\n full_name=full_name,\n id=user_id,\n is_active=False,\n is_realm_admin=False,\n is_guest=False,\n is_mirror_dummy=True,\n realm_id=realm_id,\n short_name=short_name,\n timezone=timezone,\n )\n\n self.name_to_mirror_user_map[name] = user\n return user\n\n def _new_mirror_user_id(self) -> int:\n next_id = self.mirror_user_id\n while next_id in self.id_to_user_map:\n next_id += 1\n self.mirror_user_id = next_id + 1\n return next_id\n\n def get_normal_users(self) -> List[Dict[str, Any]]:\n users = list(self.id_to_user_map.values())\n 
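_new_mirror_user_id above allocates ids for on-the-fly mirror users by skipping anything a real imported user already holds; the same collision-skipping loop as a tiny standalone class (the taken ids are made up):

class MirrorIdAllocator:
    def __init__(self, taken_ids):
        self.taken_ids = taken_ids     # ids already used by real users
        self.next_candidate = 1

    def allocate(self):
        next_id = self.next_candidate
        while next_id in self.taken_ids:   # skip over occupied ids
            next_id += 1
        self.next_candidate = next_id + 1
        return next_id

alloc = MirrorIdAllocator(taken_ids={1, 2})
assert alloc.allocate() == 3   # 1 and 2 are taken
assert alloc.allocate() == 4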
return users\n\n def get_all_users(self) -> List[Dict[str, Any]]:\n normal_users = self.get_normal_users()\n mirror_users = list(self.name_to_mirror_user_map.values())\n all_users = normal_users + mirror_users\n return all_users\n"},"type_annotations":{"kind":"list like","value":["Dict[str, Any]","int","int","str"],"string":"[\n \"Dict[str, Any]\",\n \"int\",\n \"int\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[704,835,988,1023],"string":"[\n 704,\n 835,\n 988,\n 1023\n]"},"type_annotation_ends":{"kind":"list like","value":[718,838,991,1026],"string":"[\n 718,\n 838,\n 991,\n 1026\n]"}}},{"rowIdx":1333,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/data_import/import_util.py"},"contents":{"kind":"string","value":"import random\nimport requests\nimport shutil\nimport logging\nimport os\nimport ujson\n\nfrom typing import List, Dict, Any, Optional, Set, Callable\nfrom django.forms.models import model_to_dict\n\nfrom zerver.models import Realm, RealmEmoji, Subscription, Recipient, \\\n Attachment, Stream, Message, UserProfile\nfrom zerver.data_import.sequencer import NEXT_ID\nfrom zerver.lib.actions import STREAM_ASSIGNMENT_COLORS as stream_colors\nfrom zerver.lib.avatar_hash import user_avatar_path_from_ids\nfrom zerver.lib.parallel import run_parallel\n\n# stubs\nZerverFieldsT = Dict[str, Any]\n\ndef build_zerver_realm(realm_id: int, realm_subdomain: str, time: float,\n other_product: str) -> List[ZerverFieldsT]:\n realm = Realm(id=realm_id, date_created=time,\n name=realm_subdomain, string_id=realm_subdomain,\n description=(\"Organization imported from %s!\" % (other_product)))\n auth_methods = [[flag[0], flag[1]] for flag in realm.authentication_methods]\n realm_dict = model_to_dict(realm, exclude='authentication_methods')\n realm_dict['authentication_methods'] = auth_methods\n return[realm_dict]\n\ndef build_user_profile(avatar_source: str,\n date_joined: Any,\n delivery_email: str,\n email: str,\n full_name: str,\n id: int,\n is_active: bool,\n is_realm_admin: bool,\n is_guest: bool,\n is_mirror_dummy: bool,\n realm_id: int,\n short_name: str,\n timezone: Optional[str]) -> ZerverFieldsT:\n pointer = -1\n obj = UserProfile(\n avatar_source=avatar_source,\n date_joined=date_joined,\n delivery_email=delivery_email,\n email=email,\n full_name=full_name,\n id=id,\n is_active=is_active,\n is_realm_admin=is_realm_admin,\n is_guest=is_guest,\n pointer=pointer,\n realm_id=realm_id,\n short_name=short_name,\n timezone=timezone,\n )\n dct = model_to_dict(obj)\n return dct\n\ndef build_avatar(zulip_user_id: int, realm_id: int, email: str, avatar_url: str,\n timestamp: Any, avatar_list: List[ZerverFieldsT]) -> None:\n avatar = dict(\n path=avatar_url, # Save original avatar url here, which is downloaded later\n realm_id=realm_id,\n content_type=None,\n user_profile_id=zulip_user_id,\n last_modified=timestamp,\n user_profile_email=email,\n s3_path=\"\",\n size=\"\")\n avatar_list.append(avatar)\n\ndef make_subscriber_map(zerver_subscription: List[ZerverFieldsT]) -> Dict[int, Set[int]]:\n '''\n This can be convenient for building up UserMessage\n rows.\n '''\n subscriber_map = dict() # type: Dict[int, Set[int]]\n for sub in zerver_subscription:\n user_id = sub['user_profile']\n recipient_id = sub['recipient']\n if recipient_id not in subscriber_map:\n subscriber_map[recipient_id] = set()\n subscriber_map[recipient_id].add(user_id)\n\n return subscriber_map\n\ndef build_subscription(recipient_id: 
int, user_id: int,\n subscription_id: int) -> ZerverFieldsT:\n subscription = Subscription(\n color=random.choice(stream_colors),\n id=subscription_id)\n subscription_dict = model_to_dict(subscription, exclude=['user_profile', 'recipient_id'])\n subscription_dict['user_profile'] = user_id\n subscription_dict['recipient'] = recipient_id\n return subscription_dict\n\ndef build_public_stream_subscriptions(\n zerver_userprofile: List[ZerverFieldsT],\n zerver_recipient: List[ZerverFieldsT],\n zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:\n '''\n This function is only used for Hipchat now, but it may apply to\n future conversions. We often don't get full subscriber data in\n the Hipchat export, so this function just autosubscribes all\n users to every public stream. This returns a list of Subscription\n dicts.\n '''\n subscriptions = [] # type: List[ZerverFieldsT]\n\n public_stream_ids = {\n stream['id']\n for stream in zerver_stream\n if not stream['invite_only']\n }\n\n public_stream_recipient_ids = {\n recipient['id']\n for recipient in zerver_recipient\n if recipient['type'] == Recipient.STREAM\n and recipient['type_id'] in public_stream_ids\n }\n\n user_ids = [\n user['id']\n for user in zerver_userprofile\n ]\n\n for recipient_id in public_stream_recipient_ids:\n for user_id in user_ids:\n subscription = build_subscription(\n recipient_id=recipient_id,\n user_id=user_id,\n subscription_id=NEXT_ID('subscription'),\n )\n subscriptions.append(subscription)\n\n return subscriptions\n\ndef build_private_stream_subscriptions(\n get_users: Callable[..., Set[int]],\n zerver_recipient: List[ZerverFieldsT],\n zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:\n\n subscriptions = [] # type: List[ZerverFieldsT]\n\n stream_ids = {\n stream['id']\n for stream in zerver_stream\n if stream['invite_only']\n }\n\n recipient_map = {\n recipient['id']: recipient['type_id'] # recipient_id -> stream_id\n for recipient in zerver_recipient\n if recipient['type'] == Recipient.STREAM\n and recipient['type_id'] in stream_ids\n }\n\n for recipient_id, stream_id in recipient_map.items():\n user_ids = get_users(stream_id=stream_id)\n for user_id in user_ids:\n subscription = build_subscription(\n recipient_id=recipient_id,\n user_id=user_id,\n subscription_id=NEXT_ID('subscription'),\n )\n subscriptions.append(subscription)\n\n return subscriptions\n\ndef build_personal_subscriptions(zerver_recipient: List[ZerverFieldsT]) -> List[ZerverFieldsT]:\n\n subscriptions = [] # type: List[ZerverFieldsT]\n\n personal_recipients = [\n recipient\n for recipient in zerver_recipient\n if recipient['type'] == Recipient.PERSONAL\n ]\n\n for recipient in personal_recipients:\n recipient_id = recipient['id']\n user_id = recipient['type_id']\n subscription = build_subscription(\n recipient_id=recipient_id,\n user_id=user_id,\n subscription_id=NEXT_ID('subscription'),\n )\n subscriptions.append(subscription)\n\n return subscriptions\n\ndef build_recipient(type_id: int, recipient_id: int, type: int) -> ZerverFieldsT:\n recipient = Recipient(\n type_id=type_id, # stream id\n id=recipient_id,\n type=type)\n recipient_dict = model_to_dict(recipient)\n return recipient_dict\n\ndef build_recipients(zerver_userprofile: List[ZerverFieldsT],\n zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:\n '''\n As of this writing, we only use this in the HipChat\n conversion. 
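The autosubscribe logic in build_public_stream_subscriptions is a plain cross product of users and public-stream recipients; sketched with invented ids (the real code draws subscription ids from the NEXT_ID sequencer):

import itertools

public_stream_recipient_ids = {10, 11}   # hypothetical recipient rows
user_ids = [1, 2]                        # hypothetical user ids

subscriptions = [
    dict(recipient=recipient_id, user_profile=user_id)
    for recipient_id, user_id in itertools.product(
        sorted(public_stream_recipient_ids), user_ids)
]
assert len(subscriptions) == 4   # 2 streams x 2 users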
The Slack and Gitter conversions do it more\n tightly integrated with creating other objects.\n '''\n\n recipients = []\n\n for user in zerver_userprofile:\n type_id = user['id']\n type = Recipient.PERSONAL\n recipient = Recipient(\n type_id=type_id,\n id=NEXT_ID('recipient'),\n type=type,\n )\n recipient_dict = model_to_dict(recipient)\n recipients.append(recipient_dict)\n\n for stream in zerver_stream:\n type_id = stream['id']\n type = Recipient.STREAM\n recipient = Recipient(\n type_id=type_id,\n id=NEXT_ID('recipient'),\n type=type,\n )\n recipient_dict = model_to_dict(recipient)\n recipients.append(recipient_dict)\n\n return recipients\n\ndef build_realm(zerver_realm: List[ZerverFieldsT], realm_id: int,\n domain_name: str) -> ZerverFieldsT:\n realm = dict(zerver_client=[{\"name\": \"populate_db\", \"id\": 1},\n {\"name\": \"website\", \"id\": 2},\n {\"name\": \"API\", \"id\": 3}],\n zerver_customprofilefield=[],\n zerver_customprofilefieldvalue=[],\n zerver_userpresence=[], # shows last logged in data, which is not available\n zerver_userprofile_mirrordummy=[],\n zerver_realmdomain=[{\"realm\": realm_id,\n \"allow_subdomains\": False,\n \"domain\": domain_name,\n \"id\": realm_id}],\n zerver_useractivity=[],\n zerver_realm=zerver_realm,\n zerver_huddle=[],\n zerver_userprofile_crossrealm=[],\n zerver_useractivityinterval=[],\n zerver_reaction=[],\n zerver_realmemoji=[],\n zerver_realmfilter=[])\n return realm\n\ndef build_usermessages(zerver_usermessage: List[ZerverFieldsT],\n subscriber_map: Dict[int, Set[int]],\n recipient_id: int,\n mentioned_user_ids: List[int],\n message_id: int) -> None:\n user_ids = subscriber_map.get(recipient_id, set())\n\n if user_ids:\n for user_id in sorted(user_ids):\n is_mentioned = user_id in mentioned_user_ids\n\n # Slack and Gitter don't yet triage private messages.\n # It's possible we don't even get PMs from them.\n is_private = False\n\n usermessage = build_user_message(\n user_id=user_id,\n message_id=message_id,\n is_private=is_private,\n is_mentioned=is_mentioned,\n )\n\n zerver_usermessage.append(usermessage)\n\ndef build_user_message(user_id: int,\n message_id: int,\n is_private: bool,\n is_mentioned: bool) -> ZerverFieldsT:\n flags_mask = 1 # For read\n if is_mentioned:\n flags_mask += 8 # For mentioned\n if is_private:\n flags_mask += 2048 # For is_private\n\n id = NEXT_ID('user_message')\n\n usermessage = dict(\n id=id,\n user_profile=user_id,\n message=message_id,\n flags_mask=flags_mask,\n )\n return usermessage\n\ndef build_defaultstream(realm_id: int, stream_id: int,\n defaultstream_id: int) -> ZerverFieldsT:\n defaultstream = dict(\n stream=stream_id,\n realm=realm_id,\n id=defaultstream_id)\n return defaultstream\n\ndef build_stream(date_created: Any, realm_id: int, name: str,\n description: str, stream_id: int, deactivated: bool=False,\n invite_only: bool=False) -> ZerverFieldsT:\n stream = Stream(\n name=name,\n deactivated=deactivated,\n description=description,\n date_created=date_created,\n invite_only=invite_only,\n id=stream_id)\n stream_dict = model_to_dict(stream,\n exclude=['realm'])\n stream_dict['realm'] = realm_id\n return stream_dict\n\ndef build_message(topic_name: str, pub_date: float, message_id: int, content: str,\n rendered_content: Optional[str], user_id: int, recipient_id: int,\n has_image: bool=False, has_link: bool=False,\n has_attachment: bool=True) -> ZerverFieldsT:\n zulip_message = Message(\n rendered_content_version=1, # this is Zulip specific\n pub_date=pub_date,\n id=message_id,\n content=content,\n 
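build_user_message above encodes UserMessage flags as a bitmask: 1 for read, 8 for mentioned, 2048 for is_private. A quick check of the arithmetic:

def flags_mask(is_mentioned, is_private):
    mask = 1                    # every imported message starts as read
    if is_mentioned:
        mask += 8
    if is_private:
        mask += 2048
    return mask

assert flags_mask(True, False) == 9
assert flags_mask(True, True) == 2057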
rendered_content=rendered_content,\n has_image=has_image,\n has_attachment=has_attachment,\n has_link=has_link)\n zulip_message.set_topic_name(topic_name)\n zulip_message_dict = model_to_dict(zulip_message,\n exclude=['recipient', 'sender', 'sending_client'])\n zulip_message_dict['sender'] = user_id\n zulip_message_dict['sending_client'] = 1\n zulip_message_dict['recipient'] = recipient_id\n\n return zulip_message_dict\n\ndef build_attachment(realm_id: int, message_ids: Set[int],\n user_id: int, fileinfo: ZerverFieldsT, s3_path: str,\n zerver_attachment: List[ZerverFieldsT]) -> None:\n \"\"\"\n This function should be passed a 'fileinfo' dictionary, which contains\n information about 'size', 'created' (created time) and ['name'] (filename).\n \"\"\"\n attachment_id = NEXT_ID('attachment')\n\n attachment = Attachment(\n id=attachment_id,\n size=fileinfo['size'],\n create_time=fileinfo['created'],\n is_realm_public=True,\n path_id=s3_path,\n file_name=fileinfo['name'])\n\n attachment_dict = model_to_dict(attachment,\n exclude=['owner', 'messages', 'realm'])\n attachment_dict['owner'] = user_id\n attachment_dict['messages'] = list(message_ids)\n attachment_dict['realm'] = realm_id\n\n zerver_attachment.append(attachment_dict)\n\ndef process_avatars(avatar_list: List[ZerverFieldsT], avatar_dir: str, realm_id: int,\n threads: int, size_url_suffix: str='') -> List[ZerverFieldsT]:\n \"\"\"\n This function gets the avatar of the user and saves it in the\n user's avatar directory with both the extensions '.png' and '.original'\n Required parameters:\n\n 1. avatar_list: List of avatars to be mapped in avatars records.json file\n 2. avatar_dir: Folder where the downloaded avatars are saved\n 3. realm_id: Realm ID.\n\n We use this for Slack and Gitter conversions, where avatars need to be\n downloaded. 
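build_attachment, shown above, expects a three-key fileinfo dict; the values and s3_path below are placeholders that just illustrate the shape of the row it builds:

fileinfo = dict(
    created=1533738234.0,   # upload time as a unix timestamp
    size=2048,              # bytes
    name='diagram.png',     # original filename
)
s3_path = '0/HipChatImportAttachment/abc/diagram.png'   # hypothetical path

attachment_row = dict(
    size=fileinfo['size'],
    create_time=fileinfo['created'],
    is_realm_public=True,
    path_id=s3_path,
    file_name=fileinfo['name'],
)
assert attachment_row['path_id'].split('/')[0] == '0'   # realm id prefix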
For simpler conversions see write_avatar_png.\n \"\"\"\n\n def get_avatar(avatar_upload_item: List[str]) -> int:\n avatar_url = avatar_upload_item[0]\n\n image_path = os.path.join(avatar_dir, avatar_upload_item[1])\n original_image_path = os.path.join(avatar_dir, avatar_upload_item[2])\n\n response = requests.get(avatar_url + size_url_suffix, stream=True)\n with open(image_path, 'wb') as image_file:\n shutil.copyfileobj(response.raw, image_file)\n shutil.copy(image_path, original_image_path)\n return 0\n\n logging.info('######### GETTING AVATARS #########\\n')\n logging.info('DOWNLOADING AVATARS .......\\n')\n avatar_original_list = []\n avatar_upload_list = []\n for avatar in avatar_list:\n avatar_hash = user_avatar_path_from_ids(avatar['user_profile_id'], realm_id)\n avatar_url = avatar['path']\n avatar_original = dict(avatar)\n\n image_path = ('%s.png' % (avatar_hash))\n original_image_path = ('%s.original' % (avatar_hash))\n\n avatar_upload_list.append([avatar_url, image_path, original_image_path])\n # We don't add the size field here in avatar's records.json,\n # since the metadata is not needed on the import end, and we\n # don't have it until we've downloaded the files anyway.\n avatar['path'] = image_path\n avatar['s3_path'] = image_path\n\n avatar_original['path'] = original_image_path\n avatar_original['s3_path'] = original_image_path\n avatar_original_list.append(avatar_original)\n\n # Run downloads parallely\n output = []\n for (status, job) in run_parallel(get_avatar, avatar_upload_list, threads=threads):\n output.append(job)\n\n logging.info('######### GETTING AVATARS FINISHED #########\\n')\n return avatar_list + avatar_original_list\n\ndef write_avatar_png(avatar_folder: str,\n realm_id: int,\n user_id: int,\n bits: bytes) -> ZerverFieldsT:\n '''\n Use this function for conversions like Hipchat where\n the bits for the .png file come in something like\n a users.json file, and where we don't have to\n fetch avatar images externally.\n '''\n avatar_hash = user_avatar_path_from_ids(\n user_profile_id=user_id,\n realm_id=realm_id,\n )\n\n image_fn = avatar_hash + '.original'\n image_path = os.path.join(avatar_folder, image_fn)\n\n with open(image_path, 'wb') as image_file:\n image_file.write(bits)\n\n # Return metadata that eventually goes in records.json.\n metadata = dict(\n path=image_path,\n s3_path=image_path,\n realm_id=realm_id,\n user_profile_id=user_id,\n )\n\n return metadata\n\ndef process_uploads(upload_list: List[ZerverFieldsT], upload_dir: str,\n threads: int) -> List[ZerverFieldsT]:\n \"\"\"\n This function downloads the uploads and saves it in the realm's upload directory.\n Required parameters:\n\n 1. upload_list: List of uploads to be mapped in uploads records.json file\n 2. 
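process_avatars delegates its download fan-out to Zulip's run_parallel helper; the equivalent shape with the stdlib, using a stubbed job so the sketch runs offline, would look roughly like this:

from concurrent.futures import ThreadPoolExecutor

def get_avatar(job):
    # In the real code: download job[0], save it to job[1], and copy
    # it to job[2] (the '.original' path). Stubbed out here.
    return 0

jobs = [['https://example.com/a.png', 'hash1.png', 'hash1.original']]
with ThreadPoolExecutor(max_workers=6) as pool:
    statuses = list(pool.map(get_avatar, jobs))
assert statuses == [0]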
upload_dir: Folder where the downloaded uploads are saved\n \"\"\"\n def get_uploads(upload: List[str]) -> int:\n upload_url = upload[0]\n upload_path = upload[1]\n upload_path = os.path.join(upload_dir, upload_path)\n\n response = requests.get(upload_url, stream=True)\n os.makedirs(os.path.dirname(upload_path), exist_ok=True)\n with open(upload_path, 'wb') as upload_file:\n shutil.copyfileobj(response.raw, upload_file)\n return 0\n\n logging.info('######### GETTING ATTACHMENTS #########\\n')\n logging.info('DOWNLOADING ATTACHMENTS .......\\n')\n upload_url_list = []\n for upload in upload_list:\n upload_url = upload['path']\n upload_s3_path = upload['s3_path']\n upload_url_list.append([upload_url, upload_s3_path])\n upload['path'] = upload_s3_path\n\n # Run downloads parallely\n output = []\n for (status, job) in run_parallel(get_uploads, upload_url_list, threads=threads):\n output.append(job)\n\n logging.info('######### GETTING ATTACHMENTS FINISHED #########\\n')\n return upload_list\n\ndef build_realm_emoji(realm_id: int,\n name: str,\n id: int,\n file_name: str) -> ZerverFieldsT:\n return model_to_dict(\n RealmEmoji(\n realm_id=realm_id,\n name=name,\n id=id,\n file_name=file_name,\n )\n )\n\ndef process_emojis(zerver_realmemoji: List[ZerverFieldsT], emoji_dir: str,\n emoji_url_map: ZerverFieldsT, threads: int) -> List[ZerverFieldsT]:\n \"\"\"\n This function downloads the custom emojis and saves in the output emoji folder.\n Required parameters:\n\n 1. zerver_realmemoji: List of all RealmEmoji objects to be imported\n 2. emoji_dir: Folder where the downloaded emojis are saved\n 3. emoji_url_map: Maps emoji name to its url\n \"\"\"\n def get_emojis(upload: List[str]) -> int:\n emoji_url = upload[0]\n emoji_path = upload[1]\n upload_emoji_path = os.path.join(emoji_dir, emoji_path)\n\n response = requests.get(emoji_url, stream=True)\n os.makedirs(os.path.dirname(upload_emoji_path), exist_ok=True)\n with open(upload_emoji_path, 'wb') as emoji_file:\n shutil.copyfileobj(response.raw, emoji_file)\n return 0\n\n emoji_records = []\n upload_emoji_list = []\n logging.info('######### GETTING EMOJIS #########\\n')\n logging.info('DOWNLOADING EMOJIS .......\\n')\n for emoji in zerver_realmemoji:\n emoji_url = emoji_url_map[emoji['name']]\n emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(\n realm_id=emoji['realm'],\n emoji_file_name=emoji['name'])\n\n upload_emoji_list.append([emoji_url, emoji_path])\n\n emoji_record = dict(emoji)\n emoji_record['path'] = emoji_path\n emoji_record['s3_path'] = emoji_path\n emoji_record['realm_id'] = emoji_record['realm']\n emoji_record.pop('realm')\n\n emoji_records.append(emoji_record)\n\n # Run downloads parallely\n output = []\n for (status, job) in run_parallel(get_emojis, upload_emoji_list, threads=threads):\n output.append(job)\n\n logging.info('######### GETTING EMOJIS FINISHED #########\\n')\n return emoji_records\n\ndef create_converted_data_files(data: Any, output_dir: str, file_path: str) -> None:\n output_file = output_dir + file_path\n os.makedirs(os.path.dirname(output_file), exist_ok=True)\n with open(output_file, 'w') as fp:\n ujson.dump(data, fp, indent=4)\n"},"type_annotations":{"kind":"list like","value":["int","str","float","str","str","Any","str","str","str","int","bool","bool","bool","bool","int","str","Optional[str]","int","int","str","str","Any","List[ZerverFieldsT]","List[ZerverFieldsT]","int","int","int","List[ZerverFieldsT]","List[ZerverFieldsT]","List[ZerverFieldsT]","Callable[..., 
Set[int]]","List[ZerverFieldsT]","List[ZerverFieldsT]","List[ZerverFieldsT]","int","int","int","List[ZerverFieldsT]","List[ZerverFieldsT]","List[ZerverFieldsT]","int","str","List[ZerverFieldsT]","Dict[int, Set[int]]","int","List[int]","int","int","int","bool","bool","int","int","int","Any","int","str","str","int","str","float","int","str","Optional[str]","int","int","int","Set[int]","int","ZerverFieldsT","str","List[ZerverFieldsT]","List[ZerverFieldsT]","str","int","int","List[str]","str","int","int","bytes","List[ZerverFieldsT]","str","int","List[str]","int","str","int","str","List[ZerverFieldsT]","str","ZerverFieldsT","int","List[str]","Any","str","str"],"string":"[\n \"int\",\n \"str\",\n \"float\",\n \"str\",\n \"str\",\n \"Any\",\n \"str\",\n \"str\",\n \"str\",\n \"int\",\n \"bool\",\n \"bool\",\n \"bool\",\n \"bool\",\n \"int\",\n \"str\",\n \"Optional[str]\",\n \"int\",\n \"int\",\n \"str\",\n \"str\",\n \"Any\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"int\",\n \"int\",\n \"int\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"Callable[..., Set[int]]\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"int\",\n \"int\",\n \"int\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"int\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"Dict[int, Set[int]]\",\n \"int\",\n \"List[int]\",\n \"int\",\n \"int\",\n \"int\",\n \"bool\",\n \"bool\",\n \"int\",\n \"int\",\n \"int\",\n \"Any\",\n \"int\",\n \"str\",\n \"str\",\n \"int\",\n \"str\",\n \"float\",\n \"int\",\n \"str\",\n \"Optional[str]\",\n \"int\",\n \"int\",\n \"int\",\n \"Set[int]\",\n \"int\",\n \"ZerverFieldsT\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"str\",\n \"int\",\n \"int\",\n \"List[str]\",\n \"str\",\n \"int\",\n \"int\",\n \"bytes\",\n \"List[ZerverFieldsT]\",\n \"str\",\n \"int\",\n \"List[str]\",\n \"int\",\n \"str\",\n \"int\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"str\",\n \"ZerverFieldsT\",\n \"int\",\n \"List[str]\",\n \"Any\",\n \"str\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[609,631,642,687,1188,1229,1273,1308,1347,1379,1418,1463,1502,1548,1587,1627,1665,2200,2215,2227,2244,2277,2295,2703,3211,3225,3270,3687,3734,3778,5009,5060,5104,6021,6659,6678,6689,6935,6992,7938,7969,8003,9094,9154,9212,9260,9306,9950,9990,10030,10073,10481,10497,10544,10729,10744,10755,10790,10806,11273,11288,11307,11321,11362,11386,11405,12231,12249,12289,12304,12328,12373,13172,13205,13220,13254,13841,15612,15648,15683,15715,16496,16529,16563,16891,17925,17958,17989,18027,18257,18289,18328,18352,18718,20110,20127,20143],"string":"[\n 609,\n 631,\n 642,\n 687,\n 1188,\n 1229,\n 1273,\n 1308,\n 1347,\n 1379,\n 1418,\n 1463,\n 1502,\n 1548,\n 1587,\n 1627,\n 1665,\n 2200,\n 2215,\n 2227,\n 2244,\n 2277,\n 2295,\n 2703,\n 3211,\n 3225,\n 3270,\n 3687,\n 3734,\n 3778,\n 5009,\n 5060,\n 5104,\n 6021,\n 6659,\n 6678,\n 6689,\n 6935,\n 6992,\n 7938,\n 7969,\n 8003,\n 9094,\n 9154,\n 9212,\n 9260,\n 9306,\n 9950,\n 9990,\n 10030,\n 10073,\n 10481,\n 10497,\n 10544,\n 10729,\n 10744,\n 10755,\n 10790,\n 10806,\n 11273,\n 11288,\n 11307,\n 11321,\n 11362,\n 11386,\n 11405,\n 12231,\n 12249,\n 12289,\n 12304,\n 12328,\n 12373,\n 13172,\n 13205,\n 13220,\n 13254,\n 13841,\n 15612,\n 15648,\n 15683,\n 15715,\n 16496,\n 16529,\n 16563,\n 16891,\n 17925,\n 17958,\n 17989,\n 18027,\n 18257,\n 18289,\n 18328,\n 18352,\n 18718,\n 20110,\n 20127,\n 
20143\n]"},"type_annotation_ends":{"kind":"list like","value":[612,634,647,690,1191,1232,1276,1311,1350,1382,1422,1467,1506,1552,1590,1630,1678,2203,2218,2230,2247,2280,2314,2722,3214,3228,3273,3706,3753,3797,5032,5079,5123,6040,6662,6681,6692,6954,7011,7957,7972,8006,9113,9173,9215,9269,9309,9953,9993,10034,10077,10484,10500,10547,10732,10747,10758,10793,10809,11276,11293,11310,11324,11375,11389,11408,12234,12257,12292,12317,12331,12392,13191,13208,13223,13257,13850,15615,15651,15686,15720,16515,16532,16566,16900,17928,17961,17992,18030,18276,18292,18341,18355,18727,20113,20130,20146],"string":"[\n 612,\n 634,\n 647,\n 690,\n 1191,\n 1232,\n 1276,\n 1311,\n 1350,\n 1382,\n 1422,\n 1467,\n 1506,\n 1552,\n 1590,\n 1630,\n 1678,\n 2203,\n 2218,\n 2230,\n 2247,\n 2280,\n 2314,\n 2722,\n 3214,\n 3228,\n 3273,\n 3706,\n 3753,\n 3797,\n 5032,\n 5079,\n 5123,\n 6040,\n 6662,\n 6681,\n 6692,\n 6954,\n 7011,\n 7957,\n 7972,\n 8006,\n 9113,\n 9173,\n 9215,\n 9269,\n 9309,\n 9953,\n 9993,\n 10034,\n 10077,\n 10484,\n 10500,\n 10547,\n 10732,\n 10747,\n 10758,\n 10793,\n 10809,\n 11276,\n 11293,\n 11310,\n 11324,\n 11375,\n 11389,\n 11408,\n 12234,\n 12257,\n 12292,\n 12317,\n 12331,\n 12392,\n 13191,\n 13208,\n 13223,\n 13257,\n 13850,\n 15615,\n 15651,\n 15686,\n 15720,\n 16515,\n 16532,\n 16566,\n 16900,\n 17928,\n 17961,\n 17992,\n 18030,\n 18276,\n 18292,\n 18341,\n 18355,\n 18727,\n 20113,\n 20130,\n 20146\n]"}}},{"rowIdx":1334,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/data_import/sequencer.py"},"contents":{"kind":"string","value":"from typing import Any, Callable, Dict\n\n'''\nThis module helps you set up a bunch\nof sequences, similar to how database\nsequences work.\n\nYou need to be a bit careful here, since\nyou're dealing with a big singleton, but\nfor data imports that's usually easy to\nmanage. See hipchat.py for example usage.\n'''\n\ndef _seq() -> Callable[[], int]:\n i = 0\n\n def next_one() -> int:\n nonlocal i\n i += 1\n return i\n\n return next_one\n\ndef sequencer() -> Callable[[str], int]:\n '''\n Use like this:\n\n NEXT_ID = sequencer()\n message_id = NEXT_ID('message')\n '''\n seq_dict = dict() # type: Dict[str, Callable[[], int]]\n\n def next_one(name: str) -> int:\n if name not in seq_dict:\n seq_dict[name] = _seq()\n seq = seq_dict[name]\n return seq()\n\n return next_one\n\n'''\nNEXT_ID is a singleton used by an entire process, which is\nalmost always reasonable. 
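Concretely, the sequencer defined above behaves like this (a small, self-contained illustration; the real module exposes the NEXT_ID singleton described in this docstring, but a fresh local one is created here for clarity):

    NEXT_ID = sequencer()

    assert NEXT_ID('message') == 1
    assert NEXT_ID('message') == 2
    assert NEXT_ID('user') == 1      # each name gets its own independent counter
    assert NEXT_ID('message') == 3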
If you want to have two parallel\nsequences, just use different `name` values.\n\nThis object gets created once and only once during the first\nimport of the file.\n'''\n\nNEXT_ID = sequencer()\n\ndef is_int(key: Any) -> bool:\n try:\n n = int(key)\n except ValueError:\n return False\n\n return n <= 999999999\n\nclass IdMapper:\n def __init__(self) -> None:\n self.map = dict() # type: Dict[Any, int]\n self.cnt = 0\n\n def has(self, their_id: Any) -> bool:\n return their_id in self.map\n\n def get(self, their_id: Any) -> int:\n if their_id in self.map:\n return self.map[their_id]\n\n if is_int(their_id):\n our_id = int(their_id)\n if self.cnt > 0:\n raise Exception('mixed key styles')\n else:\n self.cnt += 1\n our_id = self.cnt\n\n self.map[their_id] = our_id\n return our_id\n"},"type_annotations":{"kind":"list like","value":["str","Any","Any","Any"],"string":"[\n \"str\",\n \"Any\",\n \"Any\",\n \"Any\"\n]"},"type_annotation_starts":{"kind":"list like","value":[685,1133,1397,1476],"string":"[\n 685,\n 1133,\n 1397,\n 1476\n]"},"type_annotation_ends":{"kind":"list like","value":[688,1136,1400,1479],"string":"[\n 688,\n 1136,\n 1400,\n 1479\n]"}}},{"rowIdx":1335,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/data_import/slack.py"},"contents":{"kind":"string","value":"import os\nimport ujson\nimport hashlib\nimport sys\nimport argparse\nimport shutil\nimport subprocess\nimport re\nimport logging\nimport random\nimport requests\n\nfrom django.conf import settings\nfrom django.db import connection\nfrom django.utils.timezone import now as timezone_now\nfrom django.forms.models import model_to_dict\nfrom typing import Any, Dict, List, Optional, Tuple, Set\nfrom zerver.forms import check_subdomain_available\nfrom zerver.models import Reaction, RealmEmoji, Realm, UserProfile, Recipient, \\\n CustomProfileField, CustomProfileFieldValue\nfrom zerver.data_import.slack_message_conversion import convert_to_zulip_markdown, \\\n get_user_full_name\nfrom zerver.data_import.import_util import ZerverFieldsT, build_zerver_realm, \\\n build_avatar, build_subscription, build_recipient, build_usermessages, \\\n build_defaultstream, build_attachment, process_avatars, process_uploads, \\\n process_emojis, build_realm, build_stream, build_message, \\\n create_converted_data_files, make_subscriber_map\nfrom zerver.data_import.sequencer import NEXT_ID\nfrom zerver.lib.parallel import run_parallel\nfrom zerver.lib.upload import random_name, sanitize_name\nfrom zerver.lib.export import MESSAGE_BATCH_CHUNK_SIZE\nfrom zerver.lib.emoji import NAME_TO_CODEPOINT_PATH\n\n# stubs\nAddedUsersT = Dict[str, int]\nAddedChannelsT = Dict[str, Tuple[str, int]]\nAddedRecipientsT = Dict[str, int]\n\ndef rm_tree(path: str) -> None:\n if os.path.exists(path):\n shutil.rmtree(path)\n\ndef slack_workspace_to_realm(domain_name: str, realm_id: int, user_list: List[ZerverFieldsT],\n realm_subdomain: str, slack_data_dir: str,\n custom_emoji_list: ZerverFieldsT)-> Tuple[ZerverFieldsT, AddedUsersT,\n AddedRecipientsT,\n AddedChannelsT,\n List[ZerverFieldsT],\n ZerverFieldsT]:\n \"\"\"\n Returns:\n 1. realm, Converted Realm data\n 2. added_users, which is a dictionary to map from slack user id to zulip user id\n 3. added_recipient, which is a dictionary to map from channel name to zulip recipient_id\n 4. added_channels, which is a dictionary to map from channel name to channel id, zulip stream_id\n 5. 
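The IdMapper defined just above keeps whichever id style it sees first: integer keys pass through unchanged, while non-integer keys are assigned fresh sequential ids, and mixing the two styles raises an exception. A quick illustration of both modes:

    int_mapper = IdMapper()
    assert int_mapper.get(42) == 42        # integer keys are reused as-is
    assert int_mapper.has(42)

    str_mapper = IdMapper()
    assert str_mapper.get('abc') == 1      # non-integer keys get fresh sequential ids
    assert str_mapper.get('xyz') == 2
    assert str_mapper.get('abc') == 1      # repeat lookups are stable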
avatars, which is a list used to map the avatars to zulip avatar records.json\n    6. emoji_url_map, which maps emoji name to its slack url\n    \"\"\"\n    NOW = float(timezone_now().timestamp())\n\n    zerver_realm = build_zerver_realm(realm_id, realm_subdomain, NOW, 'Slack')  # type: List[ZerverFieldsT]\n    realm = build_realm(zerver_realm, realm_id, domain_name)\n\n    zerver_userprofile, avatars, added_users, zerver_customprofilefield, \\\n        zerver_customprofilefield_value = users_to_zerver_userprofile(slack_data_dir, user_list,\n                                                                      realm_id, int(NOW), domain_name)\n    channels_to_zerver_stream_fields = channels_to_zerver_stream(slack_data_dir,\n                                                                 realm_id,\n                                                                 added_users,\n                                                                 zerver_userprofile)\n    zerver_realmemoji, emoji_url_map = build_realmemoji(custom_emoji_list, realm_id)\n    realm['zerver_realmemoji'] = zerver_realmemoji\n\n    # See https://zulipchat.com/help/set-default-streams-for-new-users\n    # for documentation on zerver_defaultstream\n    realm['zerver_userprofile'] = zerver_userprofile\n\n    # Custom profile fields\n    realm['zerver_customprofilefield'] = zerver_customprofilefield\n    realm['zerver_customprofilefieldvalue'] = zerver_customprofilefield_value\n\n    realm['zerver_defaultstream'] = channels_to_zerver_stream_fields[0]\n    realm['zerver_stream'] = channels_to_zerver_stream_fields[1]\n    realm['zerver_subscription'] = channels_to_zerver_stream_fields[3]\n    realm['zerver_recipient'] = channels_to_zerver_stream_fields[4]\n    added_channels = channels_to_zerver_stream_fields[2]\n    added_recipient = channels_to_zerver_stream_fields[5]\n\n    return realm, added_users, added_recipient, added_channels, avatars, emoji_url_map\n\ndef build_realmemoji(custom_emoji_list: ZerverFieldsT,\n                     realm_id: int) -> Tuple[List[ZerverFieldsT],\n                                             ZerverFieldsT]:\n    zerver_realmemoji = []\n    emoji_url_map = {}\n    emoji_id = 0\n    for emoji_name, url in custom_emoji_list.items():\n        if 'emoji.slack-edge.com' in url:\n            # Some of the emojis we get from the api have invalid links;\n            # this is to prevent errors related to them\n            realmemoji = RealmEmoji(\n                name=emoji_name,\n                id=emoji_id,\n                file_name=os.path.basename(url),\n                deactivated=False)\n\n            realmemoji_dict = model_to_dict(realmemoji, exclude=['realm', 'author'])\n            realmemoji_dict['author'] = None\n            realmemoji_dict['realm'] = realm_id\n\n            emoji_url_map[emoji_name] = url\n            zerver_realmemoji.append(realmemoji_dict)\n            emoji_id += 1\n    return zerver_realmemoji, emoji_url_map\n\ndef users_to_zerver_userprofile(slack_data_dir: str, users: List[ZerverFieldsT], realm_id: int,\n                                timestamp: Any, domain_name: str) -> Tuple[List[ZerverFieldsT],\n                                                                           List[ZerverFieldsT],\n                                                                           AddedUsersT,\n                                                                           List[ZerverFieldsT],\n                                                                           List[ZerverFieldsT]]:\n    \"\"\"\n    Returns:\n    1. zerver_userprofile, which is a list of user profiles\n    2. avatar_list, which is a list used to map the avatars to zulip avatar records.json\n    3. added_users, which is a dictionary to map from slack user id to zulip\n       user id\n    4. zerver_customprofilefield, which is a list of all custom profile fields\n    5. 
zerver_customprofilefield_values, which is a list of user profile fields\n \"\"\"\n logging.info('######### IMPORTING USERS STARTED #########\\n')\n zerver_userprofile = []\n zerver_customprofilefield = [] # type: List[ZerverFieldsT]\n zerver_customprofilefield_values = [] # type: List[ZerverFieldsT]\n avatar_list = [] # type: List[ZerverFieldsT]\n added_users = {}\n\n # The user data we get from the slack api does not contain custom profile data\n # Hence we get it from the slack zip file\n slack_data_file_user_list = get_data_file(slack_data_dir + '/users.json')\n\n # To map user id with the custom profile fields of the corresponding user\n slack_user_custom_field_map = {} # type: ZerverFieldsT\n # To store custom fields corresponding to their ids\n custom_field_map = {} # type: ZerverFieldsT\n\n for user in slack_data_file_user_list:\n process_slack_custom_fields(user, slack_user_custom_field_map)\n\n # We have only one primary owner in slack, see link\n # https://get.slack.help/hc/en-us/articles/201912948-Owners-and-Administrators\n # This is to import the primary owner first from all the users\n user_id_count = custom_field_id_count = customprofilefield_id = 0\n primary_owner_id = user_id_count\n user_id_count += 1\n\n for user in users:\n slack_user_id = user['id']\n\n if user.get('is_primary_owner', False):\n user_id = primary_owner_id\n else:\n user_id = user_id_count\n\n # email\n email = get_user_email(user, domain_name)\n\n # avatar\n # ref: https://chat.zulip.org/help/change-your-avatar\n avatar_url = build_avatar_url(slack_user_id, user['team_id'],\n user['profile']['avatar_hash'])\n build_avatar(user_id, realm_id, email, avatar_url, timestamp, avatar_list)\n\n # check if user is the admin\n realm_admin = get_admin(user)\n\n # timezone\n timezone = get_user_timezone(user)\n\n # Check for custom profile fields\n if slack_user_id in slack_user_custom_field_map:\n # For processing the fields\n custom_field_map, customprofilefield_id = build_customprofile_field(\n zerver_customprofilefield, slack_user_custom_field_map[slack_user_id],\n customprofilefield_id, realm_id, custom_field_map)\n # Store the custom field values for the corresponding user\n custom_field_id_count = build_customprofilefields_values(\n custom_field_map, slack_user_custom_field_map[slack_user_id], user_id,\n custom_field_id_count, zerver_customprofilefield_values)\n\n userprofile = UserProfile(\n full_name=get_user_full_name(user),\n short_name=user['name'],\n is_active=not user['deleted'],\n id=user_id,\n email=email,\n delivery_email=email,\n avatar_source='U',\n is_bot=user.get('is_bot', False),\n pointer=-1,\n is_realm_admin=realm_admin,\n bot_type=1 if user.get('is_bot', False) else None,\n date_joined=timestamp,\n timezone=timezone,\n last_login=timestamp)\n userprofile_dict = model_to_dict(userprofile)\n # Set realm id separately as the corresponding realm is not yet a Realm model instance\n userprofile_dict['realm'] = realm_id\n\n zerver_userprofile.append(userprofile_dict)\n added_users[slack_user_id] = user_id\n if not user.get('is_primary_owner', False):\n user_id_count += 1\n\n logging.info(u\"{} -> {}\".format(user['name'], userprofile_dict['email']))\n\n process_customprofilefields(zerver_customprofilefield, zerver_customprofilefield_values)\n logging.info('######### IMPORTING USERS FINISHED #########\\n')\n return zerver_userprofile, avatar_list, added_users, zerver_customprofilefield, \\\n zerver_customprofilefield_values\n\ndef build_customprofile_field(customprofile_field: List[ZerverFieldsT], 
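One detail of the user loop above that is easy to miss: user id 0 is reserved for Slack's primary owner, who may appear anywhere in the input list, while everyone else receives ids in encounter order. The allocation scheme in isolation (the user dicts are trimmed, hypothetical stand-ins for Slack's users.json entries):

    users = [
        {'id': 'U1', 'is_primary_owner': False},
        {'id': 'U2', 'is_primary_owner': True},
        {'id': 'U3', 'is_primary_owner': False},
    ]

    added_users = {}
    user_id_count = 1  # id 0 is reserved for the primary owner
    for user in users:
        if user.get('is_primary_owner', False):
            added_users[user['id']] = 0
        else:
            added_users[user['id']] = user_id_count
            user_id_count += 1

    assert added_users == {'U1': 1, 'U2': 0, 'U3': 2}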
fields: ZerverFieldsT,\n customprofilefield_id: int, realm_id: int,\n custom_field_map: ZerverFieldsT) -> Tuple[ZerverFieldsT, int]:\n # The name of the custom profile field is not provided in the slack data\n # Hash keys of the fields are provided\n # Reference: https://api.slack.com/methods/users.profile.set\n for field, value in fields.items():\n if field not in custom_field_map:\n slack_custom_fields = ['phone', 'skype']\n if field in slack_custom_fields:\n field_name = field\n else:\n field_name = (\"slack custom field %s\" % str(customprofilefield_id + 1))\n customprofilefield = CustomProfileField(\n id=customprofilefield_id,\n name=field_name,\n field_type=1 # For now this is defaulted to 'SHORT_TEXT'\n # Processing is done in the function 'process_customprofilefields'\n )\n\n customprofilefield_dict = model_to_dict(customprofilefield,\n exclude=['realm'])\n customprofilefield_dict['realm'] = realm_id\n\n custom_field_map[field] = customprofilefield_id\n customprofilefield_id += 1\n customprofile_field.append(customprofilefield_dict)\n return custom_field_map, customprofilefield_id\n\ndef process_slack_custom_fields(user: ZerverFieldsT,\n slack_user_custom_field_map: ZerverFieldsT) -> None:\n slack_user_custom_field_map[user['id']] = {}\n if user['profile'].get('fields'):\n slack_user_custom_field_map[user['id']] = user['profile']['fields']\n\n slack_custom_fields = ['phone', 'skype']\n for field in slack_custom_fields:\n if field in user['profile']:\n slack_user_custom_field_map[user['id']][field] = {'value': user['profile'][field]}\n\ndef build_customprofilefields_values(custom_field_map: ZerverFieldsT, fields: ZerverFieldsT,\n user_id: int, custom_field_id: int,\n custom_field_values: List[ZerverFieldsT]) -> int:\n for field, value in fields.items():\n custom_field_value = CustomProfileFieldValue(\n id=custom_field_id,\n value=value['value'])\n\n custom_field_value_dict = model_to_dict(custom_field_value,\n exclude=['user_profile', 'field'])\n custom_field_value_dict['user_profile'] = user_id\n custom_field_value_dict['field'] = custom_field_map[field]\n\n custom_field_values.append(custom_field_value_dict)\n custom_field_id += 1\n return custom_field_id\n\ndef process_customprofilefields(customprofilefield: List[ZerverFieldsT],\n customprofilefield_value: List[ZerverFieldsT]) -> None:\n # Process the field types by checking all field values\n for field in customprofilefield:\n for field_value in customprofilefield_value:\n if field_value['field'] == field['id'] and len(field_value['value']) > 50:\n field['field_type'] = 2 # corresponding to Long text\n break\n\ndef get_user_email(user: ZerverFieldsT, domain_name: str) -> str:\n if 'email' in user['profile']:\n return user['profile']['email']\n if 'bot_id' in user['profile']:\n if 'real_name_normalized' in user['profile']:\n slack_bot_name = user['profile']['real_name_normalized']\n elif 'first_name' in user['profile']:\n slack_bot_name = user['profile']['first_name']\n else:\n raise AssertionError(\"Could not identify bot type\")\n return slack_bot_name.replace(\"Bot\", \"\").replace(\" \", \"\") + \"-bot@%s\" % (domain_name,)\n if get_user_full_name(user) == \"slackbot\":\n return \"imported-slackbot-bot@%s\" % (domain_name,)\n raise AssertionError(\"Could not find email address for Slack user %s\" % (user,))\n\ndef build_avatar_url(slack_user_id: str, team_id: str, avatar_hash: str) -> str:\n avatar_url = \"https://ca.slack-edge.com/{}-{}-{}\".format(team_id, slack_user_id,\n avatar_hash)\n return avatar_url\n\ndef 
get_admin(user: ZerverFieldsT) -> bool:\n admin = user.get('is_admin', False)\n owner = user.get('is_owner', False)\n primary_owner = user.get('is_primary_owner', False)\n\n if admin or owner or primary_owner:\n return True\n return False\n\ndef get_user_timezone(user: ZerverFieldsT) -> str:\n _default_timezone = \"America/New_York\"\n timezone = user.get(\"tz\", _default_timezone)\n if timezone is None or '/' not in timezone:\n timezone = _default_timezone\n return timezone\n\ndef channels_to_zerver_stream(slack_data_dir: str, realm_id: int, added_users: AddedUsersT,\n zerver_userprofile: List[ZerverFieldsT]) -> Tuple[List[ZerverFieldsT],\n List[ZerverFieldsT],\n AddedChannelsT,\n List[ZerverFieldsT],\n List[ZerverFieldsT],\n AddedRecipientsT]:\n \"\"\"\n Returns:\n 1. zerver_defaultstream, which is a list of the default streams\n 2. zerver_stream, while is a list of all streams\n 3. added_channels, which is a dictionary to map from channel name to channel id, zulip stream_id\n 4. zerver_subscription, which is a list of the subscriptions\n 5. zerver_recipient, which is a list of the recipients\n 6. added_recipient, which is a dictionary to map from channel name to zulip recipient_id\n \"\"\"\n logging.info('######### IMPORTING CHANNELS STARTED #########\\n')\n channels = get_data_file(slack_data_dir + '/channels.json')\n\n added_channels = {}\n added_recipient = {}\n\n zerver_stream = []\n zerver_subscription = [] # type: List[ZerverFieldsT]\n zerver_recipient = []\n zerver_defaultstream = []\n\n stream_id_count = subscription_id_count = recipient_id_count = defaultstream_id = 0\n\n for channel in channels:\n # slack_channel_id = channel['id']\n\n # map Slack's topic and purpose content into Zulip's stream description.\n # WARN This mapping is lossy since the topic.creator, topic.last_set,\n # purpose.creator, purpose.last_set fields are not preserved.\n description = channel[\"purpose\"][\"value\"]\n stream_id = stream_id_count\n recipient_id = recipient_id_count\n\n # construct the stream object and append it to zerver_stream\n stream = build_stream(float(channel[\"created\"]), realm_id, channel[\"name\"],\n description, stream_id, channel[\"is_archived\"])\n zerver_stream.append(stream)\n\n # construct defaultstream object\n # slack has the default channel 'general' and 'random'\n # where every user is subscribed\n default_channels = ['general', 'random'] # Slack specific\n if channel['name'] in default_channels:\n defaultstream = build_defaultstream(realm_id, stream_id,\n defaultstream_id)\n zerver_defaultstream.append(defaultstream)\n defaultstream_id += 1\n\n added_channels[stream['name']] = (channel['id'], stream_id)\n\n recipient = build_recipient(stream_id, recipient_id, Recipient.STREAM)\n zerver_recipient.append(recipient)\n added_recipient[stream['name']] = recipient_id\n # TODO add recipients for private message and huddles\n\n # construct the subscription object and append it to zerver_subscription\n subscription_id_count = get_subscription(channel['members'], zerver_subscription,\n recipient_id, added_users,\n subscription_id_count)\n # TODO add zerver_subscription which correspond to\n # huddles type recipient\n # For huddles:\n # sub['recipient']=recipient['id'] where recipient['type_id']=added_users[member]\n\n stream_id_count += 1\n recipient_id_count += 1\n logging.info(u\"{} -> created\".format(channel['name']))\n\n # TODO map Slack's pins to Zulip's stars\n # There is the security model that Slack's pins are known to the team owner\n # as evident from where it is 
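The fallback behavior of get_user_timezone above, spelled out concretely (the dicts are hypothetical Slack user records):

    assert get_user_timezone({'tz': 'Europe/Berlin'}) == 'Europe/Berlin'
    assert get_user_timezone({}) == 'America/New_York'              # key missing
    assert get_user_timezone({'tz': None}) == 'America/New_York'    # explicit null
    assert get_user_timezone({'tz': 'PST'}) == 'America/New_York'   # not Area/Location form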
stored at (channels)\n # \"pins\": [\n # {\n # \"id\": \"1444755381.000003\",\n # \"type\": \"C\",\n # \"user\": \"U061A5N1G\",\n # \"owner\": \"U061A5N1G\",\n # \"created\": \"1444755463\"\n # }\n # ],\n\n for user in zerver_userprofile:\n # this maps the recipients and subscriptions\n # related to private messages\n recipient = build_recipient(user['id'], recipient_id_count, Recipient.PERSONAL)\n sub = build_subscription(recipient_id_count, user['id'], subscription_id_count)\n\n zerver_recipient.append(recipient)\n zerver_subscription.append(sub)\n\n subscription_id_count += 1\n recipient_id_count += 1\n\n logging.info('######### IMPORTING STREAMS FINISHED #########\\n')\n return zerver_defaultstream, zerver_stream, added_channels, zerver_subscription, \\\n zerver_recipient, added_recipient\n\ndef get_subscription(channel_members: List[str], zerver_subscription: List[ZerverFieldsT],\n recipient_id: int, added_users: AddedUsersT,\n subscription_id: int) -> int:\n for member in channel_members:\n sub = build_subscription(recipient_id, added_users[member], subscription_id)\n # The recipient corresponds to a stream for stream-readable message.\n zerver_subscription.append(sub)\n subscription_id += 1\n return subscription_id\n\ndef convert_slack_workspace_messages(slack_data_dir: str, users: List[ZerverFieldsT], realm_id: int,\n added_users: AddedUsersT, added_recipient: AddedRecipientsT,\n added_channels: AddedChannelsT, realm: ZerverFieldsT,\n zerver_realmemoji: List[ZerverFieldsT], domain_name: str,\n output_dir: str,\n chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> Tuple[List[ZerverFieldsT],\n List[ZerverFieldsT],\n List[ZerverFieldsT]]:\n \"\"\"\n Returns:\n 1. reactions, which is a list of the reactions\n 2. uploads, which is a list of uploads to be mapped in uploads records.json\n 3. 
attachment, which is a list of the attachments\n \"\"\"\n all_messages = get_all_messages(slack_data_dir, added_channels)\n\n # we sort the messages according to the timestamp to show messages with\n # the proper date order\n all_messages = sorted(all_messages, key=lambda message: message['ts'])\n\n logging.info('######### IMPORTING MESSAGES STARTED #########\\n')\n\n total_reactions = [] # type: List[ZerverFieldsT]\n total_attachments = [] # type: List[ZerverFieldsT]\n total_uploads = [] # type: List[ZerverFieldsT]\n\n # The messages are stored in batches\n low_index = 0\n upper_index = low_index + chunk_size\n dump_file_id = 1\n\n subscriber_map = make_subscriber_map(\n zerver_subscription=realm['zerver_subscription'],\n )\n\n while True:\n message_data = all_messages[low_index:upper_index]\n if len(message_data) == 0:\n break\n zerver_message, zerver_usermessage, attachment, uploads, reactions = \\\n channel_message_to_zerver_message(\n realm_id, users, added_users, added_recipient, message_data,\n zerver_realmemoji, subscriber_map, added_channels,\n domain_name)\n\n message_json = dict(\n zerver_message=zerver_message,\n zerver_usermessage=zerver_usermessage)\n\n message_file = \"/messages-%06d.json\" % (dump_file_id,)\n logging.info(\"Writing Messages to %s\\n\" % (output_dir + message_file))\n create_converted_data_files(message_json, output_dir, message_file)\n\n total_reactions += reactions\n total_attachments += attachment\n total_uploads += uploads\n\n low_index = upper_index\n upper_index = chunk_size + low_index\n dump_file_id += 1\n\n logging.info('######### IMPORTING MESSAGES FINISHED #########\\n')\n return total_reactions, total_uploads, total_attachments\n\ndef get_all_messages(slack_data_dir: str, added_channels: AddedChannelsT) -> List[ZerverFieldsT]:\n all_messages = [] # type: List[ZerverFieldsT]\n for channel_name in added_channels.keys():\n channel_dir = os.path.join(slack_data_dir, channel_name)\n json_names = os.listdir(channel_dir)\n for json_name in json_names:\n message_dir = os.path.join(channel_dir, json_name)\n messages = get_data_file(message_dir)\n for message in messages:\n # To give every message the channel information\n message['channel_name'] = channel_name\n all_messages += messages\n return all_messages\n\ndef channel_message_to_zerver_message(realm_id: int,\n users: List[ZerverFieldsT],\n added_users: AddedUsersT,\n added_recipient: AddedRecipientsT,\n all_messages: List[ZerverFieldsT],\n zerver_realmemoji: List[ZerverFieldsT],\n subscriber_map: Dict[int, Set[int]],\n added_channels: AddedChannelsT,\n domain_name: str) -> Tuple[List[ZerverFieldsT],\n List[ZerverFieldsT],\n List[ZerverFieldsT],\n List[ZerverFieldsT],\n List[ZerverFieldsT]]:\n \"\"\"\n Returns:\n 1. zerver_message, which is a list of the messages\n 2. zerver_usermessage, which is a list of the usermessages\n 3. zerver_attachment, which is a list of the attachments\n 4. uploads_list, which is a list of uploads to be mapped in uploads records.json\n 5. 
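The low_index/upper_index arithmetic in the message loop above is plain fixed-size batching. Isolated as a generator for clarity (the name and inputs are illustrative, not from the source):

    def iter_chunks(data, chunk_size):
        # Yield consecutive slices of size chunk_size until data is exhausted.
        low_index = 0
        upper_index = low_index + chunk_size
        while True:
            batch = data[low_index:upper_index]
            if len(batch) == 0:
                break
            yield batch
            low_index = upper_index
            upper_index = chunk_size + low_index

    assert list(iter_chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]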
reaction_list, which is a list of all user reactions\n \"\"\"\n zerver_message = []\n zerver_usermessage = [] # type: List[ZerverFieldsT]\n uploads_list = [] # type: List[ZerverFieldsT]\n zerver_attachment = [] # type: List[ZerverFieldsT]\n reaction_list = [] # type: List[ZerverFieldsT]\n\n # For unicode emoji\n with open(NAME_TO_CODEPOINT_PATH) as fp:\n name_to_codepoint = ujson.load(fp)\n\n for message in all_messages:\n user = get_message_sending_user(message)\n if not user:\n # Ignore messages without user names\n # These are Sometimes produced by slack\n continue\n\n subtype = message.get('subtype', False)\n if subtype in [\n # Zulip doesn't have a pinned_item concept\n \"pinned_item\",\n \"unpinned_item\",\n # Slack's channel join/leave notices are spammy\n \"channel_join\",\n \"channel_leave\",\n \"channel_name\"\n ]:\n continue\n\n try:\n content, mentioned_user_ids, has_link = convert_to_zulip_markdown(\n message['text'], users, added_channels, added_users)\n except Exception:\n print(\"Slack message unexpectedly missing text representation:\")\n print(ujson.dumps(message, indent=4))\n continue\n rendered_content = None\n\n recipient_id = added_recipient[message['channel_name']]\n message_id = NEXT_ID('message')\n\n # Process message reactions\n if 'reactions' in message.keys():\n build_reactions(reaction_list, message['reactions'], added_users,\n message_id, name_to_codepoint,\n zerver_realmemoji)\n\n # Process different subtypes of slack messages\n\n # Subtypes which have only the action in the message should\n # be rendered with '/me' in the content initially\n # For example \"sh_room_created\" has the message 'started a call'\n # which should be displayed as '/me started a call'\n if subtype in [\"bot_add\", \"sh_room_created\", \"me_message\"]:\n content = ('/me %s' % (content))\n if subtype == 'file_comment':\n # The file_comment message type only indicates the\n # responsible user in a subfield.\n message['user'] = message['comment']['user']\n\n file_info = process_message_files(\n message=message,\n domain_name=domain_name,\n realm_id=realm_id,\n message_id=message_id,\n user=user,\n users=users,\n added_users=added_users,\n zerver_attachment=zerver_attachment,\n uploads_list=uploads_list,\n )\n\n content += file_info['content']\n has_link = has_link or file_info['has_link']\n\n has_attachment = file_info['has_attachment']\n has_image = file_info['has_image']\n\n # construct message\n topic_name = 'imported from slack'\n\n zulip_message = build_message(topic_name, float(message['ts']), message_id, content,\n rendered_content, added_users[user], recipient_id,\n has_image, has_link, has_attachment)\n zerver_message.append(zulip_message)\n\n # construct usermessages\n build_usermessages(\n zerver_usermessage=zerver_usermessage,\n subscriber_map=subscriber_map,\n recipient_id=recipient_id,\n mentioned_user_ids=mentioned_user_ids,\n message_id=message_id,\n )\n\n return zerver_message, zerver_usermessage, zerver_attachment, uploads_list, \\\n reaction_list\n\ndef process_message_files(message: ZerverFieldsT,\n domain_name: str,\n realm_id: int,\n message_id: int,\n user: str,\n users: List[ZerverFieldsT],\n added_users: AddedUsersT,\n zerver_attachment: List[ZerverFieldsT],\n uploads_list: List[ZerverFieldsT]) -> Dict[str, Any]:\n has_attachment = False\n has_image = False\n has_link = False\n\n files = message.get('files', [])\n\n subtype = message.get('subtype')\n\n if subtype == 'file_share':\n # In Slack messages, uploads can either have the subtype as 'file_share' or\n # 
have the upload information in 'files' keyword\n files = [message['file']]\n\n markdown_links = []\n\n for fileinfo in files:\n url = fileinfo['url_private']\n\n if 'files.slack.com' in url:\n # For attachments with slack download link\n has_attachment = True\n has_link = True\n has_image = True if 'image' in fileinfo['mimetype'] else False\n\n file_user = [iterate_user for iterate_user in users if message['user'] == iterate_user['id']]\n file_user_email = get_user_email(file_user[0], domain_name)\n\n s3_path, content_for_link = get_attachment_path_and_content(fileinfo, realm_id)\n markdown_links.append(content_for_link)\n\n # construct attachments\n build_uploads(added_users[user], realm_id, file_user_email, fileinfo, s3_path,\n uploads_list)\n\n build_attachment(realm_id, {message_id}, added_users[user],\n fileinfo, s3_path, zerver_attachment)\n else:\n # For attachments with link not from slack\n # Example: Google drive integration\n has_link = True\n if 'title' in fileinfo:\n file_name = fileinfo['title']\n else:\n file_name = fileinfo['name']\n markdown_links.append('[%s](%s)' % (file_name, fileinfo['url_private']))\n\n content = '\\n'.join(markdown_links)\n\n return dict(\n content=content,\n has_attachment=has_attachment,\n has_image=has_image,\n has_link=has_link,\n )\n\ndef get_attachment_path_and_content(fileinfo: ZerverFieldsT, realm_id: int) -> Tuple[str,\n str]:\n # Should be kept in sync with its equivalent in zerver/lib/uploads in the function\n # 'upload_message_file'\n s3_path = \"/\".join([\n str(realm_id),\n 'SlackImportAttachment', # This is a special placeholder which should be kept\n # in sync with 'exports.py' function 'import_message_data'\n format(random.randint(0, 255), 'x'),\n random_name(18),\n sanitize_name(fileinfo['name'])\n ])\n attachment_path = ('/user_uploads/%s' % (s3_path))\n content = '[%s](%s)' % (fileinfo['title'], attachment_path)\n\n return s3_path, content\n\ndef build_reactions(reaction_list: List[ZerverFieldsT], reactions: List[ZerverFieldsT],\n added_users: AddedUsersT, message_id: int,\n name_to_codepoint: ZerverFieldsT,\n zerver_realmemoji: List[ZerverFieldsT]) -> None:\n realmemoji = {}\n for realm_emoji in zerver_realmemoji:\n realmemoji[realm_emoji['name']] = realm_emoji['id']\n\n # For the unicode emoji codes, we use equivalent of\n # function 'emoji_name_to_emoji_code' in 'zerver/lib/emoji' here\n for slack_reaction in reactions:\n emoji_name = slack_reaction['name']\n # Check in unicode emoji\n if emoji_name in name_to_codepoint:\n emoji_code = name_to_codepoint[emoji_name]\n reaction_type = Reaction.UNICODE_EMOJI\n # Check in realm emoji\n elif emoji_name in realmemoji:\n emoji_code = realmemoji[emoji_name]\n reaction_type = Reaction.REALM_EMOJI\n else:\n continue\n\n for user in slack_reaction['users']:\n reaction_id = NEXT_ID('reaction')\n reaction = Reaction(\n id=reaction_id,\n emoji_code=emoji_code,\n emoji_name=emoji_name,\n reaction_type=reaction_type)\n\n reaction_dict = model_to_dict(reaction,\n exclude=['message', 'user_profile'])\n reaction_dict['message'] = message_id\n reaction_dict['user_profile'] = added_users[user]\n\n reaction_list.append(reaction_dict)\n\ndef build_uploads(user_id: int, realm_id: int, email: str, fileinfo: ZerverFieldsT, s3_path: str,\n uploads_list: List[ZerverFieldsT]) -> None:\n upload = dict(\n path=fileinfo['url_private'], # Save slack's url here, which is used later while processing\n realm_id=realm_id,\n content_type=None,\n user_profile_id=user_id,\n last_modified=fileinfo['timestamp'],\n 
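build_reactions above resolves emoji names with a fixed precedence: unicode emoji first, then realm emoji, otherwise the reaction is dropped. A miniature of that lookup (both maps and the string tags are hypothetical stand-ins for name_to_codepoint, the realm emoji table, and the Reaction type constants):

    name_to_codepoint = {'thumbsup': '1f44d'}
    realmemoji = {'zulip': 1}

    def resolve(emoji_name):
        if emoji_name in name_to_codepoint:
            return (name_to_codepoint[emoji_name], 'unicode_emoji')
        elif emoji_name in realmemoji:
            return (realmemoji[emoji_name], 'realm_emoji')
        return None  # unknown emoji: the reaction is skipped

    assert resolve('thumbsup') == ('1f44d', 'unicode_emoji')
    assert resolve('zulip') == (1, 'realm_emoji')
    assert resolve('mystery') is None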
user_profile_email=email,\n        s3_path=s3_path,\n        size=fileinfo['size'])\n    uploads_list.append(upload)\n\ndef get_message_sending_user(message: ZerverFieldsT) -> Optional[str]:\n    if 'user' in message:\n        return message['user']\n    if message.get('file'):\n        return message['file'].get('user')\n    return None\n\ndef do_convert_data(slack_zip_file: str, output_dir: str, token: str, threads: int=6) -> None:\n    # Subdomain is set by the user while running the import command\n    realm_subdomain = \"\"\n    realm_id = 0\n    domain_name = settings.EXTERNAL_HOST\n\n    slack_data_dir = slack_zip_file.replace('.zip', '')\n    if not os.path.exists(slack_data_dir):\n        os.makedirs(slack_data_dir)\n\n    os.makedirs(output_dir, exist_ok=True)\n    # output directory should be empty initially\n    if os.listdir(output_dir):\n        raise Exception('Output directory should be empty!')\n\n    subprocess.check_call(['unzip', '-q', slack_zip_file, '-d', slack_data_dir])\n    # with zipfile.ZipFile(slack_zip_file, 'r') as zip_ref:\n    #     zip_ref.extractall(slack_data_dir)\n\n    # We get the user data from the legacy token method of the slack api, which is deprecated,\n    # but we use it because the user email data is provided only by this method\n    user_list = get_slack_api_data(token, \"https://slack.com/api/users.list\", \"members\")\n    # Get custom emoji from slack api\n    custom_emoji_list = get_slack_api_data(token, \"https://slack.com/api/emoji.list\", \"emoji\")\n\n    realm, added_users, added_recipient, added_channels, avatar_list, \\\n        emoji_url_map = slack_workspace_to_realm(domain_name, realm_id, user_list,\n                                                 realm_subdomain,\n                                                 slack_data_dir, custom_emoji_list)\n\n    reactions, uploads_list, zerver_attachment = convert_slack_workspace_messages(\n        slack_data_dir, user_list, realm_id, added_users, added_recipient, added_channels,\n        realm, realm['zerver_realmemoji'], domain_name, output_dir)\n\n    # Move zerver_reactions to realm.json file\n    realm['zerver_reaction'] = reactions\n\n    emoji_folder = os.path.join(output_dir, 'emoji')\n    os.makedirs(emoji_folder, exist_ok=True)\n    emoji_records = process_emojis(realm['zerver_realmemoji'], emoji_folder, emoji_url_map, threads)\n\n    avatar_folder = os.path.join(output_dir, 'avatars')\n    avatar_realm_folder = os.path.join(avatar_folder, str(realm_id))\n    os.makedirs(avatar_realm_folder, exist_ok=True)\n    avatar_records = process_avatars(avatar_list, avatar_folder, realm_id, threads, size_url_suffix='-512')\n\n    uploads_folder = os.path.join(output_dir, 'uploads')\n    os.makedirs(os.path.join(uploads_folder, str(realm_id)), exist_ok=True)\n    uploads_records = process_uploads(uploads_list, uploads_folder, threads)\n    attachment = {\"zerver_attachment\": zerver_attachment}\n\n    # IO realm.json\n    create_converted_data_files(realm, output_dir, '/realm.json')\n    # IO emoji records\n    create_converted_data_files(emoji_records, output_dir, '/emoji/records.json')\n    # IO avatar records\n    create_converted_data_files(avatar_records, output_dir, '/avatars/records.json')\n    # IO uploads records\n    create_converted_data_files(uploads_records, output_dir, '/uploads/records.json')\n    # IO attachments records\n    create_converted_data_files(attachment, output_dir, '/attachment.json')\n\n    # remove slack dir\n    rm_tree(slack_data_dir)\n    subprocess.check_call([\"tar\", \"-czf\", output_dir + '.tar.gz', output_dir, '-P'])\n\n    logging.info('######### DATA CONVERSION FINISHED #########\\n')\n    logging.info(\"Zulip data dump created at %s\" % (output_dir))\n\ndef get_data_file(path: str) -> Any:\n    with open(path, \"r\") as fp:\n        data = ujson.load(fp)\n        return data\n\ndef 
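A hedged sketch of driving the full pipeline in do_convert_data above; the paths and token are placeholders, and a configured Django settings module (for settings.EXTERNAL_HOST) is assumed:

    do_convert_data(
        slack_zip_file='/tmp/myworkspace.zip',    # Slack export archive
        output_dir='/tmp/converted_slack_data',   # must be empty or absent
        token='xoxp-...',                         # legacy Slack API token
        threads=6,                                # parallel download workers
    )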
get_slack_api_data(token: str, slack_api_url: str, get_param: str) -> Any:\n data = requests.get('%s?token=%s' % (slack_api_url, token))\n if data.status_code == requests.codes.ok:\n if 'error' in data.json():\n raise Exception('Enter a valid token!')\n json_data = data.json()[get_param]\n return json_data\n else:\n raise Exception('Something went wrong. Please try again!')\n"},"type_annotations":{"kind":"list like","value":["str","str","int","List[ZerverFieldsT]","str","str","ZerverFieldsT","ZerverFieldsT","int","str","List[ZerverFieldsT]","int","Any","str","List[ZerverFieldsT]","ZerverFieldsT","int","int","ZerverFieldsT","ZerverFieldsT","ZerverFieldsT","ZerverFieldsT","ZerverFieldsT","int","int","List[ZerverFieldsT]","List[ZerverFieldsT]","List[ZerverFieldsT]","ZerverFieldsT","str","str","str","str","ZerverFieldsT","ZerverFieldsT","str","int","AddedUsersT","List[ZerverFieldsT]","List[str]","List[ZerverFieldsT]","int","AddedUsersT","int","str","List[ZerverFieldsT]","int","AddedUsersT","AddedRecipientsT","AddedChannelsT","ZerverFieldsT","List[ZerverFieldsT]","str","str","str","AddedChannelsT","int","List[ZerverFieldsT]","AddedUsersT","AddedRecipientsT","List[ZerverFieldsT]","List[ZerverFieldsT]","Dict[int, Set[int]]","AddedChannelsT","str","ZerverFieldsT","str","int","int","str","List[ZerverFieldsT]","AddedUsersT","List[ZerverFieldsT]","List[ZerverFieldsT]","ZerverFieldsT","int","List[ZerverFieldsT]","List[ZerverFieldsT]","AddedUsersT","int","ZerverFieldsT","List[ZerverFieldsT]","int","int","str","ZerverFieldsT","str","List[ZerverFieldsT]","ZerverFieldsT","str","str","str","str","str","str","str"],"string":"[\n \"str\",\n \"str\",\n \"int\",\n \"List[ZerverFieldsT]\",\n \"str\",\n \"str\",\n \"ZerverFieldsT\",\n \"ZerverFieldsT\",\n \"int\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"int\",\n \"Any\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"ZerverFieldsT\",\n \"int\",\n \"int\",\n \"ZerverFieldsT\",\n \"ZerverFieldsT\",\n \"ZerverFieldsT\",\n \"ZerverFieldsT\",\n \"ZerverFieldsT\",\n \"int\",\n \"int\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"ZerverFieldsT\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"ZerverFieldsT\",\n \"ZerverFieldsT\",\n \"str\",\n \"int\",\n \"AddedUsersT\",\n \"List[ZerverFieldsT]\",\n \"List[str]\",\n \"List[ZerverFieldsT]\",\n \"int\",\n \"AddedUsersT\",\n \"int\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"int\",\n \"AddedUsersT\",\n \"AddedRecipientsT\",\n \"AddedChannelsT\",\n \"ZerverFieldsT\",\n \"List[ZerverFieldsT]\",\n \"str\",\n \"str\",\n \"str\",\n \"AddedChannelsT\",\n \"int\",\n \"List[ZerverFieldsT]\",\n \"AddedUsersT\",\n \"AddedRecipientsT\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"Dict[int, Set[int]]\",\n \"AddedChannelsT\",\n \"str\",\n \"ZerverFieldsT\",\n \"str\",\n \"int\",\n \"int\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"AddedUsersT\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"ZerverFieldsT\",\n \"int\",\n \"List[ZerverFieldsT]\",\n \"List[ZerverFieldsT]\",\n \"AddedUsersT\",\n \"int\",\n \"ZerverFieldsT\",\n \"List[ZerverFieldsT]\",\n \"int\",\n \"int\",\n \"str\",\n \"ZerverFieldsT\",\n \"str\",\n \"List[ZerverFieldsT]\",\n \"ZerverFieldsT\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list 
like","value":[1410,1524,1539,1555,1622,1643,1696,4396,4442,5409,5421,5452,5500,5518,10283,10312,10380,10395,10448,11766,11842,12301,12324,12385,12407,12470,13106,13185,13569,13597,14350,14364,14382,14597,14864,15131,15146,15164,15227,19983,20015,20071,20089,20140,20500,20512,20543,20598,20628,20699,20722,20793,20827,20881,23294,23315,23979,24029,24101,24169,24239,24317,24392,24467,24534,28960,29014,29055,29098,29135,29173,29233,29291,29352,31306,31331,32085,32117,32171,32196,32240,32294,33639,33654,33666,33681,33705,33742,34185,34399,34416,34428,37711,37837,37857,37873],"string":"[\n 1410,\n 1524,\n 1539,\n 1555,\n 1622,\n 1643,\n 1696,\n 4396,\n 4442,\n 5409,\n 5421,\n 5452,\n 5500,\n 5518,\n 10283,\n 10312,\n 10380,\n 10395,\n 10448,\n 11766,\n 11842,\n 12301,\n 12324,\n 12385,\n 12407,\n 12470,\n 13106,\n 13185,\n 13569,\n 13597,\n 14350,\n 14364,\n 14382,\n 14597,\n 14864,\n 15131,\n 15146,\n 15164,\n 15227,\n 19983,\n 20015,\n 20071,\n 20089,\n 20140,\n 20500,\n 20512,\n 20543,\n 20598,\n 20628,\n 20699,\n 20722,\n 20793,\n 20827,\n 20881,\n 23294,\n 23315,\n 23979,\n 24029,\n 24101,\n 24169,\n 24239,\n 24317,\n 24392,\n 24467,\n 24534,\n 28960,\n 29014,\n 29055,\n 29098,\n 29135,\n 29173,\n 29233,\n 29291,\n 29352,\n 31306,\n 31331,\n 32085,\n 32117,\n 32171,\n 32196,\n 32240,\n 32294,\n 33639,\n 33654,\n 33666,\n 33681,\n 33705,\n 33742,\n 34185,\n 34399,\n 34416,\n 34428,\n 37711,\n 37837,\n 37857,\n 37873\n]"},"type_annotation_ends":{"kind":"list like","value":[1413,1527,1542,1574,1625,1646,1709,4409,4445,5412,5440,5455,5503,5521,10302,10325,10383,10398,10461,11779,11855,12314,12337,12388,12410,12489,13125,13204,13582,13600,14353,14367,14385,14610,14877,15134,15149,15175,15246,19992,20034,20074,20100,20143,20503,20531,20546,20609,20644,20713,20735,20812,20830,20884,23297,23329,23982,24048,24112,24185,24258,24336,24411,24481,24537,28973,29017,29058,29101,29138,29192,29244,29310,29371,31319,31334,32104,32136,32182,32199,32253,32313,33642,33657,33669,33694,33708,33761,34198,34402,34419,34431,37714,37840,37860,37876],"string":"[\n 1413,\n 1527,\n 1542,\n 1574,\n 1625,\n 1646,\n 1709,\n 4409,\n 4445,\n 5412,\n 5440,\n 5455,\n 5503,\n 5521,\n 10302,\n 10325,\n 10383,\n 10398,\n 10461,\n 11779,\n 11855,\n 12314,\n 12337,\n 12388,\n 12410,\n 12489,\n 13125,\n 13204,\n 13582,\n 13600,\n 14353,\n 14367,\n 14385,\n 14610,\n 14877,\n 15134,\n 15149,\n 15175,\n 15246,\n 19992,\n 20034,\n 20074,\n 20100,\n 20143,\n 20503,\n 20531,\n 20546,\n 20609,\n 20644,\n 20713,\n 20735,\n 20812,\n 20830,\n 20884,\n 23297,\n 23329,\n 23982,\n 24048,\n 24112,\n 24185,\n 24258,\n 24336,\n 24411,\n 24481,\n 24537,\n 28973,\n 29017,\n 29058,\n 29101,\n 29138,\n 29192,\n 29244,\n 29310,\n 29371,\n 31319,\n 31334,\n 32104,\n 32136,\n 32182,\n 32199,\n 32253,\n 32313,\n 33642,\n 33657,\n 33669,\n 33694,\n 33708,\n 33761,\n 34198,\n 34402,\n 34419,\n 34431,\n 37714,\n 37840,\n 37860,\n 37876\n]"}}},{"rowIdx":1336,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/data_import/slack_message_conversion.py"},"contents":{"kind":"string","value":"import re\nfrom typing import Any, Dict, Tuple, List, Optional\n\n# stubs\nZerverFieldsT = Dict[str, Any]\nAddedUsersT = Dict[str, int]\nAddedChannelsT = Dict[str, Tuple[str, int]]\n\n# Slack link can be in the format and \nLINK_REGEX = r\"\"\"\n (<) # match '>'\n (http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/|ftp:\\/\\/)? # protocol and www\n ([a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*)(\\.) 
# domain name\n    ([a-z]{2,63}(:[0-9]{1,5})?)                # domain\n    (\/[^>]*)?                 # path\n    (\|)?(?:\|([^>]+))?        # char after pipe (for slack links)\n    (>)\n    \"\"\"\n\nSLACK_MAILTO_REGEX = r\"\"\"\n    <((mailto:)?                      # match `<mailto:`\n        ([\w\.-]+@[\w\.-]+(\.[\w]+)+))    # match email\n    (\|)?                              # match pipe\n    ([\w\.-]+@[\w\.-]+(\.[\w]+)+)?     # match email after pipe (if present)\n    >                                  # match '>'\n    \"\"\"\n\nSLACK_USERMENTION_REGEX = r\"\"\"\n    (<@)                  # Start with '<@'\n    ([a-zA-Z0-9]+)        # Here we have the Slack id\n    (\\|)?                # We don't always have a vertical line in the mention\n    ([a-zA-Z0-9]+)?       # If a vertical line is present, this is the short name\n    (>)                   # ends with '>'\n    \"\"\"\n# Slack doesn't have mid-word message-formatting like Zulip.\n# Hence, ~stri~ke doesn't format the word in slack, but ~~stri~~ke\n# formats the word in Zulip\nSLACK_STRIKETHROUGH_REGEX = r\"\"\"\n    (^|[ -(]|[+-/]|\\*|\\_|[:-?]|\\{|\\[|\\||\\^)     # Start after specified characters\n    (\\~)                                  # followed by a tilde\n    ([ -)+-}—]*)([ -}]+)                  # any character except tilde\n    (\\~)                                  # followed by a tilde\n    ($|[ -']|[+-/]|[:-?]|\\*|\\_|\\}|\\)|\\]|\\||\\^)  # ends with specified characters\n    \"\"\"\nSLACK_ITALIC_REGEX = r\"\"\"\n    (^|[ -*]|[+-/]|[:-?]|\\{|\\[|\\||\\^|~)\n    (\\_)\n    ([ -^`~—]*)([ -^`-~]+)                # any character\n    (\\_)\n    ($|[ -']|[+-/]|[:-?]|\\}|\\)|\\]|\\*|\\||\\^|~)\n    \"\"\"\nSLACK_BOLD_REGEX = r\"\"\"\n    (^|[ -(]|[+-/]|[:-?]|\\{|\\[|\\_|\\||\\^|~)\n    (\\*)\n    ([ -)+-~—]*)([ -)+-~]+)               # any character\n    (\\*)\n    ($|[ -']|[+-/]|[:-?]|\\}|\\)|\\]|\\_|\\||\\^|~)\n    \"\"\"\n\ndef get_user_full_name(user: ZerverFieldsT) -> str:\n    if user['deleted'] is False:\n        if user['real_name'] == '':\n            return user['name']\n        else:\n            return user['real_name']\n    else:\n        return user['name']\n\n# Markdown mapping\ndef convert_to_zulip_markdown(text: str, users: List[ZerverFieldsT],\n                              added_channels: AddedChannelsT,\n                              added_users: AddedUsersT) -> Tuple[str, List[int], bool]:\n    mentioned_users_id = []\n    text = convert_markdown_syntax(text, SLACK_BOLD_REGEX, \"**\")\n    text = convert_markdown_syntax(text, SLACK_STRIKETHROUGH_REGEX, \"~~\")\n    text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, \"*\")\n\n    # Map Slack's mention all: '<!channel>' to '@**all**'\n    # Map Slack's mention all: '<!everyone>' to '@**all**'\n    # Map Slack's mention all: '<!here>' to '@**all**'\n    # No regex for this as it can be present anywhere in the sentence\n    text = text.replace('<!channel>', '@**all**')\n    text = text.replace('<!everyone>', '@**all**')\n    text = text.replace('<!here>', '@**all**')\n\n    # Map Slack channel mention: '<#C5Z73A7RA|general>' to '#**general**'\n    for cname, ids in added_channels.items():\n        cid = ids[0]\n        text = text.replace('<#%s|%s>' % (cid, cname), '#**' + cname + '**')\n\n    tokens = text.split(' ')\n    for iterator in range(len(tokens)):\n\n        # Check user mentions and change mention format from\n        # '<@slack_id|short_name>' to '@**full_name**'\n        if (re.findall(SLACK_USERMENTION_REGEX, tokens[iterator], re.VERBOSE)):\n            tokens[iterator], user_id = get_user_mentions(tokens[iterator],\n                                                          users, added_users)\n            if user_id is not None:\n                mentioned_users_id.append(user_id)\n\n    text = ' '.join(tokens)\n\n    # Check and convert link format\n    text, has_link = convert_link_format(text)\n    # convert `<mailto:foo@foo.com>` to `mailto:foo@foo.com`\n    text, has_mailto_link = convert_mailto_format(text)\n\n    if has_link is True or has_mailto_link is True:\n        message_has_link = True\n    else:\n        message_has_link = False\n\n    return text, mentioned_users_id, message_has_link\n\n
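Putting the regexes and replacements above together, these are the plain-formatting translations convert_to_zulip_markdown performs (inputs are illustrative Slack-flavored strings; user and channel mentions additionally need populated users/added_users/added_channels tables):

    cases = [
        ('*bold*', '**bold**'),          # Slack bold   -> Zulip bold
        ('~strike~', '~~strike~~'),      # Slack strike -> Zulip strike
        ('_italic_', '*italic*'),        # Slack italic -> Zulip italic
        ('<!channel>', '@**all**'),      # channel-wide mention
    ]
    for slack_text, zulip_text in cases:
        converted, _, _ = convert_to_zulip_markdown(slack_text, [], {}, {})
        assert converted == zulip_text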
def get_user_mentions(token: str, users: List[ZerverFieldsT],\n                      added_users: AddedUsersT) -> Tuple[str, Optional[int]]:\n    slack_usermention_match = re.search(SLACK_USERMENTION_REGEX, token, re.VERBOSE)\n    short_name = slack_usermention_match.group(4)  # type: ignore # slack_usermention_match exists and is not None\n    slack_id = slack_usermention_match.group(2)  # type: ignore # slack_usermention_match exists and is not None\n    for user in users:\n        if (user['id'] == slack_id and user['name'] == short_name and short_name) or \\\n           (user['id'] == slack_id and short_name is None):\n            full_name = get_user_full_name(user)\n            user_id = added_users[slack_id]\n            mention = \"@**\" + full_name + \"**\"\n            token = re.sub(SLACK_USERMENTION_REGEX, mention, token, flags=re.VERBOSE)\n            return token, user_id\n    return token, None\n\n# Map italic, bold and strikethrough markdown\ndef convert_markdown_syntax(text: str, regex: str, zulip_keyword: str) -> str:\n    \"\"\"\n    Returns:\n    1. For strikethrough formatting: This maps Slack's '~strike~' to Zulip's '~~strike~~'\n    2. For bold formatting: This maps Slack's '*bold*' to Zulip's '**bold**'\n    3. For italic formatting: This maps Slack's '_italic_' to Zulip's '*italic*'\n    \"\"\"\n    for match in re.finditer(regex, text, re.VERBOSE):\n        converted_token = (match.group(1) + zulip_keyword + match.group(3)\n                           + match.group(4) + zulip_keyword + match.group(6))\n        text = text.replace(match.group(0), converted_token)\n    return text\n\ndef convert_link_format(text: str) -> Tuple[str, bool]:\n    \"\"\"\n    1. Converts '<https://foo.com>' to 'https://foo.com'\n    2. Converts '<https://foo.com|foo>' to 'https://foo.com|foo'\n    \"\"\"\n    has_link = False\n    for match in re.finditer(LINK_REGEX, text, re.VERBOSE):\n        converted_text = match.group(0).replace('>', '').replace('<', '')\n        has_link = True\n        text = text.replace(match.group(0), converted_text)\n    return text, has_link\n\ndef convert_mailto_format(text: str) -> Tuple[str, bool]:\n    \"\"\"\n    1. Converts '<mailto:foo@foo.com>' to 'mailto:foo@foo.com'\n    2. Converts '<mailto:foo@foo.com|foo@foo.com>' to 'mailto:foo@foo.com'\n    \"\"\"\n    has_link = False\n    for match in re.finditer(SLACK_MAILTO_REGEX, text, re.VERBOSE):\n        has_link = True\n        text = text.replace(match.group(0), match.group(1))\n    return text, has_link\n"},"type_annotations":{"kind":"list like","value":["ZerverFieldsT","str","List[ZerverFieldsT]","AddedChannelsT","AddedUsersT","str","List[ZerverFieldsT]","AddedUsersT","str","str","str","str","str"],"string":"[\n    \"ZerverFieldsT\",\n    \"str\",\n    \"List[ZerverFieldsT]\",\n    \"AddedChannelsT\",\n    \"AddedUsersT\",\n    \"str\",\n    \"List[ZerverFieldsT]\",\n    \"AddedUsersT\",\n    \"str\",\n    \"str\",\n    \"str\",\n    \"str\",\n    \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[3046,3315,3327,3394,3453,5295,5307,5363,6272,6284,6304,6910,7372],"string":"[\n    3046,\n    3315,\n    3327,\n    3394,\n    3453,\n    5295,\n    5307,\n    5363,\n    6272,\n    6284,\n    6304,\n    6910,\n    7372\n]"},"type_annotation_ends":{"kind":"list like","value":[3059,3318,3346,3408,3464,5298,5326,5374,6275,6287,6307,6913,7375],"string":"[\n    3059,\n    3318,\n    3346,\n    3408,\n    3464,\n    5298,\n    5326,\n    5374,\n    6275,\n    6287,\n    6307,\n    6913,\n    7375\n]"}}},{"rowIdx":1337,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/decorator.py"},"contents":{"kind":"string","value":"\nimport django_otp\nfrom two_factor.utils import default_device\nfrom django_otp import user_has_device, _user_is_authenticated\nfrom django_otp.conf import settings as otp_settings\n\nfrom django.contrib.auth.decorators import user_passes_test as django_user_passes_test\nfrom django.utils.translation import ugettext as _\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib.auth import REDIRECT_FIELD_NAME, login as django_login\nfrom django.views.decorators.csrf import csrf_exempt\nfrom 
django.http import QueryDict, HttpResponseNotAllowed, HttpRequest\nfrom django.http.multipartparser import MultiPartParser\nfrom zerver.models import Realm, UserProfile, get_client, get_user_profile_by_api_key\nfrom zerver.lib.response import json_error, json_unauthorized, json_success\nfrom django.shortcuts import resolve_url\nfrom django.utils.decorators import available_attrs\nfrom django.utils.timezone import now as timezone_now\nfrom django.conf import settings\n\nfrom zerver.lib.queue import queue_json_publish\nfrom zerver.lib.subdomains import get_subdomain, user_matches_subdomain\nfrom zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime\nfrom zerver.lib.utils import statsd, is_remote_server\nfrom zerver.lib.exceptions import RateLimited, JsonableError, ErrorCode, \\\n InvalidJSONError\nfrom zerver.lib.types import ViewFuncT\n\nfrom zerver.lib.rate_limiter import incr_ratelimit, is_ratelimited, \\\n api_calls_left, RateLimitedUser\nfrom zerver.lib.request import REQ, has_request_variables, JsonableError, RequestVariableMissingError\nfrom django.core.handlers import base\n\nfrom functools import wraps\nimport base64\nimport datetime\nimport ujson\nimport logging\nfrom io import BytesIO\nimport urllib\n\nfrom typing import Union, Any, Callable, Sequence, Dict, Optional, TypeVar, Tuple, cast\nfrom zerver.lib.logging_util import log_to_file\n\n# This is a hack to ensure that RemoteZulipServer always exists even\n# if Zilencer isn't enabled.\nif settings.ZILENCER_ENABLED:\n from zilencer.models import get_remote_server_by_uuid, RemoteZulipServer\nelse: # nocoverage # Hack here basically to make impossible code paths compile\n from mock import Mock\n get_remote_server_by_uuid = Mock()\n RemoteZulipServer = Mock() # type: ignore # https://github.com/JukkaL/mypy/issues/1188\n\nReturnT = TypeVar('ReturnT')\n\nwebhook_logger = logging.getLogger(\"zulip.zerver.webhooks\")\nlog_to_file(webhook_logger, settings.API_KEY_ONLY_WEBHOOK_LOG_PATH)\n\nclass _RespondAsynchronously:\n pass\n\n# Return RespondAsynchronously from an @asynchronous view if the\n# response will be provided later by calling handler.zulip_finish(),\n# or has already been provided this way. 
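A hedged sketch of what a view using this longpolling protocol looks like (the handler wiring and the view name are illustrative; the real Tornado integration lives elsewhere in Zulip):

    @asynchronous
    def get_events(request: HttpRequest,
                   handler: Any) -> Union[HttpResponse, _RespondAsynchronously]:
        # Register the connection somewhere, then reply later via
        # handler.zulip_finish(...); returning the sentinel tells the
        # framework not to send a response now.
        return RespondAsynchronously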
We use this for longpolling\n# mode.\nRespondAsynchronously = _RespondAsynchronously()\n\nAsyncWrapperT = Callable[..., Union[HttpResponse, _RespondAsynchronously]]\ndef asynchronous(method: Callable[..., Union[HttpResponse, _RespondAsynchronously]]) -> AsyncWrapperT:\n # TODO: this should be the correct annotation when mypy gets fixed: type:\n # (Callable[[HttpRequest, base.BaseHandler, Sequence[Any], Dict[str, Any]],\n # Union[HttpResponse, _RespondAsynchronously]]) ->\n # Callable[[HttpRequest, Sequence[Any], Dict[str, Any]], Union[HttpResponse, _RespondAsynchronously]]\n # TODO: see https://github.com/python/mypy/issues/1655\n @wraps(method)\n def wrapper(request: HttpRequest, *args: Any,\n **kwargs: Any) -> Union[HttpResponse, _RespondAsynchronously]:\n return method(request, handler=request._tornado_handler, *args, **kwargs)\n if getattr(method, 'csrf_exempt', False): # nocoverage # Our one @asynchronous route requires CSRF\n wrapper.csrf_exempt = True # type: ignore # https://github.com/JukkaL/mypy/issues/1170\n return wrapper\n\ndef cachify(method: Callable[..., ReturnT]) -> Callable[..., ReturnT]:\n dct = {} # type: Dict[Tuple[Any, ...], ReturnT]\n\n def cache_wrapper(*args: Any) -> ReturnT:\n tup = tuple(args)\n if tup in dct:\n return dct[tup]\n result = method(*args)\n dct[tup] = result\n return result\n return cache_wrapper\n\ndef update_user_activity(request: HttpRequest, user_profile: UserProfile,\n query: Optional[str]) -> None:\n # update_active_status also pushes to rabbitmq, and it seems\n # redundant to log that here as well.\n if request.META[\"PATH_INFO\"] == '/json/users/me/presence':\n return\n\n if query is not None:\n pass\n elif hasattr(request, '_query'):\n query = request._query\n else:\n query = request.META['PATH_INFO']\n\n event = {'query': query,\n 'user_profile_id': user_profile.id,\n 'time': datetime_to_timestamp(timezone_now()),\n 'client': request.client.name}\n queue_json_publish(\"user_activity\", event, lambda event: None)\n\n# Based on django.views.decorators.http.require_http_methods\ndef require_post(func: ViewFuncT) -> ViewFuncT:\n @wraps(func)\n def wrapper(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:\n if (request.method != \"POST\" and\n not (request.method == \"SOCKET\" and\n request.META['zulip.emulated_method'] == \"POST\")):\n if request.method == \"SOCKET\": # nocoverage # zulip.emulated_method is always POST\n err_method = \"SOCKET/%s\" % (request.META['zulip.emulated_method'],)\n else:\n err_method = request.method\n logging.warning('Method Not Allowed (%s): %s', err_method, request.path,\n extra={'status_code': 405, 'request': request})\n return HttpResponseNotAllowed([\"POST\"])\n return func(request, *args, **kwargs)\n return wrapper # type: ignore # https://github.com/python/mypy/issues/1927\n\ndef require_realm_admin(func: ViewFuncT) -> ViewFuncT:\n @wraps(func)\n def wrapper(request: HttpRequest, user_profile: UserProfile, *args: Any, **kwargs: Any) -> HttpResponse:\n if not user_profile.is_realm_admin:\n raise JsonableError(_(\"Must be an organization administrator\"))\n return func(request, user_profile, *args, **kwargs)\n return wrapper # type: ignore # https://github.com/python/mypy/issues/1927\n\ndef require_billing_access(func: ViewFuncT) -> ViewFuncT:\n @wraps(func)\n def wrapper(request: HttpRequest, user_profile: UserProfile, *args: Any, **kwargs: Any) -> HttpResponse:\n if not user_profile.is_realm_admin and not user_profile.is_billing_admin:\n raise JsonableError(_(\"Must be a billing administrator 
or an organization administrator\"))\n return func(request, user_profile, *args, **kwargs)\n return wrapper # type: ignore # https://github.com/python/mypy/issues/1927\n\nfrom zerver.lib.user_agent import parse_user_agent\n\ndef get_client_name(request: HttpRequest, is_browser_view: bool) -> str:\n # If the API request specified a client in the request content,\n # that has priority. Otherwise, extract the client from the\n # User-Agent.\n if 'client' in request.GET:\n return request.GET['client']\n if 'client' in request.POST:\n return request.POST['client']\n if \"HTTP_USER_AGENT\" in request.META:\n user_agent = parse_user_agent(request.META[\"HTTP_USER_AGENT\"]) # type: Optional[Dict[str, str]]\n else:\n user_agent = None\n if user_agent is not None:\n # We could check for a browser's name being \"Mozilla\", but\n # e.g. Opera and MobileSafari don't set that, and it seems\n # more robust to just key off whether it was a browser view\n if is_browser_view and not user_agent[\"name\"].startswith(\"Zulip\"):\n # Avoid changing the client string for browsers, but let\n # the Zulip desktop and mobile apps be themselves.\n return \"website\"\n else:\n return user_agent[\"name\"]\n else:\n # In the future, we will require setting USER_AGENT, but for\n # now we just want to tag these requests so we can review them\n # in logs and figure out the extent of the problem\n if is_browser_view:\n return \"website\"\n else:\n return \"Unspecified\"\n\ndef process_client(request: HttpRequest, user_profile: UserProfile,\n *, is_browser_view: bool=False,\n client_name: Optional[str]=None,\n remote_server_request: bool=False,\n query: Optional[str]=None) -> None:\n if client_name is None:\n client_name = get_client_name(request, is_browser_view)\n\n request.client = get_client(client_name)\n if not remote_server_request:\n update_user_activity(request, user_profile, query)\n\nclass InvalidZulipServerError(JsonableError):\n code = ErrorCode.INVALID_ZULIP_SERVER\n data_fields = ['role']\n\n def __init__(self, role: str) -> None:\n self.role = role # type: str\n\n @staticmethod\n def msg_format() -> str:\n return \"Zulip server auth failure: {role} is not registered\"\n\nclass InvalidZulipServerKeyError(InvalidZulipServerError):\n @staticmethod\n def msg_format() -> str:\n return \"Zulip server auth failure: key does not match role {role}\"\n\ndef validate_api_key(request: HttpRequest, role: Optional[str],\n api_key: str, is_webhook: bool=False,\n client_name: Optional[str]=None) -> Union[UserProfile, RemoteZulipServer]:\n # Remove whitespace to protect users from trivial errors.\n api_key = api_key.strip()\n if role is not None:\n role = role.strip()\n\n if settings.ZILENCER_ENABLED and role is not None and is_remote_server(role):\n try:\n remote_server = get_remote_server_by_uuid(role)\n except RemoteZulipServer.DoesNotExist:\n raise InvalidZulipServerError(role)\n if api_key != remote_server.api_key:\n raise InvalidZulipServerKeyError(role)\n\n if get_subdomain(request) != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:\n raise JsonableError(_(\"Invalid subdomain for push notifications bouncer\"))\n request.user = remote_server\n request._email = \"zulip-server:\" + role\n remote_server.rate_limits = \"\"\n process_client(request, remote_server, remote_server_request=True)\n return remote_server\n\n user_profile = access_user_by_api_key(request, api_key, email=role)\n if user_profile.is_incoming_webhook and not is_webhook:\n raise JsonableError(_(\"This API is not available to incoming webhook bots.\"))\n\n request.user 
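A minimal sketch of calling validate_api_key directly (construction of `request` is omitted; in practice this call happens inside Zulip's authentication decorators):

    # role is an email (or a server UUID when the Zilencer push bouncer is
    # enabled); api_key is the matching secret. Values here are placeholders.
    profile = validate_api_key(request, role='bot@example.com',
                               api_key='abcd1234', is_webhook=False)
    # On success the request is annotated for downstream code:
    assert request.user is profile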
def process_client(request: HttpRequest, user_profile: UserProfile,
                   *, is_browser_view: bool=False,
                   client_name: Optional[str]=None,
                   remote_server_request: bool=False,
                   query: Optional[str]=None) -> None:
    if client_name is None:
        client_name = get_client_name(request, is_browser_view)

    request.client = get_client(client_name)
    if not remote_server_request:
        update_user_activity(request, user_profile, query)

class InvalidZulipServerError(JsonableError):
    code = ErrorCode.INVALID_ZULIP_SERVER
    data_fields = ['role']

    def __init__(self, role: str) -> None:
        self.role = role  # type: str

    @staticmethod
    def msg_format() -> str:
        return "Zulip server auth failure: {role} is not registered"

class InvalidZulipServerKeyError(InvalidZulipServerError):
    @staticmethod
    def msg_format() -> str:
        return "Zulip server auth failure: key does not match role {role}"

def validate_api_key(request: HttpRequest, role: Optional[str],
                     api_key: str, is_webhook: bool=False,
                     client_name: Optional[str]=None) -> Union[UserProfile, RemoteZulipServer]:
    # Remove whitespace to protect users from trivial errors.
    api_key = api_key.strip()
    if role is not None:
        role = role.strip()

    if settings.ZILENCER_ENABLED and role is not None and is_remote_server(role):
        try:
            remote_server = get_remote_server_by_uuid(role)
        except RemoteZulipServer.DoesNotExist:
            raise InvalidZulipServerError(role)
        if api_key != remote_server.api_key:
            raise InvalidZulipServerKeyError(role)

        if get_subdomain(request) != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
            raise JsonableError(_("Invalid subdomain for push notifications bouncer"))
        request.user = remote_server
        request._email = "zulip-server:" + role
        remote_server.rate_limits = ""
        process_client(request, remote_server, remote_server_request=True)
        return remote_server

    user_profile = access_user_by_api_key(request, api_key, email=role)
    if user_profile.is_incoming_webhook and not is_webhook:
        raise JsonableError(_("This API is not available to incoming webhook bots."))

    request.user = user_profile
    request._email = user_profile.email
    process_client(request, user_profile, client_name=client_name)

    return user_profile
def validate_account_and_subdomain(request: HttpRequest, user_profile: UserProfile) -> None:
    if user_profile.realm.deactivated:
        raise JsonableError(_("This organization has been deactivated"))
    if not user_profile.is_active:
        raise JsonableError(_("Account is deactivated"))

    # Either the subdomain matches, or we're processing a websockets message
    # in the message_sender worker (which will have already had the
    # subdomain validated), or we're accessing Tornado from and to
    # localhost (aka spoofing a request as the user).
    if (not user_matches_subdomain(get_subdomain(request), user_profile) and
            not (request.method == "SOCKET" and
                 request.META['SERVER_NAME'] == "127.0.0.1") and
            not (settings.RUNNING_INSIDE_TORNADO and
                 request.META["SERVER_NAME"] == "127.0.0.1" and
                 request.META["REMOTE_ADDR"] == "127.0.0.1")):
        logging.warning("User %s (%s) attempted to access API on wrong subdomain (%s)" % (
            user_profile.email, user_profile.realm.subdomain, get_subdomain(request)))
        raise JsonableError(_("Account is not associated with this subdomain"))

def access_user_by_api_key(request: HttpRequest, api_key: str, email: Optional[str]=None) -> UserProfile:
    try:
        user_profile = get_user_profile_by_api_key(api_key)
    except UserProfile.DoesNotExist:
        raise JsonableError(_("Invalid API key"))
    if email is not None and email.lower() != user_profile.email.lower():
        # This covers the case that the API key is correct, but for a
        # different user.  We may end up wanting to relax this
        # constraint or give a different error message in the future.
        raise JsonableError(_("Invalid API key"))

    validate_account_and_subdomain(request, user_profile)

    return user_profile

def log_exception_to_webhook_logger(request: HttpRequest, user_profile: UserProfile,
                                    request_body: Optional[str]=None) -> None:
    if request_body is not None:
        payload = request_body
    else:
        payload = request.body

    if request.content_type == 'application/json':
        try:
            payload = ujson.dumps(ujson.loads(payload), indent=4)
        except ValueError:
            payload = str(payload)
    else:
        payload = str(payload)

    custom_header_template = "{header}: {value}\n"

    header_text = ""
    for header in request.META.keys():
        if header.lower().startswith('http_x'):
            header_text += custom_header_template.format(
                header=header, value=request.META[header])

    header_message = header_text if header_text else None

    message = """
user: {email} ({realm})
client: {client_name}
URL: {path_info}
content_type: {content_type}
custom_http_headers:
{custom_headers}
body:

{body}
    """.format(
        email=user_profile.email,
        realm=user_profile.realm.string_id,
        client_name=request.client.name,
        body=payload,
        path_info=request.META.get('PATH_INFO', None),
        content_type=request.content_type,
        custom_headers=header_message,
    )
    message = message.strip(' ')
    webhook_logger.exception(message)

def full_webhook_client_name(raw_client_name: Optional[str]=None) -> Optional[str]:
    if raw_client_name is None:
        return None
    return "Zulip{}Webhook".format(raw_client_name)

# Use this for webhook views that don't get an email passed in.
def api_key_only_webhook_view(
        webhook_client_name: str,
        notify_bot_owner_on_invalid_json: Optional[bool]=True
) -> Callable[[ViewFuncT], ViewFuncT]:
    # TODO: The typing here could be improved by using the Extended Callable types:
    # https://mypy.readthedocs.io/en/latest/kinds_of_types.html#extended-callable-types

    def _wrapped_view_func(view_func: ViewFuncT) -> ViewFuncT:
        @csrf_exempt
        @has_request_variables
        @wraps(view_func)
        def _wrapped_func_arguments(request: HttpRequest, api_key: str=REQ(),
                                    *args: Any, **kwargs: Any) -> HttpResponse:
            user_profile = validate_api_key(request, None, api_key, is_webhook=True,
                                            client_name=full_webhook_client_name(webhook_client_name))

            if settings.RATE_LIMITING:
                rate_limit_user(request, user_profile, domain='all')
            try:
                return view_func(request, user_profile, *args, **kwargs)
            except InvalidJSONError as e:
                if not notify_bot_owner_on_invalid_json:
                    raise e
                # NOTE: importing this at the top of file leads to a
                # cyclic import; the correct fix is probably to move
                # notify_bot_owner_about_invalid_json to a smaller file.
                from zerver.lib.webhooks.common import notify_bot_owner_about_invalid_json
                notify_bot_owner_about_invalid_json(user_profile, webhook_client_name)
            except Exception as err:
                log_exception_to_webhook_logger(request, user_profile)
                raise err

        return _wrapped_func_arguments
    return _wrapped_view_func

# From Django 1.8, modified to leave off ?next=/
def redirect_to_login(next: str, login_url: Optional[str]=None,
                      redirect_field_name: str=REDIRECT_FIELD_NAME) -> HttpResponseRedirect:
    """
    Redirects the user to the login page, passing the given 'next' page.
    """
    resolved_url = resolve_url(login_url or settings.LOGIN_URL)

    login_url_parts = list(urllib.parse.urlparse(resolved_url))
    if redirect_field_name:
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring[redirect_field_name] = next
        # Don't add ?next=/, to keep our URLs clean
        if next != '/':
            login_url_parts[4] = querystring.urlencode(safe='/')

    return HttpResponseRedirect(urllib.parse.urlunparse(login_url_parts))
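# Standalone illustration of the querystring handling in redirect_to_login
# above, using only the standard library (Django's QueryDict plays the role
# urllib.parse.urlencode plays here).  The URLs are made-up examples.
def _demo_login_redirect_url(next: str, login_url: str = "/login/") -> str:
    """
    >>> _demo_login_redirect_url("/stream/42/topic/hi")
    '/login/?next=/stream/42/topic/hi'
    >>> _demo_login_redirect_url("/")   # ?next=/ is omitted to keep URLs clean
    '/login/'
    """
    import urllib.parse
    parts = list(urllib.parse.urlparse(login_url))
    if next != '/':
        parts[4] = urllib.parse.urlencode({'next': next}, safe='/')
    return urllib.parse.urlunparse(parts)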
# From Django 1.8
def user_passes_test(test_func: Callable[[HttpRequest], bool], login_url: Optional[str]=None,
                     redirect_field_name: str=REDIRECT_FIELD_NAME) -> Callable[[ViewFuncT], ViewFuncT]:
    """
    Decorator for views that checks that the user passes the given test,
    redirecting to the log-in page if necessary.  The test should be a
    callable that takes the HttpRequest object and returns True if the
    user passes.
    """
    def decorator(view_func: ViewFuncT) -> ViewFuncT:
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
            if test_func(request):
                return view_func(request, *args, **kwargs)
            path = request.build_absolute_uri()
            resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
            # If the login url is the same scheme and net location then just
            # use the path as the "next" url.
            login_scheme, login_netloc = urllib.parse.urlparse(resolved_login_url)[:2]
            current_scheme, current_netloc = urllib.parse.urlparse(path)[:2]
            if ((not login_scheme or login_scheme == current_scheme) and
                    (not login_netloc or login_netloc == current_netloc)):
                path = request.get_full_path()
            return redirect_to_login(
                path, resolved_login_url, redirect_field_name)
        return _wrapped_view  # type: ignore # https://github.com/python/mypy/issues/1927
    return decorator
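# Quick standalone check of the same-origin comparison above: only when the
# login URL shares the scheme and netloc of the current URL (or leaves them
# unspecified) can "next" safely be shortened to a relative path.  The
# example URLs are invented.
def _demo_same_origin(login_url: str, current_url: str) -> bool:
    """
    >>> _demo_same_origin("/login/", "https://chat.example.com/stream/42")
    True
    >>> _demo_same_origin("https://auth.example.com/login/",
    ...                   "https://chat.example.com/stream/42")
    False
    """
    import urllib.parse
    login_scheme, login_netloc = urllib.parse.urlparse(login_url)[:2]
    current_scheme, current_netloc = urllib.parse.urlparse(current_url)[:2]
    return ((not login_scheme or login_scheme == current_scheme) and
            (not login_netloc or login_netloc == current_netloc))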
def logged_in_and_active(request: HttpRequest) -> bool:
    if not request.user.is_authenticated:
        return False
    if not request.user.is_active:
        return False
    if request.user.realm.deactivated:
        return False
    return user_matches_subdomain(get_subdomain(request), request.user)

def do_two_factor_login(request: HttpRequest, user_profile: UserProfile) -> None:
    device = default_device(user_profile)
    if device:
        django_otp.login(request, device)

def do_login(request: HttpRequest, user_profile: UserProfile) -> None:
    """Creates a session, logging in the user, using the Django method,
    and also adds helpful data needed by our server logs.
    """
    django_login(request, user_profile)
    request._email = user_profile.email
    process_client(request, user_profile, is_browser_view=True)
    if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
        # Log in with two factor authentication as well.
        do_two_factor_login(request, user_profile)

def log_view_func(view_func: ViewFuncT) -> ViewFuncT:
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
        request._query = view_func.__name__
        return view_func(request, *args, **kwargs)
    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927

def add_logging_data(view_func: ViewFuncT) -> ViewFuncT:
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
        request._email = request.user.email
        process_client(request, request.user, is_browser_view=True,
                       query=view_func.__name__)
        return rate_limit()(view_func)(request, *args, **kwargs)
    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927

def human_users_only(view_func: ViewFuncT) -> ViewFuncT:
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
        if request.user.is_bot:
            return json_error(_("This endpoint does not accept bot requests."))
        return view_func(request, *args, **kwargs)
    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927

# Based on Django 1.8's @login_required
def zulip_login_required(
        function: Optional[ViewFuncT]=None,
        redirect_field_name: str=REDIRECT_FIELD_NAME,
        login_url: str=settings.HOME_NOT_LOGGED_IN,
) -> Union[Callable[[ViewFuncT], ViewFuncT], ViewFuncT]:
    actual_decorator = user_passes_test(
        logged_in_and_active,
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )

    otp_required_decorator = zulip_otp_required(
        redirect_field_name=redirect_field_name,
        login_url=login_url,
    )

    if function:
        # Add necessary logging data via add_logging_data
        return actual_decorator(zulip_otp_required(add_logging_data(function)))
    return actual_decorator(otp_required_decorator)  # nocoverage # We don't use this without a function
def require_server_admin(view_func: ViewFuncT) -> ViewFuncT:
    @zulip_login_required
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
        if not request.user.is_staff:
            return HttpResponseRedirect(settings.HOME_NOT_LOGGED_IN)

        return add_logging_data(view_func)(request, *args, **kwargs)
    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927

def require_server_admin_api(view_func: ViewFuncT) -> ViewFuncT:
    @zulip_login_required
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, user_profile: UserProfile, *args: Any,
                           **kwargs: Any) -> HttpResponse:
        if not user_profile.is_staff:
            raise JsonableError(_("Must be a server administrator"))
        return view_func(request, user_profile, *args, **kwargs)
    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927

def require_non_guest_user(view_func: ViewFuncT) -> ViewFuncT:
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, user_profile: UserProfile, *args: Any,
                           **kwargs: Any) -> HttpResponse:
        if user_profile.is_guest:
            raise JsonableError(_("Not allowed for guest users"))
        return view_func(request, user_profile, *args, **kwargs)
    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927

def require_non_guest_human_user(view_func: ViewFuncT) -> ViewFuncT:
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, user_profile: UserProfile, *args: Any,
                           **kwargs: Any) -> HttpResponse:
        if user_profile.is_guest:
            raise JsonableError(_("Not allowed for guest users"))
        if user_profile.is_bot:
            return json_error(_("This endpoint does not accept bot requests."))
        return view_func(request, user_profile, *args, **kwargs)
    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927

# authenticated_api_view will add the authenticated user's user_profile to
# the view function's arguments list, since we have to look it up anyway.
# It is deprecated in favor of the REST API versions.
def authenticated_api_view(is_webhook: bool=False) -> Callable[[ViewFuncT], ViewFuncT]:
    def _wrapped_view_func(view_func: ViewFuncT) -> ViewFuncT:
        @csrf_exempt
        @require_post
        @has_request_variables
        @wraps(view_func)
        def _wrapped_func_arguments(request: HttpRequest, email: str=REQ(),
                                    api_key: Optional[str]=REQ(default=None),
                                    api_key_legacy: Optional[str]=REQ('api-key', default=None),
                                    *args: Any, **kwargs: Any) -> HttpResponse:
            if api_key is None:
                api_key = api_key_legacy
            if api_key is None:  # nocoverage # We're removing this whole decorator soon.
                raise RequestVariableMissingError("api_key")
            user_profile = validate_api_key(request, email, api_key, is_webhook)
            # Apply rate limiting
            limited_func = rate_limit()(view_func)
            try:
                return limited_func(request, user_profile, *args, **kwargs)
            except Exception as err:
                if is_webhook:
                    # In this case, request_body is passed explicitly because the body
                    # of the request has already been read in has_request_variables and
                    # can't be read/accessed more than once, so we just access it from
                    # the request.POST QueryDict.
                    log_exception_to_webhook_logger(request, user_profile,
                                                    request_body=request.POST.get('payload'))
                raise err

        return _wrapped_func_arguments
    return _wrapped_view_func

# This API endpoint is used only for the mobile apps.  It is part of a
# workaround for the fact that React Native doesn't support setting
# HTTP basic authentication headers.
def authenticated_uploads_api_view() -> Callable[[ViewFuncT], ViewFuncT]:
    def _wrapped_view_func(view_func: ViewFuncT) -> ViewFuncT:
        @csrf_exempt
        @has_request_variables
        @wraps(view_func)
        def _wrapped_func_arguments(request: HttpRequest,
                                    api_key: str=REQ(),
                                    *args: Any, **kwargs: Any) -> HttpResponse:
            user_profile = validate_api_key(request, None, api_key, False)
            limited_func = rate_limit()(view_func)
            return limited_func(request, user_profile, *args, **kwargs)
        return _wrapped_func_arguments
    return _wrapped_view_func

# A more REST-y authentication decorator, using, in particular, HTTP basic
# authentication.
#
# If webhook_client_name is specified, the request is treated as a webhook
# view with that string as the basis for the client string.
def authenticated_rest_api_view(*, webhook_client_name: Optional[str]=None,
                                is_webhook: bool=False) -> Callable[[ViewFuncT], ViewFuncT]:
    def _wrapped_view_func(view_func: ViewFuncT) -> ViewFuncT:
        @csrf_exempt
        @wraps(view_func)
        def _wrapped_func_arguments(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
            # The first try block attempts to get the credentials we need to do authentication.
            try:
                # Grab the base64-encoded authentication string, decode it, and split it into
                # the email and API key.
                auth_type, credentials = request.META['HTTP_AUTHORIZATION'].split()
                # case insensitive per RFC 1945
                if auth_type.lower() != "basic":
                    return json_error(_("This endpoint requires HTTP basic authentication."))
                role, api_key = base64.b64decode(credentials).decode('utf-8').split(":")
            except ValueError:
                return json_unauthorized(_("Invalid authorization header for basic auth"))
            except KeyError:
                return json_unauthorized(_("Missing authorization header for basic auth"))

            # Now we try to do authentication or die.
            try:
                # profile is a Union[UserProfile, RemoteZulipServer]
                profile = validate_api_key(request, role, api_key,
                                           is_webhook=is_webhook or webhook_client_name is not None,
                                           client_name=full_webhook_client_name(webhook_client_name))
            except JsonableError as e:
                return json_unauthorized(e.msg)
            try:
                # Apply rate limiting
                return rate_limit()(view_func)(request, profile, *args, **kwargs)
            except Exception as err:
                if is_webhook or webhook_client_name is not None:
                    request_body = request.POST.get('payload')
                    if request_body is not None:
                        log_exception_to_webhook_logger(request, profile,
                                                        request_body=request_body)
                raise err
        return _wrapped_func_arguments
    return _wrapped_view_func
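# Standalone sketch of the Authorization header this decorator parses:
# "Basic" plus the base64 of "email:api_key" (or "server-uuid:api_key" for
# remote servers).  The credentials below are placeholders, not real keys.
def _demo_basic_auth_header(role: str, api_key: str) -> str:
    """
    >>> _demo_basic_auth_header("iago@zulip.com", "abcd1234")
    'Basic aWFnb0B6dWxpcC5jb206YWJjZDEyMzQ='
    """
    import base64
    credentials = "{}:{}".format(role, api_key)
    return "Basic " + base64.b64encode(credentials.encode('utf-8')).decode('utf-8')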
def process_as_post(view_func: ViewFuncT) -> ViewFuncT:
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
        # Adapted from django/http/__init__.py.
        # By default, Django doesn't populate request.POST for anything besides
        # POST requests.  We want this dict populated for PATCH/PUT, so we have
        # to do it ourselves.
        #
        # This will not be required in the future; a bug will be filed against
        # Django upstream.

        if not request.POST:
            # Only take action if POST is empty.
            if request.META.get('CONTENT_TYPE', '').startswith('multipart'):
                # Note that request._files is just the private attribute that backs the
                # FILES property, so we are essentially setting request.FILES here.  (In
                # Django 1.5 FILES was still a read-only property.)
                request.POST, request._files = MultiPartParser(
                    request.META,
                    BytesIO(request.body),
                    request.upload_handlers,
                    request.encoding
                ).parse()
            else:
                request.POST = QueryDict(request.body, encoding=request.encoding)

        return view_func(request, *args, **kwargs)

    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927
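# What QueryDict(request.body) amounts to for a urlencoded PATCH body,
# shown with the standard library (the payload is an invented example):
def _demo_parse_patch_body(body: bytes) -> dict:
    """
    >>> _demo_parse_patch_body(b"title=hello&private=true")
    {'title': ['hello'], 'private': ['true']}
    """
    import urllib.parse
    return urllib.parse.parse_qs(body.decode('utf-8'))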
def authenticate_log_and_execute_json(request: HttpRequest,
                                      view_func: ViewFuncT,
                                      *args: Any, **kwargs: Any) -> HttpResponse:
    if not request.user.is_authenticated:
        return json_error(_("Not logged in"), status=401)
    user_profile = request.user
    validate_account_and_subdomain(request, user_profile)

    if user_profile.is_incoming_webhook:
        raise JsonableError(_("Webhook bots can only access webhooks"))

    process_client(request, user_profile, is_browser_view=True,
                   query=view_func.__name__)
    request._email = user_profile.email
    return rate_limit()(view_func)(request, user_profile, *args, **kwargs)

# Checks if the request is a POST request and that the user is logged
# in.  If not, return an error (the @login_required behavior of
# redirecting to a login page doesn't make sense for json views).
def authenticated_json_post_view(view_func: ViewFuncT) -> ViewFuncT:
    @require_post
    @has_request_variables
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest,
                           *args: Any, **kwargs: Any) -> HttpResponse:
        return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927

def authenticated_json_view(view_func: ViewFuncT) -> ViewFuncT:
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest,
                           *args: Any, **kwargs: Any) -> HttpResponse:
        return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927

def is_local_addr(addr: str) -> bool:
    return addr in ('127.0.0.1', '::1')

# These views are used by the main Django server to notify the Tornado server
# of events.  We protect them from the outside world by checking a shared
# secret, and also the originating IP (for now).
def authenticate_notify(request: HttpRequest) -> bool:
    return (is_local_addr(request.META['REMOTE_ADDR']) and
            request.POST.get('secret') == settings.SHARED_SECRET)

def client_is_exempt_from_rate_limiting(request: HttpRequest) -> bool:
    # Don't rate limit requests from Django that come from our own servers,
    # and don't rate-limit dev instances.
    return ((request.client and request.client.name.lower() == 'internal') and
            (is_local_addr(request.META['REMOTE_ADDR']) or
             settings.DEBUG_RATE_LIMITING))

def internal_notify_view(is_tornado_view: bool) -> Callable[[ViewFuncT], ViewFuncT]:
    # The typing here could be improved by using the Extended Callable types:
    # https://mypy.readthedocs.io/en/latest/kinds_of_types.html#extended-callable-types
    """Used for situations where something running on the Zulip server
    needs to make a request to the (other) Django/Tornado processes running on
    the server."""
    def _wrapped_view_func(view_func: ViewFuncT) -> ViewFuncT:
        @csrf_exempt
        @require_post
        @wraps(view_func)
        def _wrapped_func_arguments(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
            if not authenticate_notify(request):
                return json_error(_('Access denied'), status=403)
            is_tornado_request = hasattr(request, '_tornado_handler')
            # These next 2 are not security checks; they are internal
            # assertions to help us find bugs.
            if is_tornado_view and not is_tornado_request:
                raise RuntimeError('Tornado notify view called with no Tornado handler')
            if not is_tornado_view and is_tornado_request:
                raise RuntimeError('Django notify view called with Tornado handler')
            request._email = "internal"
            return view_func(request, *args, **kwargs)
        return _wrapped_func_arguments
    return _wrapped_view_func

# Converter functions for use with has_request_variables
def to_non_negative_int(s: str) -> int:
    x = int(s)
    if x < 0:
        raise ValueError("argument is negative")
    return x

def to_not_negative_int_or_none(s: str) -> Optional[int]:
    if s:
        return to_non_negative_int(s)
    return None

def to_utc_datetime(timestamp: str) -> datetime.datetime:
    return timestamp_to_datetime(float(timestamp))
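# Behavior of the converters above at a glance (doctest style; the epoch
# timestamp is an arbitrary example):
#
#     >>> to_non_negative_int("3")
#     3
#     >>> to_non_negative_int("-1")
#     Traceback (most recent call last):
#         ...
#     ValueError: argument is negative
#     >>> to_not_negative_int_or_none("")
#     >>> to_utc_datetime("1514764800").year
#     2018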
def statsd_increment(counter: str, val: int=1,
                     ) -> Callable[[Callable[..., ReturnT]], Callable[..., ReturnT]]:
    """Increments a statsd counter on completion of the
    decorated function.

    Pass the name of the counter to this decorator-returning function."""
    def wrapper(func: Callable[..., ReturnT]) -> Callable[..., ReturnT]:
        @wraps(func)
        def wrapped_func(*args: Any, **kwargs: Any) -> ReturnT:
            ret = func(*args, **kwargs)
            statsd.incr(counter, val)
            return ret
        return wrapped_func
    return wrapper

def rate_limit_user(request: HttpRequest, user: UserProfile, domain: str) -> None:
    """Raises a RateLimited exception if the user has been rate limited;
    otherwise returns, after annotating the request with the current
    rate-limit information."""

    entity = RateLimitedUser(user, domain=domain)
    ratelimited, time = is_ratelimited(entity)
    request._ratelimit_applied_limits = True
    request._ratelimit_secs_to_freedom = time
    request._ratelimit_over_limit = ratelimited
    # Abort this request if the user is over their rate limits.
    if ratelimited:
        statsd.incr("ratelimiter.limited.%s.%s" % (type(user), user.id))
        raise RateLimited()

    incr_ratelimit(entity)
    calls_remaining, time_reset = api_calls_left(entity)

    request._ratelimit_remaining = calls_remaining
    request._ratelimit_secs_to_freedom = time_reset

def rate_limit(domain: str='all') -> Callable[[ViewFuncT], ViewFuncT]:
    """Rate-limits a view.  Takes an optional 'domain' param if you wish to
    rate limit different types of API calls independently.

    Returns a decorator."""
    def wrapper(func: ViewFuncT) -> ViewFuncT:
        @wraps(func)
        def wrapped_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:

            # It is really tempting to not even wrap our original function
            # when settings.RATE_LIMITING is False, but it would make
            # for awkward unit testing in some situations.
            if not settings.RATE_LIMITING:
                return func(request, *args, **kwargs)

            if client_is_exempt_from_rate_limiting(request):
                return func(request, *args, **kwargs)

            try:
                user = request.user
            except Exception:  # nocoverage # See comments below
                # TODO: This logic is not tested, and I'm not sure we are
                # doing the right thing here.
                user = None

            if not user:  # nocoverage # See comments below
                logging.error("Requested rate-limiting on %s but user is not authenticated!" %
                              func.__name__)
                return func(request, *args, **kwargs)

            # Rate-limiting data is stored in redis.
            # We also only support rate-limiting authenticated
            # views right now.
            # TODO(leo) - implement per-IP non-authed rate limiting
            rate_limit_user(request, user, domain)

            return func(request, *args, **kwargs)
        return wrapped_func  # type: ignore # https://github.com/python/mypy/issues/1927
    return wrapper
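# Minimal in-memory sketch of the is_ratelimited / incr_ratelimit /
# api_calls_left contract that rate_limit_user relies on above.  Zulip's
# real backend is Redis-based with multiple rules per entity; the single
# fixed window and the 200-per-minute limit here are invented for
# illustration.
import time as _demo_time
from collections import defaultdict as _demo_defaultdict

_DEMO_WINDOW, _DEMO_LIMIT = 60.0, 200
_demo_hits = _demo_defaultdict(list)

def _demo_is_ratelimited(key):
    # Returns (over_limit, secs_to_freedom), mirroring is_ratelimited().
    now = _demo_time.time()
    _demo_hits[key] = [t for t in _demo_hits[key] if now - t < _DEMO_WINDOW]
    if len(_demo_hits[key]) < _DEMO_LIMIT:
        return False, 0.0
    # Over the limit: the caller is free again once the oldest hit expires.
    return True, _DEMO_WINDOW - (now - _demo_hits[key][0])

def _demo_incr_ratelimit(key):
    _demo_hits[key].append(_demo_time.time())

def _demo_api_calls_left(key):
    # Returns (calls_remaining, secs_until_window_resets).
    now = _demo_time.time()
    hits = [t for t in _demo_hits[key] if now - t < _DEMO_WINDOW]
    reset = (_DEMO_WINDOW - (now - hits[0])) if hits else 0.0
    return _DEMO_LIMIT - len(hits), reset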
def return_success_on_head_request(view_func: ViewFuncT) -> ViewFuncT:
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
        if request.method == 'HEAD':
            return json_success()
        return view_func(request, *args, **kwargs)
    return _wrapped_view_func  # type: ignore # https://github.com/python/mypy/issues/1927

def zulip_otp_required(view: Any=None,
                       redirect_field_name: str='next',
                       login_url: str=settings.HOME_NOT_LOGGED_IN,
                       ) -> Callable[..., HttpResponse]:
    """
    The reason we need to create this function is that the stock
    otp_required decorator doesn't play well with tests.  We cannot
    enable/disable the if_configured parameter during tests since the
    decorator retains its value due to closure.

    Similar to :func:`~django.contrib.auth.decorators.login_required`, but
    requires the user to be :term:`verified`.  By default, this redirects users
    to :setting:`OTP_LOGIN_URL`.
    """

    def test(user: UserProfile) -> bool:
        """
        :if_configured: Mirrors settings.TWO_FACTOR_AUTHENTICATION_ENABLED.
            If ``False``, two-factor authentication is disabled on the
            server, so this check lets every user through.  If ``True``,
            an authenticated user with no confirmed OTP devices is still
            allowed.
        """
        if_configured = settings.TWO_FACTOR_AUTHENTICATION_ENABLED
        if not if_configured:
            return True

        return user.is_verified() or (_user_is_authenticated(user) and
                                      not user_has_device(user))

    decorator = django_user_passes_test(test,
                                        login_url=login_url,
                                        redirect_field_name=redirect_field_name)

    return decorator if (view is None) else decorator(view)

# [end of this row's source file; the dump's accompanying dataset fields --
#  extracted type-annotation strings with their start/end character
#  offsets -- are omitted]
# ======================================================================
# archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip :: zerver/filters.py
# ======================================================================

import re
from typing import Any, Dict

from django.http import HttpRequest
from django.views.debug import SafeExceptionReporterFilter

class ZulipExceptionReporterFilter(SafeExceptionReporterFilter):
    def get_post_parameters(self, request: HttpRequest) -> Dict[str, Any]:
        filtered_post = SafeExceptionReporterFilter.get_post_parameters(self, request).copy()
        filtered_vars = ['content', 'secret', 'password', 'key', 'api-key', 'subject', 'stream',
                         'subscriptions', 'to', 'csrfmiddlewaretoken', 'api_key']

        for var in filtered_vars:
            if var in filtered_post:
                filtered_post[var] = '**********'
        return filtered_post

def clean_data_from_query_parameters(val: str) -> str:
    return re.sub(r"([a-z_-]+=)([^&]+)([&]|$)", r"\1******\3", val)

# [dataset fields omitted: extracted type-annotation strings and their
#  start/end character offsets]
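# Standalone check of clean_data_from_query_parameters above: every
# key=value pair in a query string gets its value masked.  The query
# string is an invented example.
#
#     >>> clean_data_from_query_parameters("api_key=sEcReT&stream=general")
#     'api_key=******&stream=******'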
# ======================================================================
# archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip :: zerver/forms.py
# ======================================================================

from django import forms
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.forms import SetPasswordForm, AuthenticationForm, \
    PasswordResetForm
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.core.validators import validate_email
from django.db.models.query import QuerySet
from django.utils.translation import ugettext as _
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.http import HttpRequest
from jinja2 import Markup as mark_safe

from zerver.lib.actions import do_change_password, email_not_system_bot, \
    validate_email_for_realm
from zerver.lib.name_restrictions import is_reserved_subdomain, is_disposable_domain
from zerver.lib.request import JsonableError
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.subdomains import get_subdomain, user_matches_subdomain, is_root_domain_available
from zerver.lib.users import check_full_name
from zerver.models import Realm, get_user, UserProfile, get_realm, email_to_domain, \
    email_allowed_for_realm, DisposableEmailError, DomainNotAllowedForRealmError, \
    EmailContainsPlusError
from zproject.backends import email_auth_enabled, email_belongs_to_ldap

import logging
import re
import DNS

from typing import Any, Callable, List, Optional, Dict
from two_factor.forms import AuthenticationTokenForm as TwoFactorAuthenticationTokenForm
from two_factor.utils import totp_digits

MIT_VALIDATION_ERROR = u'That user does not exist at MIT or is a ' + \
                       u'mailing list.  ' + \
                       u'If you want to sign up an alias for Zulip, ' + \
                       u'contact us.'
WRONG_SUBDOMAIN_ERROR = "Your Zulip account is not a member of the " + \
                        "organization associated with this subdomain.  " + \
                        "Please contact %s with any questions!" % (FromAddress.SUPPORT,)

def email_is_not_mit_mailing_list(email: str) -> None:
    """Prevent MIT mailing lists from signing up for Zulip"""
    if "@mit.edu" in email:
        username = email.rsplit("@", 1)[0]
        # Check whether the user exists and can get mail.
        try:
            DNS.dnslookup("%s.pobox.ns.athena.mit.edu" % username, DNS.Type.TXT)
        except DNS.Base.ServerError as e:
            if e.rcode == DNS.Status.NXDOMAIN:
                raise ValidationError(mark_safe(MIT_VALIDATION_ERROR))
            else:
                raise AssertionError("Unexpected DNS error")

def check_subdomain_available(subdomain: str, from_management_command: bool=False) -> None:
    error_strings = {
        'too short': _("Subdomain needs to have length 3 or greater."),
        'extremal dash': _("Subdomain cannot start or end with a '-'."),
        'bad character': _("Subdomain can only have lowercase letters, numbers, and '-'s."),
        'unavailable': _("Subdomain unavailable.  Please choose a different one.")}

    if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
        if is_root_domain_available():
            return
        raise ValidationError(error_strings['unavailable'])
    if subdomain[0] == '-' or subdomain[-1] == '-':
        raise ValidationError(error_strings['extremal dash'])
    if not re.match('^[a-z0-9-]*$', subdomain):
        raise ValidationError(error_strings['bad character'])
    if from_management_command:
        return
    if len(subdomain) < 3:
        raise ValidationError(error_strings['too short'])
    if is_reserved_subdomain(subdomain) or \
            get_realm(subdomain) is not None:
        raise ValidationError(error_strings['unavailable'])
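# Note that the ordering of the checks above matters: a bare "-" would be
# rejected as an extremal dash before the length check ever runs.  A
# doctest-style sketch (subdomains invented; "mycompany" assumes a test
# database where the name is neither reserved nor already taken):
#
#     >>> for sub in ("mycompany", "ab", "-bad-", "Bad_Caps"):
#     ...     try:
#     ...         check_subdomain_available(sub)
#     ...         print(sub, "ok")
#     ...     except ValidationError as e:
#     ...         print(sub, e.message)
#     mycompany ok
#     ab Subdomain needs to have length 3 or greater.
#     -bad- Subdomain cannot start or end with a '-'.
#     Bad_Caps Subdomain can only have lowercase letters, numbers, and '-'s.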
class RegistrationForm(forms.Form):
    MAX_PASSWORD_LENGTH = 100
    full_name = forms.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
    # The required-ness of the password field gets overridden if it isn't
    # actually required for a realm.
    password = forms.CharField(widget=forms.PasswordInput, max_length=MAX_PASSWORD_LENGTH)
    realm_subdomain = forms.CharField(max_length=Realm.MAX_REALM_SUBDOMAIN_LENGTH, required=False)

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Since the superclass doesn't accept random extra kwargs, we
        # remove it from the kwargs dict before initializing.
        self.realm_creation = kwargs['realm_creation']
        del kwargs['realm_creation']

        super().__init__(*args, **kwargs)
        if settings.TERMS_OF_SERVICE:
            self.fields['terms'] = forms.BooleanField(required=True)
        self.fields['realm_name'] = forms.CharField(
            max_length=Realm.MAX_REALM_NAME_LENGTH,
            required=self.realm_creation)

    def clean_full_name(self) -> str:
        try:
            return check_full_name(self.cleaned_data['full_name'])
        except JsonableError as e:
            raise ValidationError(e.msg)

    def clean_realm_subdomain(self) -> str:
        if not self.realm_creation:
            # This field is only used during realm creation.
            return ""

        subdomain = self.cleaned_data['realm_subdomain']
        if 'realm_in_root_domain' in self.data:
            subdomain = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN

        check_subdomain_available(subdomain)
        return subdomain

class ToSForm(forms.Form):
    terms = forms.BooleanField(required=True)

class HomepageForm(forms.Form):
    email = forms.EmailField()

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self.realm = kwargs.pop('realm', None)
        self.from_multiuse_invite = kwargs.pop('from_multiuse_invite', False)
        super().__init__(*args, **kwargs)

    def clean_email(self) -> str:
        """Returns the email if and only if the user's email address is
        allowed to join the realm they are trying to join."""
        email = self.cleaned_data['email']

        # Otherwise, the user is trying to join a specific realm.
        realm = self.realm
        from_multiuse_invite = self.from_multiuse_invite

        if realm is None:
            raise ValidationError(_("The organization you are trying to "
                                    "join using {email} does not "
                                    "exist.").format(email=email))

        if not from_multiuse_invite and realm.invite_required:
            raise ValidationError(_("Please request an invite for {email} "
                                    "from the organization "
                                    "administrator.").format(email=email))

        try:
            email_allowed_for_realm(email, realm)
        except DomainNotAllowedForRealmError:
            raise ValidationError(
                _("Your email address, {email}, is not in one of the domains "
                  "that are allowed to register for accounts in this organization.").format(
                      string_id=realm.string_id, email=email))
        except DisposableEmailError:
            raise ValidationError(_("Please use your real email address."))
        except EmailContainsPlusError:
            raise ValidationError(_("Email addresses containing + are not allowed in this organization."))

        validate_email_for_realm(realm, email)

        if realm.is_zephyr_mirror_realm:
            email_is_not_mit_mailing_list(email)

        return email
def email_is_not_disposable(email: str) -> None:
    if is_disposable_domain(email_to_domain(email)):
        raise ValidationError(_("Please use your real email address."))

class RealmCreationForm(forms.Form):
    # This form determines whether users can create a new realm.
    email = forms.EmailField(validators=[email_not_system_bot,
                                         email_is_not_disposable])

class LoggingSetPasswordForm(SetPasswordForm):
    def save(self, commit: bool=True) -> UserProfile:
        do_change_password(self.user, self.cleaned_data['new_password1'],
                           commit=commit)
        return self.user

def generate_password_reset_url(user_profile: UserProfile,
                                token_generator: PasswordResetTokenGenerator) -> str:
    token = token_generator.make_token(user_profile)
    uid = urlsafe_base64_encode(force_bytes(user_profile.id)).decode('ascii')
    endpoint = reverse('django.contrib.auth.views.password_reset_confirm',
                       kwargs=dict(uidb64=uid, token=token))
    return "{}{}".format(user_profile.realm.uri, endpoint)
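# Standalone sketch of the uidb64 component built above: the numeric user
# id is serialized and URL-safe base64-encoded (Django also strips the
# padding).  User id 42 is a placeholder.
def _demo_uidb64(user_id: int) -> str:
    """
    >>> _demo_uidb64(42)
    'NDI'
    """
    import base64
    encoded = base64.urlsafe_b64encode(str(user_id).encode('ascii'))
    return encoded.decode('ascii').rstrip('=')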
class ZulipPasswordResetForm(PasswordResetForm):
    def save(self,
             domain_override: Optional[bool]=None,
             subject_template_name: str='registration/password_reset_subject.txt',
             email_template_name: str='registration/password_reset_email.html',
             use_https: bool=False,
             token_generator: PasswordResetTokenGenerator=default_token_generator,
             from_email: Optional[str]=None,
             request: HttpRequest=None,
             html_email_template_name: Optional[str]=None,
             extra_email_context: Optional[Dict[str, Any]]=None
             ) -> None:
        """
        If the email address has an account in the target realm,
        generates a one-use-only link for resetting the password and
        sends it to the user.

        We send a different email if an associated account does not exist in the
        database, or an account does exist, but not in the realm.

        Note: We ignore protocol and the various email template arguments (those
        are an artifact of using Django's password reset framework).
        """
        email = self.cleaned_data["email"]

        realm = get_realm(get_subdomain(request))

        if not email_auth_enabled(realm):
            logging.info("Password reset attempted for %s even though password auth is disabled." % (email,))
            return
        if email_belongs_to_ldap(realm, email):
            # TODO: Ideally, we'd provide a user-facing error here
            # about the fact that they aren't allowed to have a
            # password in the Zulip server and should change it in LDAP.
            logging.info("Password reset not allowed for user in LDAP domain")
            return
        if realm.deactivated:
            logging.info("Realm is deactivated")
            return

        user = None  # type: Optional[UserProfile]
        try:
            user = get_user(email, realm)
        except UserProfile.DoesNotExist:
            pass

        context = {
            'email': email,
            'realm_uri': realm.uri,
        }

        if user is not None and not user.is_active:
            context['user_deactivated'] = True
            user = None

        if user is not None:
            context['active_account_in_realm'] = True
            context['reset_url'] = generate_password_reset_url(user, token_generator)
            send_email('zerver/emails/password_reset', to_user_id=user.id,
                       from_name="Zulip Account Security",
                       from_address=FromAddress.tokenized_no_reply_address(),
                       context=context)
        else:
            context['active_account_in_realm'] = False
            active_accounts_in_other_realms = UserProfile.objects.filter(email__iexact=email, is_active=True)
            if active_accounts_in_other_realms:
                context['active_accounts_in_other_realms'] = active_accounts_in_other_realms
            send_email('zerver/emails/password_reset', to_email=email,
                       from_name="Zulip Account Security",
                       from_address=FromAddress.tokenized_no_reply_address(),
                       context=context)

class CreateUserForm(forms.Form):
    full_name = forms.CharField(max_length=100)
    email = forms.EmailField()

class OurAuthenticationForm(AuthenticationForm):
    def clean(self) -> Dict[str, Any]:
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')

        if username is not None and password:
            subdomain = get_subdomain(self.request)
            realm = get_realm(subdomain)
            return_data = {}  # type: Dict[str, Any]
            self.user_cache = authenticate(self.request, username=username, password=password,
                                           realm=realm, return_data=return_data)

            if return_data.get("inactive_realm"):
                raise AssertionError("Programming error: inactive realm in authentication form")

            if return_data.get("inactive_user") and not return_data.get("is_mirror_dummy"):
                # We exclude mirror dummy accounts here.  They should be treated as the
                # user never having had an account, so we let them fall through to the
                # normal invalid_login case below.
                error_msg = (
                    u"Your account is no longer active. "
                    u"Please contact your organization administrator to reactivate it.")
                raise ValidationError(mark_safe(error_msg))

            if return_data.get("invalid_subdomain"):
                logging.warning("User %s attempted to password login to wrong subdomain %s" %
                                (username, subdomain))
                raise ValidationError(mark_safe(WRONG_SUBDOMAIN_ERROR))

            if self.user_cache is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                    params={'username': self.username_field.verbose_name},
                )

            self.confirm_login_allowed(self.user_cache)

        return self.cleaned_data

    def add_prefix(self, field_name: str) -> str:
        """Disable prefix, since Zulip doesn't use this Django forms feature
        (and django-two-factor does use it), and we'd like both to be
        happy with this form.
        """
        return field_name
class AuthenticationTokenForm(TwoFactorAuthenticationTokenForm):
    """
    We add this form to update the widget of otp_token.  The default
    widget is an input element whose type is a number, which doesn't
    stylistically match our theme.
    """
    otp_token = forms.IntegerField(label=_("Token"), min_value=1,
                                   max_value=int('9' * totp_digits()),
                                   widget=forms.TextInput)

class MultiEmailField(forms.Field):
    def to_python(self, emails: str) -> List[str]:
        """Normalize data to a list of strings."""
        if not emails:
            return []

        return [email.strip() for email in emails.split(',')]

    def validate(self, emails: List[str]) -> None:
        """Check if value consists only of valid emails."""
        super().validate(emails)
        for email in emails:
            validate_email(email)

class FindMyTeamForm(forms.Form):
    emails = MultiEmailField(
        help_text=_("Add up to 10 comma-separated email addresses."))

    def clean_emails(self) -> List[str]:
        emails = self.cleaned_data['emails']
        if len(emails) > 10:
            raise forms.ValidationError(_("Please enter at most 10 emails."))

        return emails

# [end of this row's source file; dataset fields omitted: extracted
#  type-annotation strings and their start/end character offsets]
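# The normalization MultiEmailField.to_python performs, shown standalone
# (addresses invented):
#
#     >>> [e.strip() for e in "a@example.com, b@example.com".split(',')]
#     ['a@example.com', 'b@example.com']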
# ======================================================================
# archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip :: zerver/lib/__init__.py
# ======================================================================
# (empty file; this row carries no contents and no type annotations)

# ======================================================================
# archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip :: zerver/lib/actions.py
# ======================================================================

from typing import (
    AbstractSet, Any, AnyStr, Callable, Dict, Iterable, List, Mapping, MutableMapping,
    Optional, Sequence, Set, Tuple, TypeVar, Union, cast
)
from mypy_extensions import TypedDict

import django.db.utils
from django.db.models import Count
from django.contrib.contenttypes.models import ContentType
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core import validators
from django.core.files import File
from analytics.lib.counts import COUNT_STATS, do_increment_logging_stat, \
    RealmCount

from zerver.lib.bugdown import (
    version as bugdown_version,
    url_embed_preview_enabled_for_realm
)
from zerver.lib.addressee import (
    Addressee,
    user_profiles_from_unvalidated_emails,
)
from zerver.lib.bot_config import (
    ConfigError,
    get_bot_config,
    get_bot_configs,
    set_bot_config,
)
from zerver.lib.cache import (
    bot_dict_fields,
    delete_user_profile_caches,
    to_dict_cache_key_id,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.emoji import emoji_name_to_emoji_code, get_emoji_file_name
from zerver.lib.exceptions import StreamDoesNotExistError
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.message import (
    access_message,
    MessageDict,
    render_markdown,
)
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.stream_subscription import (
    get_active_subscriptions_for_stream_id,
    get_active_subscriptions_for_stream_ids,
    get_bulk_stream_subscriber_info,
    get_stream_subscriptions_for_user,
    get_stream_subscriptions_for_users,
    num_subscribers_for_stream_id,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.topic import (
    filter_by_exact_message_topic,
    filter_by_topic_name_via_message,
    save_message_for_edit_use_case,
    update_messages_for_topic_edit,
    ORIG_TOPIC,
    LEGACY_PREV_TOPIC,
    TOPIC_LINKS,
    TOPIC_NAME,
)
from zerver.lib.topic_mutes import (
    get_topic_mutes,
    add_topic_mute,
    remove_topic_mute,
)
from zerver.lib.users import (
    bulk_get_users,
    check_bot_name_available,
    check_full_name,
    get_api_key,
    user_ids_to_users
)
from zerver.lib.user_groups import create_user_group, access_user_group_by_id

from zerver.models import Realm, RealmEmoji, Stream, UserProfile, UserActivity, \
    RealmDomain, Service, SubMessage, \
    Subscription, Recipient, Message, Attachment, UserMessage, RealmAuditLog, \
    UserHotspot, MultiuseInvite, ScheduledMessage, \
    Client, DefaultStream, DefaultStreamGroup, UserPresence, PushDeviceToken, \
    ScheduledEmail, MAX_TOPIC_NAME_LENGTH, \
    MAX_MESSAGE_LENGTH, get_client, get_stream, get_personal_recipient, get_huddle, \
    get_user_profile_by_id, PreregistrationUser, get_display_recipient, \
    get_realm, bulk_get_recipients, get_stream_recipient, get_stream_recipients, \
    email_allowed_for_realm, email_to_username, display_recipient_cache_key, \
    get_user, get_stream_cache_key, active_non_guest_user_ids, \
    UserActivityInterval, active_user_ids, get_active_streams, \
    realm_filters_for_realm, RealmFilter, stream_name_in_use, \
    get_old_unclaimed_attachments, is_cross_realm_bot_email, \
    Reaction, EmailChangeStatus, CustomProfileField, \
    custom_profile_fields_for_realm, get_huddle_user_ids, \
    CustomProfileFieldValue, validate_attachment_request, get_system_bot, \
    get_display_recipient_by_id, query_for_ids, get_huddle_recipient, \
    UserGroup, UserGroupMembership, get_default_stream_groups, \
    get_bot_services, get_bot_dicts_in_realm, DomainNotAllowedForRealmError, \
    DisposableEmailError, EmailContainsPlusError

from zerver.lib.alert_words import alert_words_in_realm
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.stream_recipient import StreamRecipientMap
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions

from django.db import transaction, IntegrityError, connection
from django.db.models import F, Q, Max, Sum
from django.db.models.query import QuerySet
from django.core.exceptions import ValidationError
from django.utils.timezone import now as timezone_now

from confirmation.models import Confirmation, create_confirmation_link, generate_key
from confirmation import settings as confirmation_settings

from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.timestamp import timestamp_to_datetime, datetime_to_timestamp
from zerver.lib.queue import queue_json_publish
from zerver.lib.utils import generate_api_key
from zerver.lib.create_user import create_user
from zerver.lib import bugdown
from zerver.lib.cache import cache_with_key, cache_set, \
    user_profile_by_email_cache_key, user_profile_cache_key, \
    cache_set_many, cache_delete, cache_delete_many
from zerver.decorator import statsd_increment
from zerver.lib.utils import log_statsd_event, statsd
from zerver.lib.html_diff import highlight_html_differences
from zerver.lib.i18n import get_language_name
from zerver.lib.alert_words import user_alert_words, add_user_alert_words, \
    remove_user_alert_words, set_user_alert_words
from zerver.lib.notifications import clear_scheduled_emails, \
    clear_scheduled_invitation_emails, enqueue_welcome_emails
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.exceptions import JsonableError, ErrorCode, BugdownRenderingException
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.upload import attachment_url_re, attachment_url_to_path_id, \
    claim_attachment, delete_message_image, upload_emoji_image, delete_avatar_image
from zerver.lib.str_utils import NonBinaryStr
from zerver.tornado.event_queue import request_event_queue, send_event
from zerver.lib.types import ProfileFieldData

from analytics.models import StreamCount

import ujson
import time
import re
import datetime
import os
import platform
import logging
import itertools
from collections import defaultdict
from operator import itemgetter
# This will be used to type annotate parameters in a function if the function
# works on both str and unicode in python 2 but in python 3 it only works on str.
SizedTextIterable = Union[Sequence[str], AbstractSet[str]]

STREAM_ASSIGNMENT_COLORS = [
    "#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
    "#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
    "#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
    "#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
    "#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
    "#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]

# Store an event in the log for re-importing messages
def log_event(event: MutableMapping[str, Any]) -> None:
    if settings.EVENT_LOG_DIR is None:
        return

    if "timestamp" not in event:
        event["timestamp"] = time.time()

    if not os.path.exists(settings.EVENT_LOG_DIR):
        os.mkdir(settings.EVENT_LOG_DIR)

    template = os.path.join(settings.EVENT_LOG_DIR,
                            '%s.' + platform.node() +
                            timezone_now().strftime('.%Y-%m-%d'))

    with lockfile(template % ('lock',)):
        with open(template % ('events',), 'a') as log:
            log.write(ujson.dumps(event) + '\n')
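# Standalone sketch of the append-a-JSON-line pattern log_event uses, with
# the settings/lockfile machinery stubbed out.  The event payload and file
# name below are invented.
def _demo_append_event(path: str, event: dict) -> None:
    import json
    import time
    event.setdefault("timestamp", time.time())
    with open(path, 'a') as log:
        log.write(json.dumps(event) + '\n')

# e.g. _demo_append_event("/tmp/events.demo", {"type": "user_created"})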

def can_access_stream_user_ids(stream: Stream) -> Set[int]:
    # Return the user ids of users who can access the attributes of
    # a stream, such as its name/description.
    if stream.is_public():
        # For a public stream, this is everyone in the realm
        # except unsubscribed guest users
        return public_stream_user_ids(stream)
    else:
        # For a private stream, it's subscribers plus realm admins.
        return private_stream_user_ids(stream.id) | {user.id for user in stream.realm.get_admin_users()}

def private_stream_user_ids(stream_id: int) -> Set[int]:
    # TODO: Find similar queries elsewhere and de-duplicate this code.
    subscriptions = get_active_subscriptions_for_stream_id(stream_id)
    return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')}

def public_stream_user_ids(stream: Stream) -> Set[int]:
    guest_subscriptions = get_active_subscriptions_for_stream_id(
        stream.id).filter(user_profile__is_guest=True)
    guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')}
    return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions

def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
    is_private_bot = (
        user_profile.default_sending_stream and
        user_profile.default_sending_stream.invite_only or
        user_profile.default_events_register_stream and
        user_profile.default_events_register_stream.invite_only)
    if is_private_bot:
        return {user_profile.bot_owner_id, }
    else:
        users = {user.id for user in user_profile.realm.get_admin_users()}
        users.add(user_profile.bot_owner_id)
        return users
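
# Note on bot_owner_user_ids above: `and` binds tighter than `or`, so
# the is_private_bot expression is equivalent to:
#
#     (default_sending_stream and default_sending_stream.invite_only)
#     or (default_events_register_stream and default_events_register_stream.invite_only)
#
# i.e. the bot counts as "private" if either of its default streams is
# invite-only.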

def realm_user_count(realm: Realm) -> int:
    return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()

def activity_change_requires_seat_update(user: UserProfile) -> bool:
    return user.realm.has_seat_based_plan and not user.is_bot

def send_signup_message(sender: UserProfile, admin_realm_signup_notifications_stream: str,
                        user_profile: UserProfile, internal: bool=False,
                        realm: Optional[Realm]=None) -> None:
    if internal:
        # When this is done using manage.py vs. the web interface
        internal_blurb = " **INTERNAL SIGNUP** "
    else:
        internal_blurb = " "

    user_count = realm_user_count(user_profile.realm)
    signup_notifications_stream = user_profile.realm.get_signup_notifications_stream()
    # Send a notification to the realm's signup notifications stream,
    # if it exists; don't send one for the first user in a realm.
    if signup_notifications_stream is not None and user_count > 1:
        internal_send_message(
            user_profile.realm,
            sender,
            "stream",
            signup_notifications_stream.name,
            "signups",
            "%s (%s) just signed up for Zulip. (total: %i)" % (
                user_profile.full_name, user_profile.email, user_count
            )
        )

    # We also send a notification to the Zulip administrative realm.
    admin_realm = get_system_bot(sender).realm
    try:
        # Check whether the stream exists
        get_stream(admin_realm_signup_notifications_stream, admin_realm)
    except Stream.DoesNotExist:
        # If the signups stream hasn't been created in the admin
        # realm, don't auto-create it just to send to it; do nothing.
        return
    internal_send_message(
        admin_realm,
        sender,
        "stream",
        admin_realm_signup_notifications_stream,
        user_profile.realm.display_subdomain,
        "%s <`%s`> just signed up for Zulip!%s(total: **%i**)" % (
            user_profile.full_name,
            user_profile.email,
            internal_blurb,
            user_count,
        )
    )

def notify_invites_changed(user_profile: UserProfile) -> None:
    event = dict(type="invites_changed")
    admin_ids = [user.id for user in user_profile.realm.get_admin_users()]
    send_event(user_profile.realm, event, admin_ids)

def notify_new_user(user_profile: UserProfile, internal: bool=False) -> None:
    if settings.NOTIFICATION_BOT is not None:
        send_signup_message(settings.NOTIFICATION_BOT, "signups", user_profile, internal)
    statsd.gauge("users.signups.%s" % (user_profile.realm.string_id), 1, delta=True)

    # We also clear any scheduled invitation emails to prevent them
    # from being sent after the user is created.
    clear_scheduled_invitation_emails(user_profile.email)

def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
    """Give you the last 1000 messages on your public streams, so you have
    something to look at in your home view once you finish the
    tutorial."""
    one_week_ago = timezone_now() - datetime.timedelta(weeks=1)

    stream_ids = [stream.id for stream in streams if not stream.invite_only]
    recipients = get_stream_recipients(stream_ids)
    recent_messages = Message.objects.filter(recipient_id__in=recipients,
                                             pub_date__gt=one_week_ago).order_by("-id")
    message_ids_to_use = list(reversed(recent_messages.values_list('id', flat=True)[0:1000]))
    if len(message_ids_to_use) == 0:
        return

    # Handle the race condition where a message arrives between
    # bulk_add_subscriptions above and the Message query just above
    already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
                                                 user_profile=user_profile).values_list("message_id",
                                                                                        flat=True))
    ums_to_create = [UserMessage(user_profile=user_profile, message_id=message_id,
                                 flags=UserMessage.flags.read)
                     for message_id in message_ids_to_use
                     if message_id not in already_ids]

    UserMessage.objects.bulk_create(ums_to_create)

# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * Subscribes the user to the newsletter if newsletter_data is specified
def process_new_human_user(user_profile: UserProfile,
                           prereg_user: Optional[PreregistrationUser]=None,
                           newsletter_data: Optional[Dict[str, str]]=None,
                           default_stream_groups: List[DefaultStreamGroup]=[],
                           realm_creation: bool=False) -> None:
    mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
    if prereg_user is not None:
        streams = prereg_user.streams.all()
        acting_user = prereg_user.referred_by  # type: Optional[UserProfile]
    else:
        streams = []
        acting_user = None

    # If the user's invitation didn't explicitly list some streams, we
    # add the default streams
    if len(streams) == 0:
        streams = get_default_subs(user_profile)

    for default_stream_group in default_stream_groups:
        default_stream_group_streams = default_stream_group.streams.all()
        for stream in default_stream_group_streams:
            if stream not in streams:
                streams.append(stream)

    bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user)

    add_new_user_history(user_profile, streams)

    # mit_beta_users don't have a referred_by field
    if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None \
            and settings.NOTIFICATION_BOT is not None:
        # This is a cross-realm private message.
        internal_send_private_message(
            user_profile.realm,
            get_system_bot(settings.NOTIFICATION_BOT),
            prereg_user.referred_by,
            "%s <`%s`> accepted your invitation to join Zulip!" % (
                user_profile.full_name,
                user_profile.email,
            )
        )
    # Mark any other PreregistrationUsers that are STATUS_ACTIVE as
    # inactive so we can keep track of the PreregistrationUser we
    # actually used for analytics
    if prereg_user is not None:
        PreregistrationUser.objects.filter(email__iexact=user_profile.email).exclude(
            id=prereg_user.id).update(status=0)
        if prereg_user.referred_by is not None:
            notify_invites_changed(user_profile)
    else:
        PreregistrationUser.objects.filter(email__iexact=user_profile.email).update(status=0)

    notify_new_user(user_profile)
    if user_profile.realm.send_welcome_emails:
        enqueue_welcome_emails(user_profile, realm_creation)

    # We have an import loop here; it's intentional, because we want
    # to keep all the onboarding code in zerver/lib/onboarding.py.
    from zerver.lib.onboarding import send_initial_pms
    send_initial_pms(user_profile)

    if newsletter_data is not None:
        # If the user was created automatically via the API, we may
        # not want to register them for the newsletter
        queue_json_publish(
            "signups",
            {
                'email_address': user_profile.email,
                'user_id': user_profile.id,
                'merge_fields': {
                    'NAME': user_profile.full_name,
                    'REALM_ID': user_profile.realm_id,
                    'OPTIN_IP': newsletter_data["IP"],
                    'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)),
                },
            },
            lambda event: None)

def notify_created_user(user_profile: UserProfile) -> None:
    event = dict(type="realm_user", op="add",
                 person=dict(email=user_profile.email,
                             user_id=user_profile.id,
                             is_admin=user_profile.is_realm_admin,
                             full_name=user_profile.full_name,
                             avatar_url=avatar_url(user_profile),
                             timezone=user_profile.timezone,
                             date_joined=user_profile.date_joined.isoformat(),
                             is_guest=user_profile.is_guest,
                             is_bot=user_profile.is_bot))  # type: Dict[str, Any]
    if not user_profile.is_bot:
        event["person"]["profile_data"] = {}
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))

def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
    def stream_name(stream: Optional[Stream]) -> Optional[str]:
        if not stream:
            return None
        return stream.name

    default_sending_stream_name = stream_name(user_profile.default_sending_stream)
    default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)

    bot = dict(email=user_profile.email,
               user_id=user_profile.id,
               full_name=user_profile.full_name,
               bot_type=user_profile.bot_type,
               is_active=user_profile.is_active,
               api_key=get_api_key(user_profile),
               default_sending_stream=default_sending_stream_name,
               default_events_register_stream=default_events_register_stream_name,
               default_all_public_streams=user_profile.default_all_public_streams,
               avatar_url=avatar_url(user_profile),
               services=get_service_dicts_for_bot(user_profile.id),
               )

    # Set the owner key only when the bot has an owner.
    # The default bots don't have an owner, so don't
    # set the owner key while reactivating them.
    if user_profile.bot_owner is not None:
        bot['owner'] = user_profile.bot_owner.email

    return dict(type="realm_bot", op="add", bot=bot)

def notify_created_bot(user_profile: UserProfile) -> None:
    event = created_bot_event(user_profile)
    send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))

def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]],
                 bot_type: Optional[int]=None) -> None:
    user_set = set()
    for full_name, email in name_list:
        short_name = email_to_username(email)
        user_set.add((email, full_name, short_name, True))
    bulk_create_users(realm, user_set, bot_type)

def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str,
                   short_name: str, is_realm_admin: bool=False, bot_type: Optional[int]=None,
                   bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None,
                   timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR,
                   default_sending_stream: Optional[Stream]=None,
                   default_events_register_stream: Optional[Stream]=None,
                   default_all_public_streams: Optional[bool]=None,
                   prereg_user: Optional[PreregistrationUser]=None,
                   newsletter_data: Optional[Dict[str, str]]=None,
                   default_stream_groups: List[DefaultStreamGroup]=[],
                   source_profile: Optional[UserProfile]=None,
                   realm_creation: bool=False) -> UserProfile:

    user_profile = create_user(email=email, password=password, realm=realm,
                               full_name=full_name, short_name=short_name,
                               is_realm_admin=is_realm_admin,
                               bot_type=bot_type, bot_owner=bot_owner,
                               tos_version=tos_version, timezone=timezone, avatar_source=avatar_source,
                               default_sending_stream=default_sending_stream,
                               default_events_register_stream=default_events_register_stream,
                               default_all_public_streams=default_all_public_streams,
                               source_profile=source_profile)

    event_time = user_profile.date_joined
    RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_CREATED, event_time=event_time,
                                 requires_billing_update=activity_change_requires_seat_update(user_profile))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)

    notify_created_user(user_profile)
    if bot_type:
        notify_created_bot(user_profile)
    else:
        process_new_human_user(user_profile, prereg_user=prereg_user,
                               newsletter_data=newsletter_data,
                               default_stream_groups=default_stream_groups,
                               realm_creation=realm_creation)
    return user_profile
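
# Illustrative usage of do_create_user (a sketch; the email and names
# are hypothetical, and `realm` is assumed to be an existing Realm):
#
#     user = do_create_user('new@example.com', password=None, realm=realm,
#                           full_name='New User', short_name='new')
#
# This creates the account, writes a USER_CREATED RealmAuditLog row,
# and, since bot_type is None, runs the human-user onboarding path
# (default subscriptions, message history, signup notifications).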

def do_activate_user(user_profile: UserProfile) -> None:
    user_profile.is_active = True
    user_profile.is_mirror_dummy = False
    user_profile.set_unusable_password()
    user_profile.date_joined = timezone_now()
    user_profile.tos_version = settings.TOS_VERSION
    user_profile.save(update_fields=["is_active", "date_joined", "password",
                                     "is_mirror_dummy", "tos_version"])

    event_time = user_profile.date_joined
    RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
                                 requires_billing_update=activity_change_requires_seat_update(user_profile))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)

    notify_created_user(user_profile)

def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
    # Unlike do_activate_user, this is meant for re-activating existing users,
    # so it doesn't reset their password, etc.
    user_profile.is_active = True
    user_profile.save(update_fields=["is_active"])

    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
                                 acting_user=acting_user,
                                 requires_billing_update=activity_change_requires_seat_update(user_profile))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)

    notify_created_user(user_profile)

    if user_profile.is_bot:
        notify_created_bot(user_profile)

def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
    return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)


def do_set_realm_property(realm: Realm, name: str, value: Any) -> None:
    """Takes in a realm object, the name of an attribute to update, and the
    value to update it to.
    """
    property_type = Realm.property_types[name]
    assert isinstance(value, property_type), (
        'Cannot update %s: %s is not an instance of %s' % (
            name, value, property_type,))

    setattr(realm, name, value)
    realm.save(update_fields=[name])
    event = dict(
        type='realm',
        op='update',
        property=name,
        value=value,
    )
    send_event(realm, event, active_user_ids(realm.id))
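
# Illustrative usage of do_set_realm_property (a sketch; assumes the
# name is a key of Realm.property_types, e.g. a boolean property):
#
#     do_set_realm_property(realm, 'invite_required', True)
#
# The isinstance assertion guards against broadcasting a realm update
# event whose value has the wrong type for that property.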

def do_set_realm_authentication_methods(realm: Realm,
                                        authentication_methods: Dict[str, bool]) -> None:
    for key, value in list(authentication_methods.items()):
        index = getattr(realm.authentication_methods, key).number
        realm.authentication_methods.set_bit(index, int(value))
    realm.save(update_fields=['authentication_methods'])
    event = dict(
        type="realm",
        op="update_dict",
        property='default',
        data=dict(authentication_methods=realm.authentication_methods_dict())
    )
    send_event(realm, event, active_user_ids(realm.id))

def do_set_realm_message_editing(realm: Realm,
                                 allow_message_editing: bool,
                                 message_content_edit_limit_seconds: int,
                                 allow_community_topic_editing: bool) -> None:
    realm.allow_message_editing = allow_message_editing
    realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
    realm.allow_community_topic_editing = allow_community_topic_editing
    realm.save(update_fields=['allow_message_editing',
                              'allow_community_topic_editing',
                              'message_content_edit_limit_seconds',
                              ])
    event = dict(
        type="realm",
        op="update_dict",
        property="default",
        data=dict(allow_message_editing=allow_message_editing,
                  message_content_edit_limit_seconds=message_content_edit_limit_seconds,
                  allow_community_topic_editing=allow_community_topic_editing),
    )
    send_event(realm, event, active_user_ids(realm.id))

def do_set_realm_message_deleting(realm: Realm,
                                  message_content_delete_limit_seconds: int) -> None:
    realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds
    realm.save(update_fields=['message_content_delete_limit_seconds'])
    event = dict(
        type="realm",
        op="update_dict",
        property="default",
        data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds),
    )
    send_event(realm, event, active_user_ids(realm.id))

def do_set_realm_notifications_stream(realm: Realm, stream: Stream, stream_id: int) -> None:
    realm.notifications_stream = stream
    realm.save(update_fields=['notifications_stream'])
    event = dict(
        type="realm",
        op="update",
        property="notifications_stream_id",
        value=stream_id
    )
    send_event(realm, event, active_user_ids(realm.id))

def do_set_realm_signup_notifications_stream(realm: Realm, stream: Stream,
                                             stream_id: int) -> None:
    realm.signup_notifications_stream = stream
    realm.save(update_fields=['signup_notifications_stream'])
    event = dict(
        type="realm",
        op="update",
        property="signup_notifications_stream_id",
        value=stream_id
    )
    send_event(realm, event, active_user_ids(realm.id))

def do_deactivate_realm(realm: Realm) -> None:
    """
    Deactivate this realm. Do NOT deactivate the users -- we need to be able to
    tell the difference between users that were intentionally deactivated,
    e.g. by a realm admin, and users who can't currently use Zulip because their
    realm has been deactivated.
    """
    if realm.deactivated:
        return

    realm.deactivated = True
    realm.save(update_fields=["deactivated"])

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time)

    ScheduledEmail.objects.filter(realm=realm).delete()
    for user in active_humans_in_realm(realm):
        # Don't deactivate the users, but do delete their sessions so they get
        # bumped to the login screen, where they'll get a realm deactivation
        # notice when they try to log in.
        delete_user_sessions(user)

    event = dict(type="realm", op="deactivated",
                 realm_id=realm.id)
    send_event(realm, event, active_user_ids(realm.id))

def do_reactivate_realm(realm: Realm) -> None:
    realm.deactivated = False
    realm.save(update_fields=["deactivated"])

    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time)

def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None:
    realm.string_id = new_subdomain
    realm.save(update_fields=["string_id"])

def do_scrub_realm(realm: Realm) -> None:
    users = UserProfile.objects.filter(realm=realm)
    for user in users:
        do_delete_messages(user)
        do_delete_avatar_image(user)
        user.full_name = "Scrubbed {}".format(generate_key()[:15])
        scrubbed_email = "scrubbed-{}@{}".format(generate_key()[:15], realm.host)
        user.email = scrubbed_email
        user.delivery_email = scrubbed_email
        user.save(update_fields=["full_name", "email", "delivery_email"])

    do_remove_realm_custom_profile_fields(realm)
    Attachment.objects.filter(realm=realm).delete()

    RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(),
                                 event_type=RealmAuditLog.REALM_SCRUBBED)

def do_deactivate_user(user_profile: UserProfile,
                       acting_user: Optional[UserProfile]=None,
                       _cascade: bool=True) -> None:
    if not user_profile.is_active:
        return

    user_profile.is_active = False
    user_profile.save(update_fields=["is_active"])

    delete_user_sessions(user_profile)
    clear_scheduled_emails(user_profile.id)

    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
                                 acting_user=acting_user,
                                 event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time,
                                 requires_billing_update=activity_change_requires_seat_update(user_profile))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time, increment=-1)

    event = dict(type="realm_user", op="remove",
                 person=dict(email=user_profile.email,
                             user_id=user_profile.id,
                             full_name=user_profile.full_name))
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))

    if user_profile.is_bot:
        event = dict(type="realm_bot", op="remove",
                     bot=dict(email=user_profile.email,
                              user_id=user_profile.id,
                              full_name=user_profile.full_name))
        send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))

    if _cascade:
        bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
                                                  bot_owner=user_profile)
        for profile in bot_profiles:
            do_deactivate_user(profile, acting_user=acting_user, _cascade=False)

def do_deactivate_stream(stream: Stream, log: bool=True) -> None:

    # Get the affected user ids *before* we deactivate everybody.
    affected_user_ids = can_access_stream_user_ids(stream)

    get_active_subscriptions_for_stream_id(stream.id).update(active=False)

    was_invite_only = stream.invite_only
    stream.deactivated = True
    stream.invite_only = True
    # Preserve as much as possible the original stream name while giving it a
    # special prefix that both indicates that the stream is deactivated and
    # frees up the original name for reuse.
    old_name = stream.name
    new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
    for i in range(20):
        if stream_name_in_use(new_name, stream.realm_id):
            # This stream has already been deactivated, so keep prepending
            # !s until we have a unique stream name or we hit the rename limit.
            new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
        else:
            break

    # If we don't have a unique name at this point, the save will fail
    # later in this code path.

    stream.name = new_name[:Stream.MAX_NAME_LENGTH]
    stream.save(update_fields=['name', 'deactivated', 'invite_only'])

    # If this is a default stream, remove it, properly sending a
    # notification to browser clients.
    if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
        do_remove_default_stream(stream)

    # Remove the old stream information from the remote cache.
    old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
    cache_delete(old_cache_key)

    stream_dict = stream.to_dict()
    stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
    event = dict(type="stream", op="delete",
                 streams=[stream_dict])
    send_event(stream.realm, event, affected_user_ids)
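
# Illustrative rename sequence for a deactivated stream named "design"
# (assuming each candidate name is already taken in the realm):
#
#     "design" -> "!DEACTIVATED:design" -> "!!DEACTIVATED:design" -> ...
#
# capped at 20 extra '!'s, with each candidate truncated to
# Stream.MAX_NAME_LENGTH.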

def do_change_user_email(user_profile: UserProfile, new_email: str) -> None:
    delete_user_profile_caches([user_profile])

    user_profile.email = new_email
    user_profile.delivery_email = new_email
    user_profile.save(update_fields=["email", "delivery_email"])

    payload = dict(user_id=user_profile.id,
                   new_email=new_email)
    send_event(user_profile.realm,
               dict(type='realm_user', op='update', person=payload),
               active_user_ids(user_profile.realm_id))
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED,
                                 event_time=event_time)

def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
    old_email = user_profile.email
    obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email,
                                           user_profile=user_profile, realm=user_profile.realm)

    activation_url = create_confirmation_link(obj, user_profile.realm.host, Confirmation.EMAIL_CHANGE)
    from zerver.context_processors import common_context
    context = common_context(user_profile)
    context.update({
        'old_email': old_email,
        'new_email': new_email,
        'activate_url': activation_url
    })
    send_email('zerver/emails/confirm_new_email', to_email=new_email,
               from_name='Zulip Account Security', from_address=FromAddress.tokenized_no_reply_address(),
               context=context)

def compute_irc_user_fullname(email: NonBinaryStr) -> NonBinaryStr:
    return email.split("@")[0] + " (IRC)"

def compute_jabber_user_fullname(email: NonBinaryStr) -> NonBinaryStr:
    return email.split("@")[0] + " (XMPP)"
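
# For example, compute_irc_user_fullname('alice@example.com') returns
# 'alice (IRC)', and compute_jabber_user_fullname('alice@example.com')
# returns 'alice (XMPP)'.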

@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
                timeout=3600*24*7)
def create_mirror_user_if_needed(realm: Realm, email: str,
                                 email_to_fullname: Callable[[str], str]) -> UserProfile:
    try:
        return get_user(email, realm)
    except UserProfile.DoesNotExist:
        try:
            # Forge a user for this person
            return create_user(
                email=email,
                password=None,
                realm=realm,
                full_name=email_to_fullname(email),
                short_name=email_to_username(email),
                active=False,
                is_mirror_dummy=True,
            )
        except IntegrityError:
            return get_user(email, realm)

def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None:
    welcome_bot = get_system_bot(settings.WELCOME_BOT)
    human_recipient = get_personal_recipient(message['message'].sender.id)
    if Message.objects.filter(sender=welcome_bot, recipient=human_recipient).count() < 2:
        internal_send_private_message(
            message['realm'], welcome_bot, message['message'].sender,
            "Congratulations on your first reply! :tada:\n\n"
            "Feel free to continue using this space to practice your new messaging "
            "skills. Or, try clicking on some of the stream names to your left!")

def render_incoming_message(message: Message,
                            content: str,
                            user_ids: Set[int],
                            realm: Realm,
                            mention_data: Optional[bugdown.MentionData]=None,
                            email_gateway: Optional[bool]=False) -> str:
    realm_alert_words = alert_words_in_realm(realm)
    try:
        rendered_content = render_markdown(
            message=message,
            content=content,
            realm=realm,
            realm_alert_words=realm_alert_words,
            user_ids=user_ids,
            mention_data=mention_data,
            email_gateway=email_gateway,
        )
    except BugdownRenderingException:
        raise JsonableError(_('Unable to render message'))
    return rendered_content

def get_typing_user_profiles(recipient: Recipient, sender_id: int) -> List[UserProfile]:
    if recipient.type == Recipient.STREAM:
        '''
        We don't support typing indicators for streams because they
        are expensive and initial user feedback was they were too
        distracting.
        '''
        raise ValueError('Typing indicators not supported for streams')

    if recipient.type == Recipient.PERSONAL:
        # The sender and recipient may be the same id, so
        # de-duplicate using a set.
        user_ids = list({recipient.type_id, sender_id})
        assert(len(user_ids) in [1, 2])

    elif recipient.type == Recipient.HUDDLE:
        user_ids = get_huddle_user_ids(recipient)

    else:
        raise ValueError('Bad recipient type')

    users = [get_user_profile_by_id(user_id) for user_id in user_ids]
    return users

RecipientInfoResult = TypedDict('RecipientInfoResult', {
    'active_user_ids': Set[int],
    'push_notify_user_ids': Set[int],
    'stream_push_user_ids': Set[int],
    'stream_email_user_ids': Set[int],
    'um_eligible_user_ids': Set[int],
    'long_term_idle_user_ids': Set[int],
    'default_bot_user_ids': Set[int],
    'service_bot_tuples': List[Tuple[int, int]],
})

def get_recipient_info(recipient: Recipient,
                       sender_id: int,
                       stream_topic: Optional[StreamTopicTarget],
                       possibly_mentioned_user_ids: Optional[Set[int]]=None) -> RecipientInfoResult:
    stream_push_user_ids = set()  # type: Set[int]
    stream_email_user_ids = set()  # type: Set[int]

    if recipient.type == Recipient.PERSONAL:
        # The sender and recipient may be the same id, so
        # de-duplicate using a set.
        message_to_user_ids = list({recipient.type_id, sender_id})
        assert(len(message_to_user_ids) in [1, 2])

    elif recipient.type == Recipient.STREAM:
        # Anybody calling us w/r/t a stream message needs to supply
        # stream_topic. We may eventually want to have different versions
        # of this function for different message types.
        assert(stream_topic is not None)

        subscription_rows = stream_topic.get_active_subscriptions().values(
            'user_profile_id',
            'push_notifications',
            'email_notifications',
            'in_home_view',
        ).order_by('user_profile_id')

        message_to_user_ids = [
            row['user_profile_id']
            for row in subscription_rows
        ]

        user_ids_muting_topic = stream_topic.user_ids_muting_topic()

        stream_push_user_ids = {
            row['user_profile_id']
            for row in subscription_rows
            # Note: muting a stream overrides stream_push_notify
            if row['push_notifications'] and row['in_home_view']
        } - user_ids_muting_topic

        stream_email_user_ids = {
            row['user_profile_id']
            for row in subscription_rows
            # Note: muting a stream overrides stream_email_notify
            if row['email_notifications'] and row['in_home_view']
        } - user_ids_muting_topic

    elif recipient.type == Recipient.HUDDLE:
        message_to_user_ids = get_huddle_user_ids(recipient)

    else:
        raise ValueError('Bad recipient type')

    message_to_user_id_set = set(message_to_user_ids)

    user_ids = set(message_to_user_id_set)
    if possibly_mentioned_user_ids:
        # Important note: Because we haven't rendered bugdown yet, we
        # don't yet know which of these possibly-mentioned users was
        # actually mentioned in the message (in other words, the
        # mention syntax might have been in a code block or otherwise
        # escaped).  `get_ids_for` will filter these extra user rows
        # for our data structures not related to bots.
        user_ids |= possibly_mentioned_user_ids

    if user_ids:
        query = UserProfile.objects.filter(
            is_active=True,
        ).values(
            'id',
            'enable_online_push_notifications',
            'is_bot',
            'bot_type',
            'long_term_idle',
        )

        # query_for_ids is fast and highly optimized for large queries, and we
        # need this codepath to be fast (it's part of sending messages).
        query = query_for_ids(
            query=query,
            user_ids=sorted(list(user_ids)),
            field='id'
        )
        rows = list(query)
    else:
        # TODO: We should always have at least one user_id as a recipient
        #       of any message we send.  Right now the exception to this
        #       rule is `notify_new_user`, which, at least in a possibly
        #       contrived test scenario, can attempt to send messages
        #       to an inactive bot.  When we plug that hole, we can avoid
        #       this `else` clause and just `assert(user_ids)`.
        rows = []

    def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
        """Only includes users on the explicit message to line"""
        return {
            row['id']
            for row in rows
            if f(row)
        } & message_to_user_id_set

    def is_service_bot(row: Dict[str, Any]) -> bool:
        return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)

    active_user_ids = get_ids_for(lambda r: True)
    push_notify_user_ids = get_ids_for(
        lambda r: r['enable_online_push_notifications']
    )

    # Service bots don't get UserMessage rows.
    um_eligible_user_ids = get_ids_for(
        lambda r: not is_service_bot(r)
    )

    long_term_idle_user_ids = get_ids_for(
        lambda r: r['long_term_idle']
    )

    # These two bot data structures need to filter from the full set
    # of users who either are receiving the message or might have been
    # mentioned in it, and so can't use get_ids_for.
    #
    # Further in the do_send_messages code path, once
    # `mentioned_user_ids` has been computed via bugdown, we'll filter
    # these data structures for just those users who are either a
    # direct recipient or were mentioned; for now, we're just making
    # sure we have the data we need for that without extra database
    # queries.
    default_bot_user_ids = set([
        row['id']
        for row in rows
        if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
    ])

    service_bot_tuples = [
        (row['id'], row['bot_type'])
        for row in rows
        if is_service_bot(row)
    ]

    info = dict(
        active_user_ids=active_user_ids,
        push_notify_user_ids=push_notify_user_ids,
        stream_push_user_ids=stream_push_user_ids,
        stream_email_user_ids=stream_email_user_ids,
        um_eligible_user_ids=um_eligible_user_ids,
        long_term_idle_user_ids=long_term_idle_user_ids,
        default_bot_user_ids=default_bot_user_ids,
        service_bot_tuples=service_bot_tuples
    )  # type: RecipientInfoResult
    return info
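
# Illustrative sketch of how get_ids_for intersects the queried rows
# with the explicit recipient set (ids are made up; row fields elided):
#
#     rows = [{'id': 1, 'long_term_idle': True},
#             {'id': 9, 'long_term_idle': True}]
#     message_to_user_id_set = {1, 2}
#
# get_ids_for(lambda r: r['long_term_idle']) would yield {1}, since
# user 9 was only possibly-mentioned, not an explicit recipient.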

def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]],
                           mentioned_user_ids: Set[int], active_user_ids: Set[int],
                           recipient_type: int) -> Dict[str, List[Dict[str, Any]]]:

    event_dict = defaultdict(list)  # type: Dict[str, List[Dict[str, Any]]]

    # Avoid infinite loops by preventing messages sent by bots from generating
    # Service events.
    if sender.is_bot:
        return event_dict

    def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
        if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
            queue_name = 'outgoing_webhooks'
        elif bot_type == UserProfile.EMBEDDED_BOT:
            queue_name = 'embedded_bots'
        else:
            logging.error(
                'Unexpected bot_type for Service bot id=%s: %s' %
                (user_profile_id, bot_type))
            return

        is_stream = (recipient_type == Recipient.STREAM)

        # Important note: service_bot_tuples may contain service bots
        # who were not actually mentioned in the message (e.g. if
        # mention syntax for that bot appeared in a code block).
        # Thus, it is important to filter any users who aren't part of
        # either mentioned_user_ids (the actual mentioned users) or
        # active_user_ids (the actual recipients).
        #
        # So even though this is implied by the logic below, we filter
        # these not-actually-mentioned users here, to help keep this
        # function future-proof.
        if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
            return

        # Mention triggers, for stream messages
        if is_stream and user_profile_id in mentioned_user_ids:
            trigger = 'mention'
        # PM triggers, for personal and huddle messages
        elif (not is_stream) and (user_profile_id in active_user_ids):
            trigger = 'private_message'
        else:
            return

        event_dict[queue_name].append({
            'trigger': trigger,
            'user_profile_id': user_profile_id,
        })

    for user_profile_id, bot_type in service_bot_tuples:
        maybe_add_event(
            user_profile_id=user_profile_id,
            bot_type=bot_type,
        )

    return event_dict
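
# Illustrative return shape for get_service_bot_events (user ids are
# made up):
#
#     {'outgoing_webhooks': [{'trigger': 'mention', 'user_profile_id': 7}],
#      'embedded_bots': [{'trigger': 'private_message', 'user_profile_id': 12}]}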

def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]:
    scheduled_messages = []  # type: List[ScheduledMessage]

    for message in messages:
        scheduled_message = ScheduledMessage()
        scheduled_message.sender = message['message'].sender
        scheduled_message.recipient = message['message'].recipient
        topic_name = message['message'].topic_name()
        scheduled_message.set_topic_name(topic_name=topic_name)
        scheduled_message.content = message['message'].content
        scheduled_message.sending_client = message['message'].sending_client
        scheduled_message.stream = message['stream']
        scheduled_message.realm = message['realm']
        scheduled_message.scheduled_timestamp = message['deliver_at']
        if message['delivery_type'] == 'send_later':
            scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
        elif message['delivery_type'] == 'remind':
            scheduled_message.delivery_type = ScheduledMessage.REMIND

        scheduled_messages.append(scheduled_message)

    ScheduledMessage.objects.bulk_create(scheduled_messages)
    return [scheduled_message.id for scheduled_message in scheduled_messages]


def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]],
                     email_gateway: Optional[bool]=False) -> List[int]:
    # Filter out messages which didn't pass internal_prep_message properly
    messages = [message for message in messages_maybe_none if message is not None]

    # Filter out zephyr mirror anomalies where the message was already sent
    already_sent_ids = []  # type: List[int]
    new_messages = []  # type: List[MutableMapping[str, Any]]
    for message in messages:
        if isinstance(message['message'], int):
            already_sent_ids.append(message['message'])
        else:
            new_messages.append(message)
    messages = new_messages

    links_for_embed = set()  # type: Set[str]
    # For consistency, changes to the default values for these gets should also be applied
    # to the default args in do_send_message
    for message in messages:
        message['rendered_content'] = message.get('rendered_content', None)
        message['stream'] = message.get('stream', None)
        message['local_id'] = message.get('local_id', None)
        message['sender_queue_id'] = message.get('sender_queue_id', None)
        message['realm'] = message.get('realm', message['message'].sender.realm)

        mention_data = bugdown.MentionData(
            realm_id=message['realm'].id,
            content=message['message'].content,
        )
        message['mention_data'] = mention_data

        if message['message'].is_stream_message():
            stream_id = message['message'].recipient.type_id
            stream_topic = StreamTopicTarget(
                stream_id=stream_id,
                topic_name=message['message'].topic_name()
            )  # type: Optional[StreamTopicTarget]
        else:
            stream_topic = None

        info = get_recipient_info(
            recipient=message['message'].recipient,
            sender_id=message['message'].sender_id,
            stream_topic=stream_topic,
            possibly_mentioned_user_ids=mention_data.get_user_ids(),
        )

        message['active_user_ids'] = info['active_user_ids']
        message['push_notify_user_ids'] = info['push_notify_user_ids']
        message['stream_push_user_ids'] = info['stream_push_user_ids']
        message['stream_email_user_ids'] = info['stream_email_user_ids']
        message['um_eligible_user_ids'] = info['um_eligible_user_ids']
        message['long_term_idle_user_ids'] = info['long_term_idle_user_ids']
        message['default_bot_user_ids'] = info['default_bot_user_ids']
        message['service_bot_tuples'] = info['service_bot_tuples']

        # Render our messages.
        assert message['message'].rendered_content is None

        rendered_content = render_incoming_message(
            message['message'],
            message['message'].content,
            message['active_user_ids'],
            message['realm'],
            mention_data=message['mention_data'],
            email_gateway=email_gateway,
        )
        message['message'].rendered_content = rendered_content
        message['message'].rendered_content_version = bugdown_version
        links_for_embed |= message['message'].links_for_preview

        # Add members of the mentioned user groups into `mentions_user_ids`.
        mention_data = message['mention_data']
        for group_id in message['message'].mentions_user_group_ids:
            members = message['mention_data'].get_group_members(group_id)
            message['message'].mentions_user_ids.update(members)

        '''
        Once we have the actual list of mentioned ids from message
        rendering, we can patch in "default bots" (aka normal bots)
        who were directly mentioned in this message as eligible to
        get UserMessage rows.
        '''
        mentioned_user_ids = message['message'].mentions_user_ids
        default_bot_user_ids = message['default_bot_user_ids']
        mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
        message['um_eligible_user_ids'] |= mentioned_bot_user_ids

        # Update calculated fields of the message
        message['message'].update_calculated_fields()

    # Save the message receipts in the database
    user_message_flags = defaultdict(dict)  # type: Dict[int, Dict[int, List[str]]]
    with transaction.atomic():
        Message.objects.bulk_create([message['message'] for message in messages])
        ums = []  # type: List[UserMessageLite]
        for message in messages:
            # Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
            # they will be processed later.
            mentioned_user_ids = message['message'].mentions_user_ids
            user_messages = create_user_messages(
                message=message['message'],
                um_eligible_user_ids=message['um_eligible_user_ids'],
                long_term_idle_user_ids=message['long_term_idle_user_ids'],
                stream_push_user_ids=message['stream_push_user_ids'],
                stream_email_user_ids=message['stream_email_user_ids'],
                mentioned_user_ids=mentioned_user_ids,
            )

            for um in user_messages:
                user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()

            ums.extend(user_messages)

            message['message'].service_queue_events = get_service_bot_events(
                sender=message['message'].sender,
                service_bot_tuples=message['service_bot_tuples'],
                mentioned_user_ids=mentioned_user_ids,
                active_user_ids=message['active_user_ids'],
                recipient_type=message['message'].recipient.type,
            )

        bulk_insert_ums(ums)

        # Claim attachments in message
        for message in messages:
            if Message.content_has_attachment(message['message'].content):
                do_claim_attachments(message['message'])

        for message in messages:
            do_widget_post_save_actions(message)

    for message in messages:
        # Deliver events to the real-time push system, as well as
        # enqueuing any additional processing triggered by the message.
        wide_message_dict = MessageDict.wide_dict(message['message'])

        user_flags = user_message_flags.get(message['message'].id, {})
        sender = message['message'].sender
        message_type = wide_message_dict['type']

        presence_idle_user_ids = get_active_presence_idle_user_ids(
            realm=sender.realm,
            sender_id=sender.id,
            message_type=message_type,
            active_user_ids=message['active_user_ids'],
            user_flags=user_flags,
        )

        event = dict(
            type='message',
            message=message['message'].id,
            message_dict=wide_message_dict,
            presence_idle_user_ids=presence_idle_user_ids,
        )

        '''
        TODO: We may want to limit user_ids to only those users who have
        UserMessage rows, if only for minor performance reasons.

        For now we queue events for all subscribers/sendees of the
        message, since downstream code may still do notifications
        that don't require UserMessage rows.

        Our automated tests have gotten better on this codepath,
        but we may have coverage gaps, so we should be careful
        about changing the next line.
        '''
        user_ids = message['active_user_ids'] | set(user_flags.keys())

        users = [
            dict(
                id=user_id,
                flags=user_flags.get(user_id, []),
                always_push_notify=(user_id in message['push_notify_user_ids']),
                stream_push_notify=(user_id in message['stream_push_user_ids']),
                stream_email_notify=(user_id in message['stream_email_user_ids']),
            )
            for user_id in user_ids
        ]
        if message['message'].is_stream_message():
            # Note: This is where authorization for single-stream
            # get_updates happens!  We only attach stream data to the
            # notify new_message request if it's a public stream,
            # ensuring that in the tornado server, non-public stream
            # messages are only associated to their subscribed users.
            if message['stream'] is None:
                stream_id = message['message'].recipient.type_id
                message['stream'] = Stream.objects.select_related("realm").get(id=stream_id)
            assert message['stream'] is not None  # assert needed because stubs for django are missing
            if message['stream'].is_public():
                event['realm_id'] = message['stream'].realm_id
                event['stream_name'] = message['stream'].name
            if message['stream'].invite_only:
                event['invite_only'] = True
        if message['local_id'] is not None:
            event['local_id'] = message['local_id']
        if message['sender_queue_id'] is not None:
            event['sender_queue_id'] = message['sender_queue_id']
        send_event(message['realm'], event, users)

        if url_embed_preview_enabled_for_realm(message['message']) and links_for_embed:
            event_data = {
                'message_id': message['message'].id,
                'message_content': message['message'].content,
                'message_realm_id': message['realm'].id,
                'urls': links_for_embed}
            queue_json_publish('embed_links', event_data)

        if (settings.ENABLE_FEEDBACK and settings.FEEDBACK_BOT and
                message['message'].recipient.type == Recipient.PERSONAL):

            feedback_bot_id = get_system_bot(email=settings.FEEDBACK_BOT).id
            if feedback_bot_id in message['active_user_ids']:
                queue_json_publish(
                    'feedback_messages',
                    wide_message_dict,
                )

        if message['message'].recipient.type == Recipient.PERSONAL:
            welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
            if (welcome_bot_id in message['active_user_ids'] and
                    welcome_bot_id != message['message'].sender_id):
                send_welcome_bot_response(message)

        for queue_name, events in message['message'].service_queue_events.items():
            for event in events:
                queue_json_publish(
                    queue_name,
                    {
                        "message": wide_message_dict,
                        "trigger": event['trigger'],
                        "user_profile_id": event["user_profile_id"],
                    }
                )

    # Note that this does not preserve the order of message ids
    # returned.  In practice, this shouldn't matter, as we only
    # mirror single zephyr messages at a time and don't otherwise
    # intermingle sending zephyr messages with other messages.
    return already_sent_ids + [message['message'].id for message in messages]
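
# Condensed view of the do_send_messages pipeline above for a single
# stream message (a sketch, not an additional code path):
#
#   1. get_recipient_info()      -> who receives / who gets notified
#   2. render_incoming_message() -> rendered_content via bugdown
#   3. create_user_messages() + bulk_insert_ums() -> UserMessage rows
#   4. send_event()              -> tornado delivers to event queues
#   5. queue_json_publish()      -> embeds, feedback, service bot work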

class UserMessageLite:
    '''
    The Django ORM is too slow for bulk operations.  This class
    is optimized for the simple use case of inserting a bunch of
    rows into zerver_usermessage.
    '''
    def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
        self.user_profile_id = user_profile_id
        self.message_id = message_id
        self.flags = flags

    def flags_list(self) -> List[str]:
        return UserMessage.flags_list_for_flags(self.flags)

def create_user_messages(message: Message,
                         um_eligible_user_ids: Set[int],
                         long_term_idle_user_ids: Set[int],
                         stream_push_user_ids: Set[int],
                         stream_email_user_ids: Set[int],
                         mentioned_user_ids: Set[int]) -> List[UserMessageLite]:
    ums_to_create = []
    for user_profile_id in um_eligible_user_ids:
        um = UserMessageLite(
            user_profile_id=user_profile_id,
            message_id=message.id,
            flags=0,
        )
        ums_to_create.append(um)

    # These properties on the Message are set via
    # render_markdown by code in the bugdown inline patterns
    wildcard = message.mentions_wildcard
    ids_with_alert_words = message.user_ids_with_alert_words

    for um in ums_to_create:
        if um.user_profile_id == message.sender.id and \
                message.sent_by_human():
            um.flags |= UserMessage.flags.read
        if wildcard:
            um.flags |= UserMessage.flags.wildcard_mentioned
        if um.user_profile_id in mentioned_user_ids:
            um.flags |= UserMessage.flags.mentioned
        if um.user_profile_id in ids_with_alert_words:
            um.flags |= UserMessage.flags.has_alert_word
        if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
            um.flags |= UserMessage.flags.is_private

    # For long_term_idle (aka soft-deactivated) users, we are allowed
    # to optimize by lazily not creating UserMessage rows that would
    # have the default 0 flag set (since the soft-reactivation logic
    # knows how to create those when the user comes back).  We need to
    # create the UserMessage rows for these long_term_idle users
    # non-lazily in a few cases:
    #
    # * There are nonzero flags (e.g. the user was mentioned), since
    #   that case is rare and this saves a lot of complexity in
    #   soft-reactivation.
    #
    # * If the user is going to be notified (e.g. they get push/email
    #   notifications for every message on a stream), since in that
    #   case the notifications code will call `access_message` on the
    #   message to re-verify permissions, and for private streams,
    #   will get an error if the UserMessage row doesn't exist yet.
    user_messages = []
    for um in ums_to_create:
        if (um.user_profile_id in long_term_idle_user_ids and
                um.user_profile_id not in stream_push_user_ids and
                um.user_profile_id not in stream_email_user_ids and
                message.is_stream_message() and
                int(um.flags) == 0):
            continue
        user_messages.append(um)

    return user_messages

def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
    '''
    Doing bulk inserts this way is much faster than using Django,
    since we don't have any ORM overhead.  Profiling with 1000
    users shows a speedup of 0.436 -> 0.027 seconds, so we're
    talking about a roughly 16x speedup.
    '''
    if not ums:
        return

    vals = ','.join([
        '(%d, %d, %d)' % (um.user_profile_id, um.message_id, um.flags)
        for um in ums
    ])
    query = '''
        INSERT into
            zerver_usermessage (user_profile_id, message_id, flags)
        VALUES
    ''' + vals

    with connection.cursor() as cursor:
        cursor.execute(query)
Profiling with 1000\n users shows a speedup of 0.436 -> 0.027 seconds, so we're\n talking about a 15x speedup.\n '''\n if not ums:\n return\n\n vals = ','.join([\n '(%d, %d, %d)' % (um.user_profile_id, um.message_id, um.flags)\n for um in ums\n ])\n query = '''\n INSERT into\n zerver_usermessage (user_profile_id, message_id, flags)\n VALUES\n ''' + vals\n\n with connection.cursor() as cursor:\n cursor.execute(query)\n\ndef do_add_submessage(realm: Realm,\n sender_id: int,\n message_id: int,\n msg_type: str,\n content: str,\n ) -> None:\n submessage = SubMessage(\n sender_id=sender_id,\n message_id=message_id,\n msg_type=msg_type,\n content=content,\n )\n submessage.save()\n\n event = dict(\n type=\"submessage\",\n msg_type=msg_type,\n message_id=message_id,\n submessage_id=submessage.id,\n sender_id=sender_id,\n content=content,\n )\n ums = UserMessage.objects.filter(message_id=message_id)\n target_user_ids = [um.user_profile_id for um in ums]\n\n send_event(realm, event, target_user_ids)\n\ndef notify_reaction_update(user_profile: UserProfile, message: Message,\n reaction: Reaction, op: str) -> None:\n user_dict = {'user_id': user_profile.id,\n 'email': user_profile.email,\n 'full_name': user_profile.full_name}\n\n event = {'type': 'reaction',\n 'op': op,\n 'user': user_dict,\n 'message_id': message.id,\n 'emoji_name': reaction.emoji_name,\n 'emoji_code': reaction.emoji_code,\n 'reaction_type': reaction.reaction_type} # type: Dict[str, Any]\n\n # Update the cached message since new reaction is added.\n update_to_dict_cache([message])\n\n # Recipients for message update events, including reactions, are\n # everyone who got the original message. This means reactions\n # won't live-update in preview narrows, but it's the right\n # performance tradeoff, since otherwise we'd need to send all\n # reactions to public stream messages to every browser for every\n # client in the organization, which doesn't scale.\n #\n # However, to ensure that reactions do live-update for any user\n # who has actually participated in reacting to a message, we add a\n # \"historical\" UserMessage row for any user who reacts to message,\n # subscribing them to future notifications.\n ums = UserMessage.objects.filter(message=message.id)\n send_event(user_profile.realm, event, [um.user_profile_id for um in ums])\n\ndef do_add_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: str) -> None:\n (emoji_code, reaction_type) = emoji_name_to_emoji_code(user_profile.realm, emoji_name)\n reaction = Reaction(user_profile=user_profile, message=message,\n emoji_name=emoji_name, emoji_code=emoji_code,\n reaction_type=reaction_type)\n reaction.save()\n notify_reaction_update(user_profile, message, reaction, \"add\")\n\ndef do_remove_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: str) -> None:\n reaction = Reaction.objects.filter(user_profile=user_profile,\n message=message,\n emoji_name=emoji_name).get()\n reaction.delete()\n notify_reaction_update(user_profile, message, reaction, \"remove\")\n\ndef do_add_reaction(user_profile: UserProfile, message: Message,\n emoji_name: str, emoji_code: str, reaction_type: str) -> None:\n reaction = Reaction(user_profile=user_profile, message=message,\n emoji_name=emoji_name, emoji_code=emoji_code,\n reaction_type=reaction_type)\n reaction.save()\n notify_reaction_update(user_profile, message, reaction, \"add\")\n\ndef do_remove_reaction(user_profile: UserProfile, message: Message,\n emoji_code: str, reaction_type: str) -> None:\n reaction = 

def do_remove_reaction(user_profile: UserProfile, message: Message,
                       emoji_code: str, reaction_type: str) -> None:
    reaction = Reaction.objects.filter(user_profile=user_profile,
                                       message=message,
                                       emoji_code=emoji_code,
                                       reaction_type=reaction_type).get()
    reaction.delete()
    notify_reaction_update(user_profile, message, reaction, "remove")

def do_send_typing_notification(realm: Realm, notification: Dict[str, Any]) -> None:
    recipient_user_profiles = get_typing_user_profiles(notification['recipient'],
                                                       notification['sender'].id)
    # Only deliver the notification to active user recipients
    user_ids_to_notify = [profile.id for profile in recipient_user_profiles if profile.is_active]
    sender_dict = {'user_id': notification['sender'].id, 'email': notification['sender'].email}
    # Include a list of recipients in the event body to help identify where the typing is happening
    recipient_dicts = [{'user_id': profile.id, 'email': profile.email}
                       for profile in recipient_user_profiles]
    event = dict(
        type='typing',
        op=notification['op'],
        sender=sender_dict,
        recipients=recipient_dicts)

    send_event(realm, event, user_ids_to_notify)

# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender: UserProfile, notification_to: Sequence[str],
                                   operator: str) -> None:
    typing_notification = check_typing_notification(sender, notification_to, operator)
    do_send_typing_notification(sender.realm, typing_notification)

# check_typing_notification:
# Returns a typing notification ready for sending with
# do_send_typing_notification on success; raises JsonableError on error.
def check_typing_notification(sender: UserProfile, notification_to: Sequence[str],
                              operator: str) -> Dict[str, Any]:
    if len(notification_to) == 0:
        raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
    elif operator not in ('start', 'stop'):
        raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))
    try:
        recipient = recipient_for_emails(notification_to, False,
                                         sender, sender)
    except ValidationError as e:
        assert isinstance(e.messages[0], str)
        raise JsonableError(e.messages[0])
    assert recipient.type != Recipient.STREAM
    return {'sender': sender, 'recipient': recipient, 'op': operator}

def stream_welcome_message(stream: Stream) -> str:
    content = _('Welcome to #**%s**.') % (stream.name,)

    if stream.description:
        content += '\n\n**' + _('Description') + '**: '
        content += stream.description

    return content
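
# For a stream named "design" with description "UI work",
# stream_welcome_message returns:
#
#     Welcome to #**design**.
#
#     **Description**: UI work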

def prep_stream_welcome_message(stream: Stream) -> Optional[Dict[str, Any]]:
    realm = stream.realm
    sender = get_system_bot(settings.WELCOME_BOT)
    topic = _('hello')
    content = stream_welcome_message(stream)

    message = internal_prep_stream_message(
        realm=realm,
        sender=sender,
        stream_name=stream.name,
        topic=topic,
        content=content)

    return message

def send_stream_creation_event(stream: Stream, user_ids: List[int]) -> None:
    event = dict(type="stream", op="create",
                 streams=[stream.to_dict()])
    send_event(stream.realm, event, user_ids)

def get_default_value_for_history_public_to_subscribers(
        realm: Realm,
        invite_only: bool,
        history_public_to_subscribers: Optional[bool]
) -> bool:
    if invite_only:
        if history_public_to_subscribers is None:
            # A private stream's history is non-public by default
            history_public_to_subscribers = False
    else:
        # If we later decide to support public streams without
        # history, we can remove this code path.
        history_public_to_subscribers = True

    if realm.is_zephyr_mirror_realm:
        # In the Zephyr mirroring model, history is unconditionally
        # not public to subscribers, even for public streams.
        history_public_to_subscribers = False

    return history_public_to_subscribers

def create_stream_if_needed(realm: Realm,
                            stream_name: str,
                            *,
                            invite_only: bool=False,
                            is_announcement_only: bool=False,
                            history_public_to_subscribers: Optional[bool]=None,
                            stream_description: str="") -> Tuple[Stream, bool]:

    history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
        realm, invite_only, history_public_to_subscribers)

    (stream, created) = Stream.objects.get_or_create(
        realm=realm,
        name__iexact=stream_name,
        defaults=dict(
            name=stream_name,
            description=stream_description,
            invite_only=invite_only,
            is_announcement_only=is_announcement_only,
            history_public_to_subscribers=history_public_to_subscribers,
            is_in_zephyr_realm=realm.is_zephyr_mirror_realm,
        )
    )

    if created:
        Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
        if stream.is_public():
            send_stream_creation_event(stream, active_non_guest_user_ids(stream.realm_id))
        else:
            realm_admin_ids = [user.id for user in stream.realm.get_admin_users()]
            send_stream_creation_event(stream, realm_admin_ids)
    return stream, created

def ensure_stream(realm: Realm,
                  stream_name: str,
                  invite_only: bool=False,
                  stream_description: str="") -> Stream:
    return create_stream_if_needed(realm, stream_name,
                                   invite_only=invite_only,
                                   stream_description=stream_description)[0]

def create_streams_if_needed(realm: Realm,
                             stream_dicts: List[Mapping[str, Any]]) -> Tuple[List[Stream], List[Stream]]:
    """Note that stream_dict["name"] is assumed to already be stripped of
    whitespace"""
    added_streams = []  # type: List[Stream]
    existing_streams = []  # type: List[Stream]
    for stream_dict in stream_dicts:
        stream, created = create_stream_if_needed(
            realm,
            stream_dict["name"],
            invite_only=stream_dict.get("invite_only", False),
            is_announcement_only=stream_dict.get("is_announcement_only", False),
            history_public_to_subscribers=stream_dict.get("history_public_to_subscribers"),
            stream_description=stream_dict.get("description", "")
        )

        if created:
            added_streams.append(stream)
        else:
            existing_streams.append(stream)

    return added_streams, existing_streams


def get_recipient_from_user_ids(recipient_profile_ids: Set[int],
                                not_forged_mirror_message: bool,
                                forwarder_user_profile: Optional[UserProfile],
                                sender: UserProfile) -> Recipient:

    # Avoid mutating the passed in set of recipient_profile_ids.
    recipient_profile_ids = set(recipient_profile_ids)

    # If the private message is just between the sender and
    # another person, force it to be a personal internally.

    if not_forged_mirror_message:
        assert forwarder_user_profile is not None
        if forwarder_user_profile.id not in recipient_profile_ids:
            raise ValidationError(_("User not authorized for this query"))

    if (len(recipient_profile_ids) == 2 and sender.id in recipient_profile_ids):
        recipient_profile_ids.remove(sender.id)

    if len(recipient_profile_ids) > 1:
        # Make sure the sender is included in huddle messages
        recipient_profile_ids.add(sender.id)
        return get_huddle_recipient(recipient_profile_ids)
    else:
        return get_personal_recipient(list(recipient_profile_ids)[0])
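
# Illustrative recipient resolution in get_recipient_from_user_ids
# (ids made up; assume sender.id == 1):
#
#     {1, 2} -> personal recipient for user 2 (sender dropped)
#     {2, 3} -> huddle recipient for {1, 2, 3} (sender re-added)
#     {1}    -> personal recipient for user 1 (a PM to yourself)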
def validate_recipient_user_profiles(user_profiles: List[UserProfile],
                                     sender: UserProfile) -> Set[int]:
    recipient_profile_ids = set()

    # We exempt cross-realm bots from the check that all the recipients
    # are in the same realm.
    realms = set()
    if not is_cross_realm_bot_email(sender.email):
        realms.add(sender.realm_id)

    for user_profile in user_profiles:
        if (not user_profile.is_active and not user_profile.is_mirror_dummy) or \
                user_profile.realm.deactivated:
            raise ValidationError(_("'%s' is no longer using Zulip.") % (user_profile.email,))
        recipient_profile_ids.add(user_profile.id)
        if not is_cross_realm_bot_email(user_profile.email):
            realms.add(user_profile.realm_id)

    if len(realms) > 1:
        raise ValidationError(_("You can't send private messages outside of your organization."))

    return recipient_profile_ids

def recipient_for_emails(emails: Iterable[str], not_forged_mirror_message: bool,
                         forwarder_user_profile: Optional[UserProfile],
                         sender: UserProfile) -> Recipient:

    user_profiles = user_profiles_from_unvalidated_emails(emails, sender.realm)

    return recipient_for_user_profiles(
        user_profiles=user_profiles,
        not_forged_mirror_message=not_forged_mirror_message,
        forwarder_user_profile=forwarder_user_profile,
        sender=sender
    )

def recipient_for_user_profiles(user_profiles: List[UserProfile], not_forged_mirror_message: bool,
                                forwarder_user_profile: Optional[UserProfile],
                                sender: UserProfile) -> Recipient:

    recipient_profile_ids = validate_recipient_user_profiles(user_profiles, sender)

    return get_recipient_from_user_ids(recipient_profile_ids, not_forged_mirror_message,
                                       forwarder_user_profile, sender)

def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
    if message.recipient.type == Recipient.HUDDLE:
        # For huddle messages, we use a 10-second window because the
        # timestamps aren't guaranteed to actually match between two
        # copies of the same message.
        time_window = datetime.timedelta(seconds=10)
    else:
        time_window = datetime.timedelta(seconds=0)

    query = Message.objects.filter(
        sender=message.sender,
        recipient=message.recipient,
        content=message.content,
        sending_client=message.sending_client,
        pub_date__gte=message.pub_date - time_window,
        pub_date__lte=message.pub_date + time_window)

    messages = filter_by_exact_message_topic(
        query=query,
        message=message,
    )

    if messages.exists():
        return messages[0].id
    return None

def extract_recipients(s: Union[str, Iterable[str]]) -> List[str]:
    # We try to accept multiple incoming formats for recipients.
    # See test_extract_recipients() for examples of what we allow.
    try:
        data = ujson.loads(s)  # type: ignore # This function has a super weird union argument.
    except ValueError:
        data = s

    if isinstance(data, str):
        data = data.split(',')

    if not isinstance(data, list):
        raise ValueError("Invalid data type for recipients")

    recipients = data

    # Strip recipients, and then remove any duplicates and any that
    # are the empty string after being stripped.
    recipients = [recipient.strip() for recipient in recipients]
    return list(set(recipient for recipient in recipients if recipient))
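
# Illustrative examples (not from the original module) of the formats
# extract_recipients() accepts; see test_extract_recipients() for the
# authoritative list:
#
#     extract_recipients('["alice@zulip.com", "bob@zulip.com"]')
#     # -> ['alice@zulip.com', 'bob@zulip.com']  (JSON list; order may vary)
#     extract_recipients('alice@zulip.com, bob@zulip.com')
#     # -> a comma-separated string is split, stripped, and de-duplicated
#     extract_recipients(['alice@zulip.com'])
#     # -> an already-parsed list passes through the same cleanup
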
def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str,
                              topic: str, body: str) -> int:
    addressee = Addressee.for_stream(stream_name, topic)
    message = check_message(sender, client, addressee, body)

    return do_send_messages([message])[0]

def check_send_private_message(sender: UserProfile, client: Client,
                               receiving_user: UserProfile, body: str) -> int:
    addressee = Addressee.for_user_profile(receiving_user)
    message = check_message(sender, client, addressee, body)

    return do_send_messages([message])[0]
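
# Usage sketch (hypothetical values, not from the original module): these
# wrappers are the convenient internal entry points for sending a single
# message and getting back its id.
#
#     message_id = check_send_stream_message(
#         sender=bot_profile, client=get_client("Internal"),
#         stream_name="errors", topic="deploys", body="deploy finished")
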
# check_send_message:
# Returns the id of the sent message.  Has same argspec as check_message.
def check_send_message(sender: UserProfile, client: Client, message_type_name: str,
                       message_to: Sequence[str], topic_name: Optional[str],
                       message_content: str, realm: Optional[Realm]=None,
                       forged: bool=False, forged_timestamp: Optional[float]=None,
                       forwarder_user_profile: Optional[UserProfile]=None,
                       local_id: Optional[str]=None,
                       sender_queue_id: Optional[str]=None,
                       widget_content: Optional[str]=None) -> int:

    addressee = Addressee.legacy_build(
        sender,
        message_type_name,
        message_to,
        topic_name)

    message = check_message(sender, client, addressee,
                            message_content, realm, forged, forged_timestamp,
                            forwarder_user_profile, local_id, sender_queue_id,
                            widget_content)
    return do_send_messages([message])[0]

def check_schedule_message(sender: UserProfile, client: Client,
                           message_type_name: str, message_to: Sequence[str],
                           topic_name: Optional[str], message_content: str,
                           delivery_type: str, deliver_at: datetime.datetime,
                           realm: Optional[Realm]=None,
                           forwarder_user_profile: Optional[UserProfile]=None
                           ) -> int:
    addressee = Addressee.legacy_build(
        sender,
        message_type_name,
        message_to,
        topic_name)

    message = check_message(sender, client, addressee,
                            message_content, realm=realm,
                            forwarder_user_profile=forwarder_user_profile)
    message['deliver_at'] = deliver_at
    message['delivery_type'] = delivery_type
    return do_schedule_messages([message])[0]

def check_stream_name(stream_name: str) -> None:
    if stream_name.strip() == "":
        raise JsonableError(_("Invalid stream name '%s'") % (stream_name,))
    if len(stream_name) > Stream.MAX_NAME_LENGTH:
        raise JsonableError(_("Stream name too long (limit: %s characters).") % (Stream.MAX_NAME_LENGTH,))
    for i in stream_name:
        if ord(i) == 0:
            raise JsonableError(_("Stream name '%s' contains NULL (0x00) characters.") % (stream_name,))

def check_default_stream_group_name(group_name: str) -> None:
    if group_name.strip() == "":
        raise JsonableError(_("Invalid default stream group name '%s'") % (group_name,))
    if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
        raise JsonableError(_("Default stream group name too long (limit: %s characters)")
                            % (DefaultStreamGroup.MAX_NAME_LENGTH,))
    for i in group_name:
        if ord(i) == 0:
            raise JsonableError(_("Default stream group name '%s' contains NULL (0x00) characters.")
                                % (group_name,))

def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile,
                                                   realm: Realm,
                                                   content: str) -> None:
    """
    Sends a PM error notification to a bot's owner if one hasn't already
    been sent in the last 5 minutes.
    """
    if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
        return

    if not sender.is_bot or sender.bot_owner is None:
        return

    # Don't send these notifications for cross-realm bot messages
    # (e.g. from EMAIL_GATEWAY_BOT) since the owner for
    # EMAIL_GATEWAY_BOT is probably the server administrator, not
    # the owner of the bot who could potentially fix the problem.
    if sender.realm != realm:
        return

    # We warn the user once every 5 minutes to avoid a flood of
    # PMs on a misconfigured integration, re-using the
    # UserProfile.last_reminder field, which is not used for bots.
    last_reminder = sender.last_reminder
    waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
    if last_reminder and timezone_now() - last_reminder <= waitperiod:
        return

    internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT),
                                  sender.bot_owner, content)

    sender.last_reminder = timezone_now()
    sender.save(update_fields=['last_reminder'])


def send_pm_if_empty_stream(sender: UserProfile,
                            stream: Optional[Stream],
                            stream_name: str,
                            realm: Realm) -> None:
    """If a bot sends a message to a stream that doesn't exist or has no
    subscribers, sends a notification to the bot owner (if not a
    cross-realm bot) so that the owner can correct the issue."""
    if not sender.is_bot or sender.bot_owner is None:
        return

    if stream is not None:
        num_subscribers = num_subscribers_for_stream_id(stream.id)
        if num_subscribers > 0:
            return

    if stream is None:
        error_msg = "that stream does not yet exist. To create it, "
    else:
        # num_subscribers == 0
        error_msg = "there are no subscribers to that stream. To join it, "

    content = ("Hi there! We thought you'd like to know that your bot **%s** just "
               "tried to send a message to stream `%s`, but %s"
               "click the gear in the left-side stream list." %
               (sender.full_name, stream_name, error_msg))

    send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)

def validate_sender_can_write_to_stream(sender: UserProfile,
                                        stream: Stream,
                                        forwarder_user_profile: Optional[UserProfile]) -> None:
    # Our caller is responsible for making sure that `stream` actually
    # matches the realm of the sender.

    if stream.is_announcement_only:
        if not (sender.is_realm_admin or is_cross_realm_bot_email(sender.email)):
            raise JsonableError(_("Only organization administrators can send to this stream."))

    if not (stream.invite_only or sender.is_guest):
        # This is a public stream and sender is not a guest user
        return

    if subscribed_to_stream(sender, stream.id):
        # It is private, but you are subscribed
        return

    if sender.is_api_super_user:
        return

    if (forwarder_user_profile is not None and forwarder_user_profile.is_api_super_user):
        return

    if sender.is_bot and (sender.bot_owner is not None and
                          subscribed_to_stream(sender.bot_owner, stream.id)):
        # Bots can send to any stream their owner can.
        return

    if sender.email == settings.WELCOME_BOT:
        # The welcome bot welcomes folks to the stream.
        return

    if sender.email == settings.NOTIFICATION_BOT:
        return

    # All other cases are an error.
    raise JsonableError(_("Not authorized to send to stream '%s'") % (stream.name,))
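
# Summary sketch (restating the checks above, not adding new ones): the
# authorization order for writing to a stream is roughly
#
#     announcement-only stream  -> require realm admin / cross-realm bot
#     public stream + non-guest -> allow
#     subscribed                -> allow
#     API super user (sender or forwarder)  -> allow
#     bot whose owner is subscribed         -> allow
#     welcome / notification system bots    -> allow
#     otherwise                 -> JsonableError("Not authorized ...")
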
# check_message:
# Returns a message dict ready for do_send_messages on success; raises a
# JsonableError on error.
def check_message(sender: UserProfile, client: Client, addressee: Addressee,
                  message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False,
                  forged_timestamp: Optional[float]=None,
                  forwarder_user_profile: Optional[UserProfile]=None,
                  local_id: Optional[str]=None,
                  sender_queue_id: Optional[str]=None,
                  widget_content: Optional[str]=None) -> Dict[str, Any]:
    stream = None

    message_content = message_content_raw.rstrip()
    if len(message_content) == 0:
        raise JsonableError(_("Message must not be empty"))
    if '\x00' in message_content:
        raise JsonableError(_("Message must not contain null bytes"))

    message_content = truncate_body(message_content)

    if realm is None:
        realm = sender.realm

    if addressee.is_stream():
        stream_name = addressee.stream_name()

        stream_name = stream_name.strip()
        check_stream_name(stream_name)

        topic_name = addressee.topic()
        topic_name = truncate_topic(topic_name)

        try:
            stream = get_stream(stream_name, realm)

            send_pm_if_empty_stream(sender, stream, stream_name, realm)

        except Stream.DoesNotExist:
            send_pm_if_empty_stream(sender, None, stream_name, realm)
            raise StreamDoesNotExistError(escape(stream_name))
        recipient = get_stream_recipient(stream.id)

        # This will raise JsonableError if there are problems.
        validate_sender_can_write_to_stream(
            sender=sender,
            stream=stream,
            forwarder_user_profile=forwarder_user_profile
        )

    elif addressee.is_private():
        user_profiles = addressee.user_profiles()

        if user_profiles is None or len(user_profiles) == 0:
            raise JsonableError(_("Message must have recipients"))

        mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror",
                                                    "jabber_mirror", "JabberMirror"]
        not_forged_mirror_message = mirror_message and not forged
        try:
            recipient = recipient_for_user_profiles(user_profiles, not_forged_mirror_message,
                                                    forwarder_user_profile, sender)
        except ValidationError as e:
            assert isinstance(e.messages[0], str)
            raise JsonableError(e.messages[0])
    else:
        # This is defensive code--Addressee already validates
        # the message type.
        raise AssertionError("Invalid message type")

    message = Message()
    message.sender = sender
    message.content = message_content
    message.recipient = recipient
    if addressee.is_stream():
        message.set_topic_name(topic_name)
    if forged and forged_timestamp is not None:
        # Forged messages come with a timestamp
        message.pub_date = timestamp_to_datetime(forged_timestamp)
    else:
        message.pub_date = timezone_now()
    message.sending_client = client

    # We render messages later in the process.
    assert message.rendered_content is None

    if client.name == "zephyr_mirror":
        id = already_sent_mirrored_message_id(message)
        if id is not None:
            return {'message': id}

    if widget_content is not None:
        try:
            widget_content = ujson.loads(widget_content)
        except Exception:
            raise JsonableError(_('Widgets: API programmer sent invalid JSON content'))

        error_msg = check_widget_content(widget_content)
        if error_msg:
            raise JsonableError(_('Widgets: %s') % (error_msg,))

    return {'message': message, 'stream': stream, 'local_id': local_id,
            'sender_queue_id': sender_queue_id, 'realm': realm,
            'widget_content': widget_content}

def _internal_prep_message(realm: Realm,
                           sender: UserProfile,
                           addressee: Addressee,
                           content: str) -> Optional[Dict[str, Any]]:
    """
    Creates a message object and checks it, but doesn't send it or save it to the database.
    The internal function that calls this can therefore batch send a bunch of created
    messages together as one database query.
    Call do_send_messages with a list of the return values of this method.
    """
    # Truncate the content if it's too long.
    if len(content) > MAX_MESSAGE_LENGTH:
        content = content[0:3900] + "\n\n[message was too long and has been truncated]"

    if realm is None:
        raise RuntimeError("None is not a valid realm for internal_prep_message!")

    if addressee.is_stream():
        ensure_stream(realm, addressee.stream_name())

    try:
        return check_message(sender, get_client("Internal"), addressee,
                             content, realm=realm)
    except JsonableError as e:
        logging.exception("Error queueing internal message by %s: %s" % (sender.email, e))

    return None
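
# Batching sketch (hypothetical values, not from the original module):
# because _internal_prep_message() only builds and checks the message dict,
# several prepared messages can be flushed in a single do_send_messages()
# call, as the docstring above describes.
#
#     prepped = [internal_prep_stream_message(realm, sender, "general",
#                                             "topic %d" % (i,), "body")
#                for i in range(3)]
#     do_send_messages([m for m in prepped if m is not None])
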
def internal_prep_stream_message(realm: Realm, sender: UserProfile,
                                 stream_name: str, topic: str,
                                 content: str) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    addressee = Addressee.for_stream(stream_name, topic)

    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=addressee,
        content=content,
    )

def internal_prep_private_message(realm: Realm,
                                  sender: UserProfile,
                                  recipient_user: UserProfile,
                                  content: str) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    addressee = Addressee.for_user_profile(recipient_user)

    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=addressee,
        content=content,
    )

def internal_send_message(realm: Realm, sender_email: str, recipient_type_name: str,
                          recipients: str, topic_name: str, content: str,
                          email_gateway: Optional[bool]=False) -> None:
    """internal_send_message should only be used where `sender_email` is a
    system bot."""

    # Verify the user is in fact a system bot
    assert(is_cross_realm_bot_email(sender_email) or sender_email == settings.ERROR_BOT)

    sender = get_system_bot(sender_email)
    parsed_recipients = extract_recipients(recipients)

    addressee = Addressee.legacy_build(
        sender,
        recipient_type_name,
        parsed_recipients,
        topic_name,
        realm=realm)

    msg = _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=addressee,
        content=content,
    )
    if msg is None:
        return

    do_send_messages([msg], email_gateway=email_gateway)

def internal_send_private_message(realm: Realm,
                                  sender: UserProfile,
                                  recipient_user: UserProfile,
                                  content: str) -> None:
    message = internal_prep_private_message(realm, sender, recipient_user, content)
    if message is None:
        return
    do_send_messages([message])

def internal_send_stream_message(realm: Realm, sender: UserProfile, stream_name: str,
                                 topic: str, content: str) -> None:
    message = internal_prep_stream_message(realm, sender, stream_name, topic, content)
    if message is None:
        return
    do_send_messages([message])

def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str],
                                 content: str) -> None:
    addressee = Addressee.for_private(emails, realm)
    message = _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=addressee,
        content=content,
    )
    if message is None:
        return
    do_send_messages([message])

def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str:
    # These colors are shared with the palette in subs.js.
    used_colors = [sub.color for sub in subs if sub.active]
    available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]

    if available_colors:
        return available_colors[0]
    else:
        return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
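
# Illustrative example (hypothetical palette, not the real
# STREAM_ASSIGNMENT_COLORS): pick_color() hands out the first unused
# palette entry, then wraps around once every color is taken.
#
#     # palette = ['#a6c7e5', '#b0a5fd', '#addfe5']; 3 active subs already
#     # -> all colors used, so the 4th stream gets palette[3 % 3] == '#a6c7e5'
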
def validate_user_access_to_subscribers(user_profile: Optional[UserProfile],
                                        stream: Stream) -> None:
    """ Validates whether the user can view the subscribers of a stream.  Raises a JsonableError if:
        * The user and the stream are in different realms
        * The realm is MIT and the stream is not invite only.
        * The stream is invite only, requesting_user is passed, and that user
          does not subscribe to the stream.
    """
    validate_user_access_to_subscribers_helper(
        user_profile,
        {"realm_id": stream.realm_id,
         "invite_only": stream.invite_only},
        # We use a lambda here so that we only compute whether the
        # user is subscribed if we have to
        lambda: subscribed_to_stream(cast(UserProfile, user_profile), stream.id))

def validate_user_access_to_subscribers_helper(user_profile: Optional[UserProfile],
                                               stream_dict: Mapping[str, Any],
                                               check_user_subscribed: Callable[[], bool]) -> None:
    """Helper for validate_user_access_to_subscribers that doesn't require
    a full stream object.  This function is a bit hard to read,
    because it is carefully optimized for performance in the two code
    paths we call it from:

    * In `bulk_get_subscriber_user_ids`, we already know whether the
      user was subscribed via `sub_dict`, and so we want to avoid a
      database query at all (especially since it calls this in a loop);
    * In `validate_user_access_to_subscribers`, we want to only check
      if the user is subscribed when we absolutely have to, since it
      costs a database query.

    The `check_user_subscribed` argument is a function that reports
    whether the user is subscribed to the stream.

    Note also that we raise a ValidationError in cases where the
    caller is doing the wrong thing (maybe these should be
    AssertionErrors), and JsonableError for 400 type errors.
    """
    if user_profile is None:
        raise ValidationError("Missing user to validate access for")

    if user_profile.realm_id != stream_dict["realm_id"]:
        raise ValidationError("Requesting user not in given realm")

    # Guest users can access subscribed public stream's subscribers
    if user_profile.is_guest:
        if check_user_subscribed():
            return
        # We could put an AssertionError here; in theory, we don't
        # have any code paths that would allow a guest user to access
        # other streams in the first place.

    if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
        raise JsonableError(_("Subscriber data is not available for this stream"))

    # Organization administrators can view subscribers for all streams.
    if user_profile.is_realm_admin:
        return

    if (stream_dict["invite_only"] and not check_user_subscribed()):
        raise JsonableError(_("Unable to retrieve subscribers for private stream"))
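
# Sketch of the two callsites described in the docstring above (values
# hypothetical): the helper never decides *how* subscription status is
# computed; the caller passes a thunk.
#
#     # cheap path: the answer is already in memory from a bulk query
#     validate_user_access_to_subscribers_helper(
#         user_profile, stream_dict, lambda: sub_dict[stream_dict["id"]])
#     # lazy path: hit the database only if the checks get that far
#     validate_user_access_to_subscribers_helper(
#         user_profile, stream_dict,
#         lambda: subscribed_to_stream(user_profile, stream.id))
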
def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
                                 user_profile: UserProfile,
                                 sub_dict: Mapping[int, bool],
                                 stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]:
    """sub_dict maps stream_id => whether the user is subscribed to that stream."""
    target_stream_dicts = []
    for stream_dict in stream_dicts:
        try:
            validate_user_access_to_subscribers_helper(user_profile, stream_dict,
                                                       lambda: sub_dict[stream_dict["id"]])
        except JsonableError:
            continue
        target_stream_dicts.append(stream_dict)

    stream_ids = [stream['id'] for stream in target_stream_dicts]
    stream_recipient.populate_for_stream_ids(stream_ids)
    recipient_ids = sorted([
        stream_recipient.recipient_id_for(stream_id)
        for stream_id in stream_ids
    ])

    result = dict((stream["id"], []) for stream in stream_dicts)  # type: Dict[int, List[int]]
    if not recipient_ids:
        return result

    '''
    The raw SQL below leads to more than a 2x speedup when tested with
    20k+ total subscribers.  (For large realms with lots of default
    streams, this function deals with LOTS of data, so it is important
    to optimize.)
    '''

    id_list = ', '.join(str(recipient_id) for recipient_id in recipient_ids)

    query = '''
        SELECT
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        FROM
            zerver_subscription
        INNER JOIN zerver_userprofile ON
            zerver_userprofile.id = zerver_subscription.user_profile_id
        WHERE
            zerver_subscription.recipient_id in (%s) AND
            zerver_subscription.active AND
            zerver_userprofile.is_active
        ORDER BY
            zerver_subscription.recipient_id
        ''' % (id_list,)

    cursor = connection.cursor()
    cursor.execute(query)
    rows = cursor.fetchall()
    cursor.close()

    recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict()

    '''
    Using groupby/itemgetter here is important for performance, at scale.
    It makes it so that all interpreter overhead is just O(N) in nature.
    '''
    for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
        user_profile_ids = [r[1] for r in recip_rows]
        stream_id = recip_to_stream_id[recip_id]
        result[stream_id] = list(user_profile_ids)

    return result
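
# Shape sketch (hypothetical ids): the groupby pass above relies on the rows
# arriving ORDER BY recipient_id, and produces
#
#     result = {stream_id: [user_profile_id, ...], ...}
#
# with an empty list left in place for any stream whose subscribers the
# requester may not see (those were filtered out before the SQL ran).
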
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
    # TODO: Make a generic stub for QuerySet
    """ Build a query to get the subscribers list for a stream.  Raises a
    JsonableError if the requesting user isn't authorized to view the
    subscribers ('realm' is optional in stream).

    The caller can refine this query with select_related(), values(), etc. depending
    on whether it wants objects or just certain fields
    """
    validate_user_access_to_subscribers(requesting_user, stream)

    # Note that non-active users may still have "active" subscriptions, because we
    # want to be able to easily reactivate them with their old subscriptions.  This
    # is why the query here has to look at the UserProfile.is_active flag.
    subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter(
        user_profile__is_active=True
    )
    return subscriptions


def get_subscriber_emails(stream: Stream,
                          requesting_user: Optional[UserProfile]=None) -> List[str]:
    subscriptions_query = get_subscribers_query(stream, requesting_user)
    subscriptions = subscriptions_query.values('user_profile__email')
    return [subscription['user_profile__email'] for subscription in subscriptions]


def notify_subscriptions_added(user_profile: UserProfile,
                               sub_pairs: Iterable[Tuple[Subscription, Stream]],
                               stream_user_ids: Callable[[Stream], List[int]],
                               recent_traffic: Dict[int, int],
                               no_log: bool=False) -> None:
    if not no_log:
        log_event({'type': 'subscription_added',
                   'user': user_profile.email,
                   'names': [stream.name for sub, stream in sub_pairs],
                   'realm': user_profile.realm.string_id})

    # Send a notification to the user who subscribed.
    payload = [dict(name=stream.name,
                    stream_id=stream.id,
                    in_home_view=subscription.in_home_view,
                    invite_only=stream.invite_only,
                    is_announcement_only=stream.is_announcement_only,
                    color=subscription.color,
                    email_address=encode_email_address(stream),
                    desktop_notifications=subscription.desktop_notifications,
                    audible_notifications=subscription.audible_notifications,
                    push_notifications=subscription.push_notifications,
                    email_notifications=subscription.email_notifications,
                    description=stream.description,
                    pin_to_top=subscription.pin_to_top,
                    is_old_stream=is_old_stream(stream.date_created),
                    stream_weekly_traffic=get_average_weekly_stream_traffic(
                        stream.id, stream.date_created, recent_traffic),
                    subscribers=stream_user_ids(stream),
                    history_public_to_subscribers=stream.history_public_to_subscribers)
               for (subscription, stream) in sub_pairs]
    event = dict(type="subscription", op="add",
                 subscriptions=payload)
    send_event(user_profile.realm, event, [user_profile.id])

def get_peer_user_ids_for_stream_change(stream: Stream,
                                        altered_user_ids: Iterable[int],
                                        subscribed_user_ids: Iterable[int]) -> Set[int]:
    '''
    altered_user_ids is the user_ids that we are adding/removing
    subscribed_user_ids is the already-subscribed user_ids

    Based on stream policy, we notify the correct bystanders, while
    not notifying altered_users (who get subscribers via another event)
    '''

    if stream.invite_only:
        # PRIVATE STREAMS
        # Realm admins can access all private stream subscribers.  Send them an
        # event even if they aren't subscribed to stream.
        realm_admin_ids = [user.id for user in stream.realm.get_admin_users()]
        user_ids_to_notify = []
        user_ids_to_notify.extend(realm_admin_ids)
        user_ids_to_notify.extend(subscribed_user_ids)
        return set(user_ids_to_notify) - set(altered_user_ids)

    else:
        # PUBLIC STREAMS
        # We now do "peer_add" or "peer_remove" events even for streams
        # users were never subscribed to, in order for the neversubscribed
        # structure to stay up-to-date.
        return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids)
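
# Set-arithmetic sketch (hypothetical ids) for the private-stream branch
# above: admins and current subscribers are notified, minus the users whose
# subscriptions changed (they get a richer event elsewhere).
#
#     # realm_admin_ids = {1, 2}; subscribed = {2, 3, 4}; altered = {4}
#     # -> notify ({1, 2} | {2, 3, 4}) - {4} == {1, 2, 3}
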
def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]:
    stream_ids = [stream.id for stream in streams]

    all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter(
        user_profile__is_active=True,
    ).values(
        'recipient__type_id',
        'user_profile_id',
    ).order_by(
        'recipient__type_id',
    )

    get_stream_id = itemgetter('recipient__type_id')

    all_subscribers_by_stream = defaultdict(list)  # type: Dict[int, List[int]]
    for stream_id, rows in itertools.groupby(all_subs, get_stream_id):
        user_ids = [row['user_profile_id'] for row in rows]
        all_subscribers_by_stream[stream_id] = user_ids

    return all_subscribers_by_stream

def get_last_message_id() -> int:
    # We generally use this function to populate RealmAuditLog, and
    # the max id here is actually systemwide, not per-realm.  I
    # assume there's some advantage in not filtering by realm.
    last_id = Message.objects.aggregate(Max('id'))['id__max']
    if last_id is None:
        # During initial realm creation, there might be 0 messages in
        # the database; in that case, the `aggregate` query returns
        # None.  Since we want an int for "beginning of time", use -1.
        last_id = -1
    return last_id

SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_add_subscriptions(streams: Iterable[Stream],
                           users: Iterable[UserProfile],
                           from_stream_creation: bool=False,
                           acting_user: Optional[UserProfile]=None) -> SubT:
    users = list(users)

    recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams])  # type: Mapping[int, Recipient]
    recipients = [recipient.id for recipient in recipients_map.values()]  # type: List[int]

    stream_map = {}  # type: Dict[int, Stream]
    for stream in streams:
        stream_map[recipients_map[stream.id].id] = stream

    subs_by_user = defaultdict(list)  # type: Dict[int, List[Subscription]]
    all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile')
    for sub in all_subs_query:
        subs_by_user[sub.user_profile_id].append(sub)

    already_subscribed = []  # type: List[Tuple[UserProfile, Stream]]
    subs_to_activate = []  # type: List[Tuple[Subscription, Stream]]
    new_subs = []  # type: List[Tuple[UserProfile, int, Stream]]
    for user_profile in users:
        needs_new_sub = set(recipients)  # type: Set[int]
        for sub in subs_by_user[user_profile.id]:
            if sub.recipient_id in needs_new_sub:
                needs_new_sub.remove(sub.recipient_id)
                if sub.active:
                    already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
                else:
                    subs_to_activate.append((sub, stream_map[sub.recipient_id]))
                    # Mark the sub as active, without saving, so that
                    # pick_color will consider this to be an active
                    # subscription when picking colors
                    sub.active = True
        for recipient_id in needs_new_sub:
            new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))

    subs_to_add = []  # type: List[Tuple[Subscription, Stream]]
    for (user_profile, recipient_id, stream) in new_subs:
        color = pick_color(user_profile, subs_by_user[user_profile.id])
        sub_to_add = Subscription(user_profile=user_profile, active=True,
                                  color=color, recipient_id=recipient_id,
                                  desktop_notifications=user_profile.enable_stream_desktop_notifications,
                                  audible_notifications=user_profile.enable_stream_sounds,
                                  push_notifications=user_profile.enable_stream_push_notifications,
                                  email_notifications=user_profile.enable_stream_email_notifications,
                                  )
        subs_by_user[user_profile.id].append(sub_to_add)
        subs_to_add.append((sub_to_add, stream))

    # TODO: XXX: This transaction really needs to be done at the serializable
    # transaction isolation level.
    with transaction.atomic():
        occupied_streams_before = list(get_occupied_streams(user_profile.realm))
        Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
        sub_ids = [sub.id for (sub, stream) in subs_to_activate]
        Subscription.objects.filter(id__in=sub_ids).update(active=True)
        occupied_streams_after = list(get_occupied_streams(user_profile.realm))

    # Log Subscription Activities in RealmAuditLog
    event_time = timezone_now()
    event_last_message_id = get_last_message_id()

    all_subscription_logs = []  # type: List[RealmAuditLog]
    for (sub, stream) in subs_to_add:
        all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
                                                   acting_user=acting_user,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
                                                   event_time=event_time))
    for (sub, stream) in subs_to_activate:
        all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
                                                   acting_user=acting_user,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
                                                   event_time=event_time))
    # Now since we have all log objects generated we can do a bulk insert
    RealmAuditLog.objects.bulk_create(all_subscription_logs)

    new_occupied_streams = [stream for stream in
                            set(occupied_streams_after) - set(occupied_streams_before)
                            if not stream.invite_only]
    if new_occupied_streams and not from_stream_creation:
        event = dict(type="stream", op="occupy",
                     streams=[stream.to_dict()
                              for stream in new_occupied_streams])
        send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))

    # Notify all existing users on streams that users have joined

    # First, get all users subscribed to the streams that we care about
    # We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minimize DB queries
    all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)

    def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]:
        if stream.is_in_zephyr_realm and not stream.invite_only:
            return []
        user_ids = all_subscribers_by_stream[stream.id]
        return user_ids

    sub_tuples_by_user = defaultdict(list)  # type: Dict[int, List[Tuple[Subscription, Stream]]]
    new_streams = set()  # type: Set[Tuple[int, int]]
    for (sub, stream) in subs_to_add + subs_to_activate:
        sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
        new_streams.add((sub.user_profile.id, stream.id))

    # We now send several types of events to notify browsers.  The
    # first batch is notifications to users on invite-only streams
    # that the stream exists.
    for stream in streams:
        if not stream.is_public():
            # Users newly added to invite-only streams need a `create`
            # notification, since they need the stream to exist before
            # they get the "subscribe" notification, and so that they
            # can manage the new stream.
            # Realm admins already have all created private streams.
            realm_admin_ids = [user.id for user in user_profile.realm.get_admin_users()]
            new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and
                             user.id not in realm_admin_ids]
            send_stream_creation_event(stream, new_users_ids)

    stream_ids = {stream.id for stream in streams}
    recent_traffic = get_streams_traffic(stream_ids=stream_ids)
    # The second batch is events for the users themselves that they
    # were subscribed to the new streams.
    for user_profile in users:
        if len(sub_tuples_by_user[user_profile.id]) == 0:
            continue
        sub_pairs = sub_tuples_by_user[user_profile.id]
        notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids,
                                   recent_traffic)

    # The third batch is events for other users who are tracking the
    # subscribers lists of streams in their browser; everyone for
    # public streams and only existing subscribers for private streams.
    for stream in streams:
        if stream.is_in_zephyr_realm and not stream.invite_only:
            continue

        new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
        subscribed_user_ids = all_subscribers_by_stream[stream.id]

        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_user_ids=new_user_ids,
            subscribed_user_ids=subscribed_user_ids,
        )

        if peer_user_ids:
            for new_user_id in new_user_ids:
                event = dict(type="subscription", op="peer_add",
                             subscriptions=[stream.name],
                             user_id=new_user_id)
                send_event(stream.realm, event, peer_user_ids)

    return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
            [(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
            already_subscribed)
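
# Return-shape note (restating the SubT alias above): bulk_add_subscriptions()
# returns a pair of lists, so callers can report newly-subscribed and
# already-subscribed pairs separately.
#
#     new_subs, already_subscribed = bulk_add_subscriptions(streams, users)
#     # new_subs:           [(user_profile, stream), ...] created or reactivated
#     # already_subscribed: [(user_profile, stream), ...] no-ops
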
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
                                 no_log: bool=False) -> None:
    if not no_log:
        log_event({'type': 'subscription_removed',
                   'user': user_profile.email,
                   'names': [stream.name for stream in streams],
                   'realm': user_profile.realm.string_id})

    payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
    event = dict(type="subscription", op="remove",
                 subscriptions=payload)
    send_event(user_profile.realm, event, [user_profile.id])

SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
                              streams: Iterable[Stream],
                              acting_client: Client,
                              acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:

    users = list(users)
    streams = list(streams)

    stream_dict = {stream.id: stream for stream in streams}

    existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)

    def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
        stream_ids = {stream.id for stream in streams}

        not_subscribed = []  # type: List[Tuple[UserProfile, Stream]]

        for user_profile in users:
            user_sub_stream_info = existing_subs_by_user[user_profile.id]

            subscribed_stream_ids = {
                stream.id
                for (sub, stream) in user_sub_stream_info
            }
            not_subscribed_stream_ids = stream_ids - subscribed_stream_ids

            for stream_id in not_subscribed_stream_ids:
                stream = stream_dict[stream_id]
                not_subscribed.append((user_profile, stream))

        return not_subscribed

    not_subscribed = get_non_subscribed_tups()

    subs_to_deactivate = []  # type: List[Tuple[Subscription, Stream]]
    sub_ids_to_deactivate = []  # type: List[int]

    # This loop just flattens out our data into big lists for
    # bulk operations.
    for tup_list in existing_subs_by_user.values():
        for (sub, stream) in tup_list:
            subs_to_deactivate.append((sub, stream))
            sub_ids_to_deactivate.append(sub.id)

    our_realm = users[0].realm

    # TODO: XXX: This transaction really needs to be done at the serializable
    # transaction isolation level.
    with transaction.atomic():
        occupied_streams_before = list(get_occupied_streams(our_realm))
        Subscription.objects.filter(
            id__in=sub_ids_to_deactivate,
        ).update(active=False)
        occupied_streams_after = list(get_occupied_streams(our_realm))

    # Log Subscription Activities in RealmAuditLog
    event_time = timezone_now()
    event_last_message_id = get_last_message_id()
    all_subscription_logs = []  # type: List[RealmAuditLog]
    for (sub, stream) in subs_to_deactivate:
        all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
                                                   event_time=event_time))
    # Now since we have all log objects generated we can do a bulk insert
    RealmAuditLog.objects.bulk_create(all_subscription_logs)

    altered_user_dict = defaultdict(list)  # type: Dict[int, List[UserProfile]]
    streams_by_user = defaultdict(list)  # type: Dict[int, List[Stream]]
    for (sub, stream) in subs_to_deactivate:
        streams_by_user[sub.user_profile_id].append(stream)
        altered_user_dict[stream.id].append(sub.user_profile)

    for user_profile in users:
        if len(streams_by_user[user_profile.id]) == 0:
            continue
        notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])

        event = {'type': 'mark_stream_messages_as_read',
                 'client_id': acting_client.id,
                 'user_profile_id': user_profile.id,
                 'stream_ids': [stream.id for stream in streams]}
        queue_json_publish("deferred_work", event)

    all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)

    def send_peer_remove_event(stream: Stream) -> None:
        if stream.is_in_zephyr_realm and not stream.invite_only:
            return

        altered_users = altered_user_dict[stream.id]
        altered_user_ids = [u.id for u in altered_users]

        subscribed_user_ids = all_subscribers_by_stream[stream.id]

        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_user_ids=altered_user_ids,
            subscribed_user_ids=subscribed_user_ids,
        )

        if peer_user_ids:
            for removed_user in altered_users:
                event = dict(type="subscription",
                             op="peer_remove",
                             subscriptions=[stream.name],
                             user_id=removed_user.id)
                send_event(our_realm, event, peer_user_ids)

    for stream in streams:
        send_peer_remove_event(stream=stream)

    new_vacant_streams = [stream for stream in
                          set(occupied_streams_before) - set(occupied_streams_after)]
    new_vacant_private_streams = [stream for stream in new_vacant_streams
                                  if stream.invite_only]
    new_vacant_public_streams = [stream for stream in new_vacant_streams
                                 if not stream.invite_only]
    if new_vacant_public_streams:
        event = dict(type="stream", op="vacate",
                     streams=[stream.to_dict()
                              for stream in new_vacant_public_streams])
        send_event(our_realm, event, active_user_ids(our_realm.id))
    if new_vacant_private_streams:
        # Deactivate any newly-vacant private streams
        for stream in new_vacant_private_streams:
            do_deactivate_stream(stream)

    return (
        [(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
        not_subscribed,
    )

def log_subscription_property_change(user_email: str, stream_name: str, property: str,
                                     value: Any) -> None:
    event = {'type': 'subscription_property',
             'property': property,
             'user': user_email,
             'stream_name': stream_name,
             'value': value}
    log_event(event)

def do_change_subscription_property(user_profile: UserProfile, sub: Subscription,
                                    stream: Stream, property_name: str, value: Any
                                    ) -> None:
    setattr(sub, property_name, value)
    sub.save(update_fields=[property_name])
    log_subscription_property_change(user_profile.email, stream.name,
                                     property_name, value)

    event = dict(type="subscription",
                 op="update",
                 email=user_profile.email,
                 property=property_name,
                 value=value,
                 stream_id=stream.id,
                 name=stream.name)
    send_event(user_profile.realm, event, [user_profile.id])

def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None:
    user_profile.set_password(password)
    if commit:
        user_profile.save(update_fields=["password"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
                                 event_time=event_time)

def do_change_full_name(user_profile: UserProfile, full_name: str,
                        acting_user: Optional[UserProfile]) -> None:
    old_name = user_profile.full_name
    user_profile.full_name = full_name
    user_profile.save(update_fields=["full_name"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
                                 event_time=event_time, extra_data=old_name)
    payload = dict(email=user_profile.email,
                   user_id=user_profile.id,
                   full_name=user_profile.full_name)
    send_event(user_profile.realm,
               dict(type='realm_user', op='update', person=payload),
               active_user_ids(user_profile.realm_id))
    if user_profile.is_bot:
        send_event(user_profile.realm,
                   dict(type='realm_bot', op='update', bot=payload),
                   bot_owner_user_ids(user_profile))

def check_change_full_name(user_profile: UserProfile, full_name_raw: str,
                           acting_user: UserProfile) -> str:
    """Verifies that the user's proposed full name is valid.  The caller
    is responsible for checking permissions.  Returns the new
    full name, which may differ from what was passed in (because this
    function strips whitespace)."""
    new_full_name = check_full_name(full_name_raw)
    do_change_full_name(user_profile, new_full_name, acting_user)
    return new_full_name

def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str,
                               acting_user: UserProfile) -> None:
    new_full_name = check_full_name(full_name_raw)

    if new_full_name == user_profile.full_name:
        # Our web app will try to patch full_name even if the user didn't
        # modify the name in the form.  We just silently ignore those
        # situations.
        return

    check_bot_name_available(
        realm_id=user_profile.realm_id,
        full_name=new_full_name,
    )
    do_change_full_name(user_profile, new_full_name, acting_user)

def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile,
                        acting_user: UserProfile) -> None:
    previous_owner = user_profile.bot_owner
    user_profile.bot_owner = bot_owner
    user_profile.save()  # Can't use update_fields because of how the foreign key works.
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
                                 event_time=event_time)

    update_users = bot_owner_user_ids(user_profile)

    # For admins, an update event is sent instead of delete/add
    # events: an admin's bot_data contains all the bots, and none of
    # them should be removed/(added again).

    # Delete the bot from previous owner's bot data.
    if previous_owner and not previous_owner.is_realm_admin:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op="delete",
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 )),
                   {previous_owner.id, })
        # Do not send update event for previous bot owner.
        update_users = update_users - {previous_owner.id, }

    # Notify the new owner that the bot has been added.
    if not bot_owner.is_realm_admin:
        add_event = created_bot_event(user_profile)
        send_event(user_profile.realm, add_event, {bot_owner.id, })
        # Do not send update event for bot_owner.
        update_users = update_users - {bot_owner.id, }

    send_event(user_profile.realm,
               dict(type='realm_bot',
                    op='update',
                    bot=dict(email=user_profile.email,
                             user_id=user_profile.id,
                             owner_id=user_profile.bot_owner.id,
                             )),
               update_users)

def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
    user_profile.tos_version = tos_version
    user_profile.save(update_fields=["tos_version"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
                                 modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
                                 event_time=event_time)

def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> None:
    user_profile.api_key = generate_api_key()
    user_profile.save(update_fields=["api_key"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED,
                                 event_time=event_time)

    if user_profile.is_bot:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 api_key=user_profile.api_key,
                                 )),
                   bot_owner_user_ids(user_profile))

def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str) -> None:
    user_profile.avatar_source = avatar_source
    user_profile.avatar_version += 1
    user_profile.save(update_fields=["avatar_source", "avatar_version"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
                                 extra_data={'avatar_source': avatar_source},
                                 event_time=event_time)

    if user_profile.is_bot:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 avatar_url=avatar_url(user_profile),
                                 )),
                   bot_owner_user_ids(user_profile))

    payload = dict(
        email=user_profile.email,
        avatar_source=user_profile.avatar_source,
        avatar_url=avatar_url(user_profile),
        avatar_url_medium=avatar_url(user_profile, medium=True),
        user_id=user_profile.id
    )

    send_event(user_profile.realm,
               dict(type='realm_user',
                    op='update',
                    person=payload),
               active_user_ids(user_profile.realm_id))

def do_delete_avatar_image(user: UserProfile) -> None:
    do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR)
    delete_avatar_image(user)

def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None:
    realm.icon_source = icon_source
    realm.icon_version += 1
    realm.save(update_fields=["icon_source", "icon_version"])

    if log:
        log_event({'type': 'realm_change_icon',
                   'realm': realm.string_id,
                   'icon_source': icon_source})

    send_event(realm,
               dict(type='realm',
                    op='update_dict',
                    property="icon",
                    data=dict(icon_source=realm.icon_source,
                              icon_url=realm_icon_url(realm))),
               active_user_ids(realm.id))

def do_change_plan_type(user: UserProfile, plan_type: int) -> None:
    realm = user.realm
    old_value = realm.plan_type
    realm.plan_type = plan_type
    realm.save(update_fields=['plan_type'])
    RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
                                 realm=realm, acting_user=user, event_time=timezone_now(),
                                 extra_data={'old_value': old_value, 'new_value': plan_type})

    if plan_type == Realm.STANDARD:
        realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
        realm.message_visibility_limit = None
    elif plan_type == Realm.STANDARD_FREE:
        realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
        realm.message_visibility_limit = None
    elif plan_type == Realm.LIMITED:
        realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
        realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
    realm.save(update_fields=['_max_invites', 'message_visibility_limit'])
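
# Quick reference (derived from the branches above): what each plan type sets.
#
#     STANDARD / STANDARD_FREE -> max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX,
#                                 no message_visibility_limit
#     LIMITED                  -> max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX,
#                                 message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
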
def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream],
                                     log: bool=True) -> None:
    user_profile.default_sending_stream = stream
    user_profile.save(update_fields=['default_sending_stream'])
    if log:
        log_event({'type': 'user_change_default_sending_stream',
                   'user': user_profile.email,
                   'stream': str(stream)})
    if user_profile.is_bot:
        if stream:
            stream_name = stream.name  # type: Optional[str]
        else:
            stream_name = None
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 default_sending_stream=stream_name,
                                 )),
                   bot_owner_user_ids(user_profile))

def do_change_default_events_register_stream(user_profile: UserProfile,
                                             stream: Optional[Stream],
                                             log: bool=True) -> None:
    user_profile.default_events_register_stream = stream
    user_profile.save(update_fields=['default_events_register_stream'])
    if log:
        log_event({'type': 'user_change_default_events_register_stream',
                   'user': user_profile.email,
                   'stream': str(stream)})
    if user_profile.is_bot:
        if stream:
            stream_name = stream.name  # type: Optional[str]
        else:
            stream_name = None
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 default_events_register_stream=stream_name,
                                 )),
                   bot_owner_user_ids(user_profile))

def do_change_default_all_public_streams(user_profile: UserProfile, value: bool,
                                         log: bool=True) -> None:
    user_profile.default_all_public_streams = value
    user_profile.save(update_fields=['default_all_public_streams'])
    if log:
        log_event({'type': 'user_change_default_all_public_streams',
                   'user': user_profile.email,
                   'value': str(value)})
    if user_profile.is_bot:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 default_all_public_streams=user_profile.default_all_public_streams,
                                 )),
                   bot_owner_user_ids(user_profile))

def do_change_is_admin(user_profile: UserProfile, value: bool,
                       permission: str='administer') -> None:
    if permission == "administer":
        user_profile.is_realm_admin = value
        user_profile.save(update_fields=["is_realm_admin"])
    elif permission == "api_super_user":
        user_profile.is_api_super_user = value
        user_profile.save(update_fields=["is_api_super_user"])
    else:
        raise AssertionError("Invalid admin permission")

    if permission == 'administer':
        event = dict(type="realm_user", op="update",
                     person=dict(email=user_profile.email,
                                 user_id=user_profile.id,
                                 is_admin=value))
        send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))

def do_change_is_guest(user_profile: UserProfile, value: bool) -> None:
    user_profile.is_guest = value
    user_profile.save(update_fields=["is_guest"])
    event = dict(type="realm_user", op="update",
                 person=dict(email=user_profile.email,
                             user_id=user_profile.id,
                             is_guest=value))
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))


def do_change_stream_invite_only(stream: Stream, invite_only: bool,
                                 history_public_to_subscribers: Optional[bool]=None) -> None:
    history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
        stream.realm,
        invite_only,
        history_public_to_subscribers
    )
    stream.invite_only = invite_only
    stream.history_public_to_subscribers = history_public_to_subscribers
    stream.save(update_fields=['invite_only', 'history_public_to_subscribers'])

def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None:
    stream.is_web_public = is_web_public
    stream.save(update_fields=['is_web_public'])

def do_change_stream_announcement_only(stream: Stream, is_announcement_only: bool) -> None:
    stream.is_announcement_only = is_announcement_only
    stream.save(update_fields=['is_announcement_only'])

def do_rename_stream(stream: Stream, new_name: str, log: bool=True) -> Dict[str, str]:
    old_name = stream.name
    stream.name = new_name
    stream.save(update_fields=["name"])

    if log:
        log_event({'type': 'stream_name_change',
                   'realm': stream.realm.string_id,
                   'new_name': new_name})

    recipient = get_stream_recipient(stream.id)
    messages = Message.objects.filter(recipient=recipient).only("id")

    # Update the display recipient and stream, which are easy single
    # items to set.
    old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
    new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
    if old_cache_key != new_cache_key:
        cache_delete(old_cache_key)
        cache_set(new_cache_key, stream)
    cache_set(display_recipient_cache_key(recipient.id), stream.name)

    # Delete cache entries for everything else, which is cheaper and
    # clearer than trying to set them.  display_recipient is the out of
    # date field in all cases.
    cache_delete_many(
        to_dict_cache_key_id(message.id) for message in messages)
    new_email = encode_email_address(stream)

    # We will tell our users to essentially
    # update stream.name = new_name where name = old_name
    # and update stream.email = new_email where name = old_name.
    # We could optimize this by trying to send one message, but the
    # client code really wants one property update at a time, and
    # updating stream names is a pretty infrequent operation.
    # More importantly, we want to key these updates by id, not name,
    # since id is the immutable primary key, and obviously name is not.
    data_updates = [
        ['email_address', new_email],
        ['name', new_name],
    ]
    for property, value in data_updates:
        event = dict(
            op="update",
            type="stream",
            property=property,
            value=value,
            stream_id=stream.id,
            name=old_name,
        )
        send_event(stream.realm, event, can_access_stream_user_ids(stream))

    # Even though the token doesn't change, the web client needs to update the
    # email forwarding address to display the correctly-escaped new name.
    return {"email_address": new_email}

def do_change_stream_description(stream: Stream, new_description: str) -> None:
    stream.description = new_description
    stream.save(update_fields=['description'])

    event = dict(
        type='stream',
        op='update',
        property='description',
        name=stream.name,
        stream_id=stream.id,
        value=new_description,
    )
    send_event(stream.realm, event, can_access_stream_user_ids(stream))

def do_create_realm(string_id: str, name: str,
                    emails_restricted_to_domains: Optional[bool]=None) -> Realm:
    existing_realm = get_realm(string_id)
    if existing_realm is not None:
        raise AssertionError("Realm %s already exists!" % (string_id,))

    kwargs = {}  # type: Dict[str, Any]
    if emails_restricted_to_domains is not None:
        kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains
    if settings.BILLING_ENABLED:
        kwargs['plan_type'] = Realm.LIMITED
        kwargs['message_visibility_limit'] = Realm.MESSAGE_VISIBILITY_LIMITED
    realm = Realm(string_id=string_id, name=name, **kwargs)
    realm.save()

    # Create stream once Realm object has been saved
    notifications_stream = ensure_stream(realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME)
    realm.notifications_stream = notifications_stream

    signup_notifications_stream = ensure_stream(
        realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True,
        stream_description="A private stream for core team members.")
    realm.signup_notifications_stream = signup_notifications_stream

    realm.save(update_fields=['notifications_stream', 'signup_notifications_stream'])

    # Log the event
    log_event({"type": "realm_created",
               "string_id": string_id,
               "emails_restricted_to_domains": emails_restricted_to_domains})

    # Send a notification to the admin realm (if configured)
    if settings.NOTIFICATION_BOT is not None:
        signup_message = "Signups enabled"
        admin_realm = get_system_bot(settings.NOTIFICATION_BOT).realm
        internal_send_message(admin_realm, settings.NOTIFICATION_BOT, "stream",
                              "signups", realm.display_subdomain, signup_message)
    return realm
UserProfile.notification_setting_types[name]\n assert isinstance(value, notification_setting_type), (\n 'Cannot update %s: %s is not an instance of %s' % (\n name, value, notification_setting_type,))\n\n setattr(user_profile, name, value)\n\n # Disabling digest emails should clear a user's email queue\n if name == 'enable_digest_emails' and not value:\n clear_scheduled_emails(user_profile.id, ScheduledEmail.DIGEST)\n\n user_profile.save(update_fields=[name])\n event = {'type': 'update_global_notifications',\n 'user': user_profile.email,\n 'notification_name': name,\n 'setting': value}\n if log:\n log_event(event)\n send_event(user_profile.realm, event, [user_profile.id])\n\ndef do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:\n user_profile.enter_sends = enter_sends\n user_profile.save(update_fields=[\"enter_sends\"])\n\ndef do_set_user_display_setting(user_profile: UserProfile,\n setting_name: str,\n setting_value: Union[bool, str]) -> None:\n property_type = UserProfile.property_types[setting_name]\n assert isinstance(setting_value, property_type)\n setattr(user_profile, setting_name, setting_value)\n user_profile.save(update_fields=[setting_name])\n event = {'type': 'update_display_settings',\n 'user': user_profile.email,\n 'setting_name': setting_name,\n 'setting': setting_value}\n if setting_name == \"default_language\":\n assert isinstance(setting_value, str)\n event['language_name'] = get_language_name(setting_value)\n\n send_event(user_profile.realm, event, [user_profile.id])\n\n # Updates to the timezone display setting are sent to all users\n if setting_name == \"timezone\":\n payload = dict(email=user_profile.email,\n user_id=user_profile.id,\n timezone=user_profile.timezone)\n send_event(user_profile.realm,\n dict(type='realm_user', op='update', person=payload),\n active_user_ids(user_profile.realm_id))\n\ndef lookup_default_stream_groups(default_stream_group_names: List[str],\n realm: Realm) -> List[DefaultStreamGroup]:\n default_stream_groups = []\n for group_name in default_stream_group_names:\n try:\n default_stream_group = DefaultStreamGroup.objects.get(\n name=group_name, realm=realm)\n except DefaultStreamGroup.DoesNotExist:\n raise JsonableError(_('Invalid default stream group %s' % (group_name,)))\n default_stream_groups.append(default_stream_group)\n return default_stream_groups\n\ndef set_default_streams(realm: Realm, stream_dict: Dict[str, Dict[str, Any]]) -> None:\n DefaultStream.objects.filter(realm=realm).delete()\n stream_names = []\n for name, options in stream_dict.items():\n stream_names.append(name)\n stream = ensure_stream(realm,\n name,\n invite_only = options.get(\"invite_only\", False),\n stream_description = options.get(\"description\", ''))\n DefaultStream.objects.create(stream=stream, realm=realm)\n\n # Always include the realm's default notifications streams, if it exists\n if realm.notifications_stream is not None:\n DefaultStream.objects.get_or_create(stream=realm.notifications_stream, realm=realm)\n\n log_event({'type': 'default_streams',\n 'realm': realm.string_id,\n 'streams': stream_names})\n\ndef notify_default_streams(realm: Realm) -> None:\n event = dict(\n type=\"default_streams\",\n default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id))\n )\n send_event(realm, event, active_user_ids(realm.id))\n\ndef notify_default_stream_groups(realm: Realm) -> None:\n event = dict(\n type=\"default_stream_groups\",\n 
def notify_default_streams(realm: Realm) -> None:
    event = dict(
        type="default_streams",
        default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id))
    )
    send_event(realm, event, active_user_ids(realm.id))

def notify_default_stream_groups(realm: Realm) -> None:
    event = dict(
        type="default_stream_groups",
        default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm))
    )
    send_event(realm, event, active_user_ids(realm.id))

def do_add_default_stream(stream: Stream) -> None:
    realm_id = stream.realm_id
    stream_id = stream.id
    if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
        DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
        notify_default_streams(stream.realm)

def do_remove_default_stream(stream: Stream) -> None:
    realm_id = stream.realm_id
    stream_id = stream.id
    DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete()
    notify_default_streams(stream.realm)

def do_create_default_stream_group(realm: Realm, group_name: str,
                                   description: str, streams: List[Stream]) -> None:
    default_streams = get_default_streams_for_realm(realm.id)
    for stream in streams:
        if stream in default_streams:
            raise JsonableError(_(
                "'%(stream_name)s' is a default stream and cannot be added to '%(group_name)s'")
                % {'stream_name': stream.name, 'group_name': group_name})

    check_default_stream_group_name(group_name)
    (group, created) = DefaultStreamGroup.objects.get_or_create(
        name=group_name, realm=realm, description=description)
    if not created:
        raise JsonableError(_("Default stream group '%(group_name)s' already exists")
                            % {'group_name': group_name})

    group.streams.set(streams)
    notify_default_stream_groups(realm)

def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup,
                                           streams: List[Stream]) -> None:
    default_streams = get_default_streams_for_realm(realm.id)
    for stream in streams:
        if stream in default_streams:
            raise JsonableError(_(
                "'%(stream_name)s' is a default stream and cannot be added to '%(group_name)s'")
                % {'stream_name': stream.name, 'group_name': group.name})
        if stream in group.streams.all():
            raise JsonableError(_(
                "Stream '%(stream_name)s' is already present in default stream group '%(group_name)s'")
                % {'stream_name': stream.name, 'group_name': group.name})
        group.streams.add(stream)

    group.save()
    notify_default_stream_groups(realm)

def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup,
                                                streams: List[Stream]) -> None:
    for stream in streams:
        if stream not in group.streams.all():
            raise JsonableError(_(
                "Stream '%(stream_name)s' is not present in default stream group '%(group_name)s'")
                % {'stream_name': stream.name, 'group_name': group.name})
        group.streams.remove(stream)

    group.save()
    notify_default_stream_groups(realm)

def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup,
                                        new_group_name: str) -> None:
    if group.name == new_group_name:
        raise JsonableError(_("This default stream group is already named '%s'") % (new_group_name,))

    if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
        raise JsonableError(_("Default stream group '%s' already exists") % (new_group_name,))

    group.name = new_group_name
    group.save()
    notify_default_stream_groups(realm)

def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup,
                                               new_description: str) -> None:
    group.description = new_description
    group.save()
    notify_default_stream_groups(realm)

def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
    group.delete()
    notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
    return [default.stream for default in
            DefaultStream.objects.select_related("stream", "stream__realm").filter(
                realm_id=realm_id)]

def get_default_subs(user_profile: UserProfile) -> List[Stream]:
    # Right now default streams are realm-wide.  This wrapper gives us flexibility
    # to some day further customize how we set up default streams for new users.
    return get_default_streams_for_realm(user_profile.realm_id)

# returns default streams in JSON-serializable format
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
    return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"])

def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
    return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"])

def do_update_user_activity_interval(user_profile: UserProfile,
                                     log_time: datetime.datetime) -> None:
    effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
    # This code isn't perfect, because with various races we might end
    # up creating two overlapping intervals, but that shouldn't happen
    # often, and can be corrected for in post-processing
    try:
        last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
        # There are two ways our intervals could overlap:
        # (1) The start of the new interval could be inside the old interval
        # (2) The end of the new interval could be inside the old interval
        # In either case, we just extend the old interval to include the new interval.
        if ((log_time <= last.end and log_time >= last.start) or
                (effective_end <= last.end and effective_end >= last.start)):
            last.end = max(last.end, effective_end)
            last.start = min(last.start, log_time)
            last.save(update_fields=["start", "end"])
            return
    except IndexError:
        pass

    # Otherwise, the intervals don't overlap, so we should make a new one
    UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
                                        end=effective_end)
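# Worked example of the merge above (times illustrative; assume
# MIN_INTERVAL_LENGTH is 15 minutes): if the latest stored interval is
# [12:00, 12:15] and a new event arrives with log_time 12:10, then
# effective_end is 12:25; log_time falls inside the old interval, so the
# row is extended in place to [12:00, 12:25] rather than creating an
# overlapping second row.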
@statsd_increment('user_activity')
def do_update_user_activity(user_profile: UserProfile,
                            client: Client,
                            query: str,
                            log_time: datetime.datetime) -> None:
    (activity, created) = UserActivity.objects.get_or_create(
        user_profile=user_profile,
        client=client,
        query=query,
        defaults={'last_visit': log_time, 'count': 0})

    activity.count += 1
    activity.last_visit = log_time
    activity.save(update_fields=["last_visit", "count"])

def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
    presence_dict = presence.to_dict()
    event = dict(type="presence", email=user_profile.email,
                 server_timestamp=time.time(),
                 presence={presence_dict['client']: presence_dict})
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))

def consolidate_client(client: Client) -> Client:
    # The web app reports a client as 'website'
    # The desktop app reports a client as ZulipDesktop
    # due to it setting a custom user agent.  We want both
    # to count as web users

    # Alias ZulipDesktop to website
    if client.name in ['ZulipDesktop']:
        return get_client('website')
    else:
        return client

@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
                            client: Client,
                            log_time: datetime.datetime,
                            status: int) -> None:
    client = consolidate_client(client)
    (presence, created) = UserPresence.objects.get_or_create(
        user_profile=user_profile,
        client=client,
        defaults={'timestamp': log_time,
                  'status': status})

    stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
    was_idle = presence.status == UserPresence.IDLE
    became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)

    # If an object was created, it has already been saved.
    #
    # We suppress changes from ACTIVE to IDLE before stale_status is reached;
    # this protects us from the user having two clients open: one active, the
    # other idle.  Without this check, we would constantly toggle their status
    # between the two states.
    if not created and stale_status or was_idle or status == presence.status:
        # The following block attempts to only update the "status"
        # field in the event that it actually changed.  This is
        # important to avoid flushing the UserPresence cache when the
        # data it would return to a client hasn't actually changed
        # (see the UserPresence post_save hook for details).
        presence.timestamp = log_time
        update_fields = ["timestamp"]
        if presence.status != status:
            presence.status = status
            update_fields.append("status")
        presence.save(update_fields=update_fields)

    if not user_profile.realm.presence_disabled and (created or became_online):
        # Push event to all users in the realm so they see the new user
        # appear in the presence list immediately, or the newly online
        # user without delay.  Note that we won't send an update here for a
        # timestamp update, because we rely on the browser to ping us every 50
        # seconds for realm-wide status updates, and those updates should have
        # recent timestamps, which means the browser won't think active users
        # have gone idle.  If we were more aggressive in this function about
        # sending timestamp updates, we could eliminate the ping responses, but
        # that's not a high priority for now, considering that most of our non-MIT
        # realms are pretty small.
        send_presence_changed(user_profile, presence)
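# Timing sketch for stale_status above (times illustrative): with
# presence.timestamp at 12:00:00, a ping at 12:01:05 is not stale
# (65s <= 70s), while a ping at 12:01:15 is (75s > 70s); so an ACTIVE
# ping arriving more than 1 minute 10 seconds after the previous one is
# what flips became_online for an already-ACTIVE row.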
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
    event = {'user_profile_id': user_profile.id,
             'time': datetime_to_timestamp(log_time)}
    queue_json_publish("user_activity_interval", event)

def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime,
                         status: int, new_user_input: bool) -> None:
    event = {'user_profile_id': user_profile.id,
             'status': status,
             'time': datetime_to_timestamp(log_time),
             'client': client.name}

    queue_json_publish("user_presence", event)

    if new_user_input:
        update_user_activity_interval(user_profile, log_time)

def do_update_pointer(user_profile: UserProfile, client: Client,
                      pointer: int, update_flags: bool=False) -> None:
    prev_pointer = user_profile.pointer
    user_profile.pointer = pointer
    user_profile.save(update_fields=["pointer"])

    if update_flags:  # nocoverage
        # This block of code is compatibility code for the
        # legacy/original Zulip Android app.  It's a shim
        # that will mark as read any messages up until the pointer
        # move; we expect to remove this feature entirely before long,
        # when we drop support for the old Android app entirely.
        app_message_ids = UserMessage.objects.filter(
            user_profile=user_profile,
            message__id__gt=prev_pointer,
            message__id__lte=pointer).extra(where=[
                UserMessage.where_unread(),
                UserMessage.where_active_push_notification(),
            ]).values_list("message_id", flat=True)

        UserMessage.objects.filter(user_profile=user_profile,
                                   message__id__gt=prev_pointer,
                                   message__id__lte=pointer).extra(where=[UserMessage.where_unread()]) \
                           .update(flags=F('flags').bitor(UserMessage.flags.read))
        do_clear_mobile_push_notifications_for_ids(user_profile, app_message_ids)

    event = dict(type='pointer', pointer=pointer)
    send_event(user_profile.realm, event, [user_profile.id])

def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
    log_statsd_event('bankruptcy')

    msgs = UserMessage.objects.filter(
        user_profile=user_profile
    ).extra(
        where=[UserMessage.where_unread()]
    )

    count = msgs.update(
        flags=F('flags').bitor(UserMessage.flags.read)
    )

    event = dict(
        type='update_message_flags',
        operation='add',
        flag='read',
        messages=[],  # we don't send messages, since the client reloads anyway
        all=True
    )
    send_event(user_profile.realm, event, [user_profile.id])

    statsd.incr("mark_all_as_read", count)

    all_push_message_ids = UserMessage.objects.filter(
        user_profile=user_profile,
    ).extra(
        where=[UserMessage.where_active_push_notification()],
    ).values_list("message_id", flat=True)[0:10000]
    do_clear_mobile_push_notifications_for_ids(user_profile, all_push_message_ids)

    return count

def do_mark_stream_messages_as_read(user_profile: UserProfile,
                                    client: Client,
                                    stream: Stream,
                                    topic_name: Optional[str]=None) -> int:
    log_statsd_event('mark_stream_as_read')

    msgs = UserMessage.objects.filter(
        user_profile=user_profile
    )

    recipient = get_stream_recipient(stream.id)
    msgs = msgs.filter(message__recipient=recipient)

    if topic_name:
        msgs = filter_by_topic_name_via_message(
            query=msgs,
            topic_name=topic_name,
        )

    msgs = msgs.extra(
        where=[UserMessage.where_unread()]
    )

    message_ids = list(msgs.values_list('message__id', flat=True))

    count = msgs.update(
        flags=F('flags').bitor(UserMessage.flags.read)
    )

    event = dict(
        type='update_message_flags',
        operation='add',
        flag='read',
        messages=message_ids,
        all=False,
    )
    send_event(user_profile.realm, event, [user_profile.id])
    do_clear_mobile_push_notifications_for_ids(user_profile, message_ids)

    statsd.incr("mark_stream_as_read", count)
    return count

def do_clear_mobile_push_notifications_for_ids(user_profile: UserProfile,
                                               message_ids: List[int]) -> None:
    for user_message in UserMessage.objects.filter(
            message_id__in=message_ids,
            user_profile=user_profile).extra(
                where=[UserMessage.where_active_push_notification()]):
        event = {
            "user_profile_id": user_profile.id,
            "message_id": user_message.message_id,
            "type": "remove",
        }
        queue_json_publish("missedmessage_mobile_notifications", event)
def do_update_message_flags(user_profile: UserProfile,
                            client: Client,
                            operation: str,
                            flag: str,
                            messages: List[int]) -> int:
    valid_flags = [item for item in UserMessage.flags if item not in UserMessage.NON_API_FLAGS]
    if flag not in valid_flags:
        raise JsonableError(_("Invalid flag: '%s'" % (flag,)))
    flagattr = getattr(UserMessage.flags, flag)

    assert messages is not None
    msgs = UserMessage.objects.filter(user_profile=user_profile,
                                      message__id__in=messages)
    # Hack to let you star any message
    if msgs.count() == 0:
        if not len(messages) == 1:
            raise JsonableError(_("Invalid message(s)"))
        if flag != "starred":
            raise JsonableError(_("Invalid message(s)"))
        # Validate that the user could have read the relevant message
        message = access_message(user_profile, messages[0])[0]

        # OK, this is a message that you legitimately have access
        # to via narrowing to the stream it is on, even though you
        # didn't actually receive it.  So we create a historical,
        # read UserMessage message row for you to star.
        UserMessage.objects.create(user_profile=user_profile,
                                   message=message,
                                   flags=UserMessage.flags.historical | UserMessage.flags.read)

    if operation == 'add':
        count = msgs.update(flags=F('flags').bitor(flagattr))
    elif operation == 'remove':
        count = msgs.update(flags=F('flags').bitand(~flagattr))
    else:
        raise AssertionError("Invalid message flags operation")

    event = {'type': 'update_message_flags',
             'operation': operation,
             'flag': flag,
             'messages': messages,
             'all': False}
    send_event(user_profile.realm, event, [user_profile.id])

    if flag == "read" and operation == "add":
        do_clear_mobile_push_notifications_for_ids(user_profile, messages)

    statsd.incr("flags.%s.%s" % (flag, operation), count)
    return count

def subscribed_to_stream(user_profile: UserProfile, stream_id: int) -> bool:
    return Subscription.objects.filter(
        user_profile=user_profile,
        active=True,
        recipient__type=Recipient.STREAM,
        recipient__type_id=stream_id).exists()

def truncate_content(content: str, max_length: int, truncation_message: str) -> str:
    if len(content) > max_length:
        content = content[:max_length - len(truncation_message)] + truncation_message
    return content

def truncate_body(body: str) -> str:
    return truncate_content(body, MAX_MESSAGE_LENGTH, "...")

def truncate_topic(topic: str) -> str:
    return truncate_content(topic, MAX_TOPIC_NAME_LENGTH, "...")
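# Doctest-style illustration of the truncation arithmetic: the
# truncation message is counted against max_length, so the result never
# exceeds the limit.
#
#     >>> truncate_content("hello world", 8, "...")
#     'hello...'
#     >>> truncate_content("short", 8, "...")
#     'short'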
MessageUpdateUserInfoResult = TypedDict('MessageUpdateUserInfoResult', {
    'message_user_ids': Set[int],
    'mention_user_ids': Set[int],
})

def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:

    # We exclude UserMessage.flags.historical rows since those
    # users did not receive the message originally, and thus
    # probably are not relevant for reprocessed alert_words,
    # mentions and similar rendering features.  This may be a
    # decision we change in the future.
    query = UserMessage.objects.filter(
        message=message_id,
        flags=~UserMessage.flags.historical
    ).values('user_profile_id', 'flags')
    rows = list(query)

    message_user_ids = {
        row['user_profile_id']
        for row in rows
    }

    mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned

    mention_user_ids = {
        row['user_profile_id']
        for row in rows
        if int(row['flags']) & mask
    }

    return dict(
        message_user_ids=message_user_ids,
        mention_user_ids=mention_user_ids,
    )

def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None:
    wildcard = message.mentions_wildcard
    mentioned_ids = message.mentions_user_ids
    ids_with_alert_words = message.user_ids_with_alert_words
    changed_ums = set()  # type: Set[UserMessage]

    def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
        if should_set:
            if not (um.flags & flag):
                um.flags |= flag
                changed_ums.add(um)
        else:
            if (um.flags & flag):
                um.flags &= ~flag
                changed_ums.add(um)

    for um in ums:
        has_alert_word = um.user_profile_id in ids_with_alert_words
        update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)

        mentioned = um.user_profile_id in mentioned_ids
        update_flag(um, mentioned, UserMessage.flags.mentioned)

        update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)

    for um in changed_ums:
        um.save(update_fields=['flags'])
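# The flag updates in update_flag() are ordinary bitmask arithmetic; a
# sketch with a made-up flag value of 8 for "mentioned":
#
#     >>> flags = 0
#     >>> flags |= 8          # set the bit
#     >>> bool(flags & 8)     # already set, so update_flag() would not re-save
#     True
#     >>> flags &= ~8         # clear the bit
#     >>> flags
#     0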
def update_to_dict_cache(changed_messages: List[Message]) -> List[int]:
    """Updates the message as stored in the to_dict cache (for serving
    messages)."""
    items_for_remote_cache = {}
    message_ids = []
    for changed_message in changed_messages:
        message_ids.append(changed_message.id)
        key = to_dict_cache_key_id(changed_message.id)
        value = MessageDict.to_dict_uncached(changed_message)
        items_for_remote_cache[key] = (value,)

    cache_set_many(items_for_remote_cache)
    return message_ids

# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile: UserProfile,
                            message: Message,
                            content: Optional[str],
                            rendered_content: Optional[str]) -> None:
    event = {
        'type': 'update_message',
        'sender': user_profile.email,
        'message_id': message.id}  # type: Dict[str, Any]
    changed_messages = [message]

    ums = UserMessage.objects.filter(message=message.id)

    if content is not None:
        update_user_message_flags(message, ums)
        message.content = content
        message.rendered_content = rendered_content
        message.rendered_content_version = bugdown_version
        event["content"] = content
        event["rendered_content"] = rendered_content

    message.save(update_fields=["content", "rendered_content"])

    event['message_ids'] = update_to_dict_cache(changed_messages)

    def user_info(um: UserMessage) -> Dict[str, Any]:
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list()
        }
    send_event(user_profile.realm, event, list(map(user_info, ums)))

# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile: UserProfile, message: Message, topic_name: Optional[str],
                      propagate_mode: str, content: Optional[str],
                      rendered_content: Optional[str], prior_mention_user_ids: Set[int],
                      mention_user_ids: Set[int]) -> int:
    event = {'type': 'update_message',
             # TODO: We probably want to remove the 'sender' field
             # after confirming it isn't used by any consumers.
             'sender': user_profile.email,
             'user_id': user_profile.id,
             'message_id': message.id}  # type: Dict[str, Any]
    edit_history_event = {
        'user_id': user_profile.id,
    }  # type: Dict[str, Any]
    changed_messages = [message]

    if message.is_stream_message():
        stream_id = message.recipient.type_id
        event['stream_name'] = Stream.objects.get(id=stream_id).name

    ums = UserMessage.objects.filter(message=message.id)

    if content is not None:
        update_user_message_flags(message, ums)

        # One could imagine checking realm.allow_edit_history here and
        # modifying the events based on that setting, but doing so
        # doesn't really make sense.  We need to send the edit event
        # to clients regardless, and a client already had access to
        # the original/pre-edit content of the message anyway.  That
        # setting must be enforced on the client side, and making a
        # change here simply complicates the logic for clients parsing
        # edit history events.
        event['orig_content'] = message.content
        event['orig_rendered_content'] = message.rendered_content
        edit_history_event["prev_content"] = message.content
        edit_history_event["prev_rendered_content"] = message.rendered_content
        edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
        message.content = content
        message.rendered_content = rendered_content
        message.rendered_content_version = bugdown_version
        event["content"] = content
        event["rendered_content"] = rendered_content
        event['prev_rendered_content_version'] = message.rendered_content_version
        event['is_me_message'] = Message.is_status_message(content, rendered_content)

        prev_content = edit_history_event['prev_content']
        if Message.content_has_attachment(prev_content) or Message.content_has_attachment(message.content):
            check_attachment_reference_change(prev_content, message)

    if message.is_stream_message():
        if topic_name is not None:
            new_topic_name = topic_name
        else:
            new_topic_name = message.topic_name()

        stream_topic = StreamTopicTarget(
            stream_id=stream_id,
            topic_name=new_topic_name,
        )  # type: Optional[StreamTopicTarget]
    else:
        stream_topic = None

    # TODO: We may want a slightly leaner version of this function for updates.
    info = get_recipient_info(
        recipient=message.recipient,
        sender_id=message.sender_id,
        stream_topic=stream_topic,
    )

    event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
    event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
    event['stream_email_user_ids'] = list(info['stream_email_user_ids'])
    event['prior_mention_user_ids'] = list(prior_mention_user_ids)
    event['mention_user_ids'] = list(mention_user_ids)
    event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])

    if topic_name is not None:
        orig_topic_name = message.topic_name()
        topic_name = truncate_topic(topic_name)
        event["propagate_mode"] = propagate_mode
        message.set_topic_name(topic_name)
        event["stream_id"] = message.recipient.type_id

        # These fields have legacy field names.
        event[ORIG_TOPIC] = orig_topic_name
        event[TOPIC_NAME] = topic_name
        event[TOPIC_LINKS] = bugdown.topic_links(message.sender.realm_id, topic_name)
        edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name

        if propagate_mode in ["change_later", "change_all"]:
            messages_list = update_messages_for_topic_edit(
                message=message,
                propagate_mode=propagate_mode,
                orig_topic_name=orig_topic_name,
                topic_name=topic_name,
            )

            changed_messages += messages_list
    message.last_edit_time = timezone_now()
    assert message.last_edit_time is not None  # assert needed because stubs for django are missing
    event['edit_timestamp'] = datetime_to_timestamp(message.last_edit_time)
    edit_history_event['timestamp'] = event['edit_timestamp']
    if message.edit_history is not None:
        edit_history = ujson.loads(message.edit_history)
        edit_history.insert(0, edit_history_event)
    else:
        edit_history = [edit_history_event]
    message.edit_history = ujson.dumps(edit_history)

    # This does message.save(update_fields=[...])
    save_message_for_edit_use_case(message=message)

    event['message_ids'] = update_to_dict_cache(changed_messages)

    def user_info(um: UserMessage) -> Dict[str, Any]:
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list()
        }
    send_event(user_profile.realm, event, list(map(user_info, ums)))
    return len(changed_messages)


def do_delete_message(user_profile: UserProfile, message: Message) -> None:
    message_type = "stream"
    if not message.is_stream_message():
        message_type = "private"

    event = {
        'type': 'delete_message',
        'sender': user_profile.email,
        'message_id': message.id,
        'message_type': message_type, }  # type: Dict[str, Any]
    if message_type == "stream":
        event['stream_id'] = message.recipient.type_id
        event['topic'] = message.topic_name()
    else:
        event['recipient_user_ids'] = message.recipient.type_id

    ums = [{'id': um.user_profile_id} for um in
           UserMessage.objects.filter(message=message.id)]
    move_messages_to_archive([message.id])
    send_event(user_profile.realm, event, ums)

def do_delete_messages(user: UserProfile) -> None:
    message_ids = Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id')
    if message_ids:
        move_messages_to_archive(message_ids)

def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
    stat = COUNT_STATS['messages_in_stream:is_bot:day']
    traffic_from = timezone_now() - datetime.timedelta(days=28)

    query = StreamCount.objects.filter(property=stat.property,
                                       end_time__gt=traffic_from)
    query = query.filter(stream_id__in=stream_ids)

    traffic_list = query.values('stream_id').annotate(value=Sum('value'))
    traffic_dict = {}
    for traffic in traffic_list:
        traffic_dict[traffic["stream_id"]] = traffic["value"]

    return traffic_dict

def round_to_2_significant_digits(number: int) -> int:
    return int(round(number, 2 - len(str(number))))

STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7

def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime,
                                      recent_traffic: Dict[int, int]) -> Optional[int]:
    try:
        stream_traffic = recent_traffic[stream_id]
    except KeyError:
        stream_traffic = 0

    stream_age = (timezone_now() - stream_date_created).days

    if stream_age >= 28:
        average_weekly_traffic = int(stream_traffic // 4)
    elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
        average_weekly_traffic = int(stream_traffic * 7 // stream_age)
    else:
        return None

    if average_weekly_traffic == 0 and stream_traffic > 0:
        average_weekly_traffic = 1

    return round_to_2_significant_digits(average_weekly_traffic)

def is_old_stream(stream_date_created: datetime.datetime) -> bool:
    return (timezone_now() - stream_date_created).days \
        >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS
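# Worked numbers for the traffic estimate above (all values made up): a
# stream created 60 days ago with 120 messages counted in the last 28
# days averages 120 // 4 == 30 messages/week; a 10-day-old stream with
# 30 messages averages 30 * 7 // 10 == 21.  Rounding keeps two
# significant digits, e.g.:
#
#     >>> round_to_2_significant_digits(1234)
#     1200
#     >>> round_to_2_significant_digits(86)
#     86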
def encode_email_address(stream: Stream) -> str:
    return encode_email_address_helper(stream.name, stream.email_token)

def encode_email_address_helper(name: str, email_token: str) -> str:
    # Some deployments may not use the email gateway
    if settings.EMAIL_GATEWAY_PATTERN == '':
        return ''

    # Given the fact that we have almost no restrictions on stream names and
    # that what characters are allowed in e-mail addresses is complicated and
    # dependent on context in the address, we opt for a very simple scheme:
    #
    # Only encode the stream name (leave the + and token alone).  Encode
    # everything that isn't alphanumeric plus _ as the percent-prefixed integer
    # ordinal of that character, padded with zeroes to the maximum number of
    # bytes of a UTF-8 encoded Unicode character.
    encoded_name = re.sub(r"\W", lambda x: "%" + str(ord(x.group(0))).zfill(4), name)
    encoded_token = "%s+%s" % (encoded_name, email_token)
    return settings.EMAIL_GATEWAY_PATTERN % (encoded_token,)

def get_email_gateway_message_string_from_address(address: str) -> Optional[str]:
    pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
    if settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK:
        # Accept mails delivered to any Zulip server
        pattern_parts[-1] = settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK
    match_email_re = re.compile("(.*?)".join(pattern_parts))
    match = match_email_re.match(address)

    if not match:
        return None

    msg_string = match.group(1)

    return msg_string

def decode_email_address(email: str) -> Optional[Tuple[str, str]]:
    # Perform the reverse of encode_email_address.  Returns a tuple of (streamname, email_token)
    msg_string = get_email_gateway_message_string_from_address(email)

    if msg_string is None:
        return None
    elif '.' in msg_string:
        # Workaround for Google Groups and other programs that don't accept emails
        # that have + signs in them (see Trac #2102)
        encoded_stream_name, token = msg_string.split('.')
    else:
        encoded_stream_name, token = msg_string.split('+')
    stream_name = re.sub(r"%\d{4}", lambda x: chr(int(x.group(0)[1:])), encoded_stream_name)
    return stream_name, token
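# Illustrative round trip through the two helpers above (assuming
# settings.EMAIL_GATEWAY_PATTERN is "%s@streams.example.com"; both the
# pattern and the token below are made up for the example):
#
#     >>> encode_email_address_helper("Denmark & Sweden", "abc123")
#     'Denmark%0032%0038%0032Sweden+abc123@streams.example.com'
#     >>> decode_email_address("Denmark%0032%0038%0032Sweden+abc123@streams.example.com")
#     ('Denmark & Sweden', 'abc123')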
SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]

def get_web_public_subs(realm: Realm) -> SubHelperT:
    color_idx = 0

    def get_next_color() -> str:
        nonlocal color_idx
        color = STREAM_ASSIGNMENT_COLORS[color_idx]
        color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS)
        return color

    subscribed = [
        {'name': stream.name,
         'in_home_view': True,
         'invite_only': False,
         'is_announcement_only': stream.is_announcement_only,
         'color': get_next_color(),
         'desktop_notifications': True,
         'audible_notifications': True,
         'push_notifications': False,
         'pin_to_top': False,
         'stream_id': stream.id,
         'description': stream.description,
         'is_old_stream': is_old_stream(stream.date_created),
         'stream_weekly_traffic': get_average_weekly_stream_traffic(stream.id,
                                                                    stream.date_created,
                                                                    {}),
         'email_address': ''}
        for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False)]
    return (subscribed, [], [])

# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has significant
# performance impact for loading / for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(user_profile: UserProfile,
                                include_subscribers: bool=True) -> SubHelperT:
    sub_dicts = get_stream_subscriptions_for_user(user_profile).values(
        "recipient_id", "in_home_view", "color", "desktop_notifications",
        "audible_notifications", "push_notifications", "email_notifications",
        "active", "pin_to_top"
    ).order_by("recipient_id")

    sub_dicts = list(sub_dicts)
    sub_recipient_ids = [
        sub['recipient_id']
        for sub in sub_dicts
    ]
    stream_recipient = StreamRecipientMap()
    stream_recipient.populate_for_recipient_ids(sub_recipient_ids)

    stream_ids = set()  # type: Set[int]
    for sub in sub_dicts:
        sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id'])
        stream_ids.add(sub['stream_id'])

    recent_traffic = get_streams_traffic(stream_ids=stream_ids)

    all_streams = get_active_streams(user_profile.realm).select_related(
        "realm").values("id", "name", "invite_only", "is_announcement_only", "realm_id",
                        "email_token", "description", "date_created",
                        "history_public_to_subscribers")

    stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
    stream_hash = {}
    for stream in stream_dicts:
        stream_hash[stream["id"]] = stream

    all_streams_id = [stream["id"] for stream in all_streams]

    subscribed = []
    unsubscribed = []
    never_subscribed = []

    # Deactivated streams aren't in stream_hash.
    streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
               if sub["stream_id"] in stream_hash]
    streams_subscribed_map = dict((sub["stream_id"], sub["active"]) for sub in sub_dicts)

    # Add never subscribed streams to streams_subscribed_map
    streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})

    if include_subscribers:
        subscriber_map = bulk_get_subscriber_user_ids(
            all_streams,
            user_profile,
            streams_subscribed_map,
            stream_recipient
        )  # type: Mapping[int, Optional[List[int]]]
    else:
        # If we're not including subscribers, always return None,
        # which the below code needs to check for anyway.
        subscriber_map = defaultdict(lambda: None)

    sub_unsub_stream_ids = set()
    for sub in sub_dicts:
        sub_unsub_stream_ids.add(sub["stream_id"])
        stream = stream_hash.get(sub["stream_id"])
        if not stream:
            # This stream has been deactivated, don't include it.
            continue

        subscribers = subscriber_map[stream["id"]]  # type: Optional[List[int]]

        # Important: don't show the subscribers if the stream is invite only
        # and this user isn't on it anymore (or a realm administrator).
        if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin):
            subscribers = None

        # Guest users lose access to subscribers when they are unsubscribed.
        if not sub["active"] and user_profile.is_guest:
            subscribers = None

        stream_dict = {'name': stream["name"],
                       'in_home_view': sub["in_home_view"],
                       'invite_only': stream["invite_only"],
                       'is_announcement_only': stream["is_announcement_only"],
                       'color': sub["color"],
                       'desktop_notifications': sub["desktop_notifications"],
                       'audible_notifications': sub["audible_notifications"],
                       'push_notifications': sub["push_notifications"],
                       'email_notifications': sub["email_notifications"],
                       'pin_to_top': sub["pin_to_top"],
                       'stream_id': stream["id"],
                       'description': stream["description"],
                       'is_old_stream': is_old_stream(stream["date_created"]),
                       'stream_weekly_traffic': get_average_weekly_stream_traffic(stream["id"],
                                                                                  stream["date_created"],
                                                                                  recent_traffic),
                       'email_address': encode_email_address_helper(stream["name"], stream["email_token"]),
                       'history_public_to_subscribers': stream['history_public_to_subscribers']}
        if subscribers is not None:
            stream_dict['subscribers'] = subscribers
        if sub["active"]:
            subscribed.append(stream_dict)
        else:
            unsubscribed.append(stream_dict)

    all_streams_id_set = set(all_streams_id)
    if user_profile.can_access_public_streams():
        never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
    else:
        never_subscribed_stream_ids = set()
    never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
                                if ns_stream_dict['id'] in never_subscribed_stream_ids]

    for stream in never_subscribed_streams:
        is_public = (not stream['invite_only'])
        if is_public or user_profile.is_realm_admin:
            stream_dict = {'name': stream['name'],
                           'invite_only': stream['invite_only'],
                           'is_announcement_only': stream['is_announcement_only'],
                           'stream_id': stream['id'],
                           'is_old_stream': is_old_stream(stream["date_created"]),
                           'stream_weekly_traffic': get_average_weekly_stream_traffic(stream["id"],
                                                                                      stream["date_created"],
                                                                                      recent_traffic),
                           'description': stream['description'],
                           'history_public_to_subscribers': stream['history_public_to_subscribers']}
            if is_public or user_profile.is_realm_admin:
                subscribers = subscriber_map[stream["id"]]
                if subscribers is not None:
                    stream_dict['subscribers'] = subscribers
            never_subscribed.append(stream_dict)

    return (sorted(subscribed, key=lambda x: x['name']),
            sorted(unsubscribed, key=lambda x: x['name']),
            sorted(never_subscribed, key=lambda x: x['name']))

def gather_subscriptions(user_profile: UserProfile) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    subscribed, unsubscribed, never_subscribed = gather_subscriptions_helper(user_profile)
    user_ids = set()
    for subs in [subscribed, unsubscribed, never_subscribed]:
        for sub in subs:
            if 'subscribers' in sub:
                for subscriber in sub['subscribers']:
                    user_ids.add(subscriber)
    email_dict = get_emails_from_user_ids(list(user_ids))

    for subs in [subscribed, unsubscribed]:
        for sub in subs:
            if 'subscribers' in sub:
                sub['subscribers'] = sorted([email_dict[user_id] for user_id in sub['subscribers']])

    return (subscribed, unsubscribed)

def get_active_presence_idle_user_ids(realm: Realm,
                                      sender_id: int,
                                      message_type: str,
                                      active_user_ids: Set[int],
                                      user_flags: Dict[int, List[str]]) -> List[int]:
    '''
    Given a list of active_user_ids, we build up a subset
    of those users who fit these criteria:

        * They are likely to need notifications (either due
          to mentions or being PM'ed).
        * They are no longer "present" according to the
          UserPresence table.
    '''

    if realm.presence_disabled:
        return []

    is_pm = message_type == 'private'

    user_ids = set()
    for user_id in active_user_ids:
        flags = user_flags.get(user_id, [])  # type: Iterable[str]
        mentioned = 'mentioned' in flags
        private_message = is_pm and user_id != sender_id
        if mentioned or private_message:
            user_ids.add(user_id)

    return filter_presence_idle_user_ids(user_ids)

def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
    if not user_ids:
        return []

    # 140 seconds is consistent with presence.js:OFFLINE_THRESHOLD_SECS
    recent = timezone_now() - datetime.timedelta(seconds=140)
    rows = UserPresence.objects.filter(
        user_profile_id__in=user_ids,
        status=UserPresence.ACTIVE,
        timestamp__gte=recent
    ).distinct('user_profile_id').values('user_profile_id')
    active_user_ids = {row['user_profile_id'] for row in rows}
    idle_user_ids = user_ids - active_user_ids
    return sorted(list(idle_user_ids))
def get_status_dict(requesting_user_profile: UserProfile) -> Dict[str, Dict[str, Dict[str, Any]]]:
    if requesting_user_profile.realm.presence_disabled:
        # Return an empty dict if presence is disabled in this realm
        return defaultdict(dict)

    return UserPresence.get_status_dict_by_realm(requesting_user_profile.realm_id)

def get_cross_realm_dicts() -> List[Dict[str, Any]]:
    users = bulk_get_users(list(settings.CROSS_REALM_BOT_EMAILS), None,
                           base_query=UserProfile.objects.filter(
                               realm__string_id=settings.SYSTEM_BOT_REALM)).values()
    return [{'email': user.email,
             'user_id': user.id,
             'is_admin': user.is_realm_admin,
             'is_bot': user.is_bot,
             'avatar_url': avatar_url(user),
             'timezone': user.timezone,
             'date_joined': user.date_joined.isoformat(),
             'full_name': user.full_name}
            for user in users
            # Important: We filter here, in addition to in
            # `base_query`, because of how bulk_get_users shares its
            # cache with other UserProfile caches.
            if user.realm.string_id == settings.SYSTEM_BOT_REALM]

def do_send_confirmation_email(invitee: PreregistrationUser,
                               referrer: UserProfile) -> None:
    """
    Send the confirmation/welcome e-mail to an invited user.
    """
    activation_url = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
    context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.email,
               'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name}
    from_name = "%s (via Zulip)" % (referrer.full_name,)
    send_email('zerver/emails/invitation', to_email=invitee.email, from_name=from_name,
               from_address=FromAddress.tokenized_no_reply_address(), context=context)

def email_not_system_bot(email: str) -> None:
    if is_cross_realm_bot_email(email):
        raise ValidationError('%s is an email address reserved for system bots' % (email,))

def validate_email_for_realm(target_realm: Realm, email: str) -> None:
    email_not_system_bot(email)

    try:
        existing_user_profile = get_user(email, target_realm)
    except UserProfile.DoesNotExist:
        return

    if existing_user_profile.is_active:
        if existing_user_profile.is_mirror_dummy:
            raise AssertionError("Mirror dummy user is already active!")
        # Other users should not already exist at all.
        raise ValidationError('%s already has an account' % (email,))
    elif not existing_user_profile.is_mirror_dummy:
        raise ValidationError('The account for %s has been deactivated' % (email,))

def validate_email(user_profile: UserProfile, email: str) -> Tuple[Optional[str], Optional[str]]:
    try:
        validators.validate_email(email)
    except ValidationError:
        return _("Invalid address."), None

    try:
        email_allowed_for_realm(email, user_profile.realm)
    except DomainNotAllowedForRealmError:
        return _("Outside your domain."), None
    except DisposableEmailError:
        return _("Please use your real email address."), None
    except EmailContainsPlusError:
        return _("Email addresses containing + are not allowed."), None

    try:
        validate_email_for_realm(user_profile.realm, email)
    except ValidationError:
        return None, _("Already has an account.")

    return None, None

class InvitationError(JsonableError):
    code = ErrorCode.INVITATION_FAILED
    data_fields = ['errors', 'sent_invitations']

    def __init__(self, msg: str, errors: List[Tuple[str, str]], sent_invitations: bool) -> None:
        self._msg = msg  # type: str
        self.errors = errors  # type: List[Tuple[str, str]]
        self.sent_invitations = sent_invitations  # type: bool
def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int:
    '''An upper bound on the number of invites sent in the last `days` days'''
    recent_invites = RealmCount.objects.filter(
        realm__in=realms,
        property='invites_sent::day',
        end_time__gte=timezone_now() - datetime.timedelta(days=days)
    ).aggregate(Sum('value'))['value__sum']
    if recent_invites is None:
        return 0
    return recent_invites

def check_invite_limit(realm: Realm, num_invitees: int) -> None:
    '''Discourage using invitation emails as a vector for carrying spam.'''
    msg = _("You do not have enough remaining invites. "
            "Please contact %s to have your limit raised. "
            "No invitations were sent.") % (settings.ZULIP_ADMINISTRATOR,)
    if not settings.OPEN_REALM_CREATION:
        return

    recent_invites = estimate_recent_invites([realm], days=1)
    if num_invitees + recent_invites > realm.max_invites:
        raise InvitationError(msg, [], sent_invitations=False)

    default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
    newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
    if realm.date_created <= timezone_now() - newrealm_age:
        # If this isn't a "newly-created" realm, we're done.  The
        # remaining code applies an aggregate limit across all
        # "new" realms, to address sudden bursts of spam realms.
        return

    if realm.max_invites > default_max:
        # If a user is on a realm where we've bumped up
        # max_invites, then we exempt them from invite limits.
        return

    new_realms = Realm.objects.filter(
        date_created__gte=timezone_now() - newrealm_age,
        _max_invites__lte=default_max,
    ).all()

    for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
        recent_invites = estimate_recent_invites(new_realms, days=days)
        if num_invitees + recent_invites > count:
            raise InvitationError(msg, [], sent_invitations=False)
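# Worked numbers for the per-realm check above (all values made up): if
# realm.max_invites is 100 and estimate_recent_invites reports 80
# invites sent in the last day, a request to invite 30 more fails
# because 30 + 80 > 100, while a request for 20 passes that check.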
def do_invite_users(user_profile: UserProfile,
                    invitee_emails: SizedTextIterable,
                    streams: Iterable[Stream],
                    invite_as_admin: Optional[bool]=False) -> None:

    check_invite_limit(user_profile.realm, len(invitee_emails))

    realm = user_profile.realm
    if not realm.invite_required:
        # Inhibit joining an open realm to send spam invitations.
        min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
        if (user_profile.date_joined > timezone_now() - min_age
                and not user_profile.is_realm_admin):
            raise InvitationError(
                _("Your account is too new to send invites for this organization. "
                  "Ask an organization admin, or a more experienced user."),
                [], sent_invitations=False)

    validated_emails = []  # type: List[str]
    errors = []  # type: List[Tuple[str, str]]
    skipped = []  # type: List[Tuple[str, str]]
    for email in invitee_emails:
        if email == '':
            continue
        email_error, email_skipped = validate_email(user_profile, email)
        if not (email_error or email_skipped):
            validated_emails.append(email)
        elif email_error:
            errors.append((email, email_error))
        elif email_skipped:
            skipped.append((email, email_skipped))

    if errors:
        raise InvitationError(
            _("Some emails did not validate, so we didn't send any invitations."),
            errors + skipped, sent_invitations=False)

    if skipped and len(skipped) == len(invitee_emails):
        # All e-mails were skipped, so we didn't actually invite anyone.
        raise InvitationError(_("We weren't able to invite anyone."),
                              skipped, sent_invitations=False)

    # We do this here rather than in the invite queue processor since this
    # is used for rate limiting invitations, rather than keeping track of
    # when exactly invitations were sent
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'],
                              None, timezone_now(), increment=len(validated_emails))

    # Now that we are past all the possible errors, we actually create
    # the PreregistrationUser objects and trigger the email invitations.
    for email in validated_emails:
        # The logged in user is the referrer.
        prereg_user = PreregistrationUser(email=email, referred_by=user_profile,
                                          invited_as_admin=invite_as_admin,
                                          realm=user_profile.realm)
        prereg_user.save()
        stream_ids = [stream.id for stream in streams]
        prereg_user.streams.set(stream_ids)

        event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id}
        queue_json_publish("invites", event)

    if skipped:
        raise InvitationError(_("Some of those addresses are already using Zulip, "
                                "so we didn't send them an invitation.  We did send "
                                "invitations to everyone else!"),
                              skipped, sent_invitations=True)
    notify_invites_changed(user_profile)

def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
    days_to_activate = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', 7)
    active_value = getattr(confirmation_settings, 'STATUS_ACTIVE', 1)

    lowest_datetime = timezone_now() - datetime.timedelta(days=days_to_activate)
    prereg_users = PreregistrationUser.objects.exclude(status=active_value).filter(
        invited_at__gte=lowest_datetime,
        referred_by__realm=user_profile.realm)

    invites = []

    for invitee in prereg_users:
        invites.append(dict(email=invitee.email,
                            ref=invitee.referred_by.email,
                            invited=datetime_to_timestamp(invitee.invited_at),
                            id=invitee.id,
                            invited_as_admin=invitee.invited_as_admin))

    return invites

def do_create_multiuse_invite_link(referred_by: UserProfile, streams: Optional[List[Stream]]=[]) -> str:
    realm = referred_by.realm
    invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
    if streams:
        invite.streams.set(streams)

    return create_confirmation_link(invite, realm.host, Confirmation.MULTIUSE_INVITE)

def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
    email = prereg_user.email

    # Delete both the confirmation objects and the prereg_user object.
    # TODO: Probably we actually want to set the confirmation objects
    # to a "revoked" status so that we can give the user a better
    # error message.
    content_type = ContentType.objects.get_for_model(PreregistrationUser)
    Confirmation.objects.filter(content_type=content_type,
                                object_id=prereg_user.id).delete()
    prereg_user.delete()
    clear_scheduled_invitation_emails(email)
    notify_invites_changed(prereg_user)

def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
    check_invite_limit(prereg_user.referred_by.realm, 1)

    prereg_user.invited_at = timezone_now()
    prereg_user.save()

    do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'],
                              None, prereg_user.invited_at)

    clear_scheduled_invitation_emails(prereg_user.email)
    # We don't store the custom email body, so just set it to None
    event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None}
    queue_json_publish("invites", event)

    return datetime_to_timestamp(prereg_user.invited_at)

def notify_realm_emoji(realm: Realm) -> None:
    event = dict(type="realm_emoji", op="update",
                 realm_emoji=realm.get_emoji())
    send_event(realm, event, active_user_ids(realm.id))

def check_add_realm_emoji(realm: Realm,
                          name: str,
                          author: UserProfile,
                          image_file: File) -> Optional[RealmEmoji]:
    realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
    realm_emoji.full_clean()
    realm_emoji.save()

    emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
    emoji_uploaded_successfully = False
    try:
        upload_emoji_image(image_file, emoji_file_name, author)
        emoji_uploaded_successfully = True
    finally:
        if not emoji_uploaded_successfully:
            realm_emoji.delete()
            return None
        else:
            realm_emoji.file_name = emoji_file_name
            realm_emoji.save(update_fields=['file_name'])
            notify_realm_emoji(realm_emoji.realm)
    return realm_emoji

def do_remove_realm_emoji(realm: Realm, name: str) -> None:
    emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
    emoji.deactivated = True
    emoji.save(update_fields=['deactivated'])
    notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None:
    event = dict(type="alert_words", alert_words=words)
    send_event(user_profile.realm, event, [user_profile.id])

def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
    words = add_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, words)

def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
    words = remove_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, words)

def do_set_alert_words(user_profile: UserProfile, alert_words: List[str]) -> None:
    set_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, alert_words)

def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str) -> None:
    add_topic_mute(user_profile, stream.id, recipient.id, topic)
    event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
    send_event(user_profile.realm, event, [user_profile.id])

def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
    remove_topic_mute(user_profile, stream.id, topic)
    event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
    send_event(user_profile.realm, event, [user_profile.id])

def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
    UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
    event = dict(type="hotspots", hotspots=get_next_hotspots(user))
    send_event(user.realm, event, [user.id])

def notify_realm_filters(realm: Realm) -> None:
    realm_filters = realm_filters_for_realm(realm.id)
    event = dict(type="realm_filters", realm_filters=realm_filters)
    send_event(realm, event, active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax.  In addition to JS-compatible syntax, the following features are available:
#   * Named groups will be converted to numbered groups automatically
#   * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int:
    pattern = pattern.strip()
    url_format_string = url_format_string.strip()
    realm_filter = RealmFilter(
        realm=realm, pattern=pattern,
        url_format_string=url_format_string)
    realm_filter.full_clean()
    realm_filter.save()
    notify_realm_filters(realm)

    return realm_filter.id

def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None,
                           id: Optional[int]=None) -> None:
    if pattern is not None:
        RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
    else:
        RealmFilter.objects.get(realm=realm, pk=id).delete()
    notify_realm_filters(realm)
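# A minimal sketch of a realm filter (the pattern and URL are examples,
# not shipped defaults): this would turn "#1234" in messages into a link
# to the corresponding issue.
#
#     do_add_realm_filter(realm,
#                         r'#(?P<id>[0-9]+)',
#                         'https://github.com/zulip/zulip/issues/%(id)s')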
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
    # We may eventually use memcached to speed this up, but the DB is fast.
    return UserProfile.emails_from_ids(user_ids)

def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
    realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
                                              allow_subdomains=allow_subdomains)
    event = dict(type="realm_domains", op="add",
                 realm_domain=dict(domain=realm_domain.domain,
                                   allow_subdomains=realm_domain.allow_subdomains))
    send_event(realm, event, active_user_ids(realm.id))
    return realm_domain

def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
    realm_domain.allow_subdomains = allow_subdomains
    realm_domain.save(update_fields=['allow_subdomains'])
    event = dict(type="realm_domains", op="change",
                 realm_domain=dict(domain=realm_domain.domain,
                                   allow_subdomains=realm_domain.allow_subdomains))
    send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))

def do_remove_realm_domain(realm_domain: RealmDomain) -> None:
    realm = realm_domain.realm
    domain = realm_domain.domain
    realm_domain.delete()
    if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
        # If this was the last realm domain, we mark the realm as no
        # longer restricted to domain, because the feature doesn't do
        # anything if there are no domains, and this is probably less
        # confusing than the alternative.
        do_set_realm_property(realm, 'emails_restricted_to_domains', False)
    event = dict(type="realm_domains", op="remove", domain=domain)
    send_event(realm, event, active_user_ids(realm.id))

def get_occupied_streams(realm: Realm) -> QuerySet:
    # TODO: Make a generic stub for QuerySet
    """ Get streams with subscribers """
    subs_filter = Subscription.objects.filter(active=True, user_profile__realm=realm,
                                              user_profile__is_active=True).values('recipient_id')
    stream_ids = Recipient.objects.filter(
        type=Recipient.STREAM, id__in=subs_filter).values('type_id')

    return Stream.objects.filter(id__in=stream_ids, realm=realm, deactivated=False)

def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:
    query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True)
    streams = [(row.to_dict()) for row in query]
    return streams

def do_get_streams(user_profile: UserProfile, include_public: bool=True,
                   include_subscribed: bool=True, include_all_active: bool=False,
                   include_default: bool=False) -> List[Dict[str, Any]]:
    if include_all_active and not user_profile.is_api_super_user:
        raise JsonableError(_("User not authorized for this query"))

    include_public = include_public and user_profile.can_access_public_streams()
    # Start out with all streams in the realm with subscribers
    query = get_occupied_streams(user_profile.realm)

    if not include_all_active:
        user_subs = get_stream_subscriptions_for_user(user_profile).filter(
            active=True,
        ).select_related('recipient')

        if include_subscribed:
            recipient_check = Q(id__in=[sub.recipient.type_id for sub in user_subs])
        if include_public:
            invite_only_check = Q(invite_only=False)

        if include_subscribed and include_public:
            query = query.filter(recipient_check | invite_only_check)
        elif include_public:
            query = query.filter(invite_only_check)
        elif include_subscribed:
            query = query.filter(recipient_check)
        else:
            # We're including nothing, so don't bother hitting the DB.
            query = []

    streams = [(row.to_dict()) for row in query]
    streams.sort(key=lambda elt: elt["name"])
    if include_default:
        is_default = {}
        default_streams = get_default_streams_for_realm(user_profile.realm_id)
        for default_stream in default_streams:
            is_default[default_stream.id] = True
        for stream in streams:
            stream['is_default'] = is_default.get(stream["stream_id"], False)

    return streams

def notify_attachment_update(user_profile: UserProfile, op: str,
                             attachment_dict: Dict[str, Any]) -> None:
    event = {
        'type': 'attachment',
        'op': op,
        'attachment': attachment_dict,
    }
    send_event(user_profile.realm, event, [user_profile.id])

def do_claim_attachments(message: Message) -> None:
    attachment_url_list = attachment_url_re.findall(message.content)

    for url in attachment_url_list:
        path_id = attachment_url_to_path_id(url)
        user_profile = message.sender
        is_message_realm_public = False
        if message.is_stream_message():
            is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
        if not validate_attachment_request(user_profile, path_id):
            # Technically, there are 2 cases here:
            # * The user put something in their message that has the form
            #   of an upload, but doesn't correspond to a file that actually
            #   exists.  validate_attachment_request will return None.
            # * The user is trying to send a link to a file they don't have
            #   permission to access themselves.  validate_attachment_request
            #   will return False.
            #
            # Either case is unusual and suggests a UI bug that got
            # the user in this situation, so we log in these cases.
            logging.warning("User %s tried to share upload %s in message %s, but lacks permission" % (
                user_profile.id, path_id, message.id))
            continue

        attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public)
        notify_attachment_update(user_profile, "update", attachment.to_dict())

def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
    old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)

    for attachment in old_unclaimed_attachments:
        delete_message_image(attachment.path_id)
        attachment.delete()

def check_attachment_reference_change(prev_content: str, message: Message) -> None:
    new_content = message.content
    prev_attachments = set(attachment_url_re.findall(prev_content))
    new_attachments = set(attachment_url_re.findall(new_content))

    to_remove = list(prev_attachments - new_attachments)
    path_ids = []
    for url in to_remove:
        path_id = attachment_url_to_path_id(url)
        path_ids.append(path_id)

    attachments_to_update = Attachment.objects.filter(path_id__in=path_ids).select_for_update()
    message.attachment_set.remove(*attachments_to_update)

    to_add = list(new_attachments - prev_attachments)
    if len(to_add) > 0:
        do_claim_attachments(message)

def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None:
    fields = custom_profile_fields_for_realm(realm.id)
    event = dict(type="custom_profile_fields",
                 op=operation,
                 fields=[f.as_dict() for f in fields])
    send_event(realm, event, active_user_ids(realm.id))

def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int,
                                       hint: str='',
                                       field_data: ProfileFieldData=None) -> CustomProfileField:
    field = CustomProfileField(realm=realm, name=name, field_type=field_type)
    field.hint = hint
    if field.field_type == CustomProfileField.CHOICE:
        field.field_data = ujson.dumps(field_data or {})

    field.save()
    field.order = field.id
    field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'add')
    return field

def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
    """
    Deleting a field will also delete the user profile data
    associated with it in CustomProfileFieldValue model.
    """
    field.delete()
    notify_realm_custom_profile_fields(realm, 'delete')

def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
    CustomProfileField.objects.filter(realm=realm).delete()

def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField,
                                          name: str, hint: str='',
                                          field_data: ProfileFieldData=None) -> None:
    field.name = name
    field.hint = hint
    if field.field_type == CustomProfileField.CHOICE:
        field.field_data = ujson.dumps(field_data or {})
    field.save()
    notify_realm_custom_profile_fields(realm, 'update')

def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
    order_mapping = dict((_[1], _[0]) for _ in enumerate(order))
    fields = CustomProfileField.objects.filter(realm=realm)
    for field in fields:
        if field.id not in order_mapping:
            raise JsonableError(_("Invalid order mapping."))
    for field in fields:
        field.order = order_mapping[field.id]
        field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'update')
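# The order_mapping built above inverts the request's list of field IDs
# into an id -> position map; e.g. (field IDs made up):
#
#     >>> dict((_[1], _[0]) for _ in enumerate([5, 3, 4]))
#     {5: 0, 3: 1, 4: 2}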
field: Dict[str, Union[int, str, List[int], None]]) -> None:\n if field['type'] == CustomProfileField.USER:\n field_value = ujson.dumps(field['value']) # type: Union[int, str, List[int], None]\n else:\n field_value = field['value']\n payload = dict(user_id=user_profile.id, custom_profile_field=dict(id=field['id'],\n value=field_value))\n event = dict(type=\"realm_user\", op=\"update\", person=payload)\n send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))\n\ndef do_update_user_custom_profile_data(user_profile: UserProfile,\n data: List[Dict[str, Union[int, str, List[int]]]]) -> None:\n with transaction.atomic():\n for field in data:\n field_value, created = CustomProfileFieldValue.objects.update_or_create(\n user_profile=user_profile,\n field_id=field['id'],\n defaults={'value': field['value']})\n notify_user_update_custom_profile_data(user_profile, {\n \"id\": field_value.field_id,\n \"value\": field_value.value,\n \"type\": field_value.field.field_type})\n\ndef do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:\n event = dict(type=\"user_group\",\n op=\"add\",\n group=dict(name=user_group.name,\n members=[member.id for member in members],\n description=user_group.description,\n id=user_group.id,\n ),\n )\n send_event(user_group.realm, event, active_user_ids(user_group.realm_id))\n\ndef check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile],\n description: str) -> None:\n try:\n user_group = create_user_group(name, initial_members, realm, description=description)\n do_send_create_user_group_event(user_group, initial_members)\n except django.db.utils.IntegrityError:\n raise JsonableError(_(\"User group '%s' already exists.\" % (name,)))\n\ndef do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None:\n event = dict(type=\"user_group\", op='update', group_id=user_group.id, data=data)\n send_event(user_group.realm, event, active_user_ids(user_group.realm_id))\n\ndef do_update_user_group_name(user_group: UserGroup, name: str) -> None:\n try:\n user_group.name = name\n user_group.save(update_fields=['name'])\n except django.db.utils.IntegrityError:\n raise JsonableError(_(\"User group '%s' already exists.\" % (name,)))\n do_send_user_group_update_event(user_group, dict(name=name))\n\ndef do_update_user_group_description(user_group: UserGroup, description: str) -> None:\n user_group.description = description\n user_group.save(update_fields=['description'])\n do_send_user_group_update_event(user_group, dict(description=description))\n\ndef do_update_outgoing_webhook_service(bot_profile: UserProfile,\n service_interface: int,\n service_payload_url: str) -> None:\n # TODO: First service is chosen because currently one bot can only have one service.\n # Update this once multiple services are supported.\n service = get_bot_services(bot_profile.id)[0]\n service.base_url = service_payload_url\n service.interface = service_interface\n service.save()\n send_event(bot_profile.realm,\n dict(type='realm_bot',\n op='update',\n bot=dict(email=bot_profile.email,\n user_id=bot_profile.id,\n services = [dict(base_url=service.base_url,\n interface=service.interface,\n token=service.token,)],\n ),\n ),\n bot_owner_user_ids(bot_profile))\n\ndef do_update_bot_config_data(bot_profile: UserProfile,\n config_data: Dict[str, str]) -> None:\n for key, value in config_data.items():\n set_bot_config(bot_profile, key, value)\n updated_config_data = get_bot_config(bot_profile)\n 
send_event(bot_profile.realm,\n dict(type='realm_bot',\n op='update',\n bot=dict(email=bot_profile.email,\n user_id=bot_profile.id,\n services = [dict(config_data=updated_config_data)],\n ),\n ),\n bot_owner_user_ids(bot_profile))\n\ndef get_service_dicts_for_bot(user_profile_id: str) -> List[Dict[str, Any]]:\n user_profile = get_user_profile_by_id(user_profile_id)\n services = get_bot_services(user_profile_id)\n service_dicts = [] # type: List[Dict[str, Any]]\n if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:\n service_dicts = [{'base_url': service.base_url,\n 'interface': service.interface,\n 'token': service.token,\n }\n for service in services]\n elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:\n try:\n service_dicts = [{'config_data': get_bot_config(user_profile),\n 'service_name': services[0].name\n }]\n # A ConfigError just means that there are no config entries for user_profile.\n except ConfigError:\n pass\n return service_dicts\n\ndef get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]],\n realm: Realm) -> Dict[int, List[Dict[str, Any]]]:\n bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts]\n bot_services_by_uid = defaultdict(list) # type: Dict[int, List[Service]]\n for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):\n bot_services_by_uid[service.user_profile_id].append(service)\n\n embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts\n if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT]\n embedded_bot_configs = get_bot_configs(embedded_bot_ids)\n\n service_dicts_by_uid = {} # type: Dict[int, List[Dict[str, Any]]]\n for bot_dict in bot_dicts:\n bot_profile_id = bot_dict[\"id\"]\n bot_type = bot_dict[\"bot_type\"]\n services = bot_services_by_uid[bot_profile_id]\n service_dicts = [] # type: List[Dict[str, Any]]\n if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:\n service_dicts = [{'base_url': service.base_url,\n 'interface': service.interface,\n 'token': service.token,\n }\n for service in services]\n elif bot_type == UserProfile.EMBEDDED_BOT:\n if bot_profile_id in embedded_bot_configs.keys():\n bot_config = embedded_bot_configs[bot_profile_id]\n service_dicts = [{'config_data': bot_config,\n 'service_name': services[0].name\n }]\n service_dicts_by_uid[bot_profile_id] = service_dicts\n return service_dicts_by_uid\n\ndef get_owned_bot_dicts(user_profile: UserProfile,\n include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]:\n if user_profile.is_realm_admin and include_all_realm_bots_if_admin:\n result = get_bot_dicts_in_realm(user_profile.realm)\n else:\n result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,\n bot_owner=user_profile).values(*bot_dict_fields)\n services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)\n return [{'email': botdict['email'],\n 'user_id': botdict['id'],\n 'full_name': botdict['full_name'],\n 'bot_type': botdict['bot_type'],\n 'is_active': botdict['is_active'],\n 'api_key': botdict['api_key'],\n 'default_sending_stream': botdict['default_sending_stream__name'],\n 'default_events_register_stream': botdict['default_events_register_stream__name'],\n 'default_all_public_streams': botdict['default_all_public_streams'],\n 'owner': botdict['bot_owner__email'],\n 'avatar_url': avatar_url_from_dict(botdict),\n 'services': services_by_ids[botdict['id']],\n }\n for botdict in result]\n\ndef do_send_user_group_members_update_event(event_name: str,\n user_group: UserGroup,\n user_ids: List[int]) -> None:\n event = dict(type=\"user_group\",\n 
op=event_name,\n group_id=user_group.id,\n user_ids=user_ids)\n send_event(user_group.realm, event, active_user_ids(user_group.realm_id))\n\ndef bulk_add_members_to_user_group(user_group: UserGroup,\n user_profiles: List[UserProfile]) -> None:\n memberships = [UserGroupMembership(user_group_id=user_group.id,\n user_profile=user_profile)\n for user_profile in user_profiles]\n UserGroupMembership.objects.bulk_create(memberships)\n\n user_ids = [up.id for up in user_profiles]\n do_send_user_group_members_update_event('add_members', user_group, user_ids)\n\ndef remove_members_from_user_group(user_group: UserGroup,\n user_profiles: List[UserProfile]) -> None:\n UserGroupMembership.objects.filter(\n user_group_id=user_group.id,\n user_profile__in=user_profiles).delete()\n\n user_ids = [up.id for up in user_profiles]\n do_send_user_group_members_update_event('remove_members', user_group, user_ids)\n\ndef do_send_delete_user_group_event(realm: Realm, user_group_id: int,\n realm_id: int) -> None:\n event = dict(type=\"user_group\",\n op=\"remove\",\n group_id=user_group_id)\n send_event(realm, event, active_user_ids(realm_id))\n\ndef check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:\n user_group = access_user_group_by_id(user_group_id, user_profile)\n user_group.delete()\n do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)\n\ndef missing_any_realm_internal_bots() -> bool:\n bot_emails = [bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,)\n for bot in settings.REALM_INTERNAL_BOTS]\n bot_counts = dict(UserProfile.objects.filter(email__in=bot_emails)\n .values_list('email')\n .annotate(Count('id')))\n realm_count = Realm.objects.count()\n return any(bot_counts.get(email, 0) < realm_count for email in bot_emails)\n"},"type_annotations":{"kind":"list like","value":["MutableMapping[str, Any]","Stream","int","Stream","UserProfile","Realm","UserProfile","UserProfile","str","UserProfile","UserProfile","UserProfile","UserProfile","Iterable[Stream]","UserProfile","UserProfile","UserProfile","Optional[Stream]","UserProfile","Realm","Iterable[Tuple[str, str]]","str","Optional[str]","Realm","str","str","UserProfile","UserProfile","Realm","Realm","str","Any","Realm","Dict[str, bool]","Realm","bool","int","bool","Realm","int","Realm","Stream","int","Realm","Stream","int","Realm","Realm","Realm","str","Realm","UserProfile","Stream","UserProfile","str","UserProfile","str","NonBinaryStr","NonBinaryStr","Realm","str","Callable[[str], str]","MutableMapping[str, Any]","Message","str","Set[int]","Realm","Recipient","int","Recipient","int","Optional[StreamTopicTarget]","Callable[[Dict[str, Any]], bool]","Dict[str, Any]","UserProfile","List[Tuple[int, int]]","Set[int]","Set[int]","int","int","int","Sequence[Mapping[str, Any]]","Sequence[Optional[MutableMapping[str, Any]]]","int","int","int","Message","Set[int]","Set[int]","Set[int]","Set[int]","Set[int]","List[UserMessageLite]","Realm","int","int","str","str","UserProfile","Message","Reaction","str","UserProfile","Message","str","UserProfile","Message","str","UserProfile","Message","str","str","str","UserProfile","Message","str","str","Realm","Dict[str, Any]","UserProfile","Sequence[str]","str","UserProfile","Sequence[str]","str","Stream","Stream","Stream","List[int]","Realm","bool","Optional[bool]","Realm","str","Realm","str","Realm","List[Mapping[str, 
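The user-group helpers above follow one pattern throughout: mutate the database, then broadcast a matching `user_group` event to every active user in the realm. The following is a minimal sketch of driving them from a Django shell (`./manage.py shell`), assuming the helpers above are in scope, a development realm named 'zulip' exists with the standard test users, and the group name 'quality-team' is purely hypothetical.

from zerver.models import UserGroup, get_realm, get_user

realm = get_realm('zulip')
iago = get_user('iago@zulip.com', realm)
hamlet = get_user('hamlet@zulip.com', realm)

# Creates the group and sends a 'user_group'/'add' event to active users.
check_add_user_group(realm, 'quality-team', [iago], 'Reviews incoming patches')

# Membership changes go through bulk_create, so a single
# 'add_members' event covers every new member at once.
group = UserGroup.objects.get(realm=realm, name='quality-team')
bulk_add_members_to_user_group(group, [hamlet])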
Any]]","Set[int]","bool","Optional[UserProfile]","UserProfile","List[UserProfile]","UserProfile","Iterable[str]","bool","Optional[UserProfile]","UserProfile","List[UserProfile]","bool","Optional[UserProfile]","UserProfile","Message","Union[str, Iterable[str]]","UserProfile","Client","str","str","str","UserProfile","Client","UserProfile","str","UserProfile","Client","str","Sequence[str]","Optional[str]","str","UserProfile","Client","str","Sequence[str]","Optional[str]","str","str","datetime.datetime","str","str","UserProfile","Realm","str","UserProfile","Optional[Stream]","str","Realm","UserProfile","Stream","Optional[UserProfile]","UserProfile","Client","Addressee","str","Realm","UserProfile","Addressee","str","Realm","UserProfile","str","str","str","Realm","UserProfile","UserProfile","str","Realm","str","str","str","str","str","Realm","UserProfile","UserProfile","str","Realm","UserProfile","str","str","str","Realm","UserProfile","List[str]","str","UserProfile","Iterable[Subscription]","Optional[UserProfile]","Stream","Optional[UserProfile]","Mapping[str, Any]","Callable[[], bool]","Iterable[Mapping[str, Any]]","UserProfile","Mapping[int, bool]","StreamRecipientMap","Stream","Optional[UserProfile]","Stream","UserProfile","Iterable[Tuple[Subscription, Stream]]","Callable[[Stream], List[int]]","Dict[int, int]","Stream","Iterable[int]","Iterable[int]","Iterable[Stream]","Iterable[Stream]","Iterable[UserProfile]","Stream","UserProfile","Iterable[Stream]","Iterable[UserProfile]","Iterable[Stream]","Client","Stream","str","str","str","Any","UserProfile","Subscription","Stream","str","Any","UserProfile","str","UserProfile","str","Optional[UserProfile]","UserProfile","str","UserProfile","UserProfile","str","UserProfile","UserProfile","UserProfile","UserProfile","UserProfile","str","UserProfile","UserProfile","UserProfile","str","UserProfile","Realm","str","UserProfile","int","UserProfile","Optional[Stream]","UserProfile","Optional[Stream]","UserProfile","bool","UserProfile","bool","UserProfile","bool","Stream","bool","Stream","bool","Stream","bool","Stream","str","Stream","str","str","str","UserProfile","str","bool","UserProfile","bool","UserProfile","str","Union[bool, str]","List[str]","Realm","Realm","Dict[str, Dict[str, Any]]","Realm","Realm","Stream","Stream","Realm","str","str","List[Stream]","Realm","DefaultStreamGroup","List[Stream]","Realm","DefaultStreamGroup","List[Stream]","Realm","DefaultStreamGroup","str","Realm","DefaultStreamGroup","str","Realm","DefaultStreamGroup","int","UserProfile","List[Stream]","List[DefaultStreamGroup]","UserProfile","datetime.datetime","UserProfile","Client","str","datetime.datetime","UserProfile","UserPresence","Client","UserProfile","Client","datetime.datetime","int","UserProfile","datetime.datetime","UserProfile","Client","datetime.datetime","int","bool","UserProfile","Client","int","UserProfile","Client","UserProfile","Client","Stream","UserProfile","List[int]","UserProfile","Client","str","str","List[int]","UserProfile","int","str","int","str","str","str","int","Message","Iterable[UserMessage]","UserMessage","bool","int","List[Message]","UserProfile","Message","Optional[str]","Optional[str]","UserMessage","UserProfile","Message","Optional[str]","str","Optional[str]","Optional[str]","Set[int]","Set[int]","UserMessage","UserProfile","Message","UserProfile","Set[int]","int","int","datetime.datetime","Dict[int, int]","datetime.datetime","Stream","str","str","str","str","Realm","UserProfile","UserProfile","Realm","int","str","Set[int]","Dict[int, 
List[str]]","Set[int]","UserProfile","PreregistrationUser","UserProfile","str","Realm","str","UserProfile","str","str","List[Tuple[str, str]]","bool","Iterable[Realm]","int","Realm","int","UserProfile","SizedTextIterable","Iterable[Stream]","UserProfile","UserProfile","PreregistrationUser","PreregistrationUser","Realm","Realm","str","UserProfile","File","Realm","str","UserProfile","Iterable[str]","UserProfile","Iterable[str]","UserProfile","Iterable[str]","UserProfile","List[str]","UserProfile","Stream","Recipient","str","UserProfile","Stream","str","UserProfile","str","Realm","Realm","str","str","Realm","Sequence[int]","Realm","str","bool","RealmDomain","bool","RealmDomain","Realm","Realm","UserProfile","UserProfile","str","Dict[str, Any]","Message","int","str","Message","Realm","str","Realm","str","int","Realm","CustomProfileField","Realm","Realm","CustomProfileField","str","Realm","List[int]","UserProfile","Dict[str, Union[int, str, List[int], None]]","UserProfile","List[Dict[str, Union[int, str, List[int]]]]","UserGroup","List[UserProfile]","Realm","str","List[UserProfile]","str","UserGroup","Dict[str, Any]","UserGroup","str","UserGroup","str","UserProfile","int","str","UserProfile","Dict[str, str]","str","List[Dict[str, Any]]","Realm","UserProfile","str","UserGroup","List[int]","UserGroup","List[UserProfile]","UserGroup","List[UserProfile]","Realm","int","int","int","UserProfile"],"string":"[\n \"MutableMapping[str, Any]\",\n \"Stream\",\n \"int\",\n \"Stream\",\n \"UserProfile\",\n \"Realm\",\n \"UserProfile\",\n \"UserProfile\",\n \"str\",\n \"UserProfile\",\n \"UserProfile\",\n \"UserProfile\",\n \"UserProfile\",\n \"Iterable[Stream]\",\n \"UserProfile\",\n \"UserProfile\",\n \"UserProfile\",\n \"Optional[Stream]\",\n \"UserProfile\",\n \"Realm\",\n \"Iterable[Tuple[str, str]]\",\n \"str\",\n \"Optional[str]\",\n \"Realm\",\n \"str\",\n \"str\",\n \"UserProfile\",\n \"UserProfile\",\n \"Realm\",\n \"Realm\",\n \"str\",\n \"Any\",\n \"Realm\",\n \"Dict[str, bool]\",\n \"Realm\",\n \"bool\",\n \"int\",\n \"bool\",\n \"Realm\",\n \"int\",\n \"Realm\",\n \"Stream\",\n \"int\",\n \"Realm\",\n \"Stream\",\n \"int\",\n \"Realm\",\n \"Realm\",\n \"Realm\",\n \"str\",\n \"Realm\",\n \"UserProfile\",\n \"Stream\",\n \"UserProfile\",\n \"str\",\n \"UserProfile\",\n \"str\",\n \"NonBinaryStr\",\n \"NonBinaryStr\",\n \"Realm\",\n \"str\",\n \"Callable[[str], str]\",\n \"MutableMapping[str, Any]\",\n \"Message\",\n \"str\",\n \"Set[int]\",\n \"Realm\",\n \"Recipient\",\n \"int\",\n \"Recipient\",\n \"int\",\n \"Optional[StreamTopicTarget]\",\n \"Callable[[Dict[str, Any]], bool]\",\n \"Dict[str, Any]\",\n \"UserProfile\",\n \"List[Tuple[int, int]]\",\n \"Set[int]\",\n \"Set[int]\",\n \"int\",\n \"int\",\n \"int\",\n \"Sequence[Mapping[str, Any]]\",\n \"Sequence[Optional[MutableMapping[str, Any]]]\",\n \"int\",\n \"int\",\n \"int\",\n \"Message\",\n \"Set[int]\",\n \"Set[int]\",\n \"Set[int]\",\n \"Set[int]\",\n \"Set[int]\",\n \"List[UserMessageLite]\",\n \"Realm\",\n \"int\",\n \"int\",\n \"str\",\n \"str\",\n \"UserProfile\",\n \"Message\",\n \"Reaction\",\n \"str\",\n \"UserProfile\",\n \"Message\",\n \"str\",\n \"UserProfile\",\n \"Message\",\n \"str\",\n \"UserProfile\",\n \"Message\",\n \"str\",\n \"str\",\n \"str\",\n \"UserProfile\",\n \"Message\",\n \"str\",\n \"str\",\n \"Realm\",\n \"Dict[str, Any]\",\n \"UserProfile\",\n \"Sequence[str]\",\n \"str\",\n \"UserProfile\",\n \"Sequence[str]\",\n \"str\",\n \"Stream\",\n \"Stream\",\n \"Stream\",\n \"List[int]\",\n \"Realm\",\n \"bool\",\n 
\"Optional[bool]\",\n \"Realm\",\n \"str\",\n \"Realm\",\n \"str\",\n \"Realm\",\n \"List[Mapping[str, Any]]\",\n \"Set[int]\",\n \"bool\",\n \"Optional[UserProfile]\",\n \"UserProfile\",\n \"List[UserProfile]\",\n \"UserProfile\",\n \"Iterable[str]\",\n \"bool\",\n \"Optional[UserProfile]\",\n \"UserProfile\",\n \"List[UserProfile]\",\n \"bool\",\n \"Optional[UserProfile]\",\n \"UserProfile\",\n \"Message\",\n \"Union[str, Iterable[str]]\",\n \"UserProfile\",\n \"Client\",\n \"str\",\n \"str\",\n \"str\",\n \"UserProfile\",\n \"Client\",\n \"UserProfile\",\n \"str\",\n \"UserProfile\",\n \"Client\",\n \"str\",\n \"Sequence[str]\",\n \"Optional[str]\",\n \"str\",\n \"UserProfile\",\n \"Client\",\n \"str\",\n \"Sequence[str]\",\n \"Optional[str]\",\n \"str\",\n \"str\",\n \"datetime.datetime\",\n \"str\",\n \"str\",\n \"UserProfile\",\n \"Realm\",\n \"str\",\n \"UserProfile\",\n \"Optional[Stream]\",\n \"str\",\n \"Realm\",\n \"UserProfile\",\n \"Stream\",\n \"Optional[UserProfile]\",\n \"UserProfile\",\n \"Client\",\n \"Addressee\",\n \"str\",\n \"Realm\",\n \"UserProfile\",\n \"Addressee\",\n \"str\",\n \"Realm\",\n \"UserProfile\",\n \"str\",\n \"str\",\n \"str\",\n \"Realm\",\n \"UserProfile\",\n \"UserProfile\",\n \"str\",\n \"Realm\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"Realm\",\n \"UserProfile\",\n \"UserProfile\",\n \"str\",\n \"Realm\",\n \"UserProfile\",\n \"str\",\n \"str\",\n \"str\",\n \"Realm\",\n \"UserProfile\",\n \"List[str]\",\n \"str\",\n \"UserProfile\",\n \"Iterable[Subscription]\",\n \"Optional[UserProfile]\",\n \"Stream\",\n \"Optional[UserProfile]\",\n \"Mapping[str, Any]\",\n \"Callable[[], bool]\",\n \"Iterable[Mapping[str, Any]]\",\n \"UserProfile\",\n \"Mapping[int, bool]\",\n \"StreamRecipientMap\",\n \"Stream\",\n \"Optional[UserProfile]\",\n \"Stream\",\n \"UserProfile\",\n \"Iterable[Tuple[Subscription, Stream]]\",\n \"Callable[[Stream], List[int]]\",\n \"Dict[int, int]\",\n \"Stream\",\n \"Iterable[int]\",\n \"Iterable[int]\",\n \"Iterable[Stream]\",\n \"Iterable[Stream]\",\n \"Iterable[UserProfile]\",\n \"Stream\",\n \"UserProfile\",\n \"Iterable[Stream]\",\n \"Iterable[UserProfile]\",\n \"Iterable[Stream]\",\n \"Client\",\n \"Stream\",\n \"str\",\n \"str\",\n \"str\",\n \"Any\",\n \"UserProfile\",\n \"Subscription\",\n \"Stream\",\n \"str\",\n \"Any\",\n \"UserProfile\",\n \"str\",\n \"UserProfile\",\n \"str\",\n \"Optional[UserProfile]\",\n \"UserProfile\",\n \"str\",\n \"UserProfile\",\n \"UserProfile\",\n \"str\",\n \"UserProfile\",\n \"UserProfile\",\n \"UserProfile\",\n \"UserProfile\",\n \"UserProfile\",\n \"str\",\n \"UserProfile\",\n \"UserProfile\",\n \"UserProfile\",\n \"str\",\n \"UserProfile\",\n \"Realm\",\n \"str\",\n \"UserProfile\",\n \"int\",\n \"UserProfile\",\n \"Optional[Stream]\",\n \"UserProfile\",\n \"Optional[Stream]\",\n \"UserProfile\",\n \"bool\",\n \"UserProfile\",\n \"bool\",\n \"UserProfile\",\n \"bool\",\n \"Stream\",\n \"bool\",\n \"Stream\",\n \"bool\",\n \"Stream\",\n \"bool\",\n \"Stream\",\n \"str\",\n \"Stream\",\n \"str\",\n \"str\",\n \"str\",\n \"UserProfile\",\n \"str\",\n \"bool\",\n \"UserProfile\",\n \"bool\",\n \"UserProfile\",\n \"str\",\n \"Union[bool, str]\",\n \"List[str]\",\n \"Realm\",\n \"Realm\",\n \"Dict[str, Dict[str, Any]]\",\n \"Realm\",\n \"Realm\",\n \"Stream\",\n \"Stream\",\n \"Realm\",\n \"str\",\n \"str\",\n \"List[Stream]\",\n \"Realm\",\n \"DefaultStreamGroup\",\n \"List[Stream]\",\n \"Realm\",\n \"DefaultStreamGroup\",\n \"List[Stream]\",\n \"Realm\",\n 
\"DefaultStreamGroup\",\n \"str\",\n \"Realm\",\n \"DefaultStreamGroup\",\n \"str\",\n \"Realm\",\n \"DefaultStreamGroup\",\n \"int\",\n \"UserProfile\",\n \"List[Stream]\",\n \"List[DefaultStreamGroup]\",\n \"UserProfile\",\n \"datetime.datetime\",\n \"UserProfile\",\n \"Client\",\n \"str\",\n \"datetime.datetime\",\n \"UserProfile\",\n \"UserPresence\",\n \"Client\",\n \"UserProfile\",\n \"Client\",\n \"datetime.datetime\",\n \"int\",\n \"UserProfile\",\n \"datetime.datetime\",\n \"UserProfile\",\n \"Client\",\n \"datetime.datetime\",\n \"int\",\n \"bool\",\n \"UserProfile\",\n \"Client\",\n \"int\",\n \"UserProfile\",\n \"Client\",\n \"UserProfile\",\n \"Client\",\n \"Stream\",\n \"UserProfile\",\n \"List[int]\",\n \"UserProfile\",\n \"Client\",\n \"str\",\n \"str\",\n \"List[int]\",\n \"UserProfile\",\n \"int\",\n \"str\",\n \"int\",\n \"str\",\n \"str\",\n \"str\",\n \"int\",\n \"Message\",\n \"Iterable[UserMessage]\",\n \"UserMessage\",\n \"bool\",\n \"int\",\n \"List[Message]\",\n \"UserProfile\",\n \"Message\",\n \"Optional[str]\",\n \"Optional[str]\",\n \"UserMessage\",\n \"UserProfile\",\n \"Message\",\n \"Optional[str]\",\n \"str\",\n \"Optional[str]\",\n \"Optional[str]\",\n \"Set[int]\",\n \"Set[int]\",\n \"UserMessage\",\n \"UserProfile\",\n \"Message\",\n \"UserProfile\",\n \"Set[int]\",\n \"int\",\n \"int\",\n \"datetime.datetime\",\n \"Dict[int, int]\",\n \"datetime.datetime\",\n \"Stream\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"Realm\",\n \"UserProfile\",\n \"UserProfile\",\n \"Realm\",\n \"int\",\n \"str\",\n \"Set[int]\",\n \"Dict[int, List[str]]\",\n \"Set[int]\",\n \"UserProfile\",\n \"PreregistrationUser\",\n \"UserProfile\",\n \"str\",\n \"Realm\",\n \"str\",\n \"UserProfile\",\n \"str\",\n \"str\",\n \"List[Tuple[str, str]]\",\n \"bool\",\n \"Iterable[Realm]\",\n \"int\",\n \"Realm\",\n \"int\",\n \"UserProfile\",\n \"SizedTextIterable\",\n \"Iterable[Stream]\",\n \"UserProfile\",\n \"UserProfile\",\n \"PreregistrationUser\",\n \"PreregistrationUser\",\n \"Realm\",\n \"Realm\",\n \"str\",\n \"UserProfile\",\n \"File\",\n \"Realm\",\n \"str\",\n \"UserProfile\",\n \"Iterable[str]\",\n \"UserProfile\",\n \"Iterable[str]\",\n \"UserProfile\",\n \"Iterable[str]\",\n \"UserProfile\",\n \"List[str]\",\n \"UserProfile\",\n \"Stream\",\n \"Recipient\",\n \"str\",\n \"UserProfile\",\n \"Stream\",\n \"str\",\n \"UserProfile\",\n \"str\",\n \"Realm\",\n \"Realm\",\n \"str\",\n \"str\",\n \"Realm\",\n \"Sequence[int]\",\n \"Realm\",\n \"str\",\n \"bool\",\n \"RealmDomain\",\n \"bool\",\n \"RealmDomain\",\n \"Realm\",\n \"Realm\",\n \"UserProfile\",\n \"UserProfile\",\n \"str\",\n \"Dict[str, Any]\",\n \"Message\",\n \"int\",\n \"str\",\n \"Message\",\n \"Realm\",\n \"str\",\n \"Realm\",\n \"str\",\n \"int\",\n \"Realm\",\n \"CustomProfileField\",\n \"Realm\",\n \"Realm\",\n \"CustomProfileField\",\n \"str\",\n \"Realm\",\n \"List[int]\",\n \"UserProfile\",\n \"Dict[str, Union[int, str, List[int], None]]\",\n \"UserProfile\",\n \"List[Dict[str, Union[int, str, List[int]]]]\",\n \"UserGroup\",\n \"List[UserProfile]\",\n \"Realm\",\n \"str\",\n \"List[UserProfile]\",\n \"str\",\n \"UserGroup\",\n \"Dict[str, Any]\",\n \"UserGroup\",\n \"str\",\n \"UserGroup\",\n \"str\",\n \"UserProfile\",\n \"int\",\n \"str\",\n \"UserProfile\",\n \"Dict[str, str]\",\n \"str\",\n \"List[Dict[str, Any]]\",\n \"Realm\",\n \"UserProfile\",\n \"str\",\n \"UserGroup\",\n \"List[int]\",\n \"UserGroup\",\n \"List[UserProfile]\",\n \"UserGroup\",\n \"List[UserProfile]\",\n \"Realm\",\n 
\"int\",\n \"int\",\n \"int\",\n \"UserProfile\"\n]"},"type_annotation_starts":{"kind":"list like","value":[6848,7464,7994,8276,8645,9170,9322,9439,9493,9536,11324,11550,12031,12053,13818,17174,18026,18086,19374,19541,19559,19856,19871,19893,19911,19947,22370,23325,24286,24435,24448,24460,25061,25132,25680,25743,25818,25887,26777,26856,27323,27338,27357,27710,27725,27789,28142,29225,29510,29532,29653,30406,32279,34152,34176,34954,34978,35789,35903,36133,36147,36204,36803,37434,37480,37523,37568,38250,38272,39481,39526,39568,43122,43388,45201,45234,45304,45331,45384,45694,45709,47542,48767,60074,60091,60103,60363,60419,60479,60536,60594,60649,63082,63743,63783,63822,63859,63895,64509,64531,64577,64591,65981,66003,66024,66452,66474,66495,66826,66848,66889,66906,66926,67256,67278,67322,67342,67746,67767,68802,68832,68892,69262,69292,69347,70007,70262,70670,70688,70917,70945,70990,71661,71709,73047,73085,73419,73469,74390,74459,74521,74584,75548,75612,76494,76536,76591,76647,77025,77071,77133,77196,77516,78377,79172,79193,79214,79256,79267,79481,79502,79557,79576,79880,79901,79928,79968,79995,80050,80881,80902,80956,80973,81027,81059,81106,81123,81791,82273,82886,82957,83024,84302,84351,84410,84450,85491,85552,85624,87048,87069,87088,87138,90916,90958,91009,91056,92082,92097,92156,92168,92215,92566,92615,92678,92734,93079,93100,93126,93169,93186,93200,94033,94082,94145,94201,94411,94426,94452,94497,94511,94724,94739,94760,94813,95123,95142,95594,95665,96440,96523,96612,98615,98691,98747,98818,101166,101191,102039,102405,102460,102547,102625,104402,104468,104544,105673,107047,107099,112816,116009,116031,116716,116778,116841,120526,122442,122460,122475,122524,122794,122812,122870,122893,122905,123523,123546,124009,124033,124075,125070,125098,125143,125599,125627,125676,126194,126218,126268,128219,128245,128714,128740,129597,129625,131046,131200,131220,131860,131884,132910,132931,133898,133964,134967,134987,135840,135860,136665,136685,137113,137134,137641,137664,137817,137847,138003,138021,140303,140328,140721,140732,142535,142554,142566,143594,143620,143778,143837,143889,145013,145064,145565,145585,146459,146717,146988,147312,147552,147571,147624,147638,148458,148472,148544,149303,149317,149394,149842,149856,149932,150411,150425,150509,150663,150677,150810,151030,151381,151560,151749,151809,153162,153211,153254,153297,153685,153708,154059,154491,154540,154586,154641,157028,157051,157278,157299,157317,157369,157390,157746,157767,157806,159229,159250,160204,160261,160313,161407,161480,161981,162030,162077,162116,162159,164153,164177,164407,164424,164449,164626,164727,165001,165941,165955,166210,166235,166247,166978,167620,167670,167716,167777,168501,168846,168868,168889,168942,168956,169011,169050,169100,174106,174380,174402,175142,175365,175955,176115,176141,176214,176868,177038,177165,177183,178094,178614,179396,180821,187355,188108,188164,188221,188281,188341,189165,189762,190985,191047,191694,191884,191898,192527,192547,193400,193413,193454,193666,193692,194114,194135,195671,195720,195768,198924,199780,200127,200772,201430,201634,201673,201712,201763,202491,202504,202739,202759,202938,202964,203133,203159,203328,203354,203509,203530,203549,203567,203824,203845,203860,204106,204128,204354,204923,204939,204963,205320,205658,205849,205864,205887,206384,206415,206864,207559,208082,208302,210083,210100,210151,210379,211831,212098,212112,212801,212819,213124,213137,213154,213724,213738,214025,214151,214165,214233,214618,214632,215134,215197,215816,215874,216463,216483,216966,216979,217001,217058,2174
12,217429,217659,217676,218012,218036,218274,218345,218410,219300,219356,219970,220912,220972,222602,223899,223960,224025,224316,224377,224827,224888,225218,225240,225291,225512,225531],"string":"[\n 6848,\n 7464,\n 7994,\n 8276,\n 8645,\n 9170,\n 9322,\n 9439,\n 9493,\n 9536,\n 11324,\n 11550,\n 12031,\n 12053,\n 13818,\n 17174,\n 18026,\n 18086,\n 19374,\n 19541,\n 19559,\n 19856,\n 19871,\n 19893,\n 19911,\n 19947,\n 22370,\n 23325,\n 24286,\n 24435,\n 24448,\n 24460,\n 25061,\n 25132,\n 25680,\n 25743,\n 25818,\n 25887,\n 26777,\n 26856,\n 27323,\n 27338,\n 27357,\n 27710,\n 27725,\n 27789,\n 28142,\n 29225,\n 29510,\n 29532,\n 29653,\n 30406,\n 32279,\n 34152,\n 34176,\n 34954,\n 34978,\n 35789,\n 35903,\n 36133,\n 36147,\n 36204,\n 36803,\n 37434,\n 37480,\n 37523,\n 37568,\n 38250,\n 38272,\n 39481,\n 39526,\n 39568,\n 43122,\n 43388,\n 45201,\n 45234,\n 45304,\n 45331,\n 45384,\n 45694,\n 45709,\n 47542,\n 48767,\n 60074,\n 60091,\n 60103,\n 60363,\n 60419,\n 60479,\n 60536,\n 60594,\n 60649,\n 63082,\n 63743,\n 63783,\n 63822,\n 63859,\n 63895,\n 64509,\n 64531,\n 64577,\n 64591,\n 65981,\n 66003,\n 66024,\n 66452,\n 66474,\n 66495,\n 66826,\n 66848,\n 66889,\n 66906,\n 66926,\n 67256,\n 67278,\n 67322,\n 67342,\n 67746,\n 67767,\n 68802,\n 68832,\n 68892,\n 69262,\n 69292,\n 69347,\n 70007,\n 70262,\n 70670,\n 70688,\n 70917,\n 70945,\n 70990,\n 71661,\n 71709,\n 73047,\n 73085,\n 73419,\n 73469,\n 74390,\n 74459,\n 74521,\n 74584,\n 75548,\n 75612,\n 76494,\n 76536,\n 76591,\n 76647,\n 77025,\n 77071,\n 77133,\n 77196,\n 77516,\n 78377,\n 79172,\n 79193,\n 79214,\n 79256,\n 79267,\n 79481,\n 79502,\n 79557,\n 79576,\n 79880,\n 79901,\n 79928,\n 79968,\n 79995,\n 80050,\n 80881,\n 80902,\n 80956,\n 80973,\n 81027,\n 81059,\n 81106,\n 81123,\n 81791,\n 82273,\n 82886,\n 82957,\n 83024,\n 84302,\n 84351,\n 84410,\n 84450,\n 85491,\n 85552,\n 85624,\n 87048,\n 87069,\n 87088,\n 87138,\n 90916,\n 90958,\n 91009,\n 91056,\n 92082,\n 92097,\n 92156,\n 92168,\n 92215,\n 92566,\n 92615,\n 92678,\n 92734,\n 93079,\n 93100,\n 93126,\n 93169,\n 93186,\n 93200,\n 94033,\n 94082,\n 94145,\n 94201,\n 94411,\n 94426,\n 94452,\n 94497,\n 94511,\n 94724,\n 94739,\n 94760,\n 94813,\n 95123,\n 95142,\n 95594,\n 95665,\n 96440,\n 96523,\n 96612,\n 98615,\n 98691,\n 98747,\n 98818,\n 101166,\n 101191,\n 102039,\n 102405,\n 102460,\n 102547,\n 102625,\n 104402,\n 104468,\n 104544,\n 105673,\n 107047,\n 107099,\n 112816,\n 116009,\n 116031,\n 116716,\n 116778,\n 116841,\n 120526,\n 122442,\n 122460,\n 122475,\n 122524,\n 122794,\n 122812,\n 122870,\n 122893,\n 122905,\n 123523,\n 123546,\n 124009,\n 124033,\n 124075,\n 125070,\n 125098,\n 125143,\n 125599,\n 125627,\n 125676,\n 126194,\n 126218,\n 126268,\n 128219,\n 128245,\n 128714,\n 128740,\n 129597,\n 129625,\n 131046,\n 131200,\n 131220,\n 131860,\n 131884,\n 132910,\n 132931,\n 133898,\n 133964,\n 134967,\n 134987,\n 135840,\n 135860,\n 136665,\n 136685,\n 137113,\n 137134,\n 137641,\n 137664,\n 137817,\n 137847,\n 138003,\n 138021,\n 140303,\n 140328,\n 140721,\n 140732,\n 142535,\n 142554,\n 142566,\n 143594,\n 143620,\n 143778,\n 143837,\n 143889,\n 145013,\n 145064,\n 145565,\n 145585,\n 146459,\n 146717,\n 146988,\n 147312,\n 147552,\n 147571,\n 147624,\n 147638,\n 148458,\n 148472,\n 148544,\n 149303,\n 149317,\n 149394,\n 149842,\n 149856,\n 149932,\n 150411,\n 150425,\n 150509,\n 150663,\n 150677,\n 150810,\n 151030,\n 151381,\n 151560,\n 151749,\n 151809,\n 153162,\n 153211,\n 153254,\n 153297,\n 153685,\n 153708,\n 154059,\n 154491,\n 
154540,\n 154586,\n 154641,\n 157028,\n 157051,\n 157278,\n 157299,\n 157317,\n 157369,\n 157390,\n 157746,\n 157767,\n 157806,\n 159229,\n 159250,\n 160204,\n 160261,\n 160313,\n 161407,\n 161480,\n 161981,\n 162030,\n 162077,\n 162116,\n 162159,\n 164153,\n 164177,\n 164407,\n 164424,\n 164449,\n 164626,\n 164727,\n 165001,\n 165941,\n 165955,\n 166210,\n 166235,\n 166247,\n 166978,\n 167620,\n 167670,\n 167716,\n 167777,\n 168501,\n 168846,\n 168868,\n 168889,\n 168942,\n 168956,\n 169011,\n 169050,\n 169100,\n 174106,\n 174380,\n 174402,\n 175142,\n 175365,\n 175955,\n 176115,\n 176141,\n 176214,\n 176868,\n 177038,\n 177165,\n 177183,\n 178094,\n 178614,\n 179396,\n 180821,\n 187355,\n 188108,\n 188164,\n 188221,\n 188281,\n 188341,\n 189165,\n 189762,\n 190985,\n 191047,\n 191694,\n 191884,\n 191898,\n 192527,\n 192547,\n 193400,\n 193413,\n 193454,\n 193666,\n 193692,\n 194114,\n 194135,\n 195671,\n 195720,\n 195768,\n 198924,\n 199780,\n 200127,\n 200772,\n 201430,\n 201634,\n 201673,\n 201712,\n 201763,\n 202491,\n 202504,\n 202739,\n 202759,\n 202938,\n 202964,\n 203133,\n 203159,\n 203328,\n 203354,\n 203509,\n 203530,\n 203549,\n 203567,\n 203824,\n 203845,\n 203860,\n 204106,\n 204128,\n 204354,\n 204923,\n 204939,\n 204963,\n 205320,\n 205658,\n 205849,\n 205864,\n 205887,\n 206384,\n 206415,\n 206864,\n 207559,\n 208082,\n 208302,\n 210083,\n 210100,\n 210151,\n 210379,\n 211831,\n 212098,\n 212112,\n 212801,\n 212819,\n 213124,\n 213137,\n 213154,\n 213724,\n 213738,\n 214025,\n 214151,\n 214165,\n 214233,\n 214618,\n 214632,\n 215134,\n 215197,\n 215816,\n 215874,\n 216463,\n 216483,\n 216966,\n 216979,\n 217001,\n 217058,\n 217412,\n 217429,\n 217659,\n 217676,\n 218012,\n 218036,\n 218274,\n 218345,\n 218410,\n 219300,\n 219356,\n 219970,\n 220912,\n 220972,\n 222602,\n 223899,\n 223960,\n 224025,\n 224316,\n 224377,\n 224827,\n 224888,\n 225218,\n 225240,\n 225291,\n 225512,\n 225531\n]"},"type_annotation_ends":{"kind":"list 
like","value":[6872,7470,7997,8282,8656,9175,9333,9450,9496,9547,11335,11561,12042,12069,13829,17185,18037,18102,19385,19546,19584,19859,19884,19898,19914,19950,22381,23336,24291,24440,24451,24463,25066,25147,25685,25747,25821,25891,26782,26859,27328,27344,27360,27715,27731,27792,28147,29230,29515,29535,29658,30417,32285,34163,34179,34965,34981,35801,35915,36138,36150,36224,36827,37441,37483,37531,37573,38259,38275,39490,39529,39595,43154,43402,45212,45255,45312,45339,45387,45697,45712,47569,48811,60077,60094,60106,60370,60427,60487,60544,60602,60657,63103,63748,63786,63825,63862,63898,64520,64538,64585,64594,65992,66010,66027,66463,66481,66498,66837,66855,66892,66909,66929,67267,67285,67325,67345,67751,67781,68813,68845,68895,69273,69305,69350,70013,70268,70676,70697,70922,70949,71004,71666,71712,73052,73088,73424,73492,74398,74463,74542,74595,75565,75623,76507,76540,76612,76658,77042,77075,77154,77207,77523,78402,79183,79199,79217,79259,79270,79492,79508,79568,79579,79891,79907,79931,79981,80008,80053,80892,80908,80959,80986,81040,81062,81109,81140,81794,82276,82897,82962,83027,84313,84367,84413,84455,85502,85558,85645,87059,87075,87097,87141,90921,90969,91018,91059,92087,92108,92159,92171,92218,92571,92626,92689,92737,93084,93103,93129,93172,93189,93203,94038,94093,94156,94204,94416,94437,94455,94500,94514,94729,94750,94769,94816,95134,95164,95615,95671,96461,96540,96630,98642,98702,98765,98836,101172,101212,102045,102416,102497,102576,102639,104408,104481,104557,105689,107063,107120,112822,116020,116047,116737,116794,116847,120532,122445,122463,122478,122527,122805,122824,122876,122896,122908,123534,123549,124020,124036,124096,125081,125101,125154,125610,125630,125687,126205,126229,126279,128230,128248,128725,128751,129608,129628,131057,131205,131223,131871,131887,132921,132947,133909,133980,134978,134991,135851,135864,136676,136689,137119,137138,137647,137668,137823,137851,138009,138024,140309,140331,140724,140735,142546,142557,142570,143605,143624,143789,143840,143905,145022,145069,145570,145610,146464,146722,146994,147318,147557,147574,147627,147650,148463,148490,148556,149308,149335,149406,149847,149874,149935,150416,150443,150512,150668,150695,150813,151041,151393,151584,151760,151826,153173,153217,153257,153314,153696,153720,154065,154502,154546,154603,154644,157039,157068,157289,157305,157334,157372,157394,157757,157773,157809,159240,159256,160215,160267,160319,161418,161489,161992,162036,162080,162119,162168,164164,164180,164410,164427,164452,164629,164730,165004,165948,165976,166221,166239,166250,166991,167631,167677,167729,167790,168512,168857,168875,168902,168945,168969,169024,169058,169108,174117,174391,174409,175153,175373,175958,176118,176158,176228,176885,177044,177168,177186,178097,178617,179401,180832,187366,188113,188167,188224,188289,188361,189173,189773,191004,191058,191697,191889,191901,192538,192550,193403,193434,193458,193681,193695,194119,194138,195682,195737,195784,198935,199791,200146,200791,201435,201639,201676,201723,201767,202496,202507,202750,202772,202949,202977,203144,203172,203339,203363,203520,203536,203558,203570,203835,203851,203863,204117,204131,204359,204928,204942,204966,205325,205671,205854,205867,205891,206395,206419,206875,207564,208087,208313,210094,210103,210165,210386,211834,212101,212119,212806,212822,213129,213140,213157,213729,213756,214030,214156,214183,214236,214623,214641,215145,215240,215827,215917,216472,216500,216971,216982,217018,217061,217421,217443,217668,217679,218021,218039,218285,218348,218413,219311,219370,219973,220932,220977
,222613,223902,223969,224034,224325,224394,224836,224905,225223,225243,225294,225515,225542],"string":"[\n 6872,\n 7470,\n 7997,\n 8282,\n 8656,\n 9175,\n 9333,\n 9450,\n 9496,\n 9547,\n 11335,\n 11561,\n 12042,\n 12069,\n 13829,\n 17185,\n 18037,\n 18102,\n 19385,\n 19546,\n 19584,\n 19859,\n 19884,\n 19898,\n 19914,\n 19950,\n 22381,\n 23336,\n 24291,\n 24440,\n 24451,\n 24463,\n 25066,\n 25147,\n 25685,\n 25747,\n 25821,\n 25891,\n 26782,\n 26859,\n 27328,\n 27344,\n 27360,\n 27715,\n 27731,\n 27792,\n 28147,\n 29230,\n 29515,\n 29535,\n 29658,\n 30417,\n 32285,\n 34163,\n 34179,\n 34965,\n 34981,\n 35801,\n 35915,\n 36138,\n 36150,\n 36224,\n 36827,\n 37441,\n 37483,\n 37531,\n 37573,\n 38259,\n 38275,\n 39490,\n 39529,\n 39595,\n 43154,\n 43402,\n 45212,\n 45255,\n 45312,\n 45339,\n 45387,\n 45697,\n 45712,\n 47569,\n 48811,\n 60077,\n 60094,\n 60106,\n 60370,\n 60427,\n 60487,\n 60544,\n 60602,\n 60657,\n 63103,\n 63748,\n 63786,\n 63825,\n 63862,\n 63898,\n 64520,\n 64538,\n 64585,\n 64594,\n 65992,\n 66010,\n 66027,\n 66463,\n 66481,\n 66498,\n 66837,\n 66855,\n 66892,\n 66909,\n 66929,\n 67267,\n 67285,\n 67325,\n 67345,\n 67751,\n 67781,\n 68813,\n 68845,\n 68895,\n 69273,\n 69305,\n 69350,\n 70013,\n 70268,\n 70676,\n 70697,\n 70922,\n 70949,\n 71004,\n 71666,\n 71712,\n 73052,\n 73088,\n 73424,\n 73492,\n 74398,\n 74463,\n 74542,\n 74595,\n 75565,\n 75623,\n 76507,\n 76540,\n 76612,\n 76658,\n 77042,\n 77075,\n 77154,\n 77207,\n 77523,\n 78402,\n 79183,\n 79199,\n 79217,\n 79259,\n 79270,\n 79492,\n 79508,\n 79568,\n 79579,\n 79891,\n 79907,\n 79931,\n 79981,\n 80008,\n 80053,\n 80892,\n 80908,\n 80959,\n 80986,\n 81040,\n 81062,\n 81109,\n 81140,\n 81794,\n 82276,\n 82897,\n 82962,\n 83027,\n 84313,\n 84367,\n 84413,\n 84455,\n 85502,\n 85558,\n 85645,\n 87059,\n 87075,\n 87097,\n 87141,\n 90921,\n 90969,\n 91018,\n 91059,\n 92087,\n 92108,\n 92159,\n 92171,\n 92218,\n 92571,\n 92626,\n 92689,\n 92737,\n 93084,\n 93103,\n 93129,\n 93172,\n 93189,\n 93203,\n 94038,\n 94093,\n 94156,\n 94204,\n 94416,\n 94437,\n 94455,\n 94500,\n 94514,\n 94729,\n 94750,\n 94769,\n 94816,\n 95134,\n 95164,\n 95615,\n 95671,\n 96461,\n 96540,\n 96630,\n 98642,\n 98702,\n 98765,\n 98836,\n 101172,\n 101212,\n 102045,\n 102416,\n 102497,\n 102576,\n 102639,\n 104408,\n 104481,\n 104557,\n 105689,\n 107063,\n 107120,\n 112822,\n 116020,\n 116047,\n 116737,\n 116794,\n 116847,\n 120532,\n 122445,\n 122463,\n 122478,\n 122527,\n 122805,\n 122824,\n 122876,\n 122896,\n 122908,\n 123534,\n 123549,\n 124020,\n 124036,\n 124096,\n 125081,\n 125101,\n 125154,\n 125610,\n 125630,\n 125687,\n 126205,\n 126229,\n 126279,\n 128230,\n 128248,\n 128725,\n 128751,\n 129608,\n 129628,\n 131057,\n 131205,\n 131223,\n 131871,\n 131887,\n 132921,\n 132947,\n 133909,\n 133980,\n 134978,\n 134991,\n 135851,\n 135864,\n 136676,\n 136689,\n 137119,\n 137138,\n 137647,\n 137668,\n 137823,\n 137851,\n 138009,\n 138024,\n 140309,\n 140331,\n 140724,\n 140735,\n 142546,\n 142557,\n 142570,\n 143605,\n 143624,\n 143789,\n 143840,\n 143905,\n 145022,\n 145069,\n 145570,\n 145610,\n 146464,\n 146722,\n 146994,\n 147318,\n 147557,\n 147574,\n 147627,\n 147650,\n 148463,\n 148490,\n 148556,\n 149308,\n 149335,\n 149406,\n 149847,\n 149874,\n 149935,\n 150416,\n 150443,\n 150512,\n 150668,\n 150695,\n 150813,\n 151041,\n 151393,\n 151584,\n 151760,\n 151826,\n 153173,\n 153217,\n 153257,\n 153314,\n 153696,\n 153720,\n 154065,\n 154502,\n 154546,\n 154603,\n 154644,\n 157039,\n 157068,\n 157289,\n 157305,\n 157334,\n 157372,\n 
# ---- zerver/lib/addressee.py (archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip) ----

from typing import Iterable, List, Optional, Sequence

from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import JsonableError
from zerver.models import (
    Realm,
    UserProfile,
    get_user_including_cross_realm,
)

def raw_pm_with_emails(email_str: str, my_email: str) -> List[str]:
    frags = email_str.split(',')
    emails = [s.strip().lower() for s in frags]
    emails = [email for email in emails if email]

    if len(emails) > 1:
        emails = [email for email in emails if email != my_email.lower()]

    return emails

def user_profiles_from_unvalidated_emails(emails: Iterable[str], realm: Realm) -> List[UserProfile]:
    user_profiles = []  # type: List[UserProfile]
    for email in emails:
        try:
            user_profile = get_user_including_cross_realm(email, realm)
        except UserProfile.DoesNotExist:
            raise ValidationError(_("Invalid email '%s'") % (email,))
        user_profiles.append(user_profile)
    return user_profiles

def get_user_profiles(emails: Iterable[str], realm: Realm) -> List[UserProfile]:
    try:
        return user_profiles_from_unvalidated_emails(emails, realm)
    except ValidationError as e:
        assert isinstance(e.messages[0], str)
        raise JsonableError(e.messages[0])

class Addressee:
    # This is really just a holder for vars that tended to be passed
    # around in a non-type-safe way before this class was introduced.
    #
    # It also avoids some nonsense where you have to think about whether
    # topic should be None or '' for a PM, or you have to make an array
    # of one stream.
    #
    # Eventually we can use this to cache Stream and UserProfile objects
    # in memory.
    #
    # This should be treated as an immutable class.
    def __init__(self, msg_type: str,
                 user_profiles: Optional[Sequence[UserProfile]]=None,
                 stream_name: Optional[str]=None,
                 topic: Optional[str]=None) -> None:
        assert(msg_type in ['stream', 'private'])
        self._msg_type = msg_type
        self._user_profiles = user_profiles
        self._stream_name = stream_name
        self._topic = topic

    def is_stream(self) -> bool:
        return self._msg_type == 'stream'

    def is_private(self) -> bool:
        return self._msg_type == 'private'

    def user_profiles(self) -> List[UserProfile]:
        assert(self.is_private())
        return self._user_profiles  # type: ignore # assertion protects us

    def stream_name(self) -> str:
        assert(self.is_stream())
        assert(self._stream_name is not None)
        return self._stream_name

    def topic(self) -> str:
        assert(self.is_stream())
        assert(self._topic is not None)
        return self._topic

    @staticmethod
    def legacy_build(sender: UserProfile,
                     message_type_name: str,
                     message_to: Sequence[str],
                     topic_name: str,
                     realm: Optional[Realm]=None) -> 'Addressee':

        # For legacy reasons message_to used to be either a list of
        # emails or a list of streams.  We haven't fixed all of our
        # callers yet.
        if realm is None:
            realm = sender.realm

        if message_type_name == 'stream':
            if len(message_to) > 1:
                raise JsonableError(_("Cannot send to multiple streams"))

            if message_to:
                stream_name = message_to[0]
            else:
                # This is a hack to deal with the fact that we still support
                # default streams (and the None will be converted later in the
                # callpath).
                if sender.default_sending_stream:
                    # Use the user's default stream
                    stream_name = sender.default_sending_stream.name
                else:
                    raise JsonableError(_('Missing stream'))

            return Addressee.for_stream(stream_name, topic_name)
        elif message_type_name == 'private':
            emails = message_to
            return Addressee.for_private(emails, realm)
        else:
            raise JsonableError(_("Invalid message type"))

    @staticmethod
    def for_stream(stream_name: str, topic: str) -> 'Addressee':
        if topic is None:
            raise JsonableError(_("Missing topic"))
        topic = topic.strip()
        if topic == "":
            raise JsonableError(_("Topic can't be empty"))
        return Addressee(
            msg_type='stream',
            stream_name=stream_name,
            topic=topic,
        )

    @staticmethod
    def for_private(emails: Sequence[str], realm: Realm) -> 'Addressee':
        user_profiles = get_user_profiles(emails, realm)
        return Addressee(
            msg_type='private',
            user_profiles=user_profiles,
        )

    @staticmethod
    def for_user_profile(user_profile: UserProfile) -> 'Addressee':
        user_profiles = [user_profile]
        return Addressee(
            msg_type='private',
            user_profiles=user_profiles,
        )
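A short sketch of how callers typically construct an Addressee follows; it assumes `sender` is an existing UserProfile and `realm` its Realm (the stream and email here are the standard Zulip test fixtures).

# Stream message: legacy_build validates the topic and falls back to the
# sender's default stream when message_to is empty.
addr = Addressee.legacy_build(sender, 'stream', ['Denmark'], 'sailing')
addr.is_stream()    # True
addr.stream_name()  # 'Denmark'
addr.topic()        # 'sailing'

# Private message: emails are resolved to UserProfiles up front, so an
# unknown address raises JsonableError here rather than at send time.
pm = Addressee.for_private(['hamlet@zulip.com'], realm)
pm.is_private()     # True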
like","value":[380,395,714,736,1135,1157,1901,2913,2966,3004,3052,4345,4357,4745,4767,5014],"string":"[\n 380,\n 395,\n 714,\n 736,\n 1135,\n 1157,\n 1901,\n 2913,\n 2966,\n 3004,\n 3052,\n 4345,\n 4357,\n 4745,\n 4767,\n 5014\n]"},"type_annotation_ends":{"kind":"list like","value":[383,398,727,741,1148,1162,1904,2924,2969,3017,3055,4348,4360,4758,4772,5025],"string":"[\n 383,\n 398,\n 727,\n 741,\n 1148,\n 1162,\n 1904,\n 2924,\n 2969,\n 3017,\n 3055,\n 4348,\n 4360,\n 4758,\n 4772,\n 5025\n]"}}},{"rowIdx":1343,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/alert_words.py"},"contents":{"kind":"string","value":"\nfrom django.db.models import Q\nfrom zerver.models import UserProfile, Realm\nfrom zerver.lib.cache import cache_with_key, realm_alert_words_cache_key\nimport ujson\nfrom typing import Dict, Iterable, List\n\n@cache_with_key(realm_alert_words_cache_key, timeout=3600*24)\ndef alert_words_in_realm(realm: Realm) -> Dict[int, List[str]]:\n users_query = UserProfile.objects.filter(realm=realm, is_active=True)\n alert_word_data = users_query.filter(~Q(alert_words=ujson.dumps([]))).values('id', 'alert_words')\n all_user_words = dict((elt['id'], ujson.loads(elt['alert_words'])) for elt in alert_word_data)\n user_ids_with_words = dict((user_id, w) for (user_id, w) in all_user_words.items() if len(w))\n return user_ids_with_words\n\ndef user_alert_words(user_profile: UserProfile) -> List[str]:\n return ujson.loads(user_profile.alert_words)\n\ndef add_user_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> List[str]:\n words = user_alert_words(user_profile)\n\n new_words = [w for w in alert_words if w not in words]\n words.extend(new_words)\n\n set_user_alert_words(user_profile, words)\n\n return words\n\ndef remove_user_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> List[str]:\n words = user_alert_words(user_profile)\n words = [w for w in words if w not in alert_words]\n\n set_user_alert_words(user_profile, words)\n\n return words\n\ndef set_user_alert_words(user_profile: UserProfile, alert_words: List[str]) -> None:\n user_profile.alert_words = ujson.dumps(alert_words)\n user_profile.save(update_fields=['alert_words'])\n"},"type_annotations":{"kind":"list like","value":["Realm","UserProfile","UserProfile","Iterable[str]","UserProfile","Iterable[str]","UserProfile","List[str]"],"string":"[\n \"Realm\",\n \"UserProfile\",\n \"UserProfile\",\n \"Iterable[str]\",\n \"UserProfile\",\n \"Iterable[str]\",\n \"UserProfile\",\n \"List[str]\"\n]"},"type_annotation_starts":{"kind":"list like","value":[298,770,886,912,1180,1206,1438,1464],"string":"[\n 298,\n 770,\n 886,\n 912,\n 1180,\n 1206,\n 1438,\n 1464\n]"},"type_annotation_ends":{"kind":"list like","value":[303,781,897,925,1191,1219,1449,1473],"string":"[\n 303,\n 781,\n 897,\n 925,\n 1191,\n 1219,\n 1449,\n 1473\n]"}}},{"rowIdx":1344,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/api_test_helpers.py"},"contents":{"kind":"string","value":"from typing import Dict, Any, Optional, Iterable\nfrom io import StringIO\n\nimport json\nimport os\n\nfrom zerver.lib import mdiff\nfrom zerver.lib.openapi import validate_against_openapi_schema\n\nif False:\n from zulip import Client\n\nZULIP_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nFIXTURE_PATH = os.path.join(ZULIP_DIR, 'templates', 'zerver', 'api', 
'fixtures.json')\n\ndef load_api_fixtures():\n # type: () -> Dict[str, Any]\n with open(FIXTURE_PATH, 'r') as fp:\n json_dict = json.loads(fp.read())\n return json_dict\n\nFIXTURES = load_api_fixtures()\n\ndef add_subscriptions(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Subscribe to the stream \"new stream\"\n result = client.add_subscriptions(\n streams=[\n {\n 'name': 'new stream',\n 'description': 'New stream for testing'\n }\n ]\n )\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',\n '200_without_principals')\n\n # {code_example|start}\n # To subscribe another user to a stream, you may pass in\n # the `principals` argument, like so:\n result = client.add_subscriptions(\n streams=[\n {'name': 'new stream', 'description': 'New stream for testing'}\n ],\n principals=['newbie@zulip.com']\n )\n # {code_example|end}\n assert result['result'] == 'success'\n assert 'newbie@zulip.com' in result['subscribed']\n\ndef test_add_subscriptions_already_subscribed(client):\n # type: (Client) -> None\n result = client.add_subscriptions(\n streams=[\n {'name': 'new stream', 'description': 'New stream for testing'}\n ],\n principals=['newbie@zulip.com']\n )\n\n validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',\n '200_already_subscribed')\n\ndef test_authorization_errors_fatal(client, nonadmin_client):\n # type: (Client, Client) -> None\n client.add_subscriptions(\n streams=[\n {'name': 'private_stream'}\n ],\n )\n\n stream_id = client.get_stream_id('private_stream')['stream_id']\n client.call_endpoint(\n 'streams/{}'.format(stream_id),\n method='PATCH',\n request={'is_private': True}\n )\n\n result = nonadmin_client.add_subscriptions(\n streams=[\n {'name': 'private_stream'}\n ],\n authorization_errors_fatal=False,\n )\n\n validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',\n '400_unauthorized_errors_fatal_false')\n\n result = nonadmin_client.add_subscriptions(\n streams=[\n {'name': 'private_stream'}\n ],\n authorization_errors_fatal=True,\n )\n\n validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',\n '400_unauthorized_errors_fatal_true')\n\ndef get_user_presence(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Get presence information for \"iago@zulip.com\"\n result = client.get_user_presence('iago@zulip.com')\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/users/{email}/presence', 'get', '200')\n\ndef create_user(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Create a user\n request = {\n 'email': 'newbie@zulip.com',\n 'password': 'temp',\n 'full_name': 'New User',\n 'short_name': 'newbie'\n }\n result = client.create_user(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/users', 'post', '200')\n\n # Test \"Email already used error\"\n result = client.create_user(request)\n\n validate_against_openapi_schema(result, '/users', 'post', '400')\n\ndef get_members(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Get all users in the realm\n result = client.get_members()\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/users', 'get', '200')\n\n members = [m for m in result['members'] if m['email'] == 'newbie@zulip.com']\n assert len(members) == 1\n newbie = members[0]\n assert not newbie['is_admin']\n assert newbie['full_name'] == 'New User'\n\n # {code_example|start}\n # You may pass the `client_gravatar` query parameter as follows:\n result 
= client.get_members({'client_gravatar': True})\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/users', 'get', '200')\n assert result['members'][0]['avatar_url'] is None\n\ndef get_realm_filters(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Fetch all the filters in this organization\n result = client.get_realm_filters()\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/realm/filters', 'get', '200')\n\ndef add_realm_filter(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Add a filter to automatically linkify # to the corresponding\n # issue in Zulip's server repo\n result = client.add_realm_filter('#(?P[0-9]+)',\n 'https://github.com/zulip/zulip/issues/%(id)s')\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/realm/filters', 'post', '200')\n\ndef remove_realm_filter(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Remove the organization filter with ID 42\n result = client.remove_realm_filter(42)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/realm/filters/', 'delete', '200')\n\ndef get_profile(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Get the profile of the user/bot that requests this endpoint,\n # which is `client` in this case:\n result = client.get_profile()\n # {code_example|end}\n\n fixture = FIXTURES['get-profile']\n check_if_equal = ['email', 'full_name', 'msg', 'result', 'short_name']\n check_if_exists = ['client_id', 'is_admin', 'is_bot', 'max_message_id',\n 'pointer', 'user_id']\n test_against_fixture(result, fixture, check_if_equal=check_if_equal,\n check_if_exists=check_if_exists)\n\ndef get_stream_id(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Get the ID of a given stream\n stream_name = 'new stream'\n result = client.get_stream_id(stream_name)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/get_stream_id', 'get', '200')\n\ndef get_streams(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Get all streams that the user has access to\n result = client.get_streams()\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/streams', 'get', '200')\n streams = [s for s in result['streams'] if s['name'] == 'new stream']\n assert streams[0]['description'] == 'New stream for testing'\n\n # {code_example|start}\n # You may pass in one or more of the query parameters mentioned above\n # as keyword arguments, like so:\n result = client.get_streams(include_public=False)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/streams', 'get', '200')\n assert len(result['streams']) == 4\n\ndef get_user_groups(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Get all user groups of the realm\n result = client.get_user_groups()\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/user_groups', 'get', '200')\n user_groups = [u for u in result['user_groups'] if u['name'] == \"hamletcharacters\"]\n assert user_groups[0]['description'] == 'Characters of Hamlet'\n\ndef test_user_not_authorized_error(nonadmin_client):\n # type: (Client) -> None\n result = nonadmin_client.get_streams(include_all_active=True)\n\n fixture = FIXTURES['user-not-authorized-error']\n test_against_fixture(result, fixture)\n\ndef get_subscribers(client):\n # type: (Client) -> None\n\n result = client.get_subscribers(stream='new stream')\n assert result['subscribers'] == ['iago@zulip.com', 'newbie@zulip.com']\n\ndef get_user_agent(client):\n # 
type: (Client) -> None\n\n result = client.get_user_agent()\n assert result.startswith('ZulipPython/')\n\ndef list_subscriptions(client):\n # type: (Client) -> None\n # {code_example|start}\n # Get all streams that the user is subscribed to\n result = client.list_subscriptions()\n # {code_example|end}\n\n fixture = FIXTURES['get-subscribed-streams']\n test_against_fixture(result, fixture, check_if_equal=['msg', 'result'],\n check_if_exists=['subscriptions'])\n\n streams = [s for s in result['subscriptions'] if s['name'] == 'new stream']\n assert streams[0]['description'] == 'New stream for testing'\n\ndef remove_subscriptions(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Unsubscribe from the stream \"new stream\"\n result = client.remove_subscriptions(\n ['new stream']\n )\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/users/me/subscriptions',\n 'delete', '200')\n\n # test it was actually removed\n result = client.list_subscriptions()\n assert result['result'] == 'success'\n streams = [s for s in result['subscriptions'] if s['name'] == 'new stream']\n assert len(streams) == 0\n\n # {code_example|start}\n # Unsubscribe another user from the stream \"new stream\"\n result = client.remove_subscriptions(\n ['new stream'],\n principals=['newbie@zulip.com']\n )\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/users/me/subscriptions',\n 'delete', '200')\n\ndef toggle_mute_topic(client):\n # type: (Client) -> None\n\n # Send a test message\n message = {\n 'type': 'stream',\n 'to': 'Denmark',\n 'topic': 'boat party'\n }\n client.call_endpoint(\n url='messages',\n method='POST',\n request=message\n )\n\n # {code_example|start}\n # Mute the topic \"boat party\" in the stream \"Denmark\"\n request = {\n 'stream': 'Denmark',\n 'topic': 'boat party',\n 'op': 'add'\n }\n result = client.mute_topic(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result,\n '/users/me/subscriptions/muted_topics',\n 'patch', '200')\n\n # {code_example|start}\n # Unmute the topic \"boat party\" in the stream \"Denmark\"\n request = {\n 'stream': 'Denmark',\n 'topic': 'boat party',\n 'op': 'remove'\n }\n\n result = client.mute_topic(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result,\n '/users/me/subscriptions/muted_topics',\n 'patch', '200')\n\ndef mark_all_as_read(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Mark all of the user's unread messages as read\n result = client.mark_all_as_read()\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/mark_all_as_read', 'post', '200')\n\ndef mark_stream_as_read(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Mark the unread messages in stream with ID \"1\" as read\n result = client.mark_stream_as_read(1)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/mark_stream_as_read', 'post', '200')\n\ndef mark_topic_as_read(client):\n # type: (Client) -> None\n\n # Grab an existing topic name\n topic_name = client.get_stream_topics(1)['topics'][0]['name']\n\n # {code_example|start}\n # Mark the unread messages in stream 1's topic \"topic_name\" as read\n result = client.mark_topic_as_read(1, topic_name)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/mark_stream_as_read', 'post', '200')\n\ndef update_subscription_settings(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Update the user's subscription in stream #1 to pin it to the top of the\n # stream list; and in stream #3 
to have the hex color \"f00\"\n request = [{\n 'stream_id': 1,\n 'property': 'pin_to_top',\n 'value': True\n }, {\n 'stream_id': 3,\n 'property': 'color',\n 'value': 'f00'\n }]\n result = client.update_subscription_settings(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result,\n '/users/me/subscriptions/properties',\n 'POST', '200')\n\ndef render_message(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Render a message\n request = {\n 'content': '**foo**'\n }\n result = client.render_message(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/messages/render', 'post', '200')\n\ndef get_messages(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Get the 3 last messages sent by \"iago@zulip.com\" to the stream \"Verona\"\n request = {\n 'use_first_unread_anchor': True,\n 'num_before': 3,\n 'num_after': 0,\n 'narrow': [{'operator': 'sender', 'operand': 'iago@zulip.com'},\n {'operator': 'stream', 'operand': 'Verona'}],\n 'client_gravatar': True,\n 'apply_markdown': True\n } # type: Dict[str, Any]\n result = client.get_messages(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/messages', 'get', '200')\n assert len(result['messages']) <= request['num_before']\n\ndef get_raw_message(client, message_id):\n # type: (Client, int) -> None\n\n assert int(message_id)\n\n # {code_example|start}\n # Get the raw content of the message with ID \"message_id\"\n result = client.get_raw_message(message_id)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/messages/{message_id}', 'get',\n '200')\n\ndef send_message(client):\n # type: (Client) -> int\n\n # {code_example|start}\n # Send a stream message\n request = {\n \"type\": \"stream\",\n \"to\": \"Denmark\",\n \"subject\": \"Castle\",\n \"content\": \"I come not, friends, to steal away your hearts.\"\n }\n result = client.send_message(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/messages', 'post', '200')\n\n # test that the message was actually sent\n message_id = result['id']\n url = 'messages/' + str(message_id)\n result = client.call_endpoint(\n url=url,\n method='GET'\n )\n assert result['result'] == 'success'\n assert result['raw_content'] == request['content']\n\n # {code_example|start}\n # Send a private message\n request = {\n \"type\": \"private\",\n \"to\": \"iago@zulip.com\",\n \"content\": \"With mirth and laughter let old wrinkles come.\"\n }\n result = client.send_message(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/messages', 'post', '200')\n\n # test that the message was actually sent\n message_id = result['id']\n url = 'messages/' + str(message_id)\n result = client.call_endpoint(\n url=url,\n method='GET'\n )\n assert result['result'] == 'success'\n assert result['raw_content'] == request['content']\n\n return message_id\n\ndef test_nonexistent_stream_error(client):\n # type: (Client) -> None\n request = {\n \"type\": \"stream\",\n \"to\": \"nonexistent_stream\",\n \"topic\": \"Castle\",\n \"content\": \"I come not, friends, to steal away your hearts.\"\n }\n result = client.send_message(request)\n\n validate_against_openapi_schema(result, '/messages', 'post',\n '400_non_existing_stream')\n\ndef test_private_message_invalid_recipient(client):\n # type: (Client) -> None\n request = {\n \"type\": \"private\",\n \"to\": \"eeshan@zulip.com\",\n \"content\": \"With mirth and laughter let old wrinkles come.\"\n }\n result = 
client.send_message(request)\n\n validate_against_openapi_schema(result, '/messages', 'post',\n '400_non_existing_user')\n\ndef update_message(client, message_id):\n # type: (Client, int) -> None\n\n assert int(message_id)\n\n # {code_example|start}\n # Edit a message\n # (make sure that message_id below is set to the ID of the\n # message you wish to update)\n request = {\n \"message_id\": message_id,\n \"content\": \"New content\"\n }\n result = client.update_message(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/messages/{message_id}', 'patch',\n '200')\n\n # test it was actually updated\n url = 'messages/' + str(message_id)\n result = client.call_endpoint(\n url=url,\n method='GET'\n )\n assert result['result'] == 'success'\n assert result['raw_content'] == request['content']\n\ndef test_update_message_edit_permission_error(client, nonadmin_client):\n # type: (Client, Client) -> None\n request = {\n \"type\": \"stream\",\n \"to\": \"Denmark\",\n \"topic\": \"Castle\",\n \"content\": \"I come not, friends, to steal away your hearts.\"\n }\n result = client.send_message(request)\n\n request = {\n \"message_id\": result[\"id\"],\n \"content\": \"New content\"\n }\n result = nonadmin_client.update_message(request)\n\n fixture = FIXTURES['update-message-edit-permission-error']\n test_against_fixture(result, fixture)\n\ndef delete_message(client, message_id):\n # type: (Client, int) -> None\n\n # {code_example|start}\n # Delete the message with ID \"message_id\"\n result = client.delete_message(message_id)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/messages/{message_id}', 'delete',\n '200')\n\ndef test_delete_message_edit_permission_error(client, nonadmin_client):\n # type: (Client, Client) -> None\n request = {\n \"type\": \"stream\",\n \"to\": \"Denmark\",\n \"topic\": \"Castle\",\n \"content\": \"I come not, friends, to steal away your hearts.\"\n }\n result = client.send_message(request)\n\n result = nonadmin_client.delete_message(result['id'])\n\n validate_against_openapi_schema(result, '/messages/{message_id}', 'delete',\n '400_not_admin')\n\ndef get_message_history(client, message_id):\n # type: (Client, int) -> None\n\n # {code_example|start}\n # Get the edit history for message with ID \"message_id\"\n result = client.get_message_history(message_id)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/messages/{message_id}/history',\n 'get', '200')\n\ndef get_realm_emoji(client):\n # type: (Client) -> None\n\n # {code_example|start}\n result = client.get_realm_emoji()\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/realm/emoji', 'GET', '200')\n\ndef update_message_flags(client):\n # type: (Client) -> None\n\n # Send a few test messages\n request = {\n \"type\": \"stream\",\n \"to\": \"Denmark\",\n \"topic\": \"Castle\",\n \"content\": \"I come not, friends, to steal away your hearts.\"\n } # type: Dict[str, Any]\n message_ids = []\n for i in range(0, 3):\n message_ids.append(client.send_message(request)['id'])\n\n # {code_example|start}\n # Add the \"read\" flag to the messages with IDs in \"message_ids\"\n request = {\n 'messages': message_ids,\n 'op': 'add',\n 'flag': 'read'\n }\n result = client.update_message_flags(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/messages/flags', 'post',\n '200')\n\n # {code_example|start}\n # Remove the \"starred\" flag from the messages with IDs in \"message_ids\"\n request = {\n 'messages': message_ids,\n 'op': 
'remove',\n 'flag': 'starred'\n }\n result = client.update_message_flags(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/messages/flags', 'post',\n '200')\n\ndef register_queue(client):\n # type: (Client) -> str\n\n # {code_example|start}\n # Register the queue\n result = client.register(\n event_types=['message', 'realm_emoji']\n )\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/register', 'post', '200')\n return result['queue_id']\n\ndef deregister_queue(client, queue_id):\n # type: (Client, str) -> None\n\n # {code_example|start}\n # Delete a queue (queue_id is the ID of the queue\n # to be removed)\n result = client.deregister(queue_id)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/events', 'delete', '200')\n\n # Test \"BAD_EVENT_QUEUE_ID\" error\n result = client.deregister(queue_id)\n validate_against_openapi_schema(result, '/events', 'delete', '400')\n\ndef get_server_settings(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # Fetch the settings for this server\n result = client.get_server_settings()\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/server_settings', 'get', '200')\n\ndef upload_file(client):\n # type: (Client) -> None\n fp = StringIO(\"zulip\")\n fp.name = \"zulip.txt\"\n\n # {code_example|start}\n # Upload a file\n # (Make sure that 'fp' is a file object)\n result = client.call_endpoint(\n 'user_uploads',\n method='POST',\n files=[fp]\n )\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/user_uploads', 'post', '200')\n\ndef get_stream_topics(client, stream_id):\n # type: (Client, int) -> None\n\n # {code_example|start}\n result = client.get_stream_topics(stream_id)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/users/me/{stream_id}/topics',\n 'get', '200')\n\ndef set_typing_status(client):\n # type: (Client) -> None\n\n # {code_example|start}\n # The user has started to type in the group PM with Iago and Polonius\n request = {\n 'op': 'start',\n 'to': ['iago@zulip.com', 'polonius@zulip.com']\n }\n result = client.set_typing_status(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/typing', 'post', '200')\n\n # {code_example|start}\n # The user has finished typing in the group PM with Iago and Polonius\n request = {\n 'op': 'stop',\n 'to': ['iago@zulip.com', 'polonius@zulip.com']\n }\n result = client.set_typing_status(request)\n # {code_example|end}\n\n validate_against_openapi_schema(result, '/typing', 'post', '200')\n\n\ndef test_invalid_api_key(client_with_invalid_key):\n # type: (Client) -> None\n result = client_with_invalid_key.list_subscriptions()\n fixture = FIXTURES['invalid-api-key']\n test_against_fixture(result, fixture)\n\ndef test_missing_request_argument(client):\n # type: (Client) -> None\n result = client.render_message({})\n\n fixture = FIXTURES['missing-request-argument-error']\n test_against_fixture(result, fixture)\n\ndef test_invalid_stream_error(client):\n # type: (Client) -> None\n result = client.get_stream_id('nonexistent')\n\n validate_against_openapi_schema(result, '/get_stream_id', 'get', '400')\n\nTEST_FUNCTIONS = {\n '/mark_all_as_read:post': mark_all_as_read,\n '/mark_stream_as_read:post': mark_stream_as_read,\n '/mark_topic_as_read:post': mark_topic_as_read,\n '/messages/render:post': render_message,\n '/messages:get': get_messages,\n '/messages:post': send_message,\n '/messages/{message_id}:get': get_raw_message,\n '/messages/{message_id}:patch': 
update_message,\n '/messages/{message_id}:delete': delete_message,\n '/messages/{message_id}/history:get': get_message_history,\n '/messages/flags:post': update_message_flags,\n '/get_stream_id:get': get_stream_id,\n 'get-subscribed-streams': list_subscriptions,\n '/streams:get': get_streams,\n '/users:post': create_user,\n 'get-profile': get_profile,\n 'add-subscriptions': add_subscriptions,\n '/users/{email}/presence:get': get_user_presence,\n '/users/me/subscriptions:delete': remove_subscriptions,\n '/users/me/subscriptions/muted_topics:patch': toggle_mute_topic,\n '/users/me/subscriptions/properties:post': update_subscription_settings,\n '/users:get': get_members,\n '/realm/emoji:get': get_realm_emoji,\n '/realm/filters:get': get_realm_filters,\n '/realm/filters:post': add_realm_filter,\n '/realm/filters/<filter_id>:delete': remove_realm_filter,\n '/register:post': register_queue,\n '/events:delete': deregister_queue,\n '/server_settings:get': get_server_settings,\n '/user_uploads:post': upload_file,\n '/users/me/{stream_id}/topics:get': get_stream_topics,\n '/typing:post': set_typing_status,\n '/user_groups:get': get_user_groups,\n}\n\n# SETUP METHODS FOLLOW\n\ndef test_against_fixture(result, fixture, check_if_equal=[], check_if_exists=[]):\n # type: (Dict[str, Any], Dict[str, Any], Optional[Iterable[str]], Optional[Iterable[str]]) -> None\n assertLength(result, fixture)\n\n if not check_if_equal and not check_if_exists:\n for key, value in fixture.items():\n assertEqual(key, result, fixture)\n\n if check_if_equal:\n for key in check_if_equal:\n assertEqual(key, result, fixture)\n\n if check_if_exists:\n for key in check_if_exists:\n assertIn(key, result)\n\ndef assertEqual(key, result, fixture):\n # type: (str, Dict[str, Any], Dict[str, Any]) -> None\n if result[key] != fixture[key]:\n first = \"{key} = {value}\".format(key=key, value=result[key])\n second = \"{key} = {value}\".format(key=key, value=fixture[key])\n raise AssertionError(\"Actual and expected outputs do not match; showing diff:\\n\" +\n mdiff.diff_strings(first, second))\n else:\n assert result[key] == fixture[key]\n\ndef assertLength(result, fixture):\n # type: (Dict[str, Any], Dict[str, Any]) -> None\n if len(result) != len(fixture):\n result_string = json.dumps(result, indent=4, sort_keys=True)\n fixture_string = json.dumps(fixture, indent=4, sort_keys=True)\n raise AssertionError(\"The lengths of the actual and expected outputs do not match; showing diff:\\n\" +\n mdiff.diff_strings(result_string, fixture_string))\n else:\n assert len(result) == len(fixture)\n\ndef assertIn(key, result):\n # type: (str, Dict[str, Any]) -> None\n if key not in result.keys():\n raise AssertionError(\n \"The actual output does not contain the key `{key}`.\".format(key=key)\n )\n else:\n assert key in result\n\ndef test_messages(client, nonadmin_client):\n # type: (Client, Client) -> None\n\n render_message(client)\n message_id = send_message(client)\n update_message(client, message_id)\n get_raw_message(client, message_id)\n get_messages(client)\n get_message_history(client, message_id)\n delete_message(client, message_id)\n mark_all_as_read(client)\n mark_stream_as_read(client)\n mark_topic_as_read(client)\n update_message_flags(client)\n\n test_nonexistent_stream_error(client)\n test_private_message_invalid_recipient(client)\n test_update_message_edit_permission_error(client, nonadmin_client)\n test_delete_message_edit_permission_error(client, nonadmin_client)\n\ndef test_users(client):\n # type: (Client) -> None\n\n create_user(client)\n 
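# (Editorial sketch, not part of the original suite: how the fixture\n # helpers defined above are meant to be called; the dicts here are\n # hypothetical.)\n #\n # result = {'msg': '', 'result': 'success'}\n # fixture = {'msg': '', 'result': 'success'}\n # test_against_fixture(result, fixture) # every fixture key must match\n # test_against_fixture(result, fixture,\n # check_if_equal=['result'], # these values must be equal\n # check_if_exists=['msg']) # these keys need only exist\n 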
get_members(client)\n get_profile(client)\n upload_file(client)\n set_typing_status(client)\n get_user_presence(client)\n get_user_groups(client)\n\ndef test_streams(client, nonadmin_client):\n # type: (Client, Client) -> None\n\n add_subscriptions(client)\n test_add_subscriptions_already_subscribed(client)\n list_subscriptions(client)\n get_stream_id(client)\n get_streams(client)\n get_subscribers(client)\n remove_subscriptions(client)\n toggle_mute_topic(client)\n update_subscription_settings(client)\n get_stream_topics(client, 1)\n\n test_user_not_authorized_error(nonadmin_client)\n test_authorization_errors_fatal(client, nonadmin_client)\n\n\ndef test_queues(client):\n # type: (Client) -> None\n # Note that the example for api/get-events-from-queue is not tested.\n # Since, methods such as client.get_events() or client.call_on_each_message\n # are blocking calls and since the event queue backend is already\n # thoroughly tested in zerver/tests/test_event_queue.py, it is not worth\n # the effort to come up with asynchronous logic for testing those here.\n queue_id = register_queue(client)\n deregister_queue(client, queue_id)\n\ndef test_server_organizations(client):\n # type: (Client) -> None\n\n get_realm_filters(client)\n add_realm_filter(client)\n get_server_settings(client)\n remove_realm_filter(client)\n get_realm_emoji(client)\n\ndef test_errors(client):\n # type: (Client) -> None\n test_missing_request_argument(client)\n test_invalid_stream_error(client)\n\ndef test_the_api(client, nonadmin_client):\n # type: (Client, Client) -> None\n\n get_user_agent(client)\n test_users(client)\n test_streams(client, nonadmin_client)\n test_messages(client, nonadmin_client)\n test_queues(client)\n test_server_organizations(client)\n test_errors(client)\n"},"type_annotations":{"kind":"list like","value":[],"string":"[]"},"type_annotation_starts":{"kind":"list like","value":[],"string":"[]"},"type_annotation_ends":{"kind":"list like","value":[],"string":"[]"}}},{"rowIdx":1345,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/attachments.py"},"contents":{"kind":"string","value":"\nfrom django.utils.translation import ugettext as _\nfrom typing import Any, Dict, List\n\nfrom zerver.lib.request import JsonableError\nfrom zerver.lib.upload import delete_message_image\nfrom zerver.models import Attachment, UserProfile\n\ndef user_attachments(user_profile: UserProfile) -> List[Dict[str, Any]]:\n attachments = Attachment.objects.filter(owner=user_profile).prefetch_related('messages')\n return [a.to_dict() for a in attachments]\n\ndef access_attachment_by_id(user_profile: UserProfile, attachment_id: int,\n needs_owner: bool=False) -> Attachment:\n query = Attachment.objects.filter(id=attachment_id)\n if needs_owner:\n query = query.filter(owner=user_profile)\n\n attachment = query.first()\n if attachment is None:\n raise JsonableError(_(\"Invalid attachment\"))\n return attachment\n\ndef remove_attachment(user_profile: UserProfile, attachment: Attachment) -> None:\n try:\n delete_message_image(attachment.path_id)\n except Exception:\n raise JsonableError(_(\"An error occurred while deleting the attachment. 
Please try again later.\"))\n attachment.delete()\n"},"type_annotations":{"kind":"list like","value":["UserProfile","UserProfile","int","UserProfile","Attachment"],"string":"[\n \"UserProfile\",\n \"UserProfile\",\n \"int\",\n \"UserProfile\",\n \"Attachment\"\n]"},"type_annotation_starts":{"kind":"list like","value":[270,490,518,887,912],"string":"[\n 270,\n 490,\n 518,\n 887,\n 912\n]"},"type_annotation_ends":{"kind":"list like","value":[281,501,521,898,922],"string":"[\n 281,\n 501,\n 521,\n 898,\n 922\n]"}}},{"rowIdx":1346,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/avatar.py"},"contents":{"kind":"string","value":"from django.conf import settings\n\nif False:\n from zerver.models import UserProfile\n\nfrom typing import Any, Dict, Optional\n\nfrom zerver.lib.avatar_hash import gravatar_hash, user_avatar_path_from_ids\nfrom zerver.lib.upload import upload_backend, MEDIUM_AVATAR_SIZE\nfrom zerver.models import UserProfile\nimport urllib\n\ndef avatar_url(user_profile: UserProfile, medium: bool=False, client_gravatar: bool=False) -> Optional[str]:\n\n return get_avatar_field(\n user_id=user_profile.id,\n realm_id=user_profile.realm_id,\n email=user_profile.email,\n avatar_source=user_profile.avatar_source,\n avatar_version=user_profile.avatar_version,\n medium=medium,\n client_gravatar=client_gravatar,\n )\n\ndef avatar_url_from_dict(userdict: Dict[str, Any], medium: bool=False) -> str:\n '''\n DEPRECATED: We should start using\n get_avatar_field to populate users,\n particularly for codepaths where the\n client can compute gravatar URLS\n on the client side.\n '''\n url = _get_unversioned_avatar_url(\n userdict['id'],\n userdict['avatar_source'],\n userdict['realm_id'],\n email=userdict['email'],\n medium=medium)\n url += '&version=%d' % (userdict['avatar_version'],)\n return url\n\ndef get_avatar_field(user_id: int,\n realm_id: int,\n email: str,\n avatar_source: str,\n avatar_version: int,\n medium: bool,\n client_gravatar: bool) -> Optional[str]:\n '''\n Most of the parameters to this function map to fields\n by the same name in UserProfile (avatar_source, realm_id,\n email, etc.).\n\n Then there are these:\n\n medium - This means we want a medium-sized avatar. 
This can\n affect the \"s\" parameter for gravatar avatars, or it\n can give us something like foo-medium.png for\n user-uploaded avatars.\n\n client_gravatar - If the client can compute their own\n gravatars, this will be set to True, and we'll avoid\n computing them on the server (mostly to save bandwidth).\n '''\n\n if client_gravatar:\n '''\n If our client knows how to calculate gravatar hashes, we\n will return None and let the client compute the gravatar\n url.\n '''\n if settings.ENABLE_GRAVATAR:\n if avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:\n return None\n\n '''\n If we get this far, we'll compute an avatar URL that may be\n either user-uploaded or a gravatar, and then we'll add version\n info to try to avoid stale caches.\n '''\n url = _get_unversioned_avatar_url(\n user_profile_id=user_id,\n avatar_source=avatar_source,\n realm_id=realm_id,\n email=email,\n medium=medium,\n )\n url += '&version=%d' % (avatar_version,)\n return url\n\ndef get_gravatar_url(email: str, avatar_version: int, medium: bool=False) -> str:\n url = _get_unversioned_gravatar_url(email, medium)\n url += '&version=%d' % (avatar_version,)\n return url\n\ndef _get_unversioned_gravatar_url(email: str, medium: bool) -> str:\n if settings.ENABLE_GRAVATAR:\n gravitar_query_suffix = \"&s=%s\" % (MEDIUM_AVATAR_SIZE,) if medium else \"\"\n hash_key = gravatar_hash(email)\n return \"https://secure.gravatar.com/avatar/%s?d=identicon%s\" % (hash_key, gravitar_query_suffix)\n return settings.DEFAULT_AVATAR_URI+'?x=x'\n\ndef _get_unversioned_avatar_url(user_profile_id: int,\n avatar_source: str,\n realm_id: int,\n email: Optional[str]=None,\n medium: bool=False) -> str:\n if avatar_source == 'U':\n hash_key = user_avatar_path_from_ids(user_profile_id, realm_id)\n return upload_backend.get_avatar_url(hash_key, medium=medium)\n assert email is not None\n return _get_unversioned_gravatar_url(email, medium)\n\ndef absolute_avatar_url(user_profile: UserProfile) -> str:\n \"\"\"\n Absolute URLs are used to simplify logic for applications that\n won't be served by browsers, such as rendering GCM notifications.\n \"\"\"\n avatar = avatar_url(user_profile)\n # avatar_url can return None if client_gravatar=True, however here we use the default value of False\n assert avatar is not None\n return urllib.parse.urljoin(user_profile.realm.uri, avatar)\n"},"type_annotations":{"kind":"list like","value":["UserProfile","Dict[str, Any]","int","int","str","str","int","bool","bool","str","int","str","bool","int","str","int","UserProfile"],"string":"[\n \"UserProfile\",\n \"Dict[str, Any]\",\n \"int\",\n \"int\",\n \"str\",\n \"str\",\n \"int\",\n \"bool\",\n \"bool\",\n \"str\",\n \"int\",\n \"str\",\n \"bool\",\n \"int\",\n \"str\",\n \"int\",\n \"UserProfile\"\n]"},"type_annotation_starts":{"kind":"list like","value":[350,775,1350,1386,1419,1460,1502,1536,1580,2994,3015,3205,3218,3588,3640,3687,4106],"string":"[\n 350,\n 775,\n 1350,\n 1386,\n 1419,\n 1460,\n 1502,\n 1536,\n 1580,\n 2994,\n 3015,\n 3205,\n 3218,\n 3588,\n 3640,\n 3687,\n 4106\n]"},"type_annotation_ends":{"kind":"list like","value":[361,789,1353,1389,1422,1463,1505,1540,1584,2997,3018,3208,3222,3591,3643,3690,4117],"string":"[\n 361,\n 789,\n 1353,\n 1389,\n 1422,\n 1463,\n 1505,\n 1540,\n 1584,\n 2997,\n 3018,\n 3208,\n 3222,\n 3591,\n 3643,\n 3690,\n 
4117\n]"}}},{"rowIdx":1347,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/avatar_hash.py"},"contents":{"kind":"string","value":"\nfrom django.conf import settings\n\nfrom zerver.lib.utils import make_safe_digest\n\nfrom zerver.models import UserProfile\n\nimport hashlib\n\ndef gravatar_hash(email: str) -> str:\n \"\"\"Compute the Gravatar hash for an email address.\"\"\"\n # Non-ASCII characters aren't permitted by the currently active e-mail\n # RFCs. However, the IETF has published https://tools.ietf.org/html/rfc4952,\n # outlining internationalization of email addresses, and regardless if we\n # typo an address or someone manages to give us a non-ASCII address, let's\n # not error out on it.\n return make_safe_digest(email.lower(), hashlib.md5)\n\ndef user_avatar_hash(uid: str) -> str:\n\n # WARNING: If this method is changed, you may need to do a migration\n # similar to zerver/migrations/0060_move_avatars_to_be_uid_based.py .\n\n # The salt probably doesn't serve any purpose now. In the past we\n # used a hash of the email address, not the user ID, and we salted\n # it in order to make the hashing scheme different from Gravatar's.\n user_key = uid + settings.AVATAR_SALT\n return make_safe_digest(user_key, hashlib.sha1)\n\ndef user_avatar_path(user_profile: UserProfile) -> str:\n\n # WARNING: If this method is changed, you may need to do a migration\n # similar to zerver/migrations/0060_move_avatars_to_be_uid_based.py .\n return user_avatar_path_from_ids(user_profile.id, user_profile.realm_id)\n\ndef user_avatar_path_from_ids(user_profile_id: int, realm_id: int) -> str:\n user_id_hash = user_avatar_hash(str(user_profile_id))\n return '%s/%s' % (str(realm_id), user_id_hash)\n"},"type_annotations":{"kind":"list like","value":["str","str","UserProfile","int","int"],"string":"[\n \"str\",\n \"str\",\n \"UserProfile\",\n \"int\",\n \"int\"\n]"},"type_annotation_starts":{"kind":"list like","value":[162,656,1162,1456,1471],"string":"[\n 162,\n 656,\n 1162,\n 1456,\n 1471\n]"},"type_annotation_ends":{"kind":"list like","value":[165,659,1173,1459,1474],"string":"[\n 165,\n 659,\n 1173,\n 1459,\n 1474\n]"}}},{"rowIdx":1348,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bot_config.py"},"contents":{"kind":"string","value":"from django.conf import settings\nfrom django.db.models import Sum\nfrom django.db.models.query import F\nfrom django.db.models.functions import Length\nfrom zerver.models import BotConfigData, UserProfile\n\nfrom typing import List, Dict, Optional\n\nfrom collections import defaultdict\n\nimport os\n\nimport configparser\nimport importlib\n\nclass ConfigError(Exception):\n pass\n\ndef get_bot_config(bot_profile: UserProfile) -> Dict[str, str]:\n entries = BotConfigData.objects.filter(bot_profile=bot_profile)\n if not entries:\n raise ConfigError(\"No config data available.\")\n return {entry.key: entry.value for entry in entries}\n\ndef get_bot_configs(bot_profile_ids: List[int]) -> Dict[int, Dict[str, str]]:\n if not bot_profile_ids:\n return {}\n entries = BotConfigData.objects.filter(bot_profile_id__in=bot_profile_ids)\n entries_by_uid = defaultdict(dict) # type: Dict[int, Dict[str, str]]\n for entry in entries:\n entries_by_uid[entry.bot_profile_id].update({entry.key: entry.value})\n return entries_by_uid\n\ndef get_bot_config_size(bot_profile: UserProfile, key: Optional[str]=None) -> int:\n if key is 
None:\n return BotConfigData.objects.filter(bot_profile=bot_profile) \\\n .annotate(key_size=Length('key'), value_size=Length('value')) \\\n .aggregate(sum=Sum(F('key_size')+F('value_size')))['sum'] or 0\n else:\n try:\n return len(key) + len(BotConfigData.objects.get(bot_profile=bot_profile, key=key).value)\n except BotConfigData.DoesNotExist:\n return 0\n\ndef set_bot_config(bot_profile: UserProfile, key: str, value: str) -> None:\n config_size_limit = settings.BOT_CONFIG_SIZE_LIMIT\n old_entry_size = get_bot_config_size(bot_profile, key)\n new_entry_size = len(key) + len(value)\n old_config_size = get_bot_config_size(bot_profile)\n new_config_size = old_config_size + (new_entry_size - old_entry_size)\n if new_config_size > config_size_limit:\n raise ConfigError(\"Cannot store configuration. Request would require {} characters. \"\n \"The current configuration size limit is {} characters.\".format(new_config_size,\n config_size_limit))\n obj, created = BotConfigData.objects.get_or_create(bot_profile=bot_profile, key=key,\n defaults={'value': value})\n if not created:\n obj.value = value\n obj.save()\n\ndef load_bot_config_template(bot: str) -> Dict[str, str]:\n bot_module_name = 'zulip_bots.bots.{}'.format(bot)\n bot_module = importlib.import_module(bot_module_name)\n bot_module_path = os.path.dirname(bot_module.__file__)\n config_path = os.path.join(bot_module_path, '{}.conf'.format(bot))\n if os.path.isfile(config_path):\n config = configparser.ConfigParser()\n with open(config_path) as conf:\n config.readfp(conf) # type: ignore # readfp->read_file in python 3, so not in stubs\n return dict(config.items(bot))\n else:\n return dict()\n"},"type_annotations":{"kind":"list like","value":["UserProfile","List[int]","UserProfile","UserProfile","str","str","str"],"string":"[\n \"UserProfile\",\n \"List[int]\",\n \"UserProfile\",\n \"UserProfile\",\n \"str\",\n \"str\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[402,672,1080,1637,1655,1667,2593],"string":"[\n 402,\n 672,\n 1080,\n 1637,\n 1655,\n 1667,\n 2593\n]"},"type_annotation_ends":{"kind":"list like","value":[413,681,1091,1648,1658,1670,2596],"string":"[\n 413,\n 681,\n 1091,\n 1648,\n 1658,\n 1670,\n 2596\n]"}}},{"rowIdx":1349,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bot_lib.py"},"contents":{"kind":"string","value":"import json\nimport logging\nimport os\nimport signal\nimport sys\nimport time\nimport re\nimport importlib\nfrom zerver.lib.actions import internal_send_private_message, \\\n internal_send_stream_message, internal_send_huddle_message\nfrom zerver.models import UserProfile, get_active_user\nfrom zerver.lib.bot_storage import get_bot_storage, set_bot_storage, \\\n is_key_in_bot_storage, get_bot_storage_size, remove_bot_storage\nfrom zerver.lib.bot_config import get_bot_config, ConfigError\nfrom zerver.lib.integrations import EMBEDDED_BOTS\nfrom zerver.lib.topic import get_topic_from_message_info\n\nimport configparser\n\nif False:\n from mypy_extensions import NoReturn\nfrom typing import Any, Optional, List, Dict\nfrom types import ModuleType\n\nour_dir = os.path.dirname(os.path.abspath(__file__))\n\nfrom zulip_bots.lib import RateLimit\n\ndef get_bot_handler(service_name: str) -> Any:\n\n # Check that this service is present in EMBEDDED_BOTS, add exception handling.\n is_present_in_registry = any(service_name == embedded_bot_service.name for\n embedded_bot_service in EMBEDDED_BOTS)\n if not 
is_present_in_registry:\n return None\n bot_module_name = 'zulip_bots.bots.%s.%s' % (service_name, service_name)\n bot_module = importlib.import_module(bot_module_name) # type: Any\n return bot_module.handler_class()\n\n\nclass StateHandler:\n storage_size_limit = 10000000 # type: int # TODO: Store this in the server configuration model.\n\n def __init__(self, user_profile: UserProfile) -> None:\n self.user_profile = user_profile\n self.marshal = lambda obj: json.dumps(obj)\n self.demarshal = lambda obj: json.loads(obj)\n\n def get(self, key: str) -> str:\n return self.demarshal(get_bot_storage(self.user_profile, key))\n\n def put(self, key: str, value: str) -> None:\n set_bot_storage(self.user_profile, [(key, self.marshal(value))])\n\n def remove(self, key: str) -> None:\n remove_bot_storage(self.user_profile, [key])\n\n def contains(self, key: str) -> bool:\n return is_key_in_bot_storage(self.user_profile, key)\n\nclass EmbeddedBotQuitException(Exception):\n pass\n\nclass EmbeddedBotHandler:\n def __init__(self, user_profile: UserProfile) -> None:\n # Only expose a subset of our UserProfile's functionality\n self.user_profile = user_profile\n self._rate_limit = RateLimit(20, 5)\n self.full_name = user_profile.full_name\n self.email = user_profile.email\n self.storage = StateHandler(user_profile)\n\n def send_message(self, message: Dict[str, Any]) -> None:\n if not self._rate_limit.is_legal():\n self._rate_limit.show_error_and_exit()\n\n if message['type'] == 'stream':\n internal_send_stream_message(self.user_profile.realm, self.user_profile, message['to'],\n message['topic'], message['content'])\n return\n\n assert message['type'] == 'private'\n # Ensure that it's a comma-separated list, even though the\n # usual 'to' field could be either a List[str] or a str.\n recipients = ','.join(message['to']).split(',')\n\n if len(message['to']) == 1:\n recipient_user = get_active_user(recipients[0], self.user_profile.realm)\n internal_send_private_message(self.user_profile.realm, self.user_profile,\n recipient_user, message['content'])\n else:\n internal_send_huddle_message(self.user_profile.realm, self.user_profile,\n recipients, message['content'])\n\n def send_reply(self, message: Dict[str, Any], response: str) -> None:\n if message['type'] == 'private':\n self.send_message(dict(\n type='private',\n to=[x['email'] for x in message['display_recipient']],\n content=response,\n sender_email=message['sender_email'],\n ))\n else:\n self.send_message(dict(\n type='stream',\n to=message['display_recipient'],\n topic=get_topic_from_message_info(message),\n content=response,\n sender_email=message['sender_email'],\n ))\n\n # The bot_name argument exists only to comply with ExternalBotHandler.get_config_info().\n def get_config_info(self, bot_name: str, optional: bool=False) -> Dict[str, str]:\n try:\n return get_bot_config(self.user_profile)\n except ConfigError:\n if optional:\n return dict()\n raise\n\n def quit(self, message: str= \"\") -> None:\n raise EmbeddedBotQuitException(message)\n"},"type_annotations":{"kind":"list like","value":["str","UserProfile","str","str","str","str","str","UserProfile","Dict[str, Any]","Dict[str, Any]","str","str"],"string":"[\n \"str\",\n \"UserProfile\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"UserProfile\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"str\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[865,1516,1707,1815,1827,1941,2037,2229,2577,3662,3688,4412],"string":"[\n 865,\n 1516,\n 1707,\n 1815,\n 1827,\n 
1941,\n 2037,\n 2229,\n 2577,\n 3662,\n 3688,\n 4412\n]"},"type_annotation_ends":{"kind":"list like","value":[868,1527,1710,1818,1830,1944,2040,2240,2591,3676,3691,4415],"string":"[\n 868,\n 1527,\n 1710,\n 1818,\n 1830,\n 1944,\n 2040,\n 2240,\n 2591,\n 3676,\n 3691,\n 4415\n]"}}},{"rowIdx":1350,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bot_storage.py"},"contents":{"kind":"string","value":"from django.conf import settings\nfrom django.db.models import Sum\nfrom django.db.models.query import F\nfrom django.db.models.functions import Length\nfrom zerver.models import BotStorageData, UserProfile, Length\n\nfrom typing import Optional, List, Tuple\n\nclass StateError(Exception):\n pass\n\ndef get_bot_storage(bot_profile: UserProfile, key: str) -> str:\n try:\n return BotStorageData.objects.get(bot_profile=bot_profile, key=key).value\n except BotStorageData.DoesNotExist:\n raise StateError(\"Key does not exist.\")\n\ndef get_bot_storage_size(bot_profile: UserProfile, key: Optional[str]=None) -> int:\n if key is None:\n return BotStorageData.objects.filter(bot_profile=bot_profile) \\\n .annotate(key_size=Length('key'), value_size=Length('value')) \\\n .aggregate(sum=Sum(F('key_size')+F('value_size')))['sum'] or 0\n else:\n try:\n return len(key) + len(BotStorageData.objects.get(bot_profile=bot_profile, key=key).value)\n except BotStorageData.DoesNotExist:\n return 0\n\ndef set_bot_storage(bot_profile: UserProfile, entries: List[Tuple[str, str]]) -> None:\n storage_size_limit = settings.USER_STATE_SIZE_LIMIT\n storage_size_difference = 0\n for key, value in entries:\n if type(key) is not str:\n raise StateError(\"Key type is {}, but should be str.\".format(type(key)))\n if type(value) is not str:\n raise StateError(\"Value type is {}, but should be str.\".format(type(value)))\n storage_size_difference += (len(key) + len(value)) - get_bot_storage_size(bot_profile, key)\n new_storage_size = get_bot_storage_size(bot_profile) + storage_size_difference\n if new_storage_size > storage_size_limit:\n raise StateError(\"Request exceeds storage limit by {} characters. 
The limit is {} characters.\"\n .format(new_storage_size - storage_size_limit, storage_size_limit))\n else:\n for key, value in entries:\n BotStorageData.objects.update_or_create(bot_profile=bot_profile, key=key,\n defaults={'value': value})\n\ndef remove_bot_storage(bot_profile: UserProfile, keys: List[str]) -> None:\n queryset = BotStorageData.objects.filter(bot_profile=bot_profile, key__in=keys)\n if len(queryset) < len(keys):\n raise StateError(\"Key does not exist.\")\n queryset.delete()\n\ndef is_key_in_bot_storage(bot_profile: UserProfile, key: str) -> bool:\n return BotStorageData.objects.filter(bot_profile=bot_profile, key=key).exists()\n\ndef get_keys_in_bot_storage(bot_profile: UserProfile) -> List[str]:\n return list(BotStorageData.objects.filter(bot_profile=bot_profile).values_list('key', flat=True))\n"},"type_annotations":{"kind":"list like","value":["UserProfile","str","UserProfile","UserProfile","List[Tuple[str, str]]","UserProfile","List[str]","UserProfile","str","UserProfile"],"string":"[\n \"UserProfile\",\n \"str\",\n \"UserProfile\",\n \"UserProfile\",\n \"List[Tuple[str, str]]\",\n \"UserProfile\",\n \"List[str]\",\n \"UserProfile\",\n \"str\",\n \"UserProfile\"\n]"},"type_annotation_starts":{"kind":"list like","value":[326,344,575,1138,1160,2225,2244,2492,2510,2650],"string":"[\n 326,\n 344,\n 575,\n 1138,\n 1160,\n 2225,\n 2244,\n 2492,\n 2510,\n 2650\n]"},"type_annotation_ends":{"kind":"list like","value":[337,347,586,1149,1181,2236,2253,2503,2513,2661],"string":"[\n 337,\n 347,\n 586,\n 1149,\n 1181,\n 2236,\n 2253,\n 2503,\n 2513,\n 2661\n]"}}},{"rowIdx":1351,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bugdown/__init__.py"},"contents":{"kind":"string","value":"# Zulip's main markdown implementation. 
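\n# (Editorial note: LINK_REGEX below is deliberately None at import time and\n# is compiled lazily on the first call to get_web_link_regex(), because\n# building it reads the large TLD list from disk.)\n# 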
See docs/subsystems/markdown.md for\n# detailed documentation on our markdown syntax.\nfrom typing import (Any, Callable, Dict, Iterable, List, NamedTuple,\n Optional, Set, Tuple, TypeVar, Union, cast)\nfrom mypy_extensions import TypedDict\nfrom typing.re import Match, Pattern\n\nimport markdown\nimport logging\nimport traceback\nimport urllib\nimport re\nimport os\nimport html\nimport platform\nimport time\nimport functools\nimport ujson\nimport xml.etree.cElementTree as etree\nfrom xml.etree.cElementTree import Element, SubElement\n\nfrom collections import deque, defaultdict\n\nimport requests\n\nfrom django.core import mail\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom markdown.extensions import codehilite, nl2br, tables\nfrom zerver.lib.bugdown import fenced_code\nfrom zerver.lib.bugdown.fenced_code import FENCE_RE\nfrom zerver.lib.camo import get_camo_url\nfrom zerver.lib.emoji import translate_emoticons, emoticon_regex\nfrom zerver.lib.mention import possible_mentions, \\\n possible_user_group_mentions, extract_user_group\nfrom zerver.lib.url_encoding import encode_stream\nfrom zerver.lib.thumbnail import is_thumbor_enabled, user_uploads_or_external\nfrom zerver.lib.timeout import timeout, TimeoutExpired\nfrom zerver.lib.cache import cache_with_key, NotFoundInCache\nfrom zerver.lib.url_preview import preview as link_preview\nfrom zerver.models import (\n all_realm_filters,\n get_active_streams,\n MAX_MESSAGE_LENGTH,\n Message,\n Realm,\n RealmFilter,\n realm_filters_for_realm,\n UserProfile,\n UserGroup,\n UserGroupMembership,\n)\nimport zerver.lib.mention as mention\nfrom zerver.lib.tex import render_tex\nfrom zerver.lib.exceptions import BugdownRenderingException\n\nFullNameInfo = TypedDict('FullNameInfo', {\n 'id': int,\n 'email': str,\n 'full_name': str,\n})\n\nDbData = Dict[str, Any]\n\n# Format version of the bugdown rendering; stored along with rendered\n# messages so that we can efficiently determine what needs to be re-rendered\nversion = 1\n\n_T = TypeVar('_T')\nElementStringNone = Union[Element, Optional[str]]\n\nAVATAR_REGEX = r'!avatar\\((?P<email>[^)]*)\\)'\nGRAVATAR_REGEX = r'!gravatar\\((?P<email>[^)]*)\\)'\nEMOJI_REGEX = r'(?P<syntax>:[\\w\\-\\+]+:)'\n\ndef verbose_compile(pattern: str) -> Any:\n return re.compile(\n \"^(.*?)%s(.*?)$\" % pattern,\n re.DOTALL | re.UNICODE | re.VERBOSE\n )\n\nSTREAM_LINK_REGEX = r\"\"\"\n (?P<stream_name>[^\\*]+) # stream name can contain anything\n \\*\\* # ends by double asterisks\n \"\"\"\n\nLINK_REGEX = None # type: Pattern\n\ndef get_web_link_regex() -> str:\n # We create this one time, but not at startup. So the\n # first message rendered in any process will have some\n # extra costs.\n global LINK_REGEX\n if LINK_REGEX is None:\n # NOTE: this is a very expensive step, it reads a file of tlds!\n tlds = '|'.join(list_of_tlds())\n\n # A link starts at a word boundary, and ends at space, punctuation, or end-of-input.\n #\n # We detect a url either by the `https?://` or by building around the TLD.\n\n # In lieu of having a recursive regex (which python doesn't support) to match\n # arbitrary numbers of nested matching parenthesis, we manually build a regexp that\n # can match up to six\n # The inner_paren_contents chunk matches the innermore non-parenthesis-holding text,\n # and the paren_group matches text with, optionally, a matching set of parens\n inner_paren_contents = r\"[^\\s()\\\"]*\"\n paren_group = r\"\"\"\n [^\\s()\\\"]*? 
# Containing characters that won't end the URL\n (?: \\( %s \\) # and more characters in matched parens\n [^\\s()\\\"]*? # followed by more characters\n )* # zero-or-more sets of paired parens\n \"\"\"\n nested_paren_chunk = paren_group\n for i in range(6):\n nested_paren_chunk = nested_paren_chunk % (paren_group,)\n nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)\n\n file_links = r\"| (?:file://(/[^/ ]*)+/?)\" if settings.ENABLE_FILE_LINKS else r\"\"\n regex = r\"\"\"\n (?<![^\\s'\"\\(,:<]) # Start after whitespace or specified chars\n # (Double-negative lookbehind to allow start-of-string)\n (?P<url> # Main group\n (?:(?: # Domain part\n https?://[\\w.:@-]+? # If it has a protocol, anything goes.\n |(?: # Or, if not, be more strict to avoid false-positives\n (?:[\\w-]+\\.)+ # One or more domain components, separated by dots\n (?:%s) # TLDs (filled in via format from tlds-alpha-by-domain.txt)\n )\n )\n (?:/ # A path, beginning with /\n %s # zero-to-6 sets of paired parens\n )?) # Path is optional\n | (?:[\\w.-]+\\@[\\w.-]+\\.[\\w]+) # Email is separate, since it can't have a path\n %s # File path start with file:///, enable by setting ENABLE_FILE_LINKS=True\n | (?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{25,34}) # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html\n )\n (?= # URL must be followed by (not included in group)\n [!:;\\?\\),\\.\\'\\\"\\>]* # Optional punctuation characters\n (?:\\Z|\\s) # followed by whitespace or end of string\n )\n \"\"\" % (tlds, nested_paren_chunk, file_links)\n LINK_REGEX = verbose_compile(regex)\n return LINK_REGEX\n\ndef clear_state_for_testing() -> None:\n # The link regex never changes in production, but our tests\n # try out both sides of ENABLE_FILE_LINKS, so we need\n # a way to clear it.\n global LINK_REGEX\n LINK_REGEX = None\n\nbugdown_logger = logging.getLogger()\n\ndef rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:\n \"\"\" If the link points to a local destination we can just switch to that\n instead of opening a new tab. 
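\n\n A hypothetical example (editorial illustration, names invented): with\n db_data['realm_uri'] equal to \"https://chat.example.com\", the link\n \"https://chat.example.com/#narrow/stream/42\" is rewritten to the\n relative \"#narrow/stream/42\"; any other link is returned unchanged.\n 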
\"\"\"\n\n if db_data:\n realm_uri_prefix = db_data['realm_uri'] + \"/\"\n if link.startswith(realm_uri_prefix):\n # +1 to skip the `/` before the hash link.\n return link[len(realm_uri_prefix):]\n\n return link\n\ndef url_embed_preview_enabled_for_realm(message: Optional[Message]=None,\n realm: Optional[Realm]=None) -> bool:\n if not settings.INLINE_URL_EMBED_PREVIEW:\n return False\n\n if realm is None:\n if message is not None:\n realm = message.get_realm()\n\n if realm is None:\n # realm can be None for odd use cases\n # like generating documentation or running\n # test code\n return True\n\n return realm.inline_url_embed_preview\n\ndef image_preview_enabled_for_realm(message: Optional[Message]=None,\n realm: Optional[Realm]=None) -> bool:\n if not settings.INLINE_IMAGE_PREVIEW:\n return False\n\n if realm is None:\n if message is not None:\n realm = message.get_realm()\n\n if realm is None:\n # realm can be None for odd use cases\n # like generating documentation or running\n # test code\n return True\n\n return realm.inline_image_preview\n\ndef list_of_tlds() -> List[str]:\n # HACK we manually blacklist a few domains\n blacklist = ['PY\\n', \"MD\\n\"]\n\n # tlds-alpha-by-domain.txt comes from http://data.iana.org/TLD/tlds-alpha-by-domain.txt\n tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')\n tlds = [tld.lower().strip() for tld in open(tlds_file, 'r')\n if tld not in blacklist and not tld[0].startswith('#')]\n tlds.sort(key=len, reverse=True)\n return tlds\n\ndef walk_tree(root: Element,\n processor: Callable[[Element], Optional[_T]],\n stop_after_first: bool=False) -> List[_T]:\n results = []\n queue = deque([root])\n\n while queue:\n currElement = queue.popleft()\n for child in currElement.getchildren():\n if child.getchildren():\n queue.append(child)\n\n result = processor(child)\n if result is not None:\n results.append(result)\n if stop_after_first:\n return results\n\n return results\n\nElementFamily = NamedTuple('ElementFamily', [\n ('grandparent', Optional[Element]),\n ('parent', Element),\n ('child', Element)\n])\n\nResultWithFamily = NamedTuple('ResultWithFamily', [\n ('family', ElementFamily),\n ('result', Any)\n])\n\nElementPair = NamedTuple('ElementPair', [\n ('parent', Optional[Element]),\n ('value', Element)\n])\n\ndef walk_tree_with_family(root: Element,\n processor: Callable[[Element], Optional[_T]]\n ) -> List[ResultWithFamily]:\n results = []\n\n queue = deque([ElementPair(parent=None, value=root)])\n while queue:\n currElementPair = queue.popleft()\n for child in currElementPair.value.getchildren():\n if child.getchildren():\n queue.append(ElementPair(parent=currElementPair, value=child)) # type: ignore # Lack of Deque support in typing module for Python 3.4.3\n result = processor(child)\n if result is not None:\n if currElementPair.parent is not None:\n grandparent_element = cast(ElementPair, currElementPair.parent)\n grandparent = grandparent_element.value\n else:\n grandparent = None\n family = ElementFamily(\n grandparent=grandparent,\n parent=currElementPair.value,\n child=child\n )\n\n results.append(ResultWithFamily(\n family=family,\n result=result\n ))\n\n return results\n\n# height is not actually used\ndef add_a(\n root: Element,\n url: str,\n link: str,\n title: Optional[str]=None,\n desc: Optional[str]=None,\n class_attr: str=\"message_inline_image\",\n data_id: Optional[str]=None,\n insertion_index: Optional[int]=None,\n already_thumbnailed: Optional[bool]=False\n) -> None:\n title = title if title is not None else 
url_filename(link)\n title = title if title else \"\"\n desc = desc if desc is not None else \"\"\n\n if insertion_index is not None:\n div = markdown.util.etree.Element(\"div\")\n root.insert(insertion_index, div)\n else:\n div = markdown.util.etree.SubElement(root, \"div\")\n\n div.set(\"class\", class_attr)\n a = markdown.util.etree.SubElement(div, \"a\")\n a.set(\"href\", link)\n a.set(\"target\", \"_blank\")\n a.set(\"title\", title)\n if data_id is not None:\n a.set(\"data-id\", data_id)\n img = markdown.util.etree.SubElement(a, \"img\")\n if is_thumbor_enabled() and (not already_thumbnailed) and user_uploads_or_external(url):\n # See docs/thumbnailing.md for some high-level documentation.\n #\n # We strip leading '/' from relative URLs here to ensure\n # consistency in what gets passed to /thumbnail\n url = url.lstrip('/')\n img.set(\"src\", \"/thumbnail?url={0}&size=thumbnail\".format(\n urllib.parse.quote(url, safe='')\n ))\n img.set('data-src-fullsize', \"/thumbnail?url={0}&size=full\".format(\n urllib.parse.quote(url, safe='')\n ))\n else:\n img.set(\"src\", url)\n\n if class_attr == \"message_inline_ref\":\n summary_div = markdown.util.etree.SubElement(div, \"div\")\n title_div = markdown.util.etree.SubElement(summary_div, \"div\")\n title_div.set(\"class\", \"message_inline_image_title\")\n title_div.text = title\n desc_div = markdown.util.etree.SubElement(summary_div, \"desc\")\n desc_div.set(\"class\", \"message_inline_image_desc\")\n\ndef add_embed(root: Element, link: str, extracted_data: Dict[str, Any]) -> None:\n container = markdown.util.etree.SubElement(root, \"div\")\n container.set(\"class\", \"message_embed\")\n\n img_link = extracted_data.get('image')\n if img_link:\n parsed_img_link = urllib.parse.urlparse(img_link)\n # Append domain where relative img_link url is given\n if not parsed_img_link.netloc:\n parsed_url = urllib.parse.urlparse(link)\n domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)\n img_link = urllib.parse.urljoin(domain, img_link)\n img = markdown.util.etree.SubElement(container, \"a\")\n img.set(\"style\", \"background-image: url(\" + img_link + \")\")\n img.set(\"href\", link)\n img.set(\"target\", \"_blank\")\n img.set(\"class\", \"message_embed_image\")\n\n data_container = markdown.util.etree.SubElement(container, \"div\")\n data_container.set(\"class\", \"data-container\")\n\n title = extracted_data.get('title')\n if title:\n title_elm = markdown.util.etree.SubElement(data_container, \"div\")\n title_elm.set(\"class\", \"message_embed_title\")\n a = markdown.util.etree.SubElement(title_elm, \"a\")\n a.set(\"href\", link)\n a.set(\"target\", \"_blank\")\n a.set(\"title\", title)\n a.text = title\n description = extracted_data.get('description')\n if description:\n description_elm = markdown.util.etree.SubElement(data_container, \"div\")\n description_elm.set(\"class\", \"message_embed_description\")\n description_elm.text = description\n\n@cache_with_key(lambda tweet_id: tweet_id, cache_name=\"database\", with_statsd_key=\"tweet_data\")\ndef fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:\n if settings.TEST_SUITE:\n from . 
import testing_mocks\n res = testing_mocks.twitter(tweet_id)\n else:\n creds = {\n 'consumer_key': settings.TWITTER_CONSUMER_KEY,\n 'consumer_secret': settings.TWITTER_CONSUMER_SECRET,\n 'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,\n 'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,\n }\n if not all(creds.values()):\n return None\n\n # We lazily import twitter here because its import process is\n # surprisingly slow, and doing so has a significant impact on\n # the startup performance of `manage.py` commands.\n import twitter\n\n try:\n api = twitter.Api(tweet_mode='extended', **creds)\n # Sometimes Twitter hangs on responses. Timing out here\n # will cause the Tweet to go through as-is with no inline\n # preview, rather than having the message be rejected\n # entirely. This timeout needs to be less than our overall\n # formatting timeout.\n tweet = timeout(3, api.GetStatus, tweet_id)\n res = tweet.AsDict()\n except AttributeError:\n bugdown_logger.error('Unable to load twitter api, you may have the wrong '\n 'library installed, see https://github.com/zulip/zulip/issues/86')\n return None\n except TimeoutExpired:\n # We'd like to try again later and not cache the bad result,\n # so we need to re-raise the exception (just as though\n # we were being rate-limited)\n raise\n except twitter.TwitterError as e:\n t = e.args[0]\n if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):\n # Code 34 means that the message doesn't exist; return\n # None so that we will cache the error\n return None\n elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or\n t[0]['code'] == 130):\n # Code 88 means that we were rate-limited and 130\n # means Twitter is having capacity issues; either way\n # just raise the error so we don't cache None and will\n # try again later.\n raise\n else:\n # It's not clear what to do in cases of other errors,\n # but for now it seems reasonable to log at error\n # level (so that we get notified), but then cache the\n # failure to proceed with our usual work\n bugdown_logger.error(traceback.format_exc())\n return None\n return res\n\nHEAD_START_RE = re.compile('^head[ >]')\nHEAD_END_RE = re.compile('^/head[ >]')\nMETA_START_RE = re.compile('^meta[ >]')\nMETA_END_RE = re.compile('^/meta[ >]')\n\ndef fetch_open_graph_image(url: str) -> Optional[Dict[str, Any]]:\n in_head = False\n # HTML will auto close meta tags, when we start the next tag add\n # a closing tag if it has not been closed yet.\n last_closed = True\n head = []\n # TODO: What if response content is huge? 
Should we get headers first?\n try:\n content = requests.get(url, timeout=1).text\n except Exception:\n return None\n # Extract the head and meta tags\n # All meta tags are self closing, have no children or are closed\n # automatically.\n for part in content.split('<'):\n if not in_head and HEAD_START_RE.match(part):\n # Started the head node output it to have a document root\n in_head = True\n head.append('<head>')\n elif in_head and HEAD_END_RE.match(part):\n # Found the end of the head close any remaining tag then stop\n # processing\n in_head = False\n if not last_closed:\n last_closed = True\n head.append('</meta>')\n head.append('</head>')\n break\n\n elif in_head and META_START_RE.match(part):\n # Found a meta node copy it\n if not last_closed:\n head.append('</meta>')\n last_closed = True\n head.append('<')\n head.append(part)\n if '/>' not in part:\n last_closed = False\n\n elif in_head and META_END_RE.match(part):\n # End of a meta node just copy it to close the tag\n head.append('<')\n head.append(part)\n last_closed = True\n\n try:\n doc = etree.fromstring(''.join(head))\n except etree.ParseError:\n return None\n og_image = doc.find('meta[@property=\"og:image\"]')\n og_title = doc.find('meta[@property=\"og:title\"]')\n og_desc = doc.find('meta[@property=\"og:description\"]')\n title = None\n desc = None\n if og_image is not None:\n image = og_image.get('content')\n else:\n return None\n if og_title is not None:\n title = og_title.get('content')\n if og_desc is not None:\n desc = og_desc.get('content')\n return {'image': image, 'title': title, 'desc': desc}\n\ndef get_tweet_id(url: str) -> Optional[str]:\n parsed_url = urllib.parse.urlparse(url)\n if not (parsed_url.netloc == 'twitter.com' or parsed_url.netloc.endswith('.twitter.com')):\n return None\n to_match = parsed_url.path\n # In old-style twitter.com/#!/wdaher/status/1231241234-style URLs,\n # we need to look at the fragment instead\n if parsed_url.path == '/' and len(parsed_url.fragment) > 5:\n to_match = parsed_url.fragment\n\n tweet_id_match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\\d{10,30})(/photo/[0-9])?/?$', to_match)\n if not tweet_id_match:\n return None\n return tweet_id_match.group(\"tweetid\")\n\nclass InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):\n def run(self, root: Element) -> None:\n # Get all URLs from the blob\n found_imgs = walk_tree(root, lambda e: e if e.tag == \"img\" else None)\n for img in found_imgs:\n url = img.get(\"src\")\n if not url.startswith(\"http://\"):\n # Don't rewrite images on our own site (e.g. emoji).\n continue\n img.set(\"src\", get_camo_url(url))\n\nclass BacktickPattern(markdown.inlinepatterns.Pattern):\n \"\"\" Return a `<code>` element containing the matching text. 
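\n\n (Editorial sketch, not from upstream: unlike the stock Markdown\n pattern, handleMatch() below wraps the matched text in an\n AtomicString without stripping whitespace, so inner spaces survive\n in the rendered code element.)\n 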
\"\"\"\n def __init__(self, pattern: str) -> None:\n markdown.inlinepatterns.Pattern.__init__(self, pattern)\n self.ESCAPED_BSLASH = '%s%s%s' % (markdown.util.STX, ord('\\\\'), markdown.util.ETX)\n self.tag = 'code'\n\n def handleMatch(self, m: Match[str]) -> Union[str, Element]:\n if m.group(4):\n el = markdown.util.etree.Element(self.tag)\n # Modified to not strip whitespace\n el.text = markdown.util.AtomicString(m.group(4))\n return el\n else:\n return m.group(2).replace('\\\\\\\\', self.ESCAPED_BSLASH)\n\nclass InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):\n TWITTER_MAX_IMAGE_HEIGHT = 400\n TWITTER_MAX_TO_PREVIEW = 3\n INLINE_PREVIEW_LIMIT_PER_MESSAGE = 5\n\n def __init__(self, md: markdown.Markdown, bugdown: 'Bugdown') -> None:\n # Passing in bugdown for access to config to check if realm is zulip.com\n self.bugdown = bugdown\n markdown.treeprocessors.Treeprocessor.__init__(self, md)\n\n def get_actual_image_url(self, url: str) -> str:\n # Add specific per-site cases to convert image-preview urls to image urls.\n # See https://github.com/zulip/zulip/issues/4658 for more information\n parsed_url = urllib.parse.urlparse(url)\n if (parsed_url.netloc == 'github.com' or parsed_url.netloc.endswith('.github.com')):\n # https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png ->\n # https://raw.githubusercontent.com/zulip/zulip/master/static/images/logo/zulip-icon-128x128.png\n split_path = parsed_url.path.split('/')\n if len(split_path) > 3 and split_path[3] == \"blob\":\n return urllib.parse.urljoin('https://raw.githubusercontent.com',\n '/'.join(split_path[0:3] + split_path[4:]))\n\n return url\n\n def image_preview_enabled(self) -> bool:\n return image_preview_enabled_for_realm(\n self.markdown.zulip_message,\n self.markdown.zulip_realm,\n )\n\n def is_image(self, url: str) -> bool:\n if not self.image_preview_enabled():\n return False\n parsed_url = urllib.parse.urlparse(url)\n # List from http://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093\n for ext in [\".bmp\", \".gif\", \".jpg\", \"jpeg\", \".png\", \".webp\"]:\n if parsed_url.path.lower().endswith(ext):\n return True\n return False\n\n def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]:\n # TODO: The returned Dict could possibly be a TypedDict in future.\n parsed_url = urllib.parse.urlparse(url)\n if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')):\n is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/')\n # Only allow preview Dropbox shared links\n if not (parsed_url.path.startswith('/s/') or\n parsed_url.path.startswith('/sh/') or\n is_album):\n return None\n\n # Try to retrieve open graph protocol info for a preview\n # This might be redundant right now for shared links for images.\n # However, we might want to make use of title and description\n # in the future. 
If the actual image is too big, we might also\n # want to use the open graph image.\n image_info = fetch_open_graph_image(url)\n\n is_image = is_album or self.is_image(url)\n\n # If it is from an album or not an actual image file,\n # just use open graph image.\n if is_album or not is_image:\n # Failed to follow link to find an image preview so\n # use placeholder image and guess filename\n if image_info is None:\n return None\n\n image_info[\"is_image\"] = is_image\n return image_info\n\n # Otherwise, try to retrieve the actual image.\n # This is because open graph image from Dropbox may have padding\n # and gifs do not work.\n # TODO: What if image is huge? Should we get headers first?\n if image_info is None:\n image_info = dict()\n image_info['is_image'] = True\n parsed_url_list = list(parsed_url)\n parsed_url_list[4] = \"dl=1\" # Replaces query\n image_info[\"image\"] = urllib.parse.urlunparse(parsed_url_list)\n\n return image_info\n return None\n\n def youtube_id(self, url: str) -> Optional[str]:\n if not self.image_preview_enabled():\n return None\n # Youtube video id extraction regular expression from http://pastebin.com/KyKAFv1s\n # If it matches, match.group(2) is the video id.\n youtube_re = r'^((?:https?://)?(?:youtu\\.be/|(?:\\w+\\.)?youtube(?:-nocookie)?\\.com/)' + \\\n r'(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\\.php)?)?(?:\\?|#!?)(?:.+&)?v=)))' + \\\n r'?([0-9A-Za-z_-]+)(?(1).+)?$'\n match = re.match(youtube_re, url)\n if match is None:\n return None\n return match.group(2)\n\n def youtube_image(self, url: str) -> Optional[str]:\n yt_id = self.youtube_id(url)\n\n if yt_id is not None:\n return \"https://i.ytimg.com/vi/%s/default.jpg\" % (yt_id,)\n return None\n\n def vimeo_id(self, url: str) -> Optional[str]:\n if not self.image_preview_enabled():\n return None\n #(http|https)?:\\/\\/(www\\.)?vimeo.com\\/(?:channels\\/(?:\\w+\\/)?|groups\\/([^\\/]*)\\/videos\\/|)(\\d+)(?:|\\/\\?)\n # If it matches, match.group('id') is the video id.\n\n vimeo_re = r'^((http|https)?:\\/\\/(www\\.)?vimeo.com\\/' + \\\n r'(?:channels\\/(?:\\w+\\/)?|groups\\/' + \\\n r'([^\\/]*)\\/videos\\/|)(\\d+)(?:|\\/\\?))$'\n match = re.match(vimeo_re, url)\n if match is None:\n return None\n return match.group(5)\n\n def vimeo_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:\n title = extracted_data.get(\"title\")\n if title is not None:\n return \"Vimeo - {}\".format(title)\n return None\n\n def twitter_text(self, text: str,\n urls: List[Dict[str, str]],\n user_mentions: List[Dict[str, Any]],\n media: List[Dict[str, Any]]) -> Element:\n \"\"\"\n Use data from the twitter API to turn links, mentions and media into A\n tags. Also convert unicode emojis to images.\n\n This works by using the urls, user_mentions and media data from\n the twitter API and searching for unicode emojis in the text using\n `unicode_emoji_regex`.\n\n The first step is finding the locations of the URLs, mentions, media and\n emoji in the text. For each match we build a dictionary with type, the start\n location, end location, the URL to link to, and the text(codepoint and title\n in case of emojis) to be used in the link(image in case of emojis).\n\n Next we sort the matches by start location. And for each we add the\n text from the end of the last link to the start of the current link to\n the output. 
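(For example, a tweet 'check t.co/x' whose urls entry expands t.co/x to example.com yields a P element containing the text 'check ' followed by an A element whose href is the short link and whose visible text is the expanded URL.) 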
The text needs to added to the text attribute of the first\n node (the P tag) or the tail the last link created.\n\n Finally we add any remaining text to the last node.\n \"\"\"\n\n to_process = [] # type: List[Dict[str, Any]]\n # Build dicts for URLs\n for url_data in urls:\n short_url = url_data[\"url\"]\n full_url = url_data[\"expanded_url\"]\n for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):\n to_process.append({\n 'type': 'url',\n 'start': match.start(),\n 'end': match.end(),\n 'url': short_url,\n 'text': full_url,\n })\n # Build dicts for mentions\n for user_mention in user_mentions:\n screen_name = user_mention['screen_name']\n mention_string = '@' + screen_name\n for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):\n to_process.append({\n 'type': 'mention',\n 'start': match.start(),\n 'end': match.end(),\n 'url': 'https://twitter.com/' + urllib.parse.quote(screen_name),\n 'text': mention_string,\n })\n # Build dicts for media\n for media_item in media:\n short_url = media_item['url']\n expanded_url = media_item['expanded_url']\n for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):\n to_process.append({\n 'type': 'media',\n 'start': match.start(),\n 'end': match.end(),\n 'url': short_url,\n 'text': expanded_url,\n })\n # Build dicts for emojis\n for match in re.finditer(unicode_emoji_regex, text, re.IGNORECASE):\n orig_syntax = match.group('syntax')\n codepoint = unicode_emoji_to_codepoint(orig_syntax)\n if codepoint in codepoint_to_name:\n display_string = ':' + codepoint_to_name[codepoint] + ':'\n to_process.append({\n 'type': 'emoji',\n 'start': match.start(),\n 'end': match.end(),\n 'codepoint': codepoint,\n 'title': display_string,\n })\n\n to_process.sort(key=lambda x: x['start'])\n p = current_node = markdown.util.etree.Element('p')\n\n def set_text(text: str) -> None:\n \"\"\"\n Helper to set the text or the tail of the current_node\n \"\"\"\n if current_node == p:\n current_node.text = text\n else:\n current_node.tail = text\n\n db_data = self.markdown.zulip_db_data\n current_index = 0\n for item in to_process:\n # The text we want to link starts in already linked text skip it\n if item['start'] < current_index:\n continue\n # Add text from the end of last link to the start of the current\n # link\n set_text(text[current_index:item['start']])\n current_index = item['end']\n if item['type'] != 'emoji':\n current_node = elem = url_to_a(db_data, item['url'], item['text'])\n else:\n current_node = elem = make_emoji(item['codepoint'], item['title'])\n p.append(elem)\n\n # Add any unused text\n set_text(text[current_index:])\n return p\n\n def twitter_link(self, url: str) -> Optional[Element]:\n tweet_id = get_tweet_id(url)\n\n if tweet_id is None:\n return None\n\n try:\n res = fetch_tweet_data(tweet_id)\n if res is None:\n return None\n user = res['user'] # type: Dict[str, Any]\n tweet = markdown.util.etree.Element(\"div\")\n tweet.set(\"class\", \"twitter-tweet\")\n img_a = markdown.util.etree.SubElement(tweet, 'a')\n img_a.set(\"href\", url)\n img_a.set(\"target\", \"_blank\")\n profile_img = markdown.util.etree.SubElement(img_a, 'img')\n profile_img.set('class', 'twitter-avatar')\n # For some reason, for, e.g. tweet 285072525413724161,\n # python-twitter does not give us a\n # profile_image_url_https, but instead puts that URL in\n # profile_image_url. 
So use _https if available, but fall\n # back gracefully.\n image_url = user.get('profile_image_url_https', user['profile_image_url'])\n profile_img.set('src', image_url)\n\n text = html.unescape(res['full_text'])\n urls = res.get('urls', [])\n user_mentions = res.get('user_mentions', [])\n media = res.get('media', []) # type: List[Dict[str, Any]]\n p = self.twitter_text(text, urls, user_mentions, media)\n tweet.append(p)\n\n span = markdown.util.etree.SubElement(tweet, 'span')\n span.text = \"- %s (@%s)\" % (user['name'], user['screen_name'])\n\n # Add image previews\n for media_item in media:\n # Only photos have a preview image\n if media_item['type'] != 'photo':\n continue\n\n # Find the image size that is smaller than\n # TWITTER_MAX_IMAGE_HEIGHT px tall or the smallest\n size_name_tuples = list(media_item['sizes'].items())\n size_name_tuples.sort(reverse=True,\n key=lambda x: x[1]['h'])\n for size_name, size in size_name_tuples:\n if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT:\n break\n\n media_url = '%s:%s' % (media_item['media_url_https'], size_name)\n img_div = markdown.util.etree.SubElement(tweet, 'div')\n img_div.set('class', 'twitter-image')\n img_a = markdown.util.etree.SubElement(img_div, 'a')\n img_a.set('href', media_item['url'])\n img_a.set('target', '_blank')\n img_a.set('title', media_item['url'])\n img = markdown.util.etree.SubElement(img_a, 'img')\n img.set('src', media_url)\n\n return tweet\n except Exception:\n # We put this in its own try-except because it requires external\n # connectivity. If Twitter flakes out, we don't want to not-render\n # the entire message; we just want to not show the Twitter preview.\n bugdown_logger.warning(traceback.format_exc())\n return None\n\n def get_url_data(self, e: Element) -> Optional[Tuple[str, str]]:\n if e.tag == \"a\":\n if e.text is not None:\n return (e.get(\"href\"), e.text)\n return (e.get(\"href\"), e.get(\"href\"))\n return None\n\n def handle_image_inlining(self, root: Element, found_url: ResultWithFamily) -> None:\n grandparent = found_url.family.grandparent\n parent = found_url.family.parent\n ahref_element = found_url.family.child\n (url, text) = found_url.result\n actual_url = self.get_actual_image_url(url)\n\n # url != text usually implies a named link, which we opt not to remove\n url_eq_text = (url == text)\n\n if parent.tag == 'li':\n add_a(parent, self.get_actual_image_url(url), url, title=text)\n if not parent.text and not ahref_element.tail and url_eq_text:\n parent.remove(ahref_element)\n\n elif parent.tag == 'p':\n parent_index = None\n for index, uncle in enumerate(grandparent.getchildren()):\n if uncle is parent:\n parent_index = index\n break\n\n if parent_index is not None:\n ins_index = self.find_proper_insertion_index(grandparent, parent, parent_index)\n add_a(grandparent, actual_url, url, title=text, insertion_index=ins_index)\n\n else:\n # We're not inserting after parent, since parent not found.\n # Append to end of list of grandparent's children as normal\n add_a(grandparent, actual_url, url, title=text)\n\n # If link is alone in a paragraph, delete paragraph containing it\n if (len(parent.getchildren()) == 1 and\n (not parent.text or parent.text == \"\\n\") and\n not ahref_element.tail and\n url_eq_text):\n grandparent.remove(parent)\n\n else:\n # If none of the above criteria match, fall back to old behavior\n add_a(root, actual_url, url, title=text)\n\n def find_proper_insertion_index(self, grandparent: Element, parent: Element,\n parent_index_in_grandparent: int) -> int:\n # If there are 
several inline images from same paragraph, ensure that\n # they are in correct (and not opposite) order by inserting after last\n # inline image from paragraph 'parent'\n\n uncles = grandparent.getchildren()\n parent_links = [ele.attrib['href'] for ele in parent.iter(tag=\"a\")]\n insertion_index = parent_index_in_grandparent\n\n while True:\n insertion_index += 1\n if insertion_index >= len(uncles):\n return insertion_index\n\n uncle = uncles[insertion_index]\n inline_image_classes = ['message_inline_image', 'message_inline_ref']\n if (\n uncle.tag != 'div' or\n 'class' not in uncle.keys() or\n uncle.attrib['class'] not in inline_image_classes\n ):\n return insertion_index\n\n uncle_link = list(uncle.iter(tag=\"a\"))[0].attrib['href']\n if uncle_link not in parent_links:\n return insertion_index\n\n def is_absolute_url(self, url: str) -> bool:\n return bool(urllib.parse.urlparse(url).netloc)\n\n def run(self, root: Element) -> None:\n # Get all URLs from the blob\n found_urls = walk_tree_with_family(root, self.get_url_data)\n if len(found_urls) == 0 or len(found_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE:\n return\n\n rendered_tweet_count = 0\n\n for found_url in found_urls:\n (url, text) = found_url.result\n if not self.is_absolute_url(url):\n if self.is_image(url):\n self.handle_image_inlining(root, found_url)\n # We don't have a strong use case for doing url preview for relative links.\n continue\n\n dropbox_image = self.dropbox_image(url)\n if dropbox_image is not None:\n class_attr = \"message_inline_ref\"\n is_image = dropbox_image[\"is_image\"]\n if is_image:\n class_attr = \"message_inline_image\"\n # Not making use of title and description of images\n add_a(root, dropbox_image['image'], url,\n title=dropbox_image.get('title', \"\"),\n desc=dropbox_image.get('desc', \"\"),\n class_attr=class_attr,\n already_thumbnailed=True)\n continue\n if self.is_image(url):\n self.handle_image_inlining(root, found_url)\n continue\n if get_tweet_id(url) is not None:\n if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:\n # Only render at most one tweet per message\n continue\n twitter_data = self.twitter_link(url)\n if twitter_data is None:\n # This link is not actually a tweet known to twitter\n continue\n rendered_tweet_count += 1\n div = markdown.util.etree.SubElement(root, \"div\")\n div.set(\"class\", \"inline-preview-twitter\")\n div.insert(0, twitter_data)\n continue\n youtube = self.youtube_image(url)\n if youtube is not None:\n yt_id = self.youtube_id(url)\n add_a(root, youtube, url, None, None,\n \"youtube-video message_inline_image\",\n yt_id, already_thumbnailed=True)\n continue\n\n db_data = self.markdown.zulip_db_data\n if db_data and db_data['sent_by_bot']:\n continue\n\n if not url_embed_preview_enabled_for_realm(self.markdown.zulip_message,\n self.markdown.zulip_realm):\n continue\n\n try:\n extracted_data = link_preview.link_embed_data_from_cache(url)\n except NotFoundInCache:\n self.markdown.zulip_message.links_for_preview.add(url)\n continue\n if extracted_data:\n vm_id = self.vimeo_id(url)\n if vm_id is not None:\n vimeo_image = extracted_data.get('image')\n vimeo_title = self.vimeo_title(extracted_data)\n if vimeo_image is not None:\n add_a(root, vimeo_image, url, vimeo_title,\n None, \"vimeo-video message_inline_image\", vm_id,\n already_thumbnailed=True)\n if vimeo_title is not None:\n found_url.family.child.text = vimeo_title\n else:\n add_embed(root, url, extracted_data)\n\nclass Avatar(markdown.inlinepatterns.Pattern):\n def handleMatch(self, match: Match[str]) 
-> Optional[Element]:\n img = markdown.util.etree.Element('img')\n email_address = match.group('email')\n email = email_address.strip().lower()\n profile_id = None\n\n db_data = self.markdown.zulip_db_data\n if db_data is not None:\n user_dict = db_data['email_info'].get(email)\n if user_dict is not None:\n profile_id = user_dict['id']\n\n img.set('class', 'message_body_gravatar')\n img.set('src', '/avatar/{0}?s=30'.format(profile_id or email))\n img.set('title', email)\n img.set('alt', email)\n return img\n\ndef possible_avatar_emails(content: str) -> Set[str]:\n emails = set()\n for regex in [AVATAR_REGEX, GRAVATAR_REGEX]:\n matches = re.findall(regex, content)\n for email in matches:\n if email:\n emails.add(email)\n\n return emails\n\npath_to_name_to_codepoint = os.path.join(settings.STATIC_ROOT,\n \"generated\", \"emoji\", \"name_to_codepoint.json\")\nwith open(path_to_name_to_codepoint) as name_to_codepoint_file:\n name_to_codepoint = ujson.load(name_to_codepoint_file)\n\npath_to_codepoint_to_name = os.path.join(settings.STATIC_ROOT,\n \"generated\", \"emoji\", \"codepoint_to_name.json\")\nwith open(path_to_codepoint_to_name) as codepoint_to_name_file:\n codepoint_to_name = ujson.load(codepoint_to_name_file)\n\n# All of our emojis(non ZWJ sequences) belong to one of these unicode blocks:\n# \\U0001f100-\\U0001f1ff - Enclosed Alphanumeric Supplement\n# \\U0001f200-\\U0001f2ff - Enclosed Ideographic Supplement\n# \\U0001f300-\\U0001f5ff - Miscellaneous Symbols and Pictographs\n# \\U0001f600-\\U0001f64f - Emoticons (Emoji)\n# \\U0001f680-\\U0001f6ff - Transport and Map Symbols\n# \\U0001f900-\\U0001f9ff - Supplemental Symbols and Pictographs\n# \\u2000-\\u206f - General Punctuation\n# \\u2300-\\u23ff - Miscellaneous Technical\n# \\u2400-\\u243f - Control Pictures\n# \\u2440-\\u245f - Optical Character Recognition\n# \\u2460-\\u24ff - Enclosed Alphanumerics\n# \\u2500-\\u257f - Box Drawing\n# \\u2580-\\u259f - Block Elements\n# \\u25a0-\\u25ff - Geometric Shapes\n# \\u2600-\\u26ff - Miscellaneous Symbols\n# \\u2700-\\u27bf - Dingbats\n# \\u2900-\\u297f - Supplemental Arrows-B\n# \\u2b00-\\u2bff - Miscellaneous Symbols and Arrows\n# \\u3000-\\u303f - CJK Symbols and Punctuation\n# \\u3200-\\u32ff - Enclosed CJK Letters and Months\nunicode_emoji_regex = '(?P['\\\n '\\U0001F100-\\U0001F64F' \\\n '\\U0001F680-\\U0001F6FF' \\\n '\\U0001F900-\\U0001F9FF' \\\n '\\u2000-\\u206F' \\\n '\\u2300-\\u27BF' \\\n '\\u2900-\\u297F' \\\n '\\u2B00-\\u2BFF' \\\n '\\u3000-\\u303F' \\\n '\\u3200-\\u32FF' \\\n '])'\n# The equivalent JS regex is \\ud83c[\\udd00-\\udfff]|\\ud83d[\\udc00-\\ude4f]|\\ud83d[\\ude80-\\udeff]|\n# \\ud83e[\\udd00-\\uddff]|[\\u2000-\\u206f]|[\\u2300-\\u27bf]|[\\u2b00-\\u2bff]|[\\u3000-\\u303f]|\n# [\\u3200-\\u32ff]. See below comments for explanation. 
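\n# (For instance, U+1F600 GRINNING FACE is written in a JavaScript string as the\n# surrogate pair \\ud83d\\ude00, which the first alternative above matches.)\n# 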
The JS regex is used by marked.js for\n# frontend unicode emoji processing.\n# The JS regex \\ud83c[\\udd00-\\udfff]|\\ud83d[\\udc00-\\ude4f] represents U0001f100-\\U0001f64f\n# The JS regex \\ud83d[\\ude80-\\udeff] represents \\U0001f680-\\U0001f6ff\n# The JS regex \\ud83e[\\udd00-\\uddff] represents \\U0001f900-\\U0001f9ff\n# The JS regex [\\u2000-\\u206f] represents \\u2000-\\u206f\n# The JS regex [\\u2300-\\u27bf] represents \\u2300-\\u27bf\n# Similarly other JS regexes can be mapped to the respective unicode blocks.\n# For more information, please refer to the following article:\n# http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript\n\ndef make_emoji(codepoint: str, display_string: str) -> Element:\n # Replace underscore in emoji's title with space\n title = display_string[1:-1].replace(\"_\", \" \")\n span = markdown.util.etree.Element('span')\n span.set('class', 'emoji emoji-%s' % (codepoint,))\n span.set('title', title)\n span.text = display_string\n return span\n\ndef make_realm_emoji(src: str, display_string: str) -> Element:\n elt = markdown.util.etree.Element('img')\n elt.set('src', src)\n elt.set('class', 'emoji')\n elt.set(\"alt\", display_string)\n elt.set(\"title\", display_string[1:-1].replace(\"_\", \" \"))\n return elt\n\ndef unicode_emoji_to_codepoint(unicode_emoji: str) -> str:\n codepoint = hex(ord(unicode_emoji))[2:]\n # Unicode codepoints are minimum of length 4, padded\n # with zeroes if the length is less than zero.\n while len(codepoint) < 4:\n codepoint = '0' + codepoint\n return codepoint\n\nclass EmoticonTranslation(markdown.inlinepatterns.Pattern):\n \"\"\" Translates emoticons like `:)` into emoji like `:smile:`. \"\"\"\n def handleMatch(self, match: Match[str]) -> Optional[Element]:\n db_data = self.markdown.zulip_db_data\n if db_data is None or not db_data['translate_emoticons']:\n return None\n\n emoticon = match.group('emoticon')\n translated = translate_emoticons(emoticon)\n name = translated[1:-1]\n return make_emoji(name_to_codepoint[name], translated)\n\nclass UnicodeEmoji(markdown.inlinepatterns.Pattern):\n def handleMatch(self, match: Match[str]) -> Optional[Element]:\n orig_syntax = match.group('syntax')\n codepoint = unicode_emoji_to_codepoint(orig_syntax)\n if codepoint in codepoint_to_name:\n display_string = ':' + codepoint_to_name[codepoint] + ':'\n return make_emoji(codepoint, display_string)\n else:\n return None\n\nclass Emoji(markdown.inlinepatterns.Pattern):\n def handleMatch(self, match: Match[str]) -> Optional[Element]:\n orig_syntax = match.group(\"syntax\")\n name = orig_syntax[1:-1]\n\n active_realm_emoji = {} # type: Dict[str, Dict[str, str]]\n db_data = self.markdown.zulip_db_data\n if db_data is not None:\n active_realm_emoji = db_data['active_realm_emoji']\n\n if self.markdown.zulip_message and name in active_realm_emoji:\n return make_realm_emoji(active_realm_emoji[name]['source_url'], orig_syntax)\n elif name == 'zulip':\n return make_realm_emoji('/static/generated/emoji/images/emoji/unicode/zulip.png', orig_syntax)\n elif name in name_to_codepoint:\n return make_emoji(name_to_codepoint[name], orig_syntax)\n else:\n return None\n\ndef content_has_emoji_syntax(content: str) -> bool:\n return re.search(EMOJI_REGEX, content) is not None\n\nclass ModalLink(markdown.inlinepatterns.Pattern):\n \"\"\"\n A pattern that allows including in-app modal links in messages.\n \"\"\"\n\n def handleMatch(self, match: Match[str]) -> Element:\n relative_url = match.group('relative_url')\n text = match.group('text')\n\n a_tag = 
markdown.util.etree.Element(\"a\")\n a_tag.set(\"href\", relative_url)\n a_tag.set(\"title\", relative_url)\n a_tag.text = text\n\n return a_tag\n\nclass Tex(markdown.inlinepatterns.Pattern):\n def handleMatch(self, match: Match[str]) -> Element:\n rendered = render_tex(match.group('body'), is_inline=True)\n if rendered is not None:\n return etree.fromstring(rendered.encode('utf-8'))\n else: # Something went wrong while rendering\n span = markdown.util.etree.Element('span')\n span.set('class', 'tex-error')\n span.text = '$$' + match.group('body') + '$$'\n return span\n\nupload_title_re = re.compile(\"^(https?://[^/]*)?(/user_uploads/\\\\d+)(/[^/]*)?/[^/]*/(?P[^/]*)$\")\ndef url_filename(url: str) -> str:\n \"\"\"Extract the filename if a URL is an uploaded file, or return the original URL\"\"\"\n match = upload_title_re.match(url)\n if match:\n return match.group('filename')\n else:\n return url\n\ndef fixup_link(link: markdown.util.etree.Element, target_blank: bool=True) -> None:\n \"\"\"Set certain attributes we want on every link.\"\"\"\n if target_blank:\n link.set('target', '_blank')\n link.set('title', url_filename(link.get('href')))\n\n\ndef sanitize_url(url: str) -> Optional[str]:\n \"\"\"\n Sanitize a url against xss attacks.\n See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.\n \"\"\"\n try:\n parts = urllib.parse.urlparse(url.replace(' ', '%20'))\n scheme, netloc, path, params, query, fragment = parts\n except ValueError:\n # Bad url - so bad it couldn't be parsed.\n return ''\n\n # If there is no scheme or netloc and there is a '@' in the path,\n # treat it as a mailto: and set the appropriate scheme\n if scheme == '' and netloc == '' and '@' in path:\n scheme = 'mailto'\n elif scheme == '' and netloc == '' and len(path) > 0 and path[0] == '/':\n # Allow domain-relative links\n return urllib.parse.urlunparse(('', '', path, params, query, fragment))\n elif (scheme, netloc, path, params, query) == ('', '', '', '', '') and len(fragment) > 0:\n # Allow fragment links\n return urllib.parse.urlunparse(('', '', '', '', '', fragment))\n\n # Zulip modification: If scheme is not specified, assume http://\n # We re-enter sanitize_url because netloc etc. need to be re-parsed.\n if not scheme:\n return sanitize_url('http://' + url)\n\n locless_schemes = ['mailto', 'news', 'file', 'bitcoin']\n if netloc == '' and scheme not in locless_schemes:\n # This fails regardless of anything else.\n # Return immediately to save additional processing\n return None\n\n # Upstream code will accept a URL like javascript://foo because it\n # appears to have a netloc. Additionally there are plenty of other\n # schemes that do weird things like launch external programs. To be\n # on the safe side, we whitelist the scheme.\n if scheme not in ('http', 'https', 'ftp', 'mailto', 'file', 'bitcoin'):\n return None\n\n # Upstream code scans path, parameters, and query for colon characters\n # because\n #\n # some aliases [for javascript:] will appear to urllib.parse to have\n # no scheme. On top of that relative links (i.e.: \"foo/bar.html\")\n # have no scheme.\n #\n # We already converted an empty scheme to http:// above, so we skip\n # the colon check, which would also forbid a lot of legitimate URLs.\n\n # Url passes all tests. 
Return url as-is.\n return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))\n\ndef url_to_a(db_data: Optional[DbData], url: str, text: Optional[str]=None) -> Union[Element, str]:\n a = markdown.util.etree.Element('a')\n\n href = sanitize_url(url)\n target_blank = True\n if href is None:\n # Rejected by sanitize_url; render it as plain text.\n return url\n if text is None:\n text = markdown.util.AtomicString(url)\n\n href = rewrite_local_links_to_relative(db_data, href)\n target_blank = not href.startswith(\"#narrow\") and not href.startswith('mailto:')\n\n a.set('href', href)\n a.text = text\n fixup_link(a, target_blank)\n return a\n\nclass VerbosePattern(markdown.inlinepatterns.Pattern):\n def __init__(self, compiled_re: Pattern, md: markdown.Markdown) -> None:\n markdown.inlinepatterns.Pattern.__init__(self, ' ', md)\n\n # HACK: we just had python-markdown compile an empty regex.\n # Now replace with the real regex compiled with the flags we want.\n\n self.compiled_re = compiled_re\n\nclass AutoLink(VerbosePattern):\n def handleMatch(self, match: Match[str]) -> ElementStringNone:\n url = match.group('url')\n db_data = self.markdown.zulip_db_data\n return url_to_a(db_data, url)\n\nclass UListProcessor(markdown.blockprocessors.UListProcessor):\n \"\"\" Process unordered list blocks.\n\n Based on markdown.blockprocessors.UListProcessor, but does not accept\n '+' or '-' as a bullet character.\"\"\"\n\n TAG = 'ul'\n RE = re.compile('^[ ]{0,3}[*][ ]+(.*)')\n\n def __init__(self, parser: Any) -> None:\n\n # HACK: Set the tab length to 2 just for the initialization of\n # this class, so that bulleted lists (and only bulleted lists)\n # work off 2-space indentation.\n parser.markdown.tab_length = 2\n super().__init__(parser)\n parser.markdown.tab_length = 4\n\nclass ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):\n \"\"\" Process unordered list blocks.\n\n Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent\n \"\"\"\n\n def __init__(self, parser: Any) -> None:\n\n # HACK: Set the tab length to 2 just for the initialization of\n # this class, so that bulleted lists (and only bulleted lists)\n # work off 2-space indentation.\n parser.markdown.tab_length = 2\n super().__init__(parser)\n parser.markdown.tab_length = 4\n\nclass BugdownUListPreprocessor(markdown.preprocessors.Preprocessor):\n \"\"\" Allows unordered list blocks that come directly after a\n paragraph to be rendered as an unordered list\n\n Detects paragraphs that have a matching list item that comes\n directly after a line of text, and inserts a newline between\n to satisfy Markdown\"\"\"\n\n LI_RE = re.compile('^[ ]{0,3}[*][ ]+(.*)', re.MULTILINE)\n HANGING_ULIST_RE = re.compile('^.+\\\\n([ ]{0,3}[*][ ]+.*)', re.MULTILINE)\n\n def run(self, lines: List[str]) -> List[str]:\n \"\"\" Insert a newline between a paragraph and ulist if missing \"\"\"\n inserts = 0\n fence = None\n copy = lines[:]\n for i in range(len(lines) - 1):\n # Ignore anything that is inside a fenced code block\n m = FENCE_RE.match(lines[i])\n if not fence and m:\n fence = m.group('fence')\n elif fence and m and fence == m.group('fence'):\n fence = None\n\n # If we're not in a fenced block and we detect an upcoming list\n # hanging off a paragraph, add a newline\n if (not fence and lines[i] and\n self.LI_RE.match(lines[i+1]) and\n not self.LI_RE.match(lines[i])):\n\n copy.insert(i+inserts+1, '')\n inserts += 1\n return copy\n\nclass AutoNumberOListPreprocessor(markdown.preprocessors.Preprocessor):\n 
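# Illustrative example: the lines ['1. foo', '1. bar', '1. baz'] are renumbered\n # by renumber() below to '1.', '2.', '3.'; if the numbers are not all the same,\n # the original numbering is preserved.\n 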
\"\"\" Finds a sequence of lines numbered by the same number\"\"\"\n RE = re.compile(r'^([ ]*)(\\d+)\\.[ ]+(.*)')\n TAB_LENGTH = 2\n\n def run(self, lines: List[str]) -> List[str]:\n new_lines = [] # type: List[str]\n current_list = [] # type: List[Match[str]]\n current_indent = 0\n\n for line in lines:\n m = self.RE.match(line)\n\n # Remember if this line is a continuation of already started list\n is_next_item = (m and current_list\n and current_indent == len(m.group(1)) // self.TAB_LENGTH)\n\n if not is_next_item:\n # There is no more items in the list we were processing\n new_lines.extend(self.renumber(current_list))\n current_list = []\n\n if not m:\n # Ordinary line\n new_lines.append(line)\n elif is_next_item:\n # Another list item\n current_list.append(m)\n else:\n # First list item\n current_list = [m]\n current_indent = len(m.group(1)) // self.TAB_LENGTH\n\n new_lines.extend(self.renumber(current_list))\n\n return new_lines\n\n def renumber(self, mlist: List[Match[str]]) -> List[str]:\n if not mlist:\n return []\n\n start_number = int(mlist[0].group(2))\n\n # Change numbers only if every one is the same\n change_numbers = True\n for m in mlist:\n if int(m.group(2)) != start_number:\n change_numbers = False\n break\n\n lines = [] # type: List[str]\n counter = start_number\n\n for m in mlist:\n number = str(counter) if change_numbers else m.group(2)\n lines.append('%s%s. %s' % (m.group(1), number, m.group(3)))\n counter += 1\n\n return lines\n\n# Based on markdown.inlinepatterns.LinkPattern\nclass LinkPattern(markdown.inlinepatterns.Pattern):\n \"\"\" Return a link element from the given match. \"\"\"\n\n def handleMatch(self, m: Match[str]) -> Optional[Element]:\n href = m.group(9)\n if not href:\n return None\n\n if href[0] == \"<\":\n href = href[1:-1]\n href = sanitize_url(self.unescape(href.strip()))\n if href is None:\n return None\n\n db_data = self.markdown.zulip_db_data\n href = rewrite_local_links_to_relative(db_data, href)\n\n el = markdown.util.etree.Element('a')\n el.text = m.group(2)\n el.set('href', href)\n fixup_link(el, target_blank=(href[:1] != '#'))\n return el\n\ndef prepare_realm_pattern(source: str) -> str:\n \"\"\" Augment a realm filter so it only matches after start-of-string,\n whitespace, or opening delimiters, won't match if there are word\n characters directly after, and saves what was matched as \"name\". 
\"\"\"\n return r\"\"\"(?\"\"\" + source + r')(?!\\w)'\n\n# Given a regular expression pattern, linkifies groups that match it\n# using the provided format string to construct the URL.\nclass RealmFilterPattern(markdown.inlinepatterns.Pattern):\n \"\"\" Applied a given realm filter to the input \"\"\"\n\n def __init__(self, source_pattern: str,\n format_string: str,\n markdown_instance: Optional[markdown.Markdown]=None) -> None:\n self.pattern = prepare_realm_pattern(source_pattern)\n self.format_string = format_string\n markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)\n\n def handleMatch(self, m: Match[str]) -> Union[Element, str]:\n db_data = self.markdown.zulip_db_data\n return url_to_a(db_data,\n self.format_string % m.groupdict(),\n m.group(\"name\"))\n\nclass UserMentionPattern(markdown.inlinepatterns.Pattern):\n def handleMatch(self, m: Match[str]) -> Optional[Element]:\n match = m.group(2)\n\n db_data = self.markdown.zulip_db_data\n if self.markdown.zulip_message and db_data is not None:\n if match.startswith(\"**\") and match.endswith(\"**\"):\n name = match[2:-2]\n else:\n return None\n\n wildcard = mention.user_mention_matches_wildcard(name)\n\n id_syntax_match = re.match(r'.+\\|(?P\\d+)$', name)\n if id_syntax_match:\n id = id_syntax_match.group(\"user_id\")\n user = db_data['mention_data'].get_user_by_id(id)\n else:\n user = db_data['mention_data'].get_user(name)\n\n if wildcard:\n self.markdown.zulip_message.mentions_wildcard = True\n user_id = \"*\"\n elif user:\n self.markdown.zulip_message.mentions_user_ids.add(user['id'])\n name = user['full_name']\n user_id = str(user['id'])\n else:\n # Don't highlight @mentions that don't refer to a valid user\n return None\n\n el = markdown.util.etree.Element(\"span\")\n el.set('class', 'user-mention')\n el.set('data-user-id', user_id)\n el.text = \"@%s\" % (name,)\n return el\n return None\n\nclass UserGroupMentionPattern(markdown.inlinepatterns.Pattern):\n def handleMatch(self, m: Match[str]) -> Optional[Element]:\n match = m.group(2)\n\n db_data = self.markdown.zulip_db_data\n if self.markdown.zulip_message and db_data is not None:\n name = extract_user_group(match)\n user_group = db_data['mention_data'].get_user_group(name)\n if user_group:\n self.markdown.zulip_message.mentions_user_group_ids.add(user_group.id)\n name = user_group.name\n user_group_id = str(user_group.id)\n else:\n # Don't highlight @-mentions that don't refer to a valid user\n # group.\n return None\n\n el = markdown.util.etree.Element(\"span\")\n el.set('class', 'user-group-mention')\n el.set('data-user-group-id', user_group_id)\n el.text = \"@%s\" % (name,)\n return el\n return None\n\nclass StreamPattern(VerbosePattern):\n def find_stream_by_name(self, name: Match[str]) -> Optional[Dict[str, Any]]:\n db_data = self.markdown.zulip_db_data\n if db_data is None:\n return None\n stream = db_data['stream_names'].get(name)\n return stream\n\n def handleMatch(self, m: Match[str]) -> Optional[Element]:\n name = m.group('stream_name')\n\n if self.markdown.zulip_message:\n stream = self.find_stream_by_name(name)\n if stream is None:\n return None\n el = markdown.util.etree.Element('a')\n el.set('class', 'stream')\n el.set('data-stream-id', str(stream['id']))\n # TODO: We should quite possibly not be specifying the\n # href here and instead having the browser auto-add the\n # href when it processes a message with one of these, to\n # provide more clarity to API clients.\n stream_url = encode_stream(stream['id'], name)\n el.set('href', 
'/#narrow/stream/{stream_url}'.format(stream_url=stream_url))\n el.text = '#{stream_name}'.format(stream_name=name)\n return el\n return None\n\ndef possible_linked_stream_names(content: str) -> Set[str]:\n matches = re.findall(STREAM_LINK_REGEX, content, re.VERBOSE)\n return set(matches)\n\nclass AlertWordsNotificationProcessor(markdown.preprocessors.Preprocessor):\n def run(self, lines: Iterable[str]) -> Iterable[str]:\n db_data = self.markdown.zulip_db_data\n if self.markdown.zulip_message and db_data is not None:\n # We check for alert words here, the set of which are\n # dependent on which users may see this message.\n #\n # Our caller passes in the list of possible_words. We\n # don't do any special rendering; we just append the alert words\n # we find to the set self.markdown.zulip_message.alert_words.\n\n realm_words = db_data['possible_words']\n\n content = '\\n'.join(lines).lower()\n\n allowed_before_punctuation = \"|\".join([r'\\s', '^', r'[\\(\\\".,\\';\\[\\*`>]'])\n allowed_after_punctuation = \"|\".join([r'\\s', '$', r'[\\)\\\"\\?:.,\\';\\]!\\*`]'])\n\n for word in realm_words:\n escaped = re.escape(word.lower())\n match_re = re.compile('(?:%s)%s(?:%s)' %\n (allowed_before_punctuation,\n escaped,\n allowed_after_punctuation))\n if re.search(match_re, content):\n self.markdown.zulip_message.alert_words.add(word)\n\n return lines\n\n# This prevents realm_filters from running on the content of a\n# Markdown link, breaking up the link. This is a monkey-patch, but it\n# might be worth sending a version of this change upstream.\nclass AtomicLinkPattern(LinkPattern):\n def handleMatch(self, m: Match[str]) -> Optional[Element]:\n ret = LinkPattern.handleMatch(self, m)\n if ret is None:\n return None\n if not isinstance(ret, str):\n ret.text = markdown.util.AtomicString(ret.text)\n return ret\n\n# These are used as keys (\"realm_filters_keys\") to md_engines and the respective\n# realm filter caches\nDEFAULT_BUGDOWN_KEY = -1\nZEPHYR_MIRROR_BUGDOWN_KEY = -2\n\nclass Bugdown(markdown.Extension):\n def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None:\n # define default configs\n self.config = {\n \"realm_filters\": [kwargs['realm_filters'],\n \"Realm-specific filters for realm_filters_key %s\" % (kwargs['realm'],)],\n \"realm\": [kwargs['realm'], \"Realm id\"],\n \"code_block_processor_disabled\": [kwargs['code_block_processor_disabled'],\n \"Disabled for email gateway\"]\n }\n\n super().__init__(*args, **kwargs)\n\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n del md.preprocessors['reference']\n\n if self.getConfig('code_block_processor_disabled'):\n del md.parser.blockprocessors['code']\n\n for k in ('image_link', 'image_reference', 'automail',\n 'autolink', 'link', 'reference', 'short_reference',\n 'escape', 'strong_em', 'emphasis', 'emphasis2',\n 'linebreak', 'strong', 'backtick'):\n del md.inlinePatterns[k]\n try:\n # linebreak2 was removed upstream in version 3.2.1, so\n # don't throw an error if it is not there\n del md.inlinePatterns['linebreak2']\n except Exception:\n pass\n\n # Having the extension operations split into a bunch of\n # smaller functions both helps with organization and\n # simplifies profiling of the markdown engine build time.\n self.extend_alert_words(md)\n self.extend_text_formatting(md)\n self.extend_block_formatting(md)\n self.extend_avatars(md)\n self.extend_modal_links(md)\n self.extend_mentions(md)\n self.extend_stream_links(md)\n self.extend_emojis(md)\n self.extend_misc(md)\n\n def 
extend_alert_words(self, md: markdown.Markdown) -> None:\n md.preprocessors.add(\"custom_text_notifications\", AlertWordsNotificationProcessor(md), \"_end\")\n\n def extend_text_formatting(self, md: markdown.Markdown) -> None:\n # Inline code block without whitespace stripping\n md.inlinePatterns.add(\n \"backtick\",\n BacktickPattern(r'(?:(?backtick')\n\n # Custom bold syntax: **foo** but not __foo__\n md.inlinePatterns.add('strong',\n markdown.inlinepatterns.SimpleTagPattern(r'(\\*\\*)([^\\n]+?)\\2', 'strong'),\n '>not_strong')\n\n # Custom strikethrough syntax: ~~foo~~\n md.inlinePatterns.add('del',\n markdown.inlinepatterns.SimpleTagPattern(\n r'(?strong')\n\n # str inside ** must start and end with a word character\n # it need for things like \"const char *x = (char *)y\"\n md.inlinePatterns.add(\n 'emphasis',\n markdown.inlinepatterns.SimpleTagPattern(r'(\\*)(?!\\s+)([^\\*^\\n]+)(?strong')\n\n def extend_block_formatting(self, md: markdown.Markdown) -> None:\n for k in ('hashheader', 'setextheader', 'olist', 'ulist', 'indent'):\n del md.parser.blockprocessors[k]\n\n md.parser.blockprocessors.add('ulist', UListProcessor(md.parser), '>hr')\n md.parser.blockprocessors.add('indent', ListIndentProcessor(md.parser), '[ ]?(.*)')\n md.parser.blockprocessors['quote'].RE = re.compile(\n r'(^|\\n)(?!(?:[ ]{0,3}>\\s*(?:$|\\n))*(?:$|\\n))'\n r'[ ]{0,3}>[ ]?(.*)')\n\n def extend_avatars(self, md: markdown.Markdown) -> None:\n # Note that !gravatar syntax should be deprecated long term.\n md.inlinePatterns.add('avatar', Avatar(AVATAR_REGEX, md), '>backtick')\n md.inlinePatterns.add('gravatar', Avatar(GRAVATAR_REGEX, md), '>backtick')\n\n def extend_modal_links(self, md: markdown.Markdown) -> None:\n md.inlinePatterns.add(\n 'modal_link',\n ModalLink(r'!modal_link\\((?P[^)]*), (?P[^)]*)\\)'),\n '>avatar')\n\n def extend_mentions(self, md: markdown.Markdown) -> None:\n md.inlinePatterns.add('usermention', UserMentionPattern(mention.find_mentions, md), '>backtick')\n md.inlinePatterns.add('usergroupmention',\n UserGroupMentionPattern(mention.user_group_mentions, md),\n '>backtick')\n\n def extend_stream_links(self, md: markdown.Markdown) -> None:\n md.inlinePatterns.add('stream', StreamPattern(verbose_compile(STREAM_LINK_REGEX), md), '>backtick')\n\n def extend_emojis(self, md: markdown.Markdown) -> None:\n md.inlinePatterns.add(\n 'tex',\n Tex(r'\\B(?[^\\n_$](\\\\\\$|[^$\\n])*)\\$\\$(?!\\$)\\B'),\n '>backtick')\n md.inlinePatterns.add('emoji', Emoji(EMOJI_REGEX, md), 'emoji')\n md.inlinePatterns.add('unicodeemoji', UnicodeEmoji(unicode_emoji_regex), '_end')\n\n def extend_misc(self, md: markdown.Markdown) -> None:\n md.inlinePatterns.add('link', AtomicLinkPattern(markdown.inlinepatterns.LINK_RE, md), '>avatar')\n\n for (pattern, format_string, id) in self.getConfig(\"realm_filters\"):\n md.inlinePatterns.add('realm_filters/%s' % (pattern,),\n RealmFilterPattern(pattern, format_string, md), '>link')\n\n md.inlinePatterns.add('autolink', AutoLink(get_web_link_regex(), md), '>link')\n\n md.preprocessors.add('hanging_ulists',\n BugdownUListPreprocessor(md),\n \"_begin\")\n\n md.preprocessors.add('auto_number_olist',\n AutoNumberOListPreprocessor(md),\n \"_begin\")\n\n md.treeprocessors.add(\"inline_interesting_links\", InlineInterestingLinkProcessor(md, self), \"_end\")\n\n if settings.CAMO_URI:\n md.treeprocessors.add(\"rewrite_to_https\", InlineHttpsProcessor(md), \"_end\")\n\n if self.getConfig(\"realm\") == ZEPHYR_MIRROR_BUGDOWN_KEY:\n # Disable almost all inline patterns for zephyr mirror\n # users' 
traffic that is mirrored. Note that\n # inline_interesting_links is a treeprocessor and thus is\n # not removed\n for k in list(md.inlinePatterns.keys()):\n if k not in [\"autolink\"]:\n del md.inlinePatterns[k]\n for k in list(md.treeprocessors.keys()):\n if k not in [\"inline_interesting_links\", \"inline\", \"rewrite_to_https\"]:\n del md.treeprocessors[k]\n for k in list(md.preprocessors.keys()):\n if k not in [\"custom_text_notifications\"]:\n del md.preprocessors[k]\n for k in list(md.parser.blockprocessors.keys()):\n if k not in [\"paragraph\"]:\n del md.parser.blockprocessors[k]\n\nmd_engines = {} # type: Dict[Tuple[int, bool], markdown.Markdown]\nrealm_filter_data = {} # type: Dict[int, List[Tuple[str, str, int]]]\n\nclass EscapeHtml(markdown.Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n del md.preprocessors['html_block']\n del md.inlinePatterns['html']\n\ndef make_md_engine(realm_filters_key: int, email_gateway: bool) -> None:\n md_engine_key = (realm_filters_key, email_gateway)\n if md_engine_key in md_engines:\n del md_engines[md_engine_key]\n\n realm_filters = realm_filter_data[realm_filters_key]\n md_engines[md_engine_key] = build_engine(\n realm_filters=realm_filters,\n realm_filters_key=realm_filters_key,\n email_gateway=email_gateway,\n )\n\ndef build_engine(realm_filters: List[Tuple[str, str, int]],\n realm_filters_key: int,\n email_gateway: bool) -> markdown.Markdown:\n engine = markdown.Markdown(\n output_format = 'html',\n extensions = [\n nl2br.makeExtension(),\n tables.makeExtension(),\n codehilite.makeExtension(\n linenums=False,\n guess_lang=False\n ),\n fenced_code.makeExtension(),\n EscapeHtml(),\n Bugdown(realm_filters=realm_filters,\n realm=realm_filters_key,\n code_block_processor_disabled=email_gateway)])\n return engine\n\ndef topic_links(realm_filters_key: int, topic_name: str) -> List[str]:\n matches = [] # type: List[str]\n\n realm_filters = realm_filters_for_realm(realm_filters_key)\n\n for realm_filter in realm_filters:\n pattern = prepare_realm_pattern(realm_filter[0])\n for m in re.finditer(pattern, topic_name):\n matches += [realm_filter[1] % m.groupdict()]\n return matches\n\ndef maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gateway: bool) -> None:\n # If realm_filters_key is None, load all filters\n global realm_filter_data\n if realm_filters_key is None:\n all_filters = all_realm_filters()\n all_filters[DEFAULT_BUGDOWN_KEY] = []\n for realm_filters_key, filters in all_filters.items():\n realm_filter_data[realm_filters_key] = filters\n make_md_engine(realm_filters_key, email_gateway)\n # Hack to ensure that getConfig(\"realm\") is right for mirrored Zephyrs\n realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = []\n make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False)\n else:\n realm_filters = realm_filters_for_realm(realm_filters_key)\n if realm_filters_key not in realm_filter_data or \\\n realm_filter_data[realm_filters_key] != realm_filters:\n # Realm filters data has changed, update `realm_filter_data` and any\n # of the existing markdown engines using this set of realm filters.\n realm_filter_data[realm_filters_key] = realm_filters\n for email_gateway_flag in [True, False]:\n if (realm_filters_key, email_gateway_flag) in md_engines:\n # Update only existing engines(if any), don't create new one.\n make_md_engine(realm_filters_key, email_gateway_flag)\n\n if (realm_filters_key, email_gateway) not in md_engines:\n # Markdown engine corresponding to this key doesn't exists so 
create one.\n make_md_engine(realm_filters_key, email_gateway)\n\n# We want to log Markdown parser failures, but shouldn't log the actual input\n# message for privacy reasons. The compromise is to replace all alphanumeric\n# characters with 'x'.\n#\n# We also use repr() to improve reproducibility, and to escape terminal control\n# codes, which can do surprisingly nasty things.\n_privacy_re = re.compile('\\\\w', flags=re.UNICODE)\ndef privacy_clean_markdown(content: str) -> str:\n return repr(_privacy_re.sub('x', content))\n\ndef log_bugdown_error(msg: str) -> None:\n \"\"\"We use this unusual logging approach to log the bugdown error, in\n order to prevent AdminNotifyHandler from sending the santized\n original markdown formatting into another Zulip message, which\n could cause an infinite exception loop.\"\"\"\n bugdown_logger.error(msg)\n\ndef get_email_info(realm_id: int, emails: Set[str]) -> Dict[str, FullNameInfo]:\n if not emails:\n return dict()\n\n q_list = {\n Q(email__iexact=email.strip().lower())\n for email in emails\n }\n\n rows = UserProfile.objects.filter(\n realm_id=realm_id\n ).filter(\n functools.reduce(lambda a, b: a | b, q_list),\n ).values(\n 'id',\n 'email',\n )\n\n dct = {\n row['email'].strip().lower(): row\n for row in rows\n }\n return dct\n\ndef get_full_name_info(realm_id: int, full_names: Set[str]) -> Dict[str, FullNameInfo]:\n if not full_names:\n return dict()\n\n # Remove the trailing part of the `user|id` mention syntax.\n name_re = r'(?P.+)\\|\\d+$'\n for full_name in full_names.copy():\n name_syntax_match = re.match(name_re, full_name)\n if name_syntax_match:\n full_names.remove(full_name)\n full_names.add(name_syntax_match.group(\"full_name\"))\n\n q_list = {\n Q(full_name__iexact=full_name)\n for full_name in full_names\n }\n\n rows = UserProfile.objects.filter(\n realm_id=realm_id,\n is_active=True,\n ).filter(\n functools.reduce(lambda a, b: a | b, q_list),\n ).values(\n 'id',\n 'full_name',\n 'email',\n )\n dct = {} # type: Dict[str, FullNameInfo]\n for row in rows:\n key = row['full_name'].lower()\n # To insert users with duplicate full names in the dict\n if key in dct:\n key = '{}|{}'.format(key, row['id'])\n dct[key] = row\n return dct\n\nclass MentionData:\n def __init__(self, realm_id: int, content: str) -> None:\n full_names = possible_mentions(content)\n self.full_name_info = get_full_name_info(realm_id, full_names)\n self.user_id_info = {\n row['id']: row\n for row in self.full_name_info.values()\n }\n self.init_user_group_data(realm_id=realm_id, content=content)\n\n def init_user_group_data(self,\n realm_id: int,\n content: str) -> None:\n user_group_names = possible_user_group_mentions(content)\n self.user_group_name_info = get_user_group_name_info(realm_id, user_group_names)\n self.user_group_members = defaultdict(list) # type: Dict[int, List[int]]\n group_ids = [group.id for group in self.user_group_name_info.values()]\n\n if not group_ids:\n # Early-return to avoid the cost of hitting the ORM,\n # which shows up in profiles.\n return\n\n membership = UserGroupMembership.objects.filter(user_group_id__in=group_ids)\n for info in membership.values('user_group_id', 'user_profile_id'):\n group_id = info['user_group_id']\n user_profile_id = info['user_profile_id']\n self.user_group_members[group_id].append(user_profile_id)\n\n def get_user(self, name: str) -> Optional[FullNameInfo]:\n return self.full_name_info.get(name.lower(), None)\n\n def get_user_by_id(self, id: str) -> Optional[FullNameInfo]:\n return self.user_id_info.get(int(id), 
None)\n\n def get_user_ids(self) -> Set[int]:\n \"\"\"\n Returns the user IDs that might have been mentioned by this\n content. Note that because this data structure has not parsed\n the message and does not know about escaping/code blocks, this\n will overestimate the list of user ids.\n \"\"\"\n return set(self.user_id_info.keys())\n\n def get_user_group(self, name: str) -> Optional[UserGroup]:\n return self.user_group_name_info.get(name.lower(), None)\n\n def get_group_members(self, user_group_id: int) -> List[int]:\n return self.user_group_members.get(user_group_id, [])\n\ndef get_user_group_name_info(realm_id: int, user_group_names: Set[str]) -> Dict[str, UserGroup]:\n if not user_group_names:\n return dict()\n\n rows = UserGroup.objects.filter(realm_id=realm_id,\n name__in=user_group_names)\n dct = {row.name.lower(): row for row in rows}\n return dct\n\ndef get_stream_name_info(realm: Realm, stream_names: Set[str]) -> Dict[str, FullNameInfo]:\n if not stream_names:\n return dict()\n\n q_list = {\n Q(name=name)\n for name in stream_names\n }\n\n rows = get_active_streams(\n realm=realm,\n ).filter(\n functools.reduce(lambda a, b: a | b, q_list),\n ).values(\n 'id',\n 'name',\n )\n\n dct = {\n row['name']: row\n for row in rows\n }\n return dct\n\n\ndef do_convert(content: str,\n message: Optional[Message]=None,\n message_realm: Optional[Realm]=None,\n possible_words: Optional[Set[str]]=None,\n sent_by_bot: Optional[bool]=False,\n translate_emoticons: Optional[bool]=False,\n mention_data: Optional[MentionData]=None,\n email_gateway: Optional[bool]=False) -> str:\n \"\"\"Convert Markdown to HTML, with Zulip-specific settings and hacks.\"\"\"\n # This logic is a bit convoluted, but the overall goal is to support a range of use cases:\n # * Nothing is passed in other than content -> just run default options (e.g. 
for docs)\n # * message is passed, but no realm is -> look up realm from message\n # * message_realm is passed -> use that realm for bugdown purposes\n if message is not None:\n if message_realm is None:\n message_realm = message.get_realm()\n if message_realm is None:\n realm_filters_key = DEFAULT_BUGDOWN_KEY\n else:\n realm_filters_key = message_realm.id\n\n if message is not None and message_realm is not None:\n if message_realm.is_zephyr_mirror_realm:\n if message.sending_client.name == \"zephyr_mirror\":\n # Use slightly customized Markdown processor for content\n # delivered via zephyr_mirror\n realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY\n\n maybe_update_markdown_engines(realm_filters_key, email_gateway)\n md_engine_key = (realm_filters_key, email_gateway)\n\n if md_engine_key in md_engines:\n _md_engine = md_engines[md_engine_key]\n else:\n if DEFAULT_BUGDOWN_KEY not in md_engines:\n maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False)\n\n _md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)]\n # Reset the parser; otherwise it will get slower over time.\n _md_engine.reset()\n\n # Filters such as UserMentionPattern need a message.\n _md_engine.zulip_message = message\n _md_engine.zulip_realm = message_realm\n _md_engine.zulip_db_data = None # for now\n\n # Pre-fetch data from the DB that is used in the bugdown thread\n if message is not None:\n assert message_realm is not None # ensured above if message is not None\n if possible_words is None:\n possible_words = set() # Set[str]\n\n # Here we fetch the data structures needed to render\n # mentions/avatars/stream mentions from the database, but only\n # if there is syntax in the message that might use them, since\n # the fetches are somewhat expensive and these types of syntax\n # are uncommon enough that it's a useful optimization.\n\n if mention_data is None:\n mention_data = MentionData(message_realm.id, content)\n\n emails = possible_avatar_emails(content)\n email_info = get_email_info(message_realm.id, emails)\n\n stream_names = possible_linked_stream_names(content)\n stream_name_info = get_stream_name_info(message_realm, stream_names)\n\n if content_has_emoji_syntax(content):\n active_realm_emoji = message_realm.get_active_emoji()\n else:\n active_realm_emoji = dict()\n\n _md_engine.zulip_db_data = {\n 'possible_words': possible_words,\n 'email_info': email_info,\n 'mention_data': mention_data,\n 'active_realm_emoji': active_realm_emoji,\n 'realm_uri': message_realm.uri,\n 'sent_by_bot': sent_by_bot,\n 'stream_names': stream_name_info,\n 'translate_emoticons': translate_emoticons,\n }\n\n try:\n # Spend at most 5 seconds rendering; this protects the backend\n # from being overloaded by bugs (e.g. markdown logic that is\n # extremely inefficient in corner cases) as well as user\n # errors (e.g. 
a realm filter that makes some syntax\n # infinite-loop).\n rendered_content = timeout(5, _md_engine.convert, content)\n\n # Throw an exception if the content is huge; this protects the\n # rest of the codebase from any bugs where we end up rendering\n # something huge.\n if len(rendered_content) > MAX_MESSAGE_LENGTH * 10:\n raise BugdownRenderingException('Rendered content exceeds %s characters' %\n (MAX_MESSAGE_LENGTH * 10,))\n return rendered_content\n except Exception:\n cleaned = privacy_clean_markdown(content)\n # NOTE: Don't change this message without also changing the\n # logic in logging_handlers.py or we can create recursive\n # exceptions.\n exception_message = ('Exception in Markdown parser: %sInput (sanitized) was: %s'\n % (traceback.format_exc(), cleaned))\n bugdown_logger.exception(exception_message)\n\n raise BugdownRenderingException()\n finally:\n # These next three lines are slightly paranoid, since\n # we always set these right before actually using the\n # engine, but better safe than sorry.\n _md_engine.zulip_message = None\n _md_engine.zulip_realm = None\n _md_engine.zulip_db_data = None\n\nbugdown_time_start = 0.0\nbugdown_total_time = 0.0\nbugdown_total_requests = 0\n\ndef get_bugdown_time() -> float:\n return bugdown_total_time\n\ndef get_bugdown_requests() -> int:\n return bugdown_total_requests\n\ndef bugdown_stats_start() -> None:\n global bugdown_time_start\n bugdown_time_start = time.time()\n\ndef bugdown_stats_finish() -> None:\n global bugdown_total_time\n global bugdown_total_requests\n global bugdown_time_start\n bugdown_total_requests += 1\n bugdown_total_time += (time.time() - bugdown_time_start)\n\ndef convert(content: str,\n message: Optional[Message]=None,\n message_realm: Optional[Realm]=None,\n possible_words: Optional[Set[str]]=None,\n sent_by_bot: Optional[bool]=False,\n translate_emoticons: Optional[bool]=False,\n mention_data: Optional[MentionData]=None,\n email_gateway: Optional[bool]=False) -> str:\n bugdown_stats_start()\n ret = do_convert(content, message, message_realm,\n possible_words, sent_by_bot, translate_emoticons,\n mention_data, email_gateway)\n bugdown_stats_finish()\n return ret\n"}}},
int]]","int","bool","int","str","Optional[int]","bool","str","str","int","Set[str]","int","Set[str]","int","str","int","str","str","str","str","int","int","Set[str]","Realm","Set[str]","str","str"],"string":"[\n \"str\",\n \"Optional[DbData]\",\n \"str\",\n \"Element\",\n \"Callable[[Element], Optional[_T]]\",\n \"Element\",\n \"Callable[[Element], Optional[_T]]\",\n \"Element\",\n \"str\",\n \"str\",\n \"Element\",\n \"str\",\n \"Dict[str, Any]\",\n \"str\",\n \"str\",\n \"str\",\n \"Element\",\n \"str\",\n \"Match[str]\",\n \"markdown.Markdown\",\n \"'Bugdown'\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"Dict[str, Any]\",\n \"str\",\n \"List[Dict[str, str]]\",\n \"List[Dict[str, Any]]\",\n \"List[Dict[str, Any]]\",\n \"str\",\n \"str\",\n \"Element\",\n \"Element\",\n \"ResultWithFamily\",\n \"Element\",\n \"Element\",\n \"int\",\n \"str\",\n \"Element\",\n \"Match[str]\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"Match[str]\",\n \"Match[str]\",\n \"Match[str]\",\n \"str\",\n \"Match[str]\",\n \"Match[str]\",\n \"str\",\n \"markdown.util.etree.Element\",\n \"str\",\n \"Optional[DbData]\",\n \"str\",\n \"Pattern\",\n \"markdown.Markdown\",\n \"Match[str]\",\n \"Any\",\n \"Any\",\n \"List[str]\",\n \"List[str]\",\n \"List[Match[str]]\",\n \"Match[str]\",\n \"str\",\n \"str\",\n \"str\",\n \"Match[str]\",\n \"Match[str]\",\n \"Match[str]\",\n \"Match[str]\",\n \"Match[str]\",\n \"str\",\n \"Iterable[str]\",\n \"Match[str]\",\n \"Any\",\n \"Union[bool, int, List[Any]]\",\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"markdown.Markdown\",\n \"markdown.Markdown\",\n \"markdown.Markdown\",\n \"markdown.Markdown\",\n \"markdown.Markdown\",\n \"markdown.Markdown\",\n \"markdown.Markdown\",\n \"markdown.Markdown\",\n \"markdown.Markdown\",\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"int\",\n \"bool\",\n \"List[Tuple[str, str, int]]\",\n \"int\",\n \"bool\",\n \"int\",\n \"str\",\n \"Optional[int]\",\n \"bool\",\n \"str\",\n \"str\",\n \"int\",\n \"Set[str]\",\n \"int\",\n \"Set[str]\",\n \"int\",\n \"str\",\n \"int\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"int\",\n \"int\",\n \"Set[str]\",\n \"Realm\",\n \"Set[str]\",\n \"str\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[2292,6463,6487,8367,8401,9299,9345,10549,10571,10590,12509,12524,12545,14193,17161,19385,20106,20646,20871,21409,21437,21675,22719,23147,25231,25882,26092,26705,26913,26945,27003,27053,30457,31520,34655,34914,34934,36756,36773,36847,37919,38013,41674,42333,45443,45464,45790,45811,46085,46501,46948,47374,48177,48415,48782,49332,49576,49831,52200,52223,52866,52879,53221,53691,54240,55071,56144,57235,58045,58636,59211,59248,59554,59859,61296,62254,62496,63416,63625,65133,65604,65619,66174,66205,67444,67617,68919,69516,69813,70039,70392,70561,71036,73154,73185,73330,73350,73756,73820,73857,74446,74463,74860,74890,76815,76903,77230,77243,77738,77755,78838,78852,79249,79292,80134,80259,80747,80889,81010,81033,81336,81357,81798,87545],"string":"[\n 2292,\n 6463,\n 6487,\n 8367,\n 8401,\n 9299,\n 9345,\n 10549,\n 10571,\n 10590,\n 12509,\n 12524,\n 12545,\n 14193,\n 17161,\n 19385,\n 20106,\n 20646,\n 20871,\n 21409,\n 21437,\n 21675,\n 22719,\n 23147,\n 25231,\n 25882,\n 26092,\n 26705,\n 26913,\n 26945,\n 27003,\n 27053,\n 30457,\n 31520,\n 34655,\n 34914,\n 34934,\n 36756,\n 36773,\n 36847,\n 37919,\n 38013,\n 41674,\n 42333,\n 45443,\n 45464,\n 45790,\n 45811,\n 46085,\n 46501,\n 46948,\n 47374,\n 48177,\n 48415,\n 48782,\n 49332,\n 
49576,\n 49831,\n 52200,\n 52223,\n 52866,\n 52879,\n 53221,\n 53691,\n 54240,\n 55071,\n 56144,\n 57235,\n 58045,\n 58636,\n 59211,\n 59248,\n 59554,\n 59859,\n 61296,\n 62254,\n 62496,\n 63416,\n 63625,\n 65133,\n 65604,\n 65619,\n 66174,\n 66205,\n 67444,\n 67617,\n 68919,\n 69516,\n 69813,\n 70039,\n 70392,\n 70561,\n 71036,\n 73154,\n 73185,\n 73330,\n 73350,\n 73756,\n 73820,\n 73857,\n 74446,\n 74463,\n 74860,\n 74890,\n 76815,\n 76903,\n 77230,\n 77243,\n 77738,\n 77755,\n 78838,\n 78852,\n 79249,\n 79292,\n 80134,\n 80259,\n 80747,\n 80889,\n 81010,\n 81033,\n 81336,\n 81357,\n 81798,\n 87545\n]"},"type_annotation_ends":{"kind":"list like","value":[2295,6479,6490,8374,8434,9306,9378,10556,10574,10593,12516,12527,12559,14196,17164,19388,20113,20649,20881,21426,21446,21678,22722,23150,25234,25885,26095,26719,26916,26965,27023,27073,30460,31523,34662,34921,34950,36763,36780,36850,37922,38020,41684,42336,45446,45467,45793,45814,46088,46511,46958,47384,48180,48425,48792,49335,49603,49834,52216,52226,52873,52896,53231,53694,54243,55080,56153,57251,58055,58639,59214,59251,59564,59869,61306,62264,62506,63419,63638,65143,65607,65646,66191,66219,67461,67634,68936,69533,69830,70056,70409,70578,71053,73171,73199,73333,73354,73782,73823,73861,74449,74466,74873,74894,76818,76906,77233,77251,77741,77763,78841,78855,79252,79295,80137,80262,80750,80892,81013,81041,81341,81365,81801,87548],"string":"[\n 2295,\n 6479,\n 6490,\n 8374,\n 8434,\n 9306,\n 9378,\n 10556,\n 10574,\n 10593,\n 12516,\n 12527,\n 12559,\n 14196,\n 17164,\n 19388,\n 20113,\n 20649,\n 20881,\n 21426,\n 21446,\n 21678,\n 22722,\n 23150,\n 25234,\n 25885,\n 26095,\n 26719,\n 26916,\n 26965,\n 27023,\n 27073,\n 30460,\n 31523,\n 34662,\n 34921,\n 34950,\n 36763,\n 36780,\n 36850,\n 37922,\n 38020,\n 41684,\n 42336,\n 45446,\n 45467,\n 45793,\n 45814,\n 46088,\n 46511,\n 46958,\n 47384,\n 48180,\n 48425,\n 48792,\n 49335,\n 49603,\n 49834,\n 52216,\n 52226,\n 52873,\n 52896,\n 53231,\n 53694,\n 54243,\n 55080,\n 56153,\n 57251,\n 58055,\n 58639,\n 59214,\n 59251,\n 59564,\n 59869,\n 61306,\n 62264,\n 62506,\n 63419,\n 63638,\n 65143,\n 65607,\n 65646,\n 66191,\n 66219,\n 67461,\n 67634,\n 68936,\n 69533,\n 69830,\n 70056,\n 70409,\n 70578,\n 71053,\n 73171,\n 73199,\n 73333,\n 73354,\n 73782,\n 73823,\n 73861,\n 74449,\n 74466,\n 74873,\n 74894,\n 76818,\n 76906,\n 77233,\n 77251,\n 77741,\n 77763,\n 78841,\n 78855,\n 79252,\n 79295,\n 80137,\n 80262,\n 80750,\n 80892,\n 81013,\n 81041,\n 81341,\n 81365,\n 81801,\n 87548\n]"}}},{"rowIdx":1352,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bugdown/api_arguments_table_generator.py"},"contents":{"kind":"string","value":"import re\nimport os\nimport ujson\n\nfrom django.utils.html import escape as escape_html\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom zerver.lib.openapi import get_openapi_parameters\nfrom typing import Any, Dict, Optional, List\nimport markdown\n\nREGEXP = re.compile(r'\\{generate_api_arguments_table\\|\\s*(.+?)\\s*\\|\\s*(.+)\\s*\\}')\n\n\nclass MarkdownArgumentsTableGenerator(Extension):\n def __init__(self, configs: Optional[Dict[str, Any]]=None) -> None:\n if configs is None:\n configs = {}\n self.config = {\n 'base_path': ['.', 'Default location from which to evaluate relative paths for the JSON files.'],\n }\n for key, value in configs.items():\n self.setConfig(key, value)\n\n def extendMarkdown(self, md: markdown.Markdown, 
md_globals: Dict[str, Any]) -> None:\n md.preprocessors.add(\n 'generate_api_arguments', APIArgumentsTablePreprocessor(md, self.getConfigs()), '_begin'\n )\n\n\nclass APIArgumentsTablePreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:\n super(APIArgumentsTablePreprocessor, self).__init__(md)\n self.base_path = config['base_path']\n\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if not match:\n continue\n\n filename = match.group(1)\n doc_name = match.group(2)\n filename = os.path.expanduser(filename)\n\n is_openapi_format = filename.endswith('.yaml')\n\n if not os.path.isabs(filename):\n parent_dir = self.base_path\n filename = os.path.normpath(os.path.join(parent_dir, filename))\n\n if is_openapi_format:\n endpoint, method = doc_name.rsplit(':', 1)\n arguments = [] # type: List[Dict[str, Any]]\n\n try:\n arguments = get_openapi_parameters(endpoint, method)\n except KeyError as e:\n # Don't raise an exception if the \"parameters\"\n # field is missing; we assume that's because the\n # endpoint doesn't accept any parameters\n if e.args != ('parameters',):\n raise e\n else:\n with open(filename, 'r') as fp:\n json_obj = ujson.load(fp)\n arguments = json_obj[doc_name]\n\n if arguments:\n text = self.render_table(arguments)\n else:\n text = ['This endpoint does not consume any arguments.']\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def render_table(self, arguments: List[Dict[str, Any]]) -> List[str]:\n table = []\n beginning = \"\"\"\n<table class=\"table\">\n <thead>\n <tr>\n <th>Argument</th>\n <th>Example</th>\n <th>Required</th>\n <th>Description</th>\n </tr>\n </thead>\n<tbody>\n\"\"\"\n tr = \"\"\"\n<tr>\n <td><code>{argument}</code></td>\n <td><code>{example}</code></td>\n <td>{required}</td>\n <td>{description}</td>\n</tr>\n\"\"\"\n\n table.append(beginning)\n\n md_engine = markdown.Markdown(extensions=[])\n\n for argument in arguments:\n description = argument['description']\n\n oneof = ['`' + item + '`'\n for item in argument.get('schema', {}).get('enum', [])]\n if oneof:\n description += '\\nMust be one of: {}.'.format(', '.join(oneof))\n\n default = argument.get('schema', {}).get('default')\n if default is not None:\n description += '\\nDefaults to `{}`.'.format(ujson.dumps(default))\n\n # TODO: Swagger allows indicating where the argument goes\n # (path, querystring, form data...). A column in the table should\n # be added for this.\n table.append(tr.format(\n argument=argument.get('argument') or argument.get('name'),\n # Show this as JSON to avoid changing the quoting style, which\n # may cause problems with JSON encoding.\n example=escape_html(ujson.dumps(argument['example'])),\n required='Yes' if argument.get('required') else 'No',\n description=md_engine.convert(description),\n ))\n\n table.append(\"</tbody>\")\n table.append(\"</table>
\")\n\n return table\n\ndef makeExtension(*args: Any, **kwargs: str) -> MarkdownArgumentsTableGenerator:\n return MarkdownArgumentsTableGenerator(kwargs)\n"},"type_annotations":{"kind":"list like","value":["markdown.Markdown","Dict[str, Any]","markdown.Markdown","Dict[str, Any]","List[str]","List[Dict[str, Any]]","Any","str"],"string":"[\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"List[str]\",\n \"List[Dict[str, Any]]\",\n \"Any\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[811,842,1088,1115,1275,3532,5265,5280],"string":"[\n 811,\n 842,\n 1088,\n 1115,\n 1275,\n 3532,\n 5265,\n 5280\n]"},"type_annotation_ends":{"kind":"list like","value":[828,856,1105,1129,1284,3552,5268,5283],"string":"[\n 828,\n 856,\n 1105,\n 1129,\n 1284,\n 3552,\n 5268,\n 5283\n]"}}},{"rowIdx":1353,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bugdown/api_code_examples.py"},"contents":{"kind":"string","value":"import re\nimport os\nimport sys\nimport json\nimport inspect\n\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing import Any, Dict, Optional, List\nimport markdown\n\nimport zerver.lib.api_test_helpers\nfrom zerver.lib.openapi import get_openapi_fixture\n\nMACRO_REGEXP = re.compile(r'\\{generate_code_example(\\(\\s*(.+?)\\s*\\))*\\|\\s*(.+?)\\s*\\|\\s*(.+?)\\s*(\\(\\s*(.+)\\s*\\))?\\}')\nCODE_EXAMPLE_REGEX = re.compile(r'\\# \\{code_example\\|\\s*(.+?)\\s*\\}')\n\nPYTHON_CLIENT_CONFIG = \"\"\"\n#!/usr/bin/env python3\n\nimport zulip\n\n# Pass the path to your zuliprc file here.\nclient = zulip.Client(config_file=\"~/zuliprc\")\n\n\"\"\"\n\nPYTHON_CLIENT_ADMIN_CONFIG = \"\"\"\n#!/usr/bin/env python\n\nimport zulip\n\n# The user for this zuliprc file must be an organization administrator\nclient = zulip.Client(config_file=\"~/zuliprc-admin\")\n\n\"\"\"\n\ndef extract_python_code_example(source: List[str], snippet: List[str]) -> List[str]:\n start = -1\n end = -1\n for line in source:\n match = CODE_EXAMPLE_REGEX.search(line)\n if match:\n if match.group(1) == 'start':\n start = source.index(line)\n elif match.group(1) == 'end':\n end = source.index(line)\n break\n\n if (start == -1 and end == -1):\n return snippet\n\n snippet.extend(source[start + 1: end])\n snippet.append(' print(result)')\n snippet.append('\\n')\n source = source[end + 1:]\n return extract_python_code_example(source, snippet)\n\ndef render_python_code_example(function: str, admin_config: Optional[bool]=False) -> List[str]:\n method = zerver.lib.api_test_helpers.TEST_FUNCTIONS[function]\n function_source_lines = inspect.getsourcelines(method)[0]\n\n if admin_config:\n config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines()\n else:\n config = PYTHON_CLIENT_CONFIG.splitlines()\n\n snippet = extract_python_code_example(function_source_lines, [])\n\n code_example = []\n code_example.append('```python')\n code_example.extend(config)\n\n for line in snippet:\n # Remove one level of indentation and strip newlines\n code_example.append(line[4:].rstrip())\n\n code_example.append('```')\n\n return code_example\n\nSUPPORTED_LANGUAGES = {\n 'python': {\n 'client_config': PYTHON_CLIENT_CONFIG,\n 'admin_config': PYTHON_CLIENT_ADMIN_CONFIG,\n 'render': render_python_code_example,\n }\n} # type: Dict[str, Any]\n\nclass APICodeExamplesGenerator(Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n 
md.preprocessors.add(\n 'generate_code_example', APICodeExamplesPreprocessor(md, self.getConfigs()), '_begin'\n )\n\n\nclass APICodeExamplesPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:\n super(APICodeExamplesPreprocessor, self).__init__(md)\n\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = MACRO_REGEXP.search(line)\n\n if match:\n language = match.group(2)\n function = match.group(3)\n key = match.group(4)\n argument = match.group(6)\n\n if key == 'fixture':\n if argument:\n text = self.render_fixture(function, name=argument)\n else:\n text = self.render_fixture(function)\n elif key == 'example':\n if argument == 'admin_config=True':\n text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True)\n else:\n text = SUPPORTED_LANGUAGES[language]['render'](function)\n\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = MACRO_REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]:\n fixture = []\n\n # We assume that if the function we're rendering starts with a slash\n # it's a path in the endpoint and therefore it uses the new OpenAPI\n # format.\n if function.startswith('/'):\n path, method = function.rsplit(':', 1)\n fixture_dict = get_openapi_fixture(path, method, name)\n else:\n fixture_dict = zerver.lib.api_test_helpers.FIXTURES[function]\n\n fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True,\n separators=(',', ': '))\n\n fixture.append('```')\n fixture.extend(fixture_json.splitlines())\n fixture.append('```')\n\n return fixture\n\ndef makeExtension(*args: Any, **kwargs: str) -> APICodeExamplesGenerator:\n return APICodeExamplesGenerator(kwargs)\n"},"type_annotations":{"kind":"list like","value":["List[str]","List[str]","str","markdown.Markdown","Dict[str, Any]","markdown.Markdown","Dict[str, Any]","List[str]","str","Any","str"],"string":"[\n \"List[str]\",\n \"List[str]\",\n \"str\",\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"List[str]\",\n \"str\",\n \"Any\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[886,906,1536,2507,2538,2779,2806,2919,4590,5365,5380],"string":"[\n 886,\n 906,\n 1536,\n 2507,\n 2538,\n 2779,\n 2806,\n 2919,\n 4590,\n 5365,\n 5380\n]"},"type_annotation_ends":{"kind":"list like","value":[895,915,1539,2524,2552,2796,2820,2928,4593,5368,5383],"string":"[\n 895,\n 915,\n 1539,\n 2524,\n 2552,\n 2796,\n 2820,\n 2928,\n 4593,\n 5368,\n 5383\n]"}}},{"rowIdx":1354,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bugdown/fenced_code.py"},"contents":{"kind":"string","value":"\"\"\"\nFenced Code Extension for Python Markdown\n=========================================\n\nThis extension adds Fenced Code Blocks to Python-Markdown.\n\n >>> import markdown\n >>> text = '''\n ... A paragraph before a fenced code block:\n ...\n ... ~~~\n ... Fenced code block\n ... ~~~\n ... 
'''\n >>> html = markdown.markdown(text, extensions=['fenced_code'])\n >>> print html\n <p>A paragraph before a fenced code block:</p>\n <pre><code>Fenced code block\n    </code></pre>\n\nWorks with safe_mode also (we check this because we are using the HtmlStash):\n\n >>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')\n <p>A paragraph before a fenced code block:</p>\n <pre><code>Fenced code block\n    </code></pre>\n\nInclude tilde's in a code block and wrap with blank lines:\n\n >>> text = '''\n ... ~~~~~~~~\n ...\n ... ~~~~\n ... ~~~~~~~~'''\n >>> print markdown.markdown(text, extensions=['fenced_code'])\n <pre><code>\n    ~~~~\n    </code></pre>\n\nRemoves trailing whitespace from code blocks that cause horizontal scrolling\n >>> import markdown\n >>> text = '''\n ... A paragraph before a fenced code block:\n ...\n ... ~~~\n ... Fenced code block \\t\\t\\t\\t\\t\\t\\t\n ... ~~~\n ... '''\n >>> html = markdown.markdown(text, extensions=['fenced_code'])\n >>> print html\n <p>A paragraph before a fenced code block:</p>\n <pre><code>Fenced code block\n    </code></pre>\n\nLanguage tags:\n\n >>> text = '''\n ... ~~~~{.python}\n ... # Some python code\n ... ~~~~'''\n >>> print markdown.markdown(text, extensions=['fenced_code'])\n <pre><code class=\"python\"># Some python code\n    </code></pre>\n\nCopyright 2007-2008 [Waylan Limberg](http://achinghead.com/).\n\nProject website: <http://www.freewisdom.org/project/python-markdown/Fenced__Code__Blocks>\nContact: markdown@freewisdom.org\n\nLicense: BSD (see ../docs/LICENSE for details)\n\nDependencies:\n* [Python 2.4+](http://python.org)\n* [Markdown 2.0+](http://packages.python.org/Markdown/)\n* [Pygments (optional)](http://pygments.org)\n\n\"\"\"\n\nimport re\nimport subprocess\nimport markdown\nfrom django.utils.html import escape\nfrom markdown.extensions.codehilite import CodeHilite, CodeHiliteExtension\nfrom zerver.lib.tex import render_tex\nfrom typing import Any, Dict, Iterable, List, MutableSequence, Optional, Tuple, Union\n\n# Global vars\nFENCE_RE = re.compile(\"\"\"\n # ~~~ or ```\n (?P<fence>\n ^(?:~{3,}|`{3,})\n )\n\n [ ]* # spaces\n\n (\n \\\\{?\\\\.?\n (?P<lang>\n [a-zA-Z0-9_+-./#]*\n ) # \"py\" or \"javascript\"\n \\\\}?\n ) # language, like \".py\" or \"{javascript}\"\n [ ]* # spaces\n $\n \"\"\", re.VERBOSE)\n\n\nCODE_WRAP = '<pre><code%s>%s\\n</code></pre>
'\nLANG_TAG = ' class=\"%s\"'\n\nclass FencedCodeExtension(markdown.Extension):\n\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n \"\"\" Add FencedBlockPreprocessor to the Markdown instance. \"\"\"\n md.registerExtension(self)\n\n # Newer versions of Python-Markdown (starting at 2.3?) have\n # a normalize_whitespace preprocessor that needs to go first.\n position = ('>normalize_whitespace'\n if 'normalize_whitespace' in md.preprocessors\n else '_begin')\n\n md.preprocessors.add('fenced_code_block',\n FencedBlockPreprocessor(md),\n position)\n\n\nclass BaseHandler:\n def handle_line(self, line: str) -> None:\n raise NotImplementedError()\n\n def done(self) -> None:\n raise NotImplementedError()\n\ndef generic_handler(processor: Any, output: MutableSequence[str], fence: str, lang: str) -> BaseHandler:\n if lang in ('quote', 'quoted'):\n return QuoteHandler(processor, output, fence)\n elif lang in ('math', 'tex', 'latex'):\n return TexHandler(processor, output, fence)\n else:\n return CodeHandler(processor, output, fence, lang)\n\ndef check_for_new_fence(processor: Any, output: MutableSequence[str], line: str) -> None:\n m = FENCE_RE.match(line)\n if m:\n fence = m.group('fence')\n lang = m.group('lang')\n handler = generic_handler(processor, output, fence, lang)\n processor.push(handler)\n else:\n output.append(line)\n\nclass OuterHandler(BaseHandler):\n def __init__(self, processor: Any, output: MutableSequence[str]) -> None:\n self.output = output\n self.processor = processor\n\n def handle_line(self, line: str) -> None:\n check_for_new_fence(self.processor, self.output, line)\n\n def done(self) -> None:\n self.processor.pop()\n\nclass CodeHandler(BaseHandler):\n def __init__(self, processor: Any, output: MutableSequence[str], fence: str, lang: str) -> None:\n self.processor = processor\n self.output = output\n self.fence = fence\n self.lang = lang\n self.lines = [] # type: List[str]\n\n def handle_line(self, line: str) -> None:\n if line.rstrip() == self.fence:\n self.done()\n else:\n self.lines.append(line.rstrip())\n\n def done(self) -> None:\n text = '\\n'.join(self.lines)\n text = self.processor.format_code(self.lang, text)\n text = self.processor.placeholder(text)\n processed_lines = text.split('\\n')\n self.output.append('')\n self.output.extend(processed_lines)\n self.output.append('')\n self.processor.pop()\n\nclass QuoteHandler(BaseHandler):\n def __init__(self, processor: Any, output: MutableSequence[str], fence: str) -> None:\n self.processor = processor\n self.output = output\n self.fence = fence\n self.lines = [] # type: List[str]\n\n def handle_line(self, line: str) -> None:\n if line.rstrip() == self.fence:\n self.done()\n else:\n check_for_new_fence(self.processor, self.lines, line)\n\n def done(self) -> None:\n text = '\\n'.join(self.lines)\n text = self.processor.format_quote(text)\n processed_lines = text.split('\\n')\n self.output.append('')\n self.output.extend(processed_lines)\n self.output.append('')\n self.processor.pop()\n\nclass TexHandler(BaseHandler):\n def __init__(self, processor: Any, output: MutableSequence[str], fence: str) -> None:\n self.processor = processor\n self.output = output\n self.fence = fence\n self.lines = [] # type: List[str]\n\n def handle_line(self, line: str) -> None:\n if line.rstrip() == self.fence:\n self.done()\n else:\n self.lines.append(line)\n\n def done(self) -> None:\n text = '\\n'.join(self.lines)\n text = self.processor.format_tex(text)\n text = self.processor.placeholder(text)\n 
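        # placeholder() is the same stash round-trip that CodeHandler and
        # QuoteHandler use: the rendered HTML is parked in python-markdown's
        # htmlStash via store(..., safe=True) (see
        # FencedBlockPreprocessor.placeholder below) and an opaque token is
        # spliced into the line stream so later passes cannot mangle it.
        # A hypothetical stand-alone sketch of the pattern:
        #
        #     md = markdown.Markdown()
        #     token = md.htmlStash.store('<b>already rendered</b>', safe=True)
        #     # `token` stands in for the HTML; the serializer swaps the
        #     # stored fragment back in when conversion finishes.
        #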
processed_lines = text.split('\\n')\n self.output.append('')\n self.output.extend(processed_lines)\n self.output.append('')\n self.processor.pop()\n\n\nclass FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):\n def __init__(self, md: markdown.Markdown) -> None:\n markdown.preprocessors.Preprocessor.__init__(self, md)\n\n self.checked_for_codehilite = False\n self.codehilite_conf = {} # type: Dict[str, List[Any]]\n\n def push(self, handler: BaseHandler) -> None:\n self.handlers.append(handler)\n\n def pop(self) -> None:\n self.handlers.pop()\n\n def run(self, lines: Iterable[str]) -> List[str]:\n \"\"\" Match and store Fenced Code Blocks in the HtmlStash. \"\"\"\n\n output = [] # type: List[str]\n\n processor = self\n self.handlers = [] # type: List[BaseHandler]\n\n handler = OuterHandler(processor, output)\n self.push(handler)\n\n for line in lines:\n self.handlers[-1].handle_line(line)\n\n while self.handlers:\n self.handlers[-1].done()\n\n # This fiddly handling of new lines at the end of our output was done to make\n # existing tests pass. Bugdown is just kind of funny when it comes to new lines,\n # but we could probably remove this hack.\n if len(output) > 2 and output[-2] != '':\n output.append('')\n return output\n\n def format_code(self, lang: str, text: str) -> str:\n if lang:\n langclass = LANG_TAG % (lang,)\n else:\n langclass = ''\n\n # Check for code hilite extension\n if not self.checked_for_codehilite:\n for ext in self.markdown.registeredExtensions:\n if isinstance(ext, CodeHiliteExtension):\n self.codehilite_conf = ext.config\n break\n\n self.checked_for_codehilite = True\n\n # If config is not empty, then the codehighlite extension\n # is enabled, so we call it to highlite the code\n if self.codehilite_conf:\n highliter = CodeHilite(text,\n linenums=self.codehilite_conf['linenums'][0],\n guess_lang=self.codehilite_conf['guess_lang'][0],\n css_class=self.codehilite_conf['css_class'][0],\n style=self.codehilite_conf['pygments_style'][0],\n use_pygments=self.codehilite_conf['use_pygments'][0],\n lang=(lang or None),\n noclasses=self.codehilite_conf['noclasses'][0])\n\n code = highliter.hilite()\n else:\n code = CODE_WRAP % (langclass, self._escape(text))\n\n return code\n\n def format_quote(self, text: str) -> str:\n paragraphs = text.split(\"\\n\\n\")\n quoted_paragraphs = []\n for paragraph in paragraphs:\n lines = paragraph.split(\"\\n\")\n quoted_paragraphs.append(\"\\n\".join(\"> \" + line for line in lines if line != ''))\n return \"\\n\\n\".join(quoted_paragraphs)\n\n def format_tex(self, text: str) -> str:\n paragraphs = text.split(\"\\n\\n\")\n tex_paragraphs = []\n for paragraph in paragraphs:\n html = render_tex(paragraph, is_inline=False)\n if html is not None:\n tex_paragraphs.append(html)\n else:\n tex_paragraphs.append('' +\n escape(paragraph) + '')\n return \"\\n\\n\".join(tex_paragraphs)\n\n def placeholder(self, code: str) -> str:\n return self.markdown.htmlStash.store(code, safe=True)\n\n def _escape(self, txt: str) -> str:\n \"\"\" basic html escaping \"\"\"\n txt = txt.replace('&', '&amp;')\n txt = txt.replace('<', '&lt;')\n txt = txt.replace('>', '&gt;')\n txt = txt.replace('\"', '&quot;')\n return txt\n\n\ndef makeExtension(*args: Any, **kwargs: None) -> FencedCodeExtension:\n return FencedCodeExtension(*args, **kwargs)\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n"},"type_annotations":{"kind":"list like","value":["markdown.Markdown","Dict[str, 
Any]","str","Any","MutableSequence[str]","str","str","Any","MutableSequence[str]","str","Any","MutableSequence[str]","str","Any","MutableSequence[str]","str","str","str","Any","MutableSequence[str]","str","str","Any","MutableSequence[str]","str","str","markdown.Markdown","BaseHandler","Iterable[str]","str","str","str","str","str","str","Any","None"],"string":"[\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"str\",\n \"Any\",\n \"MutableSequence[str]\",\n \"str\",\n \"str\",\n \"Any\",\n \"MutableSequence[str]\",\n \"str\",\n \"Any\",\n \"MutableSequence[str]\",\n \"str\",\n \"Any\",\n \"MutableSequence[str]\",\n \"str\",\n \"str\",\n \"str\",\n \"Any\",\n \"MutableSequence[str]\",\n \"str\",\n \"str\",\n \"Any\",\n \"MutableSequence[str]\",\n \"str\",\n \"str\",\n \"markdown.Markdown\",\n \"BaseHandler\",\n \"Iterable[str]\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"Any\",\n \"None\"\n]"},"type_annotation_starts":{"kind":"list like","value":[2853,2884,3499,3646,3659,3688,3699,4010,4023,4051,4372,4385,4513,4715,4728,4757,4768,4974,5530,5543,5572,5753,6270,6283,6312,6493,7057,7286,7428,8228,8239,9614,9948,10431,10534,10788,10803],"string":"[\n 2853,\n 2884,\n 3499,\n 3646,\n 3659,\n 3688,\n 3699,\n 4010,\n 4023,\n 4051,\n 4372,\n 4385,\n 4513,\n 4715,\n 4728,\n 4757,\n 4768,\n 4974,\n 5530,\n 5543,\n 5572,\n 5753,\n 6270,\n 6283,\n 6312,\n 6493,\n 7057,\n 7286,\n 7428,\n 8228,\n 8239,\n 9614,\n 9948,\n 10431,\n 10534,\n 10788,\n 10803\n]"},"type_annotation_ends":{"kind":"list like","value":[2870,2898,3502,3649,3679,3691,3702,4013,4043,4054,4375,4405,4516,4718,4748,4760,4771,4977,5533,5563,5575,5756,6273,6303,6315,6496,7074,7297,7441,8231,8242,9617,9951,10434,10537,10791,10807],"string":"[\n 2870,\n 2898,\n 3502,\n 3649,\n 3679,\n 3691,\n 3702,\n 4013,\n 4043,\n 4054,\n 4375,\n 4405,\n 4516,\n 4718,\n 4748,\n 4760,\n 4771,\n 4977,\n 5533,\n 5563,\n 5575,\n 5756,\n 6273,\n 6303,\n 6315,\n 6496,\n 7074,\n 7297,\n 7441,\n 8231,\n 8242,\n 9617,\n 9951,\n 10434,\n 10537,\n 10791,\n 10807\n]"}}},{"rowIdx":1355,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bugdown/help_emoticon_translations_table.py"},"contents":{"kind":"string","value":"import re\nimport markdown\nfrom typing import Any, Dict, List, Optional, Union\nfrom typing.re import Match\nfrom markdown.preprocessors import Preprocessor\n\nfrom zerver.lib.emoji import EMOTICON_CONVERSIONS, name_to_codepoint\n\nREGEXP = re.compile(r'\\{emoticon_translations\\}')\n\nTABLE_HTML = \"\"\"\n\n \n \n \n \n \n \n \n {body}\n \n
<thead>
        <tr>
            <th align=\"center\">Emoticon</th>
            <th align=\"center\">Emoji</th>
        </tr>
    </thead>
</table>
\n\"\"\"\n\nROW_HTML = \"\"\"\n\n {emoticon}\n \n \n \n\n\"\"\"\n\nclass EmoticonTranslationsHelpExtension(markdown.Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n \"\"\" Add SettingHelpExtension to the Markdown instance. \"\"\"\n md.registerExtension(self)\n md.preprocessors.add('emoticon_translations', EmoticonTranslation(), '_end')\n\n\nclass EmoticonTranslation(Preprocessor):\n def run(self, lines: List[str]) -> List[str]:\n for loc, line in enumerate(lines):\n match = REGEXP.search(line)\n if match:\n text = self.handleMatch(match)\n lines = lines[:loc] + text + lines[loc+1:]\n break\n return lines\n\n def handleMatch(self, match: Match[str]) -> List[str]:\n rows = [\n ROW_HTML.format(emoticon=emoticon,\n name=name.strip(':'),\n codepoint=name_to_codepoint[name.strip(':')])\n for emoticon, name in EMOTICON_CONVERSIONS.items()\n ]\n body = '\\n'.join(rows).strip()\n return TABLE_HTML.format(body=body).strip().splitlines()\n\ndef makeExtension(*args: Any, **kwargs: Any) -> EmoticonTranslationsHelpExtension:\n return EmoticonTranslationsHelpExtension(*args, **kwargs)\n"},"type_annotations":{"kind":"list like","value":["markdown.Markdown","Dict[str, Any]","List[str]","Match[str]","Any","Any"],"string":"[\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"List[str]\",\n \"Match[str]\",\n \"Any\",\n \"Any\"\n]"},"type_annotation_starts":{"kind":"list like","value":[849,880,1160,1473,1890,1905],"string":"[\n 849,\n 880,\n 1160,\n 1473,\n 1890,\n 1905\n]"},"type_annotation_ends":{"kind":"list like","value":[866,894,1169,1483,1893,1908],"string":"[\n 866,\n 894,\n 1169,\n 1483,\n 1893,\n 1908\n]"}}},{"rowIdx":1356,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bugdown/help_relative_links.py"},"contents":{"kind":"string","value":"import re\nimport markdown\nfrom typing import Any, Dict, List, Optional, Union\nfrom typing.re import Match\nfrom markdown.preprocessors import Preprocessor\n\n# There is a lot of duplicated code between this file and\n# help_settings_links.py. So if you're making a change here consider making\n# it there as well.\n\nREGEXP = re.compile(r'\\{relative\\|(?P.*?)\\|(?P.*?)\\}')\n\ngear_info = {\n # The pattern is key: [name, link]\n # key is from REGEXP: `{relative|gear|key}`\n # name is what the item is called in the gear menu: `Select **name**.`\n # link is used for relative links: `Select [name](link).`\n 'manage-streams': ['Manage streams', '/#streams/subscribed'],\n 'settings': ['Settings', '/#settings/your-account'],\n 'manage-organization': ['Manage organization', '/#organization/organization-profile'],\n 'integrations': ['Integrations', '/integrations'],\n 'stats': ['Statistics', '/stats'],\n 'plans': ['Plans and pricing', '/plans'],\n 'billing': ['Billing', '/billing'],\n 'invite': ['Invite users', '/#invite'],\n}\n\ngear_instructions = \"\"\"\n1. From your desktop, click on the **gear**\n () in the upper right corner.\n\n1. Select %(item)s.\n\"\"\"\n\ndef gear_handle_match(key: str) -> str:\n if relative_help_links:\n item = '[%s](%s)' % (gear_info[key][0], gear_info[key][1])\n else:\n item = '**%s**' % (gear_info[key][0],)\n return gear_instructions % {'item': item}\n\n\nstream_info = {\n 'all': ['All streams', '/#streams/all'],\n 'subscribed': ['Your streams', '/#streams/subscribed'],\n}\n\nstream_instructions_no_link = \"\"\"\n1. From your desktop, click on the **gear**\n () in the upper right corner.\n\n1. 
Click **Manage streams**.\n\"\"\"\n\ndef stream_handle_match(key: str) -> str:\n if relative_help_links:\n return \"1. Go to [%s](%s).\" % (stream_info[key][0], stream_info[key][1])\n if key == 'all':\n return stream_instructions_no_link + \"\\n\\n1. Click **All streams** in the upper left.\"\n return stream_instructions_no_link\n\n\nLINK_TYPE_HANDLERS = {\n 'gear': gear_handle_match,\n 'stream': stream_handle_match,\n}\n\nclass RelativeLinksHelpExtension(markdown.Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n \"\"\" Add RelativeLinksHelpExtension to the Markdown instance. \"\"\"\n md.registerExtension(self)\n md.preprocessors.add('help_relative_links', RelativeLinks(), '_begin')\n\nrelative_help_links = None # type: Optional[bool]\n\ndef set_relative_help_links(value: bool) -> None:\n global relative_help_links\n relative_help_links = value\n\nclass RelativeLinks(Preprocessor):\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if match:\n text = [self.handleMatch(match)]\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def handleMatch(self, match: Match[str]) -> str:\n return LINK_TYPE_HANDLERS[match.group('link_type')](match.group('key'))\n\ndef makeExtension(*args: Any, **kwargs: Any) -> RelativeLinksHelpExtension:\n return RelativeLinksHelpExtension(*args, **kwargs)\n"},"type_annotations":{"kind":"list like","value":["str","str","markdown.Markdown","Dict[str, Any]","bool","List[str]","Match[str]","Any","Any"],"string":"[\n \"str\",\n \"str\",\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"bool\",\n \"List[str]\",\n \"Match[str]\",\n \"Any\",\n \"Any\"\n]"},"type_annotation_starts":{"kind":"list like","value":[1240,1777,2235,2266,2566,2705,3655,3781,3796],"string":"[\n 1240,\n 1777,\n 2235,\n 2266,\n 2566,\n 2705,\n 3655,\n 3781,\n 3796\n]"},"type_annotation_ends":{"kind":"list like","value":[1243,1780,2252,2280,2570,2714,3665,3784,3799],"string":"[\n 1243,\n 1780,\n 2252,\n 2280,\n 2570,\n 2714,\n 3665,\n 3784,\n 3799\n]"}}},{"rowIdx":1357,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bugdown/help_settings_links.py"},"contents":{"kind":"string","value":"import re\nimport markdown\nfrom typing import Any, Dict, List, Optional, Union\nfrom typing.re import Match\nfrom markdown.preprocessors import Preprocessor\n\n# There is a lot of duplicated code between this file and\n# help_relative_links.py. 
So if you're making a change here consider making\n# it there as well.\n\nREGEXP = re.compile(r'\\{settings_tab\\|(?P.*?)\\}')\n\nlink_mapping = {\n # a mapping from the setting identifier that is the same as the final URL\n # breadcrumb to that setting to the name of its setting type, the setting\n # name as it appears in the user interface, and a relative link that can\n # be used to get to that setting\n 'your-account': ['Settings', 'Your account', '/#settings/your-account'],\n 'display-settings': ['Settings', 'Display settings', '/#settings/display-settings'],\n 'notifications': ['Settings', 'Notifications', '/#settings/notifications'],\n 'your-bots': ['Settings', 'Your bots', '/#settings/your-bots'],\n 'alert-words': ['Settings', 'Alert words', '/#settings/alert-words'],\n 'uploaded-files': ['Settings', 'Uploaded files', '/#settings/uploaded-files'],\n 'muted-topics': ['Settings', 'Muted topics', '/#settings/muted-topics'],\n\n 'organization-profile': ['Manage organization', 'Organization profile',\n '/#organization/organization-profile'],\n 'organization-settings': ['Manage organization', 'Organization settings',\n '/#organization/organization-settings'],\n 'organization-permissions': ['Manage organization', 'Organization permissions',\n '/#organization/organization-permissions'],\n 'emoji-settings': ['Manage organization', 'Custom emoji',\n '/#organization/emoji-settings'],\n 'auth-methods': ['Manage organization', 'Authentication methods',\n '/#organization/auth-methods'],\n 'user-groups-admin': ['Manage organization', 'User groups',\n '/#organization/user-groups-admin'],\n 'user-list-admin': ['Manage organization', 'Users', '/#organization/user-list-admin'],\n 'deactivated-users-admin': ['Manage organization', 'Deactivated users',\n '/#organization/deactivated-users-admin'],\n 'bot-list-admin': ['Manage organization', 'Bots', '/#organization/bot-list-admin'],\n 'default-streams-list': ['Manage organization', 'Default streams',\n '/#organization/default-streams-list'],\n 'filter-settings': ['Manage organization', 'Linkifiers',\n '/#organization/filter-settings'],\n 'profile-field-settings': ['Manage organization', 'Custom profile fields',\n '/#organization/profile-field-settings'],\n 'invites-list-admin': ['Manage organization', 'Invitations',\n '/#organization/invites-list-admin'],\n}\n\nsettings_markdown = \"\"\"\n1. From your desktop, click on the **gear**\n () in the upper right corner.\n\n1. Select **%(setting_type_name)s**.\n\n1. On the left, click %(setting_reference)s.\n\"\"\"\n\n\nclass SettingHelpExtension(markdown.Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n \"\"\" Add SettingHelpExtension to the Markdown instance. 
\"\"\"\n md.registerExtension(self)\n md.preprocessors.add('setting', Setting(), '_begin')\n\nrelative_settings_links = None # type: Optional[bool]\n\ndef set_relative_settings_links(value: bool) -> None:\n global relative_settings_links\n relative_settings_links = value\n\nclass Setting(Preprocessor):\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if match:\n text = [self.handleMatch(match)]\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def handleMatch(self, match: Match[str]) -> str:\n setting_identifier = match.group('setting_identifier')\n setting_type_name = link_mapping[setting_identifier][0]\n setting_name = link_mapping[setting_identifier][1]\n setting_link = link_mapping[setting_identifier][2]\n if relative_settings_links:\n return \"1. Go to [%s](%s).\" % (setting_name, setting_link)\n return settings_markdown % {'setting_type_name': setting_type_name,\n 'setting_reference': \"**%s**\" % (setting_name,)}\n\ndef makeExtension(*args: Any, **kwargs: Any) -> SettingHelpExtension:\n return SettingHelpExtension(*args, **kwargs)\n"},"type_annotations":{"kind":"list like","value":["markdown.Markdown","Dict[str, Any]","bool","List[str]","Match[str]","Any","Any"],"string":"[\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"bool\",\n \"List[str]\",\n \"Match[str]\",\n \"Any\",\n \"Any\"\n]"},"type_annotation_starts":{"kind":"list like","value":[3213,3244,3528,3669,4619,5178,5193],"string":"[\n 3213,\n 3244,\n 3528,\n 3669,\n 4619,\n 5178,\n 5193\n]"},"type_annotation_ends":{"kind":"list like","value":[3230,3258,3532,3678,4629,5181,5196],"string":"[\n 3230,\n 3258,\n 3532,\n 3678,\n 4629,\n 5181,\n 5196\n]"}}},{"rowIdx":1358,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bugdown/nested_code_blocks.py"},"contents":{"kind":"string","value":"from markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\nfrom typing import Any, Dict, Optional, List, Tuple\nimport markdown\nfrom xml.etree.cElementTree import Element\n\nfrom zerver.lib.bugdown import walk_tree_with_family, ResultWithFamily\n\nclass NestedCodeBlocksRenderer(Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n md.treeprocessors.add(\n 'nested_code_blocks',\n NestedCodeBlocksRendererTreeProcessor(md, self.getConfigs()),\n '_end'\n )\n\nclass NestedCodeBlocksRendererTreeProcessor(markdown.treeprocessors.Treeprocessor):\n def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:\n super(NestedCodeBlocksRendererTreeProcessor, self).__init__(md)\n\n def run(self, root: Element) -> None:\n code_tags = walk_tree_with_family(root, self.get_code_tags)\n nested_code_blocks = self.get_nested_code_blocks(code_tags)\n for block in nested_code_blocks:\n tag, text = block.result\n codehilite_block = self.get_codehilite_block(text)\n self.replace_element(block.family.grandparent,\n codehilite_block,\n block.family.parent)\n\n def get_code_tags(self, e: Element) -> 
Optional[Tuple[str, Optional[str]]]:\n if e.tag == \"code\":\n return (e.tag, e.text)\n return None\n\n def get_nested_code_blocks(\n self, code_tags: List[ResultWithFamily]\n ) -> List[ResultWithFamily]:\n nested_code_blocks = []\n for code_tag in code_tags:\n parent = code_tag.family.parent # type: Any\n grandparent = code_tag.family.grandparent # type: Any\n if parent.tag == \"p\" and grandparent.tag == \"li\":\n # if the parent (
<p>
) has no text, and no children,\n # that means that the element inside is its\n # only thing inside the bullet, we can confidently say\n # that this is a nested code block\n if parent.text is None and len(list(parent)) == 1 and len(list(parent.itertext())) == 1:\n nested_code_blocks.append(code_tag)\n\n return nested_code_blocks\n\n def get_codehilite_block(self, code_block_text: str) -> Element:\n div = markdown.util.etree.Element(\"div\")\n div.set(\"class\", \"codehilite\")\n pre = markdown.util.etree.SubElement(div, \"pre\")\n pre.text = code_block_text\n return div\n\n def replace_element(\n self, parent: Optional[Element],\n replacement: markdown.util.etree.Element,\n element_to_replace: Element\n ) -> None:\n if parent is None:\n return\n\n children = parent.getchildren()\n for index, child in enumerate(children):\n if child is element_to_replace:\n parent.insert(index, replacement)\n parent.remove(element_to_replace)\n\ndef makeExtension(*args: Any, **kwargs: str) -> NestedCodeBlocksRenderer:\n return NestedCodeBlocksRenderer(kwargs)\n"},"type_annotations":{"kind":"list like","value":["markdown.Markdown","Dict[str, Any]","markdown.Markdown","Dict[str, Any]","Element","Element","List[ResultWithFamily]","str","Optional[Element]","markdown.util.etree.Element","Element","Any","str"],"string":"[\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"Element\",\n \"Element\",\n \"List[ResultWithFamily]\",\n \"str\",\n \"Optional[Element]\",\n \"markdown.util.etree.Element\",\n \"Element\",\n \"Any\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[352,383,688,715,837,1328,1522,2337,2605,2649,2710,3039,3054],"string":"[\n 352,\n 383,\n 688,\n 715,\n 837,\n 1328,\n 1522,\n 2337,\n 2605,\n 2649,\n 2710,\n 3039,\n 3054\n]"},"type_annotation_ends":{"kind":"list like","value":[369,397,705,729,844,1335,1544,2340,2622,2676,2717,3042,3057],"string":"[\n 369,\n 397,\n 705,\n 729,\n 844,\n 1335,\n 1544,\n 2340,\n 2622,\n 2676,\n 2717,\n 3042,\n 3057\n]"}}},{"rowIdx":1359,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bugdown/tabbed_sections.py"},"contents":{"kind":"string","value":"import re\n\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing import Any, Dict, Optional, List, Tuple\nimport markdown\n\nSTART_TABBED_SECTION_REGEX = re.compile(r'^\\{start_tabs\\}$')\nEND_TABBED_SECTION_REGEX = re.compile(r'^\\{end_tabs\\}$')\nTAB_CONTENT_REGEX = re.compile(r'^\\{tab\\|\\s*(.+?)\\s*\\}$')\n\nCODE_SECTION_TEMPLATE = \"\"\"\n
<div class=\"code-section\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-language=\"{data_language}\">{name}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-language=\"{data_language}\" markdown=\"1\">\n{content}\n</div>
    \n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_DISPLAY_NAMES = {\n 'desktop-web': 'Desktop/Web',\n 'ios': 'iOS',\n 'android': 'Android',\n 'mac': 'macOS',\n 'windows': 'Windows',\n 'linux': 'Linux',\n 'python': 'Python',\n 'js': 'JavaScript',\n 'curl': 'curl',\n 'zulip-send': 'zulip-send',\n\n 'cloud': 'HipChat Cloud',\n 'server': 'HipChat Server or Data Center',\n}\n\nclass TabbedSectionsGenerator(Extension):\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n md.preprocessors.add(\n 'tabbed_sections', TabbedSectionsPreprocessor(md, self.getConfigs()), '_end')\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:\n super(TabbedSectionsPreprocessor, self).__init__(md)\n\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = CODE_SECTION_TEMPLATE.format(\n nav_bar=nav_bar, blocks=content_blocks)\n\n start = tab_section['start_tabs_index']\n end = tab_section['end_tabs_index'] + 1\n lines = lines[:start] + [rendered_tabs] + lines[end:]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section['tabs']):\n start_index = tab['start'] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section['tabs'][index + 1]['start']\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section['end_tabs_index']\n\n content = '\\n'.join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_language=tab['tab_name'],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content='\\n{}\\n'.format(content))\n tab_content_blocks.append(tab_content_block)\n return '\\n'.join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section['tabs']:\n li = NAV_LIST_ITEM_TEMPLATE.format(\n data_language=tab.get('tab_name'),\n name=TAB_DISPLAY_NAMES.get(tab.get('tab_name')))\n li_elements.append(li)\n return NAV_BAR_TEMPLATE.format(tabs='\\n'.join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block = {} # type: Dict[str, Any]\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block['start_tabs_index'] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault('tabs', [])\n tab = {'start': index,\n 'tab_name': tab_content_match.group(1)}\n block['tabs'].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block['end_tabs_index'] = index\n break\n return block\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(kwargs)\n"},"type_annotations":{"kind":"list like","value":["markdown.Markdown","Dict[str, Any]","markdown.Markdown","Dict[str, Any]","List[str]","Dict[str, Any]","List[str]","Dict[str, Any]","List[str]","Any","str"],"string":"[\n 
\"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"markdown.Markdown\",\n \"Dict[str, Any]\",\n \"List[str]\",\n \"Dict[str, Any]\",\n \"List[str]\",\n \"Dict[str, Any]\",\n \"List[str]\",\n \"Any\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[1262,1293,1514,1541,1653,2292,2315,3417,3806,4571,4586],"string":"[\n 1262,\n 1293,\n 1514,\n 1541,\n 1653,\n 2292,\n 2315,\n 3417,\n 3806,\n 4571,\n 4586\n]"},"type_annotation_ends":{"kind":"list like","value":[1279,1307,1531,1555,1662,2306,2324,3431,3815,4574,4589],"string":"[\n 1279,\n 1307,\n 1531,\n 1555,\n 1662,\n 2306,\n 2324,\n 3431,\n 3815,\n 4574,\n 4589\n]"}}},{"rowIdx":1360,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bugdown/testing_mocks.py"},"contents":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nfrom typing import Any, Dict, Optional\nimport ujson\n\n\nNORMAL_TWEET = \"\"\"{\n \"created_at\": \"Sat Sep 10 22:23:38 +0000 2011\",\n \"favorite_count\": 1,\n \"full_text\": \"@twitter meets @seepicturely at #tcdisrupt cc.@boscomonkey @episod http://t.co/6J2EgYM\",\n \"hashtags\": [\n {\n \"text\": \"tcdisrupt\"\n }\n ],\n \"id\": 112652479837110270,\n \"id_str\": \"112652479837110273\",\n \"in_reply_to_screen_name\": \"Twitter\",\n \"in_reply_to_user_id\": 783214,\n \"lang\": \"en\",\n \"retweet_count\": 4,\n \"source\": \"Instagram\",\n \"urls\": [\n {\n \"expanded_url\": \"http://instagr.am/p/MuW67/\",\n \"url\": \"http://t.co/6J2EgYM\"\n }\n ],\n \"user\": {\n \"created_at\": \"Mon May 16 20:07:59 +0000 2011\",\n \"description\": \"Eoin's photography account. See @mceoin for tweets.\",\n \"followers_count\": 3,\n \"id\": 299862462,\n \"lang\": \"en\",\n \"location\": \"Twitter\",\n \"name\": \"Eoin McMillan\",\n \"profile_background_color\": \"131516\",\n \"profile_background_image_url\": \"http://abs.twimg.com/images/themes/theme14/bg.gif\",\n \"profile_background_tile\": true,\n \"profile_image_url\": \"http://pbs.twimg.com/profile_images/1380912173/Screen_shot_2011-06-03_at_7.35.36_PM_normal.png\",\n \"profile_link_color\": \"009999\",\n \"profile_sidebar_fill_color\": \"EFEFEF\",\n \"profile_text_color\": \"333333\",\n \"screen_name\": \"imeoin\",\n \"statuses_count\": 278,\n \"url\": \"http://t.co/p9hKpiGMyN\"\n },\n \"user_mentions\": [\n {\n \"id\": 783214,\n \"name\": \"Twitter\",\n \"screen_name\": \"Twitter\"\n },\n {\n \"id\": 14792670,\n \"name\": \"Bosco So\",\n \"screen_name\": \"boscomonkey\"\n },\n {\n \"id\": 819797,\n \"name\": \"Taylor Singletary\",\n \"screen_name\": \"episod\"\n }\n ]\n}\"\"\"\n\nMENTION_IN_LINK_TWEET = \"\"\"{\n \"created_at\": \"Sat Sep 10 22:23:38 +0000 2011\",\n \"favorite_count\": 1,\n \"full_text\": \"http://t.co/@foo\",\n \"hashtags\": [\n {\n \"text\": \"tcdisrupt\"\n }\n ],\n \"id\": 112652479837110270,\n \"id_str\": \"112652479837110273\",\n \"in_reply_to_screen_name\": \"Twitter\",\n \"in_reply_to_user_id\": 783214,\n \"lang\": \"en\",\n \"retweet_count\": 4,\n \"source\": \"Instagram\",\n \"urls\": [\n {\n \"expanded_url\": \"http://foo.com\",\n \"url\": \"http://t.co/@foo\"\n }\n ],\n \"user\": {\n \"created_at\": \"Mon May 16 20:07:59 +0000 2011\",\n \"description\": \"Eoin's photography account. 
See @mceoin for tweets.\",\n \"followers_count\": 3,\n \"id\": 299862462,\n \"lang\": \"en\",\n \"location\": \"Twitter\",\n \"name\": \"Eoin McMillan\",\n \"profile_background_color\": \"131516\",\n \"profile_background_image_url\": \"http://abs.twimg.com/images/themes/theme14/bg.gif\",\n \"profile_background_tile\": true,\n \"profile_image_url\": \"http://pbs.twimg.com/profile_images/1380912173/Screen_shot_2011-06-03_at_7.35.36_PM_normal.png\",\n \"profile_link_color\": \"009999\",\n \"profile_sidebar_fill_color\": \"EFEFEF\",\n \"profile_text_color\": \"333333\",\n \"screen_name\": \"imeoin\",\n \"statuses_count\": 278,\n \"url\": \"http://t.co/p9hKpiGMyN\"\n },\n \"user_mentions\": [\n {\n \"id\": 783214,\n \"name\": \"Foo\",\n \"screen_name\": \"foo\"\n }\n ]\n}\"\"\"\n\nMEDIA_TWEET = \"\"\"{\n \"created_at\": \"Sat Sep 10 22:23:38 +0000 2011\",\n \"favorite_count\": 1,\n \"full_text\": \"http://t.co/xo7pAhK6n3\",\n \"id\": 112652479837110270,\n \"id_str\": \"112652479837110273\",\n \"in_reply_to_screen_name\": \"Twitter\",\n \"in_reply_to_user_id\": 783214,\n \"lang\": \"en\",\n \"media\": [\n {\n \"display_url\": \"pic.twitter.com/xo7pAhK6n3\",\n \"expanded_url\": \"http://twitter.com/NEVNBoston/status/421654515616849920/photo/1\",\n \"id\": 421654515495211010,\n \"media_url\": \"http://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg\",\n \"media_url_https\": \"https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg\",\n \"sizes\": {\"large\": {\"h\": 700, \"resize\": \"fit\", \"w\": 1024},\n \"medium\": {\"h\": 410, \"resize\": \"fit\", \"w\": 599},\n \"small\": {\"h\": 232, \"resize\": \"fit\", \"w\": 340},\n \"thumb\": {\"h\": 150, \"resize\": \"crop\", \"w\": 150}},\n \"type\": \"photo\",\n \"url\": \"http://t.co/xo7pAhK6n3\"}\n ],\n \"retweet_count\": 4,\n \"source\": \"Instagram\",\n \"user\": {\n \"created_at\": \"Mon May 16 20:07:59 +0000 2011\",\n \"description\": \"Eoin's photography account. See @mceoin for tweets.\",\n \"followers_count\": 3,\n \"id\": 299862462,\n \"lang\": \"en\",\n \"location\": \"Twitter\",\n \"name\": \"Eoin McMillan\",\n \"profile_background_color\": \"131516\",\n \"profile_background_image_url\": \"http://abs.twimg.com/images/themes/theme14/bg.gif\",\n \"profile_background_tile\": true,\n \"profile_image_url\": \"http://pbs.twimg.com/profile_images/1380912173/Screen_shot_2011-06-03_at_7.35.36_PM_normal.png\",\n \"profile_link_color\": \"009999\",\n \"profile_sidebar_fill_color\": \"EFEFEF\",\n \"profile_text_color\": \"333333\",\n \"screen_name\": \"imeoin\",\n \"statuses_count\": 278,\n \"url\": \"http://t.co/p9hKpiGMyN\"\n },\n \"user_mentions\": [\n {\n \"id\": 783214,\n \"name\": \"Foo\",\n \"screen_name\": \"foo\"\n }\n ]\n}\"\"\"\n\nEMOJI_TWEET = \"\"\"{\n \"created_at\": \"Sat Sep 10 22:23:38 +0000 2011\",\n \"favorite_count\": 1,\n \"full_text\": \"Zulip is 💯% open-source!\",\n \"hashtags\": [\n {\n \"text\": \"tcdisrupt\"\n }\n ],\n \"id\": 112652479837110270,\n \"id_str\": \"112652479837110273\",\n \"in_reply_to_screen_name\": \"Twitter\",\n \"in_reply_to_user_id\": 783214,\n \"lang\": \"en\",\n \"retweet_count\": 4,\n \"source\": \"Instagram\",\n \"user\": {\n \"created_at\": \"Mon May 16 20:07:59 +0000 2011\",\n \"description\": \"Eoin's photography account. 
See @mceoin for tweets.\",\n \"followers_count\": 3,\n \"id\": 299862462,\n \"lang\": \"en\",\n \"location\": \"Twitter\",\n \"name\": \"Eoin McMillan\",\n \"profile_background_color\": \"131516\",\n \"profile_background_image_url\": \"http://abs.twimg.com/images/themes/theme14/bg.gif\",\n \"profile_background_tile\": true,\n \"profile_image_url\": \"http://pbs.twimg.com/profile_images/1380912173/Screen_shot_2011-06-03_at_7.35.36_PM_normal.png\",\n \"profile_link_color\": \"009999\",\n \"profile_sidebar_fill_color\": \"EFEFEF\",\n \"profile_text_color\": \"333333\",\n \"screen_name\": \"imeoin\",\n \"statuses_count\": 278,\n \"url\": \"http://t.co/p9hKpiGMyN\"\n },\n \"user_mentions\": [\n {\n \"id\": 783214,\n \"name\": \"Twitter\",\n \"screen_name\": \"Twitter\"\n },\n {\n \"id\": 14792670,\n \"name\": \"Bosco So\",\n \"screen_name\": \"boscomonkey\"\n },\n {\n \"id\": 819797,\n \"name\": \"Taylor Singletary\",\n \"screen_name\": \"episod\"\n }\n ]\n}\"\"\"\n\ndef twitter(tweet_id: str) -> Optional[Dict[str, Any]]:\n if tweet_id in [\"112652479837110273\", \"287977969287315456\", \"287977969287315457\"]:\n return ujson.loads(NORMAL_TWEET)\n elif tweet_id == \"287977969287315458\":\n return ujson.loads(MENTION_IN_LINK_TWEET)\n elif tweet_id == \"287977969287315459\":\n return ujson.loads(MEDIA_TWEET)\n elif tweet_id == \"287977969287315460\":\n return ujson.loads(EMOJI_TWEET)\n else:\n return None\n"},"type_annotations":{"kind":"list like","value":["str"],"string":"[\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[7401],"string":"[\n 7401\n]"},"type_annotation_ends":{"kind":"list like","value":[7404],"string":"[\n 7404\n]"}}},{"rowIdx":1361,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/bulk_create.py"},"contents":{"kind":"string","value":"from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple\n\nfrom zerver.lib.initial_password import initial_password\nfrom zerver.models import Realm, Stream, UserProfile, Huddle, \\\n Subscription, Recipient, Client, RealmAuditLog, get_huddle_hash\nfrom zerver.lib.create_user import create_user_profile\n\ndef bulk_create_users(realm: Realm,\n users_raw: Set[Tuple[str, str, str, bool]],\n bot_type: Optional[int]=None,\n bot_owner: Optional[UserProfile]=None,\n tos_version: Optional[str]=None,\n timezone: str=\"\") -> None:\n \"\"\"\n Creates and saves a UserProfile with the given email.\n Has some code based off of UserManage.create_user, but doesn't .save()\n \"\"\"\n existing_users = frozenset(UserProfile.objects.filter(\n realm=realm).values_list('email', flat=True))\n users = sorted([user_raw for user_raw in users_raw if user_raw[0] not in existing_users])\n\n # Now create user_profiles\n profiles_to_create = [] # type: List[UserProfile]\n for (email, full_name, short_name, active) in users:\n profile = create_user_profile(realm, email,\n initial_password(email), active, bot_type,\n full_name, short_name, bot_owner, False, tos_version,\n timezone, tutorial_status=UserProfile.TUTORIAL_FINISHED,\n enter_sends=True)\n profiles_to_create.append(profile)\n UserProfile.objects.bulk_create(profiles_to_create)\n\n RealmAuditLog.objects.bulk_create(\n [RealmAuditLog(realm=realm, modified_user=profile_,\n event_type=RealmAuditLog.USER_CREATED, event_time=profile_.date_joined)\n for profile_ in profiles_to_create])\n\n profiles_by_email = {} # type: Dict[str, UserProfile]\n profiles_by_id = {} # type: Dict[int, UserProfile]\n for profile 
# This is only used in populate_db, so doesn't really need tests
def bulk_create_streams(realm: Realm,
                        stream_dict: Dict[str, Dict[str, Any]]) -> None:  # nocoverage
    existing_streams = frozenset([name.lower() for name in
                                  Stream.objects.filter(realm=realm)
                                  .values_list('name', flat=True)])
    streams_to_create = []  # type: List[Stream]
    for name, options in stream_dict.items():
        if 'history_public_to_subscribers' not in options:
            options['history_public_to_subscribers'] = (
                not options.get("invite_only", False) and not realm.is_zephyr_mirror_realm)
        if name.lower() not in existing_streams:
            streams_to_create.append(
                Stream(
                    realm=realm,
                    name=name,
                    description=options["description"],
                    invite_only=options.get("invite_only", False),
                    is_announcement_only=options.get("is_announcement_only", False),
                    history_public_to_subscribers=options["history_public_to_subscribers"],
                    is_web_public=options.get("is_web_public", False),
                    is_in_zephyr_realm=realm.is_zephyr_mirror_realm,
                )
            )
    # Sort streams by name before creating them so that we can have a
    # reliable ordering of `stream_id` across different python versions.
    # This is required for test fixtures which contain `stream_id`.  Prior
    # to python 3.3, hashes were not randomized, but after a security fix
    # hash randomization was enabled in python 3.3, which made iteration
    # of dictionaries and sets completely unpredictable.  Hence the order
    # of elements while iterating `stream_dict` will be completely random
    # for python 3.3 and later versions.
    streams_to_create.sort(key=lambda x: x.name)
    Stream.objects.bulk_create(streams_to_create)

    recipients_to_create = []  # type: List[Recipient]
    for stream in Stream.objects.filter(realm=realm).values('id', 'name'):
        if stream['name'].lower() not in existing_streams:
            recipients_to_create.append(Recipient(type_id=stream['id'],
                                                  type=Recipient.STREAM))
    Recipient.objects.bulk_create(recipients_to_create)
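# A minimal usage sketch (stream names and options are hypothetical): each
# stream maps to an options dict; "description" is required, and the
# remaining keys fall back to the defaults handled above.
def _example_bulk_create_streams(realm: Realm) -> None:
    bulk_create_streams(realm, {
        'general': {'description': 'Public discussion'},
        'core team': {'description': 'Private discussion', 'invite_only': True},
    })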
# ===== zerver/lib/cache.py =====
# See https://zulip.readthedocs.io/en/latest/subsystems/caching.html for docs
from functools import wraps

from django.utils.lru_cache import lru_cache
from django.core.cache import cache as djcache
from django.core.cache import caches
from django.conf import settings
from django.db.models import Q
from django.core.cache.backends.base import BaseCache

from typing import cast, Any, Callable, Dict, Iterable, List, Optional, Union, Set, TypeVar, Tuple

from zerver.lib.utils import statsd, statsd_key, make_safe_digest
import time
import base64
import random
import sys
import os
import hashlib

if False:
    from zerver.models import UserProfile, Realm, Message
    # These modules have to be imported for type annotations but
    # they cannot be imported at runtime due to cyclic dependency.

ReturnT = TypeVar('ReturnT')  # Useful for matching return types via Callable[..., ReturnT]

class NotFoundInCache(Exception):
    pass


remote_cache_time_start = 0.0
remote_cache_total_time = 0.0
remote_cache_total_requests = 0

def get_remote_cache_time() -> float:
    return remote_cache_total_time

def get_remote_cache_requests() -> int:
    return remote_cache_total_requests

def remote_cache_stats_start() -> None:
    global remote_cache_time_start
    remote_cache_time_start = time.time()

def remote_cache_stats_finish() -> None:
    global remote_cache_total_time
    global remote_cache_total_requests
    global remote_cache_time_start
    remote_cache_total_requests += 1
    remote_cache_total_time += (time.time() - remote_cache_time_start)

def get_or_create_key_prefix() -> str:
    if settings.CASPER_TESTS:
        # This sets the prefix for the benefit of the Casper tests.
        #
        # Having a fixed key is OK since we don't support running
        # multiple copies of the casper tests at the same time anyway.
        return 'casper_tests:'
    elif settings.TEST_SUITE:
        # The Python tests overwrite KEY_PREFIX on each test, but use
        # this codepath as well, just to save running the more complex
        # code below for reading the normal key prefix.
        return 'django_tests_unused:'

    # directory `var` should exist in production
    os.makedirs(os.path.join(settings.DEPLOY_ROOT, "var"), exist_ok=True)

    filename = os.path.join(settings.DEPLOY_ROOT, "var", "remote_cache_prefix")
    try:
        fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o444)
        random_hash = hashlib.sha256(str(random.getrandbits(256)).encode('utf-8')).digest()
        prefix = base64.b16encode(random_hash)[:32].decode('utf-8').lower() + ':'
        # This does close the underlying file
        with os.fdopen(fd, 'w') as f:
            f.write(prefix + "\n")
    except OSError:
        # The file already exists
        tries = 1
        while tries < 10:
            with open(filename, 'r') as f:
                prefix = f.readline()[:-1]
            if len(prefix) == 33:
                break
            tries += 1
            prefix = ''
            time.sleep(0.5)

    if not prefix:
        print("Could not read remote cache key prefix file")
        sys.exit(1)

    return prefix

KEY_PREFIX = get_or_create_key_prefix()  # type: str
def bounce_key_prefix_for_testing(test_name: str) -> None:
    global KEY_PREFIX
    KEY_PREFIX = test_name + ':' + str(os.getpid()) + ':'
    # We are taking the hash of the KEY_PREFIX to decrease the size of the key.
    # Memcached keys should have a length of less than 256.
    KEY_PREFIX = hashlib.sha1(KEY_PREFIX.encode('utf-8')).hexdigest()

def get_cache_backend(cache_name: Optional[str]) -> BaseCache:
    if cache_name is None:
        return djcache
    return caches[cache_name]

def get_cache_with_key(
        keyfunc: Callable[..., str],
        cache_name: Optional[str]=None
) -> Callable[[Callable[..., ReturnT]], Callable[..., ReturnT]]:
    """
    Like cache_with_key, this fetches the value for the computed key from
    the cache.  Since a cached value may be any data, including None, a
    missing key is signaled by raising NotFoundInCache rather than by a
    sentinel return value.
    """
    def decorator(func: Callable[..., ReturnT]) -> (Callable[..., ReturnT]):
        @wraps(func)
        def func_with_caching(*args: Any, **kwargs: Any) -> Callable[..., ReturnT]:
            key = keyfunc(*args, **kwargs)
            val = cache_get(key, cache_name=cache_name)
            if val is not None:
                return val[0]
            raise NotFoundInCache()

        return func_with_caching

    return decorator

def cache_with_key(
        keyfunc: Callable[..., str], cache_name: Optional[str]=None,
        timeout: Optional[int]=None, with_statsd_key: Optional[str]=None
) -> Callable[[Callable[..., ReturnT]], Callable[..., ReturnT]]:
    """Decorator which applies Django caching to a function.

    Decorator argument is a function which computes a cache key
    from the original function's arguments.  You are responsible
    for avoiding collisions with other uses of this decorator or
    other uses of caching."""

    def decorator(func: Callable[..., ReturnT]) -> Callable[..., ReturnT]:
        @wraps(func)
        def func_with_caching(*args: Any, **kwargs: Any) -> ReturnT:
            key = keyfunc(*args, **kwargs)

            val = cache_get(key, cache_name=cache_name)

            extra = ""
            if cache_name == 'database':
                extra = ".dbcache"

            if with_statsd_key is not None:
                metric_key = with_statsd_key
            else:
                metric_key = statsd_key(key)

            status = "hit" if val is not None else "miss"
            statsd.incr("cache%s.%s.%s" % (extra, metric_key, status))

            # Values are singleton tuples so that we can distinguish
            # a result of None from a missing key.
            if val is not None:
                return val[0]

            val = func(*args, **kwargs)

            cache_set(key, val, cache_name=cache_name, timeout=timeout)

            return val

        return func_with_caching

    return decorator
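# A minimal usage sketch (both functions below are hypothetical, not part of
# the original module): the keyfunc maps the decorated function's arguments
# to a cache key; on a miss the body runs and the result is stored under
# that key, and on a hit the body is skipped entirely.
def _example_thing_cache_key(thing_id: int) -> str:
    return "example_thing:%d" % (thing_id,)

@cache_with_key(_example_thing_cache_key, timeout=3600)
def _example_fetch_thing(thing_id: int) -> str:
    return "thing-%d" % (thing_id,)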
def cache_set(key: str, val: Any, cache_name: Optional[str]=None, timeout: Optional[int]=None) -> None:
    remote_cache_stats_start()
    cache_backend = get_cache_backend(cache_name)
    cache_backend.set(KEY_PREFIX + key, (val,), timeout=timeout)
    remote_cache_stats_finish()

def cache_get(key: str, cache_name: Optional[str]=None) -> Any:
    remote_cache_stats_start()
    cache_backend = get_cache_backend(cache_name)
    ret = cache_backend.get(KEY_PREFIX + key)
    remote_cache_stats_finish()
    return ret

def cache_get_many(keys: List[str], cache_name: Optional[str]=None) -> Dict[str, Any]:
    keys = [KEY_PREFIX + key for key in keys]
    remote_cache_stats_start()
    ret = get_cache_backend(cache_name).get_many(keys)
    remote_cache_stats_finish()
    return dict([(key[len(KEY_PREFIX):], value) for key, value in ret.items()])

def cache_set_many(items: Dict[str, Any], cache_name: Optional[str]=None,
                   timeout: Optional[int]=None) -> None:
    new_items = {}
    for key in items:
        new_items[KEY_PREFIX + key] = items[key]
    items = new_items
    remote_cache_stats_start()
    get_cache_backend(cache_name).set_many(items, timeout=timeout)
    remote_cache_stats_finish()

def cache_delete(key: str, cache_name: Optional[str]=None) -> None:
    remote_cache_stats_start()
    get_cache_backend(cache_name).delete(KEY_PREFIX + key)
    remote_cache_stats_finish()

def cache_delete_many(items: Iterable[str], cache_name: Optional[str]=None) -> None:
    remote_cache_stats_start()
    get_cache_backend(cache_name).delete_many(
        KEY_PREFIX + item for item in items)
    remote_cache_stats_finish()

# generic_bulk_cached_fetch and its helpers
ObjKT = TypeVar('ObjKT')
ItemT = TypeVar('ItemT')
CompressedItemT = TypeVar('CompressedItemT')

def default_extractor(obj: CompressedItemT) -> ItemT:
    return obj  # type: ignore # Need a type assert that ItemT=CompressedItemT

def default_setter(obj: ItemT) -> CompressedItemT:
    return obj  # type: ignore # Need a type assert that ItemT=CompressedItemT

def default_id_fetcher(obj: ItemT) -> ObjKT:
    return obj.id  # type: ignore # Need ItemT/CompressedItemT typevars to be a Django protocol

def default_cache_transformer(obj: ItemT) -> ItemT:
    return obj

# Required arguments are as follows:
# * object_ids: The list of object ids to look up
# * cache_key_function: object_id => cache key
# * query_function: [object_ids] => [objects from database]
# Optional keyword arguments:
# * setter: Function to call before storing items to cache (e.g. compression)
# * extractor: Function to call on items returned from cache
#   (e.g. decompression).  Should be the inverse of the setter
#   function.
# * id_fetcher: Function mapping an object from database => object_id
#   (in case we're using a key more complex than obj.id)
# * cache_transformer: Function mapping an object from database =>
#   value for cache (in case the values that we're caching are some
#   function of the objects, not the objects themselves)
def generic_bulk_cached_fetch(
        cache_key_function: Callable[[ObjKT], str],
        query_function: Callable[[List[ObjKT]], Iterable[Any]],
        object_ids: Iterable[ObjKT],
        extractor: Callable[[CompressedItemT], ItemT] = default_extractor,
        setter: Callable[[ItemT], CompressedItemT] = default_setter,
        id_fetcher: Callable[[ItemT], ObjKT] = default_id_fetcher,
        cache_transformer: Callable[[ItemT], ItemT] = default_cache_transformer
) -> Dict[ObjKT, ItemT]:
    cache_keys = {}  # type: Dict[ObjKT, str]
    for object_id in object_ids:
        cache_keys[object_id] = cache_key_function(object_id)
    cached_objects_compressed = cache_get_many([cache_keys[object_id]
                                                for object_id in object_ids])  # type: Dict[str, Tuple[CompressedItemT]]
    cached_objects = {}  # type: Dict[str, ItemT]
    for (key, val) in cached_objects_compressed.items():
        cached_objects[key] = extractor(cached_objects_compressed[key][0])
    needed_ids = [object_id for object_id in object_ids if
                  cache_keys[object_id] not in cached_objects]
    db_objects = query_function(needed_ids)

    items_for_remote_cache = {}  # type: Dict[str, Tuple[CompressedItemT]]
    for obj in db_objects:
        key = cache_keys[id_fetcher(obj)]
        item = cache_transformer(obj)
        items_for_remote_cache[key] = (setter(item),)
        cached_objects[key] = item
    if len(items_for_remote_cache) > 0:
        cache_set_many(items_for_remote_cache)
    return dict((object_id, cached_objects[cache_keys[object_id]]) for object_id in object_ids
                if cache_keys[object_id] in cached_objects)
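# A sketch of a typical call (this wrapper is illustrative, not part of the
# original module): fetch UserProfile rows by id, consulting the cache first
# and querying the database only for the ids that were missing.
def _example_bulk_fetch_users(user_ids: List[int]) -> Dict[int, Any]:
    from zerver.models import UserProfile  # imported here to avoid a cyclic import
    return generic_bulk_cached_fetch(
        cache_key_function=user_profile_by_id_cache_key,
        query_function=lambda ids: UserProfile.objects.filter(id__in=ids),
        object_ids=user_ids,
    )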
def cache(func: Callable[..., ReturnT]) -> Callable[..., ReturnT]:
    """Decorator which applies Django caching to a function.

    Uses a key based on the function's name, filename, and
    the repr() of its arguments."""

    func_uniqifier = '%s-%s' % (func.__code__.co_filename, func.__name__)

    @wraps(func)
    def keyfunc(*args: Any, **kwargs: Any) -> str:
        # Django complains about spaces because memcached rejects them
        key = func_uniqifier + repr((args, kwargs))
        return key.replace('-', '--').replace(' ', '-s')

    return cache_with_key(keyfunc)(func)

def preview_url_cache_key(url: str) -> str:
    return "preview_url:%s" % (make_safe_digest(url))

def display_recipient_cache_key(recipient_id: int) -> str:
    return "display_recipient_dict:%d" % (recipient_id,)

def user_profile_by_email_cache_key(email: str) -> str:
    # See the comment in zerver/lib/avatar_hash.py:gravatar_hash for why we
    # are proactively encoding email addresses even though they will
    # with high likelihood be ASCII-only for the foreseeable future.
    return 'user_profile_by_email:%s' % (make_safe_digest(email.strip()),)

def user_profile_cache_key_id(email: str, realm_id: int) -> str:
    return u"user_profile:%s:%s" % (make_safe_digest(email.strip()), realm_id,)

def user_profile_cache_key(email: str, realm: 'Realm') -> str:
    return user_profile_cache_key_id(email, realm.id)

def bot_profile_cache_key(email: str) -> str:
    return "bot_profile:%s" % (make_safe_digest(email.strip()))

def user_profile_by_id_cache_key(user_profile_id: int) -> str:
    return "user_profile_by_id:%s" % (user_profile_id,)

def user_profile_by_api_key_cache_key(api_key: str) -> str:
    return "user_profile_by_api_key:%s" % (api_key,)
realm_user_dict_fields = [
    'id', 'full_name', 'short_name', 'email',
    'avatar_source', 'avatar_version', 'is_active',
    'is_realm_admin', 'is_bot', 'realm_id', 'timezone',
    'date_joined', 'is_guest'
]  # type: List[str]

def realm_user_dicts_cache_key(realm_id: int) -> str:
    return "realm_user_dicts:%s" % (realm_id,)

def active_user_ids_cache_key(realm_id: int) -> str:
    return "active_user_ids:%s" % (realm_id,)

def active_non_guest_user_ids_cache_key(realm_id: int) -> str:
    return "active_non_guest_user_ids:%s" % (realm_id,)

bot_dict_fields = ['id', 'full_name', 'short_name', 'bot_type', 'email',
                   'is_active', 'default_sending_stream__name',
                   'realm_id',
                   'default_events_register_stream__name',
                   'default_all_public_streams', 'api_key',
                   'bot_owner__email', 'avatar_source',
                   'avatar_version']  # type: List[str]

def bot_dicts_in_realm_cache_key(realm: 'Realm') -> str:
    return "bot_dicts_in_realm:%s" % (realm.id,)

def get_stream_cache_key(stream_name: str, realm_id: int) -> str:
    return "stream_by_realm_and_name:%s:%s" % (
        realm_id, make_safe_digest(stream_name.strip().lower()))

def delete_user_profile_caches(user_profiles: Iterable['UserProfile']) -> None:
    # Imported here to avoid cyclic dependency.
    from zerver.lib.users import get_all_api_keys
    keys = []
    for user_profile in user_profiles:
        keys.append(user_profile_by_email_cache_key(user_profile.email))
        keys.append(user_profile_by_id_cache_key(user_profile.id))
        for api_key in get_all_api_keys(user_profile):
            keys.append(user_profile_by_api_key_cache_key(api_key))
        keys.append(user_profile_cache_key(user_profile.email, user_profile.realm))

    cache_delete_many(keys)

def delete_display_recipient_cache(user_profile: 'UserProfile') -> None:
    from zerver.models import Subscription  # We need to import here to avoid cyclic dependency.
    recipient_ids = Subscription.objects.filter(user_profile=user_profile)
    recipient_ids = recipient_ids.values_list('recipient_id', flat=True)
    keys = [display_recipient_cache_key(rid) for rid in recipient_ids]
    cache_delete_many(keys)

# Called by models.py to flush the user_profile cache whenever we save
# a user_profile object
def flush_user_profile(sender: Any, **kwargs: Any) -> None:
    user_profile = kwargs['instance']
    delete_user_profile_caches([user_profile])

    def changed(fields: List[str]) -> bool:
        if kwargs.get('update_fields') is None:
            # adds/deletes should invalidate the cache
            return True

        update_fields = set(kwargs['update_fields'])
        for f in fields:
            if f in update_fields:
                return True

        return False

    # Invalidate our active_users_in_realm info dict if any user has changed
    # the fields in the dict or become (in)active
    if changed(realm_user_dict_fields):
        cache_delete(realm_user_dicts_cache_key(user_profile.realm_id))

    if changed(['is_active']):
        cache_delete(active_user_ids_cache_key(user_profile.realm_id))
        cache_delete(active_non_guest_user_ids_cache_key(user_profile.realm_id))

    if changed(['is_guest']):
        cache_delete(active_non_guest_user_ids_cache_key(user_profile.realm_id))

    if changed(['email', 'full_name', 'short_name', 'id', 'is_mirror_dummy']):
        delete_display_recipient_cache(user_profile)

    # Invalidate our bots_in_realm info dict if any bot has
    # changed the fields in the dict or become (in)active
    if user_profile.is_bot and changed(bot_dict_fields):
        cache_delete(bot_dicts_in_realm_cache_key(user_profile.realm))

    # Invalidate realm-wide alert words cache if any user in the realm has changed
    # alert words
    if changed(['alert_words']):
        cache_delete(realm_alert_words_cache_key(user_profile.realm))
# Called by models.py to flush various caches whenever we save
# a Realm object.  The main tricky thing here is that Realm info is
# generally cached indirectly through user_profile objects.
def flush_realm(sender: Any, **kwargs: Any) -> None:
    realm = kwargs['instance']
    users = realm.get_active_users()
    delete_user_profile_caches(users)

    # Deleting realm or updating message_visibility_limit
    # attribute should clear the first_visible_message_id cache.
    if kwargs.get('update_fields') is None or "message_visibility_limit" in kwargs['update_fields']:
        cache_delete(realm_first_visible_message_id_cache_key(realm))

    if realm.deactivated or (kwargs["update_fields"] is not None and
                             "string_id" in kwargs['update_fields']):
        cache_delete(realm_user_dicts_cache_key(realm.id))
        cache_delete(active_user_ids_cache_key(realm.id))
        cache_delete(bot_dicts_in_realm_cache_key(realm))
        cache_delete(realm_alert_words_cache_key(realm))
        cache_delete(active_non_guest_user_ids_cache_key(realm.id))

def realm_alert_words_cache_key(realm: 'Realm') -> str:
    return "realm_alert_words:%s" % (realm.string_id,)

def realm_first_visible_message_id_cache_key(realm: 'Realm') -> str:
    return u"realm_first_visible_message_id:%s" % (realm.string_id,)

# Called by models.py to flush the stream cache whenever we save a stream
# object.
def flush_stream(sender: Any, **kwargs: Any) -> None:
    from zerver.models import UserProfile
    stream = kwargs['instance']
    items_for_remote_cache = {}
    items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm_id)] = (stream,)
    cache_set_many(items_for_remote_cache)

    if kwargs.get('update_fields') is None or 'name' in kwargs['update_fields'] and \
            UserProfile.objects.filter(
                Q(default_sending_stream=stream) |
                Q(default_events_register_stream=stream)).exists():
        cache_delete(bot_dicts_in_realm_cache_key(stream.realm))

def to_dict_cache_key_id(message_id: int) -> str:
    return 'message_dict:%d' % (message_id,)

def to_dict_cache_key(message: 'Message') -> str:
    return to_dict_cache_key_id(message.id)

def flush_message(sender: Any, **kwargs: Any) -> None:
    message = kwargs['instance']
    cache_delete(to_dict_cache_key_id(message.id))

def flush_submessage(sender: Any, **kwargs: Any) -> None:
    submessage = kwargs['instance']
    # submessages are not cached directly, they are part of their
    # parent messages
    message_id = submessage.message_id
    cache_delete(to_dict_cache_key_id(message_id))

DECORATOR = Callable[[Callable[..., Any]], Callable[..., Any]]
def ignore_unhashable_lru_cache(maxsize: int=128, typed: bool=False) -> DECORATOR:
    """
    This is a wrapper over the lru_cache function.  It adds the following
    features on top of lru_cache:

    * It will not cache the result of functions with unhashable arguments.
    * It will clear the cache whenever zerver.lib.cache.KEY_PREFIX changes.
    """
    internal_decorator = lru_cache(maxsize=maxsize, typed=typed)

    def decorator(user_function: Callable[..., Any]) -> Callable[..., Any]:
        if settings.DEVELOPMENT and not settings.TEST_SUITE:  # nocoverage
            # In the development environment, we want every file
            # change to refresh the source files from disk.
            return user_function
        cache_enabled_user_function = internal_decorator(user_function)

        def wrapper(*args: Any, **kwargs: Any) -> Any:
            if not hasattr(cache_enabled_user_function, 'key_prefix'):
                cache_enabled_user_function.key_prefix = KEY_PREFIX

            if cache_enabled_user_function.key_prefix != KEY_PREFIX:
                # Clear cache when cache.KEY_PREFIX changes. This is used in
                # tests.
                cache_enabled_user_function.cache_clear()
                cache_enabled_user_function.key_prefix = KEY_PREFIX

            try:
                return cache_enabled_user_function(*args, **kwargs)
            except TypeError:
                # args or kwargs contains an element which is unhashable. In
                # this case we don't cache the result.
                pass

            # Deliberately calling this function from outside of the exception
            # handler to get a more descriptive traceback.  Otherwise the
            # traceback can include the exception from
            # cache_enabled_user_function as well.
            return user_function(*args, **kwargs)

        setattr(wrapper, 'cache_info', cache_enabled_user_function.cache_info)
        setattr(wrapper, 'cache_clear', cache_enabled_user_function.cache_clear)
        return wrapper

    return decorator
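# A minimal sketch (hypothetical function, not part of the original module):
# calls with hashable arguments are memoized as usual; calls with unhashable
# arguments (like a list) raise TypeError inside lru_cache, which is caught
# above, so they still succeed -- they are just never cached.
@ignore_unhashable_lru_cache(maxsize=16)
def _example_summarize(value: Any) -> str:
    return str(value)

# _example_summarize((1, 2)) is cached; _example_summarize([1, 2]) is
# recomputed on every call.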
# ===== zerver/lib/cache_helpers.py =====
# See https://zulip.readthedocs.io/en/latest/subsystems/caching.html for docs

from typing import Any, Callable, Dict, List, Tuple

import datetime
import logging

# This file needs to be different from cache.py because cache.py
# cannot import anything from zerver.models or we'd have an import
# loop
from analytics.models import RealmCount
from django.conf import settings
from zerver.models import Message, UserProfile, Stream, get_stream_cache_key, \
    Recipient, get_recipient_cache_key, Client, get_client_cache_key, \
    Huddle, huddle_hash_cache_key
from zerver.lib.cache import cache_with_key, cache_set, \
    user_profile_by_api_key_cache_key, \
    user_profile_cache_key, get_remote_cache_time, get_remote_cache_requests, \
    cache_set_many, to_dict_cache_key_id
from zerver.lib.message import MessageDict
from zerver.lib.users import get_all_api_keys
from importlib import import_module
from django.contrib.sessions.models import Session
from django.db.models import Q
from django.utils.timezone import now as timezone_now

MESSAGE_CACHE_SIZE = 75000

def message_fetch_objects() -> List[Any]:
    try:
        max_id = Message.objects.only('id').order_by("-id")[0].id
    except IndexError:
        return []
    return Message.objects.select_related().filter(~Q(sender__email='tabbott/extra@mit.edu'),
                                                   id__gt=max_id - MESSAGE_CACHE_SIZE)

def message_cache_items(items_for_remote_cache: Dict[str, Tuple[bytes]],
                        message: Message) -> None:
    '''
    Note: this code is untested, and the caller has been
    commented out for a while.
    '''
    key = to_dict_cache_key_id(message.id)
    value = MessageDict.to_dict_uncached(message)
    items_for_remote_cache[key] = (value,)
def user_cache_items(items_for_remote_cache: Dict[str, Tuple[UserProfile]],
                     user_profile: UserProfile) -> None:
    for api_key in get_all_api_keys(user_profile):
        items_for_remote_cache[user_profile_by_api_key_cache_key(api_key)] = (user_profile,)
    items_for_remote_cache[user_profile_cache_key(user_profile.email, user_profile.realm)] = (user_profile,)
    # We have other user_profile caches, but none of them are on the
    # core serving path for lots of requests.

def stream_cache_items(items_for_remote_cache: Dict[str, Tuple[Stream]],
                       stream: Stream) -> None:
    items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm_id)] = (stream,)

def client_cache_items(items_for_remote_cache: Dict[str, Tuple[Client]],
                       client: Client) -> None:
    items_for_remote_cache[get_client_cache_key(client.name)] = (client,)

def huddle_cache_items(items_for_remote_cache: Dict[str, Tuple[Huddle]],
                       huddle: Huddle) -> None:
    items_for_remote_cache[huddle_hash_cache_key(huddle.huddle_hash)] = (huddle,)

def recipient_cache_items(items_for_remote_cache: Dict[str, Tuple[Recipient]],
                          recipient: Recipient) -> None:
    items_for_remote_cache[get_recipient_cache_key(recipient.type, recipient.type_id)] = (recipient,)

session_engine = import_module(settings.SESSION_ENGINE)
def session_cache_items(items_for_remote_cache: Dict[str, str],
                        session: Session) -> None:
    if settings.SESSION_ENGINE != "django.contrib.sessions.backends.cached_db":
        # If we're not using the cached_db session engine, there
        # will be no store.cache_key attribute, and in any case we
        # don't need to fill the cache, since it won't exist.
        return
    store = session_engine.SessionStore(session_key=session.session_key)  # type: ignore # import_module
    items_for_remote_cache[store.cache_key] = store.decode(session.session_data)
def get_active_realm_ids() -> List[int]:
    """For servers like zulipchat.com with a lot of realms, it only makes
    sense to do cache-filling work for realms that have any currently
    active users/clients.  Otherwise, we end up with every single-user
    trial organization that has ever been created costing us N streams
    worth of cache work (where N is the number of default streams for
    a new organization).
    """
    date = timezone_now() - datetime.timedelta(days=2)
    return RealmCount.objects.filter(
        end_time__gte=date,
        property="1day_actives::day",
        value__gt=0).distinct("realm_id").values_list("realm_id", flat=True)

def get_streams() -> List[Stream]:
    return Stream.objects.select_related().filter(
        realm__in=get_active_realm_ids()).exclude(
            # We filter out Zephyr realms, because they can easily
            # have 10,000s of streams with only 1 subscriber.
            is_in_zephyr_realm=True)

def get_recipients() -> List[Recipient]:
    return Recipient.objects.select_related().filter(
        type_id__in=get_streams().values_list("id", flat=True))  # type: ignore # Should be QuerySet above

def get_users() -> List[UserProfile]:
    return UserProfile.objects.select_related().filter(
        long_term_idle=False,
        realm__in=get_active_realm_ids())

# Format is (objects query, items filler function, timeout, batch size)
#
# The objects queries are put inside lambdas to prevent Django from
# doing any setup for things we're unlikely to use (without the lambda
# wrapper the below adds an extra 3ms or so to startup time for
# anything importing this file).
cache_fillers = {
    'user': (get_users, user_cache_items, 3600*24*7, 10000),
    'client': (lambda: Client.objects.select_related().all(), client_cache_items, 3600*24*7, 10000),
    'recipient': (get_recipients, recipient_cache_items, 3600*24*7, 10000),
    'stream': (get_streams, stream_cache_items, 3600*24*7, 10000),
    # Message cache fetching disabled until we can fix the fact that it
    # does a bunch of inefficient memcached queries as part of filling
    # the display_recipient cache
    # 'message': (message_fetch_objects, message_cache_items, 3600 * 24, 1000),
    'huddle': (lambda: Huddle.objects.select_related().all(), huddle_cache_items, 3600*24*7, 10000),
    'session': (lambda: Session.objects.all(), session_cache_items, 3600*24*7, 10000),
}  # type: Dict[str, Tuple[Callable[[], List[Any]], Callable[[Dict[str, Any], Any], None], int, int]]
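# Sketch of what a new entry would look like (the 'realm' filler and its key
# function below are hypothetical, for illustration only): a zero-argument
# query, an items filler matching the signatures above, a timeout, and a
# batch size.
#
# def realm_cache_items(items_for_remote_cache: Dict[str, Any],
#                       realm: Realm) -> None:
#     items_for_remote_cache[hypothetical_realm_cache_key(realm)] = (realm,)
#
# cache_fillers['realm'] = (lambda: Realm.objects.all(),
#                           realm_cache_items, 3600*24*7, 10000)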
def fill_remote_cache(cache: str) -> None:
    remote_cache_time_start = get_remote_cache_time()
    remote_cache_requests_start = get_remote_cache_requests()
    items_for_remote_cache = {}  # type: Dict[str, Any]
    (objects, items_filler, timeout, batch_size) = cache_fillers[cache]
    count = 0
    for obj in objects():
        items_filler(items_for_remote_cache, obj)
        count += 1
        if (count % batch_size == 0):
            cache_set_many(items_for_remote_cache, timeout=3600*24)
            items_for_remote_cache = {}
    cache_set_many(items_for_remote_cache, timeout=3600*24*7)
    logging.info("Successfully populated %s cache!  Consumed %s remote cache queries (%s time)" %
                 (cache, get_remote_cache_requests() - remote_cache_requests_start,
                  round(get_remote_cache_time() - remote_cache_time_start, 2)))

# ===== zerver/lib/camo.py =====
from django.conf import settings
import codecs
import hashlib
import hmac

def generate_camo_url(url: str) -> str:
    encoded_url = url.encode("utf-8")
    encoded_camo_key = settings.CAMO_KEY.encode("utf-8")
    digest = hmac.new(encoded_camo_key, encoded_url, hashlib.sha1).hexdigest()
    hex_encoded_url = codecs.encode(encoded_url, "hex")  # type: ignore # https://github.com/python/typeshed/issues/300
    return "%s/%s" % (digest, hex_encoded_url.decode("utf-8"))

# Encodes the provided URL using the same algorithm used by the camo
# caching https image proxy
def get_camo_url(url: str) -> str:
    # Only encode the url if Camo is enabled
    if settings.CAMO_URI == '':
        return url
    return "%s%s" % (settings.CAMO_URI, generate_camo_url(url))
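# A standalone restatement of the scheme with an explicit key (the key and
# URL values one would pass are illustrative, not real settings): the
# HMAC-SHA1 digest lets the Camo proxy verify that this server generated the
# URL, and the hex encoding keeps the original URL transport-safe.
def _example_camo_path(url: str, camo_key: str) -> str:
    digest = hmac.new(camo_key.encode("utf-8"), url.encode("utf-8"),
                      hashlib.sha1).hexdigest()
    return "%s/%s" % (digest,
                      codecs.encode(url.encode("utf-8"), "hex").decode("utf-8"))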
# ===== zerver/lib/ccache.py =====
from typing import Any, Dict, List, Optional

# This file is adapted from samples/shellinabox/ssh-krb-wrapper in
# https://github.com/davidben/webathena, which has the following
# license:
#
# Copyright (c) 2013 David Benjamin and Alan Huang
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import base64
import struct
from typing import Union

def force_bytes(s: Union[str, bytes], encoding: str='utf-8') -> bytes:
    """converts a string to binary string"""
    if isinstance(s, bytes):
        return s
    elif isinstance(s, str):
        return s.encode(encoding)
    else:
        raise TypeError("force_bytes expects a string type")

# Some DER encoding stuff. Bleh. This is because the ccache contains a
# DER-encoded krb5 Ticket structure, whereas Webathena deserializes
# into the various fields. Re-encoding in the client would be easy as
# there is already an ASN.1 implementation, but in the interest of
# limiting MIT Kerberos's exposure to malformed ccaches, encode it
# ourselves. To that end, here's the laziest DER encoder ever.
def der_encode_length(length: int) -> bytes:
    if length <= 127:
        return struct.pack('!B', length)
    out = b""
    while length > 0:
        out = struct.pack('!B', length & 0xff) + out
        length >>= 8
    out = struct.pack('!B', len(out) | 0x80) + out
    return out

def der_encode_tlv(tag: int, value: bytes) -> bytes:
    return struct.pack('!B', tag) + der_encode_length(len(value)) + value

def der_encode_integer_value(val: int) -> bytes:
    if not isinstance(val, int):
        raise TypeError("int")
    # base 256, MSB first, two's complement, minimum number of octets
    # necessary.  This has a number of annoying edge cases:
    # * 0 and -1 are 0x00 and 0xFF, not the empty string.
    # * 255 is 0x00 0xFF, not 0xFF
    # * -256 is 0xFF 0x00, not 0x00

    # Special-case to avoid an empty encoding.
    if val == 0:
        return b"\x00"
    sign = 0  # What you would get if you sign-extended the current high bit.
    out = b""
    # We can stop once sign-extension matches the remaining value.
    while val != sign:
        byte = val & 0xff
        out = struct.pack('!B', byte) + out
        sign = -1 if byte & 0x80 == 0x80 else 0
        val >>= 8
    return out
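# The edge cases above, made concrete (traced by hand through the loop):
#   der_encode_integer_value(0)    == b"\x00"      (special-cased)
#   der_encode_integer_value(-1)   == b"\xff"
#   der_encode_integer_value(255)  == b"\x00\xff"  (leading 0x00 keeps it positive)
#   der_encode_integer_value(-256) == b"\xff\x00"  (trailing 0x00 carries the value)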
def der_encode_integer(val: int) -> bytes:
    return der_encode_tlv(0x02, der_encode_integer_value(val))

def der_encode_int32(val: int) -> bytes:
    if val < -2147483648 or val > 2147483647:
        raise ValueError("Bad value")
    return der_encode_integer(val)

def der_encode_uint32(val: int) -> bytes:
    if val < 0 or val > 4294967295:
        raise ValueError("Bad value")
    return der_encode_integer(val)

def der_encode_string(val: str) -> bytes:
    if not isinstance(val, str):
        raise TypeError("unicode")
    return der_encode_tlv(0x1b, val.encode("utf-8"))

def der_encode_octet_string(val: bytes) -> bytes:
    if not isinstance(val, bytes):
        raise TypeError("bytes")
    return der_encode_tlv(0x04, val)

def der_encode_sequence(tlvs: List[Optional[bytes]], tagged: Optional[bool]=True) -> bytes:
    body = []
    for i, tlv in enumerate(tlvs):
        # Missing optional elements represented as None.
        if tlv is None:
            continue
        if tagged:
            # Assume kerberos-style explicit tagging of components.
            tlv = der_encode_tlv(0xa0 | i, tlv)
        body.append(tlv)
    return der_encode_tlv(0x30, b"".join(body))

def der_encode_ticket(tkt: Dict[str, Any]) -> bytes:
    return der_encode_tlv(
        0x61,  # Ticket
        der_encode_sequence(
            [der_encode_integer(5),  # tktVno
             der_encode_string(tkt["realm"]),
             der_encode_sequence(  # PrincipalName
                 [der_encode_int32(tkt["sname"]["nameType"]),
                  der_encode_sequence([der_encode_string(c)
                                       for c in tkt["sname"]["nameString"]],
                                      tagged=False)]),
             der_encode_sequence(  # EncryptedData
                 [der_encode_int32(tkt["encPart"]["etype"]),
                  (der_encode_uint32(tkt["encPart"]["kvno"])
                   if "kvno" in tkt["encPart"]
                   else None),
                  der_encode_octet_string(
                      base64.b64decode(tkt["encPart"]["cipher"]))])]))
# Kerberos ccache writing code.  Using format documentation from here:
# http://www.gnu.org/software/shishi/manual/html_node/The-Credential-Cache-Binary-File-Format.html

def ccache_counted_octet_string(data: bytes) -> bytes:
    if not isinstance(data, bytes):
        raise TypeError("bytes")
    return struct.pack("!I", len(data)) + data

def ccache_principal(name: Dict[str, str], realm: str) -> bytes:
    header = struct.pack("!II", name["nameType"], len(name["nameString"]))
    return (header + ccache_counted_octet_string(force_bytes(realm)) +
            b"".join(ccache_counted_octet_string(force_bytes(c))
                     for c in name["nameString"]))

def ccache_key(key: Dict[str, str]) -> bytes:
    return (struct.pack("!H", key["keytype"]) +
            ccache_counted_octet_string(base64.b64decode(key["keyvalue"])))

def flags_to_uint32(flags: List[str]) -> int:
    ret = 0
    for i, v in enumerate(flags):
        if v:
            ret |= 1 << (31 - i)
    return ret

def ccache_credential(cred: Dict[str, Any]) -> bytes:
    out = ccache_principal(cred["cname"], cred["crealm"])
    out += ccache_principal(cred["sname"], cred["srealm"])
    out += ccache_key(cred["key"])
    out += struct.pack("!IIII",
                       cred["authtime"] // 1000,
                       cred.get("starttime", cred["authtime"]) // 1000,
                       cred["endtime"] // 1000,
                       cred.get("renewTill", 0) // 1000)
    out += struct.pack("!B", 0)
    out += struct.pack("!I", flags_to_uint32(cred["flags"]))
    # TODO: Care about addrs or authdata? Former is "caddr" key.
    out += struct.pack("!II", 0, 0)
    out += ccache_counted_octet_string(der_encode_ticket(cred["ticket"]))
    # No second_ticket.
    out += ccache_counted_octet_string(b"")
    return out

def make_ccache(cred: Dict[str, Any]) -> bytes:
    # Do we need a DeltaTime header? The ccache I get just puts zero
    # in there, so do the same.
    out = struct.pack("!HHHHII",
                      0x0504,  # file_format_version
                      12,      # headerlen
                      1,       # tag (DeltaTime)
                      8,       # taglen (two uint32_ts)
                      0, 0,    # time_offset / usec_offset
                      )
    out += ccache_principal(cred["cname"], cred["crealm"])
    out += ccache_credential(cred)
    return out
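# Concrete illustration of flags_to_uint32 (traced by hand): the flags list
# is MSB-first, so element 0 maps to bit 31 of the word:
#   flags_to_uint32([True])        == 0x80000000
#   flags_to_uint32([False, True]) == 0x40000000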
# ===== zerver/lib/context_managers.py =====
"""
Context managers, i.e. things you can use with the 'with' statement.
"""

import fcntl
from contextlib import contextmanager
from typing import Iterator, IO, Any, Union

@contextmanager
def flock(lockfile: Union[int, IO[Any]], shared: bool=False) -> Iterator[None]:
    """Lock a file object using flock(2) for the duration of a 'with' statement.

    If shared is True, use a LOCK_SH lock, otherwise LOCK_EX."""

    fcntl.flock(lockfile, fcntl.LOCK_SH if shared else fcntl.LOCK_EX)
    try:
        yield
    finally:
        fcntl.flock(lockfile, fcntl.LOCK_UN)

@contextmanager
def lockfile(filename: str, shared: bool=False) -> Iterator[None]:
    """Lock a file using flock(2) for the duration of a 'with' statement.

    If shared is True, use a LOCK_SH lock, otherwise LOCK_EX.

    The file is given by name and will be created if it does not exist."""
    with open(filename, 'w') as lock:
        with flock(lock, shared=shared):
            yield
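# A minimal usage sketch (this helper is hypothetical, not part of the
# original module): flock serializes the read-modify-write below across
# processes, and the lock is dropped automatically when the block exits.
def _example_bump_counter(path: str) -> None:
    with lockfile(path + '.lock'):
        try:
            with open(path) as f:
                count = int(f.read() or '0')
        except FileNotFoundError:
            count = 0
        with open(path, 'w') as f:
            f.write(str(count + 1))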
# ===== zerver/lib/create_user.py =====
from django.contrib.auth.models import UserManager
from django.utils.timezone import now as timezone_now
from zerver.models import UserProfile, Recipient, Subscription, Realm, Stream
from zerver.lib.upload import copy_avatar
from zerver.lib.hotspots import copy_hotpots
from zerver.lib.utils import generate_api_key

import base64
import ujson
import os
import string

from typing import Optional

def copy_user_settings(source_profile: UserProfile, target_profile: UserProfile) -> None:
    """Warning: Does not save, to avoid extra database queries"""
    for settings_name in UserProfile.property_types:
        value = getattr(source_profile, settings_name)
        setattr(target_profile, settings_name, value)

    for settings_name in UserProfile.notification_setting_types:
        value = getattr(source_profile, settings_name)
        setattr(target_profile, settings_name, value)

    setattr(target_profile, "full_name", source_profile.full_name)
    setattr(target_profile, "enter_sends", source_profile.enter_sends)
    target_profile.save()

    if source_profile.avatar_source == UserProfile.AVATAR_FROM_USER:
        from zerver.lib.actions import do_change_avatar_fields
        do_change_avatar_fields(target_profile, UserProfile.AVATAR_FROM_USER)
        copy_avatar(source_profile, target_profile)

    copy_hotpots(source_profile, target_profile)

# create_user_profile is based on Django's User.objects.create_user,
# except that we don't save to the database, so it can be used in
# bulk_creates.
#
# Only use this for bulk_create -- for normal usage one should use
# create_user (below), which will also make the Subscription and
# Recipient objects
def create_user_profile(realm: Realm, email: str, password: Optional[str],
                        active: bool, bot_type: Optional[int], full_name: str,
                        short_name: str, bot_owner: Optional[UserProfile],
                        is_mirror_dummy: bool, tos_version: Optional[str],
                        timezone: Optional[str],
                        tutorial_status: Optional[str] = UserProfile.TUTORIAL_WAITING,
                        enter_sends: bool = False) -> UserProfile:
    now = timezone_now()
    email = UserManager.normalize_email(email)

    user_profile = UserProfile(email=email, is_staff=False, is_active=active,
                               full_name=full_name, short_name=short_name,
                               last_login=now, date_joined=now, realm=realm,
                               pointer=-1, is_bot=bool(bot_type), bot_type=bot_type,
                               bot_owner=bot_owner, is_mirror_dummy=is_mirror_dummy,
                               tos_version=tos_version, timezone=timezone,
                               tutorial_status=tutorial_status,
                               enter_sends=enter_sends,
                               onboarding_steps=ujson.dumps([]),
                               default_language=realm.default_language,
                               twenty_four_hour_time=realm.default_twenty_four_hour_time,
                               delivery_email=email)

    if bot_type or not active:
        password = None

    user_profile.set_password(password)

    user_profile.api_key = generate_api_key()
    return user_profile
def create_user(email: str, password: Optional[str], realm: Realm,
                full_name: str, short_name: str, active: bool = True,
                is_realm_admin: bool = False, bot_type: Optional[int] = None,
                bot_owner: Optional[UserProfile] = None,
                tos_version: Optional[str] = None, timezone: str = "",
                avatar_source: str = UserProfile.AVATAR_FROM_GRAVATAR,
                is_mirror_dummy: bool = False,
                default_sending_stream: Optional[Stream] = None,
                default_events_register_stream: Optional[Stream] = None,
                default_all_public_streams: Optional[bool] = None,
                source_profile: Optional[UserProfile] = None) -> UserProfile:
    user_profile = create_user_profile(realm, email, password, active, bot_type,
                                       full_name, short_name, bot_owner,
                                       is_mirror_dummy, tos_version, timezone)
    user_profile.is_realm_admin = is_realm_admin
    user_profile.avatar_source = avatar_source
    user_profile.timezone = timezone
    user_profile.default_sending_stream = default_sending_stream
    user_profile.default_events_register_stream = default_events_register_stream
    # Allow the ORM default to be used if not provided
    if default_all_public_streams is not None:
        user_profile.default_all_public_streams = default_all_public_streams
    # If a source profile was specified, we copy settings from that
    # user.  Note that this is positioned in a way that overrides
    # other arguments passed in, which is correct for most defaults
    # like timezone where the source profile likely has a better value
    # than the guess.  As we decide on details like avatars and full
    # names for this feature, we may want to move it.
    if source_profile is not None:
        # copy_user_settings saves the attribute values so a secondary
        # save is not required.
        copy_user_settings(source_profile, user_profile)
    else:
        user_profile.save()

    recipient = Recipient.objects.create(type_id=user_profile.id,
                                         type=Recipient.PERSONAL)
    Subscription.objects.create(user_profile=user_profile, recipient=recipient)
    return user_profile
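# A usage sketch (hypothetical helper, not part of the original module):
# passing source_profile copies the existing user's settings (and custom
# avatar) onto the newly created account, as described above.
def _example_create_mirror_of(existing: UserProfile, email: str) -> UserProfile:
    return create_user(email=email,
                       password=None,
                       realm=existing.realm,
                       full_name=existing.full_name,
                       short_name=existing.short_name,
                       source_profile=existing)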
# ===== zerver/lib/db.py =====
import time
from psycopg2.extensions import cursor, connection

from typing import Callable, Optional, Iterable, Any, Dict, List, Union, TypeVar, \
    Mapping
from zerver.lib.str_utils import NonBinaryStr

CursorObj = TypeVar('CursorObj', bound=cursor)
ParamsT = Union[Iterable[Any], Mapping[str, Any]]

# Similar to the tracking done in Django's CursorDebugWrapper, but done at the
# psycopg2 cursor level so it works with SQLAlchemy.
def wrapper_execute(self: CursorObj,
                    action: Callable[[NonBinaryStr, Optional[ParamsT]], CursorObj],
                    sql: NonBinaryStr,
                    params: Optional[ParamsT]=()) -> CursorObj:
    start = time.time()
    try:
        return action(sql, params)
    finally:
        stop = time.time()
        duration = stop - start
        self.connection.queries.append({
            'time': "%.3f" % duration,
        })

class TimeTrackingCursor(cursor):
    """A psycopg2 cursor class that tracks the time spent executing queries."""

    def execute(self, query: NonBinaryStr,
                vars: Optional[ParamsT]=None) -> 'TimeTrackingCursor':
        return wrapper_execute(self, super().execute, query, vars)

    def executemany(self, query: NonBinaryStr,
                    vars: Iterable[Any]) -> 'TimeTrackingCursor':
        return wrapper_execute(self, super().executemany, query, vars)

class TimeTrackingConnection(connection):
    """A psycopg2 connection class that uses TimeTrackingCursors."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self.queries = []  # type: List[Dict[str, str]]
        super().__init__(*args, **kwargs)

    def cursor(self, *args: Any, **kwargs: Any) -> TimeTrackingCursor:
        kwargs.setdefault('cursor_factory', TimeTrackingCursor)
        return connection.cursor(self, *args, **kwargs)

def reset_queries() -> None:
    from django.db import connections
    for conn in connections.all():
        if conn.connection is not None:
            conn.connection.queries = []
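# Sketch of what the wrapper records (illustrative): each statement executed
# through a TimeTrackingCursor appends one {'time': '0.003'}-style entry to
# the connection's `queries` list, so per-request database time can be
# totaled afterwards:
#
#     cursor = conn.cursor()  # a TimeTrackingCursor, via cursor_factory
#     cursor.execute('SELECT 1')
#     total = sum(float(q['time']) for q in conn.queries)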
tracemalloc_listen_sock started!'.format(os.getpid()))\n while True:\n sock.recv(1)\n tracemalloc_dump()\n\nlistener_pid = None # type: Optional[int]\n\ndef tracemalloc_listen() -> None:\n global listener_pid\n if listener_pid == os.getpid():\n # Already set up -- and in this process, not just its parent.\n return\n logger.debug('pid {}: tracemalloc_listen working...'.format(os.getpid()))\n listener_pid = os.getpid()\n\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n path = \"/tmp/tracemalloc.{}\".format(os.getpid())\n sock.bind(path)\n thread = threading.Thread(target=lambda: tracemalloc_listen_sock(sock),\n daemon=True)\n thread.start()\n logger.debug('pid {}: tracemalloc_listen done: {}'.format(\n os.getpid(), path))\n\ndef maybe_tracemalloc_listen() -> None:\n '''If tracemalloc tracing enabled, listen for requests to dump a snapshot.\n\n To trigger once this is listening:\n echo | socat -u stdin unix-sendto:/tmp/tracemalloc.$pid\n\n To enable in the Zulip web server: edit /etc/zulip/uwsgi.ini ,\n and add e.g. ` PYTHONTRACEMALLOC=5` to the `env=` line.\n This function is called in middleware, so the process will\n automatically start listening.\n\n To enable in other contexts: see upstream docs\n https://docs.python.org/3/library/tracemalloc .\n You may also have to add a call to this function somewhere.\n\n '''\n if os.environ.get('PYTHONTRACEMALLOC'):\n # If the server was started with `tracemalloc` tracing on, then\n # listen for a signal to dump `tracemalloc` snapshots.\n tracemalloc_listen()\n"},"type_annotations":{"kind":"list like","value":["int","FrameType","socket.socket"],"string":"[\n \"int\",\n \"FrameType\",\n \"socket.socket\"\n]"},"type_annotation_starts":{"kind":"list like","value":[642,654,2488],"string":"[\n 642,\n 654,\n 2488\n]"},"type_annotation_ends":{"kind":"list like","value":[645,663,2501],"string":"[\n 645,\n 663,\n 2501\n]"}}},{"rowIdx":1370,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/digest.py"},"contents":{"kind":"string","value":"from typing import Any, Callable, Dict, Iterable, List, Set, Tuple\n\nfrom collections import defaultdict\nimport datetime\nimport logging\nimport pytz\n\nfrom django.db.models import Q, QuerySet\nfrom django.template import loader\nfrom django.conf import settings\nfrom django.utils.timezone import now as timezone_now\n\nfrom confirmation.models import one_click_unsubscribe_link\nfrom zerver.lib.notifications import build_message_list\nfrom zerver.lib.send_email import send_future_email, FromAddress\nfrom zerver.lib.url_encoding import encode_stream\nfrom zerver.models import UserProfile, UserMessage, Recipient, Stream, \\\n Subscription, UserActivity, get_active_streams, get_user_profile_by_id, \\\n Realm, Message\nfrom zerver.context_processors import common_context\nfrom zerver.lib.queue import queue_json_publish\nfrom zerver.lib.logging_util import log_to_file\n\nlogger = logging.getLogger(__name__)\nlog_to_file(logger, settings.DIGEST_LOG_PATH)\n\nVALID_DIGEST_DAY = 1 # Tuesdays\nDIGEST_CUTOFF = 5\n\n# Digests accumulate 4 types of interesting traffic for a user:\n# 1. Missed PMs\n# 2. New streams\n# 3. New users\n# 4. 
Interesting stream traffic, as determined by the longest and most\n# diversely commented-upon topics.\n\ndef inactive_since(user_profile: UserProfile, cutoff: datetime.datetime) -> bool:\n # Hasn't used the app in the last DIGEST_CUTOFF (5) days.\n most_recent_visit = [row.last_visit for row in\n UserActivity.objects.filter(\n user_profile=user_profile)]\n\n if not most_recent_visit:\n # This person has never used the app.\n return True\n\n last_visit = max(most_recent_visit)\n return last_visit < cutoff\n\ndef should_process_digest(realm_str: str) -> bool:\n if realm_str in settings.SYSTEM_ONLY_REALMS:\n # Don't try to send emails to system-only realms\n return False\n return True\n\n# Changes to this should also be reflected in\n# zerver/worker/queue_processors.py:DigestWorker.consume()\ndef queue_digest_recipient(user_profile: UserProfile, cutoff: datetime.datetime) -> None:\n # Convert cutoff to epoch seconds for transit.\n event = {\"user_profile_id\": user_profile.id,\n \"cutoff\": cutoff.strftime('%s')}\n queue_json_publish(\"digest_emails\", event)\n\ndef enqueue_emails(cutoff: datetime.datetime) -> None:\n if not settings.SEND_DIGEST_EMAILS:\n return\n\n if timezone_now().weekday() != VALID_DIGEST_DAY:\n return\n\n for realm in Realm.objects.filter(deactivated=False, digest_emails_enabled=True):\n if not should_process_digest(realm.string_id):\n continue\n\n user_profiles = UserProfile.objects.filter(\n realm=realm, is_active=True, is_bot=False, enable_digest_emails=True)\n\n for user_profile in user_profiles:\n if inactive_since(user_profile, cutoff):\n queue_digest_recipient(user_profile, cutoff)\n logger.info(\"%s is inactive, queuing for potential digest\" % (\n user_profile.email,))\n\ndef gather_hot_conversations(user_profile: UserProfile, stream_ums: QuerySet) -> List[Dict[str, Any]]:\n # Gather stream conversations of 2 types:\n # 1. long conversations\n # 2. conversations where many different people participated\n #\n # Returns a list of dictionaries containing the templating\n # information for each hot conversation.\n\n # stream_ums is a list of UserMessage rows for a single\n # user, so the list of messages is distinct here.\n messages = [um.message for um in stream_ums]\n\n conversation_length = defaultdict(int) # type: Dict[Tuple[int, str], int]\n conversation_messages = defaultdict(list) # type: Dict[Tuple[int, str], List[Message]]\n conversation_diversity = defaultdict(set) # type: Dict[Tuple[int, str], Set[str]]\n for message in messages:\n key = (message.recipient.type_id,\n message.topic_name())\n\n conversation_messages[key].append(message)\n\n if not message.sent_by_human():\n # Don't include automated messages in the count.\n continue\n\n conversation_diversity[key].add(\n message.sender.full_name)\n conversation_length[key] += 1\n\n diversity_list = list(conversation_diversity.items())\n diversity_list.sort(key=lambda entry: len(entry[1]), reverse=True)\n\n length_list = list(conversation_length.items())\n length_list.sort(key=lambda entry: entry[1], reverse=True)\n\n # Get up to the 4 best conversations from the diversity list\n # and length list, filtering out overlapping conversations.\n hot_conversations = [elt[0] for elt in diversity_list[:2]]\n for candidate, _ in length_list:\n if candidate not in hot_conversations:\n hot_conversations.append(candidate)\n if len(hot_conversations) >= 4:\n break\n\n # There was so much overlap between the diversity and length lists that we\n # still have < 4 conversations. 
Try to use remaining diversity items to pad\n # out the hot conversations.\n num_convos = len(hot_conversations)\n if num_convos < 4:\n hot_conversations.extend([elt[0] for elt in diversity_list[num_convos:4]])\n\n hot_conversation_render_payloads = []\n for h in hot_conversations:\n users = list(conversation_diversity[h])\n count = conversation_length[h]\n messages = conversation_messages[h]\n\n # We'll display up to 2 messages from the conversation.\n first_few_messages = messages[:2]\n\n teaser_data = {\"participants\": users,\n \"count\": count - len(first_few_messages),\n \"first_few_messages\": build_message_list(\n user_profile, first_few_messages)}\n\n hot_conversation_render_payloads.append(teaser_data)\n return hot_conversation_render_payloads\n\ndef gather_new_users(user_profile: UserProfile, threshold: datetime.datetime) -> Tuple[int, List[str]]:\n # Gather information on users in the realm who have recently\n # joined.\n if not user_profile.can_access_all_realm_members():\n new_users = [] # type: List[UserProfile]\n else:\n new_users = list(UserProfile.objects.filter(\n realm=user_profile.realm, date_joined__gt=threshold,\n is_bot=False))\n user_names = [user.full_name for user in new_users]\n\n return len(user_names), user_names\n\ndef gather_new_streams(user_profile: UserProfile,\n threshold: datetime.datetime) -> Tuple[int, Dict[str, List[str]]]:\n if user_profile.can_access_public_streams():\n new_streams = list(get_active_streams(user_profile.realm).filter(\n invite_only=False, date_created__gt=threshold))\n else:\n new_streams = []\n\n base_url = \"%s/#narrow/stream/\" % (user_profile.realm.uri,)\n\n streams_html = []\n streams_plain = []\n\n for stream in new_streams:\n narrow_url = base_url + encode_stream(stream.id, stream.name)\n stream_link = \"<a href='%s'>%s</a>\" % (narrow_url, stream.name)\n streams_html.append(stream_link)\n streams_plain.append(stream.name)\n\n return len(new_streams), {\"html\": streams_html, \"plain\": streams_plain}\n\ndef enough_traffic(unread_pms: str, hot_conversations: str, new_streams: int, new_users: int) -> bool:\n if unread_pms or hot_conversations:\n # If you have any unread traffic, good enough.\n return True\n if new_streams and new_users:\n # If you somehow don't have any traffic but your realm did get\n # new streams and users, good enough.\n return True\n return False\n\ndef handle_digest_email(user_profile_id: int, cutoff: float) -> None:\n user_profile = get_user_profile_by_id(user_profile_id)\n\n # We are disabling digest emails for soft deactivated users for the time.\n # TODO: Find an elegant way to generate digest emails for these users.\n if user_profile.long_term_idle:\n return None\n\n # Convert from epoch seconds to a datetime object.\n cutoff_date = datetime.datetime.fromtimestamp(int(cutoff), tz=pytz.utc)\n\n all_messages = UserMessage.objects.filter(\n user_profile=user_profile,\n message__pub_date__gt=cutoff_date\n ).select_related('message').order_by(\"message__pub_date\")\n\n context = common_context(user_profile)\n\n # Start building email template data.\n context.update({\n 'realm_name': user_profile.realm.name,\n 'name': user_profile.full_name,\n 'unsubscribe_link': one_click_unsubscribe_link(user_profile, \"digest\")\n })\n\n # Gather recent missed PMs, re-using the missed PM email logic.\n # You can't have an unread message that you sent, but when testing\n # this causes confusion so filter your messages out.\n pms = all_messages.filter(\n ~Q(message__recipient__type=Recipient.STREAM) &\n 
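A quick sanity check of the enough_traffic() gate above (the first two arguments are stubs; in real use they are rendered message lists):

assert enough_traffic("<p>a pm</p>", "", 0, 0)  # any unread traffic suffices
assert enough_traffic("", "", 3, 2)             # new streams AND new users
assert not enough_traffic("", "", 3, 0)         # new streams alone: skip the email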
~Q(message__sender=user_profile))\n\n # Show up to 4 missed PMs.\n pms_limit = 4\n\n context['unread_pms'] = build_message_list(\n user_profile, [pm.message for pm in pms[:pms_limit]])\n context['remaining_unread_pms_count'] = max(0, len(pms) - pms_limit)\n\n home_view_recipients = [sub.recipient for sub in\n Subscription.objects.filter(\n user_profile=user_profile,\n active=True,\n in_home_view=True)]\n\n stream_messages = all_messages.filter(\n message__recipient__type=Recipient.STREAM,\n message__recipient__in=home_view_recipients)\n\n # Gather hot conversations.\n context[\"hot_conversations\"] = gather_hot_conversations(\n user_profile, stream_messages)\n\n # Gather new streams.\n new_streams_count, new_streams = gather_new_streams(\n user_profile, cutoff_date)\n context[\"new_streams\"] = new_streams\n context[\"new_streams_count\"] = new_streams_count\n\n # Gather users who signed up recently.\n new_users_count, new_users = gather_new_users(\n user_profile, cutoff_date)\n context[\"new_users\"] = new_users\n\n # We don't want to send emails containing almost no information.\n if enough_traffic(context[\"unread_pms\"], context[\"hot_conversations\"],\n new_streams_count, new_users_count):\n logger.info(\"Sending digest email for %s\" % (user_profile.email,))\n # Send now, as a ScheduledEmail\n send_future_email('zerver/emails/digest', user_profile.realm, to_user_id=user_profile.id,\n from_name=\"Zulip Digest\", from_address=FromAddress.NOREPLY,\n context=context)\n"},"type_annotations":{"kind":"list like","value":["UserProfile","datetime.datetime","str","UserProfile","datetime.datetime","datetime.datetime","UserProfile","QuerySet","UserProfile","datetime.datetime","UserProfile","datetime.datetime","str","str","int","int","int","float"],"string":"[\n \"UserProfile\",\n \"datetime.datetime\",\n \"str\",\n \"UserProfile\",\n \"datetime.datetime\",\n \"datetime.datetime\",\n \"UserProfile\",\n \"QuerySet\",\n \"UserProfile\",\n \"datetime.datetime\",\n \"UserProfile\",\n \"datetime.datetime\",\n \"str\",\n \"str\",\n \"int\",\n \"int\",\n \"int\",\n \"float\"\n]"},"type_annotation_starts":{"kind":"list like","value":[1251,1272,1731,2035,2056,2305,3078,3103,5876,5900,6419,6466,7217,7241,7259,7275,7634,7647],"string":"[\n 1251,\n 1272,\n 1731,\n 2035,\n 2056,\n 2305,\n 3078,\n 3103,\n 5876,\n 5900,\n 6419,\n 6466,\n 7217,\n 7241,\n 7259,\n 7275,\n 7634,\n 7647\n]"},"type_annotation_ends":{"kind":"list like","value":[1262,1289,1734,2046,2073,2322,3089,3111,5887,5917,6430,6483,7220,7244,7262,7278,7637,7652],"string":"[\n 1262,\n 1289,\n 1734,\n 2046,\n 2073,\n 2322,\n 3089,\n 3111,\n 5887,\n 5917,\n 6430,\n 6483,\n 7220,\n 7244,\n 7262,\n 7278,\n 7637,\n 7652\n]"}}},{"rowIdx":1371,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/domains.py"},"contents":{"kind":"string","value":"from django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext as _\n\nimport re\n\ndef validate_domain(domain: str) -> None:\n if domain is None or len(domain) == 0:\n raise ValidationError(_(\"Domain can't be empty.\"))\n if '.' not in domain:\n raise ValidationError(_(\"Domain must have at least one dot (.)\"))\n if len(domain) > 255:\n raise ValidationError(_(\"Domain is too long\"))\n if domain[0] == '.' 
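How the digest cutoff travels through the queue, mirroring queue_digest_recipient() and handle_digest_email() above (a sketch; strftime('%s') is a platform-specific extension, which is what the code relies on):

import datetime
import pytz

cutoff = datetime.datetime(2018, 11, 6, tzinfo=pytz.utc)
epoch = cutoff.strftime('%s')  # enqueue side: epoch seconds as a string
restored = datetime.datetime.fromtimestamp(int(epoch), tz=pytz.utc)  # worker side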
or domain[-1] == '.':\n raise ValidationError(_(\"Domain cannot start or end with a dot (.)\"))\n for subdomain in domain.split('.'):\n if not subdomain:\n raise ValidationError(_(\"Consecutive '.' are not allowed.\"))\n if subdomain[0] == '-' or subdomain[-1] == '-':\n raise ValidationError(_(\"Subdomains cannot start or end with a '-'.\"))\n if not re.match('^[a-z0-9-]*$', subdomain):\n raise ValidationError(_(\"Domain can only have letters, numbers, '.' and '-'s.\"))\n"},"type_annotations":{"kind":"list like","value":["str"],"string":"[\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[142],"string":"[\n 142\n]"},"type_annotation_ends":{"kind":"list like","value":[145],"string":"[\n 145\n]"}}},{"rowIdx":1372,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/email_mirror.py"},"contents":{"kind":"string","value":"from typing import Any, Dict, List, Optional, Union\n\nimport logging\nimport re\n\nfrom email.header import decode_header, Header\nimport email.message as message\n\nfrom django.conf import settings\n\nfrom zerver.lib.actions import decode_email_address, get_email_gateway_message_string_from_address, \\\n internal_send_message, internal_send_private_message, \\\n internal_send_stream_message, internal_send_huddle_message\nfrom zerver.lib.notifications import convert_html_to_markdown\nfrom zerver.lib.queue import queue_json_publish\nfrom zerver.lib.redis_utils import get_redis_client\nfrom zerver.lib.upload import upload_message_file\nfrom zerver.lib.utils import generate_random_token\nfrom zerver.lib.str_utils import force_text\nfrom zerver.lib.send_email import FromAddress\nfrom zerver.models import Stream, Recipient, \\\n get_user_profile_by_id, get_display_recipient, get_personal_recipient, \\\n Message, Realm, UserProfile, get_system_bot, get_user, MAX_TOPIC_NAME_LENGTH, \\\n MAX_MESSAGE_LENGTH\n\nlogger = logging.getLogger(__name__)\n\ndef redact_stream(error_message: str) -> str:\n domain = settings.EMAIL_GATEWAY_PATTERN.rsplit('@')[-1]\n stream_match = re.search('\\\\b(.*?)@' + domain, error_message)\n if stream_match:\n stream_name = stream_match.groups()[0]\n return error_message.replace(stream_name, \"X\" * len(stream_name))\n return error_message\n\ndef report_to_zulip(error_message: str) -> None:\n if settings.ERROR_BOT is None:\n return\n error_bot = get_system_bot(settings.ERROR_BOT)\n error_stream = Stream.objects.get(name=\"errors\", realm=error_bot.realm)\n send_zulip(settings.ERROR_BOT, error_stream, \"email mirror error\",\n \"\"\"~~~\\n%s\\n~~~\"\"\" % (error_message,))\n\ndef log_and_report(email_message: message.Message, error_message: str, debug_info: Dict[str, Any]) -> None:\n scrubbed_error = u\"Sender: %s\\n%s\" % (email_message.get(\"From\"),\n redact_stream(error_message))\n\n if \"to\" in debug_info:\n scrubbed_error = \"Stream: %s\\n%s\" % (redact_stream(debug_info[\"to\"]),\n scrubbed_error)\n\n if \"stream\" in debug_info:\n scrubbed_error = \"Realm: %s\\n%s\" % (debug_info[\"stream\"].realm.string_id,\n scrubbed_error)\n\n logger.error(scrubbed_error)\n report_to_zulip(scrubbed_error)\n\n\n# Temporary missed message addresses\n\nredis_client = get_redis_client()\n\n\ndef missed_message_redis_key(token: str) -> str:\n return 'missed_message:' + token\n\n\ndef is_missed_message_address(address: str) -> bool:\n msg_string = get_email_gateway_message_string_from_address(address)\n return is_mm_32_format(msg_string)\n\ndef is_mm_32_format(msg_string: 
Optional[str]) -> bool:\n '''\n Missed message strings are formatted with a little \"mm\" prefix\n followed by a randomly generated 32-character string.\n '''\n return msg_string is not None and msg_string.startswith('mm') and len(msg_string) == 34\n\ndef get_missed_message_token_from_address(address: str) -> str:\n msg_string = get_email_gateway_message_string_from_address(address)\n\n if msg_string is None:\n raise ZulipEmailForwardError('Address not recognized by gateway.')\n\n if not is_mm_32_format(msg_string):\n raise ZulipEmailForwardError('Could not parse missed message address')\n\n # strip off the 'mm' before returning the redis key\n return msg_string[2:]\n\ndef create_missed_message_address(user_profile: UserProfile, message: Message) -> str:\n if settings.EMAIL_GATEWAY_PATTERN == '':\n logger.warning(\"EMAIL_GATEWAY_PATTERN is an empty string, using \"\n \"NOREPLY_EMAIL_ADDRESS in the 'from' field.\")\n return FromAddress.NOREPLY\n\n if message.recipient.type == Recipient.PERSONAL:\n # We need to reply to the sender so look up their personal recipient_id\n recipient_id = get_personal_recipient(message.sender_id).id\n else:\n recipient_id = message.recipient_id\n\n data = {\n 'user_profile_id': user_profile.id,\n 'recipient_id': recipient_id,\n 'subject': message.topic_name().encode('utf-8'),\n }\n\n while True:\n token = generate_random_token(32)\n key = missed_message_redis_key(token)\n if redis_client.hsetnx(key, 'uses_left', 1):\n break\n\n with redis_client.pipeline() as pipeline:\n pipeline.hmset(key, data)\n pipeline.expire(key, 60 * 60 * 24 * 5)\n pipeline.execute()\n\n address = 'mm' + token\n return settings.EMAIL_GATEWAY_PATTERN % (address,)\n\n\ndef mark_missed_message_address_as_used(address: str) -> None:\n token = get_missed_message_token_from_address(address)\n key = missed_message_redis_key(token)\n with redis_client.pipeline() as pipeline:\n pipeline.hincrby(key, 'uses_left', -1)\n pipeline.expire(key, 60 * 60 * 24 * 5)\n new_value = pipeline.execute()[0]\n if new_value < 0:\n redis_client.delete(key)\n raise ZulipEmailForwardError('Missed message address has already been used')\n\ndef construct_zulip_body(message: message.Message, realm: Realm) -> str:\n body = extract_body(message)\n # Remove null characters, since Zulip will reject\n body = body.replace(\"\\x00\", \"\")\n body = filter_footer(body)\n body += extract_and_upload_attachments(message, realm)\n body = body.strip()\n if not body:\n body = '(No email body)'\n return body\n\ndef send_to_missed_message_address(address: str, message: message.Message) -> None:\n token = get_missed_message_token_from_address(address)\n key = missed_message_redis_key(token)\n result = redis_client.hmget(key, 'user_profile_id', 'recipient_id', 'subject')\n if not all(val is not None for val in result):\n raise ZulipEmailForwardError('Missing missed message address data')\n user_profile_id, recipient_id, subject_b = result # type: (bytes, bytes, bytes)\n\n user_profile = get_user_profile_by_id(user_profile_id)\n recipient = Recipient.objects.get(id=recipient_id)\n display_recipient = get_display_recipient(recipient)\n\n body = construct_zulip_body(message, user_profile.realm)\n\n if recipient.type == Recipient.STREAM:\n assert isinstance(display_recipient, str)\n recipient_str = display_recipient\n internal_send_stream_message(user_profile.realm, user_profile, recipient_str,\n subject_b.decode('utf-8'), body)\n elif recipient.type == Recipient.PERSONAL:\n assert not isinstance(display_recipient, str)\n recipient_str = 
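The missed-message address token format, concretely (the token value is hypothetical; real tokens come from generate_random_token(32)):

token = "0123456789abcdef0123456789abcdef"  # 32 characters
assert is_mm_32_format("mm" + token)        # "mm" prefix + 32 chars = 34 total
assert not is_mm_32_format(token)           # a bare token is rejected
assert missed_message_redis_key(token) == "missed_message:" + token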
display_recipient[0]['email']\n recipient_user = get_user(recipient_str, user_profile.realm)\n internal_send_private_message(user_profile.realm, user_profile,\n recipient_user, body)\n elif recipient.type == Recipient.HUDDLE:\n assert not isinstance(display_recipient, str)\n emails = [user_dict['email'] for user_dict in display_recipient]\n recipient_str = ', '.join(emails)\n internal_send_huddle_message(user_profile.realm, user_profile,\n emails, body)\n else:\n raise AssertionError(\"Invalid recipient type!\")\n\n logger.info(\"Successfully processed email from %s to %s\" % (\n user_profile.email, recipient_str))\n\n## Sending the Zulip ##\n\nclass ZulipEmailForwardError(Exception):\n pass\n\ndef send_zulip(sender: str, stream: Stream, topic: str, content: str) -> None:\n internal_send_message(\n stream.realm,\n sender,\n \"stream\",\n stream.name,\n topic[:MAX_TOPIC_NAME_LENGTH],\n content[:MAX_MESSAGE_LENGTH],\n email_gateway=True)\n\ndef valid_stream(stream_name: str, token: str) -> bool:\n try:\n stream = Stream.objects.get(email_token=token)\n return stream.name.lower() == stream_name.lower()\n except Stream.DoesNotExist:\n return False\n\ndef get_message_part_by_type(message: message.Message, content_type: str) -> Optional[str]:\n charsets = message.get_charsets()\n\n for idx, part in enumerate(message.walk()):\n if part.get_content_type() == content_type:\n content = part.get_payload(decode=True)\n assert isinstance(content, bytes)\n if charsets[idx]:\n return content.decode(charsets[idx], errors=\"ignore\")\n return None\n\ntalon_initialized = False\ndef extract_body(message: message.Message) -> str:\n import talon\n global talon_initialized\n if not talon_initialized:\n talon.init()\n talon_initialized = True\n\n # If the message contains a plaintext version of the body, use\n # that.\n plaintext_content = get_message_part_by_type(message, \"text/plain\")\n if plaintext_content:\n return talon.quotations.extract_from_plain(plaintext_content)\n\n # If we only have an HTML version, try to make that look nice.\n html_content = get_message_part_by_type(message, \"text/html\")\n if html_content:\n return convert_html_to_markdown(talon.quotations.extract_from_html(html_content))\n\n raise ZulipEmailForwardError(\"Unable to find plaintext or HTML message body\")\n\ndef filter_footer(text: str) -> str:\n # Try to filter out obvious footers.\n possible_footers = [line for line in text.split(\"\\n\") if line.strip().startswith(\"--\")]\n if len(possible_footers) != 1:\n # Be conservative and don't try to scrub content if there\n # isn't a trivial footer structure.\n return text\n\n return text.partition(\"--\")[0].strip()\n\ndef extract_and_upload_attachments(message: message.Message, realm: Realm) -> str:\n user_profile = get_system_bot(settings.EMAIL_GATEWAY_BOT)\n attachment_links = []\n\n payload = message.get_payload()\n if not isinstance(payload, list):\n # This is not a multipart message, so it can't contain attachments.\n return \"\"\n\n for part in payload:\n content_type = part.get_content_type()\n filename = part.get_filename()\n if filename:\n attachment = part.get_payload(decode=True)\n if isinstance(attachment, bytes):\n s3_url = upload_message_file(filename, len(attachment), content_type,\n attachment,\n user_profile,\n target_realm=realm)\n formatted_link = \"[%s](%s)\" % (filename, s3_url)\n attachment_links.append(formatted_link)\n else:\n logger.warning(\"Payload is not bytes (invalid attachment %s in message from %s).\" %\n (filename, message.get(\"From\")))\n\n return 
\"\\n\".join(attachment_links)\n\ndef extract_and_validate(email: str) -> Stream:\n temp = decode_email_address(email)\n if temp is None:\n raise ZulipEmailForwardError(\"Malformed email recipient \" + email)\n stream_name, token = temp\n\n if not valid_stream(stream_name, token):\n raise ZulipEmailForwardError(\"Bad stream token from email recipient \" + email)\n\n return Stream.objects.get(email_token=token)\n\ndef find_emailgateway_recipient(message: message.Message) -> str:\n # We can't use Delivered-To; if there is a X-Gm-Original-To\n # it is more accurate, so try to find the most-accurate\n # recipient list in descending priority order\n recipient_headers = [\"X-Gm-Original-To\", \"Delivered-To\", \"To\"]\n recipients = [] # type: List[Union[str, Header]]\n for recipient_header in recipient_headers:\n r = message.get_all(recipient_header, None)\n if r:\n recipients = r\n break\n\n pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]\n match_email_re = re.compile(\".*?\".join(pattern_parts))\n for recipient_email in [str(recipient) for recipient in recipients]:\n if match_email_re.match(recipient_email):\n return recipient_email\n\n raise ZulipEmailForwardError(\"Missing recipient in mirror email\")\n\ndef process_stream_message(to: str, subject: str, message: message.Message,\n debug_info: Dict[str, Any]) -> None:\n stream = extract_and_validate(to)\n body = construct_zulip_body(message, stream.realm)\n debug_info[\"stream\"] = stream\n send_zulip(settings.EMAIL_GATEWAY_BOT, stream, subject, body)\n logger.info(\"Successfully processed email to %s (%s)\" % (\n stream.name, stream.realm.string_id))\n\ndef process_missed_message(to: str, message: message.Message, pre_checked: bool) -> None:\n if not pre_checked:\n mark_missed_message_address_as_used(to)\n send_to_missed_message_address(to, message)\n\ndef process_message(message: message.Message, rcpt_to: Optional[str]=None, pre_checked: bool=False) -> None:\n subject_header = str(message.get(\"Subject\", \"\")).strip()\n if subject_header == \"\":\n subject_header = \"(no topic)\"\n encoded_subject, encoding = decode_header(subject_header)[0]\n if encoding is None:\n subject = force_text(encoded_subject) # encoded_subject has type str when encoding is None\n else:\n try:\n subject = encoded_subject.decode(encoding)\n except (UnicodeDecodeError, LookupError):\n subject = \"(unreadable subject)\"\n\n debug_info = {}\n\n try:\n if rcpt_to is not None:\n to = rcpt_to\n else:\n to = find_emailgateway_recipient(message)\n debug_info[\"to\"] = to\n\n if is_missed_message_address(to):\n process_missed_message(to, message, pre_checked)\n else:\n process_stream_message(to, subject, message, debug_info)\n except ZulipEmailForwardError as e:\n # TODO: notify sender of error, retry if appropriate.\n log_and_report(message, str(e), debug_info)\n\n\ndef mirror_email_message(data: Dict[str, str]) -> Dict[str, str]:\n rcpt_to = data['recipient']\n if is_missed_message_address(rcpt_to):\n try:\n mark_missed_message_address_as_used(rcpt_to)\n except ZulipEmailForwardError:\n return {\n \"status\": \"error\",\n \"msg\": \"5.1.1 Bad destination mailbox address: \"\n \"Bad or expired missed message address.\"\n }\n else:\n try:\n extract_and_validate(rcpt_to)\n except ZulipEmailForwardError:\n return {\n \"status\": \"error\",\n \"msg\": \"5.1.1 Bad destination mailbox address: \"\n \"Please use the address specified in your Streams page.\"\n }\n queue_json_publish(\n \"email_mirror\",\n {\n \"message\": 
data['msg_text'],\n \"rcpt_to\": rcpt_to\n }\n )\n return {\"status\": \"success\"}\n"},"type_annotations":{"kind":"list like","value":["str","str","message.Message","str","Dict[str, Any]","str","str","Optional[str]","str","UserProfile","Message","str","message.Message","Realm","str","message.Message","str","Stream","str","str","str","str","message.Message","str","message.Message","str","message.Message","Realm","str","message.Message","str","str","message.Message","Dict[str, Any]","str","message.Message","bool","message.Message","Dict[str, str]"],"string":"[\n \"str\",\n \"str\",\n \"message.Message\",\n \"str\",\n \"Dict[str, Any]\",\n \"str\",\n \"str\",\n \"Optional[str]\",\n \"str\",\n \"UserProfile\",\n \"Message\",\n \"str\",\n \"message.Message\",\n \"Realm\",\n \"str\",\n \"message.Message\",\n \"str\",\n \"Stream\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"message.Message\",\n \"str\",\n \"message.Message\",\n \"str\",\n \"message.Message\",\n \"Realm\",\n \"str\",\n \"message.Message\",\n \"str\",\n \"str\",\n \"message.Message\",\n \"Dict[str, Any]\",\n \"str\",\n \"message.Message\",\n \"bool\",\n \"message.Message\",\n \"Dict[str, str]\"\n]"},"type_annotation_starts":{"kind":"list like","value":[1074,1416,1767,1799,1816,2505,2596,2754,3063,3503,3525,4645,5117,5141,5504,5518,7434,7447,7462,7476,7730,7742,7970,8001,8430,9186,9586,9610,10759,11165,12057,12071,12085,12141,12499,12513,12543,12708,13839],"string":"[\n 1074,\n 1416,\n 1767,\n 1799,\n 1816,\n 2505,\n 2596,\n 2754,\n 3063,\n 3503,\n 3525,\n 4645,\n 5117,\n 5141,\n 5504,\n 5518,\n 7434,\n 7447,\n 7462,\n 7476,\n 7730,\n 7742,\n 7970,\n 8001,\n 8430,\n 9186,\n 9586,\n 9610,\n 10759,\n 11165,\n 12057,\n 12071,\n 12085,\n 12141,\n 12499,\n 12513,\n 12543,\n 12708,\n 13839\n]"},"type_annotation_ends":{"kind":"list like","value":[1077,1419,1782,1802,1830,2508,2599,2767,3066,3514,3532,4648,5132,5146,5507,5533,7437,7453,7465,7479,7733,7745,7985,8004,8445,9189,9601,9615,10762,11180,12060,12074,12100,12155,12502,12528,12547,12723,13853],"string":"[\n 1077,\n 1419,\n 1782,\n 1802,\n 1830,\n 2508,\n 2599,\n 2767,\n 3066,\n 3514,\n 3532,\n 4648,\n 5132,\n 5146,\n 5507,\n 5533,\n 7437,\n 7453,\n 7465,\n 7479,\n 7733,\n 7745,\n 7985,\n 8004,\n 8445,\n 9189,\n 9601,\n 9615,\n 10762,\n 11180,\n 12060,\n 12074,\n 12100,\n 12155,\n 12502,\n 12528,\n 12547,\n 12723,\n 13853\n]"}}},{"rowIdx":1373,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/emoji.py"},"contents":{"kind":"string","value":"\nimport os\nimport re\nimport ujson\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext as _\nfrom typing import Optional, Tuple\n\nfrom zerver.lib.request import JsonableError\nfrom zerver.lib.upload import upload_backend\nfrom zerver.models import Reaction, Realm, RealmEmoji, UserProfile\n\nEMOJI_PATH = os.path.join(settings.STATIC_ROOT, \"generated\", \"emoji\")\nNAME_TO_CODEPOINT_PATH = os.path.join(EMOJI_PATH, \"name_to_codepoint.json\")\nCODEPOINT_TO_NAME_PATH = os.path.join(EMOJI_PATH, \"codepoint_to_name.json\")\nEMOTICON_CONVERSIONS_PATH = os.path.join(EMOJI_PATH, \"emoticon_conversions.json\")\n\nwith open(NAME_TO_CODEPOINT_PATH) as fp:\n name_to_codepoint = ujson.load(fp)\n\nwith open(CODEPOINT_TO_NAME_PATH) as fp:\n codepoint_to_name = ujson.load(fp)\n\nwith open(EMOTICON_CONVERSIONS_PATH) as fp:\n EMOTICON_CONVERSIONS = ujson.load(fp)\n\npossible_emoticons = EMOTICON_CONVERSIONS.keys()\npossible_emoticon_regexes = 
map(re.escape, possible_emoticons) # type: ignore # AnyStr/str issues\nterminal_symbols = ',.;?!()\\[\\] \"\'\\n\\t' # type: str # from composebox_typeahead.js\nemoticon_regex = ('(?<![^{0}])(?P<emoticon>('.format(terminal_symbols)\n + ')|('.join(possible_emoticon_regexes) # type: ignore # AnyStr/str issues\n + '))(?![^{0}])'.format(terminal_symbols))\n\n# Translates emoticons to their colon syntax, e.g. `:smiley:`.\ndef translate_emoticons(text: str) -> str:\n translated = text\n\n for emoticon in EMOTICON_CONVERSIONS:\n translated = re.sub(re.escape(emoticon), EMOTICON_CONVERSIONS[emoticon], translated)\n\n return translated\n\ndef emoji_name_to_emoji_code(realm: Realm, emoji_name: str) -> Tuple[str, str]:\n realm_emojis = realm.get_active_emoji()\n realm_emoji = realm_emojis.get(emoji_name)\n if realm_emoji is not None:\n return str(realm_emojis[emoji_name]['id']), Reaction.REALM_EMOJI\n if emoji_name == 'zulip':\n return emoji_name, Reaction.ZULIP_EXTRA_EMOJI\n if emoji_name in name_to_codepoint:\n return name_to_codepoint[emoji_name], Reaction.UNICODE_EMOJI\n raise JsonableError(_(\"Emoji '%s' does not exist\" % (emoji_name,)))\n\ndef check_valid_emoji(realm: Realm, emoji_name: str) -> None:\n emoji_name_to_emoji_code(realm, emoji_name)\n\ndef check_emoji_request(realm: Realm, emoji_name: str, emoji_code: str,\n emoji_type: str) -> None:\n # For a given realm and emoji type, checks whether an emoji\n # code is valid for new reactions, or not.\n if emoji_type == \"realm_emoji\":\n realm_emojis = realm.get_emoji()\n realm_emoji = realm_emojis.get(emoji_code)\n if realm_emoji is None:\n raise JsonableError(_(\"Invalid custom emoji.\"))\n if realm_emoji[\"name\"] != emoji_name:\n raise JsonableError(_(\"Invalid custom emoji name.\"))\n if realm_emoji[\"deactivated\"]:\n raise JsonableError(_(\"This custom emoji has been deactivated.\"))\n elif emoji_type == \"zulip_extra_emoji\":\n if emoji_code not in [\"zulip\"]:\n raise JsonableError(_(\"Invalid emoji code.\"))\n if emoji_name != emoji_code:\n raise JsonableError(_(\"Invalid emoji name.\"))\n elif emoji_type == \"unicode_emoji\":\n if emoji_code not in codepoint_to_name:\n raise JsonableError(_(\"Invalid emoji code.\"))\n if name_to_codepoint.get(emoji_name) != emoji_code:\n raise JsonableError(_(\"Invalid emoji name.\"))\n else:\n # The above are the only valid emoji types\n raise JsonableError(_(\"Invalid emoji type.\"))\n\ndef check_emoji_admin(user_profile: UserProfile, emoji_name: Optional[str]=None) -> None:\n \"\"\"Raises an exception if the user cannot administer the target realm\n emoji name in their organization.\"\"\"\n\n # Realm administrators can always administer emoji\n if user_profile.is_realm_admin:\n return\n if user_profile.realm.add_emoji_by_admins_only:\n raise JsonableError(_(\"Must be an organization administrator\"))\n\n # Otherwise, normal users can add emoji\n if emoji_name is None:\n return\n\n # Additionally, normal users can remove emoji they themselves added\n emoji = RealmEmoji.objects.filter(realm=user_profile.realm,\n name=emoji_name,\n deactivated=False).first()\n current_user_is_author = (emoji is not None and\n emoji.author is not None and\n emoji.author.id == user_profile.id)\n if not user_profile.is_realm_admin and not current_user_is_author:\n raise JsonableError(_(\"Must be an organization administrator or emoji author\"))\n\ndef check_valid_emoji_name(emoji_name: str) -> None:\n if re.match(r'^[0-9a-z.\\-_]+(?<![.\\-_])$', emoji_name):\n return\n raise JsonableError(_(\"Invalid characters in emoji name\"))\n\ndef get_emoji_url(emoji_file_name: str, realm_id: int) ->
str:\n return upload_backend.get_emoji_url(emoji_file_name, realm_id)\n\n\ndef get_emoji_file_name(emoji_file_name: str, emoji_id: int) -> str:\n _, image_ext = os.path.splitext(emoji_file_name)\n return ''.join((str(emoji_id), image_ext))\n"},"type_annotations":{"kind":"list like","value":["str","Realm","str","Realm","str","Realm","str","str","str","UserProfile","str","str","int","str","int"],"string":"[\n \"str\",\n \"Realm\",\n \"str\",\n \"Realm\",\n \"str\",\n \"Realm\",\n \"str\",\n \"str\",\n \"str\",\n \"UserProfile\",\n \"str\",\n \"str\",\n \"int\",\n \"str\",\n \"int\"\n]"},"type_annotation_starts":{"kind":"list like","value":[1424,1655,1674,2190,2209,2303,2322,2339,2380,3606,4726,4914,4929,5052,5067],"string":"[\n 1424,\n 1655,\n 1674,\n 2190,\n 2209,\n 2303,\n 2322,\n 2339,\n 2380,\n 3606,\n 4726,\n 4914,\n 4929,\n 5052,\n 5067\n]"},"type_annotation_ends":{"kind":"list like","value":[1427,1660,1677,2195,2212,2308,2325,2342,2383,3617,4729,4917,4932,5055,5070],"string":"[\n 1427,\n 1660,\n 1677,\n 2195,\n 2212,\n 2308,\n 2325,\n 2342,\n 2383,\n 3617,\n 4729,\n 4917,\n 4932,\n 5055,\n 5070\n]"}}},{"rowIdx":1374,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/error_notify.py"},"contents":{"kind":"string","value":"# System documented in https://zulip.readthedocs.io/en/latest/subsystems/logging.html\n\nimport logging\n\nfrom collections import defaultdict\nfrom django.conf import settings\nfrom django.core.mail import mail_admins\nfrom django.http import HttpResponse\nfrom django.utils.translation import ugettext as _\nfrom typing import cast, Any, Dict, Optional\n\nfrom zerver.filters import clean_data_from_query_parameters\nfrom zerver.models import get_system_bot\nfrom zerver.lib.actions import internal_send_message\nfrom zerver.lib.response import json_success, json_error\n\ndef format_subject(subject: str) -> str:\n \"\"\"\n Escape CR and LF characters.\n \"\"\"\n return subject.replace('\\n', '\\\\n').replace('\\r', '\\\\r')\n\ndef logger_repr(report: Dict[str, Any]) -> str:\n return (\"Logger %(logger_name)s, from module %(log_module)s line %(log_lineno)d:\"\n % report)\n\ndef user_info_str(report: Dict[str, Any]) -> str:\n if report['user_full_name'] and report['user_email']:\n user_info = \"%(user_full_name)s (%(user_email)s)\" % (report)\n else:\n user_info = \"Anonymous user (not logged in)\"\n\n user_info += \" on %s deployment\" % (report['deployment'],)\n return user_info\n\ndef deployment_repr(report: Dict[str, Any]) -> str:\n deployment = 'Deployed code:\\n'\n for (label, field) in [('git', 'git_described'),\n ('ZULIP_VERSION', 'zulip_version_const'),\n ('version', 'zulip_version_file'),\n ]:\n if report[field] is not None:\n deployment += '- %s: %s\\n' % (label, report[field])\n return deployment\n\ndef notify_browser_error(report: Dict[str, Any]) -> None:\n report = defaultdict(lambda: None, report)\n if settings.ERROR_BOT:\n zulip_browser_error(report)\n email_browser_error(report)\n\ndef email_browser_error(report: Dict[str, Any]) -> None:\n subject = \"Browser error for %s\" % (user_info_str(report))\n\n body = (\"User: %(user_full_name)s <%(user_email)s> on %(deployment)s\\n\\n\"\n \"Message:\\n%(message)s\\n\\nStacktrace:\\n%(stacktrace)s\\n\\n\"\n \"IP address: %(ip_address)s\\n\"\n \"User agent: %(user_agent)s\\n\"\n \"href: %(href)s\\n\"\n \"Server path: %(server_path)s\\n\"\n \"Deployed version: %(version)s\\n\"\n % (report))\n\n more_info = report['more_info']\n if 
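What translate_emoticons() above does in practice; the mapping is data-driven, so the pair below is an assumption about emoticon_conversions.json rather than a documented entry:

# translate_emoticons("good morning :)") -> "good morning :smile:"
# if EMOTICON_CONVERSIONS contains {":)": ":smile:"}; unmapped text
# passes through unchanged.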
more_info is not None:\n body += \"\\nAdditional information:\"\n for (key, value) in more_info.items():\n body += \"\\n %s: %s\" % (key, value)\n\n body += \"\\n\\nLog:\\n%s\" % (report['log'],)\n\n mail_admins(subject, body)\n\ndef zulip_browser_error(report: Dict[str, Any]) -> None:\n subject = \"JS error: %s\" % (report['user_email'],)\n\n user_info = user_info_str(report)\n\n body = \"User: %s\\n\" % (user_info,)\n body += (\"Message: %(message)s\\n\"\n % (report))\n\n realm = get_system_bot(settings.ERROR_BOT).realm\n internal_send_message(realm, settings.ERROR_BOT,\n \"stream\", \"errors\", format_subject(subject), body)\n\ndef notify_server_error(report: Dict[str, Any], skip_error_zulip: Optional[bool]=False) -> None:\n report = defaultdict(lambda: None, report)\n email_server_error(report)\n if settings.ERROR_BOT and not skip_error_zulip:\n zulip_server_error(report)\n\ndef zulip_server_error(report: Dict[str, Any]) -> None:\n subject = '%(node)s: %(message)s' % (report)\n\n logger_str = logger_repr(report)\n user_info = user_info_str(report)\n deployment = deployment_repr(report)\n\n if report['has_request']:\n request_repr = (\n \"Request info:\\n~~~~\\n\"\n \"- path: %(path)s\\n\"\n \"- %(method)s: %(data)s\\n\") % (report)\n for field in [\"REMOTE_ADDR\", \"QUERY_STRING\", \"SERVER_NAME\"]:\n val = report.get(field.lower())\n if field == \"QUERY_STRING\":\n val = clean_data_from_query_parameters(str(val))\n request_repr += \"- %s: \\\"%s\\\"\\n\" % (field, val)\n request_repr += \"~~~~\"\n else:\n request_repr = \"Request info: none\"\n\n message = (\"%s\\nError generated by %s\\n\\n~~~~ pytb\\n%s\\n\\n~~~~\\n%s\\n%s\"\n % (logger_str, user_info, report['stack_trace'], deployment, request_repr))\n\n realm = get_system_bot(settings.ERROR_BOT).realm\n internal_send_message(realm, settings.ERROR_BOT, \"stream\", \"errors\",\n format_subject(subject), message)\n\ndef email_server_error(report: Dict[str, Any]) -> None:\n subject = '%(node)s: %(message)s' % (report)\n\n logger_str = logger_repr(report)\n user_info = user_info_str(report)\n deployment = deployment_repr(report)\n\n if report['has_request']:\n request_repr = (\n \"Request info:\\n\"\n \"- path: %(path)s\\n\"\n \"- %(method)s: %(data)s\\n\") % (report)\n for field in [\"REMOTE_ADDR\", \"QUERY_STRING\", \"SERVER_NAME\"]:\n val = report.get(field.lower())\n if field == \"QUERY_STRING\":\n val = clean_data_from_query_parameters(str(val))\n request_repr += \"- %s: \\\"%s\\\"\\n\" % (field, val)\n else:\n request_repr = \"Request info: none\\n\"\n\n message = (\"%s\\nError generated by %s\\n\\n%s\\n\\n%s\\n\\n%s\"\n % (logger_str, user_info, report['stack_trace'], deployment, request_repr))\n\n mail_admins(format_subject(subject), message, fail_silently=True)\n\ndef do_report_error(deployment_name: str, type: str, report: Dict[str, Any]) -> HttpResponse:\n report['deployment'] = deployment_name\n if type == 'browser':\n notify_browser_error(report)\n elif type == 'server':\n notify_server_error(report)\n else:\n return json_error(_(\"Invalid type parameter\"))\n return json_success()\n"},"type_annotations":{"kind":"list like","value":["str","Dict[str, Any]","Dict[str, Any]","Dict[str, Any]","Dict[str, Any]","Dict[str, Any]","Dict[str, Any]","Dict[str, Any]","Dict[str, Any]","Dict[str, Any]","str","str","Dict[str, Any]"],"string":"[\n \"str\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n 
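format_subject() above exists so attacker-controlled text cannot inject extra lines into an email subject or error-stream topic; a one-line check of its escaping:

assert format_subject("boom\r\nInjected") == "boom\\r\\nInjected"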
\"Dict[str, Any]\",\n \"str\",\n \"str\",\n \"Dict[str, Any]\"\n]"},"type_annotation_starts":{"kind":"list like","value":[587,735,894,1223,1655,1855,2643,3082,3344,4461,5418,5429,5442],"string":"[\n 587,\n 735,\n 894,\n 1223,\n 1655,\n 1855,\n 2643,\n 3082,\n 3344,\n 4461,\n 5418,\n 5429,\n 5442\n]"},"type_annotation_ends":{"kind":"list like","value":[590,749,908,1237,1669,1869,2657,3096,3358,4475,5421,5432,5456],"string":"[\n 590,\n 749,\n 908,\n 1237,\n 1669,\n 1869,\n 2657,\n 3096,\n 3358,\n 4475,\n 5421,\n 5432,\n 5456\n]"}}},{"rowIdx":1375,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/events.py"},"contents":{"kind":"string","value":"# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for\n# high-level documentation on how this system works.\n\nimport copy\nimport ujson\n\nfrom collections import defaultdict\nfrom django.utils.translation import ugettext as _\nfrom django.conf import settings\nfrom importlib import import_module\nfrom typing import (\n cast, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union\n)\n\nsession_engine = import_module(settings.SESSION_ENGINE)\n\nfrom zerver.lib.alert_words import user_alert_words\nfrom zerver.lib.attachments import user_attachments\nfrom zerver.lib.avatar import avatar_url, get_avatar_field\nfrom zerver.lib.bot_config import load_bot_config_template\nfrom zerver.lib.hotspots import get_next_hotspots\nfrom zerver.lib.integrations import EMBEDDED_BOTS\nfrom zerver.lib.message import (\n aggregate_unread_data,\n apply_unread_message_event,\n get_raw_unread_data,\n get_starred_message_ids,\n)\nfrom zerver.lib.narrow import check_supported_events_narrow_filter\nfrom zerver.lib.push_notifications import push_notifications_enabled\nfrom zerver.lib.soft_deactivation import maybe_catch_up_soft_deactivated_user\nfrom zerver.lib.realm_icon import realm_icon_url\nfrom zerver.lib.request import JsonableError\nfrom zerver.lib.topic import TOPIC_NAME\nfrom zerver.lib.topic_mutes import get_topic_mutes\nfrom zerver.lib.actions import (\n validate_user_access_to_subscribers_helper,\n do_get_streams, get_default_streams_for_realm,\n gather_subscriptions_helper, get_cross_realm_dicts,\n get_status_dict, streams_to_dicts_sorted,\n default_stream_groups_to_dicts_sorted,\n get_owned_bot_dicts,\n)\nfrom zerver.lib.user_groups import user_groups_in_realm_serialized\nfrom zerver.tornado.event_queue import request_event_queue, get_user_events\nfrom zerver.models import Client, Message, Realm, UserPresence, UserProfile, CustomProfileFieldValue, \\\n get_user_profile_by_id, \\\n get_realm_user_dicts, realm_filters_for_realm, get_user,\\\n custom_profile_fields_for_realm, get_realm_domains, \\\n get_default_stream_groups, CustomProfileField, Stream\nfrom zproject.backends import email_auth_enabled, password_auth_enabled\nfrom version import ZULIP_VERSION\n\n\ndef get_raw_user_data(realm_id: int, client_gravatar: bool) -> Dict[int, Dict[str, str]]:\n user_dicts = get_realm_user_dicts(realm_id)\n\n # TODO: Consider optimizing this query away with caching.\n custom_profile_field_values = CustomProfileFieldValue.objects.filter(user_profile__realm_id=realm_id)\n profiles_by_user_id = defaultdict(dict) # type: Dict[int, Dict[str, Any]]\n for profile_field in custom_profile_field_values:\n user_id = profile_field.user_profile_id\n profiles_by_user_id[user_id][profile_field.field_id] = profile_field.value\n\n def user_data(row: Dict[str, Any]) -> 
Dict[str, Any]:\n avatar_url = get_avatar_field(\n user_id=row['id'],\n realm_id= realm_id,\n email=row['email'],\n avatar_source=row['avatar_source'],\n avatar_version=row['avatar_version'],\n medium=False,\n client_gravatar=client_gravatar,\n )\n\n is_admin = row['is_realm_admin']\n is_guest = row['is_guest']\n is_bot = row['is_bot']\n # This format should align with get_cross_realm_dicts() and notify_created_user\n result = dict(\n email=row['email'],\n user_id=row['id'],\n avatar_url=avatar_url,\n is_admin=is_admin,\n is_guest=is_guest,\n is_bot=is_bot,\n full_name=row['full_name'],\n timezone=row['timezone'],\n is_active = row['is_active'],\n date_joined = row['date_joined'].isoformat(),\n )\n if not is_bot:\n result['profile_data'] = profiles_by_user_id.get(row['id'], {})\n return result\n\n return {\n row['id']: user_data(row)\n for row in user_dicts\n }\n\ndef always_want(msg_type: str) -> bool:\n '''\n This function is used as a helper in\n fetch_initial_state_data, when the user passes\n in None for event_types, and we want to fetch\n info for every event type. Defining this at module\n level makes it easier to mock.\n '''\n return True\n\n# Fetch initial data. When event_types is not specified, clients want\n# all event types. Whenever you add new code to this function, you\n# should also add corresponding events for changes in the data\n# structures and new code to apply_events (and add a test in EventsRegisterTest).\ndef fetch_initial_state_data(user_profile: UserProfile,\n event_types: Optional[Iterable[str]],\n queue_id: str, client_gravatar: bool,\n include_subscribers: bool = True) -> Dict[str, Any]:\n state = {'queue_id': queue_id} # type: Dict[str, Any]\n realm = user_profile.realm\n\n if event_types is None:\n # return True always\n want = always_want # type: Callable[[str], bool]\n else:\n want = set(event_types).__contains__\n\n if want('alert_words'):\n state['alert_words'] = user_alert_words(user_profile)\n\n if want('custom_profile_fields'):\n fields = custom_profile_fields_for_realm(realm.id)\n state['custom_profile_fields'] = [f.as_dict() for f in fields]\n state['custom_profile_field_types'] = CustomProfileField.FIELD_TYPE_CHOICES_DICT\n\n if want('hotspots'):\n state['hotspots'] = get_next_hotspots(user_profile)\n\n if want('message'):\n # The client should use get_messages() to fetch messages\n # starting with the max_message_id. 
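The shape of one get_raw_user_data() entry as built by user_data() above, with illustrative values:

# {10: {"email": "iago@zulip.com", "user_id": 10, "avatar_url": "...",
#       "is_admin": True, "is_guest": False, "is_bot": False,
#       "full_name": "Iago", "timezone": "", "is_active": True,
#       "date_joined": "2018-01-01T00:00:00+00:00", "profile_data": {}}}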
They will get messages\n # newer than that ID via get_events()\n messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]\n if messages:\n state['max_message_id'] = messages[0].id\n else:\n state['max_message_id'] = -1\n\n if want('muted_topics'):\n state['muted_topics'] = get_topic_mutes(user_profile)\n\n if want('pointer'):\n state['pointer'] = user_profile.pointer\n\n if want('presence'):\n state['presences'] = get_status_dict(user_profile)\n\n if want('realm'):\n for property_name in Realm.property_types:\n state['realm_' + property_name] = getattr(realm, property_name)\n\n # Most state is handled via the property_types framework;\n # these manual entries are for those realm settings that don't\n # fit into that framework.\n state['realm_authentication_methods'] = realm.authentication_methods_dict()\n state['realm_allow_message_editing'] = realm.allow_message_editing\n state['realm_allow_community_topic_editing'] = realm.allow_community_topic_editing\n state['realm_allow_message_deleting'] = realm.allow_message_deleting\n state['realm_message_content_edit_limit_seconds'] = realm.message_content_edit_limit_seconds\n state['realm_message_content_delete_limit_seconds'] = realm.message_content_delete_limit_seconds\n state['realm_icon_url'] = realm_icon_url(realm)\n state['realm_icon_source'] = realm.icon_source\n state['max_icon_file_size'] = settings.MAX_ICON_FILE_SIZE\n state['realm_bot_domain'] = realm.get_bot_domain()\n state['realm_uri'] = realm.uri\n state['realm_available_video_chat_providers'] = realm.VIDEO_CHAT_PROVIDERS\n state['realm_presence_disabled'] = realm.presence_disabled\n state['realm_digest_emails_enabled'] = realm.digest_emails_enabled and settings.SEND_DIGEST_EMAILS\n state['realm_is_zephyr_mirror_realm'] = realm.is_zephyr_mirror_realm\n state['realm_email_auth_enabled'] = email_auth_enabled(realm)\n state['realm_password_auth_enabled'] = password_auth_enabled(realm)\n state['realm_push_notifications_enabled'] = push_notifications_enabled()\n if realm.notifications_stream and not realm.notifications_stream.deactivated:\n notifications_stream = realm.notifications_stream\n state['realm_notifications_stream_id'] = notifications_stream.id\n else:\n state['realm_notifications_stream_id'] = -1\n\n signup_notifications_stream = realm.get_signup_notifications_stream()\n if signup_notifications_stream:\n state['realm_signup_notifications_stream_id'] = signup_notifications_stream.id\n else:\n state['realm_signup_notifications_stream_id'] = -1\n\n if want('realm_domains'):\n state['realm_domains'] = get_realm_domains(realm)\n\n if want('realm_emoji'):\n state['realm_emoji'] = realm.get_emoji()\n\n if want('realm_filters'):\n state['realm_filters'] = realm_filters_for_realm(realm.id)\n\n if want('realm_user_groups'):\n state['realm_user_groups'] = user_groups_in_realm_serialized(realm)\n\n if want('realm_user'):\n state['raw_users'] = get_raw_user_data(\n realm_id=realm.id,\n client_gravatar=client_gravatar,\n )\n\n # For the user's own avatar URL, we force\n # client_gravatar=False, since that saves some unnecessary\n # client-side code for handling medium-size avatars. 
See #8253\n # for details.\n state['avatar_source'] = user_profile.avatar_source\n state['avatar_url_medium'] = avatar_url(\n user_profile,\n medium=True,\n client_gravatar=False,\n )\n state['avatar_url'] = avatar_url(\n user_profile,\n medium=False,\n client_gravatar=False,\n )\n\n state['can_create_streams'] = user_profile.can_create_streams()\n state['can_subscribe_other_users'] = user_profile.can_subscribe_other_users()\n state['cross_realm_bots'] = list(get_cross_realm_dicts())\n state['is_admin'] = user_profile.is_realm_admin\n state['is_guest'] = user_profile.is_guest\n state['user_id'] = user_profile.id\n state['enter_sends'] = user_profile.enter_sends\n state['email'] = user_profile.email\n state['delivery_email'] = user_profile.delivery_email\n state['full_name'] = user_profile.full_name\n\n if want('realm_bot'):\n state['realm_bots'] = get_owned_bot_dicts(user_profile)\n\n # This does not yet have an apply_event counterpart, since currently,\n # new entries for EMBEDDED_BOTS can only be added directly in the codebase.\n if want('realm_embedded_bots'):\n realm_embedded_bots = []\n for bot in EMBEDDED_BOTS:\n realm_embedded_bots.append({'name': bot.name,\n 'config': load_bot_config_template(bot.name)})\n state['realm_embedded_bots'] = realm_embedded_bots\n\n if want('subscription'):\n subscriptions, unsubscribed, never_subscribed = gather_subscriptions_helper(\n user_profile, include_subscribers=include_subscribers)\n state['subscriptions'] = subscriptions\n state['unsubscribed'] = unsubscribed\n state['never_subscribed'] = never_subscribed\n\n if want('update_message_flags') and want('message'):\n # Keeping unread_msgs updated requires both message flag updates and\n # message updates. This is due to the fact that new messages will not\n # generate a flag update so we need to use the flags field in the\n # message event.\n state['raw_unread_msgs'] = get_raw_unread_data(user_profile)\n\n if want('starred_messages'):\n state['starred_messages'] = get_starred_message_ids(user_profile)\n\n if want('stream'):\n state['streams'] = do_get_streams(user_profile)\n state['stream_name_max_length'] = Stream.MAX_NAME_LENGTH\n state['stream_description_max_length'] = Stream.MAX_DESCRIPTION_LENGTH\n if want('default_streams'):\n state['realm_default_streams'] = streams_to_dicts_sorted(\n get_default_streams_for_realm(realm.id))\n if want('default_stream_groups'):\n state['realm_default_stream_groups'] = default_stream_groups_to_dicts_sorted(\n get_default_stream_groups(realm))\n\n if want('update_display_settings'):\n for prop in UserProfile.property_types:\n state[prop] = getattr(user_profile, prop)\n state['emojiset_choices'] = user_profile.emojiset_choices()\n\n if want('update_global_notifications'):\n for notification in UserProfile.notification_setting_types:\n state[notification] = getattr(user_profile, notification)\n\n if want('zulip_version'):\n state['zulip_version'] = ZULIP_VERSION\n\n return state\n\n\ndef remove_message_id_from_unread_mgs(state: Dict[str, Dict[str, Any]],\n message_id: int) -> None:\n raw_unread = state['raw_unread_msgs']\n\n for key in ['pm_dict', 'stream_dict', 'huddle_dict']:\n raw_unread[key].pop(message_id, None)\n\n raw_unread['unmuted_stream_msgs'].discard(message_id)\n raw_unread['mentions'].discard(message_id)\n\ndef apply_events(state: Dict[str, Any], events: Iterable[Dict[str, Any]],\n user_profile: UserProfile, client_gravatar: bool,\n include_subscribers: bool = True,\n fetch_event_types: Optional[Iterable[str]] = None) -> None:\n for event in 
events:\n if fetch_event_types is not None and event['type'] not in fetch_event_types:\n # TODO: continuing here is not, most precisely, correct.\n # In theory, an event of one type, e.g. `realm_user`,\n # could modify state that doesn't come from that\n # `fetch_event_types` value, e.g. the `our_person` part of\n # that code path. But it should be extremely rare, and\n # fixing that will require a nontrivial refactor of\n # `apply_event`. For now, be careful in your choice of\n # `fetch_event_types`.\n continue\n apply_event(state, event, user_profile, client_gravatar, include_subscribers)\n\ndef apply_event(state: Dict[str, Any],\n event: Dict[str, Any],\n user_profile: UserProfile,\n client_gravatar: bool,\n include_subscribers: bool) -> None:\n if event['type'] == \"message\":\n state['max_message_id'] = max(state['max_message_id'], event['message']['id'])\n if 'raw_unread_msgs' in state:\n apply_unread_message_event(\n user_profile,\n state['raw_unread_msgs'],\n event['message'],\n event['flags'],\n )\n\n elif event['type'] == \"hotspots\":\n state['hotspots'] = event['hotspots']\n elif event['type'] == \"custom_profile_fields\":\n state['custom_profile_fields'] = event['fields']\n elif event['type'] == \"pointer\":\n state['pointer'] = max(state['pointer'], event['pointer'])\n elif event['type'] == \"realm_user\":\n person = event['person']\n person_user_id = person['user_id']\n\n if event['op'] == \"add\":\n person = copy.deepcopy(person)\n if client_gravatar:\n if 'gravatar.com' in person['avatar_url']:\n person['avatar_url'] = None\n person['is_active'] = True\n if not person['is_bot']:\n person['profile_data'] = {}\n state['raw_users'][person_user_id] = person\n elif event['op'] == \"remove\":\n state['raw_users'][person_user_id]['is_active'] = False\n elif event['op'] == 'update':\n is_me = (person_user_id == user_profile.id)\n\n if is_me:\n if ('avatar_url' in person and 'avatar_url' in state):\n state['avatar_source'] = person['avatar_source']\n state['avatar_url'] = person['avatar_url']\n state['avatar_url_medium'] = person['avatar_url_medium']\n\n for field in ['is_admin', 'email', 'full_name']:\n if field in person and field in state:\n state[field] = person[field]\n\n # In the unlikely event that the current user\n # just changed to/from being an admin, we need\n # to add/remove the data on all bots in the\n # realm. 
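The register round trip these helpers implement, roughly (a sketch of the calling sequence, not exact signatures):

# queue_id = request_event_queue(...)                   # open an event queue
# state = fetch_initial_state_data(user_profile, None, queue_id, client_gravatar)
# events = get_user_events(user_profile, queue_id, -1)  # anything since the snapshot
# apply_events(state, events, user_profile, client_gravatar)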
This is ugly and probably better\n # solved by removing the all-realm-bots data\n # given to admin users from this flow.\n if ('is_admin' in person and 'realm_bots' in state):\n prev_state = state['raw_users'][user_profile.id]\n was_admin = prev_state['is_admin']\n now_admin = person['is_admin']\n\n if was_admin and not now_admin:\n state['realm_bots'] = []\n if not was_admin and now_admin:\n state['realm_bots'] = get_owned_bot_dicts(user_profile)\n\n if client_gravatar and 'avatar_url' in person:\n # Respect the client_gravatar setting in the `users` data.\n if 'gravatar.com' in person['avatar_url']:\n person['avatar_url'] = None\n person['avatar_url_medium'] = None\n\n if person_user_id in state['raw_users']:\n p = state['raw_users'][person_user_id]\n for field in p:\n if field in person:\n p[field] = person[field]\n if 'custom_profile_field' in person:\n custom_field_id = person['custom_profile_field']['id']\n custom_field_new_value = person['custom_profile_field']['value']\n p['profile_data'][custom_field_id] = custom_field_new_value\n\n elif event['type'] == 'realm_bot':\n if event['op'] == 'add':\n state['realm_bots'].append(event['bot'])\n\n if event['op'] == 'remove':\n email = event['bot']['email']\n for bot in state['realm_bots']:\n if bot['email'] == email:\n bot['is_active'] = False\n\n if event['op'] == 'delete':\n state['realm_bots'] = [item for item\n in state['realm_bots'] if item['email'] != event['bot']['email']]\n\n if event['op'] == 'update':\n for bot in state['realm_bots']:\n if bot['email'] == event['bot']['email']:\n if 'owner_id' in event['bot']:\n bot['owner'] = get_user_profile_by_id(event['bot']['owner_id']).email\n else:\n bot.update(event['bot'])\n\n elif event['type'] == 'stream':\n if event['op'] == 'create':\n for stream in event['streams']:\n if not stream['invite_only']:\n stream_data = copy.deepcopy(stream)\n if include_subscribers:\n stream_data['subscribers'] = []\n stream_data['stream_weekly_traffic'] = None\n stream_data['is_old_stream'] = False\n stream_data['is_announcement_only'] = False\n # Add stream to never_subscribed (if not invite_only)\n state['never_subscribed'].append(stream_data)\n state['streams'].append(stream)\n state['streams'].sort(key=lambda elt: elt[\"name\"])\n\n if event['op'] == 'delete':\n deleted_stream_ids = {stream['stream_id'] for stream in event['streams']}\n state['streams'] = [s for s in state['streams'] if s['stream_id'] not in deleted_stream_ids]\n state['never_subscribed'] = [stream for stream in state['never_subscribed'] if\n stream['stream_id'] not in deleted_stream_ids]\n\n if event['op'] == 'update':\n # For legacy reasons, we call stream data 'subscriptions' in\n # the state var here, for the benefit of the JS code.\n for obj in state['subscriptions']:\n if obj['name'].lower() == event['name'].lower():\n obj[event['property']] = event['value']\n # Also update the pure streams data\n for stream in state['streams']:\n if stream['name'].lower() == event['name'].lower():\n prop = event['property']\n if prop in stream:\n stream[prop] = event['value']\n elif event['op'] == \"occupy\":\n state['streams'] += event['streams']\n elif event['op'] == \"vacate\":\n stream_ids = [s[\"stream_id\"] for s in event['streams']]\n state['streams'] = [s for s in state['streams'] if s[\"stream_id\"] not in stream_ids]\n elif event['type'] == 'default_streams':\n state['realm_default_streams'] = event['default_streams']\n elif event['type'] == 'default_stream_groups':\n state['realm_default_stream_groups'] = 
event['default_stream_groups']\n elif event['type'] == 'realm':\n if event['op'] == \"update\":\n field = 'realm_' + event['property']\n state[field] = event['value']\n\n # Tricky interaction: Whether we can create streams can get changed here.\n if (field in ['realm_create_stream_by_admins_only',\n 'realm_waiting_period_threshold']) and 'can_create_streams' in state:\n state['can_create_streams'] = user_profile.can_create_streams()\n state['can_subscribe_other_users'] = user_profile.can_subscribe_other_users()\n elif event['op'] == \"update_dict\":\n for key, value in event['data'].items():\n state['realm_' + key] = value\n # It's a bit messy, but this is where we need to\n # update the state for whether password authentication\n # is enabled on this server.\n if key == 'authentication_methods':\n state['realm_password_auth_enabled'] = (value['Email'] or value['LDAP'])\n state['realm_email_auth_enabled'] = value['Email']\n elif event['type'] == \"subscription\":\n if not include_subscribers and event['op'] in ['peer_add', 'peer_remove']:\n return\n\n if event['op'] in [\"add\"]:\n if not include_subscribers:\n # Avoid letting 'subscribers' entries end up in the list\n for i, sub in enumerate(event['subscriptions']):\n event['subscriptions'][i] = copy.deepcopy(event['subscriptions'][i])\n del event['subscriptions'][i]['subscribers']\n\n def name(sub: Dict[str, Any]) -> str:\n return sub['name'].lower()\n\n if event['op'] == \"add\":\n added_names = set(map(name, event[\"subscriptions\"]))\n was_added = lambda s: name(s) in added_names\n\n # add the new subscriptions\n state['subscriptions'] += event['subscriptions']\n\n # remove them from unsubscribed if they had been there\n state['unsubscribed'] = [s for s in state['unsubscribed'] if not was_added(s)]\n\n # remove them from never_subscribed if they had been there\n state['never_subscribed'] = [s for s in state['never_subscribed'] if not was_added(s)]\n\n elif event['op'] == \"remove\":\n removed_names = set(map(name, event[\"subscriptions\"]))\n was_removed = lambda s: name(s) in removed_names\n\n # Find the subs we are affecting.\n removed_subs = list(filter(was_removed, state['subscriptions']))\n\n # Remove our user from the subscribers of the removed subscriptions.\n if include_subscribers:\n for sub in removed_subs:\n sub['subscribers'] = [id for id in sub['subscribers'] if id != user_profile.id]\n\n # We must effectively copy the removed subscriptions from subscriptions to\n # unsubscribe, since we only have the name in our data structure.\n state['unsubscribed'] += removed_subs\n\n # Now filter out the removed subscriptions from subscriptions.\n state['subscriptions'] = [s for s in state['subscriptions'] if not was_removed(s)]\n\n elif event['op'] == 'update':\n for sub in state['subscriptions']:\n if sub['name'].lower() == event['name'].lower():\n sub[event['property']] = event['value']\n elif event['op'] == 'peer_add':\n user_id = event['user_id']\n for sub in state['subscriptions']:\n if (sub['name'] in event['subscriptions'] and\n user_id not in sub['subscribers']):\n sub['subscribers'].append(user_id)\n for sub in state['never_subscribed']:\n if (sub['name'] in event['subscriptions'] and\n user_id not in sub['subscribers']):\n sub['subscribers'].append(user_id)\n elif event['op'] == 'peer_remove':\n user_id = event['user_id']\n for sub in state['subscriptions']:\n if (sub['name'] in event['subscriptions'] and\n user_id in sub['subscribers']):\n sub['subscribers'].remove(user_id)\n elif event['type'] == \"presence\":\n # TODO: 
Add user_id to presence update events / state format!\n presence_user_profile = get_user(event['email'], user_profile.realm)\n state['presences'][event['email']] = UserPresence.get_status_dict_by_user(\n presence_user_profile)[event['email']]\n elif event['type'] == \"update_message\":\n # We don't return messages in /register, so we don't need to\n # do anything for content updates, but we may need to update\n # the unread_msgs data if the topic of an unread message changed.\n if TOPIC_NAME in event:\n stream_dict = state['raw_unread_msgs']['stream_dict']\n topic = event[TOPIC_NAME]\n for message_id in event['message_ids']:\n if message_id in stream_dict:\n stream_dict[message_id]['topic'] = topic\n elif event['type'] == \"delete_message\":\n max_message = Message.objects.filter(\n usermessage__user_profile=user_profile).order_by('-id').first()\n if max_message:\n state['max_message_id'] = max_message.id\n else:\n state['max_message_id'] = -1\n\n remove_id = event['message_id']\n remove_message_id_from_unread_mgs(state, remove_id)\n elif event['type'] == \"reaction\":\n # The client will get the message with the reactions directly\n pass\n elif event['type'] == \"submessage\":\n # The client will get submessages with their messages\n pass\n elif event['type'] == 'typing':\n # Typing notification events are transient and thus ignored\n pass\n elif event['type'] == \"attachment\":\n # Attachment events are just for updating the \"uploads\" UI;\n # they are not sent directly.\n pass\n elif event['type'] == \"update_message_flags\":\n # We don't return messages in `/register`, so most flags we\n # can ignore, but we do need to update the unread_msgs data if\n # unread state is changed.\n if event['flag'] == 'read' and event['operation'] == 'add':\n for remove_id in event['messages']:\n remove_message_id_from_unread_mgs(state, remove_id)\n if event['flag'] == 'starred' and event['operation'] == 'add':\n state['starred_messages'] += event['messages']\n if event['flag'] == 'starred' and event['operation'] == 'remove':\n state['starred_messages'] = [message for message in state['starred_messages']\n if not (message in event['messages'])]\n elif event['type'] == \"realm_domains\":\n if event['op'] == 'add':\n state['realm_domains'].append(event['realm_domain'])\n elif event['op'] == 'change':\n for realm_domain in state['realm_domains']:\n if realm_domain['domain'] == event['realm_domain']['domain']:\n realm_domain['allow_subdomains'] = event['realm_domain']['allow_subdomains']\n elif event['op'] == 'remove':\n state['realm_domains'] = [realm_domain for realm_domain in state['realm_domains']\n if realm_domain['domain'] != event['domain']]\n elif event['type'] == \"realm_emoji\":\n state['realm_emoji'] = event['realm_emoji']\n elif event['type'] == \"alert_words\":\n state['alert_words'] = event['alert_words']\n elif event['type'] == \"muted_topics\":\n state['muted_topics'] = event[\"muted_topics\"]\n elif event['type'] == \"realm_filters\":\n state['realm_filters'] = event[\"realm_filters\"]\n elif event['type'] == \"update_display_settings\":\n assert event['setting_name'] in UserProfile.property_types\n state[event['setting_name']] = event['setting']\n elif event['type'] == \"update_global_notifications\":\n assert event['notification_name'] in UserProfile.notification_setting_types\n state[event['notification_name']] = event['setting']\n elif event['type'] == \"invites_changed\":\n pass\n elif event['type'] == \"user_group\":\n if event['op'] == 'add':\n 
state['realm_user_groups'].append(event['group'])\n state['realm_user_groups'].sort(key=lambda group: group['id'])\n elif event['op'] == 'update':\n for user_group in state['realm_user_groups']:\n if user_group['id'] == event['group_id']:\n user_group.update(event['data'])\n elif event['op'] == 'add_members':\n for user_group in state['realm_user_groups']:\n if user_group['id'] == event['group_id']:\n user_group['members'].extend(event['user_ids'])\n user_group['members'].sort()\n elif event['op'] == 'remove_members':\n for user_group in state['realm_user_groups']:\n if user_group['id'] == event['group_id']:\n members = set(user_group['members'])\n user_group['members'] = list(members - set(event['user_ids']))\n user_group['members'].sort()\n elif event['op'] == 'remove':\n state['realm_user_groups'] = [ug for ug in state['realm_user_groups']\n if ug['id'] != event['group_id']]\n else:\n raise AssertionError(\"Unexpected event type %s\" % (event['type'],))\n\ndef do_events_register(user_profile: UserProfile, user_client: Client,\n apply_markdown: bool = True,\n client_gravatar: bool = False,\n event_types: Optional[Iterable[str]] = None,\n queue_lifespan_secs: int = 0,\n all_public_streams: bool = False,\n include_subscribers: bool = True,\n narrow: Iterable[Sequence[str]] = [],\n fetch_event_types: Optional[Iterable[str]] = None) -> Dict[str, Any]:\n # Technically we don't need to check this here because\n # build_narrow_filter will check it, but it's nicer from an error\n # handling perspective to do it before contacting Tornado\n check_supported_events_narrow_filter(narrow)\n\n # Note that we pass event_types, not fetch_event_types here, since\n # that's what controls which future events are sent.\n queue_id = request_event_queue(user_profile, user_client, apply_markdown, client_gravatar,\n queue_lifespan_secs, event_types, all_public_streams,\n narrow=narrow)\n\n if queue_id is None:\n raise JsonableError(_(\"Could not allocate event queue\"))\n\n if fetch_event_types is not None:\n event_types_set = set(fetch_event_types) # type: Optional[Set[str]]\n elif event_types is not None:\n event_types_set = set(event_types)\n else:\n event_types_set = None\n\n # Fill up the UserMessage rows if a soft-deactivated user has returned\n maybe_catch_up_soft_deactivated_user(user_profile)\n\n ret = fetch_initial_state_data(user_profile, event_types_set, queue_id,\n client_gravatar=client_gravatar,\n include_subscribers=include_subscribers)\n\n # Apply events that came in while we were fetching initial data\n events = get_user_events(user_profile, queue_id, -1)\n apply_events(ret, events, user_profile, include_subscribers=include_subscribers,\n client_gravatar=client_gravatar,\n fetch_event_types=fetch_event_types)\n\n post_process_state(ret)\n\n if len(events) > 0:\n ret['last_event_id'] = events[-1]['id']\n else:\n ret['last_event_id'] = -1\n return ret\n\ndef post_process_state(ret: Dict[str, Any]) -> None:\n '''\n NOTE:\n\n Below is an example of post-processing initial state data AFTER we\n apply events. 
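
# --- Illustrative sketch (not part of the original module) ---
# The essence of do_events_register above is fetch-then-apply: capture a
# snapshot, then replay the events that were queued while fetching.  A
# minimal stand-in with a hypothetical pointer-only state:
def _example_fetch_then_apply() -> None:
    state = {'pointer': 5}  # pretend this came from the initial fetch
    queued_events = [
        {'type': 'pointer', 'pointer': 7},
        {'type': 'pointer', 'pointer': 6},
    ]
    for event in queued_events:
        if event['type'] == 'pointer':
            state['pointer'] = max(state['pointer'], event['pointer'])
    assert state['pointer'] == 7  # the stale event (6) cannot move it backwards
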
def post_process_state(ret: Dict[str, Any]) -> None:
    '''
    NOTE:

    Below is an example of post-processing initial state data AFTER we
    apply events.  For large payloads like `unread_msgs`, it's helpful
    to have an intermediate data structure that is easy to manipulate
    with O(1)-type operations as we apply events.

    Then, only at the end, we put it in the form that's more appropriate
    for clients.
    '''
    if 'raw_unread_msgs' in ret:
        ret['unread_msgs'] = aggregate_unread_data(ret['raw_unread_msgs'])
        del ret['raw_unread_msgs']

    '''
    See the note above; the same technique applies below.
    '''
    if 'raw_users' in ret:
        user_dicts = list(ret['raw_users'].values())

        ret['realm_users'] = [d for d in user_dicts if d['is_active']]
        ret['realm_non_active_users'] = [d for d in user_dicts if not d['is_active']]

        '''
        Be aware that we do intentional aliasing in the below code.
        We can now safely remove the `is_active` field from all the
        dicts that got partitioned into the two lists above.

        We remove the field because it's already implied, and sending
        it to clients makes clients prone to bugs where they "trust"
        the field but don't actually update it in live updates.  It
        also wastes bandwidth.
        '''
        for d in user_dicts:
            d.pop('is_active')

        del ret['raw_users']
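
# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the raw_users -> realm_users/realm_non_active_users
# partitioning done by post_process_state() above, on made-up rows:
def _example_post_process_state() -> None:
    ret = {
        'raw_users': {
            1: {'id': 1, 'is_active': True},
            2: {'id': 2, 'is_active': False},
        },
    }
    post_process_state(ret)
    assert [u['id'] for u in ret['realm_users']] == [1]
    assert [u['id'] for u in ret['realm_non_active_users']] == [2]
    # The implied `is_active` flag has been stripped from the row dicts.
    assert 'is_active' not in ret['realm_users'][0]
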
# ---- archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip :: zerver/lib/exceptions.py ----

from enum import Enum
from typing import Any, Dict, List, Optional, Type
from mypy_extensions import NoReturn

from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _

class AbstractEnum(Enum):
    '''An enumeration whose members are used strictly for their names.'''

    def __new__(cls: Type['AbstractEnum']) -> 'AbstractEnum':
        obj = object.__new__(cls)
        obj._value_ = len(cls.__members__) + 1
        return obj

    # Override all the `Enum` methods that use `_value_`.

    def __repr__(self) -> str:
        return str(self)  # nocoverage

    def value(self) -> None:
        raise AssertionError("Not implemented")

    def __reduce_ex__(self, proto: int) -> NoReturn:
        raise AssertionError("Not implemented")

class ErrorCode(AbstractEnum):
    BAD_REQUEST = ()  # Generic name, from the name of HTTP 400.
    REQUEST_VARIABLE_MISSING = ()
    REQUEST_VARIABLE_INVALID = ()
    INVALID_JSON = ()
    BAD_IMAGE = ()
    REALM_UPLOAD_QUOTA = ()
    BAD_NARROW = ()
    MISSING_HTTP_EVENT_HEADER = ()
    STREAM_DOES_NOT_EXIST = ()
    UNAUTHORIZED_PRINCIPAL = ()
    UNEXPECTED_WEBHOOK_EVENT_TYPE = ()
    BAD_EVENT_QUEUE_ID = ()
    CSRF_FAILED = ()
    INVITATION_FAILED = ()
    INVALID_ZULIP_SERVER = ()
    REQUEST_CONFUSING_VAR = ()

class JsonableError(Exception):
    '''A standardized error format we can turn into a nice JSON HTTP response.

    This class can be invoked in a couple ways.

    * Easiest, but completely machine-unreadable:

        raise JsonableError(_("No such widget: {}").format(widget_name))

      The message may be passed through to clients and shown to a user,
      so translation is required.  Because the text will vary depending
      on the user's language, it's not possible for code to distinguish
      this error from others in a non-buggy way.

    * Fully machine-readable, with an error code and structured data:

        class NoSuchWidgetError(JsonableError):
            code = ErrorCode.NO_SUCH_WIDGET
            data_fields = ['widget_name']

            def __init__(self, widget_name: str) -> None:
                self.widget_name = widget_name  # type: str

            @staticmethod
            def msg_format() -> str:
                return _("No such widget: {widget_name}")

        raise NoSuchWidgetError(widget_name)

      Now both server and client code see a `widget_name` attribute
      and an error code.

    Subclasses may also override `http_status_code`.
    '''

    # Override this in subclasses, as needed.
    code = ErrorCode.BAD_REQUEST  # type: ErrorCode

    # Override this in subclasses if providing structured data.
    data_fields = []  # type: List[str]

    # Optionally override this in subclasses to return a different HTTP status,
    # like 403 or 404.
    http_status_code = 400  # type: int

    def __init__(self, msg: str) -> None:
        # `_msg` is an implementation detail of `JsonableError` itself.
        self._msg = msg  # type: str

    @staticmethod
    def msg_format() -> str:
        '''Override in subclasses.  Gets the items in `data_fields` as format args.

        This should return (a translation of) a string literal.
        The reason it's not simply a class attribute is to allow
        translation to work.
        '''
        # Secretly this gets one more format arg not in `data_fields`: `_msg`.
        # That's for the sake of the `JsonableError` base logic itself, for
        # the simplest form of use where we just get a plain message string
        # at construction time.
        return '{_msg}'

    #
    # Infrastructure -- not intended to be overridden in subclasses.
    #

    @property
    def msg(self) -> str:
        format_data = dict(((f, getattr(self, f)) for f in self.data_fields),
                           _msg=getattr(self, '_msg', None))
        return self.msg_format().format(**format_data)

    @property
    def data(self) -> Dict[str, Any]:
        return dict(((f, getattr(self, f)) for f in self.data_fields),
                    code=self.code.name)

    def to_json(self) -> Dict[str, Any]:
        d = {'result': 'error', 'msg': self.msg}
        d.update(self.data)
        return d

    def __str__(self) -> str:
        return self.msg

class StreamDoesNotExistError(JsonableError):
    code = ErrorCode.STREAM_DOES_NOT_EXIST
    data_fields = ['stream']

    def __init__(self, stream: str) -> None:
        self.stream = stream

    @staticmethod
    def msg_format() -> str:
        return _("Stream '{stream}' does not exist")

class RateLimited(PermissionDenied):
    def __init__(self, msg: str="") -> None:
        super().__init__(msg)

class InvalidJSONError(JsonableError):
    code = ErrorCode.INVALID_JSON

    @staticmethod
    def msg_format() -> str:
        return _("Malformed JSON")

class BugdownRenderingException(Exception):
    pass
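
# --- Illustrative sketch (not part of the original module) ---
# Exercises the machine-readable JsonableError pattern documented in the
# class docstring above; _ExampleWidgetError and its reuse of
# ErrorCode.BAD_REQUEST are stand-ins (the docstring's NO_SUCH_WIDGET
# code does not exist), and translation via _() is skipped here.
class _ExampleWidgetError(JsonableError):
    code = ErrorCode.BAD_REQUEST
    data_fields = ['widget_name']

    def __init__(self, widget_name: str) -> None:
        self.widget_name = widget_name  # type: str

    @staticmethod
    def msg_format() -> str:
        return 'No such widget: {widget_name}'

# _ExampleWidgetError('foo').to_json() evaluates to:
#   {'result': 'error', 'msg': 'No such widget: foo',
#    'widget_name': 'foo', 'code': 'BAD_REQUEST'}
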
# ---- archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip :: zerver/lib/export.py ----

import datetime
from boto.s3.connection import S3Connection
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.forms.models import model_to_dict
from django.utils.timezone import make_aware as timezone_make_aware
from django.utils.timezone import utc as timezone_utc
from django.utils.timezone import is_naive as timezone_is_naive
import glob
import logging
import os
import ujson
import subprocess
import tempfile
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.models import UserProfile, Realm, Client, Huddle, Stream, \
    UserMessage, Subscription, Message, RealmEmoji, RealmFilter, Reaction, \
    RealmDomain, Recipient, DefaultStream, get_user_profile_by_id, \
    UserPresence, UserActivity, UserActivityInterval, CustomProfileField, \
    CustomProfileFieldValue, get_display_recipient, Attachment, get_system_bot, \
    RealmAuditLog, UserHotspot, MutedTopic, Service, UserGroup, \
    UserGroupMembership, BotStorageData, BotConfigData
from zerver.lib.parallel import run_parallel
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, \
    Iterable, Union

# Custom mypy types follow:
Record = Dict[str, Any]
TableName = str
TableData = Dict[TableName, List[Record]]
Field = str
Path = str
Context = Dict[str, Any]
FilterArgs = Dict[str, Any]
IdSource = Tuple[TableName, Field]
SourceFilter = Callable[[Record], bool]

# These next two types are callbacks, which mypy does not
# support well, because PEP 484 says "using callbacks
# with keyword arguments is not perceived as a common use case."
# CustomFetch = Callable[[TableData, Config, Context], None]
# PostProcessData = Callable[[TableData, Config, Context], None]
CustomFetch = Any  # TODO: make more specific, see above
PostProcessData = Any  # TODO: make more specific

# The keys of our MessageOutput variables are normally
# List[Record], but when we write partials, we can get
# lists of integers or a single integer.
# TODO: This could maybe be improved using TypedDict?
MessageOutput = Dict[str, Union[List[Record], List[int], int]]

MESSAGE_BATCH_CHUNK_SIZE = 1000

realm_tables = [("zerver_defaultstream", DefaultStream, "defaultstream"),
                ("zerver_realmemoji", RealmEmoji, "realmemoji"),
                ("zerver_realmdomain", RealmDomain, "realmdomain"),
                ("zerver_realmfilter", RealmFilter, "realmfilter")]  # List[Tuple[TableName, Any, str]]


ALL_ZULIP_TABLES = {
    'analytics_anomaly',
    'analytics_fillstate',
    'analytics_installationcount',
    'analytics_realmcount',
    'analytics_streamcount',
    'analytics_usercount',
    'otp_static_staticdevice',
    'otp_static_statictoken',
    'otp_totp_totpdevice',
    'social_auth_association',
    'social_auth_code',
    'social_auth_nonce',
    'social_auth_partial',
    'social_auth_usersocialauth',
    'two_factor_phonedevice',
    'zerver_archivedattachment',
    'zerver_archivedattachment_messages',
    'zerver_archivedmessage',
    'zerver_archivedusermessage',
    'zerver_attachment',
    'zerver_attachment_messages',
    'zerver_botconfigdata',
    'zerver_botstoragedata',
    'zerver_client',
    'zerver_customprofilefield',
    'zerver_customprofilefieldvalue',
    'zerver_defaultstream',
    'zerver_defaultstreamgroup',
    'zerver_defaultstreamgroup_streams',
    'zerver_emailchangestatus',
    'zerver_huddle',
    'zerver_message',
    'zerver_multiuseinvite',
    'zerver_multiuseinvite_streams',
    'zerver_preregistrationuser',
    'zerver_preregistrationuser_streams',
    'zerver_pushdevicetoken',
    'zerver_reaction',
    'zerver_realm',
    'zerver_realmauditlog',
    'zerver_realmdomain',
    'zerver_realmemoji',
    'zerver_realmfilter',
    'zerver_recipient',
    'zerver_scheduledemail',
    'zerver_scheduledmessage',
    'zerver_service',
    'zerver_stream',
    'zerver_submessage',
    'zerver_subscription',
    'zerver_useractivity',
    'zerver_useractivityinterval',
    'zerver_usergroup',
    'zerver_usergroupmembership',
    'zerver_userhotspot',
    'zerver_usermessage',
    'zerver_userpresence',
    'zerver_userprofile',
    'zerver_userprofile_groups',
    'zerver_userprofile_user_permissions',
    'zerver_mutedtopic',
}

NON_EXPORTED_TABLES = {
    # These invitation/confirmation flow tables don't make sense to
    # export, since invitation links will be broken by the server URL
    # change anyway:
    'zerver_emailchangestatus',
    'zerver_multiuseinvite',
    'zerver_multiuseinvite_streams',
    'zerver_preregistrationuser',
    'zerver_preregistrationuser_streams',

    # When switching servers, clients will need to re-login and
    # reregister for push notifications anyway.
    'zerver_pushdevicetoken',

    # We don't use these generated Django tables
    'zerver_userprofile_groups',
    'zerver_userprofile_user_permissions',

    # These are used for scheduling future activity; they could make
    # sense to export, but are relatively low value.
    'zerver_scheduledemail',
    'zerver_scheduledmessage',

    # These tables are related to a user's 2FA authentication
    # configuration, which will need to be re-set up on the new server.
    'two_factor_phonedevice',
    'otp_static_staticdevice',
    'otp_static_statictoken',
    'otp_totp_totpdevice',

    # These archive tables should not be exported (they are to support
    # restoring content accidentally deleted due to software bugs in
    # the retention policy feature)
    'zerver_archivedmessage',
    'zerver_archivedusermessage',
    'zerver_archivedattachment',
    'zerver_archivedattachment_messages',

    # Social auth tables are not needed post-export, since we don't
    # use any of this state outside of a direct authentication flow.
    'social_auth_association',
    'social_auth_code',
    'social_auth_nonce',
    'social_auth_partial',
    'social_auth_usersocialauth',

    # We will likely never want to migrate this table, since it's a
    # total of all the realmcount values on the server.  Might need to
    # recompute it after a fillstate import.
    'analytics_installationcount',

    # These analytics tables, however, should ideally be in the export.
    'analytics_realmcount',
    'analytics_streamcount',
    'analytics_usercount',
    # Fillstate will require some cleverness to do the right partial export.
    'analytics_fillstate',
    # This table isn't yet used for anything.
    'analytics_anomaly',

    # These are for unfinished features; we'll want to add them to the
    # export before they reach full production status.
    'zerver_defaultstreamgroup',
    'zerver_defaultstreamgroup_streams',
    'zerver_submessage',

    # For any tables listed below here, it's a bug that they are not present in the export.
}

IMPLICIT_TABLES = {
    # ManyToMany relationships are exported implicitly.
    'zerver_attachment_messages',
}

ATTACHMENT_TABLES = {
    'zerver_attachment',
}

MESSAGE_TABLES = {
    # message tables get special treatment, because they're so big
    'zerver_message',
    'zerver_usermessage',
    # zerver_reaction belongs here, since it's added late
    'zerver_reaction',
}

DATE_FIELDS = {
    'zerver_attachment': ['create_time'],
    'zerver_message': ['last_edit_time', 'pub_date'],
    'zerver_realm': ['date_created'],
    'zerver_stream': ['date_created'],
    'zerver_useractivity': ['last_visit'],
    'zerver_useractivityinterval': ['start', 'end'],
    'zerver_userpresence': ['timestamp'],
    'zerver_userprofile': ['date_joined', 'last_login', 'last_reminder'],
    'zerver_realmauditlog': ['event_time'],
    'zerver_userhotspot': ['timestamp'],
}  # type: Dict[TableName, List[Field]]
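
# --- Illustrative sketch (not part of the original module) ---
# The set arithmetic used by sanity_check_output() below, in miniature:
# every table must be accounted for somewhere, or the export is incomplete.
def _example_table_coverage_check() -> None:
    exported = (ALL_ZULIP_TABLES - NON_EXPORTED_TABLES - IMPLICIT_TABLES -
                MESSAGE_TABLES - ATTACHMENT_TABLES)
    assert 'zerver_stream' in exported  # exported via the normal flow
    assert 'zerver_pushdevicetoken' not in exported  # deliberately skipped
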
def sanity_check_output(data: TableData) -> None:
    # First, we verify that the export tool has a declared
    # configuration for every table.
    target_models = (
        list(apps.get_app_config('analytics').get_models(include_auto_created=True)) +
        list(apps.get_app_config('django_otp').get_models(include_auto_created=True)) +
        list(apps.get_app_config('otp_static').get_models(include_auto_created=True)) +
        list(apps.get_app_config('otp_totp').get_models(include_auto_created=True)) +
        list(apps.get_app_config('social_django').get_models(include_auto_created=True)) +
        list(apps.get_app_config('two_factor').get_models(include_auto_created=True)) +
        list(apps.get_app_config('zerver').get_models(include_auto_created=True))
    )
    all_tables_db = set(model._meta.db_table for model in target_models)

    # These assertion statements will fire when we add a new database
    # table that is not included in Zulip's data exports.  Generally,
    # you can add your new table to `ALL_ZULIP_TABLES` and
    # `NON_EXPORTED_TABLES` during early work on a new feature so that
    # CI passes.
    #
    # We'll want to make sure we handle it for exports before
    # releasing the new feature, but doing so correctly requires some
    # expertise on this export system.
    assert ALL_ZULIP_TABLES == all_tables_db
    assert NON_EXPORTED_TABLES.issubset(ALL_ZULIP_TABLES)
    assert IMPLICIT_TABLES.issubset(ALL_ZULIP_TABLES)
    assert ATTACHMENT_TABLES.issubset(ALL_ZULIP_TABLES)

    tables = set(ALL_ZULIP_TABLES)
    tables -= NON_EXPORTED_TABLES
    tables -= IMPLICIT_TABLES
    tables -= MESSAGE_TABLES
    tables -= ATTACHMENT_TABLES

    for table in tables:
        if table not in data:
            logging.warning('??? NO DATA EXPORTED FOR TABLE %s!!!' % (table,))

def write_data_to_file(output_file: Path, data: Any) -> None:
    with open(output_file, "w") as f:
        f.write(ujson.dumps(data, indent=4))

def make_raw(query: Any, exclude: Optional[List[Field]]=None) -> List[Record]:
    '''
    Takes a Django query and returns a JSONable list
    of dictionaries corresponding to the database rows.
    '''
    rows = []
    for instance in query:
        data = model_to_dict(instance, exclude=exclude)
        """
        In Django 1.11.5, model_to_dict evaluates the QuerySet of
        a many-to-many field to give us a list of instances.  We require
        a list of primary keys, so we get the primary keys from the
        instances below.
        """
        for field in instance._meta.many_to_many:
            value = data[field.name]
            data[field.name] = [row.id for row in value]

        rows.append(data)

    return rows

def floatify_datetime_fields(data: TableData, table: TableName) -> None:
    for item in data[table]:
        for field in DATE_FIELDS[table]:
            orig_dt = item[field]
            if orig_dt is None:
                continue
            if timezone_is_naive(orig_dt):
                logging.warning("Naive datetime: %s" % (item,))
                dt = timezone_make_aware(orig_dt)
            else:
                dt = orig_dt
            utc_naive = dt.replace(tzinfo=None) - dt.utcoffset()
            item[field] = (utc_naive - datetime.datetime(1970, 1, 1)).total_seconds()
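
# --- Illustrative sketch (not part of the original module) ---
# What floatify_datetime_fields() above does to a DATE_FIELDS column:
# an aware datetime becomes a UTC epoch float.  The row data is made up.
def _example_floatify() -> None:
    data = {'zerver_stream': [
        {'name': 'devel',
         'date_created': datetime.datetime(2018, 1, 1, tzinfo=timezone_utc)},
    ]}  # type: TableData
    floatify_datetime_fields(data, 'zerver_stream')
    assert data['zerver_stream'][0]['date_created'] == 1514764800.0
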
The child will\n append itself to the parent's list of children.\n\n '''\n\n def __init__(self, table: Optional[str]=None,\n model: Optional[Any]=None,\n normal_parent: Optional['Config']=None,\n virtual_parent: Optional['Config']=None,\n filter_args: Optional[FilterArgs]=None,\n custom_fetch: Optional[CustomFetch]=None,\n custom_tables: Optional[List[TableName]]=None,\n post_process_data: Optional[PostProcessData]=None,\n concat_and_destroy: Optional[List[TableName]]=None,\n id_source: Optional[IdSource]=None,\n source_filter: Optional[SourceFilter]=None,\n parent_key: Optional[Field]=None,\n use_all: bool=False,\n is_seeded: bool=False,\n exclude: Optional[List[Field]]=None) -> None:\n assert table or custom_tables\n self.table = table\n self.model = model\n self.normal_parent = normal_parent\n self.virtual_parent = virtual_parent\n self.filter_args = filter_args\n self.parent_key = parent_key\n self.use_all = use_all\n self.is_seeded = is_seeded\n self.exclude = exclude\n self.custom_fetch = custom_fetch\n self.custom_tables = custom_tables\n self.post_process_data = post_process_data\n self.concat_and_destroy = concat_and_destroy\n self.id_source = id_source\n self.source_filter = source_filter\n self.children = [] # type: List[Config]\n\n if normal_parent is not None:\n self.parent = normal_parent # type: Optional[Config]\n else:\n self.parent = None\n\n if virtual_parent is not None and normal_parent is not None:\n raise AssertionError('''\n If you specify a normal_parent, please\n do not create a virtual_parent.\n ''')\n\n if normal_parent is not None:\n normal_parent.children.append(self)\n elif virtual_parent is not None:\n virtual_parent.children.append(self)\n elif is_seeded is None:\n raise AssertionError('''\n You must specify a parent if you are\n not using is_seeded.\n ''')\n\n if self.id_source is not None:\n if self.virtual_parent is None:\n raise AssertionError('''\n You must specify a virtual_parent if you are\n using id_source.''')\n if self.id_source[0] != self.virtual_parent.table:\n raise AssertionError('''\n Configuration error. To populate %s, you\n want data from %s, but that differs from\n the table name of your virtual parent (%s),\n which suggests you many not have set up\n the ordering correctly. 
You may simply\n need to assign a virtual_parent, or there\n may be deeper issues going on.''' % (\n self.table,\n self.id_source[0],\n self.virtual_parent.table))\n\n\ndef export_from_config(response: TableData, config: Config, seed_object: Optional[Any]=None,\n context: Optional[Context]=None) -> None:\n table = config.table\n parent = config.parent\n model = config.model\n\n if context is None:\n context = {}\n\n if table:\n exported_tables = [table]\n else:\n if config.custom_tables is None:\n raise AssertionError('''\n You must specify config.custom_tables if you\n are not specifying config.table''')\n exported_tables = config.custom_tables\n\n for t in exported_tables:\n logging.info('Exporting via export_from_config: %s' % (t,))\n\n rows = None\n if config.is_seeded:\n rows = [seed_object]\n\n elif config.custom_fetch:\n config.custom_fetch(\n response=response,\n config=config,\n context=context\n )\n if config.custom_tables:\n for t in config.custom_tables:\n if t not in response:\n raise AssertionError('Custom fetch failed to populate %s' % (t,))\n\n elif config.concat_and_destroy:\n # When we concat_and_destroy, we are working with\n # temporary \"tables\" that are lists of records that\n # should already be ready to export.\n data = [] # type: List[Record]\n for t in config.concat_and_destroy:\n data += response[t]\n del response[t]\n logging.info('Deleted temporary %s' % (t,))\n assert table is not None\n response[table] = data\n\n elif config.use_all:\n assert model is not None\n query = model.objects.all()\n rows = list(query)\n\n elif config.normal_parent:\n # In this mode, our current model is figuratively Article,\n # and normal_parent is figuratively Blog, and\n # now we just need to get all the articles\n # contained by the blogs.\n model = config.model\n assert parent is not None\n assert parent.table is not None\n assert config.parent_key is not None\n parent_ids = [r['id'] for r in response[parent.table]]\n filter_parms = {config.parent_key: parent_ids} # type: Dict[str, Any]\n if config.filter_args is not None:\n filter_parms.update(config.filter_args)\n assert model is not None\n query = model.objects.filter(**filter_parms)\n rows = list(query)\n\n elif config.id_source:\n # In this mode, we are the figurative Blog, and we now\n # need to look at the current response to get all the\n # blog ids from the Article rows we fetched previously.\n model = config.model\n assert model is not None\n # This will be a tuple of the form ('zerver_article', 'blog').\n (child_table, field) = config.id_source\n child_rows = response[child_table]\n if config.source_filter:\n child_rows = [r for r in child_rows if config.source_filter(r)]\n lookup_ids = [r[field] for r in child_rows]\n filter_parms = dict(id__in=lookup_ids)\n if config.filter_args:\n filter_parms.update(config.filter_args)\n query = model.objects.filter(**filter_parms)\n rows = list(query)\n\n # Post-process rows (which won't apply to custom fetches/concats)\n if rows is not None:\n assert table is not None # Hint for mypy\n response[table] = make_raw(rows, exclude=config.exclude)\n if table in DATE_FIELDS:\n floatify_datetime_fields(response, table)\n\n if config.post_process_data:\n config.post_process_data(\n response=response,\n config=config,\n context=context\n )\n\n # Now walk our children. 
def export_from_config(response: TableData, config: Config, seed_object: Optional[Any]=None,
                       context: Optional[Context]=None) -> None:
    table = config.table
    parent = config.parent
    model = config.model

    if context is None:
        context = {}

    if table:
        exported_tables = [table]
    else:
        if config.custom_tables is None:
            raise AssertionError('''
                You must specify config.custom_tables if you
                are not specifying config.table''')
        exported_tables = config.custom_tables

    for t in exported_tables:
        logging.info('Exporting via export_from_config:  %s' % (t,))

    rows = None
    if config.is_seeded:
        rows = [seed_object]

    elif config.custom_fetch:
        config.custom_fetch(
            response=response,
            config=config,
            context=context
        )
        if config.custom_tables:
            for t in config.custom_tables:
                if t not in response:
                    raise AssertionError('Custom fetch failed to populate %s' % (t,))

    elif config.concat_and_destroy:
        # When we concat_and_destroy, we are working with
        # temporary "tables" that are lists of records that
        # should already be ready to export.
        data = []  # type: List[Record]
        for t in config.concat_and_destroy:
            data += response[t]
            del response[t]
            logging.info('Deleted temporary %s' % (t,))
        assert table is not None
        response[table] = data

    elif config.use_all:
        assert model is not None
        query = model.objects.all()
        rows = list(query)

    elif config.normal_parent:
        # In this mode, our current model is figuratively Article,
        # and normal_parent is figuratively Blog, and
        # now we just need to get all the articles
        # contained by the blogs.
        model = config.model
        assert parent is not None
        assert parent.table is not None
        assert config.parent_key is not None
        parent_ids = [r['id'] for r in response[parent.table]]
        filter_parms = {config.parent_key: parent_ids}  # type: Dict[str, Any]
        if config.filter_args is not None:
            filter_parms.update(config.filter_args)
        assert model is not None
        query = model.objects.filter(**filter_parms)
        rows = list(query)

    elif config.id_source:
        # In this mode, we are the figurative Blog, and we now
        # need to look at the current response to get all the
        # blog ids from the Article rows we fetched previously.
        model = config.model
        assert model is not None
        # This will be a tuple of the form ('zerver_article', 'blog').
        (child_table, field) = config.id_source
        child_rows = response[child_table]
        if config.source_filter:
            child_rows = [r for r in child_rows if config.source_filter(r)]
        lookup_ids = [r[field] for r in child_rows]
        filter_parms = dict(id__in=lookup_ids)
        if config.filter_args:
            filter_parms.update(config.filter_args)
        query = model.objects.filter(**filter_parms)
        rows = list(query)

    # Post-process rows (which won't apply to custom fetches/concats)
    if rows is not None:
        assert table is not None  # Hint for mypy
        response[table] = make_raw(rows, exclude=config.exclude)
        if table in DATE_FIELDS:
            floatify_datetime_fields(response, table)

    if config.post_process_data:
        config.post_process_data(
            response=response,
            config=config,
            context=context
        )

    # Now walk our children.  It's extremely important to respect
    # the order of children here.
    for child_config in config.children:
        export_from_config(
            response=response,
            config=child_config,
            context=context,
        )

def get_realm_config() -> Config:
    # This is common, public information about the realm that we can share
    # with all realm users.

    realm_config = Config(
        table='zerver_realm',
        is_seeded=True
    )

    Config(
        table='zerver_defaultstream',
        model=DefaultStream,
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )

    Config(
        table='zerver_customprofilefield',
        model=CustomProfileField,
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )

    Config(
        table='zerver_realmemoji',
        model=RealmEmoji,
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )

    Config(
        table='zerver_realmdomain',
        model=RealmDomain,
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )

    Config(
        table='zerver_realmfilter',
        model=RealmFilter,
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )

    Config(
        table='zerver_client',
        model=Client,
        virtual_parent=realm_config,
        use_all=True
    )

    user_profile_config = Config(
        custom_tables=[
            'zerver_userprofile',
            'zerver_userprofile_mirrordummy',
        ],
        # set table for children who treat us as normal parent
        table='zerver_userprofile',
        virtual_parent=realm_config,
        custom_fetch=fetch_user_profile,
    )

    user_groups_config = Config(
        table='zerver_usergroup',
        model=UserGroup,
        normal_parent=realm_config,
        parent_key='realm__in',
    )

    Config(
        table='zerver_usergroupmembership',
        model=UserGroupMembership,
        normal_parent=user_groups_config,
        parent_key='user_group__in',
    )

    Config(
        custom_tables=[
            'zerver_userprofile_crossrealm',
        ],
        virtual_parent=user_profile_config,
        custom_fetch=fetch_user_profile_cross_realm,
    )

    Config(
        table='zerver_userpresence',
        model=UserPresence,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )

    Config(
        table='zerver_customprofilefieldvalue',
        model=CustomProfileFieldValue,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )

    Config(
        table='zerver_useractivity',
        model=UserActivity,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )

    Config(
        table='zerver_useractivityinterval',
        model=UserActivityInterval,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )

    Config(
        table='zerver_realmauditlog',
        model=RealmAuditLog,
        normal_parent=user_profile_config,
        parent_key='modified_user__in',
    )

    Config(
        table='zerver_userhotspot',
        model=UserHotspot,
        normal_parent=user_profile_config,
        parent_key='user__in',
    )

    Config(
        table='zerver_mutedtopic',
        model=MutedTopic,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )

    Config(
        table='zerver_service',
        model=Service,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )

    Config(
        table='zerver_botstoragedata',
        model=BotStorageData,
        normal_parent=user_profile_config,
        parent_key='bot_profile__in',
    )

    Config(
        table='zerver_botconfigdata',
        model=BotConfigData,
        normal_parent=user_profile_config,
        parent_key='bot_profile__in',
    )

    # Some of these tables are intermediate "tables" that we
    # create only for the export.  Think of them as similar to views.

    user_subscription_config = Config(
        table='_user_subscription',
        model=Subscription,
        normal_parent=user_profile_config,
        filter_args={'recipient__type': Recipient.PERSONAL},
        parent_key='user_profile__in',
    )

    Config(
        table='_user_recipient',
        model=Recipient,
        virtual_parent=user_subscription_config,
        id_source=('_user_subscription', 'recipient'),
    )

    #
    stream_subscription_config = Config(
        table='_stream_subscription',
        model=Subscription,
        normal_parent=user_profile_config,
        filter_args={'recipient__type': Recipient.STREAM},
        parent_key='user_profile__in',
    )

    stream_recipient_config = Config(
        table='_stream_recipient',
        model=Recipient,
        virtual_parent=stream_subscription_config,
        id_source=('_stream_subscription', 'recipient'),
    )

    Config(
        table='zerver_stream',
        model=Stream,
        virtual_parent=stream_recipient_config,
        id_source=('_stream_recipient', 'type_id'),
        source_filter=lambda r: r['type'] == Recipient.STREAM,
        exclude=['email_token'],
        post_process_data=sanity_check_stream_data
    )

    #

    Config(
        custom_tables=[
            '_huddle_recipient',
            '_huddle_subscription',
            'zerver_huddle',
        ],
        normal_parent=user_profile_config,
        custom_fetch=fetch_huddle_objects,
    )

    # Now build permanent tables from our temp tables.
    Config(
        table='zerver_recipient',
        virtual_parent=user_profile_config,
        concat_and_destroy=[
            '_user_recipient',
            '_stream_recipient',
            '_huddle_recipient',
        ],
    )

    Config(
        table='zerver_subscription',
        virtual_parent=user_profile_config,
        concat_and_destroy=[
            '_user_subscription',
            '_stream_subscription',
            '_huddle_subscription',
        ]
    )

    return realm_config

def sanity_check_stream_data(response: TableData, config: Config, context: Context) -> None:

    if context['exportable_user_ids'] is not None:
        # If we restrict which user ids are exportable,
        # the way that we find streams is a little too
        # complex to have a sanity check.
        return

    actual_streams = set([stream.name for stream in Stream.objects.filter(
        realm=response["zerver_realm"][0]['id'])])
    streams_in_response = set([stream['name'] for stream in response['zerver_stream']])

    if len(streams_in_response - actual_streams) > 0:
        print("Error: Streams not present in the realm were exported:")
        print("  ", streams_in_response - actual_streams)
        print("This is likely due to a bug in the export tool.")
        raise AssertionError("Aborting!  Please investigate.")
    if len(actual_streams - streams_in_response) > 0:
        print("Error: Some streams present in the realm were not exported:")
        print("  ", actual_streams - streams_in_response)
        print("Usually, this is caused by a stream having been created that never had subscribers.")
        print("(Due to a bug elsewhere in Zulip, not in the export tool)")
        raise AssertionError("Aborting!  Please investigate.")

def fetch_user_profile(response: TableData, config: Config, context: Context) -> None:
    realm = context['realm']
    exportable_user_ids = context['exportable_user_ids']

    query = UserProfile.objects.filter(realm_id=realm.id)
    exclude = ['password', 'api_key']
    rows = make_raw(list(query), exclude=exclude)

    normal_rows = []  # type: List[Record]
    dummy_rows = []  # type: List[Record]

    for row in rows:
        if exportable_user_ids is not None:
            if row['id'] in exportable_user_ids:
                assert not row['is_mirror_dummy']
            else:
                # Convert non-exportable users to
                # inactive is_mirror_dummy users.
                row['is_mirror_dummy'] = True
                row['is_active'] = False

        if row['is_mirror_dummy']:
            dummy_rows.append(row)
        else:
            normal_rows.append(row)

    response['zerver_userprofile'] = normal_rows
    response['zerver_userprofile_mirrordummy'] = dummy_rows

def fetch_user_profile_cross_realm(response: TableData, config: Config, context: Context) -> None:
    realm = context['realm']
    response['zerver_userprofile_crossrealm'] = []

    if realm.string_id == settings.SYSTEM_BOT_REALM:
        return

    for bot_user in [
        get_system_bot(settings.NOTIFICATION_BOT),
        get_system_bot(settings.EMAIL_GATEWAY_BOT),
        get_system_bot(settings.WELCOME_BOT),
    ]:
        recipient_id = Recipient.objects.get(type_id=bot_user.id, type=Recipient.PERSONAL).id
        response['zerver_userprofile_crossrealm'].append(dict(
            email=bot_user.email,
            id=bot_user.id,
            recipient_id=recipient_id,
        ))

def fetch_attachment_data(response: TableData, realm_id: int, message_ids: Set[int]) -> None:
    filter_args = {'realm_id': realm_id}
    query = Attachment.objects.filter(**filter_args)
    response['zerver_attachment'] = make_raw(list(query))
    floatify_datetime_fields(response, 'zerver_attachment')

    '''
    We usually export most messages for the realm, but not
    quite ALL messages for the realm.  So, we need to
    clean up our attachment data to have correct
    values for response['zerver_attachment'][]['messages'].
    '''
    for row in response['zerver_attachment']:
        filtered_message_ids = set(row['messages']).intersection(message_ids)
        row['messages'] = sorted(list(filtered_message_ids))

    '''
    Attachments can be connected to multiple messages, although
    it's most common to have just one message.
    Regardless, if none of those message(s) survived the filtering
    above for a particular attachment, then we won't export the
    attachment row.
    '''
    response['zerver_attachment'] = [
        row for row in response['zerver_attachment']
        if row['messages']]

def fetch_reaction_data(response: TableData, message_ids: Set[int]) -> None:
    query = Reaction.objects.filter(message_id__in=list(message_ids))
    response['zerver_reaction'] = make_raw(list(query))

def fetch_huddle_objects(response: TableData, config: Config, context: Context) -> None:

    realm = context['realm']
    assert config.parent is not None
    assert config.parent.table is not None
    user_profile_ids = set(r['id'] for r in response[config.parent.table])

    # First we get all huddles involving someone in the realm.
    realm_huddle_subs = Subscription.objects.select_related("recipient").filter(
        recipient__type=Recipient.HUDDLE, user_profile__in=user_profile_ids)
    realm_huddle_recipient_ids = set(sub.recipient_id for sub in realm_huddle_subs)

    # Mark all Huddles whose recipient ID contains a cross-realm user.
    unsafe_huddle_recipient_ids = set()
    for sub in Subscription.objects.select_related().filter(recipient__in=realm_huddle_recipient_ids):
        if sub.user_profile.realm != realm:
            # In almost every case the other realm will be zulip.com
            unsafe_huddle_recipient_ids.add(sub.recipient_id)

    # Now filter down to just those huddles that are entirely within the realm.
    #
    # This is important for ensuring that the User objects needed
    # to import it on the other end exist (since we're only
    # exporting the users from this realm), at the cost of losing
    # some of these cross-realm messages.
    huddle_subs = [sub for sub in realm_huddle_subs if sub.recipient_id not in unsafe_huddle_recipient_ids]
    huddle_recipient_ids = set(sub.recipient_id for sub in huddle_subs)
    huddle_ids = set(sub.recipient.type_id for sub in huddle_subs)

    huddle_subscription_dicts = make_raw(huddle_subs)
    huddle_recipients = make_raw(Recipient.objects.filter(id__in=huddle_recipient_ids))

    response['_huddle_recipient'] = huddle_recipients
    response['_huddle_subscription'] = huddle_subscription_dicts
    response['zerver_huddle'] = make_raw(Huddle.objects.filter(id__in=huddle_ids))

def fetch_usermessages(realm: Realm,
                       message_ids: Set[int],
                       user_profile_ids: Set[int],
                       message_filename: Path) -> List[Record]:
    # UserMessage export security rule: You can export UserMessages
    # for the messages you exported for the users in your realm.
    user_message_query = UserMessage.objects.filter(user_profile__realm=realm,
                                                    message_id__in=message_ids)
    user_message_chunk = []
    for user_message in user_message_query:
        if user_message.user_profile_id not in user_profile_ids:
            continue
        user_message_obj = model_to_dict(user_message)
        user_message_obj['flags_mask'] = user_message.flags.mask
        del user_message_obj['flags']
        user_message_chunk.append(user_message_obj)
    logging.info("Fetched UserMessages for %s" % (message_filename,))
    return user_message_chunk

def export_usermessages_batch(input_path: Path, output_path: Path) -> None:
    """As part of the system for doing parallel exports, this runs on one
    batch of Message objects and adds the corresponding UserMessage
    objects.  (This is called by the export_usermessage_batch
    management command)."""
    with open(input_path, "r") as input_file:
        output = ujson.loads(input_file.read())
    message_ids = [item['id'] for item in output['zerver_message']]
    user_profile_ids = set(output['zerver_userprofile_ids'])
    del output['zerver_userprofile_ids']
    realm = Realm.objects.get(id=output['realm_id'])
    del output['realm_id']
    output['zerver_usermessage'] = fetch_usermessages(realm, set(message_ids), user_profile_ids, output_path)
    write_message_export(output_path, output)
    os.unlink(input_path)

def write_message_export(message_filename: Path, output: MessageOutput) -> None:
    write_data_to_file(output_file=message_filename, data=output)
    logging.info("Dumped to %s" % (message_filename,))

def export_partial_message_files(realm: Realm,
                                 response: TableData,
                                 chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE,
                                 output_dir: Optional[Path]=None) -> Set[int]:
    if output_dir is None:
        output_dir = tempfile.mkdtemp(prefix="zulip-export")

    def get_ids(records: List[Record]) -> Set[int]:
        return set(x['id'] for x in records)

    # Basic security rule: You can export everything either...
    #   - sent by someone in your exportable_user_ids
    #     OR
    #   - received by someone in your exportable_user_ids (which
    #     equates to a recipient object we are exporting)
    #
    # TODO: In theory, you should be able to export messages in
    # cross-realm PM threads; currently, this only exports cross-realm
    # messages received by your realm that were sent by Zulip system
    # bots (e.g. emailgateway, notification-bot).

    # Here, "we" and "us" refers to the inner circle of users who
    # were specified as being allowed to be exported.  "Them"
    # refers to other users.
    user_ids_for_us = get_ids(
        response['zerver_userprofile']
    )
    recipient_ids_for_us = get_ids(response['zerver_recipient'])

    ids_of_our_possible_senders = get_ids(
        response['zerver_userprofile'] +
        response['zerver_userprofile_mirrordummy'] +
        response['zerver_userprofile_crossrealm'])
    ids_of_non_exported_possible_recipients = ids_of_our_possible_senders - user_ids_for_us

    recipients_for_them = Recipient.objects.filter(
        type=Recipient.PERSONAL,
        type_id__in=ids_of_non_exported_possible_recipients).values("id")
    recipient_ids_for_them = get_ids(recipients_for_them)

    # We capture most messages here, since the
    # recipients we subscribe to are also the
    # recipients of most messages we send.
    messages_we_received = Message.objects.filter(
        sender__in=ids_of_our_possible_senders,
        recipient__in=recipient_ids_for_us,
    ).order_by('id')

    # This should pick up stragglers; messages we sent
    # where the recipient wasn't subscribed to by any of
    # us (such as PMs to "them").
    messages_we_sent_to_them = Message.objects.filter(
        sender__in=user_ids_for_us,
        recipient__in=recipient_ids_for_them,
    ).order_by('id')

    message_queries = [
        messages_we_received,
        messages_we_sent_to_them
    ]

    all_message_ids = set()  # type: Set[int]
    dump_file_id = 1

    for message_query in message_queries:
        dump_file_id = write_message_partial_for_query(
            realm=realm,
            message_query=message_query,
            dump_file_id=dump_file_id,
            all_message_ids=all_message_ids,
            output_dir=output_dir,
            user_profile_ids=user_ids_for_us,
            chunk_size=chunk_size,
        )

    return all_message_ids
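
# --- Illustrative sketch (not part of the original module) ---
# The id__gt=min_id loop in write_message_partial_for_query below is
# plain keyset pagination; here is the same shape over an ordinary list:
def _example_keyset_pagination(ids: List[int], chunk_size: int) -> List[List[int]]:
    chunks = []  # type: List[List[int]]
    min_id = -1
    while True:
        chunk = sorted(i for i in ids if i > min_id)[0:chunk_size]
        if len(chunk) == 0:
            break
        chunks.append(chunk)
        min_id = max(chunk)
    return chunks

# _example_keyset_pagination([1, 5, 3, 8], 2) == [[1, 3], [5, 8]]
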
def write_message_partial_for_query(realm: Realm, message_query: Any, dump_file_id: int,
                                    all_message_ids: Set[int], output_dir: Path,
                                    user_profile_ids: Set[int],
                                    chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> int:
    min_id = -1

    while True:
        actual_query = message_query.filter(id__gt=min_id)[0:chunk_size]
        message_chunk = make_raw(actual_query)
        message_ids = set(m['id'] for m in message_chunk)
        assert len(message_ids.intersection(all_message_ids)) == 0

        all_message_ids.update(message_ids)

        if len(message_chunk) == 0:
            break

        # Figure out the name of our shard file.
        message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
        message_filename += '.partial'
        logging.info("Fetched Messages for %s" % (message_filename,))

        # Clean up our messages.
        table_data = {}  # type: TableData
        table_data['zerver_message'] = message_chunk
        floatify_datetime_fields(table_data, 'zerver_message')

        # Build up our output for the .partial file, which needs
        # a list of user_profile_ids to search for (as well as
        # the realm id).
        output = {}  # type: MessageOutput
        output['zerver_message'] = table_data['zerver_message']
        output['zerver_userprofile_ids'] = list(user_profile_ids)
        output['realm_id'] = realm.id

        # And write the data.
        write_message_export(message_filename, output)
        min_id = max(message_ids)
        dump_file_id += 1

    return dump_file_id

def export_uploads_and_avatars(realm: Realm, output_dir: Path) -> None:
    uploads_output_dir = os.path.join(output_dir, 'uploads')
    avatars_output_dir = os.path.join(output_dir, 'avatars')
    emoji_output_dir = os.path.join(output_dir, 'emoji')

    for output_dir in (uploads_output_dir, avatars_output_dir, emoji_output_dir):
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

    if settings.LOCAL_UPLOADS_DIR:
        # Small installations and developers will usually just store files locally.
        export_uploads_from_local(realm,
                                  local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "files"),
                                  output_dir=uploads_output_dir)
        export_avatars_from_local(realm,
                                  local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars"),
                                  output_dir=avatars_output_dir)
        export_emoji_from_local(realm,
                                local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars"),
                                output_dir=emoji_output_dir)
    else:
        # Some bigger installations will have their data stored on S3.
        export_files_from_s3(realm,
                             settings.S3_AVATAR_BUCKET,
                             output_dir=avatars_output_dir,
                             processing_avatars=True)
        export_files_from_s3(realm,
                             settings.S3_AUTH_UPLOADS_BUCKET,
                             output_dir=uploads_output_dir)
        export_files_from_s3(realm,
                             settings.S3_AVATAR_BUCKET,
                             output_dir=emoji_output_dir,
                             processing_emoji=True)

def export_files_from_s3(realm: Realm, bucket_name: str, output_dir: Path,
                         processing_avatars: bool=False,
                         processing_emoji: bool=False) -> None:
    conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
    bucket = conn.get_bucket(bucket_name, validate=True)
    records = []

    logging.info("Downloading uploaded files from %s" % (bucket_name,))

    avatar_hash_values = set()
    user_ids = set()
    if processing_avatars:
        bucket_list = bucket.list()
        for user_profile in UserProfile.objects.filter(realm=realm):
            avatar_path = user_avatar_path_from_ids(user_profile.id, realm.id)
            avatar_hash_values.add(avatar_path)
            avatar_hash_values.add(avatar_path + ".original")
            user_ids.add(user_profile.id)
    elif processing_emoji:
        bucket_list = bucket.list(prefix="%s/emoji/images/" % (realm.id,))
    else:
        bucket_list = bucket.list(prefix="%s/" % (realm.id,))

    if settings.EMAIL_GATEWAY_BOT is not None:
get_system_bot(settings.EMAIL_GATEWAY_BOT) # type: Optional[UserProfile]\n else:\n email_gateway_bot = None\n\n count = 0\n for bkey in bucket_list:\n if processing_avatars and bkey.name not in avatar_hash_values:\n continue\n key = bucket.get_key(bkey.name)\n\n # This can happen if an email address has moved realms\n if 'realm_id' in key.metadata and key.metadata['realm_id'] != str(realm.id):\n if email_gateway_bot is None or key.metadata['user_profile_id'] != str(email_gateway_bot.id):\n raise AssertionError(\"Key metadata problem: %s %s / %s\" % (key.name, key.metadata, realm.id))\n # Email gateway bot sends messages, potentially including attachments, cross-realm.\n print(\"File uploaded by email gateway bot: %s / %s\" % (key.name, key.metadata))\n elif processing_avatars:\n if 'user_profile_id' not in key.metadata:\n raise AssertionError(\"Missing user_profile_id in key metadata: %s\" % (key.metadata,))\n if int(key.metadata['user_profile_id']) not in user_ids:\n raise AssertionError(\"Wrong user_profile_id in key metadata: %s\" % (key.metadata,))\n elif 'realm_id' not in key.metadata:\n raise AssertionError(\"Missing realm_id in key metadata: %s\" % (key.metadata,))\n\n record = dict(s3_path=key.name, bucket=bucket_name,\n size=key.size, last_modified=key.last_modified,\n content_type=key.content_type, md5=key.md5)\n record.update(key.metadata)\n\n if processing_emoji:\n record['file_name'] = os.path.basename(key.name)\n\n # A few early avatars don't have 'realm_id' on the object; fix their metadata\n user_profile = get_user_profile_by_id(record['user_profile_id'])\n if 'realm_id' not in record:\n record['realm_id'] = user_profile.realm_id\n record['user_profile_email'] = user_profile.email\n\n # Fix the record ids\n record['user_profile_id'] = int(record['user_profile_id'])\n record['realm_id'] = int(record['realm_id'])\n\n record['path'] = key.name\n if processing_avatars or processing_emoji:\n filename = os.path.join(output_dir, key.name)\n else:\n fields = key.name.split('/')\n if len(fields) != 3:\n raise AssertionError(\"Suspicious key with invalid format %s\" % (key.name))\n filename = os.path.join(output_dir, key.name)\n\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n key.get_contents_to_filename(filename)\n\n records.append(record)\n count += 1\n\n if (count % 100 == 0):\n logging.info(\"Finished %s\" % (count,))\n\n with open(os.path.join(output_dir, \"records.json\"), \"w\") as records_file:\n ujson.dump(records, records_file, indent=4)\n\ndef export_uploads_from_local(realm: Realm, local_dir: Path, output_dir: Path) -> None:\n\n count = 0\n records = []\n for attachment in Attachment.objects.filter(realm_id=realm.id):\n local_path = os.path.join(local_dir, attachment.path_id)\n output_path = os.path.join(output_dir, attachment.path_id)\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n subprocess.check_call([\"cp\", \"-a\", local_path, output_path])\n stat = os.stat(local_path)\n record = dict(realm_id=attachment.realm_id,\n user_profile_id=attachment.owner.id,\n user_profile_email=attachment.owner.email,\n s3_path=attachment.path_id,\n path=attachment.path_id,\n size=stat.st_size,\n last_modified=stat.st_mtime,\n content_type=None)\n records.append(record)\n\n count += 1\n\n if (count % 100 == 0):\n logging.info(\"Finished %s\" % (count,))\n with open(os.path.join(output_dir, \"records.json\"), \"w\") as records_file:\n ujson.dump(records, records_file, indent=4)\n\ndef export_avatars_from_local(realm: Realm, 
local_dir: Path, output_dir: Path) -> None:\n\n count = 0\n records = []\n\n users = list(UserProfile.objects.filter(realm=realm))\n users += [\n get_system_bot(settings.NOTIFICATION_BOT),\n get_system_bot(settings.EMAIL_GATEWAY_BOT),\n get_system_bot(settings.WELCOME_BOT),\n ]\n for user in users:\n if user.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:\n continue\n\n avatar_path = user_avatar_path_from_ids(user.id, realm.id)\n wildcard = os.path.join(local_dir, avatar_path + '.*')\n\n for local_path in glob.glob(wildcard):\n logging.info('Copying avatar file for user %s from %s' % (\n user.email, local_path))\n fn = os.path.relpath(local_path, local_dir)\n output_path = os.path.join(output_dir, fn)\n os.makedirs(str(os.path.dirname(output_path)), exist_ok=True)\n subprocess.check_call([\"cp\", \"-a\", str(local_path), str(output_path)])\n stat = os.stat(local_path)\n record = dict(realm_id=realm.id,\n user_profile_id=user.id,\n user_profile_email=user.email,\n s3_path=fn,\n path=fn,\n size=stat.st_size,\n last_modified=stat.st_mtime,\n content_type=None)\n records.append(record)\n\n count += 1\n\n if (count % 100 == 0):\n logging.info(\"Finished %s\" % (count,))\n\n with open(os.path.join(output_dir, \"records.json\"), \"w\") as records_file:\n ujson.dump(records, records_file, indent=4)\n\ndef export_emoji_from_local(realm: Realm, local_dir: Path, output_dir: Path) -> None:\n\n count = 0\n records = []\n for realm_emoji in RealmEmoji.objects.filter(realm_id=realm.id):\n emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(\n realm_id=realm.id,\n emoji_file_name=realm_emoji.file_name\n )\n local_path = os.path.join(local_dir, emoji_path)\n output_path = os.path.join(output_dir, emoji_path)\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n subprocess.check_call([\"cp\", \"-a\", local_path, output_path])\n record = dict(realm_id=realm.id,\n author=realm_emoji.author.id,\n path=emoji_path,\n s3_path=emoji_path,\n file_name=realm_emoji.file_name,\n name=realm_emoji.name,\n deactivated=realm_emoji.deactivated)\n records.append(record)\n\n count += 1\n if (count % 100 == 0):\n logging.info(\"Finished %s\" % (count,))\n with open(os.path.join(output_dir, \"records.json\"), \"w\") as records_file:\n ujson.dump(records, records_file, indent=4)\n\ndef do_write_stats_file_for_realm_export(output_dir: Path) -> None:\n stats_file = os.path.join(output_dir, 'stats.txt')\n realm_file = os.path.join(output_dir, 'realm.json')\n attachment_file = os.path.join(output_dir, 'attachment.json')\n message_files = glob.glob(os.path.join(output_dir, 'messages-*.json'))\n fns = sorted([attachment_file] + message_files + [realm_file])\n\n logging.info('Writing stats file: %s\\n' % (stats_file,))\n with open(stats_file, 'w') as f:\n for fn in fns:\n f.write(os.path.basename(fn) + '\\n')\n payload = open(fn).read()\n data = ujson.loads(payload)\n for k in sorted(data):\n f.write('%5d %s\\n' % (len(data[k]), k))\n f.write('\\n')\n\n avatar_file = os.path.join(output_dir, 'avatars/records.json')\n uploads_file = os.path.join(output_dir, 'uploads/records.json')\n\n for fn in [avatar_file, uploads_file]:\n f.write(fn+'\\n')\n payload = open(fn).read()\n data = ujson.loads(payload)\n f.write('%5d records\\n' % len(data))\n f.write('\\n')\n\ndef do_export_realm(realm: Realm, output_dir: Path, threads: int,\n exportable_user_ids: Optional[Set[int]]=None) -> None:\n response = {} # type: TableData\n\n # We need at least one thread running to export\n # UserMessage rows. 
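# export_uploads_from_local, export_avatars_from_local and
# export_emoji_from_local above all share one shape: copy each file
# into output_dir while appending a metadata record, then dump the
# list as records.json. A stripped-down sketch, with the stdlib
# json module standing in for ujson and illustrative record fields:
import json
import os

def export_dir_with_manifest(src_dir: str, output_dir: str) -> None:
    os.makedirs(output_dir, exist_ok=True)
    records = []
    for name in sorted(os.listdir(src_dir)):
        local_path = os.path.join(src_dir, name)
        if not os.path.isfile(local_path):
            continue
        stat = os.stat(local_path)
        records.append(dict(path=name, size=stat.st_size,
                            last_modified=stat.st_mtime))
        # ... copy local_path into output_dir here ...
    with open(os.path.join(output_dir, "records.json"), "w") as records_file:
        json.dump(records, records_file, indent=4)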
The management command should\n # enforce this for us.\n if not settings.TEST_SUITE:\n assert threads >= 1\n\n assert os.path.exists(\"./manage.py\")\n\n realm_config = get_realm_config()\n\n create_soft_link(source=output_dir, in_progress=True)\n\n logging.info(\"Exporting data from get_realm_config()...\")\n export_from_config(\n response=response,\n config=realm_config,\n seed_object=realm,\n context=dict(realm=realm, exportable_user_ids=exportable_user_ids)\n )\n logging.info('...DONE with get_realm_config() data')\n\n sanity_check_output(response)\n\n logging.info(\"Exporting uploaded files and avatars\")\n export_uploads_and_avatars(realm, output_dir)\n\n # We (sort of) export zerver_message rows here. We write\n # them to .partial files that are subsequently fleshed out\n # by parallel processes to add in zerver_usermessage data.\n # This is for performance reasons, of course. Some installations\n # have millions of messages.\n logging.info(\"Exporting .partial files messages\")\n message_ids = export_partial_message_files(realm, response, output_dir=output_dir)\n logging.info('%d messages were exported' % (len(message_ids)))\n\n # zerver_reaction\n zerver_reaction = {} # type: TableData\n fetch_reaction_data(response=zerver_reaction, message_ids=message_ids)\n response.update(zerver_reaction)\n\n # Write realm data\n export_file = os.path.join(output_dir, \"realm.json\")\n write_data_to_file(output_file=export_file, data=response)\n logging.info('Writing realm data to %s' % (export_file,))\n\n # zerver_attachment\n export_attachment_table(realm=realm, output_dir=output_dir, message_ids=message_ids)\n\n # Start parallel jobs to export the UserMessage objects.\n launch_user_message_subprocesses(threads=threads, output_dir=output_dir)\n\n logging.info(\"Finished exporting %s\" % (realm.string_id))\n create_soft_link(source=output_dir, in_progress=False)\n\ndef export_attachment_table(realm: Realm, output_dir: Path, message_ids: Set[int]) -> None:\n response = {} # type: TableData\n fetch_attachment_data(response=response, realm_id=realm.id, message_ids=message_ids)\n output_file = os.path.join(output_dir, \"attachment.json\")\n logging.info('Writing attachment table data to %s' % (output_file,))\n write_data_to_file(output_file=output_file, data=response)\n\ndef create_soft_link(source: Path, in_progress: bool=True) -> None:\n is_done = not in_progress\n in_progress_link = '/tmp/zulip-export-in-progress'\n done_link = '/tmp/zulip-export-most-recent'\n\n if in_progress:\n new_target = in_progress_link\n else:\n subprocess.check_call(['rm', '-f', in_progress_link])\n new_target = done_link\n\n subprocess.check_call([\"ln\", \"-nsf\", source, new_target])\n if is_done:\n logging.info('See %s for output files' % (new_target,))\n\n\ndef launch_user_message_subprocesses(threads: int, output_dir: Path) -> None:\n logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows' % (threads,))\n\n def run_job(shard: str) -> int:\n subprocess.call([\"./manage.py\", 'export_usermessage_batch', '--path',\n str(output_dir), '--thread', shard])\n return 0\n\n for (status, job) in run_parallel(run_job,\n [str(x) for x in range(0, threads)],\n threads=threads):\n print(\"Shard %s finished, status %s\" % (job, status))\n\ndef do_export_user(user_profile: UserProfile, output_dir: Path) -> None:\n response = {} # type: TableData\n\n export_single_user(user_profile, response)\n export_file = os.path.join(output_dir, \"user.json\")\n write_data_to_file(output_file=export_file, data=response)\n 
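# do_export_realm above writes message shards twice: first as
# messages-%06d.json.partial (message rows plus the user-profile ids
# and realm id a later pass needs), then the parallel
# export_usermessage_batch workers, whose consumer appears at the
# start of this excerpt, add the zerver_usermessage rows, write the
# final .json and unlink the .partial. A toy version of that
# handoff; the function names here are illustrative:
import json
import os

def write_partial(path: str, messages: list, user_profile_ids: list,
                  realm_id: int) -> None:
    output = {"zerver_message": messages,
              "zerver_userprofile_ids": user_profile_ids,
              "realm_id": realm_id}
    with open(path + ".partial", "w") as f:
        json.dump(output, f)

def finish_partial(path: str) -> None:
    with open(path + ".partial") as f:
        output = json.load(f)
    output.pop("zerver_userprofile_ids")
    output.pop("realm_id")
    output["zerver_usermessage"] = []  # fetch_usermessages(...) goes here
    with open(path, "w") as f:
        json.dump(output, f)
    os.unlink(path + ".partial")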
logging.info(\"Exporting messages\")\n export_messages_single_user(user_profile, output_dir)\n\ndef export_single_user(user_profile: UserProfile, response: TableData) -> None:\n\n config = get_single_user_config()\n export_from_config(\n response=response,\n config=config,\n seed_object=user_profile,\n )\n\ndef get_single_user_config() -> Config:\n\n # zerver_userprofile\n user_profile_config = Config(\n table='zerver_userprofile',\n is_seeded=True,\n exclude=['password', 'api_key'],\n )\n\n # zerver_subscription\n subscription_config = Config(\n table='zerver_subscription',\n model=Subscription,\n normal_parent=user_profile_config,\n parent_key='user_profile__in',\n )\n\n # zerver_recipient\n recipient_config = Config(\n table='zerver_recipient',\n model=Recipient,\n virtual_parent=subscription_config,\n id_source=('zerver_subscription', 'recipient'),\n )\n\n # zerver_stream\n Config(\n table='zerver_stream',\n model=Stream,\n virtual_parent=recipient_config,\n id_source=('zerver_recipient', 'type_id'),\n source_filter=lambda r: r['type'] == Recipient.STREAM,\n exclude=['email_token'],\n )\n\n return user_profile_config\n\ndef export_messages_single_user(user_profile: UserProfile, output_dir: Path,\n chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> None:\n user_message_query = UserMessage.objects.filter(user_profile=user_profile).order_by(\"id\")\n min_id = -1\n dump_file_id = 1\n while True:\n actual_query = user_message_query.select_related(\n \"message\", \"message__sending_client\").filter(id__gt=min_id)[0:chunk_size]\n user_message_chunk = [um for um in actual_query]\n user_message_ids = set(um.id for um in user_message_chunk)\n\n if len(user_message_chunk) == 0:\n break\n\n message_chunk = []\n for user_message in user_message_chunk:\n item = model_to_dict(user_message.message)\n item['flags'] = user_message.flags_list()\n item['flags_mask'] = user_message.flags.mask\n # Add a few nice, human-readable details\n item['sending_client_name'] = user_message.message.sending_client.name\n item['display_recipient'] = get_display_recipient(user_message.message.recipient)\n message_chunk.append(item)\n\n message_filename = os.path.join(output_dir, \"messages-%06d.json\" % (dump_file_id,))\n logging.info(\"Fetched Messages for %s\" % (message_filename,))\n\n output = {'zerver_message': message_chunk}\n floatify_datetime_fields(output, 'zerver_message')\n message_output = dict(output) # type: MessageOutput\n\n write_message_export(message_filename, message_output)\n min_id = max(user_message_ids)\n dump_file_id += 1\n"},"type_annotations":{"kind":"list like","value":["TableData","Path","Any","Any","TableData","TableName","TableData","Config","TableData","Config","Context","TableData","Config","Context","TableData","Config","Context","TableData","int","Set[int]","TableData","Set[int]","TableData","Config","Context","Realm","Set[int]","Set[int]","Path","Path","Path","Path","MessageOutput","Realm","TableData","List[Record]","Realm","Any","int","Set[int]","Path","Set[int]","Realm","Path","Realm","str","Path","Realm","Path","Path","Realm","Path","Path","Realm","Path","Path","Path","Realm","Path","int","Realm","Path","Set[int]","Path","int","Path","str","UserProfile","Path","UserProfile","TableData","UserProfile","Path"],"string":"[\n \"TableData\",\n \"Path\",\n \"Any\",\n \"Any\",\n \"TableData\",\n \"TableName\",\n \"TableData\",\n \"Config\",\n \"TableData\",\n \"Config\",\n \"Context\",\n \"TableData\",\n \"Config\",\n \"Context\",\n \"TableData\",\n \"Config\",\n \"Context\",\n \"TableData\",\n 
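# The Config graph in get_single_user_config above uses id_source
# plus source_filter to harvest one table's ids out of a column of
# an already-exported table, e.g. stream ids from recipient rows of
# type STREAM. That harvesting step in isolation; Recipient.STREAM
# is taken to be 2 here, matching the raw stream-recipient SQL in
# fix_unreads.py later in this dump, and the rows are made up:
RECIPIENT_STREAM = 2

recipient_rows = [{"type": RECIPIENT_STREAM, "type_id": 7},
                  {"type": 1, "type_id": 3},
                  {"type": RECIPIENT_STREAM, "type_id": 9}]

stream_ids = {r["type_id"] for r in recipient_rows
              if r["type"] == RECIPIENT_STREAM}
assert stream_ids == {7, 9}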
\"int\",\n \"Set[int]\",\n \"TableData\",\n \"Set[int]\",\n \"TableData\",\n \"Config\",\n \"Context\",\n \"Realm\",\n \"Set[int]\",\n \"Set[int]\",\n \"Path\",\n \"Path\",\n \"Path\",\n \"Path\",\n \"MessageOutput\",\n \"Realm\",\n \"TableData\",\n \"List[Record]\",\n \"Realm\",\n \"Any\",\n \"int\",\n \"Set[int]\",\n \"Path\",\n \"Set[int]\",\n \"Realm\",\n \"Path\",\n \"Realm\",\n \"str\",\n \"Path\",\n \"Realm\",\n \"Path\",\n \"Path\",\n \"Realm\",\n \"Path\",\n \"Path\",\n \"Realm\",\n \"Path\",\n \"Path\",\n \"Path\",\n \"Realm\",\n \"Path\",\n \"int\",\n \"Realm\",\n \"Path\",\n \"Set[int]\",\n \"Path\",\n \"int\",\n \"Path\",\n \"str\",\n \"UserProfile\",\n \"Path\",\n \"UserProfile\",\n \"TableData\",\n \"UserProfile\",\n \"Path\"\n]"},"type_annotation_starts":{"kind":"list like","value":[7721,9559,9571,9689,10447,10465,14617,14636,24451,24470,24487,25720,25739,25756,26740,26759,26776,27440,27461,27479,28570,28594,28775,28794,28811,30655,30698,30749,30800,31626,31645,32461,32475,32661,32711,32990,35595,35617,35636,35694,35716,35776,37269,37288,39043,39063,39080,43055,43073,43091,44215,44233,44251,45924,45942,45960,47124,48226,48245,48260,50459,50478,50497,50870,51395,51412,51546,51991,52016,52370,52393,53567,53592],"string":"[\n 7721,\n 9559,\n 9571,\n 9689,\n 10447,\n 10465,\n 14617,\n 14636,\n 24451,\n 24470,\n 24487,\n 25720,\n 25739,\n 25756,\n 26740,\n 26759,\n 26776,\n 27440,\n 27461,\n 27479,\n 28570,\n 28594,\n 28775,\n 28794,\n 28811,\n 30655,\n 30698,\n 30749,\n 30800,\n 31626,\n 31645,\n 32461,\n 32475,\n 32661,\n 32711,\n 32990,\n 35595,\n 35617,\n 35636,\n 35694,\n 35716,\n 35776,\n 37269,\n 37288,\n 39043,\n 39063,\n 39080,\n 43055,\n 43073,\n 43091,\n 44215,\n 44233,\n 44251,\n 45924,\n 45942,\n 45960,\n 47124,\n 48226,\n 48245,\n 48260,\n 50459,\n 50478,\n 50497,\n 50870,\n 51395,\n 51412,\n 51546,\n 51991,\n 52016,\n 52370,\n 52393,\n 53567,\n 53592\n]"},"type_annotation_ends":{"kind":"list like","value":[7730,9563,9574,9692,10456,10474,14626,14642,24460,24476,24494,25729,25745,25763,26749,26765,26783,27449,27464,27487,28579,28602,28784,28800,28818,30660,30706,30757,30804,31630,31649,32465,32488,32666,32720,33002,35600,35620,35639,35702,35720,35784,37274,37292,39048,39066,39084,43060,43077,43095,44220,44237,44255,45929,45946,45964,47128,48231,48249,48263,50464,50482,50505,50874,51398,51416,51549,52002,52020,52381,52402,53578,53596],"string":"[\n 7730,\n 9563,\n 9574,\n 9692,\n 10456,\n 10474,\n 14626,\n 14642,\n 24460,\n 24476,\n 24494,\n 25729,\n 25745,\n 25763,\n 26749,\n 26765,\n 26783,\n 27449,\n 27464,\n 27487,\n 28579,\n 28602,\n 28784,\n 28800,\n 28818,\n 30660,\n 30706,\n 30757,\n 30804,\n 31630,\n 31649,\n 32465,\n 32488,\n 32666,\n 32720,\n 33002,\n 35600,\n 35620,\n 35639,\n 35702,\n 35720,\n 35784,\n 37274,\n 37292,\n 39048,\n 39066,\n 39084,\n 43060,\n 43077,\n 43095,\n 44220,\n 44237,\n 44255,\n 45929,\n 45946,\n 45964,\n 47128,\n 48231,\n 48249,\n 48263,\n 50464,\n 50482,\n 50505,\n 50874,\n 51398,\n 51416,\n 51549,\n 52002,\n 52020,\n 52381,\n 52402,\n 53578,\n 53596\n]"}}},{"rowIdx":1378,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/feedback.py"},"contents":{"kind":"string","value":"\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom typing import Any, Mapping, Optional\n\nfrom zerver.lib.actions import internal_send_message\nfrom zerver.lib.send_email import FromAddress\nfrom zerver.lib.redis_utils import get_redis_client\nfrom 
zerver.models import get_realm, get_system_bot, \\\n UserProfile, Realm\n\nimport time\n\nclient = get_redis_client()\n\ndef has_enough_time_expired_since_last_message(sender_email: str, min_delay: float) -> bool:\n # This function returns a boolean, but it also has the side effect\n # of noting that a new message was received.\n key = 'zilencer:feedback:%s' % (sender_email,)\n t = int(time.time())\n last_time = client.getset(key, t) # type: Optional[bytes]\n if last_time is None:\n return True\n delay = t - int(last_time)\n return delay > min_delay\n\ndef get_ticket_number() -> int:\n num_file = '/var/tmp/.feedback-bot-ticket-number'\n try:\n ticket_number = int(open(num_file).read()) + 1\n except Exception:\n ticket_number = 1\n open(num_file, 'w').write('%d' % (ticket_number,))\n return ticket_number\n\ndef deliver_feedback_by_zulip(message: Mapping[str, Any]) -> None:\n subject = \"%s\" % (message[\"sender_email\"],)\n\n if len(subject) > 60:\n subject = subject[:57].rstrip() + \"...\"\n\n content = ''\n sender_email = message['sender_email']\n\n # We generate ticket numbers if it's been more than a few minutes\n # since their last message. This avoids some noise when people use\n # enter-send.\n need_ticket = has_enough_time_expired_since_last_message(sender_email, 180)\n\n if need_ticket:\n ticket_number = get_ticket_number()\n content += '\\n~~~'\n content += '\\nticket Z%03d (@support please ack)' % (ticket_number,)\n content += '\\nsender: %s' % (message['sender_full_name'],)\n content += '\\nemail: %s' % (sender_email,)\n if 'sender_realm_str' in message:\n content += '\\nrealm: %s' % (message['sender_realm_str'],)\n content += '\\n~~~'\n content += '\\n\\n'\n\n content += message['content']\n\n user_profile = get_system_bot(settings.FEEDBACK_BOT)\n internal_send_message(user_profile.realm, settings.FEEDBACK_BOT,\n \"stream\", settings.FEEDBACK_STREAM, subject, content)\n\ndef handle_feedback(event: Mapping[str, Any]) -> None:\n if not settings.ENABLE_FEEDBACK:\n return\n if settings.FEEDBACK_EMAIL is not None:\n to_email = settings.FEEDBACK_EMAIL\n subject = \"Zulip feedback from %s\" % (event[\"sender_email\"],)\n content = event[\"content\"]\n from_email = '\"%s\" <%s>' % (event[\"sender_full_name\"], FromAddress.SUPPORT)\n headers = {'Reply-To': '\"%s\" <%s>' % (event[\"sender_full_name\"], event[\"sender_email\"])}\n msg = EmailMessage(subject, content, from_email, [to_email], headers=headers)\n msg.send()\n if settings.FEEDBACK_STREAM is not None:\n deliver_feedback_by_zulip(event)\n"},"type_annotations":{"kind":"list like","value":["str","float","Mapping[str, Any]","Mapping[str, Any]"],"string":"[\n \"str\",\n \"float\",\n \"Mapping[str, Any]\",\n \"Mapping[str, Any]\"\n]"},"type_annotation_starts":{"kind":"list like","value":[452,468,1168,2343],"string":"[\n 452,\n 468,\n 1168,\n 2343\n]"},"type_annotation_ends":{"kind":"list like","value":[455,473,1185,2360],"string":"[\n 455,\n 473,\n 1185,\n 2360\n]"}}},{"rowIdx":1379,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/fix_unreads.py"},"contents":{"kind":"string","value":"\nimport time\nimport logging\n\nfrom typing import Callable, List, TypeVar\nfrom psycopg2.extensions import cursor\nCursorObj = TypeVar('CursorObj', bound=cursor)\n\nfrom django.db import connection\n\nfrom zerver.models import UserProfile\n\n'''\nNOTE! 
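# has_enough_time_expired_since_last_message above leans on redis
# GETSET: store the new timestamp and read back the previous one in
# a single step. A dependency-free sketch with a dict standing in
# for the redis client; with a real client the two marked lines
# collapse into client.getset(key, now):
import time
from typing import Dict

_last_seen = {}  # type: Dict[str, int]

def enough_time_expired(sender_email: str, min_delay: float) -> bool:
    key = "feedback:%s" % (sender_email,)
    now = int(time.time())
    last_time = _last_seen.get(key)  # GETSET: read the previous value...
    _last_seen[key] = now            # ...and store the new one
    if last_time is None:
        return True
    return (now - last_time) > min_delay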
Be careful modifying this library, as it is used\nin a migration, and it needs to be valid for the state\nof the database that is in place when the 0104_fix_unreads\nmigration runs.\n'''\n\nlogger = logging.getLogger('zulip.fix_unreads')\nlogger.setLevel(logging.WARNING)\n\ndef build_topic_mute_checker(cursor: CursorObj, user_profile: UserProfile) -> Callable[[int, str], bool]:\n '''\n This function is similar to the function of the same name\n in zerver/lib/topic_mutes.py, but it works without the ORM,\n so that we can use it in migrations.\n '''\n query = '''\n SELECT\n recipient_id,\n topic_name\n FROM\n zerver_mutedtopic\n WHERE\n user_profile_id = %s\n '''\n cursor.execute(query, [user_profile.id])\n rows = cursor.fetchall()\n\n tups = {\n (recipient_id, topic_name.lower())\n for (recipient_id, topic_name) in rows\n }\n\n def is_muted(recipient_id: int, topic: str) -> bool:\n return (recipient_id, topic.lower()) in tups\n\n return is_muted\n\ndef update_unread_flags(cursor: CursorObj, user_message_ids: List[int]) -> None:\n um_id_list = ', '.join(str(id) for id in user_message_ids)\n query = '''\n UPDATE zerver_usermessage\n SET flags = flags | 1\n WHERE id IN (%s)\n ''' % (um_id_list,)\n\n cursor.execute(query)\n\n\ndef get_timing(message: str, f: Callable[[], None]) -> None:\n start = time.time()\n logger.info(message)\n f()\n elapsed = time.time() - start\n logger.info('elapsed time: %.03f\\n' % (elapsed,))\n\n\ndef fix_unsubscribed(cursor: CursorObj, user_profile: UserProfile) -> None:\n\n recipient_ids = []\n\n def find_recipients() -> None:\n query = '''\n SELECT\n zerver_subscription.recipient_id\n FROM\n zerver_subscription\n INNER JOIN zerver_recipient ON (\n zerver_recipient.id = zerver_subscription.recipient_id\n )\n WHERE (\n zerver_subscription.user_profile_id = '%s' AND\n zerver_recipient.type = 2 AND\n (NOT zerver_subscription.active)\n )\n '''\n cursor.execute(query, [user_profile.id])\n rows = cursor.fetchall()\n for row in rows:\n recipient_ids.append(row[0])\n logger.info(str(recipient_ids))\n\n get_timing(\n 'get recipients',\n find_recipients\n )\n\n if not recipient_ids:\n return\n\n user_message_ids = []\n\n def find() -> None:\n recips = ', '.join(str(id) for id in recipient_ids)\n\n query = '''\n SELECT\n zerver_usermessage.id\n FROM\n zerver_usermessage\n INNER JOIN zerver_message ON (\n zerver_message.id = zerver_usermessage.message_id\n )\n WHERE (\n zerver_usermessage.user_profile_id = %s AND\n (zerver_usermessage.flags & 1) = 0 AND\n zerver_message.recipient_id in (%s)\n )\n ''' % (user_profile.id, recips)\n\n logger.info('''\n EXPLAIN analyze''' + query.rstrip() + ';')\n\n cursor.execute(query)\n rows = cursor.fetchall()\n for row in rows:\n user_message_ids.append(row[0])\n logger.info('rows found: %d' % (len(user_message_ids),))\n\n get_timing(\n 'finding unread messages for non-active streams',\n find\n )\n\n if not user_message_ids:\n return\n\n def fix() -> None:\n update_unread_flags(cursor, user_message_ids)\n\n get_timing(\n 'fixing unread messages for non-active streams',\n fix\n )\n\ndef fix_pre_pointer(cursor: CursorObj, user_profile: UserProfile) -> None:\n\n pointer = user_profile.pointer\n\n if not pointer:\n return\n\n recipient_ids = []\n\n def find_non_muted_recipients() -> None:\n query = '''\n SELECT\n zerver_subscription.recipient_id\n FROM\n zerver_subscription\n INNER JOIN zerver_recipient ON (\n zerver_recipient.id = zerver_subscription.recipient_id\n )\n WHERE (\n zerver_subscription.user_profile_id = '%s' AND\n 
zerver_recipient.type = 2 AND\n zerver_subscription.in_home_view AND\n zerver_subscription.active\n )\n '''\n cursor.execute(query, [user_profile.id])\n rows = cursor.fetchall()\n for row in rows:\n recipient_ids.append(row[0])\n logger.info(str(recipient_ids))\n\n get_timing(\n 'find_non_muted_recipients',\n find_non_muted_recipients\n )\n\n if not recipient_ids:\n return\n\n user_message_ids = []\n\n def find_old_ids() -> None:\n recips = ', '.join(str(id) for id in recipient_ids)\n\n is_topic_muted = build_topic_mute_checker(cursor, user_profile)\n\n query = '''\n SELECT\n zerver_usermessage.id,\n zerver_message.recipient_id,\n zerver_message.subject\n FROM\n zerver_usermessage\n INNER JOIN zerver_message ON (\n zerver_message.id = zerver_usermessage.message_id\n )\n WHERE (\n zerver_usermessage.user_profile_id = %s AND\n zerver_usermessage.message_id <= %s AND\n (zerver_usermessage.flags & 1) = 0 AND\n zerver_message.recipient_id in (%s)\n )\n ''' % (user_profile.id, pointer, recips)\n\n logger.info('''\n EXPLAIN analyze''' + query.rstrip() + ';')\n\n cursor.execute(query)\n rows = cursor.fetchall()\n for (um_id, recipient_id, topic) in rows:\n if not is_topic_muted(recipient_id, topic):\n user_message_ids.append(um_id)\n logger.info('rows found: %d' % (len(user_message_ids),))\n\n get_timing(\n 'finding pre-pointer messages that are not muted',\n find_old_ids\n )\n\n if not user_message_ids:\n return\n\n def fix() -> None:\n update_unread_flags(cursor, user_message_ids)\n\n get_timing(\n 'fixing unread messages for pre-pointer non-muted messages',\n fix\n )\n\ndef fix(user_profile: UserProfile) -> None:\n logger.info('\\n---\\nFixing %s:' % (user_profile.email,))\n with connection.cursor() as cursor:\n fix_unsubscribed(cursor, user_profile)\n fix_pre_pointer(cursor, user_profile)\n"},"type_annotations":{"kind":"list like","value":["CursorObj","UserProfile","int","str","CursorObj","List[int]","str","Callable[[], None]","CursorObj","UserProfile","CursorObj","UserProfile","UserProfile"],"string":"[\n \"CursorObj\",\n \"UserProfile\",\n \"int\",\n \"str\",\n \"CursorObj\",\n \"List[int]\",\n \"str\",\n \"Callable[[], None]\",\n \"CursorObj\",\n \"UserProfile\",\n \"CursorObj\",\n \"UserProfile\",\n \"UserProfile\"\n]"},"type_annotation_starts":{"kind":"list like","value":[546,571,1192,1204,1325,1354,1619,1627,1832,1857,3939,3964,6526],"string":"[\n 546,\n 571,\n 1192,\n 1204,\n 1325,\n 1354,\n 1619,\n 1627,\n 1832,\n 1857,\n 3939,\n 3964,\n 6526\n]"},"type_annotation_ends":{"kind":"list like","value":[555,582,1195,1207,1334,1363,1622,1645,1841,1868,3948,3975,6537],"string":"[\n 555,\n 582,\n 1195,\n 1207,\n 1334,\n 1363,\n 1622,\n 1645,\n 1841,\n 1868,\n 3948,\n 3975,\n 6537\n]"}}},{"rowIdx":1380,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/generate_test_data.py"},"contents":{"kind":"string","value":"import itertools\nimport ujson\nimport random\nfrom typing import List, Dict, Any, Optional\n\ndef load_config() -> Dict[str, Any]:\n with open(\"zerver/tests/fixtures/config.generate_data.json\", \"r\") as infile:\n config = ujson.load(infile)\n\n return config\n\ndef get_stream_title(gens: Dict[str, Any]) -> str:\n\n return next(gens[\"adjectives\"]) + \" \" + next(gens[\"nouns\"]) + \" \" + \\\n next(gens[\"connectors\"]) + \" \" + next(gens[\"verbs\"]) + \" \" + \\\n next(gens[\"adverbs\"])\n\ndef load_generators(config: Dict[str, Any]) -> Dict[str, Any]:\n\n results = {}\n cfg = 
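# build_topic_mute_checker above avoids the ORM by loading all of a
# user's muted (recipient_id, topic) pairs once and closing over a
# case-normalized set, so each per-message check is a constant-time
# lookup. The same shape, with the rows supplied directly instead of
# fetched by cursor:
from typing import Callable, List, Tuple

def make_mute_checker(rows: List[Tuple[int, str]]) -> Callable[[int, str], bool]:
    tups = {(recipient_id, topic.lower()) for recipient_id, topic in rows}

    def is_muted(recipient_id: int, topic: str) -> bool:
        return (recipient_id, topic.lower()) in tups

    return is_muted

is_muted = make_mute_checker([(7, "Lunch"), (9, "standup")])
assert is_muted(7, "LUNCH") and not is_muted(7, "dinner")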
config[\"gen_fodder\"]\n\n results[\"nouns\"] = itertools.cycle(cfg[\"nouns\"])\n results[\"adjectives\"] = itertools.cycle(cfg[\"adjectives\"])\n results[\"connectors\"] = itertools.cycle(cfg[\"connectors\"])\n results[\"verbs\"] = itertools.cycle(cfg[\"verbs\"])\n results[\"adverbs\"] = itertools.cycle(cfg[\"adverbs\"])\n results[\"emojis\"] = itertools.cycle(cfg[\"emoji\"])\n results[\"links\"] = itertools.cycle(cfg[\"links\"])\n\n results[\"maths\"] = itertools.cycle(cfg[\"maths\"])\n results[\"inline-code\"] = itertools.cycle(cfg[\"inline-code\"])\n results[\"code-blocks\"] = itertools.cycle(cfg[\"code-blocks\"])\n results[\"quote-blocks\"] = itertools.cycle(cfg[\"quote-blocks\"])\n\n results[\"lists\"] = itertools.cycle(cfg[\"lists\"])\n\n return results\n\ndef parse_file(config: Dict[str, Any], gens: Dict[str, Any], corpus_file: str) -> List[str]:\n\n # First, load the entire file into a dictionary,\n # then apply our custom filters to it as needed.\n\n paragraphs = [] # type: List[str]\n\n with open(corpus_file, \"r\") as infile:\n # OUR DATA: we need to separate the person talking and what they say\n paragraphs = remove_line_breaks(infile)\n paragraphs = add_flair(paragraphs, gens)\n\n return paragraphs\n\ndef get_flair_gen(length: int) -> List[str]:\n\n # Grab the percentages from the config file\n # create a list that we can consume that will guarantee the distribution\n result = []\n\n for k, v in config[\"dist_percentages\"].items():\n result.extend([k] * int(v * length / 100))\n\n result.extend([\"None\"] * (length - len(result)))\n\n random.shuffle(result)\n return result\n\ndef add_flair(paragraphs: List[str], gens: Dict[str, Any]) -> List[str]:\n\n # roll the dice and see what kind of flair we should add, if any\n results = []\n\n flair = get_flair_gen(len(paragraphs))\n\n for i in range(len(paragraphs)):\n key = flair[i]\n if key == \"None\":\n txt = paragraphs[i]\n elif key == \"italic\":\n txt = add_md(\"*\", paragraphs[i])\n elif key == \"bold\":\n txt = add_md(\"**\", paragraphs[i])\n elif key == \"strike-thru\":\n txt = add_md(\"~~\", paragraphs[i])\n elif key == \"quoted\":\n txt = \">\" + paragraphs[i]\n elif key == \"quote-block\":\n txt = paragraphs[i] + \"\\n\" + next(gens[\"quote-blocks\"])\n elif key == \"inline-code\":\n txt = paragraphs[i] + \"\\n\" + next(gens[\"inline-code\"])\n elif key == \"code-block\":\n txt = paragraphs[i] + \"\\n\" + next(gens[\"code-blocks\"])\n elif key == \"math\":\n txt = paragraphs[i] + \"\\n\" + next(gens[\"maths\"])\n elif key == \"list\":\n txt = paragraphs[i] + \"\\n\" + next(gens[\"lists\"])\n elif key == \"emoji\":\n txt = add_emoji(paragraphs[i], next(gens[\"emojis\"]))\n elif key == \"link\":\n txt = add_link(paragraphs[i], next(gens[\"links\"]))\n elif key == \"picture\":\n txt = txt # TODO: implement pictures\n\n results.append(txt)\n\n return results\n\ndef add_md(mode: str, text: str) -> str:\n\n # mode means: bold, italic, etc.\n # to add a list at the end of a paragraph, * iterm one\\n * item two\n\n # find out how long the line is, then insert the mode before the end\n\n vals = text.split()\n start = random.randrange(len(vals))\n end = random.randrange(len(vals) - start) + start\n vals[start] = mode + vals[start]\n vals[end] = vals[end] + mode\n\n return \" \".join(vals).strip()\n\ndef add_emoji(text: str, emoji: str) -> str:\n\n vals = text.split()\n start = random.randrange(len(vals))\n\n vals[start] = vals[start] + \" \" + emoji + \" \"\n return \" \".join(vals)\n\ndef add_link(text: str, link: str) -> str:\n\n 
vals = text.split()\n start = random.randrange(len(vals))\n\n vals[start] = vals[start] + \" \" + link + \" \"\n\n return \" \".join(vals)\n\ndef remove_line_breaks(fh: Any) -> List[str]:\n\n # We're going to remove line breaks from paragraphs\n results = [] # save the dialogs as tuples with (author, dialog)\n\n para = [] # we'll store the lines here to form a paragraph\n\n for line in fh:\n text = line.strip()\n if text != \"\":\n para.append(text)\n else:\n if para:\n results.append(\" \".join(para))\n # reset the paragraph\n para = []\n if para:\n results.append(\" \".join(para))\n\n return results\n\ndef write_file(paragraphs: List[str], filename: str) -> None:\n\n with open(filename, \"w\") as outfile:\n outfile.write(ujson.dumps(paragraphs))\n\ndef create_test_data() -> None:\n\n gens = load_generators(config) # returns a dictionary of generators\n\n paragraphs = parse_file(config, gens, config[\"corpus\"][\"filename\"])\n\n write_file(paragraphs, \"var/test_messages.json\")\n\nconfig = load_config() # type: Dict[str, Any]\n\nif __name__ == \"__main__\":\n create_test_data() # type: () -> ()\n"},"type_annotations":{"kind":"list like","value":["Dict[str, Any]","Dict[str, Any]","Dict[str, Any]","Dict[str, Any]","str","int","List[str]","Dict[str, Any]","str","str","str","str","str","str","Any","List[str]","str"],"string":"[\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"Dict[str, Any]\",\n \"str\",\n \"int\",\n \"List[str]\",\n \"Dict[str, Any]\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"Any\",\n \"List[str]\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[291,520,1350,1372,1401,1835,2227,2244,3643,3654,4096,4108,4283,4294,4477,5030,5051],"string":"[\n 291,\n 520,\n 1350,\n 1372,\n 1401,\n 1835,\n 2227,\n 2244,\n 3643,\n 3654,\n 4096,\n 4108,\n 4283,\n 4294,\n 4477,\n 5030,\n 5051\n]"},"type_annotation_ends":{"kind":"list like","value":[305,534,1364,1386,1404,1838,2236,2258,3646,3657,4099,4111,4286,4297,4480,5039,5054],"string":"[\n 305,\n 534,\n 1364,\n 1386,\n 1404,\n 1838,\n 2236,\n 2258,\n 3646,\n 3657,\n 4099,\n 4111,\n 4286,\n 4297,\n 4480,\n 5039,\n 5054\n]"}}},{"rowIdx":1381,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/hotspots.py"},"contents":{"kind":"string","value":"from django.conf import settings\nfrom django.utils.translation import ugettext as _\n\nfrom zerver.models import UserProfile, UserHotspot\n\nfrom typing import List, Dict\n\nALL_HOTSPOTS = {\n 'intro_reply': {\n 'title': _('Reply to a message'),\n 'description': _('Click anywhere on a message to reply.'),\n },\n 'intro_streams': {\n 'title': _('Catch up on a stream'),\n 'description': _('Messages sent to a stream are seen by everyone subscribed '\n 'to that stream. Try clicking on one of the stream links below.'),\n },\n 'intro_topics': {\n 'title': _('Topics'),\n 'description': _('Every message has a topic. Topics keep conversations '\n 'easy to follow, and make it easy to reply to conversations that start '\n 'while you are offline.'),\n },\n 'intro_compose': {\n 'title': _('Compose'),\n 'description': _('Click here to start a new conversation. 
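# get_flair_gen above turns the configured percentages into a
# shuffled schedule whose composition exactly matches the requested
# distribution over the batch, rather than sampling independently
# per paragraph. A self-contained version with made-up percentages:
import random
from typing import Dict, List

def flair_schedule(length: int, dist_percentages: Dict[str, int]) -> List[str]:
    result = []  # type: List[str]
    for kind, pct in dist_percentages.items():
        result.extend([kind] * int(pct * length / 100))
    result.extend(["None"] * (length - len(result)))  # pad the remainder
    random.shuffle(result)
    return result

schedule = flair_schedule(10, {"bold": 20, "italic": 10})
assert len(schedule) == 10 and schedule.count("bold") == 2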
Pick a topic '\n '(2-3 words is best), and give it a go!'),\n },\n} # type: Dict[str, Dict[str, str]]\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [{\n 'name': hotspot,\n 'title': ALL_HOTSPOTS[hotspot]['title'],\n 'description': ALL_HOTSPOTS[hotspot]['description'],\n 'delay': 0,\n } for hotspot in ALL_HOTSPOTS]\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return []\n\n seen_hotspots = frozenset(UserHotspot.objects.filter(user=user).values_list('hotspot', flat=True))\n for hotspot in ['intro_reply', 'intro_streams', 'intro_topics', 'intro_compose']:\n if hotspot not in seen_hotspots:\n return [{\n 'name': hotspot,\n 'title': ALL_HOTSPOTS[hotspot]['title'],\n 'description': ALL_HOTSPOTS[hotspot]['description'],\n 'delay': 0.5,\n }]\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=['tutorial_status'])\n return []\n\ndef copy_hotpots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(user=target_profile, hotspot=userhotspot.hotspot,\n timestamp=userhotspot.timestamp)\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=['tutorial_status', 'onboarding_steps'])\n"},"type_annotations":{"kind":"list like","value":["UserProfile","UserProfile","UserProfile"],"string":"[\n \"UserProfile\",\n \"UserProfile\",\n \"UserProfile\"\n]"},"type_annotation_starts":{"kind":"list like","value":[1140,2319,2348],"string":"[\n 1140,\n 2319,\n 2348\n]"},"type_annotation_ends":{"kind":"list like","value":[1151,2330,2359],"string":"[\n 1151,\n 2330,\n 2359\n]"}}},{"rowIdx":1382,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/html_diff.py"},"contents":{"kind":"string","value":"import lxml\n\nfrom lxml.html.diff import htmldiff\nfrom typing import Optional\n\ndef highlight_with_class(text: str, klass: str) -> str:\n return '%s' % (klass, text)\n\ndef highlight_html_differences(s1: str, s2: str, msg_id: Optional[int]=None) -> str:\n retval = htmldiff(s1, s2)\n fragment = lxml.html.fromstring(retval)\n\n for elem in fragment.cssselect('del'):\n elem.tag = 'span'\n elem.set('class', 'highlight_text_deleted')\n\n for elem in fragment.cssselect('ins'):\n elem.tag = 'span'\n elem.set('class', 'highlight_text_inserted')\n\n retval = lxml.html.tostring(fragment)\n\n return retval\n"},"type_annotations":{"kind":"list like","value":["str","str","str","str"],"string":"[\n \"str\",\n \"str\",\n \"str\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[109,121,226,235],"string":"[\n 109,\n 121,\n 226,\n 235\n]"},"type_annotation_ends":{"kind":"list like","value":[112,124,229,238],"string":"[\n 112,\n 124,\n 229,\n 238\n]"}}},{"rowIdx":1383,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/i18n.py"},"contents":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport operator\n\nfrom django.conf import settings\nfrom django.utils import translation\nfrom django.utils.translation import ugettext as _\nfrom 
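# highlight_html_differences in html_diff.py above runs lxml's
# htmldiff and rewrites the <ins>/<del> tags it emits into styled
# <span>s. This sketch uses Element.iter() rather than the
# cssselect() calls above, so it depends only on the lxml package
# itself; note lxml.html.tostring returns bytes, not str.
import lxml.html
from lxml.html.diff import htmldiff

def highlight_diff(s1: str, s2: str) -> bytes:
    fragment = lxml.html.fromstring(htmldiff(s1, s2))
    for tag, klass in (("del", "highlight_text_deleted"),
                       ("ins", "highlight_text_inserted")):
        for elem in list(fragment.iter(tag)):
            elem.tag = "span"
            elem.set("class", klass)
    return lxml.html.tostring(fragment)

print(highlight_diff("<p>old text</p>", "<p>new text</p>"))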
django.utils.lru_cache import lru_cache\n\nfrom itertools import zip_longest\nfrom typing import Any, List, Dict, Optional\n\nimport os\nimport ujson\n\ndef with_language(string: str, language: str) -> str:\n \"\"\"\n This is an expensive function. If you are using it in a loop, it will\n make your code slow.\n \"\"\"\n old_language = translation.get_language()\n translation.activate(language)\n result = _(string)\n translation.activate(old_language)\n return result\n\n@lru_cache()\ndef get_language_list() -> List[Dict[str, Any]]:\n path = os.path.join(settings.STATIC_ROOT, 'locale', 'language_name_map.json')\n with open(path, 'r') as reader:\n languages = ujson.load(reader)\n return languages['name_map']\n\ndef get_language_list_for_templates(default_language: str) -> List[Dict[str, Dict[str, str]]]:\n language_list = [l for l in get_language_list()\n if 'percent_translated' not in l or\n l['percent_translated'] >= 5.]\n\n formatted_list = []\n lang_len = len(language_list)\n firsts_end = (lang_len // 2) + operator.mod(lang_len, 2)\n firsts = list(range(0, firsts_end))\n seconds = list(range(firsts_end, lang_len))\n assert len(firsts) + len(seconds) == lang_len\n for row in zip_longest(firsts, seconds):\n item = {}\n for position, ind in zip(['first', 'second'], row):\n if ind is None:\n continue\n\n lang = language_list[ind]\n percent = name = lang['name']\n if 'percent_translated' in lang:\n percent = \"{} ({}%)\".format(name, lang['percent_translated'])\n\n selected = False\n if default_language in (lang['code'], lang['locale']):\n selected = True\n\n item[position] = {\n 'name': name,\n 'code': lang['code'],\n 'percent': percent,\n 'selected': selected\n }\n\n formatted_list.append(item)\n\n return formatted_list\n\ndef get_language_name(code: str) -> Optional[str]:\n for lang in get_language_list():\n if code in (lang['code'], lang['locale']):\n return lang['name']\n return None\n\ndef get_available_language_codes() -> List[str]:\n language_list = get_language_list()\n codes = [language['code'] for language in language_list]\n return codes\n\ndef get_language_translation_data(language: str) -> Dict[str, str]:\n if language == 'zh-hans':\n language = 'zh_Hans'\n elif language == 'zh-hant':\n language = 'zh_Hant'\n elif language == 'id-id':\n language = 'id_ID'\n path = os.path.join(settings.STATIC_ROOT, 'locale', language, 'translations.json')\n try:\n with open(path, 'r') as reader:\n return ujson.load(reader)\n except FileNotFoundError:\n print('Translation for {} not found at {}'.format(language, path))\n return {}\n"},"type_annotations":{"kind":"list like","value":["str","str","str","str","str"],"string":"[\n \"str\",\n \"str\",\n \"str\",\n \"str\",\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[338,353,954,2206,2578],"string":"[\n 338,\n 353,\n 954,\n 2206,\n 2578\n]"},"type_annotation_ends":{"kind":"list like","value":[341,356,957,2209,2581],"string":"[\n 341,\n 356,\n 957,\n 2209,\n 2581\n]"}}},{"rowIdx":1384,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/import_realm.py"},"contents":{"kind":"string","value":"import datetime\nimport logging\nimport os\nimport ujson\nimport shutil\n\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\nfrom django.conf import settings\nfrom django.db import connection\nfrom django.db.models import Max\nfrom django.utils.timezone import utc as timezone_utc, now as timezone_now\nfrom typing import Any, Dict, List, 
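# get_language_list_for_templates above lays the language list out
# in two balanced columns by pairing the first half of the list with
# the second half via zip_longest, so an odd leftover item lands in
# the first column as (item, None). The layout trick on its own:
from itertools import zip_longest
from typing import List, Optional, Tuple

def two_columns(items: List[str]) -> List[Tuple[str, Optional[str]]]:
    firsts_end = (len(items) + 1) // 2  # same as len//2 + len%2 above
    return list(zip_longest(items[:firsts_end], items[firsts_end:]))

assert two_columns(["a", "b", "c", "d", "e"]) == [
    ("a", "d"), ("b", "e"), ("c", None)]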
Optional, Set, Tuple, \\\n Iterable, cast\n\nfrom zerver.lib.actions import UserMessageLite, bulk_insert_ums\nfrom zerver.lib.avatar_hash import user_avatar_path_from_ids\nfrom zerver.lib.bulk_create import bulk_create_users\nfrom zerver.lib.timestamp import datetime_to_timestamp\nfrom zerver.lib.export import DATE_FIELDS, realm_tables, \\\n Record, TableData, TableName, Field, Path\nfrom zerver.lib.message import do_render_markdown, RealmAlertWords\nfrom zerver.lib.bugdown import version as bugdown_version\nfrom zerver.lib.upload import random_name, sanitize_name, \\\n S3UploadBackend, LocalUploadBackend, guess_type\nfrom zerver.lib.utils import generate_api_key, process_list_in_batches\nfrom zerver.models import UserProfile, Realm, Client, Huddle, Stream, \\\n UserMessage, Subscription, Message, RealmEmoji, \\\n RealmDomain, Recipient, get_user_profile_by_id, \\\n UserPresence, UserActivity, UserActivityInterval, Reaction, \\\n CustomProfileField, CustomProfileFieldValue, RealmAuditLog, \\\n Attachment, get_system_bot, email_to_username, get_huddle_hash, \\\n UserHotspot, MutedTopic, Service, UserGroup, UserGroupMembership, \\\n BotStorageData, BotConfigData\n\n# Code from here is the realm import code path\n\n# ID_MAP is a dictionary that maps table names to dictionaries\n# that map old ids to new ids. We use this in\n# re_map_foreign_keys and other places.\n#\n# We explicity initialize ID_MAP with the tables that support\n# id re-mapping.\n#\n# Code reviewers: give these tables extra scrutiny, as we need to\n# make sure to reload related tables AFTER we re-map the ids.\nID_MAP = {\n 'client': {},\n 'user_profile': {},\n 'huddle': {},\n 'realm': {},\n 'stream': {},\n 'recipient': {},\n 'subscription': {},\n 'defaultstream': {},\n 'reaction': {},\n 'realmemoji': {},\n 'realmdomain': {},\n 'realmfilter': {},\n 'message': {},\n 'user_presence': {},\n 'useractivity': {},\n 'useractivityinterval': {},\n 'usermessage': {},\n 'customprofilefield': {},\n 'customprofilefieldvalue': {},\n 'attachment': {},\n 'realmauditlog': {},\n 'recipient_to_huddle_map': {},\n 'userhotspot': {},\n 'mutedtopic': {},\n 'service': {},\n 'usergroup': {},\n 'usergroupmembership': {},\n 'botstoragedata': {},\n 'botconfigdata': {},\n} # type: Dict[str, Dict[int, int]]\n\nid_map_to_list = {\n 'huddle_to_user_list': {},\n} # type: Dict[str, Dict[int, List[int]]]\n\npath_maps = {\n 'attachment_path': {},\n} # type: Dict[str, Dict[str, str]]\n\ndef update_id_map(table: TableName, old_id: int, new_id: int) -> None:\n if table not in ID_MAP:\n raise Exception('''\n Table %s is not initialized in ID_MAP, which could\n mean that we have not thought through circular\n dependencies.\n ''' % (table,))\n ID_MAP[table][old_id] = new_id\n\ndef fix_datetime_fields(data: TableData, table: TableName) -> None:\n for item in data[table]:\n for field_name in DATE_FIELDS[table]:\n if item[field_name] is not None:\n item[field_name] = datetime.datetime.fromtimestamp(item[field_name], tz=timezone_utc)\n\ndef fix_upload_links(data: TableData, message_table: TableName) -> None:\n \"\"\"\n Because the URLs for uploaded files encode the realm ID of the\n organization being imported (which is only determined at import\n time), we need to rewrite the URLs of links to uploaded files\n during the import process.\n \"\"\"\n for message in data[message_table]:\n if message['has_attachment'] is True:\n for key, value in path_maps['attachment_path'].items():\n if key in message['content']:\n message['content'] = message['content'].replace(key, 
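# fix_datetime_fields above reverses the export step that flattened
# datetimes into floats: every DATE_FIELDS column comes back as a
# timezone-aware UTC datetime. The conversion at its core, using the
# stdlib timezone object in place of Django's timezone_utc:
import datetime

def float_to_utc_datetime(ts: float) -> datetime.datetime:
    return datetime.datetime.fromtimestamp(ts, tz=datetime.timezone.utc)

assert float_to_utc_datetime(0.0).isoformat() == "1970-01-01T00:00:00+00:00"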
value)\n if message['rendered_content']:\n message['rendered_content'] = message['rendered_content'].replace(key, value)\n\ndef create_subscription_events(data: TableData, realm_id: int) -> None:\n \"\"\"\n When the export data doesn't contain the table `zerver_realmauditlog`,\n this function creates RealmAuditLog objects for `subscription_created`\n type event for all the existing Stream subscriptions.\n\n This is needed for all the export tools which do not include the\n table `zerver_realmauditlog` (Slack, Gitter, etc.) because the appropriate\n data about when a user was subscribed is not exported by the third-party\n service.\n \"\"\"\n all_subscription_logs = []\n\n # from bulk_add_subscriptions in lib/actions\n event_last_message_id = Message.objects.aggregate(Max('id'))['id__max']\n if event_last_message_id is None:\n event_last_message_id = -1\n event_time = timezone_now()\n\n recipient_id_to_stream_id = {\n d['id']: d['type_id']\n for d in data['zerver_recipient']\n if d['type'] == Recipient.STREAM\n }\n\n for sub in data['zerver_subscription']:\n recipient_id = sub['recipient_id']\n stream_id = recipient_id_to_stream_id.get(recipient_id)\n\n if stream_id is None:\n continue\n\n user_id = sub['user_profile_id']\n\n all_subscription_logs.append(RealmAuditLog(realm_id=realm_id,\n acting_user_id=user_id,\n modified_user_id=user_id,\n modified_stream_id=stream_id,\n event_last_message_id=event_last_message_id,\n event_time=event_time,\n event_type=RealmAuditLog.SUBSCRIPTION_CREATED))\n RealmAuditLog.objects.bulk_create(all_subscription_logs)\n\ndef fix_service_tokens(data: TableData, table: TableName) -> None:\n \"\"\"\n The tokens in the services are created by 'generate_api_key'.\n As the tokens are unique, they should be re-created for the imports.\n \"\"\"\n for item in data[table]:\n item['token'] = generate_api_key()\n\ndef process_huddle_hash(data: TableData, table: TableName) -> None:\n \"\"\"\n Build new huddle hashes with the updated ids of the users\n \"\"\"\n for huddle in data[table]:\n user_id_list = id_map_to_list['huddle_to_user_list'][huddle['id']]\n huddle['huddle_hash'] = get_huddle_hash(user_id_list)\n\ndef get_huddles_from_subscription(data: TableData, table: TableName) -> None:\n \"\"\"\n Extract the IDs of the user_profiles involved in a huddle from the subscription object\n This helps to generate a unique huddle hash from the updated user_profile ids\n \"\"\"\n id_map_to_list['huddle_to_user_list'] = {\n value: [] for value in ID_MAP['recipient_to_huddle_map'].values()}\n\n for subscription in data[table]:\n if subscription['recipient'] in ID_MAP['recipient_to_huddle_map']:\n huddle_id = ID_MAP['recipient_to_huddle_map'][subscription['recipient']]\n id_map_to_list['huddle_to_user_list'][huddle_id].append(subscription['user_profile_id'])\n\ndef fix_customprofilefield(data: TableData) -> None:\n \"\"\"\n In CustomProfileField with 'field_type' like 'USER', the IDs need to be\n re-mapped.\n \"\"\"\n field_type_USER_id_list = []\n for item in data['zerver_customprofilefield']:\n if item['field_type'] == CustomProfileField.USER:\n field_type_USER_id_list.append(item['id'])\n\n for item in data['zerver_customprofilefieldvalue']:\n if item['field_id'] in field_type_USER_id_list:\n old_user_id_list = ujson.loads(item['value'])\n\n new_id_list = re_map_foreign_keys_many_to_many_internal(\n table='zerver_customprofilefieldvalue',\n field_name='value',\n related_table='user_profile',\n old_id_list=old_user_id_list)\n item['value'] = ujson.dumps(new_id_list)\n\nclass 
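# process_huddle_hash above recomputes each huddle's hash from the
# re-mapped user ids gathered by get_huddles_from_subscription. The
# essential property is that the hash is computed over the sorted
# participant ids, so it is order-independent; the exact digest
# layout below (sha1 over comma-joined ids) is an assumption for
# illustration, not lifted from get_huddle_hash itself.
import hashlib
from typing import List

def huddle_hash(user_ids: List[int]) -> str:
    hash_key = ",".join(str(x) for x in sorted(user_ids))  # canonical order
    return hashlib.sha1(hash_key.encode()).hexdigest()

assert huddle_hash([3, 1, 2]) == huddle_hash([1, 2, 3])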
FakeMessage:\n '''\n We just need a stub object for do_render_markdown\n to write stuff to.\n '''\n pass\n\ndef fix_message_rendered_content(realm: Realm,\n sender_map: Dict[int, Record],\n messages: List[Record]) -> None:\n \"\"\"\n This function sets the rendered_content of all the messages\n after the messages have been imported from a non-Zulip platform.\n \"\"\"\n for message in messages:\n if message['rendered_content'] is not None:\n # For Zulip->Zulip imports, we use the original rendered markdown.\n continue\n\n message_object = FakeMessage()\n\n try:\n content = message['content']\n\n sender_id = message['sender_id']\n sender = sender_map[sender_id]\n sent_by_bot = sender['is_bot']\n translate_emoticons = sender['translate_emoticons']\n\n # We don't handle alert words on import from third-party\n # platforms, since they generally don't have an \"alert\n # words\" type feature, and notifications aren't important anyway.\n realm_alert_words = dict() # type: RealmAlertWords\n message_user_ids = set() # type: Set[int]\n\n rendered_content = do_render_markdown(\n message=cast(Message, message_object),\n content=content,\n realm=realm,\n realm_alert_words=realm_alert_words,\n message_user_ids=message_user_ids,\n sent_by_bot=sent_by_bot,\n translate_emoticons=translate_emoticons,\n )\n assert(rendered_content is not None)\n\n message['rendered_content'] = rendered_content\n message['rendered_content_version'] = bugdown_version\n except Exception:\n # This generally happens with two possible causes:\n # * rendering markdown throwing an uncaught exception\n # * rendering markdown failing with the exception being\n # caught in bugdown (which then returns None, causing the the\n # rendered_content assert above to fire).\n logging.warning(\"Error in markdown rendering for message ID %s; continuing\" % (message['id']))\n\ndef current_table_ids(data: TableData, table: TableName) -> List[int]:\n \"\"\"\n Returns the ids present in the current table\n \"\"\"\n id_list = []\n for item in data[table]:\n id_list.append(item[\"id\"])\n return id_list\n\ndef idseq(model_class: Any) -> str:\n if model_class == RealmDomain:\n return 'zerver_realmalias_id_seq'\n elif model_class == BotStorageData:\n return 'zerver_botuserstatedata_id_seq'\n elif model_class == BotConfigData:\n return 'zerver_botuserconfigdata_id_seq'\n return '{}_id_seq'.format(model_class._meta.db_table)\n\ndef allocate_ids(model_class: Any, count: int) -> List[int]:\n \"\"\"\n Increases the sequence number for a given table by the amount of objects being\n imported into that table. Hence, this gives a reserved range of ids to import the\n converted slack objects into the tables.\n \"\"\"\n conn = connection.cursor()\n sequence = idseq(model_class)\n conn.execute(\"select nextval('%s') from generate_series(1,%s)\" %\n (sequence, str(count)))\n query = conn.fetchall() # Each element in the result is a tuple like (5,)\n conn.close()\n # convert List[Tuple[int]] to List[int]\n return [item[0] for item in query]\n\ndef convert_to_id_fields(data: TableData, table: TableName, field_name: Field) -> None:\n '''\n When Django gives us dict objects via model_to_dict, the foreign\n key fields are `foo`, but we want `foo_id` for the bulk insert.\n This function handles the simple case where we simply rename\n the fields. 
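# allocate_ids above reserves a contiguous block of primary keys by
# advancing the table's Postgres sequence once per row to be
# imported (SELECT nextval(...) FROM generate_series(1, count)), so
# converted rows can be assigned ids that cannot collide with rows
# already in the table. A toy allocator with the same contract,
# minus the database:
import itertools

_sequence = itertools.count(1)  # stands in for the table's id sequence

def allocate_ids_locally(count: int) -> list:
    return [next(_sequence) for _ in range(count)]

first = allocate_ids_locally(3)
second = allocate_ids_locally(2)
assert first == [1, 2, 3] and second == [4, 5]  # blocks never overlap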
For cases where we need to munge ids in the\n database, see re_map_foreign_keys.\n '''\n for item in data[table]:\n item[field_name + \"_id\"] = item[field_name]\n del item[field_name]\n\ndef re_map_foreign_keys(data: TableData,\n table: TableName,\n field_name: Field,\n related_table: TableName,\n verbose: bool=False,\n id_field: bool=False,\n recipient_field: bool=False,\n reaction_field: bool=False) -> None:\n \"\"\"\n This is a wrapper function for all the realm data tables\n and only avatar and attachment records need to be passed through the internal function\n because of the difference in data format (TableData corresponding to realm data tables\n and List[Record] corresponding to the avatar and attachment records)\n \"\"\"\n\n # See comments in bulk_import_user_message_data.\n assert('usermessage' not in related_table)\n\n re_map_foreign_keys_internal(data[table], table, field_name, related_table, verbose, id_field,\n recipient_field, reaction_field)\n\ndef re_map_foreign_keys_internal(data_table: List[Record],\n table: TableName,\n field_name: Field,\n related_table: TableName,\n verbose: bool=False,\n id_field: bool=False,\n recipient_field: bool=False,\n reaction_field: bool=False) -> None:\n '''\n We occasionally need to assign new ids to rows during the\n import/export process, to accommodate things like existing rows\n already being in tables. See bulk_import_client for more context.\n\n The tricky part is making sure that foreign key references\n are in sync with the new ids, and this fixer function does\n the re-mapping. (It also appends `_id` to the field.)\n '''\n lookup_table = ID_MAP[related_table]\n for item in data_table:\n old_id = item[field_name]\n if recipient_field:\n if related_table == \"stream\" and item['type'] == 2:\n pass\n elif related_table == \"user_profile\" and item['type'] == 1:\n pass\n elif related_table == \"huddle\" and item['type'] == 3:\n # save the recipient id with the huddle id, so that we can extract\n # the user_profile ids involved in a huddle with the help of the\n # subscription object\n # check function 'get_huddles_from_subscription'\n ID_MAP['recipient_to_huddle_map'][item['id']] = lookup_table[old_id]\n pass\n else:\n continue\n old_id = item[field_name]\n if reaction_field:\n if item['reaction_type'] == Reaction.REALM_EMOJI:\n old_id = int(old_id)\n else:\n continue\n if old_id in lookup_table:\n new_id = lookup_table[old_id]\n if verbose:\n logging.info('Remapping %s %s from %s to %s' % (table,\n field_name + '_id',\n old_id,\n new_id))\n else:\n new_id = old_id\n if not id_field:\n item[field_name + \"_id\"] = new_id\n del item[field_name]\n else:\n if reaction_field:\n item[field_name] = str(new_id)\n else:\n item[field_name] = new_id\n\ndef re_map_foreign_keys_many_to_many(data: TableData,\n table: TableName,\n field_name: Field,\n related_table: TableName,\n verbose: bool=False) -> None:\n \"\"\"\n We need to assign new ids to rows during the import/export\n process.\n\n The tricky part is making sure that foreign key references\n are in sync with the new ids, and this wrapper function does\n the re-mapping only for ManyToMany fields.\n \"\"\"\n for item in data[table]:\n old_id_list = item[field_name]\n new_id_list = re_map_foreign_keys_many_to_many_internal(\n table, field_name, related_table, old_id_list, verbose)\n item[field_name] = new_id_list\n del item[field_name]\n\ndef re_map_foreign_keys_many_to_many_internal(table: TableName,\n field_name: Field,\n related_table: TableName,\n old_id_list: List[int],\n verbose: 
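# The re_map_foreign_keys family above reduces to one core move:
# look each old foreign-key value up in the per-table lookup dict,
# falling back to the old id when the row was not renumbered, and
# rename the field from `foo` to `foo_id` for the bulk insert.
# In miniature, with made-up rows:
from typing import Dict, List

def remap_fk(rows: List[dict], field_name: str,
             lookup_table: Dict[int, int]) -> None:
    for row in rows:
        old_id = row.pop(field_name)
        row[field_name + "_id"] = lookup_table.get(old_id, old_id)

rows = [{"id": 1, "realm": 10}, {"id": 2, "realm": 11}]
remap_fk(rows, "realm", {10: 42})
assert rows == [{"id": 1, "realm_id": 42}, {"id": 2, "realm_id": 11}]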
bool=False) -> List[int]:\n \"\"\"\n This is an internal function for tables with ManyToMany fields,\n which takes the old ID list of the ManyToMany relation and returns the\n new updated ID list.\n \"\"\"\n lookup_table = ID_MAP[related_table]\n new_id_list = []\n for old_id in old_id_list:\n if old_id in lookup_table:\n new_id = lookup_table[old_id]\n if verbose:\n logging.info('Remapping %s %s from %s to %s' % (table,\n field_name + '_id',\n old_id,\n new_id))\n else:\n new_id = old_id\n new_id_list.append(new_id)\n return new_id_list\n\ndef fix_bitfield_keys(data: TableData, table: TableName, field_name: Field) -> None:\n for item in data[table]:\n item[field_name] = item[field_name + '_mask']\n del item[field_name + '_mask']\n\ndef fix_realm_authentication_bitfield(data: TableData, table: TableName, field_name: Field) -> None:\n \"\"\"Used to fixup the authentication_methods bitfield to be a string\"\"\"\n for item in data[table]:\n values_as_bitstring = ''.join(['1' if field[1] else '0' for field in\n item[field_name]])\n values_as_int = int(values_as_bitstring, 2)\n item[field_name] = values_as_int\n\ndef get_db_table(model_class: Any) -> str:\n \"\"\"E.g. (RealmDomain -> 'zerver_realmdomain')\"\"\"\n return model_class._meta.db_table\n\ndef update_model_ids(model: Any, data: TableData, related_table: TableName) -> None:\n table = get_db_table(model)\n\n # Important: remapping usermessage rows is\n # not only unnessary, it's expensive and can cause\n # memory errors. We don't even use ids from ID_MAP.\n assert('usermessage' not in table)\n\n old_id_list = current_table_ids(data, table)\n allocated_id_list = allocate_ids(model, len(data[table]))\n for item in range(len(data[table])):\n update_id_map(related_table, old_id_list[item], allocated_id_list[item])\n re_map_foreign_keys(data, table, 'id', related_table=related_table, id_field=True)\n\ndef bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:\n model = UserMessage\n table = 'zerver_usermessage'\n lst = data[table]\n\n # IMPORTANT NOTE: We do not use any primary id\n # data from either the import itself or ID_MAP.\n # We let the DB itself generate ids. 
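# fix_realm_authentication_bitfield above collapses the exported
# list of (method, enabled) pairs back into the integer bitmask the
# database column stores, by spelling the booleans as a bit string
# and parsing it in base 2:
from typing import List, Tuple

def pairs_to_bitmask(pairs: List[Tuple[str, bool]]) -> int:
    bits = "".join("1" if enabled else "0" for _name, enabled in pairs)
    return int(bits, 2)

assert pairs_to_bitmask([("Email", True), ("LDAP", False),
                         ("Google", True)]) == 0b101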
def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:
    model = UserMessage
    table = 'zerver_usermessage'
    lst = data[table]

    # IMPORTANT NOTE: We do not use any primary id data from either
    # the import itself or ID_MAP.  We let the DB itself generate ids.
    # Note that no tables use user_message.id as a foreign key, so we
    # can safely avoid all re-mapping complexity.

    def process_batch(items: List[Dict[str, Any]]) -> None:
        ums = [
            UserMessageLite(
                user_profile_id=item['user_profile_id'],
                message_id=item['message_id'],
                flags=item['flags'],
            )
            for item in items
        ]
        bulk_insert_ums(ums)

    chunk_size = 10000

    process_list_in_batches(
        lst=lst,
        chunk_size=chunk_size,
        process_batch=process_batch,
    )

    logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))

def bulk_import_model(data: TableData, model: Any, dump_file_id: Optional[str]=None) -> None:
    table = get_db_table(model)
    # TODO: deprecate dump_file_id
    model.objects.bulk_create(model(**item) for item in data[table])
    if dump_file_id is None:
        logging.info("Successfully imported %s from %s." % (model, table))
    else:
        logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))

# Client is a table shared by multiple realms, so in order to
# correctly import multiple realms into the same server, we need to
# check whether a Client object already exists, and remap all Client
# IDs to the values in the new DB.
def bulk_import_client(data: TableData, model: Any, table: TableName) -> None:
    for item in data[table]:
        try:
            client = Client.objects.get(name=item['name'])
        except Client.DoesNotExist:
            client = Client.objects.create(name=item['name'])
        update_id_map(table='client', old_id=item['id'], new_id=client.id)
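
# Illustrative sketch (editor's addition): process_list_in_batches is
# defined elsewhere in the codebase; a minimal equivalent of the
# chunking behavior relied on above might look like this.
def _example_process_in_batches() -> None:
    from typing import Callable

    def process_list_in_batches_sketch(lst: List[int], chunk_size: int,
                                       process_batch: Callable[[List[int]], None]) -> None:
        for start in range(0, len(lst), chunk_size):
            process_batch(lst[start:start + chunk_size])

    seen = []  # type: List[List[int]]
    process_list_in_batches_sketch(list(range(7)), 3, seen.append)
    assert seen == [[0, 1, 2], [3, 4, 5], [6]]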
def import_uploads_local(import_dir: Path, processing_avatars: bool=False,
                         processing_emojis: bool=False) -> None:
    records_filename = os.path.join(import_dir, "records.json")
    with open(records_filename) as records_file:
        records = ujson.loads(records_file.read())

    re_map_foreign_keys_internal(records, 'records', 'realm_id', related_table="realm",
                                 id_field=True)
    if not processing_emojis:
        re_map_foreign_keys_internal(records, 'records', 'user_profile_id',
                                     related_table="user_profile", id_field=True)
    for record in records:
        if processing_avatars:
            # For avatars, we need to rehash the user ID with the
            # new server's avatar salt.
            avatar_path = user_avatar_path_from_ids(record['user_profile_id'], record['realm_id'])
            file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_path)
            if record['s3_path'].endswith('.original'):
                file_path += '.original'
            else:
                file_path += '.png'
        elif processing_emojis:
            # For emojis, we follow the pattern in upload_emoji_image.
            emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
                realm_id=record['realm_id'],
                emoji_file_name=record['file_name'])
            file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", emoji_path)
        else:
            # Should be kept in sync with its equivalent in
            # zerver/lib/uploads, in the function upload_message_image.
            s3_file_name = "/".join([
                str(record['realm_id']),
                random_name(18),
                sanitize_name(os.path.basename(record['path']))
            ])
            file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", s3_file_name)
            path_maps['attachment_path'][record['path']] = s3_file_name

        orig_file_path = os.path.join(import_dir, record['path'])
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        shutil.copy(orig_file_path, file_path)

    if processing_avatars:
        # Ensure that we have medium-size avatar images for every
        # avatar.  TODO: This implementation is hacky, both in that it
        # does get_user_profile_by_id for each user, and in that it
        # might be better to require the export to just have these.
        upload_backend = LocalUploadBackend()
        for record in records:
            if record['s3_path'].endswith('.original'):
                user_profile = get_user_profile_by_id(record['user_profile_id'])
                avatar_path = user_avatar_path_from_ids(user_profile.id, record['realm_id'])
                medium_file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
                                                avatar_path) + '-medium.png'
                if os.path.exists(medium_file_path):
                    # We remove the image here primarily to deal with
                    # issues when running the import script multiple
                    # times in development (where one might reuse the
                    # same realm ID from a previous iteration).
                    os.remove(medium_file_path)
                upload_backend.ensure_medium_avatar_image(user_profile=user_profile)

def import_uploads_s3(bucket_name: str, import_dir: Path, processing_avatars: bool=False,
                      processing_emojis: bool=False) -> None:
    upload_backend = S3UploadBackend()
    conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
    bucket = conn.get_bucket(bucket_name, validate=True)

    records_filename = os.path.join(import_dir, "records.json")
    with open(records_filename) as records_file:
        records = ujson.loads(records_file.read())

    re_map_foreign_keys_internal(records, 'records', 'realm_id', related_table="realm",
                                 id_field=True)
    timestamp = datetime_to_timestamp(timezone_now())
    if not processing_emojis:
        re_map_foreign_keys_internal(records, 'records', 'user_profile_id',
                                     related_table="user_profile", id_field=True)
    for record in records:
        key = Key(bucket)

        if processing_avatars:
            # For avatars, we need to rehash the user ID with the
            # new server's avatar salt.
            avatar_path = user_avatar_path_from_ids(record['user_profile_id'], record['realm_id'])
            key.key = avatar_path
            if record['s3_path'].endswith('.original'):
                key.key += '.original'
        elif processing_emojis:
            # For emojis, we follow the pattern in upload_emoji_image.
            emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
                realm_id=record['realm_id'],
                emoji_file_name=record['file_name'])
            key.key = emoji_path
            record['last_modified'] = timestamp
        else:
            # Should be kept in sync with its equivalent in
            # zerver/lib/uploads, in the function upload_message_image.
            s3_file_name = "/".join([
                str(record['realm_id']),
                random_name(18),
                sanitize_name(os.path.basename(record['path']))
            ])
            key.key = s3_file_name
            path_maps['attachment_path'][record['s3_path']] = s3_file_name

        # Exported custom emoji from tools like Slack don't have
        # the data for what user uploaded them in `user_profile_id`.
        if not processing_emojis:
            user_profile_id = int(record['user_profile_id'])
            # Support the email gateway bot and other cross-realm messages.
            if user_profile_id in ID_MAP["user_profile"]:
                logging.info("Uploaded by ID mapped user: %s!" % (user_profile_id,))
                user_profile_id = ID_MAP["user_profile"][user_profile_id]
            user_profile = get_user_profile_by_id(user_profile_id)
            key.set_metadata("user_profile_id", str(user_profile.id))

        if 'last_modified' in record:
            key.set_metadata("orig_last_modified", record['last_modified'])
        key.set_metadata("realm_id", str(record['realm_id']))

        # Zulip exports will always have a content-type, but third-party exports might not.
        content_type = record.get("content_type")
        if content_type is None:
            content_type = guess_type(record['s3_path'])[0]
        headers = {'Content-Type': content_type}

        key.set_contents_from_filename(os.path.join(import_dir, record['path']), headers=headers)

    if processing_avatars:
        # Ensure that we have medium-size avatar images for every
        # avatar.  TODO: This implementation is hacky, both in that it
        # does get_user_profile_by_id for each user, and in that it
        # might be better to require the export to just have these.
        upload_backend = S3UploadBackend()
        for record in records:
            if record['s3_path'].endswith('.original'):
                user_profile = get_user_profile_by_id(record['user_profile_id'])
                upload_backend.ensure_medium_avatar_image(user_profile=user_profile)
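
# Illustrative sketch (editor's addition): the shape of the uploaded-file
# path built above.  random_name and sanitize_name are defined elsewhere;
# minimal stand-ins are used here, so the exact output is hypothetical.
def _example_upload_path() -> None:
    def random_name_sketch(length: int) -> str:
        return 'x' * length  # the real helper returns random URL-safe characters

    def sanitize_name_sketch(name: str) -> str:
        return name.replace(' ', '-')

    realm_id = 42
    s3_file_name = "/".join([
        str(realm_id),
        random_name_sketch(18),
        sanitize_name_sketch('my upload.png'),
    ])
    assert s3_file_name == '42/xxxxxxxxxxxxxxxxxx/my-upload.png'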
def import_uploads(import_dir: Path, processing_avatars: bool=False,
                   processing_emojis: bool=False) -> None:
    if processing_avatars and processing_emojis:
        raise AssertionError("Cannot import avatars and emojis at the same time!")
    if processing_avatars:
        logging.info("Importing avatars")
    elif processing_emojis:
        logging.info("Importing emojis")
    else:
        logging.info("Importing uploaded files")
    if settings.LOCAL_UPLOADS_DIR:
        import_uploads_local(import_dir, processing_avatars=processing_avatars,
                             processing_emojis=processing_emojis)
    else:
        if processing_avatars or processing_emojis:
            bucket_name = settings.S3_AVATAR_BUCKET
        else:
            bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
        import_uploads_s3(bucket_name, import_dir, processing_avatars=processing_avatars,
                          processing_emojis=processing_emojis)

# Importing data suffers from a difficult ordering problem because of
# models that reference each other circularly.  Here is a correct order:
#
# * Client [no deps]
# * Realm [-notifications_stream]
# * Stream [only depends on realm]
# * Realm's notifications_stream
# * Now can do all realm_tables
# * UserProfile, in order by ID to avoid bot loop issues
# * Huddle
# * Recipient
# * Subscription
# * Message
# * UserMessage
#
# Because the Python object => JSON conversion process is not fully
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and Foreign Keys) to do the import correctly.
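
# Illustrative sketch (editor's addition): the two-phase trick used in
# do_import_realm below to break the Realm <-> Stream cycle, shown with
# a plain dict rather than Django models.  The field name mirrors the
# real one; the ids are toy values.
def _example_deferred_circular_fk() -> None:
    realm = {'id': 1, 'notifications_stream_id': 7}
    # Phase 1: remember the FK, then null it out so the realm row can
    # be saved before any streams exist.
    deferred = realm['notifications_stream_id']
    realm['notifications_stream_id'] = None
    # ... the streams would be imported here ...
    # Phase 2: restore the FK and save again.
    realm['notifications_stream_id'] = deferred
    assert realm == {'id': 1, 'notifications_stream_id': 7}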
def do_import_realm(import_dir: Path, subdomain: str) -> Realm:
    logging.info("Importing realm dump %s" % (import_dir,))
    if not os.path.exists(import_dir):
        raise Exception("Missing import directory!")

    realm_data_filename = os.path.join(import_dir, "realm.json")
    if not os.path.exists(realm_data_filename):
        raise Exception("Missing realm.json file!")

    logging.info("Importing realm data from %s" % (realm_data_filename,))
    with open(realm_data_filename) as f:
        data = ujson.load(f)

    sort_by_date = data.get('sort_by_date', False)

    bulk_import_client(data, Client, 'zerver_client')

    # We don't import the Stream model yet, since it depends on Realm,
    # which isn't imported yet.  But we need the Stream model IDs for
    # notifications_stream.
    update_model_ids(Stream, data, 'stream')
    re_map_foreign_keys(data, 'zerver_realm', 'notifications_stream', related_table="stream")
    re_map_foreign_keys(data, 'zerver_realm', 'signup_notifications_stream', related_table="stream")

    fix_datetime_fields(data, 'zerver_realm')
    # Fix realm subdomain information
    data['zerver_realm'][0]['string_id'] = subdomain
    data['zerver_realm'][0]['name'] = subdomain
    fix_realm_authentication_bitfield(data, 'zerver_realm', 'authentication_methods')
    update_model_ids(Realm, data, 'realm')

    realm = Realm(**data['zerver_realm'][0])
    if settings.BILLING_ENABLED:
        realm.plan_type = Realm.LIMITED
    else:
        realm.plan_type = Realm.SELF_HOSTED

    if realm.notifications_stream_id is not None:
        notifications_stream_id = int(realm.notifications_stream_id)  # type: Optional[int]
    else:
        notifications_stream_id = None
    realm.notifications_stream_id = None
    if realm.signup_notifications_stream_id is not None:
        signup_notifications_stream_id = int(realm.signup_notifications_stream_id)  # type: Optional[int]
    else:
        signup_notifications_stream_id = None
    realm.signup_notifications_stream_id = None
    realm.save()

    # Email tokens will automatically be randomly generated when the
    # Stream objects are created by Django.
    fix_datetime_fields(data, 'zerver_stream')
    re_map_foreign_keys(data, 'zerver_stream', 'realm', related_table="realm")
    bulk_import_model(data, Stream)

    realm.notifications_stream_id = notifications_stream_id
    realm.signup_notifications_stream_id = signup_notifications_stream_id
    realm.save()

    # Remap the user IDs for notification_bot and friends to their
    # appropriate IDs on this server.
    for item in data['zerver_userprofile_crossrealm']:
        new_user_id = get_system_bot(item['email']).id
        logging.info("Adding to ID map: %s %s" % (item['id'], new_user_id))
        update_id_map(table='user_profile', old_id=item['id'], new_id=new_user_id)
        new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id
        update_id_map(table='recipient', old_id=item['recipient_id'], new_id=new_recipient_id)

    # Merge in zerver_userprofile_mirrordummy
    data['zerver_userprofile'] = data['zerver_userprofile'] + data['zerver_userprofile_mirrordummy']
    del data['zerver_userprofile_mirrordummy']
    data['zerver_userprofile'].sort(key=lambda r: r['id'])

    # To remap the foreign key for UserProfile.last_active_message_id
    update_message_foreign_keys(import_dir=import_dir, sort_by_date=sort_by_date)

    fix_datetime_fields(data, 'zerver_userprofile')
    update_model_ids(UserProfile, data, 'user_profile')
    re_map_foreign_keys(data, 'zerver_userprofile', 'realm', related_table="realm")
    re_map_foreign_keys(data, 'zerver_userprofile', 'bot_owner', related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_userprofile', 'default_sending_stream',
                        related_table="stream")
    re_map_foreign_keys(data, 'zerver_userprofile', 'default_events_register_stream',
                        related_table="stream")
    re_map_foreign_keys(data, 'zerver_userprofile', 'last_active_message_id',
                        related_table="message", id_field=True)
    for user_profile_dict in data['zerver_userprofile']:
        user_profile_dict['password'] = None
        user_profile_dict['api_key'] = generate_api_key()
        # Since Zulip doesn't use these permissions, drop them.
        del user_profile_dict['user_permissions']
        del user_profile_dict['groups']
    user_profiles = [UserProfile(**item) for item in data['zerver_userprofile']]
    for user_profile in user_profiles:
        user_profile.set_unusable_password()
    UserProfile.objects.bulk_create(user_profiles)

    re_map_foreign_keys(data, 'zerver_defaultstream', 'stream', related_table="stream")
    re_map_foreign_keys(data, 'zerver_realmemoji', 'author', related_table="user_profile")
    for (table, model, related_table) in realm_tables:
        re_map_foreign_keys(data, table, 'realm', related_table="realm")
        update_model_ids(model, data, related_table)
        bulk_import_model(data, model)

    if 'zerver_huddle' in data:
        update_model_ids(Huddle, data, 'huddle')
        # We don't import Huddle yet, since we don't have the data to
        # compute huddle hashes until we've imported some of the
        # tables below.
        # TODO: double-check this.

    re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="stream",
                        recipient_field=True, id_field=True)
    re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="user_profile",
                        recipient_field=True, id_field=True)
    re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="huddle",
                        recipient_field=True, id_field=True)
    update_model_ids(Recipient, data, 'recipient')
    bulk_import_model(data, Recipient)

    re_map_foreign_keys(data, 'zerver_subscription', 'user_profile', related_table="user_profile")
    get_huddles_from_subscription(data, 'zerver_subscription')
    re_map_foreign_keys(data, 'zerver_subscription', 'recipient', related_table="recipient")
    update_model_ids(Subscription, data, 'subscription')
    bulk_import_model(data, Subscription)

    if 'zerver_realmauditlog' in data:
        fix_datetime_fields(data, 'zerver_realmauditlog')
        re_map_foreign_keys(data, 'zerver_realmauditlog', 'realm', related_table="realm")
        re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_user',
                            related_table='user_profile')
        re_map_foreign_keys(data, 'zerver_realmauditlog', 'acting_user',
                            related_table='user_profile')
        re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_stream',
                            related_table="stream")
        update_model_ids(RealmAuditLog, data, related_table="realmauditlog")
        bulk_import_model(data, RealmAuditLog)
    else:
        logging.info('about to call create_subscription_events')
        create_subscription_events(
            data=data,
            realm_id=realm.id,
        )
        logging.info('done with create_subscription_events')

    if 'zerver_huddle' in data:
        process_huddle_hash(data, 'zerver_huddle')
        bulk_import_model(data, Huddle)

    if 'zerver_userhotspot' in data:
        fix_datetime_fields(data, 'zerver_userhotspot')
        re_map_foreign_keys(data, 'zerver_userhotspot', 'user', related_table='user_profile')
        update_model_ids(UserHotspot, data, 'userhotspot')
        bulk_import_model(data, UserHotspot)

    if 'zerver_mutedtopic' in data:
        re_map_foreign_keys(data, 'zerver_mutedtopic', 'user_profile', related_table='user_profile')
        re_map_foreign_keys(data, 'zerver_mutedtopic', 'stream', related_table='stream')
        re_map_foreign_keys(data, 'zerver_mutedtopic', 'recipient', related_table='recipient')
        update_model_ids(MutedTopic, data, 'mutedtopic')
        bulk_import_model(data, MutedTopic)

    if 'zerver_service' in data:
        re_map_foreign_keys(data, 'zerver_service', 'user_profile', related_table='user_profile')
        fix_service_tokens(data, 'zerver_service')
        update_model_ids(Service, data, 'service')
        bulk_import_model(data, Service)
    if 'zerver_usergroup' in data:
        re_map_foreign_keys(data, 'zerver_usergroup', 'realm', related_table='realm')
        re_map_foreign_keys_many_to_many(data, 'zerver_usergroup',
                                         'members', related_table='user_profile')
        update_model_ids(UserGroup, data, 'usergroup')
        bulk_import_model(data, UserGroup)

        re_map_foreign_keys(data, 'zerver_usergroupmembership',
                            'user_group', related_table='usergroup')
        re_map_foreign_keys(data, 'zerver_usergroupmembership',
                            'user_profile', related_table='user_profile')
        update_model_ids(UserGroupMembership, data, 'usergroupmembership')
        bulk_import_model(data, UserGroupMembership)

    if 'zerver_botstoragedata' in data:
        re_map_foreign_keys(data, 'zerver_botstoragedata', 'bot_profile', related_table='user_profile')
        update_model_ids(BotStorageData, data, 'botstoragedata')
        bulk_import_model(data, BotStorageData)

    if 'zerver_botconfigdata' in data:
        re_map_foreign_keys(data, 'zerver_botconfigdata', 'bot_profile', related_table='user_profile')
        update_model_ids(BotConfigData, data, 'botconfigdata')
        bulk_import_model(data, BotConfigData)

    fix_datetime_fields(data, 'zerver_userpresence')
    re_map_foreign_keys(data, 'zerver_userpresence', 'user_profile', related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_userpresence', 'client', related_table='client')
    update_model_ids(UserPresence, data, 'user_presence')
    bulk_import_model(data, UserPresence)

    fix_datetime_fields(data, 'zerver_useractivity')
    re_map_foreign_keys(data, 'zerver_useractivity', 'user_profile', related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_useractivity', 'client', related_table='client')
    update_model_ids(UserActivity, data, 'useractivity')
    bulk_import_model(data, UserActivity)

    fix_datetime_fields(data, 'zerver_useractivityinterval')
    re_map_foreign_keys(data, 'zerver_useractivityinterval', 'user_profile',
                        related_table="user_profile")
    update_model_ids(UserActivityInterval, data, 'useractivityinterval')
    bulk_import_model(data, UserActivityInterval)

    re_map_foreign_keys(data, 'zerver_customprofilefield', 'realm', related_table="realm")
    update_model_ids(CustomProfileField, data, related_table="customprofilefield")
    bulk_import_model(data, CustomProfileField)

    re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'user_profile',
                        related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'field',
                        related_table="customprofilefield")
    fix_customprofilefield(data)
    update_model_ids(CustomProfileFieldValue, data, related_table="customprofilefieldvalue")
    bulk_import_model(data, CustomProfileFieldValue)

    # Import uploaded files and avatars.
    import_uploads(os.path.join(import_dir, "avatars"), processing_avatars=True)
    import_uploads(os.path.join(import_dir, "uploads"))

    # We need this check because the emoji files are only present in
    # data from the Slack importer; for a Zulip export, this directory
    # doesn't exist.
    if os.path.exists(os.path.join(import_dir, "emoji")):
        import_uploads(os.path.join(import_dir, "emoji"), processing_emojis=True)

    sender_map = {
        user['id']: user
        for user in data['zerver_userprofile']
    }

    # Import zerver_message and zerver_usermessage
    import_message_data(realm=realm, sender_map=sender_map, import_dir=import_dir)

    re_map_foreign_keys(data, 'zerver_reaction', 'message', related_table="message")
    re_map_foreign_keys(data, 'zerver_reaction', 'user_profile', related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_reaction', 'emoji_code', related_table="realmemoji",
                        id_field=True, reaction_field=True)
    update_model_ids(Reaction, data, 'reaction')
    bulk_import_model(data, Reaction)

    # Do attachments AFTER message data is loaded.
    # TODO: de-dup how we read these json files.
    fn = os.path.join(import_dir, "attachment.json")
    if not os.path.exists(fn):
        raise Exception("Missing attachment.json file!")

    logging.info("Importing attachment data from %s" % (fn,))
    with open(fn) as f:
        data = ujson.load(f)

    import_attachments(data)
    return realm

# create_users and do_import_system_bots differ from their equivalents in
# zerver/management/commands/initialize_voyager_db.py because here we
# check whether the bots already exist, and only create users for the
# bots that are missing.
def do_import_system_bots(realm: Any) -> None:
    internal_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
                     for bot in settings.INTERNAL_BOTS]
    create_users(realm, internal_bots, bot_type=UserProfile.DEFAULT_BOT)
    names = [(settings.FEEDBACK_BOT_NAME, settings.FEEDBACK_BOT)]
    create_users(realm, names, bot_type=UserProfile.DEFAULT_BOT)
    print("Finished importing system bots.")

def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]],
                 bot_type: Optional[int]=None) -> None:
    user_set = set()
    for full_name, email in name_list:
        short_name = email_to_username(email)
        if not UserProfile.objects.filter(email=email):
            user_set.add((email, full_name, short_name, True))
    bulk_create_users(realm, user_set, bot_type)

def update_message_foreign_keys(import_dir: Path,
                                sort_by_date: bool) -> None:
    old_id_list = get_incoming_message_ids(
        import_dir=import_dir,
        sort_by_date=sort_by_date,
    )

    count = len(old_id_list)

    new_id_list = allocate_ids(model_class=Message, count=count)

    for old_id, new_id in zip(old_id_list, new_id_list):
        update_id_map(
            table='message',
            old_id=old_id,
            new_id=new_id,
        )

    # We don't touch user_message keys here; that happens later, when
    # we actually read the files a second time to get the real data.
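
# Illustrative sketch (editor's addition): the old-id -> new-id pairing
# done above, with allocate_ids replaced by a toy counter.  In the real
# code, allocate_ids reserves a block of ids from the database.
def _example_id_allocation() -> None:
    old_id_list = [17, 3, 42]
    next_free_id = 1000  # hypothetical start of the newly reserved block
    new_id_list = list(range(next_free_id, next_free_id + len(old_id_list)))
    id_map = {}  # type: Dict[int, int]
    for old_id, new_id in zip(old_id_list, new_id_list):
        id_map[old_id] = new_id
    assert id_map == {17: 1000, 3: 1001, 42: 1002}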
def get_incoming_message_ids(import_dir: Path,
                             sort_by_date: bool) -> List[int]:
    '''
    This function reads in our entire collection of message
    ids, which can be millions of integers for some installations.
    And then we sort the list.  This is necessary to ensure
    that the sort order of incoming ids matches the sort order
    of pub_date, which isn't always guaranteed by our
    utilities that convert third party chat data.  We also
    need to move our ids to a new range if we're dealing
    with a server that has data for other realms.
    '''

    if sort_by_date:
        tups = list()  # type: List[Tuple[int, int]]
    else:
        message_ids = []  # type: List[int]

    dump_file_id = 1
    while True:
        message_filename = os.path.join(import_dir, "messages-%06d.json" % (dump_file_id,))
        if not os.path.exists(message_filename):
            break

        with open(message_filename) as f:
            data = ujson.load(f)

        # Aggressively free up memory.
        del data['zerver_usermessage']

        for row in data['zerver_message']:
            # We truncate pub_date to an int to theoretically save
            # memory and speed up the sort.  For Zulip-to-Zulip
            # imports, the message_id will generally be a good
            # tiebreaker.  If we occasionally mis-order the ids for
            # two messages from the same second, it's not the end of
            # the world, as it's likely those messages arrived at the
            # original server in somewhat arbitrary order.

            message_id = row['id']

            if sort_by_date:
                pub_date = int(row['pub_date'])
                tup = (pub_date, message_id)
                tups.append(tup)
            else:
                message_ids.append(message_id)

        dump_file_id += 1

    if sort_by_date:
        tups.sort()
        message_ids = [tup[1] for tup in tups]

    return message_ids

def import_message_data(realm: Realm,
                        sender_map: Dict[int, Record],
                        import_dir: Path) -> None:
    dump_file_id = 1
    while True:
        message_filename = os.path.join(import_dir, "messages-%06d.json" % (dump_file_id,))
        if not os.path.exists(message_filename):
            break

        with open(message_filename) as f:
            data = ujson.load(f)

        logging.info("Importing message dump %s" % (message_filename,))
        re_map_foreign_keys(data, 'zerver_message', 'sender', related_table="user_profile")
        re_map_foreign_keys(data, 'zerver_message', 'recipient', related_table="recipient")
        re_map_foreign_keys(data, 'zerver_message', 'sending_client', related_table='client')
        fix_datetime_fields(data, 'zerver_message')
        # Update the message content with the remapped attachment urls.
        fix_upload_links(data, 'zerver_message')

        # We already create mappings for zerver_message ids
        # in update_message_foreign_keys(), so here we simply
        # apply them.
        message_id_map = ID_MAP['message']
        for row in data['zerver_message']:
            row['id'] = message_id_map[row['id']]

        for row in data['zerver_usermessage']:
            assert(row['message'] in message_id_map)

        fix_message_rendered_content(
            realm=realm,
            sender_map=sender_map,
            messages=data['zerver_message'],
        )
        logging.info("Successfully rendered markdown for message batch")

        # A LOT HAPPENS HERE.
        # This is where we actually import the message data.
        bulk_import_model(data, Message)

        # Due to the structure of these message chunks, we're
        # guaranteed to have already imported all the Message objects
        # for this batch of UserMessage objects.
        re_map_foreign_keys(data, 'zerver_usermessage', 'message', related_table="message")
        re_map_foreign_keys(data, 'zerver_usermessage', 'user_profile', related_table="user_profile")
        fix_bitfield_keys(data, 'zerver_usermessage', 'flags')

        bulk_import_user_message_data(data, dump_file_id)
        dump_file_id += 1
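
# Illustrative sketch (editor's addition): sorting (pub_date, message_id)
# tuples, as done above, orders primarily by timestamp and falls back to
# the message id as a tiebreaker within the same second.
def _example_pub_date_sort() -> None:
    tups = [(1500000002, 5), (1500000001, 9), (1500000001, 4)]
    tups.sort()
    message_ids = [tup[1] for tup in tups]
    assert message_ids == [4, 9, 5]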
def import_attachments(data: TableData) -> None:
    # Clean up the data in zerver_attachment that is not
    # relevant to our many-to-many import.
    fix_datetime_fields(data, 'zerver_attachment')
    re_map_foreign_keys(data, 'zerver_attachment', 'owner', related_table="user_profile")
    re_map_foreign_keys(data, 'zerver_attachment', 'realm', related_table="realm")

    # Configure ourselves.  Django models many-to-many (m2m)
    # relations asymmetrically.  The parent here refers to the
    # Model that has the ManyToManyField.  It is assumed here
    # that the child models have been loaded, but we are in turn
    # responsible for loading the parents and the m2m rows.
    parent_model = Attachment
    parent_db_table_name = 'zerver_attachment'
    parent_singular = 'attachment'
    child_singular = 'message'
    child_plural = 'messages'
    m2m_table_name = 'zerver_attachment_messages'
    parent_id = 'attachment_id'
    child_id = 'message_id'

    update_model_ids(parent_model, data, 'attachment')
    # We don't bulk_import_model yet, because we need to first compute
    # the many-to-many for this table.

    # First, build our list of many-to-many (m2m) rows.
    # We do this in a slightly convoluted way to anticipate
    # a future where we may need to call re_map_foreign_keys.
    m2m_rows = []  # type: List[Record]
    for parent_row in data[parent_db_table_name]:
        for fk_id in parent_row[child_plural]:
            m2m_row = {}  # type: Record
            m2m_row[parent_singular] = parent_row['id']
            m2m_row[child_singular] = ID_MAP['message'][fk_id]
            m2m_rows.append(m2m_row)

    # Create our table data for insert.
    m2m_data = {m2m_table_name: m2m_rows}  # type: TableData
    convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
    convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
    m2m_rows = m2m_data[m2m_table_name]

    # Next, delete the child data from the parent rows.
    for parent_row in data[parent_db_table_name]:
        del parent_row[child_plural]

    # Update 'path_id' for the attachments.
    for attachment in data[parent_db_table_name]:
        attachment['path_id'] = path_maps['attachment_path'][attachment['path_id']]

    # Next, load the parent rows.
    bulk_import_model(data, parent_model)

    # Now, go back to our m2m rows.
    # TODO: Do this the kosher Django way.  We may find a
    # better way to do this in Django 1.9 particularly.
    with connection.cursor() as cursor:
        sql_template = '''
            insert into %s (%s, %s) values(%%s, %%s);''' % (m2m_table_name,
                                                            parent_id,
                                                            child_id)
        tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
        cursor.executemany(sql_template, tups)

    logging.info('Successfully imported M2M table %s' % (m2m_table_name,))
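
# Illustrative sketch (editor's addition): the raw executemany pattern
# used above, demonstrated against an in-memory SQLite table instead of
# the real Django connection.
def _example_m2m_executemany() -> None:
    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('create table attachment_messages (attachment_id int, message_id int)')
    rows = [(1, 10), (1, 11), (2, 12)]
    # SQLite uses ? placeholders; the Django cursor above uses %s.
    conn.executemany('insert into attachment_messages values (?, ?)', rows)
    count = conn.execute('select count(*) from attachment_messages').fetchone()[0]
    assert count == 3
    conn.close()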
\"Field\",\n \"TableName\",\n \"TableData\",\n \"TableName\",\n \"Field\",\n \"TableName\",\n \"TableName\",\n \"Field\",\n \"TableName\",\n \"List[int]\",\n \"TableData\",\n \"TableName\",\n \"Field\",\n \"TableData\",\n \"TableName\",\n \"Field\",\n \"Any\",\n \"Any\",\n \"TableData\",\n \"TableName\",\n \"TableData\",\n \"int\",\n \"List[Dict[str, Any]]\",\n \"TableData\",\n \"Any\",\n \"TableData\",\n \"Any\",\n \"TableName\",\n \"Path\",\n \"str\",\n \"Path\",\n \"Path\",\n \"Path\",\n \"str\",\n \"Any\",\n \"Realm\",\n \"Iterable[Tuple[str, str]]\",\n \"Path\",\n \"bool\",\n \"Path\",\n \"bool\",\n \"Realm\",\n \"Dict[int, Record]\",\n \"Path\",\n \"TableData\"\n]"},"type_annotation_starts":{"kind":"list like","value":[2876,2895,2908,3220,3238,3508,3534,4274,4295,6098,6116,6394,6412,6719,6737,7400,8365,8417,8479,10508,10526,10740,11095,11107,11742,11760,11783,12258,12300,12347,12393,13254,13308,13364,13419,15811,15866,15926,15985,16654,16723,16791,16861,17760,17778,17801,17984,18002,18025,18404,18537,18548,18574,19186,19211,19600,20168,20186,20858,20876,20888,21220,24603,24620,28481,30074,30091,43059,43495,43513,43915,43967,44550,44599,46543,46586,46641,48752],"string":"[\n 2876,\n 2895,\n 2908,\n 3220,\n 3238,\n 3508,\n 3534,\n 4274,\n 4295,\n 6098,\n 6116,\n 6394,\n 6412,\n 6719,\n 6737,\n 7400,\n 8365,\n 8417,\n 8479,\n 10508,\n 10526,\n 10740,\n 11095,\n 11107,\n 11742,\n 11760,\n 11783,\n 12258,\n 12300,\n 12347,\n 12393,\n 13254,\n 13308,\n 13364,\n 13419,\n 15811,\n 15866,\n 15926,\n 15985,\n 16654,\n 16723,\n 16791,\n 16861,\n 17760,\n 17778,\n 17801,\n 17984,\n 18002,\n 18025,\n 18404,\n 18537,\n 18548,\n 18574,\n 19186,\n 19211,\n 19600,\n 20168,\n 20186,\n 20858,\n 20876,\n 20888,\n 21220,\n 24603,\n 24620,\n 28481,\n 30074,\n 30091,\n 43059,\n 43495,\n 43513,\n 43915,\n 43967,\n 44550,\n 44599,\n 46543,\n 46586,\n 46641,\n 48752\n]"},"type_annotation_ends":{"kind":"list like","value":[2885,2898,2911,3229,3247,3517,3543,4283,4298,6107,6125,6403,6421,6728,6746,7409,8370,8434,8491,10517,10535,10743,11098,11110,11751,11769,11788,12267,12309,12352,12402,13266,13317,13369,13428,15820,15875,15931,15994,16663,16728,16800,16870,17769,17787,17806,17993,18011,18030,18407,18540,18557,18583,19195,19214,19620,20177,20189,20867,20879,20897,21224,24606,24624,28485,30078,30094,43062,43500,43538,43919,43971,44554,44603,46548,46603,46645,48761],"string":"[\n 2885,\n 2898,\n 2911,\n 3229,\n 3247,\n 3517,\n 3543,\n 4283,\n 4298,\n 6107,\n 6125,\n 6403,\n 6421,\n 6728,\n 6746,\n 7409,\n 8370,\n 8434,\n 8491,\n 10517,\n 10535,\n 10743,\n 11098,\n 11110,\n 11751,\n 11769,\n 11788,\n 12267,\n 12309,\n 12352,\n 12402,\n 13266,\n 13317,\n 13369,\n 13428,\n 15820,\n 15875,\n 15931,\n 15994,\n 16663,\n 16728,\n 16800,\n 16870,\n 17769,\n 17787,\n 17806,\n 17993,\n 18011,\n 18030,\n 18407,\n 18540,\n 18557,\n 18583,\n 19195,\n 19214,\n 19620,\n 20177,\n 20189,\n 20867,\n 20879,\n 20897,\n 21224,\n 24606,\n 24624,\n 28485,\n 30078,\n 30094,\n 43062,\n 43500,\n 43538,\n 43919,\n 43971,\n 44554,\n 44603,\n 46548,\n 46603,\n 46645,\n 48761\n]"}}},{"rowIdx":1385,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/initial_password.py"},"contents":{"kind":"string","value":"\nfrom django.conf import settings\n\nimport hashlib\nimport base64\n\nfrom typing import Optional\n\n\ndef initial_password(email: str) -> Optional[str]:\n \"\"\"Given an email address, returns the initial password for that account, as\n created by 
populate_db.\"\"\"\n\n if settings.INITIAL_PASSWORD_SALT is not None:\n encoded_key = (settings.INITIAL_PASSWORD_SALT + email).encode(\"utf-8\")\n digest = hashlib.sha256(encoded_key).digest()\n return base64.b64encode(digest)[:16].decode('utf-8')\n else:\n # None as a password for a user tells Django to set an unusable password\n return None\n"},"type_annotations":{"kind":"list like","value":["str"],"string":"[\n \"str\"\n]"},"type_annotation_starts":{"kind":"list like","value":[123],"string":"[\n 123\n]"},"type_annotation_ends":{"kind":"list like","value":[126],"string":"[\n 126\n]"}}},{"rowIdx":1386,"cells":{"zip":{"kind":"string","value":"archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip"},"filename":{"kind":"string","value":"zerver/lib/integrations.py"},"contents":{"kind":"string","value":"import os\nimport pathlib\n\nfrom typing import Dict, List, Optional, TypeVar, Any\nfrom django.conf import settings\nfrom django.conf.urls import url\nfrom django.urls.resolvers import LocaleRegexProvider\nfrom django.utils.module_loading import import_string\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext as _\nfrom django.template import loader\n\nfrom zerver.templatetags.app_filters import render_markdown_path\n\n\n\"\"\"This module declares all of the (documented) integrations available\nin the Zulip server. The Integration class is used as part of\ngenerating the documentation on the /integrations page, while the\nWebhookIntegration class is also used to generate the URLs in\n`zproject/urls.py` for webhook integrations.\n\nTo add a new non-webhook integration, add code to the INTEGRATIONS\ndictionary below.\n\nTo add a new webhook integration, declare a WebhookIntegration in the\nWEBHOOK_INTEGRATIONS list below (it will be automatically added to\nINTEGRATIONS).\n\nTo add a new integration category, add to the CATEGORIES dict.\n\nOver time, we expect this registry to grow additional convenience\nfeatures for writing and configuring integrations efficiently.\n\"\"\"\n\nCATEGORIES = {\n 'meta-integration': _('Integration frameworks'),\n 'continuous-integration': _('Continuous integration'),\n 'customer-support': _('Customer support'),\n 'deployment': _('Deployment'),\n 'communication': _('Communication'),\n 'financial': _('Financial'),\n 'hr': _('HR'),\n 'marketing': _('Marketing'),\n 'misc': _('Miscellaneous'),\n 'monitoring': _('Monitoring tools'),\n 'project-management': _('Project management'),\n 'productivity': _('Productivity'),\n 'version-control': _('Version control'),\n 'bots': _('Interactive bots'),\n} # type: Dict[str, str]\n\nclass Integration:\n DEFAULT_LOGO_STATIC_PATH_PNG = 'static/images/integrations/logos/{name}.png'\n DEFAULT_LOGO_STATIC_PATH_SVG = 'static/images/integrations/logos/{name}.svg'\n\n def __init__(self, name: str, client_name: str, categories: List[str],\n logo: Optional[str]=None, secondary_line_text: Optional[str]=None,\n display_name: Optional[str]=None, doc: Optional[str]=None,\n stream_name: Optional[str]=None, legacy: Optional[bool]=False) -> None:\n self.name = name\n self.client_name = client_name\n self.secondary_line_text = secondary_line_text\n self.legacy = legacy\n self.doc = doc\n\n for category in categories:\n if category not in CATEGORIES:\n raise KeyError( # nocoverage\n 'INTEGRATIONS: ' + name + ' - category \\'' +\n category + '\\' is not a key in CATEGORIES.'\n )\n self.categories = list(map((lambda c: CATEGORIES[c]), categories))\n\n if logo is None:\n logo = self.get_logo_url()\n self.logo = logo\n\n if 
class Integration:
    DEFAULT_LOGO_STATIC_PATH_PNG = 'static/images/integrations/logos/{name}.png'
    DEFAULT_LOGO_STATIC_PATH_SVG = 'static/images/integrations/logos/{name}.svg'

    def __init__(self, name: str, client_name: str, categories: List[str],
                 logo: Optional[str]=None, secondary_line_text: Optional[str]=None,
                 display_name: Optional[str]=None, doc: Optional[str]=None,
                 stream_name: Optional[str]=None, legacy: Optional[bool]=False) -> None:
        self.name = name
        self.client_name = client_name
        self.secondary_line_text = secondary_line_text
        self.legacy = legacy
        self.doc = doc

        for category in categories:
            if category not in CATEGORIES:
                raise KeyError(  # nocoverage
                    'INTEGRATIONS: ' + name + ' - category \'' +
                    category + '\' is not a key in CATEGORIES.'
                )
        self.categories = list(map((lambda c: CATEGORIES[c]), categories))

        if logo is None:
            logo = self.get_logo_url()
        self.logo = logo

        if display_name is None:
            display_name = name.title()
        self.display_name = display_name

        if stream_name is None:
            stream_name = self.name
        self.stream_name = stream_name

    def is_enabled(self) -> bool:
        return True

    def get_logo_url(self) -> Optional[str]:
        logo_file_path_svg = str(pathlib.PurePath(
            settings.STATIC_ROOT,
            *self.DEFAULT_LOGO_STATIC_PATH_SVG.format(name=self.name).split('/')[1:]
        ))
        logo_file_path_png = str(pathlib.PurePath(
            settings.STATIC_ROOT,
            *self.DEFAULT_LOGO_STATIC_PATH_PNG.format(name=self.name).split('/')[1:]
        ))
        if os.path.isfile(logo_file_path_svg):
            return self.DEFAULT_LOGO_STATIC_PATH_SVG.format(name=self.name)
        elif os.path.isfile(logo_file_path_png):
            return self.DEFAULT_LOGO_STATIC_PATH_PNG.format(name=self.name)

        return None

class BotIntegration(Integration):
    DEFAULT_LOGO_STATIC_PATH_PNG = 'static/generated/bots/{name}/logo.png'
    DEFAULT_LOGO_STATIC_PATH_SVG = 'static/generated/bots/{name}/logo.svg'
    ZULIP_LOGO_STATIC_PATH_PNG = 'static/images/logo/zulip-icon-128x128.png'
    DEFAULT_DOC_PATH = '{name}/doc.md'

    def __init__(self, name: str, categories: List[str], logo: Optional[str]=None,
                 secondary_line_text: Optional[str]=None, display_name: Optional[str]=None,
                 doc: Optional[str]=None) -> None:
        super().__init__(
            name,
            client_name=name,
            categories=categories,
            secondary_line_text=secondary_line_text,
        )

        if logo is None:
            logo_url = self.get_logo_url()
            if logo_url is not None:
                logo = logo_url
            else:
                # TODO: Add a test for this by initializing one in a test.
                logo = self.ZULIP_LOGO_STATIC_PATH_PNG  # nocoverage
        self.logo = logo

        if display_name is None:
            display_name = "{} Bot".format(name.title())  # nocoverage
        else:
            display_name = "{} Bot".format(display_name)
        self.display_name = display_name

        if doc is None:
            doc = self.DEFAULT_DOC_PATH.format(name=name)
        self.doc = doc

class EmailIntegration(Integration):
    def is_enabled(self) -> bool:
        return settings.EMAIL_GATEWAY_PATTERN != ""

class WebhookIntegration(Integration):
    DEFAULT_FUNCTION_PATH = 'zerver.webhooks.{name}.view.api_{name}_webhook'
    DEFAULT_URL = 'api/v1/external/{name}'
    DEFAULT_CLIENT_NAME = 'Zulip{name}Webhook'
    DEFAULT_DOC_PATH = '{name}/doc.{ext}'

    def __init__(self, name: str, categories: List[str], client_name: Optional[str]=None,
                 logo: Optional[str]=None, secondary_line_text: Optional[str]=None,
                 function: Optional[str]=None, url: Optional[str]=None,
                 display_name: Optional[str]=None, doc: Optional[str]=None,
                 stream_name: Optional[str]=None, legacy: Optional[bool]=None) -> None:
        if client_name is None:
            client_name = self.DEFAULT_CLIENT_NAME.format(name=name.title())
        super().__init__(
            name,
            client_name,
            categories,
            logo=logo,
            secondary_line_text=secondary_line_text,
            display_name=display_name,
            stream_name=stream_name,
            legacy=legacy
        )

        if function is None:
            function = self.DEFAULT_FUNCTION_PATH.format(name=name)

        if isinstance(function, str):
            function = import_string(function)

        self.function = function

        if url is None:
            url = self.DEFAULT_URL.format(name=name)
        self.url = url

        if doc is None:
            doc = self.DEFAULT_DOC_PATH.format(name=name, ext='md')

        self.doc = doc

    @property
    def url_object(self) -> LocaleRegexProvider:
        return url(self.url, self.function)
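
# Illustrative sketch (editor's addition): how the DEFAULT_* templates
# above expand for a hypothetical integration named 'example'.
def _example_webhook_defaults() -> None:
    name = 'example'
    assert WebhookIntegration.DEFAULT_CLIENT_NAME.format(name=name.title()) == 'ZulipExampleWebhook'
    assert WebhookIntegration.DEFAULT_URL.format(name=name) == 'api/v1/external/example'
    assert (WebhookIntegration.DEFAULT_FUNCTION_PATH.format(name=name) ==
            'zerver.webhooks.example.view.api_example_webhook')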
class HubotIntegration(Integration):
    GIT_URL_TEMPLATE = "https://github.com/hubot-scripts/hubot-{}"

    def __init__(self, name: str, categories: List[str],
                 display_name: Optional[str]=None, logo: Optional[str]=None,
                 logo_alt: Optional[str]=None, git_url: Optional[str]=None,
                 legacy: bool=False) -> None:
        if logo_alt is None:
            logo_alt = "{} logo".format(name.title())
        self.logo_alt = logo_alt

        if git_url is None:
            git_url = self.GIT_URL_TEMPLATE.format(name)
        self.hubot_docs_url = git_url

        super().__init__(
            name, name, categories,
            logo=logo, display_name=display_name,
            doc='zerver/integrations/hubot_common.md',
            legacy=legacy
        )

class GithubIntegration(WebhookIntegration):
    """
    We need this class to avoid creating a url object for the GitHub
    integrations; we want one generic url with a dispatch function for
    the GitHub service and the GitHub webhook.
    """
    def __init__(self, name: str, categories: List[str], client_name: Optional[str]=None,
                 logo: Optional[str]=None, secondary_line_text: Optional[str]=None,
                 function: Optional[str]=None, url: Optional[str]=None,
                 display_name: Optional[str]=None, doc: Optional[str]=None,
                 stream_name: Optional[str]=None, legacy: Optional[bool]=False) -> None:
        url = self.DEFAULT_URL.format(name='github')

        super().__init__(
            name,
            categories,
            client_name=client_name,
            logo=logo,
            secondary_line_text=secondary_line_text,
            function=function,
            url=url,
            display_name=display_name,
            doc=doc,
            stream_name=stream_name,
            legacy=legacy
        )

    @property
    def url_object(self) -> None:
        return

class EmbeddedBotIntegration(Integration):
    '''
    This class acts as a registry for bots verified as safe
    and valid, such that they are capable of being deployed on the server.
    '''
    DEFAULT_CLIENT_NAME = 'Zulip{name}EmbeddedBot'

    def __init__(self, name: str, *args: Any, **kwargs: Any) -> None:
        assert kwargs.get("client_name") is None
        client_name = self.DEFAULT_CLIENT_NAME.format(name=name.title())
        super().__init__(
            name, client_name, *args, **kwargs)

EMBEDDED_BOTS = [
    EmbeddedBotIntegration('converter', []),
    EmbeddedBotIntegration('encrypt', []),
    EmbeddedBotIntegration('helloworld', []),
    EmbeddedBotIntegration('virtual_fs', []),
    EmbeddedBotIntegration('giphy', []),
    EmbeddedBotIntegration('followup', []),
]  # type: List[EmbeddedBotIntegration]
WEBHOOK_INTEGRATIONS = [
    WebhookIntegration('airbrake', ['monitoring']),
    WebhookIntegration('ansibletower', ['deployment'], display_name='Ansible Tower'),
    WebhookIntegration('appfollow', ['customer-support'], display_name='AppFollow'),
    WebhookIntegration('appveyor', ['continuous-integration'], display_name='AppVeyor'),
    WebhookIntegration('beanstalk', ['version-control'], stream_name='commits'),
    WebhookIntegration('basecamp', ['project-management']),
    WebhookIntegration('beeminder', ['misc'], display_name='Beeminder'),
    WebhookIntegration(
        'bitbucket2',
        ['version-control'],
        logo='static/images/integrations/logos/bitbucket.svg',
        display_name='Bitbucket',
        stream_name='bitbucket'
    ),
    WebhookIntegration(
        'bitbucket',
        ['version-control'],
        display_name='Bitbucket',
        secondary_line_text='(Enterprise)',
        stream_name='commits',
        legacy=True
    ),
    WebhookIntegration('circleci', ['continuous-integration'], display_name='CircleCI'),
    WebhookIntegration('clubhouse', ['project-management']),
    WebhookIntegration('codeship', ['continuous-integration', 'deployment']),
    WebhookIntegration('crashlytics', ['monitoring']),
    WebhookIntegration('dialogflow', ['customer-support'], display_name='Dialogflow'),
    WebhookIntegration('delighted', ['customer-support', 'marketing'], display_name='Delighted'),
    WebhookIntegration(
        'deskdotcom',
        ['customer-support'],
        logo='static/images/integrations/logos/deskcom.png',
        display_name='Desk.com',
        stream_name='desk'
    ),
    WebhookIntegration('dropbox', ['productivity'], display_name='Dropbox'),
    WebhookIntegration('flock', ['customer-support'], display_name='Flock'),
    WebhookIntegration('freshdesk', ['customer-support']),
    WebhookIntegration('front', ['customer-support'], display_name='Front'),
    GithubIntegration(
        'github',
        ['version-control'],
        display_name='GitHub',
        logo='static/images/integrations/logos/github.svg',
        function='zerver.webhooks.github.view.api_github_webhook',
        stream_name='github'
    ),
    WebhookIntegration('gitlab', ['version-control'], display_name='GitLab'),
    WebhookIntegration('gocd', ['continuous-integration'], display_name='GoCD'),
    WebhookIntegration('gogs', ['version-control'], stream_name='commits'),
    WebhookIntegration('gosquared', ['marketing'], display_name='GoSquared'),
    WebhookIntegration('greenhouse', ['hr'], display_name='Greenhouse'),
    WebhookIntegration('groove', ['customer-support'], display_name='Groove'),
    WebhookIntegration('hellosign', ['productivity', 'hr'], display_name='HelloSign'),
    WebhookIntegration('helloworld', ['misc'], display_name='Hello World'),
    WebhookIntegration('heroku', ['deployment'], display_name='Heroku'),
    WebhookIntegration('homeassistant', ['misc'], display_name='Home Assistant'),
    WebhookIntegration(
        'ifttt',
        ['meta-integration'],
        function='zerver.webhooks.ifttt.view.api_iftt_app_webhook',
        display_name='IFTTT'
    ),
    WebhookIntegration('insping', ['monitoring'], display_name='Insping'),
    WebhookIntegration('intercom', ['customer-support'], display_name='Intercom'),
    WebhookIntegration('jira', ['project-management'], display_name='JIRA'),
    WebhookIntegration('librato', ['monitoring']),
    WebhookIntegration('mention', ['marketing'], display_name='Mention'),
    WebhookIntegration('netlify', ['continuous-integration', 'deployment'], display_name='Netlify'),
    WebhookIntegration('newrelic', ['monitoring'], display_name='New Relic'),
    WebhookIntegration(
        'opbeat',
        ['monitoring'],
        display_name='Opbeat',
        stream_name='opbeat',
        function='zerver.webhooks.opbeat.view.api_opbeat_webhook'
    ),
    WebhookIntegration('opsgenie', ['meta-integration', 'monitoring'], display_name='OpsGenie'),
    WebhookIntegration('pagerduty', ['monitoring'], display_name='PagerDuty'),
    WebhookIntegration('papertrail', ['monitoring']),
    WebhookIntegration('pingdom', ['monitoring']),
    WebhookIntegration('pivotal', ['project-management'], display_name='Pivotal Tracker'),
    WebhookIntegration('raygun', ['monitoring'], display_name="Raygun"),
    WebhookIntegration('reviewboard', ['version-control'], display_name="ReviewBoard"),
    WebhookIntegration('semaphore', ['continuous-integration', 'deployment'], stream_name='builds'),
    WebhookIntegration('sentry', ['monitoring']),
    WebhookIntegration('slack', ['communication']),
    WebhookIntegration('solano', ['continuous-integration'], display_name='Solano Labs'),
    WebhookIntegration('splunk', ['monitoring'], display_name='Splunk'),
    WebhookIntegration('statuspage', ['customer-support'], display_name='Statuspage'),
    WebhookIntegration('stripe', ['financial'], display_name='Stripe'),
    WebhookIntegration('taiga', ['project-management']),
    WebhookIntegration('teamcity', ['continuous-integration']),
    WebhookIntegration('transifex', ['misc']),
    WebhookIntegration('travis', ['continuous-integration'], display_name='Travis CI'),
    WebhookIntegration('trello', ['project-management']),
    WebhookIntegration('updown', ['monitoring']),
    WebhookIntegration(
        'yo',
        ['communication'],
        function='zerver.webhooks.yo.view.api_yo_app_webhook',
        display_name='Yo App'
    ),
    WebhookIntegration('wordpress', ['marketing'], display_name='WordPress'),
    WebhookIntegration('zapier', ['meta-integration']),
    WebhookIntegration('zendesk', ['customer-support']),
    WebhookIntegration('zabbix', ['monitoring'], display_name='Zabbix'),
    WebhookIntegration('gci', ['misc'], display_name='Google Code-in',
                       stream_name='gci'),
]  # type: List[WebhookIntegration]
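
# Illustrative sketch (editor's addition): per the module docstring,
# zproject/urls.py generates webhook URL patterns from this list.  This
# is the gist of that consumption, not the actual urls.py code; note
# that GithubIntegration deliberately returns None from url_object so
# it can be skipped in favor of a shared dispatch URL.
def _example_webhook_urls() -> List[Any]:
    return [integration.url_object
            for integration in WEBHOOK_INTEGRATIONS
            if integration.url_object is not None]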
INTEGRATIONS = {
    'asana': Integration('asana', 'asana', ['project-management'],
                         doc='zerver/integrations/asana.md'),
    'capistrano': Integration(
        'capistrano',
        'capistrano',
        ['deployment'],
        display_name='Capistrano',
        doc='zerver/integrations/capistrano.md'
    ),
    'codebase': Integration('codebase', 'codebase', ['version-control'],
                            doc='zerver/integrations/codebase.md'),
    'discourse': Integration('discourse', 'discourse', ['communication'],
                             doc='zerver/integrations/discourse.md'),
    'email': EmailIntegration('email', 'email', ['communication'],
                              doc='zerver/integrations/email.md'),
    'errbot': Integration('errbot', 'errbot', ['meta-integration', 'bots'],
                          doc='zerver/integrations/errbot.md'),
    'git': Integration('git', 'git', ['version-control'],
                       stream_name='commits', doc='zerver/integrations/git.md'),
    'google-calendar': Integration(
        'google-calendar',
        'google-calendar',
        ['productivity'],
        display_name='Google Calendar',
        doc='zerver/integrations/google-calendar.md'
    ),
    'hubot': Integration('hubot', 'hubot', ['meta-integration', 'bots'],
                         doc='zerver/integrations/hubot.md'),
    'irc': Integration('irc', 'irc', ['communication'], display_name='IRC',
                       doc='zerver/integrations/irc.md'),
    'jenkins': Integration(
        'jenkins',
        'jenkins',
        ['continuous-integration'],
        secondary_line_text='(or Hudson)',
        doc='zerver/integrations/jenkins.md'
    ),
    'jira-plugin': Integration(
        'jira-plugin',
        'jira-plugin',
        ['project-management'],
        logo='static/images/integrations/logos/jira.svg',
        secondary_line_text='(locally installed)',
        display_name='JIRA',
        doc='zerver/integrations/jira-plugin.md',
        stream_name='jira',
        legacy=True
    ),
    'matrix': Integration('matrix', 'matrix', ['communication'],
                          doc='zerver/integrations/matrix.md'),
    'mercurial': Integration(
        'mercurial',
        'mercurial',
        ['version-control'],
        display_name='Mercurial (hg)',
        doc='zerver/integrations/mercurial.md',
        stream_name='commits',
    ),
    'nagios': Integration('nagios', 'nagios', ['monitoring'],
                          doc='zerver/integrations/nagios.md'),
    'openshift': Integration(
        'openshift',
        'openshift',
        ['deployment'],
        display_name='OpenShift',
        doc='zerver/integrations/openshift.md',
        stream_name='deployments',
    ),
    'perforce': Integration('perforce', 'perforce', ['version-control'],
                            doc='zerver/integrations/perforce.md'),
    'phabricator': Integration('phabricator', 'phabricator', ['version-control'],
                               doc='zerver/integrations/phabricator.md'),
    'puppet': Integration('puppet', 'puppet', ['deployment'],
                          doc='zerver/integrations/puppet.md'),
    'redmine': Integration('redmine', 'redmine', ['project-management'],
                           doc='zerver/integrations/redmine.md'),
    'rss': Integration('rss', 'rss', ['communication'],
                       display_name='RSS', doc='zerver/integrations/rss.md'),
    'svn': Integration('svn', 'svn', ['version-control'],
                       doc='zerver/integrations/svn.md'),
    'trac': Integration('trac', 'trac', ['project-management'],
                        doc='zerver/integrations/trac.md'),
    'trello-plugin': Integration(
        'trello-plugin',
        'trello-plugin',
        ['project-management'],
        logo='static/images/integrations/logos/trello.svg',
        secondary_line_text='(legacy)',
        display_name='Trello',
        doc='zerver/integrations/trello-plugin.md',
        stream_name='trello',
        legacy=True
    ),
    'twitter': Integration('twitter', 'twitter', ['customer-support', 'marketing'],
                           doc='zerver/integrations/twitter.md'),
}  # type: Dict[str, Integration]
BOT_INTEGRATIONS = [
    BotIntegration('github_detail', ['version-control', 'bots'],
                   display_name='GitHub Detail'),
    BotIntegration('xkcd', ['bots', 'misc'], display_name='xkcd'),
]  # type: List[BotIntegration]

HUBOT_INTEGRATIONS = [
    HubotIntegration('assembla', ['version-control', 'project-management'],
                     display_name='Assembla', logo_alt='Assembla'),
    HubotIntegration('bonusly', ['hr']),
    HubotIntegration('chartbeat', ['marketing'], display_name='Chartbeat'),
    HubotIntegration('darksky', ['misc'], display_name='Dark Sky',
                     logo_alt='Dark Sky logo'),
    HubotIntegration('google-hangouts', ['communication'], display_name='Google Hangouts',
                     logo_alt='Google Hangouts logo'),
    HubotIntegration('instagram', ['misc'], display_name='Instagram'),
    HubotIntegration('mailchimp', ['communication', 'marketing'],
                     display_name='MailChimp'),
    HubotIntegration('google-translate', ['misc'],
                     display_name="Google Translate", logo_alt='Google Translate logo'),
    HubotIntegration('youtube', ['misc'], display_name='YouTube'),
]  # type: List[HubotIntegration]

for hubot_integration in HUBOT_INTEGRATIONS:
    INTEGRATIONS[hubot_integration.name] = hubot_integration

for webhook_integration in WEBHOOK_INTEGRATIONS:
    INTEGRATIONS[webhook_integration.name] = webhook_integration

for bot_integration in BOT_INTEGRATIONS:
    INTEGRATIONS[bot_integration.name] = bot_integration

# ----------------------------------------------------------------------
# File: zerver/lib/json_encoder_for_html.py
# (from archives/18-2-SKKU-OSS_2018-2-OSS-L5.zip)
# ----------------------------------------------------------------------

import json
from typing import Any, Dict, Iterator, Optional

# Taken from
# https://github.com/simplejson/simplejson/blob/8edc82afcf6f7512b05fba32baa536fe756bd273/simplejson/encoder.py#L378-L402
# License: MIT
class JSONEncoderForHTML(json.JSONEncoder):
    """An encoder that produces JSON safe to embed in HTML.

    To embed JSON content in, say, a script tag on a web page, the
    characters &, < and > should be escaped.
    They cannot be escaped
    with the usual entities (e.g. &amp;) because they are not expanded
    within <script> tags.
    """