{ observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \",\n end_stream=False,\n )\n h3_server.send_headers(\n stream_id=stream_id,\n headers=[(b\"x-some-trailer\", b\"bar\")],\n end_stream=True,\n )\n \n # receive response\n events = h3_transfer(quic_server, h3_client)\n self.assertEqual(\n events,\n [\n HeadersReceived(\n headers=[\n (b\":status\", b\"200\"),\n (b\"content-type\", b\"text/html; charset=utf-8\"),\n ],\n stream_id=stream_id,\n stream_ended=False,\n ),\n DataReceived(\n data=b\"hello\",\n stream_id=stream_id,\n stream_ended=False,\n ),\n HeadersReceived(\n headers=[(b\"x-some-trailer\", b\"bar\")],\n stream_id=stream_id,\n stream_ended=True,\n ),\n ],\n )\n \n \n===========unchanged ref 0===========\n at: tests.test_h3\n h3_client_and_server()\n \n h3_transfer(quic_sender, h3_receiver)\n \n at: tests.test_h3.H3ConnectionTest.test_request_with_trailers\n h3_client = H3Connection(quic_client)\n \n h3_server = H3Connection(quic_server)\n \n at: unittest.case.TestCase\n assertEqual(first: Any, second: Any, msg: Any=...) -> None\n \n \n===========changed ref 0===========\n # module: tests.test_h3\n + def h3_client_and_server():\n + return client_and_server(\n + client_options={\"alpn_protocols\": H3_ALPN},\n + client_patch=disable_packet_pacing,\n + server_options={\"alpn_protocols\": H3_ALPN},\n + server_patch=disable_packet_pacing,\n + )\n + \n===========changed ref 1===========\n # module: tests.test_h3\n class H3ConnectionTest(TestCase):\n def test_uni_stream_grease(self):\n - with client_and_server(\n - client_options={\"alpn_protocols\": H3_ALPN},\n - server_options={\"alpn_protocols\": H3_ALPN},\n + with h3_client_and_server() as (quic_client, quic_server):\n - ) as (quic_client, quic_server):\n h3_server = H3Connection(quic_server)\n \n quic_client.send_stream_data(\n 14, b\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfeGREASE is the word\"\n )\n self.assertEqual(h3_transfer(quic_client, h3_server), [])\n \n===========changed ref 2===========\n # module: tests.test_h3\n class H3ConnectionTest(TestCase):\n def test_request_with_server_push_max_push_id(self):\n - with client_and_server(\n - client_options={\"alpn_protocols\": H3_ALPN},\n - server_options={\"alpn_protocols\": H3_ALPN},\n + with h3_client_and_server() as (quic_client, quic_server):\n - ) as (quic_client, quic_server):\n h3_client = H3Connection(quic_client)\n h3_server = H3Connection(quic_server)\n \n # send request\n stream_id = quic_client.get_next_available_stream_id()\n h3_client.send_headers(\n stream_id=stream_id,\n headers=[\n (b\":method\", b\"GET\"),\n (b\":scheme\", b\"https\"),\n (b\":authority\", b\"localhost\"),\n (b\":path\", b\"/\"),\n ],\n end_stream=True,\n )\n \n # receive request\n events = h3_transfer(quic_client, h3_server)\n self.assertEqual(\n events,\n [\n HeadersReceived(\n headers=[\n (b\":method\", b\"GET\"),\n (b\":scheme\", b\"https\"),\n (b\":authority\", b\"localhost\"),\n (b\":path\", b\"/\"),\n ],\n stream_id=stream_id,\n stream_ended=True,\n )\n ],\n )\n \n # send push promises\n for i in range(0, 8):\n h3_server.send_push_promise(\n stream_id=stream_id,\n headers=[\n (b\":method\", b\"GET\"),\n (b\":scheme\", b\"https\"),\n (b\":authority\", b\"localhost\"),\n (b\":path\", \"/{}.css\".format(i).encode(\"ascii\")),\n ],\n )\n \n===========changed ref 3===========\n # module: tests.test_h3\n class 
H3ConnectionTest(TestCase):\n def test_request_with_server_push_max_push_id(self):\n # offset: 1\n \"),\n (b\":path\", \"/{}.css\".format(i).encode(\"ascii\")),\n ],\n )\n \n # send one too many\n with self.assertRaises(NoAvailablePushIDError):\n h3_server.send_push_promise(\n stream_id=stream_id,\n headers=[\n (b\":method\", b\"GET\"),\n (b\":scheme\", b\"https\"),\n (b\":authority\", b\"localhost\"),\n (b\":path\", b\"/8.css\"),\n ],\n )\n \n===========changed ref 4===========\n # module: tests.test_h3\n class H3ConnectionTest(TestCase):\n def test_request(self):\n - with client_and_server(\n - client_options={\"alpn_protocols\": H3_ALPN},\n - server_options={\"alpn_protocols\": H3_ALPN},\n + with h3_client_and_server() as (quic_client, quic_server):\n - ) as (quic_client, quic_server):\n h3_client = H3Connection(quic_client)\n h3_server = H3Connection(quic_server)\n \n # make first request\n self._make_request(h3_client, h3_server)\n \n # make second request\n self._make_request(h3_client, h3_server)\n \n # make third request -> dynamic table\n self._make_request(h3_client, h3_server)\n "}}},{"rowIdx":4099,"cells":{"path":{"kind":"string","value":"tests.test_h3/H3ConnectionTest.test_uni_stream_type"},"type":{"kind":"string","value":"Modified"},"project":{"kind":"string","value":"aiortc~aioquic"},"commit_hash":{"kind":"string","value":"fd084338b0d72d996d1be928d81f8beb3b55c3f9"},"commit_message":{"kind":"string","value":"[tests] disable packet pacing for H0 / H3 tests"},"ground_truth":{"kind":"string","value":" <0>: with client_and_server(\n <1>: client_options={\"alpn_protocols\": H3_ALPN},\n <2>: server_options={\"alpn_protocols\": H3_ALPN},\n <3>: with h3_client_and_server() as (quic_client, quic_server):\n ) as (quic_client, quic_server):\n"},"main_code":{"kind":"string","value":" # module: tests.test_h3\n class H3ConnectionTest(TestCase):\n def test_uni_stream_type(self):\n <0> with client_and_server(\n <1> client_options={\"alpn_protocols\": H3_ALPN},\n <2> server_options={\"alpn_protocols\": H3_ALPN},\n <3> ) as (quic_client, quic_server):\n <4> h3_server = H3Connection(quic_server)\n <5> \n <6> # unknown stream type 9\n <7> stream_id = quic_client.get_next_available_stream_id(is_unidirectional=True)\n <8> self.assertEqual(stream_id, 2)\n <9> quic_client.send_stream_data(stream_id, b\"\\x09\")\n<10> self.assertEqual(h3_transfer(quic_client, h3_server), [])\n<11> self.assertEqual(list(h3_server._stream.keys()), [2])\n<12> self.assertEqual(h3_server._stream[2].buffer, b\"\")\n<13> self.assertEqual(h3_server._stream[2].stream_type, 9)\n<14> \n<15> # unknown stream type 64, one byte at a time\n<16> stream_id = quic_client.get_next_available_stream_id(is_unidirectional=True)\n<17> self.assertEqual(stream_id, 6)\n<18> \n<19> quic_client.send_stream_data(stream_id, b\"\\x40\")\n<20> self.assertEqual(h3_transfer(quic_client, h3_server), [])\n<21> self.assertEqual(list(h3_server._stream.keys()), [2, 6])\n<22> self.assertEqual(h3_server._stream[2].buffer, b\"\")\n<23> self.assertEqual(h3_server._stream[2].stream_type, 9)\n<24> self.assertEqual(h3_server._stream[6].buffer, b\"\\x40\")\n<25> self.assertEqual(h3_server._stream[6].stream_type, None)"},"context":{"kind":"string","value":"===========below chunk 0===========\n # module: tests.test_h3\n class H3ConnectionTest(TestCase):\n def test_uni_stream_type(self):\n # offset: 1\n quic_client.send_stream_data(stream_id, b\"\\x40\")\n self.assertEqual(h3_transfer(quic_client, h3_server), [])\n self.assertEqual(list(h3_server._stream.keys()), [2, 
6])\n self.assertEqual(h3_server._stream[2].buffer, b\"\")\n self.assertEqual(h3_server._stream[2].stream_type, 9)\n self.assertEqual(h3_server._stream[6].buffer, b\"\")\n self.assertEqual(h3_server._stream[6].stream_type, 64)\n \n \n===========unchanged ref 0===========\n at: tests.test_h3\n h3_transfer(quic_sender, h3_receiver)\n \n at: tests.test_h3.H3ConnectionTest.test_uni_stream_type\n h3_server = H3Connection(quic_server)\n \n at: unittest.case.TestCase\n assertEqual(first: Any, second: Any, msg: Any=...) -> None\n \n \n===========changed ref 0===========\n # module: tests.test_h3\n class H3ConnectionTest(TestCase):\n def test_uni_stream_grease(self):\n - with client_and_server(\n - client_options={\"alpn_protocols\": H3_ALPN},\n - server_options={\"alpn_protocols\": H3_ALPN},\n + with h3_client_and_server() as (quic_client, quic_server):\n - ) as (quic_client, quic_server):\n h3_server = H3Connection(quic_server)\n \n quic_client.send_stream_data(\n 14, b\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfeGREASE is the word\"\n )\n self.assertEqual(h3_transfer(quic_client, h3_server), [])\n \n===========changed ref 1===========\n # module: tests.test_h3\n class H3ConnectionTest(TestCase):\n def test_request_with_trailers(self):\n - with client_and_server(\n - client_options={\"alpn_protocols\": H3_ALPN},\n - server_options={\"alpn_protocols\": H3_ALPN},\n + with h3_client_and_server() as (quic_client, quic_server):\n - ) as (quic_client, quic_server):\n h3_client = H3Connection(quic_client)\n h3_server = H3Connection(quic_server)\n \n # send request with trailers\n stream_id = quic_client.get_next_available_stream_id()\n h3_client.send_headers(\n stream_id=stream_id,\n headers=[\n (b\":method\", b\"GET\"),\n (b\":scheme\", b\"https\"),\n (b\":authority\", b\"localhost\"),\n (b\":path\", b\"/\"),\n ],\n end_stream=False,\n )\n h3_client.send_headers(\n stream_id=stream_id,\n headers=[(b\"x-some-trailer\", b\"foo\")],\n end_stream=True,\n )\n \n # receive request\n events = h3_transfer(quic_client, h3_server)\n self.assertEqual(\n events,\n [\n HeadersReceived(\n headers=[\n (b\":method\", b\"GET\"),\n (b\":scheme\", b\"https\"),\n (b\":authority\", b\"localhost\"),\n (b\":path\", b\"/\"),\n ],\n stream_id=stream_id,\n stream_ended=False,\n ),\n HeadersReceived(\n headers=[(b\"x-some-trailer\", b\"foo\")],\n stream_id=stream_id,\n stream_ended=True,\n ),\n ],\n )\n \n # send response\n h3_server.send_\n===========changed ref 2===========\n # module: tests.test_h3\n class H3ConnectionTest(TestCase):\n def test_request_with_trailers(self):\n # offset: 1\n ended=True,\n ),\n ],\n )\n \n # send response\n h3_server.send_headers(\n stream_id=stream_id,\n headers=[\n (b\":status\", b\"200\"),\n (b\"content-type\", b\"text/html; charset=utf-8\"),\n ],\n end_stream=False,\n )\n h3_server.send_data(\n stream_id=stream_id,\n data=b\"hello\",\n end_stream=False,\n )\n h3_server.send_headers(\n stream_id=stream_id,\n headers=[(b\"x-some-trailer\", b\"bar\")],\n end_stream=True,\n )\n \n # receive response\n events = h3_transfer(quic_server, h3_client)\n self.assertEqual(\n events,\n [\n HeadersReceived(\n headers=[\n (b\":status\", b\"200\"),\n (b\"content-type\", b\"text/html; charset=utf-8\"),\n ],\n stream_id=stream_id,\n stream_ended=False,\n ),\n DataReceived(\n data=b\"hello\",\n stream_id=stream_id,\n stream_ended=False,\n ),\n HeadersReceived(\n headers=[(b\"x-some-trailer\", b\"bar\")],\n stream_id=stream_id,\n stream_ended=True,\n ),\n ],\n )\n \n===========changed ref 3===========\n # module: 
tests.test_h3\n class H3ConnectionTest(TestCase):\n def test_request_with_server_push_max_push_id(self):\n - with client_and_server(\n - client_options={\"alpn_protocols\": H3_ALPN},\n - server_options={\"alpn_protocols\": H3_ALPN},\n + with h3_client_and_server() as (quic_client, quic_server):\n - ) as (quic_client, quic_server):\n h3_client = H3Connection(quic_client)\n h3_server = H3Connection(quic_server)\n \n # send request\n stream_id = quic_client.get_next_available_stream_id()\n h3_client.send_headers(\n stream_id=stream_id,\n headers=[\n (b\":method\", b\"GET\"),\n (b\":scheme\", b\"https\"),\n (b\":authority\", b\"localhost\"),\n (b\":path\", b\"/\"),\n ],\n end_stream=True,\n )\n \n # receive request\n events = h3_transfer(quic_client, h3_server)\n self.assertEqual(\n events,\n [\n HeadersReceived(\n headers=[\n (b\":method\", b\"GET\"),\n (b\":scheme\", b\"https\"),\n (b\":authority\", b\"localhost\"),\n (b\":path\", b\"/\"),\n ],\n stream_id=stream_id,\n stream_ended=True,\n )\n ],\n )\n \n # send push promises\n for i in range(0, 8):\n h3_server.send_push_promise(\n stream_id=stream_id,\n headers=[\n (b\":method\", b\"GET\"),\n (b\":scheme\", b\"https\"),\n (b\":authority\", b\"localhost\"),\n (b\":path\", \"/{}.css\".format(i).encode(\"ascii\")),\n ],\n )\n "}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":40,"numItemsPerPage":100,"numTotalItems":7800,"offset":4000,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1ODA3MzIxNywic3ViIjoiL2RhdGFzZXRzL2tyYWFsZmFyL0NvZWRpdG9yLXByb2Nlc3NlZC1kZW1vMiIsImV4cCI6MTc1ODA3NjgxNywiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.FetBiqgsWAwWsiiyFlgvQL_ZBm2fOlrIN9UIeu_3WvL3Xpp3O8oxYe-SYURYS_KVeEcTPDJ4YtVpBNxeFaegAA","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Dataset columns: path (string, length 9 to 117) | type (string, 2 distinct values) | project (string, 10 distinct values) | commit_hash (string, length 40) | commit_message (string, length 1 to 137) | ground_truth (string, length 0 to 2.74k) | main_code (string, length 102 to 3.37k) | context (string, length 0 to 14.7k)
path: tests.test_connection/QuicConnectionTest.test_connect_with_loss_2 | type: Modified | project: aiortc~aioquic | commit_hash: ab27bcc8272aeaa97a5a852402a0cd230005430b | commit_message: [packet builder] only pad INITIAL packets containing CRYPTO
# module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_2(self): <0> def datagram_sizes(items): <1> return [len(x[0]) for x in items] <2> <3> client_configuration = QuicConfiguration(is_client=True) <4> client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <5> <6> client = QuicConnection(configuration=client_configuration) <7> client._ack_delay = 0 <8> <9> server_configuration = QuicConfiguration(is_client=False) <10> server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) <11> <12> server = QuicConnection(configuration=server_configuration) <13> server._ack_delay = 0 <14> <15> # client sends INITIAL <16> now = 0.0 <17> client.connect(SERVER_ADDR, now=now) <18> items = client.datagrams_to_send(now=now) <19> self.assertEqual(datagram_sizes(items), [1280]) <20> self.assertEqual(client.get_timer(), 1.0) <21> <22> # server receives INITIAL, sends INITIAL + HANDSHAKE but second datagram is lost <23> now = 0.1 <24> server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) <25> items = server.datagrams_to_send(now=now) <26> self.assertEqual(datagram_sizes(items), [1280, 1084]) <27> self.assertEqual(server.get_timer(), 1.1) <28> self.assertEqual(len(server._loss.spaces[0].sent_packets), 1) <29> self.assertEqual(len(server._loss.spaces[1].sent_packets), 2) <30> <31> # client only receives first datagram and sends ACKS <32> now = 0.2 <33> client.receive_datagram(items[0][0], SERVER_ADDR, now=now) <34> items = client.datagrams_to_send(now=</s>
===========below chunk 0=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_2(self): # offset: 1 self.assertEqual(datagram_sizes(items), [1280, 48]) self.assertAlmostEqual(client.get_timer(), 0.625) self.assertEqual(type(client.next_event()), events.ProtocolNegotiated) self.assertIsNone(client.next_event()) # client PTO - HANDSHAKE PING now = client.get_timer() # ~0.625 client.handle_timer(now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [44]) self.assertAlmostEqual(client.get_timer(), 1.875) # server receives PING, discards INITIAL and sends ACK now = 0.725 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [48]) self.assertAlmostEqual(server.get_timer(), 1.1) self.assertEqual(len(server._loss.spaces[0].sent_packets), 0) self.assertEqual(len(server._loss.spaces[1].sent_packets), 3) self.assertEqual(type(server.next_event()), events.ProtocolNegotiated) self.assertIsNone(server.next_event()) # ACKs are lost, server retransmits HANDSHAKE now = server.get_timer() server.handle_timer(now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280, 876]) self.assertAlmostEqual(server.get_timer(), 3.1) self.assertEqual(len(server._loss.spaces[0].sent_packets), 0</s> ===========below chunk 1=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_2(self): # offset: 2 <s>get_timer(), 3.1) self.assertEqual(len(server._loss.spaces[0].sent_packets), 0) self.assertEqual(len(server._loss.spaces[1].sent_packets), 3) self.assertIsNone(server.next_event()) # handshake continues normally now = 1.2 client.receive_datagram(items[0][0], SERVER_ADDR, now=now) client.receive_datagram(items[1][0], SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [329]) self.assertAlmostEqual(client.get_timer(), 2.45) self.assertEqual(type(client.next_event()), events.HandshakeCompleted) self.assertEqual(type(client.next_event()), events.ConnectionIdIssued) now = 1.3 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [229]) self.assertAlmostEqual(server.get_timer(), 1.925) self.assertEqual(type(server.next_event()), events.HandshakeCompleted) self.assertEqual(type(server.next_event()), events.ConnectionIdIssued) now = 1.4 client.receive_datagram(items[0][0], SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [32]) self.assertAlmostEqual(client.get_timer(), 61 ===========unchanged ref 0=========== at: tests.test_connection CLIENT_ADDR = ("1.2.3.4", 1234) SERVER_ADDR = ("2.3.4.5", 4433) at: tests.utils SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") SERVER_CERTFILE = os.path.join(os.path.dirname(__file__), "ssl_cert.pem") SERVER_KEYFILE = os.path.join(os.path.dirname(__file__), "ssl_key.pem") at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None assertIsNone(obj: Any, msg: Any=...) -> None assertAlmostEqual(first: float, second: float, places: Optional[int]=..., msg: Any=..., delta: Optional[float]=...) -> None assertAlmostEqual(first: datetime.datetime, second: datetime.datetime, places: Optional[int]=..., msg: Any=..., delta: Optional[datetime.timedelta]=...) 
-> None ===========changed ref 0=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_1(self): """ Check connection is established even in the client's INITIAL is lost. """ def datagram_sizes(items): return [len(x[0]) for x in items] client_configuration = QuicConfiguration(is_client=True) client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE) client = QuicConnection(configuration=client_configuration) client._ack_delay = 0 server_configuration = QuicConfiguration(is_client=False) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) server = QuicConnection(configuration=server_configuration) server._ack_delay = 0 # client sends INITIAL now = 0.0 client.connect(SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280]) self.assertEqual(client.get_timer(), 1.0) # INITIAL is lost now = 1.0 client.handle_timer(now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280]) self.assertEqual(client.get_timer(), 3.0) # server receives INITIAL, sends INITIAL + HANDSHAKE now = 1.1 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280, 1084]) self.assertEqual(server.get_timer(), 2.1) self.assertEqual(len(server._loss.spaces[0].sent_packets), 1) self.assertEqual(len(server._loss.spaces[1].sent_packets), 2)</s>
path: tests.test_connection/QuicConnectionTest.test_connect_with_loss_3 | type: Modified | project: aiortc~aioquic | commit_hash: ab27bcc8272aeaa97a5a852402a0cd230005430b | commit_message: [packet builder] only pad INITIAL packets containing CRYPTO
# module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_3(self): <0> def datagram_sizes(items): <1> return [len(x[0]) for x in items] <2> <3> client_configuration = QuicConfiguration(is_client=True) <4> client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <5> <6> client = QuicConnection(configuration=client_configuration) <7> client._ack_delay = 0 <8> <9> server_configuration = QuicConfiguration(is_client=False) <10> server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) <11> <12> server = QuicConnection(configuration=server_configuration) <13> server._ack_delay = 0 <14> <15> # client sends INITIAL <16> now = 0.0 <17> client.connect(SERVER_ADDR, now=now) <18> items = client.datagrams_to_send(now=now) <19> self.assertEqual(datagram_sizes(items), [1280]) <20> self.assertEqual(client.get_timer(), 1.0) <21> <22> # server receives INITIAL, sends INITIAL + HANDSHAKE <23> now = 0.1 <24> server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) <25> items = server.datagrams_to_send(now=now) <26> self.assertEqual(datagram_sizes(items), [1280, 1084]) <27> self.assertEqual(server.get_timer(), 1.1) <28> self.assertEqual(len(server._loss.spaces[0].sent_packets), 1) <29> self.assertEqual(len(server._loss.spaces[1].sent_packets), 2) <30> <31> # client receives INITIAL + HANDSHAKE <32> now = 0.2 <33> client.receive_datagram(items[0][0], SERVER_ADDR, now=now) <34> client.receive_datagram(items[1][0], SERVER_ADDR, now=now) <35> </s>
===========below chunk 0=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_3(self): # offset: 1 self.assertEqual(datagram_sizes(items), [1280, 327]) self.assertAlmostEqual(client.get_timer(), 0.825) self.assertEqual(type(client.next_event()), events.ProtocolNegotiated) self.assertEqual(type(client.next_event()), events.HandshakeCompleted) self.assertEqual(type(client.next_event()), events.ConnectionIdIssued) # server completes handshake now = 0.3 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) server.receive_datagram(items[1][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [229]) self.assertAlmostEqual(server.get_timer(), 0.825) self.assertEqual(len(server._loss.spaces[0].sent_packets), 0) self.assertEqual(len(server._loss.spaces[1].sent_packets), 0) self.assertEqual(type(server.next_event()), events.ProtocolNegotiated) self.assertEqual(type(server.next_event()), events.HandshakeCompleted) self.assertEqual(type(server.next_event()), events.ConnectionIdIssued) # server PTO - 1-RTT PING now = 0.825 server.handle_timer(now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [29]) self.assertAlmostEqual(server.get_timer(), 1.875) # client receives PING, sends ACK now = 0.9 client.receive_datagram(items[0][0], SERVER_ADDR, now=now) items = client.datagram</s> ===========below chunk 1=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_3(self): # offset: 2 <s> client.receive_datagram(items[0][0], SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [32]) self.assertAlmostEqual(client.get_timer(), 0.825) # server receives ACK, retransmits HANDSHAKE_DONE now = 1.0 self.assertFalse(server._handshake_done_pending) server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) self.assertTrue(server._handshake_done_pending) items = server.datagrams_to_send(now=now) self.assertFalse(server._handshake_done_pending) self.assertEqual(datagram_sizes(items), [224]) ===========unchanged ref 0=========== at: tests.test_connection CLIENT_ADDR = ("1.2.3.4", 1234) SERVER_ADDR = ("2.3.4.5", 4433) at: tests.utils SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") SERVER_CERTFILE = os.path.join(os.path.dirname(__file__), "ssl_cert.pem") SERVER_KEYFILE = os.path.join(os.path.dirname(__file__), "ssl_key.pem") at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None assertTrue(expr: Any, msg: Any=...) -> None assertFalse(expr: Any, msg: Any=...) -> None assertAlmostEqual(first: float, second: float, places: Optional[int]=..., msg: Any=..., delta: Optional[float]=...) -> None assertAlmostEqual(first: datetime.datetime, second: datetime.datetime, places: Optional[int]=..., msg: Any=..., delta: Optional[datetime.timedelta]=...) -> None ===========changed ref 0=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_1(self): """ Check connection is established even in the client's INITIAL is lost. 
""" def datagram_sizes(items): return [len(x[0]) for x in items] client_configuration = QuicConfiguration(is_client=True) client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE) client = QuicConnection(configuration=client_configuration) client._ack_delay = 0 server_configuration = QuicConfiguration(is_client=False) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) server = QuicConnection(configuration=server_configuration) server._ack_delay = 0 # client sends INITIAL now = 0.0 client.connect(SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280]) self.assertEqual(client.get_timer(), 1.0) # INITIAL is lost now = 1.0 client.handle_timer(now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280]) self.assertEqual(client.get_timer(), 3.0) # server receives INITIAL, sends INITIAL + HANDSHAKE now = 1.1 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280, 1084]) self.assertEqual(server.get_timer(), 2.1) self.assertEqual(len(server._loss.spaces[0].sent_packets), 1) self.assertEqual(len(server._loss.spaces[1].sent_packets), 2)</s>
path: aioquic.asyncio.protocol/QuicConnectionProtocol._process_events | type: Modified | project: aiortc~aioquic | commit_hash: 1f6f4fc962a4ef6d137d47a9dd26f87652bcfa2f | commit_message: [asyncio] add an option to not make connect() wait for connection
<8>:<add> <add> # abort connection waiter <12>:<add> <add> # abort ping waiters <add> for waiter in self._ping_waiters.values(): <add> waiter.set_exception(ConnectionError) <add> self._ping_waiters.clear() <add>
# module: aioquic.asyncio.protocol class QuicConnectionProtocol(asyncio.DatagramProtocol): def _process_events(self) -> None: <0> event = self._quic.next_event() <1> while event is not None: <2> if isinstance(event, events.ConnectionIdIssued): <3> self._connection_id_issued_handler(event.connection_id) <4> elif isinstance(event, events.ConnectionIdRetired): <5> self._connection_id_retired_handler(event.connection_id) <6> elif isinstance(event, events.ConnectionTerminated): <7> self._connection_terminated_handler() <8> if self._connected_waiter is not None: <9> waiter = self._connected_waiter <10> self._connected_waiter = None <11> waiter.set_exception(ConnectionError) <12> self._closed.set() <13> elif isinstance(event, events.HandshakeCompleted): <14> if self._connected_waiter is not None: <15> waiter = self._connected_waiter <16> self._connected = True <17> self._connected_waiter = None <18> waiter.set_result(None) <19> elif isinstance(event, events.PingAcknowledged): <20> waiter = self._ping_waiters.pop(event.uid, None) <21> if waiter is not None: <22> waiter.set_result(None) <23> self.quic_event_received(event) <24> event = self._quic.next_event() <25>
===========unchanged ref 0=========== at: _asyncio.Future set_exception(exception, /) at: aioquic.asyncio.protocol.QuicConnectionProtocol.__init__ self._closed = asyncio.Event() self._connected = False self._connected_waiter: Optional[asyncio.Future[None]] = None self._ping_waiters: Dict[int, asyncio.Future[None]] = {} self._quic = quic self._connection_id_issued_handler: QuicConnectionIdHandler = lambda c: None self._connection_id_retired_handler: QuicConnectionIdHandler = lambda c: None self._connection_terminated_handler: Callable[[], None] = lambda: None at: aioquic.asyncio.protocol.QuicConnectionProtocol.wait_connected self._connected_waiter = self._loop.create_future() at: asyncio.futures.Future _state = _PENDING _result = None _exception = None _loop = None _source_traceback = None _cancel_message = None _cancelled_exc = None _asyncio_future_blocking = False __log_traceback = False __class_getitem__ = classmethod(GenericAlias) set_exception(exception: Union[type, BaseException], /) -> None __iter__ = __await__ # make compatible with 'yield from'. at: asyncio.locks.Event set() -> None
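A minimal asyncio sketch (not part of the dataset row above) of the waiter-abort pattern that this row's ground_truth diff introduces: when the connection terminates, every pending ping() future is failed with ConnectionError instead of hanging. The names ping_waiters and ping below are stand-ins for illustration, not aioquic APIs.

import asyncio

async def main() -> None:
    loop = asyncio.get_running_loop()
    # Stand-in for QuicConnectionProtocol._ping_waiters: uid -> future.
    ping_waiters = {0: loop.create_future()}

    async def ping(uid: int) -> None:
        # A caller of ping() blocks on its future until it is resolved.
        await ping_waiters[uid]

    task = asyncio.create_task(ping(0))
    await asyncio.sleep(0)  # let the ping() coroutine start waiting

    # The "abort ping waiters" step from the diff: fail every pending
    # future, then clear the map so no stale waiters remain.
    for waiter in ping_waiters.values():
        waiter.set_exception(ConnectionError)
    ping_waiters.clear()

    try:
        await task
    except ConnectionError:
        print("pending ping aborted after connection termination")

asyncio.run(main())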
path: aioquic.asyncio.client/connect | type: Modified | project: aiortc~aioquic | commit_hash: 1f6f4fc962a4ef6d137d47a9dd26f87652bcfa2f | commit_message: [asyncio] add an option to not make connect() wait for connection
# module: aioquic.asyncio.client @asynccontextmanager async def connect( host: str, port: int, *, configuration: Optional[QuicConfiguration] = None, create_protocol: Optional[Callable] = QuicConnectionProtocol, session_ticket_handler: Optional[SessionTicketHandler] = None, stream_handler: Optional[QuicStreamHandler] = None, + wait_connected: bool = True, ) -> AsyncGenerator[QuicConnectionProtocol, None]: <0> """ <1> Connect to a QUIC server at the given `host` and `port`. <2> <3> :meth:`connect()` returns an awaitable. Awaiting it yields a <4> :class:`~aioquic.asyncio.QuicConnectionProtocol` which can be used to <5> create streams. <6> <7> :func:`connect` also accepts the following optional arguments: <8> <9> * ``configuration`` is a :class:`~aioquic.quic.configuration.QuicConfiguration` <10> configuration object. <11> * ``create_protocol`` allows customizing the :class:`~asyncio.Protocol` that <12> manages the connection. It should be a callable or class accepting the same <13> arguments as :class:`~aioquic.asyncio.QuicConnectionProtocol` and returning <14> an instance of :class:`~aioquic.asyncio.QuicConnectionProtocol` or a subclass. <15> * ``session_ticket_handler`` is a callback which is invoked by the TLS <16> engine when a new session ticket is received. <17> * ``stream_handler`` is a callback which is invoked whenever a stream is <18> created. It must accept two arguments: a :class:`asyncio.StreamReader` <19> and a :class:`asyncio.StreamWriter`. <20> """ <21> loop = asyncio.get_event_loop() <22> <23> # if host is not an IP address, pass it to enable SNI <24> try: <25> ipaddress.ip_address(host) <26> server_name = None <27> except ValueError: <28> server_name = host <29> <30> # lookup remote address <31> </s>
===========below chunk 0=========== <s>: aioquic.asyncio.client @asynccontextmanager async def connect( host: str, port: int, *, configuration: Optional[QuicConfiguration] = None, create_protocol: Optional[Callable] = QuicConnectionProtocol, session_ticket_handler: Optional[SessionTicketHandler] = None, stream_handler: Optional[QuicStreamHandler] = None, + wait_connected: bool = True, ) -> AsyncGenerator[QuicConnectionProtocol, None]: # offset: 1 addr = infos[0][4] if len(addr) == 2: addr = ("::ffff:" + addr[0], addr[1], 0, 0) # prepare QUIC connection if configuration is None: configuration = QuicConfiguration(is_client=True) if server_name is not None: configuration.server_name = server_name connection = QuicConnection( configuration=configuration, session_ticket_handler=session_ticket_handler ) # connect _, protocol = await loop.create_datagram_endpoint( lambda: create_protocol(connection, stream_handler=stream_handler), local_addr=("::", 0), ) protocol = cast(QuicConnectionProtocol, protocol) protocol.connect(addr) await protocol.wait_connected() try: yield protocol finally: protocol.close() await protocol.wait_closed() ===========unchanged ref 0=========== at: _asyncio get_event_loop() at: asyncio.events get_event_loop() -> AbstractEventLoop at: asyncio.events.AbstractEventLoop getaddrinfo(host: Optional[str], port: Union[str, int, None], *, family: int=..., type: int=..., proto: int=..., flags: int=...) -> List[Tuple[AddressFamily, SocketKind, int, str, Union[Tuple[str, int], Tuple[str, int, int, int]]]] create_datagram_endpoint(protocol_factory: _ProtocolFactory, local_addr: Optional[Tuple[str, int]]=..., remote_addr: Optional[Tuple[str, int]]=..., *, family: int=..., proto: int=..., flags: int=..., reuse_address: Optional[bool]=..., reuse_port: Optional[bool]=..., allow_broadcast: Optional[bool]=..., sock: Optional[socket]=...) 
-> _TransProtPair at: ipaddress ip_address(address: object) -> Any at: socket SOCK_DGRAM: SocketKind at: typing cast(typ: Type[_T], val: Any) -> _T cast(typ: str, val: Any) -> Any cast(typ: object, val: Any) -> Any Callable = _CallableType(collections.abc.Callable, 2) AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): + def test_connect_timeout_no_wait_connected(self): + async def run_client_no_wait_connected(host, port, configuration): + configuration.load_verify_locations(cafile=SERVER_CACERTFILE) + async with connect( + host, port, configuration=configuration, wait_connected=False + ) as client: + await client.ping() + + with self.assertRaises(ConnectionError): + run( + run_client_no_wait_connected( + "127.0.0.1", + port=4400, + configuration=QuicConfiguration(is_client=True, idle_timeout=5), + ) + ) + ===========changed ref 1=========== # module: aioquic.asyncio.protocol class QuicConnectionProtocol(asyncio.DatagramProtocol): def _process_events(self) -> None: event = self._quic.next_event() while event is not None: if isinstance(event, events.ConnectionIdIssued): self._connection_id_issued_handler(event.connection_id) elif isinstance(event, events.ConnectionIdRetired): self._connection_id_retired_handler(event.connection_id) elif isinstance(event, events.ConnectionTerminated): self._connection_terminated_handler() + + # abort connection waiter if self._connected_waiter is not None: waiter = self._connected_waiter self._connected_waiter = None waiter.set_exception(ConnectionError) + + # abort ping waiters + for waiter in self._ping_waiters.values(): + waiter.set_exception(ConnectionError) + self._ping_waiters.clear() + self._closed.set() elif isinstance(event, events.HandshakeCompleted): if self._connected_waiter is not None: waiter = self._connected_waiter self._connected = True self._connected_waiter = None waiter.set_result(None) elif isinstance(event, events.PingAcknowledged): waiter = self._ping_waiters.pop(event.uid, None) if waiter is not None: waiter.set_result(None) self.quic_event_received(event) event = self._quic.next_event()
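A hedged usage sketch of the wait_connected flag documented in the row above; the host example.com and port 4433 are placeholders, and only calls that appear in this row (connect, wait_connected, ping) are used.

import asyncio
from aioquic.asyncio import connect
from aioquic.quic.configuration import QuicConfiguration

async def main() -> None:
    configuration = QuicConfiguration(is_client=True)
    # With wait_connected=False, connect() yields the protocol without
    # blocking on the handshake; the handshake continues in the background.
    async with connect(
        "example.com",  # placeholder host
        4433,           # placeholder port
        configuration=configuration,
        wait_connected=False,
    ) as protocol:
        await protocol.wait_connected()  # block only when the app needs it
        await protocol.ping()

asyncio.run(main())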
path: examples.interop/test_session_resumption | type: Modified | project: aiortc~aioquic | commit_hash: b93d071b7b462a4c610085b47a13c5bfc2fb9e83 | commit_message: [interop] sleep for a bit to receive session ticket
<16>:<add> # some servers don't send the ticket immediately <add> await asyncio.sleep(1) <add>
# module: examples.interop def test_session_resumption(server: Server, configuration: QuicConfiguration): <0> port = server.session_resumption_port or server.port <1> saved_ticket = None <2> <3> def session_ticket_handler(ticket): <4> nonlocal saved_ticket <5> saved_ticket = ticket <6> <7> # connect a first time, receive a ticket <8> async with connect( <9> server.host, <10> port, <11> configuration=configuration, <12> session_ticket_handler=session_ticket_handler, <13> ) as protocol: <14> await protocol.ping() <15> <16> # connect a second time, with the ticket <17> if saved_ticket is not None: <18> configuration.session_ticket = saved_ticket <19> async with connect(server.host, port, configuration=configuration) as protocol: <20> await protocol.ping() <21> <22> # check session was resumed <23> if protocol._quic.tls.session_resumed: <24> server.result |= Result.R <25> <26> # check early data was accepted <27> if protocol._quic.tls.early_data_accepted: <28> server.result |= Result.Z <29>
===========unchanged ref 0=========== at: asyncio.tasks sleep(delay: float, result: _T=..., *, loop: Optional[AbstractEventLoop]=...) -> Future[_T] at: examples.interop Result() Server(name: str, host: str, port: int=4433, http3: bool=True, retry_port: Optional[int]=4434, path: str="/", push_path: Optional[str]=None, result: Result=field(default_factory=lambda: Result(0)), session_resumption_port: Optional[int]=None, structured_logging: bool=False, throughput_file_suffix: str="", verify_mode: Optional[int]=None) at: examples.interop.Server name: str host: str port: int = 4433 http3: bool = True retry_port: Optional[int] = 4434 path: str = "/" push_path: Optional[str] = None result: Result = field(default_factory=lambda: Result(0)) session_resumption_port: Optional[int] = None structured_logging: bool = False throughput_file_suffix: str = "" verify_mode: Optional[int] = None
path: aioquic.tls/decode_public_key | type: Modified | project: aiortc~aioquic | commit_hash: 8336812cc60fc04bf3a7f3b05b6e6a2abd759d38 | commit_message: [tls] don't bomb when receiving Key Share Entry for unknown group
<2>:<add> elif key_share[0] in GROUP_TO_CURVE: <add> return ec.EllipticCurvePublicKey.from_encoded_point( <del> return ec.EllipticCurvePublicKey.from_encoded_point( <3>:<add> GROUP_TO_CURVE[key_share[0]](), key_share[1] <del> GROUP_TO_CURVE[key_share[0]](), key_share[1] <4>:<add> ) <add> else: <add> return None <del> )
# module: aioquic.tls def decode_public_key( key_share: KeyShareEntry, + ) -> Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey, None]: - ) -> Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey]: <0> if key_share[0] == Group.X25519: <1> return x25519.X25519PublicKey.from_public_bytes(key_share[1]) <2> return ec.EllipticCurvePublicKey.from_encoded_point( <3> GROUP_TO_CURVE[key_share[0]](), key_share[1] <4> ) <5>
===========unchanged ref 0=========== at: aioquic.tls Group(x: Union[str, bytes, bytearray], base: int) Group(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) KeyShareEntry = Tuple[int, bytes] GROUP_TO_CURVE: Dict = { Group.SECP256R1: ec.SECP256R1, Group.SECP384R1: ec.SECP384R1, Group.SECP521R1: ec.SECP521R1, } ===========changed ref 0=========== # module: aioquic.tls class Group(IntEnum): SECP256R1 = 0x0017 SECP384R1 = 0x0018 SECP521R1 = 0x0019 X25519 = 0x001D + GREASE = 0xAAAA ===========changed ref 1=========== # module: tests.test_tls class ContextTest(TestCase): + def test_handshake_with_grease_group(self): + client = self.create_client() + client._supported_groups = [tls.Group.GREASE, tls.Group.SECP256R1] + server = self.create_server() + + self._handshake(client, server) +
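A standalone sketch of the unknown-group handling shown above: after the change, a key share for an unrecognised group (such as the GREASE value 0xAAAA added to the Group enum in this row's context) yields None instead of a KeyError from GROUP_TO_CURVE. It reuses the cryptography primitives the function already imports; the decode helper and the local constants exist only for this example.

import os
from cryptography.hazmat.primitives.asymmetric import ec, x25519

# Group code points taken from the Group enum in this row's context.
GROUP_SECP256R1, GROUP_X25519, GROUP_GREASE = 0x0017, 0x001D, 0xAAAA
GROUP_TO_CURVE = {GROUP_SECP256R1: ec.SECP256R1}

def decode(key_share):
    group, data = key_share
    if group == GROUP_X25519:
        return x25519.X25519PublicKey.from_public_bytes(data)
    elif group in GROUP_TO_CURVE:
        return ec.EllipticCurvePublicKey.from_encoded_point(
            GROUP_TO_CURVE[group](), data
        )
    # Unknown (e.g. GREASE) group: tolerate it instead of raising.
    return None

assert decode((GROUP_GREASE, b"\x00")) is None          # unknown group ignored
assert decode((GROUP_X25519, os.urandom(32))) is not None  # known group decoded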
path: aioquic.tls/Context._client_send_hello | type: Modified | project: aiortc~aioquic | commit_hash: 8336812cc60fc04bf3a7f3b05b6e6a2abd759d38 | commit_message: [tls] don't bomb when receiving Key Share Entry for unknown group
<3>:<add> for group in self._supported_groups: <del> if Group.SECP256R1 in self._supported_groups: <4>:<add> if group == Group.SECP256R1: <add> self._ec_private_key = ec.generate_private_key( <del> self._ec_private_key = ec.generate_private_key( <5>:<add> GROUP_TO_CURVE[Group.SECP256R1](), default_backend() <del> GROUP_TO_CURVE[Group.SECP256R1](), default_backend() <6>:<add> ) <del> ) <7>:<add> key_share.append(encode_public_key(self._ec_private_key.public_key())) <del> key_share.append(encode_public_key(self._ec_private_key.public_key())) <8>:<add> supported_groups.append(Group.SECP256R1) <del> supported_groups.append(Group.SECP256R1) <9>:<del> <10>:<del> if Group.X25519 in self._supported_groups: <11>:<add> elif group == Group.X25519: <add> self._x25519_private_key = x25519.X25519PrivateKey.generate() <del> self._x25519_private_key = x25519.X25519PrivateKey.generate() <12>:<add> key_share.append( <add> encode_public_key(self._x25519_private_key.public_key()) <del> key_share.append(encode_public_key(self._x25519_private_key.public_key())) <13>:<add> ) <add> supported_groups.append(Group.X25519) <del> supported_groups.append(Group.X25519) <14>:<add> elif group == Group.GREASE: <add> key_share.append((Group.GREASE, b"\x00"))
# module: aioquic.tls class Context: def _client_send_hello(self, output_buf: Buffer) -> None: <0> key_share: List[KeyShareEntry] = [] <1> supported_groups: List[int] = [] <2> <3> if Group.SECP256R1 in self._supported_groups: <4> self._ec_private_key = ec.generate_private_key( <5> GROUP_TO_CURVE[Group.SECP256R1](), default_backend() <6> ) <7> key_share.append(encode_public_key(self._ec_private_key.public_key())) <8> supported_groups.append(Group.SECP256R1) <9> <10> if Group.X25519 in self._supported_groups: <11> self._x25519_private_key = x25519.X25519PrivateKey.generate() <12> key_share.append(encode_public_key(self._x25519_private_key.public_key())) <13> supported_groups.append(Group.X25519) <14> <15> assert len(key_share), "no key share entries" <16> <17> hello = ClientHello( <18> random=self.client_random, <19> session_id=self.session_id, <20> cipher_suites=[int(x) for x in self._cipher_suites], <21> compression_methods=self._compression_methods, <22> alpn_protocols=self._alpn_protocols, <23> key_share=key_share, <24> psk_key_exchange_modes=self._psk_key_exchange_modes <25> if (self.session_ticket or self.new_session_ticket_cb is not None) <26> else None, <27> server_name=self._server_name, <28> signature_algorithms=self._signature_algorithms, <29> supported_groups=supported_groups, <30> supported_versions=self._supported_versions, <31> other_extensions=self.handshake_extensions, <32> ) <33> <34> # PSK <35> if self.session_ticket and self.session_ticket</s>
===========below chunk 0=========== # module: aioquic.tls class Context: def _client_send_hello(self, output_buf: Buffer) -> None: # offset: 1 self._key_schedule_psk = KeySchedule(self.session_ticket.cipher_suite) self._key_schedule_psk.extract(self.session_ticket.resumption_secret) binder_key = self._key_schedule_psk.derive_secret(b"res binder") binder_length = self._key_schedule_psk.algorithm.digest_size # update hello if self.session_ticket.max_early_data_size is not None: hello.early_data = True hello.pre_shared_key = OfferedPsks( identities=[ (self.session_ticket.ticket, self.session_ticket.obfuscated_age) ], binders=[bytes(binder_length)], ) # serialize hello without binder tmp_buf = Buffer(capacity=1024) push_client_hello(tmp_buf, hello) # calculate binder hash_offset = tmp_buf.tell() - binder_length - 3 self._key_schedule_psk.update_hash(tmp_buf.data_slice(0, hash_offset)) binder = self._key_schedule_psk.finished_verify_data(binder_key) hello.pre_shared_key.binders[0] = binder self._key_schedule_psk.update_hash( tmp_buf.data_slice(hash_offset, hash_offset + 3) + binder ) # calculate early data key if hello.early_data: early_key = self._key_schedule_psk.derive_secret(b"c e traffic") self.update_traffic_key_cb( Direction.ENCRYPT, Epoch.ZERO_RTT, self._key_schedule_psk.cipher_suite, early_key, ) self._key_schedule_proxy = Key</s> ===========below chunk 1=========== # module: aioquic.tls class Context: def _client_send_hello(self, output_buf: Buffer) -> None: # offset: 2 <s>schedule_psk.cipher_suite, early_key, ) self._key_schedule_proxy = KeyScheduleProxy(self._cipher_suites) self._key_schedule_proxy.extract(None) with push_message(self._key_schedule_proxy, output_buf): push_client_hello(output_buf, hello) self._set_state(State.CLIENT_EXPECT_SERVER_HELLO) ===========unchanged ref 0=========== at: aioquic.tls Direction() Epoch() Group(x: Union[str, bytes, bytearray], base: int) Group(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) 
KeyShareEntry = Tuple[int, bytes] OfferedPsks(identities: List[PskIdentity], binders: List[bytes]) ClientHello(random: bytes, session_id: bytes, cipher_suites: List[int], compression_methods: List[int], alpn_protocols: Optional[List[str]]=None, early_data: bool=False, key_share: Optional[List[KeyShareEntry]]=None, pre_shared_key: Optional[OfferedPsks]=None, psk_key_exchange_modes: Optional[List[int]]=None, server_name: Optional[str]=None, signature_algorithms: Optional[List[int]]=None, supported_groups: Optional[List[int]]=None, supported_versions: Optional[List[int]]=None, other_extensions: List[Extension]=field(default_factory=list)) push_client_hello(buf: Buffer, hello: ClientHello) -> None KeySchedule(cipher_suite: CipherSuite) GROUP_TO_CURVE: Dict = { Group.SECP256R1: ec.SECP256R1, Group.SECP384R1: ec.SECP384R1, Group.SECP521R1: ec.SECP521R1, } encode_public_key(public_key: Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey]) -> KeyShareEntry at: aioquic.tls.ClientHello random: bytes session_id: bytes cipher_suites: List[int] compression_methods: List[int] alpn_protocols: Optional[List[str]] = None early_data: bool = False key_share: Optional[List[KeyShareEntry]] = None ===========unchanged ref 1=========== pre_shared_key: Optional[OfferedPsks] = None psk_key_exchange_modes: Optional[List[int]] = None server_name: Optional[str] = None signature_algorithms: Optional[List[int]] = None supported_groups: Optional[List[int]] = None supported_versions: Optional[List[int]] = None other_extensions: List[Extension] = field(default_factory=list) at: aioquic.tls.Context.__init__ self._alpn_protocols = alpn_protocols self.handshake_extensions: List[Extension] = [] self.session_ticket: Optional[SessionTicket] = None self._server_name = server_name self.new_session_ticket_cb: Optional[SessionTicketHandler] = None self.update_traffic_key_cb: Callable[ [Direction, Epoch, CipherSuite, bytes], None ] = lambda d, e, c, s: None self._cipher_suites = [ CipherSuite.AES_256_GCM_SHA384, CipherSuite.AES_128_GCM_SHA256, CipherSuite.CHACHA20_POLY1305_SHA256, ] self._compression_methods: List[int] = [CompressionMethod.NULL] self._psk_key_exchange_modes: List[int] = [PskKeyExchangeMode.PSK_DHE_KE] self._signature_algorithms: List[int] = [ SignatureAlgorithm.RSA_PSS_RSAE_SHA256, SignatureAlgorithm.ECDSA_SECP256R1_SHA256, SignatureAlgorithm.RSA_PKCS1_SHA256, SignatureAlgorithm.RSA_PKCS1_SHA1, ] self._supported_groups = [Group.SECP256R1] self._supported_versions = [TLS_VERSION_1_3] self._key_schedule_psk: Optional[KeySchedule] = None
path: aioquic.quic.packet_builder/QuicPacketBuilder.start_frame | type: Modified | project: aiortc~aioquic | commit_hash: aacf2c82e7706494308ff94080c6e002e6d5fe4f | commit_message: [packet] refine frame allocation size
<3>:<add> if self.remaining_buffer_space < capacity or ( <del> if self.remaining_buffer_space < required_bytes or ( <5>:<add> and self.remaining_flight_space < capacity <del> and self.remaining_flight_space < required_bytes <17>:<add> self._packet.delivery_handlers.append((handler, handler_args)) <del> self._packet.delivery_handlers.append((handler, args))
# module: aioquic.quic.packet_builder class QuicPacketBuilder: def start_frame( self, frame_type: int, + capacity: int = 1, handler: Optional[QuicDeliveryHandler] = None, + handler_args: Sequence[Any] = [], - args: Sequence[Any] = [], - required_bytes: int = 64, ) -> Buffer: <0> """ <1> Starts a new frame. <2> """ <3> if self.remaining_buffer_space < required_bytes or ( <4> frame_type not in NON_IN_FLIGHT_FRAME_TYPES <5> and self.remaining_flight_space < required_bytes <6> ): <7> raise QuicPacketBuilderStop <8> <9> self._buffer.push_uint_var(frame_type) <10> if frame_type not in NON_ACK_ELICITING_FRAME_TYPES: <11> self._packet.is_ack_eliciting = True <12> if frame_type not in NON_IN_FLIGHT_FRAME_TYPES: <13> self._packet.in_flight = True <14> if frame_type == QuicFrameType.CRYPTO: <15> self._packet.is_crypto_packet = True <16> if handler is not None: <17> self._packet.delivery_handlers.append((handler, args)) <18> return self._buffer <19>
===========unchanged ref 0=========== at: aioquic.quic.packet_builder QuicDeliveryHandler = Callable[..., None] QuicPacketBuilderStop(*args: object) at: aioquic.quic.packet_builder.QuicPacketBuilder.__init__ self._packet: Optional[QuicSentPacket] = None self._buffer = Buffer(PACKET_MAX_SIZE) at: aioquic.quic.packet_builder.QuicPacketBuilder._end_packet self._packet = None at: aioquic.quic.packet_builder.QuicPacketBuilder.start_packet self._packet = QuicSentPacket( epoch=epoch, in_flight=False, is_ack_eliciting=False, is_crypto_packet=False, packet_number=self._packet_number, packet_type=packet_type, ) at: aioquic.quic.packet_builder.QuicSentPacket epoch: Epoch in_flight: bool is_ack_eliciting: bool is_crypto_packet: bool packet_number: int packet_type: int sent_time: Optional[float] = None sent_bytes: int = 0 delivery_handlers: List[Tuple[QuicDeliveryHandler, Any]] = field( default_factory=list ) quic_logger_frames: List[Dict] = field(default_factory=list) at: typing Sequence = _alias(collections.abc.Sequence, 1)
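A toy model of the capacity check that start_frame performs after this change: a frame is only started when the remaining datagram buffer, and for in-flight frames the congestion-window allowance, can hold the declared capacity. The numbers below are invented for illustration and are not aioquic code; callers pass constants like ACK_FRAME_CAPACITY = 64 from the connection rows that follow.

# Simplified stand-in for the builder's limits.
PACKET_MAX_SIZE = 1280

def can_start_frame(bytes_written: int, flight_space: int,
                    capacity: int, in_flight: bool = True) -> bool:
    remaining_buffer = PACKET_MAX_SIZE - bytes_written
    if remaining_buffer < capacity:
        return False  # not enough room left in the datagram
    if in_flight and flight_space < capacity:
        return False  # would exceed what congestion control allows
    return True

ACK_FRAME_CAPACITY = 64
print(can_start_frame(bytes_written=1230, flight_space=500, capacity=ACK_FRAME_CAPACITY))  # False
print(can_start_frame(bytes_written=1000, flight_space=500, capacity=ACK_FRAME_CAPACITY))  # True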
path: aioquic.quic.connection/QuicConnection._write_ack_frame | type: Modified | project: aiortc~aioquic | commit_hash: aacf2c82e7706494308ff94080c6e002e6d5fe4f | commit_message: [packet] refine frame allocation size
<6>:<add> capacity=ACK_FRAME_CAPACITY, <add> handler=self._on_ack_delivery, <del> self._on_ack_delivery, <7>:<add> handler_args=(space, space.largest_received_packet), <del> (space, space.largest_received_packet),
# module: aioquic.quic.connection class QuicConnection: def _write_ack_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float ) -> None: <0> # calculate ACK delay <1> ack_delay = now - space.largest_received_time <2> ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent <3> <4> buf = builder.start_frame( <5> QuicFrameType.ACK, <6> self._on_ack_delivery, <7> (space, space.largest_received_packet), <8> ) <9> push_ack_frame(buf, space.ack_queue, ack_delay_encoded) <10> space.ack_at = None <11> <12> # log frame <13> if self._quic_logger is not None: <14> builder.quic_logger_frames.append( <15> self._quic_logger.encode_ack_frame( <16> ranges=space.ack_queue, delay=ack_delay <17> ) <18> ) <19>
===========unchanged ref 0=========== at: aioquic.quic.connection.QuicConnection _write_ping_frame(builder: QuicPacketBuilder, uids: List[int]=[]) _write_ping_frame(self, builder: QuicPacketBuilder, uids: List[int]=[]) at: aioquic.quic.connection.QuicConnection.__init__ self._handshake_complete = False self._local_ack_delay_exponent = 3 self._probe_pending = False at: aioquic.quic.connection.QuicConnection._handle_crypto_frame self._handshake_complete = True at: aioquic.quic.connection.QuicConnection._send_probe self._probe_pending = True at: aioquic.quic.connection.QuicConnection._write_application self._probe_pending = False ===========changed ref 0=========== # module: aioquic.quic.connection + # frame sizes + ACK_FRAME_CAPACITY = 64 # FIXME: this is arbitrary! + APPLICATION_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 # + reason length + HANDSHAKE_DONE_FRAME_CAPACITY = 1 + MAX_DATA_FRAME_CAPACITY = 1 + 8 + MAX_STREAM_DATA_FRAME_CAPACITY = 1 + 8 + 8 + NEW_CONNECTION_ID_FRAME_CAPACITY = 1 + 8 + 8 + 1 + 20 + 16 + PATH_CHALLENGE_FRAME_CAPACITY = 1 + 8 + PATH_RESPONSE_FRAME_CAPACITY = 1 + 8 + PING_FRAME_CAPACITY = 1 + RETIRE_CONNECTION_ID_CAPACITY = 1 + 8 + STREAMS_BLOCKED_CAPACITY = 1 + 8 + TRANSPORT_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 + 8 # + reason length + END_STATES = frozenset( [ QuicConnectionState.CLOSING, QuicConnectionState.DRAINING, QuicConnectionState.TERMINATED, ] ) ===========changed ref 1=========== # module: aioquic.quic.packet_builder class QuicPacketBuilder: def start_frame( self, frame_type: int, + capacity: int = 1, handler: Optional[QuicDeliveryHandler] = None, + handler_args: Sequence[Any] = [], - args: Sequence[Any] = [], - required_bytes: int = 64, ) -> Buffer: """ Starts a new frame. """ + if self.remaining_buffer_space < capacity or ( - if self.remaining_buffer_space < required_bytes or ( frame_type not in NON_IN_FLIGHT_FRAME_TYPES + and self.remaining_flight_space < capacity - and self.remaining_flight_space < required_bytes ): raise QuicPacketBuilderStop self._buffer.push_uint_var(frame_type) if frame_type not in NON_ACK_ELICITING_FRAME_TYPES: self._packet.is_ack_eliciting = True if frame_type not in NON_IN_FLIGHT_FRAME_TYPES: self._packet.in_flight = True if frame_type == QuicFrameType.CRYPTO: self._packet.is_crypto_packet = True if handler is not None: + self._packet.delivery_handlers.append((handler, handler_args)) - self._packet.delivery_handlers.append((handler, args)) return self._buffer
aioquic.quic.connection/QuicConnection._write_connection_close_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<1>:<add> reason_length = len(reason_bytes) <3>:<add> buf = builder.start_frame( <add> QuicFrameType.APPLICATION_CLOSE, <del> buf = builder.start_frame(QuicFrameType.APPLICATION_CLOSE) <4>:<add> capacity=APPLICATION_CLOSE_FRAME_CAPACITY + reason_length, <add> ) <5>:<add> buf.push_uint_var(reason_length) <del> buf.push_uint_var(len(reason_bytes)) <8>:<add> buf = builder.start_frame( <add> QuicFrameType.TRANSPORT_CLOSE, <del> buf = builder.start_frame(QuicFrameType.TRANSPORT_CLOSE) <9>:<add> capacity=TRANSPORT_CLOSE_FRAME_CAPACITY + reason_length, <add> ) <11>:<add> buf.push_uint_var(reason_length) <del> buf.push_uint_var(len(reason_bytes))
# module: aioquic.quic.connection class QuicConnection: def _write_connection_close_frame( self, builder: QuicPacketBuilder, error_code: int, frame_type: Optional[int], reason_phrase: str, ) -> None: <0> reason_bytes = reason_phrase.encode("utf8") <1> <2> if frame_type is None: <3> buf = builder.start_frame(QuicFrameType.APPLICATION_CLOSE) <4> buf.push_uint_var(error_code) <5> buf.push_uint_var(len(reason_bytes)) <6> buf.push_bytes(reason_bytes) <7> else: <8> buf = builder.start_frame(QuicFrameType.TRANSPORT_CLOSE) <9> buf.push_uint_var(error_code) <10> buf.push_uint_var(frame_type) <11> buf.push_uint_var(len(reason_bytes)) <12> buf.push_bytes(reason_bytes) <13> <14> # log frame <15> if self._quic_logger is not None: <16> builder.quic_logger_frames.append( <17> self._quic_logger.encode_connection_close_frame( <18> error_code=error_code, <19> frame_type=frame_type, <20> reason_phrase=reason_phrase, <21> ) <22> ) <23>
===========unchanged ref 0=========== at: aioquic.quic.connection ACK_FRAME_CAPACITY = 64 # FIXME: this is arbitrary! APPLICATION_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 # + reason length at: aioquic.quic.connection.QuicConnection _on_ack_delivery(delivery: QuicDeliveryState, space: QuicPacketSpace, highest_acked: int) -> None at: aioquic.quic.connection.QuicConnection.__init__ self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None at: aioquic.quic.connection.QuicConnection._write_ack_frame ack_delay = now - space.largest_received_time ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent buf = builder.start_frame( QuicFrameType.ACK, capacity=ACK_FRAME_CAPACITY, handler=self._on_ack_delivery, handler_args=(space, space.largest_received_packet), ) ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_ack_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float ) -> None: # calculate ACK delay ack_delay = now - space.largest_received_time ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent buf = builder.start_frame( QuicFrameType.ACK, + capacity=ACK_FRAME_CAPACITY, + handler=self._on_ack_delivery, - self._on_ack_delivery, + handler_args=(space, space.largest_received_packet), - (space, space.largest_received_packet), ) push_ack_frame(buf, space.ack_queue, ack_delay_encoded) space.ack_at = None # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_ack_frame( ranges=space.ack_queue, delay=ack_delay ) ) ===========changed ref 1=========== # module: aioquic.quic.connection + # frame sizes + ACK_FRAME_CAPACITY = 64 # FIXME: this is arbitrary! + APPLICATION_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 # + reason length + HANDSHAKE_DONE_FRAME_CAPACITY = 1 + MAX_DATA_FRAME_CAPACITY = 1 + 8 + MAX_STREAM_DATA_FRAME_CAPACITY = 1 + 8 + 8 + NEW_CONNECTION_ID_FRAME_CAPACITY = 1 + 8 + 8 + 1 + 20 + 16 + PATH_CHALLENGE_FRAME_CAPACITY = 1 + 8 + PATH_RESPONSE_FRAME_CAPACITY = 1 + 8 + PING_FRAME_CAPACITY = 1 + RETIRE_CONNECTION_ID_CAPACITY = 1 + 8 + STREAMS_BLOCKED_CAPACITY = 1 + 8 + TRANSPORT_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 + 8 # + reason length + END_STATES = frozenset( [ QuicConnectionState.CLOSING, QuicConnectionState.DRAINING, QuicConnectionState.TERMINATED, ] ) ===========changed ref 2=========== # module: aioquic.quic.packet_builder class QuicPacketBuilder: def start_frame( self, frame_type: int, + capacity: int = 1, handler: Optional[QuicDeliveryHandler] = None, + handler_args: Sequence[Any] = [], - args: Sequence[Any] = [], - required_bytes: int = 64, ) -> Buffer: """ Starts a new frame. 
""" + if self.remaining_buffer_space < capacity or ( - if self.remaining_buffer_space < required_bytes or ( frame_type not in NON_IN_FLIGHT_FRAME_TYPES + and self.remaining_flight_space < capacity - and self.remaining_flight_space < required_bytes ): raise QuicPacketBuilderStop self._buffer.push_uint_var(frame_type) if frame_type not in NON_ACK_ELICITING_FRAME_TYPES: self._packet.is_ack_eliciting = True if frame_type not in NON_IN_FLIGHT_FRAME_TYPES: self._packet.in_flight = True if frame_type == QuicFrameType.CRYPTO: self._packet.is_crypto_packet = True if handler is not None: + self._packet.delivery_handlers.append((handler, handler_args)) - self._packet.delivery_handlers.append((handler, args)) return self._buffer
aioquic.quic.connection/QuicConnection._write_connection_limits
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<8>:<add> QuicFrameType.MAX_DATA, <add> capacity=MAX_DATA_FRAME_CAPACITY, <add> handler=self._on_max_data_delivery, <del> QuicFrameType.MAX_DATA, self._on_max_data_delivery
# module: aioquic.quic.connection class QuicConnection: def _write_connection_limits( self, builder: QuicPacketBuilder, space: QuicPacketSpace ) -> None: <0> """ <1> Raise MAX_DATA if needed. <2> """ <3> if self._local_max_data_used * 2 > self._local_max_data: <4> self._local_max_data *= 2 <5> self._logger.debug("Local max_data raised to %d", self._local_max_data) <6> if self._local_max_data_sent != self._local_max_data: <7> buf = builder.start_frame( <8> QuicFrameType.MAX_DATA, self._on_max_data_delivery <9> ) <10> buf.push_uint_var(self._local_max_data) <11> self._local_max_data_sent = self._local_max_data <12> <13> # log frame <14> if self._quic_logger is not None: <15> builder.quic_logger_frames.append( <16> self._quic_logger.encode_max_data_frame(self._local_max_data) <17> ) <18>
===========unchanged ref 0=========== at: aioquic.quic.connection TRANSPORT_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 + 8 # + reason length at: aioquic.quic.connection.QuicConnection.__init__ self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None at: aioquic.quic.connection.QuicConnection._write_connection_close_frame reason_bytes = reason_phrase.encode("utf8") reason_length = len(reason_bytes) ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_ack_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float ) -> None: # calculate ACK delay ack_delay = now - space.largest_received_time ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent buf = builder.start_frame( QuicFrameType.ACK, + capacity=ACK_FRAME_CAPACITY, + handler=self._on_ack_delivery, - self._on_ack_delivery, + handler_args=(space, space.largest_received_packet), - (space, space.largest_received_packet), ) push_ack_frame(buf, space.ack_queue, ack_delay_encoded) space.ack_at = None # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_ack_frame( ranges=space.ack_queue, delay=ack_delay ) ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_close_frame( self, builder: QuicPacketBuilder, error_code: int, frame_type: Optional[int], reason_phrase: str, ) -> None: reason_bytes = reason_phrase.encode("utf8") + reason_length = len(reason_bytes) if frame_type is None: + buf = builder.start_frame( + QuicFrameType.APPLICATION_CLOSE, - buf = builder.start_frame(QuicFrameType.APPLICATION_CLOSE) + capacity=APPLICATION_CLOSE_FRAME_CAPACITY + reason_length, + ) buf.push_uint_var(error_code) + buf.push_uint_var(reason_length) - buf.push_uint_var(len(reason_bytes)) buf.push_bytes(reason_bytes) else: + buf = builder.start_frame( + QuicFrameType.TRANSPORT_CLOSE, - buf = builder.start_frame(QuicFrameType.TRANSPORT_CLOSE) + capacity=TRANSPORT_CLOSE_FRAME_CAPACITY + reason_length, + ) buf.push_uint_var(error_code) buf.push_uint_var(frame_type) + buf.push_uint_var(reason_length) - buf.push_uint_var(len(reason_bytes)) buf.push_bytes(reason_bytes) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_connection_close_frame( error_code=error_code, frame_type=frame_type, reason_phrase=reason_phrase, ) ) ===========changed ref 2=========== # module: aioquic.quic.connection + # frame sizes + ACK_FRAME_CAPACITY = 64 # FIXME: this is arbitrary! 
+ APPLICATION_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 # + reason length + HANDSHAKE_DONE_FRAME_CAPACITY = 1 + MAX_DATA_FRAME_CAPACITY = 1 + 8 + MAX_STREAM_DATA_FRAME_CAPACITY = 1 + 8 + 8 + NEW_CONNECTION_ID_FRAME_CAPACITY = 1 + 8 + 8 + 1 + 20 + 16 + PATH_CHALLENGE_FRAME_CAPACITY = 1 + 8 + PATH_RESPONSE_FRAME_CAPACITY = 1 + 8 + PING_FRAME_CAPACITY = 1 + RETIRE_CONNECTION_ID_CAPACITY = 1 + 8 + STREAMS_BLOCKED_CAPACITY = 1 + 8 + TRANSPORT_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 + 8 # + reason length + END_STATES = frozenset( [ QuicConnectionState.CLOSING, QuicConnectionState.DRAINING, QuicConnectionState.TERMINATED, ] ) ===========changed ref 3=========== # module: aioquic.quic.packet_builder class QuicPacketBuilder: def start_frame( self, frame_type: int, + capacity: int = 1, handler: Optional[QuicDeliveryHandler] = None, + handler_args: Sequence[Any] = [], - args: Sequence[Any] = [], - required_bytes: int = 64, ) -> Buffer: """ Starts a new frame. """ + if self.remaining_buffer_space < capacity or ( - if self.remaining_buffer_space < required_bytes or ( frame_type not in NON_IN_FLIGHT_FRAME_TYPES + and self.remaining_flight_space < capacity - and self.remaining_flight_space < required_bytes ): raise QuicPacketBuilderStop self._buffer.push_uint_var(frame_type) if frame_type not in NON_ACK_ELICITING_FRAME_TYPES: self._packet.is_ack_eliciting = True if frame_type not in NON_IN_FLIGHT_FRAME_TYPES: self._packet.in_flight = True if frame_type == QuicFrameType.CRYPTO: self._packet.is_crypto_packet = True if handler is not None: + self._packet.delivery_handlers.append((handler, handler_args)) - self._packet.delivery_handlers.append((handler, args)) return self._buffer
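The MAX_DATA record above couples two checks: double the local window once half of it has been consumed, then send a frame only if the advertised value actually changed. A standalone restatement (hypothetical helper, not aioquic's method):

# Sketch of the flow-control policy in _write_connection_limits, lifted out of
# the connection object so it can be run on its own.

def maybe_raise_max_data(local_max_data: int, used: int, sent: int):
    """Return (new_max_data, should_send_max_data_frame)."""
    if used * 2 > local_max_data:
        local_max_data *= 2                      # double once half the window is used
    return local_max_data, sent != local_max_data

print(maybe_raise_max_data(1048576, 600000, 1048576))  # -> (2097152, True)
print(maybe_raise_max_data(1048576, 100000, 1048576))  # -> (1048576, False)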
aioquic.quic.connection/QuicConnection._write_crypto_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<5>:<add> capacity=frame_overhead, <add> handler=stream.on_data_delivery, <del> stream.on_data_delivery, <6>:<add> handler_args=(frame.offset, frame.offset + len(frame.data)), <del> (frame.offset, frame.offset + len(frame.data)), <7>:<del> required_bytes=frame_overhead,
# module: aioquic.quic.connection class QuicConnection: def _write_crypto_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> bool: <0> frame_overhead = 3 + size_uint_var(stream.next_send_offset) <1> frame = stream.get_frame(builder.remaining_flight_space - frame_overhead) <2> if frame is not None: <3> buf = builder.start_frame( <4> QuicFrameType.CRYPTO, <5> stream.on_data_delivery, <6> (frame.offset, frame.offset + len(frame.data)), <7> required_bytes=frame_overhead, <8> ) <9> buf.push_uint_var(frame.offset) <10> buf.push_uint16(len(frame.data) | 0x4000) <11> buf.push_bytes(frame.data) <12> <13> # log frame <14> if self._quic_logger is not None: <15> builder.quic_logger_frames.append( <16> self._quic_logger.encode_crypto_frame(frame) <17> ) <18> return True <19> <20> return False <21>
===========unchanged ref 0=========== at: aioquic.quic.connection MAX_DATA_FRAME_CAPACITY = 1 + 8 at: aioquic.quic.connection.QuicConnection _on_max_data_delivery(delivery: QuicDeliveryState) -> None at: aioquic.quic.connection.QuicConnection.__init__ self._local_max_data = MAX_DATA_WINDOW self._local_max_data_sent = MAX_DATA_WINDOW self._local_max_data_used = 0 self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) self._logger = QuicConnectionAdapter( logger, {"id": dump_cid(logger_connection_id)} ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None at: aioquic.quic.connection.QuicConnection._handle_stream_frame self._local_max_data_used += newly_received at: aioquic.quic.connection.QuicConnection._on_max_data_delivery self._local_max_data_sent = 0 at: logging.LoggerAdapter logger: Logger extra: Mapping[str, Any] debug(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_limits( self, builder: QuicPacketBuilder, space: QuicPacketSpace ) -> None: """ Raise MAX_DATA if needed. """ if self._local_max_data_used * 2 > self._local_max_data: self._local_max_data *= 2 self._logger.debug("Local max_data raised to %d", self._local_max_data) if self._local_max_data_sent != self._local_max_data: buf = builder.start_frame( + QuicFrameType.MAX_DATA, + capacity=MAX_DATA_FRAME_CAPACITY, + handler=self._on_max_data_delivery, - QuicFrameType.MAX_DATA, self._on_max_data_delivery ) buf.push_uint_var(self._local_max_data) self._local_max_data_sent = self._local_max_data # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_max_data_frame(self._local_max_data) ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_ack_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float ) -> None: # calculate ACK delay ack_delay = now - space.largest_received_time ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent buf = builder.start_frame( QuicFrameType.ACK, + capacity=ACK_FRAME_CAPACITY, + handler=self._on_ack_delivery, - self._on_ack_delivery, + handler_args=(space, space.largest_received_packet), - (space, space.largest_received_packet), ) push_ack_frame(buf, space.ack_queue, ack_delay_encoded) space.ack_at = None # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_ack_frame( ranges=space.ack_queue, delay=ack_delay ) ) ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_close_frame( self, builder: QuicPacketBuilder, error_code: int, frame_type: Optional[int], reason_phrase: str, ) -> None: reason_bytes = reason_phrase.encode("utf8") + reason_length = len(reason_bytes) if frame_type is None: + buf = builder.start_frame( + QuicFrameType.APPLICATION_CLOSE, - buf = builder.start_frame(QuicFrameType.APPLICATION_CLOSE) + capacity=APPLICATION_CLOSE_FRAME_CAPACITY + reason_length, + ) buf.push_uint_var(error_code) + buf.push_uint_var(reason_length) - buf.push_uint_var(len(reason_bytes)) buf.push_bytes(reason_bytes) else: + buf = builder.start_frame( + QuicFrameType.TRANSPORT_CLOSE, - buf = 
builder.start_frame(QuicFrameType.TRANSPORT_CLOSE) + capacity=TRANSPORT_CLOSE_FRAME_CAPACITY + reason_length, + ) buf.push_uint_var(error_code) buf.push_uint_var(frame_type) + buf.push_uint_var(reason_length) - buf.push_uint_var(len(reason_bytes)) buf.push_bytes(reason_bytes) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_connection_close_frame( error_code=error_code, frame_type=frame_type, reason_phrase=reason_phrase, ) ) ===========changed ref 3=========== # module: aioquic.quic.connection + # frame sizes + ACK_FRAME_CAPACITY = 64 # FIXME: this is arbitrary! + APPLICATION_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 # + reason length + HANDSHAKE_DONE_FRAME_CAPACITY = 1 + MAX_DATA_FRAME_CAPACITY = 1 + 8 + MAX_STREAM_DATA_FRAME_CAPACITY = 1 + 8 + 8 + NEW_CONNECTION_ID_FRAME_CAPACITY = 1 + 8 + 8 + 1 + 20 + 16 + PATH_CHALLENGE_FRAME_CAPACITY = 1 + 8 + PATH_RESPONSE_FRAME_CAPACITY = 1 + 8 + PING_FRAME_CAPACITY = 1 + RETIRE_CONNECTION_ID_CAPACITY = 1 + 8 + STREAMS_BLOCKED_CAPACITY = 1 + 8 + TRANSPORT_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 + 8 # + reason length + END_STATES = frozenset( [ QuicConnectionState.CLOSING, QuicConnectionState.DRAINING, QuicConnectionState.TERMINATED, ] )
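frame_overhead for CRYPTO is 1 type byte + 2 length bytes (the length is always pushed as a 2-byte varint via the 0x4000 mask) + the offset varint. The sketch below re-implements QUIC's varint sizing to make the numbers concrete; aioquic ships its own size_uint_var helper, so this copy is for illustration only.

# Assumed re-implementation of QUIC variable-length integer sizing (RFC 9000):
# 1, 2, 4 or 8 bytes depending on the value.

def size_uint_var(value: int) -> int:
    if value <= 0x3F:
        return 1
    elif value <= 0x3FFF:
        return 2
    elif value <= 0x3FFFFFFF:
        return 4
    elif value <= 0x3FFFFFFFFFFFFFFF:
        return 8
    raise ValueError("integer is too big for a variable-length integer")

def crypto_frame_overhead(next_send_offset: int) -> int:
    # 1 (frame type) + 2 (length pushed as uint16 | 0x4000) + offset varint
    return 3 + size_uint_var(next_send_offset)

print(crypto_frame_overhead(0))      # -> 4
print(crypto_frame_overhead(20000))  # -> 7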
aioquic.quic.connection/QuicConnection._write_datagram_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<9>:<add> buf = builder.start_frame(frame_type, capacity=frame_size) <del> buf = builder.start_frame(frame_type, required_bytes=frame_size)
# module: aioquic.quic.connection class QuicConnection: def _write_datagram_frame( self, builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType ) -> bool: <0> """ <1> Write a DATAGRAM frame. <2> <3> Returns True if the frame was processed, False otherwise. <4> """ <5> assert frame_type == QuicFrameType.DATAGRAM_WITH_LENGTH <6> length = len(data) <7> frame_size = 1 + size_uint_var(length) + length <8> <9> buf = builder.start_frame(frame_type, required_bytes=frame_size) <10> buf.push_uint_var(length) <11> buf.push_bytes(data) <12> <13> # log frame <14> if self._quic_logger is not None: <15> builder.quic_logger_frames.append( <16> self._quic_logger.encode_datagram_frame(length=length) <17> ) <18> <19> return True <20>
===========unchanged ref 0=========== at: aioquic.quic.connection.QuicConnection.__init__ self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_crypto_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> bool: frame_overhead = 3 + size_uint_var(stream.next_send_offset) frame = stream.get_frame(builder.remaining_flight_space - frame_overhead) if frame is not None: buf = builder.start_frame( QuicFrameType.CRYPTO, + capacity=frame_overhead, + handler=stream.on_data_delivery, - stream.on_data_delivery, + handler_args=(frame.offset, frame.offset + len(frame.data)), - (frame.offset, frame.offset + len(frame.data)), - required_bytes=frame_overhead, ) buf.push_uint_var(frame.offset) buf.push_uint16(len(frame.data) | 0x4000) buf.push_bytes(frame.data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_crypto_frame(frame) ) return True return False ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_limits( self, builder: QuicPacketBuilder, space: QuicPacketSpace ) -> None: """ Raise MAX_DATA if needed. """ if self._local_max_data_used * 2 > self._local_max_data: self._local_max_data *= 2 self._logger.debug("Local max_data raised to %d", self._local_max_data) if self._local_max_data_sent != self._local_max_data: buf = builder.start_frame( + QuicFrameType.MAX_DATA, + capacity=MAX_DATA_FRAME_CAPACITY, + handler=self._on_max_data_delivery, - QuicFrameType.MAX_DATA, self._on_max_data_delivery ) buf.push_uint_var(self._local_max_data) self._local_max_data_sent = self._local_max_data # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_max_data_frame(self._local_max_data) ) ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_ack_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float ) -> None: # calculate ACK delay ack_delay = now - space.largest_received_time ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent buf = builder.start_frame( QuicFrameType.ACK, + capacity=ACK_FRAME_CAPACITY, + handler=self._on_ack_delivery, - self._on_ack_delivery, + handler_args=(space, space.largest_received_packet), - (space, space.largest_received_packet), ) push_ack_frame(buf, space.ack_queue, ack_delay_encoded) space.ack_at = None # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_ack_frame( ranges=space.ack_queue, delay=ack_delay ) ) ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_close_frame( self, builder: QuicPacketBuilder, error_code: int, frame_type: Optional[int], reason_phrase: str, ) -> None: reason_bytes = reason_phrase.encode("utf8") + reason_length = len(reason_bytes) if frame_type is None: + buf = builder.start_frame( + QuicFrameType.APPLICATION_CLOSE, - buf = builder.start_frame(QuicFrameType.APPLICATION_CLOSE) + capacity=APPLICATION_CLOSE_FRAME_CAPACITY + reason_length, + ) buf.push_uint_var(error_code) + buf.push_uint_var(reason_length) - buf.push_uint_var(len(reason_bytes)) 
buf.push_bytes(reason_bytes) else: + buf = builder.start_frame( + QuicFrameType.TRANSPORT_CLOSE, - buf = builder.start_frame(QuicFrameType.TRANSPORT_CLOSE) + capacity=TRANSPORT_CLOSE_FRAME_CAPACITY + reason_length, + ) buf.push_uint_var(error_code) buf.push_uint_var(frame_type) + buf.push_uint_var(reason_length) - buf.push_uint_var(len(reason_bytes)) buf.push_bytes(reason_bytes) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_connection_close_frame( error_code=error_code, frame_type=frame_type, reason_phrase=reason_phrase, ) ) ===========changed ref 4=========== # module: aioquic.quic.connection + # frame sizes + ACK_FRAME_CAPACITY = 64 # FIXME: this is arbitrary! + APPLICATION_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 # + reason length + HANDSHAKE_DONE_FRAME_CAPACITY = 1 + MAX_DATA_FRAME_CAPACITY = 1 + 8 + MAX_STREAM_DATA_FRAME_CAPACITY = 1 + 8 + 8 + NEW_CONNECTION_ID_FRAME_CAPACITY = 1 + 8 + 8 + 1 + 20 + 16 + PATH_CHALLENGE_FRAME_CAPACITY = 1 + 8 + PATH_RESPONSE_FRAME_CAPACITY = 1 + 8 + PING_FRAME_CAPACITY = 1 + RETIRE_CONNECTION_ID_CAPACITY = 1 + 8 + STREAMS_BLOCKED_CAPACITY = 1 + 8 + TRANSPORT_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 + 8 # + reason length + END_STATES = frozenset( [ QuicConnectionState.CLOSING, QuicConnectionState.DRAINING, QuicConnectionState.TERMINATED, ] )
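Unlike the *_CAPACITY constants, the DATAGRAM budget is exact because the payload is known up front: 1 type byte + the length varint + the data itself. A self-contained check (varint sizing repeated so the snippet runs on its own):

# Exact on-the-wire size of a DATAGRAM_WITH_LENGTH frame, matching the
# frame_size computation above.

def size_uint_var(value: int) -> int:
    for limit, size in ((0x3F, 1), (0x3FFF, 2),
                        (0x3FFFFFFF, 4), (0x3FFFFFFFFFFFFFFF, 8)):
        if value <= limit:
            return size
    raise ValueError("integer is too big for a variable-length integer")

def datagram_frame_size(data: bytes) -> int:
    length = len(data)
    return 1 + size_uint_var(length) + length  # type byte + length varint + payload

print(datagram_frame_size(b"x" * 10))   # -> 1 + 1 + 10 = 12
print(datagram_frame_size(b"x" * 100))  # -> 1 + 2 + 100 = 103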
aioquic.quic.connection/QuicConnection._write_handshake_done_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<1>:<add> QuicFrameType.HANDSHAKE_DONE, <add> capacity=HANDSHAKE_DONE_FRAME_CAPACITY, <add> handler=self._on_handshake_done_delivery, <del> QuicFrameType.HANDSHAKE_DONE, self._on_handshake_done_delivery,
# module: aioquic.quic.connection class QuicConnection: def _write_handshake_done_frame(self, builder: QuicPacketBuilder) -> None: <0> builder.start_frame( <1> QuicFrameType.HANDSHAKE_DONE, self._on_handshake_done_delivery, <2> ) <3> <4> # log frame <5> if self._quic_logger is not None: <6> builder.quic_logger_frames.append( <7> self._quic_logger.encode_handshake_done_frame() <8> ) <9>
===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_datagram_frame( self, builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType ) -> bool: """ Write a DATAGRAM frame. Returns True if the frame was processed, False otherwise. """ assert frame_type == QuicFrameType.DATAGRAM_WITH_LENGTH length = len(data) frame_size = 1 + size_uint_var(length) + length + buf = builder.start_frame(frame_type, capacity=frame_size) - buf = builder.start_frame(frame_type, required_bytes=frame_size) buf.push_uint_var(length) buf.push_bytes(data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_datagram_frame(length=length) ) return True ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_crypto_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> bool: frame_overhead = 3 + size_uint_var(stream.next_send_offset) frame = stream.get_frame(builder.remaining_flight_space - frame_overhead) if frame is not None: buf = builder.start_frame( QuicFrameType.CRYPTO, + capacity=frame_overhead, + handler=stream.on_data_delivery, - stream.on_data_delivery, + handler_args=(frame.offset, frame.offset + len(frame.data)), - (frame.offset, frame.offset + len(frame.data)), - required_bytes=frame_overhead, ) buf.push_uint_var(frame.offset) buf.push_uint16(len(frame.data) | 0x4000) buf.push_bytes(frame.data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_crypto_frame(frame) ) return True return False ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_limits( self, builder: QuicPacketBuilder, space: QuicPacketSpace ) -> None: """ Raise MAX_DATA if needed. 
""" if self._local_max_data_used * 2 > self._local_max_data: self._local_max_data *= 2 self._logger.debug("Local max_data raised to %d", self._local_max_data) if self._local_max_data_sent != self._local_max_data: buf = builder.start_frame( + QuicFrameType.MAX_DATA, + capacity=MAX_DATA_FRAME_CAPACITY, + handler=self._on_max_data_delivery, - QuicFrameType.MAX_DATA, self._on_max_data_delivery ) buf.push_uint_var(self._local_max_data) self._local_max_data_sent = self._local_max_data # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_max_data_frame(self._local_max_data) ) ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_ack_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float ) -> None: # calculate ACK delay ack_delay = now - space.largest_received_time ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent buf = builder.start_frame( QuicFrameType.ACK, + capacity=ACK_FRAME_CAPACITY, + handler=self._on_ack_delivery, - self._on_ack_delivery, + handler_args=(space, space.largest_received_packet), - (space, space.largest_received_packet), ) push_ack_frame(buf, space.ack_queue, ack_delay_encoded) space.ack_at = None # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_ack_frame( ranges=space.ack_queue, delay=ack_delay ) ) ===========changed ref 4=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_close_frame( self, builder: QuicPacketBuilder, error_code: int, frame_type: Optional[int], reason_phrase: str, ) -> None: reason_bytes = reason_phrase.encode("utf8") + reason_length = len(reason_bytes) if frame_type is None: + buf = builder.start_frame( + QuicFrameType.APPLICATION_CLOSE, - buf = builder.start_frame(QuicFrameType.APPLICATION_CLOSE) + capacity=APPLICATION_CLOSE_FRAME_CAPACITY + reason_length, + ) buf.push_uint_var(error_code) + buf.push_uint_var(reason_length) - buf.push_uint_var(len(reason_bytes)) buf.push_bytes(reason_bytes) else: + buf = builder.start_frame( + QuicFrameType.TRANSPORT_CLOSE, - buf = builder.start_frame(QuicFrameType.TRANSPORT_CLOSE) + capacity=TRANSPORT_CLOSE_FRAME_CAPACITY + reason_length, + ) buf.push_uint_var(error_code) buf.push_uint_var(frame_type) + buf.push_uint_var(reason_length) - buf.push_uint_var(len(reason_bytes)) buf.push_bytes(reason_bytes) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_connection_close_frame( error_code=error_code, frame_type=frame_type, reason_phrase=reason_phrase, ) ) ===========changed ref 5=========== # module: aioquic.quic.connection + # frame sizes + ACK_FRAME_CAPACITY = 64 # FIXME: this is arbitrary! + APPLICATION_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 # + reason length + HANDSHAKE_DONE_FRAME_CAPACITY = 1 + MAX_DATA_FRAME_CAPACITY = 1 + 8 + MAX_STREAM_DATA_FRAME_CAPACITY = 1 + 8 + 8 + NEW_CONNECTION_ID_FRAME_CAPACITY = 1 + 8 + 8 + 1 + 20 + 16 + PATH_CHALLENGE_FRAME_CAPACITY = 1 + 8 + PATH_RESPONSE_FRAME_CAPACITY = 1 + 8 + PING_FRAME_CAPACITY = 1 + RETIRE_CONNECTION_ID_CAPACITY = 1 + 8 + STREAMS_BLOCKED_CAPACITY = 1 + 8 + TRANSPORT_CLOSE_FRAME_CAPACITY = 1 + 8 + 8 + 8 # + reason length + END_STATES = frozenset( [ QuicConnectionState.CLOSING, QuicConnectionState.DRAINING, QuicConnectionState.TERMINATED, ] )
aioquic.quic.connection/QuicConnection._write_new_connection_id_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<4>:<add> capacity=NEW_CONNECTION_ID_FRAME_CAPACITY, <add> handler=self._on_new_connection_id_delivery, <del> self._on_new_connection_id_delivery, <5>:<add> handler_args=(connection_id,), <del> (connection_id,),
# module: aioquic.quic.connection class QuicConnection: def _write_new_connection_id_frame( self, builder: QuicPacketBuilder, connection_id: QuicConnectionId ) -> None: <0> retire_prior_to = 0 # FIXME <1> <2> buf = builder.start_frame( <3> QuicFrameType.NEW_CONNECTION_ID, <4> self._on_new_connection_id_delivery, <5> (connection_id,), <6> ) <7> buf.push_uint_var(connection_id.sequence_number) <8> buf.push_uint_var(retire_prior_to) <9> buf.push_uint8(len(connection_id.cid)) <10> buf.push_bytes(connection_id.cid) <11> buf.push_bytes(connection_id.stateless_reset_token) <12> <13> connection_id.was_sent = True <14> self._events.append(events.ConnectionIdIssued(connection_id=connection_id.cid)) <15> <16> # log frame <17> if self._quic_logger is not None: <18> builder.quic_logger_frames.append( <19> self._quic_logger.encode_new_connection_id_frame( <20> connection_id=connection_id.cid, <21> retire_prior_to=retire_prior_to, <22> sequence_number=connection_id.sequence_number, <23> stateless_reset_token=connection_id.stateless_reset_token, <24> ) <25> ) <26>
===========unchanged ref 0=========== at: aioquic.quic.connection HANDSHAKE_DONE_FRAME_CAPACITY = 1 QuicConnectionId(cid: bytes, sequence_number: int, stateless_reset_token: bytes=b"", was_sent: bool=False) at: aioquic.quic.connection.QuicConnection _on_handshake_done_delivery(delivery: QuicDeliveryState) -> None at: aioquic.quic.connection.QuicConnection.__init__ self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None at: aioquic.quic.connection.QuicConnection._write_datagram_frame length = len(data) frame_size = 1 + size_uint_var(length) + length ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_handshake_done_frame(self, builder: QuicPacketBuilder) -> None: builder.start_frame( + QuicFrameType.HANDSHAKE_DONE, + capacity=HANDSHAKE_DONE_FRAME_CAPACITY, + handler=self._on_handshake_done_delivery, - QuicFrameType.HANDSHAKE_DONE, self._on_handshake_done_delivery, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_handshake_done_frame() ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_datagram_frame( self, builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType ) -> bool: """ Write a DATAGRAM frame. Returns True if the frame was processed, False otherwise. """ assert frame_type == QuicFrameType.DATAGRAM_WITH_LENGTH length = len(data) frame_size = 1 + size_uint_var(length) + length + buf = builder.start_frame(frame_type, capacity=frame_size) - buf = builder.start_frame(frame_type, required_bytes=frame_size) buf.push_uint_var(length) buf.push_bytes(data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_datagram_frame(length=length) ) return True ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_crypto_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> bool: frame_overhead = 3 + size_uint_var(stream.next_send_offset) frame = stream.get_frame(builder.remaining_flight_space - frame_overhead) if frame is not None: buf = builder.start_frame( QuicFrameType.CRYPTO, + capacity=frame_overhead, + handler=stream.on_data_delivery, - stream.on_data_delivery, + handler_args=(frame.offset, frame.offset + len(frame.data)), - (frame.offset, frame.offset + len(frame.data)), - required_bytes=frame_overhead, ) buf.push_uint_var(frame.offset) buf.push_uint16(len(frame.data) | 0x4000) buf.push_bytes(frame.data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_crypto_frame(frame) ) return True return False ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_limits( self, builder: QuicPacketBuilder, space: QuicPacketSpace ) -> None: """ Raise MAX_DATA if needed. 
""" if self._local_max_data_used * 2 > self._local_max_data: self._local_max_data *= 2 self._logger.debug("Local max_data raised to %d", self._local_max_data) if self._local_max_data_sent != self._local_max_data: buf = builder.start_frame( + QuicFrameType.MAX_DATA, + capacity=MAX_DATA_FRAME_CAPACITY, + handler=self._on_max_data_delivery, - QuicFrameType.MAX_DATA, self._on_max_data_delivery ) buf.push_uint_var(self._local_max_data) self._local_max_data_sent = self._local_max_data # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_max_data_frame(self._local_max_data) ) ===========changed ref 4=========== # module: aioquic.quic.connection class QuicConnection: def _write_ack_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float ) -> None: # calculate ACK delay ack_delay = now - space.largest_received_time ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent buf = builder.start_frame( QuicFrameType.ACK, + capacity=ACK_FRAME_CAPACITY, + handler=self._on_ack_delivery, - self._on_ack_delivery, + handler_args=(space, space.largest_received_packet), - (space, space.largest_received_packet), ) push_ack_frame(buf, space.ack_queue, ack_delay_encoded) space.ack_at = None # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_ack_frame( ranges=space.ack_queue, delay=ack_delay ) )
aioquic.quic.connection/QuicConnection._write_path_challenge_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<0>:<add> buf = builder.start_frame( <add> QuicFrameType.PATH_CHALLENGE, capacity=PATH_CHALLENGE_FRAME_CAPACITY <add> ) <del> buf = builder.start_frame(QuicFrameType.PATH_CHALLENGE)
# module: aioquic.quic.connection class QuicConnection: def _write_path_challenge_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: <0> buf = builder.start_frame(QuicFrameType.PATH_CHALLENGE) <1> buf.push_bytes(challenge) <2> <3> # log frame <4> if self._quic_logger is not None: <5> builder.quic_logger_frames.append( <6> self._quic_logger.encode_path_challenge_frame(data=challenge) <7> ) <8>
===========unchanged ref 0=========== at: aioquic.quic.connection NEW_CONNECTION_ID_FRAME_CAPACITY = 1 + 8 + 8 + 1 + 20 + 16 at: aioquic.quic.connection.QuicConnection _on_new_connection_id_delivery(delivery: QuicDeliveryState, connection_id: QuicConnectionId) -> None at: aioquic.quic.connection.QuicConnection._write_new_connection_id_frame retire_prior_to = 0 # FIXME at: aioquic.quic.connection.QuicConnectionId cid: bytes sequence_number: int stateless_reset_token: bytes = b"" was_sent: bool = False ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_handshake_done_frame(self, builder: QuicPacketBuilder) -> None: builder.start_frame( + QuicFrameType.HANDSHAKE_DONE, + capacity=HANDSHAKE_DONE_FRAME_CAPACITY, + handler=self._on_handshake_done_delivery, - QuicFrameType.HANDSHAKE_DONE, self._on_handshake_done_delivery, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_handshake_done_frame() ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_datagram_frame( self, builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType ) -> bool: """ Write a DATAGRAM frame. Returns True if the frame was processed, False otherwise. """ assert frame_type == QuicFrameType.DATAGRAM_WITH_LENGTH length = len(data) frame_size = 1 + size_uint_var(length) + length + buf = builder.start_frame(frame_type, capacity=frame_size) - buf = builder.start_frame(frame_type, required_bytes=frame_size) buf.push_uint_var(length) buf.push_bytes(data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_datagram_frame(length=length) ) return True ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_crypto_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> bool: frame_overhead = 3 + size_uint_var(stream.next_send_offset) frame = stream.get_frame(builder.remaining_flight_space - frame_overhead) if frame is not None: buf = builder.start_frame( QuicFrameType.CRYPTO, + capacity=frame_overhead, + handler=stream.on_data_delivery, - stream.on_data_delivery, + handler_args=(frame.offset, frame.offset + len(frame.data)), - (frame.offset, frame.offset + len(frame.data)), - required_bytes=frame_overhead, ) buf.push_uint_var(frame.offset) buf.push_uint16(len(frame.data) | 0x4000) buf.push_bytes(frame.data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_crypto_frame(frame) ) return True return False ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_new_connection_id_frame( self, builder: QuicPacketBuilder, connection_id: QuicConnectionId ) -> None: retire_prior_to = 0 # FIXME buf = builder.start_frame( QuicFrameType.NEW_CONNECTION_ID, + capacity=NEW_CONNECTION_ID_FRAME_CAPACITY, + handler=self._on_new_connection_id_delivery, - self._on_new_connection_id_delivery, + handler_args=(connection_id,), - (connection_id,), ) buf.push_uint_var(connection_id.sequence_number) buf.push_uint_var(retire_prior_to) buf.push_uint8(len(connection_id.cid)) buf.push_bytes(connection_id.cid) buf.push_bytes(connection_id.stateless_reset_token) connection_id.was_sent = True self._events.append(events.ConnectionIdIssued(connection_id=connection_id.cid)) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( 
self._quic_logger.encode_new_connection_id_frame( connection_id=connection_id.cid, retire_prior_to=retire_prior_to, sequence_number=connection_id.sequence_number, stateless_reset_token=connection_id.stateless_reset_token, ) ) ===========changed ref 4=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_limits( self, builder: QuicPacketBuilder, space: QuicPacketSpace ) -> None: """ Raise MAX_DATA if needed. """ if self._local_max_data_used * 2 > self._local_max_data: self._local_max_data *= 2 self._logger.debug("Local max_data raised to %d", self._local_max_data) if self._local_max_data_sent != self._local_max_data: buf = builder.start_frame( + QuicFrameType.MAX_DATA, + capacity=MAX_DATA_FRAME_CAPACITY, + handler=self._on_max_data_delivery, - QuicFrameType.MAX_DATA, self._on_max_data_delivery ) buf.push_uint_var(self._local_max_data) self._local_max_data_sent = self._local_max_data # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_max_data_frame(self._local_max_data) ) ===========changed ref 5=========== # module: aioquic.quic.connection class QuicConnection: def _write_ack_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float ) -> None: # calculate ACK delay ack_delay = now - space.largest_received_time ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent buf = builder.start_frame( QuicFrameType.ACK, + capacity=ACK_FRAME_CAPACITY, + handler=self._on_ack_delivery, - self._on_ack_delivery, + handler_args=(space, space.largest_received_packet), - (space, space.largest_received_packet), ) push_ack_frame(buf, space.ack_queue, ack_delay_encoded) space.ack_at = None # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_ack_frame( ranges=space.ack_queue, delay=ack_delay ) )
aioquic.quic.connection/QuicConnection._write_path_response_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<0>:<add> buf = builder.start_frame( <add> QuicFrameType.PATH_RESPONSE, capacity=PATH_RESPONSE_FRAME_CAPACITY <add> ) <del> buf = builder.start_frame(QuicFrameType.PATH_RESPONSE)
# module: aioquic.quic.connection class QuicConnection: def _write_path_response_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: <0> buf = builder.start_frame(QuicFrameType.PATH_RESPONSE) <1> buf.push_bytes(challenge) <2> <3> # log frame <4> if self._quic_logger is not None: <5> builder.quic_logger_frames.append( <6> self._quic_logger.encode_path_response_frame(data=challenge) <7> ) <8>
===========unchanged ref 0=========== at: aioquic.quic.connection.QuicConnection.__init__ self._events: Deque[events.QuicEvent] = deque() self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None at: aioquic.quic.connection.QuicConnection._write_new_connection_id_frame retire_prior_to = 0 # FIXME at: aioquic.quic.connection.QuicConnectionId cid: bytes sequence_number: int was_sent: bool = False at: collections.deque append(x: _T) -> None ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_challenge_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_CHALLENGE, capacity=PATH_CHALLENGE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_CHALLENGE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_challenge_frame(data=challenge) ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_handshake_done_frame(self, builder: QuicPacketBuilder) -> None: builder.start_frame( + QuicFrameType.HANDSHAKE_DONE, + capacity=HANDSHAKE_DONE_FRAME_CAPACITY, + handler=self._on_handshake_done_delivery, - QuicFrameType.HANDSHAKE_DONE, self._on_handshake_done_delivery, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_handshake_done_frame() ) ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_datagram_frame( self, builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType ) -> bool: """ Write a DATAGRAM frame. Returns True if the frame was processed, False otherwise. 
""" assert frame_type == QuicFrameType.DATAGRAM_WITH_LENGTH length = len(data) frame_size = 1 + size_uint_var(length) + length + buf = builder.start_frame(frame_type, capacity=frame_size) - buf = builder.start_frame(frame_type, required_bytes=frame_size) buf.push_uint_var(length) buf.push_bytes(data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_datagram_frame(length=length) ) return True ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_crypto_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> bool: frame_overhead = 3 + size_uint_var(stream.next_send_offset) frame = stream.get_frame(builder.remaining_flight_space - frame_overhead) if frame is not None: buf = builder.start_frame( QuicFrameType.CRYPTO, + capacity=frame_overhead, + handler=stream.on_data_delivery, - stream.on_data_delivery, + handler_args=(frame.offset, frame.offset + len(frame.data)), - (frame.offset, frame.offset + len(frame.data)), - required_bytes=frame_overhead, ) buf.push_uint_var(frame.offset) buf.push_uint16(len(frame.data) | 0x4000) buf.push_bytes(frame.data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_crypto_frame(frame) ) return True return False ===========changed ref 4=========== # module: aioquic.quic.connection class QuicConnection: def _write_new_connection_id_frame( self, builder: QuicPacketBuilder, connection_id: QuicConnectionId ) -> None: retire_prior_to = 0 # FIXME buf = builder.start_frame( QuicFrameType.NEW_CONNECTION_ID, + capacity=NEW_CONNECTION_ID_FRAME_CAPACITY, + handler=self._on_new_connection_id_delivery, - self._on_new_connection_id_delivery, + handler_args=(connection_id,), - (connection_id,), ) buf.push_uint_var(connection_id.sequence_number) buf.push_uint_var(retire_prior_to) buf.push_uint8(len(connection_id.cid)) buf.push_bytes(connection_id.cid) buf.push_bytes(connection_id.stateless_reset_token) connection_id.was_sent = True self._events.append(events.ConnectionIdIssued(connection_id=connection_id.cid)) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_new_connection_id_frame( connection_id=connection_id.cid, retire_prior_to=retire_prior_to, sequence_number=connection_id.sequence_number, stateless_reset_token=connection_id.stateless_reset_token, ) ) ===========changed ref 5=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_limits( self, builder: QuicPacketBuilder, space: QuicPacketSpace ) -> None: """ Raise MAX_DATA if needed. """ if self._local_max_data_used * 2 > self._local_max_data: self._local_max_data *= 2 self._logger.debug("Local max_data raised to %d", self._local_max_data) if self._local_max_data_sent != self._local_max_data: buf = builder.start_frame( + QuicFrameType.MAX_DATA, + capacity=MAX_DATA_FRAME_CAPACITY, + handler=self._on_max_data_delivery, - QuicFrameType.MAX_DATA, self._on_max_data_delivery ) buf.push_uint_var(self._local_max_data) self._local_max_data_sent = self._local_max_data # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_max_data_frame(self._local_max_data) )
aioquic.quic.connection/QuicConnection._write_ping_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<1>:<add> QuicFrameType.PING, <add> capacity=PING_FRAME_CAPACITY, <add> handler=self._on_ping_delivery, <add> handler_args=(tuple(uids),), <del> QuicFrameType.PING, self._on_ping_delivery, (tuple(uids),), required_bytes=1
# module: aioquic.quic.connection class QuicConnection: def _write_ping_frame(self, builder: QuicPacketBuilder, uids: List[int] = []): <0> builder.start_frame( <1> QuicFrameType.PING, self._on_ping_delivery, (tuple(uids),), required_bytes=1 <2> ) <3> self._logger.debug( <4> "Sending PING%s in packet %d", <5> "" if uids else " (probe)", <6> builder.packet_number, <7> ) <8> <9> # log frame <10> if self._quic_logger is not None: <11> builder.quic_logger_frames.append(self._quic_logger.encode_ping_frame()) <12>
===========unchanged ref 0=========== at: aioquic.quic.connection PATH_CHALLENGE_FRAME_CAPACITY = 1 + 8 at: aioquic.quic.connection.QuicConnection.__init__ self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_response_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_RESPONSE, capacity=PATH_RESPONSE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_RESPONSE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_response_frame(data=challenge) ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_challenge_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_CHALLENGE, capacity=PATH_CHALLENGE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_CHALLENGE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_challenge_frame(data=challenge) ) ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_handshake_done_frame(self, builder: QuicPacketBuilder) -> None: builder.start_frame( + QuicFrameType.HANDSHAKE_DONE, + capacity=HANDSHAKE_DONE_FRAME_CAPACITY, + handler=self._on_handshake_done_delivery, - QuicFrameType.HANDSHAKE_DONE, self._on_handshake_done_delivery, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_handshake_done_frame() ) ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_datagram_frame( self, builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType ) -> bool: """ Write a DATAGRAM frame. Returns True if the frame was processed, False otherwise. 
""" assert frame_type == QuicFrameType.DATAGRAM_WITH_LENGTH length = len(data) frame_size = 1 + size_uint_var(length) + length + buf = builder.start_frame(frame_type, capacity=frame_size) - buf = builder.start_frame(frame_type, required_bytes=frame_size) buf.push_uint_var(length) buf.push_bytes(data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_datagram_frame(length=length) ) return True ===========changed ref 4=========== # module: aioquic.quic.connection class QuicConnection: def _write_crypto_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> bool: frame_overhead = 3 + size_uint_var(stream.next_send_offset) frame = stream.get_frame(builder.remaining_flight_space - frame_overhead) if frame is not None: buf = builder.start_frame( QuicFrameType.CRYPTO, + capacity=frame_overhead, + handler=stream.on_data_delivery, - stream.on_data_delivery, + handler_args=(frame.offset, frame.offset + len(frame.data)), - (frame.offset, frame.offset + len(frame.data)), - required_bytes=frame_overhead, ) buf.push_uint_var(frame.offset) buf.push_uint16(len(frame.data) | 0x4000) buf.push_bytes(frame.data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_crypto_frame(frame) ) return True return False ===========changed ref 5=========== # module: aioquic.quic.connection class QuicConnection: def _write_new_connection_id_frame( self, builder: QuicPacketBuilder, connection_id: QuicConnectionId ) -> None: retire_prior_to = 0 # FIXME buf = builder.start_frame( QuicFrameType.NEW_CONNECTION_ID, + capacity=NEW_CONNECTION_ID_FRAME_CAPACITY, + handler=self._on_new_connection_id_delivery, - self._on_new_connection_id_delivery, + handler_args=(connection_id,), - (connection_id,), ) buf.push_uint_var(connection_id.sequence_number) buf.push_uint_var(retire_prior_to) buf.push_uint8(len(connection_id.cid)) buf.push_bytes(connection_id.cid) buf.push_bytes(connection_id.stateless_reset_token) connection_id.was_sent = True self._events.append(events.ConnectionIdIssued(connection_id=connection_id.cid)) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_new_connection_id_frame( connection_id=connection_id.cid, retire_prior_to=retire_prior_to, sequence_number=connection_id.sequence_number, stateless_reset_token=connection_id.stateless_reset_token, ) ) ===========changed ref 6=========== # module: aioquic.quic.connection class QuicConnection: def _write_connection_limits( self, builder: QuicPacketBuilder, space: QuicPacketSpace ) -> None: """ Raise MAX_DATA if needed. """ if self._local_max_data_used * 2 > self._local_max_data: self._local_max_data *= 2 self._logger.debug("Local max_data raised to %d", self._local_max_data) if self._local_max_data_sent != self._local_max_data: buf = builder.start_frame( + QuicFrameType.MAX_DATA, + capacity=MAX_DATA_FRAME_CAPACITY, + handler=self._on_max_data_delivery, - QuicFrameType.MAX_DATA, self._on_max_data_delivery ) buf.push_uint_var(self._local_max_data) self._local_max_data_sent = self._local_max_data # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_max_data_frame(self._local_max_data) )
aioquic.quic.connection/QuicConnection._write_retire_connection_id_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<2>:<add> capacity=RETIRE_CONNECTION_ID_CAPACITY, <add> handler=self._on_retire_connection_id_delivery, <del> self._on_retire_connection_id_delivery, <3>:<add> handler_args=(sequence_number,), <del> (sequence_number,),
# module: aioquic.quic.connection class QuicConnection: def _write_retire_connection_id_frame( self, builder: QuicPacketBuilder, sequence_number: int ) -> None: <0> buf = builder.start_frame( <1> QuicFrameType.RETIRE_CONNECTION_ID, <2> self._on_retire_connection_id_delivery, <3> (sequence_number,), <4> ) <5> buf.push_uint_var(sequence_number) <6> <7> # log frame <8> if self._quic_logger is not None: <9> builder.quic_logger_frames.append( <10> self._quic_logger.encode_retire_connection_id_frame(sequence_number) <11> ) <12>
===========unchanged ref 0=========== at: aioquic.quic.connection PATH_RESPONSE_FRAME_CAPACITY = 1 + 8 at: aioquic.quic.connection.QuicConnection.__init__ self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_response_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_RESPONSE, capacity=PATH_RESPONSE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_RESPONSE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_response_frame(data=challenge) ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_challenge_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_CHALLENGE, capacity=PATH_CHALLENGE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_CHALLENGE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_challenge_frame(data=challenge) ) ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_ping_frame(self, builder: QuicPacketBuilder, uids: List[int] = []): builder.start_frame( + QuicFrameType.PING, + capacity=PING_FRAME_CAPACITY, + handler=self._on_ping_delivery, + handler_args=(tuple(uids),), - QuicFrameType.PING, self._on_ping_delivery, (tuple(uids),), required_bytes=1 ) self._logger.debug( "Sending PING%s in packet %d", "" if uids else " (probe)", builder.packet_number, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append(self._quic_logger.encode_ping_frame()) ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_handshake_done_frame(self, builder: QuicPacketBuilder) -> None: builder.start_frame( + QuicFrameType.HANDSHAKE_DONE, + capacity=HANDSHAKE_DONE_FRAME_CAPACITY, + handler=self._on_handshake_done_delivery, - QuicFrameType.HANDSHAKE_DONE, self._on_handshake_done_delivery, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_handshake_done_frame() ) ===========changed ref 4=========== # module: aioquic.quic.connection class QuicConnection: def _write_datagram_frame( self, builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType ) -> bool: """ Write a DATAGRAM frame. Returns True if the frame was processed, False otherwise. 
""" assert frame_type == QuicFrameType.DATAGRAM_WITH_LENGTH length = len(data) frame_size = 1 + size_uint_var(length) + length + buf = builder.start_frame(frame_type, capacity=frame_size) - buf = builder.start_frame(frame_type, required_bytes=frame_size) buf.push_uint_var(length) buf.push_bytes(data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_datagram_frame(length=length) ) return True ===========changed ref 5=========== # module: aioquic.quic.connection class QuicConnection: def _write_crypto_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> bool: frame_overhead = 3 + size_uint_var(stream.next_send_offset) frame = stream.get_frame(builder.remaining_flight_space - frame_overhead) if frame is not None: buf = builder.start_frame( QuicFrameType.CRYPTO, + capacity=frame_overhead, + handler=stream.on_data_delivery, - stream.on_data_delivery, + handler_args=(frame.offset, frame.offset + len(frame.data)), - (frame.offset, frame.offset + len(frame.data)), - required_bytes=frame_overhead, ) buf.push_uint_var(frame.offset) buf.push_uint16(len(frame.data) | 0x4000) buf.push_bytes(frame.data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_crypto_frame(frame) ) return True return False ===========changed ref 6=========== # module: aioquic.quic.connection class QuicConnection: def _write_new_connection_id_frame( self, builder: QuicPacketBuilder, connection_id: QuicConnectionId ) -> None: retire_prior_to = 0 # FIXME buf = builder.start_frame( QuicFrameType.NEW_CONNECTION_ID, + capacity=NEW_CONNECTION_ID_FRAME_CAPACITY, + handler=self._on_new_connection_id_delivery, - self._on_new_connection_id_delivery, + handler_args=(connection_id,), - (connection_id,), ) buf.push_uint_var(connection_id.sequence_number) buf.push_uint_var(retire_prior_to) buf.push_uint8(len(connection_id.cid)) buf.push_bytes(connection_id.cid) buf.push_bytes(connection_id.stateless_reset_token) connection_id.was_sent = True self._events.append(events.ConnectionIdIssued(connection_id=connection_id.cid)) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_new_connection_id_frame( connection_id=connection_id.cid, retire_prior_to=retire_prior_to, sequence_number=connection_id.sequence_number, stateless_reset_token=connection_id.stateless_reset_token, ) )
aioquic.quic.connection/QuicConnection._write_stream_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<20>:<add> capacity=frame_overhead, <add> handler=stream.on_data_delivery, <del> stream.on_data_delivery, <21>:<add> handler_args=(frame.offset, frame.offset + len(frame.data)), <del> (frame.offset, frame.offset + len(frame.data)), <22>:<del> required_bytes=frame_overhead,
# module: aioquic.quic.connection class QuicConnection: def _write_stream_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream, max_offset: int, ) -> int: <0> # the frame data size is constrained by our peer's MAX_DATA and <1> # the space available in the current packet <2> frame_overhead = ( <3> 3 <4> + size_uint_var(stream.stream_id) <5> + (size_uint_var(stream.next_send_offset) if stream.next_send_offset else 0) <6> ) <7> previous_send_highest = stream._send_highest <8> frame = stream.get_frame( <9> builder.remaining_flight_space - frame_overhead, max_offset <10> ) <11> <12> if frame is not None: <13> frame_type = QuicFrameType.STREAM_BASE | 2 # length <14> if frame.offset: <15> frame_type |= 4 <16> if frame.fin: <17> frame_type |= 1 <18> buf = builder.start_frame( <19> frame_type, <20> stream.on_data_delivery, <21> (frame.offset, frame.offset + len(frame.data)), <22> required_bytes=frame_overhead, <23> ) <24> buf.push_uint_var(stream.stream_id) <25> if frame.offset: <26> buf.push_uint_var(frame.offset) <27> buf.push_uint16(len(frame.data) | 0x4000) <28> buf.push_bytes(frame.data) <29> <30> # log frame <31> if self._quic_logger is not None: <32> builder.quic_logger_frames.append( <33> self._quic_logger.encode_stream_frame( <34> frame, stream_id=stream.stream_id <35> ) <36> ) <37> <38> return stream._send_highest - previous_send_highest <39> </s>
===========below chunk 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_stream_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream, max_offset: int, ) -> int: # offset: 1 return 0 ===========unchanged ref 0=========== at: aioquic.quic.connection PING_FRAME_CAPACITY = 1 RETIRE_CONNECTION_ID_CAPACITY = 1 + 8 at: aioquic.quic.connection.QuicConnection _on_ping_delivery(delivery: QuicDeliveryState, uids: Sequence[int]) -> None _on_retire_connection_id_delivery(delivery: QuicDeliveryState, sequence_number: int) -> None at: aioquic.quic.connection.QuicConnection.__init__ self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) self._logger = QuicConnectionAdapter( logger, {"id": dump_cid(logger_connection_id)} ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None at: logging.LoggerAdapter debug(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: typing List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_response_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_RESPONSE, capacity=PATH_RESPONSE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_RESPONSE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_response_frame(data=challenge) ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_retire_connection_id_frame( self, builder: QuicPacketBuilder, sequence_number: int ) -> None: buf = builder.start_frame( QuicFrameType.RETIRE_CONNECTION_ID, + capacity=RETIRE_CONNECTION_ID_CAPACITY, + handler=self._on_retire_connection_id_delivery, - self._on_retire_connection_id_delivery, + handler_args=(sequence_number,), - (sequence_number,), ) buf.push_uint_var(sequence_number) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_retire_connection_id_frame(sequence_number) ) ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_challenge_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_CHALLENGE, capacity=PATH_CHALLENGE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_CHALLENGE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_challenge_frame(data=challenge) ) ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_ping_frame(self, builder: QuicPacketBuilder, uids: List[int] = []): builder.start_frame( + QuicFrameType.PING, + capacity=PING_FRAME_CAPACITY, + handler=self._on_ping_delivery, + handler_args=(tuple(uids),), - QuicFrameType.PING, self._on_ping_delivery, (tuple(uids),), required_bytes=1 ) self._logger.debug( "Sending PING%s in packet %d", "" if uids else " (probe)", builder.packet_number, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append(self._quic_logger.encode_ping_frame()) ===========changed ref 4=========== # module: 
aioquic.quic.connection class QuicConnection: def _write_handshake_done_frame(self, builder: QuicPacketBuilder) -> None: builder.start_frame( + QuicFrameType.HANDSHAKE_DONE, + capacity=HANDSHAKE_DONE_FRAME_CAPACITY, + handler=self._on_handshake_done_delivery, - QuicFrameType.HANDSHAKE_DONE, self._on_handshake_done_delivery, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_handshake_done_frame() ) ===========changed ref 5=========== # module: aioquic.quic.connection class QuicConnection: def _write_datagram_frame( self, builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType ) -> bool: """ Write a DATAGRAM frame. Returns True if the frame was processed, False otherwise. """ assert frame_type == QuicFrameType.DATAGRAM_WITH_LENGTH length = len(data) frame_size = 1 + size_uint_var(length) + length + buf = builder.start_frame(frame_type, capacity=frame_size) - buf = builder.start_frame(frame_type, required_bytes=frame_size) buf.push_uint_var(length) buf.push_bytes(data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_datagram_frame(length=length) ) return True ===========changed ref 6=========== # module: aioquic.quic.connection class QuicConnection: def _write_crypto_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> bool: frame_overhead = 3 + size_uint_var(stream.next_send_offset) frame = stream.get_frame(builder.remaining_flight_space - frame_overhead) if frame is not None: buf = builder.start_frame( QuicFrameType.CRYPTO, + capacity=frame_overhead, + handler=stream.on_data_delivery, - stream.on_data_delivery, + handler_args=(frame.offset, frame.offset + len(frame.data)), - (frame.offset, frame.offset + len(frame.data)), - required_bytes=frame_overhead, ) buf.push_uint_var(frame.offset) buf.push_uint16(len(frame.data) | 0x4000) buf.push_bytes(frame.data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_crypto_frame(frame) ) return True return False
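A note on the overhead arithmetic used in _write_stream_frame and _write_crypto_frame above: the constant 3 is one byte for the frame type plus the 2-byte length field (the len | 0x4000 push), and size_uint_var accounts for the variable-length integers carrying the stream ID and, when present, the offset. The helper below is an illustrative re-implementation of RFC 9000 varint sizing, not the aioquic one, just to make the worst-case numbers concrete.

def varint_size(value: int) -> int:
    # QUIC variable-length integers (RFC 9000, section 16)
    if value < 0x40:                      # 6-bit value -> 1 byte
        return 1
    elif value < 0x4000:                  # 14-bit value -> 2 bytes
        return 2
    elif value < 0x40000000:              # 30-bit value -> 4 bytes
        return 4
    elif value < 0x4000000000000000:      # 62-bit value -> 8 bytes
        return 8
    raise ValueError("varint out of range")


# Worst-case STREAM frame header for stream 12 at offset 70000:
# 1 (type) + 2 (length, always sent as a 2-byte varint: len | 0x4000)
# + varint_size(12) + varint_size(70000) = 1 + 2 + 1 + 4 = 8 bytes.
assert 1 + 2 + varint_size(12) + varint_size(70000) == 8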
aioquic.quic.connection/QuicConnection._write_stream_limits
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<20>:<add> capacity=MAX_STREAM_DATA_FRAME_CAPACITY, <add> handler=self._on_max_stream_data_delivery, <del> self._on_max_stream_data_delivery, <21>:<add> handler_args=(stream,), <del> (stream,),
# module: aioquic.quic.connection class QuicConnection: def _write_stream_limits( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> None: <0> """ <1> Raise MAX_STREAM_DATA if needed. <2> <3> The only case where `stream.max_stream_data_local` is zero is for <4> locally created unidirectional streams. We skip such streams to avoid <5> spurious logging. <6> """ <7> if ( <8> stream.max_stream_data_local <9> and stream._recv_highest * 2 > stream.max_stream_data_local <10> ): <11> stream.max_stream_data_local *= 2 <12> self._logger.debug( <13> "Stream %d local max_stream_data raised to %d", <14> stream.stream_id, <15> stream.max_stream_data_local, <16> ) <17> if stream.max_stream_data_local_sent != stream.max_stream_data_local: <18> buf = builder.start_frame( <19> QuicFrameType.MAX_STREAM_DATA, <20> self._on_max_stream_data_delivery, <21> (stream,), <22> ) <23> buf.push_uint_var(stream.stream_id) <24> buf.push_uint_var(stream.max_stream_data_local) <25> stream.max_stream_data_local_sent = stream.max_stream_data_local <26> <27> # log frame <28> if self._quic_logger is not None: <29> builder.quic_logger_frames.append( <30> self._quic_logger.encode_max_stream_data_frame( <31> maximum=stream.max_stream_data_local, stream_id=stream.stream_id <32> ) <33> ) <34>
===========unchanged ref 0=========== at: aioquic.quic.connection.QuicConnection.__init__ self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None at: aioquic.quic.connection.QuicConnection._write_stream_frame frame_overhead = ( 3 + size_uint_var(stream.stream_id) + (size_uint_var(stream.next_send_offset) if stream.next_send_offset else 0) ) ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_response_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_RESPONSE, capacity=PATH_RESPONSE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_RESPONSE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_response_frame(data=challenge) ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_retire_connection_id_frame( self, builder: QuicPacketBuilder, sequence_number: int ) -> None: buf = builder.start_frame( QuicFrameType.RETIRE_CONNECTION_ID, + capacity=RETIRE_CONNECTION_ID_CAPACITY, + handler=self._on_retire_connection_id_delivery, - self._on_retire_connection_id_delivery, + handler_args=(sequence_number,), - (sequence_number,), ) buf.push_uint_var(sequence_number) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_retire_connection_id_frame(sequence_number) ) ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_challenge_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_CHALLENGE, capacity=PATH_CHALLENGE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_CHALLENGE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_challenge_frame(data=challenge) ) ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_ping_frame(self, builder: QuicPacketBuilder, uids: List[int] = []): builder.start_frame( + QuicFrameType.PING, + capacity=PING_FRAME_CAPACITY, + handler=self._on_ping_delivery, + handler_args=(tuple(uids),), - QuicFrameType.PING, self._on_ping_delivery, (tuple(uids),), required_bytes=1 ) self._logger.debug( "Sending PING%s in packet %d", "" if uids else " (probe)", builder.packet_number, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append(self._quic_logger.encode_ping_frame()) ===========changed ref 4=========== # module: aioquic.quic.connection class QuicConnection: def _write_handshake_done_frame(self, builder: QuicPacketBuilder) -> None: builder.start_frame( + QuicFrameType.HANDSHAKE_DONE, + capacity=HANDSHAKE_DONE_FRAME_CAPACITY, + handler=self._on_handshake_done_delivery, - QuicFrameType.HANDSHAKE_DONE, self._on_handshake_done_delivery, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_handshake_done_frame() ) ===========changed ref 5=========== # module: aioquic.quic.connection class QuicConnection: def _write_datagram_frame( self, builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType ) -> bool: """ Write a 
DATAGRAM frame. Returns True if the frame was processed, False otherwise. """ assert frame_type == QuicFrameType.DATAGRAM_WITH_LENGTH length = len(data) frame_size = 1 + size_uint_var(length) + length + buf = builder.start_frame(frame_type, capacity=frame_size) - buf = builder.start_frame(frame_type, required_bytes=frame_size) buf.push_uint_var(length) buf.push_bytes(data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_datagram_frame(length=length) ) return True ===========changed ref 6=========== # module: aioquic.quic.connection class QuicConnection: def _write_crypto_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> bool: frame_overhead = 3 + size_uint_var(stream.next_send_offset) frame = stream.get_frame(builder.remaining_flight_space - frame_overhead) if frame is not None: buf = builder.start_frame( QuicFrameType.CRYPTO, + capacity=frame_overhead, + handler=stream.on_data_delivery, - stream.on_data_delivery, + handler_args=(frame.offset, frame.offset + len(frame.data)), - (frame.offset, frame.offset + len(frame.data)), - required_bytes=frame_overhead, ) buf.push_uint_var(frame.offset) buf.push_uint16(len(frame.data) | 0x4000) buf.push_bytes(frame.data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_crypto_frame(frame) ) return True return False
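The credit-raising rule in _write_stream_limits doubles the local flow-control limit once the peer has consumed more than half of it, and only emits MAX_STREAM_DATA when the advertised value actually changed. A toy restatement of just that heuristic follows; the function name and sample numbers are made up, only the doubling logic mirrors the method above.

def maybe_raise_limit(recv_highest: int, limit: int, limit_sent: int):
    # double the local window once the peer is past half of it
    if limit and recv_highest * 2 > limit:
        limit *= 2
    # only send MAX_STREAM_DATA when the advertised value changed
    send_frame = limit_sent != limit
    return limit, send_frame


limit, send = maybe_raise_limit(400_000, 1_048_576, 1_048_576)
assert (limit, send) == (1_048_576, False)   # under half the window: nothing to do
limit, send = maybe_raise_limit(600_000, 1_048_576, 1_048_576)
assert (limit, send) == (2_097_152, True)    # past half: double and re-advertise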
aioquic.quic.connection/QuicConnection._write_streams_blocked_frame
Modified
aiortc~aioquic
aacf2c82e7706494308ff94080c6e002e6d5fe4f
[packet] refine frame allocation size
<0>:<add> buf = builder.start_frame(frame_type, capacity=STREAMS_BLOCKED_CAPACITY) <del> buf = builder.start_frame(frame_type)
# module: aioquic.quic.connection class QuicConnection: def _write_streams_blocked_frame( self, builder: QuicPacketBuilder, frame_type: QuicFrameType, limit: int ) -> None: <0> buf = builder.start_frame(frame_type) <1> buf.push_uint_var(limit) <2> <3> # log frame <4> if self._quic_logger is not None: <5> builder.quic_logger_frames.append( <6> self._quic_logger.encode_streams_blocked_frame( <7> is_unidirectional=frame_type == QuicFrameType.STREAMS_BLOCKED_UNI, <8> limit=limit, <9> ) <10> ) <11>
===========unchanged ref 0=========== at: aioquic.quic.connection.QuicConnection.__init__ self._logger = QuicConnectionAdapter( logger, {"id": dump_cid(logger_connection_id)} ) at: logging.LoggerAdapter debug(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_response_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_RESPONSE, capacity=PATH_RESPONSE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_RESPONSE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_response_frame(data=challenge) ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_retire_connection_id_frame( self, builder: QuicPacketBuilder, sequence_number: int ) -> None: buf = builder.start_frame( QuicFrameType.RETIRE_CONNECTION_ID, + capacity=RETIRE_CONNECTION_ID_CAPACITY, + handler=self._on_retire_connection_id_delivery, - self._on_retire_connection_id_delivery, + handler_args=(sequence_number,), - (sequence_number,), ) buf.push_uint_var(sequence_number) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_retire_connection_id_frame(sequence_number) ) ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_path_challenge_frame( self, builder: QuicPacketBuilder, challenge: bytes ) -> None: + buf = builder.start_frame( + QuicFrameType.PATH_CHALLENGE, capacity=PATH_CHALLENGE_FRAME_CAPACITY + ) - buf = builder.start_frame(QuicFrameType.PATH_CHALLENGE) buf.push_bytes(challenge) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_path_challenge_frame(data=challenge) ) ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_ping_frame(self, builder: QuicPacketBuilder, uids: List[int] = []): builder.start_frame( + QuicFrameType.PING, + capacity=PING_FRAME_CAPACITY, + handler=self._on_ping_delivery, + handler_args=(tuple(uids),), - QuicFrameType.PING, self._on_ping_delivery, (tuple(uids),), required_bytes=1 ) self._logger.debug( "Sending PING%s in packet %d", "" if uids else " (probe)", builder.packet_number, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append(self._quic_logger.encode_ping_frame()) ===========changed ref 4=========== # module: aioquic.quic.connection class QuicConnection: def _write_handshake_done_frame(self, builder: QuicPacketBuilder) -> None: builder.start_frame( + QuicFrameType.HANDSHAKE_DONE, + capacity=HANDSHAKE_DONE_FRAME_CAPACITY, + handler=self._on_handshake_done_delivery, - QuicFrameType.HANDSHAKE_DONE, self._on_handshake_done_delivery, ) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_handshake_done_frame() ) ===========changed ref 5=========== # module: aioquic.quic.connection class QuicConnection: def _write_datagram_frame( self, builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType ) -> bool: """ Write a DATAGRAM frame. Returns True if the frame was processed, False otherwise. 
""" assert frame_type == QuicFrameType.DATAGRAM_WITH_LENGTH length = len(data) frame_size = 1 + size_uint_var(length) + length + buf = builder.start_frame(frame_type, capacity=frame_size) - buf = builder.start_frame(frame_type, required_bytes=frame_size) buf.push_uint_var(length) buf.push_bytes(data) # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_datagram_frame(length=length) ) return True ===========changed ref 6=========== # module: aioquic.quic.connection class QuicConnection: def _write_stream_limits( self, builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream ) -> None: """ Raise MAX_STREAM_DATA if needed. The only case where `stream.max_stream_data_local` is zero is for locally created unidirectional streams. We skip such streams to avoid spurious logging. """ if ( stream.max_stream_data_local and stream._recv_highest * 2 > stream.max_stream_data_local ): stream.max_stream_data_local *= 2 self._logger.debug( "Stream %d local max_stream_data raised to %d", stream.stream_id, stream.max_stream_data_local, ) if stream.max_stream_data_local_sent != stream.max_stream_data_local: buf = builder.start_frame( QuicFrameType.MAX_STREAM_DATA, + capacity=MAX_STREAM_DATA_FRAME_CAPACITY, + handler=self._on_max_stream_data_delivery, - self._on_max_stream_data_delivery, + handler_args=(stream,), - (stream,), ) buf.push_uint_var(stream.stream_id) buf.push_uint_var(stream.max_stream_data_local) stream.max_stream_data_local_sent = stream.max_stream_data_local # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_max_stream_data_frame( maximum=stream.max_stream_data_local, stream_id=stream.stream_id ) )
tests.test_connection/QuicConnectionTest.test_send_max_data_blocked_by_cc
Modified
aiortc~aioquic
b9c38183f6b5371f267831543d2166b20e615966
[recovery] split out congestion control
<10>:<add> client._loss._cc.bytes_in_flight = 14303 <del> client._loss.bytes_in_flight = 14303
# module: tests.test_connection class QuicConnectionTest(TestCase): def test_send_max_data_blocked_by_cc(self): <0> with client_and_server() as (client, server): <1> # check congestion control <2> self.assertEqual(client._loss.bytes_in_flight, 0) <3> self.assertEqual(client._loss.congestion_window, 14303) <4> <5> # artificially raise received data counter <6> client._local_max_data_used = client._local_max_data <7> self.assertEqual(server._remote_max_data, 1048576) <8> <9> # artificially raise bytes in flight <10> client._loss.bytes_in_flight = 14303 <11> <12> # MAX_DATA is not sent due to congestion control <13> self.assertEqual(drop(client), 0) <14>
===========unchanged ref 0=========== at: tests.test_connection client_and_server(client_kwargs={}, client_options={}, client_patch=lambda x: None, handshake=True, server_kwargs={}, server_certfile=SERVER_CERTFILE, server_keyfile=SERVER_KEYFILE, server_options={}, server_patch=lambda x: None, transport_options={}) drop(sender) at: unittest.case.TestCase failureException: Type[BaseException] longMessage: bool maxDiff: Optional[int] _testMethodName: str _testMethodDoc: str assertEqual(first: Any, second: Any, msg: Any=...) -> None
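The updated test pokes client._loss._cc.bytes_in_flight because the byte counter now lives on the extracted congestion controller, while the assertion still reads client._loss.bytes_in_flight through the new read-only property (see the @property definitions in the recovery refs further down). A stripped-down sketch of that delegation, with invented class names:

class CongestionControlSketch:
    def __init__(self) -> None:
        self.bytes_in_flight = 0


class RecoverySketch:
    def __init__(self) -> None:
        self._cc = CongestionControlSketch()

    @property
    def bytes_in_flight(self) -> int:
        # reads delegate to the controller, so existing assertions keep working
        return self._cc.bytes_in_flight


loss = RecoverySketch()
loss._cc.bytes_in_flight = 14303      # what the updated test now does
assert loss.bytes_in_flight == 14303  # the read-only view still reflects it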
tests.test_recovery/QuicPacketRecoveryTest.test_on_packet_lost_crypto
Modified
aiortc~aioquic
b9c38183f6b5371f267831543d2166b20e615966
[recovery] split out congestion control
<8>:<add> sent_time=0.0, <del> sent_time=123.45, <17>:<add> self.recovery._detect_loss(space, now=1.0) <del> self.recovery.on_packet_lost(packet, space)
# module: tests.test_recovery class QuicPacketRecoveryTest(TestCase): def test_on_packet_lost_crypto(self): <0> packet = QuicSentPacket( <1> epoch=tls.Epoch.INITIAL, <2> in_flight=True, <3> is_ack_eliciting=True, <4> is_crypto_packet=True, <5> packet_number=0, <6> packet_type=PACKET_TYPE_INITIAL, <7> sent_bytes=1280, <8> sent_time=123.45, <9> ) <10> space = self.INITIAL_SPACE <11> <12> self.recovery.on_packet_sent(packet, space) <13> self.assertEqual(self.recovery.bytes_in_flight, 1280) <14> self.assertEqual(space.ack_eliciting_in_flight, 1) <15> self.assertEqual(len(space.sent_packets), 1) <16> <17> self.recovery.on_packet_lost(packet, space) <18> self.assertEqual(self.recovery.bytes_in_flight, 0) <19> self.assertEqual(space.ack_eliciting_in_flight, 0) <20> self.assertEqual(len(space.sent_packets), 0) <21>
===========unchanged ref 0=========== at: tests.test_recovery.QuicPacketRecoveryTest.setUp self.INITIAL_SPACE = QuicPacketSpace() self.recovery = QuicPacketRecovery( is_client_without_1rtt=False, send_probe=send_probe ) at: unittest.case.TestCase failureException: Type[BaseException] longMessage: bool maxDiff: Optional[int] _testMethodName: str _testMethodDoc: str assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_send_max_data_blocked_by_cc(self): with client_and_server() as (client, server): # check congestion control self.assertEqual(client._loss.bytes_in_flight, 0) self.assertEqual(client._loss.congestion_window, 14303) # artificially raise received data counter client._local_max_data_used = client._local_max_data self.assertEqual(server._remote_max_data, 1048576) # artificially raise bytes in flight + client._loss._cc.bytes_in_flight = 14303 - client._loss.bytes_in_flight = 14303 # MAX_DATA is not sent due to congestion control self.assertEqual(drop(client), 0)
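The test now drives loss through _detect_loss(space, now=1.0) instead of calling the removed per-packet on_packet_lost, and sent_time drops to 0.0 so that the time threshold, rather than the packet threshold, declares the packet lost. With no RTT samples yet the loss delay falls back to K_TIME_THRESHOLD * K_INITIAL_RTT (constants as listed in the recovery refs below); the arithmetic spells that out:

K_TIME_THRESHOLD = 9 / 8
K_INITIAL_RTT = 0.5                              # seconds
loss_delay = K_TIME_THRESHOLD * K_INITIAL_RTT    # 0.5625 s
time_threshold = 1.0 - loss_delay                # now - loss_delay = 0.4375 s
assert 0.0 <= time_threshold                     # sent_time=0.0 is old enough -> lost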
aioquic.quic.recovery/QuicPacketRecovery.__init__
Modified
aiortc~aioquic
b9c38183f6b5371f267831543d2166b20e615966
[recovery] split out congestion control
<18>:<del> self.bytes_in_flight = 0 <19>:<del> self.congestion_window = K_INITIAL_WINDOW <20>:<del> self._congestion_recovery_start_time = 0.0 <21>:<del> self._congestion_stash = 0 <22>:<del> self._rtt_monitor = QuicRttMonitor() <23>:<del> self._ssthresh: Optional[int] = None <24>:<add> self._cc = QuicCongestionControl()
# module: aioquic.quic.recovery class QuicPacketRecovery: def __init__( self, is_client_without_1rtt: bool, send_probe: Callable[[], None], quic_logger: Optional[QuicLoggerTrace] = None, ) -> None: <0> self.is_client_without_1rtt = is_client_without_1rtt <1> self.max_ack_delay = 0.025 <2> self.spaces: List[QuicPacketSpace] = [] <3> <4> # callbacks <5> self._quic_logger = quic_logger <6> self._send_probe = send_probe <7> <8> # loss detection <9> self._pto_count = 0 <10> self._rtt_initialized = False <11> self._rtt_latest = 0.0 <12> self._rtt_min = math.inf <13> self._rtt_smoothed = 0.0 <14> self._rtt_variance = 0.0 <15> self._time_of_last_sent_ack_eliciting_packet = 0.0 <16> <17> # congestion control <18> self.bytes_in_flight = 0 <19> self.congestion_window = K_INITIAL_WINDOW <20> self._congestion_recovery_start_time = 0.0 <21> self._congestion_stash = 0 <22> self._rtt_monitor = QuicRttMonitor() <23> self._ssthresh: Optional[int] = None <24>
===========unchanged ref 0=========== at: aioquic.quic.recovery K_MAX_DATAGRAM_SIZE = 1280 K_INITIAL_WINDOW = 10 * K_MAX_DATAGRAM_SIZE QuicRttMonitor() at: aioquic.quic.recovery.QuicCongestionControl.on_packets_expired self.bytes_in_flight -= packet.sent_bytes at: aioquic.quic.recovery.QuicCongestionControl.on_packets_lost self.bytes_in_flight -= packet.sent_bytes self._congestion_recovery_start_time = now self.congestion_window = max( int(self.congestion_window * K_LOSS_REDUCTION_FACTOR), K_MINIMUM_WINDOW ) self.ssthresh = self.congestion_window at: aioquic.quic.recovery.QuicCongestionControl.on_rtt_measurement self.ssthresh = self.congestion_window at: typing Iterable = _alias(collections.abc.Iterable, 1) ===========changed ref 0=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + """ + New Reno congestion control. + """ + ===========changed ref 1=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def __init__(self) -> None: + self.bytes_in_flight = 0 + self.congestion_window = K_INITIAL_WINDOW + self._congestion_recovery_start_time = 0.0 + self._congestion_stash = 0 + self._rtt_monitor = QuicRttMonitor() + self.ssthresh: Optional[int] = None + ===========changed ref 2=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_send_max_data_blocked_by_cc(self): with client_and_server() as (client, server): # check congestion control self.assertEqual(client._loss.bytes_in_flight, 0) self.assertEqual(client._loss.congestion_window, 14303) # artificially raise received data counter client._local_max_data_used = client._local_max_data self.assertEqual(server._remote_max_data, 1048576) # artificially raise bytes in flight + client._loss._cc.bytes_in_flight = 14303 - client._loss.bytes_in_flight = 14303 # MAX_DATA is not sent due to congestion control self.assertEqual(drop(client), 0) ===========changed ref 3=========== # module: tests.test_recovery class QuicPacketRecoveryTest(TestCase): def test_on_packet_lost_crypto(self): packet = QuicSentPacket( epoch=tls.Epoch.INITIAL, in_flight=True, is_ack_eliciting=True, is_crypto_packet=True, packet_number=0, packet_type=PACKET_TYPE_INITIAL, sent_bytes=1280, + sent_time=0.0, - sent_time=123.45, ) space = self.INITIAL_SPACE self.recovery.on_packet_sent(packet, space) self.assertEqual(self.recovery.bytes_in_flight, 1280) self.assertEqual(space.ack_eliciting_in_flight, 1) self.assertEqual(len(space.sent_packets), 1) + self.recovery._detect_loss(space, now=1.0) - self.recovery.on_packet_lost(packet, space) self.assertEqual(self.recovery.bytes_in_flight, 0) self.assertEqual(space.ack_eliciting_in_flight, 0) self.assertEqual(len(space.sent_packets), 0)
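The extracted QuicCongestionControl is plain New Reno, as its on_packet_acked in the refs above shows: below ssthresh the window grows by every acked byte (slow start); above it, acked bytes accumulate in a stash and each full window's worth buys one K_MAX_DATAGRAM_SIZE of growth. A standalone walk-through of that growth rule (not an import of the class itself; constants copied from this module):

K_MAX_DATAGRAM_SIZE = 1280
K_INITIAL_WINDOW = 10 * K_MAX_DATAGRAM_SIZE


def grow_window(cwnd: int, stash: int, acked_bytes: int, ssthresh):
    if ssthresh is None or cwnd < ssthresh:
        return cwnd + acked_bytes, stash          # slow start: window grows by acked bytes
    stash += acked_bytes                          # congestion avoidance
    count = stash // cwnd                         # whole windows' worth of acked data
    return cwnd + count * K_MAX_DATAGRAM_SIZE, stash - count * cwnd


cwnd, stash = K_INITIAL_WINDOW, 0
cwnd, stash = grow_window(cwnd, stash, acked_bytes=1280, ssthresh=None)
assert cwnd == 14080                              # slow start added the full ACK
cwnd, stash = grow_window(cwnd, stash, acked_bytes=14080, ssthresh=14080)
assert (cwnd, stash) == (14080 + 1280, 0)         # one full window acked -> +1 datagram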
aioquic.quic.recovery/QuicPacketRecovery.discard_space
Modified
aiortc~aioquic
b9c38183f6b5371f267831543d2166b20e615966
[recovery] split out congestion control
<2>:<del> for packet in space.sent_packets.values(): <3>:<del> if packet.in_flight: <4>:<add> self._cc.on_packets_expired( <del> self.on_packet_expired(packet) <5>:<add> filter(lambda x: x.in_flight, space.sent_packets.values()) <add> )
# module: aioquic.quic.recovery class QuicPacketRecovery: def discard_space(self, space: QuicPacketSpace) -> None: <0> assert space in self.spaces <1> <2> for packet in space.sent_packets.values(): <3> if packet.in_flight: <4> self.on_packet_expired(packet) <5> space.sent_packets.clear() <6> <7> space.ack_at = None <8> space.ack_eliciting_in_flight = 0 <9> space.loss_time = None <10>
===========unchanged ref 0=========== at: aioquic.quic.recovery QuicPacketSpace() at: typing Callable = _CallableType(collections.abc.Callable, 2) List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packet_sent(self, packet: QuicSentPacket) -> None: + self.bytes_in_flight += packet.sent_bytes + ===========changed ref 1=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packets_expired(self, packets: Iterable[QuicSentPacket]) -> None: + for packet in packets: + self.bytes_in_flight -= packet.sent_bytes + ===========changed ref 2=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + # TODO : collapse congestion window if persistent congestion + + def on_rtt_measurement(self, latest_rtt: float, now: float) -> None: + # check whether we should exit slow start + if self.ssthresh is None and self._rtt_monitor.is_rtt_increasing( + latest_rtt, now + ): + self.ssthresh = self.congestion_window + ===========changed ref 3=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + """ + New Reno congestion control. + """ + ===========changed ref 4=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def __init__(self) -> None: + self.bytes_in_flight = 0 + self.congestion_window = K_INITIAL_WINDOW + self._congestion_recovery_start_time = 0.0 + self._congestion_stash = 0 + self._rtt_monitor = QuicRttMonitor() + self.ssthresh: Optional[int] = None + ===========changed ref 5=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packets_lost(self, packets: Iterable[QuicSentPacket], now: float) -> None: + lost_largest_time = 0.0 + for packet in packets: + self.bytes_in_flight -= packet.sent_bytes + lost_largest_time = packet.sent_time + + # start a new congestion event if packet was sent after the + # start of the previous congestion recovery period. 
+ if lost_largest_time > self._congestion_recovery_start_time: + self._congestion_recovery_start_time = now + self.congestion_window = max( + int(self.congestion_window * K_LOSS_REDUCTION_FACTOR), K_MINIMUM_WINDOW + ) + self.ssthresh = self.congestion_window + ===========changed ref 6=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packet_acked(self, packet: QuicSentPacket) -> None: + self.bytes_in_flight -= packet.sent_bytes + + # don't increase window in congestion recovery + if packet.sent_time <= self._congestion_recovery_start_time: + return + + if self.ssthresh is None or self.congestion_window < self.ssthresh: + # slow start + self.congestion_window += packet.sent_bytes + else: + # congestion avoidance + self._congestion_stash += packet.sent_bytes + count = self._congestion_stash // self.congestion_window + if count: + self._congestion_stash -= count * self.congestion_window + self.congestion_window += count * K_MAX_DATAGRAM_SIZE + ===========changed ref 7=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def __init__( self, is_client_without_1rtt: bool, send_probe: Callable[[], None], quic_logger: Optional[QuicLoggerTrace] = None, ) -> None: self.is_client_without_1rtt = is_client_without_1rtt self.max_ack_delay = 0.025 self.spaces: List[QuicPacketSpace] = [] # callbacks self._quic_logger = quic_logger self._send_probe = send_probe # loss detection self._pto_count = 0 self._rtt_initialized = False self._rtt_latest = 0.0 self._rtt_min = math.inf self._rtt_smoothed = 0.0 self._rtt_variance = 0.0 self._time_of_last_sent_ack_eliciting_packet = 0.0 # congestion control - self.bytes_in_flight = 0 - self.congestion_window = K_INITIAL_WINDOW - self._congestion_recovery_start_time = 0.0 - self._congestion_stash = 0 - self._rtt_monitor = QuicRttMonitor() - self._ssthresh: Optional[int] = None + self._cc = QuicCongestionControl() ===========changed ref 8=========== # module: aioquic.quic.recovery class QuicPacketRecovery: - def detect_loss(self, space: QuicPacketSpace, now: float) -> None: - """ - Check whether any packets should be declared lost. - """ - loss_delay = K_TIME_THRESHOLD * ( - max(self._rtt_latest, self._rtt_smoothed) - if self._rtt_initialized - else K_INITIAL_RTT - ) - packet_threshold = space.largest_acked_packet - K_PACKET_THRESHOLD - time_threshold = now - loss_delay - - lost_largest_time = None - lost_packets = [] - space.loss_time = None - for packet_number, packet in space.sent_packets.items(): - if packet_number > space.largest_acked_packet: - break - - if packet_number <= packet_threshold or packet.sent_time <= time_threshold: - lost_packets.append(packet) - else: - packet_loss_time = packet.sent_time + loss_delay - if space.loss_time is None or space.loss_time > packet_loss_time: - space.loss_time = packet_loss_time - - for packet in lost_packets: - # remove packet and update counters - self.on_packet_lost(packet, space) - if packet.in_flight: - lost_largest_time = packet.sent_time - - if lost_largest_time is not None: - self.on_packets_lost(lost_largest_time, now=now) -
aioquic.quic.recovery/QuicPacketRecovery.on_ack_received
Modified
aiortc~aioquic
b9c38183f6b5371f267831543d2166b20e615966
[recovery] split out congestion control
<21>:<add> self._cc.on_packet_acked(packet) <del> self.on_packet_acked(packet)
# module: aioquic.quic.recovery class QuicPacketRecovery: def on_ack_received( self, space: QuicPacketSpace, ack_rangeset: RangeSet, ack_delay: float, now: float, ) -> None: <0> """ <1> Update metrics as the result of an ACK being received. <2> """ <3> is_ack_eliciting = False <4> largest_acked = ack_rangeset.bounds().stop - 1 <5> largest_newly_acked = None <6> largest_sent_time = None <7> <8> if largest_acked > space.largest_acked_packet: <9> space.largest_acked_packet = largest_acked <10> <11> for packet_number in sorted(space.sent_packets.keys()): <12> if packet_number > largest_acked: <13> break <14> if packet_number in ack_rangeset: <15> # remove packet and update counters <16> packet = space.sent_packets.pop(packet_number) <17> if packet.is_ack_eliciting: <18> is_ack_eliciting = True <19> space.ack_eliciting_in_flight -= 1 <20> if packet.in_flight: <21> self.on_packet_acked(packet) <22> largest_newly_acked = packet_number <23> largest_sent_time = packet.sent_time <24> <25> # trigger callbacks <26> for handler, args in packet.delivery_handlers: <27> handler(QuicDeliveryState.ACKED, *args) <28> <29> # nothing to do if there are no newly acked packets <30> if largest_newly_acked is None: <31> return <32> <33> if largest_acked == largest_newly_acked and is_ack_eliciting: <34> latest_rtt = now - largest_sent_time <35> log_rtt = True <36> <37> # limit ACK delay to max_ack_delay <38> ack_delay = min(ack_delay, self</s>
===========below chunk 0=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def on_ack_received( self, space: QuicPacketSpace, ack_rangeset: RangeSet, ack_delay: float, now: float, ) -> None: # offset: 1 # update RTT estimate, which cannot be < 1 ms self._rtt_latest = max(latest_rtt, 0.001) if self._rtt_latest < self._rtt_min: self._rtt_min = self._rtt_latest if self._rtt_latest > self._rtt_min + ack_delay: self._rtt_latest -= ack_delay if not self._rtt_initialized: self._rtt_initialized = True self._rtt_variance = latest_rtt / 2 self._rtt_smoothed = latest_rtt else: self._rtt_variance = 3 / 4 * self._rtt_variance + 1 / 4 * abs( self._rtt_min - self._rtt_latest ) self._rtt_smoothed = ( 7 / 8 * self._rtt_smoothed + 1 / 8 * self._rtt_latest ) # check whether we should exist slow start if self._ssthresh is None and self._rtt_monitor.is_rtt_increasing( latest_rtt, now ): self._ssthresh = self.congestion_window else: log_rtt = False self.detect_loss(space, now=now) if self._quic_logger is not None: self._log_metrics_updated(log_rtt=log_rtt) self._pto_count = 0 ===========unchanged ref 0=========== at: aioquic.quic.recovery K_INITIAL_RTT = 0.5 # seconds K_GRANULARITY = 0.001 # seconds QuicPacketSpace() at: aioquic.quic.recovery.QuicCongestionControl on_packet_acked(packet: QuicSentPacket) -> None on_packet_acked(self, packet: QuicSentPacket) -> None at: aioquic.quic.recovery.QuicPacketRecovery get_earliest_loss_space() -> Optional[QuicPacketSpace] at: aioquic.quic.recovery.QuicPacketRecovery.__init__ self.is_client_without_1rtt = is_client_without_1rtt self.max_ack_delay = 0.025 self.spaces: List[QuicPacketSpace] = [] self._pto_count = 0 self._rtt_initialized = False self._rtt_smoothed = 0.0 self._rtt_variance = 0.0 self._time_of_last_sent_ack_eliciting_packet = 0.0 self._cc = QuicCongestionControl() at: aioquic.quic.recovery.QuicPacketRecovery.on_ack_received self._rtt_initialized = True self._rtt_variance = latest_rtt / 2 self._rtt_variance = 3 / 4 * self._rtt_variance + 1 / 4 * abs( self._rtt_min - self._rtt_latest ) self._rtt_smoothed = latest_rtt self._rtt_smoothed = ( 7 / 8 * self._rtt_smoothed + 1 / 8 * self._rtt_latest ) self._pto_count = 0 at: aioquic.quic.recovery.QuicPacketRecovery.on_loss_detection_timeout self._pto_count += 1 ===========unchanged ref 1=========== at: aioquic.quic.recovery.QuicPacketRecovery.on_packet_sent self._time_of_last_sent_ack_eliciting_packet = packet.sent_time at: aioquic.quic.recovery.QuicPacketSpace.__init__ self.ack_eliciting_in_flight = 0 self.largest_acked_packet = 0 self.loss_time: Optional[float] = None self.sent_packets: Dict[int, QuicSentPacket] = {} at: typing.MutableMapping pop(key: _KT) -> _VT pop(key: _KT, default: Union[_VT, _T]=...) 
-> Union[_VT, _T] ===========changed ref 0=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packet_acked(self, packet: QuicSentPacket) -> None: + self.bytes_in_flight -= packet.sent_bytes + + # don't increase window in congestion recovery + if packet.sent_time <= self._congestion_recovery_start_time: + return + + if self.ssthresh is None or self.congestion_window < self.ssthresh: + # slow start + self.congestion_window += packet.sent_bytes + else: + # congestion avoidance + self._congestion_stash += packet.sent_bytes + count = self._congestion_stash // self.congestion_window + if count: + self._congestion_stash -= count * self.congestion_window + self.congestion_window += count * K_MAX_DATAGRAM_SIZE + ===========changed ref 1=========== # module: aioquic.quic.recovery class QuicPacketRecovery: + @property + def congestion_window(self) -> int: + return self._cc.congestion_window + ===========changed ref 2=========== # module: aioquic.quic.recovery class QuicPacketRecovery: + @property + def bytes_in_flight(self) -> int: + return self._cc.bytes_in_flight + ===========changed ref 3=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packet_sent(self, packet: QuicSentPacket) -> None: + self.bytes_in_flight += packet.sent_bytes + ===========changed ref 4=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packets_expired(self, packets: Iterable[QuicSentPacket]) -> None: + for packet in packets: + self.bytes_in_flight -= packet.sent_bytes + ===========changed ref 5=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + # TODO : collapse congestion window if persistent congestion + + def on_rtt_measurement(self, latest_rtt: float, now: float) -> None: + # check whether we should exit slow start + if self.ssthresh is None and self._rtt_monitor.is_rtt_increasing( + latest_rtt, now + ): + self.ssthresh = self.congestion_window + ===========changed ref 6=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + """ + New Reno congestion control. + """ + ===========changed ref 7=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def discard_space(self, space: QuicPacketSpace) -> None: assert space in self.spaces - for packet in space.sent_packets.values(): - if packet.in_flight: + self._cc.on_packets_expired( - self.on_packet_expired(packet) + filter(lambda x: x.in_flight, space.sent_packets.values()) + ) space.sent_packets.clear() space.ack_at = None space.ack_eliciting_in_flight = 0 space.loss_time = None
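The RTT section of on_ack_received implements the usual smoothed-RTT estimator (the same exponential weights RFC 9002 inherits from RFC 6298): clamp the sample to at least 1 ms, discount the peer-reported ACK delay when that does not push the sample below the observed minimum, then blend with 1/8 and 1/4 weights. A compact restatement for a single non-first sample, with arbitrary example values:

def update_rtt(latest, rtt_min, smoothed, variance, ack_delay, max_ack_delay=0.025):
    latest = max(latest, 0.001)                    # never below 1 ms
    rtt_min = min(rtt_min, latest)                 # track the minimum RTT seen
    ack_delay = min(ack_delay, max_ack_delay)      # cap the peer's reported delay
    if latest > rtt_min + ack_delay:               # discount ACK delay when safe
        latest -= ack_delay
    variance = 3 / 4 * variance + 1 / 4 * abs(rtt_min - latest)
    smoothed = 7 / 8 * smoothed + 1 / 8 * latest
    return latest, rtt_min, smoothed, variance


print(update_rtt(latest=0.120, rtt_min=0.080, smoothed=0.100, variance=0.020, ack_delay=0.010))
# -> latest 0.110, min 0.080, smoothed ~0.10125, variance ~0.0225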
aioquic.quic.recovery/QuicPacketRecovery.on_loss_detection_timeout
Modified
aiortc~aioquic
b9c38183f6b5371f267831543d2166b20e615966
[recovery] split out congestion control
<2>:<add> self._detect_loss(loss_space, now=now) <del> self.detect_loss(loss_space, now=now) <8>:<add> self._on_packets_lost( <add> tuple( <add> filter( <del> for packet_number, packet in list( <9>:<add> lambda i: i.is_crypto_packet, space.sent_packets.values() <del> filter(lambda i: i[1].is_crypto_packet, space.sent_packets.items()) <10>:<add> ) <add> ), <add> space=space, <add> now=now, <add> ) <del> ): <11>:<del> # remove packet and update counters <12>:<del> self.on_packet_lost(packet, space)
# module: aioquic.quic.recovery class QuicPacketRecovery: def on_loss_detection_timeout(self, now: float) -> None: <0> loss_space = self.get_earliest_loss_space() <1> if loss_space is not None: <2> self.detect_loss(loss_space, now=now) <3> else: <4> self._pto_count += 1 <5> <6> # reschedule some data <7> for space in self.spaces: <8> for packet_number, packet in list( <9> filter(lambda i: i[1].is_crypto_packet, space.sent_packets.items()) <10> ): <11> # remove packet and update counters <12> self.on_packet_lost(packet, space) <13> <14> self._send_probe() <15>
===========unchanged ref 0=========== at: aioquic.quic.recovery.QuicPacketRecovery.__init__ self._rtt_initialized = False self._rtt_latest = 0.0 self._rtt_min = math.inf self._rtt_smoothed = 0.0 self._rtt_variance = 0.0 at: aioquic.quic.recovery.QuicPacketRecovery.on_ack_received latest_rtt = now - largest_sent_time ack_delay = min(ack_delay, self.max_ack_delay) ===========changed ref 0=========== # module: aioquic.quic.recovery class QuicPacketRecovery: + @property + def congestion_window(self) -> int: + return self._cc.congestion_window + ===========changed ref 1=========== # module: aioquic.quic.recovery class QuicPacketRecovery: + @property + def bytes_in_flight(self) -> int: + return self._cc.bytes_in_flight + ===========changed ref 2=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packet_sent(self, packet: QuicSentPacket) -> None: + self.bytes_in_flight += packet.sent_bytes + ===========changed ref 3=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packets_expired(self, packets: Iterable[QuicSentPacket]) -> None: + for packet in packets: + self.bytes_in_flight -= packet.sent_bytes + ===========changed ref 4=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + # TODO : collapse congestion window if persistent congestion + + def on_rtt_measurement(self, latest_rtt: float, now: float) -> None: + # check whether we should exit slow start + if self.ssthresh is None and self._rtt_monitor.is_rtt_increasing( + latest_rtt, now + ): + self.ssthresh = self.congestion_window + ===========changed ref 5=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + """ + New Reno congestion control. + """ + ===========changed ref 6=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def discard_space(self, space: QuicPacketSpace) -> None: assert space in self.spaces - for packet in space.sent_packets.values(): - if packet.in_flight: + self._cc.on_packets_expired( - self.on_packet_expired(packet) + filter(lambda x: x.in_flight, space.sent_packets.values()) + ) space.sent_packets.clear() space.ack_at = None space.ack_eliciting_in_flight = 0 space.loss_time = None ===========changed ref 7=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def __init__(self) -> None: + self.bytes_in_flight = 0 + self.congestion_window = K_INITIAL_WINDOW + self._congestion_recovery_start_time = 0.0 + self._congestion_stash = 0 + self._rtt_monitor = QuicRttMonitor() + self.ssthresh: Optional[int] = None + ===========changed ref 8=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packets_lost(self, packets: Iterable[QuicSentPacket], now: float) -> None: + lost_largest_time = 0.0 + for packet in packets: + self.bytes_in_flight -= packet.sent_bytes + lost_largest_time = packet.sent_time + + # start a new congestion event if packet was sent after the + # start of the previous congestion recovery period. 
+ if lost_largest_time > self._congestion_recovery_start_time: + self._congestion_recovery_start_time = now + self.congestion_window = max( + int(self.congestion_window * K_LOSS_REDUCTION_FACTOR), K_MINIMUM_WINDOW + ) + self.ssthresh = self.congestion_window + ===========changed ref 9=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packet_acked(self, packet: QuicSentPacket) -> None: + self.bytes_in_flight -= packet.sent_bytes + + # don't increase window in congestion recovery + if packet.sent_time <= self._congestion_recovery_start_time: + return + + if self.ssthresh is None or self.congestion_window < self.ssthresh: + # slow start + self.congestion_window += packet.sent_bytes + else: + # congestion avoidance + self._congestion_stash += packet.sent_bytes + count = self._congestion_stash // self.congestion_window + if count: + self._congestion_stash -= count * self.congestion_window + self.congestion_window += count * K_MAX_DATAGRAM_SIZE + ===========changed ref 10=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def __init__( self, is_client_without_1rtt: bool, send_probe: Callable[[], None], quic_logger: Optional[QuicLoggerTrace] = None, ) -> None: self.is_client_without_1rtt = is_client_without_1rtt self.max_ack_delay = 0.025 self.spaces: List[QuicPacketSpace] = [] # callbacks self._quic_logger = quic_logger self._send_probe = send_probe # loss detection self._pto_count = 0 self._rtt_initialized = False self._rtt_latest = 0.0 self._rtt_min = math.inf self._rtt_smoothed = 0.0 self._rtt_variance = 0.0 self._time_of_last_sent_ack_eliciting_packet = 0.0 # congestion control - self.bytes_in_flight = 0 - self.congestion_window = K_INITIAL_WINDOW - self._congestion_recovery_start_time = 0.0 - self._congestion_stash = 0 - self._rtt_monitor = QuicRttMonitor() - self._ssthresh: Optional[int] = None + self._cc = QuicCongestionControl()
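On a PTO where no space has a pending loss time, the timeout handler above no longer loops over crypto packets one by one; it filters them in a single pass and hands the tuple to _on_packets_lost. A small data-flow sketch of that filter (the dataclass and sample packets are invented):

from dataclasses import dataclass


@dataclass
class SentPacketSketch:
    packet_number: int
    is_crypto_packet: bool


sent_packets = {
    0: SentPacketSketch(0, True),
    1: SentPacketSketch(1, False),
    2: SentPacketSketch(2, True),
}
# same shape as: tuple(filter(lambda i: i.is_crypto_packet, space.sent_packets.values()))
crypto_packets = tuple(filter(lambda p: p.is_crypto_packet, sent_packets.values()))
assert [p.packet_number for p in crypto_packets] == [0, 2]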
aioquic.quic.recovery/QuicPacketRecovery.on_packet_sent
Modified
aiortc~aioquic
b9c38183f6b5371f267831543d2166b20e615966
[recovery] split out congestion control
<9>:<add> self._cc.on_packet_sent(packet) <del> self.bytes_in_flight += packet.sent_bytes
# module: aioquic.quic.recovery class QuicPacketRecovery: def on_packet_sent(self, packet: QuicSentPacket, space: QuicPacketSpace) -> None: <0> space.sent_packets[packet.packet_number] = packet <1> <2> if packet.is_ack_eliciting: <3> space.ack_eliciting_in_flight += 1 <4> if packet.in_flight: <5> if packet.is_ack_eliciting: <6> self._time_of_last_sent_ack_eliciting_packet = packet.sent_time <7> <8> # add packet to bytes in flight <9> self.bytes_in_flight += packet.sent_bytes <10> <11> if self._quic_logger is not None: <12> self._log_metrics_updated() <13>
===========unchanged ref 0=========== at: aioquic.quic.recovery K_PACKET_THRESHOLD = 3 K_INITIAL_RTT = 0.5 # seconds K_TIME_THRESHOLD = 9 / 8 QuicPacketSpace() at: aioquic.quic.recovery.QuicPacketRecovery _log_metrics_updated(self, log_rtt=False) -> None _log_metrics_updated(log_rtt=False) -> None at: aioquic.quic.recovery.QuicPacketRecovery.__init__ self._quic_logger = quic_logger self._rtt_initialized = False self._rtt_latest = 0.0 self._rtt_smoothed = 0.0 at: aioquic.quic.recovery.QuicPacketRecovery.on_ack_received self._rtt_latest = max(latest_rtt, 0.001) self._rtt_latest -= ack_delay self._rtt_initialized = True self._rtt_smoothed = latest_rtt self._rtt_smoothed = ( 7 / 8 * self._rtt_smoothed + 1 / 8 * self._rtt_latest ) at: aioquic.quic.recovery.QuicPacketSpace.__init__ self.largest_acked_packet = 0 ===========changed ref 0=========== # module: aioquic.quic.recovery class QuicPacketRecovery: - def on_packet_expired(self, packet: QuicSentPacket) -> None: - self.bytes_in_flight -= packet.sent_bytes - - if self._quic_logger is not None: - self._log_metrics_updated() - ===========changed ref 1=========== # module: aioquic.quic.recovery class QuicPacketRecovery: + @property + def congestion_window(self) -> int: + return self._cc.congestion_window + ===========changed ref 2=========== # module: aioquic.quic.recovery class QuicPacketRecovery: + @property + def bytes_in_flight(self) -> int: + return self._cc.bytes_in_flight + ===========changed ref 3=========== # module: aioquic.quic.recovery class QuicPacketRecovery: - def on_packet_lost(self, packet: QuicSentPacket, space: QuicPacketSpace) -> None: - del space.sent_packets[packet.packet_number] - - if packet.is_ack_eliciting: - space.ack_eliciting_in_flight -= 1 - if packet.in_flight: - self.bytes_in_flight -= packet.sent_bytes - - if self._quic_logger is not None: - self._quic_logger.log_event( - category="recovery", - event="packet_lost", - data={ - "type": self._quic_logger.packet_type(packet.packet_type), - "packet_number": str(packet.packet_number), - }, - ) - self._log_metrics_updated() - - # trigger callbacks - for handler, args in packet.delivery_handlers: - handler(QuicDeliveryState.LOST, *args) - ===========changed ref 4=========== # module: aioquic.quic.recovery class QuicPacketRecovery: - def on_packet_acked(self, packet: QuicSentPacket) -> None: - self.bytes_in_flight -= packet.sent_bytes - - # don't increase window in congestion recovery - if packet.sent_time <= self._congestion_recovery_start_time: - return - - if self._ssthresh is None or self.congestion_window < self._ssthresh: - # slow start - self.congestion_window += packet.sent_bytes - else: - # congestion avoidance - self._congestion_stash += packet.sent_bytes - count = self._congestion_stash // self.congestion_window - if count: - self._congestion_stash -= count * self.congestion_window - self.congestion_window += count * K_MAX_DATAGRAM_SIZE - ===========changed ref 5=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packet_sent(self, packet: QuicSentPacket) -> None: + self.bytes_in_flight += packet.sent_bytes + ===========changed ref 6=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def on_packets_expired(self, packets: Iterable[QuicSentPacket]) -> None: + for packet in packets: + self.bytes_in_flight -= packet.sent_bytes + ===========changed ref 7=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + # TODO : collapse congestion window if persistent congestion + + def 
on_rtt_measurement(self, latest_rtt: float, now: float) -> None: + # check whether we should exit slow start + if self.ssthresh is None and self._rtt_monitor.is_rtt_increasing( + latest_rtt, now + ): + self.ssthresh = self.congestion_window + ===========changed ref 8=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def on_loss_detection_timeout(self, now: float) -> None: loss_space = self.get_earliest_loss_space() if loss_space is not None: + self._detect_loss(loss_space, now=now) - self.detect_loss(loss_space, now=now) else: self._pto_count += 1 # reschedule some data for space in self.spaces: + self._on_packets_lost( + tuple( + filter( - for packet_number, packet in list( + lambda i: i.is_crypto_packet, space.sent_packets.values() - filter(lambda i: i[1].is_crypto_packet, space.sent_packets.items()) + ) + ), + space=space, + now=now, + ) - ): - # remove packet and update counters - self.on_packet_lost(packet, space) self._send_probe() ===========changed ref 9=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + """ + New Reno congestion control. + """ + ===========changed ref 10=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def discard_space(self, space: QuicPacketSpace) -> None: assert space in self.spaces - for packet in space.sent_packets.values(): - if packet.in_flight: + self._cc.on_packets_expired( - self.on_packet_expired(packet) + filter(lambda x: x.in_flight, space.sent_packets.values()) + ) space.sent_packets.clear() space.ack_at = None space.ack_eliciting_in_flight = 0 space.loss_time = None ===========changed ref 11=========== # module: aioquic.quic.recovery + class QuicCongestionControl: + def __init__(self) -> None: + self.bytes_in_flight = 0 + self.congestion_window = K_INITIAL_WINDOW + self._congestion_recovery_start_time = 0.0 + self._congestion_stash = 0 + self._rtt_monitor = QuicRttMonitor() + self.ssthresh: Optional[int] = None +
aioquic.quic.recovery/QuicPacketRecovery._log_metrics_updated
Modified
aiortc~aioquic
b9c38183f6b5371f267831543d2166b20e615966
[recovery] split out congestion control
<0>:<add> data = { <add> "bytes_in_flight": self._cc.bytes_in_flight, <add> "cwnd": self._cc.congestion_window, <add> } <del> data = {"bytes_in_flight": self.bytes_in_flight, "cwnd": self.congestion_window} <1>:<add> if self._cc.ssthresh is not None: <del> if self._ssthresh is not None: <2>:<add> data["ssthresh"] = self._cc.ssthresh <del> data["ssthresh"] = self._ssthresh
# module: aioquic.quic.recovery class QuicPacketRecovery: - # TODO : collapse congestion window if persistent congestion - def _log_metrics_updated(self, log_rtt=False) -> None: <0> data = {"bytes_in_flight": self.bytes_in_flight, "cwnd": self.congestion_window} <1> if self._ssthresh is not None: <2> data["ssthresh"] = self._ssthresh <3> <4> if log_rtt: <5> data.update( <6> { <7> "latest_rtt": int(self._rtt_latest * 1000), <8> "min_rtt": int(self._rtt_min * 1000), <9> "smoothed_rtt": int(self._rtt_smoothed * 1000), <10> "rtt_variance": int(self._rtt_variance * 1000), <11> } <12> ) <13> <14> self._quic_logger.log_event( <15> category="recovery", event="metrics_updated", data=data <16> ) <17>
===========unchanged ref 0=========== at: aioquic.quic.recovery.QuicCongestionControl.__init__ self.bytes_in_flight = 0 self.congestion_window = K_INITIAL_WINDOW self.ssthresh: Optional[int] = None at: aioquic.quic.recovery.QuicCongestionControl.on_packet_acked self.bytes_in_flight -= packet.sent_bytes self.congestion_window += packet.sent_bytes self.congestion_window += count * K_MAX_DATAGRAM_SIZE at: aioquic.quic.recovery.QuicCongestionControl.on_packet_sent self.bytes_in_flight += packet.sent_bytes at: aioquic.quic.recovery.QuicCongestionControl.on_packets_expired self.bytes_in_flight -= packet.sent_bytes at: aioquic.quic.recovery.QuicCongestionControl.on_packets_lost self.bytes_in_flight -= packet.sent_bytes self.congestion_window = max( int(self.congestion_window * K_LOSS_REDUCTION_FACTOR), K_MINIMUM_WINDOW ) self.ssthresh = self.congestion_window at: aioquic.quic.recovery.QuicCongestionControl.on_rtt_measurement self.ssthresh = self.congestion_window at: aioquic.quic.recovery.QuicPacketRecovery _on_packets_lost(packets: Iterable[QuicSentPacket], space: QuicPacketSpace, now: float) -> None at: aioquic.quic.recovery.QuicPacketRecovery.__init__ self._rtt_latest = 0.0 self._rtt_min = math.inf self._rtt_smoothed = 0.0 self._rtt_variance = 0.0 self._cc = QuicCongestionControl() ===========unchanged ref 1=========== at: aioquic.quic.recovery.QuicPacketRecovery._detect_loss lost_packets = [] at: aioquic.quic.recovery.QuicPacketRecovery.on_ack_received self._rtt_latest = max(latest_rtt, 0.001) self._rtt_latest -= ack_delay self._rtt_min = self._rtt_latest self._rtt_variance = latest_rtt / 2 self._rtt_variance = 3 / 4 * self._rtt_variance + 1 / 4 * abs( self._rtt_min - self._rtt_latest ) self._rtt_smoothed = latest_rtt self._rtt_smoothed = ( 7 / 8 * self._rtt_smoothed + 1 / 8 * self._rtt_latest ) ===========changed ref 0=========== # module: aioquic.quic.recovery class QuicPacketRecovery: - def on_packet_expired(self, packet: QuicSentPacket) -> None: - self.bytes_in_flight -= packet.sent_bytes - - if self._quic_logger is not None: - self._log_metrics_updated() - ===========changed ref 1=========== # module: aioquic.quic.recovery class QuicPacketRecovery: - def on_packets_lost(self, lost_largest_time: float, now: float) -> None: - # start a new congestion event if packet was sent after the - # start of the previous congestion recovery period. 
- if lost_largest_time > self._congestion_recovery_start_time: - self._congestion_recovery_start_time = now - self.congestion_window = max( - int(self.congestion_window * K_LOSS_REDUCTION_FACTOR), K_MINIMUM_WINDOW - ) - self._ssthresh = self.congestion_window - - if self._quic_logger is not None: - self._log_metrics_updated() - ===========changed ref 2=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def on_packet_sent(self, packet: QuicSentPacket, space: QuicPacketSpace) -> None: space.sent_packets[packet.packet_number] = packet if packet.is_ack_eliciting: space.ack_eliciting_in_flight += 1 if packet.in_flight: if packet.is_ack_eliciting: self._time_of_last_sent_ack_eliciting_packet = packet.sent_time # add packet to bytes in flight + self._cc.on_packet_sent(packet) - self.bytes_in_flight += packet.sent_bytes if self._quic_logger is not None: self._log_metrics_updated() ===========changed ref 3=========== # module: aioquic.quic.recovery class QuicPacketRecovery: + @property + def congestion_window(self) -> int: + return self._cc.congestion_window + ===========changed ref 4=========== # module: aioquic.quic.recovery class QuicPacketRecovery: + @property + def bytes_in_flight(self) -> int: + return self._cc.bytes_in_flight + ===========changed ref 5=========== # module: aioquic.quic.recovery class QuicPacketRecovery: - def on_packet_lost(self, packet: QuicSentPacket, space: QuicPacketSpace) -> None: - del space.sent_packets[packet.packet_number] - - if packet.is_ack_eliciting: - space.ack_eliciting_in_flight -= 1 - if packet.in_flight: - self.bytes_in_flight -= packet.sent_bytes - - if self._quic_logger is not None: - self._quic_logger.log_event( - category="recovery", - event="packet_lost", - data={ - "type": self._quic_logger.packet_type(packet.packet_type), - "packet_number": str(packet.packet_number), - }, - ) - self._log_metrics_updated() - - # trigger callbacks - for handler, args in packet.delivery_handlers: - handler(QuicDeliveryState.LOST, *args) - ===========changed ref 6=========== # module: aioquic.quic.recovery class QuicPacketRecovery: - def on_packet_acked(self, packet: QuicSentPacket) -> None: - self.bytes_in_flight -= packet.sent_bytes - - # don't increase window in congestion recovery - if packet.sent_time <= self._congestion_recovery_start_time: - return - - if self._ssthresh is None or self.congestion_window < self._ssthresh: - # slow start - self.congestion_window += packet.sent_bytes - else: - # congestion avoidance - self._congestion_stash += packet.sent_bytes - count = self._congestion_stash // self.congestion_window - if count: - self._congestion_stash -= count * self.congestion_window - self.congestion_window += count * K_MAX_DATAGRAM_SIZE -
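For reference, the on_packet_acked logic removed from QuicPacketRecovery above (and now owned by QuicCongestionControl) is plain New Reno growth: the window grows by the ACKed bytes during slow start and by roughly one datagram per full window of acknowledged data during congestion avoidance. A self-contained sketch of that rule, assuming the module's K_MAX_DATAGRAM_SIZE value of 1280 bytes and omitting the congestion-recovery check for brevity:

K_MAX_DATAGRAM_SIZE = 1280

def grow_window(cwnd: int, ssthresh, stash: int, acked_bytes: int):
    """Return (new_cwnd, new_stash) after an ACK of acked_bytes."""
    if ssthresh is None or cwnd < ssthresh:
        # slow start: the window grows by the ACKed bytes
        return cwnd + acked_bytes, stash
    # congestion avoidance: accumulate ACKed bytes and add one datagram's
    # worth of window per full congestion window acknowledged
    stash += acked_bytes
    count = stash // cwnd
    if count:
        stash -= count * cwnd
        cwnd += count * K_MAX_DATAGRAM_SIZE
    return cwnd, stash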
tests.test_tls/ContextTest._handshake
Modified
aiortc~aioquic
e80bb53cd28654fbd7d526547cd324df71055520
[tls] add support for x448
<6>:<add> self.assertLessEqual(len(server_input), 358) <del> self.assertLessEqual(len(server_input), 296)
# module: tests.test_tls class ContextTest(TestCase): def _handshake(self, client, server): <0> # send client hello <1> client_buf = create_buffers() <2> client.handle_message(b"", client_buf) <3> self.assertEqual(client.state, State.CLIENT_EXPECT_SERVER_HELLO) <4> server_input = merge_buffers(client_buf) <5> self.assertGreaterEqual(len(server_input), 213) <6> self.assertLessEqual(len(server_input), 296) <7> reset_buffers(client_buf) <8> <9> # handle client hello <10> # send server hello, encrypted extensions, certificate, certificate verify, finished, (session ticket) <11> server_buf = create_buffers() <12> server.handle_message(server_input, server_buf) <13> self.assertEqual(server.state, State.SERVER_EXPECT_FINISHED) <14> client_input = merge_buffers(server_buf) <15> self.assertGreaterEqual(len(client_input), 600) <16> self.assertLessEqual(len(client_input), 2316) <17> <18> reset_buffers(server_buf) <19> <20> # handle server hello, encrypted extensions, certificate, certificate verify, finished, (session ticket) <21> # send finished <22> client.handle_message(client_input, client_buf) <23> self.assertEqual(client.state, State.CLIENT_POST_HANDSHAKE) <24> server_input = merge_buffers(client_buf) <25> self.assertEqual(len(server_input), 52) <26> reset_buffers(client_buf) <27> <28> # handle finished <29> server.handle_message(server_input, server_buf) <30> self.assertEqual(server.state, State.SERVER_POST_HANDSHAKE) <31> client_input = merge_buffers(server_buf) <32> self.assertEqual(len(client_input), 0) <33> <34> # check keys match <35> self.assertEqual(client._dec_key, server._enc_</s>
===========below chunk 0=========== # module: tests.test_tls class ContextTest(TestCase): def _handshake(self, client, server): # offset: 1 self.assertEqual(client._enc_key, server._dec_key) # check cipher suite self.assertEqual( client.key_schedule.cipher_suite, tls.CipherSuite.AES_256_GCM_SHA384 ) self.assertEqual( server.key_schedule.cipher_suite, tls.CipherSuite.AES_256_GCM_SHA384 ) ===========unchanged ref 0=========== at: tests.test_tls create_buffers() merge_buffers(buffers) reset_buffers(buffers) at: unittest.case.TestCase failureException: Type[BaseException] longMessage: bool maxDiff: Optional[int] _testMethodName: str _testMethodDoc: str assertEqual(first: Any, second: Any, msg: Any=...) -> None assertGreaterEqual(a: Any, b: Any, msg: Any=...) -> None assertLessEqual(a: Any, b: Any, msg: Any=...) -> None
aioquic.tls/decode_public_key
Modified
aiortc~aioquic
e80bb53cd28654fbd7d526547cd324df71055520
[tls] add support for x448
<2>:<add> elif key_share[0] == Group.X448: <add> return x448.X448PublicKey.from_public_bytes(key_share[1])
# module: aioquic.tls def decode_public_key( key_share: KeyShareEntry, + ) -> Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey, x448.X448PublicKey, None]: - ) -> Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey, None]: <0> if key_share[0] == Group.X25519: <1> return x25519.X25519PublicKey.from_public_bytes(key_share[1]) <2> elif key_share[0] in GROUP_TO_CURVE: <3> return ec.EllipticCurvePublicKey.from_encoded_point( <4> GROUP_TO_CURVE[key_share[0]](), key_share[1] <5> ) <6> else: <7> return None <8>
===========unchanged ref 0=========== at: aioquic.tls CipherSuite(x: Union[str, bytes, bytearray], base: int) CipherSuite(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) KeyShareEntry = Tuple[int, bytes] CIPHER_SUITES = { CipherSuite.AES_128_GCM_SHA256: hashes.SHA256, CipherSuite.AES_256_GCM_SHA384: hashes.SHA384, CipherSuite.CHACHA20_POLY1305_SHA256: hashes.SHA256, } GROUP_TO_CURVE: Dict = { Group.SECP256R1: ec.SECP256R1, Group.SECP384R1: ec.SECP384R1, Group.SECP521R1: ec.SECP521R1, } ===========changed ref 0=========== # module: aioquic.tls class Group(IntEnum): SECP256R1 = 0x0017 SECP384R1 = 0x0018 SECP521R1 = 0x0019 X25519 = 0x001D + X448 = 0x001E GREASE = 0xAAAA ===========changed ref 1=========== # module: tests.test_tls class ContextTest(TestCase): + def test_handshake_with_x448(self): + client = self.create_client() + client._supported_groups = [tls.Group.X448] + server = self.create_server() + + try: + self._handshake(client, server) + except UnsupportedAlgorithm as exc: + self.skipTest(str(exc)) + ===========changed ref 2=========== # module: tests.test_tls class ContextTest(TestCase): def _handshake(self, client, server): # send client hello client_buf = create_buffers() client.handle_message(b"", client_buf) self.assertEqual(client.state, State.CLIENT_EXPECT_SERVER_HELLO) server_input = merge_buffers(client_buf) self.assertGreaterEqual(len(server_input), 213) + self.assertLessEqual(len(server_input), 358) - self.assertLessEqual(len(server_input), 296) reset_buffers(client_buf) # handle client hello # send server hello, encrypted extensions, certificate, certificate verify, finished, (session ticket) server_buf = create_buffers() server.handle_message(server_input, server_buf) self.assertEqual(server.state, State.SERVER_EXPECT_FINISHED) client_input = merge_buffers(server_buf) self.assertGreaterEqual(len(client_input), 600) self.assertLessEqual(len(client_input), 2316) reset_buffers(server_buf) # handle server hello, encrypted extensions, certificate, certificate verify, finished, (session ticket) # send finished client.handle_message(client_input, client_buf) self.assertEqual(client.state, State.CLIENT_POST_HANDSHAKE) server_input = merge_buffers(client_buf) self.assertEqual(len(server_input), 52) reset_buffers(client_buf) # handle finished server.handle_message(server_input, server_buf) self.assertEqual(server.state, State.SERVER_POST_HANDSHAKE) client_input = merge_buffers(server_buf) self.assertEqual(len(client_input), 0) # check keys match self.assertEqual(client._dec_key, server._enc_key) self.assertEqual(client._enc_key, server._</s> ===========changed ref 3=========== # module: tests.test_tls class ContextTest(TestCase): def _handshake(self, client, server): # offset: 1 <s>assertEqual(client._dec_key, server._enc_key) self.assertEqual(client._enc_key, server._dec_key) # check cipher suite self.assertEqual( client.key_schedule.cipher_suite, tls.CipherSuite.AES_256_GCM_SHA384 ) self.assertEqual( server.key_schedule.cipher_suite, tls.CipherSuite.AES_256_GCM_SHA384 ) ===========changed ref 4=========== # module: tests.test_tls class ContextTest(TestCase): def test_session_ticket(self): client_tickets = [] server_tickets = [] def client_new_ticket(ticket): client_tickets.append(ticket) def server_get_ticket(label): for t in server_tickets: if t.ticket == label: return t return None def server_new_ticket(ticket): server_tickets.append(ticket) def first_handshake(): client = self.create_client() client.new_session_ticket_cb = client_new_ticket server = 
self.create_server() server.new_session_ticket_cb = server_new_ticket self._handshake(client, server) # check session resumption was not used self.assertFalse(client.session_resumed) self.assertFalse(server.session_resumed) # check tickets match self.assertEqual(len(client_tickets), 1) self.assertEqual(len(server_tickets), 1) self.assertEqual(client_tickets[0].ticket, server_tickets[0].ticket) self.assertEqual( client_tickets[0].resumption_secret, server_tickets[0].resumption_secret ) def second_handshake(): client = self.create_client() client.session_ticket = client_tickets[0] server = self.create_server() server.get_session_ticket_cb = server_get_ticket # send client hello with pre_shared_key client_buf = create_buffers() client.handle_message(b"", client_buf) self.assertEqual(client.state, State.CLIENT_EXPECT_SERVER_HELLO) server_input = merge_buffers(client_buf) self.assertGreaterEqual(len(server_input), 383) + self.assertLessEqual(len</s>
aioquic.tls/encode_public_key
Modified
aiortc~aioquic
e80bb53cd28654fbd7d526547cd324df71055520
[tls] add support for x448
<2>:<add> elif isinstance(public_key, x448.X448PublicKey): <add> return (Group.X448, public_key.public_bytes(Encoding.Raw, PublicFormat.Raw))
# module: aioquic.tls def encode_public_key( + public_key: Union[ + ec.EllipticCurvePublicKey, x25519.X25519PublicKey, x448.X448PublicKey + ] - public_key: Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey] ) -> KeyShareEntry: <0> if isinstance(public_key, x25519.X25519PublicKey): <1> return (Group.X25519, public_key.public_bytes(Encoding.Raw, PublicFormat.Raw)) <2> return ( <3> CURVE_TO_GROUP[public_key.curve.__class__], <4> public_key.public_bytes(Encoding.X962, PublicFormat.UncompressedPoint), <5> ) <6>
===========unchanged ref 0=========== at: aioquic.tls Group(x: Union[str, bytes, bytearray], base: int) Group(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) GROUP_TO_CURVE: Dict = { Group.SECP256R1: ec.SECP256R1, Group.SECP384R1: ec.SECP384R1, Group.SECP521R1: ec.SECP521R1, } ===========changed ref 0=========== # module: aioquic.tls class Group(IntEnum): SECP256R1 = 0x0017 SECP384R1 = 0x0018 SECP521R1 = 0x0019 X25519 = 0x001D + X448 = 0x001E GREASE = 0xAAAA ===========changed ref 1=========== # module: aioquic.tls def decode_public_key( key_share: KeyShareEntry, + ) -> Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey, x448.X448PublicKey, None]: - ) -> Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey, None]: if key_share[0] == Group.X25519: return x25519.X25519PublicKey.from_public_bytes(key_share[1]) + elif key_share[0] == Group.X448: + return x448.X448PublicKey.from_public_bytes(key_share[1]) elif key_share[0] in GROUP_TO_CURVE: return ec.EllipticCurvePublicKey.from_encoded_point( GROUP_TO_CURVE[key_share[0]](), key_share[1] ) else: return None ===========changed ref 2=========== # module: tests.test_tls class ContextTest(TestCase): + def test_handshake_with_x448(self): + client = self.create_client() + client._supported_groups = [tls.Group.X448] + server = self.create_server() + + try: + self._handshake(client, server) + except UnsupportedAlgorithm as exc: + self.skipTest(str(exc)) + ===========changed ref 3=========== # module: tests.test_tls class ContextTest(TestCase): def _handshake(self, client, server): # send client hello client_buf = create_buffers() client.handle_message(b"", client_buf) self.assertEqual(client.state, State.CLIENT_EXPECT_SERVER_HELLO) server_input = merge_buffers(client_buf) self.assertGreaterEqual(len(server_input), 213) + self.assertLessEqual(len(server_input), 358) - self.assertLessEqual(len(server_input), 296) reset_buffers(client_buf) # handle client hello # send server hello, encrypted extensions, certificate, certificate verify, finished, (session ticket) server_buf = create_buffers() server.handle_message(server_input, server_buf) self.assertEqual(server.state, State.SERVER_EXPECT_FINISHED) client_input = merge_buffers(server_buf) self.assertGreaterEqual(len(client_input), 600) self.assertLessEqual(len(client_input), 2316) reset_buffers(server_buf) # handle server hello, encrypted extensions, certificate, certificate verify, finished, (session ticket) # send finished client.handle_message(client_input, client_buf) self.assertEqual(client.state, State.CLIENT_POST_HANDSHAKE) server_input = merge_buffers(client_buf) self.assertEqual(len(server_input), 52) reset_buffers(client_buf) # handle finished server.handle_message(server_input, server_buf) self.assertEqual(server.state, State.SERVER_POST_HANDSHAKE) client_input = merge_buffers(server_buf) self.assertEqual(len(client_input), 0) # check keys match self.assertEqual(client._dec_key, server._enc_key) self.assertEqual(client._enc_key, server._</s> ===========changed ref 4=========== # module: tests.test_tls class ContextTest(TestCase): def _handshake(self, client, server): # offset: 1 <s>assertEqual(client._dec_key, server._enc_key) self.assertEqual(client._enc_key, server._dec_key) # check cipher suite self.assertEqual( client.key_schedule.cipher_suite, tls.CipherSuite.AES_256_GCM_SHA384 ) self.assertEqual( server.key_schedule.cipher_suite, tls.CipherSuite.AES_256_GCM_SHA384 ) ===========changed ref 5=========== # module: tests.test_tls class ContextTest(TestCase): def 
test_session_ticket(self): client_tickets = [] server_tickets = [] def client_new_ticket(ticket): client_tickets.append(ticket) def server_get_ticket(label): for t in server_tickets: if t.ticket == label: return t return None def server_new_ticket(ticket): server_tickets.append(ticket) def first_handshake(): client = self.create_client() client.new_session_ticket_cb = client_new_ticket server = self.create_server() server.new_session_ticket_cb = server_new_ticket self._handshake(client, server) # check session resumption was not used self.assertFalse(client.session_resumed) self.assertFalse(server.session_resumed) # check tickets match self.assertEqual(len(client_tickets), 1) self.assertEqual(len(server_tickets), 1) self.assertEqual(client_tickets[0].ticket, server_tickets[0].ticket) self.assertEqual( client_tickets[0].resumption_secret, server_tickets[0].resumption_secret ) def second_handshake(): client = self.create_client() client.session_ticket = client_tickets[0] server = self.create_server() server.get_session_ticket_cb = server_get_ticket # send client hello with pre_shared_key client_buf = create_buffers() client.handle_message(b"", client_buf) self.assertEqual(client.state, State.CLIENT_EXPECT_SERVER_HELLO) server_input = merge_buffers(client_buf) self.assertGreaterEqual(len(server_input), 383) + self.assertLessEqual(len</s>
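encode_public_key and decode_public_key above round-trip an X448 key share through the raw 56-byte encoding. The sketch below shows the underlying exchange directly with the cryptography package (independent of aioquic's helpers); on OpenSSL builds without X448 support it raises UnsupportedAlgorithm, which is why test_handshake_with_x448 skips in that case:

from cryptography.hazmat.primitives.asymmetric import x448
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat

# each side generates an ephemeral X448 key pair
client_key = x448.X448PrivateKey.generate()
server_key = x448.X448PrivateKey.generate()

# the key share carries the raw public key, as in encode_public_key()
raw = client_key.public_key().public_bytes(Encoding.Raw, PublicFormat.Raw)
client_pub = x448.X448PublicKey.from_public_bytes(raw)

# both sides derive the same shared secret
assert server_key.exchange(client_pub) == client_key.exchange(server_key.public_key())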
aioquic.tls/Context.__init__
Modified
aiortc~aioquic
e80bb53cd28654fbd7d526547cd324df71055520
[tls] add support for x448
<s> is_client: bool, alpn_protocols: Optional[List[str]] = None, cadata: Optional[bytes] = None, cafile: Optional[str] = None, capath: Optional[str] = None, logger: Optional[Union[logging.Logger, logging.LoggerAdapter]] = None, max_early_data: Optional[int] = None, server_name: Optional[str] = None, verify_mode: Optional[int] = None, ): <0> # configuration <1> self._alpn_protocols = alpn_protocols <2> self._cadata = cadata <3> self._cafile = cafile <4> self._capath = capath <5> self.certificate: Optional[x509.Certificate] = None <6> self.certificate_chain: List[x509.Certificate] = [] <7> self.certificate_private_key: Optional[ <8> Union[dsa.DSAPrivateKey, ec.EllipticCurvePrivateKey, rsa.RSAPrivateKey] <9> ] = None <10> self.handshake_extensions: List[Extension] = [] <11> self._max_early_data = max_early_data <12> self.session_ticket: Optional[SessionTicket] = None <13> self._server_name = server_name <14> if verify_mode is not None: <15> self._verify_mode = verify_mode <16> else: <17> self._verify_mode = ssl.CERT_REQUIRED if is_client else ssl.CERT_NONE <18> <19> # callbacks <20> self.alpn_cb: Optional[AlpnHandler] = None <21> self.get_session_ticket_cb: Optional[SessionTicketFetcher] = None <22> self.new_session_ticket_cb: Optional[SessionTicketHandler] = None <23> self.update_traffic_key_cb: Callable[ <24> [Direction, Epoch, CipherSuite, bytes], None <25> ] = lambda d, e, c, s: None <26> <27> # supported parameters <28> self._cipher_suites = [ <29> CipherSuite.AES_256_GCM_</s>
===========below chunk 0=========== <s>: bool, alpn_protocols: Optional[List[str]] = None, cadata: Optional[bytes] = None, cafile: Optional[str] = None, capath: Optional[str] = None, logger: Optional[Union[logging.Logger, logging.LoggerAdapter]] = None, max_early_data: Optional[int] = None, server_name: Optional[str] = None, verify_mode: Optional[int] = None, ): # offset: 1 CipherSuite.AES_128_GCM_SHA256, CipherSuite.CHACHA20_POLY1305_SHA256, ] self._compression_methods: List[int] = [CompressionMethod.NULL] self._psk_key_exchange_modes: List[int] = [PskKeyExchangeMode.PSK_DHE_KE] self._signature_algorithms: List[int] = [ SignatureAlgorithm.RSA_PSS_RSAE_SHA256, SignatureAlgorithm.ECDSA_SECP256R1_SHA256, SignatureAlgorithm.RSA_PKCS1_SHA256, SignatureAlgorithm.RSA_PKCS1_SHA1, ] self._supported_groups = [Group.SECP256R1] if default_backend().x25519_supported(): self._supported_groups.append(Group.X25519) self._supported_versions = [TLS_VERSION_1_3] # state self.alpn_negotiated: Optional[str] = None self.early_data_accepted = False self.key_schedule: Optional[KeySchedule] = None self.received_extensions: Optional[List[Extension]] = None self._key_schedule_psk: Optional[KeySchedule] = None self._key_schedule_proxy: Optional[KeyScheduleProxy] = None self._new_session_ticket: Optional[NewSessionTicket] = None self._peer_certificate: Optional[x509.Certificate] = None self._peer_certificate_chain: List[x509.Certificate] = [] self</s> ===========below chunk 1=========== <s>: bool, alpn_protocols: Optional[List[str]] = None, cadata: Optional[bytes] = None, cafile: Optional[str] = None, capath: Optional[str] = None, logger: Optional[Union[logging.Logger, logging.LoggerAdapter]] = None, max_early_data: Optional[int] = None, server_name: Optional[str] = None, verify_mode: Optional[int] = None, ): # offset: 2 <s>[x509.Certificate] = None self._peer_certificate_chain: List[x509.Certificate] = [] self._receive_buffer = b"" self._session_resumed = False self._enc_key: Optional[bytes] = None self._dec_key: Optional[bytes] = None self.__logger = logger self._ec_private_key: Optional[ec.EllipticCurvePrivateKey] = None self._x25519_private_key: Optional[x25519.X25519PrivateKey] = None if is_client: self.client_random = os.urandom(32) self.session_id = os.urandom(32) self.state = State.CLIENT_HANDSHAKE_START else: self.client_random = None self.session_id = None self.state = State.SERVER_EXPECT_CLIENT_HELLO ===========unchanged ref 0=========== at: aioquic.tls TLS_VERSION_1_3 = 0x0304 utcnow = datetime.datetime.utcnow Direction() Epoch() CipherSuite(x: Union[str, bytes, bytearray], base: int) CipherSuite(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) CompressionMethod(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) CompressionMethod(x: Union[str, bytes, bytearray], base: int) Group(x: Union[str, bytes, bytearray], base: int) Group(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) PskKeyExchangeMode(x: Union[str, bytes, bytearray], base: int) PskKeyExchangeMode(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) SignatureAlgorithm(x: Union[str, bytes, bytearray], base: int) SignatureAlgorithm(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) 
Extension = Tuple[int, bytes] NewSessionTicket(ticket_lifetime: int=0, ticket_age_add: int=0, ticket_nonce: bytes=b"", ticket: bytes=b"", max_early_data_size: Optional[int]=None, other_extensions: List[Tuple[int, bytes]]=field(default_factory=list)) KeySchedule(cipher_suite: CipherSuite) KeyScheduleProxy(cipher_suites: List[CipherSuite]) SessionTicket(age_add: int, cipher_suite: CipherSuite, not_valid_after: datetime.datetime, not_valid_before: datetime.datetime, resumption_secret: bytes, server_name: str, ticket: bytes, max_early_data_size: Optional[int]=None, other_extensions: List[Tuple[int, bytes]]=field(default_factory=list)) ===========unchanged ref 1=========== at: aioquic.tls.Context._client_handle_certificate self._peer_certificate = x509.load_der_x509_certificate( certificate.certificates[0][0], backend=default_backend() ) self._peer_certificate_chain = [ x509.load_der_x509_certificate( certificate.certificates[i][0], backend=default_backend() ) for i in range(1, len(certificate.certificates)) ] at: aioquic.tls.Context._client_handle_encrypted_extensions self.alpn_negotiated = encrypted_extensions.alpn_protocol self.early_data_accepted = encrypted_extensions.early_data self.received_extensions = encrypted_extensions.other_extensions at: aioquic.tls.Context._client_handle_hello self.key_schedule = self._key_schedule_psk self.key_schedule = self._key_schedule_proxy.select(cipher_suite) self._key_schedule_psk = None self._key_schedule_proxy = None at: aioquic.tls.Context._client_send_hello self._key_schedule_psk = KeySchedule(self.session_ticket.cipher_suite) self._key_schedule_proxy = KeyScheduleProxy(self._cipher_suites) at: aioquic.tls.Context._server_handle_hello self.alpn_negotiated = negotiate( self._alpn_protocols, peer_hello.alpn_protocols, AlertHandshakeFailure("No common ALPN protocols"), ) self.received_extensions = peer_hello.other_extensions self.key_schedule = KeySchedule(cipher_suite) self.early_data_accepted = True
aioquic.tls/Context._client_send_hello
Modified
aiortc~aioquic
e80bb53cd28654fbd7d526547cd324df71055520
[tls] add support for x448
<16>:<add> elif group == Group.X448: <add> self._x448_private_key = x448.X448PrivateKey.generate() <add> key_share.append(encode_public_key(self._x448_private_key.public_key())) <add> supported_groups.append(Group.X448)
# module: aioquic.tls class Context: def _client_send_hello(self, output_buf: Buffer) -> None: <0> key_share: List[KeyShareEntry] = [] <1> supported_groups: List[int] = [] <2> <3> for group in self._supported_groups: <4> if group == Group.SECP256R1: <5> self._ec_private_key = ec.generate_private_key( <6> GROUP_TO_CURVE[Group.SECP256R1](), default_backend() <7> ) <8> key_share.append(encode_public_key(self._ec_private_key.public_key())) <9> supported_groups.append(Group.SECP256R1) <10> elif group == Group.X25519: <11> self._x25519_private_key = x25519.X25519PrivateKey.generate() <12> key_share.append( <13> encode_public_key(self._x25519_private_key.public_key()) <14> ) <15> supported_groups.append(Group.X25519) <16> elif group == Group.GREASE: <17> key_share.append((Group.GREASE, b"\x00")) <18> supported_groups.append(Group.GREASE) <19> <20> assert len(key_share), "no key share entries" <21> <22> hello = ClientHello( <23> random=self.client_random, <24> session_id=self.session_id, <25> cipher_suites=[int(x) for x in self._cipher_suites], <26> compression_methods=self._compression_methods, <27> alpn_protocols=self._alpn_protocols, <28> key_share=key_share, <29> psk_key_exchange_modes=self._psk_key_exchange_modes <30> if (self.session_ticket or self.new_session_ticket_cb is not None) <31> else None, <32> server_name=self._server_name, <33> signature_algorithms=self._signature_algorithms, <34> supported_groups=supported_</s>
===========below chunk 0=========== # module: aioquic.tls class Context: def _client_send_hello(self, output_buf: Buffer) -> None: # offset: 1 supported_versions=self._supported_versions, other_extensions=self.handshake_extensions, ) # PSK if self.session_ticket and self.session_ticket.is_valid: self._key_schedule_psk = KeySchedule(self.session_ticket.cipher_suite) self._key_schedule_psk.extract(self.session_ticket.resumption_secret) binder_key = self._key_schedule_psk.derive_secret(b"res binder") binder_length = self._key_schedule_psk.algorithm.digest_size # update hello if self.session_ticket.max_early_data_size is not None: hello.early_data = True hello.pre_shared_key = OfferedPsks( identities=[ (self.session_ticket.ticket, self.session_ticket.obfuscated_age) ], binders=[bytes(binder_length)], ) # serialize hello without binder tmp_buf = Buffer(capacity=1024) push_client_hello(tmp_buf, hello) # calculate binder hash_offset = tmp_buf.tell() - binder_length - 3 self._key_schedule_psk.update_hash(tmp_buf.data_slice(0, hash_offset)) binder = self._key_schedule_psk.finished_verify_data(binder_key) hello.pre_shared_key.binders[0] = binder self._key_schedule_psk.update_hash( tmp_buf.data_slice(hash_offset, hash_offset + 3) + binder ) # calculate early data key if hello.early_data: early_key = self._key_schedule_psk.derive_secret(b"c e traffic") self.update_traffic_key_cb(</s> ===========below chunk 1=========== # module: aioquic.tls class Context: def _client_send_hello(self, output_buf: Buffer) -> None: # offset: 2 <s>key_schedule_psk.derive_secret(b"c e traffic") self.update_traffic_key_cb( Direction.ENCRYPT, Epoch.ZERO_RTT, self._key_schedule_psk.cipher_suite, early_key, ) self._key_schedule_proxy = KeyScheduleProxy(self._cipher_suites) self._key_schedule_proxy.extract(None) with push_message(self._key_schedule_proxy, output_buf): push_client_hello(output_buf, hello) self._set_state(State.CLIENT_EXPECT_SERVER_HELLO) ===========unchanged ref 0=========== at: aioquic.tls utcnow = datetime.datetime.utcnow Group(x: Union[str, bytes, bytearray], base: int) Group(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) 
KeyShareEntry = Tuple[int, bytes] OfferedPsks(identities: List[PskIdentity], binders: List[bytes]) ClientHello(random: bytes, session_id: bytes, cipher_suites: List[int], compression_methods: List[int], alpn_protocols: Optional[List[str]]=None, early_data: bool=False, key_share: Optional[List[KeyShareEntry]]=None, pre_shared_key: Optional[OfferedPsks]=None, psk_key_exchange_modes: Optional[List[int]]=None, server_name: Optional[str]=None, signature_algorithms: Optional[List[int]]=None, supported_groups: Optional[List[int]]=None, supported_versions: Optional[List[int]]=None, other_extensions: List[Extension]=field(default_factory=list)) push_client_hello(buf: Buffer, hello: ClientHello) -> None KeySchedule(cipher_suite: CipherSuite) GROUP_TO_CURVE: Dict = { Group.SECP256R1: ec.SECP256R1, Group.SECP384R1: ec.SECP384R1, Group.SECP521R1: ec.SECP521R1, } encode_public_key(public_key: Union[ ec.EllipticCurvePublicKey, x25519.X25519PublicKey, x448.X448PublicKey ]) -> KeyShareEntry ===========unchanged ref 1=========== SessionTicket(age_add: int, cipher_suite: CipherSuite, not_valid_after: datetime.datetime, not_valid_before: datetime.datetime, resumption_secret: bytes, server_name: str, ticket: bytes, max_early_data_size: Optional[int]=None, other_extensions: List[Tuple[int, bytes]]=field(default_factory=list)) at: aioquic.tls.ClientHello random: bytes session_id: bytes cipher_suites: List[int] compression_methods: List[int] alpn_protocols: Optional[List[str]] = None early_data: bool = False key_share: Optional[List[KeyShareEntry]] = None pre_shared_key: Optional[OfferedPsks] = None psk_key_exchange_modes: Optional[List[int]] = None server_name: Optional[str] = None signature_algorithms: Optional[List[int]] = None supported_groups: Optional[List[int]] = None supported_versions: Optional[List[int]] = None other_extensions: List[Extension] = field(default_factory=list) at: aioquic.tls.Context.__init__ self._alpn_protocols = alpn_protocols self.handshake_extensions: List[Extension] = [] self.session_ticket: Optional[SessionTicket] = None self._server_name = server_name self.new_session_ticket_cb: Optional[SessionTicketHandler] = None self._cipher_suites = [ CipherSuite.AES_256_GCM_SHA384, CipherSuite.AES_128_GCM_SHA256, CipherSuite.CHACHA20_POLY1305_SHA256, ] self._compression_methods: List[int] = [CompressionMethod.NULL] self._psk_key_exchange_modes: List[int] = [PskKeyExchangeMode.PSK_DHE_KE]
aioquic.tls/Context._client_handle_hello
Modified
aiortc~aioquic
e80bb53cd28654fbd7d526547cd324df71055520
[tls] add support for x448
<34>:<add> isinstance(peer_public_key, x448.X448PublicKey) <add> and self._x448_private_key is not None <add> ): <add> shared_key = self._x448_private_key.exchange(peer_public_key) <add> elif (
# module: aioquic.tls class Context: def _client_handle_hello(self, input_buf: Buffer, output_buf: Buffer) -> None: <0> peer_hello = pull_server_hello(input_buf) <1> <2> cipher_suite = negotiate( <3> self._cipher_suites, <4> [peer_hello.cipher_suite], <5> AlertHandshakeFailure("Unsupported cipher suite"), <6> ) <7> assert peer_hello.compression_method in self._compression_methods <8> assert peer_hello.supported_version in self._supported_versions <9> <10> # select key schedule <11> if peer_hello.pre_shared_key is not None: <12> if ( <13> self._key_schedule_psk is None <14> or peer_hello.pre_shared_key != 0 <15> or cipher_suite != self._key_schedule_psk.cipher_suite <16> ): <17> raise AlertIllegalParameter <18> self.key_schedule = self._key_schedule_psk <19> self._session_resumed = True <20> else: <21> self.key_schedule = self._key_schedule_proxy.select(cipher_suite) <22> self._key_schedule_psk = None <23> self._key_schedule_proxy = None <24> <25> # perform key exchange <26> peer_public_key = decode_public_key(peer_hello.key_share) <27> shared_key: Optional[bytes] = None <28> if ( <29> isinstance(peer_public_key, x25519.X25519PublicKey) <30> and self._x25519_private_key is not None <31> ): <32> shared_key = self._x25519_private_key.exchange(peer_public_key) <33> elif ( <34> isinstance(peer_public_key, ec.EllipticCurvePublicKey) <35> and self._ec_private_key is not None <36> and self._ec_private_key.public_key().curve.__class__ <37> == peer_public_key.curve.__class__ <38> ): <39> shared_key = self._</s>
===========below chunk 0=========== # module: aioquic.tls class Context: def _client_handle_hello(self, input_buf: Buffer, output_buf: Buffer) -> None: # offset: 1 assert shared_key is not None self.key_schedule.update_hash(input_buf.data) self.key_schedule.extract(shared_key) self._setup_traffic_protection( Direction.DECRYPT, Epoch.HANDSHAKE, b"s hs traffic" ) self._set_state(State.CLIENT_EXPECT_ENCRYPTED_EXTENSIONS) ===========unchanged ref 0=========== at: aioquic.tls AlertHandshakeFailure(*args: object) AlertIllegalParameter(*args: object) Direction() Epoch() State() push_client_hello(buf: Buffer, hello: ClientHello) -> None pull_server_hello(buf: Buffer) -> ServerHello KeyScheduleProxy(cipher_suites: List[CipherSuite]) decode_public_key(key_share: KeyShareEntry) -> Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey, x448.X448PublicKey, None] negotiate(supported: List[T], offered: Optional[List[Any]], exc: Optional[Alert]=None) -> T push_message(key_schedule: Union[KeySchedule, KeyScheduleProxy], buf: Buffer) -> Generator at: aioquic.tls.ClientHello early_data: bool = False at: aioquic.tls.Context _set_state(state: State) -> None at: aioquic.tls.Context.__init__ self.update_traffic_key_cb: Callable[ [Direction, Epoch, CipherSuite, bytes], None ] = lambda d, e, c, s: None self._cipher_suites = [ CipherSuite.AES_256_GCM_SHA384, CipherSuite.AES_128_GCM_SHA256, CipherSuite.CHACHA20_POLY1305_SHA256, ] self._compression_methods: List[int] = [CompressionMethod.NULL] self._supported_versions = [TLS_VERSION_1_3] self.key_schedule: Optional[KeySchedule] = None self._key_schedule_psk: Optional[KeySchedule] = None self._key_schedule_proxy: Optional[KeyScheduleProxy] = None self._session_resumed = False ===========unchanged ref 1=========== at: aioquic.tls.Context._client_send_hello hello = ClientHello( random=self.client_random, session_id=self.session_id, cipher_suites=[int(x) for x in self._cipher_suites], compression_methods=self._compression_methods, alpn_protocols=self._alpn_protocols, key_share=key_share, psk_key_exchange_modes=self._psk_key_exchange_modes if (self.session_ticket or self.new_session_ticket_cb is not None) else None, server_name=self._server_name, signature_algorithms=self._signature_algorithms, supported_groups=supported_groups, supported_versions=self._supported_versions, other_extensions=self.handshake_extensions, ) self._key_schedule_psk = KeySchedule(self.session_ticket.cipher_suite) tmp_buf = Buffer(capacity=1024) hash_offset = tmp_buf.tell() - binder_length - 3 binder = self._key_schedule_psk.finished_verify_data(binder_key) at: aioquic.tls.Context._server_handle_hello self.key_schedule = KeySchedule(cipher_suite) self._session_resumed = True at: aioquic.tls.KeySchedule derive_secret(label: bytes) -> bytes at: aioquic.tls.KeySchedule.__init__ self.cipher_suite = cipher_suite at: aioquic.tls.KeyScheduleProxy extract(key_material: Optional[bytes]=None) -> None select(cipher_suite: CipherSuite) -> KeySchedule at: aioquic.tls.ServerHello random: bytes session_id: bytes cipher_suite: int compression_method: int key_share: Optional[KeyShareEntry] = None pre_shared_key: Optional[int] = None ===========unchanged ref 2=========== supported_version: Optional[int] = None other_extensions: List[Tuple[int, bytes]] = field(default_factory=list) ===========changed ref 0=========== # module: aioquic.tls def decode_public_key( key_share: KeyShareEntry, + ) -> Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey, x448.X448PublicKey, None]: - ) -> Union[ec.EllipticCurvePublicKey, 
x25519.X25519PublicKey, None]: if key_share[0] == Group.X25519: return x25519.X25519PublicKey.from_public_bytes(key_share[1]) + elif key_share[0] == Group.X448: + return x448.X448PublicKey.from_public_bytes(key_share[1]) elif key_share[0] in GROUP_TO_CURVE: return ec.EllipticCurvePublicKey.from_encoded_point( GROUP_TO_CURVE[key_share[0]](), key_share[1] ) else: return None ===========changed ref 1=========== # module: aioquic.tls def encode_public_key( + public_key: Union[ + ec.EllipticCurvePublicKey, x25519.X25519PublicKey, x448.X448PublicKey + ] - public_key: Union[ec.EllipticCurvePublicKey, x25519.X25519PublicKey] ) -> KeyShareEntry: if isinstance(public_key, x25519.X25519PublicKey): return (Group.X25519, public_key.public_bytes(Encoding.Raw, PublicFormat.Raw)) + elif isinstance(public_key, x448.X448PublicKey): + return (Group.X448, public_key.public_bytes(Encoding.Raw, PublicFormat.Raw)) return ( CURVE_TO_GROUP[public_key.curve.__class__], public_key.public_bytes(Encoding.X962, PublicFormat.UncompressedPoint), ) ===========changed ref 2=========== # module: aioquic.tls class Group(IntEnum): SECP256R1 = 0x0017 SECP384R1 = 0x0018 SECP521R1 = 0x0019 X25519 = 0x001D + X448 = 0x001E GREASE = 0xAAAA ===========changed ref 3=========== # module: tests.test_tls class ContextTest(TestCase): + def test_handshake_with_x448(self): + client = self.create_client() + client._supported_groups = [tls.Group.X448] + server = self.create_server() + + try: + self._handshake(client, server) + except UnsupportedAlgorithm as exc: + self.skipTest(str(exc)) +
tests.test_connection/QuicConnectionTest.test_datagram_frame_2
Modified
aiortc~aioquic
79eeb342dd93f7a50b3b96af21e43f5f42b53f23
[pacing] add initial packet pacer
<5>:<add> client_patch=disable_packet_pacing,
# module: tests.test_connection
class QuicConnectionTest(TestCase):
    def test_datagram_frame_2(self):
<0>     # payload which exactly fills an entire packet
<1>     payload = b"Z" * 1250
<2>
<3>     with client_and_server(
<4>         client_options={"max_datagram_frame_size": 65536},
<5>         server_options={"max_datagram_frame_size": 65536},
<6>     ) as (client, server):
<7>         # check handshake completed
<8>         self.check_handshake(client=client, server=server, alpn_protocol=None)
<9>
<10>        # queue 20 datagrams
<11>        for i in range(20):
<12>            client.send_datagram_frame(payload)
<13>
<14>        # due to congestion control, only 11 of the datagrams are sent
<15>        self.assertEqual(transfer(client, server), 11)
<16>        for i in range(11):
<17>            event = server.next_event()
<18>            self.assertEqual(type(event), events.DatagramFrameReceived)
<19>            self.assertEqual(event.data, payload)
<20>
<21>        # server sends ACK
<22>        self.assertEqual(transfer(server, client), 1)
<23>
<24>        # client sends remaining datagrams
<25>        self.assertEqual(transfer(client, server), 9)
<26>        for i in range(9):
<27>            event = server.next_event()
<28>            self.assertEqual(type(event), events.DatagramFrameReceived)
<29>            self.assertEqual(event.data, payload)
===========unchanged ref 0=========== at: tests.test_connection client_and_server(client_kwargs={}, client_options={}, client_patch=lambda x: None, handshake=True, server_kwargs={}, server_certfile=SERVER_CERTFILE, server_keyfile=SERVER_KEYFILE, server_options={}, server_patch=lambda x: None, transport_options={}) disable_packet_pacing(connection) transfer(sender, receiver) at: tests.test_connection.QuicConnectionTest check_handshake(client, server, alpn_protocol=None) at: unittest.case.TestCase failureException: Type[BaseException] longMessage: bool maxDiff: Optional[int] _testMethodName: str _testMethodDoc: str assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_connection + def disable_packet_pacing(connection): + class DummyPacketPacer(QuicPacketPacer): + def next_send_time(self, now): + return None + + connection._loss._pacer = DummyPacketPacer() +
tests.test_connection/QuicConnectionTest.test_send_max_stream_data_retransmit
Modified
aiortc~aioquic
79eeb342dd93f7a50b3b96af21e43f5f42b53f23
[pacing] add initial packet pacer
<0>:<add> with client_and_server(server_patch=disable_packet_pacing) as (client, server): <del> with client_and_server() as (client, server):
# module: tests.test_connection class QuicConnectionTest(TestCase): def test_send_max_stream_data_retransmit(self): <0> with client_and_server() as (client, server): <1> # client creates bidirectional stream 0 <2> stream = client._create_stream(stream_id=0) <3> client.send_stream_data(0, b"hello") <4> self.assertEqual(stream.max_stream_data_local, 1048576) <5> self.assertEqual(stream.max_stream_data_local_sent, 1048576) <6> roundtrip(client, server) <7> <8> # server sends data, just before raising MAX_STREAM_DATA <9> server.send_stream_data(0, b"Z" * 524288) # 1048576 // 2 <10> for i in range(10): <11> roundtrip(server, client) <12> self.assertEqual(stream.max_stream_data_local, 1048576) <13> self.assertEqual(stream.max_stream_data_local_sent, 1048576) <14> <15> # server sends one more byte <16> server.send_stream_data(0, b"Z") <17> transfer(server, client) <18> <19> # MAX_STREAM_DATA is sent and lost <20> self.assertEqual(drop(client), 1) <21> self.assertEqual(stream.max_stream_data_local, 2097152) <22> self.assertEqual(stream.max_stream_data_local_sent, 2097152) <23> client._on_max_stream_data_delivery(QuicDeliveryState.LOST, stream) <24> self.assertEqual(stream.max_stream_data_local, 2097152) <25> self.assertEqual(stream.max_stream_data_local_sent, 0) <26> <27> # MAX_DATA is retransmitted and acked <28> self.assertEqual(roundtrip(client, server), (1, 1)) <29> self.assertEqual(stream.max_stream_data_local, 2097152) <30> self.</s>
===========below chunk 0=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_send_max_stream_data_retransmit(self): # offset: 1 ===========unchanged ref 0=========== at: tests.test_connection client_and_server(client_kwargs={}, client_options={}, client_patch=lambda x: None, handshake=True, server_kwargs={}, server_certfile=SERVER_CERTFILE, server_keyfile=SERVER_KEYFILE, server_options={}, server_patch=lambda x: None, transport_options={}) disable_packet_pacing(connection) drop(sender) roundtrip(sender, receiver) transfer(sender, receiver) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_connection + def disable_packet_pacing(connection): + class DummyPacketPacer(QuicPacketPacer): + def next_send_time(self, now): + return None + + connection._loss._pacer = DummyPacketPacer() + ===========changed ref 1=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_datagram_frame_2(self): # payload which exactly fills an entire packet payload = b"Z" * 1250 with client_and_server( client_options={"max_datagram_frame_size": 65536}, + client_patch=disable_packet_pacing, server_options={"max_datagram_frame_size": 65536}, ) as (client, server): # check handshake completed self.check_handshake(client=client, server=server, alpn_protocol=None) # queue 20 datagrams for i in range(20): client.send_datagram_frame(payload) # client can only 11 datagrams are sent due to congestion control self.assertEqual(transfer(client, server), 11) for i in range(11): event = server.next_event() self.assertEqual(type(event), events.DatagramFrameReceived) self.assertEqual(event.data, payload) # server sends ACK self.assertEqual(transfer(server, client), 1) # client sends remaining datagrams self.assertEqual(transfer(client, server), 9) for i in range(9): event = server.next_event() self.assertEqual(type(event), events.DatagramFrameReceived) self.assertEqual(event.data, payload)
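The MAX_STREAM_DATA numbers in test_send_max_stream_data_retransmit follow from a simple rule: once the peer has consumed more than half of the advertised per-stream window, the local limit is doubled and a MAX_STREAM_DATA frame is scheduled. A hedged sketch of that rule with illustrative names (inferred from the test, not aioquic's internals):

def maybe_raise_limit(highest_received: int, limit: int) -> int:
    # double the advertised window once consumption passes the halfway point
    if highest_received * 2 > limit:
        limit *= 2
    return limit

assert maybe_raise_limit(524288, 1048576) == 1048576   # exactly half: no change
assert maybe_raise_limit(524289, 1048576) == 2097152   # one byte past half: doubled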
aioquic.quic.connection/QuicConnection.get_timer
Modified
aiortc~aioquic
79eeb342dd93f7a50b3b96af21e43f5f42b53f23
[pacing] add initial packet pacer
<14>:<add> <add> # pacing timer <add> if self._pacing_at is not None and self._pacing_at < timer_at: <add> timer_at = self._pacing_at <add>
# module: aioquic.quic.connection class QuicConnection: def get_timer(self) -> Optional[float]: <0> """ <1> Return the time at which the timer should fire or None if no timer is needed. <2> """ <3> timer_at = self._close_at <4> if self._state not in END_STATES: <5> # ack timer <6> for space in self._loss.spaces: <7> if space.ack_at is not None and space.ack_at < timer_at: <8> timer_at = space.ack_at <9> <10> # loss detection timer <11> self._loss_at = self._loss.get_loss_detection_time() <12> if self._loss_at is not None and self._loss_at < timer_at: <13> timer_at = self._loss_at <14> return timer_at <15>
===========unchanged ref 0=========== at: aioquic.quic.connection END_STATES = frozenset( [ QuicConnectionState.CLOSING, QuicConnectionState.DRAINING, QuicConnectionState.TERMINATED, ] ) at: aioquic.quic.connection.QuicConnection.__init__ self._close_at: Optional[float] = None self._loss_at: Optional[float] = None self._state = QuicConnectionState.FIRSTFLIGHT self._loss = QuicPacketRecovery( is_client_without_1rtt=self._is_client, quic_logger=self._quic_logger, send_probe=self._send_probe, ) at: aioquic.quic.connection.QuicConnection._close_begin self._close_at = now + 3 * self._loss.get_probe_timeout() at: aioquic.quic.connection.QuicConnection._close_end self._close_at = None at: aioquic.quic.connection.QuicConnection._connect self._close_at = now + self._configuration.idle_timeout at: aioquic.quic.connection.QuicConnection._set_state self._state = state at: aioquic.quic.connection.QuicConnection.receive_datagram self._close_at = now + self._configuration.idle_timeout ===========changed ref 0=========== # module: tests.test_connection + def disable_packet_pacing(connection): + class DummyPacketPacer(QuicPacketPacer): + def next_send_time(self, now): + return None + + connection._loss._pacer = DummyPacketPacer() + ===========changed ref 1=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_datagram_frame_2(self): # payload which exactly fills an entire packet payload = b"Z" * 1250 with client_and_server( client_options={"max_datagram_frame_size": 65536}, + client_patch=disable_packet_pacing, server_options={"max_datagram_frame_size": 65536}, ) as (client, server): # check handshake completed self.check_handshake(client=client, server=server, alpn_protocol=None) # queue 20 datagrams for i in range(20): client.send_datagram_frame(payload) # client can only 11 datagrams are sent due to congestion control self.assertEqual(transfer(client, server), 11) for i in range(11): event = server.next_event() self.assertEqual(type(event), events.DatagramFrameReceived) self.assertEqual(event.data, payload) # server sends ACK self.assertEqual(transfer(server, client), 1) # client sends remaining datagrams self.assertEqual(transfer(client, server), 9) for i in range(9): event = server.next_event() self.assertEqual(type(event), events.DatagramFrameReceived) self.assertEqual(event.data, payload) ===========changed ref 2=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_send_max_stream_data_retransmit(self): + with client_and_server(server_patch=disable_packet_pacing) as (client, server): - with client_and_server() as (client, server): # client creates bidirectional stream 0 stream = client._create_stream(stream_id=0) client.send_stream_data(0, b"hello") self.assertEqual(stream.max_stream_data_local, 1048576) self.assertEqual(stream.max_stream_data_local_sent, 1048576) roundtrip(client, server) # server sends data, just before raising MAX_STREAM_DATA server.send_stream_data(0, b"Z" * 524288) # 1048576 // 2 for i in range(10): roundtrip(server, client) self.assertEqual(stream.max_stream_data_local, 1048576) self.assertEqual(stream.max_stream_data_local_sent, 1048576) # server sends one more byte server.send_stream_data(0, b"Z") transfer(server, client) # MAX_STREAM_DATA is sent and lost self.assertEqual(drop(client), 1) self.assertEqual(stream.max_stream_data_local, 2097152) self.assertEqual(stream.max_stream_data_local_sent, 2097152) client._on_max_stream_data_delivery(QuicDeliveryState.LOST, stream) 
self.assertEqual(stream.max_stream_data_local, 2097152) self.assertEqual(stream.max_stream_data_local_sent, 0) # MAX_DATA is retransmitted and acked self.assertEqual(roundtrip(client, server), (1, 1)) self.assertEqual(stream.max_stream_data_local, 2097152) self.assertEqual(</s> ===========changed ref 3=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_send_max_stream_data_retransmit(self): # offset: 1 <s> 1)) self.assertEqual(stream.max_stream_data_local, 2097152) self.assertEqual(stream.max_stream_data_local_sent, 2097152) ===========changed ref 4=========== # module: aioquic.quic.connection class QuicConnection: def __init__( self, *, configuration: QuicConfiguration, logger_connection_id: Optional[bytes] = None, original_connection_id: Optional[bytes] = None, session_ticket_fetcher: Optional[tls.SessionTicketFetcher] = None, session_ticket_handler: Optional[tls.SessionTicketHandler] = None, ) -> None: if configuration.is_client: assert ( original_connection_id is None ), "Cannot set original_connection_id for a client" else: assert ( configuration.certificate is not None ), "SSL certificate is required for a server" assert ( configuration.private_key is not None ), "SSL private key is required for a server" # configuration self._configuration = configuration self._is_client = configuration.is_client self._ack_delay = K_GRANULARITY self._close_at: Optional[float] = None self._close_event: Optional[events.ConnectionTerminated] = None self._connect_called = False self._cryptos: Dict[tls.Epoch, CryptoPair] = {} self._crypto_buffers: Dict[tls.Epoch, Buffer] = {} self._crypto_streams: Dict[tls.Epoch, QuicStream] = {} self._events: Deque[events.QuicEvent] = deque() self._handshake_complete = False self._handshake_confirmed = False self._host_cids = [ QuicConnectionId( cid=os.urandom(configuration.connection_id_length), sequence_number=0, stateless_reset_token=os.urandom(16), was_sent=True, ) ] self.host_cid = self._host_cids[0].cid self._host_cid_seq = 1 self._local_ack_delay_exponent = 3 self._local_active_connection_id_limit</s>
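get_timer() now folds the pacing deadline into the same computation as the idle/close, ACK and loss-detection deadlines: the connection wakes at the earliest timer that is actually set. A hypothetical helper expressing that pattern (not part of aioquic):

from typing import Optional

def earliest(*deadlines: Optional[float]) -> Optional[float]:
    """Return the smallest deadline that is set, or None if none are."""
    candidates = [d for d in deadlines if d is not None]
    return min(candidates) if candidates else None

# e.g. earliest(close_at, ack_at, loss_at, pacing_at)
assert earliest(None, 5.0, 3.0, None) == 3.0
assert earliest(None, None) is None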
aioquic.quic.connection/QuicConnection._write_application
Modified
aiortc~aioquic
79eeb342dd93f7a50b3b96af21e43f5f42b53f23
[pacing] add initial packet pacer
<13>:<add> # apply pacing, except if we have ACKs to send <add> if space.ack_at is None or space.ack_at >= now: <add> self._pacing_at = self._loss._pacer.next_send_time(now=now) <add> if self._pacing_at is not None: <add> break
# module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: <0> crypto_stream: Optional[QuicStream] = None <1> if self._cryptos[tls.Epoch.ONE_RTT].send.is_valid(): <2> crypto = self._cryptos[tls.Epoch.ONE_RTT] <3> crypto_stream = self._crypto_streams[tls.Epoch.ONE_RTT] <4> packet_type = PACKET_TYPE_ONE_RTT <5> elif self._cryptos[tls.Epoch.ZERO_RTT].send.is_valid(): <6> crypto = self._cryptos[tls.Epoch.ZERO_RTT] <7> packet_type = PACKET_TYPE_ZERO_RTT <8> else: <9> return <10> space = self._spaces[tls.Epoch.ONE_RTT] <11> <12> while True: <13> builder.start_packet(packet_type, crypto) <14> <15> if self._handshake_complete: <16> # ACK <17> if space.ack_at is not None and space.ack_at <= now: <18> self._write_ack_frame(builder=builder, space=space, now=now) <19> <20> # HANDSHAKE_DONE <21> if self._handshake_done_pending: <22> self._write_handshake_done_frame(builder=builder) <23> self._handshake_done_pending = False <24> <25> # PATH CHALLENGE <26> if ( <27> not network_path.is_validated <28> and network_path.local_challenge is None <29> ): <30> challenge = os.urandom(8) <31> self._write_path_challenge_frame( <32> builder=builder, challenge=challenge <33> ) <34> network_path.local_challenge = challenge <35> <36> # PATH RESPONSE <37> if network_path.remote_challenge is not None: <38> self._write_path_response_frame( <39> </s>
===========below chunk 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: # offset: 1 ) network_path.remote_challenge = None # NEW_CONNECTION_ID for connection_id in self._host_cids: if not connection_id.was_sent: self._write_new_connection_id_frame( builder=builder, connection_id=connection_id ) # RETIRE_CONNECTION_ID while self._retire_connection_ids: sequence_number = self._retire_connection_ids.pop(0) self._write_retire_connection_id_frame( builder=builder, sequence_number=sequence_number ) # STREAMS_BLOCKED if self._streams_blocked_pending: if self._streams_blocked_bidi: self._write_streams_blocked_frame( builder=builder, frame_type=QuicFrameType.STREAMS_BLOCKED_BIDI, limit=self._remote_max_streams_bidi, ) if self._streams_blocked_uni: self._write_streams_blocked_frame( builder=builder, frame_type=QuicFrameType.STREAMS_BLOCKED_UNI, limit=self._remote_max_streams_uni, ) self._streams_blocked_pending = False # MAX_DATA self._write_connection_limits(builder=builder, space=space) # stream-level limits for stream in self._streams.values(): self._write_stream_limits(builder=builder, space=space, stream=stream) # PING (user-request) if self._ping_pending: self._write_ping_frame(builder, self._ping_pending) self._ping_pending.clear() # PING (probe) if self._probe_pending: </s> ===========below chunk 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: # offset: 2 <s> self._ping_pending.clear() # PING (probe) if self._probe_pending: self._write_ping_frame(builder) self._probe_pending = False # CRYPTO if crypto_stream is not None and not crypto_stream.send_buffer_is_empty: self._write_crypto_frame( builder=builder, space=space, stream=crypto_stream ) # DATAGRAM while self._datagrams_pending: try: self._write_datagram_frame( builder=builder, data=self._datagrams_pending[0], frame_type=QuicFrameType.DATAGRAM_WITH_LENGTH, ) self._datagrams_pending.popleft() except QuicPacketBuilderStop: break # STREAM for stream in self._streams.values(): if not stream.is_blocked and not stream.send_buffer_is_empty: self._remote_max_data_used += self._write_stream_frame( builder=builder, space=space, stream=stream, max_offset=min( stream._send_highest + self._remote_max_data - self._remote_max_data_used, stream.max_stream_data_remote, ), ) if builder.packet_is_empty: break ===========unchanged ref 0=========== at: aioquic.quic.connection QuicNetworkPath(addr: NetworkAddress, bytes_received: int=0, bytes_sent: int=0, is_validated: bool=False, local_challenge: Optional[bytes]=None, remote_challenge: Optional[bytes]=None) at: aioquic.quic.connection.QuicConnection _write_ack_frame(builder: QuicPacketBuilder, space: QuicPacketSpace, now: float) -> None _write_connection_limits(builder: QuicPacketBuilder, space: QuicPacketSpace) -> None _write_crypto_frame(builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream) -> bool _write_datagram_frame(builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType) -> bool _write_handshake_done_frame(builder: QuicPacketBuilder) -> None _write_new_connection_id_frame(builder: QuicPacketBuilder, connection_id: QuicConnectionId) -> None _write_path_challenge_frame(builder: QuicPacketBuilder, challenge: bytes) -> None _write_path_response_frame(builder: QuicPacketBuilder, challenge: bytes) -> None 
_write_ping_frame(builder: QuicPacketBuilder, uids: List[int]=[]) _write_retire_connection_id_frame(builder: QuicPacketBuilder, sequence_number: int) -> None _write_stream_frame(builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream, max_offset: int) -> int _write_stream_limits(builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream) -> None _write_streams_blocked_frame(builder: QuicPacketBuilder, frame_type: QuicFrameType, limit: int) -> None at: aioquic.quic.connection.QuicConnection.__init__ self._cryptos: Dict[tls.Epoch, CryptoPair] = {} ===========unchanged ref 1=========== self._crypto_streams: Dict[tls.Epoch, QuicStream] = {} self._handshake_complete = False self._host_cids = [ QuicConnectionId( cid=os.urandom(configuration.connection_id_length), sequence_number=0, stateless_reset_token=os.urandom(16), was_sent=True, ) ] self._pacing_at: Optional[float] = None self._remote_max_data_used = 0 self._remote_max_streams_bidi = 0 self._remote_max_streams_uni = 0 self._spaces: Dict[tls.Epoch, QuicPacketSpace] = {} self._streams: Dict[int, QuicStream] = {} self._streams_blocked_bidi: List[QuicStream] = [] self._streams_blocked_uni: List[QuicStream] = [] self._version: Optional[int] = None self._loss = QuicPacketRecovery( is_client_without_1rtt=self._is_client, quic_logger=self._quic_logger, send_probe=self._send_probe, ) self._datagrams_pending: Deque[bytes] = deque() self._handshake_done_pending = False self._ping_pending: List[int] = [] self._probe_pending = False self._retire_connection_ids: List[int] = [] self._streams_blocked_pending = False at: aioquic.quic.connection.QuicConnection._create_stream self._streams_blocked_pending = True at: aioquic.quic.connection.QuicConnection._handle_crypto_frame self._handshake_complete = True self._handshake_done_pending = True
aioquic.quic.recovery/QuicPacketRecovery.__init__
Modified
aiortc~aioquic
79eeb342dd93f7a50b3b96af21e43f5f42b53f23
[pacing] add initial packet pacer
<19>:<add> self._pacer = QuicPacketPacer()
# module: aioquic.quic.recovery class QuicPacketRecovery: def __init__( self, is_client_without_1rtt: bool, send_probe: Callable[[], None], quic_logger: Optional[QuicLoggerTrace] = None, ) -> None: <0> self.is_client_without_1rtt = is_client_without_1rtt <1> self.max_ack_delay = 0.025 <2> self.spaces: List[QuicPacketSpace] = [] <3> <4> # callbacks <5> self._quic_logger = quic_logger <6> self._send_probe = send_probe <7> <8> # loss detection <9> self._pto_count = 0 <10> self._rtt_initialized = False <11> self._rtt_latest = 0.0 <12> self._rtt_min = math.inf <13> self._rtt_smoothed = 0.0 <14> self._rtt_variance = 0.0 <15> self._time_of_last_sent_ack_eliciting_packet = 0.0 <16> <17> # congestion control <18> self._cc = QuicCongestionControl() <19>
===========unchanged ref 0=========== at: aioquic.quic.recovery K_MAX_DATAGRAM_SIZE = 1280 at: aioquic.quic.recovery.QuicCongestionControl.__init__ self.bytes_in_flight = 0 self.congestion_window = K_INITIAL_WINDOW self._congestion_stash = 0 self.ssthresh: Optional[int] = None at: aioquic.quic.recovery.QuicCongestionControl.on_packet_acked self.bytes_in_flight -= packet.sent_bytes at: aioquic.quic.recovery.QuicCongestionControl.on_packets_lost self.congestion_window = max( int(self.congestion_window * K_LOSS_REDUCTION_FACTOR), K_MINIMUM_WINDOW ) self.ssthresh = self.congestion_window at: aioquic.quic.recovery.QuicCongestionControl.on_rtt_measurement self.ssthresh = self.congestion_window at: typing Iterable = _alias(collections.abc.Iterable, 1) ===========changed ref 0=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def update_bucket(self, now: float) -> None: + if now > self.evaluation_time: + self.bucket_time = min( + self.bucket_time + (now - self.evaluation_time), self.bucket_max + ) + self.evaluation_time = now + ===========changed ref 1=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def next_send_time(self, now: float) -> float: + if self.packet_time is not None: + self.update_bucket(now=now) + if self.bucket_time <= 0: + return now + self.packet_time + return None + ===========changed ref 2=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def update_after_send(self, now: float) -> None: + if self.packet_time is not None: + self.update_bucket(now=now) + if self.bucket_time < self.packet_time: + self.bucket_time = 0.0 + else: + self.bucket_time -= self.packet_time + ===========changed ref 3=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def __init__(self) -> None: + self.bucket_max: float = 0.0 + self.bucket_time: float = 0.0 + self.evaluation_time: float = 0.0 + self.packet_time: Optional[float] = None + ===========changed ref 4=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def update_rate(self, congestion_window: int, smoothed_rtt: float) -> None: + pacing_rate = congestion_window / max(smoothed_rtt, K_MICRO_SECOND) + self.packet_time = max( + K_MICRO_SECOND, min(K_MAX_DATAGRAM_SIZE / pacing_rate, K_SECOND) + ) + + self.bucket_max = ( + max( + 2 * K_MAX_DATAGRAM_SIZE, + min(congestion_window // 4, 16 * K_MAX_DATAGRAM_SIZE), + ) + / pacing_rate + ) + if self.bucket_time > self.bucket_max: + self.bucket_time = self.bucket_max + ===========changed ref 5=========== # module: aioquic.quic.recovery # loss detection K_PACKET_THRESHOLD = 3 K_INITIAL_RTT = 0.5 # seconds K_GRANULARITY = 0.001 # seconds K_TIME_THRESHOLD = 9 / 8 + K_MICRO_SECOND = 0.000001 + K_SECOND = 1.0 # congestion control K_MAX_DATAGRAM_SIZE = 1280 K_INITIAL_WINDOW = 10 * K_MAX_DATAGRAM_SIZE K_MINIMUM_WINDOW = 2 * K_MAX_DATAGRAM_SIZE K_LOSS_REDUCTION_FACTOR = 0.5 ===========changed ref 6=========== # module: tests.test_recovery + class QuicPacketPacerTest(TestCase): + def setUp(self): + self.pacer = QuicPacketPacer() + ===========changed ref 7=========== # module: tests.test_connection + def disable_packet_pacing(connection): + class DummyPacketPacer(QuicPacketPacer): + def next_send_time(self, now): + return None + + connection._loss._pacer = DummyPacketPacer() + ===========changed ref 8=========== # module: tests.test_recovery + class QuicPacketPacerTest(TestCase): + def test_no_measurement(self): + self.assertIsNone(self.pacer.next_send_time(now=0.0)) + self.pacer.update_after_send(now=0.0) + + 
self.assertIsNone(self.pacer.next_send_time(now=0.0)) + self.pacer.update_after_send(now=0.0) + ===========changed ref 9=========== # module: aioquic.quic.connection class QuicConnection: def get_timer(self) -> Optional[float]: """ Return the time at which the timer should fire or None if no timer is needed. """ timer_at = self._close_at if self._state not in END_STATES: # ack timer for space in self._loss.spaces: if space.ack_at is not None and space.ack_at < timer_at: timer_at = space.ack_at # loss detection timer self._loss_at = self._loss.get_loss_detection_time() if self._loss_at is not None and self._loss_at < timer_at: timer_at = self._loss_at + + # pacing timer + if self._pacing_at is not None and self._pacing_at < timer_at: + timer_at = self._pacing_at + return timer_at ===========changed ref 10=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_datagram_frame_2(self): # payload which exactly fills an entire packet payload = b"Z" * 1250 with client_and_server( client_options={"max_datagram_frame_size": 65536}, + client_patch=disable_packet_pacing, server_options={"max_datagram_frame_size": 65536}, ) as (client, server): # check handshake completed self.check_handshake(client=client, server=server, alpn_protocol=None) # queue 20 datagrams for i in range(20): client.send_datagram_frame(payload) # the client can only send 11 datagrams due to congestion control self.assertEqual(transfer(client, server), 11) for i in range(11): event = server.next_event() self.assertEqual(type(event), events.DatagramFrameReceived) self.assertEqual(event.data, payload) # server sends ACK self.assertEqual(transfer(server, client), 1) # client sends remaining datagrams self.assertEqual(transfer(client, server), 9) for i in range(9): event = server.next_event() self.assertEqual(type(event), events.DatagramFrameReceived) self.assertEqual(event.data, payload)
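A minimal usage sketch of the token-bucket pacer introduced above, using only the QuicPacketPacer methods shown in the changed refs; the congestion window, RTT and loop structure are invented for illustration:

from aioquic.quic.recovery import QuicPacketPacer

pacer = QuicPacketPacer()
# assume a 12800-byte congestion window and a 100 ms smoothed RTT
# -> pacing rate 128 kB/s, i.e. one 1280-byte datagram every 10 ms
pacer.update_rate(congestion_window=12800, smoothed_rtt=0.1)

now = 0.0
for _ in range(5):
    wake_at = pacer.next_send_time(now=now)
    if wake_at is not None:
        # bucket is empty: the pacer asks us to wait before sending again
        now = wake_at
    # ... build and send one datagram here ...
    pacer.update_after_send(now=now)

With these numbers the loop ends up sending one datagram roughly every packet_time (10 ms), which is the deferral that get_timer (above) and the pacing-aware _write_application (later in this log) surface through the connection timer.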
aioquic.quic.recovery/QuicPacketRecovery.on_ack_received
Modified
aiortc~aioquic
79eeb342dd93f7a50b3b96af21e43f5f42b53f23
[pacing] add initial packet pacer
# module: aioquic.quic.recovery class QuicPacketRecovery: def on_ack_received( self, space: QuicPacketSpace, ack_rangeset: RangeSet, ack_delay: float, now: float, ) -> None: <0> """ <1> Update metrics as the result of an ACK being received. <2> """ <3> is_ack_eliciting = False <4> largest_acked = ack_rangeset.bounds().stop - 1 <5> largest_newly_acked = None <6> largest_sent_time = None <7> <8> if largest_acked > space.largest_acked_packet: <9> space.largest_acked_packet = largest_acked <10> <11> for packet_number in sorted(space.sent_packets.keys()): <12> if packet_number > largest_acked: <13> break <14> if packet_number in ack_rangeset: <15> # remove packet and update counters <16> packet = space.sent_packets.pop(packet_number) <17> if packet.is_ack_eliciting: <18> is_ack_eliciting = True <19> space.ack_eliciting_in_flight -= 1 <20> if packet.in_flight: <21> self._cc.on_packet_acked(packet) <22> largest_newly_acked = packet_number <23> largest_sent_time = packet.sent_time <24> <25> # trigger callbacks <26> for handler, args in packet.delivery_handlers: <27> handler(QuicDeliveryState.ACKED, *args) <28> <29> # nothing to do if there are no newly acked packets <30> if largest_newly_acked is None: <31> return <32> <33> if largest_acked == largest_newly_acked and is_ack_eliciting: <34> latest_rtt = now - largest_sent_time <35> log_rtt = True <36> <37> # limit ACK delay to max_ack_delay <38> ack_delay = min(ack_delay</s>
===========below chunk 0=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def on_ack_received( self, space: QuicPacketSpace, ack_rangeset: RangeSet, ack_delay: float, now: float, ) -> None: # offset: 1 # update RTT estimate, which cannot be < 1 ms self._rtt_latest = max(latest_rtt, 0.001) if self._rtt_latest < self._rtt_min: self._rtt_min = self._rtt_latest if self._rtt_latest > self._rtt_min + ack_delay: self._rtt_latest -= ack_delay if not self._rtt_initialized: self._rtt_initialized = True self._rtt_variance = latest_rtt / 2 self._rtt_smoothed = latest_rtt else: self._rtt_variance = 3 / 4 * self._rtt_variance + 1 / 4 * abs( self._rtt_min - self._rtt_latest ) self._rtt_smoothed = ( 7 / 8 * self._rtt_smoothed + 1 / 8 * self._rtt_latest ) # inform congestion controller self._cc.on_rtt_measurement(latest_rtt, now=now) else: log_rtt = False self._detect_loss(space, now=now) if self._quic_logger is not None: self._log_metrics_updated(log_rtt=log_rtt) self._pto_count = 0 ===========unchanged ref 0=========== at: aioquic.quic.recovery K_INITIAL_RTT = 0.5 # seconds K_GRANULARITY = 0.001 # seconds QuicPacketSpace() at: aioquic.quic.recovery.QuicCongestionControl on_packet_acked(packet: QuicSentPacket) -> None on_packets_expired(packets: Iterable[QuicSentPacket]) -> None on_packets_expired(self, packets: Iterable[QuicSentPacket]) -> None at: aioquic.quic.recovery.QuicPacketRecovery _log_metrics_updated(self, log_rtt=False) -> None _log_metrics_updated(log_rtt=False) -> None at: aioquic.quic.recovery.QuicPacketRecovery.__init__ self.is_client_without_1rtt = is_client_without_1rtt self.max_ack_delay = 0.025 self.spaces: List[QuicPacketSpace] = [] self._quic_logger = quic_logger self._pto_count = 0 self._rtt_initialized = False self._rtt_smoothed = 0.0 self._rtt_variance = 0.0 self._time_of_last_sent_ack_eliciting_packet = 0.0 self._cc = QuicCongestionControl() at: aioquic.quic.recovery.QuicPacketRecovery.on_ack_received self._rtt_initialized = True self._rtt_variance = latest_rtt / 2 self._rtt_variance = 3 / 4 * self._rtt_variance + 1 / 4 * abs( self._rtt_min - self._rtt_latest ) self._rtt_smoothed = latest_rtt self._rtt_smoothed = ( 7 / 8 * self._rtt_smoothed + 1 / 8 * self._rtt_latest ) ===========unchanged ref 1=========== self._pto_count = 0 at: aioquic.quic.recovery.QuicPacketRecovery.on_loss_detection_timeout self._pto_count += 1 at: aioquic.quic.recovery.QuicPacketRecovery.on_packet_sent self._time_of_last_sent_ack_eliciting_packet = packet.sent_time at: aioquic.quic.recovery.QuicPacketSpace.__init__ self.ack_at: Optional[float] = None self.ack_eliciting_in_flight = 0 self.largest_acked_packet = 0 self.loss_time: Optional[float] = None self.sent_packets: Dict[int, QuicSentPacket] = {} at: typing.MutableMapping pop(key: _KT) -> _VT pop(key: _KT, default: Union[_VT, _T]=...) 
-> Union[_VT, _T] ===========changed ref 0=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def update_bucket(self, now: float) -> None: + if now > self.evaluation_time: + self.bucket_time = min( + self.bucket_time + (now - self.evaluation_time), self.bucket_max + ) + self.evaluation_time = now + ===========changed ref 1=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def next_send_time(self, now: float) -> float: + if self.packet_time is not None: + self.update_bucket(now=now) + if self.bucket_time <= 0: + return now + self.packet_time + return None + ===========changed ref 2=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def update_after_send(self, now: float) -> None: + if self.packet_time is not None: + self.update_bucket(now=now) + if self.bucket_time < self.packet_time: + self.bucket_time = 0.0 + else: + self.bucket_time -= self.packet_time + ===========changed ref 3=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def __init__(self) -> None: + self.bucket_max: float = 0.0 + self.bucket_time: float = 0.0 + self.evaluation_time: float = 0.0 + self.packet_time: Optional[float] = None + ===========changed ref 4=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def __init__( self, is_client_without_1rtt: bool, send_probe: Callable[[], None], quic_logger: Optional[QuicLoggerTrace] = None, ) -> None: self.is_client_without_1rtt = is_client_without_1rtt self.max_ack_delay = 0.025 self.spaces: List[QuicPacketSpace] = [] # callbacks self._quic_logger = quic_logger self._send_probe = send_probe # loss detection self._pto_count = 0 self._rtt_initialized = False self._rtt_latest = 0.0 self._rtt_min = math.inf self._rtt_smoothed = 0.0 self._rtt_variance = 0.0 self._time_of_last_sent_ack_eliciting_packet = 0.0 # congestion control self._cc = QuicCongestionControl() + self._pacer = QuicPacketPacer()
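The RTT bookkeeping in on_ack_received is a standard exponentially weighted moving average; the same arithmetic pulled out into a standalone sketch (names and samples invented, the ACK-delay correction omitted for brevity):

import math

def update_rtt(latest: float, state: dict) -> None:
    latest = max(latest, 0.001)  # the RTT estimate is never below 1 ms
    state["rtt_min"] = min(state["rtt_min"], latest)
    if not state["initialized"]:
        state.update(initialized=True, rtt_smoothed=latest, rtt_variance=latest / 2)
    else:
        state["rtt_variance"] = 3 / 4 * state["rtt_variance"] + 1 / 4 * abs(
            state["rtt_min"] - latest
        )
        state["rtt_smoothed"] = 7 / 8 * state["rtt_smoothed"] + 1 / 8 * latest

state = {"rtt_min": math.inf, "rtt_smoothed": 0.0, "rtt_variance": 0.0, "initialized": False}
for sample in (0.120, 0.100, 0.140):
    update_rtt(sample, state)
print(round(state["rtt_smoothed"], 4))  # 0.1203: each new sample moves the estimate by 1/8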
aioquic.quic.recovery/QuicPacketRecovery._on_packets_lost
Modified
aiortc~aioquic
79eeb342dd93f7a50b3b96af21e43f5f42b53f23
[pacing] add initial packet pacer
<28>:<add> self._pacer.update_rate( <add> congestion_window=self._cc.congestion_window, <add> smoothed_rtt=self._rtt_smoothed, <add> )
# module: aioquic.quic.recovery class QuicPacketRecovery: def _on_packets_lost( self, packets: Iterable[QuicSentPacket], space: QuicPacketSpace, now: float ) -> None: <0> lost_packets_cc = [] <1> for packet in packets: <2> del space.sent_packets[packet.packet_number] <3> <4> if packet.in_flight: <5> lost_packets_cc.append(packet) <6> <7> if packet.is_ack_eliciting: <8> space.ack_eliciting_in_flight -= 1 <9> <10> if self._quic_logger is not None: <11> self._quic_logger.log_event( <12> category="recovery", <13> event="packet_lost", <14> data={ <15> "type": self._quic_logger.packet_type(packet.packet_type), <16> "packet_number": str(packet.packet_number), <17> }, <18> ) <19> self._log_metrics_updated() <20> <21> # trigger callbacks <22> for handler, args in packet.delivery_handlers: <23> handler(QuicDeliveryState.LOST, *args) <24> <25> # inform congestion controller <26> if lost_packets_cc: <27> self._cc.on_packets_lost(lost_packets_cc, now=now) <28> if self._quic_logger is not None: <29> self._log_metrics_updated() <30>
===========unchanged ref 0=========== at: aioquic.quic.recovery K_PACKET_THRESHOLD = 3 K_INITIAL_RTT = 0.5 # seconds K_TIME_THRESHOLD = 9 / 8 QuicPacketSpace() at: aioquic.quic.recovery.QuicPacketRecovery _on_packets_lost(packets: Iterable[QuicSentPacket], space: QuicPacketSpace, now: float) -> None at: aioquic.quic.recovery.QuicPacketRecovery.__init__ self._quic_logger = quic_logger self._rtt_initialized = False self._rtt_latest = 0.0 self._rtt_smoothed = 0.0 at: aioquic.quic.recovery.QuicPacketRecovery.on_ack_received self._rtt_latest = max(latest_rtt, 0.001) self._rtt_latest -= ack_delay self._rtt_initialized = True self._rtt_smoothed = latest_rtt self._rtt_smoothed = ( 7 / 8 * self._rtt_smoothed + 1 / 8 * self._rtt_latest ) at: aioquic.quic.recovery.QuicPacketSpace.__init__ self.largest_acked_packet = 0 self.loss_time: Optional[float] = None self.sent_packets: Dict[int, QuicSentPacket] = {} ===========changed ref 0=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def update_bucket(self, now: float) -> None: + if now > self.evaluation_time: + self.bucket_time = min( + self.bucket_time + (now - self.evaluation_time), self.bucket_max + ) + self.evaluation_time = now + ===========changed ref 1=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def next_send_time(self, now: float) -> float: + if self.packet_time is not None: + self.update_bucket(now=now) + if self.bucket_time <= 0: + return now + self.packet_time + return None + ===========changed ref 2=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def update_after_send(self, now: float) -> None: + if self.packet_time is not None: + self.update_bucket(now=now) + if self.bucket_time < self.packet_time: + self.bucket_time = 0.0 + else: + self.bucket_time -= self.packet_time + ===========changed ref 3=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def __init__(self) -> None: + self.bucket_max: float = 0.0 + self.bucket_time: float = 0.0 + self.evaluation_time: float = 0.0 + self.packet_time: Optional[float] = None + ===========changed ref 4=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def __init__( self, is_client_without_1rtt: bool, send_probe: Callable[[], None], quic_logger: Optional[QuicLoggerTrace] = None, ) -> None: self.is_client_without_1rtt = is_client_without_1rtt self.max_ack_delay = 0.025 self.spaces: List[QuicPacketSpace] = [] # callbacks self._quic_logger = quic_logger self._send_probe = send_probe # loss detection self._pto_count = 0 self._rtt_initialized = False self._rtt_latest = 0.0 self._rtt_min = math.inf self._rtt_smoothed = 0.0 self._rtt_variance = 0.0 self._time_of_last_sent_ack_eliciting_packet = 0.0 # congestion control self._cc = QuicCongestionControl() + self._pacer = QuicPacketPacer() ===========changed ref 5=========== # module: aioquic.quic.recovery + class QuicPacketPacer: + def update_rate(self, congestion_window: int, smoothed_rtt: float) -> None: + pacing_rate = congestion_window / max(smoothed_rtt, K_MICRO_SECOND) + self.packet_time = max( + K_MICRO_SECOND, min(K_MAX_DATAGRAM_SIZE / pacing_rate, K_SECOND) + ) + + self.bucket_max = ( + max( + 2 * K_MAX_DATAGRAM_SIZE, + min(congestion_window // 4, 16 * K_MAX_DATAGRAM_SIZE), + ) + / pacing_rate + ) + if self.bucket_time > self.bucket_max: + self.bucket_time = self.bucket_max + ===========changed ref 6=========== # module: aioquic.quic.recovery # loss detection K_PACKET_THRESHOLD = 3 K_INITIAL_RTT = 0.5 # seconds K_GRANULARITY = 0.001 # seconds 
K_TIME_THRESHOLD = 9 / 8 + K_MICRO_SECOND = 0.000001 + K_SECOND = 1.0 # congestion control K_MAX_DATAGRAM_SIZE = 1280 K_INITIAL_WINDOW = 10 * K_MAX_DATAGRAM_SIZE K_MINIMUM_WINDOW = 2 * K_MAX_DATAGRAM_SIZE K_LOSS_REDUCTION_FACTOR = 0.5 ===========changed ref 7=========== # module: aioquic.quic.recovery class QuicPacketRecovery: def on_ack_received( self, space: QuicPacketSpace, ack_rangeset: RangeSet, ack_delay: float, now: float, ) -> None: """ Update metrics as the result of an ACK being received. """ is_ack_eliciting = False largest_acked = ack_rangeset.bounds().stop - 1 largest_newly_acked = None largest_sent_time = None if largest_acked > space.largest_acked_packet: space.largest_acked_packet = largest_acked for packet_number in sorted(space.sent_packets.keys()): if packet_number > largest_acked: break if packet_number in ack_rangeset: # remove packet and update counters packet = space.sent_packets.pop(packet_number) if packet.is_ack_eliciting: is_ack_eliciting = True space.ack_eliciting_in_flight -= 1 if packet.in_flight: self._cc.on_packet_acked(packet) largest_newly_acked = packet_number largest_sent_time = packet.sent_time # trigger callbacks for handler, args in packet.delivery_handlers: handler(QuicDeliveryState.ACKED, *args) # nothing to do if there are no newly acked packets if largest_newly_acked is None: return if largest_acked == largest_newly_acked and is_ack_eliciting: latest_rtt = now - largest_sent_time log_rtt = True # limit ACK delay to max_ack_delay ack_delay = min(ack_delay, self.max_ack_delay) # update RTT estimate, which cannot be < 1 ms self._rtt_latest = max(latest_rtt</s>
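For intuition about the new update_rate hook, plugging the module constants into its formulas gives concrete pacing numbers; this is a worked example, not part of the diff, and the clamping to [K_MICRO_SECOND, K_SECOND] is omitted because these values stay inside the bounds:

K_MAX_DATAGRAM_SIZE = 1280
K_INITIAL_WINDOW = 10 * K_MAX_DATAGRAM_SIZE  # 12800 bytes

congestion_window = K_INITIAL_WINDOW
smoothed_rtt = 0.050  # 50 ms

pacing_rate = congestion_window / smoothed_rtt   # 256000 bytes/s
packet_time = K_MAX_DATAGRAM_SIZE / pacing_rate  # 0.005 s between full-size datagrams
bucket_max = max(
    2 * K_MAX_DATAGRAM_SIZE,
    min(congestion_window // 4, 16 * K_MAX_DATAGRAM_SIZE),
) / pacing_rate
print(packet_time, bucket_max)  # 0.005 0.0125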
aioquic.quic.logger/QuicLoggerTrace.encode_ack_frame
Modified
aiortc~aioquic
b6ec203cab42bc9dceca767b8fd9ef5460264058
[qlog] encode times as microseconds
<1>:<add> "ack_delay": str(self.encode_time(delay)), <del> "ack_delay": str(int(delay * 1000)), # convert to ms
# module: aioquic.quic.logger class QuicLoggerTrace: def encode_ack_frame(self, ranges: RangeSet, delay: float) -> Dict: <0> return { <1> "ack_delay": str(int(delay * 1000)), # convert to ms <2> "acked_ranges": [[str(x.start), str(x.stop - 1)] for x in ranges], <3> "frame_type": "ack", <4> } <5>
===========unchanged ref 0=========== at: aioquic.quic.logger.QuicLoggerTrace encode_time(seconds: float) -> int at: typing Dict = _alias(dict, 2, inst=False, name='Dict')
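The helper is just seconds-to-integer-microseconds; a quick standalone check, re-implemented here for illustration with arbitrary values:

def encode_time(seconds: float) -> int:
    # same conversion as QuicLoggerTrace.encode_time
    return int(seconds * 1000000)

assert encode_time(0.025) == 25000    # a 25 ms ACK delay becomes 25000 us
assert encode_time(0.0001234) == 123  # sub-microsecond precision is truncated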
aioquic.quic.logger/QuicLoggerTrace.to_dict
Modified
aiortc~aioquic
b6ec203cab42bc9dceca767b8fd9ef5460264058
[qlog] encode times as microseconds
<8>:<add> "configuration": {"time_units": "us"}, <10>:<add> "reference_time": str(self.encode_time(reference_time)), <del> "reference_time": "%d" % (reference_time * 1000), <16>:<add> str(self.encode_time(event[0] - reference_time)), <del> "%d" % ((event[0] - reference_time) * 1000),
# module: aioquic.quic.logger class QuicLoggerTrace: def to_dict(self) -> Dict[str, Any]: <0> """ <1> Return the trace as a dictionary which can be written as JSON. <2> """ <3> if self._events: <4> reference_time = self._events[0][0] <5> else: <6> reference_time = 0.0 <7> return { <8> "common_fields": { <9> "ODCID": hexdump(self._odcid), <10> "reference_time": "%d" % (reference_time * 1000), <11> }, <12> "event_fields": ["relative_time", "category", "event_type", "data"], <13> "events": list( <14> map( <15> lambda event: ( <16> "%d" % ((event[0] - reference_time) * 1000), <17> event[1], <18> event[2], <19> event[3], <20> ), <21> self._events, <22> ) <23> ), <24> "vantage_point": self._vantage_point, <25> } <26>
===========unchanged ref 0=========== at: aioquic.quic.logger PACKET_TYPE_NAMES = { PACKET_TYPE_INITIAL: "initial", PACKET_TYPE_HANDSHAKE: "handshake", PACKET_TYPE_ZERO_RTT: "0RTT", PACKET_TYPE_ONE_RTT: "1RTT", PACKET_TYPE_RETRY: "retry", } hexdump(data: bytes) -> str at: aioquic.quic.logger.QuicLoggerTrace encode_time(seconds: float) -> int at: aioquic.quic.logger.QuicLoggerTrace.__init__ self._odcid = odcid self._events: Deque[Tuple[float, str, str, Dict[str, Any]]] = deque() at: collections.deque append(x: _T) -> None at: time time() -> float at: typing Dict = _alias(dict, 2, inst=False, name='Dict') at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: aioquic.quic.logger class QuicLoggerTrace: + def encode_time(self, seconds: float) -> int: + """ + Convert a time to integer microseconds. + """ + return int(seconds * 1000000) + ===========changed ref 1=========== # module: aioquic.quic.logger class QuicLoggerTrace: def encode_ack_frame(self, ranges: RangeSet, delay: float) -> Dict: return { + "ack_delay": str(self.encode_time(delay)), - "ack_delay": str(int(delay * 1000)), # convert to ms "acked_ranges": [[str(x.start), str(x.stop - 1)] for x in ranges], "frame_type": "ack", }
tests.test_logger/QuicLoggerTest.test_empty_trace
Modified
aiortc~aioquic
b6ec203cab42bc9dceca767b8fd9ef5460264058
[qlog] encode times as microseconds
<13>:<add> "configuration": {"time_units": "us"},
# module: tests.test_logger class QuicLoggerTest(TestCase): def test_empty_trace(self): <0> logger = QuicLogger() <1> trace = logger.start_trace(is_client=True, odcid=bytes(8)) <2> logger.end_trace(trace) <3> self.assertEqual( <4> logger.to_dict(), <5> { <6> "qlog_version": "draft-01", <7> "traces": [ <8> { <9> "common_fields": { <10> "ODCID": "0000000000000000", <11> "reference_time": "0", <12> }, <13> "event_fields": [ <14> "relative_time", <15> "category", <16> "event_type", <17> "data", <18> ], <19> "events": [], <20> "vantage_point": {"name": "aioquic", "type": "client"}, <21> } <22> ], <23> }, <24> ) <25>
===========unchanged ref 0=========== at: unittest.case.TestCase failureException: Type[BaseException] longMessage: bool maxDiff: Optional[int] _testMethodName: str _testMethodDoc: str assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: aioquic.quic.logger class QuicLoggerTrace: + def encode_time(self, seconds: float) -> int: + """ + Convert a time to integer microseconds. + """ + return int(seconds * 1000000) + ===========changed ref 1=========== # module: aioquic.quic.logger class QuicLoggerTrace: def encode_ack_frame(self, ranges: RangeSet, delay: float) -> Dict: return { + "ack_delay": str(self.encode_time(delay)), - "ack_delay": str(int(delay * 1000)), # convert to ms "acked_ranges": [[str(x.start), str(x.stop - 1)] for x in ranges], "frame_type": "ack", } ===========changed ref 2=========== # module: aioquic.quic.logger class QuicLoggerTrace: def to_dict(self) -> Dict[str, Any]: """ Return the trace as a dictionary which can be written as JSON. """ if self._events: reference_time = self._events[0][0] else: reference_time = 0.0 return { + "configuration": {"time_units": "us"}, "common_fields": { "ODCID": hexdump(self._odcid), + "reference_time": str(self.encode_time(reference_time)), - "reference_time": "%d" % (reference_time * 1000), }, "event_fields": ["relative_time", "category", "event_type", "data"], "events": list( map( lambda event: ( + str(self.encode_time(event[0] - reference_time)), - "%d" % ((event[0] - reference_time) * 1000), event[1], event[2], event[3], ), self._events, ) ), "vantage_point": self._vantage_point, }
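A hedged sketch of producing a non-empty trace with the microsecond configuration above, using only the QuicLogger/QuicLoggerTrace calls that appear in this log; the category, event and data values are arbitrary:

import json

from aioquic.quic.logger import QuicLogger

logger = QuicLogger()
trace = logger.start_trace(is_client=True, odcid=bytes(8))
# log_event timestamps the event internally; relative_time in the output is in "us"
trace.log_event(category="recovery", event="metrics_updated", data={"cwnd": 12800})
logger.end_trace(trace)
print(json.dumps(logger.to_dict(), indent=2))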
aioquic.quic.recovery/QuicPacketRecovery._log_metrics_updated
Modified
aiortc~aioquic
b6ec203cab42bc9dceca767b8fd9ef5460264058
[qlog] encode times as microseconds
<10>:<add> "latest_rtt": self._quic_logger.encode_time(self._rtt_latest), <del> "latest_rtt": int(self._rtt_latest * 1000), <11>:<add> "min_rtt": self._quic_logger.encode_time(self._rtt_min), <del> "min_rtt": int(self._rtt_min * 1000), <12>:<add> "smoothed_rtt": self._quic_logger.encode_time(self._rtt_smoothed), <del> "smoothed_rtt": int(self._rtt_smoothed * 1000), <13>:<add> "rtt_variance": self._quic_logger.encode_time(self._rtt_variance), <del> "rtt_variance": int(self._rtt_variance * 1000),
# module: aioquic.quic.recovery class QuicPacketRecovery: def _log_metrics_updated(self, log_rtt=False) -> None: <0> data = { <1> "bytes_in_flight": self._cc.bytes_in_flight, <2> "cwnd": self._cc.congestion_window, <3> } <4> if self._cc.ssthresh is not None: <5> data["ssthresh"] = self._cc.ssthresh <6> <7> if log_rtt: <8> data.update( <9> { <10> "latest_rtt": int(self._rtt_latest * 1000), <11> "min_rtt": int(self._rtt_min * 1000), <12> "smoothed_rtt": int(self._rtt_smoothed * 1000), <13> "rtt_variance": int(self._rtt_variance * 1000), <14> } <15> ) <16> <17> self._quic_logger.log_event( <18> category="recovery", event="metrics_updated", data=data <19> ) <20>
===========unchanged ref 0=========== at: aioquic.quic.recovery.QuicCongestionControl.__init__ self.bytes_in_flight = 0 self.congestion_window = K_INITIAL_WINDOW self.ssthresh: Optional[int] = None at: aioquic.quic.recovery.QuicCongestionControl.on_packet_acked self.bytes_in_flight -= packet.sent_bytes self.congestion_window += packet.sent_bytes self.congestion_window += count * K_MAX_DATAGRAM_SIZE at: aioquic.quic.recovery.QuicCongestionControl.on_packet_sent self.bytes_in_flight += packet.sent_bytes at: aioquic.quic.recovery.QuicCongestionControl.on_packets_expired self.bytes_in_flight -= packet.sent_bytes at: aioquic.quic.recovery.QuicCongestionControl.on_packets_lost self.bytes_in_flight -= packet.sent_bytes self.congestion_window = max( int(self.congestion_window * K_LOSS_REDUCTION_FACTOR), K_MINIMUM_WINDOW ) self.ssthresh = self.congestion_window at: aioquic.quic.recovery.QuicCongestionControl.on_rtt_measurement self.ssthresh = self.congestion_window at: aioquic.quic.recovery.QuicPacketRecovery.__init__ self._quic_logger = quic_logger self._rtt_latest = 0.0 self._rtt_min = math.inf self._rtt_smoothed = 0.0 self._rtt_variance = 0.0 self._cc = QuicCongestionControl() ===========unchanged ref 1=========== at: aioquic.quic.recovery.QuicPacketRecovery.on_ack_received self._rtt_latest = max(latest_rtt, 0.001) self._rtt_latest -= ack_delay self._rtt_min = self._rtt_latest self._rtt_variance = latest_rtt / 2 self._rtt_variance = 3 / 4 * self._rtt_variance + 1 / 4 * abs( self._rtt_min - self._rtt_latest ) self._rtt_smoothed = latest_rtt self._rtt_smoothed = ( 7 / 8 * self._rtt_smoothed + 1 / 8 * self._rtt_latest ) ===========changed ref 0=========== # module: aioquic.quic.logger class QuicLoggerTrace: + def encode_time(self, seconds: float) -> int: + """ + Convert a time to integer microseconds. + """ + return int(seconds * 1000000) + ===========changed ref 1=========== # module: aioquic.quic.logger class QuicLoggerTrace: def encode_ack_frame(self, ranges: RangeSet, delay: float) -> Dict: return { + "ack_delay": str(self.encode_time(delay)), - "ack_delay": str(int(delay * 1000)), # convert to ms "acked_ranges": [[str(x.start), str(x.stop - 1)] for x in ranges], "frame_type": "ack", } ===========changed ref 2=========== # module: tests.test_logger class QuicLoggerTest(TestCase): def test_empty_trace(self): logger = QuicLogger() trace = logger.start_trace(is_client=True, odcid=bytes(8)) logger.end_trace(trace) self.assertEqual( logger.to_dict(), { "qlog_version": "draft-01", "traces": [ { "common_fields": { "ODCID": "0000000000000000", "reference_time": "0", }, + "configuration": {"time_units": "us"}, "event_fields": [ "relative_time", "category", "event_type", "data", ], "events": [], "vantage_point": {"name": "aioquic", "type": "client"}, } ], }, ) ===========changed ref 3=========== # module: aioquic.quic.logger class QuicLoggerTrace: def to_dict(self) -> Dict[str, Any]: """ Return the trace as a dictionary which can be written as JSON. 
""" if self._events: reference_time = self._events[0][0] else: reference_time = 0.0 return { + "configuration": {"time_units": "us"}, "common_fields": { "ODCID": hexdump(self._odcid), + "reference_time": str(self.encode_time(reference_time)), - "reference_time": "%d" % (reference_time * 1000), }, "event_fields": ["relative_time", "category", "event_type", "data"], "events": list( map( lambda event: ( + str(self.encode_time(event[0] - reference_time)), - "%d" % ((event[0] - reference_time) * 1000), event[1], event[2], event[3], ), self._events, ) ), "vantage_point": self._vantage_point, }
aioquic.quic.packet/push_ack_frame
Modified
aiortc~aioquic
cecbbca6777c3085cc1c755e12e2d2cfb02c5bac
[connection] trigger ACK-of-ACK
<0>:<add> ranges = len(rangeset) <add> index = ranges - 1 <del> index = len(rangeset) - 1 <13>:<add> return ranges
# module: aioquic.quic.packet + def push_ack_frame(buf: Buffer, rangeset: RangeSet, delay: int) -> int: - def push_ack_frame(buf: Buffer, rangeset: RangeSet, delay: int) -> None: <0> index = len(rangeset) - 1 <1> r = rangeset[index] <2> buf.push_uint_var(r.stop - 1) <3> buf.push_uint_var(delay) <4> buf.push_uint_var(index) <5> buf.push_uint_var(r.stop - 1 - r.start) <6> start = r.start <7> while index > 0: <8> index -= 1 <9> r = rangeset[index] <10> buf.push_uint_var(start - r.stop - 1) <11> buf.push_uint_var(r.stop - r.start - 1) <12> start = r.start <13>
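To see exactly which varints the function emits, and what the new return value reports to callers such as _write_ack_frame below, here is a hand-computed example; MiniBuffer is invented, and a plain list of range objects stands in for a RangeSet since only len(), indexing and .start/.stop are used:

from aioquic.quic.packet import push_ack_frame

class MiniBuffer:
    """Stand-in for aioquic's Buffer that records varint values instead of encoding them."""

    def __init__(self):
        self.values = []

    def push_uint_var(self, value):
        self.values.append(value)

buf = MiniBuffer()
# packets 0-2 and 5-7 acknowledged; like a RangeSet, the list is sorted ascending
ranges = push_ack_frame(buf, [range(0, 3), range(5, 8)], delay=100)
print(ranges)      # 2 ranges went into the frame
print(buf.values)  # [7, 100, 1, 2, 1, 2]:
                   # largest acked, ACK delay, ACK range count, first ACK range,
                   # then a gap / length pair for the older range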
aioquic.quic.connection/QuicConnection._write_application
Modified
aiortc~aioquic
cecbbca6777c3085cc1c755e12e2d2cfb02c5bac
[connection] trigger ACK-of-ACK
# module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: <0> crypto_stream: Optional[QuicStream] = None <1> if self._cryptos[tls.Epoch.ONE_RTT].send.is_valid(): <2> crypto = self._cryptos[tls.Epoch.ONE_RTT] <3> crypto_stream = self._crypto_streams[tls.Epoch.ONE_RTT] <4> packet_type = PACKET_TYPE_ONE_RTT <5> elif self._cryptos[tls.Epoch.ZERO_RTT].send.is_valid(): <6> crypto = self._cryptos[tls.Epoch.ZERO_RTT] <7> packet_type = PACKET_TYPE_ZERO_RTT <8> else: <9> return <10> space = self._spaces[tls.Epoch.ONE_RTT] <11> <12> while True: <13> # apply pacing, except if we have ACKs to send <14> if space.ack_at is None or space.ack_at >= now: <15> self._pacing_at = self._loss._pacer.next_send_time(now=now) <16> if self._pacing_at is not None: <17> break <18> builder.start_packet(packet_type, crypto) <19> <20> if self._handshake_complete: <21> # ACK <22> if space.ack_at is not None and space.ack_at <= now: <23> self._write_ack_frame(builder=builder, space=space, now=now) <24> <25> # HANDSHAKE_DONE <26> if self._handshake_done_pending: <27> self._write_handshake_done_frame(builder=builder) <28> self._handshake_done_pending = False <29> <30> # PATH CHALLENGE <31> if ( <32> not network_path.is_validated <33> and network_path.local_challenge is None <34> ): <35> challenge = os.ur</s>
===========below chunk 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: # offset: 1 self._write_path_challenge_frame( builder=builder, challenge=challenge ) network_path.local_challenge = challenge # PATH RESPONSE if network_path.remote_challenge is not None: self._write_path_response_frame( builder=builder, challenge=network_path.remote_challenge ) network_path.remote_challenge = None # NEW_CONNECTION_ID for connection_id in self._host_cids: if not connection_id.was_sent: self._write_new_connection_id_frame( builder=builder, connection_id=connection_id ) # RETIRE_CONNECTION_ID while self._retire_connection_ids: sequence_number = self._retire_connection_ids.pop(0) self._write_retire_connection_id_frame( builder=builder, sequence_number=sequence_number ) # STREAMS_BLOCKED if self._streams_blocked_pending: if self._streams_blocked_bidi: self._write_streams_blocked_frame( builder=builder, frame_type=QuicFrameType.STREAMS_BLOCKED_BIDI, limit=self._remote_max_streams_bidi, ) if self._streams_blocked_uni: self._write_streams_blocked_frame( builder=builder, frame_type=QuicFrameType.STREAMS_BLOCKED_UNI, limit=self._remote_max_streams_uni, ) self._streams_blocked_pending = False # MAX_DATA self._write_connection_limits(builder=builder, space=space) # stream-level limits for stream in self._streams.values(): self._write_stream_</s> ===========below chunk 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: # offset: 2 <s>=space) # stream-level limits for stream in self._streams.values(): self._write_stream_limits(builder=builder, space=space, stream=stream) # PING (user-request) if self._ping_pending: self._write_ping_frame(builder, self._ping_pending) self._ping_pending.clear() # PING (probe) if self._probe_pending: self._write_ping_frame(builder) self._probe_pending = False # CRYPTO if crypto_stream is not None and not crypto_stream.send_buffer_is_empty: self._write_crypto_frame( builder=builder, space=space, stream=crypto_stream ) # DATAGRAM while self._datagrams_pending: try: self._write_datagram_frame( builder=builder, data=self._datagrams_pending[0], frame_type=QuicFrameType.DATAGRAM_WITH_LENGTH, ) self._datagrams_pending.popleft() except QuicPacketBuilderStop: break # STREAM for stream in self._streams.values(): if not stream.is_blocked and not stream.send_buffer_is_empty: self._remote_max_data_used += self._write_stream_frame( builder=builder, space=space, stream=stream, max_offset=min( stream._send_highest + self._remote_max_data - self._remote_max_data_used, stream.max_stream_data_remote, </s> ===========below chunk 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: # offset: 3 <s> ) if builder.packet_is_empty: break else: self._loss._pacer.update_after_send(now=now) ===========unchanged ref 0=========== at: aioquic.quic.connection QuicNetworkPath(addr: NetworkAddress, bytes_received: int=0, bytes_sent: int=0, is_validated: bool=False, local_challenge: Optional[bytes]=None, remote_challenge: Optional[bytes]=None) at: aioquic.quic.connection.QuicConnection _write_ack_frame(self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float) -> None _write_ack_frame(builder: QuicPacketBuilder, space: 
QuicPacketSpace, now: float) -> None _write_connection_limits(builder: QuicPacketBuilder, space: QuicPacketSpace) -> None _write_crypto_frame(builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream) -> bool _write_datagram_frame(builder: QuicPacketBuilder, data: bytes, frame_type: QuicFrameType) -> bool _write_handshake_done_frame(builder: QuicPacketBuilder) -> None _write_new_connection_id_frame(builder: QuicPacketBuilder, connection_id: QuicConnectionId) -> None _write_path_challenge_frame(builder: QuicPacketBuilder, challenge: bytes) -> None _write_path_response_frame(builder: QuicPacketBuilder, challenge: bytes) -> None _write_ping_frame(builder: QuicPacketBuilder, uids: List[int]=[]) _write_ping_frame(self, builder: QuicPacketBuilder, uids: List[int]=[]) _write_retire_connection_id_frame(builder: QuicPacketBuilder, sequence_number: int) -> None _write_stream_frame(builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream, max_offset: int) -> int _write_stream_limits(builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream) -> None
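The pacing check cooperates with get_timer (see the changed ref earlier): when the pacer defers sending, _pacing_at becomes the next wake-up time. A hedged sketch of a driving loop under that assumption, relying on QuicConnection's datagrams_to_send(now)/get_timer() interface; send_datagram is a placeholder for the caller's socket write:

import time

def service(connection, send_datagram):
    # drain whatever the connection is allowed to send right now;
    # pacing may cut the batch short and surface a deadline instead
    now = time.monotonic()
    for data, addr in connection.datagrams_to_send(now=now):
        send_datagram(data, addr)
    # the returned timer now also reflects the pacing deadline (_pacing_at)
    return connection.get_timer()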
aioquic.quic.connection/QuicConnection._write_handshake
Modified
aiortc~aioquic
cecbbca6777c3085cc1c755e12e2d2cfb02c5bac
[connection] trigger ACK-of-ACK
<31>:<add> self._write_ping_frame(builder, comment="probe") <del> self._write_ping_frame(builder)
# module: aioquic.quic.connection class QuicConnection: def _write_handshake( self, builder: QuicPacketBuilder, epoch: tls.Epoch, now: float ) -> None: <0> crypto = self._cryptos[epoch] <1> if not crypto.send.is_valid(): <2> return <3> <4> crypto_stream = self._crypto_streams[epoch] <5> space = self._spaces[epoch] <6> <7> while True: <8> if epoch == tls.Epoch.INITIAL: <9> packet_type = PACKET_TYPE_INITIAL <10> else: <11> packet_type = PACKET_TYPE_HANDSHAKE <12> builder.start_packet(packet_type, crypto) <13> <14> # ACK <15> if space.ack_at is not None: <16> self._write_ack_frame(builder=builder, space=space, now=now) <17> <18> # CRYPTO <19> if not crypto_stream.send_buffer_is_empty: <20> if self._write_crypto_frame( <21> builder=builder, space=space, stream=crypto_stream <22> ): <23> self._probe_pending = False <24> <25> # PING (probe) <26> if ( <27> self._probe_pending <28> and epoch == tls.Epoch.HANDSHAKE <29> and not self._handshake_complete <30> ): <31> self._write_ping_frame(builder) <32> self._probe_pending = False <33> <34> if builder.packet_is_empty: <35> break <36>
===========unchanged ref 0=========== at: aioquic.quic.connection.QuicConnection _write_ack_frame(self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float) -> None _write_ack_frame(builder: QuicPacketBuilder, space: QuicPacketSpace, now: float) -> None _write_crypto_frame(builder: QuicPacketBuilder, space: QuicPacketSpace, stream: QuicStream) -> bool _write_ping_frame(builder: QuicPacketBuilder, uids: List[int]=[]) _write_ping_frame(self, builder: QuicPacketBuilder, uids: List[int]=[]) at: aioquic.quic.connection.QuicConnection.__init__ self._cryptos: Dict[tls.Epoch, CryptoPair] = {} self._crypto_streams: Dict[tls.Epoch, QuicStream] = {} self._handshake_complete = False self._spaces: Dict[tls.Epoch, QuicPacketSpace] = {} self._probe_pending = False at: aioquic.quic.connection.QuicConnection._handle_crypto_frame self._handshake_complete = True at: aioquic.quic.connection.QuicConnection._initialize self._cryptos = { tls.Epoch.INITIAL: CryptoPair(), tls.Epoch.ZERO_RTT: CryptoPair(), tls.Epoch.HANDSHAKE: CryptoPair(), tls.Epoch.ONE_RTT: CryptoPair(), } self._crypto_streams = { tls.Epoch.INITIAL: QuicStream(), tls.Epoch.HANDSHAKE: QuicStream(), tls.Epoch.ONE_RTT: QuicStream(), } self._spaces = { tls.Epoch.INITIAL: QuicPacketSpace(), tls.Epoch.HANDSHAKE: QuicPacketSpace(), tls.Epoch.ONE_RTT: QuicPacketSpace(), } at: aioquic.quic.connection.QuicConnection._send_probe self._probe_pending = True ===========unchanged ref 1=========== at: aioquic.quic.connection.QuicConnection._write_application self._probe_pending = False ===========changed ref 0=========== # module: aioquic.quic.packet + def push_ack_frame(buf: Buffer, rangeset: RangeSet, delay: int) -> int: - def push_ack_frame(buf: Buffer, rangeset: RangeSet, delay: int) -> None: + ranges = len(rangeset) + index = ranges - 1 - index = len(rangeset) - 1 r = rangeset[index] buf.push_uint_var(r.stop - 1) buf.push_uint_var(delay) buf.push_uint_var(index) buf.push_uint_var(r.stop - 1 - r.start) start = r.start while index > 0: index -= 1 r = rangeset[index] buf.push_uint_var(start - r.stop - 1) buf.push_uint_var(r.stop - r.start - 1) start = r.start + return ranges ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: crypto_stream: Optional[QuicStream] = None if self._cryptos[tls.Epoch.ONE_RTT].send.is_valid(): crypto = self._cryptos[tls.Epoch.ONE_RTT] crypto_stream = self._crypto_streams[tls.Epoch.ONE_RTT] packet_type = PACKET_TYPE_ONE_RTT elif self._cryptos[tls.Epoch.ZERO_RTT].send.is_valid(): crypto = self._cryptos[tls.Epoch.ZERO_RTT] packet_type = PACKET_TYPE_ZERO_RTT else: return space = self._spaces[tls.Epoch.ONE_RTT] while True: # apply pacing, except if we have ACKs to send if space.ack_at is None or space.ack_at >= now: self._pacing_at = self._loss._pacer.next_send_time(now=now) if self._pacing_at is not None: break builder.start_packet(packet_type, crypto) if self._handshake_complete: # ACK if space.ack_at is not None and space.ack_at <= now: self._write_ack_frame(builder=builder, space=space, now=now) # HANDSHAKE_DONE if self._handshake_done_pending: self._write_handshake_done_frame(builder=builder) self._handshake_done_pending = False # PATH CHALLENGE if ( not network_path.is_validated and network_path.local_challenge is None ): challenge = os.urandom(8) self._write_path_challenge_frame( builder=builder, challenge=challenge ) network_path.local_</s> ===========changed ref 
2=========== # module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: # offset: 1 <s> self._write_path_challenge_frame( builder=builder, challenge=challenge ) network_path.local_challenge = challenge # PATH RESPONSE if network_path.remote_challenge is not None: self._write_path_response_frame( builder=builder, challenge=network_path.remote_challenge ) network_path.remote_challenge = None # NEW_CONNECTION_ID for connection_id in self._host_cids: if not connection_id.was_sent: self._write_new_connection_id_frame( builder=builder, connection_id=connection_id ) # RETIRE_CONNECTION_ID while self._retire_connection_ids: sequence_number = self._retire_connection_ids.pop(0) self._write_retire_connection_id_frame( builder=builder, sequence_number=sequence_number ) # STREAMS_BLOCKED if self._streams_blocked_pending: if self._streams_blocked_bidi: self._write_streams_blocked_frame( builder=builder, frame_type=QuicFrameType.STREAMS_BLOCKED_BIDI, limit=self._remote_max_streams_bidi, ) if self._streams_blocked_uni: self._write_streams_blocked_frame( builder=builder, frame_type=QuicFrameType.STREAMS_BLOCKED_UNI, limit=self._remote_max_streams_uni, ) self._streams_blocked_pending = False # MAX_DATA self._write_connection_limits(builder=builder</s>
aioquic.quic.connection/QuicConnection._write_ack_frame
Modified
aiortc~aioquic
cecbbca6777c3085cc1c755e12e2d2cfb02c5bac
[connection] trigger ACK-of-ACK
<10>:<add> ranges = push_ack_frame(buf, space.ack_queue, ack_delay_encoded) <del> push_ack_frame(buf, space.ack_queue, ack_delay_encoded)
# module: aioquic.quic.connection class QuicConnection: def _write_ack_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float ) -> None: <0> # calculate ACK delay <1> ack_delay = now - space.largest_received_time <2> ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent <3> <4> buf = builder.start_frame( <5> QuicFrameType.ACK, <6> capacity=ACK_FRAME_CAPACITY, <7> handler=self._on_ack_delivery, <8> handler_args=(space, space.largest_received_packet), <9> ) <10> push_ack_frame(buf, space.ack_queue, ack_delay_encoded) <11> space.ack_at = None <12> <13> # log frame <14> if self._quic_logger is not None: <15> builder.quic_logger_frames.append( <16> self._quic_logger.encode_ack_frame( <17> ranges=space.ack_queue, delay=ack_delay <18> ) <19> ) <20>
===========unchanged ref 0=========== at: aioquic.quic.connection ACK_FRAME_CAPACITY = 64 # FIXME: this is arbitrary! at: aioquic.quic.connection.QuicConnection _on_ack_delivery(delivery: QuicDeliveryState, space: QuicPacketSpace, highest_acked: int) -> None at: aioquic.quic.connection.QuicConnection.__init__ self._local_ack_delay_exponent = 3 self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_handshake( self, builder: QuicPacketBuilder, epoch: tls.Epoch, now: float ) -> None: crypto = self._cryptos[epoch] if not crypto.send.is_valid(): return crypto_stream = self._crypto_streams[epoch] space = self._spaces[epoch] while True: if epoch == tls.Epoch.INITIAL: packet_type = PACKET_TYPE_INITIAL else: packet_type = PACKET_TYPE_HANDSHAKE builder.start_packet(packet_type, crypto) # ACK if space.ack_at is not None: self._write_ack_frame(builder=builder, space=space, now=now) # CRYPTO if not crypto_stream.send_buffer_is_empty: if self._write_crypto_frame( builder=builder, space=space, stream=crypto_stream ): self._probe_pending = False # PING (probe) if ( self._probe_pending and epoch == tls.Epoch.HANDSHAKE and not self._handshake_complete ): + self._write_ping_frame(builder, comment="probe") - self._write_ping_frame(builder) self._probe_pending = False if builder.packet_is_empty: break ===========changed ref 1=========== # module: aioquic.quic.packet + def push_ack_frame(buf: Buffer, rangeset: RangeSet, delay: int) -> int: - def push_ack_frame(buf: Buffer, rangeset: RangeSet, delay: int) -> None: + ranges = len(rangeset) + index = ranges - 1 - index = len(rangeset) - 1 r = rangeset[index] buf.push_uint_var(r.stop - 1) buf.push_uint_var(delay) buf.push_uint_var(index) buf.push_uint_var(r.stop - 1 - r.start) start = r.start while index > 0: index -= 1 r = rangeset[index] buf.push_uint_var(start - r.stop - 1) buf.push_uint_var(r.stop - r.start - 1) start = r.start + return ranges ===========changed ref 2=========== # module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: crypto_stream: Optional[QuicStream] = None if self._cryptos[tls.Epoch.ONE_RTT].send.is_valid(): crypto = self._cryptos[tls.Epoch.ONE_RTT] crypto_stream = self._crypto_streams[tls.Epoch.ONE_RTT] packet_type = PACKET_TYPE_ONE_RTT elif self._cryptos[tls.Epoch.ZERO_RTT].send.is_valid(): crypto = self._cryptos[tls.Epoch.ZERO_RTT] packet_type = PACKET_TYPE_ZERO_RTT else: return space = self._spaces[tls.Epoch.ONE_RTT] while True: # apply pacing, except if we have ACKs to send if space.ack_at is None or space.ack_at >= now: self._pacing_at = self._loss._pacer.next_send_time(now=now) if self._pacing_at is not None: break builder.start_packet(packet_type, crypto) if self._handshake_complete: # ACK if space.ack_at is not None and space.ack_at <= now: self._write_ack_frame(builder=builder, space=space, now=now) # HANDSHAKE_DONE if self._handshake_done_pending: self._write_handshake_done_frame(builder=builder) self._handshake_done_pending = False # PATH CHALLENGE if ( not network_path.is_validated and network_path.local_challenge is None ): challenge = os.urandom(8) self._write_path_challenge_frame( builder=builder, challenge=challenge ) 
network_path.local_</s> ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: # offset: 1 <s> self._write_path_challenge_frame( builder=builder, challenge=challenge ) network_path.local_challenge = challenge # PATH RESPONSE if network_path.remote_challenge is not None: self._write_path_response_frame( builder=builder, challenge=network_path.remote_challenge ) network_path.remote_challenge = None # NEW_CONNECTION_ID for connection_id in self._host_cids: if not connection_id.was_sent: self._write_new_connection_id_frame( builder=builder, connection_id=connection_id ) # RETIRE_CONNECTION_ID while self._retire_connection_ids: sequence_number = self._retire_connection_ids.pop(0) self._write_retire_connection_id_frame( builder=builder, sequence_number=sequence_number ) # STREAMS_BLOCKED if self._streams_blocked_pending: if self._streams_blocked_bidi: self._write_streams_blocked_frame( builder=builder, frame_type=QuicFrameType.STREAMS_BLOCKED_BIDI, limit=self._remote_max_streams_bidi, ) if self._streams_blocked_uni: self._write_streams_blocked_frame( builder=builder, frame_type=QuicFrameType.STREAMS_BLOCKED_UNI, limit=self._remote_max_streams_uni, ) self._streams_blocked_pending = False # MAX_DATA self._write_connection_limits(builder=builder</s>
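The ACK delay goes on the wire in units of 2**ack_delay_exponent microseconds; with the default local exponent of 3 shown in the unchanged ref, a 25 ms delay encodes like this (standalone arithmetic for illustration):

ack_delay = 0.025        # seconds since the largest packet was received
ack_delay_exponent = 3   # default value of self._local_ack_delay_exponent
encoded = int(ack_delay * 1000000) >> ack_delay_exponent
print(encoded)           # 3125, i.e. 3125 * 2**3 == 25000 microseconds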
aioquic.quic.connection/QuicConnection._write_ping_frame
Modified
aiortc~aioquic
cecbbca6777c3085cc1c755e12e2d2cfb02c5bac
[connection] trigger ACK-of-ACK
<8>:<add> " (%s)" % comment if comment else "", <del> "" if uids else " (probe)",
# module: aioquic.quic.connection class QuicConnection: + def _write_ping_frame( + self, builder: QuicPacketBuilder, uids: List[int] = [], comment="" - def _write_ping_frame(self, builder: QuicPacketBuilder, uids: List[int] = []): + ): <0> builder.start_frame( <1> QuicFrameType.PING, <2> capacity=PING_FRAME_CAPACITY, <3> handler=self._on_ping_delivery, <4> handler_args=(tuple(uids),), <5> ) <6> self._logger.debug( <7> "Sending PING%s in packet %d", <8> "" if uids else " (probe)", <9> builder.packet_number, <10> ) <11> <12> # log frame <13> if self._quic_logger is not None: <14> builder.quic_logger_frames.append(self._quic_logger.encode_ping_frame()) <15>
===========unchanged ref 0=========== at: aioquic.quic.connection PING_FRAME_CAPACITY = 1 at: aioquic.quic.connection.QuicConnection _on_ping_delivery(delivery: QuicDeliveryState, uids: Sequence[int]) -> None at: aioquic.quic.connection.QuicConnection.__init__ self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) self._logger = QuicConnectionAdapter( logger, {"id": dump_cid(logger_connection_id)} ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None at: logging.LoggerAdapter logger: Logger extra: Mapping[str, Any] debug(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: typing List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _write_ack_frame( self, builder: QuicPacketBuilder, space: QuicPacketSpace, now: float ) -> None: # calculate ACK delay ack_delay = now - space.largest_received_time ack_delay_encoded = int(ack_delay * 1000000) >> self._local_ack_delay_exponent buf = builder.start_frame( QuicFrameType.ACK, capacity=ACK_FRAME_CAPACITY, handler=self._on_ack_delivery, handler_args=(space, space.largest_received_packet), ) + ranges = push_ack_frame(buf, space.ack_queue, ack_delay_encoded) - push_ack_frame(buf, space.ack_queue, ack_delay_encoded) space.ack_at = None # log frame if self._quic_logger is not None: builder.quic_logger_frames.append( self._quic_logger.encode_ack_frame( ranges=space.ack_queue, delay=ack_delay ) ) ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _write_handshake( self, builder: QuicPacketBuilder, epoch: tls.Epoch, now: float ) -> None: crypto = self._cryptos[epoch] if not crypto.send.is_valid(): return crypto_stream = self._crypto_streams[epoch] space = self._spaces[epoch] while True: if epoch == tls.Epoch.INITIAL: packet_type = PACKET_TYPE_INITIAL else: packet_type = PACKET_TYPE_HANDSHAKE builder.start_packet(packet_type, crypto) # ACK if space.ack_at is not None: self._write_ack_frame(builder=builder, space=space, now=now) # CRYPTO if not crypto_stream.send_buffer_is_empty: if self._write_crypto_frame( builder=builder, space=space, stream=crypto_stream ): self._probe_pending = False # PING (probe) if ( self._probe_pending and epoch == tls.Epoch.HANDSHAKE and not self._handshake_complete ): + self._write_ping_frame(builder, comment="probe") - self._write_ping_frame(builder) self._probe_pending = False if builder.packet_is_empty: break ===========changed ref 2=========== # module: aioquic.quic.packet + def push_ack_frame(buf: Buffer, rangeset: RangeSet, delay: int) -> int: - def push_ack_frame(buf: Buffer, rangeset: RangeSet, delay: int) -> None: + ranges = len(rangeset) + index = ranges - 1 - index = len(rangeset) - 1 r = rangeset[index] buf.push_uint_var(r.stop - 1) buf.push_uint_var(delay) buf.push_uint_var(index) buf.push_uint_var(r.stop - 1 - r.start) start = r.start while index > 0: index -= 1 r = rangeset[index] buf.push_uint_var(start - r.stop - 1) buf.push_uint_var(r.stop - r.start - 1) start = r.start + return ranges ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _write_application( self, builder: QuicPacketBuilder, network_path: QuicNetworkPath, now: float ) -> None: crypto_stream: Optional[QuicStream] = None if 
self._cryptos[tls.Epoch.ONE_RTT].send.is_valid(): crypto = self._cryptos[tls.Epoch.ONE_RTT] crypto_stream = self._crypto_streams[tls.Epoch.ONE_RTT] packet_type = PACKET_TYPE_ONE_RTT elif self._cryptos[tls.Epoch.ZERO_RTT].send.is_valid(): crypto = self._cryptos[tls.Epoch.ZERO_RTT] packet_type = PACKET_TYPE_ZERO_RTT else: return space = self._spaces[tls.Epoch.ONE_RTT] while True: # apply pacing, except if we have ACKs to send if space.ack_at is None or space.ack_at >= now: self._pacing_at = self._loss._pacer.next_send_time(now=now) if self._pacing_at is not None: break builder.start_packet(packet_type, crypto) if self._handshake_complete: # ACK if space.ack_at is not None and space.ack_at <= now: self._write_ack_frame(builder=builder, space=space, now=now) # HANDSHAKE_DONE if self._handshake_done_pending: self._write_handshake_done_frame(builder=builder) self._handshake_done_pending = False # PATH CHALLENGE if ( not network_path.is_validated and network_path.local_challenge is None ): challenge = os.urandom(8) self._write_path_challenge_frame( builder=builder, challenge=challenge ) network_path.local_</s>
examples.http3_client/perform_http_request
Modified
aiortc~aioquic
1f54dcd046bb40c148a017af419a88de108021a3
[examples] make HTTP/3 client capable of writing to a file
<22>:<add> # output response <del> # print response <23>:<add> if output_file is not None: <del> if print_response: <25>:<add> if isinstance(http_event, HeadersReceived) and include: <del> if isinstance(http_event, HeadersReceived): <30>:<add> output_file.write(headers + b"\r\n") <del> sys.stderr.buffer.write(headers + b"\r\n") <31>:<add> output_file.flush() <del> sys.stderr.buffer.flush() <33>:<add> output_file.write(http_event.data) <del> sys.stdout.buffer.write(http_event.data) <34>:<add> output_file.flush() <del> sys.stdout.buffer.flush()
# module: examples.http3_client def perform_http_request( + client: HttpClient, + url: str, + data: str, + include: bool, + output_file: Optional[BinaryIO], - client: HttpClient, url: str, data: str, print_response: bool ) -> None: <0> # perform request <1> start = time.time() <2> if data is not None: <3> http_events = await client.post( <4> url, <5> data=data.encode(), <6> headers={"content-type": "application/x-www-form-urlencoded"}, <7> ) <8> else: <9> http_events = await client.get(url) <10> elapsed = time.time() - start <11> <12> # print speed <13> octets = 0 <14> for http_event in http_events: <15> if isinstance(http_event, DataReceived): <16> octets += len(http_event.data) <17> logger.info( <18> "Received %d bytes in %.1f s (%.3f Mbps)" <19> % (octets, elapsed, octets * 8 / elapsed / 1000000) <20> ) <21> <22> # print response <23> if print_response: <24> for http_event in http_events: <25> if isinstance(http_event, HeadersReceived): <26> headers = b"" <27> for k, v in http_event.headers: <28> headers += k + b": " + v + b"\r\n" <29> if headers: <30> sys.stderr.buffer.write(headers + b"\r\n") <31> sys.stderr.buffer.flush() <32> elif isinstance(http_event, DataReceived): <33> sys.stdout.buffer.write(http_event.data) <34> sys.stdout.buffer.flush() <35>
===========unchanged ref 0=========== at: examples.http3_client logger = logging.getLogger("client") HttpClient(*args, **kwargs) at: examples.http3_client.HttpClient get(url: str, headers: Dict={}) -> Deque[H3Event] post(url: str, data: bytes, headers: Dict={}) -> Deque[H3Event] at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: time time() -> float at: typing BinaryIO() at: typing.BinaryIO __slots__ = () write(s: AnyStr) -> int
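To make the perform_http_request() change concrete: HeadersReceived carries headers as (bytes, bytes) pairs, and the client now writes them, a blank CRLF line, and then the DataReceived payload to whatever binary file object was passed in. A minimal standalone sketch of that output format, with made-up header values (not tied to the aioquic API):

header_pairs = [(b":status", b"200"), (b"content-type", b"text/html")]

headers = b""
for k, v in header_pairs:
    headers += k + b": " + v + b"\r\n"

with open("response.bin", "wb") as output_file:
    output_file.write(headers + b"\r\n")    # blank line closes the header block
    output_file.write(b"<html>...</html>")  # body bytes from DataReceived events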
examples.http3_client/run
Modified
aiortc~aioquic
1f54dcd046bb40c148a017af419a88de108021a3
[examples] make HTTP/3 client capable of writing to a file
<36>:<add> if output == "-": <add> output_file = sys.stdout.buffer <add> elif output: <add> output_file = open(output, "wb") <add> else: <add> output_file = None <add> <39>:<add> client=client, <add> url=url, <add> data=data, <add> include=include, <add> output_file=output_file, <del> client=client, url=url, data=data, print_response=print_response
# module: examples.http3_client def run( configuration: QuicConfiguration, url: str, data: str, + include: bool, + output: str, parallel: int, - print_response: bool, ) -> None: <0> # parse URL <1> parsed = urlparse(url) <2> assert parsed.scheme in ( <3> "https", <4> "wss", <5> ), "Only https:// or wss:// URLs are supported." <6> if ":" in parsed.netloc: <7> host, port_str = parsed.netloc.split(":") <8> port = int(port_str) <9> else: <10> host = parsed.netloc <11> port = 443 <12> <13> async with connect( <14> host, <15> port, <16> configuration=configuration, <17> create_protocol=HttpClient, <18> session_ticket_handler=save_session_ticket, <19> ) as client: <20> client = cast(HttpClient, client) <21> <22> if parsed.scheme == "wss": <23> ws = await client.websocket(url, subprotocols=["chat", "superchat"]) <24> <25> # send some messages and receive reply <26> for i in range(2): <27> message = "Hello {}, WebSocket!".format(i) <28> print("> " + message) <29> await ws.send(message) <30> <31> message = await ws.recv() <32> print("< " + message) <33> <34> await ws.close() <35> else: <36> # perform request <37> coros = [ <38> perform_http_request( <39> client=client, url=url, data=data, print_response=print_response <40> ) <41> for i in range(parallel) <42> ] <43> await asyncio.gather(*coros) <44>
===========unchanged ref 0=========== at: _pickle dump(obj, file, protocol=None, *, fix_imports=True, buffer_callback=None) at: examples.http3_client HttpClient(*args, **kwargs) save_session_ticket(ticket) args = parser.parse_args() at: pickle dump, dumps, load, loads = _dump, _dumps, _load, _loads at: sys stdout: TextIO at: typing cast(typ: Type[_T], val: Any) -> _T cast(typ: str, val: Any) -> Any cast(typ: object, val: Any) -> Any at: typing.TextIO __slots__ = () at: urllib.parse urlparse(url: str, scheme: Optional[str]=..., allow_fragments: bool=...) -> ParseResult urlparse(url: Optional[bytes], scheme: Optional[bytes]=..., allow_fragments: bool=...) -> ParseResultBytes ===========changed ref 0=========== # module: examples.http3_client def perform_http_request( + client: HttpClient, + url: str, + data: str, + include: bool, + output_file: Optional[BinaryIO], - client: HttpClient, url: str, data: str, print_response: bool ) -> None: # perform request start = time.time() if data is not None: http_events = await client.post( url, data=data.encode(), headers={"content-type": "application/x-www-form-urlencoded"}, ) else: http_events = await client.get(url) elapsed = time.time() - start # print speed octets = 0 for http_event in http_events: if isinstance(http_event, DataReceived): octets += len(http_event.data) logger.info( "Received %d bytes in %.1f s (%.3f Mbps)" % (octets, elapsed, octets * 8 / elapsed / 1000000) ) + # output response - # print response + if output_file is not None: - if print_response: for http_event in http_events: + if isinstance(http_event, HeadersReceived) and include: - if isinstance(http_event, HeadersReceived): headers = b"" for k, v in http_event.headers: headers += k + b": " + v + b"\r\n" if headers: + output_file.write(headers + b"\r\n") - sys.stderr.buffer.write(headers + b"\r\n") + output_file.flush() - sys.stderr.buffer.flush() elif isinstance(http_event, DataReceived): + output_file.write(http_event.data) - sys.stdout.buffer.write(http_event.data) + output_file.flush() - sys.stdout.buffer.flush()
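The new output handling in run() boils down to a three-way choice: "-" means standard output, any other string is a file path, and no value disables output. A small sketch of that selection logic, pulled out into a helper for illustration (select_output is not part of the example script):

import sys
from typing import BinaryIO, Optional

def select_output(output: Optional[str]) -> Optional[BinaryIO]:
    # "-" selects stdout's binary buffer, a path opens a file for writing,
    # and None disables response output entirely.
    if output == "-":
        return sys.stdout.buffer
    elif output:
        return open(output, "wb")
    return None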
examples.httpx_client/run
Modified
aiortc~aioquic
ba1447d8ed9696a9f155d72106b8c2b9d708a9ee
[examples] add missing CRLF after headers in httpx client
<42>:<add> sys.stderr.write("\r\n")
# module: examples.httpx_client def run(configuration: QuicConfiguration, url: str, data: str) -> None: <0> # parse URL <1> parsed = urlparse(url) <2> assert parsed.scheme == "https", "Only https:// URLs are supported." <3> if ":" in parsed.netloc: <4> host, port_str = parsed.netloc.split(":") <5> port = int(port_str) <6> else: <7> host = parsed.netloc <8> port = 443 <9> <10> async with connect( <11> host, <12> port, <13> configuration=configuration, <14> create_protocol=H3Dispatcher, <15> session_ticket_handler=save_session_ticket, <16> ) as dispatch: <17> client = AsyncClient(dispatch=cast(AsyncDispatcher, dispatch)) <18> <19> # perform request <20> start = time.time() <21> if data is not None: <22> response = await client.post( <23> url, <24> data=data.encode(), <25> headers={"content-type": "application/x-www-form-urlencoded"}, <26> ) <27> else: <28> response = await client.get(url) <29> <30> elapsed = time.time() - start <31> <32> # print speed <33> octets = len(response.content) <34> logger.info( <35> "Received %d bytes in %.1f s (%.3f Mbps)" <36> % (octets, elapsed, octets * 8 / elapsed / 1000000) <37> ) <38> <39> # print response <40> for header, value in response.headers.items(): <41> sys.stderr.write(header + ": " + value + "\r\n") <42> sys.stdout.buffer.write(response.content) <43> sys.stdout.buffer.flush() <44>
===========unchanged ref 0=========== at: examples.httpx_client logger = logging.getLogger("client") H3Dispatcher(*args, **kwargs) save_session_ticket(ticket) at: httpx._client AsyncClient(*, auth: AuthTypes | None=None, params: QueryParamTypes | None=None, headers: HeaderTypes | None=None, cookies: CookieTypes | None=None, verify: VerifyTypes=True, cert: CertTypes | None=None, http1: bool=True, http2: bool=False, proxy: ProxyTypes | None=None, proxies: ProxiesTypes | None=None, mounts: None | (typing.Mapping[str, AsyncBaseTransport | None])=None, timeout: TimeoutTypes=DEFAULT_TIMEOUT_CONFIG, follow_redirects: bool=False, limits: Limits=DEFAULT_LIMITS, max_redirects: int=DEFAULT_MAX_REDIRECTS, event_hooks: None | (typing.Mapping[str, list[EventHook]])=None, base_url: URL | str="", transport: AsyncBaseTransport | None=None, app: typing.Callable[..., typing.Any] | None=None, trust_env: bool=True, default_encoding: str | typing.Callable[[bytes], str]="utf-8") at: httpx._client.AsyncClient get(url: URL | str, *, params: QueryParamTypes | None=None, headers: HeaderTypes | None=None, cookies: CookieTypes | None=None, auth: AuthTypes | UseClientDefault | None=USE_CLIENT_DEFAULT, follow_redirects: bool | UseClientDefault=USE_CLIENT_DEFAULT, timeout: TimeoutTypes | UseClientDefault=USE_CLIENT_DEFAULT, extensions: RequestExtensions | None=None) -> Response ===========unchanged ref 1=========== post(url: URL | str, *, content: RequestContent | None=None, data: RequestData | None=None, files: RequestFiles | None=None, json: typing.Any | None=None, params: QueryParamTypes | None=None, headers: HeaderTypes | None=None, cookies: CookieTypes | None=None, auth: AuthTypes | UseClientDefault=USE_CLIENT_DEFAULT, follow_redirects: bool | UseClientDefault=USE_CLIENT_DEFAULT, timeout: TimeoutTypes | UseClientDefault=USE_CLIENT_DEFAULT, extensions: RequestExtensions | None=None) -> Response at: httpx._models.Headers items() -> typing.ItemsView[str, str] at: httpx._models.Response.__init__ self.headers = Headers(headers) at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: sys stdout: TextIO stderr: TextIO at: time time() -> float at: typing cast(typ: Type[_T], val: Any) -> _T cast(typ: str, val: Any) -> Any cast(typ: object, val: Any) -> Any at: typing.BinaryIO __slots__ = () write(s: AnyStr) -> int at: typing.IO __slots__ = () write(s: AnyStr) -> int at: typing.TextIO __slots__ = () at: urllib.parse urlparse(url: str, scheme: Optional[str]=..., allow_fragments: bool=...) -> ParseResult urlparse(url: Optional[bytes], scheme: Optional[bytes]=..., allow_fragments: bool=...) -> ParseResultBytes
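The one-line fix above adds the blank line that separates the header block from the body, so piping stderr and stdout together yields a well-formed HTTP-style dump. A minimal sketch with placeholder header values:

import sys

response_headers = {"content-type": "text/html", "content-length": "42"}

for header, value in response_headers.items():
    sys.stderr.write(header + ": " + value + "\r\n")
sys.stderr.write("\r\n")                  # the newly added separator line
sys.stdout.buffer.write(b"<body bytes>")  # response content follows on stdout
sys.stdout.buffer.flush()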
examples.http3_client/perform_http_request
Modified
aiortc~aioquic
3296a718a2762c57fdb8f2bc9e4e4cc670570562
[examples] allow client to make multiple requests and save to dir
<23>:<add> if output_dir is not None: <del> if output_file is not None: <24>:<add> output_path = os.path.join( <add> output_dir, os.path.basename(urlparse(url).path) or "index.html" <add> ) <add> with open(output_path, "wb") as output_file: <add> for http_event in http_events: <del> for http_event in http_events: <25>:<add> if isinstance(http_event, HeadersReceived) and include: <del> if isinstance(http_event, HeadersReceived) and include: <26>:<add> headers = b"" <del> headers = b"" <27>:<add> for k, v in http_event.headers: <del> for k, v in http_event.headers: <28>:<add> headers += k + b": " + v + b"\r\n" <del> headers += k + b": " + v + b"\r\n" <29>:<add> if headers: <del> if headers: <30>:<add> output_file.write(headers + b"\r\n") <del> output_file.write(headers + b"\r\n") <31>:<del> output_file.flush() <32>:<add> elif isinstance(http_event, DataReceived): <del> elif isinstance(http_event, DataReceived): <33>:<add> output_file.write(http_event.data) <del> output_file.write(http_event.data) <34>:<del> output_file.flush()
# module: examples.http3_client def perform_http_request( - client: HttpClient, - url: str, - data: str, - include: bool, - output_file: Optional[BinaryIO], + client: HttpClient, url: str, data: str, include: bool, output_dir: Optional[str], ) -> None: <0> # perform request <1> start = time.time() <2> if data is not None: <3> http_events = await client.post( <4> url, <5> data=data.encode(), <6> headers={"content-type": "application/x-www-form-urlencoded"}, <7> ) <8> else: <9> http_events = await client.get(url) <10> elapsed = time.time() - start <11> <12> # print speed <13> octets = 0 <14> for http_event in http_events: <15> if isinstance(http_event, DataReceived): <16> octets += len(http_event.data) <17> logger.info( <18> "Received %d bytes in %.1f s (%.3f Mbps)" <19> % (octets, elapsed, octets * 8 / elapsed / 1000000) <20> ) <21> <22> # output response <23> if output_file is not None: <24> for http_event in http_events: <25> if isinstance(http_event, HeadersReceived) and include: <26> headers = b"" <27> for k, v in http_event.headers: <28> headers += k + b": " + v + b"\r\n" <29> if headers: <30> output_file.write(headers + b"\r\n") <31> output_file.flush() <32> elif isinstance(http_event, DataReceived): <33> output_file.write(http_event.data) <34> output_file.flush() <35>
===========unchanged ref 0=========== at: examples.http3_client logger = logging.getLogger("client") HttpClient(*args, **kwargs) at: examples.http3_client.HttpClient get(url: str, headers: Dict={}) -> Deque[H3Event] post(url: str, data: bytes, headers: Dict={}) -> Deque[H3Event] at: io.BufferedReader write(self, buffer: ReadableBuffer, /) -> int at: logging.Logger info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: os.path join(a: StrPath, *paths: StrPath) -> str join(a: BytesPath, *paths: BytesPath) -> bytes basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: time time() -> float at: typing.BinaryIO __slots__ = () write(s: AnyStr) -> int at: typing.IO __slots__ = () write(s: AnyStr) -> int at: urllib.parse urlparse(url: str, scheme: Optional[str]=..., allow_fragments: bool=...) -> ParseResult urlparse(url: Optional[bytes], scheme: Optional[bytes]=..., allow_fragments: bool=...) -> ParseResultBytes
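With output_dir, the client derives one local file name per URL. A standalone sketch of that rule (output_path_for is an illustrative helper, not part of the example script):

import os
from urllib.parse import urlparse

def output_path_for(url: str, output_dir: str) -> str:
    # Use the last path component of the URL, or "index.html" when the path
    # is empty or ends with "/", the same rule as in the diff above.
    name = os.path.basename(urlparse(url).path) or "index.html"
    return os.path.join(output_dir, name)

print(output_path_for("https://example.com/dir/", "out"))       # e.g. out/index.html
print(output_path_for("https://example.com/page.html", "out"))  # e.g. out/page.html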
examples.http3_client/run
Modified
aiortc~aioquic
3296a718a2762c57fdb8f2bc9e4e4cc670570562
[examples] allow client to make multiple requests and save to dir
<1>:<add> parsed = urlparse(urls[0]) <del> parsed = urlparse(url) <23>:<add> ws = await client.websocket(urls[0], subprotocols=["chat", "superchat"]) <del> ws = await client.websocket(url, subprotocols=["chat", "superchat"]) <36>:<del> if output == "-": <37>:<del> output_file = sys.stdout.buffer <38>:<del> elif output: <39>:<del> output_file = open(output, "wb") <40>:<del> else: <41>:<del> output_file = None <42>:<del> <50>:<add> output_dir=output_dir, <del> output_file=output_file,
# module: examples.http3_client def run( configuration: QuicConfiguration, + urls: List[str], - url: str, data: str, include: bool, - output: str, - parallel: int, + output_dir: Optional[str], ) -> None: <0> # parse URL <1> parsed = urlparse(url) <2> assert parsed.scheme in ( <3> "https", <4> "wss", <5> ), "Only https:// or wss:// URLs are supported." <6> if ":" in parsed.netloc: <7> host, port_str = parsed.netloc.split(":") <8> port = int(port_str) <9> else: <10> host = parsed.netloc <11> port = 443 <12> <13> async with connect( <14> host, <15> port, <16> configuration=configuration, <17> create_protocol=HttpClient, <18> session_ticket_handler=save_session_ticket, <19> ) as client: <20> client = cast(HttpClient, client) <21> <22> if parsed.scheme == "wss": <23> ws = await client.websocket(url, subprotocols=["chat", "superchat"]) <24> <25> # send some messages and receive reply <26> for i in range(2): <27> message = "Hello {}, WebSocket!".format(i) <28> print("> " + message) <29> await ws.send(message) <30> <31> message = await ws.recv() <32> print("< " + message) <33> <34> await ws.close() <35> else: <36> if output == "-": <37> output_file = sys.stdout.buffer <38> elif output: <39> output_file = open(output, "wb") <40> else: <41> output_file = None <42> <43> # perform request <44> coros = [ <45> perform_http_request( <46> client=client, <47> url=url, <48> data=data, <49> include=include, <50> output_file=output_file, <51> </s>
===========below chunk 0=========== # module: examples.http3_client def run( configuration: QuicConfiguration, + urls: List[str], - url: str, data: str, include: bool, - output: str, - parallel: int, + output_dir: Optional[str], ) -> None: # offset: 1 for i in range(parallel) ] await asyncio.gather(*coros) ===========unchanged ref 0=========== at: argparse ArgumentParser(prog: Optional[str]=..., usage: Optional[str]=..., description: Optional[str]=..., epilog: Optional[str]=..., parents: Sequence[ArgumentParser]=..., formatter_class: _FormatterClass=..., prefix_chars: str=..., fromfile_prefix_chars: Optional[str]=..., argument_default: Any=..., conflict_handler: str=..., add_help: bool=..., allow_abbrev: bool=...) at: argparse._ActionsContainer add_argument(*name_or_flags: Text, action: Union[Text, Type[Action]]=..., nargs: Union[int, Text]=..., const: Any=..., default: Any=..., type: Union[Callable[[Text], _T], Callable[[str], _T], FileType]=..., choices: Iterable[_T]=..., required: bool=..., help: Optional[Text]=..., metavar: Optional[Union[Text, Tuple[Text, ...]]]=..., dest: Optional[Text]=..., version: Text=..., **kwargs: Any) -> Action ===========unchanged ref 1=========== at: asyncio.tasks gather(coro_or_future1: _FutureT[_T1], coro_or_future2: _FutureT[_T2], coro_or_future3: _FutureT[_T3], coro_or_future4: _FutureT[_T4], *, loop: Optional[AbstractEventLoop]=..., return_exceptions: bool=...) -> Future[ Tuple[Union[_T1, BaseException], Union[_T2, BaseException], Union[_T3, BaseException], Union[_T4, BaseException]] ] gather(coro_or_future1: _FutureT[_T1], coro_or_future2: _FutureT[_T2], coro_or_future3: _FutureT[_T3], coro_or_future4: _FutureT[_T4], *, loop: Optional[AbstractEventLoop]=..., return_exceptions: Literal[False]=...) -> Future[Tuple[_T1, _T2, _T3, _T4]] gather(coro_or_future1: _FutureT[_T1], coro_or_future2: _FutureT[_T2], *, loop: Optional[AbstractEventLoop]=..., return_exceptions: Literal[False]=...) -> Future[Tuple[_T1, _T2]] gather(coro_or_future1: _FutureT[Any], coro_or_future2: _FutureT[Any], coro_or_future3: _FutureT[Any], coro_or_future4: _FutureT[Any], coro_or_future5: _FutureT[Any], coro_or_future6: _FutureT[Any], *coros_or_futures: _FutureT[Any], loop: Optional[AbstractEventLoop]=..., return_exceptions: bool=...) -> Future[List[Any]] gather(coro_or_future1: _FutureT[_T1], coro_or_future2: _FutureT[_T2], coro_or_future3: _FutureT[_T3], *, loop: Optional[AbstractEventLoop]=..., return_exceptions: Literal[False]=...) -> Future[</s> ===========unchanged ref 2=========== at: examples.http3_client HttpClient(*args, **kwargs) perform_http_request(client: HttpClient, url: str, data: str, include: bool, output_dir: Optional[str]) -> None save_session_ticket(ticket) at: typing cast(typ: Type[_T], val: Any) -> _T cast(typ: str, val: Any) -> Any cast(typ: object, val: Any) -> Any List = _alias(list, 1, inst=False, name='List') at: urllib.parse urlparse(url: str, scheme: Optional[str]=..., allow_fragments: bool=...) -> ParseResult urlparse(url: Optional[bytes], scheme: Optional[bytes]=..., allow_fragments: bool=...) 
-> ParseResultBytes ===========changed ref 0=========== # module: examples.http3_client def perform_http_request( - client: HttpClient, - url: str, - data: str, - include: bool, - output_file: Optional[BinaryIO], + client: HttpClient, url: str, data: str, include: bool, output_dir: Optional[str], ) -> None: # perform request start = time.time() if data is not None: http_events = await client.post( url, data=data.encode(), headers={"content-type": "application/x-www-form-urlencoded"}, ) else: http_events = await client.get(url) elapsed = time.time() - start # print speed octets = 0 for http_event in http_events: if isinstance(http_event, DataReceived): octets += len(http_event.data) logger.info( "Received %d bytes in %.1f s (%.3f Mbps)" % (octets, elapsed, octets * 8 / elapsed / 1000000) ) # output response + if output_dir is not None: - if output_file is not None: + output_path = os.path.join( + output_dir, os.path.basename(urlparse(url).path) or "index.html" + ) + with open(output_path, "wb") as output_file: + for http_event in http_events: - for http_event in http_events: + if isinstance(http_event, HeadersReceived) and include: - if isinstance(http_event, HeadersReceived) and include: + headers = b"" - headers = b"" + for k, v in http_event.headers: - for k, v in http_event.headers: + headers += k + b": " + v + b"\r\n" - headers += k + b": " + v + b"\r\n" + if headers: - if</s> ===========changed ref 1=========== # module: examples.http3_client def perform_http_request( - client: HttpClient, - url: str, - data: str, - include: bool, - output_file: Optional[BinaryIO], + client: HttpClient, url: str, data: str, include: bool, output_dir: Optional[str], ) -> None: # offset: 1 <s>" - headers += k + b": " + v + b"\r\n" + if headers: - if headers: + output_file.write(headers + b"\r\n") - output_file.write(headers + b"\r\n") - output_file.flush() + elif isinstance(http_event, DataReceived): - elif isinstance(http_event, DataReceived): + output_file.write(http_event.data) - output_file.write(http_event.data) - output_file.flush()
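With run() now taking a list of URLs, the requests are issued concurrently over the single QUIC connection via asyncio.gather(). A self-contained sketch of that fan-out pattern, with fetch() standing in for perform_http_request():

import asyncio

async def fetch(url: str) -> str:
    await asyncio.sleep(0)  # placeholder for the real HTTP/3 request
    return url

async def fetch_all(urls):
    # One coroutine per URL, all awaited together.
    coros = [fetch(url) for url in urls]
    return await asyncio.gather(*coros)

print(asyncio.run(fetch_all(["https://example.com/a", "https://example.com/b"])))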
aioquic.quic.packet/pull_quic_transport_parameters
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<2>:<add> if protocol_version < QuicProtocolVersion.DRAFT_27: <add> with pull_block(buf, 2) as length: <del> with pull_block(buf, 2) as length: <3>:<add> end = buf.tell() + length <del> end = buf.tell() + length <4>:<add> while buf.tell() < end: <del> while buf.tell() < end: <5>:<add> param_id = buf.pull_uint16() <del> param_id = buf.pull_uint16() <6>:<add> param_len = buf.pull_uint16() <del> param_len = buf.pull_uint16() <7>:<add> param_start = buf.tell() <add> if param_id in PARAMS: <add> # parse known parameter <add> param_name, param_type = PARAMS[param_id] <add> if param_type == int: <add> setattr(params, param_name, buf.pull_uint_var()) <add> elif param_type == bytes: <add> setattr(params, param_name, buf.pull_bytes(param_len)) <add> elif param_type == QuicPreferredAddress: <add> setattr(params, param_name, pull_quic_preferred_address(buf)) <add> else: <add> setattr(params, param_name, True
# module: aioquic.quic.packet + def pull_quic_transport_parameters( + buf: Buffer, protocol_version: int + ) -> QuicTransportParameters: - def pull_quic_transport_parameters(buf: Buffer) -> QuicTransportParameters: <0> params = QuicTransportParameters() <1> <2> with pull_block(buf, 2) as length: <3> end = buf.tell() + length <4> while buf.tell() < end: <5> param_id = buf.pull_uint16() <6> param_len = buf.pull_uint16() <7> param_start = buf.tell() <8> if param_id in PARAMS: <9> # parse known parameter <10> param_name, param_type = PARAMS[param_id] <11> if param_type == int: <12> setattr(params, param_name, buf.pull_uint_var()) <13> elif param_type == bytes: <14> setattr(params, param_name, buf.pull_bytes(param_len)) <15> elif param_type == QuicPreferredAddress: <16> setattr(params, param_name, pull_quic_preferred_address(buf)) <17> else: <18> setattr(params, param_name, True) <19> else: <20> # skip unknown parameter <21> buf.pull_bytes(param_len) <22> assert buf.tell() == param_start + param_len <23> <24> return params <25>
===========unchanged ref 0=========== at: aioquic.quic.packet QuicProtocolVersion(x: Union[str, bytes, bytearray], base: int) QuicProtocolVersion(x: Union[str, bytes, SupportsInt, _SupportsIndex, _SupportsTrunc]=...) QuicPreferredAddress(ipv4_address: Optional[Tuple[str, int]], ipv6_address: Optional[Tuple[str, int]], connection_id: bytes, stateless_reset_token: bytes) QuicTransportParameters(original_connection_id: Optional[bytes]=None, idle_timeout: Optional[int]=None, stateless_reset_token: Optional[bytes]=None, max_packet_size: Optional[int]=None, initial_max_data: Optional[int]=None, initial_max_stream_data_bidi_local: Optional[int]=None, initial_max_stream_data_bidi_remote: Optional[int]=None, initial_max_stream_data_uni: Optional[int]=None, initial_max_streams_bidi: Optional[int]=None, initial_max_streams_uni: Optional[int]=None, ack_delay_exponent: Optional[int]=None, max_ack_delay: Optional[int]=None, disable_active_migration: Optional[bool]=False, preferred_address: Optional[QuicPreferredAddress]=None, active_connection_id_limit: Optional[int]=None, max_datagram_frame_size: Optional[int]=None, quantum_readiness: Optional[bytes]=None) ===========unchanged ref 1=========== PARAMS = { 0: ("original_connection_id", bytes), 1: ("idle_timeout", int), 2: ("stateless_reset_token", bytes), 3: ("max_packet_size", int), 4: ("initial_max_data", int), 5: ("initial_max_stream_data_bidi_local", int), 6: ("initial_max_stream_data_bidi_remote", int), 7: ("initial_max_stream_data_uni", int), 8: ("initial_max_streams_bidi", int), 9: ("initial_max_streams_uni", int), 10: ("ack_delay_exponent", int), 11: ("max_ack_delay", int), 12: ("disable_active_migration", bool), 13: ("preferred_address", QuicPreferredAddress), 14: ("active_connection_id_limit", int), 32: ("max_datagram_frame_size", int), 3127: ("quantum_readiness", bytes), } pull_quic_preferred_address(buf: Buffer) -> QuicPreferredAddress ===========changed ref 0=========== # module: aioquic.quic.packet class QuicProtocolVersion(IntEnum): NEGOTIATION = 0 DRAFT_25 = 0xFF000019 + DRAFT_26 = 0xFF00001A + DRAFT_27 = 0xFF00001B
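For protocol versions at or above DRAFT_27 the parameter ids and lengths switch from fixed 16-bit fields to QUIC variable-length integers, and the outer 2-byte length block disappears (the loop simply runs until the buffer is exhausted). A self-contained sketch of the variable-length integer decoding involved; aioquic's Buffer.pull_uint_var() is the real implementation, pull_uint_var() below is only illustrative:

from typing import Tuple

def pull_uint_var(data: bytes, pos: int) -> Tuple[int, int]:
    # The top two bits of the first byte select a 1, 2, 4 or 8 byte encoding;
    # the remaining six bits are the most significant bits of the value.
    first = data[pos]
    length = 1 << (first >> 6)
    value = first & 0x3F
    for i in range(1, length):
        value = (value << 8) | data[pos + i]
    return value, pos + length

print(pull_uint_var(bytes([0x19]), 0))        # (25, 1) - one-byte encoding
print(pull_uint_var(bytes([0x40, 0x25]), 0))  # (37, 2) - two-byte encoding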
aioquic.quic.packet/push_quic_transport_parameters
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<0>:<add> if protocol_version < QuicProtocolVersion.DRAFT_27: <add> with push_block(buf, 2): <del> with push_block(buf, 2): <1>:<add> for param_id, (param_name, param_type) in PARAMS.items(): <add> param_value = getattr(params, param_name) <add> if param_value is not None and param_value is not False: <add> buf.push_uint16(param_id) <add> with push_block(buf, 2): <add> if param_type == int: <add> buf.push_uint_var(param_value) <add> elif param_type == bytes: <add> buf.push_bytes(param_value) <add> elif param_type == QuicPreferredAddress: <add> push_quic_preferred_address(buf, param_value) <add> else: <4>:<del> buf.push_uint16(param_id) <5>:<del> with push_block(buf, 2): <6>:<add> param_buf = Buffer(capacity=65536) <add> if param_type == int: <del> if param_type == int: <7>:<add> param_buf.push_uint_var(param_value) <del> buf.push_uint_var(param_value) <8>:<add> elif param_
# module: aioquic.quic.packet def push_quic_transport_parameters( + buf: Buffer, params: QuicTransportParameters, protocol_version: int - buf: Buffer, params: QuicTransportParameters ) -> None: <0> with push_block(buf, 2): <1> for param_id, (param_name, param_type) in PARAMS.items(): <2> param_value = getattr(params, param_name) <3> if param_value is not None and param_value is not False: <4> buf.push_uint16(param_id) <5> with push_block(buf, 2): <6> if param_type == int: <7> buf.push_uint_var(param_value) <8> elif param_type == bytes: <9> buf.push_bytes(param_value) <10> elif param_type == QuicPreferredAddress: <11> push_quic_preferred_address(buf, param_value) <12>
===========unchanged ref 0=========== at: aioquic.quic.packet QuicPreferredAddress(ipv4_address: Optional[Tuple[str, int]], ipv6_address: Optional[Tuple[str, int]], connection_id: bytes, stateless_reset_token: bytes) PARAMS = { 0: ("original_connection_id", bytes), 1: ("idle_timeout", int), 2: ("stateless_reset_token", bytes), 3: ("max_packet_size", int), 4: ("initial_max_data", int), 5: ("initial_max_stream_data_bidi_local", int), 6: ("initial_max_stream_data_bidi_remote", int), 7: ("initial_max_stream_data_uni", int), 8: ("initial_max_streams_bidi", int), 9: ("initial_max_streams_uni", int), 10: ("ack_delay_exponent", int), 11: ("max_ack_delay", int), 12: ("disable_active_migration", bool), 13: ("preferred_address", QuicPreferredAddress), 14: ("active_connection_id_limit", int), 32: ("max_datagram_frame_size", int), 3127: ("quantum_readiness", bytes), } pull_quic_preferred_address(buf: Buffer) -> QuicPreferredAddress at: aioquic.quic.packet.pull_quic_transport_parameters params = QuicTransportParameters() ===========changed ref 0=========== # module: aioquic.quic.packet class QuicProtocolVersion(IntEnum): NEGOTIATION = 0 DRAFT_25 = 0xFF000019 + DRAFT_26 = 0xFF00001A + DRAFT_27 = 0xFF00001B ===========changed ref 1=========== # module: aioquic.quic.packet + def pull_quic_transport_parameters( + buf: Buffer, protocol_version: int + ) -> QuicTransportParameters: - def pull_quic_transport_parameters(buf: Buffer) -> QuicTransportParameters: params = QuicTransportParameters() + if protocol_version < QuicProtocolVersion.DRAFT_27: + with pull_block(buf, 2) as length: - with pull_block(buf, 2) as length: + end = buf.tell() + length - end = buf.tell() + length + while buf.tell() < end: - while buf.tell() < end: + param_id = buf.pull_uint16() - param_id = buf.pull_uint16() + param_len = buf.pull_uint16() - param_len = buf.pull_uint16() + param_start = buf.tell() + if param_id in PARAMS: + # parse known parameter + param_name, param_type = PARAMS[param_id] + if param_type == int: + setattr(params, param_name, buf.pull_uint_var()) + elif param_type == bytes: + setattr(params, param_name, buf.pull_bytes(param_len)) + elif param_type == QuicPreferredAddress: + setattr(params, param_name, pull_quic_preferred_address(buf)) + else: + setattr(params, param_name, True) + else: + # skip unknown parameter + buf.pull_bytes(param_len) + assert buf.tell() == param_start + param_len + else: + while not buf.eof(): + param_id = buf.pull_uint_var() + param_len = buf.pull_uint_var() param_start = buf.tell() if param_id in PARAMS: # parse known parameter param_name, param_type =</s> ===========changed ref 2=========== # module: aioquic.quic.packet + def pull_quic_transport_parameters( + buf: Buffer, protocol_version: int + ) -> QuicTransportParameters: - def pull_quic_transport_parameters(buf: Buffer) -> QuicTransportParameters: # offset: 1 <s>.tell() if param_id in PARAMS: # parse known parameter param_name, param_type = PARAMS[param_id] if param_type == int: setattr(params, param_name, buf.pull_uint_var()) elif param_type == bytes: setattr(params, param_name, buf.pull_bytes(param_len)) elif param_type == QuicPreferredAddress: setattr(params, param_name, pull_quic_preferred_address(buf)) else: setattr(params, param_name, True) else: # skip unknown parameter buf.pull_bytes(param_len) assert buf.tell() == param_start + param_len return params
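The serialization side mirrors this: in the draft-27 branch the parameter id and length are written with push_uint_var() instead of push_uint16(). As a companion to the decoder sketch above, a self-contained variable-length integer encoder (illustrative only; aioquic's Buffer.push_uint_var() is the real one):

def push_uint_var(value: int) -> bytes:
    # Prefix bits 00/01/10/11 select a 1, 2, 4 or 8 byte encoding.
    if value < 0x40:
        return bytes([value])
    elif value < 0x4000:
        return (0x4000 | value).to_bytes(2, "big")
    elif value < 0x40000000:
        return (0x80000000 | value).to_bytes(4, "big")
    elif value < 0x4000000000000000:
        return (0xC000000000000000 | value).to_bytes(8, "big")
    raise ValueError("integer is too big for a variable-length integer")

print(push_uint_var(25).hex())    # "19"
print(push_uint_var(3127).hex())  # "4c37" - e.g. the quantum_readiness id above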
tests.test_connection/QuicConnectionTest.test_connect_with_loss_1
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<37>:<add> self.assertEqual(datagram_sizes(items), [1280, 1062]) <del> self.assertEqual(datagram_sizes(items), [1280, 1084])
# module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_1(self): <0> """ <1> Check connection is established even in the client's INITIAL is lost. <2> """ <3> <4> def datagram_sizes(items): <5> return [len(x[0]) for x in items] <6> <7> client_configuration = QuicConfiguration(is_client=True) <8> client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <9> <10> client = QuicConnection(configuration=client_configuration) <11> client._ack_delay = 0 <12> <13> server_configuration = QuicConfiguration(is_client=False) <14> server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) <15> <16> server = QuicConnection(configuration=server_configuration) <17> server._ack_delay = 0 <18> <19> # client sends INITIAL <20> now = 0.0 <21> client.connect(SERVER_ADDR, now=now) <22> items = client.datagrams_to_send(now=now) <23> self.assertEqual(datagram_sizes(items), [1280]) <24> self.assertEqual(client.get_timer(), 1.0) <25> <26> # INITIAL is lost <27> now = 1.0 <28> client.handle_timer(now=now) <29> items = client.datagrams_to_send(now=now) <30> self.assertEqual(datagram_sizes(items), [1280]) <31> self.assertEqual(client.get_timer(), 3.0) <32> <33> # server receives INITIAL, sends INITIAL + HANDSHAKE <34> now = 1.1 <35> server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) <36> items = server.datagrams_to_send(now=now) <37> self.assertEqual(datagram_sizes(items), [1280, 1084]) <38> self.assertEqual(server.get_timer(), 2.1) <39> self.assertEqual</s>
===========below chunk 0=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_1(self): # offset: 1 self.assertEqual(len(server._loss.spaces[1].sent_packets), 2) self.assertEqual(type(server.next_event()), events.ProtocolNegotiated) self.assertIsNone(server.next_event()) # handshake continues normally now = 1.2 client.receive_datagram(items[0][0], SERVER_ADDR, now=now) client.receive_datagram(items[1][0], SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [376]) self.assertAlmostEqual(client.get_timer(), 1.825) self.assertEqual(type(client.next_event()), events.ProtocolNegotiated) self.assertEqual(type(client.next_event()), events.HandshakeCompleted) self.assertEqual(type(client.next_event()), events.ConnectionIdIssued) now = 1.3 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [229]) self.assertAlmostEqual(server.get_timer(), 1.825) self.assertEqual(len(server._loss.spaces[0].sent_packets), 0) self.assertEqual(len(server._loss.spaces[1].sent_packets), 0) self.assertEqual(type(server.next_event()), events.HandshakeCompleted) self.assertEqual(type(server.next_event()), events.ConnectionIdIssued) now = 1.4 client.receive_datagram(items[0][0], SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual</s> ===========below chunk 1=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_1(self): # offset: 2 <s>ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [32]) self.assertAlmostEqual(client.get_timer(), 61.4) # idle timeout ===========unchanged ref 0=========== at: tests.test_connection CLIENT_ADDR = ("1.2.3.4", 1234) SERVER_ADDR = ("2.3.4.5", 4433) at: tests.utils SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") SERVER_CERTFILE = os.path.join(os.path.dirname(__file__), "ssl_cert.pem") SERVER_KEYFILE = os.path.join(os.path.dirname(__file__), "ssl_key.pem") at: unittest.case.TestCase failureException: Type[BaseException] longMessage: bool maxDiff: Optional[int] _testMethodName: str _testMethodDoc: str assertEqual(first: Any, second: Any, msg: Any=...) -> None assertIsNone(obj: Any, msg: Any=...) -> None assertAlmostEqual(first: float, second: float, places: Optional[int]=..., msg: Any=..., delta: Optional[float]=...) -> None assertAlmostEqual(first: datetime.datetime, second: datetime.datetime, places: Optional[int]=..., msg: Any=..., delta: Optional[datetime.timedelta]=...) 
-> None ===========changed ref 0=========== # module: aioquic.h0.connection + H0_ALPN = ["hq-27", "hq-26", "hq-25"] - H0_ALPN = ["hq-25"] ===========changed ref 1=========== # module: aioquic.quic.packet class QuicProtocolVersion(IntEnum): NEGOTIATION = 0 DRAFT_25 = 0xFF000019 + DRAFT_26 = 0xFF00001A + DRAFT_27 = 0xFF00001B ===========changed ref 2=========== # module: aioquic.quic.packet def push_quic_transport_parameters( + buf: Buffer, params: QuicTransportParameters, protocol_version: int - buf: Buffer, params: QuicTransportParameters ) -> None: + if protocol_version < QuicProtocolVersion.DRAFT_27: + with push_block(buf, 2): - with push_block(buf, 2): + for param_id, (param_name, param_type) in PARAMS.items(): + param_value = getattr(params, param_name) + if param_value is not None and param_value is not False: + buf.push_uint16(param_id) + with push_block(buf, 2): + if param_type == int: + buf.push_uint_var(param_value) + elif param_type == bytes: + buf.push_bytes(param_value) + elif param_type == QuicPreferredAddress: + push_quic_preferred_address(buf, param_value) + else: for param_id, (param_name, param_type) in PARAMS.items(): param_value = getattr(params, param_name) if param_value is not None and param_value is not False: - buf.push_uint16(param_id) - with push_block(buf, 2): + param_buf = Buffer(capacity=65536) + if param_type == int: - if param_type == int: + param_buf.push_uint_var(param_value) - buf.push_uint_var(param_value) + elif param_type == bytes: - elif param_type == bytes: + param_buf.push_bytes(param_value) - buf.push_bytes(param_value) + elif param_type == QuicPreferredAddress: - elif param_type == QuicPreferredAddress: + push_quic_preferred_address(param_buf, param_value) - push_</s> ===========changed ref 3=========== # module: aioquic.quic.packet def push_quic_transport_parameters( + buf: Buffer, params: QuicTransportParameters, protocol_version: int - buf: Buffer, params: QuicTransportParameters ) -> None: # offset: 1 <s> QuicPreferredAddress: + push_quic_preferred_address(param_buf, param_value) - push_quic_preferred_address(buf, param_value) + buf.push_uint_var(param_id) + buf.push_uint_var(param_buf.tell()) + buf.push_bytes(param_buf.data)
tests.test_connection/QuicConnectionTest.test_connect_with_loss_2
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<26>:<add> self.assertEqual(datagram_sizes(items), [1280, 1062]) <del> self.assertEqual(datagram_sizes(items), [1280, 1084])
# module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_2(self): <0> def datagram_sizes(items): <1> return [len(x[0]) for x in items] <2> <3> client_configuration = QuicConfiguration(is_client=True) <4> client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <5> <6> client = QuicConnection(configuration=client_configuration) <7> client._ack_delay = 0 <8> <9> server_configuration = QuicConfiguration(is_client=False) <10> server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) <11> <12> server = QuicConnection(configuration=server_configuration) <13> server._ack_delay = 0 <14> <15> # client sends INITIAL <16> now = 0.0 <17> client.connect(SERVER_ADDR, now=now) <18> items = client.datagrams_to_send(now=now) <19> self.assertEqual(datagram_sizes(items), [1280]) <20> self.assertEqual(client.get_timer(), 1.0) <21> <22> # server receives INITIAL, sends INITIAL + HANDSHAKE but second datagram is lost <23> now = 0.1 <24> server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) <25> items = server.datagrams_to_send(now=now) <26> self.assertEqual(datagram_sizes(items), [1280, 1084]) <27> self.assertEqual(server.get_timer(), 1.1) <28> self.assertEqual(len(server._loss.spaces[0].sent_packets), 1) <29> self.assertEqual(len(server._loss.spaces[1].sent_packets), 2) <30> <31> # client only receives first datagram and sends ACKS <32> now = 0.2 <33> client.receive_datagram(items[0][0], SERVER_ADDR, now=now) <34> items = client.datagrams_to_send(now=</s>
===========below chunk 0=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_2(self): # offset: 1 self.assertEqual(datagram_sizes(items), [97]) self.assertAlmostEqual(client.get_timer(), 0.625) self.assertEqual(type(client.next_event()), events.ProtocolNegotiated) self.assertIsNone(client.next_event()) # client PTO - HANDSHAKE PING now = client.get_timer() # ~0.625 client.handle_timer(now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [44]) self.assertAlmostEqual(client.get_timer(), 1.875) # server receives PING, discards INITIAL and sends ACK now = 0.725 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [48]) self.assertAlmostEqual(server.get_timer(), 1.1) self.assertEqual(len(server._loss.spaces[0].sent_packets), 0) self.assertEqual(len(server._loss.spaces[1].sent_packets), 3) self.assertEqual(type(server.next_event()), events.ProtocolNegotiated) self.assertIsNone(server.next_event()) # ACKs are lost, server retransmits HANDSHAKE now = server.get_timer() server.handle_timer(now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280, 876]) self.assertAlmostEqual(server.get_timer(), 3.1) self.assertEqual(len(server._loss.spaces[0].sent_packets), 0) </s> ===========below chunk 1=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_2(self): # offset: 2 <s>(), 3.1) self.assertEqual(len(server._loss.spaces[0].sent_packets), 0) self.assertEqual(len(server._loss.spaces[1].sent_packets), 3) self.assertIsNone(server.next_event()) # handshake continues normally now = 1.2 client.receive_datagram(items[0][0], SERVER_ADDR, now=now) client.receive_datagram(items[1][0], SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [329]) self.assertAlmostEqual(client.get_timer(), 2.45) self.assertEqual(type(client.next_event()), events.HandshakeCompleted) self.assertEqual(type(client.next_event()), events.ConnectionIdIssued) now = 1.3 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [229]) self.assertAlmostEqual(server.get_timer(), 1.925) self.assertEqual(type(server.next_event()), events.HandshakeCompleted) self.assertEqual(type(server.next_event()), events.ConnectionIdIssued) now = 1.4 client.receive_datagram(items[0][0], SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [32]) self.assertAlmostEqual(client.get_timer(), 61.4) ===========unchanged ref 0=========== at: tests.test_connection CLIENT_ADDR = ("1.2.3.4", 1234) SERVER_ADDR = ("2.3.4.5", 4433) at: tests.utils SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") SERVER_CERTFILE = os.path.join(os.path.dirname(__file__), "ssl_cert.pem") SERVER_KEYFILE = os.path.join(os.path.dirname(__file__), "ssl_key.pem") at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None assertIsNone(obj: Any, msg: Any=...) -> None assertAlmostEqual(first: float, second: float, places: Optional[int]=..., msg: Any=..., delta: Optional[float]=...) -> None assertAlmostEqual(first: datetime.datetime, second: datetime.datetime, places: Optional[int]=..., msg: Any=..., delta: Optional[datetime.timedelta]=...) 
-> None ===========changed ref 0=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_1(self): """ Check connection is established even in the client's INITIAL is lost. """ def datagram_sizes(items): return [len(x[0]) for x in items] client_configuration = QuicConfiguration(is_client=True) client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE) client = QuicConnection(configuration=client_configuration) client._ack_delay = 0 server_configuration = QuicConfiguration(is_client=False) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) server = QuicConnection(configuration=server_configuration) server._ack_delay = 0 # client sends INITIAL now = 0.0 client.connect(SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280]) self.assertEqual(client.get_timer(), 1.0) # INITIAL is lost now = 1.0 client.handle_timer(now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280]) self.assertEqual(client.get_timer(), 3.0) # server receives INITIAL, sends INITIAL + HANDSHAKE now = 1.1 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) + self.assertEqual(datagram_sizes(items), [1280, 1062]) - self.assertEqual(datagram_sizes(items), [1280, 1084]) self.assertEqual(server.get_timer(), 2.1) self.assertEqual(len(server._loss.spaces[0].sent_packets), 1)</s>
tests.test_connection/QuicConnectionTest.test_connect_with_loss_3
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<26>:<add> self.assertEqual(datagram_sizes(items), [1280, 1062]) <del> self.assertEqual(datagram_sizes(items), [1280, 1084])
# module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_3(self): <0> def datagram_sizes(items): <1> return [len(x[0]) for x in items] <2> <3> client_configuration = QuicConfiguration(is_client=True) <4> client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <5> <6> client = QuicConnection(configuration=client_configuration) <7> client._ack_delay = 0 <8> <9> server_configuration = QuicConfiguration(is_client=False) <10> server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) <11> <12> server = QuicConnection(configuration=server_configuration) <13> server._ack_delay = 0 <14> <15> # client sends INITIAL <16> now = 0.0 <17> client.connect(SERVER_ADDR, now=now) <18> items = client.datagrams_to_send(now=now) <19> self.assertEqual(datagram_sizes(items), [1280]) <20> self.assertEqual(client.get_timer(), 1.0) <21> <22> # server receives INITIAL, sends INITIAL + HANDSHAKE <23> now = 0.1 <24> server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) <25> items = server.datagrams_to_send(now=now) <26> self.assertEqual(datagram_sizes(items), [1280, 1084]) <27> self.assertEqual(server.get_timer(), 1.1) <28> self.assertEqual(len(server._loss.spaces[0].sent_packets), 1) <29> self.assertEqual(len(server._loss.spaces[1].sent_packets), 2) <30> <31> # client receives INITIAL + HANDSHAKE <32> now = 0.2 <33> client.receive_datagram(items[0][0], SERVER_ADDR, now=now) <34> client.receive_datagram(items[1][0], SERVER_ADDR, now=now) <35> </s>
===========below chunk 0=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_3(self): # offset: 1 self.assertEqual(datagram_sizes(items), [376]) self.assertAlmostEqual(client.get_timer(), 0.825) self.assertEqual(type(client.next_event()), events.ProtocolNegotiated) self.assertEqual(type(client.next_event()), events.HandshakeCompleted) self.assertEqual(type(client.next_event()), events.ConnectionIdIssued) # server completes handshake now = 0.3 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [229]) self.assertAlmostEqual(server.get_timer(), 0.825) self.assertEqual(len(server._loss.spaces[0].sent_packets), 0) self.assertEqual(len(server._loss.spaces[1].sent_packets), 0) self.assertEqual(type(server.next_event()), events.ProtocolNegotiated) self.assertEqual(type(server.next_event()), events.HandshakeCompleted) self.assertEqual(type(server.next_event()), events.ConnectionIdIssued) # server PTO - 1-RTT PING now = 0.825 server.handle_timer(now=now) items = server.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [29]) self.assertAlmostEqual(server.get_timer(), 1.875) # client receives PING, sends ACK now = 0.9 client.receive_datagram(items[0][0], SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [32</s> ===========below chunk 1=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_3(self): # offset: 2 <s> client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [32]) self.assertAlmostEqual(client.get_timer(), 0.825) # server receives ACK, retransmits HANDSHAKE_DONE now = 1.0 self.assertFalse(server._handshake_done_pending) server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) self.assertTrue(server._handshake_done_pending) items = server.datagrams_to_send(now=now) self.assertFalse(server._handshake_done_pending) self.assertEqual(datagram_sizes(items), [224]) ===========unchanged ref 0=========== at: tests.test_connection CLIENT_ADDR = ("1.2.3.4", 1234) SERVER_ADDR = ("2.3.4.5", 4433) at: tests.utils SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") SERVER_CERTFILE = os.path.join(os.path.dirname(__file__), "ssl_cert.pem") SERVER_KEYFILE = os.path.join(os.path.dirname(__file__), "ssl_key.pem") at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None assertTrue(expr: Any, msg: Any=...) -> None assertFalse(expr: Any, msg: Any=...) -> None assertAlmostEqual(first: float, second: float, places: Optional[int]=..., msg: Any=..., delta: Optional[float]=...) -> None assertAlmostEqual(first: datetime.datetime, second: datetime.datetime, places: Optional[int]=..., msg: Any=..., delta: Optional[datetime.timedelta]=...) -> None ===========changed ref 0=========== # module: aioquic.h0.connection + H0_ALPN = ["hq-27", "hq-26", "hq-25"] - H0_ALPN = ["hq-25"] ===========changed ref 1=========== # module: aioquic.quic.packet class QuicProtocolVersion(IntEnum): NEGOTIATION = 0 DRAFT_25 = 0xFF000019 + DRAFT_26 = 0xFF00001A + DRAFT_27 = 0xFF00001B ===========changed ref 2=========== # module: tests.test_connection class QuicConnectionTest(TestCase): def test_connect_with_loss_1(self): """ Check connection is established even in the client's INITIAL is lost. 
""" def datagram_sizes(items): return [len(x[0]) for x in items] client_configuration = QuicConfiguration(is_client=True) client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE) client = QuicConnection(configuration=client_configuration) client._ack_delay = 0 server_configuration = QuicConfiguration(is_client=False) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) server = QuicConnection(configuration=server_configuration) server._ack_delay = 0 # client sends INITIAL now = 0.0 client.connect(SERVER_ADDR, now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280]) self.assertEqual(client.get_timer(), 1.0) # INITIAL is lost now = 1.0 client.handle_timer(now=now) items = client.datagrams_to_send(now=now) self.assertEqual(datagram_sizes(items), [1280]) self.assertEqual(client.get_timer(), 3.0) # server receives INITIAL, sends INITIAL + HANDSHAKE now = 1.1 server.receive_datagram(items[0][0], CLIENT_ADDR, now=now) items = server.datagrams_to_send(now=now) + self.assertEqual(datagram_sizes(items), [1280, 1062]) - self.assertEqual(datagram_sizes(items), [1280, 1084]) self.assertEqual(server.get_timer(), 2.1) self.assertEqual(len(server._loss.spaces[0].sent_packets), 1)</s>
aioquic.quic.connection/QuicConnection._parse_transport_parameters
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<0>:<add> quic_transport_parameters = pull_quic_transport_parameters( <del> quic_transport_parameters = pull_quic_transport_parameters(Buffer(data=data)) <1>:<add> Buffer(data=data), protocol_version=self._version <add> )
# module: aioquic.quic.connection class QuicConnection: def _parse_transport_parameters( self, data: bytes, from_session_ticket: bool = False ) -> None: <0> quic_transport_parameters = pull_quic_transport_parameters(Buffer(data=data)) <1> <2> # log event <3> if self._quic_logger is not None and not from_session_ticket: <4> self._quic_logger.log_event( <5> category="transport", <6> event="parameters_set", <7> data=self._quic_logger.encode_transport_parameters( <8> owner="remote", parameters=quic_transport_parameters <9> ), <10> ) <11> <12> # validate remote parameters <13> if ( <14> self._is_client <15> and not from_session_ticket <16> and ( <17> quic_transport_parameters.original_connection_id <18> != self._original_connection_id <19> ) <20> ): <21> raise QuicConnectionError( <22> error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, <23> frame_type=QuicFrameType.CRYPTO, <24> reason_phrase="original_connection_id does not match", <25> ) <26> <27> # store remote parameters <28> if quic_transport_parameters.ack_delay_exponent is not None: <29> self._remote_ack_delay_exponent = self._remote_ack_delay_exponent <30> if quic_transport_parameters.active_connection_id_limit is not None: <31> self._remote_active_connection_id_limit = ( <32> quic_transport_parameters.active_connection_id_limit <33> ) <34> if quic_transport_parameters.idle_timeout is not None: <35> self._remote_idle_timeout = quic_transport_parameters.idle_timeout / 1000.0 <36> if quic_transport_parameters.max_ack_delay is not None: <37> self._loss.max_ack_delay = quic_transport_parameters.max_ack_delay / 1000.0</s>
===========below chunk 0=========== # module: aioquic.quic.connection class QuicConnection: def _parse_transport_parameters( self, data: bytes, from_session_ticket: bool = False ) -> None: # offset: 1 quic_transport_parameters.max_datagram_frame_size ) for param in [ "max_data", "max_stream_data_bidi_local", "max_stream_data_bidi_remote", "max_stream_data_uni", "max_streams_bidi", "max_streams_uni", ]: value = getattr(quic_transport_parameters, "initial_" + param) if value is not None: setattr(self, "_remote_" + param, value) ===========unchanged ref 0=========== at: aioquic.quic.connection QuicConnectionError(error_code: int, frame_type: int, reason_phrase: str) at: aioquic.quic.connection.QuicConnection.__init__ self._is_client = configuration.is_client self._original_connection_id = original_connection_id self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) self._remote_ack_delay_exponent = 3 self._remote_active_connection_id_limit = 0 self._remote_idle_timeout = 0.0 # seconds self._remote_max_datagram_frame_size: Optional[int] = None self._version: Optional[int] = None self._loss = QuicPacketRecovery( is_client_without_1rtt=self._is_client, quic_logger=self._quic_logger, send_probe=self._send_probe, ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None at: aioquic.quic.connection.QuicConnection.connect self._version = self._configuration.supported_versions[0] at: aioquic.quic.connection.QuicConnection.receive_datagram self._version = QuicProtocolVersion(header.version) self._version = QuicProtocolVersion(max(common)) self._original_connection_id = self._peer_cid ===========changed ref 0=========== # module: aioquic.h0.connection + H0_ALPN = ["hq-27", "hq-26", "hq-25"] - H0_ALPN = ["hq-25"] ===========changed ref 1=========== # module: aioquic.quic.packet class QuicProtocolVersion(IntEnum): NEGOTIATION = 0 DRAFT_25 = 0xFF000019 + DRAFT_26 = 0xFF00001A + DRAFT_27 = 0xFF00001B ===========changed ref 2=========== # module: aioquic.h3.connection logger = logging.getLogger("http3") + H3_ALPN = ["h3-27", "h3-26", "h3-25"] - H3_ALPN = ["h3-25"] ===========changed ref 3=========== # module: aioquic.quic.packet def push_quic_transport_parameters( + buf: Buffer, params: QuicTransportParameters, protocol_version: int - buf: Buffer, params: QuicTransportParameters ) -> None: + if protocol_version < QuicProtocolVersion.DRAFT_27: + with push_block(buf, 2): - with push_block(buf, 2): + for param_id, (param_name, param_type) in PARAMS.items(): + param_value = getattr(params, param_name) + if param_value is not None and param_value is not False: + buf.push_uint16(param_id) + with push_block(buf, 2): + if param_type == int: + buf.push_uint_var(param_value) + elif param_type == bytes: + buf.push_bytes(param_value) + elif param_type == QuicPreferredAddress: + push_quic_preferred_address(buf, param_value) + else: for param_id, (param_name, param_type) in PARAMS.items(): param_value = getattr(params, param_name) if param_value is not None and param_value is not False: - buf.push_uint16(param_id) - with push_block(buf, 2): + param_buf = Buffer(capacity=65536) + if param_type == int: - if param_type == int: + param_buf.push_uint_var(param_value) - buf.push_uint_var(param_value) + elif param_type == bytes: - elif param_type == bytes: + param_buf.push_bytes(param_value) - buf.push_bytes(param_value) + elif param_type == QuicPreferredAddress: - elif param_type == 
QuicPreferredAddress: + push_quic_preferred_address(param_buf, param_value) - push_</s> ===========changed ref 4=========== # module: aioquic.quic.packet def push_quic_transport_parameters( + buf: Buffer, params: QuicTransportParameters, protocol_version: int - buf: Buffer, params: QuicTransportParameters ) -> None: # offset: 1 <s> QuicPreferredAddress: + push_quic_preferred_address(param_buf, param_value) - push_quic_preferred_address(buf, param_value) + buf.push_uint_var(param_id) + buf.push_uint_var(param_buf.tell()) + buf.push_bytes(param_buf.data) ===========changed ref 5=========== # module: aioquic.quic.packet + def pull_quic_transport_parameters( + buf: Buffer, protocol_version: int + ) -> QuicTransportParameters: - def pull_quic_transport_parameters(buf: Buffer) -> QuicTransportParameters: params = QuicTransportParameters() + if protocol_version < QuicProtocolVersion.DRAFT_27: + with pull_block(buf, 2) as length: - with pull_block(buf, 2) as length: + end = buf.tell() + length - end = buf.tell() + length + while buf.tell() < end: - while buf.tell() < end: + param_id = buf.pull_uint16() - param_id = buf.pull_uint16() + param_len = buf.pull_uint16() - param_len = buf.pull_uint16() + param_start = buf.tell() + if param_id in PARAMS: + # parse known parameter + param_name, param_type = PARAMS[param_id] + if param_type == int: + setattr(params, param_name, buf.pull_uint_var()) + elif param_type == bytes: + setattr(params, param_name, buf.pull_bytes(param_len)) + elif param_type == QuicPreferredAddress: + setattr(params, param_name, pull_quic_preferred_address(buf)) + else: + setattr(params, param_name, True) + else: + # skip unknown parameter + buf.pull_bytes(param_len) + assert buf.tell() == param_start + param_len + else: + while not buf.eof(): + param_id = buf.pull_uint_var() + param_len = buf.pull_uint_var() param_start = buf.tell() if param_id in PARAMS: # parse known parameter param_name, param_type =</s>
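Apart from threading protocol_version through to the parser, the body of _parse_transport_parameters() is unchanged; the flow-control limits are still copied with the getattr/setattr loop at the end of the function. A toy sketch of that copy pattern with made-up classes (Params and Conn are illustrative stand-ins, not aioquic types):

class Params:
    initial_max_data = 1048576
    initial_max_streams_bidi = 128

class Conn:
    _remote_max_data = 0
    _remote_max_streams_bidi = 0

params, conn = Params(), Conn()
for param in ["max_data", "max_streams_bidi"]:
    value = getattr(params, "initial_" + param)
    if value is not None:
        setattr(conn, "_remote_" + param, value)

print(conn._remote_max_data, conn._remote_max_streams_bidi)  # 1048576 128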
aioquic.quic.connection/QuicConnection._serialize_transport_parameters
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<32>:<add> push_quic_transport_parameters( <add> buf, quic_transport_parameters, protocol_version=self._version <add> ) <del> push_quic_transport_parameters(buf, quic_transport
# module: aioquic.quic.connection class QuicConnection: def _serialize_transport_parameters(self) -> bytes: <0> quic_transport_parameters = QuicTransportParameters( <1> ack_delay_exponent=self._local_ack_delay_exponent, <2> active_connection_id_limit=self._local_active_connection_id_limit, <3> idle_timeout=int(self._configuration.idle_timeout * 1000), <4> initial_max_data=self._local_max_data, <5> initial_max_stream_data_bidi_local=self._local_max_stream_data_bidi_local, <6> initial_max_stream_data_bidi_remote=self._local_max_stream_data_bidi_remote, <7> initial_max_stream_data_uni=self._local_max_stream_data_uni, <8> initial_max_streams_bidi=self._local_max_streams_bidi, <9> initial_max_streams_uni=self._local_max_streams_uni, <10> max_ack_delay=25, <11> max_datagram_frame_size=self._configuration.max_datagram_frame_size, <12> quantum_readiness=b"Q" * 1200 <13> if self._configuration.quantum_readiness_test <14> else None, <15> ) <16> if not self._is_client: <17> quic_transport_parameters.original_connection_id = ( <18> self._original_connection_id <19> ) <20> <21> # log event <22> if self._quic_logger is not None: <23> self._quic_logger.log_event( <24> category="transport", <25> event="parameters_set", <26> data=self._quic_logger.encode_transport_parameters( <27> owner="local", parameters=quic_transport_parameters <28> ), <29> ) <30> <31> buf = Buffer(capacity=3 * PACKET_MAX_SIZE) <32> push_quic_transport_parameters(buf, quic_transport</s>
===========below chunk 0=========== # module: aioquic.quic.connection class QuicConnection: def _serialize_transport_parameters(self) -> bytes: # offset: 1 return buf.data ===========unchanged ref 0=========== at: aioquic.quic.connection.QuicConnection.__init__ self._configuration = configuration self._is_client = configuration.is_client self._local_ack_delay_exponent = 3 self._local_active_connection_id_limit = 8 self._local_max_data = configuration.max_data self._local_max_stream_data_bidi_local = configuration.max_stream_data self._local_max_stream_data_bidi_remote = configuration.max_stream_data self._local_max_stream_data_uni = configuration.max_stream_data self._local_max_streams_bidi = 128 self._local_max_streams_uni = 128 self._original_connection_id = original_connection_id self._quic_logger: Optional[QuicLoggerTrace] = None self._quic_logger = configuration.quic_logger.start_trace( is_client=configuration.is_client, odcid=logger_connection_id ) at: aioquic.quic.connection.QuicConnection._close_end self._quic_logger = None at: aioquic.quic.connection.QuicConnection._parse_transport_parameters value = getattr(quic_transport_parameters, "initial_" + param) at: aioquic.quic.connection.QuicConnection._write_connection_limits self._local_max_data *= 2 at: aioquic.quic.connection.QuicConnection.receive_datagram self._original_connection_id = self._peer_cid ===========changed ref 0=========== # module: aioquic.quic.connection class QuicConnection: def _parse_transport_parameters( self, data: bytes, from_session_ticket: bool = False ) -> None: + quic_transport_parameters = pull_quic_transport_parameters( - quic_transport_parameters = pull_quic_transport_parameters(Buffer(data=data)) + Buffer(data=data), protocol_version=self._version + ) # log event if self._quic_logger is not None and not from_session_ticket: self._quic_logger.log_event( category="transport", event="parameters_set", data=self._quic_logger.encode_transport_parameters( owner="remote", parameters=quic_transport_parameters ), ) # validate remote parameters if ( self._is_client and not from_session_ticket and ( quic_transport_parameters.original_connection_id != self._original_connection_id ) ): raise QuicConnectionError( error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase="original_connection_id does not match", ) # store remote parameters if quic_transport_parameters.ack_delay_exponent is not None: self._remote_ack_delay_exponent = self._remote_ack_delay_exponent if quic_transport_parameters.active_connection_id_limit is not None: self._remote_active_connection_id_limit = ( quic_transport_parameters.active_connection_id_limit ) if quic_transport_parameters.idle_timeout is not None: self._remote_idle_timeout = quic_transport_parameters.idle_timeout / 1000.0 if quic_transport_parameters.max_ack_delay is not None: self._loss.max_ack_delay = quic_transport_parameters.max_ack_delay / 1000</s> ===========changed ref 1=========== # module: aioquic.quic.connection class QuicConnection: def _parse_transport_parameters( self, data: bytes, from_session_ticket: bool = False ) -> None: # offset: 1 <s> is not None: self._loss.max_ack_delay = quic_transport_parameters.max_ack_delay / 1000.0 self._remote_max_datagram_frame_size = ( quic_transport_parameters.max_datagram_frame_size ) for param in [ "max_data", "max_stream_data_bidi_local", "max_stream_data_bidi_remote", "max_stream_data_uni", "max_streams_bidi", "max_streams_uni", ]: value = getattr(quic_transport_parameters, "initial_" + 
param) if value is not None: setattr(self, "_remote_" + param, value) ===========changed ref 2=========== # module: aioquic.h0.connection + H0_ALPN = ["hq-27", "hq-26", "hq-25"] - H0_ALPN = ["hq-25"] ===========changed ref 3=========== # module: aioquic.quic.packet class QuicProtocolVersion(IntEnum): NEGOTIATION = 0 DRAFT_25 = 0xFF000019 + DRAFT_26 = 0xFF00001A + DRAFT_27 = 0xFF00001B ===========changed ref 4=========== # module: aioquic.h3.connection logger = logging.getLogger("http3") + H3_ALPN = ["h3-27", "h3-26", "h3-25"] - H3_ALPN = ["h3-25"] ===========changed ref 5=========== # module: aioquic.quic.packet def push_quic_transport_parameters( + buf: Buffer, params: QuicTransportParameters, protocol_version: int - buf: Buffer, params: QuicTransportParameters ) -> None: + if protocol_version < QuicProtocolVersion.DRAFT_27: + with push_block(buf, 2): - with push_block(buf, 2): + for param_id, (param_name, param_type) in PARAMS.items(): + param_value = getattr(params, param_name) + if param_value is not None and param_value is not False: + buf.push_uint16(param_id) + with push_block(buf, 2): + if param_type == int: + buf.push_uint_var(param_value) + elif param_type == bytes: + buf.push_bytes(param_value) + elif param_type == QuicPreferredAddress: + push_quic_preferred_address(buf, param_value) + else: for param_id, (param_name, param_type) in PARAMS.items(): param_value = getattr(params, param_name) if param_value is not None and param_value is not False: - buf.push_uint16(param_id) - with push_block(buf, 2): + param_buf = Buffer(capacity=65536) + if param_type == int: - if param_type == int: + param_buf.push_uint_var(param_value) - buf.push_uint_var(param_value) + elif param_type == bytes: - elif param_type == bytes: + param_buf.push_bytes(param_value) - buf.push_bytes(param_value) + elif param_type == QuicPreferredAddress: - elif param_type == QuicPreferredAddress: + push_quic_preferred_address(param_buf, param_value) - push_</s>
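Note on the encoding change above: draft-27 replaces the pre-27 transport-parameter wire format (a 16-bit length-prefixed block containing uint16 parameter ids and uint16 lengths) with a flat sequence of varint-encoded ids and lengths. The following is a minimal standalone sketch of the two encodings in plain Python rather than aioquic's Buffer API; the helper names (encode_varint, encode_param_draft27, encode_param_legacy) are illustrative only, and the expected hex strings match the ParamsTest vectors in the records that follow.

import binascii

def encode_varint(value: int) -> bytes:
    # QUIC variable-length integer: 2-bit length prefix, big-endian.
    if value < 0x40:
        return value.to_bytes(1, "big")
    elif value < 0x4000:
        return (value | 0x4000).to_bytes(2, "big")
    elif value < 0x40000000:
        return (value | 0x80000000).to_bytes(4, "big")
    return (value | 0xC000000000000000).to_bytes(8, "big")

def encode_param_draft27(param_id: int, value: bytes) -> bytes:
    # draft-27 and later: varint id + varint length + value, no outer block.
    return encode_varint(param_id) + encode_varint(len(value)) + value

def encode_param_legacy(param_id: int, value: bytes) -> bytes:
    # up to draft-26: uint16 id + uint16 length; the caller additionally
    # wraps all parameters in a single 16-bit length-prefixed block.
    return param_id.to_bytes(2, "big") + len(value).to_bytes(2, "big") + value

# idle_timeout (id 0x01) of 10000 ms; the value itself is a varint
value = encode_varint(10000)
assert binascii.hexlify(encode_param_draft27(0x01, value)) == b"01026710"
assert binascii.hexlify(encode_param_legacy(0x01, value)) == b"000100026710"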
tests.test_packet/ParamsTest.test_params
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<1>:<add> "010267100210cc2fd6e7d97a53ab5be85b28d75c8008030247e404048005fff" <del> "004700020010cc2fd6e7d97a53ab5be85b28d75c80080008000106000100026" <2>:<del> "710000600048000ffff000500048000ffff000400048005fffa000a00010300" <3>:<del> "0b0001190003000247e4" <4>:<add> "a05048000ffff06048000ffff0801060a01030b0119" <8>:<add> params = pull_quic_transport_parameters( <del> params = pull_quic_transport_parameters(buf) <9>:<add> buf, protocol_version=QuicProtocolVersion.DRAFT_27 <add> ) <28>:<add> push_quic_transport_parameters( <del> push_quic_transport_parameters(buf, params) <29>:<add> buf, params, protocol_version=QuicProtocolVersion.DRAFT_27 <add> )
# module: tests.test_packet class ParamsTest(TestCase): def test_params(self): <0> data = binascii.unhexlify( <1> "004700020010cc2fd6e7d97a53ab5be85b28d75c80080008000106000100026" <2> "710000600048000ffff000500048000ffff000400048005fffa000a00010300" <3> "0b0001190003000247e4" <4> ) <5> <6> # parse <7> buf = Buffer(data=data) <8> params = pull_quic_transport_parameters(buf) <9> self.assertEqual( <10> params, <11> QuicTransportParameters( <12> idle_timeout=10000, <13> stateless_reset_token=b"\xcc/\xd6\xe7\xd9zS\xab[\xe8[(\xd7\\\x80\x08", <14> max_packet_size=2020, <15> initial_max_data=393210, <16> initial_max_stream_data_bidi_local=65535, <17> initial_max_stream_data_bidi_remote=65535, <18> initial_max_stream_data_uni=None, <19> initial_max_streams_bidi=6, <20> initial_max_streams_uni=None, <21> ack_delay_exponent=3, <22> max_ack_delay=25, <23> ), <24> ) <25> <26> # serialize <27> buf = Buffer(capacity=len(data)) <28> push_quic_transport_parameters(buf, params) <29> self.assertEqual(len(buf.data), len(data)) <30>
===========unchanged ref 0=========== at: binascii unhexlify(hexstr: _Ascii, /) -> bytes at: tests.test_packet.ParamsTest maxDiff = None at: unittest.case.TestCase failureException: Type[BaseException] longMessage: bool maxDiff: Optional[int] _testMethodName: str _testMethodDoc: str assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: aioquic.h0.connection + H0_ALPN = ["hq-27", "hq-26", "hq-25"] - H0_ALPN = ["hq-25"] ===========changed ref 1=========== # module: aioquic.quic.packet class QuicProtocolVersion(IntEnum): NEGOTIATION = 0 DRAFT_25 = 0xFF000019 + DRAFT_26 = 0xFF00001A + DRAFT_27 = 0xFF00001B ===========changed ref 2=========== # module: aioquic.h3.connection logger = logging.getLogger("http3") + H3_ALPN = ["h3-27", "h3-26", "h3-25"] - H3_ALPN = ["h3-25"] ===========changed ref 3=========== # module: aioquic.quic.connection class QuicConnection: def _serialize_transport_parameters(self) -> bytes: quic_transport_parameters = QuicTransportParameters( ack_delay_exponent=self._local_ack_delay_exponent, active_connection_id_limit=self._local_active_connection_id_limit, idle_timeout=int(self._configuration.idle_timeout * 1000), initial_max_data=self._local_max_data, initial_max_stream_data_bidi_local=self._local_max_stream_data_bidi_local, initial_max_stream_data_bidi_remote=self._local_max_stream_data_bidi_remote, initial_max_stream_data_uni=self._local_max_stream_data_uni, initial_max_streams_bidi=self._local_max_streams_bidi, initial_max_streams_uni=self._local_max_streams_uni, max_ack_delay=25, max_datagram_frame_size=self._configuration.max_datagram_frame_size, quantum_readiness=b"Q" * 1200 if self._configuration.quantum_readiness_test else None, ) if not self._is_client: quic_transport_parameters.original_connection_id = ( self._original_connection_id ) # log event if self._quic_logger is not None: self._quic_logger.log_event( category="transport", event="parameters_set", data=self._quic_logger.encode_transport_parameters( owner="local", parameters=quic_transport_parameters ), ) buf = Buffer(capacity=3 * PACKET_MAX_SIZE) + push_quic_transport_parameters( + buf, quic_transport_parameters, protocol_version=self._version + ) - push_quic_transport_parameters(buf, qu</s> ===========changed ref 4=========== # module: aioquic.quic.connection class QuicConnection: def _serialize_transport_parameters(self) -> bytes: # offset: 1 <s>transport_parameters, protocol_version=self._version + ) - push_quic_transport_parameters(buf, quic_transport_parameters) return buf.data ===========changed ref 5=========== # module: aioquic.quic.packet def push_quic_transport_parameters( + buf: Buffer, params: QuicTransportParameters, protocol_version: int - buf: Buffer, params: QuicTransportParameters ) -> None: + if protocol_version < QuicProtocolVersion.DRAFT_27: + with push_block(buf, 2): - with push_block(buf, 2): + for param_id, (param_name, param_type) in PARAMS.items(): + param_value = getattr(params, param_name) + if param_value is not None and param_value is not False: + buf.push_uint16(param_id) + with push_block(buf, 2): + if param_type == int: + buf.push_uint_var(param_value) + elif param_type == bytes: + buf.push_bytes(param_value) + elif param_type == QuicPreferredAddress: + push_quic_preferred_address(buf, param_value) + else: for param_id, (param_name, param_type) in PARAMS.items(): param_value = getattr(params, param_name) if param_value is not None and param_value is not False: - buf.push_uint16(param_id) - with push_block(buf, 2): + 
param_buf = Buffer(capacity=65536) + if param_type == int: - if param_type == int: + param_buf.push_uint_var(param_value) - buf.push_uint_var(param_value) + elif param_type == bytes: - elif param_type == bytes: + param_buf.push_bytes(param_value) - buf.push_bytes(param_value) + elif param_type == QuicPreferredAddress: - elif param_type == QuicPreferredAddress: + push_quic_preferred_address(param_buf, param_value) - push_</s> ===========changed ref 6=========== # module: aioquic.quic.packet def push_quic_transport_parameters( + buf: Buffer, params: QuicTransportParameters, protocol_version: int - buf: Buffer, params: QuicTransportParameters ) -> None: # offset: 1 <s> QuicPreferredAddress: + push_quic_preferred_address(param_buf, param_value) - push_quic_preferred_address(buf, param_value) + buf.push_uint_var(param_id) + buf.push_uint_var(param_buf.tell()) + buf.push_bytes(param_buf.data)
tests.test_packet/ParamsTest.test_params_disable_active_migration
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<0>:<add> data = binascii.unhexlify("0c00") <del> data = binascii.unhexlify("0004000c0000") <4>:<add> params = pull_quic_transport_parameters( <del> params = pull_quic_transport_parameters(buf) <5>:<add> buf, protocol_version=QuicProtocolVersion.DRAFT_27 <add> ) <9>:<add> push_quic_transport_parameters( <del> push_quic_transport_parameters(buf, params) <10>:<add> buf, params, protocol_version=QuicProtocolVersion.DRAFT_27 <add> )
# module: tests.test_packet class ParamsTest(TestCase): def test_params_disable_active_migration(self): <0> data = binascii.unhexlify("0004000c0000") <1> <2> # parse <3> buf = Buffer(data=data) <4> params = pull_quic_transport_parameters(buf) <5> self.assertEqual(params, QuicTransportParameters(disable_active_migration=True)) <6> <7> # serialize <8> buf = Buffer(capacity=len(data)) <9> push_quic_transport_parameters(buf, params) <10> self.assertEqual(buf.data, data) <11>
===========unchanged ref 0=========== at: binascii unhexlify(hexstr: _Ascii, /) -> bytes at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_packet class ParamsTest(TestCase): def test_params(self): data = binascii.unhexlify( + "010267100210cc2fd6e7d97a53ab5be85b28d75c8008030247e404048005fff" - "004700020010cc2fd6e7d97a53ab5be85b28d75c80080008000106000100026" - "710000600048000ffff000500048000ffff000400048005fffa000a00010300" - "0b0001190003000247e4" + "a05048000ffff06048000ffff0801060a01030b0119" ) # parse buf = Buffer(data=data) + params = pull_quic_transport_parameters( - params = pull_quic_transport_parameters(buf) + buf, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual( params, QuicTransportParameters( idle_timeout=10000, stateless_reset_token=b"\xcc/\xd6\xe7\xd9zS\xab[\xe8[(\xd7\\\x80\x08", max_packet_size=2020, initial_max_data=393210, initial_max_stream_data_bidi_local=65535, initial_max_stream_data_bidi_remote=65535, initial_max_stream_data_uni=None, initial_max_streams_bidi=6, initial_max_streams_uni=None, ack_delay_exponent=3, max_ack_delay=25, ), ) # serialize buf = Buffer(capacity=len(data)) + push_quic_transport_parameters( - push_quic_transport_parameters(buf, params) + buf, params, protocol_version=QuicProtocolVersion.DRAFT_27 + )</s> ===========changed ref 1=========== # module: tests.test_packet class ParamsTest(TestCase): def test_params(self): # offset: 1 <s>_parameters(buf, params) + buf, params, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual(len(buf.data), len(data)) ===========changed ref 2=========== # module: aioquic.h0.connection + H0_ALPN = ["hq-27", "hq-26", "hq-25"] - H0_ALPN = ["hq-25"] ===========changed ref 3=========== # module: aioquic.quic.packet class QuicProtocolVersion(IntEnum): NEGOTIATION = 0 DRAFT_25 = 0xFF000019 + DRAFT_26 = 0xFF00001A + DRAFT_27 = 0xFF00001B ===========changed ref 4=========== # module: aioquic.h3.connection logger = logging.getLogger("http3") + H3_ALPN = ["h3-27", "h3-26", "h3-25"] - H3_ALPN = ["h3-25"] ===========changed ref 5=========== # module: aioquic.quic.connection class QuicConnection: def _serialize_transport_parameters(self) -> bytes: quic_transport_parameters = QuicTransportParameters( ack_delay_exponent=self._local_ack_delay_exponent, active_connection_id_limit=self._local_active_connection_id_limit, idle_timeout=int(self._configuration.idle_timeout * 1000), initial_max_data=self._local_max_data, initial_max_stream_data_bidi_local=self._local_max_stream_data_bidi_local, initial_max_stream_data_bidi_remote=self._local_max_stream_data_bidi_remote, initial_max_stream_data_uni=self._local_max_stream_data_uni, initial_max_streams_bidi=self._local_max_streams_bidi, initial_max_streams_uni=self._local_max_streams_uni, max_ack_delay=25, max_datagram_frame_size=self._configuration.max_datagram_frame_size, quantum_readiness=b"Q" * 1200 if self._configuration.quantum_readiness_test else None, ) if not self._is_client: quic_transport_parameters.original_connection_id = ( self._original_connection_id ) # log event if self._quic_logger is not None: self._quic_logger.log_event( category="transport", event="parameters_set", data=self._quic_logger.encode_transport_parameters( owner="local", parameters=quic_transport_parameters ), ) buf = Buffer(capacity=3 * PACKET_MAX_SIZE) + push_quic_transport_parameters( + buf, quic_transport_parameters, protocol_version=self._version + ) - 
push_quic_transport_parameters(buf, qu</s> ===========changed ref 6=========== # module: aioquic.quic.connection class QuicConnection: def _serialize_transport_parameters(self) -> bytes: # offset: 1 <s>transport_parameters, protocol_version=self._version + ) - push_quic_transport_parameters(buf, quic_transport_parameters) return buf.data ===========changed ref 7=========== # module: aioquic.quic.packet def push_quic_transport_parameters( + buf: Buffer, params: QuicTransportParameters, protocol_version: int - buf: Buffer, params: QuicTransportParameters ) -> None: + if protocol_version < QuicProtocolVersion.DRAFT_27: + with push_block(buf, 2): - with push_block(buf, 2): + for param_id, (param_name, param_type) in PARAMS.items(): + param_value = getattr(params, param_name) + if param_value is not None and param_value is not False: + buf.push_uint16(param_id) + with push_block(buf, 2): + if param_type == int: + buf.push_uint_var(param_value) + elif param_type == bytes: + buf.push_bytes(param_value) + elif param_type == QuicPreferredAddress: + push_quic_preferred_address(buf, param_value) + else: for param_id, (param_name, param_type) in PARAMS.items(): param_value = getattr(params, param_name) if param_value is not None and param_value is not False: - buf.push_uint16(param_id) - with push_block(buf, 2): + param_buf = Buffer(capacity=65536) + if param_type == int: - if param_type == int: + param_buf.push_uint_var(param_value) - buf.push_uint_var(param_value) + elif param_type == bytes: - elif param_type == bytes: + param_buf.push_bytes(param_value) - buf.push_bytes(param_value) + elif param_type == QuicPreferredAddress: - elif param_type == QuicPreferredAddress: + push_quic_preferred_address(param_buf, param_value) - push_</s>
tests.test_packet/ParamsTest.test_params_preferred_address
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<1>:<del> "008b000100048000753000020010191adf238f8041a56a5fa7a88ddd14f3000" <2>:<del> "400048010000000050004800400000006000480040000000700048004000000" <3>:<del> "08000240640009000103000d003b8ba27b8611532400890200000000f03c91f" <4>:<del> "ffe69a45411531262c4518d63013f0c287ed3573efa9095603746b2e02d4548" <5>:<del> "0ba6643e5c6e7d48ecb4000e000107" <6>:<add> "0d3b8ba27b8611532400890200000000f03c91fffe69a45411531262c4518d6" <add> "3013f0c287ed3573efa9095603746b2e02d45480ba6643e5c6e7d48ecb4" <10>:<add> params = pull_quic_transport_parameters( <del> params = pull_quic_transport_parameters(buf) <11>:<add> buf, protocol_version=QuicProtocolVersion.DRAFT_27 <add> ) <14>:<del> idle_timeout=30000, <15>:<del> stateless_reset_token=b"\x19\x1a\xdf#\x8f\x80A\xa5j_\xa7\xa8\x8d\xdd\x14\xf3", <16>:<del> initial_max_data=1048576, <17>:<del> initial_max_stream_data_bidi_local=262144, <18>:<del> initial_max_stream_data_bidi_remote=262144, <19>:<del> initial_max_stream_data_uni=262144, <20>:<del> initial_max_streams_bidi=100, <21>:<del> initial_max_streams_uni=3,
# module: tests.test_packet class ParamsTest(TestCase): def test_params_preferred_address(self): <0> data = binascii.unhexlify( <1> "008b000100048000753000020010191adf238f8041a56a5fa7a88ddd14f3000" <2> "400048010000000050004800400000006000480040000000700048004000000" <3> "08000240640009000103000d003b8ba27b8611532400890200000000f03c91f" <4> "ffe69a45411531262c4518d63013f0c287ed3573efa9095603746b2e02d4548" <5> "0ba6643e5c6e7d48ecb4000e000107" <6> ) <7> <8> # parse <9> buf = Buffer(data=data) <10> params = pull_quic_transport_parameters(buf) <11> self.assertEqual( <12> params, <13> QuicTransportParameters( <14> idle_timeout=30000, <15> stateless_reset_token=b"\x19\x1a\xdf#\x8f\x80A\xa5j_\xa7\xa8\x8d\xdd\x14\xf3", <16> initial_max_data=1048576, <17> initial_max_stream_data_bidi_local=262144, <18> initial_max_stream_data_bidi_remote=262144, <19> initial_max_stream_data_uni=262144, <20> initial_max_streams_bidi=100, <21> initial_max_streams_uni=3, <22> preferred_address=QuicPreferredAddress( <23> ipv4_address=("139.162.123.134", 4435), <24> ipv6_address=("2400:8902::f03c:91ff:fe69:a454", 4435), <25> connection_id=b"b\xc4Q\x8</s>
===========below chunk 0=========== # module: tests.test_packet class ParamsTest(TestCase): def test_params_preferred_address(self): # offset: 1 stateless_reset_token=b"F\xb2\xe0-EH\x0b\xa6d>\\n}H\xec\xb4", ), active_connection_id_limit=7, ), ) # serialize buf = Buffer(capacity=len(data)) push_quic_transport_parameters(buf, params) self.assertEqual(buf.data, data) ===========unchanged ref 0=========== at: binascii unhexlify(hexstr: _Ascii, /) -> bytes at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_packet class ParamsTest(TestCase): def test_params_disable_active_migration(self): + data = binascii.unhexlify("0c00") - data = binascii.unhexlify("0004000c0000") # parse buf = Buffer(data=data) + params = pull_quic_transport_parameters( - params = pull_quic_transport_parameters(buf) + buf, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual(params, QuicTransportParameters(disable_active_migration=True)) # serialize buf = Buffer(capacity=len(data)) + push_quic_transport_parameters( - push_quic_transport_parameters(buf, params) + buf, params, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual(buf.data, data) ===========changed ref 1=========== # module: tests.test_packet class ParamsTest(TestCase): + def test_params_legacy(self): + data = binascii.unhexlify( + "004700020010cc2fd6e7d97a53ab5be85b28d75c80080008000106000100026" + "710000600048000ffff000500048000ffff000400048005fffa000a00010300" + "0b0001190003000247e4" + ) + + # parse + buf = Buffer(data=data) + params = pull_quic_transport_parameters( + buf, protocol_version=QuicProtocolVersion.DRAFT_25 + ) + self.assertEqual( + params, + QuicTransportParameters( + idle_timeout=10000, + stateless_reset_token=b"\xcc/\xd6\xe7\xd9zS\xab[\xe8[(\xd7\\\x80\x08", + max_packet_size=2020, + initial_max_data=393210, + initial_max_stream_data_bidi_local=65535, + initial_max_stream_data_bidi_remote=65535, + initial_max_stream_data_uni=None, + initial_max_streams_bidi=6, + initial_max_streams_uni=None, + ack_delay_exponent=3, + max_ack_delay=25, + ), + ) + + # serialize + buf = Buffer(capacity=len(data)) + push_quic_transport_parameters( + buf, params, protocol_version=QuicProtocolVersion.DRAFT_25 + ) + self.assertEqual(len(buf.data), len(data)) + ===========changed ref 2=========== # module: tests.test_packet class ParamsTest(TestCase): def test_params(self): data = binascii.unhexlify( + "010267100210cc2fd6e7d97a53ab5be85b28d75c8008030247e404048005fff" - "004700020010cc2fd6e7d97a53ab5be85b28d75c80080008000106000100026" - "710000600048000ffff000500048000ffff000400048005fffa000a00010300" - "0b0001190003000247e4" + "a05048000ffff06048000ffff0801060a01030b0119" ) # parse buf = Buffer(data=data) + params = pull_quic_transport_parameters( - params = pull_quic_transport_parameters(buf) + buf, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual( params, QuicTransportParameters( idle_timeout=10000, stateless_reset_token=b"\xcc/\xd6\xe7\xd9zS\xab[\xe8[(\xd7\\\x80\x08", max_packet_size=2020, initial_max_data=393210, initial_max_stream_data_bidi_local=65535, initial_max_stream_data_bidi_remote=65535, initial_max_stream_data_uni=None, initial_max_streams_bidi=6, initial_max_streams_uni=None, ack_delay_exponent=3, max_ack_delay=25, ), ) # serialize buf = Buffer(capacity=len(data)) + push_quic_transport_parameters( - push_quic_transport_parameters(buf, params) + buf, params, protocol_version=QuicProtocolVersion.DRAFT_27 + )</s> 
===========changed ref 3=========== # module: tests.test_packet class ParamsTest(TestCase): def test_params(self): # offset: 1 <s>_parameters(buf, params) + buf, params, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual(len(buf.data), len(data)) ===========changed ref 4=========== # module: aioquic.h0.connection + H0_ALPN = ["hq-27", "hq-26", "hq-25"] - H0_ALPN = ["hq-25"] ===========changed ref 5=========== # module: aioquic.quic.packet class QuicProtocolVersion(IntEnum): NEGOTIATION = 0 DRAFT_25 = 0xFF000019 + DRAFT_26 = 0xFF00001A + DRAFT_27 = 0xFF00001B ===========changed ref 6=========== # module: aioquic.h3.connection logger = logging.getLogger("http3") + H3_ALPN = ["h3-27", "h3-26", "h3-25"] - H3_ALPN = ["h3-25"]
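For reference, the preferred_address test vector above can be unpacked by hand: after the "0d3b" id/length prefix, the value is an IPv4 address and port, an IPv6 address and port, a length-prefixed connection ID, and a 16-byte stateless reset token. A standalone sketch (not aioquic's pull_quic_preferred_address; decode_preferred_address is an illustrative name) that decodes exactly those bytes:

import ipaddress

def decode_preferred_address(value: bytes):
    # layout: 4-byte IPv4 + 2-byte port, 16-byte IPv6 + 2-byte port,
    # 1-byte CID length + CID, 16-byte stateless reset token
    ipv4 = (str(ipaddress.IPv4Address(value[0:4])), int.from_bytes(value[4:6], "big"))
    ipv6 = (str(ipaddress.IPv6Address(value[6:22])), int.from_bytes(value[22:24], "big"))
    cid_len = value[24]
    cid = value[25:25 + cid_len]
    token = value[25 + cid_len:25 + cid_len + 16]
    return ipv4, ipv6, cid, token

value = bytes.fromhex(
    "8ba27b8611532400890200000000f03c91fffe69a4541153"
    "1262c4518d63013f0c287ed3573efa9095603746b2e02d45480ba6643e5c6e7d48ecb4"
)
ipv4, ipv6, cid, token = decode_preferred_address(value)
assert ipv4 == ("139.162.123.134", 4435)
assert ipv6 == ("2400:8902::f03c:91ff:fe69:a454", 4435)
assert len(cid) == 18 and len(token) == 16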
tests.test_packet/ParamsTest.test_params_unknown
Modified
aiortc~aioquic
6222e24f431c17d07abbb21445793a7f61df5b94
[connection] add support for draft-27 transport parameters
<0>:<del> # fb.mvfst.net sends a proprietary parameter 65280 <1>:<add> data = binascii.unhexlify("8000ff000100") <del> data = binascii.unhexlify( <2>:<del> "006400050004800104000006000480010400000700048001040000040004801" <3>:<del> "0000000080008c0000000ffffffff00090008c0000000ffffffff0001000480" <4>:<del> "00ea60000a00010300030002500000020010616161616262626263636363646" <5>:<del> "46464ff00000100" <6>:<del> ) <10>:<add> params = pull_quic_transport_parameters( <del> params = pull_quic_transport_parameters(buf) <11>:<del> self.assertEqual( <12>:<del> params, <13>:<del> QuicTransportParameters( <14>:<del> idle_timeout=60000, <15>:<del> stateless_reset_token=b"aaaabbbbccccdddd", <16>:<del> max_packet_size=4096, <17>:<del> initial_max_data=1048576, <18>:<del> initial_max_stream_data_bidi_local=66560, <19>:<del> initial_max_stream_data_bidi_remote=66560, <20>:<del> initial_max_stream_data_uni=66560, <21>:<del> initial_max_streams_bidi=4294967295, <22>:<del> initial_max_streams_uni=4294967295, <23>:<del> ack_delay_exponent=3, <24>:<del> ), <25>:<add> buf, protocol_version=QuicProtocolVersion.DRAFT_27 <26>:<add> self.assertEqual(params, QuicTransportParameters())
# module: tests.test_packet class ParamsTest(TestCase): def test_params_unknown(self): <0> # fb.mvfst.net sends a proprietary parameter 65280 <1> data = binascii.unhexlify( <2> "006400050004800104000006000480010400000700048001040000040004801" <3> "0000000080008c0000000ffffffff00090008c0000000ffffffff0001000480" <4> "00ea60000a00010300030002500000020010616161616262626263636363646" <5> "46464ff00000100" <6> ) <7> <8> # parse <9> buf = Buffer(data=data) <10> params = pull_quic_transport_parameters(buf) <11> self.assertEqual( <12> params, <13> QuicTransportParameters( <14> idle_timeout=60000, <15> stateless_reset_token=b"aaaabbbbccccdddd", <16> max_packet_size=4096, <17> initial_max_data=1048576, <18> initial_max_stream_data_bidi_local=66560, <19> initial_max_stream_data_bidi_remote=66560, <20> initial_max_stream_data_uni=66560, <21> initial_max_streams_bidi=4294967295, <22> initial_max_streams_uni=4294967295, <23> ack_delay_exponent=3, <24> ), <25> ) <26>
===========unchanged ref 0=========== at: binascii unhexlify(hexstr: _Ascii, /) -> bytes at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_packet class ParamsTest(TestCase): def test_params_disable_active_migration(self): + data = binascii.unhexlify("0c00") - data = binascii.unhexlify("0004000c0000") # parse buf = Buffer(data=data) + params = pull_quic_transport_parameters( - params = pull_quic_transport_parameters(buf) + buf, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual(params, QuicTransportParameters(disable_active_migration=True)) # serialize buf = Buffer(capacity=len(data)) + push_quic_transport_parameters( - push_quic_transport_parameters(buf, params) + buf, params, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual(buf.data, data) ===========changed ref 1=========== # module: tests.test_packet class ParamsTest(TestCase): + def test_params_legacy(self): + data = binascii.unhexlify( + "004700020010cc2fd6e7d97a53ab5be85b28d75c80080008000106000100026" + "710000600048000ffff000500048000ffff000400048005fffa000a00010300" + "0b0001190003000247e4" + ) + + # parse + buf = Buffer(data=data) + params = pull_quic_transport_parameters( + buf, protocol_version=QuicProtocolVersion.DRAFT_25 + ) + self.assertEqual( + params, + QuicTransportParameters( + idle_timeout=10000, + stateless_reset_token=b"\xcc/\xd6\xe7\xd9zS\xab[\xe8[(\xd7\\\x80\x08", + max_packet_size=2020, + initial_max_data=393210, + initial_max_stream_data_bidi_local=65535, + initial_max_stream_data_bidi_remote=65535, + initial_max_stream_data_uni=None, + initial_max_streams_bidi=6, + initial_max_streams_uni=None, + ack_delay_exponent=3, + max_ack_delay=25, + ), + ) + + # serialize + buf = Buffer(capacity=len(data)) + push_quic_transport_parameters( + buf, params, protocol_version=QuicProtocolVersion.DRAFT_25 + ) + self.assertEqual(len(buf.data), len(data)) + ===========changed ref 2=========== # module: tests.test_packet class ParamsTest(TestCase): def test_params(self): data = binascii.unhexlify( + "010267100210cc2fd6e7d97a53ab5be85b28d75c8008030247e404048005fff" - "004700020010cc2fd6e7d97a53ab5be85b28d75c80080008000106000100026" - "710000600048000ffff000500048000ffff000400048005fffa000a00010300" - "0b0001190003000247e4" + "a05048000ffff06048000ffff0801060a01030b0119" ) # parse buf = Buffer(data=data) + params = pull_quic_transport_parameters( - params = pull_quic_transport_parameters(buf) + buf, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual( params, QuicTransportParameters( idle_timeout=10000, stateless_reset_token=b"\xcc/\xd6\xe7\xd9zS\xab[\xe8[(\xd7\\\x80\x08", max_packet_size=2020, initial_max_data=393210, initial_max_stream_data_bidi_local=65535, initial_max_stream_data_bidi_remote=65535, initial_max_stream_data_uni=None, initial_max_streams_bidi=6, initial_max_streams_uni=None, ack_delay_exponent=3, max_ack_delay=25, ), ) # serialize buf = Buffer(capacity=len(data)) + push_quic_transport_parameters( - push_quic_transport_parameters(buf, params) + buf, params, protocol_version=QuicProtocolVersion.DRAFT_27 + )</s> ===========changed ref 3=========== # module: tests.test_packet class ParamsTest(TestCase): def test_params(self): # offset: 1 <s>_parameters(buf, params) + buf, params, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual(len(buf.data), len(data)) ===========changed ref 4=========== # module: tests.test_packet class ParamsTest(TestCase): def 
test_params_preferred_address(self): data = binascii.unhexlify( - "008b000100048000753000020010191adf238f8041a56a5fa7a88ddd14f3000" - "400048010000000050004800400000006000480040000000700048004000000" - "08000240640009000103000d003b8ba27b8611532400890200000000f03c91f" - "ffe69a45411531262c4518d63013f0c287ed3573efa9095603746b2e02d4548" - "0ba6643e5c6e7d48ecb4000e000107" + "0d3b8ba27b8611532400890200000000f03c91fffe69a45411531262c4518d6" + "3013f0c287ed3573efa9095603746b2e02d45480ba6643e5c6e7d48ecb4" ) # parse buf = Buffer(data=data) + params = pull_quic_transport_parameters( - params = pull_quic_transport_parameters(buf) + buf, protocol_version=QuicProtocolVersion.DRAFT_27 + ) self.assertEqual( params, QuicTransportParameters( - idle_timeout=30000, - stateless_reset_token=b"\x19\x1a\xdf#\x8f\x80A\xa5j_\xa7\xa8\x8d\xdd\x14\xf3", - initial_max_data=1048576, - initial_max_stream_data_bidi_local=262144, - initial_max_stream_data_bidi_remote=262144, - initial_max_stream_data_uni=26</s>
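The test_params_unknown change above relies on the parser skipping parameters it does not recognise (here the greased/proprietary id 0xff00). A minimal sketch of that skip logic for the draft-27 format, written against plain bytes rather than aioquic's Buffer; decode_varint and parse_params are illustrative names:

def decode_varint(data: bytes, pos: int):
    # QUIC variable-length integer: the top 2 bits of the first byte give the
    # total length (1, 2, 4 or 8 bytes); the remaining bits are the value.
    first = data[pos]
    length = 1 << (first >> 6)
    value = first & 0x3F
    for b in data[pos + 1:pos + length]:
        value = (value << 8) | b
    return value, pos + length

def parse_params(data: bytes, known=(0x01, 0x03, 0x04)):
    params, pos = {}, 0
    while pos < len(data):
        param_id, pos = decode_varint(data, pos)
        param_len, pos = decode_varint(data, pos)
        if param_id in known:
            params[param_id] = data[pos:pos + param_len]
        # unknown parameters are skipped, as in test_params_unknown above
        pos += param_len
    return params

# the reserved/proprietary parameter in the test data ("8000ff000100") is ignored
assert parse_params(bytes.fromhex("8000ff000100")) == {}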
examples.http3_server/HttpServerProtocol.http_event_received
Modified
aiortc~aioquic
3ed80288b79ef44c36a9d74145ee086e5be0e2e2
[examples] fix mypy error
<31>:<add> scope: Dict
# module: examples.http3_server class HttpServerProtocol(QuicConnectionProtocol): def http_event_received(self, event: H3Event) -> None: <0> if isinstance(event, HeadersReceived) and event.stream_id not in self._handlers: <1> authority = None <2> headers = [] <3> http_version = "0.9" if isinstance(self._http, H0Connection) else "3" <4> raw_path = b"" <5> method = "" <6> protocol = None <7> for header, value in event.headers: <8> if header == b":authority": <9> authority = value <10> headers.append((b"host", value)) <11> elif header == b":method": <12> method = value.decode() <13> elif header == b":path": <14> raw_path = value <15> elif header == b":protocol": <16> protocol = value.decode() <17> elif header and not header.startswith(b":"): <18> headers.append((header, value)) <19> <20> if b"?" in raw_path: <21> path_bytes, query_string = raw_path.split(b"?", maxsplit=1) <22> else: <23> path_bytes, query_string = raw_path, b"" <24> path = path_bytes.decode() <25> <26> # FIXME: add a public API to retrieve peer address <27> client_addr = self._http._quic._network_paths[0].addr <28> client = (client_addr[0], client_addr[1]) <29> <30> handler: Handler <31> if method == "CONNECT" and protocol == "websocket": <32> subprotocols: List[str] = [] <33> for header, value in event.headers: <34> if header == b"sec-websocket-protocol": <35> subprotocols = [x.strip() for x in value.decode().split(",")] <36> scope = { <37> "client": client, <38> "headers": headers, <39> "http_version": http_version, <40> "method": method, <41> "path": path, <42> "</s>
===========below chunk 0=========== # module: examples.http3_server class HttpServerProtocol(QuicConnectionProtocol): def http_event_received(self, event: H3Event) -> None: # offset: 1 "raw_path": raw_path, "root_path": "", "scheme": "wss", "subprotocols": subprotocols, "type": "websocket", } handler = WebSocketHandler( connection=self._http, scope=scope, stream_id=event.stream_id, transmit=self.transmit, ) else: extensions: Dict[str, Dict] = {} if isinstance(self._http, H3Connection): extensions["http.response.push"] = {} scope = { "client": client, "extensions": extensions, "headers": headers, "http_version": http_version, "method": method, "path": path, "query_string": query_string, "raw_path": raw_path, "root_path": "", "scheme": "https", "type": "http", } handler = HttpRequestHandler( authority=authority, connection=self._http, protocol=self, scope=scope, stream_ended=event.stream_ended, stream_id=event.stream_id, transmit=self.transmit, ) self._handlers[event.stream_id] = handler asyncio.ensure_future(handler.run_asgi(application)) elif ( isinstance(event, (DataReceived, HeadersReceived)) and event.stream_id in self._handlers ): handler = self._handlers[event.stream_id] handler.http_event_received(event) ===========unchanged ref 0=========== at: examples.http3_server HttpRequestHandler(*, authority: bytes, connection: HttpConnection, protocol: QuicConnectionProtocol, scope: Dict, stream_ended: bool, stream_id: int, transmit: Callable[[], None]) WebSocketHandler(*, connection: HttpConnection, scope: Dict, stream_id: int, transmit: Callable[[], None]) Handler = Union[HttpRequestHandler, WebSocketHandler] application = getattr(module, attr_str) at: examples.http3_server.HttpServerProtocol.__init__ self._handlers: Dict[int, Handler] = {} self._http: Optional[HttpConnection] = None at: examples.http3_server.HttpServerProtocol.quic_event_received self._http = H3Connection(self._quic) self._http = H0Connection(self._quic) at: typing List = _alias(list, 1, inst=False, name='List') Dict = _alias(dict, 2, inst=False, name='Dict')
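The single added annotation (`scope: Dict`) resolves the mypy error because, without it, mypy infers the type of `scope` from the first dict literal assigned in one branch and may then reject the differently-shaped literal assigned in the other branch. A small self-contained illustration of the pattern; the function and keys here are hypothetical, not taken from the example server:

from typing import Dict

def build_scope(is_websocket: bool) -> Dict:
    scope: Dict  # without this, mypy can infer an overly narrow dict type
    if is_websocket:
        scope = {"type": "websocket", "subprotocols": ["chat"]}
    else:
        scope = {"type": "http", "extensions": {}}
    return scope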
tests.test_tls/VerifyCertificateTest.test_verify_certificate_chain_internal_error
Modified
aiortc~aioquic
2408e7a8764a81a6926eee90900dbaeebe1594c7
[tls] report which OpenSSL function call failed
<12>:<add> self.assertEqual(str(cm.exception), "OpenSSL call to X509_store_new failed") <del> self.assertEqual(str(cm.exception), "OpenSSL call failed")
# module: tests.test_tls class VerifyCertificateTest(TestCase): @patch("aioquic.tls.lib.X509_STORE_new") def test_verify_certificate_chain_internal_error(self, mock_store_new): <0> mock_store_new.return_value = tls.ffi.NULL <1> <2> certificate, _ = generate_ec_certificate( <3> common_name="localhost", curve=ec.SECP256R1 <4> ) <5> <6> with self.assertRaises(tls.AlertInternalError) as cm: <7> verify_certificate( <8> cadata=certificate.public_bytes(serialization.Encoding.PEM), <9> certificate=certificate, <10> server_name="localhost", <11> ) <12> self.assertEqual(str(cm.exception), "OpenSSL call failed") <13>
===========unchanged ref 0=========== at: tests.utils generate_ec_certificate(common_name, curve=ec.SECP256R1, alternative_names=[]) at: unittest.case.TestCase failureException: Type[BaseException] longMessage: bool maxDiff: Optional[int] _testMethodName: str _testMethodDoc: str assertEqual(first: Any, second: Any, msg: Any=...) -> None assertRaises(expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], msg: Any=...) -> _AssertRaisesContext[_E] assertRaises(expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], callable: Callable[..., Any], *args: Any, **kwargs: Any) -> None at: unittest.case._AssertRaisesContext.__exit__ self.exception = exc_value.with_traceback(None) at: unittest.mock _patcher(target: Any, new: _T, spec: Optional[Any]=..., create: bool=..., spec_set: Optional[Any]=..., autospec: Optional[Any]=..., new_callable: Optional[Any]=..., **kwargs: Any) -> _patch[_T] _patcher(target: Any, *, spec: Optional[Any]=..., create: bool=..., spec_set: Optional[Any]=..., autospec: Optional[Any]=..., new_callable: Optional[Any]=..., **kwargs: Any) -> _patch[Union[MagicMock, AsyncMock]]
aioquic.tls/openssl_assert
Modified
aiortc~aioquic
2408e7a8764a81a6926eee90900dbaeebe1594c7
[tls] report which OpenSSL function call failed
<2>:<add> raise AlertInternalError("OpenSSL call to %s failed" % func) <del> raise AlertInternalError("OpenSSL call failed")
# module: aioquic.tls + def openssl_assert(ok: bool, func: str) -> None: - def openssl_assert(ok: bool) -> None: <0> if not ok: <1> lib.ERR_clear_error() <2> raise AlertInternalError("OpenSSL call failed") <3>
===========unchanged ref 0=========== at: aioquic.tls lib = binding.lib AlertInternalError(*args: object) ===========changed ref 0=========== # module: tests.test_tls class VerifyCertificateTest(TestCase): @patch("aioquic.tls.lib.X509_STORE_new") def test_verify_certificate_chain_internal_error(self, mock_store_new): mock_store_new.return_value = tls.ffi.NULL certificate, _ = generate_ec_certificate( common_name="localhost", curve=ec.SECP256R1 ) with self.assertRaises(tls.AlertInternalError) as cm: verify_certificate( cadata=certificate.public_bytes(serialization.Encoding.PEM), certificate=certificate, server_name="localhost", ) + self.assertEqual(str(cm.exception), "OpenSSL call to X509_store_new failed") - self.assertEqual(str(cm.exception), "OpenSSL call failed")
aioquic.tls/verify_certificate
Modified
aiortc~aioquic
2408e7a8764a81a6926eee90900dbaeebe1594c7
[tls] report which OpenSSL function call failed
<30>:<add> openssl_assert(store != ffi.NULL, "X509_store_new") <del> openssl_assert(store != ffi.NULL) <34>:<add> openssl_assert( <add> lib.X509_STORE_set_default_paths(store), "X509_STORE_set_default_paths" <add> ) <del> openssl_assert(lib.X509_STORE_set_default_paths(store))
# module: aioquic.tls def verify_certificate( certificate: x509.Certificate, chain: List[x509.Certificate] = [], server_name: Optional[str] = None, cadata: Optional[bytes] = None, cafile: Optional[str] = None, capath: Optional[str] = None, ) -> None: <0> # verify dates <1> now = utcnow() <2> if now < certificate.not_valid_before: <3> raise AlertCertificateExpired("Certificate is not valid yet") <4> if now > certificate.not_valid_after: <5> raise AlertCertificateExpired("Certificate is no longer valid") <6> <7> # verify subject <8> if server_name is not None: <9> subject = [] <10> subjectAltName: List[Tuple[str, str]] = [] <11> for attr in certificate.subject: <12> if attr.oid == x509.NameOID.COMMON_NAME: <13> subject.append((("commonName", attr.value),)) <14> for ext in certificate.extensions: <15> if isinstance(ext.value, x509.SubjectAlternativeName): <16> for name in ext.value: <17> if isinstance(name, x509.DNSName): <18> subjectAltName.append(("DNS", name.value)) <19> <20> try: <21> ssl.match_hostname( <22> {"subject": tuple(subject), "subjectAltName": tuple(subjectAltName)}, <23> server_name, <24> ) <25> except ssl.CertificateError as exc: <26> raise AlertBadCertificate("\n".join(exc.args)) from exc <27> <28> # verify certificate chain <29> store = lib.X509_STORE_new() <30> openssl_assert(store != ffi.NULL) <31> store = ffi.gc(store, lib.X509_STORE_free) <32> <33> # load default CAs <34> openssl_assert(lib.X509_STORE_set_default_paths(store)) <35> paths = ssl.get_default_verify_paths() <36> openssl_assert( <37> lib.X509_STORE</s>
===========below chunk 0=========== # module: aioquic.tls def verify_certificate( certificate: x509.Certificate, chain: List[x509.Certificate] = [], server_name: Optional[str] = None, cadata: Optional[bytes] = None, cafile: Optional[str] = None, capath: Optional[str] = None, ) -> None: # offset: 1 store, openssl_encode_path(paths.cafile), openssl_encode_path(paths.capath) ) ) # load extra CAs if cadata is not None: for cert in load_pem_x509_certificates(cadata): openssl_assert(lib.X509_STORE_add_cert(store, cert_x509_ptr(cert))) if cafile is not None or capath is not None: openssl_assert( lib.X509_STORE_load_locations( store, openssl_encode_path(cafile), openssl_encode_path(capath) ) ) chain_stack = lib.sk_X509_new_null() openssl_assert(chain_stack != ffi.NULL) chain_stack = ffi.gc(chain_stack, lib.sk_X509_free) for cert in chain: openssl_assert(lib.sk_X509_push(chain_stack, cert_x509_ptr(cert))) store_ctx = lib.X509_STORE_CTX_new() openssl_assert(store_ctx != ffi.NULL) store_ctx = ffi.gc(store_ctx, lib.X509_STORE_CTX_free) openssl_assert( lib.X509_STORE_CTX_init( store_ctx, store, cert_x509_ptr(certificate), chain_stack ) ) res = lib.X509_verify_cert(store_ctx) if not res: err = lib.X509_STORE_CTX_get_error(store_ctx) err_str = openssl_decode_string(lib.X</s> ===========below chunk 1=========== # module: aioquic.tls def verify_certificate( certificate: x509.Certificate, chain: List[x509.Certificate] = [], server_name: Optional[str] = None, cadata: Optional[bytes] = None, cafile: Optional[str] = None, capath: Optional[str] = None, ) -> None: # offset: 2 <s>509_STORE_CTX_get_error(store_ctx) err_str = openssl_decode_string(lib.X509_verify_cert_error_string(err)) raise AlertBadCertificate(err_str) ===========unchanged ref 0=========== at: aioquic.tls ffi = binding.ffi lib = binding.lib utcnow = datetime.datetime.utcnow AlertBadCertificate(*args: object) AlertCertificateExpired(*args: object) load_pem_x509_certificates(data: bytes) -> List[x509.Certificate] openssl_assert(ok: bool, func: str) -> None openssl_encode_path(s: Optional[str]) -> Any cert_x509_ptr(certificate: x509.Certificate) -> Any at: ssl CertificateError = SSLCertVerificationError match_hostname(cert: _PeerCertRetType, hostname: str) -> None get_default_verify_paths() -> DefaultVerifyPaths at: typing Tuple = _TupleType(tuple, -1, inst=False, name='Tuple') List = _alias(list, 1, inst=False, name='List') ===========changed ref 0=========== # module: aioquic.tls + def openssl_assert(ok: bool, func: str) -> None: - def openssl_assert(ok: bool) -> None: if not ok: lib.ERR_clear_error() + raise AlertInternalError("OpenSSL call to %s failed" % func) - raise AlertInternalError("OpenSSL call failed") ===========changed ref 1=========== # module: tests.test_tls class VerifyCertificateTest(TestCase): @patch("aioquic.tls.lib.X509_STORE_new") def test_verify_certificate_chain_internal_error(self, mock_store_new): mock_store_new.return_value = tls.ffi.NULL certificate, _ = generate_ec_certificate( common_name="localhost", curve=ec.SECP256R1 ) with self.assertRaises(tls.AlertInternalError) as cm: verify_certificate( cadata=certificate.public_bytes(serialization.Encoding.PEM), certificate=certificate, server_name="localhost", ) + self.assertEqual(str(cm.exception), "OpenSSL call to X509_store_new failed") - self.assertEqual(str(cm.exception), "OpenSSL call failed")
tests.test_asyncio/HighLevelTest.setUp
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<1>:<add> self.server_host = "::1" <add> self.server_port = 4433
# module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): <0> self.server = None <1>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest.run_server self.server = await serve( host="::", port=4433, configuration=configuration, stream_handler=handle_stream, **kwargs ) at: unittest.case.TestCase failureException = AssertionError longMessage = True maxDiff = 80*8 _diffThreshold = 2**16 setUp(self) -> None failUnlessEqual = assertEquals = _deprecate(assertEqual) failUnlessEqual = assertEquals = _deprecate(assertEqual) failIfEqual = assertNotEquals = _deprecate(assertNotEqual) failIfEqual = assertNotEquals = _deprecate(assertNotEqual) failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual) failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual) failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual) failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual) failUnless = assert_ = _deprecate(assertTrue) failUnless = assert_ = _deprecate(assertTrue) failUnlessRaises = _deprecate(assertRaises) failIf = _deprecate(assertFalse) assertRaisesRegexp = _deprecate(assertRaisesRegex) assertRegexpMatches = _deprecate(assertRegex) assertNotRegexpMatches = _deprecate(assertNotRegex)
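This commit moves the test suite from 127.0.0.1 to the IPv6 loopback address ::1, with the server still bound to the IPv6 wildcard ::. A rough plain-asyncio sketch of the same host/port arrangement (TCP instead of QUIC, so it does not depend on aioquic's serve/connect; port 4433 is just the value used by the tests):

import asyncio

async def main() -> None:
    async def echo(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
        writer.write(await reader.read())  # read until the client sends EOF
        await writer.drain()
        writer.close()

    # bind on the IPv6 wildcard; on dual-stack hosts this usually also
    # accepts IPv4 connections
    server = await asyncio.start_server(echo, host="::", port=4433)

    # the client targets the IPv6 loopback address instead of 127.0.0.1
    reader, writer = await asyncio.open_connection("::1", 4433)
    writer.write(b"ping")
    writer.write_eof()
    await writer.drain()
    print(await reader.read())  # b"ping"
    writer.close()

    server.close()
    await server.wait_closed()

asyncio.run(main())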
tests.test_asyncio/HighLevelTest.run_client
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<0>:<add> if host is None: <add> host = self.server_host <add> if port is None: <add> port = self.server_port
# module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): <0> if configuration is None: <1> configuration = QuicConfiguration(is_client=True) <2> configuration.load_verify_locations(cadata=cadata, cafile=cafile) <3> async with connect(host, port, configuration=configuration, **kwargs) as client: <4> # waiting for connected when connected returns immediately <5> await client.wait_connected() <6> <7> reader, writer = await client.create_stream() <8> self.assertEqual(writer.can_write_eof(), True) <9> self.assertEqual(writer.get_extra_info("stream_id"), 0) <10> <11> writer.write(request) <12> writer.write_eof() <13> <14> response = await reader.read() <15> <16> # waiting for closed when closed returns immediately <17> await client.wait_closed() <18> <19> return response <20>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest.run_server self.server = await serve( host="::", port=4433, configuration=configuration, stream_handler=handle_stream, **kwargs ) at: tests.test_asyncio.HighLevelTest.setUp self.server = None self.server_host = "::1" self.server_port = 4433 at: tests.utils SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433
tests.test_asyncio/HighLevelTest.run_server
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<5>:<add> port=4433, <del> port="4433",
# module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): <0> if configuration is None: <1> configuration = QuicConfiguration(is_client=False) <2> configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) <3> self.server = await serve( <4> host="::", <5> port="4433", <6> configuration=configuration, <7> stream_handler=handle_stream, <8> **kwargs <9> ) <10> return self.server <11>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest.run_client response = await reader.read() at: tests.test_asyncio.HighLevelTest.setUp self.server = None at: tests.utils SERVER_CERTFILE = os.path.join(os.path.dirname(__file__), "ssl_cert.pem") SERVER_KEYFILE = os.path.join(os.path.dirname(__file__), "ssl_key.pem") ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response
tests.test_asyncio/HighLevelTest.test_connect_and_serve
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<1>:<add> response = run(self.run_client()) <del> response = run(self.run_client("127.0.0.1"))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): <0> run(self.run_server()) <1> response = run(self.run_client("127.0.0.1")) <2> self.assertEqual(response, b"gnip") <3>
===========unchanged ref 0=========== at: tests.test_asyncio handle_stream(reader, writer) at: tests.test_asyncio.HighLevelTest.run_server configuration = QuicConfiguration(is_client=False) ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response
tests.test_asyncio/HighLevelTest.test_connect_and_serve_ec_certificate
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<12>:<del> "127.0.0.1",
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_ec_certificate(self): <0> certificate, private_key = generate_ec_certificate(common_name="localhost") <1> <2> run( <3> self.run_server( <4> configuration=QuicConfiguration( <5> certificate=certificate, private_key=private_key, is_client=False, <6> ) <7> ) <8> ) <9> <10> response = run( <11> self.run_client( <12> "127.0.0.1", <13> cadata=certificate.public_bytes(serialization.Encoding.PEM), <14> cafile=None, <15> ) <16> ) <17> <18> self.assertEqual(response, b"gnip") <19>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_client(self, host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_client(host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_server(self, configuration=None, **kwargs) run_server(configuration=None, **kwargs) at: tests.utils generate_ec_certificate(common_name, curve=ec.SECP256R1, alternative_names=[]) run(coro) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433
tests.test_asyncio/HighLevelTest.test_connect_and_serve_large
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<5>:<add> response = run(self.run_client(request=data)) <del> response = run(self.run_client("127.0.0.1", request=data))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): <0> """ <1> Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. <2> """ <3> data = b"Z" * 2097152 <4> run(self.run_server()) <5> response = run(self.run_client("127.0.0.1", request=data)) <6> self.assertEqual(response, data) <7>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest.test_connect_and_serve_ec_certificate response = run( self.run_client( cadata=certificate.public_bytes(serialization.Encoding.PEM), cafile=None, ) ) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_ec_certificate(self): certificate, private_key = generate_ec_certificate(common_name="localhost") run( self.run_server( configuration=QuicConfiguration( certificate=certificate, private_key=private_key, is_client=False, ) ) ) response = run( self.run_client( - "127.0.0.1", cadata=certificate.public_bytes(serialization.Encoding.PEM), cafile=None, ) ) self.assertEqual(response, b"gnip") ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response
tests.test_asyncio/HighLevelTest.test_connect_and_serve_without_client_configuration
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<0>:<add> async def run_client_without_config(): <del> async def run_client_without_config(host, port=4433): <1>:<add> async with connect(self.server_host, self.server_port) as client: <del> async with connect(host, port) as client: <6>:<add> run(run_client_without_config()) <del> run(run_client_without_config("127.0.0.1"))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): <0> async def run_client_without_config(host, port=4433): <1> async with connect(host, port) as client: <2> await client.ping() <3> <4> run(self.run_server()) <5> with self.assertRaises(ConnectionError): <6> run(run_client_without_config("127.0.0.1")) <7>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_client(self, host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_client(host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_server(self, configuration=None, **kwargs) run_server(configuration=None, **kwargs) at: tests.test_asyncio.HighLevelTest.setUp self.server_host = "::1" self.server_port = 4433 at: tests.utils run(coro) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. """ data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_ec_certificate(self): certificate, private_key = generate_ec_certificate(common_name="localhost") run( self.run_server( configuration=QuicConfiguration( certificate=certificate, private_key=private_key, is_client=False, ) ) ) response = run( self.run_client( - "127.0.0.1", cadata=certificate.public_bytes(serialization.Encoding.PEM), cafile=None, ) ) self.assertEqual(response, b"gnip")
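The refactored helpers shown in these records bind the server to the IPv6 wildcard address "::" and default the client to the IPv6 loopback "::1" on port 4433, which is the point of the "[tests] use IPv6 loopback address" commit. The standalone sketch below mirrors that round trip outside the test class; the import paths follow aioquic's public asyncio API as used in these records, and the certificate/key/CA file names are placeholders, not files shipped with this excerpt.

import asyncio

from aioquic.asyncio import connect, serve
from aioquic.quic.configuration import QuicConfiguration

# Placeholder credentials: substitute your own test certificate, key and CA.
SERVER_CERTFILE = "ssl_cert.pem"
SERVER_KEYFILE = "ssl_key.pem"
SERVER_CACERTFILE = "pycacert.pem"


async def main():
    # Server side: bind the QUIC endpoint to the IPv6 wildcard address,
    # mirroring run_server() in the records above.
    server_configuration = QuicConfiguration(is_client=False)
    server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE)
    server = await serve(host="::", port=4433, configuration=server_configuration)

    # Client side: connect to the IPv6 loopback address, mirroring run_client().
    client_configuration = QuicConfiguration(is_client=True)
    client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE)
    async with connect("::1", 4433, configuration=client_configuration) as client:
        await client.ping()

    server.close()


asyncio.run(main())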
tests.test_asyncio/HighLevelTest.test_connect_and_serve_writelines
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<0>:<add> async def run_client_writelines(): <del> async def run_client_writelines(host, port=4433): <3>:<add> async with connect( <add> self.server_host, self.server_port, configuration=configuration <add> ) as client: <del> async with connect(host, port, configuration=configuration) as client: <13>:<add> response = run(run_client_writelines()) <del> response = run(run_client_writelines("127.0.0.1"))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_writelines(self): <0> async def run_client_writelines(host, port=4433): <1> configuration = QuicConfiguration(is_client=True) <2> configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <3> async with connect(host, port, configuration=configuration) as client: <4> reader, writer = await client.create_stream() <5> assert writer.can_write_eof() is True <6> <7> writer.writelines([b"01234567", b"89012345"]) <8> writer.write_eof() <9> <10> return await reader.read() <11> <12> run(self.run_server()) <13> response = run(run_client_writelines("127.0.0.1")) <14> self.assertEqual(response, b"5432109876543210") <15>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_server(self, configuration=None, **kwargs) run_server(configuration=None, **kwargs) at: tests.test_asyncio.HighLevelTest.setUp self.server_host = "::1" self.server_port = 4433 at: tests.test_asyncio.HighLevelTest.test_connect_and_serve_without_client_configuration run_client_without_config() at: tests.utils run(coro) SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") at: unittest.case.TestCase assertRaises(expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], msg: Any=...) -> _AssertRaisesContext[_E] assertRaises(expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], callable: Callable[..., Any], *args: Any, **kwargs: Any) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. """ data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_ec_certificate(self): certificate, private_key = generate_ec_certificate(common_name="localhost") run( self.run_server( configuration=QuicConfiguration( certificate=certificate, private_key=private_key, is_client=False, ) ) ) response = run( self.run_client( - "127.0.0.1", cadata=certificate.public_bytes(serialization.Encoding.PEM), cafile=None, ) ) self.assertEqual(response, b"gnip") ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) 
configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response
tests.test_asyncio/HighLevelTest.test_connect_and_serve_with_packet_loss
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<14>:<del> "127.0.0.1",
# module: tests.test_asyncio class HighLevelTest(TestCase): @patch("socket.socket.sendto", new_callable=lambda: sendto_with_loss) def test_connect_and_serve_with_packet_loss(self, mock_sendto): <0> """ <1> This test ensures handshake success and stream data is successfully sent <2> and received in the presence of packet loss (randomized 25% in each direction). <3> """ <4> data = b"Z" * 65536 <5> <6> server_configuration = QuicConfiguration( <7> is_client=False, quic_logger=QuicLogger() <8> ) <9> server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) <10> run(self.run_server(configuration=server_configuration)) <11> <12> response = run( <13> self.run_client( <14> "127.0.0.1", <15> configuration=QuicConfiguration( <16> is_client=True, quic_logger=QuicLogger() <17> ), <18> request=data, <19> ) <20> ) <21> self.assertEqual(response, data) <22>
===========unchanged ref 0=========== at: tests.test_asyncio sendto_with_loss(self, data, addr=None) at: tests.test_asyncio.HighLevelTest run_client(self, host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_client(host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_server(self, configuration=None, **kwargs) run_server(configuration=None, **kwargs) at: tests.test_asyncio.HighLevelTest.test_connect_and_serve_writelines run_client_writelines() at: tests.utils run(coro) SERVER_CERTFILE = os.path.join(os.path.dirname(__file__), "ssl_cert.pem") SERVER_KEYFILE = os.path.join(os.path.dirname(__file__), "ssl_key.pem") at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None at: unittest.mock _patcher(target: Any, new: _T, spec: Optional[Any]=..., create: bool=..., spec_set: Optional[Any]=..., autospec: Optional[Any]=..., new_callable: Optional[Any]=..., **kwargs: Any) -> _patch[_T] _patcher(target: Any, *, spec: Optional[Any]=..., create: bool=..., spec_set: Optional[Any]=..., autospec: Optional[Any]=..., new_callable: Optional[Any]=..., **kwargs: Any) -> _patch[Union[MagicMock, AsyncMock]] ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. 
""" data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_ec_certificate(self): certificate, private_key = generate_ec_certificate(common_name="localhost") run( self.run_server( configuration=QuicConfiguration( certificate=certificate, private_key=private_key, is_client=False, ) ) ) response = run( self.run_client( - "127.0.0.1", cadata=certificate.public_bytes(serialization.Encoding.PEM), cafile=None, ) ) self.assertEqual(response, b"gnip") ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_writelines(self): + async def run_client_writelines(): - async def run_client_writelines(host, port=4433): configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cafile=SERVER_CACERTFILE) + async with connect( + self.server_host, self.server_port, configuration=configuration + ) as client: - async with connect(host, port, configuration=configuration) as client: reader, writer = await client.create_stream() assert writer.can_write_eof() is True writer.writelines([b"01234567", b"89012345"]) writer.write_eof() return await reader.read() run(self.run_server()) + response = run(run_client_writelines()) - response = run(run_client_writelines("127.0.0.1")) self.assertEqual(response, b"5432109876543210")
tests.test_asyncio/HighLevelTest.test_connect_and_serve_with_session_ticket
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<15>:<del> response = run( <16>:<add> response = run(self.run_client(session_ticket_handler=save_ticket),) <del> self.run_client("127.0.0.1", session_ticket_handler=save_ticket), <17>:<del> ) <25>:<del> "127.0.0.1",
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_session_ticket(self): <0> # start server <1> client_ticket = None <2> store = SessionTicketStore() <3> <4> def save_ticket(t): <5> nonlocal client_ticket <6> client_ticket = t <7> <8> run( <9> self.run_server( <10> session_ticket_fetcher=store.pop, session_ticket_handler=store.add <11> ) <12> ) <13> <14> # first request <15> response = run( <16> self.run_client("127.0.0.1", session_ticket_handler=save_ticket), <17> ) <18> self.assertEqual(response, b"gnip") <19> <20> self.assertIsNotNone(client_ticket) <21> <22> # second request <23> run( <24> self.run_client( <25> "127.0.0.1", <26> configuration=QuicConfiguration( <27> is_client=True, session_ticket=client_ticket <28> ), <29> ) <30> ) <31> self.assertEqual(response, b"gnip") <32>
===========unchanged ref 0=========== at: tests.test_asyncio SessionTicketStore() at: tests.test_asyncio.HighLevelTest run_client(self, host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_client(host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_server(self, configuration=None, **kwargs) run_server(configuration=None, **kwargs) at: tests.test_asyncio.HighLevelTest.test_connect_and_serve_with_packet_loss data = b"Z" * 65536 response = run( self.run_client( configuration=QuicConfiguration( is_client=True, quic_logger=QuicLogger() ), request=data, ) ) at: tests.test_asyncio.SessionTicketStore add(ticket) pop(label) at: tests.utils run(coro) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None assertIsNotNone(obj: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. 
""" data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("socket.socket.sendto", new_callable=lambda: sendto_with_loss) def test_connect_and_serve_with_packet_loss(self, mock_sendto): """ This test ensures handshake success and stream data is successfully sent and received in the presence of packet loss (randomized 25% in each direction). """ data = b"Z" * 65536 server_configuration = QuicConfiguration( is_client=False, quic_logger=QuicLogger() ) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) run(self.run_server(configuration=server_configuration)) response = run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, quic_logger=QuicLogger() ), request=data, ) ) self.assertEqual(response, data) ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_ec_certificate(self): certificate, private_key = generate_ec_certificate(common_name="localhost") run( self.run_server( configuration=QuicConfiguration( certificate=certificate, private_key=private_key, is_client=False, ) ) ) response = run( self.run_client( - "127.0.0.1", cadata=certificate.public_bytes(serialization.Encoding.PEM), cafile=None, ) ) self.assertEqual(response, b"gnip") ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_writelines(self): + async def run_client_writelines(): - async def run_client_writelines(host, port=4433): configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cafile=SERVER_CACERTFILE) + async with connect( + self.server_host, self.server_port, configuration=configuration + ) as client: - async with connect(host, port, configuration=configuration) as client: reader, writer = await client.create_stream() assert writer.can_write_eof() is True writer.writelines([b"01234567", b"89012345"]) writer.write_eof() return await reader.read() run(self.run_server()) + response = run(run_client_writelines()) - response = run(run_client_writelines("127.0.0.1")) self.assertEqual(response, b"5432109876543210")
tests.test_asyncio/HighLevelTest.test_connect_and_serve_with_sni
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<1>:<add> response = run(self.run_client(host="localhost")) <del> response = run(self.run_client("localhost"))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): <0> run(self.run_server()) <1> response = run(self.run_client("localhost")) <2> self.assertEqual(response, b"gnip") <3>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest.test_connect_and_serve_with_session_ticket response = run(self.run_client(session_ticket_handler=save_ticket),) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. """ data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("socket.socket.sendto", new_callable=lambda: sendto_with_loss) def test_connect_and_serve_with_packet_loss(self, mock_sendto): """ This test ensures handshake success and stream data is successfully sent and received in the presence of packet loss (randomized 25% in each direction). 
""" data = b"Z" * 65536 server_configuration = QuicConfiguration( is_client=False, quic_logger=QuicLogger() ) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) run(self.run_server(configuration=server_configuration)) response = run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, quic_logger=QuicLogger() ), request=data, ) ) self.assertEqual(response, data) ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_ec_certificate(self): certificate, private_key = generate_ec_certificate(common_name="localhost") run( self.run_server( configuration=QuicConfiguration( certificate=certificate, private_key=private_key, is_client=False, ) ) ) response = run( self.run_client( - "127.0.0.1", cadata=certificate.public_bytes(serialization.Encoding.PEM), cafile=None, ) ) self.assertEqual(response, b"gnip") ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_session_ticket(self): # start server client_ticket = None store = SessionTicketStore() def save_ticket(t): nonlocal client_ticket client_ticket = t run( self.run_server( session_ticket_fetcher=store.pop, session_ticket_handler=store.add ) ) # first request - response = run( + response = run(self.run_client(session_ticket_handler=save_ticket),) - self.run_client("127.0.0.1", session_ticket_handler=save_ticket), - ) self.assertEqual(response, b"gnip") self.assertIsNotNone(client_ticket) # second request run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, session_ticket=client_ticket ), ) ) self.assertEqual(response, b"gnip") ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_writelines(self): + async def run_client_writelines(): - async def run_client_writelines(host, port=4433): configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cafile=SERVER_CACERTFILE) + async with connect( + self.server_host, self.server_port, configuration=configuration + ) as client: - async with connect(host, port, configuration=configuration) as client: reader, writer = await client.create_stream() assert writer.can_write_eof() is True writer.writelines([b"01234567", b"89012345"]) writer.write_eof() return await reader.read() run(self.run_server()) + response = run(run_client_writelines()) - response = run(run_client_writelines("127.0.0.1")) self.assertEqual(response, b"5432109876543210")
tests.test_asyncio/HighLevelTest.test_connect_and_serve_with_stateless_retry
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<1>:<add> response = run(self.run_client()) <del> response = run(self.run_client("127.0.0.1"))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry(self): <0> run(self.run_server()) <1> response = run(self.run_client("127.0.0.1")) <2> self.assertEqual(response, b"gnip") <3>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_client(self, host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_client(host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) at: tests.utils run(coro) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): run(self.run_server()) + response = run(self.run_client(host="localhost")) - response = run(self.run_client("localhost")) self.assertEqual(response, b"gnip") ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. 
""" data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("socket.socket.sendto", new_callable=lambda: sendto_with_loss) def test_connect_and_serve_with_packet_loss(self, mock_sendto): """ This test ensures handshake success and stream data is successfully sent and received in the presence of packet loss (randomized 25% in each direction). """ data = b"Z" * 65536 server_configuration = QuicConfiguration( is_client=False, quic_logger=QuicLogger() ) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) run(self.run_server(configuration=server_configuration)) response = run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, quic_logger=QuicLogger() ), request=data, ) ) self.assertEqual(response, data) ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_ec_certificate(self): certificate, private_key = generate_ec_certificate(common_name="localhost") run( self.run_server( configuration=QuicConfiguration( certificate=certificate, private_key=private_key, is_client=False, ) ) ) response = run( self.run_client( - "127.0.0.1", cadata=certificate.public_bytes(serialization.Encoding.PEM), cafile=None, ) ) self.assertEqual(response, b"gnip") ===========changed ref 9=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_session_ticket(self): # start server client_ticket = None store = SessionTicketStore() def save_ticket(t): nonlocal client_ticket client_ticket = t run( self.run_server( session_ticket_fetcher=store.pop, session_ticket_handler=store.add ) ) # first request - response = run( + response = run(self.run_client(session_ticket_handler=save_ticket),) - self.run_client("127.0.0.1", session_ticket_handler=save_ticket), - ) self.assertEqual(response, b"gnip") self.assertIsNotNone(client_ticket) # second request run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, session_ticket=client_ticket ), ) ) self.assertEqual(response, b"gnip")
tests.test_asyncio/HighLevelTest.test_connect_and_serve_with_stateless_retry_bad_original_connection_id
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<12>:<add> run(self.run_client()) <del> run(self.run_client("127.0.0.1"))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry_bad_original_connection_id(self): <0> """ <1> If the server's transport parameters do not have the correct <2> original_connection_id the connection fails. <3> """ <4> <5> def create_protocol(*args, **kwargs): <6> protocol = QuicConnectionProtocol(*args, **kwargs) <7> protocol._quic._original_connection_id = None <8> return protocol <9> <10> run(self.run_server(create_protocol=create_protocol, stateless_retry=True)) <11> with self.assertRaises(ConnectionError): <12> run(self.run_client("127.0.0.1")) <13>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_client(self, host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_client(host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) at: tests.utils run(coro) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): run(self.run_server()) + response = run(self.run_client(host="localhost")) - response = run(self.run_client("localhost")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. 
""" data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("socket.socket.sendto", new_callable=lambda: sendto_with_loss) def test_connect_and_serve_with_packet_loss(self, mock_sendto): """ This test ensures handshake success and stream data is successfully sent and received in the presence of packet loss (randomized 25% in each direction). """ data = b"Z" * 65536 server_configuration = QuicConfiguration( is_client=False, quic_logger=QuicLogger() ) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) run(self.run_server(configuration=server_configuration)) response = run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, quic_logger=QuicLogger() ), request=data, ) ) self.assertEqual(response, data) ===========changed ref 9=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_ec_certificate(self): certificate, private_key = generate_ec_certificate(common_name="localhost") run( self.run_server( configuration=QuicConfiguration( certificate=certificate, private_key=private_key, is_client=False, ) ) ) response = run( self.run_client( - "127.0.0.1", cadata=certificate.public_bytes(serialization.Encoding.PEM), cafile=None, ) ) self.assertEqual(response, b"gnip") ===========changed ref 10=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_session_ticket(self): # start server client_ticket = None store = SessionTicketStore() def save_ticket(t): nonlocal client_ticket client_ticket = t run( self.run_server( session_ticket_fetcher=store.pop, session_ticket_handler=store.add ) ) # first request - response = run( + response = run(self.run_client(session_ticket_handler=save_ticket),) - self.run_client("127.0.0.1", session_ticket_handler=save_ticket), - ) self.assertEqual(response, b"gnip") self.assertIsNotNone(client_ticket) # second request run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, session_ticket=client_ticket ), ) ) self.assertEqual(response, b"gnip")
tests.test_asyncio/HighLevelTest.test_connect_and_serve_with_stateless_retry_bad
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<6>:<del> "127.0.0.1",
# module: tests.test_asyncio class HighLevelTest(TestCase): @patch("aioquic.quic.retry.QuicRetryTokenHandler.validate_token") def test_connect_and_serve_with_stateless_retry_bad(self, mock_validate): <0> mock_validate.side_effect = ValueError("Decryption failed.") <1> <2> run(self.run_server(stateless_retry=True)) <3> with self.assertRaises(ConnectionError): <4> run( <5> self.run_client( <6> "127.0.0.1", <7> configuration=QuicConfiguration(is_client=True, idle_timeout=4.0), <8> ) <9> ) <10>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_client(self, host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_client(host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_server(self, configuration=None, **kwargs) run_server(configuration=None, **kwargs) at: tests.utils run(coro) at: unittest.case.TestCase assertRaises(expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], msg: Any=...) -> _AssertRaisesContext[_E] assertRaises(expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], callable: Callable[..., Any], *args: Any, **kwargs: Any) -> None at: unittest.mock _patcher(target: Any, new: _T, spec: Optional[Any]=..., create: bool=..., spec_set: Optional[Any]=..., autospec: Optional[Any]=..., new_callable: Optional[Any]=..., **kwargs: Any) -> _patch[_T] _patcher(target: Any, *, spec: Optional[Any]=..., create: bool=..., spec_set: Optional[Any]=..., autospec: Optional[Any]=..., new_callable: Optional[Any]=..., **kwargs: Any) -> _patch[Union[MagicMock, AsyncMock]] ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): run(self.run_server()) + response = run(self.run_client(host="localhost")) - response = run(self.run_client("localhost")) self.assertEqual(response, b"gnip") ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry_bad_original_connection_id(self): """ If the server's transport parameters do not have the correct original_connection_id the connection fail. 
""" def create_protocol(*args, **kwargs): protocol = QuicConnectionProtocol(*args, **kwargs) protocol._quic._original_connection_id = None return protocol run(self.run_server(create_protocol=create_protocol, stateless_retry=True)) with self.assertRaises(ConnectionError): + run(self.run_client()) - run(self.run_client("127.0.0.1")) ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. """ data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 9=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("socket.socket.sendto", new_callable=lambda: sendto_with_loss) def test_connect_and_serve_with_packet_loss(self, mock_sendto): """ This test ensures handshake success and stream data is successfully sent and received in the presence of packet loss (randomized 25% in each direction). """ data = b"Z" * 65536 server_configuration = QuicConfiguration( is_client=False, quic_logger=QuicLogger() ) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) run(self.run_server(configuration=server_configuration)) response = run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, quic_logger=QuicLogger() ), request=data, ) ) self.assertEqual(response, data)
tests.test_asyncio/HighLevelTest.test_connect_and_serve_with_version_negotiation
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<6>:<add> response = run(self.run_client(configuration=configuration)) <del> response = run(self.run_client("127.0.0.1", configuration=configuration))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_version_negotiation(self): <0> run(self.run_server()) <1> <2> # force version negotiation <3> configuration = QuicConfiguration(is_client=True, quic_logger=QuicLogger()) <4> configuration.supported_versions.insert(0, 0x1A2A3A4A) <5> <6> response = run(self.run_client("127.0.0.1", configuration=configuration)) <7> self.assertEqual(response, b"gnip") <8>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_server(self, configuration=None, **kwargs) run_server(configuration=None, **kwargs) at: tests.utils run(coro) ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): run(self.run_server()) + response = run(self.run_client(host="localhost")) - response = run(self.run_client("localhost")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("aioquic.quic.retry.QuicRetryTokenHandler.validate_token") def test_connect_and_serve_with_stateless_retry_bad(self, mock_validate): mock_validate.side_effect = ValueError("Decryption failed.") run(self.run_server(stateless_retry=True)) with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration(is_client=True, idle_timeout=4.0), ) ) ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry_bad_original_connection_id(self): """ If the server's transport parameters do not have the correct original_connection_id the connection fail. """ def create_protocol(*args, **kwargs): protocol = QuicConnectionProtocol(*args, **kwargs) protocol._quic._original_connection_id = None return protocol run(self.run_server(create_protocol=create_protocol, stateless_retry=True)) with self.assertRaises(ConnectionError): + run(self.run_client()) - run(self.run_client("127.0.0.1")) ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. 
""" data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 9=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("socket.socket.sendto", new_callable=lambda: sendto_with_loss) def test_connect_and_serve_with_packet_loss(self, mock_sendto): """ This test ensures handshake success and stream data is successfully sent and received in the presence of packet loss (randomized 25% in each direction). """ data = b"Z" * 65536 server_configuration = QuicConfiguration( is_client=False, quic_logger=QuicLogger() ) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) run(self.run_server(configuration=server_configuration)) response = run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, quic_logger=QuicLogger() ), request=data, ) ) self.assertEqual(response, data) ===========changed ref 10=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_ec_certificate(self): certificate, private_key = generate_ec_certificate(common_name="localhost") run( self.run_server( configuration=QuicConfiguration( certificate=certificate, private_key=private_key, is_client=False, ) ) ) response = run( self.run_client( - "127.0.0.1", cadata=certificate.public_bytes(serialization.Encoding.PEM), cafile=None, ) ) self.assertEqual(response, b"gnip") ===========changed ref 11=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_session_ticket(self): # start server client_ticket = None store = SessionTicketStore() def save_ticket(t): nonlocal client_ticket client_ticket = t run( self.run_server( session_ticket_fetcher=store.pop, session_ticket_handler=store.add ) ) # first request - response = run( + response = run(self.run_client(session_ticket_handler=save_ticket),) - self.run_client("127.0.0.1", session_ticket_handler=save_ticket), - ) self.assertEqual(response, b"gnip") self.assertIsNotNone(client_ticket) # second request run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, session_ticket=client_ticket ), ) ) self.assertEqual(response, b"gnip")
tests.test_asyncio/HighLevelTest.test_connect_timeout
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<3>:<del> "127.0.0.1",
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_timeout(self): <0> with self.assertRaises(ConnectionError): <1> run( <2> self.run_client( <3> "127.0.0.1", <4> port=4400, <5> configuration=QuicConfiguration(is_client=True, idle_timeout=5), <6> ) <7> ) <8>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_client(self, host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) run_client(host=None, port=None, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs) at: tests.test_asyncio.HighLevelTest.test_connect_and_serve_with_version_negotiation response = run(self.run_client(configuration=configuration)) at: tests.utils run(coro) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None assertRaises(expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], msg: Any=...) -> _AssertRaisesContext[_E] assertRaises(expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], callable: Callable[..., Any], *args: Any, **kwargs: Any) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_client( self, + host=None, - host, + port=None, - port=4433, cadata=None, cafile=SERVER_CACERTFILE, configuration=None, request=b"ping", **kwargs ): + if host is None: + host = self.server_host + if port is None: + port = self.server_port if configuration is None: configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cadata=cadata, cafile=cafile) async with connect(host, port, configuration=configuration, **kwargs) as client: # waiting for connected when connected returns immediately await client.wait_connected() reader, writer = await client.create_stream() self.assertEqual(writer.can_write_eof(), True) self.assertEqual(writer.get_extra_info("stream_id"), 0) writer.write(request) writer.write_eof() response = await reader.read() # waiting for closed when closed returns immediately await client.wait_closed() return response ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): run(self.run_server()) + response = run(self.run_client(host="localhost")) - response = run(self.run_client("localhost")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("aioquic.quic.retry.QuicRetryTokenHandler.validate_token") def test_connect_and_serve_with_stateless_retry_bad(self, mock_validate): mock_validate.side_effect = ValueError("Decryption failed.") run(self.run_server(stateless_retry=True)) with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration(is_client=True, idle_timeout=4.0), ) ) ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_version_negotiation(self): run(self.run_server()) # force version negotiation configuration = QuicConfiguration(is_client=True, quic_logger=QuicLogger()) configuration.supported_versions.insert(0, 0x1A2A3A4A) + response = run(self.run_client(configuration=configuration)) - response = run(self.run_client("127.0.0.1", configuration=configuration)) self.assertEqual(response, b"gnip") ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def 
test_connect_and_serve_with_stateless_retry_bad_original_connection_id(self): """ If the server's transport parameters do not have the correct original_connection_id the connection fail. """ def create_protocol(*args, **kwargs): protocol = QuicConnectionProtocol(*args, **kwargs) protocol._quic._original_connection_id = None return protocol run(self.run_server(create_protocol=create_protocol, stateless_retry=True)) with self.assertRaises(ConnectionError): + run(self.run_client()) - run(self.run_client("127.0.0.1")) ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. """ data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 9=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 10=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server
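Note: the records above and below all apply the same "[tests] use IPv6 loopback address" refactor: run_server() now binds the IPv6 wildcard address on port 4433, and run_client() falls back to self.server_host / self.server_port instead of a hard-coded "127.0.0.1". A minimal standalone sketch of that pattern, using only the aioquic calls already visible in these hunks (serve, connect, QuicConfiguration), is shown below; the certificate paths are placeholders, and the sketch assumes the certificate/CA pair is valid for the address being dialled.

import asyncio

from aioquic.asyncio import connect, serve
from aioquic.quic.configuration import QuicConfiguration

# Placeholder fixture paths -- not the repository's actual test certificates.
CERTFILE = "ssl_cert.pem"
KEYFILE = "ssl_key.pem"
CACERTFILE = "pycacert.pem"


async def main() -> None:
    # Server side: bind the IPv6 wildcard address, as run_server() now does.
    server_configuration = QuicConfiguration(is_client=False)
    server_configuration.load_cert_chain(CERTFILE, KEYFILE)
    await serve(host="::", port=4433, configuration=server_configuration)

    # Client side: dial the IPv6 loopback address, the new default host.
    client_configuration = QuicConfiguration(is_client=True)
    client_configuration.load_verify_locations(cafile=CACERTFILE)
    async with connect("::1", 4433, configuration=client_configuration) as client:
        await client.ping()


asyncio.run(main())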
tests.test_asyncio/HighLevelTest.test_connect_timeout_no_wait_connected
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<0>:<add> async def run_client_no_wait_connected(configuration): <del> async def run_client_no_wait_connected(host, port, configuration): <3>:<add> self.server_host, <add> 4400, <add> configuration=configuration, <add> wait_connected=False, <del> host, port, configuration=configuration, wait_connected=False <10>:<del> "127.0.0.1", <11>:<del> port=4400,
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_timeout_no_wait_connected(self): <0> async def run_client_no_wait_connected(host, port, configuration): <1> configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <2> async with connect( <3> host, port, configuration=configuration, wait_connected=False <4> ) as client: <5> await client.ping() <6> <7> with self.assertRaises(ConnectionError): <8> run( <9> run_client_no_wait_connected( <10> "127.0.0.1", <11> port=4400, <12> configuration=QuicConfiguration(is_client=True, idle_timeout=5), <13> ) <14> ) <15>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest.setUp self.server_host = "::1" at: tests.utils run(coro) SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") at: unittest.case.TestCase assertRaises(expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], msg: Any=...) -> _AssertRaisesContext[_E] assertRaises(expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], callable: Callable[..., Any], *args: Any, **kwargs: Any) -> None ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_timeout(self): with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", port=4400, configuration=QuicConfiguration(is_client=True, idle_timeout=5), ) ) ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): run(self.run_server()) + response = run(self.run_client(host="localhost")) - response = run(self.run_client("localhost")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("aioquic.quic.retry.QuicRetryTokenHandler.validate_token") def test_connect_and_serve_with_stateless_retry_bad(self, mock_validate): mock_validate.side_effect = ValueError("Decryption failed.") run(self.run_server(stateless_retry=True)) with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration(is_client=True, idle_timeout=4.0), ) ) ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_version_negotiation(self): run(self.run_server()) # force version negotiation configuration = QuicConfiguration(is_client=True, quic_logger=QuicLogger()) configuration.supported_versions.insert(0, 0x1A2A3A4A) + response = run(self.run_client(configuration=configuration)) - response = run(self.run_client("127.0.0.1", configuration=configuration)) self.assertEqual(response, b"gnip") ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry_bad_original_connection_id(self): """ If the server's transport parameters do not have the correct original_connection_id the connection fail. """ def create_protocol(*args, **kwargs): protocol = QuicConnectionProtocol(*args, **kwargs) protocol._quic._original_connection_id = None return protocol run(self.run_server(create_protocol=create_protocol, stateless_retry=True)) with self.assertRaises(ConnectionError): + run(self.run_client()) - run(self.run_client("127.0.0.1")) ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. 
""" data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 9=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 10=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 11=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("socket.socket.sendto", new_callable=lambda: sendto_with_loss) def test_connect_and_serve_with_packet_loss(self, mock_sendto): """ This test ensures handshake success and stream data is successfully sent and received in the presence of packet loss (randomized 25% in each direction). """ data = b"Z" * 65536 server_configuration = QuicConfiguration( is_client=False, quic_logger=QuicLogger() ) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) run(self.run_server(configuration=server_configuration)) response = run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, quic_logger=QuicLogger() ), request=data, ) ) self.assertEqual(response, data)
tests.test_asyncio/HighLevelTest.test_change_connection_id
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<0>:<add> async def run_client_change_connection_id(): <del> async def run_client_change_connection_id(host, port=4433): <3>:<add> async with connect( <add> self.server_host, self.server_port, configuration=configuration <add> ) as client: <del> async with connect(host, port, configuration=configuration) as client: <9>:<add> run(run_client_change_connection_id()) <del> run(run_client_change_connection_id("127.0.0.1"))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_change_connection_id(self): <0> async def run_client_change_connection_id(host, port=4433): <1> configuration = QuicConfiguration(is_client=True) <2> configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <3> async with connect(host, port, configuration=configuration) as client: <4> await client.ping() <5> client.change_connection_id() <6> await client.ping() <7> <8> run(self.run_server()) <9> run(run_client_change_connection_id("127.0.0.1")) <10>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest.setUp self.server_host = "::1" self.server_port = 4433 at: tests.utils SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_timeout(self): with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", port=4400, configuration=QuicConfiguration(is_client=True, idle_timeout=5), ) ) ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): run(self.run_server()) + response = run(self.run_client(host="localhost")) - response = run(self.run_client("localhost")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("aioquic.quic.retry.QuicRetryTokenHandler.validate_token") def test_connect_and_serve_with_stateless_retry_bad(self, mock_validate): mock_validate.side_effect = ValueError("Decryption failed.") run(self.run_server(stateless_retry=True)) with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration(is_client=True, idle_timeout=4.0), ) ) ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_version_negotiation(self): run(self.run_server()) # force version negotiation configuration = QuicConfiguration(is_client=True, quic_logger=QuicLogger()) configuration.supported_versions.insert(0, 0x1A2A3A4A) + response = run(self.run_client(configuration=configuration)) - response = run(self.run_client("127.0.0.1", configuration=configuration)) self.assertEqual(response, b"gnip") ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_timeout_no_wait_connected(self): + async def run_client_no_wait_connected(configuration): - async def run_client_no_wait_connected(host, port, configuration): configuration.load_verify_locations(cafile=SERVER_CACERTFILE) async with connect( + self.server_host, + 4400, + configuration=configuration, + wait_connected=False, - host, port, configuration=configuration, wait_connected=False ) as client: await client.ping() with self.assertRaises(ConnectionError): run( run_client_no_wait_connected( - "127.0.0.1", - port=4400, configuration=QuicConfiguration(is_client=True, idle_timeout=5), ) ) ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry_bad_original_connection_id(self): """ If the server's transport parameters do not have the correct original_connection_id the connection fail. 
""" def create_protocol(*args, **kwargs): protocol = QuicConnectionProtocol(*args, **kwargs) protocol._quic._original_connection_id = None return protocol run(self.run_server(create_protocol=create_protocol, stateless_retry=True)) with self.assertRaises(ConnectionError): + run(self.run_client()) - run(self.run_client("127.0.0.1")) ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. """ data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 9=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433 ===========changed ref 10=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_without_client_configuration(self): + async def run_client_without_config(): - async def run_client_without_config(host, port=4433): + async with connect(self.server_host, self.server_port) as client: - async with connect(host, port) as client: await client.ping() run(self.run_server()) with self.assertRaises(ConnectionError): + run(run_client_without_config()) - run(run_client_without_config("127.0.0.1")) ===========changed ref 11=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 12=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("socket.socket.sendto", new_callable=lambda: sendto_with_loss) def test_connect_and_serve_with_packet_loss(self, mock_sendto): """ This test ensures handshake success and stream data is successfully sent and received in the presence of packet loss (randomized 25% in each direction). """ data = b"Z" * 65536 server_configuration = QuicConfiguration( is_client=False, quic_logger=QuicLogger() ) server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) run(self.run_server(configuration=server_configuration)) response = run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration( is_client=True, quic_logger=QuicLogger() ), request=data, ) ) self.assertEqual(response, data)
tests.test_asyncio/HighLevelTest.test_key_update
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<0>:<add> async def run_client_key_update(): <del> async def run_client_key_update(host, port=4433): <3>:<add> async with connect( <add> self.server_host, self.server_port, configuration=configuration <add> ) as client: <del> async with connect(host, port, configuration=configuration) as client: <9>:<add> run(run_client_key_update()) <del> run(run_client_key_update("127.0.0.1"))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_key_update(self): <0> async def run_client_key_update(host, port=4433): <1> configuration = QuicConfiguration(is_client=True) <2> configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <3> async with connect(host, port, configuration=configuration) as client: <4> await client.ping() <5> client.request_key_update() <6> await client.ping() <7> <8> run(self.run_server()) <9> run(run_client_key_update("127.0.0.1")) <10>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_server(self, configuration=None, **kwargs) run_server(configuration=None, **kwargs) at: tests.test_asyncio.HighLevelTest.setUp self.server_host = "::1" self.server_port = 4433 at: tests.test_asyncio.HighLevelTest.test_change_connection_id run_client_change_connection_id() at: tests.utils run(coro) SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_timeout(self): with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", port=4400, configuration=QuicConfiguration(is_client=True, idle_timeout=5), ) ) ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): run(self.run_server()) + response = run(self.run_client(host="localhost")) - response = run(self.run_client("localhost")) self.assertEqual(response, b"gnip") ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("aioquic.quic.retry.QuicRetryTokenHandler.validate_token") def test_connect_and_serve_with_stateless_retry_bad(self, mock_validate): mock_validate.side_effect = ValueError("Decryption failed.") run(self.run_server(stateless_retry=True)) with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration(is_client=True, idle_timeout=4.0), ) ) ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_version_negotiation(self): run(self.run_server()) # force version negotiation configuration = QuicConfiguration(is_client=True, quic_logger=QuicLogger()) configuration.supported_versions.insert(0, 0x1A2A3A4A) + response = run(self.run_client(configuration=configuration)) - response = run(self.run_client("127.0.0.1", configuration=configuration)) self.assertEqual(response, b"gnip") ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_change_connection_id(self): + async def run_client_change_connection_id(): - async def run_client_change_connection_id(host, port=4433): configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cafile=SERVER_CACERTFILE) + async with connect( + self.server_host, self.server_port, configuration=configuration + ) as client: - async with connect(host, port, configuration=configuration) as client: await client.ping() client.change_connection_id() await client.ping() run(self.run_server()) + run(run_client_change_connection_id()) - run(run_client_change_connection_id("127.0.0.1")) ===========changed ref 7=========== # module: tests.test_asyncio 
class HighLevelTest(TestCase): def test_connect_timeout_no_wait_connected(self): + async def run_client_no_wait_connected(configuration): - async def run_client_no_wait_connected(host, port, configuration): configuration.load_verify_locations(cafile=SERVER_CACERTFILE) async with connect( + self.server_host, + 4400, + configuration=configuration, + wait_connected=False, - host, port, configuration=configuration, wait_connected=False ) as client: await client.ping() with self.assertRaises(ConnectionError): run( run_client_no_wait_connected( - "127.0.0.1", - port=4400, configuration=QuicConfiguration(is_client=True, idle_timeout=5), ) ) ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry_bad_original_connection_id(self): """ If the server's transport parameters do not have the correct original_connection_id the connection fail. """ def create_protocol(*args, **kwargs): protocol = QuicConnectionProtocol(*args, **kwargs) protocol._quic._original_connection_id = None return protocol run(self.run_server(create_protocol=create_protocol, stateless_retry=True)) with self.assertRaises(ConnectionError): + run(self.run_client()) - run(self.run_client("127.0.0.1")) ===========changed ref 9=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 10=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. """ data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data) ===========changed ref 11=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def setUp(self): self.server = None + self.server_host = "::1" + self.server_port = 4433
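Note: the "unchanged ref" blocks in these records repeatedly point at tests.utils.run(coro) without reproducing it. For readers following the hunks, a minimal stand-in of the expected shape is sketched below; it is an assumption, not the repository's exact helper, which may for instance create a fresh event loop per call.

import asyncio


def run(coro):
    # Stand-in for the tests.utils.run() helper referenced above: drive a
    # coroutine to completion on the current event loop and return its result.
    return asyncio.get_event_loop().run_until_complete(coro)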
tests.test_asyncio/HighLevelTest.test_ping
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<0>:<add> async def run_client_ping(): <del> async def run_client_ping(host, port=4433): <3>:<add> async with connect( <add> self.server_host, self.server_port, configuration=configuration <add> ) as client: <del> async with connect(host, port, configuration=configuration) as client: <8>:<add> run(run_client_ping()) <del> run(run_client_ping("127.0.0.1"))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_ping(self): <0> async def run_client_ping(host, port=4433): <1> configuration = QuicConfiguration(is_client=True) <2> configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <3> async with connect(host, port, configuration=configuration) as client: <4> await client.ping() <5> await client.ping() <6> <7> run(self.run_server()) <8> run(run_client_ping("127.0.0.1")) <9>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_server(self, configuration=None, **kwargs) run_server(configuration=None, **kwargs) at: tests.test_asyncio.HighLevelTest.test_key_update run_client_key_update() at: tests.utils run(coro) SERVER_CACERTFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_timeout(self): with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", port=4400, configuration=QuicConfiguration(is_client=True, idle_timeout=5), ) ) ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): run(self.run_server()) + response = run(self.run_client(host="localhost")) - response = run(self.run_client("localhost")) self.assertEqual(response, b"gnip") ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("aioquic.quic.retry.QuicRetryTokenHandler.validate_token") def test_connect_and_serve_with_stateless_retry_bad(self, mock_validate): mock_validate.side_effect = ValueError("Decryption failed.") run(self.run_server(stateless_retry=True)) with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration(is_client=True, idle_timeout=4.0), ) ) ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_version_negotiation(self): run(self.run_server()) # force version negotiation configuration = QuicConfiguration(is_client=True, quic_logger=QuicLogger()) configuration.supported_versions.insert(0, 0x1A2A3A4A) + response = run(self.run_client(configuration=configuration)) - response = run(self.run_client("127.0.0.1", configuration=configuration)) self.assertEqual(response, b"gnip") ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_key_update(self): + async def run_client_key_update(): - async def run_client_key_update(host, port=4433): configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cafile=SERVER_CACERTFILE) + async with connect( + self.server_host, self.server_port, configuration=configuration + ) as client: - async with connect(host, port, configuration=configuration) as client: await client.ping() client.request_key_update() await client.ping() run(self.run_server()) + run(run_client_key_update()) - run(run_client_key_update("127.0.0.1")) ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_change_connection_id(self): + async def run_client_change_connection_id(): - async def 
run_client_change_connection_id(host, port=4433): configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cafile=SERVER_CACERTFILE) + async with connect( + self.server_host, self.server_port, configuration=configuration + ) as client: - async with connect(host, port, configuration=configuration) as client: await client.ping() client.change_connection_id() await client.ping() run(self.run_server()) + run(run_client_change_connection_id()) - run(run_client_change_connection_id("127.0.0.1")) ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_timeout_no_wait_connected(self): + async def run_client_no_wait_connected(configuration): - async def run_client_no_wait_connected(host, port, configuration): configuration.load_verify_locations(cafile=SERVER_CACERTFILE) async with connect( + self.server_host, + 4400, + configuration=configuration, + wait_connected=False, - host, port, configuration=configuration, wait_connected=False ) as client: await client.ping() with self.assertRaises(ConnectionError): run( run_client_no_wait_connected( - "127.0.0.1", - port=4400, configuration=QuicConfiguration(is_client=True, idle_timeout=5), ) ) ===========changed ref 9=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry_bad_original_connection_id(self): """ If the server's transport parameters do not have the correct original_connection_id the connection fail. """ def create_protocol(*args, **kwargs): protocol = QuicConnectionProtocol(*args, **kwargs) protocol._quic._original_connection_id = None return protocol run(self.run_server(create_protocol=create_protocol, stateless_retry=True)) with self.assertRaises(ConnectionError): + run(self.run_client()) - run(self.run_client("127.0.0.1")) ===========changed ref 10=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 11=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_large(self): """ Transfer enough data to require raising MAX_DATA and MAX_STREAM_DATA. """ data = b"Z" * 2097152 run(self.run_server()) + response = run(self.run_client(request=data)) - response = run(self.run_client("127.0.0.1", request=data)) self.assertEqual(response, data)
tests.test_asyncio/HighLevelTest.test_ping_parallel
Modified
aiortc~aioquic
f2c2c2eff4a05308338ceb34b29b8f3d6d4d7447
[tests] use IPv6 loopback address
<0>:<add> async def run_client_ping(): <del> async def run_client_ping(host, port=4433): <3>:<add> async with connect( <add> self.server_host, self.server_port, configuration=configuration <add> ) as client: <del> async with connect(host, port, configuration=configuration) as client: <8>:<add> run(run_client_ping()) <del> run(run_client_ping("127.0.0.1"))
# module: tests.test_asyncio class HighLevelTest(TestCase): def test_ping_parallel(self): <0> async def run_client_ping(host, port=4433): <1> configuration = QuicConfiguration(is_client=True) <2> configuration.load_verify_locations(cafile=SERVER_CACERTFILE) <3> async with connect(host, port, configuration=configuration) as client: <4> coros = [client.ping() for x in range(16)] <5> await asyncio.gather(*coros) <6> <7> run(self.run_server()) <8> run(run_client_ping("127.0.0.1")) <9>
===========unchanged ref 0=========== at: tests.test_asyncio.HighLevelTest run_server(self, configuration=None, **kwargs) run_server(configuration=None, **kwargs) at: tests.test_asyncio.HighLevelTest.setUp self.server_host = "::1" self.server_port = 4433 at: tests.test_asyncio.HighLevelTest.test_ping run_client_ping() at: tests.utils run(coro) ===========changed ref 0=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def run_server(self, configuration=None, **kwargs): if configuration is None: configuration = QuicConfiguration(is_client=False) configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE) self.server = await serve( host="::", + port=4433, - port="4433", configuration=configuration, stream_handler=handle_stream, **kwargs ) return self.server ===========changed ref 1=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_timeout(self): with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", port=4400, configuration=QuicConfiguration(is_client=True, idle_timeout=5), ) ) ===========changed ref 2=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry(self): run(self.run_server()) + response = run(self.run_client()) - response = run(self.run_client("127.0.0.1")) self.assertEqual(response, b"gnip") ===========changed ref 3=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_sni(self): run(self.run_server()) + response = run(self.run_client(host="localhost")) - response = run(self.run_client("localhost")) self.assertEqual(response, b"gnip") ===========changed ref 4=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_ping(self): + async def run_client_ping(): - async def run_client_ping(host, port=4433): configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cafile=SERVER_CACERTFILE) + async with connect( + self.server_host, self.server_port, configuration=configuration + ) as client: - async with connect(host, port, configuration=configuration) as client: await client.ping() await client.ping() run(self.run_server()) + run(run_client_ping()) - run(run_client_ping("127.0.0.1")) ===========changed ref 5=========== # module: tests.test_asyncio class HighLevelTest(TestCase): @patch("aioquic.quic.retry.QuicRetryTokenHandler.validate_token") def test_connect_and_serve_with_stateless_retry_bad(self, mock_validate): mock_validate.side_effect = ValueError("Decryption failed.") run(self.run_server(stateless_retry=True)) with self.assertRaises(ConnectionError): run( self.run_client( - "127.0.0.1", configuration=QuicConfiguration(is_client=True, idle_timeout=4.0), ) ) ===========changed ref 6=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_version_negotiation(self): run(self.run_server()) # force version negotiation configuration = QuicConfiguration(is_client=True, quic_logger=QuicLogger()) configuration.supported_versions.insert(0, 0x1A2A3A4A) + response = run(self.run_client(configuration=configuration)) - response = run(self.run_client("127.0.0.1", configuration=configuration)) self.assertEqual(response, b"gnip") ===========changed ref 7=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_key_update(self): + async def run_client_key_update(): - async def run_client_key_update(host, port=4433): configuration = QuicConfiguration(is_client=True) 
configuration.load_verify_locations(cafile=SERVER_CACERTFILE) + async with connect( + self.server_host, self.server_port, configuration=configuration + ) as client: - async with connect(host, port, configuration=configuration) as client: await client.ping() client.request_key_update() await client.ping() run(self.run_server()) + run(run_client_key_update()) - run(run_client_key_update("127.0.0.1")) ===========changed ref 8=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_change_connection_id(self): + async def run_client_change_connection_id(): - async def run_client_change_connection_id(host, port=4433): configuration = QuicConfiguration(is_client=True) configuration.load_verify_locations(cafile=SERVER_CACERTFILE) + async with connect( + self.server_host, self.server_port, configuration=configuration + ) as client: - async with connect(host, port, configuration=configuration) as client: await client.ping() client.change_connection_id() await client.ping() run(self.run_server()) + run(run_client_change_connection_id()) - run(run_client_change_connection_id("127.0.0.1")) ===========changed ref 9=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_timeout_no_wait_connected(self): + async def run_client_no_wait_connected(configuration): - async def run_client_no_wait_connected(host, port, configuration): configuration.load_verify_locations(cafile=SERVER_CACERTFILE) async with connect( + self.server_host, + 4400, + configuration=configuration, + wait_connected=False, - host, port, configuration=configuration, wait_connected=False ) as client: await client.ping() with self.assertRaises(ConnectionError): run( run_client_no_wait_connected( - "127.0.0.1", - port=4400, configuration=QuicConfiguration(is_client=True, idle_timeout=5), ) ) ===========changed ref 10=========== # module: tests.test_asyncio class HighLevelTest(TestCase): def test_connect_and_serve_with_stateless_retry_bad_original_connection_id(self): """ If the server's transport parameters do not have the correct original_connection_id the connection fail. """ def create_protocol(*args, **kwargs): protocol = QuicConnectionProtocol(*args, **kwargs) protocol._quic._original_connection_id = None return protocol run(self.run_server(create_protocol=create_protocol, stateless_retry=True)) with self.assertRaises(ConnectionError): + run(self.run_client()) - run(self.run_client("127.0.0.1"))
aioquic.tls/verify_certificate
Modified
aiortc~aioquic
6fb4999fd6adf92135915f869d0f20cf1e63ccb8
[tls] only load default locations if they are defined
# module: aioquic.tls def verify_certificate( certificate: x509.Certificate, chain: List[x509.Certificate] = [], server_name: Optional[str] = None, cadata: Optional[bytes] = None, cafile: Optional[str] = None, capath: Optional[str] = None, ) -> None: <0> # verify dates <1> now = utcnow() <2> if now < certificate.not_valid_before: <3> raise AlertCertificateExpired("Certificate is not valid yet") <4> if now > certificate.not_valid_after: <5> raise AlertCertificateExpired("Certificate is no longer valid") <6> <7> # verify subject <8> if server_name is not None: <9> subject = [] <10> subjectAltName: List[Tuple[str, str]] = [] <11> for attr in certificate.subject: <12> if attr.oid == x509.NameOID.COMMON_NAME: <13> subject.append((("commonName", attr.value),)) <14> for ext in certificate.extensions: <15> if isinstance(ext.value, x509.SubjectAlternativeName): <16> for name in ext.value: <17> if isinstance(name, x509.DNSName): <18> subjectAltName.append(("DNS", name.value)) <19> <20> try: <21> ssl.match_hostname( <22> {"subject": tuple(subject), "subjectAltName": tuple(subjectAltName)}, <23> server_name, <24> ) <25> except ssl.CertificateError as exc: <26> raise AlertBadCertificate("\n".join(exc.args)) from exc <27> <28> # verify certificate chain <29> store = lib.X509_STORE_new() <30> openssl_assert(store != ffi.NULL, "X509_store_new") <31> store = ffi.gc(store, lib.X509_STORE_free) <32> <33> # load default CAs <34> openssl_assert( <35> lib.X509_STORE_set_default_paths(store), "X509_STORE_set_default_paths" <36> ) <37> paths</s>
===========below chunk 0=========== # module: aioquic.tls def verify_certificate( certificate: x509.Certificate, chain: List[x509.Certificate] = [], server_name: Optional[str] = None, cadata: Optional[bytes] = None, cafile: Optional[str] = None, capath: Optional[str] = None, ) -> None: # offset: 1 openssl_assert( lib.X509_STORE_load_locations( store, openssl_encode_path(paths.cafile), openssl_encode_path(paths.capath) ), "X509_STORE_load_locations", ) # load extra CAs if cadata is not None: for cert in load_pem_x509_certificates(cadata): openssl_assert( lib.X509_STORE_add_cert(store, cert_x509_ptr(cert)), "X509_STORE_add_cert", ) if cafile is not None or capath is not None: openssl_assert( lib.X509_STORE_load_locations( store, openssl_encode_path(cafile), openssl_encode_path(capath) ), "X509_STORE_load_locations", ) chain_stack = lib.sk_X509_new_null() openssl_assert(chain_stack != ffi.NULL, "sk_X509_new_null") chain_stack = ffi.gc(chain_stack, lib.sk_X509_free) for cert in chain: openssl_assert( lib.sk_X509_push(chain_stack, cert_x509_ptr(cert)), "sk_X509_push" ) store_ctx = lib.X509_STORE_CTX_new() openssl_assert(store_ctx != ffi.NULL, "X509_STORE_CTX_new") store_ctx = ffi.gc(store_ctx, lib.X509_STORE_CTX_free) openssl_assert( lib</s> ===========below chunk 1=========== # module: aioquic.tls def verify_certificate( certificate: x509.Certificate, chain: List[x509.Certificate] = [], server_name: Optional[str] = None, cadata: Optional[bytes] = None, cafile: Optional[str] = None, capath: Optional[str] = None, ) -> None: # offset: 2 <s> = ffi.gc(store_ctx, lib.X509_STORE_CTX_free) openssl_assert( lib.X509_STORE_CTX_init( store_ctx, store, cert_x509_ptr(certificate), chain_stack ), "X509_STORE_CTX_init", ) res = lib.X509_verify_cert(store_ctx) if not res: err = lib.X509_STORE_CTX_get_error(store_ctx) err_str = openssl_decode_string(lib.X509_verify_cert_error_string(err)) raise AlertBadCertificate(err_str) ===========unchanged ref 0=========== at: aioquic.tls ffi = binding.ffi lib = binding.lib utcnow = datetime.datetime.utcnow AlertBadCertificate(*args: object) AlertCertificateExpired(*args: object) load_pem_x509_certificates(data: bytes) -> List[x509.Certificate] openssl_assert(ok: bool, func: str) -> None openssl_encode_path(s: Optional[str]) -> Any cert_x509_ptr(certificate: x509.Certificate) -> Any at: ssl CertificateError = SSLCertVerificationError match_hostname(cert: _PeerCertRetType, hostname: str) -> None get_default_verify_paths() -> DefaultVerifyPaths at: typing Tuple = _TupleType(tuple, -1, inst=False, name='Tuple') List = _alias(list, 1, inst=False, name='List')
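Note: the verify_certificate hunk above is cut off before the part that implements the commit message "[tls] only load default locations if they are defined", so the guard itself is not visible here. The intent implied by the message is to skip X509_STORE_load_locations for the default CA locations when ssl.get_default_verify_paths() reports neither a cafile nor a capath (OpenSSL rejects a load call with two NULL locations). The self-contained sketch below illustrates that guard; the helper name default_ca_locations is invented for illustration and does not appear in the diff.

import ssl
from typing import Optional, Tuple


def default_ca_locations() -> Optional[Tuple[Optional[str], Optional[str]]]:
    # Return (cafile, capath) only when the platform actually defines at least
    # one default CA location; return None when both are undefined, so the
    # caller can skip X509_STORE_load_locations entirely in that case.
    paths = ssl.get_default_verify_paths()
    if paths.cafile is None and paths.capath is None:
        return None
    return paths.cafile, paths.capath


if __name__ == "__main__":
    print(default_ca_locations())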
tests.test_h3/H3ConnectionTest.test_request
Modified
aiortc~aioquic
fd084338b0d72d996d1be928d81f8beb3b55c3f9
[tests] disable packet pacing for H0 / H3 tests
<0>:<del> with client_and_server( <1>:<del> client_options={"alpn_protocols": H3_ALPN}, <2>:<del> server_options={"alpn_protocols": H3_ALPN}, <3>:<add> with h3_client_and_server() as (quic_client, quic_server): <del> ) as (quic_client, quic_server):
# module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request(self): <0> with client_and_server( <1> client_options={"alpn_protocols": H3_ALPN}, <2> server_options={"alpn_protocols": H3_ALPN}, <3> ) as (quic_client, quic_server): <4> h3_client = H3Connection(quic_client) <5> h3_server = H3Connection(quic_server) <6> <7> # make first request <8> self._make_request(h3_client, h3_server) <9> <10> # make second request <11> self._make_request(h3_client, h3_server) <12> <13> # make third request -> dynamic table <14> self._make_request(h3_client, h3_server) <15>
===========unchanged ref 0=========== at: tests.test_h3 h3_client_and_server() at: tests.test_h3.FakeQuicConnection.__init__ self.closed = None at: tests.test_h3.FakeQuicConnection.close self.closed = (error_code, reason_phrase) at: tests.test_h3.H3ConnectionTest maxDiff = None _make_request(h3_client, h3_server) at: tests.test_h3.H3ConnectionTest.test_handle_request_frame_wrong_frame_type quic_server = FakeQuicConnection( configuration=QuicConfiguration(is_client=False) ) at: unittest.case.TestCase failureException: Type[BaseException] longMessage: bool maxDiff: Optional[int] _testMethodName: str _testMethodDoc: str assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_h3 + def h3_client_and_server(): + return client_and_server( + client_options={"alpn_protocols": H3_ALPN}, + client_patch=disable_packet_pacing, + server_options={"alpn_protocols": H3_ALPN}, + server_patch=disable_packet_pacing, + ) +
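Note: the new h3_client_and_server() helper above passes client_patch=disable_packet_pacing and server_patch=disable_packet_pacing, but disable_packet_pacing itself is not shown in these records. One plausible shape for it is sketched below; it assumes the pacer class is importable from aioquic.quic.recovery and that the connection keeps its loss-recovery state in the private attributes _loss and _loss._pacer, all of which may differ between aioquic versions.

from aioquic.quic.recovery import QuicPacketPacer


def disable_packet_pacing(connection):
    # Hypothetical patch: replace the connection's pacer with one that never
    # asks the transfer loop to wait, so test traffic is sent immediately.
    class DummyPacketPacer(QuicPacketPacer):
        def next_send_time(self, now):
            return None

    connection._loss._pacer = DummyPacketPacer()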
tests.test_h3/H3ConnectionTest.test_request_headers_only
Modified
aiortc~aioquic
fd084338b0d72d996d1be928d81f8beb3b55c3f9
[tests] disable packet pacing for H0 / H3 tests
<0>:<del> with client_and_server( <1>:<del> client_options={"alpn_protocols": H3_ALPN}, <2>:<del> server_options={"alpn_protocols": H3_ALPN}, <3>:<add> with h3_client_and_server() as (quic_client, quic_server): <del> ) as (quic_client, quic_server):
# module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_headers_only(self): <0> with client_and_server( <1> client_options={"alpn_protocols": H3_ALPN}, <2> server_options={"alpn_protocols": H3_ALPN}, <3> ) as (quic_client, quic_server): <4> h3_client = H3Connection(quic_client) <5> h3_server = H3Connection(quic_server) <6> <7> # send request <8> stream_id = quic_client.get_next_available_stream_id() <9> h3_client.send_headers( <10> stream_id=stream_id, <11> headers=[ <12> (b":method", b"HEAD"), <13> (b":scheme", b"https"), <14> (b":authority", b"localhost"), <15> (b":path", b"/"), <16> (b"x-foo", b"client"), <17> ], <18> end_stream=True, <19> ) <20> <21> # receive request <22> events = h3_transfer(quic_client, h3_server) <23> self.assertEqual( <24> events, <25> [ <26> HeadersReceived( <27> headers=[ <28> (b":method", b"HEAD"), <29> (b":scheme", b"https"), <30> (b":authority", b"localhost"), <31> (b":path", b"/"), <32> (b"x-foo", b"client"), <33> ], <34> stream_id=stream_id, <35> stream_ended=True, <36> ) <37> ], <38> ) <39> <40> # send response <41> h3_server.send_headers( <42> stream_id=stream_id, <43> headers=[ <44> (b":status", b"200"), <45> (b"content-type", b"text/html; charset=utf-8"), <46> (b"x</s>
===========below chunk 0=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_headers_only(self): # offset: 1 ], end_stream=True, ) # receive response events = h3_transfer(quic_server, h3_client) self.assertEqual( events, [ HeadersReceived( headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), (b"x-foo", b"server"), ], stream_id=stream_id, stream_ended=True, ) ], ) ===========unchanged ref 0=========== at: tests.test_h3 h3_client_and_server() h3_transfer(quic_sender, h3_receiver) at: tests.test_h3.H3ConnectionTest _make_request(h3_client, h3_server) at: tests.test_h3.H3ConnectionTest.test_request h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_h3 + def h3_client_and_server(): + return client_and_server( + client_options={"alpn_protocols": H3_ALPN}, + client_patch=disable_packet_pacing, + server_options={"alpn_protocols": H3_ALPN}, + server_patch=disable_packet_pacing, + ) + ===========changed ref 1=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # make first request self._make_request(h3_client, h3_server) # make second request self._make_request(h3_client, h3_server) # make third request -> dynamic table self._make_request(h3_client, h3_server)
tests.test_h3/H3ConnectionTest.test_request_with_server_push_max_push_id
Modified
aiortc~aioquic
fd084338b0d72d996d1be928d81f8beb3b55c3f9
[tests] disable packet pacing for H0 / H3 tests
<0>:<del> with client_and_server( <1>:<del> client_options={"alpn_protocols": H3_ALPN}, <2>:<del> server_options={"alpn_protocols": H3_ALPN}, <3>:<add> with h3_client_and_server() as (quic_client, quic_server): <del> ) as (quic_client, quic_server):
# module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_with_server_push_max_push_id(self): <0> with client_and_server( <1> client_options={"alpn_protocols": H3_ALPN}, <2> server_options={"alpn_protocols": H3_ALPN}, <3> ) as (quic_client, quic_server): <4> h3_client = H3Connection(quic_client) <5> h3_server = H3Connection(quic_server) <6> <7> # send request <8> stream_id = quic_client.get_next_available_stream_id() <9> h3_client.send_headers( <10> stream_id=stream_id, <11> headers=[ <12> (b":method", b"GET"), <13> (b":scheme", b"https"), <14> (b":authority", b"localhost"), <15> (b":path", b"/"), <16> ], <17> end_stream=True, <18> ) <19> <20> # receive request <21> events = h3_transfer(quic_client, h3_server) <22> self.assertEqual( <23> events, <24> [ <25> HeadersReceived( <26> headers=[ <27> (b":method", b"GET"), <28> (b":scheme", b"https"), <29> (b":authority", b"localhost"), <30> (b":path", b"/"), <31> ], <32> stream_id=stream_id, <33> stream_ended=True, <34> ) <35> ], <36> ) <37> <38> # send push promises <39> for i in range(0, 8): <40> h3_server.send_push_promise( <41> stream_id=stream_id, <42> headers=[ <43> (b":method", b"GET"), <44> (b":scheme", b"https"), <45> (b":authority", b"localhost"), <46> (b":path", "/{}.css</s>
===========below chunk 0=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_with_server_push_max_push_id(self): # offset: 1 ], ) # send one too many with self.assertRaises(NoAvailablePushIDError): h3_server.send_push_promise( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/8.css"), ], ) ===========unchanged ref 0=========== at: tests.test_h3 h3_client_and_server() h3_transfer(quic_sender, h3_receiver) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None assertRaises(expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], msg: Any=...) -> _AssertRaisesContext[_E] assertRaises(expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], callable: Callable[..., Any], *args: Any, **kwargs: Any) -> None ===========changed ref 0=========== # module: tests.test_h3 + def h3_client_and_server(): + return client_and_server( + client_options={"alpn_protocols": H3_ALPN}, + client_patch=disable_packet_pacing, + server_options={"alpn_protocols": H3_ALPN}, + server_patch=disable_packet_pacing, + ) + ===========changed ref 1=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # make first request self._make_request(h3_client, h3_server) # make second request self._make_request(h3_client, h3_server) # make third request -> dynamic table self._make_request(h3_client, h3_server) ===========changed ref 2=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_headers_only(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # send request stream_id = quic_client.get_next_available_stream_id() h3_client.send_headers( stream_id=stream_id, headers=[ (b":method", b"HEAD"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), (b"x-foo", b"client"), ], end_stream=True, ) # receive request events = h3_transfer(quic_client, h3_server) self.assertEqual( events, [ HeadersReceived( headers=[ (b":method", b"HEAD"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), (b"x-foo", b"client"), ], stream_id=stream_id, stream_ended=True, ) ], ) # send response h3_server.send_headers( stream_id=stream_id, headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), (b"x-foo", b"server"), ], end_stream=</s> ===========changed ref 3=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_headers_only(self): # offset: 1 <s>=utf-8"), (b"x-foo", b"server"), ], end_stream=True, ) # receive response events = h3_transfer(quic_server, h3_client) self.assertEqual( events, [ HeadersReceived( headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), (b"x-foo", b"server"), ], stream_id=stream_id, stream_ended=True, ) ], ) ===========changed ref 4=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_with_server_push(self): - with 
client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # send request stream_id = quic_client.get_next_available_stream_id() h3_client.send_headers( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], end_stream=True, ) # receive request events = h3_transfer(quic_client, h3_server) self.assertEqual( events, [ HeadersReceived( headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], stream_id=stream_id, stream_ended=True, ) ], ) # send push promises push_stream_id_css = h3_server.send_push_promise( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/app.css"), ], ) self.assertEqual(push_stream_id_css,</s>
tests.test_h3/H3ConnectionTest.test_uni_stream_grease
Modified
aiortc~aioquic
fd084338b0d72d996d1be928d81f8beb3b55c3f9
[tests] disable packet pacing for H0 / H3 tests
<0>:<del> with client_and_server(
<1>:<del> client_options={"alpn_protocols": H3_ALPN},
<2>:<del> server_options={"alpn_protocols": H3_ALPN},
<3>:<add> with h3_client_and_server() as (quic_client, quic_server):
<del> ) as (quic_client, quic_server):
# module: tests.test_h3
class H3ConnectionTest(TestCase):
    def test_uni_stream_grease(self):
<0>     with client_and_server(
<1>         client_options={"alpn_protocols": H3_ALPN},
<2>         server_options={"alpn_protocols": H3_ALPN},
<3>     ) as (quic_client, quic_server):
<4>         h3_server = H3Connection(quic_server)
<5>
<6>         quic_client.send_stream_data(
<7>             14, b"\xff\xff\xff\xff\xff\xff\xff\xfeGREASE is the word"
<8>         )
<9>         self.assertEqual(h3_transfer(quic_client, h3_server), [])
<10>
===========unchanged ref 0=========== at: tests.test_h3 h3_client_and_server() h3_transfer(quic_sender, h3_receiver) at: tests.test_h3.H3ConnectionTest.test_uni_stream_grease h3_server = H3Connection(quic_server) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_h3 + def h3_client_and_server(): + return client_and_server( + client_options={"alpn_protocols": H3_ALPN}, + client_patch=disable_packet_pacing, + server_options={"alpn_protocols": H3_ALPN}, + server_patch=disable_packet_pacing, + ) + ===========changed ref 1=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_with_server_push_max_push_id(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # send request stream_id = quic_client.get_next_available_stream_id() h3_client.send_headers( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], end_stream=True, ) # receive request events = h3_transfer(quic_client, h3_server) self.assertEqual( events, [ HeadersReceived( headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], stream_id=stream_id, stream_ended=True, ) ], ) # send push promises for i in range(0, 8): h3_server.send_push_promise( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", "/{}.css".format(i).encode("ascii")), ], ) </s> ===========changed ref 2=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_with_server_push_max_push_id(self): # offset: 1 <s>"), (b":path", "/{}.css".format(i).encode("ascii")), ], ) # send one too many with self.assertRaises(NoAvailablePushIDError): h3_server.send_push_promise( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/8.css"), ], ) ===========changed ref 3=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # make first request self._make_request(h3_client, h3_server) # make second request self._make_request(h3_client, h3_server) # make third request -> dynamic table self._make_request(h3_client, h3_server) ===========changed ref 4=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_headers_only(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # send request stream_id = quic_client.get_next_available_stream_id() h3_client.send_headers( stream_id=stream_id, headers=[ (b":method", b"HEAD"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), (b"x-foo", b"client"), ], end_stream=True, ) # receive request events = 
h3_transfer(quic_client, h3_server) self.assertEqual( events, [ HeadersReceived( headers=[ (b":method", b"HEAD"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), (b"x-foo", b"client"), ], stream_id=stream_id, stream_ended=True, ) ], ) # send response h3_server.send_headers( stream_id=stream_id, headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), (b"x-foo", b"server"), ], end_stream=</s> ===========changed ref 5=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_headers_only(self): # offset: 1 <s>=utf-8"), (b"x-foo", b"server"), ], end_stream=True, ) # receive response events = h3_transfer(quic_server, h3_client) self.assertEqual( events, [ HeadersReceived( headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), (b"x-foo", b"server"), ], stream_id=stream_id, stream_ended=True, ) ], )
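Note: the h3_client_and_server() helper shown in the changed refs passes client_patch=disable_packet_pacing and server_patch=disable_packet_pacing, but the patch function itself is not part of this excerpt. A minimal sketch of what such a patch could look like, assuming it receives a QuicConnection whose loss-recovery object exposes its pacer as _loss._pacer (an assumption about aioquic internals, not confirmed by this excerpt):

from aioquic.quic.recovery import QuicPacketPacer


def disable_packet_pacing(connection):
    # Hypothetical sketch: swap in a pacer that never asks the sender to
    # wait, so the synchronous roundtrip helper can drain all pending
    # datagrams in a single pass.
    class DummyPacketPacer(QuicPacketPacer):
        def next_send_time(self, now):
            return None

    connection._loss._pacer = DummyPacketPacer()

Presumably this is the point of the commit "[tests] disable packet pacing for H0 / H3 tests": with pacing left on, part of a transfer could be deferred to a later send time and h3_transfer() would observe an incomplete exchange.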
tests.test_h3/H3ConnectionTest.test_request_with_trailers
Modified
aiortc~aioquic
fd084338b0d72d996d1be928d81f8beb3b55c3f9
[tests] disable packet pacing for H0 / H3 tests
<0>:<del> with client_and_server(
<1>:<del> client_options={"alpn_protocols": H3_ALPN},
<2>:<del> server_options={"alpn_protocols": H3_ALPN},
<3>:<add> with h3_client_and_server() as (quic_client, quic_server):
<del> ) as (quic_client, quic_server):
# module: tests.test_h3
class H3ConnectionTest(TestCase):
    def test_request_with_trailers(self):
<0>     with client_and_server(
<1>         client_options={"alpn_protocols": H3_ALPN},
<2>         server_options={"alpn_protocols": H3_ALPN},
<3>     ) as (quic_client, quic_server):
<4>         h3_client = H3Connection(quic_client)
<5>         h3_server = H3Connection(quic_server)
<6>
<7>         # send request with trailers
<8>         stream_id = quic_client.get_next_available_stream_id()
<9>         h3_client.send_headers(
<10>            stream_id=stream_id,
<11>            headers=[
<12>                (b":method", b"GET"),
<13>                (b":scheme", b"https"),
<14>                (b":authority", b"localhost"),
<15>                (b":path", b"/"),
<16>            ],
<17>            end_stream=False,
<18>        )
<19>        h3_client.send_headers(
<20>            stream_id=stream_id,
<21>            headers=[(b"x-some-trailer", b"foo")],
<22>            end_stream=True,
<23>        )
<24>
<25>        # receive request
<26>        events = h3_transfer(quic_client, h3_server)
<27>        self.assertEqual(
<28>            events,
<29>            [
<30>                HeadersReceived(
<31>                    headers=[
<32>                        (b":method", b"GET"),
<33>                        (b":scheme", b"https"),
<34>                        (b":authority", b"localhost"),
<35>                        (b":path", b"/"),
<36>                    ],
<37>                    stream_id=stream_id,
<38>                    stream_ended=False,
<39>                ),
<40>                HeadersReceived(
<41>                    headers=[(b"x-some-trailer", b"foo")],
<42>                    stream_id=stream_id,
<43>                    stream_ended=True,
<44>                ),
<45>            ], </s>
===========below chunk 0=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_with_trailers(self): # offset: 1 # send response h3_server.send_headers( stream_id=stream_id, headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), ], end_stream=False, ) h3_server.send_data( stream_id=stream_id, data=b"<html><body>hello</body></html>", end_stream=False, ) h3_server.send_headers( stream_id=stream_id, headers=[(b"x-some-trailer", b"bar")], end_stream=True, ) # receive response events = h3_transfer(quic_server, h3_client) self.assertEqual( events, [ HeadersReceived( headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), ], stream_id=stream_id, stream_ended=False, ), DataReceived( data=b"<html><body>hello</body></html>", stream_id=stream_id, stream_ended=False, ), HeadersReceived( headers=[(b"x-some-trailer", b"bar")], stream_id=stream_id, stream_ended=True, ), ], ) ===========unchanged ref 0=========== at: tests.test_h3 h3_client_and_server() h3_transfer(quic_sender, h3_receiver) at: tests.test_h3.H3ConnectionTest.test_request_with_trailers h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_h3 + def h3_client_and_server(): + return client_and_server( + client_options={"alpn_protocols": H3_ALPN}, + client_patch=disable_packet_pacing, + server_options={"alpn_protocols": H3_ALPN}, + server_patch=disable_packet_pacing, + ) + ===========changed ref 1=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_uni_stream_grease(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_server = H3Connection(quic_server) quic_client.send_stream_data( 14, b"\xff\xff\xff\xff\xff\xff\xff\xfeGREASE is the word" ) self.assertEqual(h3_transfer(quic_client, h3_server), []) ===========changed ref 2=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_with_server_push_max_push_id(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # send request stream_id = quic_client.get_next_available_stream_id() h3_client.send_headers( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], end_stream=True, ) # receive request events = h3_transfer(quic_client, h3_server) self.assertEqual( events, [ HeadersReceived( headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], stream_id=stream_id, stream_ended=True, ) ], ) # send push promises for i in range(0, 8): h3_server.send_push_promise( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", "/{}.css".format(i).encode("ascii")), ], ) </s> ===========changed ref 3=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_with_server_push_max_push_id(self): # offset: 1 <s>"), (b":path", "/{}.css".format(i).encode("ascii")), ], ) # send one too many with 
self.assertRaises(NoAvailablePushIDError): h3_server.send_push_promise( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/8.css"), ], ) ===========changed ref 4=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # make first request self._make_request(h3_client, h3_server) # make second request self._make_request(h3_client, h3_server) # make third request -> dynamic table self._make_request(h3_client, h3_server)
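Aside: every send_headers() / send_data() call in the trailers record above ultimately reaches the wire as an HTTP/3 frame, i.e. a variable-length-integer type, a variable-length-integer payload length, then the payload (HEADERS frames carry QPACK-encoded header blocks, DATA frames carry the body). A rough sketch of that framing using aioquic's Buffer for the varint encoding (encode_h3_frame is an illustrative helper, not a library API):

from aioquic.buffer import Buffer

# RFC 9114 frame types
H3_FRAME_DATA = 0x0
H3_FRAME_HEADERS = 0x1


def encode_h3_frame(frame_type: int, payload: bytes) -> bytes:
    # type varint + length varint + payload
    buf = Buffer(capacity=len(payload) + 16)
    buf.push_uint_var(frame_type)
    buf.push_uint_var(len(payload))
    buf.push_bytes(payload)
    return buf.data


# e.g. the response body from the trailers test, framed as DATA
body = b"<html><body>hello</body></html>"
frame = encode_h3_frame(H3_FRAME_DATA, body)
assert frame[0] == 0x00 and frame[1] == len(body)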
tests.test_h3/H3ConnectionTest.test_uni_stream_type
Modified
aiortc~aioquic
fd084338b0d72d996d1be928d81f8beb3b55c3f9
[tests] disable packet pacing for H0 / H3 tests
<0>:<del> with client_and_server(
<1>:<del> client_options={"alpn_protocols": H3_ALPN},
<2>:<del> server_options={"alpn_protocols": H3_ALPN},
<3>:<add> with h3_client_and_server() as (quic_client, quic_server):
<del> ) as (quic_client, quic_server):
# module: tests.test_h3
class H3ConnectionTest(TestCase):
    def test_uni_stream_type(self):
<0>     with client_and_server(
<1>         client_options={"alpn_protocols": H3_ALPN},
<2>         server_options={"alpn_protocols": H3_ALPN},
<3>     ) as (quic_client, quic_server):
<4>         h3_server = H3Connection(quic_server)
<5>
<6>         # unknown stream type 9
<7>         stream_id = quic_client.get_next_available_stream_id(is_unidirectional=True)
<8>         self.assertEqual(stream_id, 2)
<9>         quic_client.send_stream_data(stream_id, b"\x09")
<10>        self.assertEqual(h3_transfer(quic_client, h3_server), [])
<11>        self.assertEqual(list(h3_server._stream.keys()), [2])
<12>        self.assertEqual(h3_server._stream[2].buffer, b"")
<13>        self.assertEqual(h3_server._stream[2].stream_type, 9)
<14>
<15>        # unknown stream type 64, one byte at a time
<16>        stream_id = quic_client.get_next_available_stream_id(is_unidirectional=True)
<17>        self.assertEqual(stream_id, 6)
<18>
<19>        quic_client.send_stream_data(stream_id, b"\x40")
<20>        self.assertEqual(h3_transfer(quic_client, h3_server), [])
<21>        self.assertEqual(list(h3_server._stream.keys()), [2, 6])
<22>        self.assertEqual(h3_server._stream[2].buffer, b"")
<23>        self.assertEqual(h3_server._stream[2].stream_type, 9)
<24>        self.assertEqual(h3_server._stream[6].buffer, b"\x40")
<25>        self.assertEqual(h3_server._stream[6].stream_type, None)</s>
===========below chunk 0=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_uni_stream_type(self): # offset: 1 quic_client.send_stream_data(stream_id, b"\x40") self.assertEqual(h3_transfer(quic_client, h3_server), []) self.assertEqual(list(h3_server._stream.keys()), [2, 6]) self.assertEqual(h3_server._stream[2].buffer, b"") self.assertEqual(h3_server._stream[2].stream_type, 9) self.assertEqual(h3_server._stream[6].buffer, b"") self.assertEqual(h3_server._stream[6].stream_type, 64) ===========unchanged ref 0=========== at: tests.test_h3 h3_transfer(quic_sender, h3_receiver) at: tests.test_h3.H3ConnectionTest.test_uni_stream_type h3_server = H3Connection(quic_server) at: unittest.case.TestCase assertEqual(first: Any, second: Any, msg: Any=...) -> None ===========changed ref 0=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_uni_stream_grease(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_server = H3Connection(quic_server) quic_client.send_stream_data( 14, b"\xff\xff\xff\xff\xff\xff\xff\xfeGREASE is the word" ) self.assertEqual(h3_transfer(quic_client, h3_server), []) ===========changed ref 1=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_with_trailers(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # send request with trailers stream_id = quic_client.get_next_available_stream_id() h3_client.send_headers( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], end_stream=False, ) h3_client.send_headers( stream_id=stream_id, headers=[(b"x-some-trailer", b"foo")], end_stream=True, ) # receive request events = h3_transfer(quic_client, h3_server) self.assertEqual( events, [ HeadersReceived( headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], stream_id=stream_id, stream_ended=False, ), HeadersReceived( headers=[(b"x-some-trailer", b"foo")], stream_id=stream_id, stream_ended=True, ), ], ) # send response h3_server.send_</s> ===========changed ref 2=========== # module: tests.test_h3 class H3ConnectionTest(TestCase): def test_request_with_trailers(self): # offset: 1 <s>ended=True, ), ], ) # send response h3_server.send_headers( stream_id=stream_id, headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), ], end_stream=False, ) h3_server.send_data( stream_id=stream_id, data=b"<html><body>hello</body></html>", end_stream=False, ) h3_server.send_headers( stream_id=stream_id, headers=[(b"x-some-trailer", b"bar")], end_stream=True, ) # receive response events = h3_transfer(quic_server, h3_client) self.assertEqual( events, [ HeadersReceived( headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), ], stream_id=stream_id, stream_ended=False, ), DataReceived( data=b"<html><body>hello</body></html>", stream_id=stream_id, stream_ended=False, ), HeadersReceived( headers=[(b"x-some-trailer", b"bar")], stream_id=stream_id, stream_ended=True, ), ], ) ===========changed ref 3=========== # module: tests.test_h3 class 
H3ConnectionTest(TestCase): def test_request_with_server_push_max_push_id(self): - with client_and_server( - client_options={"alpn_protocols": H3_ALPN}, - server_options={"alpn_protocols": H3_ALPN}, + with h3_client_and_server() as (quic_client, quic_server): - ) as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) # send request stream_id = quic_client.get_next_available_stream_id() h3_client.send_headers( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], end_stream=True, ) # receive request events = h3_transfer(quic_client, h3_server) self.assertEqual( events, [ HeadersReceived( headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], stream_id=stream_id, stream_ended=True, ) ], ) # send push promises for i in range(0, 8): h3_server.send_push_promise( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", "/{}.css".format(i).encode("ascii")), ], ) </s>
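Both unidirectional-stream records above (test_uni_stream_grease and test_uni_stream_type) hinge on the same detail: the receiver reads the stream type prefix as a QUIC variable-length integer, and the bytes may arrive one at a time. A self-contained sketch of that decoding (decode_quic_varint is an illustrative helper, not an aioquic API):

def decode_quic_varint(data: bytes):
    """Decode a QUIC variable-length integer (RFC 9000, section 16).

    Returns (value, bytes_consumed), or None if more bytes are needed.
    """
    if not data:
        return None
    length = 1 << (data[0] >> 6)  # top two bits encode a 1/2/4/8 byte integer
    if len(data) < length:
        return None               # incomplete: keep buffering
    value = data[0] & 0x3F
    for byte in data[1:length]:
        value = (value << 8) | byte
    return value, length


assert decode_quic_varint(b"\x09") == (9, 1)       # stream type 9, known at once
assert decode_quic_varint(b"\x40") is None         # first half of a 2-byte varint
assert decode_quic_varint(b"\x40\x40") == (64, 2)  # stream type 64
grease, _ = decode_quic_varint(b"\xff" * 7 + b"\xfe")
assert (grease - 0x21) % 0x1F == 0                 # reserved 0x1F * N + 0x21 pattern

This matches the assertions in the tests: b"\x09" yields stream type 9 immediately, a lone b"\x40" is an incomplete two-byte varint (so stream_type stays None and the byte stays buffered), and the eight-byte GREASE prefix decodes to a reserved value that the H3 layer silently ignores.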