{
  // Grab the span elements that contain the Hugging Face text
  const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap');
  spans.forEach(span => {
    if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) {
      span.textContent = 'AI快站';
    }
  });
});

// Replace the alt attribute of the logo images
document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => {
  if (img.alt.match(/Hugging\s*Face/i)) {
    img.alt = 'AI快站 logo';
  }
});
}

// Replace the links in the navigation bar
function replaceNavigationLinks() {
  // Flag marking that replacement already ran, to prevent running twice
  if (window._navLinksReplaced) {
    return;
  }
  // Set of links that were already replaced, to avoid replacing them twice
  const replacedLinks = new Set();
  // Only search for and replace links within the navigation area
  const headerArea = document.querySelector('header') || document.querySelector('nav');
  if (!headerArea) {
    return;
  }
  // Find the links inside the navigation area
  const navLinks = headerArea.querySelectorAll('a');
  navLinks.forEach(link => {
    // Skip links that were already replaced
    if (replacedLinks.has(link)) return;
    const linkText = link.textContent.trim();
    const linkHref = link.getAttribute('href') || '';
    // Replace the Spaces link - only once
    if (
      (linkHref.includes('/spaces') || linkHref === '/spaces' ||
       linkText === 'Spaces' || linkText.match(/^\s*Spaces\s*$/i)) &&
      linkText !== 'PDF TO Markdown'
    ) {
      link.textContent = 'PDF TO Markdown';
      link.href = 'https://fast360.xyz';
      link.setAttribute('target', '_blank');
      link.setAttribute('rel', 'noopener noreferrer');
      replacedLinks.add(link);
    }
    // Remove the Posts link
    else if (
      linkHref.includes('/posts') || linkHref === '/posts' ||
      linkText === 'Posts' || linkText.match(/^\s*Posts\s*$/i)
    ) {
      if (link.parentNode) {
        link.parentNode.removeChild(link);
      }
      replacedLinks.add(link);
    }
    // Replace the Docs link - only once
    else if (
      (linkHref.includes('/docs') || linkHref === '/docs' ||
       linkText === 'Docs' || linkText.match(/^\s*Docs\s*$/i)) &&
      linkText !== 'Voice Cloning'
    ) {
      link.textContent = 'Voice Cloning';
      link.href = 'https://vibevoice.info/';
      replacedLinks.add(link);
    }
    // Remove the Enterprise link
    else if (
      linkHref.includes('/enterprise') || linkHref === '/enterprise' ||
      linkText === 'Enterprise' || linkText.match(/^\s*Enterprise\s*$/i)
    ) {
      if (link.parentNode) {
        link.parentNode.removeChild(link);
      }
      replacedLinks.add(link);
    }
  });

  // Look for Spaces/Posts text that may be nested deeper
  const textNodes = [];
  function findTextNodes(element) {
    if (element.nodeType === Node.TEXT_NODE) {
      const text = element.textContent.trim();
      if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') {
        textNodes.push(element);
      }
    } else {
      for (const child of element.childNodes) {
        findTextNodes(child);
      }
    }
  }
  // Only search for text nodes inside the navigation area
  findTextNodes(headerArea);
  // Replace the text nodes that were found
  textNodes.forEach(node => {
    const text = node.textContent.trim();
    if (text === 'Spaces') {
      node.textContent = node.textContent.replace(/Spaces/g, 'PDF TO Markdown');
    } else if (text === 'Posts') {
      // Remove the Posts text node
      if (node.parentNode) {
        node.parentNode.removeChild(node);
      }
    } else if (text === 'Enterprise') {
      // Remove the Enterprise text node
      if (node.parentNode) {
        node.parentNode.removeChild(node);
      }
    }
  });
  // Mark the replacement as done
  window._navLinksReplaced = true;
}

// Replace domain names inside code areas
function replaceCodeDomains() {
  // Handle span.hljs-string and span.njs-string elements specifically
  document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => {
    if (span.textContent && span.textContent.includes('huggingface.co')) {
      span.textContent = span.textContent.replace(/huggingface\.co/g, 'aifasthub.com');
    }
  });
  // Replace the domain in hljs-string spans (drops any redundant escape characters)
  document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => {
    if (span.textContent && span.textContent.includes('huggingface.co')) {
      span.textContent = span.textContent.replace(/huggingface\.co/g, 'aifasthub.com');
    }
  });
  // Replace the domain in pre/code tags that contain a git clone command
  document.querySelectorAll('pre, code').forEach(element => {
    if (element.textContent && element.textContent.includes('git clone')) {
      const text = element.innerHTML;
      if (text.includes('huggingface.co')) {
        element.innerHTML = text.replace(/huggingface\.co/g, 'aifasthub.com');
      }
    }
  });
  // Handle specific command-line examples
  document.querySelectorAll('pre, code').forEach(element => {
    const text = element.innerHTML;
    if (text.includes('huggingface.co')) {
      // Dedicated handling for git clone commands
      if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) {
        element.innerHTML = text.replace(/huggingface\.co/g, 'aifasthub.com');
      }
    }
  });
  // Special handling for code snippets on the model download page
  document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => {
    const content = container.innerHTML;
    if (content && content.includes('huggingface.co')) {
      container.innerHTML = content.replace(/huggingface\.co/g, 'aifasthub.com');
    }
  });
  // Special handling for code snippets in the model repository clone dialog
  try {
    // Find the dialog titled "Clone this model repository"
    const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]');
    if (cloneDialog) {
      // Find every code snippet and command example inside the dialog
      const codeElements = cloneDialog.querySelectorAll('pre, code, span');
      codeElements.forEach(element => {
        if (element.textContent && element.textContent.includes('huggingface.co')) {
          if (element.innerHTML.includes('huggingface.co')) {
            element.innerHTML = element.innerHTML.replace(/huggingface\.co/g, 'aifasthub.com');
          } else {
            element.textContent = element.textContent.replace(/huggingface\.co/g, 'aifasthub.com');
          }
        }
      });
    }
    // Target the domain inside clone commands more precisely
    document.querySelectorAll('[data-target]').forEach(container => {
      const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string');
      codeBlocks.forEach(block => {
        if (block.textContent && block.textContent.includes('huggingface.co')) {
          if (block.innerHTML.includes('huggingface.co')) {
            block.innerHTML = block.innerHTML.replace(/huggingface\.co/g, 'aifasthub.com');
          } else {
            block.textContent = block.textContent.replace(/huggingface\.co/g, 'aifasthub.com');
          }
        }
      });
    });
  } catch (e) {
    // Handle errors silently, without logging
  }
}

// Run the replacements once the DOM has finished loading
if (document.readyState === 'loading') {
  document.addEventListener('DOMContentLoaded', () => {
    replaceHeaderBranding();
    replaceNavigationLinks();
    replaceCodeDomains();
    // Only replace when necessary - check again after 3 seconds
    setTimeout(() => {
      if (!window._navLinksReplaced) {
        console.log('[Client] Re-checking navigation links after 3 seconds');
        replaceNavigationLinks();
      }
    }, 3000);
  });
} else {
  replaceHeaderBranding();
  replaceNavigationLinks();
  replaceCodeDomains();
  // Only replace when necessary - check again after 3 seconds
  setTimeout(() => {
    if (!window._navLinksReplaced) {
      console.log('[Client] Re-checking navigation links after 3 seconds');
      replaceNavigationLinks();
    }
  }, 3000);
}

// Add a MutationObserver to handle elements that may be loaded dynamically
const observer = new MutationObserver(mutations => {
  // Check whether the navigation area changed
  const hasNavChanges = mutations.some(mutation => {
    // Check whether a header or nav element was added
    return Array.from(mutation.addedNodes).some(node => {
      if (node.nodeType === Node.ELEMENT_NODE) {
        // Is this a navigation element or one of its children?
        if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) {
          return true;
        }
        // Or does it sit inside a navigation element?
        let parent = node.parentElement;
        while (parent) {
          if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') {
            return true;
          }
          parent = parent.parentElement;
        }
      }
      return false;
    });
  });
  // Only run the replacements when the navigation area changed
  if (hasNavChanges) {
    // Reset the flag so the replacement can run again
    window._navLinksReplaced = false;
    replaceHeaderBranding();
    replaceNavigationLinks();
  }
});

// Start observing document.body for changes, including child nodes
if (document.body)
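/*
   The observer above re-runs the replacements whenever header/nav nodes are
   added. A generic, debounced variant of the same observe-and-patch pattern
   is sketched here for reference; `observeAndPatch`, `patchFn`, and the
   200 ms window are illustrative assumptions, not part of the original script.

   function observeAndPatch(patchFn, root = document.body) {
     let timer = null;
     const obs = new MutationObserver(() => {
       // Coalesce bursts of mutations into a single patch run.
       clearTimeout(timer);
       timer = setTimeout(patchFn, 200);
     });
     obs.observe(root, { childList: true, subtree: true });
     return obs; // caller can later call obs.disconnect()
   }

   // Usage: observeAndPatch(() => { replaceHeaderBranding(); replaceNavigationLinks(); });
*/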
{ observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \")\n"},"path":{"kind":"string","value":"assign_candidate.py"},"size":{"kind":"number","value":7818,"string":"7,818"},"nl_text":{"kind":"string","value":"!/usr/bin/python htmlquery to get candidate data for the rowsquery to get the faculty and project names for the table headersquery to get all current candidate-faculty pairs in the databasestart connectionexecute query 1 get results to above standard queryexecute query 2get results to above standard queryexecute query 3get results to above standard queryget all the candidate-faculty pair ids currently in the database which will be used in the section that checks and uses form dataretrieve form dataif form is empty, then it's possible that everything is to be deleted from the Assignment tableif not form: if results3: truncateStatement = \"DELETE FROM Assignment;\" connection = sqlite3.connect('db/BRITEREU.db') c = connection.cursor() c.execute(truncateStatement) connection.commit()check what checkboxes are checkedif checkbox was selected that was not previously selected - insert those pairs into the Assignment tableif checkbox is no longer selected - delete those pairs from the Assignment tablefind pairs that are in the selected list (pairlist) and not in the current database list (res3)find pairs that are not in the selected list(pairlist) and are in the current database list (res3)query the database again to now get all updated pairsexecute query 1 get results to above standard queryform action for user to submit checkboxes selections gets list of facultyadds all the faculty who are in the database as columnsget the Project IDs for the projects so that you concatenate to the CID to formulate a value pairadded proper URL for reference to reviewer pageprint the candidate table with a checkbox for each faculty memberadd submit button for assigning faculty to candidatesend formfiltering section for the table"},"nl_size":{"kind":"number","value":1781,"string":"1,781"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8289442658424377,"string":"0.828944"}}},{"rowIdx":7803,"cells":{"content":{"kind":"string","value":"\"\"\"Test the UniFi Protect switch platform.\"\"\"\n# pylint: disable=protected-access\nfrom __future__ import annotations\n\nfrom unittest.mock import AsyncMock, Mock\n\nimport pytest\nfrom pyunifiprotect.data import (\n Camera,\n Light,\n RecordingMode,\n SmartDetectObjectType,\n VideoMode,\n)\n\nfrom homeassistant.components.unifiprotect.const import DEFAULT_ATTRIBUTION\nfrom homeassistant.components.unifiprotect.switch import (\n CAMERA_SWITCHES,\n LIGHT_SWITCHES,\n ProtectSwitchEntityDescription,\n)\nfrom homeassistant.const import ATTR_ATTRIBUTION, ATTR_ENTITY_ID, STATE_OFF, Platform\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers import entity_registry as er\n\nfrom .conftest import (\n MockEntityFixture,\n assert_entity_counts,\n enable_entity,\n ids_from_device_description,\n)\n\nCAMERA_SWITCHES_BASIC = [\n d\n for d in CAMERA_SWITCHES\n if d.name != \"Detections: Face\"\n and d.name != \"Detections: Package\"\n and d.name != \"SSH Enabled\"\n]\nCAMERA_SWITCHES_NO_EXTRA = [\n d for d in CAMERA_SWITCHES_BASIC if d.name not in (\"High FPS\", \"Privacy Mode\")\n]\n\n\n@pytest.fixture(name=\"light\")\nasync def light_fixture(\n hass: HomeAssistant, 
mock_entry: MockEntityFixture, mock_light: Light\n):\n \"\"\"Fixture for a single light for testing the switch platform.\"\"\"\n\n # disable pydantic validation so mocking can happen\n Light.__config__.validate_assignment = False\n\n light_obj = mock_light.copy(deep=True)\n light_obj._api = mock_entry.api\n light_obj.name = \"Test Light\"\n light_obj.is_ssh_enabled = False\n light_obj.light_device_settings.is_indicator_enabled = False\n\n mock_entry.api.bootstrap.reset_objects()\n mock_entry.api.bootstrap.lights = {\n light_obj.id: light_obj,\n }\n\n await hass.config_entries.async_setup(mock_entry.entry.entry_id)\n await hass.async_block_till_done()\n\n assert_entity_counts(hass, Platform.SWITCH, 2, 1)\n\n yield light_obj\n\n Light.__config__.validate_assignment = True\n\n\n@pytest.fixture(name=\"camera\")\nasync def camera_fixture(\n hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera\n):\n \"\"\"Fixture for a single camera for testing the switch platform.\"\"\"\n\n # disable pydantic validation so mocking can happen\n Camera.__config__.validate_assignment = False\n\n camera_obj = mock_camera.copy(deep=True)\n camera_obj._api = mock_entry.api\n camera_obj.channels[0]._api = mock_entry.api\n camera_obj.channels[1]._api = mock_entry.api\n camera_obj.channels[2]._api = mock_entry.api\n camera_obj.name = \"Test Camera\"\n camera_obj.recording_settings.mode = RecordingMode.DETECTIONS\n camera_obj.feature_flags.has_led_status = True\n camera_obj.feature_flags.has_hdr = True\n camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT, VideoMode.HIGH_FPS]\n camera_obj.feature_flags.has_privacy_mask = True\n camera_obj.feature_flags.has_speaker = True\n camera_obj.feature_flags.has_smart_detect = True\n camera_obj.feature_flags.smart_detect_types = [\n SmartDetectObjectType.PERSON,\n SmartDetectObjectType.VEHICLE,\n ]\n camera_obj.is_ssh_enabled = False\n camera_obj.led_settings.is_enabled = False\n camera_obj.hdr_mode = False\n camera_obj.video_mode = VideoMode.DEFAULT\n camera_obj.remove_privacy_zone()\n camera_obj.speaker_settings.are_system_sounds_enabled = False\n camera_obj.osd_settings.is_name_enabled = False\n camera_obj.osd_settings.is_date_enabled = False\n camera_obj.osd_settings.is_logo_enabled = False\n camera_obj.osd_settings.is_debug_enabled = False\n camera_obj.smart_detect_settings.object_types = []\n\n mock_entry.api.bootstrap.reset_objects()\n mock_entry.api.bootstrap.cameras = {\n camera_obj.id: camera_obj,\n }\n\n await hass.config_entries.async_setup(mock_entry.entry.entry_id)\n await hass.async_block_till_done()\n\n assert_entity_counts(hass, Platform.SWITCH, 12, 11)\n\n yield camera_obj\n\n Camera.__config__.validate_assignment = True\n\n\n@pytest.fixture(name=\"camera_none\")\nasync def camera_none_fixture(\n hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera\n):\n \"\"\"Fixture for a single camera for testing the switch platform.\"\"\"\n\n # disable pydantic validation so mocking can happen\n Camera.__config__.validate_assignment = False\n\n camera_obj = mock_camera.copy(deep=True)\n camera_obj._api = mock_entry.api\n camera_obj.channels[0]._api = mock_entry.api\n camera_obj.channels[1]._api = mock_entry.api\n camera_obj.channels[2]._api = mock_entry.api\n camera_obj.name = \"Test Camera\"\n camera_obj.recording_settings.mode = RecordingMode.DETECTIONS\n camera_obj.feature_flags.has_led_status = False\n camera_obj.feature_flags.has_hdr = False\n camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]\n 
camera_obj.feature_flags.has_privacy_mask = False\n camera_obj.feature_flags.has_speaker = False\n camera_obj.feature_flags.has_smart_detect = False\n camera_obj.is_ssh_enabled = False\n camera_obj.osd_settings.is_name_enabled = False\n camera_obj.osd_settings.is_date_enabled = False\n camera_obj.osd_settings.is_logo_enabled = False\n camera_obj.osd_settings.is_debug_enabled = False\n\n mock_entry.api.bootstrap.reset_objects()\n mock_entry.api.bootstrap.cameras = {\n camera_obj.id: camera_obj,\n }\n\n await hass.config_entries.async_setup(mock_entry.entry.entry_id)\n await hass.async_block_till_done()\n\n assert_entity_counts(hass, Platform.SWITCH, 5, 4)\n\n yield camera_obj\n\n Camera.__config__.validate_assignment = True\n\n\n@pytest.fixture(name=\"camera_privacy\")\nasync def camera_privacy_fixture(\n hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera\n):\n \"\"\"Fixture for a single camera for testing the switch platform.\"\"\"\n\n # disable pydantic validation so mocking can happen\n Camera.__config__.validate_assignment = False\n\n camera_obj = mock_camera.copy(deep=True)\n camera_obj._api = mock_entry.api\n camera_obj.channels[0]._api = mock_entry.api\n camera_obj.channels[1]._api = mock_entry.api\n camera_obj.channels[2]._api = mock_entry.api\n camera_obj.name = \"Test Camera\"\n camera_obj.recording_settings.mode = RecordingMode.NEVER\n camera_obj.feature_flags.has_led_status = False\n camera_obj.feature_flags.has_hdr = False\n camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]\n camera_obj.feature_flags.has_privacy_mask = True\n camera_obj.feature_flags.has_speaker = False\n camera_obj.feature_flags.has_smart_detect = False\n camera_obj.add_privacy_zone()\n camera_obj.is_ssh_enabled = False\n camera_obj.osd_settings.is_name_enabled = False\n camera_obj.osd_settings.is_date_enabled = False\n camera_obj.osd_settings.is_logo_enabled = False\n camera_obj.osd_settings.is_debug_enabled = False\n\n mock_entry.api.bootstrap.reset_objects()\n mock_entry.api.bootstrap.cameras = {\n camera_obj.id: camera_obj,\n }\n\n await hass.config_entries.async_setup(mock_entry.entry.entry_id)\n await hass.async_block_till_done()\n\n assert_entity_counts(hass, Platform.SWITCH, 6, 5)\n\n yield camera_obj\n\n Camera.__config__.validate_assignment = True\n\n\nasync def test_switch_setup_light(\n hass: HomeAssistant,\n mock_entry: MockEntityFixture,\n light: Light,\n):\n \"\"\"Test switch entity setup for light devices.\"\"\"\n\n entity_registry = er.async_get(hass)\n\n description = LIGHT_SWITCHES[1]\n\n unique_id, entity_id = ids_from_device_description(\n Platform.SWITCH, light, description\n )\n\n entity = entity_registry.async_get(entity_id)\n assert entity\n assert entity.unique_id == unique_id\n\n state = hass.states.get(entity_id)\n assert state\n assert state.state == STATE_OFF\n assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION\n\n description = LIGHT_SWITCHES[0]\n\n unique_id = f\"{light.id}_{description.key}\"\n entity_id = f\"switch.test_light_{description.name.lower().replace(' ', '_')}\"\n\n entity = entity_registry.async_get(entity_id)\n assert entity\n assert entity.disabled is True\n assert entity.unique_id == unique_id\n\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n\n state = hass.states.get(entity_id)\n assert state\n assert state.state == STATE_OFF\n assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION\n\n\nasync def test_switch_setup_camera_all(\n hass: HomeAssistant,\n mock_entry: MockEntityFixture,\n 
camera: Camera,\n):\n \"\"\"Test switch entity setup for camera devices (all enabled feature flags).\"\"\"\n\n entity_registry = er.async_get(hass)\n\n for description in CAMERA_SWITCHES_BASIC:\n unique_id, entity_id = ids_from_device_description(\n Platform.SWITCH, camera, description\n )\n\n entity = entity_registry.async_get(entity_id)\n assert entity\n assert entity.unique_id == unique_id\n\n state = hass.states.get(entity_id)\n assert state\n assert state.state == STATE_OFF\n assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION\n\n description = CAMERA_SWITCHES[0]\n\n description_entity_name = (\n description.name.lower().replace(\":\", \"\").replace(\" \", \"_\")\n )\n unique_id = f\"{camera.id}_{description.key}\"\n entity_id = f\"switch.test_camera_{description_entity_name}\"\n\n entity = entity_registry.async_get(entity_id)\n assert entity\n assert entity.disabled is True\n assert entity.unique_id == unique_id\n\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n\n state = hass.states.get(entity_id)\n assert state\n assert state.state == STATE_OFF\n assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION\n\n\nasync def test_switch_setup_camera_none(\n hass: HomeAssistant,\n mock_entry: MockEntityFixture,\n camera_none: Camera,\n):\n \"\"\"Test switch entity setup for camera devices (no enabled feature flags).\"\"\"\n\n entity_registry = er.async_get(hass)\n\n for description in CAMERA_SWITCHES_BASIC:\n if description.ufp_required_field is not None:\n continue\n\n unique_id, entity_id = ids_from_device_description(\n Platform.SWITCH, camera_none, description\n )\n\n entity = entity_registry.async_get(entity_id)\n assert entity\n assert entity.unique_id == unique_id\n\n state = hass.states.get(entity_id)\n assert state\n assert state.state == STATE_OFF\n assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION\n\n description = CAMERA_SWITCHES[0]\n\n description_entity_name = (\n description.name.lower().replace(\":\", \"\").replace(\" \", \"_\")\n )\n unique_id = f\"{camera_none.id}_{description.key}\"\n entity_id = f\"switch.test_camera_{description_entity_name}\"\n\n entity = entity_registry.async_get(entity_id)\n assert entity\n assert entity.disabled is True\n assert entity.unique_id == unique_id\n\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n\n state = hass.states.get(entity_id)\n assert state\n assert state.state == STATE_OFF\n assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION\n\n\nasync def test_switch_light_status(hass: HomeAssistant, light: Light):\n \"\"\"Tests status light switch for lights.\"\"\"\n\n description = LIGHT_SWITCHES[1]\n\n light.__fields__[\"set_status_light\"] = Mock()\n light.set_status_light = AsyncMock()\n\n _, entity_id = ids_from_device_description(Platform.SWITCH, light, description)\n\n await hass.services.async_call(\n \"switch\", \"turn_on\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n light.set_status_light.assert_called_once_with(True)\n\n await hass.services.async_call(\n \"switch\", \"turn_off\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n light.set_status_light.assert_called_with(False)\n\n\nasync def test_switch_camera_ssh(\n hass: HomeAssistant, camera: Camera, mock_entry: MockEntityFixture\n):\n \"\"\"Tests SSH switch for cameras.\"\"\"\n\n description = CAMERA_SWITCHES[0]\n\n camera.__fields__[\"set_ssh\"] = Mock()\n camera.set_ssh = AsyncMock()\n\n _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)\n await 
enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n\n await hass.services.async_call(\n \"switch\", \"turn_on\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n camera.set_ssh.assert_called_once_with(True)\n\n await hass.services.async_call(\n \"switch\", \"turn_off\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n camera.set_ssh.assert_called_with(False)\n\n\n@pytest.mark.parametrize(\"description\", CAMERA_SWITCHES_NO_EXTRA)\nasync def test_switch_camera_simple(\n hass: HomeAssistant, camera: Camera, description: ProtectSwitchEntityDescription\n):\n \"\"\"Tests all simple switches for cameras.\"\"\"\n\n assert description.ufp_set_method is not None\n\n camera.__fields__[description.ufp_set_method] = Mock()\n setattr(camera, description.ufp_set_method, AsyncMock())\n set_method = getattr(camera, description.ufp_set_method)\n\n _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)\n\n await hass.services.async_call(\n \"switch\", \"turn_on\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n set_method.assert_called_once_with(True)\n\n await hass.services.async_call(\n \"switch\", \"turn_off\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n set_method.assert_called_with(False)\n\n\nasync def test_switch_camera_highfps(hass: HomeAssistant, camera: Camera):\n \"\"\"Tests High FPS switch for cameras.\"\"\"\n\n description = CAMERA_SWITCHES[3]\n\n camera.__fields__[\"set_video_mode\"] = Mock()\n camera.set_video_mode = AsyncMock()\n\n _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)\n\n await hass.services.async_call(\n \"switch\", \"turn_on\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n camera.set_video_mode.assert_called_once_with(VideoMode.HIGH_FPS)\n\n await hass.services.async_call(\n \"switch\", \"turn_off\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n camera.set_video_mode.assert_called_with(VideoMode.DEFAULT)\n\n\nasync def test_switch_camera_privacy(hass: HomeAssistant, camera: Camera):\n \"\"\"Tests Privacy Mode switch for cameras.\"\"\"\n\n description = CAMERA_SWITCHES[4]\n\n camera.__fields__[\"set_privacy\"] = Mock()\n camera.set_privacy = AsyncMock()\n\n _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)\n\n await hass.services.async_call(\n \"switch\", \"turn_on\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n camera.set_privacy.assert_called_once_with(True, 0, RecordingMode.NEVER)\n\n await hass.services.async_call(\n \"switch\", \"turn_off\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n camera.set_privacy.assert_called_with(\n False, camera.mic_volume, camera.recording_settings.mode\n )\n\n\nasync def test_switch_camera_privacy_already_on(\n hass: HomeAssistant, camera_privacy: Camera\n):\n \"\"\"Tests Privacy Mode switch for cameras with privacy mode defaulted on.\"\"\"\n\n description = CAMERA_SWITCHES[4]\n\n camera_privacy.__fields__[\"set_privacy\"] = Mock()\n camera_privacy.set_privacy = AsyncMock()\n\n _, entity_id = ids_from_device_description(\n Platform.SWITCH, camera_privacy, description\n )\n\n await hass.services.async_call(\n \"switch\", \"turn_off\", {ATTR_ENTITY_ID: entity_id}, blocking=True\n )\n\n camera_privacy.set_privacy.assert_called_once_with(False, 100, RecordingMode.ALWAYS)\n"},"path":{"kind":"string","value":"tests/components/unifiprotect/test_switch.py"},"size":{"kind":"number","value":15576,"string":"15,576"},"nl_text":{"kind":"string","value":"Test the UniFi Protect switch platform.\n\n pylint: 
disable=protected-access disable pydantic validation so mocking can happen disable pydantic validation so mocking can happen disable pydantic validation so mocking can happen disable pydantic validation so mocking can happen"},"nl_size":{"kind":"number","value":274,"string":"274"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.4995156228542328,"string":"0.499516"}}},{"rowIdx":7804,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python3\n\n''' \nModel for Riemannian feature calculation and classification for EEG data\n'''\n\nimport numpy as np\nfrom sklearn.svm import LinearSVC, SVC\n\nfrom riemannian_multiscale import RiemannianMultiscale, QuantizedRiemannianMultiscale\nfrom filters import load_filterbank\nfrom utilities import quantize\n\n__author__ = \"Michael Hersche, Tino Rellstab and Tibor Schneider\"\n__email__ = \"herschmi@ethz.ch,tinor@ethz.ch\"\n\nDATA_PATH = \"dataset/\"\n# QUANTIZED = True\n# ONLY_2HZ_BANDS = True\n\nclass RiemannianModel():\n \"\"\" Riemannian Model \"\"\"\n def __init__(self, svm_kernel='linear', svm_c=0.1, fs=250, bands=None, time_windows=None,\n riem_opt='Riemann', rho=0.1, filter_type='butter', filter_order=2,\n random_state=None):\n \"\"\" Constructor\n\n Args:\n\n Parameters\n ----------\n\n svm_kernel: str {'linear', 'sigmoid', 'rbf'}\n kernel used for classifier\n\n svm_c: float\n regularization parameter for the classifier\n\n fs: int\n sampling rate of the data\n\n bands: list of int\n bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])\n\n time_windows: list of list of ints, shape = (N, 2)\n time windows used, in seconds (default: [[2,5, 6]])\n\n riem_opt: str {\"riemann\", \"Riemann_Euclid\", \"Whitened_Euclid\", \"No_Adaptation\"}\n type of riemannian used\n\n rho: float\n Normalization parameter for the covariance matrix of the riemannian\n\n filter_type: str {\"butter\", \"fir\"}\n Type of the filter\n\n filter_order: int\n Order of the filter\n\n random_state: int or None\n random seed used in the SVM\n \"\"\"\n\n # setup classifier\n if svm_kernel == 'linear':\n self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)\n else:\n self.classifier = SVC(C=svm_c, kernel=svm_kernel, degree=10, gamma='auto',\n cache_size=10000, random_state=random_state)\n\n # setup Filterbank\n if bands is None:\n bandwidths = np.array([2, 4, 8, 16, 32])\n else:\n bandwidths = np.array(bands)\n filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=filter_type)\n\n # setup Time Windows\n if time_windows is None:\n time_windows = (np.array([[2.5, 6]]) * fs).astype(int)\n # time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int)\n else:\n time_windows = (np.array(time_windows) * fs).astype(int)\n\n # setup riemannian\n self.riemannian = RiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,\n rho=rho, vectorized=True)\n\n # store dimensionality\n self.no_bands = filter_bank.shape[0]\n self.no_time_windows = time_windows.shape[0]\n self.no_riem = None\n self.no_features = None\n\n def fit(self, samples, labels):\n \"\"\" Training\n\n Parameters\n ----------\n\n samples: np.array, size=(N, C, T)\n training samples\n\n labels: np.array, size=(N)\n training labels\n \"\"\"\n # extract the number of eatures\n assert len(samples.shape) == 3\n no_channels = samples.shape[1]\n self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow\n self.no_features = self.no_riem * self.no_bands * 
self.no_time_windows\n\n # fit and extract training features from the riemannian\n features = self.riemannian.fit(samples)\n self.classifier.fit(features, labels)\n\n def score(self, samples, labels):\n \"\"\" Measure the performance, returns success rate\n\n Parameters\n ----------\n\n samples: np.array, size=(N, C, T)\n training samples\n\n labels: np.array, size=(N)\n training labels\n\n Returns\n -------\n\n float: score of the model\n \"\"\"\n features = self.riemannian.features(samples)\n return self.classifier.score(features, labels)\n\n def predict(self, samples):\n \"\"\" Predict some data\n\n Parameters\n ----------\n\n samples: np.array, size=(N, C, T)\n training samples\n\n Returns\n -------\n\n np.array, size=[N]: prediction\n \"\"\"\n features = self.riemannian.features(samples)\n return self.classifier.predict(features)\n\n\nclass QuantizedRiemannianModel():\n \"\"\" QuantizedRiemannian Model \"\"\"\n def __init__(self, svm_c=0.1, fs=250, bands=None, riem_opt='Riemann', rho=0.1, filter_order=2,\n random_state=None, num_bits=8, bitshift_scale=True):\n \"\"\" Constructor\n\n Parameters\n ----------\n\n svm_c: float\n regularization parameter for the classifier\n\n fs: int\n sampling rate of the data\n\n bands: list of int\n bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])\n\n riem_opt: str {\"riemann\", \"Riemann_Euclid\", \"Whitened_Euclid\", \"No_Adaptation\"}\n type of riemannian used\n\n rho: float\n Normalization parameter for the covariance matrix of the riemannian\n\n filter_order: int\n Order of the filter\n\n random_state: int or None\n random seed used in the SVM\n\n num_bits: int\n Number of bits used for quantization\n\n bitshift_scale: bool\n if True, make sure that all scale factors between one part and the next is a bitshift\n \"\"\"\n\n self.num_bits = num_bits\n self.bitshift_scale = bitshift_scale\n\n # setup classifier\n self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)\n\n # setup Filterbank\n if bands is None:\n bandwidths = np.array([2, 4, 8, 16, 32])\n else:\n bandwidths = np.array(bands)\n filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=\"butter\")\n\n # setup Time Windows\n time_windows = (np.array([[2.5, 6]]) * fs).astype(int)\n # time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) # !!!!!\n\n # setup riemannian\n self.riemannian = QuantizedRiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,\n rho=rho, vectorized=True, num_bits=num_bits,\n bitshift_scale=bitshift_scale)\n\n # prepare quantized weights and biases\n self.scale_weight = 0\n self.scale_bias = 0\n\n # store dimensionality\n self.no_bands = filter_bank.shape[0]\n self.no_time_windows = time_windows.shape[0]\n self.no_riem = None\n self.no_features = None\n\n def fit(self, samples, labels):\n \"\"\" Training\n\n Parameters\n ----------\n\n samples: np.array, size=(N, C, T)\n training samples\n\n labels: np.array, size=(N)\n training labels\n \"\"\"\n # extract the number of eatures\n assert len(samples.shape) == 3\n no_channels = samples.shape[1]\n self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow\n self.no_features = self.no_riem * self.no_bands * self.no_time_windows\n\n # prepare scale factors\n self.riemannian.prepare_quantization(samples)\n\n # fit and extract training features from the riemannian\n features = self.riemannian.fit(samples)\n self.classifier.fit(features, labels)\n\n # quantize the classifier\n 
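# `quantize` is imported from `utilities`, which is not included in this dump.
# A minimal sketch of a symmetric quantize-dequantize helper that is consistent
# with how it is called here (values, scale, num_bits, do_round) could look
# like the following; this is an assumption about its behaviour, not the
# project's actual implementation.

import numpy as np  # already imported at the top of this module


def quantize_sketch(values, scale, num_bits, do_round=False):
    """Map `values` onto a signed `num_bits` grid spanning [-scale, scale]."""
    levels = 2 ** (num_bits - 1) - 1           # e.g. 127 for 8 bits
    steps = values / scale * levels            # rescale onto the integer grid
    steps = np.round(steps) if do_round else np.trunc(steps)
    steps = np.clip(steps, -levels - 1, levels)
    return steps * scale / levels              # return dequantized floats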
self.scale_weight = max(self.scale_weight, np.abs(self.classifier.coef_).max())\n weights = quantize(self.classifier.coef_, self.scale_weight, self.num_bits, do_round=True)\n self.classifier.coef_ = weights\n\n # do not quantize the bias, this one will be added in 32 bit, and quantization does not\n # matter here...\n\n # self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max())\n # bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits,\n # do_round=True)\n # self.classifier.intercept_ = bias\n\n def score(self, samples, labels):\n \"\"\" Measure the performance, returns success rate\n\n Parameters\n ----------\n\n samples: np.array, size=(N, C, T)\n training samples\n\n labels: np.array, size=(N)\n training labels\n\n Returns\n -------\n\n float: score of the model\n \"\"\"\n features = self.riemannian.features(samples)\n return self.classifier.score(features, labels)\n\n def predict(self, samples):\n \"\"\" Predict some data\n\n Parameters\n ----------\n\n samples: np.array, size=(N, C, T)\n training samples\n\n Returns\n -------\n\n np.array, size=[N]: prediction\n \"\"\"\n features = self.riemannian.features(samples)\n return self.classifier.predict(features)\n\n def predict_with_intermediate(self, sample, verbose=True):\n \"\"\" Predict some data\n\n Parameters\n ----------\n\n samples: np.array, size=(C, T)\n training sample\n\n Returns\n -------\n\n ordered dictionary including every intermediate result and the output\n \"\"\"\n if verbose:\n print(\"Predict sample with intermediate matrices\")\n assert len(sample.shape) == 2\n result = self.riemannian.onetrial_feature_with_intermediate(sample)\n features = next(reversed(result.values()))\n features = features.reshape(1, -1)\n result[\"svm_result\"] = self.classifier.decision_function(features)\n result[\"prediction\"] = self.classifier.predict(features)\n return result\n\n def get_data_dict(self):\n \"\"\" Returns a nested dictionary containing all necessary data \"\"\"\n return {\"num_bits\": self.num_bits,\n \"bitshift_scale\": self.bitshift_scale,\n \"SVM\": {\"weights\": self.classifier.coef_,\n \"weight_scale\": self.scale_weight,\n \"bias\": self.classifier.intercept_},\n \"riemannian\": self.riemannian.get_data_dict()}\n"},"path":{"kind":"string","value":"multiscale_bci_python/riemannian_model.py"},"size":{"kind":"number","value":10590,"string":"10,590"},"nl_text":{"kind":"string","value":"QuantizedRiemannian Model \nRiemannian Model \nConstructor\n\nArgs:\n\nParameters\n----------\n\nsvm_kernel: str {'linear', 'sigmoid', 'rbf'}\n kernel used for classifier\n\nsvm_c: float\n regularization parameter for the classifier\n\nfs: int\n sampling rate of the data\n\nbands: list of int\n bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])\n\ntime_windows: list of list of ints, shape = (N, 2)\n time windows used, in seconds (default: [[2,5, 6]])\n\nriem_opt: str {\"riemann\", \"Riemann_Euclid\", \"Whitened_Euclid\", \"No_Adaptation\"}\n type of riemannian used\n\nrho: float\n Normalization parameter for the covariance matrix of the riemannian\n\nfilter_type: str {\"butter\", \"fir\"}\n Type of the filter\n\nfilter_order: int\n Order of the filter\n\nrandom_state: int or None\n random seed used in the SVM\nConstructor\n\nParameters\n----------\n\nsvm_c: float\n regularization parameter for the classifier\n\nfs: int\n sampling rate of the data\n\nbands: list of int\n bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])\n\nriem_opt: str {\"riemann\", \"Riemann_Euclid\", 
\"Whitened_Euclid\", \"No_Adaptation\"}\n type of riemannian used\n\nrho: float\n Normalization parameter for the covariance matrix of the riemannian\n\nfilter_order: int\n Order of the filter\n\nrandom_state: int or None\n random seed used in the SVM\n\nnum_bits: int\n Number of bits used for quantization\n\nbitshift_scale: bool\n if True, make sure that all scale factors between one part and the next is a bitshift\nTraining\n\nParameters\n----------\n\nsamples: np.array, size=(N, C, T)\n training samples\n\nlabels: np.array, size=(N)\n training labels\nTraining\n\nParameters\n----------\n\nsamples: np.array, size=(N, C, T)\n training samples\n\nlabels: np.array, size=(N)\n training labels\nReturns a nested dictionary containing all necessary data \nPredict some data\n\nParameters\n----------\n\nsamples: np.array, size=(N, C, T)\n training samples\n\nReturns\n-------\n\nnp.array, size=[N]: prediction\nPredict some data\n\nParameters\n----------\n\nsamples: np.array, size=(N, C, T)\n training samples\n\nReturns\n-------\n\nnp.array, size=[N]: prediction\nPredict some data\n\nParameters\n----------\n\nsamples: np.array, size=(C, T)\n training sample\n\nReturns\n-------\n\nordered dictionary including every intermediate result and the output\nMeasure the performance, returns success rate\n\nParameters\n----------\n\nsamples: np.array, size=(N, C, T)\n training samples\n\nlabels: np.array, size=(N)\n training labels\n\nReturns\n-------\n\nfloat: score of the model\nMeasure the performance, returns success rate\n\nParameters\n----------\n\nsamples: np.array, size=(N, C, T)\n training samples\n\nlabels: np.array, size=(N)\n training labels\n\nReturns\n-------\n\nfloat: score of the model\nModel for Riemannian feature calculation and classification for EEG data\n\n!/usr/bin/env python3 QUANTIZED = True ONLY_2HZ_BANDS = True setup classifier setup Filterbank setup Time Windows time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) setup riemannian store dimensionality extract the number of eatures Total number of CSP feature per band and timewindow fit and extract training features from the riemannian setup classifier setup Filterbank setup Time Windows time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) !!!!! setup riemannian prepare quantized weights and biases store dimensionality extract the number of eatures Total number of CSP feature per band and timewindow prepare scale factors fit and extract training features from the riemannian quantize the classifier do not quantize the bias, this one will be added in 32 bit, and quantization does not matter here... 
self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max()) bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits, do_round=True) self.classifier.intercept_ = bias"},"nl_size":{"kind":"number","value":4013,"string":"4,013"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.4927401840686798,"string":"0.49274"}}},{"rowIdx":7805,"cells":{"content":{"kind":"string","value":"import logging\nimport os\nimport random\nimport time\nfrom functools import lru_cache\n\nimport cv2\nimport numpy as np\n\nimport imgreco.main\nfrom Arknights.helper import logger\nfrom addons.activity import ActivityAddOn, get_stage_map\nfrom addons.base import BaseAddOn, pil2cv, crop_cv_by_rect, show_img\nfrom addons.common_cache import load_game_data\nfrom imgreco.ocr.cnocr import ocr_and_correct\n\nicon1 = cv2.imread(os.path.join(os.path.realpath(os.path.dirname(__file__)), 'icon1.png'), cv2.IMREAD_GRAYSCALE)\nicon2 = cv2.imread(os.path.join(os.path.realpath(os.path.dirname(__file__)), 'icon2.png'), cv2.IMREAD_GRAYSCALE)\n\n\n@lru_cache(maxsize=1)\ndef get_activity_infos():\n return load_game_data('activity_table')['basicInfo']\n\n\n@lru_cache()\ndef get_available_activity(display_type=None):\n activity_infos = get_activity_infos()\n name_set = set()\n for aid, info in activity_infos.items():\n if info.get('displayType') in {'SIDESTORY', 'BRANCHLINE'}:\n if info['displayType'] == 'BRANCHLINE' or info.get('isReplicate'):\n raw_name = info['name'][:-3] if info.get('isReplicate') else info['name']\n if display_type is None or display_type == info['displayType']:\n name_set.add(raw_name)\n return name_set\n\n\ndef get_activity_name(activity):\n name = activity['name']\n if activity['isReplicate']:\n return name[:-3]\n return name\n\n\ndef crop_image_only_outside(gray_img, raw_img, threshold=128, padding=3):\n mask = gray_img > threshold\n m, n = gray_img.shape\n mask0, mask1 = mask.any(0), mask.any(1)\n col_start, col_end = mask0.argmax(), n - mask0[::-1].argmax()\n row_start, row_end = mask1.argmax(), m - mask1[::-1].argmax()\n return raw_img[row_start - padding:row_end + padding, col_start - padding:col_end + padding]\n\n\nclass StartSpStageAddon(BaseAddOn):\n def __init__(self, helper=None):\n super(StartSpStageAddon, self).__init__(helper)\n self.scale = self.helper.viewport[1] / 720\n if self.helper.viewport != (1280, 720):\n logger.warning('It may produce some weird effects when the resolution is not 1280x720.')\n\n def apply_scale(self, value):\n if self.scale == 1:\n return value\n return int(value * self.scale)\n\n def run(self, stage_code: str, repeat_times: int = 1000, try_current_activity=True):\n stage_code = stage_code.upper()\n if try_current_activity:\n try:\n return ActivityAddOn(self.helper).run(stage_code, repeat_times)\n except:\n pass\n stage_code_map, zone_linear_map = get_stage_map()\n if stage_code not in stage_code_map:\n raise RuntimeError(f'无效的关卡: {stage_code}')\n stage = stage_code_map[stage_code]\n activity_id = stage['zoneId'].split('_')[0]\n activity_infos = get_activity_infos()\n activity = activity_infos[activity_id]\n logger.debug(f'stage: {stage}, activity: {activity}')\n self.enter_activity(activity)\n stage_linear = zone_linear_map[stage['zoneId']]\n self.helper.find_and_tap_stage_by_ocr(None, stage_code, stage_linear)\n return self.helper.module_battle_slim(None, repeat_times)\n\n def enter_activity(self, activity):\n vh = self.vh\n act_name = get_activity_name(activity)\n if act_name not in 
get_available_activity():\n raise RuntimeError(f'无效的活动: {act_name}')\n self.open_terminal()\n if activity['displayType'] == 'BRANCHLINE':\n self.tap_branch_line()\n else:\n self.tap_side_story()\n crop_flag = activity['displayType'] == 'SIDESTORY'\n act_pos_map = self.get_all_act_pos(crop_flag)\n if act_name not in act_pos_map:\n if activity['displayType'] == 'BRANCHLINE':\n raise RuntimeError(f'找不到相应活动: {act_name}')\n last_acts = act_pos_map.keys()\n while True:\n origin_x = random.randint(int(5.833 * vh), int(24.861 * vh))\n origin_y = random.randint(int(57.222 * vh), int(77.917 * vh))\n move = -random.randint(int(vh // 5), int(vh // 4))\n self.helper.adb.touch_swipe2((origin_x, origin_y),\n (random.randint(-20, 20), move), random.randint(900, 1200))\n act_pos_map = self.get_all_act_pos(crop_flag)\n if act_name in act_pos_map:\n break\n if last_acts == act_pos_map.keys():\n raise RuntimeError(f'找不到相应活动: {act_name}')\n last_acts = act_pos_map.keys()\n logger.info(f'switch to {act_name}')\n self.click(act_pos_map[act_name], 1)\n self.tap_enter_activity()\n\n def tap_back(self):\n vw, vh = self.vw, self.vh\n self.helper.tap_rect((2.222 * vh, 1.944 * vh, 22.361 * vh, 8.333 * vh))\n time.sleep(0.5)\n\n def get_all_act_pos(self, crop=False):\n act_map = {}\n screen = self.screenshot()\n cv_screen = pil2cv(screen)\n for icon in [icon1, icon2]:\n act_map.update(self.get_act_pos_by_icon(cv_screen, icon, crop))\n logger.info(act_map)\n return act_map\n\n def get_act_pos_by_icon(self, cv_screen, icon, crop=False):\n vh, vw = self.vh, self.vw\n raw_screen = cv_screen.copy()\n if self.scale != 1:\n cv_screen = cv2.resize(cv_screen, (int(self.helper.viewport[0] / self.scale), 720))\n roi = crop_cv_by_rect(cv_screen, (0, 0, 10.000 * vh, 100.000 * vh))\n roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)\n result = cv2.matchTemplate(roi, icon, cv2.TM_CCOEFF_NORMED)\n loc = np.where(result >= 0.8)\n tag_set = set()\n tag_set2 = set()\n res = {}\n dbg_screen = raw_screen.copy()\n available_activity = get_available_activity()\n for pt in zip(*loc[::-1]):\n pos_key = (pt[0] // 100, pt[1] // 100)\n pos_key2 = (int(pt[0] / 100 + 0.5), int(pt[1] / 100 + 0.5))\n if pos_key in tag_set or pos_key2 in tag_set2:\n continue\n tag_set.add(pos_key)\n tag_set2.add(pos_key2)\n if icon1 is icon:\n x, y = (int(pt[0]) + 35, int(pt[1]) - 6)\n tw, th = map(self.apply_scale, (180, 40))\n else:\n x, y = (int(pt[0]) + 35, int(pt[1]) - 3)\n tw, th = map(self.apply_scale, (150, 30))\n l, t = map(self.apply_scale, (x, y))\n tag_img = raw_screen[t:t + th, l:l + tw]\n if crop:\n gray_tag = cv2.cvtColor(tag_img, cv2.COLOR_RGB2GRAY)\n tag_img = crop_image_only_outside(gray_tag, tag_img, 160)\n factor = 2.5 - self.scale\n if factor > 1:\n # print(factor)\n tag_img = cv2.resize(tag_img, (0, 0), fx=factor, fy=factor, interpolation=cv2.INTER_LINEAR)\n # show_img(tag_img)\n # conv-lite-fc has better accuracy, but it is slower than densenet-lite-fc.\n name = ocr_and_correct(tag_img, available_activity, model_name='densenet-lite-fc', log_level=logging.INFO)\n if name:\n res[name] = (int(l + 85 * self.scale), int(t + 20 * self.scale))\n cv2.rectangle(dbg_screen, (l, t), (l + tw, t + th), (255, 255, 0), 2)\n # show_img(dbg_screen)\n return res\n\n def tap_side_story(self):\n vh, vw = self.vh, self.vw\n logger.info('open side story view')\n self.helper.tap_rect((44.297 * vw, 88.611 * vh, 56.406 * vw, 98.750 * vh))\n time.sleep(1)\n\n def tap_branch_line(self):\n logger.info('open branch line view')\n vh, vw = self.vh, self.vw\n 
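# The icon search in get_act_pos_by_icon above boils down to: template-match,
# threshold the response map, then de-duplicate hits that fall into the same
# coarse grid cell. A stripped-down sketch of that idea follows; the 0.8
# threshold and the 100-pixel cell come from the code above, while `find_icon`
# itself is only illustrative.

import cv2          # already imported at the top of this module
import numpy as np  # already imported at the top of this module


def find_icon(screen_gray, icon_gray, threshold=0.8, cell=100):
    """Return one (x, y) hit per grid cell whose match score passes `threshold`."""
    result = cv2.matchTemplate(screen_gray, icon_gray, cv2.TM_CCOEFF_NORMED)
    seen, hits = set(), []
    for x, y in zip(*np.where(result >= threshold)[::-1]):
        key = (x // cell, y // cell)  # coarse dedup, as in get_act_pos_by_icon
        if key not in seen:
            seen.add(key)
            hits.append((int(x), int(y)))
    return hits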
self.helper.tap_rect((29.375 * vw, 88.611 * vh, 41.719 * vw, 98.750 * vh))\n time.sleep(1)\n\n def tap_enter_activity(self):\n logger.info('enter activity')\n vh, vw = self.vh, self.vw\n self.helper.tap_rect((100 * vw - 24.583 * vh, 69.167 * vh, 100 * vw - 8.750 * vh, 75.556 * vh))\n time.sleep(1)\n\n def open_terminal(self):\n self.helper.back_to_main()\n logger.info('open terminal')\n self.helper.tap_quadrilateral(imgreco.main.get_ballte_corners(self.screenshot()))\n time.sleep(1)\n\n\nif __name__ == '__main__':\n StartSpStageAddon().run('CB-10', 0, False)\n # StartSpStageAddon().get_all_act_pos()\n"},"path":{"kind":"string","value":"addons/start_sp_stage/__init__.py"},"size":{"kind":"number","value":8440,"string":"8,440"},"nl_text":{"kind":"string","value":"print(factor) show_img(tag_img) conv-lite-fc has better accuracy, but it is slower than densenet-lite-fc. show_img(dbg_screen) StartSpStageAddon().get_all_act_pos()"},"nl_size":{"kind":"number","value":164,"string":"164"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6649408340454102,"string":"0.664941"}}},{"rowIdx":7806,"cells":{"content":{"kind":"string","value":"import os\nimport sys\nsys.path.append(os.path.dirname(__file__))\n\nclass AbstractSystemMeter:\n \"\"\"Common system meter interface for all resource monitorings.\n\n For each system resource to monitor, a wrapper class will be written as subclass of this one. This way we have\n a common \"interface\" for all system resources to test.\n\n This approach is choosen since python has no real interfaces like Java or C-Sharp.\n \"\"\"\n\n def __init__(self, resource_name):\n self.resource_name = resource_name\n\n\n def measure(self, func):\n self._start()\n func()\n return self._stop()\n\n\n def _start(self):\n raise NotImplementedError(\"The method is not implemented yet.\")\n\n\n def _stop(self):\n raise NotImplementedError(\"The method is not implemented yet.\")\n"},"path":{"kind":"string","value":"measure/system/AbstractSystemMeter.py"},"size":{"kind":"number","value":800,"string":"800"},"nl_text":{"kind":"string","value":"Common system meter interface for all resource monitorings.\n\nFor each system resource to monitor, a wrapper class will be written as subclass of this one. 
This way we have\na common \"interface\" for all system resources to test.\n\nThis approach is choosen since python has no real interfaces like Java or C-Sharp."},"nl_size":{"kind":"number","value":310,"string":"310"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.9281718134880066,"string":"0.928172"}}},{"rowIdx":7807,"cells":{"content":{"kind":"string","value":"#!/usr/bin/python\n\nfrom __future__ import division\nimport sys\nimport math\nimport cmath\nimport numpy as np\nfrom numpy import genfromtxt\nimport csv\nfrom decimal import Decimal\nimport os\nimport random\nfrom lyrics import *\n\n# BEATLES: Bundle of Essential and Assistive Tools Library for Electronic Structure\n# A tribute to the Beatles\n#\n# Updated June 14, 2020 by Hassan Harb\n#\n# / | \\\n# / | \\\n# /O O | O O\\\n# //|\\ /|\\ /|\\ /|\\\\ \n# /=/ \\=/ \\= / \\=/ \\=\\\n# / == == == == == \\\n# / == == == == == \\ \n# (The original Beatles)\n# (ASCII retrieved from https://www.asciiart.eu/music/musicians/beatles )\n#\n#########################################################################\n#\n# NBasGrab: reads in a name of .fchk file\n# output: -Number of basis functions\n# -Charge\n# -Multiplicity\n# -Number of Atoms\n# -Cartesian Coordinates\n# -Atomic Symbols\n# -SCF Energy \n# -Total Energy (needs to be added)\n\n# Section 1: Reading from gaussian formatted checkpoint file\n\ndef NBasGrab(filename):\n NBasis = 0 \n NElem = 0\n SCFEnergy = 0.0\n Charge = 0\n Multiplicity = 0\n NAtoms = 0\n temp = 1\n with open(filename, 'r') as origin:\n for line in origin:\n if \"Number of basis functions\" in line:\n words = line.split()\n for i in words:\n for letter in i:\n if(letter.isdigit()):\n NBasis = NBasis*10 + int(letter)\n if \"Charge \" in line:\n words = line.split()\n for i in words:\n for letter in i:\n if(letter==\"-\"):\n temp = -1\n if(letter.isdigit()):\n Charge = Charge*10 + int(letter)\n Charge = Charge*temp\n if \"Multiplicity\" in line:\n words = line.split()\n for i in words:\n for letter in i:\n if(letter.isdigit()):\n Multiplicity = Multiplicity*10 + int(letter)\n if \"Number of atoms\" in line:\n words = line.split()\n for i in words:\n for letter in i:\n if(letter.isdigit()):\n NAtoms = NAtoms*10 + int(letter)\n\n if \"SCF Energy\" in line:\n words = line.split()\n# print \"SCF Energy = \", words[3], \" Hartree\"\n SCFEnergy = float(words[3])\n# print \"SCF Energy (float) = \", SCFEnergy\n\n# if \"Total Energy\" in line:\n# words = line.split()\n# TotalEnergy = float(words[3])\n# print \"Total Energy = \", TotalEnergy, \" Hartree\"\n\n NElem = NBasis*NBasis\n# print \"Number of Basis Functions (subroutine) = \", NBasis, \"\\n\"\n# print \"Charge (subroutine) = \", Charge, \"\\n\"\n return NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy\n\n# GeomGet: reads in the file name, number of atoms\n# Output: -One dimensional vector (NAtoms * 3) that includes the cartesian coordinates of each atom\n#\n \ndef GeomGet(filename,NAtoms):\n p = 0\n r = 0\n n = 1\n NElements = NAtoms * 3 \n RawCart = np.zeros(NElements)\n if (NElements%5 == 0):\n n = 0\n RawCartLines = int(NElements/5) + n\n# print \"Raw Cart lines = \", RawCartLines\n# print \"Number of Atoms =\", NAtoms\n# print \"Number of coordinates =\", NElements\n with open(filename,'r') as origin:\n for i, line in enumerate(origin):\n if \"Current cartesian coordinates\" in line:\n i = i + 1\n pointer = i\n# print \"Cartesian Coordinates starts at line :\", pointer\n endpointer = pointer + RawCartLines - 1\n# 
print \"Cartesian Coordinates ends at line :\", endpointer\n for m in range(0,endpointer - pointer +1):\n nextline = origin.next()\n nextline = nextline.split()\n for p in range(p,len(nextline)):\n RawCart[r] = nextline[p]\n r = r + 1\n p = 0\n# print \"Raw Cart (subroutine) = \", RawCart\n RawCart = RawCart/1.88973\n# print \"Raw Cart (converted to Angstroms) = \", RawCart\n return RawCart\n\n# GetAtoms: Reads in file name, number of atoms\n# output: -One dimensional vector (NAtoms) that contains the atomic numbers of the atoms \n#\n\ndef GetAtoms(filename1,NAtoms):\n p = 0\n r = 0\n n = 1\n AtomicNum = np.zeros(NAtoms)\n if (NAtoms%6 ==0):\n n = 0\n AtomLines = int(NAtoms/6) + n\n\n with open(filename1,'r') as origin:\n for i, line in enumerate(origin):\n if \"Atomic numbers\" in line:\n i = i + 1\n pointer = i\n endpointer = pointer + AtomLines -1\n for m in range(0, endpointer - pointer + 1):\n nextline = origin.next()\n nextline = nextline.split()\n for p in range(p,len(nextline)):\n AtomicNum[r] = nextline[p]\n r = r + 1\n p = 0\n return AtomicNum\n\n# MatGrab: Reads in filename, NBasis, user-defined switch\n# Output: -Alpha MO Coefficients (Done)\n# -Beta MO Coefficients (Done)\n# -Alpha Density Matrix (Done)\n# -Beta Density Matrix (Done)\n# -Alpha MO Energies (Done)\n# -Beta MO Energies (Done)\n#\n# Switch: 1 = Alpha MO Coefficients\n# -1 = Beta MO Coefficients\n# 2 = Alpha and Beta Density Matrices\n# 3 = Alpha MO Energies\n# -3 = Beta MO Energies\n#\ndef MatGrab(filename,NBasis,switch):\n if (switch == 1):\n filename1 = filename\n MOElements = NBasis * NBasis\n MOlines = int(MOElements/5) + 1\n if (NBasis%5 == 0):\n MOlines = MOlines - 1\n p = 0\n r = 0\n AOE = 0\n MOrawa = np.zeros(NBasis*NBasis)\n with open(filename1,'r') as origin:\n for i, line in enumerate(origin):\n if \"Alpha Orbital Energies\" in line:\n AOE = i\n if \"Alpha MO coefficients\" in line:\n i=i+1\n AMO=i\n# print \"Alpha MO coefficients starts at line :\", i\n j=i+MOlines-1\n# print \"Alpha MO coefficients ends at line :\", j\n for m in range(0,j-i+1):\n nextline = origin.next()\n nextline = nextline.split()\n for p in range(p,len(nextline)):\n MOrawa[r] = nextline[p]\n r = r+1\n p = 0\n# print \"MO Raw = \", MOrawa\n return MOrawa\n\n if (switch == -1):\n filename1 = filename\n MOElements = NBasis * NBasis\n MOlines = int(MOElements/5) + 1\n if (NBasis%5 == 0):\n MOlines = MOlines - 1\n p = 0\n r = 0\n BOE = 0\n BMO = 0\n MOrawb = np.zeros(NBasis*NBasis)\n with open(filename1,'r') as origin:\n for i, line in enumerate(origin):\n if \"Beta Orbital Energies\" in line:\n BOE = i\n if \"Beta MO coefficients\" in line:\n i=i+1\n BMO=i\n j=i+MOlines-1\n for m in range(0,j-i+1):\n nextline = origin.next()\n nextline = nextline.split()\n for p in range(p,len(nextline)):\n MOrawb[r] = nextline[p]\n r = r+1\n p = 0\n\n# print \"MO Raw = \", MOrawb\n return MOrawb\n\n if (switch == 2):\n filename1 = filename\n PElements = int(NBasis*(NBasis+1)/2)\n Plines = int(PElements/5) + 1\n TotalPraw = np.zeros(PElements)\n SpinPraw = np.zeros(PElements)\n\n with open(filename1,'r') as origin:\n for i, line in enumerate(origin):\n if \"Total SCF Density\" in line:\n i=i+1\n r = 0\n p = 0\n# print \"Total SCF Density starts at line :\", i\n j=i+Plines-1\n# print \"Total SCF Density ends at line :\", j\n for m in range(0,j-i+1):\n nextline = origin.next()\n nextline = nextline.split()\n for p in range(0,len(nextline)):\n if (r != PElements):\n TotalPraw[r] = nextline[p]\n r = r+1\n p = 0\n# HH + : Bug ... 
:(\n with open(filename1,'r') as origin:\n for i, line in enumerate(origin):\n if \"Spin SCF Density\" in line:\n# print \"Found Spin density!\"\n i=i+1\n r = 0\n p = 0\n# print \"Spin SCF Density starts at line: \", i\n j=i+Plines-1\n# print \"Spin SCF Density ends at line: \", j\n for m in range(0,j-i+1):\n nextline = origin.next()\n nextline = nextline.split()\n for p in range(p,len(nextline)):\n if (r != PElements):\n SpinPraw[r] = nextline[p]\n r = r+1\n p = 0\n# HH - : End of bug (hopefully!)\n\n PalphaRaw = (np.add(TotalPraw,SpinPraw)) * 0.5\n PbetaRaw = (np.subtract(TotalPraw,SpinPraw)) * 0.5\n Palpha = symmetrize(PalphaRaw)\n Pbeta = symmetrize(PbetaRaw) \n return Palpha, Pbeta\n\n if (switch == 3):\n filename1 = filename\n AlphaMO = np.zeros(NBasis)\n AlphaMOlines = int(NBasis/5) + 1\n if (NBasis % 5 == 0):\n AlphaMOlines = AlphaMOlines - 1\n with open(filename1,'r') as origin:\n for i, line in enumerate(origin):\n if \"Alpha Orbital Energies\" in line:\n i = i + 1\n r = 0\n p = 0\n# print \"Alpha MO Energies starts at line: \", i\n j = i + AlphaMOlines - 1\n# print \"Alpha MO Energies ends at line: \", j\n for m in range(0,j-i+1):\n nextline = origin.next()\n nextline = nextline.split()\n for p in range(p,len(nextline)):\n AlphaMO[r] = nextline[p]\n r = r + 1\n p = 0\n# print \"Alpha MO energies = \", AlphaMO\n return AlphaMO\n\n if (switch == -3):\n filename1 = filename\n BetaMO = np.zeros(NBasis)\n BetaMOlines = int(NBasis/5) + 1\n if (NBasis % 5 == 0):\n BetaMOlines = BetaMOlines - 1\n with open(filename1,'r') as origin:\n for i, line in enumerate(origin):\n if \"Beta Orbital Energies\" in line:\n i = i + 1\n r = 0\n p = 0\n# print \"Beta MO Energies starts at line: \", i\n j = i + BetaMOlines - 1\n# print \"Beta MO Energies ends at line: \", j\n for m in range(0,j-i+1):\n nextline = origin.next()\n nextline = nextline.split()\n for p in range(p,len(nextline)):\n BetaMO[r] = nextline[p]\n r = r + 1\n p = 0\n# print \"Beta MO energies = \", BetaMO\n return BetaMO\n\n# sci_notation: reads in a number\n# output: prints the number in the desired scientific notation. 
note that this function has a different output than the one found in nio.py\n#\ndef sci_notation(n):\n a = '%.8f' % n\n return '%.8f' % Decimal(n.real)\n\n# fchk_notation: reads in a number\n# output: prints the number in the desired notation for fchk files\n#\ndef fchk_notation(n):\n a = '%.8E' % n\n return '%.8E' % Decimal(n.real)\n\n# AtomicSymbol: Reads in atomic number of the element\n# Output: -Atomic Symbol\n# \n\ndef AtomicSymbol(AtomicNumber):\n p = AtomicNumber - 1\n PTlist = ['H','He','Li','Be','B','C','N','O','F','Ne','Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca','Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn','Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ah','Cd','In','Sn','Sb','Te','I','Xe','Cs','Ba','La','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb','Lu','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hb','Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th','Pa','U','Np','Pu','Am','Cm','Bk','Cf','Es','Fm','Md','No','Lr','Rf','Db','Sg','Bh','Hs','Mt','Ds','Rg','Cn','Uut','Fl','Uup','Lv','Uus','Uuo']\n# print \"There are currently \", len(PTlist), \" atoms defined\"\n return PTlist[p]\n\n# Symmetrize: Reads in a packed symmetric column matrix into NBasis x NBasis square matrix \n# Output: -Matrix(NBasis,NBasis)\n#\n\ndef symmetrize(a):\n Nbas = int((np.sqrt(8*len(a)+1)-1)/2)\n b = np.zeros((Nbas,Nbas))\n n = 0\n for i in range(0,Nbas):\n for j in range(0,i+1):\n b[i,j]=a[n]\n b[j,i]=a[n]\n n=n+1\n return b\n\n# Column2Square: Reads in a packed column matrix, number of basis functions.\n# Output: -Matrix(NBasis,NBasis)\n\ndef column2square(A,NBasis):\n C = np.zeros((NBasis,NBasis))\n t=0\n for i in range(0,NBasis):\n for j in range(0,NBasis):\n C[j,i]=float(A[t])\n t=t+1\n return C\n\n# GetOverlap: Reads in packed column matrix, number of basis functions.\n# Output: -Overlap Matrix (NBasis,NBasis)\n\ndef GetOverlap(A,NBasis):\n C = column2square(A,NBasis)\n CInv = np.linalg.inv(C)\n S = np.dot(np.transpose(CInv),CInv)\n return S \n\n# PrintSI: Reads in filename, user-defined switch\n# Output: -SCF Energy, Charge, Multiplicity, Geometry\n#\n# Switch: 1 = print to new file (filename1-SI.txt)\n# -1 = print to screen\n#\n\ndef PrintSI(filename1,switch):\n NBasis, NElementsGrab, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename1)\n AtomicNum = GetAtoms(filename1,NAtoms)\n RawCart = GeomGet(filename1,NAtoms)\n Cart = np.resize(RawCart,(NAtoms,3))\n filename2 = os.path.splitext(filename1)[0] + \"-SI.txt\"\n filename1 = os.path.splitext(filename1)[0] \n if (switch == 1):\n with open(filename2,'w') as f2:\n f2.write(\"SI info for \")\n f2.write(filename1)\n f2.write(\"\\n\\n\")\n f2.write(\"SCF Energy = \")\n f2.write(str(SCFEnergy))\n f2.write(\" Hartree\")\n f2.write(\"\\n\\n\")\n f2.write(str(Charge))\n f2.write(\" \")\n f2.write(str(Multiplicity))\n f2.write(\"\\n\")\n for i in range(0,NAtoms):\n h = i + 1\n z = AtomicNum[i]\n Atom = AtomicSymbol(int(z))\n f2.write(Atom)\n f2.write(\" \")\n for j in range(0,3):\n if (Cart[i,j] >= 0):\n f2.write(\" \")\n f2.write(str(sci_notation(Cart[i,j])))\n f2.write(\" \")\n f2.write(\"\\n\")\n f2.write(\" \")\n f2.write(\"\\n\\n\")\n return filename2\n if (switch == -1):\n print \"SCF Energy = \", SCFEnergy, \" Hartree\\n\"\n print \"Charge = \", Charge, \"\\n\"\n print \"Multiplicity = \", Multiplicity, \"\\n\"\n print \"Cartesian Geometry:\\n\"\n for i in range(0,NAtoms):\n h = i + 1\n z = AtomicNum[i]\n Atom = AtomicSymbol(int(z))\n print Atom, sci_notation(Cart[i,0]), 
sci_notation(Cart[i,1]), sci_notation(Cart[i,2])\n print \"\\n\" \n \n# CalcNO: Reads in filename, NBasis\n# Output: Natural Orbitals eigenvalues and eigenvectors (both alpha and beta)\n# \n\ndef CalcNO(filename,NBasis):\n Palpha, Pbeta = MatGrab(filename,NBasis,2) \n C = MatGrab(filename,NBasis,1)\n S = GetOverlap(C,NBasis)\n Svals, Svecs = np.linalg.eig(S)\n Sval_minhalf = (np.diag(Svals**(0.5)))\n Shalf = np.dot(Svecs,np.dot(Sval_minhalf,np.transpose(Svecs)))\n NOvalsA, NOvecsA = np.linalg.eig(np.dot(Shalf,np.dot(Shalf,Palpha)))\n NOvalsB, NOvecsB = np.linalg.eig(np.dot(Shalf,np.dot(Shalf,Pbeta))) \n NOvalsA = NOvalsA.real\n NOvalsB = NOvalsB.real \n NOvecsA = NOvecsA.real\n NOvecsB = NOvecsB.real\n NOvecsA = np.dot(np.linalg.inv(Shalf),NOvecsA)\n NOvecsB = np.dot(np.linalg.inv(Shalf),NOvecsB)\n return NOvecsA, NOvecsB, NOvalsA, NOvalsB\n\n# NElec: Reads in filename\n# Output: Total number of electrons, Alpha Electrons, Beta Electrons\n#\n\ndef NElec(filename):\n NElec = 0\n NAlpha = 0\n NBeta = 0\n with open(filename, 'r') as origin:\n for line in origin:\n if \"Number of electrons\" in line:\n words = line.split()\n for i in words:\n for letter in i:\n if(letter.isdigit()):\n NElec = NElec*10 + int(letter)\n if \"Number of alpha electrons\" in line:\n words = line.split()\n for i in words:\n for letter in i:\n if(letter.isdigit()):\n NAlpha = NAlpha*10 + int(letter)\n if \"Number of beta electrons\" in line:\n words = line.split()\n for i in words:\n for letter in i:\n if(letter.isdigit()):\n NBeta = NBeta*10 + int(letter)\n return NElec, NAlpha, NBeta\n\n# OrbTransform: Reads in Alpha Density Matrix, Beta Density Matrix, Overlap Matrix, n\n# Output: New Density Matrices: P' = S**(1-n).P.S**(n)\n#\n\ndef OrbTransform(Pa,Pb,S,n):\n Svals, Svecs = np.linalg.eig(S)\n Sval1 = np.diag(Svals**(n))\n Sval2 = np.diag(Svals**(1-n)) \n Sdag1 = np.dot(Svecs,np.dot(Sval1,np.transpose(Svecs)))\n Sdag2 = np.dot(Svecs,np.dot(Sval2,np.transpose(Svecs)))\n PdagAlpha = np.dot(Sdag1,np.dot(Pa,Sdag2))\n PdagBeta = np.dot(Sdag1,np.dot(Pb,Sdag2))\n# print \"OrbTransform Subroutine test:\\n\"\n# print \"PdagAlpha = \", PdagAlpha, \"\\n\"\n# print \"PdagBeta = \", PdagBeta, \"\\n\"\n OvalsA, OvecsA = np.linalg.eig(PdagAlpha)\n OvalsB, OvecsB = np.linalg.eig(PdagBeta) \n# print \"OVals A = \", OvalsA, \"\\n\"\n# print \"OVecs A = \", OvecsA, \"\\n\"\n# print \"OVals B = \", OvalsB, \"\\n\"\n# print \"OVecs B = \", OvecsB, \"\\n\"\n return PdagAlpha, PdagBeta, OvecsA, OvecsB, OvalsA, OvalsB\n\n# CartoZmat: Transforms Cartesian coordinates to z-matrix form\n# Input: NAtoms, RawCart, AtomicNum\n# Output: z-matrix printed on the screen\n#\n\n# Note that there are three other functions here, Dist, Angle, and Torsion. 
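For readers following CalcNO above: the natural occupation numbers are the eigenvalues of S^(1/2) P S^(1/2) (the code's Shalf.Shalf.P product is similar to that matrix and shares its spectrum). A tiny self-contained sketch with an invented 2x2 overlap and density:

import numpy as np

S = np.array([[1.0, 0.25], [0.25, 1.0]])   # hypothetical AO overlap
P = np.array([[1.2, 0.4], [0.4, 0.8]])     # hypothetical density matrix

svals, svecs = np.linalg.eigh(S)
s_half = svecs @ np.diag(np.sqrt(svals)) @ svecs.T
occ, no_coeff = np.linalg.eigh(s_half @ P @ s_half)
print("natural occupations:", occ)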
\n# They are used to calculate the appropriate parameters for the z-matrix\n# switch = 1 : print z-matrix to screen\n# switch = -1 : print z-matrix to new textfile \n\ndef DistAB(e1,e2):\n R = 0.0\n for i in range(len(e1)):\n R = R + (e1[i]-e2[i])**(2)\n R = R**(0.5) \n return R\n\ndef AngleABC(e1,e2,e3):\n eab_x = (e2[0] - e1[0]) / DistAB(e1,e2)\n eab_y = (e2[1] - e1[1]) / DistAB(e1,e2)\n eab_z = (e2[2] - e1[2]) / DistAB(e1,e2)\n\n ebc_x = - (e3[0] - e2[0]) / DistAB(e2,e3)\n ebc_y = - (e3[1] - e2[1]) / DistAB(e2,e3)\n ebc_z = - (e3[2] - e2[2]) / DistAB(e2,e3)\n\n eab = [eab_x, eab_y, eab_z]\n ebc = [ebc_x, ebc_y, ebc_z]\n \n cos_angle = np.dot(eab,ebc)\n angle = np.arccos(cos_angle) / 3.1415926535 * 180\n return eab, ebc, angle \n\ndef TorsionABCD(e1,e2,e3,e4):\n\n eab_x = (e2[0] - e1[0]) / DistAB(e1,e2)\n eab_y = (e2[1] - e1[1]) / DistAB(e1,e2)\n eab_z = (e2[2] - e1[2]) / DistAB(e1,e2) \n\n ebc_x = (e3[0] - e2[0]) / DistAB(e2,e3)\n ebc_y = (e3[1] - e2[1]) / DistAB(e2,e3)\n ebc_z = (e3[2] - e2[2]) / DistAB(e2,e3)\n\n ecd_x = (e4[0] - e3[0]) / DistAB(e3,e4)\n ecd_y = (e4[1] - e3[1]) / DistAB(e3,e4)\n ecd_z = (e4[2] - e3[2]) / DistAB(e3,e4)\n\n eab = [eab_x, eab_y, eab_z]\n ebc = [ebc_x, ebc_y, ebc_z]\n ecd = [ecd_x, ecd_y, ecd_z]\n\n n1 = np.cross(eab,ebc) / (np.linalg.norm(np.cross(eab,ebc))) \n n2 = np.cross(ebc,ecd) / (np.linalg.norm(np.cross(ebc,ecd)))\n\n u1 = n2\n u3 = ebc/np.linalg.norm(ebc)\n u2 = np.cross(u3,u1)\n\n cos_angle = np.dot(n1,n2)\n sin_angle = np.dot(n1,u2)\n \n angle = -math.atan2(sin_angle,cos_angle) / 3.1415926535 * 180\n return angle\n\ndef CartoZmat(RawCart,NAtoms,AtomicNum,filename2,switch):\n if (switch == 1):\n Cart = np.resize(RawCart,(NAtoms,3))\n# print \"Cartesian = \", Cart \n# print \"Atoms list = \", AtomicNum\n for i in range(len(AtomicNum)):\n Symbol = AtomicSymbol(int(AtomicNum[i]))\n if (i > 2):\n e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]\n e3 = [Cart[2,0],Cart[2,1],Cart[2,2]]\n e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]\n e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]\n R = DistAB(e4,e1)\n eab, ebc, A = AngleABC(e2,e1,e4)\n D = TorsionABCD(e4,e1,e2,e3)\n print Symbol, 1 , R , 2, A , 3, D \n elif (i > 1):\n e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]\n e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]\n e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]\n R = DistAB(e4,e1)\n eab, ebc, A = AngleABC(e2,e1,e4)\n print Symbol, 1 , R , 2, A\n elif (i > 0):\n e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]\n e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]\n R = DistAB(e4,e1)\n print Symbol, 1, R \n elif (i == 0):\n print Symbol\n elif (switch == -1):\n Cart = np.resize(RawCart,(NAtoms,3))\n #open new file\n filename = os.path.splitext(filename2)[0] + \"-zmat.txt\"\n with open(filename,'w') as f2:\n NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename2)\n f2.write(\"Z-Matrix file for \")\n f2.write(filename2)\n f2.write(\"\\n\\n\")\n f2.write(str(Charge))\n f2.write(\" \")\n f2.write(str(Multiplicity))\n f2.write(\"\\n\")\n for i in range(len(AtomicNum)):\n Symbol = AtomicSymbol(int(AtomicNum[i]))\n if (i > 2):\n e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]\n e3 = [Cart[2,0],Cart[2,1],Cart[2,2]]\n e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]\n e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]\n R = DistAB(e4,e1)\n eab, ebc, A = AngleABC(e2,e1,e4)\n D = TorsionABCD(e4,e1,e2,e3)\n f2.write(Symbol)\n f2.write(\" 1 \") \n f2.write(str(R)) \n f2.write(\" 2 \") \n f2.write( str(A)) \n f2.write(\" 3 \") \n f2.write(str(D))\n f2.write(\"\\n\") \n elif (i > 1):\n e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]\n e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]\n e1 = 
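DistAB, AngleABC and TorsionABCD above compute the bond length, bond angle and dihedral needed for the z-matrix. The same geometry helpers in compact NumPy form; the four-atom geometry is invented, and the dihedral sign convention may differ from TorsionABCD:

import numpy as np

def bond(a, b):
    return np.linalg.norm(np.subtract(a, b))

def angle(a, b, c):
    # angle at vertex b, in degrees
    u = np.subtract(a, b) / bond(a, b)
    v = np.subtract(c, b) / bond(c, b)
    return np.degrees(np.arccos(np.clip(np.dot(u, v), -1.0, 1.0)))

def dihedral(a, b, c, d):
    b1, b2, b3 = np.subtract(b, a), np.subtract(c, b), np.subtract(d, c)
    n1, n2 = np.cross(b1, b2), np.cross(b2, b3)
    m = np.cross(n1, b2 / np.linalg.norm(b2))
    return np.degrees(np.arctan2(np.dot(m, n2), np.dot(n1, n2)))

atoms = [(0.0, 0.0, 0.0), (1.5, 0.0, 0.0), (2.0, 1.4, 0.0), (3.4, 1.6, 0.7)]
print(bond(*atoms[:2]), angle(*atoms[:3]), dihedral(*atoms))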
[Cart[0,0],Cart[0,1],Cart[0,2]]\n R = DistAB(e4,e1)\n eab, ebc, A = AngleABC(e2,e1,e4)\n f2.write(str(Symbol)) \n f2.write(\" 1 \")\n f2.write (str(R)) \n f2.write(\" 2 \") \n f2.write(str(A)) \n f2.write(\"\\n\")\n elif (i > 0):\n e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]\n e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]\n R = DistAB(e4,e1)\n f2.write(Symbol) \n f2.write(\" 1 \")\n f2.write(str(R))\n f2.write(\"\\n\")\n elif (i == 0):\n f2.write(Symbol)\n f2.write(\"\\n\") \n# print \"test test\"\n\n# Section 2: Reading from gaussian matrix files\n\n# MatGrab2: Reads in matrices from gaussian matrix file\n# \n# Switch: 1 : Alpha Core Hamiltonian\n# -1 : Beta Core Hamiltonian\n# 2 : Alpha Fock Matrix\n# -2 : Beta Fock Matrix\n# 3 : Dipole matrix elements (x,y,z) [IN PROGRESS]\n\ndef MatGrab2(filename,NBasis,switch):\n print \"Reading from Matrix file\\n\"\n if (switch == 1):\n print \"Reading Alpha Core Hamiltonian Matrix:\\n\"\n NElements = int(NBasis*(NBasis + 1)/2)\n print \"Looking for \", NElements, \" elements of the core hamilonian\\n\"\n CoreHRawa = np.zeros(NElements)\n p = 0\n n = 0 \n r = 0\n with open(filename,'r') as origin:\n for i, line in enumerate(origin):\n if \"CORE HAMILTONIAN ALPHA\" in line :\n while (p < (NElements)):\n NLines = NBasis - 5*r\n if (NLines < 0):\n print \"Done Reading Core Hamolitonian\"\n j = i+3\n i = i + 4\n end = j + NLines - 1\n nextline = origin.next()\n for m in range(i,i+NLines):\n nextline = origin.next()\n words = nextline.split()\n for j in range(1,len(words)):\n CoreHRawa[p] = float(words[j].replace('D','E'))\n p = p + 1\n r = r + 1\n i = m - 2\n return CoreHRawa\n if (switch == -1):\n print \"Reading Beta Core Hamiltonian Matrix:\\n\"\n NElements = int(NBasis*(NBasis + 1)/2)\n print \"Looking for \", NElements, \" elements of the core hamilonian\\n\"\n CoreHRawb = np.zeros(NElements)\n p = 0\n n = 0\n r = 0\n with open(filename,'r') as origin:\n for i, line in enumerate(origin):\n if \"CORE HAMILTONIAN BETA\" in line :\n while (p < (NElements)):\n NLines = NBasis - 5*r\n if (NLines < 0):\n print \"Done Reading Core Hamolitonian\"\n j = i+3\n i = i + 4\n end = j + NLines - 1\n nextline = origin.next()\n for m in range(i,i+NLines):\n nextline = origin.next()\n words = nextline.split()\n for j in range(1,len(words)):\n CoreHRawb[p] = float(words[j].replace('D','E'))\n p = p + 1\n r = r + 1\n i = m - 2\n return CoreHRawb\n\n if (switch == 2):\n print \"Reading Alpha Fock Matrix:\\n\"\n NElements = int(NBasis*(NBasis + 1)/2)\n print \"Looking for \", NElements, \" elements of the fock matrix\\n\"\n FockRawA = np.zeros(NElements)\n p = 0\n n = 0\n r = 0\n with open(filename,'r') as origin:\n for i, line in enumerate(origin):\n if \"ALPHA FOCK MATRIX\" in line :\n while (p < (NElements)):\n NLines = NBasis - 5*r\n if (NLines < 0):\n print \"Done Reading fock matrix\"\n j = i+3\n i = i + 4\n end = j + NLines - 1\n nextline = origin.next()\n for m in range(i,i+NLines):\n nextline = origin.next()\n words = nextline.split()\n for j in range(1,len(words)):\n FockRawA[p] = float(words[j].replace('D','E'))\n p = p + 1\n r = r + 1\n i = m - 2\n return FockRawA\n\n if (switch == -2):\n print \"Reading Beta Fock Matrix:\\n\"\n NElements = int(NBasis*(NBasis + 1)/2)\n print \"Looking for \", NElements, \" elements of the fock matrix\\n\"\n FockRawB = np.zeros(NElements)\n p = 0\n n = 0\n r = 0\n with open(filename,'r') as origin:\n for i, line in enumerate(origin):\n if \"BETA FOCK MATRIX\" in line :\n while (p < (NElements)):\n NLines = NBasis - 5*r\n if (NLines < 0):\n 
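All of the matrix-file readers in MatGrab2 above hinge on one detail: Gaussian prints Fortran-style exponents such as 0.123D+01, which Python's float() rejects until 'D' is mapped to 'E'. A tiny helper with a made-up sample line:

def fortran_float(token):
    # "0.48196941D+01" -> 4.8196941
    return float(token.replace('D', 'E').replace('d', 'e'))

line = " 1  0.48196941D+01 -0.96820581D+00  0.00000000D+00"
values = [fortran_float(tok) for tok in line.split()[1:]]
print(values)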
print \"Done Reading fock matrix\"\n j = i+3\n i = i + 4\n end = j + NLines - 1\n nextline = origin.next()\n for m in range(i,i+NLines):\n nextline = origin.next()\n words = nextline.split()\n for j in range(1,len(words)):\n FockRawB[p] = float(words[j].replace('D','E'))\n p = p + 1\n r = r + 1\n i = m - 2\n return FockRawB\n\n if (switch == 3):\n# print \"Reading Dipole integrals, matrix x\\n\"\n NElements = int(NBasis*(NBasis +1)/2)\n# print \"Looking for \", NElements, \" elements of the Dipole integrals matrix x\\n\"\n DipX_Raw = np.zeros(NElements)\n p = 0\n n = 0\n r = 0\n with open(filename,'r') as origin:\n for i, line in enumerate(origin):\n if \" DIPOLE INTEGRALS, matrix 1\" in line:\n while (p < NElements):\n NLines = NBasis - 5*r\n if (NLines < 0):\n# print \"Done reading Dipole X matrix\\n\"\n j = i+3\n i = i + 4\n end = j + NLines -1\n nextline = origin.next()\n words = nextline.split()\n for m in range(i,i+NLines):\n nextline = origin.next()\n words = nextline.split()\n for j in range(1,len(words)):\n DipX_Raw[p] = float(words[j].replace('D','E'))\n p = p + 1\n r = r + 1\n i = m - 2\n# print \"Dip X raw = \", DipX_Raw\n\n# print \"Reading Dipole integrals, matrix y\\n\"\n NElements = int(NBasis*(NBasis +1)/2)\n print \"Looking for \", NElements, \" elements of the Dipole integrals matrix y\\n\"\n DipY_Raw = np.zeros(NElements)\n p = 0\n n = 0\n r = 0\n with open(filename,'r') as origin:\n for i, line in enumerate(origin):\n if \" DIPOLE INTEGRALS, matrix 2\" in line:\n while (p < NElements):\n NLines = NBasis - 5*r\n if (NLines < 0):\n# print \"Done reading Dipole Y matrix\\n\"\n j = i+3\n i = i + 4\n end = j + NLines -1\n nextline = origin.next()\n words = nextline.split()\n for m in range(i,i+NLines):\n nextline = origin.next()\n words = nextline.split()\n for j in range(1,len(words)):\n DipY_Raw[p] = float(words[j].replace('D','E'))\n p = p + 1\n r = r + 1\n i = m - 2\n# print \"Dip Y raw = \", DipY_Raw \n\n # print \"Looking for \", NElements, \" elements of the Dipole integrals matrix z\\n\"\n DipZ_Raw = np.zeros(NElements)\n p = 0\n n = 0\n r = 0\n with open(filename,'r') as origin:\n for i, line in enumerate(origin):\n if \" DIPOLE INTEGRALS, matrix 3\" in line:\n while (p < NElements):\n NLines = NBasis - 5*r\n if (NLines < 0):\n print \"Done reading Dipole Z matrix\\n\"\n j = i+3\n i = i + 4\n end = j + NLines -1\n nextline = origin.next()\n words = nextline.split()\n for m in range(i,i+NLines):\n nextline = origin.next()\n words = nextline.split()\n for j in range(1,len(words)):\n DipZ_Raw[p] = float(words[j].replace('D','E'))\n p = p + 1\n r = r + 1\n i = m - 2\n # print \"Dip Z raw = \", DipZ_Raw\n return symmetrizeMat(DipX_Raw), symmetrizeMat(DipY_Raw), symmetrizeMat(DipZ_Raw)\n\n\n\n\n\n# SymmetrizeMat: Reads in packed matrix (recovered from Matrix file) and prints out NBasis x NBasis matrix\n# Input: Packed lower triangular A\n# Output: N x N Matrix\n\ndef symmetrizeMat(a):\n NBasis = int((np.sqrt(8*len(a)+1)-1)/2)\n NewMat = np.zeros((NBasis,NBasis))\n NElements = len(a)\n t = 0\n l = 0\n start = 0\n loop = NBasis\n nBlock = int(NBasis/5)\n nRem = NBasis%5\n# print \"nBlock = \", nBlock\n# print \"nRem = \", nRem\n i = start\n j = start\n if (nBlock == 0):\n nBlock =1\n\n while (l < nBlock):\n# print \"retrieving block \", l\n for i in range (start,loop):\n for j in range(start,start+5):\n if (j<=i):\n# print \"i,j = \",i,j\n NewMat[i,j] = a[t]\n NewMat[j,i] = a[t]\n# print \"A[t]= \", a[t]\n t = t + 1\n start = start + 5\n l = l + 1\n# print \"t = \", t\n# 
print \"values of i and j after nBlock loop is over: \", i, j\n j = j + 1\n start = j\n# print \"NBasis - nRem = \", NBasis -nRem\n i = NBasis - nRem\n while (i < NBasis):\n j = start\n while (j <= i):\n# print \"i,j = \",i,j\n NewMat[i,j] = a[t]\n NewMat[j,i] = a[t]\n# print \"A[t]= \", a[t]\n t = t + 1\n j = j + 1\n i = i + 1\n# print \"final value of t = \", t\n return NewMat\n\n# ERIRead: reads in regular 2e integrals from formatted matrix file\n# Note that to get these integrals, use SCF=Conventional and int=NoRaff (saves integrals to disk and prints out regular 2e integrals)\n# Input: matrix filename\n# Output: 2D Matrix, two columns: Column 1 = compound index, Column 2 = integral value\n# \n# Two small functions are defined here: swap(a,b) and Fourindex(a,b,c,d)\n\n\ndef swap(a,b):\n return b,a\n\ndef Fourindex(a,b,c,d):\n a = int(a)\n b = int(b)\n c = int(c)\n d = int(d)\n if (a < b):\n a, b = swap(a,b)\n if (c < d):\n c, d = swap(c,d)\n e = int(a*(a+1)/2 + b)\n f = int(c*(c+1)/2 + d)\n if (e= 0):\n f2.write(\" \")\n f2.write(str(fchk_notation(e1[j].real)))\n if (counter%5 == 0):\n f2.write(\"\\n\")\n counter=0\n counter=counter+1\n counter =1 \n BOE = AOE + (int(NBasis/5)+2)\n if (NBasis%5 != 0):\n f2.write(\"\\n\")\n if (NBasis%5 == 0):\n BOE = BOE - 1 \n f2.write(data[BOE])\n for j in range(0,NBasis):\n f2.write(\" \")\n if (e2[j] >= 0):\n f2.write(\" \")\n f2.write(str(fchk_notation(e2[j].real)))\n if (counter%5 ==0):\n f2.write(\"\\n\")\n counter=0\n counter = counter+1\n counter =1\n AMO = BOE + (int(NBasis/5)+2)\n if (NBasis%5 != 0):\n f2.write(\"\\n\")\n if (NBasis%5 == 0):\n AMO = AMO - 1\n f2.write(data[AMO])\n for i in range(0,NBasis):\n for j in range(0,NBasis):\n f2.write(\" \")\n if (V1[j,i] >= 0):\n f2.write(\" \")\n f2.write(str(fchk_notation(V1[j,i].real)))\n if (counter%5 ==0):\n f2.write(\"\\n\")\n counter=0\n counter = counter + 1\n counter = 1\n BMO = AMO + (int(NBasis*NBasis/5))+2\n if (NBasis%5 != 0):\n f2.write(\"\\n\")\n if (NBasis%5 == 0):\n BMO = BMO - 1\n f2.write(data[BMO])\n for i in range(0,NBasis):\n for j in range(0,NBasis):\n f2.write(\" \")\n if (V2[j,i] >= 0):\n f2.write(\" \")\n f2.write(str(fchk_notation(V2[j,i].real)))\n if (counter%5 ==0):\n f2.write(\"\\n\")\n counter=0\n counter = counter + 1\n counter = 1\n if (NBasis%5 != 0):\n f2.write(\"\\n\")\n pointer = BMO + (int(NBasis*NBasis/5))+2\n while (pointer < len(data)):\n f2.write(data[pointer])\n pointer = pointer+1\n print \"Done.\" \n\n# OVMerge: Does the opposite of OVParse, merges back the Occ and Virt components of the MO Coefficient matrix\n# Input : A (Occ Matrix), B(Vir Matrix), Number of occupied orbitals, NBasis\n# \n# Output : V = Full MO Coefficient Matrix\n#\n# (this subroutine has the exact opposite functionality of OVParse)\n#\n\ndef OVMerge(A,B,NOcc,NBasis):\n V = np.zeros((NBasis,NBasis))\n for i in range(0,NOcc):\n V[:,i] = A[:,i]\n\n for j in range(NOcc,NBasis):\n V[:,j] = B[:,j-NOcc]\n\n return V\n\n# DistanceMatrix: Calculates distances between all atoms in a molecule\n# Input : fchk file name\n# \n# Output : Returns Distance Matrix and Atomic Symbol array.\n#\n# Unfinished part: generate and return a distance matrix (NAtoms x NAtoms) \n#\n\ndef DistanceMatrix(filename):\n NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename)\n Atomic_Numbers = GetAtoms(filename,NAtoms)\n Atomic_Symbol = [\"\"]*NAtoms\n for i in range(0,NAtoms):\n Atomic_Symbol[i] = AtomicSymbol(int(Atomic_Numbers[i]))\n RawCart = GeomGet(filename,NAtoms)\n Cart = 
np.resize(RawCart,(NAtoms,3))\n Distance_Matrix = np.zeros((NAtoms,NAtoms))\n for i in range(0,NAtoms):\n for j in range(i+1,NAtoms):\n e2 = [Cart[j,0],Cart[j,1],Cart[j,2]]\n e1 = [Cart[i,0],Cart[i,1],Cart[i,2]]\n Distance_Matrix[i,j] = np.around(DistAB(e1,e2),decimals=2)\n Distance_Matrix[j,i] = np.around(DistAB(e1,e2),decimals=2)\n return Distance_Matrix, Atomic_Symbol\n\n# PrintLyrics: A Function made just for fun, prints out a random quote from the Beatles songs\n# Input: None, but reads in the lyrics.py library file (partially complete)\n#\n# Output: None, prints lyrics.\n#\n \ndef PrintLyrics():\n n = random.randint(1,32)\n LyricsLibrary(n)\n\n# GetAtomicWeights: Grabs the \"real atomic weights\" from the fchk file\n# Input: filename, Number of Atoms\n# \n# Output: One dimensional array, AtomicWeight, of dimensions NAtoms.\n#\n\ndef GetAtomicWeights(filename1,NAtoms):\n p = 0\n r = 0\n n = 1\n AtomicWeight = np.zeros(NAtoms)\n if (NAtoms%5 ==0):\n n = 0\n AtomLines = int(NAtoms/5) + n\n\n with open(filename1,'r') as origin:\n for i, line in enumerate(origin):\n if \"Real atomic weights\" in line:\n i = i + 1\n pointer = i\n endpointer = pointer + AtomLines -1\n for m in range(0, endpointer - pointer + 1):\n nextline = origin.next()\n nextline = nextline.split()\n for p in range(p,len(nextline)):\n AtomicWeight[r] = nextline[p]\n r = r + 1\n p = 0\n AtomicWeight = np.around(AtomicWeight,decimals=3)\n return AtomicWeight\n\n\n# WriteMOsQChem: Subroutine that replaces the MO coefficients and orbital energies in a fchk file (QChem Version)\n# Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbtial energies alpha, orbital energies beta, number of basis functions \n# \n# Output: None. New file will be generated (filename3) that has the new Orbital coefficients and energies\n#\n\ndef WriteMOsQChem(filename1,filename3,V1,V2,e1,e2,NBasis):\n\n MOlines = int(len(V1)/5) + 1\n p = 0\n r = 0\n with open(filename1,'r') as origin:\n for i, line in enumerate(origin):\n if \"Alpha Orbital Energies\" in line:\n AOE = i+1\n AOE_header = line\n if \"Alpha MO coefficients\" in line:\n AMO = i+1\n AMO_header = line\n if \"Beta Orbital Energies\" in line:\n BOE = i+1\n BOE_header = line\n if \"Beta MO coefficients\" in line:\n BMO = i+1\n BMO_header = line\n\n pointer=0\n counter=1\n\n Start_point = min(AMO,BMO,AOE,BOE)\n\n with open(filename1,'r') as origin:\n data = origin.readlines()\n with open(filename3,'w') as f2:\n \n print \"Writing results to new output file: \", filename3, \" ... 
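DistanceMatrix() above fills the pairwise distance table with an explicit double loop; the same result can be computed in one vectorized step. The coordinates below are an invented water-like geometry in Angstroms:

import numpy as np

cart = np.array([[0.000,  0.000,  0.117],
                 [0.000,  0.757, -0.469],
                 [0.000, -0.757, -0.469]])
diff = cart[:, None, :] - cart[None, :, :]
dist = np.around(np.sqrt((diff ** 2).sum(axis=-1)), decimals=2)
print(dist)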
\"\n \n while (pointer < Start_point-1):\n f2.write(data[pointer])\n pointer = pointer+1\n print \"pointer at line = \", pointer\n f2.write(AOE_header)\n for j in range(0,NBasis):\n f2.write(\" \")\n if (e1[j] >= 0):\n f2.write(\" \")\n f2.write(str(fchk_notation(e1[j].real)))\n if (counter%5 == 0):\n f2.write(\"\\n\")\n counter=0\n counter=counter+1\n counter =1 \n BOE = AOE + (int(NBasis/5)+2)\n if (NBasis%5 != 0):\n f2.write(\"\\n\")\n if (NBasis%5 == 0):\n BOE = BOE - 1 \n f2.write(BOE_header)\n# f2.write(\"Beta Orbital Energies\\n\")\n for j in range(0,NBasis):\n f2.write(\" \")\n if (e2[j] >= 0):\n f2.write(\" \")\n f2.write(str(fchk_notation(e2[j].real)))\n if (counter%5 ==0):\n f2.write(\"\\n\")\n counter=0\n counter = counter+1\n counter =1\n AMO = BOE + (int(NBasis/5)+2)\n if (NBasis%5 != 0):\n f2.write(\"\\n\")\n if (NBasis%5 == 0):\n AMO = AMO - 1\n# f2.write(\"Alpha MO coefficients\\n\")\n f2.write(AMO_header)\n for i in range(0,NBasis):\n for j in range(0,NBasis):\n f2.write(\" \")\n if (V1[j,i] >= 0):\n f2.write(\" \")\n f2.write(str(fchk_notation(V1[j,i].real)))\n if (counter%5 ==0):\n f2.write(\"\\n\")\n counter=0\n counter = counter + 1\n counter = 1\n BMO = AMO + (int(NBasis*NBasis/5))+2\n if (NBasis%5 != 0):\n f2.write(\"\\n\")\n if (NBasis%5 == 0):\n BMO = BMO - 1\n# f2.write(\"Beta MO Coefficients\\n\")\n f2.write(BMO_header)\n# f2.write(data[BMO])\n for i in range(0,NBasis):\n for j in range(0,NBasis):\n f2.write(\" \")\n if (V2[j,i] >= 0):\n f2.write(\" \")\n f2.write(str(fchk_notation(V2[j,i].real)))\n if (counter%5 ==0):\n f2.write(\"\\n\")\n counter=0\n counter = counter + 1\n counter = 1\n if (NBasis%5 != 0):\n f2.write(\"\\n\")\n pointer = BMO + (int(NBasis*NBasis/5))+2\n# while (pointer < len(data)):\n# f2.write(data[pointer])\n# pointer = pointer+1\n print \"Done.\" \n\n# ContractMat: Subroutine that reads in two square matrices (NBasis x NBasis) and returns their contraction (scalar)\n# Input: Matrices A and B (dimensions: NBasis x NBasis), NBasis\n#\n# Output: scalar m = Sum_(mu,nu) A_(mu,nu) * B_(mu,nu)\n#\n\ndef ContractMat(A,B,NBasis):\n value = 0.0\n for i in range(0,NBasis):\n for j in range(0,NBasis):\n value = value + A[i,j]*B[i,j]\n return value\n\n# Work in progress: Basis set reader:\n\ndef ReadBasisSet(filename):\n NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename)\n print \"Number of Basis functions =\", NBasis\n print \"Number of atoms =\", NAtoms\n Atomic_Numbers = GetAtoms(filename,NAtoms)\n print \"Atomic Numbers =\", Atomic_Numbers\n Atomic_Symbol = [\"\"]*NAtoms\n for i in range(0,NAtoms):\n Atomic_Symbol[i] = AtomicSymbol(int(Atomic_Numbers[i]))\n print \"Atomic Symbols =\", Atomic_Symbol\n\n\n"},"path":{"kind":"string","value":"BEATLES.py"},"size":{"kind":"number","value":49185,"string":"49,185"},"nl_text":{"kind":"string","value":"!/usr/bin/python BEATLES: Bundle of Essential and Assistive Tools Library for Electronic Structure A tribute to the Beatles Updated June 14, 2020 by Hassan Harb / | \\ / | \\ /O O | O O\\ //|\\ /|\\ /|\\ /|\\\\ /=/ \\=/ \\= / \\=/ \\=\\ / == == == == == \\ / == == == == == \\ (The original Beatles) (ASCII retrieved from https://www.asciiart.eu/music/musicians/beatles ) NBasGrab: reads in a name of .fchk file output: -Number of basis functions -Charge -Multiplicity -Number of Atoms -Cartesian Coordinates -Atomic Symbols -SCF Energy -Total Energy (needs to be added) Section 1: Reading from gaussian formatted checkpoint file print \"SCF Energy = \", words[3], \" Hartree\" print \"SCF 
Energy (float) = \", SCFEnergy if \"Total Energy\" in line: words = line.split() TotalEnergy = float(words[3]) print \"Total Energy = \", TotalEnergy, \" Hartree\" print \"Number of Basis Functions (subroutine) = \", NBasis, \"\\n\" print \"Charge (subroutine) = \", Charge, \"\\n\" GeomGet: reads in the file name, number of atoms Output: -One dimensional vector (NAtoms * 3) that includes the cartesian coordinates of each atom print \"Raw Cart lines = \", RawCartLines print \"Number of Atoms =\", NAtoms print \"Number of coordinates =\", NElements print \"Cartesian Coordinates starts at line :\", pointer print \"Cartesian Coordinates ends at line :\", endpointer print \"Raw Cart (subroutine) = \", RawCart print \"Raw Cart (converted to Angstroms) = \", RawCart GetAtoms: Reads in file name, number of atoms output: -One dimensional vector (NAtoms) that contains the atomic numbers of the atoms MatGrab: Reads in filename, NBasis, user-defined switch Output: -Alpha MO Coefficients (Done) -Beta MO Coefficients (Done) -Alpha Density Matrix (Done) -Beta Density Matrix (Done) -Alpha MO Energies (Done) -Beta MO Energies (Done) Switch: 1 = Alpha MO Coefficients -1 = Beta MO Coefficients 2 = Alpha and Beta Density Matrices 3 = Alpha MO Energies -3 = Beta MO Energies print \"Alpha MO coefficients starts at line :\", i print \"Alpha MO coefficients ends at line :\", j print \"MO Raw = \", MOrawa print \"MO Raw = \", MOrawb print \"Total SCF Density starts at line :\", i print \"Total SCF Density ends at line :\", j HH + : Bug ... :( print \"Found Spin density!\" print \"Spin SCF Density starts at line: \", i print \"Spin SCF Density ends at line: \", j HH - : End of bug (hopefully!) print \"Alpha MO Energies starts at line: \", i print \"Alpha MO Energies ends at line: \", j print \"Alpha MO energies = \", AlphaMO print \"Beta MO Energies starts at line: \", i print \"Beta MO Energies ends at line: \", j print \"Beta MO energies = \", BetaMO sci_notation: reads in a number output: prints the number in the desired scientific notation. note that this function has a different output than the one found in nio.py fchk_notation: reads in a number output: prints the number in the desired notation for fchk files AtomicSymbol: Reads in atomic number of the element Output: -Atomic Symbol print \"There are currently \", len(PTlist), \" atoms defined\" Symmetrize: Reads in a packed symmetric column matrix into NBasis x NBasis square matrix Output: -Matrix(NBasis,NBasis) Column2Square: Reads in a packed column matrix, number of basis functions. Output: -Matrix(NBasis,NBasis) GetOverlap: Reads in packed column matrix, number of basis functions. 
Output: -Overlap Matrix (NBasis,NBasis) PrintSI: Reads in filename, user-defined switch Output: -SCF Energy, Charge, Multiplicity, Geometry Switch: 1 = print to new file (filename1-SI.txt) -1 = print to screen CalcNO: Reads in filename, NBasis Output: Natural Orbitals eigenvalues and eigenvectors (both alpha and beta) NElec: Reads in filename Output: Total number of electrons, Alpha Electrons, Beta Electrons OrbTransform: Reads in Alpha Density Matrix, Beta Density Matrix, Overlap Matrix, n Output: New Density Matrices: P' = S**(1-n).P.S**(n) print \"OrbTransform Subroutine test:\\n\" print \"PdagAlpha = \", PdagAlpha, \"\\n\" print \"PdagBeta = \", PdagBeta, \"\\n\" print \"OVals A = \", OvalsA, \"\\n\" print \"OVecs A = \", OvecsA, \"\\n\" print \"OVals B = \", OvalsB, \"\\n\" print \"OVecs B = \", OvecsB, \"\\n\" CartoZmat: Transforms Cartesian coordinates to z-matrix form Input: NAtoms, RawCart, AtomicNum Output: z-matrix printed on the screen Note that there are three other functions here, Dist, Angle, and Torsion. They are used to calculate the appropriate parameters for the z-matrix switch = 1 : print z-matrix to screen switch = -1 : print z-matrix to new textfile print \"Cartesian = \", Cart print \"Atoms list = \", AtomicNumopen new file print \"test test\" Section 2: Reading from gaussian matrix files MatGrab2: Reads in matrices from gaussian matrix file Switch: 1 : Alpha Core Hamiltonian -1 : Beta Core Hamiltonian 2 : Alpha Fock Matrix -2 : Beta Fock Matrix 3 : Dipole matrix elements (x,y,z) [IN PROGRESS] print \"Reading Dipole integrals, matrix x\\n\" print \"Looking for \", NElements, \" elements of the Dipole integrals matrix x\\n\" print \"Done reading Dipole X matrix\\n\" print \"Dip X raw = \", DipX_Raw print \"Reading Dipole integrals, matrix y\\n\" print \"Done reading Dipole Y matrix\\n\" print \"Dip Y raw = \", DipY_Raw print \"Looking for \", NElements, \" elements of the Dipole integrals matrix z\\n\" print \"Dip Z raw = \", DipZ_Raw SymmetrizeMat: Reads in packed matrix (recovered from Matrix file) and prints out NBasis x NBasis matrix Input: Packed lower triangular A Output: N x N Matrix print \"nBlock = \", nBlock print \"nRem = \", nRem print \"retrieving block \", l print \"i,j = \",i,j print \"A[t]= \", a[t] print \"t = \", t print \"values of i and j after nBlock loop is over: \", i, j print \"NBasis - nRem = \", NBasis -nRem print \"i,j = \",i,j print \"A[t]= \", a[t] print \"final value of t = \", t ERIRead: reads in regular 2e integrals from formatted matrix file Note that to get these integrals, use SCF=Conventional and int=NoRaff (saves integrals to disk and prints out regular 2e integrals) Input: matrix filename Output: 2D Matrix, two columns: Column 1 = compound index, Column 2 = integral value Two small functions are defined here: swap(a,b) and Fourindex(a,b,c,d) print \"(\",int(eri_raw[p,0]),int(eri_raw[p,1]),\"|\",int(eri_raw[p,2]),int(eri_raw[p,3]),\") = \", eri_raw[p,4] print \"ERI RAW = \", eri_raw print \"mu nu lambda sigma = \", int(eri_compact[i,0]), \", int = \", eri_compact[i,1], \"One D array Value =\", eri_array[eri_compact[i,0]] OVParse breaks down the MO coefficient matrix (NBasis x NBasis) into an occupied (NBasis x NOcc) and a virtual (NBasis x (Nbasis-NOcc)) matrices Input: A: MO Coefficient (NBasis x NBasis) NBasis NOcc = number of electrons Output: A_Occ: rectangular NBasis x NOcc matrix: Columns of occupied MOs A_Virt: rectangular NBasis x (NBasis - NOcc) matrix: Columns of virtual MOs Note TO SELF: Needs to be tested more, was only 
tested on H2 and V jobs. Biorthog: Calculates the overlap between two sets of MO Coefficients, prints out the final value of the overlap Input: A, B: MO Coefficients, can either be full or parsed (using OVParse subroutine) S: AO overlap matrix Output: the final value of the overlap Option: switch: 1 : print all relevant matrices -1 : Dont print any matrices eqn numbers based on personal notes eq. 1 eq. 2 PickColumn: Subroutine that selects a specific column from a two dimensional matrix (NBasis,NBasis), outputs an array (NBasis,1) Input: A: Two dimensional matrix NBasis: Number of basis functions for A i: the position of the column to be selected Output: One dimensional array (NBasis,1) that is the i-th column of matrix A WriteMOs: Subroutine that replaces the MO coefficients and orbital energies in a fchk file Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbtial energies alpha, orbital energies beta, number of basis functions Output: None. New file will be generated (filename3) that has the new Orbital coefficients and energies OVMerge: Does the opposite of OVParse, merges back the Occ and Virt components of the MO Coefficient matrix Input : A (Occ Matrix), B(Vir Matrix), Number of occupied orbitals, NBasis Output : V = Full MO Coefficient Matrix (this subroutine has the exact opposite functionality of OVParse) DistanceMatrix: Calculates distances between all atoms in a molecule Input : fchk file name Output : Returns Distance Matrix and Atomic Symbol array. Unfinished part: generate and return a distance matrix (NAtoms x NAtoms) PrintLyrics: A Function made just for fun, prints out a random quote from the Beatles songs Input: None, but reads in the lyrics.py library file (partially complete) Output: None, prints lyrics. GetAtomicWeights: Grabs the \"real atomic weights\" from the fchk file Input: filename, Number of Atoms Output: One dimensional array, AtomicWeight, of dimensions NAtoms. WriteMOsQChem: Subroutine that replaces the MO coefficients and orbital energies in a fchk file (QChem Version) Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbtial energies alpha, orbital energies beta, number of basis functions Output: None. 
New file will be generated (filename3) that has the new Orbital coefficients and energies f2.write(\"Beta Orbital Energies\\n\") f2.write(\"Alpha MO coefficients\\n\") f2.write(\"Beta MO Coefficients\\n\") f2.write(data[BMO]) while (pointer < len(data)): f2.write(data[pointer]) pointer = pointer+1 ContractMat: Subroutine that reads in two square matrices (NBasis x NBasis) and returns their contraction (scalar) Input: Matrices A and B (dimensions: NBasis x NBasis), NBasis Output: scalar m = Sum_(mu,nu) A_(mu,nu) * B_(mu,nu) Work in progress: Basis set reader:"},"nl_size":{"kind":"number","value":10618,"string":"10,618"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7499576807022095,"string":"0.749958"}}},{"rowIdx":7808,"cells":{"content":{"kind":"string","value":"\"\"\"Utility functions related to file operations.\"\"\"\nimport copy\nimport logging\nimport os\nimport subprocess\nimport sys\nfrom argparse import Namespace\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Set, Union\n\n# import wcmatch\nimport wcmatch.pathlib\nfrom wcmatch.wcmatch import RECURSIVE, WcMatch\n\nfrom ansiblelint.config import BASE_KINDS, options\nfrom ansiblelint.constants import FileType\n\nif TYPE_CHECKING:\n # https://github.com/PyCQA/pylint/issues/3979\n BasePathLike = os.PathLike[Any] # pylint: disable=unsubscriptable-object\nelse:\n BasePathLike = os.PathLike\n\n_logger = logging.getLogger(__package__)\n\n\ndef normpath(path: Union[str, BasePathLike]) -> str:\n \"\"\"\n Normalize a path in order to provide a more consistent output.\n\n Currently it generates a relative path but in the future we may want to\n make this user configurable.\n \"\"\"\n # conversion to string in order to allow receiving non string objects\n relpath = os.path.relpath(str(path))\n abspath = os.path.abspath(str(path))\n # we avoid returning relative paths that endup at root level\n if abspath in relpath:\n return abspath\n return relpath\n\n\n@contextmanager\ndef cwd(path: Union[str, BasePathLike]) -> Iterator[None]:\n \"\"\"Context manager for temporary changing current working directory.\"\"\"\n old_pwd = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(old_pwd)\n\n\ndef expand_path_vars(path: str) -> str:\n \"\"\"Expand the environment or ~ variables in a path string.\"\"\"\n # It may be possible for function to be called with a Path object\n path = str(path).strip()\n path = os.path.expanduser(path)\n path = os.path.expandvars(path)\n return path\n\n\ndef expand_paths_vars(paths: List[str]) -> List[str]:\n \"\"\"Expand the environment or ~ variables in a list.\"\"\"\n paths = [expand_path_vars(p) for p in paths]\n return paths\n\n\ndef kind_from_path(path: Path, base: bool = False) -> FileType:\n \"\"\"Determine the file kind based on its name.\n\n When called with base=True, it will return the base file type instead\n of the explicit one. 
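The cwd() helper in this file is the usual contextmanager pattern for temporarily changing the working directory and always restoring it, even if the body raises. A short usage sketch (the /tmp path is just an example):

import os
from contextlib import contextmanager

@contextmanager
def cwd(path):
    old = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old)          # restored on success and on error alike

with cwd("/tmp"):
    print("inside:", os.getcwd())
print("after:", os.getcwd())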
That is expected to return 'yaml' for any yaml files.\n \"\"\"\n # pathlib.Path.match patterns are very limited, they do not support *a*.yml\n # glob.glob supports **/foo.yml but not multiple extensions\n pathex = wcmatch.pathlib.PurePath(path.absolute().resolve())\n kinds = options.kinds if not base else BASE_KINDS\n for entry in kinds:\n for k, v in entry.items():\n if pathex.globmatch(\n v,\n flags=(\n wcmatch.pathlib.GLOBSTAR\n | wcmatch.pathlib.BRACE\n | wcmatch.pathlib.DOTGLOB\n ),\n ):\n return str(k) # type: ignore\n\n if base:\n # Unknown base file type is default\n return \"\"\n\n if path.is_dir():\n return \"role\"\n\n if str(path) == '/dev/stdin':\n return \"playbook\"\n\n # Unknown file types report a empty string (evaluated as False)\n return \"\"\n\n\nclass Lintable:\n \"\"\"Defines a file/folder that can be linted.\n\n Providing file content when creating the object allow creation of in-memory\n instances that do not need files to be present on disk.\n \"\"\"\n\n def __init__(\n self,\n name: Union[str, Path],\n content: Optional[str] = None,\n kind: Optional[FileType] = None,\n ):\n \"\"\"Create a Lintable instance.\"\"\"\n # Filename is effective file on disk, for stdin is a namedtempfile\n self.filename: str = str(name)\n self.dir: str = \"\"\n self.kind: Optional[FileType] = None\n\n if isinstance(name, str):\n self.name = normpath(name)\n self.path = Path(self.name)\n else:\n self.name = str(name)\n self.path = name\n self._content = content\n\n # if the lintable is part of a role, we save role folder name\n self.role = \"\"\n parts = self.path.parent.parts\n if 'roles' in parts:\n role = self.path\n while role.parent.name != \"roles\" and role.name:\n role = role.parent\n if role.exists:\n self.role = role.name\n\n if str(self.path) in ['/dev/stdin', '-']:\n # pylint: disable=consider-using-with\n self.file = NamedTemporaryFile(mode=\"w+\", suffix=\"playbook.yml\")\n self.filename = self.file.name\n self._content = sys.stdin.read()\n self.file.write(self._content)\n self.file.flush()\n self.path = Path(self.file.name)\n self.name = 'stdin'\n self.kind = 'playbook'\n self.dir = '/'\n else:\n self.kind = kind or kind_from_path(self.path)\n # We store absolute directory in dir\n if not self.dir:\n if self.kind == \"role\":\n self.dir = str(self.path.resolve())\n else:\n self.dir = str(self.path.parent.resolve())\n\n # determine base file kind (yaml, xml, ini, ...)\n self.base_kind = kind_from_path(self.path, base=True)\n\n def __getitem__(self, key: Any) -> Any:\n \"\"\"Provide compatibility subscriptable support.\"\"\"\n if key == 'path':\n return str(self.path)\n if key == 'type':\n return str(self.kind)\n raise NotImplementedError()\n\n def get(self, key: Any, default: Any = None) -> Any:\n \"\"\"Provide compatibility subscriptable support.\"\"\"\n try:\n return self.__getitem__(key)\n except NotImplementedError:\n return default\n\n @property\n def content(self) -> str:\n \"\"\"Retried file content, from internal cache or disk.\"\"\"\n if self._content is None:\n with open(self.path, mode='r', encoding='utf-8') as f:\n self._content = f.read()\n return self._content\n\n def __hash__(self) -> int:\n \"\"\"Return a hash value of the lintables.\"\"\"\n return hash((self.name, self.kind))\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Identify whether the other object represents the same rule match.\"\"\"\n if isinstance(other, Lintable):\n return bool(self.name == other.name and self.kind == other.kind)\n return False\n\n def __repr__(self) -> str:\n \"\"\"Return 
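kind_from_path() above matches the path against configured glob patterns via wcmatch's globmatch with the GLOBSTAR, BRACE and DOTGLOB flags. A self-contained illustration with a hypothetical kind table; the real patterns come from ansible-lint's configuration, not from here:

import wcmatch.pathlib as wcpath

KINDS = [{"playbook": "**/playbooks/*.{yml,yaml}"},
         {"tasks": "**/tasks/**/*.{yml,yaml}"}]          # hypothetical patterns
FLAGS = wcpath.GLOBSTAR | wcpath.BRACE | wcpath.DOTGLOB

def kind_of(path):
    p = wcpath.PurePath(path)
    for entry in KINDS:
        for kind, pattern in entry.items():
            if p.globmatch(pattern, flags=FLAGS):
                return kind
    return ""

print(kind_of("site/playbooks/deploy.yml"))      # -> playbook
print(kind_of("roles/web/tasks/sub/main.yml"))   # -> tasks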
user friendly representation of a lintable.\"\"\"\n return f\"{self.name} ({self.kind})\"\n\n\ndef discover_lintables(options: Namespace) -> Dict[str, Any]:\n \"\"\"Find all files that we know how to lint.\"\"\"\n # git is preferred as it also considers .gitignore\n git_command = ['git', 'ls-files', '-z']\n out = None\n\n try:\n out = subprocess.check_output(\n git_command, stderr=subprocess.STDOUT, universal_newlines=True\n ).split(\"\\x00\")[:-1]\n _logger.info(\"Discovered files to lint using: %s\", ' '.join(git_command))\n except subprocess.CalledProcessError as exc:\n if not (exc.returncode == 128 and 'fatal: not a git repository' in exc.output):\n _logger.warning(\n \"Failed to discover lintable files using git: %s\",\n exc.output.rstrip('\\n'),\n )\n except FileNotFoundError as exc:\n if options.verbosity:\n _logger.warning(\"Failed to locate command: %s\", exc)\n\n if out is None:\n exclude_pattern = \"|\".join(options.exclude_paths)\n _logger.info(\"Looking up for files, excluding %s ...\", exclude_pattern)\n out = WcMatch('.', exclude_pattern=exclude_pattern, flags=RECURSIVE).match()\n\n return OrderedDict.fromkeys(sorted(out))\n\n\ndef guess_project_dir() -> str:\n \"\"\"Return detected project dir or user home directory.\"\"\"\n try:\n result = subprocess.run(\n [\"git\", \"rev-parse\", \"--show-toplevel\"],\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=False,\n )\n except FileNotFoundError:\n # if git is absent we use home directory\n return str(Path.home())\n\n if result.returncode != 0:\n return str(Path.home())\n\n return result.stdout.splitlines()[0]\n\n\ndef expand_dirs_in_lintables(lintables: Set[Lintable]) -> None:\n \"\"\"Return all recognized lintables within given directory.\"\"\"\n should_expand = False\n\n for item in lintables:\n if item.path.is_dir():\n should_expand = True\n break\n\n if should_expand:\n # this relies on git and we do not want to call unless needed\n all_files = discover_lintables(options)\n\n for item in copy.copy(lintables):\n if item.path.is_dir():\n for filename in all_files:\n if filename.startswith(str(item.path)):\n lintables.add(Lintable(filename))\n"},"path":{"kind":"string","value":"src/ansiblelint/file_utils.py"},"size":{"kind":"number","value":8968,"string":"8,968"},"nl_text":{"kind":"string","value":"Defines a file/folder that can be linted.\n\nProviding file content when creating the object allow creation of in-memory\ninstances that do not need files to be present on disk.\nIdentify whether the other object represents the same rule match.\nProvide compatibility subscriptable support.\nReturn a hash value of the lintables.\nCreate a Lintable instance.\nReturn user friendly representation of a lintable.\nRetried file content, from internal cache or disk.\nContext manager for temporary changing current working directory.\nFind all files that we know how to lint.\nReturn all recognized lintables within given directory.\nExpand the environment or ~ variables in a path string.\nExpand the environment or ~ variables in a list.\nProvide compatibility subscriptable support.\nReturn detected project dir or user home directory.\nDetermine the file kind based on its name.\n\nWhen called with base=True, it will return the base file type instead\nof the explicit one. 
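discover_lintables() above prefers `git ls-files -z` so that .gitignore is honoured, and falls back to a filesystem walk when git is absent or the directory is not a repository. A stripped-down sketch of that preference:

import subprocess
from collections import OrderedDict

def tracked_files():
    try:
        out = subprocess.check_output(
            ["git", "ls-files", "-z"],
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        return None  # caller falls back to a plain directory walk
    return OrderedDict.fromkeys(sorted(out.split("\x00")[:-1]))

files = tracked_files()
print(list(files)[:5] if files is not None else "not a git checkout")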
That is expected to return 'yaml' for any yaml files.\nNormalize a path in order to provide a more consistent output.\n\nCurrently it generates a relative path but in the future we may want to\nmake this user configurable.\nUtility functions related to file operations.\n\n import wcmatch https://github.com/PyCQA/pylint/issues/3979 pylint: disable=unsubscriptable-object conversion to string in order to allow receiving non string objects we avoid returning relative paths that endup at root level It may be possible for function to be called with a Path object pathlib.Path.match patterns are very limited, they do not support *a*.yml glob.glob supports **/foo.yml but not multiple extensions type: ignore Unknown base file type is default Unknown file types report a empty string (evaluated as False) Filename is effective file on disk, for stdin is a namedtempfile if the lintable is part of a role, we save role folder name pylint: disable=consider-using-with We store absolute directory in dir determine base file kind (yaml, xml, ini, ...) git is preferred as it also considers .gitignore if git is absent we use home directory this relies on git and we do not want to call unless needed"},"nl_size":{"kind":"number","value":2141,"string":"2,141"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8368096947669983,"string":"0.83681"}}},{"rowIdx":7809,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport gzip\nimport bz2\nimport numpy as np\n\n\ndef advanced_open(filepath, *args, **kwargs):\n \"\"\" Open function interface for files with different extensions.\n\n Parameters\n ----------\n filepath: str\n File path with extension.\n args: list\n Non-key arguments\n kwargs: dict\n Key arguments\n\n Returns\n -------\n\n \"\"\"\n open_fn = open\n if filepath.endswith('.gz'):\n open_fn = gzip.open\n elif filepath.endswith('.bz2'):\n open_fn = bz2.open\n\n return open_fn(filepath, mode=\"rt\", *args, **kwargs)\n\n\ndef load_kg_file(filepath, separator=\"\\t\", as_stream=False):\n \"\"\" Import knowledge graph from file\n\n Parameters\n ----------\n filepath: str\n File path\n separator: str\n File column separator\n\n Returns\n -------\n iterator\n The knowledge graph triplets obtained from the files with size [?, 3]\n \"\"\"\n\n kg_triples = []\n with advanced_open(filepath) as file_content:\n for line in file_content:\n kg_triples.append(line.strip().split(separator))\n return np.array(kg_triples)\n\n\ndef load_kg_file_as_stream(filepath, separator=\"\\t\"):\n \"\"\" Import knowledge graph from file as a stream\n\n Parameters\n ----------\n filepath: str\n File path\n separator: str\n File column separator\n\n Returns\n -------\n generator\n The knowledge graph triplets obtained from the files with size [?, 3]\n \"\"\"\n\n with advanced_open(filepath) as file_content:\n for line in file_content:\n yield line.strip().split(separator)"},"path":{"kind":"string","value":"benchmarking/libkge/libkge/io/base.py"},"size":{"kind":"number","value":1606,"string":"1,606"},"nl_text":{"kind":"string","value":"Open function interface for files with different extensions.\n\nParameters\n----------\nfilepath: str\n File path with extension.\nargs: list\n Non-key arguments\nkwargs: dict\n Key arguments\n\nReturns\n-------\nImport knowledge graph from file\n\nParameters\n----------\nfilepath: str\n File path\nseparator: str\n File column separator\n\nReturns\n-------\niterator\n The knowledge graph triplets obtained from the files with size [?, 3]\nImport 
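advanced_open() above picks the opener from the file extension so plain, .gz and .bz2 knowledge-graph dumps share one read path. A simplified variant plus a round trip through a temporary gzip file (the path and the triple are invented):

import bz2
import gzip

def open_by_extension(filepath):
    # text mode in every case, matching the "rt" used above
    if filepath.endswith(".gz"):
        return gzip.open(filepath, mode="rt")
    if filepath.endswith(".bz2"):
        return bz2.open(filepath, mode="rt")
    return open(filepath, mode="rt")

with gzip.open("/tmp/toy_kg.tsv.gz", "wt") as f:
    f.write("alice\tknows\tbob\n")
with open_by_extension("/tmp/toy_kg.tsv.gz") as f:
    print([line.strip().split("\t") for line in f])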
knowledge graph from file as a stream\n\nParameters\n----------\nfilepath: str\n File path\nseparator: str\n File column separator\n\nReturns\n-------\ngenerator\n The knowledge graph triplets obtained from the files with size [?, 3]\n\n -*- coding: utf-8 -*-"},"nl_size":{"kind":"number","value":695,"string":"695"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.5699399709701538,"string":"0.56994"}}},{"rowIdx":7810,"cells":{"content":{"kind":"string","value":"import os\nimport requests\nimport datetime\n\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse, Http404\nfrom django.template import loader\nfrom django.contrib.auth import login\nfrom django.conf import settings\nfrom django.http import Http404\nfrom django.utils import timezone\nfrom requests import status_codes\n\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny, IsAuthenticated, IsAuthenticatedOrReadOnly\nfrom rest_framework import viewsets, status\n\nfrom channels.layers import get_channel_layer\nfrom asgiref.sync import async_to_sync\n\nfrom constance import config\nimport constance.settings\n\nfrom tau.twitch.models import TwitchAPIScope, TwitchEventSubSubscription\nfrom tau.users.models import User\nfrom .forms import ChannelNameForm, FirstRunForm\nfrom .utils import cleanup_remote_webhooks, cleanup_webhooks, log_request, check_access_token_expired, refresh_access_token, teardown_all_acct_webhooks, teardown_webhooks\nfrom tau.twitch.models import TwitchHelixEndpoint\n\n@api_view(['POST'])\ndef irc_message_view(request):\n channel_layer = get_channel_layer()\n async_to_sync(channel_layer.group_send)('twitchchat', {\n 'type': 'twitchchat.event',\n 'data': request.data\n })\n return Response({}, status=status.HTTP_201_CREATED)\n\n@api_view(['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])\ndef helix_view(request, helix_path=None):\n if check_access_token_expired():\n refresh_access_token()\n try:\n endpoint_instance = TwitchHelixEndpoint.objects.get(\n endpoint=helix_path,\n method=request.method\n )\n if endpoint_instance.token_type == 'OA':\n token = config.TWITCH_ACCESS_TOKEN\n else:\n token = config.TWITCH_APP_ACCESS_TOKEN\n except TwitchHelixEndpoint.DoesNotExist:\n token = config.TWITCH_ACCESS_TOKEN\n body = request.data\n client_id = os.environ.get('TWITCH_APP_ID', None)\n headers = {\n 'Authorization': 'Bearer {}'.format(token),\n 'Client-Id': client_id\n }\n \n url = f'https://api.twitch.tv/helix/' \\\n f'{helix_path}'\n uri = request.build_absolute_uri()\n url_params = ''\n if uri.count('?') > 0:\n url_params = uri.split('?', 1)[1]\n if url_params != '':\n url += f'?{url_params}'\n \n if request.method == 'GET':\n data = requests.get(\n url,\n headers=headers\n )\n elif request.method == 'POST':\n data = requests.post(\n url,\n data=body,\n headers=headers\n )\n elif request.method == 'PUT':\n data = requests.put(\n url,\n data=body,\n headers=headers\n )\n print(data)\n elif request.method == 'PATCH':\n data = requests.patch(\n url,\n data=body,\n headers=headers\n )\n elif request.method == 'DELETE':\n data = requests.delete(\n url,\n headers=headers\n )\n try:\n if(settings.DEBUG_TWITCH_CALLS):\n log_request(data)\n stream_data = data.json()\n except ValueError:\n stream_data = None\n\n return Response(stream_data, status=data.status_code)\n\ndef home_view(request):\n user_count = 
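The helix_view() proxy above forwards requests to api.twitch.tv with the stored token and the app client id attached as headers. The essential header handling, reduced to a plain function; the token and client id values are placeholders, and the live call is left commented out:

import requests

def helix_get(path, token, client_id, params=None):
    headers = {"Authorization": f"Bearer {token}", "Client-Id": client_id}
    resp = requests.get(f"https://api.twitch.tv/helix/{path}",
                        headers=headers, params=params)
    return resp.status_code, (resp.json() if resp.content else None)

# status, payload = helix_get("users", token="<access token>", client_id="<app id>")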
User.objects.all().exclude(username='worker_process').count()\n if user_count == 0:\n return HttpResponseRedirect('/first-run/')\n # elif not request.user.is_authenticated:\n # return HttpResponseRedirect('/accounts/login/')\n elif config.CHANNEL == '':\n return HttpResponseRedirect('/set-channel/')\n elif config.SCOPE_UPDATED_NEEDED:\n return HttpResponseRedirect('/refresh-token-scope/')\n else:\n # # template = loader.get_template('home.html')\n # template = loader.get_template('dashboard/index.html')\n # return HttpResponse(template.render({'config': config}, request))\n return HttpResponseRedirect('/dashboard')\n\ndef first_run_view(request):\n user_count = User.objects.all().exclude(username='worker_process').count()\n if user_count > 0: # If users already exist, it is not first run\n return HttpResponseRedirect('/') # reject creating a new super-user\n if request.method == 'POST':\n form = FirstRunForm(request.POST)\n if form.is_valid():\n user = User.objects.create_user(\n form.cleaned_data['username'],\n password=form.cleaned_data['password1']\n )\n user.is_superuser=True\n user.is_staff=True\n user.save()\n login(request, user)\n return HttpResponseRedirect('/')\n else:\n template = loader.get_template('registration/first-run.html')\n return HttpResponse(template.render({}, request))\n else:\n template = loader.get_template('registration/first-run.html')\n return HttpResponse(template.render({}, request))\n\ndef get_channel_name_view(request):\n if request.method == 'POST':\n port = os.environ.get('PORT', 8000)\n form = ChannelNameForm(request.POST)\n if form.is_valid():\n # Process the data\n config.CHANNEL = form.cleaned_data['channel_name']\n scope=' '.join(settings.TOKEN_SCOPES)\n client_id = os.environ.get('TWITCH_APP_ID', None)\n url = f'https://id.twitch.tv/oauth2/authorize?' \\\n f'client_id={client_id}&' \\\n f'redirect_uri={settings.BASE_URL}/twitch-callback/&' \\\n f'response_type=code&' \\\n f'scope={scope}&' \\\n f'force_verify=true'\n return HttpResponseRedirect(url)\n else:\n # Show some error page\n pass\n else:\n template = loader.get_template('registration/twitch-channel-setup.html')\n return HttpResponse(template.render({}, request))\n\ndef refresh_token_scope(request):\n client_id = os.environ.get('TWITCH_APP_ID', None)\n\n helix_scopes = list(\n TwitchAPIScope.objects.filter(\n required=True\n ).values_list('scope', flat=True)\n )\n eventsub_scopes = list(\n TwitchEventSubSubscription.objects.filter(\n active=True\n ).values_list('scope_required', flat=True)\n )\n scopes = list(set(settings.TOKEN_SCOPES + eventsub_scopes + helix_scopes))\n scopes = list(filter(lambda x: (x is not None), scopes))\n scope=' '.join(scopes)\n\n url = f'https://id.twitch.tv/oauth2/authorize?' 
\\\n f'client_id={client_id}&' \\\n f'redirect_uri={settings.BASE_URL}/twitch-callback/&' \\\n f'response_type=code&' \\\n f'scope={scope}&' \\\n f'force_verify=true'\n return HttpResponseRedirect(url)\n\n@api_view()\ndef get_tau_token(request):\n if not request.user.is_authenticated:\n return JsonResponse({'error': 'You must be logged into access this endpoint.'})\n else:\n token = Token.objects.get(user=request.user)\n return JsonResponse({'token': token.key})\n\n@api_view(['GET'])\ndef get_public_url(request):\n if not request.user.is_authenticated:\n return JsonResponse({'error': 'You must be logged into access this endpoint.'})\n else:\n public_url = config.PUBLIC_URL\n return JsonResponse({'public_url': public_url})\n\n@api_view(['POST'])\ndef refresh_tau_token(request):\n if not request.user.is_authenticated:\n return JsonResponse({'error': 'You must be logged into access this endpoint.'})\n else:\n token = Token.objects.get(user=request.user)\n token.delete()\n token = Token.objects.create(user=request.user)\n\n return JsonResponse({'token': token.key})\n\n@api_view(['POST'])\ndef reset_webhooks(request):\n if not request.user.is_authenticated:\n return JsonResponse({'error': 'You must be logged into access this endpoint.'})\n data = request.data\n if data['type'] == 'all':\n teardown_all_acct_webhooks()\n elif data['type'] == 'remote':\n token = Token.objects.get(user=request.user)\n cleanup_remote_webhooks()\n elif data['type'] == 'broken':\n token = Token.objects.get(user=request.user)\n cleanup_webhooks()\n else:\n return JsonResponse({'webhooks_reset': False, 'error': 'Proper type not found.'})\n config.FORCE_WEBHOOK_REFRESH = True\n return JsonResponse({'webhooks_reset': True})\n\ndef process_twitch_callback_view(request):\n port = os.environ.get('PORT', 8000)\n params = request.GET\n auth_code = params['code']\n client_id = os.environ.get('TWITCH_APP_ID', None)\n client_secret = os.environ.get('TWITCH_CLIENT_SECRET', None)\n auth_r = requests.post('https://id.twitch.tv/oauth2/token', data = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'code': auth_code,\n 'grant_type': 'authorization_code',\n 'redirect_uri': f'{settings.BASE_URL}/twitch-callback/'\n })\n response_data = auth_r.json()\n if(settings.DEBUG_TWITCH_CALLS):\n log_request(auth_r)\n config.TWITCH_ACCESS_TOKEN = response_data['access_token']\n config.TWITCH_REFRESH_TOKEN = response_data['refresh_token']\n expiration = timezone.now() + datetime.timedelta(seconds=response_data['expires_in'])\n config.TWITCH_ACCESS_TOKEN_EXPIRATION = expiration\n scope=' '.join(settings.TOKEN_SCOPES)\n app_auth_r = requests.post('https://id.twitch.tv/oauth2/token', data = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'grant_type': 'client_credentials',\n 'scope': scope\n })\n if(settings.DEBUG_TWITCH_CALLS):\n log_request(app_auth_r)\n app_auth_data = app_auth_r.json()\n config.TWITCH_APP_ACCESS_TOKEN = app_auth_data['access_token']\n config.SCOPE_UPDATED_NEEDED = False\n config.SCOPES_REFRESHED = True\n headers = {\n 'Authorization': 'Bearer {}'.format(config.TWITCH_ACCESS_TOKEN),\n 'Client-Id': client_id\n }\n user_r = requests.get('https://api.twitch.tv/helix/users', headers=headers)\n if(settings.DEBUG_TWITCH_CALLS):\n log_request(user_r)\n user_data = user_r.json()\n channel_id = user_data['data'][0]['id']\n config.CHANNEL_ID = channel_id\n return HttpResponseRedirect('/')\n\n\nclass HeartbeatViewSet(viewsets.ViewSet):\n permission_classes = (IsAuthenticatedOrReadOnly, )\n\n def list(self, 
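process_twitch_callback_view() above completes the OAuth authorization-code flow: the ?code= value from the redirect is exchanged at id.twitch.tv/oauth2/token for access and refresh tokens, and the expiry is derived from expires_in. A plain-function sketch of that exchange with placeholder credentials:

import datetime
import requests

def exchange_code(code, client_id, client_secret, redirect_uri):
    resp = requests.post("https://id.twitch.tv/oauth2/token", data={
        "client_id": client_id,
        "client_secret": client_secret,
        "code": code,
        "grant_type": "authorization_code",
        "redirect_uri": redirect_uri,
    })
    data = resp.json()
    expires_at = datetime.datetime.utcnow() + datetime.timedelta(seconds=data["expires_in"])
    return data["access_token"], data["refresh_token"], expires_at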
request, *args, **kwargs):\n response = {'message': 'pong'}\n return Response(response)\n\n\nclass TAUSettingsViewSet(viewsets.ViewSet):\n permission_classes = (IsAuthenticated, )\n\n valid_keys = ['USE_IRC']\n\n def list(self, request, *args, **kwargs):\n response = {key.lower(): getattr(config, key) for key in self.valid_keys}\n return Response(response)\n\n def retrieve(self, request, pk=None):\n if pk.upper() in self.valid_keys:\n return Response({pk: getattr(config, pk.upper())})\n else:\n raise Http404\n\n def update(self, request, pk=None):\n if pk.upper() in self.valid_keys:\n data = request.data\n setattr(config, pk.upper(), data['value'])\n return Response({pk: data['value']})\n else:\n raise Http404\n\n\nclass ServiceStatusViewSet(viewsets.ViewSet):\n permission_classes = (IsAuthenticated, )\n\n def update(self, request, pk=None):\n if pk.startswith('STATUS_') and hasattr(config, pk):\n data = request.data\n new_status = data['status']\n setattr(config, pk, new_status)\n return Response({\n pk: new_status\n })\n elif pk == 'SET_ALL':\n status_keys = filter(\n lambda x: x.startswith('STATUS_'),\n constance.settings.CONFIG.keys()\n )\n data = request.data\n new_status = data['status']\n for key in status_keys:\n setattr(config, key, new_status)\n return Response({\n 'reset': 'complete'\n })\n else:\n raise Http404(\"Config does not exist\")\n"},"path":{"kind":"string","value":"tau/core/views.py"},"size":{"kind":"number","value":12154,"string":"12,154"},"nl_text":{"kind":"string","value":"elif not request.user.is_authenticated: return HttpResponseRedirect('/accounts/login/') template = loader.get_template('home.html') template = loader.get_template('dashboard/index.html') return HttpResponse(template.render({'config': config}, request)) If users already exist, it is not first run reject creating a new super-user Process the data Show some error page"},"nl_size":{"kind":"number","value":372,"string":"372"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.34147465229034424,"string":"0.341475"}}},{"rowIdx":7811,"cells":{"content":{"kind":"string","value":"def find_words(string, word_set):\n if string == \"\" or not word_set:\n return None\n if string in word_set: # O(1)\n return [string]\n #\"bedbathbeyondunk\"\n #{'bed', 'bath', 'bedbath', 'and', 'beyond'}\n\n tmp = \"\" # bedbathbeyondunk\n out = [] # []\n retro = False # True\n i = 0\n while i < len(string): # i = 15\n if not retro:\n tmp += string[i]\n\n if tmp in word_set:\n out.append(tmp)\n tmp = \"\"\n\n if i == len(string)-1 and tmp != \"\":\n if not out:\n return None\n tmp = out.pop() + tmp\n retro = True\n i -= 1\n i += 1\n\n return out \n\n\nassert find_words(\n \"bedbathandbeyond\", \n set(['bed', 'bath', 'bedbath', 'and', 'beyond'])\n) == ['bed', 'bath', 'and', 'beyond']\n\nassert find_words(\n \"thequickbrownfox\", \n set(['quick', 'brown', 'the', 'fox'])\n) == ['the', 'quick', 'brown', 'fox']\n\nassert find_words(\n \"thequickbrownfoxa\", \n set(['quick', 'brown', 'the', 'fox'])\n) == None\n"},"path":{"kind":"string","value":"reconstruct-words.py"},"size":{"kind":"number","value":1041,"string":"1,041"},"nl_text":{"kind":"string","value":"O(1)\"bedbathbeyondunk\"{'bed', 'bath', 'bedbath', 'and', 'beyond'} bedbathbeyondunk [] True i = 
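find_words() above segments the string greedily and backtracks when it dead-ends. The same problem is often easier to reason about with memoized recursion; this is an alternative formulation for comparison, not a rewrite of the routine above:

from functools import lru_cache

def segment(s, words):
    words = set(words)

    @lru_cache(maxsize=None)
    def go(i):
        if i == len(s):
            return []
        for j in range(i + 1, len(s) + 1):
            if s[i:j] in words:
                rest = go(j)
                if rest is not None:
                    return [s[i:j]] + rest
        return None            # no segmentation from position i

    return go(0)

print(segment("bedbathandbeyond", {"bed", "bath", "bedbath", "and", "beyond"}))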
15"},"nl_size":{"kind":"number","value":97,"string":"97"},"nl_language":{"kind":"string","value":"de"},"nl_language_score":{"kind":"number","value":0.0524565726518631,"string":"0.052457"}}},{"rowIdx":7812,"cells":{"content":{"kind":"string","value":"import json\nimport yaml\nfrom pathlib import Path\nfrom brownie import *\nfrom substrateinterface import Keypair\nfrom hashlib import blake2b\nimport base58\n\n\ndef get_derivative_account(root_account, index):\n seed_bytes = b'modlpy/utilisuba'\n\n root_account_bytes = bytes.fromhex(Keypair(root_account).public_key[2:])\n index_bytes = int(index).to_bytes(2, 'little')\n\n entropy = blake2b(seed_bytes + root_account_bytes + index_bytes, digest_size=32).digest()\n input_bytes = bytes([42]) + entropy\n checksum = blake2b(b'SS58PRE' + input_bytes).digest()\n return base58.b58encode(input_bytes + checksum[:2]).decode()\n\n\n\n\nclass Contracts:\n user = None\n proxy_admin = None\n lido = None\n vksm = None\n oracle_master = None\n wstksm = None\n auth_manager = None\n controller = None\n ledgers = None\n validators = None\n\n def __init__(self, _user, _proxy_admin, _lido, _vksm, _oracle_master, _wstksm, _auth_manager, _controller, _ledgers, _validators):\n self.user = _user\n self.proxy_admin = _proxy_admin\n self.lido = _lido\n self.vksm = _vksm\n self.oracle_master = _oracle_master\n self.wstksm = _wstksm\n self.auth_manager = _auth_manager\n self.controller = _controller\n self.ledgers = _ledgers\n self.validators = _validators\n\nNETWORK=\"kusama\"\n\ndef load_deployments(network):\n path = './deployments/' + network + '.json'\n if Path(path).is_file():\n with open(path) as file:\n return json.load(file)\n else:\n return {}\n\ndef load_deployment_config(network):\n with open('./deployment-config.yml') as file:\n return yaml.safe_load(file)['networks'][network]\n\nCONFIG = load_deployment_config(NETWORK)\nDEPLOYMENTS = load_deployments(NETWORK)\n\n\ndef gen_ledger_account(index):\n sovereign = CONFIG['sovereign_account']\n root_index = CONFIG['root_derivative_index']\n\n controller = get_derivative_account(sovereign, root_index)\n return get_derivative_account(controller, index)\n\n\n#contracts = run('./scripts/prepare_env.py') from brownie console --network=moonbase\ndef main():\n user = accounts.load(CONFIG['deployer'])\n\n proxy_admin = ProxyAdminMock.at(DEPLOYMENTS['ProxyAdmin'])\n\n lido = Lido.at(DEPLOYMENTS['Lido'])\n vksm = vKSM_mock.at(CONFIG['precompiles']['vksm'])\n oracle_master = OracleMaster.at(DEPLOYMENTS['OracleMaster'])\n wstksm = WstKSM.at(DEPLOYMENTS['WstKSM'])\n auth_manager = AuthManager.at(DEPLOYMENTS['AuthManager'])\n controller = Controller.at(DEPLOYMENTS['Controller'])\n\n ledgers = [ Ledger.at(addr) for addr in lido.getLedgerAddresses() ]\n\n # current validators in moonbase\n validator_1 = Keypair(\"5CX2ov8tmW6nZwy6Eouzc7VxFHcAyZioNm5QjEUYc7zjbS66\").public_key\n validator_2 = Keypair(\"5FRiNmoi9HFGFrY3K9xsSCeewRtA2pcXTZVZrwLacPCfvHum\").public_key\n validator_3 = Keypair(\"5EcdgHV81hu6YpPucSMrWbdQRBUr18XypiiGsgQ7HREYdrWG\").public_key\n validator_4 = Keypair(\"5FCEmzonc34D2SXXv2CMsDoFWCVivH2a2Mwe32t9BT1TcpAD\").public_key\n validator_5 = Keypair(\"5Ehgvgk1LERD5aTEWw6HLdKZurBqcRYbHXvrAtTgYPhUpr1R\").public_key\n\n validators = [validator_1, validator_2, validator_3, validator_4, validator_5]\n\n # 5CxXVE7pHqzR4kzfz6nop529odm8eVemFFtStruyNQvdTopo\n # 5GxgDNMhbvMhuJzXC2voX5nKUyNaNQFCZxgnoa18eGiBBZwt\n # 5Cqb9WXVQQF73a1dcJEBFS2bWrukaC6dmzjeWZeJHj3NMwvB\n\n return Contracts(user, proxy_admin, lido, vksm, oracle_master, 
wstksm, auth_manager, controller, ledgers, validators)\n"},"path":{"kind":"string","value":"scripts/prepare_env.py"},"size":{"kind":"number","value":3484,"string":"3,484"},"nl_text":{"kind":"string","value":"contracts = run('./scripts/prepare_env.py') from brownie console --network=moonbase current validators in moonbase 5CxXVE7pHqzR4kzfz6nop529odm8eVemFFtStruyNQvdTopo 5GxgDNMhbvMhuJzXC2voX5nKUyNaNQFCZxgnoa18eGiBBZwt 5Cqb9WXVQQF73a1dcJEBFS2bWrukaC6dmzjeWZeJHj3NMwvB"},"nl_size":{"kind":"number","value":261,"string":"261"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.23536495864391327,"string":"0.235365"}}},{"rowIdx":7813,"cells":{"content":{"kind":"string","value":"import datetime\nfrom dateutil.parser import parse\nfrom decimal import Decimal\nimport re\nimport importlib\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\nfrom django.utils import datetime_safe\nfrom tastypie.bundle import Bundle\nfrom tastypie.exceptions import ApiFieldError, NotFound\nfrom tastypie.utils import dict_strip_unicode_keys, make_aware\n\n\nclass NOT_PROVIDED:\n def __str__(self):\n return 'No default provided.'\n\n\nDATE_REGEX = re.compile('^(?P\\d{4})-(?P\\d{2})-(?P\\d{2}).*?$')\nDATETIME_REGEX = re.compile('^(?P\\d{4})-(?P\\d{2})-(?P\\d{2})(T|\\s+)(?P\\d{2}):(?P\\d{2}):(?P\\d{2}).*?$')\n\n\n# All the ApiField variants.\n\nclass ApiField(object):\n \"\"\"The base implementation of a field used by the resources.\"\"\"\n dehydrated_type = 'string'\n help_text = ''\n\n def __init__(self, attribute=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, unique=False, help_text=None):\n \"\"\"\n Sets up the field. This is generally called when the containing\n ``Resource`` is initialized.\n\n Optionally accepts an ``attribute``, which should be a string of\n either an instance attribute or callable off the object during the\n ``dehydrate`` or push data onto an object during the ``hydrate``.\n Defaults to ``None``, meaning data will be manually accessed.\n\n Optionally accepts a ``default``, which provides default data when the\n object being ``dehydrated``/``hydrated`` has no data on the field.\n Defaults to ``NOT_PROVIDED``.\n\n Optionally accepts a ``null``, which indicated whether or not a\n ``None`` is allowable data on the field. Defaults to ``False``.\n\n Optionally accepts a ``blank``, which indicated whether or not\n data may be omitted on the field. Defaults to ``False``.\n\n Optionally accepts a ``readonly``, which indicates whether the field\n is used during the ``hydrate`` or not. 
Defaults to ``False``.\n\n Optionally accepts a ``unique``, which indicates if the field is a\n unique identifier for the object.\n\n Optionally accepts ``help_text``, which lets you provide a\n human-readable description of the field exposed at the schema level.\n Defaults to the per-Field definition.\n \"\"\"\n # Track what the index thinks this field is called.\n self.instance_name = None\n self._resource = None\n self.attribute = attribute\n self._default = default\n self.null = null\n self.blank = blank\n self.readonly = readonly\n self.value = None\n self.unique = unique\n\n if help_text:\n self.help_text = help_text\n\n def contribute_to_class(self, cls, name):\n # Do the least we can here so that we don't hate ourselves in the\n # morning.\n self.instance_name = name\n self._resource = cls\n\n def has_default(self):\n \"\"\"Returns a boolean of whether this field has a default value.\"\"\"\n return self._default is not NOT_PROVIDED\n\n @property\n def default(self):\n \"\"\"Returns the default value for the field.\"\"\"\n if callable(self._default):\n return self._default()\n\n return self._default\n\n def dehydrate(self, bundle):\n \"\"\"\n Takes data from the provided object and prepares it for the\n resource.\n \"\"\"\n if self.attribute is not None:\n # Check for `__` in the field for looking through the relation.\n attrs = self.attribute.split('__')\n current_object = bundle.obj\n\n for attr in attrs:\n previous_object = current_object\n current_object = getattr(current_object, attr, None)\n\n if current_object is None:\n if self.has_default():\n current_object = self._default\n # Fall out of the loop, given any further attempts at\n # accesses will fail miserably.\n break\n elif self.null:\n current_object = None\n # Fall out of the loop, given any further attempts at\n # accesses will fail miserably.\n break\n else:\n raise ApiFieldError(\"The object '%r' has an empty attribute '%s' and doesn't allow a default or null value.\" % (previous_object, attr))\n\n if callable(current_object):\n current_object = current_object()\n\n return self.convert(current_object)\n\n if self.has_default():\n return self.convert(self.default)\n else:\n return None\n\n def convert(self, value):\n \"\"\"\n Handles conversion between the data found and the type of the field.\n\n Extending classes should override this method and provide correct\n data coercion.\n \"\"\"\n return value\n\n def hydrate(self, bundle):\n \"\"\"\n Takes data stored in the bundle for the field and returns it. 
Used for\n taking simple data and building a instance object.\n \"\"\"\n if self.readonly:\n return None\n if not bundle.data.has_key(self.instance_name):\n\n is_related = getattr(self, 'is_related', False)\n is_m2m = getattr(self, 'is_m2m', False)\n\n if is_related and not is_m2m:\n # We've got an FK (or alike field) & a possible parent object.\n # Check for it.\n if bundle.related_obj and bundle.related_name in (self.attribute, self.instance_name):\n return bundle.related_obj\n\n # Functor for safely checking if bundle.obj has a non-None property\n def has_non_null_attr(obj, name):\n try:\n return getattr(obj, name, None) is not None\n except:\n if is_related:\n return None\n else:\n raise\n\n if self.blank:\n return None\n elif self.attribute and has_non_null_attr(bundle.obj, self.attribute):\n return getattr(bundle.obj, self.attribute)\n elif self.instance_name and has_non_null_attr(bundle.obj, self.instance_name):\n return getattr(bundle.obj, self.instance_name)\n elif self.has_default():\n if callable(self._default):\n return self._default()\n\n return self._default\n elif self.null:\n return None\n else:\n raise ApiFieldError(\"The '%s' field has no data and doesn't allow a default or null value.\" % self.instance_name)\n\n bundle_val = bundle.data[self.instance_name]\n\n if bundle_val is None and not self.null:\n raise ApiFieldError(\"The '%s' field doesn't allow a null value.\" % self.instance_name)\n else:\n return bundle_val\n\n def set_value_on_bundle_obj(self, bundle, value):\n \"\"\"\n Overrideable hook for writing a value into the object on a bundle. Enables the use of\n custom setters in your app code if setattr() is too raw for your fancy ORM model.\n \"\"\"\n\n try:\n setattr(bundle.obj, self.attribute, value)\n except Exception, e:\n raise ApiFieldError(\"The '%s' field couldn't set value '%s': %s\" %\n (self.instance_name, value, e))\n\n\nclass CharField(ApiField):\n \"\"\"\n A text field of arbitrary length.\n\n Covers both ``models.CharField`` and ``models.TextField``.\n \"\"\"\n dehydrated_type = 'string'\n help_text = 'Unicode string data. Ex: \"Hello World\"'\n\n def convert(self, value):\n if value is None:\n return None\n\n return unicode(value)\n\n\nclass FileField(ApiField):\n \"\"\"\n A file-related field.\n\n Covers both ``models.FileField`` and ``models.ImageField``.\n \"\"\"\n dehydrated_type = 'string'\n help_text = 'A file URL as a string. Ex: \"http://media.example.com/media/photos/my_photo.jpg\"'\n\n def convert(self, value):\n if value is None:\n return None\n\n try:\n # Try to return the URL if it's a ``File``, falling back to the string\n # itself if it's been overridden or is a default.\n return getattr(value, 'url', value)\n except ValueError:\n return None\n\n\nclass IntegerField(ApiField):\n \"\"\"\n An integer field.\n\n Covers ``models.IntegerField``, ``models.PositiveIntegerField``,\n ``models.PositiveSmallIntegerField`` and ``models.SmallIntegerField``.\n \"\"\"\n dehydrated_type = 'integer'\n help_text = 'Integer data. Ex: 2673'\n\n def convert(self, value):\n if value is None:\n return None\n\n return int(value)\n\n\nclass FloatField(ApiField):\n \"\"\"\n A floating point field.\n \"\"\"\n dehydrated_type = 'float'\n help_text = 'Floating point numeric data. Ex: 26.73'\n\n def convert(self, value):\n if value is None:\n return None\n\n return float(value)\n\n\nclass DecimalField(ApiField):\n \"\"\"\n A decimal field.\n \"\"\"\n dehydrated_type = 'decimal'\n help_text = 'Fixed precision numeric data. 
Ex: 26.73'\n\n def convert(self, value):\n if value is None:\n return None\n\n return Decimal(value)\n\n def hydrate(self, bundle):\n value = super(DecimalField, self).hydrate(bundle)\n\n if value and not isinstance(value, Decimal):\n value = Decimal(value)\n\n return value\n\n\nclass BooleanField(ApiField):\n \"\"\"\n A boolean field.\n\n Covers both ``models.BooleanField`` and ``models.NullBooleanField``.\n \"\"\"\n dehydrated_type = 'boolean'\n help_text = 'Boolean data. Ex: True'\n\n def convert(self, value):\n if value is None:\n return None\n\n return bool(value)\n\n\nclass ListField(ApiField):\n \"\"\"\n A list field.\n \"\"\"\n dehydrated_type = 'list'\n help_text = \"A list of data. Ex: ['abc', 26.73, 8]\"\n\n def convert(self, value):\n if value is None:\n return None\n\n return list(value)\n\n\nclass DictField(ApiField):\n \"\"\"\n A dictionary field.\n \"\"\"\n dehydrated_type = 'dict'\n help_text = \"A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}\"\n\n def convert(self, value):\n if value is None:\n return None\n\n return dict(value)\n\n\nclass DateField(ApiField):\n \"\"\"\n A date field.\n \"\"\"\n dehydrated_type = 'date'\n help_text = 'A date as a string. Ex: \"2010-11-10\"'\n\n def convert(self, value):\n if value is None:\n return None\n\n if isinstance(value, basestring):\n match = DATE_REGEX.search(value)\n\n if match:\n data = match.groupdict()\n return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))\n else:\n raise ApiFieldError(\"Date provided to '%s' field doesn't appear to be a valid date string: '%s'\" % (self.instance_name, value))\n\n return value\n\n def hydrate(self, bundle):\n value = super(DateField, self).hydrate(bundle)\n\n if value and not hasattr(value, 'year'):\n try:\n # Try to rip a date/datetime out of it.\n value = make_aware(parse(value))\n\n if hasattr(value, 'hour'):\n value = value.date()\n except ValueError:\n pass\n\n return value\n\n\nclass DateTimeField(ApiField):\n \"\"\"\n A datetime field.\n \"\"\"\n dehydrated_type = 'datetime'\n help_text = 'A date & time as a string. Ex: \"2010-11-10T03:07:43\"'\n\n def convert(self, value):\n if value is None:\n return None\n\n if isinstance(value, basestring):\n match = DATETIME_REGEX.search(value)\n\n if match:\n data = match.groupdict()\n return make_aware(datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second'])))\n else:\n raise ApiFieldError(\"Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'\" % (self.instance_name, value))\n\n return value\n\n def hydrate(self, bundle):\n value = super(DateTimeField, self).hydrate(bundle)\n\n if value and not hasattr(value, 'year'):\n try:\n # Try to rip a date/datetime out of it.\n value = make_aware(parse(value))\n except ValueError:\n pass\n\n return value\n\n\nclass RelatedField(ApiField):\n \"\"\"\n Provides access to data that is related within the database.\n\n The ``RelatedField`` base class is not intended for direct use but provides\n functionality that ``ToOneField`` and ``ToManyField`` build upon.\n\n The contents of this field actually point to another ``Resource``,\n rather than the related object. This allows the field to represent its data\n in different ways.\n\n The abstractions based around this are \"leaky\" in that, unlike the other\n fields provided by ``tastypie``, these fields don't handle arbitrary objects\n very well. 
The subclasses use Django's ORM layer to make things go, though\n there is no ORM-specific code at this level.\n \"\"\"\n dehydrated_type = 'related'\n is_related = True\n self_referential = False\n help_text = 'A related resource. Can be either a URI or set of nested resource data.'\n\n def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None):\n \"\"\"\n Builds the field and prepares it to access to related data.\n\n The ``to`` argument should point to a ``Resource`` class, NOT\n to a ``Model``. Required.\n\n The ``attribute`` argument should specify what field/callable points to\n the related data on the instance object. Required.\n\n Optionally accepts a ``related_name`` argument. Currently unused, as\n unlike Django's ORM layer, reverse relations between ``Resource``\n classes are not automatically created. Defaults to ``None``.\n\n Optionally accepts a ``null``, which indicated whether or not a\n ``None`` is allowable data on the field. Defaults to ``False``.\n\n Optionally accepts a ``blank``, which indicated whether or not\n data may be omitted on the field. Defaults to ``False``.\n\n Optionally accepts a ``readonly``, which indicates whether the field\n is used during the ``hydrate`` or not. Defaults to ``False``.\n\n Optionally accepts a ``full``, which indicates how the related\n ``Resource`` will appear post-``dehydrate``. If ``False``, the\n related ``Resource`` will appear as a URL to the endpoint of that\n resource. If ``True``, the result of the sub-resource's\n ``dehydrate`` will be included in full.\n\n Optionally accepts a ``unique``, which indicates if the field is a\n unique identifier for the object.\n\n Optionally accepts ``help_text``, which lets you provide a\n human-readable description of the field exposed at the schema level.\n Defaults to the per-Field definition.\n \"\"\"\n self.instance_name = None\n self._resource = None\n self.to = to\n self.attribute = attribute\n self.related_name = related_name\n self._default = default\n self.null = null\n self.blank = blank\n self.readonly = readonly\n self.full = full\n self.api_name = None\n self.resource_name = None\n self.unique = unique\n self._to_class = None\n\n if self.to == 'self':\n self.self_referential = True\n self._to_class = self.__class__\n\n if help_text:\n self.help_text = help_text\n\n def contribute_to_class(self, cls, name):\n super(RelatedField, self).contribute_to_class(cls, name)\n\n # Check if we're self-referential and hook it up.\n # We can't do this quite like Django because there's no ``AppCache``\n # here (which I think we should avoid as long as possible).\n if self.self_referential or self.to == 'self':\n self._to_class = cls\n\n def get_related_resource(self, related_instance=None):\n \"\"\"\n Instantiates the related resource.\n \"\"\"\n\n instance = self.to_class(api_name=self.api_name)\n instance.api_name = self.api_name\n\n return instance\n\n @property\n def to_class(self):\n # We need to be lazy here, because when the metaclass constructs the\n # Resources, other classes may not exist yet.\n # That said, memoize this so we never have to relookup/reimport.\n if self._to_class:\n return self._to_class\n\n if not isinstance(self.to, basestring):\n self._to_class = self.to\n return self._to_class\n\n # It's a string. Let's figure it out.\n if '.' 
in self.to:\n # Try to import.\n module_bits = self.to.split('.')\n module_path, class_name = '.'.join(module_bits[:-1]), module_bits[-1]\n module = importlib.import_module(module_path)\n else:\n # We've got a bare class name here, which won't work (No AppCache\n # to rely on). Try to throw a useful error.\n raise ImportError(\"Tastypie requires a Python-style path () to lazy load related resources. Only given '%s'.\" % self.to)\n\n self._to_class = getattr(module, class_name, None)\n\n if self._to_class is None:\n raise ImportError(\"Module '%s' does not appear to have a class called '%s'.\" % (module_path, class_name))\n\n return self._to_class\n\n def dehydrate_related(self, bundle, related_resource, related_instance):\n \"\"\"\n Based on the ``full_resource``, returns either the endpoint or the data\n from ``full_dehydrate`` for the related resource.\n \"\"\"\n if not self.full:\n # Be a good netizen.\n return related_resource.get_resource_uri(bundle)\n else:\n # ZOMG extra data and big payloads.\n bundle = related_resource.build_bundle(obj=related_instance, request=bundle.request)\n return related_resource.full_dehydrate(bundle)\n\n def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):\n \"\"\"\n Given a URI is provided, the related resource is attempted to be\n loaded based on the identifiers in the URI.\n \"\"\"\n try:\n obj = fk_resource.get_via_uri(uri, request=request)\n bundle = fk_resource.build_bundle(obj=obj, request=request)\n return fk_resource.full_dehydrate(bundle)\n except ObjectDoesNotExist:\n raise ApiFieldError(\"Could not find the provided object via resource URI '%s'.\" % uri)\n\n def resource_from_data(self, fk_resource, data, request=None, related_obj=None, related_name=None):\n \"\"\"\n Given a dictionary-like structure is provided, a fresh related\n resource is created using that data.\n \"\"\"\n # Try to hydrate the data provided.\n data = dict_strip_unicode_keys(data)\n fk_bundle = fk_resource.build_bundle(data=data, request=request)\n\n if related_obj:\n fk_bundle.related_obj = related_obj\n fk_bundle.related_name = related_name\n\n # We need to check to see if updates are allowed on the FK\n # resource. 
If not, we'll just return a populated bundle instead\n # of mistakenly updating something that should be read-only.\n if not fk_resource.can_update():\n\n # If the resource already exists and the client specified where to find it, we look it up.\n if 'resource_uri' in data:\n obj = fk_resource.get_via_uri(data['resource_uri'], request=request)\n fk_bundle.install_existing_obj( obj )\n return fk_bundle\n\n # If the resource supports creation, then we can full_hydrate() and create a new instance.\n elif fk_resource.can_create():\n return fk_resource.full_hydrate(fk_bundle)\n\n else:\n raise ApiFieldError(\"Resource %s does not support being created via POST\" %\n fk_resource._meta.resource_name)\n\n try:\n return fk_resource.obj_update(fk_bundle, **data)\n except NotFound:\n try:\n # Attempt lookup by primary key\n lookup_kwargs = dict((k, v) for k, v in data.iteritems() if getattr(fk_resource, k).unique)\n\n if not lookup_kwargs:\n raise NotFound()\n return fk_resource.obj_update(fk_bundle, **lookup_kwargs)\n except NotFound:\n fk_bundle = fk_resource.full_hydrate(fk_bundle)\n fk_resource.is_valid(fk_bundle, request)\n return fk_bundle\n except MultipleObjectsReturned:\n return fk_resource.full_hydrate(fk_bundle)\n\n def resource_from_pk(self, fk_resource, obj, request=None, related_obj=None, related_name=None):\n \"\"\"\n Given an object with a ``pk`` attribute, the related resource\n is attempted to be loaded via that PK.\n \"\"\"\n bundle = fk_resource.build_bundle(obj=obj, request=request)\n return fk_resource.full_dehydrate(bundle)\n\n def build_related_resource(self, value, request=None, related_obj=None, related_name=None):\n \"\"\"\n Returns a bundle of data built by the related resource, usually via\n ``hydrate`` with the data provided.\n\n Accepts either a URI, a data dictionary (or dictionary-like structure)\n or an object with a ``pk``.\n \"\"\"\n self.fk_resource = self.to_class(api_name=self.api_name)\n kwargs = {\n 'request': request,\n 'related_obj': related_obj,\n 'related_name': related_name,\n }\n\n if isinstance(value, basestring):\n # We got a URI. Load the object and assign it.\n return self.resource_from_uri(self.fk_resource, value, **kwargs)\n elif isinstance(value, Bundle):\n # We got a valid bundle object, the RelatedField had full=True\n return value\n elif isinstance(value, dict):\n # We've got a data dictionary.\n # Since this leads to creation, this is the only one of these\n # methods that might care about \"parent\" data.\n return self.resource_from_data(self.fk_resource, value, **kwargs)\n elif hasattr(value, 'pk'):\n # We've got an object with a primary key.\n return self.resource_from_pk(self.fk_resource, value, **kwargs)\n else:\n raise ApiFieldError(\"The '%s' field was given data that was not a URI, not a dictionary-alike and does not have a 'pk' attribute: %s.\" % (self.instance_name, value))\n\n\nclass ToOneField(RelatedField):\n \"\"\"\n Provides access to related data via foreign key.\n\n This subclass requires Django's ORM layer to work properly.\n \"\"\"\n help_text = 'A single related resource. 
Can be either a URI or set of nested resource data.'\n\n def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,\n null=False, blank=False, readonly=False, full=False,\n unique=False, help_text=None):\n super(ToOneField, self).__init__(\n to, attribute, related_name=related_name, default=default,\n null=null, blank=blank, readonly=readonly, full=full,\n unique=unique, help_text=help_text\n )\n self.fk_resource = None\n\n def dehydrate(self, bundle):\n foreign_obj = None\n\n if isinstance(self.attribute, basestring):\n attrs = self.attribute.split('__')\n foreign_obj = bundle.obj\n\n for attr in attrs:\n previous_obj = foreign_obj\n try:\n foreign_obj = getattr(foreign_obj, attr, None)\n except ObjectDoesNotExist:\n foreign_obj = None\n elif callable(self.attribute):\n foreign_obj = self.attribute(bundle)\n\n if not foreign_obj:\n if not self.null:\n raise ApiFieldError(\"The model '%r' has an empty attribute '%s' and doesn't allow a null value.\" % (previous_obj, attr))\n\n return None\n\n self.fk_resource = self.get_related_resource(foreign_obj)\n fk_bundle = Bundle(obj=foreign_obj, request=bundle.request)\n return self.dehydrate_related(fk_bundle, self.fk_resource, foreign_obj)\n\n def hydrate(self, bundle):\n value = super(ToOneField, self).hydrate(bundle)\n\n if value is None:\n return value\n\n return self.build_related_resource(value, request=bundle.request)\n\nclass ForeignKey(ToOneField):\n \"\"\"\n A convenience subclass for those who prefer to mirror ``django.db.models``.\n \"\"\"\n pass\n\n\nclass OneToOneField(ToOneField):\n \"\"\"\n A convenience subclass for those who prefer to mirror ``django.db.models``.\n \"\"\"\n pass\n\n\nclass ToManyField(RelatedField):\n \"\"\"\n Provides access to related data via a join table.\n\n This subclass requires Django's ORM layer to work properly.\n\n Note that the ``hydrate`` portions of this field are quite different than\n any other field. ``hydrate_m2m`` actually handles the data and relations.\n This is due to the way Django implements M2M relationships.\n \"\"\"\n is_m2m = True\n help_text = 'Many related resources. Can be either a list of URIs or list of individually nested resource data.'\n\n def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,\n null=False, blank=False, readonly=False, full=False,\n unique=False, help_text=None):\n super(ToManyField, self).__init__(\n to, attribute, related_name=related_name, default=default,\n null=null, blank=blank, readonly=readonly, full=full,\n unique=unique, help_text=help_text\n )\n self.m2m_bundles = []\n\n def dehydrate(self, bundle):\n if not bundle.obj or not bundle.obj.pk:\n if not self.null:\n raise ApiFieldError(\"The model '%r' does not have a primary key and can not be used in a ToMany context.\" % bundle.obj)\n\n return []\n\n the_m2ms = None\n previous_obj = bundle.obj\n attr = self.attribute\n\n if isinstance(self.attribute, basestring):\n attrs = self.attribute.split('__')\n the_m2ms = bundle.obj\n\n for attr in attrs:\n previous_obj = the_m2ms\n try:\n the_m2ms = getattr(the_m2ms, attr, None)\n except ObjectDoesNotExist:\n the_m2ms = None\n\n if not the_m2ms:\n break\n\n elif callable(self.attribute):\n the_m2ms = self.attribute(bundle)\n\n if not the_m2ms:\n if not self.null:\n raise ApiFieldError(\"The model '%r' has an empty attribute '%s' and doesn't allow a null value.\" % (previous_obj, attr))\n\n return []\n\n self.m2m_resources = []\n m2m_dehydrated = []\n\n # TODO: Also model-specific and leaky. 
Relies on there being a\n # ``Manager`` there.\n for m2m in the_m2ms.all():\n m2m_resource = self.get_related_resource(m2m)\n m2m_bundle = Bundle(obj=m2m, request=bundle.request)\n self.m2m_resources.append(m2m_resource)\n m2m_dehydrated.append(self.dehydrate_related(m2m_bundle, m2m_resource, m2m))\n\n return m2m_dehydrated\n\n def hydrate(self, bundle):\n pass\n\n def hydrate_m2m(self, bundle):\n if self.readonly:\n return None\n\n if bundle.data.get(self.instance_name) is None:\n if self.blank:\n return []\n elif self.null:\n return []\n else:\n raise ApiFieldError(\"The '%s' field has no data and doesn't allow a null value.\" % self.instance_name)\n\n m2m_hydrated = []\n\n for value in bundle.data.get(self.instance_name):\n if value is None:\n continue\n\n kwargs = {\n 'request': bundle.request,\n }\n\n if self.related_name:\n kwargs['related_obj'] = bundle.obj\n kwargs['related_name'] = self.related_name\n\n m2m_hydrated.append(self.build_related_resource(value, **kwargs))\n\n return m2m_hydrated\n\n\nclass ManyToManyField(ToManyField):\n \"\"\"\n A convenience subclass for those who prefer to mirror ``django.db.models``.\n \"\"\"\n pass\n\n\nclass OneToManyField(ToManyField):\n \"\"\"\n A convenience subclass for those who prefer to mirror ``django.db.models``.\n \"\"\"\n pass\n\n\nclass TimeField(ApiField):\n dehydrated_type = 'time'\n help_text = 'A time as string. Ex: \"20:05:23\"'\n\n def dehydrate(self, obj):\n return self.convert(super(TimeField, self).dehydrate(obj))\n\n def convert(self, value):\n if isinstance(value, basestring):\n return self.to_time(value)\n return value\n\n def to_time(self, s):\n try:\n dt = parse(s)\n except ValueError, e:\n raise ApiFieldError(str(e))\n else:\n return datetime.time(dt.hour, dt.minute, dt.second)\n\n def hydrate(self, bundle):\n value = super(TimeField, self).hydrate(bundle)\n\n if value and not isinstance(value, datetime.time):\n value = self.to_time(value)\n\n return value\n"},"path":{"kind":"string","value":"tastypie/fields.py"},"size":{"kind":"number","value":29911,"string":"29,911"},"nl_text":{"kind":"string","value":"All the ApiField variants. Track what the index thinks this field is called. Do the least we can here so that we don't hate ourselves in the morning. Check for `__` in the field for looking through the relation. Fall out of the loop, given any further attempts at accesses will fail miserably. Fall out of the loop, given any further attempts at accesses will fail miserably. We've got an FK (or alike field) & a possible parent object. Check for it. Functor for safely checking if bundle.obj has a non-None property Try to return the URL if it's a ``File``, falling back to the string itself if it's been overridden or is a default. Try to rip a date/datetime out of it. Try to rip a date/datetime out of it. Check if we're self-referential and hook it up. We can't do this quite like Django because there's no ``AppCache`` here (which I think we should avoid as long as possible). We need to be lazy here, because when the metaclass constructs the Resources, other classes may not exist yet. That said, memoize this so we never have to relookup/reimport. It's a string. Let's figure it out. Try to import. We've got a bare class name here, which won't work (No AppCache to rely on). Try to throw a useful error. Be a good netizen. ZOMG extra data and big payloads. Try to hydrate the data provided. We need to check to see if updates are allowed on the FK resource. 
If not, we'll just return a populated bundle instead of mistakenly updating something that should be read-only. If the resource already exists and the client specified where to find it, we look it up. If the resource supports creation, then we can full_hydrate() and create a new instance. Attempt lookup by primary key We got a URI. Load the object and assign it. We got a valid bundle object, the RelatedField had full=True We've got a data dictionary. Since this leads to creation, this is the only one of these methods that might care about \"parent\" data. We've got an object with a primary key. TODO: Also model-specific and leaky. Relies on there being a ``Manager`` there."},"nl_size":{"kind":"number","value":2053,"string":"2,053"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.933933675289154,"string":"0.933934"}}},{"rowIdx":7814,"cells":{"content":{"kind":"string","value":"# coding: utf-8\n\n\"\"\"\n UltraCart Rest API V2\n\n UltraCart REST API Version 2\n\n OpenAPI spec version: 2.0.0\n Contact: support@ultracart.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass CouponFreeItemAndShippingWithSubtotal(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'currency_code': 'str',\n 'items': 'list[str]',\n 'limit': 'int',\n 'shipping_methods': 'list[str]',\n 'subtotal_amount': 'float'\n }\n\n attribute_map = {\n 'currency_code': 'currency_code',\n 'items': 'items',\n 'limit': 'limit',\n 'shipping_methods': 'shipping_methods',\n 'subtotal_amount': 'subtotal_amount'\n }\n\n def __init__(self, currency_code=None, items=None, limit=None, shipping_methods=None, subtotal_amount=None):\n \"\"\"\n CouponFreeItemAndShippingWithSubtotal - a model defined in Swagger\n \"\"\"\n\n self._currency_code = None\n self._items = None\n self._limit = None\n self._shipping_methods = None\n self._subtotal_amount = None\n self.discriminator = None\n\n if currency_code is not None:\n self.currency_code = currency_code\n if items is not None:\n self.items = items\n if limit is not None:\n self.limit = limit\n if shipping_methods is not None:\n self.shipping_methods = shipping_methods\n if subtotal_amount is not None:\n self.subtotal_amount = subtotal_amount\n\n @property\n def currency_code(self):\n \"\"\"\n Gets the currency_code of this CouponFreeItemAndShippingWithSubtotal.\n The ISO-4217 three letter currency code the customer is viewing prices in\n\n :return: The currency_code of this CouponFreeItemAndShippingWithSubtotal.\n :rtype: str\n \"\"\"\n return self._currency_code\n\n @currency_code.setter\n def currency_code(self, currency_code):\n \"\"\"\n Sets the currency_code of this CouponFreeItemAndShippingWithSubtotal.\n The ISO-4217 three letter currency code the customer is viewing prices in\n\n :param currency_code: The currency_code of this CouponFreeItemAndShippingWithSubtotal.\n :type: str\n \"\"\"\n if currency_code is not None and len(currency_code) > 3:\n raise ValueError(\"Invalid value for `currency_code`, length must be less than or equal to `3`\")\n\n self._currency_code = currency_code\n\n @property\n def items(self):\n \"\"\"\n 
Gets the items of this CouponFreeItemAndShippingWithSubtotal.\n A list of items that are eligible for this discount_price.\n\n :return: The items of this CouponFreeItemAndShippingWithSubtotal.\n :rtype: list[str]\n \"\"\"\n return self._items\n\n @items.setter\n def items(self, items):\n \"\"\"\n Sets the items of this CouponFreeItemAndShippingWithSubtotal.\n A list of items that are eligible for this discount_price.\n\n :param items: The items of this CouponFreeItemAndShippingWithSubtotal.\n :type: list[str]\n \"\"\"\n\n self._items = items\n\n @property\n def limit(self):\n \"\"\"\n Gets the limit of this CouponFreeItemAndShippingWithSubtotal.\n The limit of free items that may be received when purchasing multiple items\n\n :return: The limit of this CouponFreeItemAndShippingWithSubtotal.\n :rtype: int\n \"\"\"\n return self._limit\n\n @limit.setter\n def limit(self, limit):\n \"\"\"\n Sets the limit of this CouponFreeItemAndShippingWithSubtotal.\n The limit of free items that may be received when purchasing multiple items\n\n :param limit: The limit of this CouponFreeItemAndShippingWithSubtotal.\n :type: int\n \"\"\"\n\n self._limit = limit\n\n @property\n def shipping_methods(self):\n \"\"\"\n Gets the shipping_methods of this CouponFreeItemAndShippingWithSubtotal.\n One or more shipping methods that may be free\n\n :return: The shipping_methods of this CouponFreeItemAndShippingWithSubtotal.\n :rtype: list[str]\n \"\"\"\n return self._shipping_methods\n\n @shipping_methods.setter\n def shipping_methods(self, shipping_methods):\n \"\"\"\n Sets the shipping_methods of this CouponFreeItemAndShippingWithSubtotal.\n One or more shipping methods that may be free\n\n :param shipping_methods: The shipping_methods of this CouponFreeItemAndShippingWithSubtotal.\n :type: list[str]\n \"\"\"\n\n self._shipping_methods = shipping_methods\n\n @property\n def subtotal_amount(self):\n \"\"\"\n Gets the subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.\n The amount of subtotal required to receive the discount percent\n\n :return: The subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.\n :rtype: float\n \"\"\"\n return self._subtotal_amount\n\n @subtotal_amount.setter\n def subtotal_amount(self, subtotal_amount):\n \"\"\"\n Sets the subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.\n The amount of subtotal required to receive the discount percent\n\n :param subtotal_amount: The subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.\n :type: float\n \"\"\"\n\n self._subtotal_amount = subtotal_amount\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, CouponFreeItemAndShippingWithSubtotal):\n return False\n\n 
return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n"},"path":{"kind":"string","value":"ultracart/models/coupon_free_item_and_shipping_with_subtotal.py"},"size":{"kind":"number","value":7294,"string":"7,294"},"nl_text":{"kind":"string","value":"NOTE: This class is auto generated by the swagger code generator program.\nDo not edit the class manually.\nReturns true if both objects are equal\nCouponFreeItemAndShippingWithSubtotal - a model defined in Swagger\nReturns true if both objects are not equal\nFor `print` and `pprint`\nGets the currency_code of this CouponFreeItemAndShippingWithSubtotal.\nThe ISO-4217 three letter currency code the customer is viewing prices in\n\n:return: The currency_code of this CouponFreeItemAndShippingWithSubtotal.\n:rtype: str\nSets the currency_code of this CouponFreeItemAndShippingWithSubtotal.\nThe ISO-4217 three letter currency code the customer is viewing prices in\n\n:param currency_code: The currency_code of this CouponFreeItemAndShippingWithSubtotal.\n:type: str\nGets the items of this CouponFreeItemAndShippingWithSubtotal.\nA list of items that are eligible for this discount_price.\n\n:return: The items of this CouponFreeItemAndShippingWithSubtotal.\n:rtype: list[str]\nSets the items of this CouponFreeItemAndShippingWithSubtotal.\nA list of items that are eligible for this discount_price.\n\n:param items: The items of this CouponFreeItemAndShippingWithSubtotal.\n:type: list[str]\nGets the limit of this CouponFreeItemAndShippingWithSubtotal.\nThe limit of free items that may be received when purchasing multiple items\n\n:return: The limit of this CouponFreeItemAndShippingWithSubtotal.\n:rtype: int\nSets the limit of this CouponFreeItemAndShippingWithSubtotal.\nThe limit of free items that may be received when purchasing multiple items\n\n:param limit: The limit of this CouponFreeItemAndShippingWithSubtotal.\n:type: int\nGets the shipping_methods of this CouponFreeItemAndShippingWithSubtotal.\nOne or more shipping methods that may be free\n\n:return: The shipping_methods of this CouponFreeItemAndShippingWithSubtotal.\n:rtype: list[str]\nSets the shipping_methods of this CouponFreeItemAndShippingWithSubtotal.\nOne or more shipping methods that may be free\n\n:param shipping_methods: The shipping_methods of this CouponFreeItemAndShippingWithSubtotal.\n:type: list[str]\nGets the subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.\nThe amount of subtotal required to receive the discount percent\n\n:return: The subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.\n:rtype: float\nSets the subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.\nThe amount of subtotal required to receive the discount percent\n\n:param subtotal_amount: The subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.\n:type: float\nReturns the model properties as a dict\nReturns the string representation of the model\nUltraCart Rest API V2\n\nUltraCart REST API Version 2\n\nOpenAPI spec version: 2.0.0\nContact: support@ultracart.com\nGenerated by: https://github.com/swagger-api/swagger-codegen.git\n\n coding: utf-8"},"nl_size":{"kind":"number","value":2799,"string":"2,799"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6899495720863342,"string":"0.68995"}}},{"rowIdx":7815,"cells":{"content":{"kind":"string","value":"\nimport RPi.GPIO as GPIO\nimport time,sys, datetime, json, requests\nfrom 
requests.exceptions import ConnectionError, Timeout, TooManyRedirects\n\n'''\nConfigure raspberry\n'''\n\nGPIO.setmode(GPIO.BCM)\ninpt = 13\nGPIO.setup(inpt,GPIO.IN)\n\n'''\nConfigure some global variables\n'''\n\ncurrent_input = GPIO.input(inpt) # This is used to compare to the new_input later.\ntotal_rotations = 0 # This is a counter. It gets reset after the number of seconds in rotation_downtime.\ncup_movements = 200 # This is how many rotations occur as a cup of liquid passes through.\nrotation_downtime = 5 # Sets the cut-off time for establishing a water-flow event.\nlast_movement_time = time.time() + rotation_downtime # This is used to determine if a new water-flow event should be created.\nrecord_data = False # A flag used to trigger database insert.\n\ndata = []\n\nprint('Control C to exit')\n\ndef commit_data(data):\n\n '''\n This passes data to the data base as a single row. It then resets/empties data.\n '''\n\n url = 'http://localhost:1880/sensor'\n headers = {\n 'Accepts': 'application/json'\n }\n\n print(f\"1: {data[0]}\")\n send_jsn = json.dumps({\"Movements\": data[0][1], \"Cups\": data[0][2], \"Gallons\": data[0][3], \"Liters\": data[0][4]})\n\n try:\n response = requests.post(url, data=send_jsn, headers=headers)\n print(response.text)\n except (ConnectionError, Timeout, TooManyRedirects) as e:\n print(e)\n \n data = []\n return data\n\ndef prep_and_send(data,total_rotations):\n\n '''\n Calculates measurements (cups and gallons). Prepares the data into a database-friendly tuple. Appends that tuple to a list. \n \n It then tries to connect to database. If it is not successful then it does nothing but saves the data; it will try to send \n the list of data-tuples the next time there is a water-flow event. \n \n Once the connection is successful data is emptied in commit_data().\n '''\n\n total_cups = total_rotations/cup_movements\n total_gallons = total_cups/16\n total_liters = total_gallons*3.78541\n now = datetime.datetime.now() \n print('{}: Movements: {}. \\nCups: {}. \\nGallons: {}. \\nLiters: {}'.format(now,total_rotations,total_cups,total_gallons,total_liters))\n\n current_data = (\n now,\n round(total_rotations,2),\n round(total_cups,2),\n round(total_gallons,2), \n round(total_liters,2), \n )\n data.append(current_data)\n\n print(f\"datos: {data}\")\n data = commit_data(data)\t\t\t\n \n return data\n\nwhile True:\n\n '''\n This is what actually runs the whole time. \n It first checks to see if new_input is different from current_input. This would be the case if there was a rotation.\n Once it detects that the input is different it knows water is flowing.\n It starts tracking the total_rotations and when the last rotation occured. \n After each rotation it refreshes the value of the last rotation time.\n It waits a few seconds (rotation_downtime) after the last rotation time to make sure the water has stopped. \n Once the water stops it passes the total_rotations to prep_and_send(). 
\n It also passes 'data' which is any previous water-flow events that were not successfully sent at the time they were recorded.\n '''\n\n new_input = GPIO.input(inpt)\n if new_input != current_input:\n total_rotations += 1\n if time.time() <= last_movement_time: #if it hasn't been more than 10 seconds\n record_data = True\n current_input = new_input\n last_movement_time = time.time() + rotation_downtime\n else: #flow starts\n last_movement_time = time.time() + rotation_downtime\n\n elif record_data == True and time.time() > last_movement_time: #if it's been x seconds since last change\n data = prep_and_send(data,total_rotations)\n record_data = False\n total_rotations = 0\n last_movement_time = time.time() + rotation_downtime\n current_input = new_input\n\n try:\n None\n #print('New input: ',new_input, '. Current input: ', current_input, '. Movements: ', total_rotations)\n except KeyboardInterrupt:\n print('\\nCTRL C - Exiting nicely')\n GPIO.cleanup()\n sys.exit()\n"},"path":{"kind":"string","value":"software/read-sensor-python/waterFlow/waterFlowMeter.py"},"size":{"kind":"number","value":4411,"string":"4,411"},"nl_text":{"kind":"string","value":"This passes data to the data base as a single row. It then resets/empties data.\nCalculates measurements (cups and gallons). Prepares the data into a database-friendly tuple. Appends that tuple to a list. \n\nIt then tries to connect to database. If it is not successful then it does nothing but saves the data; it will try to send \nthe list of data-tuples the next time there is a water-flow event. \n\nOnce the connection is successful data is emptied in commit_data().\n\n This is used to compare to the new_input later. This is a counter. It gets reset after the number of seconds in rotation_downtime. This is how many rotations occur as a cup of liquid passes through. Sets the cut-off time for establishing a water-flow event. This is used to determine if a new water-flow event should be created. A flag used to trigger database insert.if it hasn't been more than 10 secondsflow startsif it's been x seconds since last changeprint('New input: ',new_input, '. Current input: ', current_input, '. Movements: ', total_rotations)"},"nl_size":{"kind":"number","value":1026,"string":"1,026"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.9124253988265991,"string":"0.912425"}}},{"rowIdx":7816,"cells":{"content":{"kind":"string","value":"import re\nimport traceback\nimport subprocess\nfrom serviceDB import ServiceDB\n\n\nclass NeadmServiceWrapper:\n _service_list_cmd = ['/opt/nedge/neadm/neadm', 'service', 'list']\n\n # _status_cmd = ['/opt/nedge/neadm/fake-neadm-status.sh']\n _service_list_header = re.compile(\"^.*TYPE.*NAME.*SERVERID.*STATUS.*$\")\n # unit_id key well be added during parsing of each line\n _service_list_names = ['type', 'name', 'sid', 'status']\n\n def __init__(self, db):\n self.exit_code = 0\n self.db = ServiceDB(db)\n\n def get_exit_code(self):\n return self.exit_code\n\n def get_raw_output(self, command):\n try:\n output = subprocess.check_output(command, stderr=subprocess.STDOUT)\n self.exit_code = 0\n return output\n except subprocess.CalledProcessError as ex:\n self.exit_code = ex.returncode\n return ex.output\n except Exception as e:\n self.exit_code = 1\n return \"Failed to start {0} command.' 
\\\n Exeption {1}\".format(command, e.output)\n\n def get_all_services(self):\n output = self.get_raw_output(NeadmServiceWrapper._service_list_cmd)\n # print(output)\n result = NeadmServiceList()\n # error exit code\n if self.exit_code:\n result.exit_code = self.exit_code\n result.output = output\n return result\n\n output_array = output.split('\\n')\n for line in output_array:\n # print(line)\n if NeadmServiceWrapper._service_list_header.match(line):\n continue\n\n params = line.split()\n # print(params)\n # print(len(params))\n if len(params) < 4:\n continue\n\n service_record = {}\n\n for name in NeadmServiceWrapper._service_list_names:\n service_record[name] = params[\n NeadmServiceWrapper._service_list_names.index(name)]\n\n # check ServiceDB for sid and unit_id already joined\n # add unit_id key\n db_record = self.db.find(sid=service_record['sid'],\n service_name=service_record['name'])\n\n if len(db_record) == 1:\n service_record['unit_id'] = db_record[0]['unit_id']\n else:\n service_record['unit_id'] = ''\n\n # print(node)\n result.append(service_record)\n\n # print(status)\n return result\n\n def exec_cmd(self, cmd_name, cmd):\n try:\n print(\"\\t{0} cmd is {1}\".format(cmd_name, ' '.join(cmd)))\n subprocess.check_output(cmd)\n\n except Exception as ex:\n raise Exception('in {0}\\nMessage:{1}\\nTrace: {2}'.format(\n self.__class__.__name__, ex.message, traceback.format_exc()))\n\n # is node included into service nodes list\n def is_node_exist(self, service_name, sid):\n services = self.get_all_services()\n return services.is_already_in_service(service_name, sid)\n\n # is iscsi service already created\n def is_service_exist(self, service_name):\n services = self.get_all_services()\n return services.is_service_exist(service_name)\n\n # create new iscsi(cinder) service by name\n def create_iscsi_service(self, service_name):\n cmd = ['/opt/nedge/neadm/neadm', 'service', 'create', 'iscsi',\n service_name]\n if not self.is_service_exist(service_name):\n self.exec_cmd('create_iscsi_service', cmd)\n else:\n print(\"create_iscsi_service: Service {} already exist!\".format(\n service_name))\n\n # create new swift service by name\n def create_swift_service(self, service_name):\n cmd = ['/opt/nedge/neadm/neadm', 'service', 'create', 'swift',\n service_name]\n if not self.is_service_exist(service_name):\n self.exec_cmd('create_swift_service', cmd)\n else:\n print(\"create_swift_service: Service {} already exist!\".format(\n service_name))\n\n # remove iscsi service by name\n def delete_service(self, service_name):\n cmd = ['/opt/nedge/neadm/neadm', 'service', 'delete', service_name]\n if self.is_service_exist(service_name):\n self.exec_cmd('delete_service', cmd)\n else:\n print(\"remove_iscsi_service: {0} service does not exist\".format(\n service_name))\n\n def is_service_enabled(self, service_name):\n services = self.get_all_services()\n return services.is_service_enabled(service_name)\n\n # serve command, apply swift servie to cluster\n def serve_service(self, service_name, cluster_name):\n cmd = ['/opt/nedge/neadm/neadm', 'service', 'serve', service_name,\n cluster_name]\n if not self.is_service_exist(service_name):\n print(\"serve_service: Service {} does not exist\".format(\n service_name))\n return\n\n self.exec_cmd('serve_service', cmd)\n\n # enable service if exist\n def enable_service(self, service_name):\n cmd = ['/opt/nedge/neadm/neadm', 'service', 'enable', service_name]\n if not self.is_service_exist(service_name):\n print(\"enable_service: Service {} does not exist\".format(\n 
service_name))\n return\n\n if not self.is_service_enabled(service_name):\n self.exec_cmd('enable_service', cmd)\n else:\n print(\"enable_service: Service {} already enabled\".format(\n service_name))\n\n def disable_service(self, service_name):\n cmd = ['/opt/nedge/neadm/neadm', 'service', 'disable', service_name]\n\n if not self.is_service_exist(service_name):\n print(\"disable_service: Service {} does not exist\".format(\n service_name))\n return\n\n if self.is_service_enabled(service_name):\n self.exec_cmd('disable_service', cmd)\n else:\n print(\"disable_service: Service {} already disabled\".format(\n service_name))\n\n def add_node_to_service(self, service_name, sid, unit_id):\n cmd = ['/opt/nedge/neadm/neadm', 'service', 'add', service_name, sid]\n if not self.is_node_exist(service_name, sid):\n self.exec_cmd('add_node_to_service', cmd)\n\n # add node to persistent db\n # self.db.add(sid, unit_id, service_name)\n else:\n print(\"\\tadd_node_to_service:\"\n \"Node {0} already exist as service node\".format(sid))\n\n self.db.add(sid, unit_id, service_name)\n\n def get_service_node_count(self, service_name):\n services = self.get_all_services()\n return len(services.get_service_nodes(service_name))\n\n def remove_node_by_unit_id(self, unit_id):\n service = self.db.find(unit_id=unit_id)\n if len(service) > 0:\n sid = service[0]['sid']\n service_name = service[0]['service']\n self.remove_node_from_service(service_name, sid, unit_id)\n else:\n print(\"Can't find service by unit_id:{}\".format(unit_id))\n\n def disable_service_by_unit_id(self, unit_id):\n service = self.db.find(unit_id=unit_id)\n if len(service) > 0:\n service_name = service[0]['service']\n print(\"service to disable is :{}\".format(service_name))\n self.disable_service(service_name)\n else:\n print(\"Can't find service by unit_id:{}\".format(unit_id))\n\n def remove_node_from_service(self, service_name, sid, unit_id):\n cmd = ['/opt/nedge/neadm/neadm', 'service', 'remove', service_name,\n sid]\n if self.is_node_exist(service_name, sid):\n self.exec_cmd('remove_node_from_service', cmd)\n\n node_count = self.get_service_node_count(service_name)\n if node_count == 0:\n self.delete_service(service_name)\n\n else:\n print(\"\\tremove_node_from_service: \"\n \"Node {} does not exist to remove\".format(sid))\n\n # remove from persistent db\n self.db.remove(sid, unit_id)\n\n def print_services(self):\n service_list = self.get_all_services()\n service_list.show()\n\n\nclass NeadmServiceList:\n def __init__(self):\n # service records array\n self.service_records = []\n self.exit_code = 0\n self.output = \"\"\n\n def is_correct(self):\n return True if self.exit_code == 0 else False\n\n def get_all(self):\n return self.service_records\n\n def get_service_nodes(self, service_name):\n return filter(lambda service: service['name'] == service_name and\n service['sid'] != '-',\n self.service_records)\n\n def get_iscsi_nodes(self):\n return filter(lambda service: service['type'] == 'iscsi' and\n service['sid'] != '-',\n self.service_records)\n\n def get_iscsi_nodes_by_service_name(self, service_name):\n return filter(lambda service: service['type'] == 'iscsi' and\n service['name'] == service_name and\n service['sid'] != '-',\n self.service_records)\n\n def get_swift_nodes(self):\n return filter(lambda service: service['type'] == 'swift' and\n service['sid'] != '-',\n self.service_records)\n\n def get_swift_nodes_by_service_name(self, service_name):\n return filter(lambda service: service['type'] == 'swift' and\n service['name'] == 
service_name and\n service['sid'] != '-',\n self.service_records)\n\n # is node present into whole services list\n def is_already_listed(self, sid):\n return True if filter(lambda service: service['sid'] == sid,\n self.service_records) else False\n\n # is node presented in service already\n def is_already_in_service(self, service_name, sid):\n return True if filter(lambda service: service['sid'] == sid and\n service['name'] == service_name,\n self.service_records) else False\n\n def is_service_exist(self, service_name):\n return True if filter(lambda service: service['name'] == service_name,\n self.service_records) else False\n\n def is_service_enabled(self, service_name):\n nodes = self.get_service_nodes(service_name)\n print(nodes)\n if len(nodes) > 0:\n if nodes[0]['status'] == 'enabled':\n return True\n return False\n\n def append(self, service_record):\n self.service_records.append(service_record)\n\n # def show(self):\n # print('TYPE\\t\\tNAME\\t\\t\\tID\\t\\t\\tSTATE\\t\\t\\tUNIT_ID')\n # for record in self.service_records:\n # print(\"{0:<{col0}}{1:<{col1}}{2:<{col2}}\"+\n # \"{3:<{col3}}{4:<{col4}}\".format(\n # record['type'],\n # record['name'],\n # record['sid'],\n # record['status'],\n # record['unit_id'],\n # col0=8,\n # col1=20,\n # col2=36,\n # col3=12,\n # col4=16))\n # print(\"\")\n"},"path":{"kind":"string","value":"nexentaedge/neadmServiceWrapper.py"},"size":{"kind":"number","value":11309,"string":"11,309"},"nl_text":{"kind":"string","value":"_status_cmd = ['/opt/nedge/neadm/fake-neadm-status.sh'] unit_id key well be added during parsing of each line print(output) error exit code print(line) print(params) print(len(params)) check ServiceDB for sid and unit_id already joined add unit_id key print(node) print(status) is node included into service nodes list is iscsi service already created create new iscsi(cinder) service by name create new swift service by name remove iscsi service by name serve command, apply swift servie to cluster enable service if exist add node to persistent db self.db.add(sid, unit_id, service_name) remove from persistent db service records array is node present into whole services list is node presented in service already def show(self): print('TYPE\\t\\tNAME\\t\\t\\tID\\t\\t\\tSTATE\\t\\t\\tUNIT_ID') for record in self.service_records: print(\"{0:<{col0}}{1:<{col1}}{2:<{col2}}\"+ \"{3:<{col3}}{4:<{col4}}\".format( record['type'], record['name'], record['sid'], record['status'], record['unit_id'], col0=8, col1=20, col2=36, col3=12, col4=16)) print(\"\")"},"nl_size":{"kind":"number","value":1194,"string":"1,194"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6950269341468811,"string":"0.695027"}}},{"rowIdx":7817,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python3\n#\n# Copyright (c) 2019 LG Electronics, Inc.\n#\n# This software contains code licensed as described in LICENSE.\n#\n\nimport os\nimport lgsvl\nimport random\nimport time\nfrom pathlib import Path\nimport json\n\nsim = lgsvl.Simulator(os.environ.get(\"SIMULATOR_HOST\", \"127.0.0.1\"), 8181)\n\nlayer_mask = 0\nlayer_mask |= 1 << 0 # 0 is the layer for the road (default)\n\nif sim.current_scene == \"SanFrancisco\":\n sim.reset()\nelse:\n sim.load(\"SanFrancisco\")\n\n# if sim.current_scene == \"Testbed\":\n# sim.reset()\n# else:\n# sim.load(\"Testbed\")\n\nspawns = sim.get_spawn()\n\nspawns[0].position.x = 705.6\nspawns[0].position.y = 10.1\nspawns[0].position.z = -308.7\nspawns[0].rotation.y -= 95\n\nforward = 
lgsvl.utils.transform_to_forward(spawns[0])\nright = lgsvl.utils.transform_to_right(spawns[0])\n\nstate = lgsvl.AgentState()\n# state.transform.position = spawns[0].position\nstate.transform.position = spawns[0].position\nstate.transform.rotation = spawns[0].rotation\n\nego = sim.add_agent(\"SingleLiDAR (Autoware)\", lgsvl.AgentType.EGO, state)\nego.connect_bridge(os.environ.get(\"BRIDGE_HOST\", \"127.0.0.1\"), 9090)\n\n\n#------- Stand vehicle -------#\n#set stand vehicle's initial position\n\npose_arr = [\n (-3, 5),\n (-3, 10),\n (-3, 15),\n (-3, 20),\n (-5, 25),\n (3, 30),\n (-1, 40),\n (-6, 33)\n]\n\nsv_state_arr = []\n\nfor (x, y) in pose_arr:\n sv_state_arr.append(lgsvl.AgentState())\n sv_state_arr[-1].transform.position = spawns[0].position + y * forward + x * right\n sv_state_arr[-1].transform.rotation = spawns[0].rotation\n\n _ = sim.add_agent(\"Sedan\", lgsvl.AgentType.NPC, sv_state_arr[-1])\n\n# for i in range(30):\n# sv_state_arr.append(lgsvl.AgentState())\n# sv_state_arr[-1].transform.position = spawns[0].position + (150 + i * 7) * forward + 3.5 * right\n# sv_state_arr[-1].transform.rotation = spawns[0].rotation\n\n# _ = sim.add_agent(\"Sedan\", lgsvl.AgentType.NPC, sv_state_arr[-1])\n\n# for i in range(30):\n# sv_state_arr.append(lgsvl.AgentState())\n# sv_state_arr[-1].transform.position = spawns[0].position + (150 + i * 7) * forward - 6 * right\n# sv_state_arr[-1].transform.rotation = spawns[0].rotation\n\n# _ = sim.add_agent(\"Sedan\", lgsvl.AgentType.NPC, sv_state_arr[-1])\n\n\nsim.run()\n"},"path":{"kind":"string","value":"autoware.ai/autoware_files/lgsvl_file/scripts/testbed_scenario/sanfrancisco.py"},"size":{"kind":"number","value":2198,"string":"2,198"},"nl_text":{"kind":"string","value":"!/usr/bin/env python3 Copyright (c) 2019 LG Electronics, Inc. This software contains code licensed as described in LICENSE. 
pytwitter/models/space.py (1,146 bytes):
\"\"\"\n    Space object.\n\n    Refer: https://developer.twitter.com/en/docs/twitter-api/data-dictionary/object-model/space\n\"\"\"\n\nfrom dataclasses import dataclass, field\nfrom typing import List, Optional\n\nfrom .base import BaseModel\n\n\n@dataclass\nclass Space(BaseModel):\n    \"\"\"\n    A class representing the space object.\n    \"\"\"\n\n    id: Optional[str] = field(default=None)\n    state: Optional[str] = field(default=None)\n    created_at: Optional[str] = field(default=None, repr=False)\n    host_ids: Optional[List[str]] = field(default=None, repr=False)\n    lang: Optional[str] = field(default=None, repr=False)\n    is_ticketed: Optional[bool] = field(default=None, repr=False)\n    invited_user_ids: Optional[List[str]] = field(default=None, repr=False)\n    participant_count: Optional[int] = field(default=None, repr=False)\n    scheduled_start: Optional[str] = field(default=None, repr=False)\n    speaker_ids: Optional[List[str]] = field(default=None, repr=False)\n    started_at: Optional[str] = field(default=None, repr=False)\n    title: Optional[str] = field(default=None, repr=False)\n    updated_at: Optional[str] = field(default=None, repr=False)\n

\"\"\"Make / Download Telegram Sticker Packs without installing Third Party applications\nAvailable Commands:\n.kangsticker [Optional Emoji]\n.packinfo\n.getsticker\"\"\"\nfrom telethon import events\nfrom io import BytesIO\nfrom PIL import Image\nimport asyncio\nimport datetime\nfrom collections import defaultdict\nimport math\nimport os\nimport requests\nimport zipfile\nfrom telethon.errors.rpcerrorlist import StickersetInvalidError\nfrom telethon.errors import MessageNotModifiedError\nfrom telethon.tl.functions.account import UpdateNotifySettingsRequest\nfrom telethon.tl.functions.messages import GetStickerSetRequest\nfrom telethon.tl.types import (\n    DocumentAttributeFilename,\n    DocumentAttributeSticker,\n    InputMediaUploadedDocument,\n    InputPeerNotifySettings,\n    InputStickerSetID,\n    InputStickerSetShortName,\n    
MessageMediaPhoto\n)\nfrom uniborg.util import admin_cmd\n\n\n@borg.on(admin_cmd(pattern=\"kangsticker ?(.*)\"))\nasync def _(event):\n if event.fwd_from:\n return\n if not event.is_reply:\n await event.edit(\"Reply to a photo to add to my personal sticker pack.\")\n return\n reply_message = await event.get_reply_message()\n sticker_emoji = \"🔥\"\n input_str = event.pattern_match.group(1)\n if input_str:\n sticker_emoji = input_str\n\n me = borg.me\n userid = event.from_id\n packname = f\"{userid}'s @MC0917 Pack\"\n packshortname = f\"MC_0917_{userid}\" # format: Uni_Borg_userid\n\n is_a_s = is_it_animated_sticker(reply_message)\n file_ext_ns_ion = \"@MC0917_Sticker.png\"\n file = await borg.download_file(reply_message.media)\n uploaded_sticker = None\n if is_a_s:\n file_ext_ns_ion = \"AnimatedSticker.tgs\"\n uploaded_sticker = await borg.upload_file(file, file_name=file_ext_ns_ion)\n packname = f\"{userid}'s @AnimatedStickersGroup\"\n packshortname = f\"MC_0917_{userid}_as\" # format: Uni_Borg_userid\n elif not is_message_image(reply_message):\n await event.edit(\"Invalid message type\")\n return\n else:\n with BytesIO(file) as mem_file, BytesIO() as sticker:\n resize_image(mem_file, sticker)\n sticker.seek(0)\n uploaded_sticker = await borg.upload_file(sticker, file_name=file_ext_ns_ion)\n\n await event.edit(\"Processing this sticker. Please Wait!\")\n\n async with borg.conversation(\"@Stickers\") as bot_conv:\n now = datetime.datetime.now()\n dt = now + datetime.timedelta(minutes=1)\n if not await stickerset_exists(bot_conv, packshortname):\n await silently_send_message(bot_conv, \"/cancel\")\n if is_a_s:\n response = await silently_send_message(bot_conv, \"/newanimated\")\n else:\n response = await silently_send_message(bot_conv, \"/newpack\")\n if \"Yay!\" not in response.text:\n await event.edit(f\"**FAILED**! @Stickers replied: {response.text}\")\n return\n response = await silently_send_message(bot_conv, packname)\n if not response.text.startswith(\"Alright!\"):\n await event.edit(f\"**FAILED**! @Stickers replied: {response.text}\")\n return\n w = await bot_conv.send_file(\n file=uploaded_sticker,\n allow_cache=False,\n force_document=True\n )\n response = await bot_conv.get_response()\n if \"Sorry\" in response.text:\n await event.edit(f\"**FAILED**! @Stickers replied: {response.text}\")\n return\n await silently_send_message(bot_conv, sticker_emoji)\n await silently_send_message(bot_conv, \"/publish\")\n response = await silently_send_message(bot_conv, f\"<{packname}>\")\n await silently_send_message(bot_conv, \"/skip\")\n response = await silently_send_message(bot_conv, packshortname)\n if response.text == \"Sorry, this short name is already taken.\":\n await event.edit(f\"**FAILED**! @Stickers replied: {response.text}\")\n return\n else:\n await silently_send_message(bot_conv, \"/cancel\")\n await silently_send_message(bot_conv, \"/addsticker\")\n await silently_send_message(bot_conv, packshortname)\n await bot_conv.send_file(\n file=uploaded_sticker,\n allow_cache=False,\n force_document=True\n )\n response = await bot_conv.get_response()\n if \"Sorry\" in response.text:\n await event.edit(f\"**FAILED**! @Stickers replied: {response.text}\")\n return\n await silently_send_message(bot_conv, sticker_emoji)\n await silently_send_message(bot_conv, \"/done\")\n\n await event.edit(f\"sticker added! 
Your pack can be found [here](t.me/addstickers/{packshortname})\")\n\n\n@borg.on(admin_cmd(pattern=\"packinfo\"))\nasync def _(event):\n if event.fwd_from:\n return\n if not event.is_reply:\n await event.edit(\"Reply to any sticker to get it's pack info.\")\n return\n rep_msg = await event.get_reply_message()\n if not rep_msg.document:\n await event.edit(\"Reply to any sticker to get it's pack info.\")\n return\n stickerset_attr_s = rep_msg.document.attributes\n stickerset_attr = find_instance(stickerset_attr_s, DocumentAttributeSticker)\n if not stickerset_attr.stickerset:\n await event.edit(\"sticker does not belong to a pack.\")\n return\n get_stickerset = await borg(\n GetStickerSetRequest(\n InputStickerSetID(\n id=stickerset_attr.stickerset.id,\n access_hash=stickerset_attr.stickerset.access_hash\n )\n )\n )\n pack_emojis = []\n for document_sticker in get_stickerset.packs:\n if document_sticker.emoticon not in pack_emojis:\n pack_emojis.append(document_sticker.emoticon)\n await event.edit(f\"**Sticker Title:** `{get_stickerset.set.title}\\n`\"\n f\"**Sticker Short Name:** `{get_stickerset.set.short_name}`\\n\"\n f\"**Official:** `{get_stickerset.set.official}`\\n\"\n f\"**Archived:** `{get_stickerset.set.archived}`\\n\"\n f\"**Stickers In Pack:** `{len(get_stickerset.packs)}`\\n\"\n f\"**Emojis In Pack:** {' '.join(pack_emojis)}\")\n\n\n@borg.on(admin_cmd(pattern=\"getsticker ?(.*)\"))\nasync def _(event):\n if event.fwd_from:\n return\n input_str = event.pattern_match.group(1)\n if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):\n os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)\n if event.reply_to_msg_id:\n reply_message = await event.get_reply_message()\n # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7\n if not reply_message.sticker:\n return\n sticker = reply_message.sticker\n sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker)\n if not sticker_attrib.stickerset:\n await event.reply(\"This sticker is not part of a pack\")\n return\n is_a_s = is_it_animated_sticker(reply_message)\n file_ext_ns_ion = \"webp\"\n file_caption = \"https://t.me/RoseSupportChat/33801\"\n if is_a_s:\n file_ext_ns_ion = \"tgs\"\n file_caption = \"Forward the ZIP file to @AnimatedStickersRoBot to get lottIE JSON containing the vector information.\"\n sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset))\n pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, sticker_set.set.short_name, \"pack.txt\")\n if os.path.isfile(pack_file):\n os.remove(pack_file)\n # Sticker emojis are retrieved as a mapping of\n # : \n # So we need to build a mapping of : \n # Thanks, Durov\n emojis = defaultdict(str)\n for pack in sticker_set.packs:\n for document_id in pack.documents:\n emojis[document_id] += pack.emoticon\n async def download(sticker, emojis, path, file):\n await borg.download_media(sticker, file=os.path.join(path, file))\n with open(pack_file, \"a\") as f:\n f.write(f\"{{'image_file': '{file}','emojis':{emojis[sticker.id]}}},\")\n pending_tasks = [\n asyncio.ensure_future(\n download(document, emojis, Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name, f\"{i:03d}.{file_ext_ns_ion}\")\n ) for i, document in enumerate(sticker_set.documents)\n ]\n await event.edit(f\"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}...\")\n num_tasks = len(pending_tasks)\n while 1:\n done, pending_tasks = await asyncio.wait(pending_tasks, timeout=2.5,\n return_when=asyncio.FIRST_COMPLETED)\n try:\n await 
event.edit(\n f\"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}\")\n except MessageNotModifiedError:\n pass\n if not pending_tasks:\n break\n await event.edit(\"Downloading to my local completed\")\n # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7\n directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name\n zipf = zipfile.ZipFile(directory_name + \".zip\", \"w\", zipfile.ZIP_DEFLATED)\n zipdir(directory_name, zipf)\n zipf.close()\n await borg.send_file(\n event.chat_id,\n directory_name + \".zip\",\n caption=file_caption,\n force_document=True,\n allow_cache=False,\n reply_to=event.message.id,\n progress_callback=progress\n )\n try:\n os.remove(directory_name + \".zip\")\n os.remove(directory_name)\n except:\n pass\n await event.edit(\"task Completed\")\n await asyncio.sleep(3)\n await event.delete()\n else:\n await event.edit(\"TODO: Not Implemented\")\n\n\n# Helpers\n\ndef is_it_animated_sticker(message):\n try:\n if message.media and message.media.document:\n mime_type = message.media.document.mime_type\n if \"tgsticker\" in mime_type:\n return True\n else:\n return False\n else:\n return False\n except:\n return False\n\n\ndef is_message_image(message):\n if message.media:\n if isinstance(message.media, MessageMediaPhoto):\n return True\n if message.media.document:\n if message.media.document.mime_type.split(\"/\")[0] == \"image\":\n return True\n return False\n return False\n\n\nasync def silently_send_message(conv, text):\n await conv.send_message(text)\n response = await conv.get_response()\n await conv.mark_read(message=response)\n return response\n\n\nasync def stickerset_exists(conv, setname):\n try:\n await borg(GetStickerSetRequest(InputStickerSetShortName(setname)))\n response = await silently_send_message(conv, \"/addsticker\")\n if response.text == \"Invalid pack selected.\":\n await silently_send_message(conv, \"/cancel\")\n return False\n await silently_send_message(conv, \"/cancel\")\n return True\n except StickersetInvalidError:\n return False\n\n\ndef resize_image(image, save_locaton):\n \"\"\" Copyright Rhyse Simpson:\n https://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py\n \"\"\"\n im = Image.open(image)\n maxsize = (512, 512)\n if (im.width and im.height) < 512:\n size1 = im.width\n size2 = im.height\n if im.width > im.height:\n scale = 512 / size1\n size1new = 512\n size2new = size2 * scale\n else:\n scale = 512 / size2\n size1new = size1 * scale\n size2new = 512\n size1new = math.floor(size1new)\n size2new = math.floor(size2new)\n sizenew = (size1new, size2new)\n im = im.resize(sizenew)\n else:\n im.thumbnail(maxsize)\n im.save(save_locaton, \"PNG\")\n\n\ndef progress(current, total):\n logger.info(\"Uploaded: {} of {}\\nCompleted {}\".format(current, total, (current / total) * 100))\n\n\ndef find_instance(items, class_or_tuple):\n for item in items:\n if isinstance(item, class_or_tuple):\n return item\n return None\n\n\ndef zipdir(path, ziph):\n # ziph is zipfile handle\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file))\n os.remove(os.path.join(root, file))\n"},"path":{"kind":"string","value":"stdplugins/stickers.py"},"size":{"kind":"number","value":12714,"string":"12,714"},"nl_text":{"kind":"string","value":"Copyright Rhyse Simpson:\nhttps://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py\nMake / Download Telegram Sticker Packs without installing Third Party applications\nAvailable Commands:\n.kangsticker 
[Optional Emoji]\n.packinfo\n.getsticker\n\n format: Uni_Borg_userid format: Uni_Borg_userid https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 Sticker emojis are retrieved as a mapping of : So we need to build a mapping of : Thanks, Durov https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 Helpers ziph is zipfile handle"},"nl_size":{"kind":"number","value":638,"string":"638"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6827234029769897,"string":"0.682723"}}},{"rowIdx":7820,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python3\n# Copyright (c) 2015-2017 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test processing of unrequested blocks.\n\nSetup: two nodes, node0+node1, not connected to each other. Node1 will have\nnMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.\n\nWe have one P2PInterface connection to node0 called test_node, and one to node1\ncalled min_work_node.\n\nThe test:\n1. Generate one block on each node, to leave IBD.\n\n2. Mine a new block on each tip, and deliver to each node from node's peer.\n The tip should advance for node0, but node1 should skip processing due to\n nMinimumChainWork.\n\nNode1 is unused in tests 3-7:\n\n3. Mine a block that forks from the genesis block, and deliver to test_node.\n Node0 should not process this block (just accept the header), because it\n is unrequested and doesn't have more or equal work to the tip.\n\n4a,b. Send another two blocks that build on the forking block.\n Node0 should process the second block but be stuck on the shorter chain,\n because it's missing an intermediate block.\n\n4c.Send 288 more blocks on the longer chain (the number of blocks ahead\n we currently store).\n Node0 should process all but the last block (too far ahead in height).\n\n5. Send a duplicate of the block in #3 to Node0.\n Node0 should not process the block because it is unrequested, and stay on\n the shorter chain.\n\n6. Send Node0 an inv for the height 3 block produced in #4 above.\n Node0 should figure out that Node0 has the missing height 2 block and send a\n getdata.\n\n7. Send Node0 the missing block again.\n Node0 should process and the tip should advance.\n\n8. Create a fork which is invalid at a height longer than the current chain\n (ie to which the node will try to reorg) but which has headers built on top\n of the invalid block. Check that we get disconnected if we send more headers\n on the chain the node now knows to be invalid.\n\n9. 
Test Node1 is able to sync when connected to node0 (which should have sufficient\n work on its chain).\n\"\"\"\n\nfrom test_framework.mininode import *\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import *\nimport time\nfrom test_framework.blocktools import create_block, create_coinbase, create_transaction\n\nclass AcceptBlockTest(BitcoinTestFramework):\n def add_options(self, parser):\n parser.add_option(\"--testbinary\", dest=\"testbinary\",\n default=os.getenv(\"BITCOIND\", \"uexd\"),\n help=\"uexd binary to test\")\n\n def set_test_params(self):\n self.setup_clean_chain = True\n self.num_nodes = 2\n self.extra_args = [[], [\"-minimumchainwork=0x10\"]]\n\n def setup_network(self):\n # Node0 will be used to test behavior of processing unrequested blocks\n # from peers which are not whitelisted, while Node1 will be used for\n # the whitelisted case.\n # Node2 will be used for non-whitelisted peers to test the interaction\n # with nMinimumChainWork.\n self.setup_nodes()\n\n def run_test(self):\n # Setup the p2p connections and start up the network thread.\n # test_node connects to node0 (not whitelisted)\n test_node = self.nodes[0].add_p2p_connection(P2PInterface())\n # min_work_node connects to node1 (whitelisted)\n min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())\n\n network_thread_start()\n\n # Test logic begins here\n test_node.wait_for_verack()\n min_work_node.wait_for_verack()\n\n # 1. Have nodes mine a block (leave IBD)\n [ n.generate(1) for n in self.nodes ]\n tips = [ int(\"0x\" + n.getbestblockhash(), 0) for n in self.nodes ]\n\n # 2. Send one block that builds on each tip.\n # This should be accepted by node0\n blocks_h2 = [] # the height 2 blocks on each node's chain\n block_time = int(time.time()) + 1\n for i in range(2):\n blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))\n blocks_h2[i].solve()\n block_time += 1\n test_node.send_message(msg_block(blocks_h2[0]))\n min_work_node.send_message(msg_block(blocks_h2[1]))\n\n for x in [test_node, min_work_node]:\n x.sync_with_ping()\n assert_equal(self.nodes[0].getblockcount(), 2)\n assert_equal(self.nodes[1].getblockcount(), 1)\n self.log.info(\"First height 2 block accepted by node0; correctly rejected by node1\")\n\n # 3. Send another block that builds on genesis.\n block_h1f = create_block(int(\"0x\" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)\n block_time += 1\n block_h1f.solve()\n test_node.send_message(msg_block(block_h1f))\n\n test_node.sync_with_ping()\n tip_entry_found = False\n for x in self.nodes[0].getchaintips():\n if x['hash'] == block_h1f.hash:\n assert_equal(x['status'], \"headers-only\")\n tip_entry_found = True\n assert(tip_entry_found)\n assert_raises_rpc_error(-1, \"Block not found on disk\", self.nodes[0].getblock, block_h1f.hash)\n\n # 4. 
Send another two block that build on the fork.\n block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)\n block_time += 1\n block_h2f.solve()\n test_node.send_message(msg_block(block_h2f))\n\n test_node.sync_with_ping()\n # Since the earlier block was not processed by node, the new block\n # can't be fully validated.\n tip_entry_found = False\n for x in self.nodes[0].getchaintips():\n if x['hash'] == block_h2f.hash:\n assert_equal(x['status'], \"headers-only\")\n tip_entry_found = True\n assert(tip_entry_found)\n\n # But this block should be accepted by node since it has equal work.\n self.nodes[0].getblock(block_h2f.hash)\n self.log.info(\"Second height 2 block accepted, but not reorg'ed to\")\n\n # 4b. Now send another block that builds on the forking chain.\n block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)\n block_h3.solve()\n test_node.send_message(msg_block(block_h3))\n\n test_node.sync_with_ping()\n # Since the earlier block was not processed by node, the new block\n # can't be fully validated.\n tip_entry_found = False\n for x in self.nodes[0].getchaintips():\n if x['hash'] == block_h3.hash:\n assert_equal(x['status'], \"headers-only\")\n tip_entry_found = True\n assert(tip_entry_found)\n self.nodes[0].getblock(block_h3.hash)\n\n # But this block should be accepted by node since it has more work.\n self.nodes[0].getblock(block_h3.hash)\n self.log.info(\"Unrequested more-work block accepted\")\n\n # 4c. Now mine 288 more blocks and deliver; all should be processed but\n # the last (height-too-high) on node (as long as its not missing any headers)\n tip = block_h3\n all_blocks = []\n for i in range(288):\n next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)\n next_block.solve()\n all_blocks.append(next_block)\n tip = next_block\n\n # Now send the block at height 5 and check that it wasn't accepted (missing header)\n test_node.send_message(msg_block(all_blocks[1]))\n test_node.sync_with_ping()\n assert_raises_rpc_error(-5, \"Block not found\", self.nodes[0].getblock, all_blocks[1].hash)\n assert_raises_rpc_error(-5, \"Block not found\", self.nodes[0].getblockheader, all_blocks[1].hash)\n\n # The block at height 5 should be accepted if we provide the missing header, though\n headers_message = msg_headers()\n headers_message.headers.append(CBlockHeader(all_blocks[0]))\n test_node.send_message(headers_message)\n test_node.send_message(msg_block(all_blocks[1]))\n test_node.sync_with_ping()\n self.nodes[0].getblock(all_blocks[1].hash)\n\n # Now send the blocks in all_blocks\n for i in range(288):\n test_node.send_message(msg_block(all_blocks[i]))\n test_node.sync_with_ping()\n\n # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead\n for x in all_blocks[:-1]:\n self.nodes[0].getblock(x.hash)\n assert_raises_rpc_error(-1, \"Block not found on disk\", self.nodes[0].getblock, all_blocks[-1].hash)\n\n # 5. 
Test handling of unrequested block on the node that didn't process\n # Should still not be processed (even though it has a child that has more\n # work).\n\n # The node should have requested the blocks at some point, so\n # disconnect/reconnect first\n\n self.nodes[0].disconnect_p2ps()\n self.nodes[1].disconnect_p2ps()\n network_thread_join()\n\n test_node = self.nodes[0].add_p2p_connection(P2PInterface())\n network_thread_start()\n test_node.wait_for_verack()\n\n test_node.send_message(msg_block(block_h1f))\n\n test_node.sync_with_ping()\n assert_equal(self.nodes[0].getblockcount(), 2)\n self.log.info(\"Unrequested block that would complete more-work chain was ignored\")\n\n # 6. Try to get node to request the missing block.\n # Poke the node with an inv for block at height 3 and see if that\n # triggers a getdata on block 2 (it should if block 2 is missing).\n with mininode_lock:\n # Clear state so we can check the getdata request\n test_node.last_message.pop(\"getdata\", None)\n test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))\n\n test_node.sync_with_ping()\n with mininode_lock:\n getdata = test_node.last_message[\"getdata\"]\n\n # Check that the getdata includes the right block\n assert_equal(getdata.inv[0].hash, block_h1f.sha256)\n self.log.info(\"Inv at tip triggered getdata for unprocessed block\")\n\n # 7. Send the missing block for the third time (now it is requested)\n test_node.send_message(msg_block(block_h1f))\n\n test_node.sync_with_ping()\n assert_equal(self.nodes[0].getblockcount(), 290)\n self.nodes[0].getblock(all_blocks[286].hash)\n assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)\n assert_raises_rpc_error(-1, \"Block not found on disk\", self.nodes[0].getblock, all_blocks[287].hash)\n self.log.info(\"Successfully reorged to longer chain from non-whitelisted peer\")\n\n # 8. 
Create a chain which is invalid at a height longer than the\n # current chain, but which has more blocks on top of that\n block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)\n block_289f.solve()\n block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)\n block_290f.solve()\n block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)\n # block_291 spends a coinbase below maturity!\n block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b\"42\", 1))\n block_291.hashMerkleRoot = block_291.calc_merkle_root()\n block_291.solve()\n block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)\n block_292.solve()\n\n # Now send all the headers on the chain and enough blocks to trigger reorg\n headers_message = msg_headers()\n headers_message.headers.append(CBlockHeader(block_289f))\n headers_message.headers.append(CBlockHeader(block_290f))\n headers_message.headers.append(CBlockHeader(block_291))\n headers_message.headers.append(CBlockHeader(block_292))\n test_node.send_message(headers_message)\n\n test_node.sync_with_ping()\n tip_entry_found = False\n for x in self.nodes[0].getchaintips():\n if x['hash'] == block_292.hash:\n assert_equal(x['status'], \"headers-only\")\n tip_entry_found = True\n assert(tip_entry_found)\n assert_raises_rpc_error(-1, \"Block not found on disk\", self.nodes[0].getblock, block_292.hash)\n\n test_node.send_message(msg_block(block_289f))\n test_node.send_message(msg_block(block_290f))\n\n test_node.sync_with_ping()\n self.nodes[0].getblock(block_289f.hash)\n self.nodes[0].getblock(block_290f.hash)\n\n test_node.send_message(msg_block(block_291))\n\n # At this point we've sent an obviously-bogus block, wait for full processing\n # without assuming whether we will be disconnected or not\n try:\n # Only wait a short while so the test doesn't take forever if we do get\n # disconnected\n test_node.sync_with_ping(timeout=1)\n except AssertionError:\n test_node.wait_for_disconnect()\n\n self.nodes[0].disconnect_p2ps()\n test_node = self.nodes[0].add_p2p_connection(P2PInterface())\n\n network_thread_start()\n test_node.wait_for_verack()\n\n # We should have failed reorg and switched back to 290 (but have block 291)\n assert_equal(self.nodes[0].getblockcount(), 290)\n assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)\n assert_equal(self.nodes[0].getblock(block_291.hash)[\"confirmations\"], -1)\n\n # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected\n block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)\n block_293.solve()\n headers_message = msg_headers()\n headers_message.headers.append(CBlockHeader(block_293))\n test_node.send_message(headers_message)\n test_node.wait_for_disconnect()\n\n # 9. Connect node1 to node0 and ensure it is able to sync\n connect_nodes(self.nodes[0], 1)\n sync_blocks([self.nodes[0], self.nodes[1]])\n self.log.info(\"Successfully synced nodes 1 and 0\")\n\nif __name__ == '__main__':\n AcceptBlockTest().main()\n"},"path":{"kind":"string","value":"test/functional/p2p_unrequested_blocks.py"},"size":{"kind":"number","value":14257,"string":"14,257"},"nl_text":{"kind":"string","value":"Test processing of unrequested blocks.\n\nSetup: two nodes, node0+node1, not connected to each other. 
Test handling of unrequested block on the node that didn't process Should still not be processed (even though it has a child that has more work). The node should have requested the blocks at some point, so disconnect/reconnect first 6. Try to get node to request the missing block. Poke the node with an inv for block at height 3 and see if that triggers a getdata on block 2 (it should if block 2 is missing). Clear state so we can check the getdata request Check that the getdata includes the right block 7. Send the missing block for the third time (now it is requested) 8. Create a chain which is invalid at a height longer than the current chain, but which has more blocks on top of that block_291 spends a coinbase below maturity! Now send all the headers on the chain and enough blocks to trigger reorg At this point we've sent an obviously-bogus block, wait for full processing without assuming whether we will be disconnected or not Only wait a short while so the test doesn't take forever if we do get disconnected We should have failed reorg and switched back to 290 (but have block 291) Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected 9. Connect node1 to node0 and ensure it is able to sync"},"nl_size":{"kind":"number","value":4893,"string":"4,893"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.9296813011169434,"string":"0.929681"}}},{"rowIdx":7821,"cells":{"content":{"kind":"string","value":"import networkx\nimport random\n\ndef regularize_graph(graph,d):\n regularized = True\n for node_id in list(graph.nodes()):\n if graph.in_degree(node_id)!=d or graph.out_degree(node_id)!=d:\n regularized = False\n break\n while not regularized:\n lost_in_degree_ids = []\n full_in_degree_ids = []\n for node_id in list(graph.nodes()):\n if graph.in_degree(node_id) EPS or not self.done:\n dist = []\n for j in range(0,len(self.goals_x)):\n vec1 = np.array([self.goals_x[j],self.goals_y[j],0.0]) - np.array([self.current_poses[0][i].position.x,self.current_poses[0][i].position.y,0.0]) #Vector from current position to a goal\n rotation = (self.current_poses[0][i].orientation.x,self.current_poses[0][i].orientation.y,self.current_poses[0][i].orientation.z,self.current_poses[0][i].orientation.w)\n roll,pitch,yaw = tf.transformations.euler_from_quaternion(rotation)\n unit_vec = np.array([np.cos(yaw), np.sin(yaw),0.0])\n self.theta_phi[i][j] = (np.arccos(np.dot(vec1,unit_vec)/np.linalg.norm(vec1)))\n dist.append(np.linalg.norm([self.current_poses[0][i].position.x - self.goals_x[j],self.current_poses[0][i].position.y - self.goals_y[j]]))\n\n self.probability_goal_window[i][self.itr] = self.mv_nd.pdf(np.array(self.theta_phi[i]));\n\n self.probability_goal[i] = np.array([1.0]*self.goal_num)\n for k in range(0,len(self.probability_goal_window[i])):\n gf = np.exp((k-self.window_size)/5)\n self.probability_goal[i] = np.power(self.probability_goal_window[i][k],gf)* np.array(self.probability_goal[i]) # Linear prediction of goal\n\n for ln in range(0,len(self.goals_x)):\n self.probability_goal[i][ln] = (1/dist[ln])*self.probability_goal[i][ln];\n\n self.probability_goal[i] = (self.probability_goal[i]-np.min(self.probability_goal[i]))/(np.max(self.probability_goal[i])-np.min(self.probability_goal[i]))\n\n\n self.itr = self.itr + 1\n if self.itr == self.window_size:\n self.itr = 0\n\n self.done = True\n\n self.predict_goal()\n\n\n def predict_goal(self):\n idx = 0\n max_prob = 0.0\n p_goal = PredictedGoal()\n\n for i in 
range(0,len(self.current_poses[0])):\n for j in range(0,len(self.goals_x)):\n if(max_prob 10)\n for name, cmd in _ip.alias_manager.aliases:\n # we must strip dots from alias names\n nt.assert_not_in('.', name)\n\n # rehashx must fill up syscmdlist\n scoms = _ip.db['syscmdlist']\n nt.assert_true(len(scoms) > 10)\n\n\ndef test_magic_parse_options():\n \"\"\"Test that we don't mangle paths when parsing magic options.\"\"\"\n ip = get_ipython()\n path = 'c:\\\\x'\n m = DummyMagics(ip)\n opts = m.parse_options('-f %s' % path,'f:')[0]\n # argv splitting is os-dependent\n if os.name == 'posix':\n expected = 'c:x'\n else:\n expected = path\n nt.assert_equal(opts['f'], expected)\n\ndef test_magic_parse_long_options():\n \"\"\"Magic.parse_options can handle --foo=bar long options\"\"\"\n ip = get_ipython()\n m = DummyMagics(ip)\n opts, _ = m.parse_options('--foo --bar=bubble', 'a', 'foo', 'bar=')\n nt.assert_in('foo', opts)\n nt.assert_in('bar', opts)\n nt.assert_equal(opts['bar'], \"bubble\")\n\n\n@dec.skip_without('sqlite3')\ndef doctest_hist_f():\n \"\"\"Test %hist -f with temporary filename.\n\n In [9]: import tempfile\n\n In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')\n\n In [11]: %hist -nl -f $tfile 3\n\n In [13]: import os; os.unlink(tfile)\n \"\"\"\n\n\n@dec.skip_without('sqlite3')\ndef doctest_hist_r():\n \"\"\"Test %hist -r\n\n XXX - This test is not recording the output correctly. For some reason, in\n testing mode the raw history isn't getting populated. No idea why.\n Disabling the output checking for now, though at least we do run it.\n\n In [1]: 'hist' in _ip.lsmagic()\n Out[1]: True\n\n In [2]: x=1\n\n In [3]: %hist -rl 2\n x=1 # random\n %hist -r 2\n \"\"\"\n\n\n@dec.skip_without('sqlite3')\ndef doctest_hist_op():\n \"\"\"Test %hist -op\n\n In [1]: class b(float):\n ...: pass\n ...: \n\n In [2]: class s(object):\n ...: def __str__(self):\n ...: return 's'\n ...: \n\n In [3]: \n\n In [4]: class r(b):\n ...: def __repr__(self):\n ...: return 'r'\n ...: \n\n In [5]: class sr(s,r): pass\n ...: \n\n In [6]: \n\n In [7]: bb=b()\n\n In [8]: ss=s()\n\n In [9]: rr=r()\n\n In [10]: ssrr=sr()\n\n In [11]: 4.5\n Out[11]: 4.5\n\n In [12]: str(ss)\n Out[12]: 's'\n\n In [13]: \n\n In [14]: %hist -op\n >>> class b:\n ... pass\n ... \n >>> class s(b):\n ... def __str__(self):\n ... return 's'\n ... \n >>> \n >>> class r(b):\n ... def __repr__(self):\n ... return 'r'\n ... 
\n >>> class sr(s,r): pass\n >>> \n >>> bb=b()\n >>> ss=s()\n >>> rr=r()\n >>> ssrr=sr()\n >>> 4.5\n 4.5\n >>> str(ss)\n 's'\n >>> \n \"\"\"\n\ndef test_hist_pof():\n ip = get_ipython()\n ip.run_cell(u\"1+2\", store_history=True)\n #raise Exception(ip.history_manager.session_number)\n #raise Exception(list(ip.history_manager._get_range_session()))\n with TemporaryDirectory() as td:\n tf = os.path.join(td, 'hist.py')\n ip.run_line_magic('history', '-pof %s' % tf)\n assert os.path.isfile(tf)\n\n\n@dec.skip_without('sqlite3')\ndef test_macro():\n ip = get_ipython()\n ip.history_manager.reset() # Clear any existing history.\n cmds = [\"a=1\", \"def b():\\n return a**2\", \"print(a,b())\"]\n for i, cmd in enumerate(cmds, start=1):\n ip.history_manager.store_inputs(i, cmd)\n ip.magic(\"macro test 1-3\")\n nt.assert_equal(ip.user_ns[\"test\"].value, \"\\n\".join(cmds)+\"\\n\")\n \n # List macros\n nt.assert_in(\"test\", ip.magic(\"macro\"))\n\n\n@dec.skip_without('sqlite3')\ndef test_macro_run():\n \"\"\"Test that we can run a multi-line macro successfully.\"\"\"\n ip = get_ipython()\n ip.history_manager.reset()\n cmds = [\"a=10\", \"a+=1\", \"print(a)\", \"%macro test 2-3\"]\n for cmd in cmds:\n ip.run_cell(cmd, store_history=True)\n nt.assert_equal(ip.user_ns[\"test\"].value, \"a+=1\\nprint(a)\\n\")\n with tt.AssertPrints(\"12\"):\n ip.run_cell(\"test\")\n with tt.AssertPrints(\"13\"):\n ip.run_cell(\"test\")\n\n\ndef test_magic_magic():\n \"\"\"Test %magic\"\"\"\n ip = get_ipython()\n with capture_output() as captured:\n ip.magic(\"magic\")\n \n stdout = captured.stdout\n nt.assert_in('%magic', stdout)\n nt.assert_in('IPython', stdout)\n nt.assert_in('Available', stdout)\n\n\n@dec.skipif_not_numpy\ndef test_numpy_reset_array_undec():\n \"Test '%reset array' functionality\"\n _ip.ex('import numpy as np')\n _ip.ex('a = np.empty(2)')\n nt.assert_in('a', _ip.user_ns)\n _ip.magic('reset -f array')\n nt.assert_not_in('a', _ip.user_ns)\n\ndef test_reset_out():\n \"Test '%reset out' magic\"\n _ip.run_cell(\"parrot = 'dead'\", store_history=True)\n # test '%reset -f out', make an Out prompt\n _ip.run_cell(\"parrot\", store_history=True)\n nt.assert_true('dead' in [_ip.user_ns[x] for x in ('_','__','___')])\n _ip.magic('reset -f out')\n nt.assert_false('dead' in [_ip.user_ns[x] for x in ('_','__','___')])\n nt.assert_equal(len(_ip.user_ns['Out']), 0)\n\ndef test_reset_in():\n \"Test '%reset in' magic\"\n # test '%reset -f in'\n _ip.run_cell(\"parrot\", store_history=True)\n nt.assert_true('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])\n _ip.magic('%reset -f in')\n nt.assert_false('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])\n nt.assert_equal(len(set(_ip.user_ns['In'])), 1)\n\ndef test_reset_dhist():\n \"Test '%reset dhist' magic\"\n _ip.run_cell(\"tmp = [d for d in _dh]\") # copy before clearing\n _ip.magic('cd ' + os.path.dirname(nt.__file__))\n _ip.magic('cd -')\n nt.assert_true(len(_ip.user_ns['_dh']) > 0)\n _ip.magic('reset -f dhist')\n nt.assert_equal(len(_ip.user_ns['_dh']), 0)\n _ip.run_cell(\"_dh = [d for d in tmp]\") #restore\n\ndef test_reset_in_length():\n \"Test that '%reset in' preserves In[] length\"\n _ip.run_cell(\"print 'foo'\")\n _ip.run_cell(\"reset -f in\")\n nt.assert_equal(len(_ip.user_ns['In']), _ip.displayhook.prompt_count+1)\n\ndef test_tb_syntaxerror():\n \"\"\"test %tb after a SyntaxError\"\"\"\n ip = get_ipython()\n ip.run_cell(\"for\")\n \n # trap and validate stdout\n save_stdout = sys.stdout\n try:\n sys.stdout = StringIO()\n 
ip.run_cell(\"%tb\")\n out = sys.stdout.getvalue()\n finally:\n sys.stdout = save_stdout\n # trim output, and only check the last line\n last_line = out.rstrip().splitlines()[-1].strip()\n nt.assert_equal(last_line, \"SyntaxError: invalid syntax\")\n\n\ndef test_time():\n ip = get_ipython()\n \n with tt.AssertPrints(\"Wall time: \"):\n ip.run_cell(\"%time None\")\n \n ip.run_cell(\"def f(kmjy):\\n\"\n \" %time print (2*kmjy)\")\n \n with tt.AssertPrints(\"Wall time: \"):\n with tt.AssertPrints(\"hihi\", suppress=False):\n ip.run_cell(\"f('hi')\")\n\n\n@dec.skip_win32\ndef test_time2():\n ip = get_ipython()\n \n with tt.AssertPrints(\"CPU times: user \"):\n ip.run_cell(\"%time None\")\n\ndef test_time3():\n \"\"\"Erroneous magic function calls, issue gh-3334\"\"\"\n ip = get_ipython()\n ip.user_ns.pop('run', None)\n \n with tt.AssertNotPrints(\"not found\", channel='stderr'):\n ip.run_cell(\"%%time\\n\"\n \"run = 0\\n\"\n \"run += 1\")\n\ndef test_doctest_mode():\n \"Toggle doctest_mode twice, it should be a no-op and run without error\"\n _ip.magic('doctest_mode')\n _ip.magic('doctest_mode')\n\n\ndef test_parse_options():\n \"\"\"Tests for basic options parsing in magics.\"\"\"\n # These are only the most minimal of tests, more should be added later. At\n # the very least we check that basic text/unicode calls work OK.\n m = DummyMagics(_ip)\n nt.assert_equal(m.parse_options('foo', '')[1], 'foo')\n nt.assert_equal(m.parse_options(u'foo', '')[1], u'foo')\n\n\ndef test_dirops():\n \"\"\"Test various directory handling operations.\"\"\"\n # curpath = lambda :os.path.splitdrive(os.getcwd())[1].replace('\\\\','/')\n curpath = os.getcwd\n startdir = os.getcwd()\n ipdir = os.path.realpath(_ip.ipython_dir)\n try:\n _ip.magic('cd \"%s\"' % ipdir)\n nt.assert_equal(curpath(), ipdir)\n _ip.magic('cd -')\n nt.assert_equal(curpath(), startdir)\n _ip.magic('pushd \"%s\"' % ipdir)\n nt.assert_equal(curpath(), ipdir)\n _ip.magic('popd')\n nt.assert_equal(curpath(), startdir)\n finally:\n os.chdir(startdir)\n\n\ndef test_cd_force_quiet():\n \"\"\"Test OSMagics.cd_force_quiet option\"\"\"\n _ip.config.OSMagics.cd_force_quiet = True\n osmagics = osm.OSMagics(shell=_ip)\n\n startdir = os.getcwd()\n ipdir = os.path.realpath(_ip.ipython_dir)\n\n try:\n with tt.AssertNotPrints(ipdir):\n osmagics.cd('\"%s\"' % ipdir)\n with tt.AssertNotPrints(startdir):\n osmagics.cd('-')\n finally:\n os.chdir(startdir)\n\n\ndef test_xmode():\n # Calling xmode three times should be a no-op\n xmode = _ip.InteractiveTB.mode\n for i in range(4):\n _ip.magic(\"xmode\")\n nt.assert_equal(_ip.InteractiveTB.mode, xmode)\n \ndef test_reset_hard():\n monitor = []\n class A(object):\n def __del__(self):\n monitor.append(1)\n def __repr__(self):\n return \"\"\n \n _ip.user_ns[\"a\"] = A()\n _ip.run_cell(\"a\")\n \n nt.assert_equal(monitor, [])\n _ip.magic(\"reset -f\")\n nt.assert_equal(monitor, [1])\n \nclass TestXdel(tt.TempFileMixin):\n def test_xdel(self):\n \"\"\"Test that references from %run are cleared by xdel.\"\"\"\n src = (\"class A(object):\\n\"\n \" monitor = []\\n\"\n \" def __del__(self):\\n\"\n \" self.monitor.append(1)\\n\"\n \"a = A()\\n\")\n self.mktmp(src)\n # %run creates some hidden references...\n _ip.magic(\"run %s\" % self.fname)\n # ... 
as does the displayhook.\n _ip.run_cell(\"a\")\n \n monitor = _ip.user_ns[\"A\"].monitor\n nt.assert_equal(monitor, [])\n \n _ip.magic(\"xdel a\")\n \n # Check that a's __del__ method has been called.\n nt.assert_equal(monitor, [1])\n\ndef doctest_who():\n \"\"\"doctest for %who\n \n In [1]: %reset -f\n \n In [2]: alpha = 123\n \n In [3]: beta = 'beta'\n \n In [4]: %who int\n alpha\n \n In [5]: %who str\n beta\n \n In [6]: %whos\n Variable Type Data/Info\n ----------------------------\n alpha int 123\n beta str beta\n \n In [7]: %who_ls\n Out[7]: ['alpha', 'beta']\n \"\"\"\n\ndef test_whos():\n \"\"\"Check that whos is protected against objects where repr() fails.\"\"\"\n class A(object):\n def __repr__(self):\n raise Exception()\n _ip.user_ns['a'] = A()\n _ip.magic(\"whos\")\n\ndef doctest_precision():\n \"\"\"doctest for %precision\n \n In [1]: f = get_ipython().display_formatter.formatters['text/plain']\n \n In [2]: %precision 5\n Out[2]: '%.5f'\n \n In [3]: f.float_format\n Out[3]: '%.5f'\n \n In [4]: %precision %e\n Out[4]: '%e'\n \n In [5]: f(3.1415927)\n Out[5]: '3.141593e+00'\n \"\"\"\n\ndef test_psearch():\n with tt.AssertPrints(\"dict.fromkeys\"):\n _ip.run_cell(\"dict.fr*?\")\n\ndef test_timeit_shlex():\n \"\"\"test shlex issues with timeit (#1109)\"\"\"\n _ip.ex(\"def f(*a,**kw): pass\")\n _ip.magic('timeit -n1 \"this is a bug\".count(\" \")')\n _ip.magic('timeit -r1 -n1 f(\" \", 1)')\n _ip.magic('timeit -r1 -n1 f(\" \", 1, \" \", 2, \" \")')\n _ip.magic('timeit -r1 -n1 (\"a \" + \"b\")')\n _ip.magic('timeit -r1 -n1 f(\"a \" + \"b\")')\n _ip.magic('timeit -r1 -n1 f(\"a \" + \"b \")')\n\n\ndef test_timeit_special_syntax():\n \"Test %%timeit with IPython special syntax\"\n @register_line_magic\n def lmagic(line):\n ip = get_ipython()\n ip.user_ns['lmagic_out'] = line\n\n # line mode test\n _ip.run_line_magic('timeit', '-n1 -r1 %lmagic my line')\n nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')\n # cell mode test\n _ip.run_cell_magic('timeit', '-n1 -r1', '%lmagic my line2')\n nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')\n\ndef test_timeit_return():\n \"\"\"\n test whether timeit -o return object\n \"\"\"\n\n res = _ip.run_line_magic('timeit','-n10 -r10 -o 1')\n assert(res is not None)\n\ndef test_timeit_quiet():\n \"\"\"\n test quiet option of timeit magic\n \"\"\"\n with tt.AssertNotPrints(\"loops\"):\n _ip.run_cell(\"%timeit -n1 -r1 -q 1\")\n\ndef test_timeit_return_quiet():\n with tt.AssertNotPrints(\"loops\"):\n res = _ip.run_line_magic('timeit', '-n1 -r1 -q -o 1')\n assert (res is not None)\n\ndef test_timeit_invalid_return():\n with nt.assert_raises_regex(SyntaxError, \"outside function\"):\n _ip.run_line_magic('timeit', 'return')\n\n@dec.skipif(execution.profile is None)\ndef test_prun_special_syntax():\n \"Test %%prun with IPython special syntax\"\n @register_line_magic\n def lmagic(line):\n ip = get_ipython()\n ip.user_ns['lmagic_out'] = line\n\n # line mode test\n _ip.run_line_magic('prun', '-q %lmagic my line')\n nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')\n # cell mode test\n _ip.run_cell_magic('prun', '-q', '%lmagic my line2')\n nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')\n\n@dec.skipif(execution.profile is None)\ndef test_prun_quotes():\n \"Test that prun does not clobber string escapes (GH #1302)\"\n _ip.magic(r\"prun -q x = '\\t'\")\n nt.assert_equal(_ip.user_ns['x'], '\\t')\n\ndef test_extension():\n # Debugging information for failures of this test\n print('sys.path:')\n for p in sys.path:\n print(' ', p)\n print('CWD', 
os.getcwd())\n\n nt.assert_raises(ImportError, _ip.magic, \"load_ext daft_extension\")\n daft_path = os.path.join(os.path.dirname(__file__), \"daft_extension\")\n sys.path.insert(0, daft_path)\n try:\n _ip.user_ns.pop('arq', None)\n invalidate_caches() # Clear import caches\n _ip.magic(\"load_ext daft_extension\")\n nt.assert_equal(_ip.user_ns['arq'], 185)\n _ip.magic(\"unload_ext daft_extension\")\n assert 'arq' not in _ip.user_ns\n finally:\n sys.path.remove(daft_path)\n\n\ndef test_notebook_export_json():\n _ip = get_ipython()\n _ip.history_manager.reset() # Clear any existing history.\n cmds = [u\"a=1\", u\"def b():\\n return a**2\", u\"print('noël, été', b())\"]\n for i, cmd in enumerate(cmds, start=1):\n _ip.history_manager.store_inputs(i, cmd)\n with TemporaryDirectory() as td:\n outfile = os.path.join(td, \"nb.ipynb\")\n _ip.magic(\"notebook -e %s\" % outfile)\n\n\nclass TestEnv(TestCase):\n\n def test_env(self):\n env = _ip.magic(\"env\")\n self.assertTrue(isinstance(env, dict))\n\n def test_env_get_set_simple(self):\n env = _ip.magic(\"env var val1\")\n self.assertEqual(env, None)\n self.assertEqual(os.environ['var'], 'val1')\n self.assertEqual(_ip.magic(\"env var\"), 'val1')\n env = _ip.magic(\"env var=val2\")\n self.assertEqual(env, None)\n self.assertEqual(os.environ['var'], 'val2')\n\n def test_env_get_set_complex(self):\n env = _ip.magic(\"env var 'val1 '' 'val2\")\n self.assertEqual(env, None)\n self.assertEqual(os.environ['var'], \"'val1 '' 'val2\")\n self.assertEqual(_ip.magic(\"env var\"), \"'val1 '' 'val2\")\n env = _ip.magic('env var=val2 val3=\"val4')\n self.assertEqual(env, None)\n self.assertEqual(os.environ['var'], 'val2 val3=\"val4')\n\n def test_env_set_bad_input(self):\n self.assertRaises(UsageError, lambda: _ip.magic(\"set_env var\"))\n\n def test_env_set_whitespace(self):\n self.assertRaises(UsageError, lambda: _ip.magic(\"env var A=B\"))\n\n\nclass CellMagicTestCase(TestCase):\n\n def check_ident(self, magic):\n # Manually called, we get the result\n out = _ip.run_cell_magic(magic, 'a', 'b')\n nt.assert_equal(out, ('a','b'))\n # Via run_cell, it goes into the user's namespace via displayhook\n _ip.run_cell('%%' + magic +' c\\nd\\n')\n nt.assert_equal(_ip.user_ns['_'], ('c','d\\n'))\n\n def test_cell_magic_func_deco(self):\n \"Cell magic using simple decorator\"\n @register_cell_magic\n def cellm(line, cell):\n return line, cell\n\n self.check_ident('cellm')\n\n def test_cell_magic_reg(self):\n \"Cell magic manually registered\"\n def cellm(line, cell):\n return line, cell\n\n _ip.register_magic_function(cellm, 'cell', 'cellm2')\n self.check_ident('cellm2')\n\n def test_cell_magic_class(self):\n \"Cell magics declared via a class\"\n @magics_class\n class MyMagics(Magics):\n\n @cell_magic\n def cellm3(self, line, cell):\n return line, cell\n\n _ip.register_magics(MyMagics)\n self.check_ident('cellm3')\n\n def test_cell_magic_class2(self):\n \"Cell magics declared via a class, #2\"\n @magics_class\n class MyMagics2(Magics):\n\n @cell_magic('cellm4')\n def cellm33(self, line, cell):\n return line, cell\n \n _ip.register_magics(MyMagics2)\n self.check_ident('cellm4')\n # Check that nothing is registered as 'cellm33'\n c33 = _ip.find_cell_magic('cellm33')\n nt.assert_equal(c33, None)\n\ndef test_file():\n \"\"\"Basic %%writefile\"\"\"\n ip = get_ipython()\n with TemporaryDirectory() as td:\n fname = os.path.join(td, 'file1')\n ip.run_cell_magic(\"writefile\", fname, u'\\n'.join([\n 'line1',\n 'line2',\n ]))\n with open(fname) as f:\n s = f.read()\n 
nt.assert_in('line1\\n', s)\n nt.assert_in('line2', s)\n\n@dec.skip_win32\ndef test_file_single_quote():\n \"\"\"Basic %%writefile with embedded single quotes\"\"\"\n ip = get_ipython()\n with TemporaryDirectory() as td:\n fname = os.path.join(td, '\\'file1\\'')\n ip.run_cell_magic(\"writefile\", fname, u'\\n'.join([\n 'line1',\n 'line2',\n ]))\n with open(fname) as f:\n s = f.read()\n nt.assert_in('line1\\n', s)\n nt.assert_in('line2', s)\n\n@dec.skip_win32\ndef test_file_double_quote():\n \"\"\"Basic %%writefile with embedded double quotes\"\"\"\n ip = get_ipython()\n with TemporaryDirectory() as td:\n fname = os.path.join(td, '\"file1\"')\n ip.run_cell_magic(\"writefile\", fname, u'\\n'.join([\n 'line1',\n 'line2',\n ]))\n with open(fname) as f:\n s = f.read()\n nt.assert_in('line1\\n', s)\n nt.assert_in('line2', s)\n\ndef test_file_var_expand():\n \"\"\"%%writefile $filename\"\"\"\n ip = get_ipython()\n with TemporaryDirectory() as td:\n fname = os.path.join(td, 'file1')\n ip.user_ns['filename'] = fname\n ip.run_cell_magic(\"writefile\", '$filename', u'\\n'.join([\n 'line1',\n 'line2',\n ]))\n with open(fname) as f:\n s = f.read()\n nt.assert_in('line1\\n', s)\n nt.assert_in('line2', s)\n\ndef test_file_unicode():\n \"\"\"%%writefile with unicode cell\"\"\"\n ip = get_ipython()\n with TemporaryDirectory() as td:\n fname = os.path.join(td, 'file1')\n ip.run_cell_magic(\"writefile\", fname, u'\\n'.join([\n u'liné1',\n u'liné2',\n ]))\n with io.open(fname, encoding='utf-8') as f:\n s = f.read()\n nt.assert_in(u'liné1\\n', s)\n nt.assert_in(u'liné2', s)\n\ndef test_file_amend():\n \"\"\"%%writefile -a amends files\"\"\"\n ip = get_ipython()\n with TemporaryDirectory() as td:\n fname = os.path.join(td, 'file2')\n ip.run_cell_magic(\"writefile\", fname, u'\\n'.join([\n 'line1',\n 'line2',\n ]))\n ip.run_cell_magic(\"writefile\", \"-a %s\" % fname, u'\\n'.join([\n 'line3',\n 'line4',\n ]))\n with open(fname) as f:\n s = f.read()\n nt.assert_in('line1\\n', s)\n nt.assert_in('line3\\n', s)\n\ndef test_file_spaces():\n \"\"\"%%file with spaces in filename\"\"\"\n ip = get_ipython()\n with TemporaryWorkingDirectory() as td:\n fname = \"file name\"\n ip.run_cell_magic(\"file\", '\"%s\"'%fname, u'\\n'.join([\n 'line1',\n 'line2',\n ]))\n with open(fname) as f:\n s = f.read()\n nt.assert_in('line1\\n', s)\n nt.assert_in('line2', s)\n \ndef test_script_config():\n ip = get_ipython()\n ip.config.ScriptMagics.script_magics = ['whoda']\n sm = script.ScriptMagics(shell=ip)\n nt.assert_in('whoda', sm.magics['cell'])\n\n@dec.skip_win32\ndef test_script_out():\n ip = get_ipython()\n ip.run_cell_magic(\"script\", \"--out output sh\", \"echo 'hi'\")\n nt.assert_equal(ip.user_ns['output'], 'hi\\n')\n\n@dec.skip_win32\ndef test_script_err():\n ip = get_ipython()\n ip.run_cell_magic(\"script\", \"--err error sh\", \"echo 'hello' >&2\")\n nt.assert_equal(ip.user_ns['error'], 'hello\\n')\n\n@dec.skip_win32\ndef test_script_out_err():\n ip = get_ipython()\n ip.run_cell_magic(\"script\", \"--out output --err error sh\", \"echo 'hi'\\necho 'hello' >&2\")\n nt.assert_equal(ip.user_ns['output'], 'hi\\n')\n nt.assert_equal(ip.user_ns['error'], 'hello\\n')\n\n@dec.skip_win32\ndef test_script_bg_out():\n ip = get_ipython()\n ip.run_cell_magic(\"script\", \"--bg --out output sh\", \"echo 'hi'\")\n\n nt.assert_equal(ip.user_ns['output'].read(), b'hi\\n')\n ip.user_ns['output'].close()\n\n@dec.skip_win32\ndef test_script_bg_err():\n ip = get_ipython()\n ip.run_cell_magic(\"script\", \"--bg --err error sh\", \"echo 'hello' 
>&2\")\n nt.assert_equal(ip.user_ns['error'].read(), b'hello\\n')\n ip.user_ns['error'].close()\n\n@dec.skip_win32\ndef test_script_bg_out_err():\n ip = get_ipython()\n ip.run_cell_magic(\"script\", \"--bg --out output --err error sh\", \"echo 'hi'\\necho 'hello' >&2\")\n nt.assert_equal(ip.user_ns['output'].read(), b'hi\\n')\n nt.assert_equal(ip.user_ns['error'].read(), b'hello\\n')\n ip.user_ns['output'].close()\n ip.user_ns['error'].close()\n\ndef test_script_defaults():\n ip = get_ipython()\n for cmd in ['sh', 'bash', 'perl', 'ruby']:\n try:\n find_cmd(cmd)\n except Exception:\n pass\n else:\n nt.assert_in(cmd, ip.magics_manager.magics['cell'])\n\n\n@magics_class\nclass FooFoo(Magics):\n \"\"\"class with both %foo and %%foo magics\"\"\"\n @line_magic('foo')\n def line_foo(self, line):\n \"I am line foo\"\n pass\n\n @cell_magic(\"foo\")\n def cell_foo(self, line, cell):\n \"I am cell foo, not line foo\"\n pass\n\ndef test_line_cell_info():\n \"\"\"%%foo and %foo magics are distinguishable to inspect\"\"\"\n ip = get_ipython()\n ip.magics_manager.register(FooFoo)\n oinfo = ip.object_inspect('foo')\n nt.assert_true(oinfo['found'])\n nt.assert_true(oinfo['ismagic'])\n \n oinfo = ip.object_inspect('%%foo')\n nt.assert_true(oinfo['found'])\n nt.assert_true(oinfo['ismagic'])\n nt.assert_equal(oinfo['docstring'], FooFoo.cell_foo.__doc__)\n\n oinfo = ip.object_inspect('%foo')\n nt.assert_true(oinfo['found'])\n nt.assert_true(oinfo['ismagic'])\n nt.assert_equal(oinfo['docstring'], FooFoo.line_foo.__doc__)\n\ndef test_multiple_magics():\n ip = get_ipython()\n foo1 = FooFoo(ip)\n foo2 = FooFoo(ip)\n mm = ip.magics_manager\n mm.register(foo1)\n nt.assert_true(mm.magics['line']['foo'].__self__ is foo1)\n mm.register(foo2)\n nt.assert_true(mm.magics['line']['foo'].__self__ is foo2)\n\ndef test_alias_magic():\n \"\"\"Test %alias_magic.\"\"\"\n ip = get_ipython()\n mm = ip.magics_manager\n\n # Basic operation: both cell and line magics are created, if possible.\n ip.run_line_magic('alias_magic', 'timeit_alias timeit')\n nt.assert_in('timeit_alias', mm.magics['line'])\n nt.assert_in('timeit_alias', mm.magics['cell'])\n\n # --cell is specified, line magic not created.\n ip.run_line_magic('alias_magic', '--cell timeit_cell_alias timeit')\n nt.assert_not_in('timeit_cell_alias', mm.magics['line'])\n nt.assert_in('timeit_cell_alias', mm.magics['cell'])\n\n # Test that line alias is created successfully.\n ip.run_line_magic('alias_magic', '--line env_alias env')\n nt.assert_equal(ip.run_line_magic('env', ''),\n ip.run_line_magic('env_alias', ''))\n\n # Test that line alias with parameters passed in is created successfully.\n ip.run_line_magic('alias_magic', '--line history_alias history --params ' + shlex.quote('3'))\n nt.assert_in('history_alias', mm.magics['line'])\n\n\ndef test_save():\n \"\"\"Test %save.\"\"\"\n ip = get_ipython()\n ip.history_manager.reset() # Clear any existing history.\n cmds = [u\"a=1\", u\"def b():\\n return a**2\", u\"print(a, b())\"]\n for i, cmd in enumerate(cmds, start=1):\n ip.history_manager.store_inputs(i, cmd)\n with TemporaryDirectory() as tmpdir:\n file = os.path.join(tmpdir, \"testsave.py\")\n ip.run_line_magic(\"save\", \"%s 1-10\" % file)\n with open(file) as f:\n content = f.read()\n nt.assert_equal(content.count(cmds[0]), 1)\n nt.assert_in('coding: utf-8', content)\n ip.run_line_magic(\"save\", \"-a %s 1-10\" % file)\n with open(file) as f:\n content = f.read()\n nt.assert_equal(content.count(cmds[0]), 2)\n nt.assert_in('coding: utf-8', content)\n\n\ndef 
test_store():\n \"\"\"Test %store.\"\"\"\n ip = get_ipython()\n ip.run_line_magic('load_ext', 'storemagic')\n \n # make sure the storage is empty\n ip.run_line_magic('store', '-z')\n ip.user_ns['var'] = 42\n ip.run_line_magic('store', 'var')\n ip.user_ns['var'] = 39\n ip.run_line_magic('store', '-r')\n nt.assert_equal(ip.user_ns['var'], 42)\n\n ip.run_line_magic('store', '-d var')\n ip.user_ns['var'] = 39\n ip.run_line_magic('store' , '-r')\n nt.assert_equal(ip.user_ns['var'], 39)\n\n\ndef _run_edit_test(arg_s, exp_filename=None,\n exp_lineno=-1,\n exp_contents=None,\n exp_is_temp=None):\n ip = get_ipython()\n M = code.CodeMagics(ip)\n last_call = ['','']\n opts,args = M.parse_options(arg_s,'prxn:')\n filename, lineno, is_temp = M._find_edit_target(ip, args, opts, last_call)\n \n if exp_filename is not None:\n nt.assert_equal(exp_filename, filename)\n if exp_contents is not None:\n with io.open(filename, 'r', encoding='utf-8') as f:\n contents = f.read()\n nt.assert_equal(exp_contents, contents)\n if exp_lineno != -1:\n nt.assert_equal(exp_lineno, lineno)\n if exp_is_temp is not None:\n nt.assert_equal(exp_is_temp, is_temp)\n\n\ndef test_edit_interactive():\n \"\"\"%edit on interactively defined objects\"\"\"\n ip = get_ipython()\n n = ip.execution_count\n ip.run_cell(u\"def foo(): return 1\", store_history=True)\n \n try:\n _run_edit_test(\"foo\")\n except code.InteractivelyDefined as e:\n nt.assert_equal(e.index, n)\n else:\n raise AssertionError(\"Should have raised InteractivelyDefined\")\n\n\ndef test_edit_cell():\n \"\"\"%edit [cell id]\"\"\"\n ip = get_ipython()\n \n ip.run_cell(u\"def foo(): return 1\", store_history=True)\n \n # test\n _run_edit_test(\"1\", exp_contents=ip.user_ns['In'][1], exp_is_temp=True)\n\ndef test_bookmark():\n ip = get_ipython()\n ip.run_line_magic('bookmark', 'bmname')\n with tt.AssertPrints('bmname'):\n ip.run_line_magic('bookmark', '-l')\n ip.run_line_magic('bookmark', '-d bmname')\n\ndef test_ls_magic():\n ip = get_ipython()\n json_formatter = ip.display_formatter.formatters['application/json']\n json_formatter.enabled = True\n lsmagic = ip.magic('lsmagic')\n with warnings.catch_warnings(record=True) as w:\n j = json_formatter(lsmagic)\n nt.assert_equal(sorted(j), ['cell', 'line'])\n nt.assert_equal(w, []) # no warnings\n\ndef test_strip_initial_indent():\n def sii(s):\n lines = s.splitlines()\n return '\\n'.join(code.strip_initial_indent(lines))\n\n nt.assert_equal(sii(\" a = 1\\nb = 2\"), \"a = 1\\nb = 2\")\n nt.assert_equal(sii(\" a\\n b\\nc\"), \"a\\n b\\nc\")\n nt.assert_equal(sii(\"a\\n b\"), \"a\\n b\")\n\ndef test_logging_magic_quiet_from_arg():\n _ip.config.LoggingMagics.quiet = False\n lm = logging.LoggingMagics(shell=_ip)\n with TemporaryDirectory() as td:\n try:\n with tt.AssertNotPrints(re.compile(\"Activating.*\")):\n lm.logstart('-q {}'.format(\n os.path.join(td, \"quiet_from_arg.log\")))\n finally:\n _ip.logger.logstop()\n\ndef test_logging_magic_quiet_from_config():\n _ip.config.LoggingMagics.quiet = True\n lm = logging.LoggingMagics(shell=_ip)\n with TemporaryDirectory() as td:\n try:\n with tt.AssertNotPrints(re.compile(\"Activating.*\")):\n lm.logstart(os.path.join(td, \"quiet_from_config.log\"))\n finally:\n _ip.logger.logstop()\n\n\ndef test_logging_magic_not_quiet():\n _ip.config.LoggingMagics.quiet = False\n lm = logging.LoggingMagics(shell=_ip)\n with TemporaryDirectory() as td:\n try:\n with tt.AssertPrints(re.compile(\"Activating.*\")):\n lm.logstart(os.path.join(td, \"not_quiet.log\"))\n finally:\n 
_ip.logger.logstop()\n\n\ndef test_time_no_var_expand():\n _ip.user_ns['a'] = 5\n _ip.user_ns['b'] = []\n _ip.magic('time b.append(\"{a}\")')\n assert _ip.user_ns['b'] == ['{a}']\n\n\n# this is slow, put at the end for local testing.\ndef test_timeit_arguments():\n \"Test valid timeit arguments, should not cause SyntaxError (GH #1269)\"\n if sys.version_info < (3,7):\n _ip.magic(\"timeit ('#')\")\n else:\n # 3.7 optimize no-op statement like above out, and complain there is\n # nothing in the for loop.\n _ip.magic(\"timeit a=('#')\")\n"},"path":{"kind":"string","value":"env/lib/python3.6/site-packages/IPython/core/tests/test_magic.py"},"size":{"kind":"number","value":34178,"string":"34,178"},"nl_text":{"kind":"string","value":"class with both %foo and %%foo magics\nI am cell foo, not line foo\nTest %hist -f with temporary filename.\n\nIn [9]: import tempfile\n\nIn [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')\n\nIn [11]: %hist -nl -f $tfile 3\n\nIn [13]: import os; os.unlink(tfile)\nTest %hist -op\n\nIn [1]: class b(float):\n ...: pass\n ...: \n\nIn [2]: class s(object):\n ...: def __str__(self):\n ...: return 's'\n ...: \n\nIn [3]: \n\nIn [4]: class r(b):\n ...: def __repr__(self):\n ...: return 'r'\n ...: \n\nIn [5]: class sr(s,r): pass\n ...: \n\nIn [6]: \n\nIn [7]: bb=b()\n\nIn [8]: ss=s()\n\nIn [9]: rr=r()\n\nIn [10]: ssrr=sr()\n\nIn [11]: 4.5\nOut[11]: 4.5\n\nIn [12]: str(ss)\nOut[12]: 's'\n\nIn [13]: \n\nIn [14]: %hist -op\n>>> class b:\n... pass\n... \n>>> class s(b):\n... def __str__(self):\n... return 's'\n... \n>>> \n>>> class r(b):\n... def __repr__(self):\n... return 'r'\n... \n>>> class sr(s,r): pass\n>>> \n>>> bb=b()\n>>> ss=s()\n>>> rr=r()\n>>> ssrr=sr()\n>>> 4.5\n4.5\n>>> str(ss)\n's'\n>>> \nTest %hist -r\n\nXXX - This test is not recording the output correctly. For some reason, in\ntesting mode the raw history isn't getting populated. No idea why.\nDisabling the output checking for now, though at least we do run it.\n\nIn [1]: 'hist' in _ip.lsmagic()\nOut[1]: True\n\nIn [2]: x=1\n\nIn [3]: %hist -rl 2\nx=1 # random\n%hist -r 2\ndoctest for %precision\n\nIn [1]: f = get_ipython().display_formatter.formatters['text/plain']\n\nIn [2]: %precision 5\nOut[2]: '%.5f'\n\nIn [3]: f.float_format\nOut[3]: '%.5f'\n\nIn [4]: %precision %e\nOut[4]: '%e'\n\nIn [5]: f(3.1415927)\nOut[5]: '3.141593e+00'\ndoctest for %who\n\nIn [1]: %reset -f\n\nIn [2]: alpha = 123\n\nIn [3]: beta = 'beta'\n\nIn [4]: %who int\nalpha\n\nIn [5]: %who str\nbeta\n\nIn [6]: %whos\nVariable Type Data/Info\n----------------------------\nalpha int 123\nbeta str beta\n\nIn [7]: %who_ls\nOut[7]: ['alpha', 'beta']\nI am line foo\nTest %alias_magic.\nTest OSMagics.cd_force_quiet option\nCell magics declared via a class\nCell magics declared via a class, #2\nCell magic using simple decorator\nCell magic manually registered\ntest that config magic does not raise\ncan happen if Configurable init is moved too early into\nMagics.__init__ as then a Config object will be registered as a\nmagic.\ntest that config magic prints available configs in unique and\nsorted order. \ntest that config with a classname prints the class's options. 
\nTest various directory handling operations.\nToggle doctest_mode twice, it should be a no-op and run without error\n%edit [cell id]\n%edit on interactively defined objects\nBasic %%writefile\n%%writefile -a amends files\nBasic %%writefile with embedded double quotes\nBasic %%writefile with embedded single quotes\n%%file with spaces in filename\n%%writefile with unicode cell\n%%writefile $filename\n%%foo and %foo magics are distinguishable to inspect\nTest that we can run a multi-line macro successfully.\nTest %magic\nMagic.parse_options can handle --foo=bar long options\nTest that we don't mangle paths when parsing magic options.\nTest '%reset array' functionality\nTests for basic options parsing in magics.\nTest that prun does not clobber string escapes (GH #1302)\nTest %%prun with IPython special syntax\nTest '%reset dhist' magic\nTest '%reset in' magic\nTest that '%reset in' preserves In[] length\nTest '%reset out' magic\nTest %save.\nTest %store.\ntest %tb after a SyntaxError\nErroneous magic function calls, issue gh-3334\nTest valid timeit arguments, should not cause SyntaxError (GH #1269)\ntest quiet option of timeit magic\ntest whether timeit -o return object\ntest shlex issues with timeit (#1109)\nTest %%timeit with IPython special syntax\nCheck that whos is protected against objects where repr() fails.\nTest that references from %run are cleared by xdel.\nTests for various magic functions.\n\nNeeds to be run by nose (to make ipython session available).\n\n -*- coding: utf-8 -*- magic not found raises UsageError ensure result isn't success when a magic isn't found magic not found raises UsageError ensure result isn't success when a magic isn't found should not raise. clear up everything Practically ALL ipython development systems will have more than 10 aliases we must strip dots from alias names rehashx must fill up syscmdlist argv splitting is os-dependentraise Exception(ip.history_manager.session_number)raise Exception(list(ip.history_manager._get_range_session())) Clear any existing history. List macros test '%reset -f out', make an Out prompt test '%reset -f in' copy before clearingrestore trap and validate stdout trim output, and only check the last line These are only the most minimal of tests, more should be added later. At the very least we check that basic text/unicode calls work OK. curpath = lambda :os.path.splitdrive(os.getcwd())[1].replace('\\\\','/') Calling xmode three times should be a no-op %run creates some hidden references... ... as does the displayhook. Check that a's __del__ method has been called. line mode test cell mode test line mode test cell mode test Debugging information for failures of this test Clear import caches Clear any existing history. Manually called, we get the result Via run_cell, it goes into the user's namespace via displayhook Check that nothing is registered as 'cellm33' Basic operation: both cell and line magics are created, if possible. --cell is specified, line magic not created. Test that line alias is created successfully. Test that line alias with parameters passed in is created successfully. Clear any existing history. make sure the storage is empty test no warnings this is slow, put at the end for local testing. 
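As an illustrative sketch (not part of the dumped test module above), the registration pattern exercised by the FooFoo fixture can be used to define and invoke a combined line/cell magic; the EchoMagics class and the 'echo' magic name below are invented for the example.

from IPython.core.magic import Magics, magics_class, line_magic, cell_magic

@magics_class
class EchoMagics(Magics):
    """Hypothetical magics class with both %echo and %%echo, mirroring FooFoo."""

    @line_magic('echo')
    def echo_line(self, line):
        "Line form: upper-case the argument."
        return line.upper()

    @cell_magic('echo')
    def echo_cell(self, line, cell):
        "Cell form: reverse the order of the cell's lines."
        return '\n'.join(reversed(cell.splitlines()))

# Inside an active IPython session (as the tests obtain via get_ipython()):
#     ip = get_ipython()
#     ip.magics_manager.register(EchoMagics)
#     ip.run_line_magic('echo', 'hi')        # -> 'HI'
#     ip.run_cell_magic('echo', '', 'a\nb')  # -> 'b\na'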
3.7 optimize no-op statement like above out, and complain there is nothing in the for loop."},"nl_size":{"kind":"number","value":5670,"string":"5,670"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.706978976726532,"string":"0.706979"}}},{"rowIdx":7825,"cells":{"content":{"kind":"string","value":"# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Dict, Tuple\n\nfrom ethtx.models.semantics_model import (\n ParameterSemantics,\n EventSemantics,\n FunctionSemantics,\n TransformationSemantics,\n)\n\n\ndef _decode_parameters_list(raw_parameters_list: list) -> List[ParameterSemantics]:\n parameters_list = []\n\n if not raw_parameters_list:\n return parameters_list\n\n for raw_parameter_semantics in raw_parameters_list:\n\n if \"indexed\" in raw_parameter_semantics:\n indexed = raw_parameter_semantics[\"indexed\"]\n else:\n indexed = False\n\n if \"dynamic\" in raw_parameter_semantics:\n dynamic = raw_parameter_semantics[\"dynamic\"]\n else:\n dynamic = False\n\n if raw_parameter_semantics[\"type\"] == \"tuple\":\n components = _decode_parameters_list(raw_parameter_semantics[\"components\"])\n else:\n components = []\n\n parameters_list.append(\n ParameterSemantics(\n raw_parameter_semantics[\"name\"],\n raw_parameter_semantics[\"type\"],\n components,\n indexed,\n dynamic,\n )\n )\n return parameters_list\n\n\ndef decode_events_and_functions(\n abi: dict,\n) -> Tuple[Dict[str, EventSemantics], Dict[str, FunctionSemantics]]:\n events = dict()\n for signature, raw_event_semantics in abi.get(\"events\", {}).items():\n parameters = _decode_parameters_list(raw_event_semantics.get(\"parameters\"))\n events[signature] = EventSemantics(\n signature,\n raw_event_semantics[\"anonymous\"],\n raw_event_semantics[\"name\"],\n parameters,\n )\n\n functions = dict()\n for signature, raw_function_semantics in abi.get(\"functions\", {}).items():\n if raw_function_semantics:\n inputs = _decode_parameters_list(raw_function_semantics.get(\"inputs\"))\n outputs = _decode_parameters_list(raw_function_semantics.get(\"outputs\"))\n name = raw_function_semantics[\"name\"]\n else:\n inputs = outputs = []\n name = signature\n\n functions[signature] = FunctionSemantics(signature, name, inputs, outputs)\n\n return events, functions\n\n\ndef decode_transformations(\n raw_transformations: dict,\n) -> Dict[str, Dict[str, TransformationSemantics]]:\n transformations = dict()\n if raw_transformations:\n for signature, transformation in raw_transformations.items():\n transformations[signature] = dict()\n for parameter_name, parameter_transformation in transformation.get(\n \"arguments\", dict()\n ).items():\n transformations[signature][parameter_name] = TransformationSemantics(\n parameter_transformation.get(\"name\"),\n parameter_transformation.get(\"type\"),\n parameter_transformation.get(\"value\"),\n )\n return 
transformations\n"},"path":{"kind":"string","value":"ethtx/decoders/decoders/semantics.py"},"size":{"kind":"number","value":3532,"string":"3,532"},"nl_text":{"kind":"string","value":"Copyright 2021 DAI Foundation Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License."},"nl_size":{"kind":"number","value":556,"string":"556"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.848164439201355,"string":"0.848164"}}},{"rowIdx":7826,"cells":{"content":{"kind":"string","value":"\"\"\"\nSupport for Xiaomi Yeelight Wifi color bulb.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/light.yeelight/\n\"\"\"\nimport logging\n\nimport voluptuous as vol\n\nfrom homeassistant.util.color import (\n color_temperature_mired_to_kelvin as mired_to_kelvin,\n color_temperature_kelvin_to_mired as kelvin_to_mired)\nfrom homeassistant.const import CONF_DEVICES, CONF_NAME\nfrom homeassistant.components.light import (\n ATTR_BRIGHTNESS, ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_COLOR_TEMP,\n ATTR_FLASH, FLASH_SHORT, FLASH_LONG, ATTR_EFFECT, SUPPORT_BRIGHTNESS,\n SUPPORT_COLOR, SUPPORT_TRANSITION, SUPPORT_COLOR_TEMP, SUPPORT_FLASH,\n SUPPORT_EFFECT, Light, PLATFORM_SCHEMA, ATTR_ENTITY_ID, DOMAIN)\nimport homeassistant.helpers.config_validation as cv\nimport homeassistant.util.color as color_util\n\nREQUIREMENTS = ['yeelight==0.4.0']\n\n_LOGGER = logging.getLogger(__name__)\n\nLEGACY_DEVICE_TYPE_MAP = {\n 'color1': 'rgb',\n 'mono1': 'white',\n 'strip1': 'strip',\n 'bslamp1': 'bedside',\n 'ceiling1': 'ceiling',\n}\n\nDEFAULT_NAME = 'Yeelight'\nDEFAULT_TRANSITION = 350\n\nCONF_TRANSITION = 'transition'\nCONF_SAVE_ON_CHANGE = 'save_on_change'\nCONF_MODE_MUSIC = 'use_music_mode'\n\nDATA_KEY = 'light.yeelight'\n\nDEVICE_SCHEMA = vol.Schema({\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n vol.Optional(CONF_TRANSITION, default=DEFAULT_TRANSITION): cv.positive_int,\n vol.Optional(CONF_MODE_MUSIC, default=False): cv.boolean,\n vol.Optional(CONF_SAVE_ON_CHANGE, default=True): cv.boolean,\n})\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n {vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}, })\n\nSUPPORT_YEELIGHT = (SUPPORT_BRIGHTNESS |\n SUPPORT_TRANSITION |\n SUPPORT_FLASH)\n\nSUPPORT_YEELIGHT_RGB = (SUPPORT_YEELIGHT |\n SUPPORT_COLOR |\n SUPPORT_EFFECT |\n SUPPORT_COLOR_TEMP)\n\nYEELIGHT_MIN_KELVIN = YEELIGHT_MAX_KELVIN = 2700\nYEELIGHT_RGB_MIN_KELVIN = 1700\nYEELIGHT_RGB_MAX_KELVIN = 6500\n\nEFFECT_DISCO = \"Disco\"\nEFFECT_TEMP = \"Slow Temp\"\nEFFECT_STROBE = \"Strobe epilepsy!\"\nEFFECT_STROBE_COLOR = \"Strobe color\"\nEFFECT_ALARM = \"Alarm\"\nEFFECT_POLICE = \"Police\"\nEFFECT_POLICE2 = \"Police2\"\nEFFECT_CHRISTMAS = \"Christmas\"\nEFFECT_RGB = \"RGB\"\nEFFECT_RANDOM_LOOP = \"Random Loop\"\nEFFECT_FAST_RANDOM_LOOP = \"Fast Random Loop\"\nEFFECT_SLOWDOWN = \"Slowdown\"\nEFFECT_WHATSAPP = \"WhatsApp\"\nEFFECT_FACEBOOK = \"Facebook\"\nEFFECT_TWITTER = \"Twitter\"\nEFFECT_STOP = \"Stop\"\n\nYEELIGHT_EFFECT_LIST = [\n EFFECT_DISCO,\n 
EFFECT_TEMP,\n EFFECT_STROBE,\n EFFECT_STROBE_COLOR,\n EFFECT_ALARM,\n EFFECT_POLICE,\n EFFECT_POLICE2,\n EFFECT_CHRISTMAS,\n EFFECT_RGB,\n EFFECT_RANDOM_LOOP,\n EFFECT_FAST_RANDOM_LOOP,\n EFFECT_SLOWDOWN,\n EFFECT_WHATSAPP,\n EFFECT_FACEBOOK,\n EFFECT_TWITTER,\n EFFECT_STOP]\n\nSERVICE_SET_MODE = 'yeelight_set_mode'\nATTR_MODE = 'mode'\n\nYEELIGHT_SERVICE_SCHEMA = vol.Schema({\n vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,\n})\n\n\ndef _cmd(func):\n \"\"\"Define a wrapper to catch exceptions from the bulb.\"\"\"\n def _wrap(self, *args, **kwargs):\n import yeelight\n try:\n _LOGGER.debug(\"Calling %s with %s %s\", func, args, kwargs)\n return func(self, *args, **kwargs)\n except yeelight.BulbException as ex:\n _LOGGER.error(\"Error when calling %s: %s\", func, ex)\n\n return _wrap\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the Yeelight bulbs.\"\"\"\n from yeelight.enums import PowerMode\n\n if DATA_KEY not in hass.data:\n hass.data[DATA_KEY] = {}\n\n lights = []\n if discovery_info is not None:\n _LOGGER.debug(\"Adding autodetected %s\", discovery_info['hostname'])\n\n device_type = discovery_info['device_type']\n device_type = LEGACY_DEVICE_TYPE_MAP.get(device_type, device_type)\n\n # Not using hostname, as it seems to vary.\n name = \"yeelight_%s_%s\" % (device_type,\n discovery_info['properties']['mac'])\n host = discovery_info['host']\n device = {'name': name, 'ipaddr': host}\n\n light = YeelightLight(device, DEVICE_SCHEMA({}))\n lights.append(light)\n hass.data[DATA_KEY][host] = light\n else:\n for host, device_config in config[CONF_DEVICES].items():\n device = {'name': device_config[CONF_NAME], 'ipaddr': host}\n light = YeelightLight(device, device_config)\n lights.append(light)\n hass.data[DATA_KEY][host] = light\n\n add_devices(lights, True)\n\n def service_handler(service):\n \"\"\"Dispatch service calls to target entities.\"\"\"\n params = {key: value for key, value in service.data.items()\n if key != ATTR_ENTITY_ID}\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n if entity_ids:\n target_devices = [dev for dev in hass.data[DATA_KEY].values()\n if dev.entity_id in entity_ids]\n else:\n target_devices = hass.data[DATA_KEY].values()\n\n for target_device in target_devices:\n if service.service == SERVICE_SET_MODE:\n target_device.set_mode(**params)\n\n service_schema_set_mode = YEELIGHT_SERVICE_SCHEMA.extend({\n vol.Required(ATTR_MODE):\n vol.In([mode.name.lower() for mode in PowerMode])\n })\n hass.services.register(\n DOMAIN, SERVICE_SET_MODE, service_handler,\n schema=service_schema_set_mode)\n\n\nclass YeelightLight(Light):\n \"\"\"Representation of a Yeelight light.\"\"\"\n\n def __init__(self, device, config):\n \"\"\"Initialize the Yeelight light.\"\"\"\n self.config = config\n self._name = device['name']\n self._ipaddr = device['ipaddr']\n\n self._supported_features = SUPPORT_YEELIGHT\n self._available = False\n self._bulb_device = None\n\n self._brightness = None\n self._color_temp = None\n self._is_on = None\n self._hs = None\n\n @property\n def available(self) -> bool:\n \"\"\"Return if bulb is available.\"\"\"\n return self._available\n\n @property\n def supported_features(self) -> int:\n \"\"\"Flag supported features.\"\"\"\n return self._supported_features\n\n @property\n def effect_list(self):\n \"\"\"Return the list of supported effects.\"\"\"\n return YEELIGHT_EFFECT_LIST\n\n @property\n def color_temp(self) -> int:\n \"\"\"Return the color temperature.\"\"\"\n return self._color_temp\n\n @property\n def name(self) -> 
str:\n \"\"\"Return the name of the device if any.\"\"\"\n return self._name\n\n @property\n def is_on(self) -> bool:\n \"\"\"Return true if device is on.\"\"\"\n return self._is_on\n\n @property\n def brightness(self) -> int:\n \"\"\"Return the brightness of this light between 1..255.\"\"\"\n return self._brightness\n\n @property\n def min_mireds(self):\n \"\"\"Return minimum supported color temperature.\"\"\"\n if self.supported_features & SUPPORT_COLOR_TEMP:\n return kelvin_to_mired(YEELIGHT_RGB_MAX_KELVIN)\n return kelvin_to_mired(YEELIGHT_MAX_KELVIN)\n\n @property\n def max_mireds(self):\n \"\"\"Return maximum supported color temperature.\"\"\"\n if self.supported_features & SUPPORT_COLOR_TEMP:\n return kelvin_to_mired(YEELIGHT_RGB_MIN_KELVIN)\n return kelvin_to_mired(YEELIGHT_MIN_KELVIN)\n\n def _get_hs_from_properties(self):\n rgb = self._properties.get('rgb', None)\n color_mode = self._properties.get('color_mode', None)\n if not rgb or not color_mode:\n return None\n\n color_mode = int(color_mode)\n if color_mode == 2: # color temperature\n temp_in_k = mired_to_kelvin(self._color_temp)\n return color_util.color_temperature_to_hs(temp_in_k)\n if color_mode == 3: # hsv\n hue = int(self._properties.get('hue'))\n sat = int(self._properties.get('sat'))\n return (hue / 360 * 65536, sat / 100 * 255)\n\n rgb = int(rgb)\n blue = rgb & 0xff\n green = (rgb >> 8) & 0xff\n red = (rgb >> 16) & 0xff\n\n return color_util.color_RGB_to_hs(red, green, blue)\n\n @property\n def hs_color(self) -> tuple:\n \"\"\"Return the color property.\"\"\"\n return self._hs\n\n @property\n def _properties(self) -> dict:\n return self._bulb.last_properties\n\n @property\n def _bulb(self) -> 'yeelight.Bulb':\n import yeelight\n if self._bulb_device is None:\n try:\n self._bulb_device = yeelight.Bulb(self._ipaddr)\n self._bulb_device.get_properties() # force init for type\n\n self._available = True\n except yeelight.BulbException as ex:\n self._available = False\n _LOGGER.error(\"Failed to connect to bulb %s, %s: %s\",\n self._ipaddr, self._name, ex)\n\n return self._bulb_device\n\n def set_music_mode(self, mode) -> None:\n \"\"\"Set the music mode on or off.\"\"\"\n if mode:\n self._bulb.start_music()\n else:\n self._bulb.stop_music()\n\n def update(self) -> None:\n \"\"\"Update properties from the bulb.\"\"\"\n import yeelight\n try:\n self._bulb.get_properties()\n\n if self._bulb_device.bulb_type == yeelight.BulbType.Color:\n self._supported_features = SUPPORT_YEELIGHT_RGB\n\n self._is_on = self._properties.get('power') == 'on'\n\n bright = self._properties.get('bright', None)\n if bright:\n self._brightness = round(255 * (int(bright) / 100))\n\n temp_in_k = self._properties.get('ct', None)\n if temp_in_k:\n self._color_temp = kelvin_to_mired(int(temp_in_k))\n\n self._hs = self._get_hs_from_properties()\n\n self._available = True\n except yeelight.BulbException as ex:\n if self._available: # just inform once\n _LOGGER.error(\"Unable to update bulb status: %s\", ex)\n self._available = False\n\n @_cmd\n def set_brightness(self, brightness, duration) -> None:\n \"\"\"Set bulb brightness.\"\"\"\n if brightness:\n _LOGGER.debug(\"Setting brightness: %s\", brightness)\n self._bulb.set_brightness(brightness / 255 * 100,\n duration=duration)\n\n @_cmd\n def set_rgb(self, rgb, duration) -> None:\n \"\"\"Set bulb's color.\"\"\"\n if rgb and self.supported_features & SUPPORT_COLOR:\n _LOGGER.debug(\"Setting RGB: %s\", rgb)\n self._bulb.set_rgb(rgb[0], rgb[1], rgb[2], duration=duration)\n\n @_cmd\n def set_colortemp(self, 
colortemp, duration) -> None:\n \"\"\"Set bulb's color temperature.\"\"\"\n if colortemp and self.supported_features & SUPPORT_COLOR_TEMP:\n temp_in_k = mired_to_kelvin(colortemp)\n _LOGGER.debug(\"Setting color temp: %s K\", temp_in_k)\n\n self._bulb.set_color_temp(temp_in_k, duration=duration)\n\n @_cmd\n def set_default(self) -> None:\n \"\"\"Set current options as default.\"\"\"\n self._bulb.set_default()\n\n @_cmd\n def set_flash(self, flash) -> None:\n \"\"\"Activate flash.\"\"\"\n if flash:\n from yeelight import (RGBTransition, SleepTransition, Flow,\n BulbException)\n if self._bulb.last_properties[\"color_mode\"] != 1:\n _LOGGER.error(\"Flash supported currently only in RGB mode.\")\n return\n\n transition = int(self.config[CONF_TRANSITION])\n if flash == FLASH_LONG:\n count = 1\n duration = transition * 5\n if flash == FLASH_SHORT:\n count = 1\n duration = transition * 2\n\n red, green, blue = color_util.color_hs_to_RGB(*self._hs)\n\n transitions = list()\n transitions.append(\n RGBTransition(255, 0, 0, brightness=10, duration=duration))\n transitions.append(SleepTransition(\n duration=transition))\n transitions.append(\n RGBTransition(red, green, blue, brightness=self.brightness,\n duration=duration))\n\n flow = Flow(count=count, transitions=transitions)\n try:\n self._bulb.start_flow(flow)\n except BulbException as ex:\n _LOGGER.error(\"Unable to set flash: %s\", ex)\n\n @_cmd\n def set_effect(self, effect) -> None:\n \"\"\"Activate effect.\"\"\"\n if effect:\n from yeelight import (Flow, BulbException)\n from yeelight.transitions import (disco, temp, strobe, pulse,\n strobe_color, alarm, police,\n police2, christmas, rgb,\n randomloop, slowdown)\n if effect == EFFECT_STOP:\n self._bulb.stop_flow()\n return\n if effect == EFFECT_DISCO:\n flow = Flow(count=0, transitions=disco())\n if effect == EFFECT_TEMP:\n flow = Flow(count=0, transitions=temp())\n if effect == EFFECT_STROBE:\n flow = Flow(count=0, transitions=strobe())\n if effect == EFFECT_STROBE_COLOR:\n flow = Flow(count=0, transitions=strobe_color())\n if effect == EFFECT_ALARM:\n flow = Flow(count=0, transitions=alarm())\n if effect == EFFECT_POLICE:\n flow = Flow(count=0, transitions=police())\n if effect == EFFECT_POLICE2:\n flow = Flow(count=0, transitions=police2())\n if effect == EFFECT_CHRISTMAS:\n flow = Flow(count=0, transitions=christmas())\n if effect == EFFECT_RGB:\n flow = Flow(count=0, transitions=rgb())\n if effect == EFFECT_RANDOM_LOOP:\n flow = Flow(count=0, transitions=randomloop())\n if effect == EFFECT_FAST_RANDOM_LOOP:\n flow = Flow(count=0, transitions=randomloop(duration=250))\n if effect == EFFECT_SLOWDOWN:\n flow = Flow(count=0, transitions=slowdown())\n if effect == EFFECT_WHATSAPP:\n flow = Flow(count=2, transitions=pulse(37, 211, 102))\n if effect == EFFECT_FACEBOOK:\n flow = Flow(count=2, transitions=pulse(59, 89, 152))\n if effect == EFFECT_TWITTER:\n flow = Flow(count=2, transitions=pulse(0, 172, 237))\n\n try:\n self._bulb.start_flow(flow)\n except BulbException as ex:\n _LOGGER.error(\"Unable to set effect: %s\", ex)\n\n def turn_on(self, **kwargs) -> None:\n \"\"\"Turn the bulb on.\"\"\"\n import yeelight\n brightness = kwargs.get(ATTR_BRIGHTNESS)\n colortemp = kwargs.get(ATTR_COLOR_TEMP)\n hs_color = kwargs.get(ATTR_HS_COLOR)\n rgb = color_util.color_hs_to_RGB(*hs_color) if hs_color else None\n flash = kwargs.get(ATTR_FLASH)\n effect = kwargs.get(ATTR_EFFECT)\n\n duration = int(self.config[CONF_TRANSITION]) # in ms\n if ATTR_TRANSITION in kwargs: # passed kwarg overrides config\n 
duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s\n\n try:\n self._bulb.turn_on(duration=duration)\n except yeelight.BulbException as ex:\n _LOGGER.error(\"Unable to turn the bulb on: %s\", ex)\n return\n\n if self.config[CONF_MODE_MUSIC] and not self._bulb.music_mode:\n try:\n self.set_music_mode(self.config[CONF_MODE_MUSIC])\n except yeelight.BulbException as ex:\n _LOGGER.error(\"Unable to turn on music mode,\"\n \"consider disabling it: %s\", ex)\n\n try:\n # values checked for none in methods\n self.set_rgb(rgb, duration)\n self.set_colortemp(colortemp, duration)\n self.set_brightness(brightness, duration)\n self.set_flash(flash)\n self.set_effect(effect)\n except yeelight.BulbException as ex:\n _LOGGER.error(\"Unable to set bulb properties: %s\", ex)\n return\n\n # save the current state if we had a manual change.\n if self.config[CONF_SAVE_ON_CHANGE] and (brightness\n or colortemp\n or rgb):\n try:\n self.set_default()\n except yeelight.BulbException as ex:\n _LOGGER.error(\"Unable to set the defaults: %s\", ex)\n return\n\n def turn_off(self, **kwargs) -> None:\n \"\"\"Turn off.\"\"\"\n import yeelight\n duration = int(self.config[CONF_TRANSITION]) # in ms\n if ATTR_TRANSITION in kwargs: # passed kwarg overrides config\n duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s\n try:\n self._bulb.turn_off(duration=duration)\n except yeelight.BulbException as ex:\n _LOGGER.error(\"Unable to turn the bulb off: %s\", ex)\n\n def set_mode(self, mode: str):\n \"\"\"Set a power mode.\"\"\"\n import yeelight\n try:\n self._bulb.set_power_mode(yeelight.enums.PowerMode[mode.upper()])\n except yeelight.BulbException as ex:\n _LOGGER.error(\"Unable to set the power mode: %s\", ex)\n"},"path":{"kind":"string","value":"homeassistant/components/light/yeelight.py"},"size":{"kind":"number","value":17617,"string":"17,617"},"nl_text":{"kind":"string","value":"Representation of a Yeelight light.\nInitialize the Yeelight light.\nDefine a wrapper to catch exceptions from the bulb.\nReturn if bulb is available.\nReturn the brightness of this light between 1..255.\nReturn the color temperature.\nReturn the list of supported effects.\nReturn the color property.\nReturn true if device is on.\nReturn maximum supported color temperature.\nReturn minimum supported color temperature.\nReturn the name of the device if any.\nDispatch service calls to target entities.\nSet bulb brightness.\nSet bulb's color temperature.\nSet current options as default.\nActivate effect.\nActivate flash.\nSet a power mode.\nSet the music mode on or off.\nSet bulb's color.\nSet up the Yeelight bulbs.\nFlag supported features.\nTurn off.\nTurn the bulb on.\nUpdate properties from the bulb.\nSupport for Xiaomi Yeelight Wifi color bulb.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/light.yeelight/\n\n Not using hostname, as it seems to vary. color temperature hsv force init for type just inform once in ms passed kwarg overrides config kwarg in s values checked for none in methods save the current state if we had a manual change. 
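As an illustrative sketch (not part of the Home Assistant component above), the two conversions used by its update() and set_brightness() methods, unpacking the bulb's packed 24-bit 'rgb' property and rescaling brightness between the 0-255 range and the bulb's percent scale, can be written in isolation; the helper names are invented for the example.

def unpack_yeelight_rgb(rgb_property):
    """Split the packed decimal 'rgb' property into (red, green, blue)."""
    rgb = int(rgb_property)
    red = (rgb >> 16) & 0xFF
    green = (rgb >> 8) & 0xFF
    blue = rgb & 0xFF
    return red, green, blue

def ha_to_bulb_brightness(brightness):
    """Map Home Assistant's 1..255 brightness onto the bulb's percent scale."""
    return brightness / 255 * 100

def bulb_to_ha_brightness(bright):
    """Map the bulb's 'bright' property (a percent string) back to 0..255."""
    return round(255 * (int(bright) / 100))

assert unpack_yeelight_rgb('16711680') == (255, 0, 0)  # 0xFF0000 -> pure red
assert bulb_to_ha_brightness('50') == 128
assert ha_to_bulb_brightness(255) == 100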
in ms passed kwarg overrides config kwarg in s"},"nl_size":{"kind":"number","value":1242,"string":"1,242"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7844385504722595,"string":"0.784439"}}},{"rowIdx":7827,"cells":{"content":{"kind":"string","value":"import numpy as np \nimport sys\n\nclass RBF():\n def __init__(self, Input, Output, Ptypes, Nclasses):\n\n self.input = Input\n self.hidden = Ptypes * Nclasses\n self.output = Output\n self.ptypes = Ptypes\n self.nclasses = Nclasses\n\n self.protos = 0\n self.weights = 0\n self.spread = 0\n \n def createPrototypes(self, data):\n\n groups = np.random.randint(0, data.shape[0], size = (self.hidden))\n \n prototypes = np.zeros((self.hidden, data.shape[1]))\n \n i = 0\n \n for element in groups: \n prototypes[i] = data[element, :]\n i += 1\n \n self.protos = prototypes\n\n def sigma(self):\n \n temp = 0\n \n for i in range(self.hidden):\n for j in range(self.hidden):\n distance = np.square(np.linalg.norm(self.protos[i] - self.protos[j]))\n \n if distance > temp:\n temp = distance\n \n self.spread = temp/np.sqrt(self.hidden)\n\n def train(self, data, classes):\n\n self.createPrototypes(data)\n self.sigma()\n hidden_out = np.zeros(shape=(0,self.hidden))\n \n for data in data:\n output=[]\n \n for proto in self.protos:\n distance = np.square(np.linalg.norm(data - proto))\n neuron_output = np.exp(-(distance)/(np.square(self.spread)))\n output.append(neuron_output)\n hidden_out = np.vstack([hidden_out,np.array(output)])\n \n self.weights = np.dot(np.linalg.pinv(hidden_out), classes)\n\n def test(self, data, classes):\n \n right = 0\n \n for i in range(len(data)):\n \n d = data[i]\n output = []\n \n for proto in self.protos:\n distance = np.square(np.linalg.norm(d-proto))\n neuron_output = np.exp(-(distance)/np.square(self.spread))\n output.append(neuron_output)\n \n network_output = np.dot(np.array(output),self.weights)\n \n print (\"Expected: \", classes[i].argmax(axis=0) +1)\n print (\"Result: \", network_output.argmax(axis=0) + 1)\n print ()\n\n if network_output.argmax(axis=0) + 1 == classes[i].argmax(axis=0) +1:\n right += 1\n \n print (\"Accuracy(%): \", (right * 100) / len(data))\n\ndef read_iris(percentage):\n \n dataset = np.loadtxt('iris.data', delimiter=',', skiprows=0)\n\n np.random.shuffle(dataset)\n \n q = int(dataset.shape[0] * percentage) + 2\n \n X_training = dataset[0:q, 0:4]\n Y_training = dataset[0:q, 4]\n \n X_test = dataset[q:150, 0:4]\n Y_test = dataset[q:150, 4]\n \n return X_training, Y_training, X_test, Y_test\n\ndef process_iris_data(data):\n \n p_data = np.zeros((data.shape[0], data.shape[1]))\n\n max_col1 = np.amax(data[:,0])\n max_col2 = np.amax(data[:,1])\n max_col3 = np.amax(data[:,2])\n max_col4 = np.amax(data[:,3])\n\n for n in range(len(data)):\n \n p_data[n, 0] = data[n,0] / max_col1\n p_data[n, 1] = data[n,1] / max_col2\n p_data[n, 2] = data[n,2] / max_col3\n p_data[n, 3] = data[n,3] / max_col4\n\n return p_data\n\ndef process_iris_labels(labels, operation):\n \n if operation == 0:\n \n p_labels = np.zeros((labels.shape[0], 3))\n\n for n in range(len(labels)):\n p_labels[n, int(labels[n])] = 1 \n\n return p_labels\n else:\n p_labels = np.argmax(labels, axis=1)\n return p_labels\n\n\nif __name__ == '__main__':\n \n # input params\n # percentage \n \n parameters = (sys.argv)\n print(parameters)\n\n x1, y1, x2, y2 = read_iris(float(parameters[1]))\n xp = process_iris_data(x1)\n yp = process_iris_labels(y1,0)\n\n nn = RBF(xp.shape[1], y1.shape[0], xp.shape[1], 3)\n\n 
nn.train(xp, yp) \n\n xp = process_iris_data(x2)\n yp = process_iris_labels(y2,0)\n nn.test(xp, yp)"},"path":{"kind":"string","value":"Assignment 3/rbf.py"},"size":{"kind":"number","value":4072,"string":"4,072"},"nl_text":{"kind":"string","value":"input params percentage"},"nl_size":{"kind":"number","value":23,"string":"23"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.3445621132850647,"string":"0.344562"}}},{"rowIdx":7828,"cells":{"content":{"kind":"string","value":"#-------------------------------------------------------------------------------\n# Name:GUI Calculator\n# Purpose:Simple calculator with GUI using tkinter\n#\n# Author: Daniel Campos\n#\n# Created: Monday Dec 1st, 2014\n#-------------------------------------------------------------------------------\nfrom tkinter import *\nimport math\nclass Calculator:\n '''GUI for the calculator'''\n def __init__(self, master):\n self.master = master \n self.stringContents = ''\n self.displayStr = StringVar(self.stringContents)\n self.display = Label(master, textvariable=self.displayStr, width=25, anchor=E, relief=SUNKEN).grid(row=0, columnspan=4)\n\n self.seven = Button(master, width=3, text='7', command=lambda: self.addSymbol('7')).grid(row=1, column=0)\n self.eight = Button(master, width=3, text='8', command=lambda: self.addSymbol('8')).grid(row=1, column=1)\n self.nine = Button(master, width=3, text='9', command=lambda: self.addSymbol('9')).grid(row=1, column=2)\n self.div = Button(master, width=3, text='/', command=lambda: self.addSymbol('/')).grid(row=1, column=3)\n self.master.bind('7', self.addKeyboardSymbol)\n self.master.bind('8', self.addKeyboardSymbol)\n self.master.bind('9', self.addKeyboardSymbol)\n self.master.bind('/', self.addKeyboardSymbol)\n\n self.four = Button(master, width=3, text='4', command=lambda: self.addSymbol('4')).grid(row=3, column=0)\n self.five = Button(master, width=3, text='5', command=lambda: self.addSymbol('5')).grid(row=3, column=1)\n self.six = Button(master, width=3, text='6', command=lambda: self.addSymbol('6')).grid(row=3, column=2)\n self.times = Button(master, width=3, text='*', command=lambda: self.addSymbol('*')).grid(row=3, column=3)\n self.master.bind('4', self.addKeyboardSymbol)\n self.master.bind('5', self.addKeyboardSymbol)\n self.master.bind('6', self.addKeyboardSymbol)\n self.master.bind('*', self.addKeyboardSymbol)\n\n self.one = Button(master, width=3, text='1', command=lambda: self.addSymbol('1')).grid(row=4, column=0)\n self.two = Button(master, width=3, text='2', command=lambda: self.addSymbol('2')).grid(row=4, column=1)\n self.three = Button(master, width=3, text='3', command=lambda: self.addSymbol('3')).grid(row=4, column=2)\n self.minus = Button(master, width=3, text='-', command=lambda: self.addSymbol('-')).grid(row=4, column=3)\n self.master.bind('1', self.addKeyboardSymbol)\n self.master.bind('2', self.addKeyboardSymbol)\n self.master.bind('3', self.addKeyboardSymbol)\n self.master.bind('-', self.addKeyboardSymbol)\n\n self.zero = Button(master, width=3, text='0', command=lambda: self.addSymbol('0')).grid(row=5, column=0)\n self.point = Button(master, width=3, text='.', command=lambda: self.addSymbol('.')).grid(row=5, column=1)\n self.equals = Button(master, width=3, text='=', command=lambda: self.evaluate()).grid(row=5, column=2) \n self.plus = Button(master, width=3, text='+', command=lambda: self.addSymbol('+')).grid(row=5, column=3)\n\n self.master.bind('0', self.addKeyboardSymbol)\n self.master.bind('.', 
self.addKeyboardSymbol)\n self.master.bind('', self.evaluate)\n self.master.bind('+', self.addKeyboardSymbol)\n\n self.c = Button(master, width=3, text='C', command=lambda: self.clear()).grid(row=6, column=0)\n self.d = Button(master, width=3, text='D', command=lambda: self.backSpace()).grid(row=6, column=1)\n self.lparren = Button(master, width=3, text='(', command=lambda: self.addSymbol('(')).grid(row=6, column=2)\n self.rparren = Button(master, width=3, text=')', command=lambda: self.addSymbol(')')).grid(row=6, column=3)\n\n self.master.bind('C', self.clear)\n self.master.bind('c', self.clear)\n self.master.bind('', self.backSpace)\n self.master.bind('(', self.addKeyboardSymbol)\n self.master.bind(')', self.addKeyboardSymbol)\n def addSymbol(self, char):\n '''Displays the inputted char onto the display''' \n self.stringContents += char\n self.displayStr.set(self.stringContents)\n def addKeyboardSymbol(self,event):\n '''Displays the inputted char onto the display''' \n self.stringContents += str(repr(event.char))[1:-1]\n self.displayStr.set(self.stringContents)\n def evaluate(self, evt=None):\n '''Evalutes the expression'''\n try:\n self.displayStr.set(eval(self.stringContents))\n self.stringContents = str(eval(self.stringContents))\n except Exception as e:\n self.displayStr.set('Error')\n self.stringContents = '' \n def clear(self, evt=None):\n '''Clears the expression'''\n self.stringContents = ''\n self.displayStr.set(self.stringContents)\n def backSpace(self, evt=None):\n '''Backspace on expression'''\n self.stringContents = self.stringContents[:-1]\n self.displayStr.set(self.stringContents)\ndef Main():\n master = Tk()\n calculator = Calculator(master)\n calculator.master.title('Calculator')\n calculator.master.resizable(False, False)\n master.mainloop()\nif __name__ == '__main__':\n Main()"},"path":{"kind":"string","value":"ProgrammingInPython/proj08_daniel_campos.py"},"size":{"kind":"number","value":5273,"string":"5,273"},"nl_text":{"kind":"string","value":"GUI for the calculator\nDisplays the inputted char onto the display\nDisplays the inputted char onto the display\nBackspace on expression\nClears the expression\nEvalutes the expression\n\n------------------------------------------------------------------------------- Name:GUI Calculator Purpose:Simple calculator with GUI using tkinter Author: Daniel Campos Created: Monday Dec 1st, 2014-------------------------------------------------------------------------------"},"nl_size":{"kind":"number","value":461,"string":"461"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.41298019886016846,"string":"0.41298"}}},{"rowIdx":7829,"cells":{"content":{"kind":"string","value":"\"\"\"\nURLConf for Satchmo Contacts.\n\"\"\"\n\nfrom django.conf.urls.defaults import patterns\nfrom signals_ahoy.signals import collect_urls\nfrom satchmo_store import contact\nfrom satchmo_store.shop.satchmo_settings import get_satchmo_setting\n\nssl = get_satchmo_setting('SSL', default_value=False)\n\nurlpatterns = patterns('satchmo_store.contact.views',\n (r'^$', 'view', {}, 'satchmo_account_info'),\n (r'^update/$', 'update', {}, 'satchmo_profile_update'),\n (r'^ajax_state/$', 'ajax_get_state', {'SSL': ssl}, 'satchmo_contact_ajax_state'),\n)\n\ncollect_urls.send(sender=contact, patterns=urlpatterns)\n"},"path":{"kind":"string","value":"satchmo/apps/satchmo_store/contact/urls.py"},"size":{"kind":"number","value":598,"string":"598"},"nl_text":{"kind":"string","value":"URLConf for Satchmo 
Contacts."},"nl_size":{"kind":"number","value":29,"string":"29"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7977340221405029,"string":"0.797734"}}},{"rowIdx":7830,"cells":{"content":{"kind":"string","value":"import sys\n\nimport requests\nimport argparse\nimport json\nimport os\nimport configparser\nimport arrow\nfrom colorama import init\nimport traceback\n\n\ndef get_color(color_code):\n return '\\x1b[%sm' % color_code\n\n\ndef parse_brief(brief):\n sentences = None\n if args.news:\n sentences = json.loads(\n requests.get(\n \"https://corpus.vocabulary.com/api/1.0/examples.json?maxResults=5&query=\" + args.word).text)[\n 'result']['sentences']\n\n word = WORD_COLOR + brief['wordOut'] + \": \"\n if 'relation' in brief['lemma']:\n word += TEXT_COLOR + (\n \"%s为%s的%s\" % (\n brief['wordOut'], brief['lemma']['lemma'],\n brief['lemma']['relation']))\n print(word)\n pron = \"\"\n if 'usPron' in brief:\n pron += HINT_COLOR + \" 美音 \" + TEXT_COLOR + \"/%s/\" % brief['usPron'][\n 'ps']\n if 'ukPron' in brief:\n pron += HINT_COLOR + \" 英音 \" + TEXT_COLOR + \"/%s/\" % brief['ukPron'][\n 'ps']\n if pron:\n print(pron)\n if 'chnDefinitions' in brief:\n print(SECTION_COLOR + \"中文释义\")\n for chn_def in brief['chnDefinitions']:\n if 'pos' in chn_def:\n print(\n \" \" + HINT_COLOR + chn_def['pos'].ljust(8) + TEXT_COLOR +\n chn_def[\n 'meaning'])\n else:\n print(\" \" + \"\".ljust(8) + TEXT_COLOR + chn_def['meaning'])\n if 'engDefinitions' in brief:\n print(SECTION_COLOR + \"英文释义\")\n for eng_def in brief['engDefinitions']:\n if 'pos' in eng_def:\n print(\n \" \" + HINT_COLOR + eng_def['pos'].ljust(8) + TEXT_COLOR +\n eng_def[\n 'meaning'])\n else:\n print(\" \" + \"\".ljust(8) + TEXT_COLOR + eng_def['meaning'])\n if sentences:\n print(SECTION_COLOR + \"新闻例句\")\n for i, sentence in enumerate(sentences):\n print(TEXT_COLOR,\n \"\".ljust(4) + (str(i + 1) + \".\").ljust(3) + sentence[\n 'sentence'])\n print(SOURCE_COLOR,\n \"\".ljust(7) + sentence['volume']['corpus']['name'] + \"\".ljust(\n 4) +\n arrow.get(sentence['volume']['dateAdded']).format(\n \"MMM DD, YYYY\"))\n\n\ndef parse_source(sentence_group):\n if 'source' not in sentence_group:\n return \"牛津高阶英汉双解词典\"\n else:\n return sourceDict[sentence_group['source']]\n\n\ndef parse_detail(detail):\n parse_brief(detail['wordBrief'])\n if 'sentenceLists' in detail:\n print(SECTION_COLOR + \"双语例句\")\n for sentenceGroup in detail['sentenceLists']:\n count = 1\n print(\"\".ljust(4) + HINT_COLOR + parse_source(sentenceGroup))\n for sentence in sentenceGroup['sentences']:\n print(TEXT_COLOR + \"\".ljust(8) + (\"%s.\" % str(count)).ljust(3) +\n sentence['eng'])\n print(\"\".ljust(8) + \"\".ljust(3) + sentence['chn'])\n if count >= default_sent:\n break\n count += 1\n\n\ninit()\n\nsourceDict = {\"CAMBRIDGE\": \"剑桥高阶英汉双解词典\", \"LONGMAN\": \"朗文当代高级英语词典\",\n \"COLLINS\": \"柯林斯英汉双解大词典\", \"ONLINE\": \"金山词霸\"}\nparser = argparse.ArgumentParser(description='manual to this script')\nparser.add_argument('word', type=str, help=\"The word you want to query\")\nparser.add_argument('--detail', '-d', action='store', default=0, const=2,\n nargs='?', type=int, dest='detail',\n help=\"Show the detailed meaning of the word\")\nparser.add_argument('--brief', '-b', action='store_true', default=True,\n help=\"Show the brief meaning of the word\", )\nparser.add_argument('--news', '-n', action='store_true', default=False,\n help=\"Whether show sentence examples from news\")\n\nargs = parser.parse_args()\nif getattr(sys, 'frozen', 
False):\n # we are running in a bundle\n bundle_dir = os.path.split(sys.executable)[0]\nelse:\n # we are running in a normal Python environment\n bundle_dir = os.path.dirname(os.path.abspath(__file__))\nconfig_path = os.path.join(bundle_dir, \"color.ini\")\nconfig = configparser.ConfigParser()\nconfig.read(config_path)\nWORD_COLOR = get_color(\n config.getint('COLOR', 'word_color') if config.getint('COLOR',\n 'word_color') else 91)\nHINT_COLOR = get_color(\n config.getint('COLOR', 'hint_color') if config.getint('COLOR',\n 'hint_color') else 92)\nSECTION_COLOR = get_color(\n config.getint('COLOR', 'section_color') if config.getint('COLOR',\n 'section_color') else 93)\nTEXT_COLOR = get_color(\n config.getint('COLOR', 'text_color') if config.getint('COLOR',\n 'text_color') else 97)\nSOURCE_COLOR = get_color(\n config.getint('COLOR', 'source_color') if config.getint('COLOR',\n 'source_color') else 90)\nENDPOINT = config.get(\"CONFIG\", \"endpoint\")\n\ndetail = json.loads(\n requests.get(ENDPOINT + \"/word/detail?json=true&word=\" + args.word).text)\ndefault_sent = args.detail\n\ntry:\n if args.detail:\n parse_detail(detail)\n else:\n parse_brief(detail['wordBrief'])\nexcept Exception as e:\n traceback.print_exc()\n print(\"该单词不存在\")\n"},"path":{"kind":"string","value":"win_python/idict.py"},"size":{"kind":"number","value":5642,"string":"5,642"},"nl_text":{"kind":"string","value":"we are running in a bundle we are running in a normal Python environment"},"nl_size":{"kind":"number","value":72,"string":"72"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.9534972310066223,"string":"0.953497"}}},{"rowIdx":7831,"cells":{"content":{"kind":"string","value":"# Python\nimport unittest\nfrom copy import deepcopy\nfrom unittest.mock import Mock\n\n# ATS\nfrom ats.topology import Device\n\n# Genie\nfrom genie.libs.ops.igmp.iosxe.igmp import Igmp\nfrom genie.libs.ops.igmp.iosxe.tests.igmp_output import IgmpOutput\n\n# Parser\nfrom genie.libs.parser.iosxe.show_igmp import ShowIpIgmpInterface, \\\n ShowIpIgmpGroupsDetail, \\\n ShowIpIgmpSsmMapping\n\n# iosxe show_vrf\nfrom genie.libs.parser.iosxe.show_vrf import ShowVrfDetail\n\noutputs = {}\noutputs['show ip igmp interface'] = IgmpOutput.ShowIpIgmpInterface_default\noutputs['show ip igmp vrf VRF1 interface'] = IgmpOutput.ShowIpIgmpInterface_VRF1\noutputs['show ip igmp groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_default\noutputs['show ip igmp vrf VRF1 groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_VRF1\noutputs['show ip igmp ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_default_1\noutputs['show ip igmp ssm-mapping 239.2.2.2'] = IgmpOutput.ShowIpIgmpSsmMapping_default_2\noutputs['show ip igmp ssm-mapping 239.3.3.3'] = IgmpOutput.ShowIpIgmpSsmMapping_default_3\noutputs['show ip igmp ssm-mapping 239.4.4.4'] = IgmpOutput.ShowIpIgmpSsmMapping_default_4\noutputs['show ip igmp ssm-mapping 239.5.5.5'] = IgmpOutput.ShowIpIgmpSsmMapping_default_5\noutputs['show ip igmp ssm-mapping 239.6.6.6'] = IgmpOutput.ShowIpIgmpSsmMapping_default_6\noutputs['show ip igmp ssm-mapping 239.7.7.7'] = IgmpOutput.ShowIpIgmpSsmMapping_default_7\noutputs['show ip igmp ssm-mapping 239.8.8.8'] = IgmpOutput.ShowIpIgmpSsmMapping_default_8\noutputs['show ip igmp ssm-mapping 239.9.9.9'] = IgmpOutput.ShowIpIgmpSsmMapping_default_9\noutputs['show ip igmp ssm-mapping 224.0.1.40'] = IgmpOutput.ShowIpIgmpSsmMapping_default_10\noutputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = 
IgmpOutput.ShowIpIgmpSsmMapping_VRF1_1\noutputs['show ip igmp vrf VRF1 ssm-mapping 239.2.2.2'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_2\noutputs['show ip igmp vrf VRF1 ssm-mapping 239.3.3.3'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_3\noutputs['show ip igmp vrf VRF1 ssm-mapping 239.4.4.4'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_4\noutputs['show ip igmp vrf VRF1 ssm-mapping 239.5.5.5'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_5\noutputs['show ip igmp vrf VRF1 ssm-mapping 239.6.6.6'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_6\noutputs['show ip igmp vrf VRF1 ssm-mapping 239.7.7.7'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_7\noutputs['show ip igmp vrf VRF1 ssm-mapping 239.8.8.8'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_8\noutputs['show ip igmp vrf VRF1 ssm-mapping 224.0.1.40'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_10\n\n\ndef mapper(key):\n return outputs[key]\n\n\nclass test_igmp(unittest.TestCase):\n\n def setUp(self):\n self.device = Device(name='aDevice')\n self.device.os = 'iosxe'\n self.device.mapping={}\n self.device.mapping['cli']='cli'\n # Give the device as a connection type\n # This is done in order to call the parser on the output provided\n self.device.connectionmgr.connections['cli'] = self.device\n\n def test_complete_output(self):\n self.maxDiff = None\n igmp = Igmp(device=self.device)\n # Get outputs\n igmp.maker.outputs[ShowVrfDetail] = \\\n {'': IgmpOutput.ShowVrfDetail}\n\n # Return outputs above as inputs to parser when called\n self.device.execute = Mock()\n self.device.execute.side_effect = mapper\n\n # Learn the feature\n igmp.learn()\n\n # Verify Ops was created successfully\n self.assertEqual(igmp.info, IgmpOutput.Igmp_info)\n\n def test_empty_output(self):\n self.maxDiff = None\n igmp = Igmp(device=self.device)\n # Get outputs\n igmp.maker.outputs[ShowVrfDetail] = \\\n {'': {}}\n\n # Return outputs above as inputs to parser when called\n self.device.execute = Mock()\n outputs['show ip igmp interface'] = ''\n outputs['show ip igmp vrf VRF1 interface'] = ''\n outputs['show ip igmp groups detail'] = ''\n outputs['show ip igmp vrf VRF1 groups detail'] = ''\n outputs['show ip igmp ssm-mapping 239.1.1.1'] = ''\n outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = ''\n self.device.execute.side_effect = mapper\n\n\n # Learn the feature\n igmp.learn()\n\n # revert the outputs\n outputs['show ip igmp interface'] = IgmpOutput.ShowIpIgmpInterface_default\n outputs['show ip igmp vrf VRF1 interface'] = IgmpOutput.ShowIpIgmpInterface_VRF1\n outputs['show ip igmp groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_default\n outputs['show ip igmp vrf VRF1 groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_VRF1\n outputs['show ip igmp ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_default_1\n outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_1\n\n # Check no attribute not found\n with self.assertRaises(AttributeError):\n igmp.info['vrfs']\n\n def test_selective_attribute(self):\n self.maxDiff = None\n igmp = Igmp(device=self.device)\n # Get outputs\n igmp.maker.outputs[ShowVrfDetail] = \\\n {'': IgmpOutput.ShowVrfDetail}\n\n # Return outputs above as inputs to parser when called\n self.device.execute = Mock()\n self.device.execute.side_effect = mapper\n\n # Learn the feature\n igmp.learn() \n\n # Check specific attribute values\n # info - default vrf\n self.assertEqual(igmp.info['vrfs']['default']['max_groups'], 20)\n # info - vrf VRF1\n self.assertEqual(igmp.info['vrfs']['VRF1']['interfaces']\\\n ['GigabitEthernet2']['querier'], 
'20.1.2.1')\n\n def test_incomplete_output(self):\n self.maxDiff = None\n \n igmp = Igmp(device=self.device)\n\n # Get outputs\n igmp.maker.outputs[ShowVrfDetail] = \\\n {'': IgmpOutput.ShowVrfDetail}\n\n # Return outputs above as inputs to parser when called\n self.device.execute = Mock()\n\n # overwrite output with empty output\n outputs['show ip igmp vrf VRF1 groups detail'] = '''\\\n show ip igmp vrf VRF1 groups detail\n '''\n self.device.execute.side_effect = mapper\n\n # Learn the feature\n igmp.learn()\n\n # Delete missing specific attribute values\n expect_dict = deepcopy(IgmpOutput.Igmp_info)\n del(expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['join_group'])\n del(expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['static_group'])\n del(expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['group'])\n del(expect_dict['vrfs']['VRF1']['ssm_map'])\n\n \n # Verify Ops was created successfully\n self.assertEqual(igmp.info, expect_dict)\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"path":{"kind":"string","value":"pkgs/ops-pkg/src/genie/libs/ops/igmp/iosxe/tests/test_igmp.py"},"size":{"kind":"number","value":6950,"string":"6,950"},"nl_text":{"kind":"string","value":"Python ATS Genie Parser iosxe show_vrf Give the device as a connection type This is done in order to call the parser on the output provided Get outputs Return outputs above as inputs to parser when called Learn the feature Verify Ops was created successfully Get outputs Return outputs above as inputs to parser when called Learn the feature revert the outputs Check no attribute not found Get outputs Return outputs above as inputs to parser when called Learn the feature Check specific attribute values info - default vrf info - vrf VRF1 Get outputs Return outputs above as inputs to parser when called overwrite output with empty output Learn the feature Delete missing specific attribute values Verify Ops was created successfully"},"nl_size":{"kind":"number","value":734,"string":"734"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.841743528842926,"string":"0.841744"}}},{"rowIdx":7832,"cells":{"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.\n#\n# Copyright (c) 2013 The Bitcoin developers\n# Distributed under the MIT/X11 software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n#\n\nimport json\nimport struct\nimport re\nimport base64\nimport httplib\nimport sys\n\nsettings = {}\n\nclass BitcoinRPC:\n\tOBJID = 1\n\n\tdef __init__(self, host, port, username, password):\n\t\tauthpair = \"%s:%s\" % (username, password)\n\t\tself.authhdr = \"Basic %s\" % (base64.b64encode(authpair))\n\t\tself.conn = httplib.HTTPConnection(host, port, False, 30)\n\tdef rpc(self, method, params=None):\n\t\tself.OBJID += 1\n\t\tobj = { 'version' : '1.1',\n\t\t\t'method' : method,\n\t\t\t'id' : self.OBJID }\n\t\tif params is None:\n\t\t\tobj['params'] = []\n\t\telse:\n\t\t\tobj['params'] = params\n\t\tself.conn.request('POST', '/', json.dumps(obj),\n\t\t\t{ 'Authorization' : self.authhdr,\n\t\t\t 'Content-type' : 'application/json' })\n\n\t\tresp = self.conn.getresponse()\n\t\tif resp is None:\n\t\t\tprint \"JSON-RPC: no response\"\n\t\t\treturn None\n\n\t\tbody = resp.read()\n\t\tresp_obj = json.loads(body)\n\t\tif resp_obj is None:\n\t\t\tprint \"JSON-RPC: cannot JSON-decode body\"\n\t\t\treturn None\n\t\tif 'error' in resp_obj 
and resp_obj['error'] != None:\n\t\t\treturn resp_obj['error']\n\t\tif 'result' not in resp_obj:\n\t\t\tprint \"JSON-RPC: no result in object\"\n\t\t\treturn None\n\n\t\treturn resp_obj['result']\n\tdef getblock(self, hash, verbose=True):\n\t\treturn self.rpc('getblock', [hash, verbose])\n\tdef getblockhash(self, index):\n\t\treturn self.rpc('getblockhash', [index])\n\ndef get_block_hashes(settings):\n\trpc = BitcoinRPC(settings['host'], settings['port'],\n\t\t\t settings['rpcuser'], settings['rpcpassword'])\n\n\tfor height in xrange(settings['min_height'], settings['max_height']+1):\n\t\thash = rpc.getblockhash(height)\n\n\t\tprint(hash)\n\nif __name__ == '__main__':\n\tif len(sys.argv) != 2:\n\t\tprint \"Usage: linearize-hashes.py CONFIG-FILE\"\n\t\tsys.exit(1)\n\n\tf = open(sys.argv[1])\n\tfor line in f:\n\t\t# skip comment lines\n\t\tm = re.search('^\\s*#', line)\n\t\tif m:\n\t\t\tcontinue\n\n\t\t# parse key=value lines\n\t\tm = re.search('^(\\w+)\\s*=\\s*(\\S.*)$', line)\n\t\tif m is None:\n\t\t\tcontinue\n\t\tsettings[m.group(1)] = m.group(2)\n\tf.close()\n\n\tif 'host' not in settings:\n\t\tsettings['host'] = '127.0.0.1'\n\tif 'port' not in settings:\n\t\tsettings['port'] = 4242\n\tif 'min_height' not in settings:\n\t\tsettings['min_height'] = 0\n\tif 'max_height' not in settings:\n\t\tsettings['max_height'] = 319000\n\tif 'rpcuser' not in settings or 'rpcpassword' not in settings:\n\t\tprint \"Missing username and/or password in cfg file\"\n\t\tsys.exit(1)\n\n\tsettings['port'] = int(settings['port'])\n\tsettings['min_height'] = int(settings['min_height'])\n\tsettings['max_height'] = int(settings['max_height'])\n\n\tget_block_hashes(settings)\n\n"},"path":{"kind":"string","value":"contrib/linearize/linearize-hashes.py"},"size":{"kind":"number","value":2761,"string":"2,761"},"nl_text":{"kind":"string","value":"!/usr/bin/python linearize-hashes.py: List blocks in a linear, no-fork version of the chain. Copyright (c) 2013 The Bitcoin developers Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. skip comment lines parse key=value lines"},"nl_size":{"kind":"number","value":313,"string":"313"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6735787987709045,"string":"0.673579"}}},{"rowIdx":7833,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"\nFunctions for estimating quantities from nested sampling runs.\nEach estimator function should have arguments:\n\n.. code-block:: python\n\n def estimator_func(self, ns_run, logw=None, simulate=False):\n ...\n\nAny additional arguments required for the function should be keyword\narguments.\n\nThe ``logw`` argument allows the log weights for the points in the run to be\nprovided - this is useful if many estimators are being calculated from\nthe same run as it allows ``logw`` to only be calculated once. 
If it is not\nspecified, ``logw`` is calculated from the run when required.\n\nThe simulate argument is passed to ``ns_run_utils.get_logw``, and is only used\nif the function needs to calculate ``logw``.\n\"\"\"\n\nimport functools\nimport numpy as np\nimport scipy\nimport nestcheck.ns_run_utils\n\n\n# Estimators\n# ----------\n\ndef count_samples(ns_run, **kwargs):\n r\"\"\"Number of samples in run.\n\n Unlike most estimators this does not require log weights, but for\n convenience will not throw an error if they are specified.\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n\n Returns\n -------\n int\n \"\"\"\n kwargs.pop('logw', None)\n kwargs.pop('simulate', None)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n return ns_run['logl'].shape[0]\n\n\ndef logz(ns_run, logw=None, simulate=False):\n r\"\"\"Natural log of Bayesian evidence :math:`\\log \\mathcal{Z}`.\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\n Returns\n -------\n float\n \"\"\"\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n return scipy.special.logsumexp(logw)\n\n\ndef evidence(ns_run, logw=None, simulate=False):\n r\"\"\"Bayesian evidence :math:`\\log \\mathcal{Z}`.\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\n Returns\n -------\n float\n \"\"\"\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n return np.exp(scipy.special.logsumexp(logw))\n\n\ndef param_mean(ns_run, logw=None, simulate=False, param_ind=0,\n handle_indexerror=False):\n \"\"\"Mean of a single parameter (single component of theta).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n param_ind: int, optional\n Index of parameter for which the mean should be calculated. This\n corresponds to the column of ns_run['theta'] which contains the\n parameter.\n handle_indexerror: bool, optional\n Make the function function return nan rather than raising an\n IndexError if param_ind >= ndim. 
This is useful when applying\n the same list of estimators to data sets of different dimensions.\n\n Returns\n -------\n float\n \"\"\"\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n w_relative = np.exp(logw - logw.max())\n try:\n return (np.sum(w_relative * ns_run['theta'][:, param_ind])\n / np.sum(w_relative))\n except IndexError:\n if handle_indexerror:\n return np.nan\n else:\n raise\n\n\ndef param_cred(ns_run, logw=None, simulate=False, probability=0.5,\n param_ind=0):\n \"\"\"One-tailed credible interval on the value of a single parameter\n (component of theta).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n probability: float, optional\n Quantile to estimate - must be in open interval (0, 1).\n For example, use 0.5 for the median and 0.84 for the upper\n 84% quantile. Passed to weighted_quantile.\n param_ind: int, optional\n Index of parameter for which the credible interval should be\n calculated. This corresponds to the column of ns_run['theta']\n which contains the parameter.\n\n Returns\n -------\n float\n \"\"\"\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n w_relative = np.exp(logw - logw.max()) # protect against overflow\n return weighted_quantile(probability, ns_run['theta'][:, param_ind],\n w_relative)\n\n\ndef param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):\n \"\"\"Mean of the square of single parameter (second moment of its\n posterior distribution).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n param_ind: int, optional\n Index of parameter for which the second moment should be\n calculated. 
This corresponds to the column of ns_run['theta']\n which contains the parameter.\n\n Returns\n -------\n float\n \"\"\"\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n w_relative = np.exp(logw - logw.max()) # protect against overflow\n w_relative /= np.sum(w_relative)\n return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))\n\n\ndef r_mean(ns_run, logw=None, simulate=False):\n \"\"\"Mean of the radial coordinate (magnitude of theta vector).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\n Returns\n -------\n float\n \"\"\"\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n w_relative = np.exp(logw - logw.max())\n r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))\n return np.sum(w_relative * r) / np.sum(w_relative)\n\n\ndef r_cred(ns_run, logw=None, simulate=False, probability=0.5):\n \"\"\"One-tailed credible interval on the value of the radial coordinate\n (magnitude of theta vector).\n\n Parameters\n ----------\n ns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n logw: None or 1d numpy array, optional\n Log weights of samples.\n simulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n probability: float, optional\n Quantile to estimate - must be in open interval (0, 1).\n For example, use 0.5 for the median and 0.84 for the upper\n 84% quantile. Passed to weighted_quantile.\n\n Returns\n -------\n float\n \"\"\"\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n w_relative = np.exp(logw - logw.max()) # protect against overflow\n r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))\n return weighted_quantile(probability, r, w_relative)\n\n\n# Helper functions\n# ----------------\n\n\ndef get_latex_name(func_in, **kwargs):\n \"\"\"\n Produce a latex formatted name for each function for use in labelling\n results.\n\n Parameters\n ----------\n func_in: function\n kwargs: dict, optional\n Kwargs for function.\n\n Returns\n -------\n latex_name: str\n Latex formatted name for the function.\n \"\"\"\n if isinstance(func_in, functools.partial):\n func = func_in.func\n assert not set(func_in.keywords) & set(kwargs), (\n 'kwargs={0} and func_in.keywords={1} contain repeated keys'\n .format(kwargs, func_in.keywords))\n kwargs.update(func_in.keywords)\n else:\n func = func_in\n param_ind = kwargs.pop('param_ind', 0)\n probability = kwargs.pop('probability', 0.5)\n kwargs.pop('handle_indexerror', None)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n ind_str = r'{\\hat{' + str(param_ind + 1) + '}}'\n latex_name_dict = {\n 'count_samples': r'samples',\n 'logz': r'$\\mathrm{log} \\mathcal{Z}$',\n 'evidence': r'$\\mathcal{Z}$',\n 'r_mean': r'$\\overline{|\\theta|}$',\n 'param_mean': r'$\\overline{\\theta_' + ind_str + '}$',\n 'param_squared_mean': r'$\\overline{\\theta^2_' + ind_str + '}$'}\n # Add credible interval names\n if probability == 0.5:\n cred_str = r'$\\mathrm{median}('\n else:\n # format percent without trailing zeros\n percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')\n cred_str = r'$\\mathrm{C.I.}_{' + percent_str + r'\\%}('\n latex_name_dict['param_cred'] = cred_str + r'\\theta_' + 
ind_str + ')$'\n latex_name_dict['r_cred'] = cred_str + r'|\\theta|)$'\n try:\n return latex_name_dict[func.__name__]\n except KeyError as err:\n err.args = err.args + ('get_latex_name not yet set up for ' +\n func.__name__,)\n raise\n\n\ndef weighted_quantile(probability, values, weights):\n \"\"\"\n Get quantile estimate for input probability given weighted samples using\n linear interpolation.\n\n Parameters\n ----------\n probability: float\n Quantile to estimate - must be in open interval (0, 1).\n For example, use 0.5 for the median and 0.84 for the upper\n 84% quantile.\n values: 1d numpy array\n Sample values.\n weights: 1d numpy array\n Corresponding sample weights (same shape as values).\n\n Returns\n -------\n quantile: float\n \"\"\"\n assert 1 > probability > 0, (\n 'credible interval prob= ' + str(probability) + ' not in (0, 1)')\n assert values.shape == weights.shape\n assert values.ndim == 1\n assert weights.ndim == 1\n sorted_inds = np.argsort(values)\n quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])\n quantiles /= np.sum(weights)\n return np.interp(probability, quantiles, values[sorted_inds])\n"},"path":{"kind":"string","value":"nestcheck/estimators.py"},"size":{"kind":"number","value":11030,"string":"11,030"},"nl_text":{"kind":"string","value":"Number of samples in run.\n\nUnlike most estimators this does not require log weights, but for\nconvenience will not throw an error if they are specified.\n\nParameters\n----------\nns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\n\nReturns\n-------\nint\nBayesian evidence :math:`\\log \\mathcal{Z}`.\n\nParameters\n----------\nns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\nlogw: None or 1d numpy array, optional\n Log weights of samples.\nsimulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\nReturns\n-------\nfloat\nProduce a latex formatted name for each function for use in labelling\nresults.\n\nParameters\n----------\nfunc_in: function\nkwargs: dict, optional\n Kwargs for function.\n\nReturns\n-------\nlatex_name: str\n Latex formatted name for the function.\nNatural log of Bayesian evidence :math:`\\log \\mathcal{Z}`.\n\nParameters\n----------\nns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\nlogw: None or 1d numpy array, optional\n Log weights of samples.\nsimulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\n\nReturns\n-------\nfloat\nOne-tailed credible interval on the value of a single parameter\n(component of theta).\n\nParameters\n----------\nns_run: dict\n Nested sampling run dict (see the data_processing module\n docstring for more details).\nlogw: None or 1d numpy array, optional\n Log weights of samples.\nsimulate: bool, optional\n Passed to ns_run_utils.get_logw if logw needs to be\n calculated.\nprobability: float, optional\n Quantile to estimate - must be in open interval (0, 1).\n For example, use 0.5 for the median and 0.84 for the upper\n 84% quantile. Passed to weighted_quantile.\nparam_ind: int, optional\n Index of parameter for which the credible interval should be\n calculated. 
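The ``weighted_quantile`` helper defined above can be exercised on its own. A small illustration follows; the numbers are invented for the example and the import path is assumed from the file's location at nestcheck/estimators.py:

import numpy as np
from nestcheck.estimators import weighted_quantile  # assumed import path

values = np.array([1.0, 2.0, 3.0])
weights = np.array([1.0, 1.0, 2.0])
# Interior quantile grid: (cumsum(w) - 0.5*w) / sum(w) = [0.125, 0.375, 0.75],
# so the weighted median interpolates between 2.0 and 3.0.
print(weighted_quantile(0.5, values, weights))   # ~2.33
print(weighted_quantile(0.84, values, weights))  # 3.0 (above the last grid point)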
If it is not\nspecified, ``logw`` is calculated from the run when required.\n\nThe simulate argument is passed to ``ns_run_utils.get_logw``, and is only used\nif the function needs to calculate ``logw``.\n\n!/usr/bin/env python Estimators ---------- protect against overflow protect against overflow protect against overflow Helper functions ---------------- Add credible interval names format percent without trailing zeros"},"nl_size":{"kind":"number","value":5795,"string":"5,795"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.5544084310531616,"string":"0.554408"}}},{"rowIdx":7834,"cells":{"content":{"kind":"string","value":"\"\"\"Database schema functions and information for Toron node files.\n\nToron nodes are stored as individual files. The file format is\nmanaged, internally, as a relational database. The schema for this\ndatabase is shown below as a simplified ERD (entity relationship\ndiagram). SQL foreign key relationships are represented with hyphen\nand pipe characters ('-' and '|'). Other, more complex relationships\nare represented with bullet points ('•') and these are enforced at\nthe application layer:\n\n +------------------+\n +---------------------+ | relation |\n | edge | +------------------+\n +---------------------+ | relation_id | •••• \n | edge_id |------->| edge_id | •\n | name | ••••••| other_element_id |<•••••\n | type_info | • •••| element_id |<-+ +--------------+\n | description | • • | proportion | | | quantity |\n | user_properties | • • | mapping_level | | +--------------+\n | other_uuid | • • +------------------+ | | quantity_id |\n | other_filename_hint | • • | +->| _location_id |\n | other_element_hash |<•• • | | | attributes |\n | is_complete |<••••• +-----------------+ | | value |\n +---------------------+ | | +--------------+\n | |\n +------------+ | +--------------+ | +---------------+\n | element | | | location | | | structure |\n +------------+ | +--------------+ | +---------------+\n +------| element_id |--+ | _location_id |--+ | _structure_id |\n | | label_a |••••>| label_a |<••••| label_a |\n | | label_b |••••>| label_b |<••••| label_b |\n | | label_c |••••>| label_c |<••••| label_c |\n | | ... |••••>| ... |<••••| ... |\n | +------------+ +--------------+ +---------------+\n |\n | +-------------------+ +----------+\n | | element_weight | +-------------+ | property |\n | +-------------------+ | weight | +----------+\n | | element_weight_id | +-------------+ | key |\n | | weight_id |<----| weight_id | | value |\n +->| element_id |••• | name | +----------+\n | value | • | type_info |\n +-------------------+ • | description |\n ••>| is_complete |\n +-------------+\n\"\"\"\n\nimport itertools\nimport os\nimport re\nimport sqlite3\nfrom contextlib import contextmanager\nfrom json import loads as _loads\nfrom urllib.parse import quote as urllib_parse_quote\n\nfrom ._exceptions import ToronError\n\n\nsqlite3.register_converter('TEXT_JSON', _loads)\nsqlite3.register_converter('TEXT_ATTRIBUTES', _loads)\n\n\ndef _is_sqlite_json1_enabled():\n \"\"\"Check if SQLite implementation includes JSON1 extension.\"\"\"\n # The inclusion of JSON functions is optional when compiling SQLite.\n # In versions 3.38.0 and newer, JSON functions are included by\n # default but can be disabled (opt-out policy). For older versions\n # of SQLite, JSON functions are available on an opt-in basis. 
It is\n # necessary to test for their presence rathern than referencing the\n # SQLite version number.\n #\n # For more information, see:\n # https://www.sqlite.org/json1.html#compiling_in_json_support\n\n con = sqlite3.connect(':memory:')\n try:\n con.execute(\"SELECT json_valid('123')\")\n except sqlite3.OperationalError:\n return False\n finally:\n con.close()\n return True\n\n\nSQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()\n\n\n_schema_script = \"\"\"\n PRAGMA foreign_keys = ON;\n\n CREATE TABLE edge(\n edge_id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n type_info TEXT_ATTRIBUTES NOT NULL,\n description TEXT,\n user_properties TEXT_USERPROPERTIES,\n other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,\n other_filename_hint TEXT NOT NULL,\n other_element_hash TEXT,\n is_complete INTEGER CHECK (is_complete IN (0, 1)),\n UNIQUE (name, other_uuid)\n );\n\n CREATE TABLE relation(\n relation_id INTEGER PRIMARY KEY,\n edge_id INTEGER,\n other_element_id INTEGER NOT NULL,\n element_id INTEGER,\n proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,\n mapping_level INTEGER NOT NULL,\n FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,\n FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,\n UNIQUE (edge_id, other_element_id, element_id)\n );\n\n CREATE TABLE element(\n element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */\n /* label columns added programmatically */\n );\n\n CREATE TABLE location(\n _location_id INTEGER PRIMARY KEY\n /* label columns added programmatically */\n );\n\n CREATE TABLE structure(\n _structure_id INTEGER PRIMARY KEY\n /* label columns added programmatically */\n );\n\n CREATE TABLE quantity(\n quantity_id INTEGER PRIMARY KEY,\n _location_id INTEGER,\n attributes TEXT_ATTRIBUTES NOT NULL,\n value NUMERIC NOT NULL,\n FOREIGN KEY(_location_id) REFERENCES location(_location_id)\n );\n\n CREATE TABLE weight(\n weight_id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n type_info TEXT_ATTRIBUTES NOT NULL,\n description TEXT,\n is_complete INTEGER CHECK (is_complete IN (0, 1)),\n UNIQUE (name)\n );\n\n CREATE TABLE element_weight(\n element_weight_id INTEGER PRIMARY KEY,\n weight_id INTEGER,\n element_id INTEGER,\n value REAL NOT NULL,\n FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,\n FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,\n UNIQUE (element_id, weight_id)\n );\n\n CREATE TABLE property(\n key TEXT PRIMARY KEY NOT NULL,\n value TEXT_JSON\n );\n\n INSERT INTO property VALUES ('schema_version', '1');\n\"\"\"\n\n\ndef _is_wellformed_json(x):\n \"\"\"Return 1 if *x* is well-formed JSON or return 0 if *x* is not\n well-formed. This function should be registered with SQLite (via\n the create_function() method) when the JSON1 extension is not\n available.\n\n This function mimics the JSON1 json_valid() function, see:\n https://www.sqlite.org/json1.html#jvalid\n \"\"\"\n try:\n _loads(x)\n except (ValueError, TypeError):\n return 0\n return 1\n\n\ndef _make_trigger_for_json(insert_or_update, table, column):\n \"\"\"Return a SQL statement for creating a temporary trigger. 
The\n trigger is used to validate the contents of TEXT_JSON type columns.\n The trigger will pass without error if the JSON is wellformed.\n \"\"\"\n if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:\n msg = f\"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}\"\n raise ValueError(msg)\n\n if SQLITE_JSON1_ENABLED:\n when_clause = f\"\"\"\n NEW.{column} IS NOT NULL\n AND json_valid(NEW.{column}) = 0\n \"\"\".rstrip()\n else:\n when_clause = f\"\"\"\n NEW.{column} IS NOT NULL\n AND is_wellformed_json(NEW.{column}) = 0\n \"\"\".rstrip()\n\n return f'''\n CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}\n BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW\n WHEN{when_clause}\n BEGIN\n SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');\n END;\n '''\n\n\ndef _is_wellformed_user_properties(x):\n \"\"\"Check if *x* is a wellformed TEXT_USERPROPERTIES value.\n A wellformed TEXT_USERPROPERTIES value is a string containing\n a JSON formatted object. Returns 1 if *x* is valid or 0 if\n it's not.\n\n This function should be registered as an application-defined\n SQL function and used in queries when SQLite's JSON1 extension\n is not enabled.\n \"\"\"\n try:\n obj = _loads(x)\n except (ValueError, TypeError):\n return 0\n\n if isinstance(obj, dict):\n return 1\n return 0\n\n\ndef _make_trigger_for_user_properties(insert_or_update, table, column):\n \"\"\"Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES\n values. This trigger is used to check values before they are saved\n in the database.\n\n A wellformed TEXT_USERPROPERTIES value is a string containing\n a JSON formatted object.\n\n The trigger will pass without error if the value is wellformed.\n \"\"\"\n if SQLITE_JSON1_ENABLED:\n user_properties_check = f\"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')\"\n else:\n user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'\n\n return f'''\n CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}\n BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW\n WHEN\n NEW.{column} IS NOT NULL\n AND {user_properties_check}\n BEGIN\n SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');\n END;\n '''\n\n\ndef _is_wellformed_attributes(x):\n \"\"\"Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column\n value else returns 0. TEXT_ATTRIBUTES should be flat, JSON\n object strings. This function should be registered with SQLite\n (via the create_function() method) when the JSON1 extension\n is not available.\n \"\"\"\n try:\n obj = _loads(x)\n except (ValueError, TypeError):\n return 0\n\n if not isinstance(obj, dict):\n return 0\n\n for value in obj.values():\n if not isinstance(value, str):\n return 0\n\n return 1\n\n\ndef _make_trigger_for_attributes(insert_or_update, table, column):\n \"\"\"Return a SQL statement for creating a temporary trigger. 
The\n trigger is used to validate the contents of TEXT_ATTRIBUTES\n type columns.\n\n The trigger will pass without error if the JSON is a wellformed\n \"object\" containing \"text\" values.\n\n The trigger will raise an error if the value is:\n\n * not wellformed JSON\n * not an \"object\" type\n * an \"object\" type that contains one or more \"integer\", \"real\",\n \"true\", \"false\", \"null\", \"object\" or \"array\" types\n \"\"\"\n if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:\n msg = f\"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}\"\n raise ValueError(msg)\n\n if SQLITE_JSON1_ENABLED:\n when_clause = f\"\"\"\n NEW.{column} IS NOT NULL\n AND (json_valid(NEW.{column}) = 0\n OR json_type(NEW.{column}) != 'object'\n OR (SELECT COUNT(*)\n FROM json_each(NEW.{column})\n WHERE json_each.type != 'text') != 0)\n \"\"\".rstrip()\n else:\n when_clause = f\"\"\"\n NEW.{column} IS NOT NULL\n AND is_wellformed_attributes(NEW.{column}) = 0\n \"\"\".rstrip()\n\n return f'''\n CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}\n BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW\n WHEN{when_clause}\n BEGIN\n SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');\n END;\n '''\n\n\ndef _add_functions_and_triggers(connection):\n \"\"\"Create triggers and application-defined functions *connection*.\n\n Note: This function must not be executed on an empty connection.\n The table schema must exist before triggers can be created.\n \"\"\"\n if not SQLITE_JSON1_ENABLED:\n try:\n connection.create_function(\n 'is_wellformed_json', 1, _is_wellformed_json, deterministic=True)\n connection.create_function(\n 'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)\n connection.create_function(\n 'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)\n except TypeError:\n connection.create_function('is_wellformed_json', 1, _is_wellformed_json)\n connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)\n connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)\n\n connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))\n connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))\n\n connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))\n connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))\n\n jsonflatobj_columns = [\n ('edge', 'type_info'),\n ('quantity', 'attributes'),\n ('weight', 'type_info'),\n ]\n for table, column in jsonflatobj_columns:\n connection.execute(_make_trigger_for_attributes('INSERT', table, column))\n connection.execute(_make_trigger_for_attributes('UPDATE', table, column))\n\n\ndef _path_to_sqlite_uri(path):\n \"\"\"Convert a path into a SQLite compatible URI.\n\n Unlike pathlib's URI handling, SQLite accepts relative URI paths.\n For details, see:\n\n https://www.sqlite.org/uri.html#the_uri_path\n \"\"\"\n if os.name == 'nt': # Windows\n if re.match(r'^[a-zA-Z]:', path):\n path = os.path.abspath(path) # If drive-letter, must be absolute.\n drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.\n path = path[2:]\n else:\n drive_prefix = ''\n path = path.replace('\\\\', '/')\n path = urllib_parse_quote(path)\n path = f'{drive_prefix}{path}'\n else:\n path = urllib_parse_quote(path)\n\n path = re.sub('/+', '/', path)\n return 
f'file:{path}'\n\n\ndef connect(path, mode='rwc'):\n \"\"\"Returns a sqlite3 connection to a Toron node file.\"\"\"\n uri_path = _path_to_sqlite_uri(path)\n uri_path = f'{uri_path}?mode={mode}'\n\n try:\n get_connection = lambda: sqlite3.connect(\n database=uri_path,\n detect_types=sqlite3.PARSE_DECLTYPES,\n isolation_level=None,\n uri=True,\n )\n if os.path.exists(path):\n con = get_connection()\n else:\n con = get_connection()\n con.executescript(_schema_script) # Create database schema.\n except sqlite3.OperationalError as err:\n msg = str(err).replace('database file', f'node file {path!r}')\n raise ToronError(msg)\n\n try:\n _add_functions_and_triggers(con)\n except (sqlite3.OperationalError, sqlite3.DatabaseError):\n # Raises OperationalError when *path* is a database with an unknown\n # schema and DatabaseError when *path* is a file but not a database.\n con.close()\n raise ToronError(f'Path is not a Toron node: {path!r}')\n\n cur = con.execute(\"SELECT value FROM property WHERE key='schema_version'\")\n schema_version, *_ = cur.fetchone() or (None,)\n cur.close()\n\n if schema_version != 1: # When schema version is unsupported.\n msg = f'Unsupported Toron node format: schema version {schema_version!r}'\n raise ToronError(msg)\n\n return con\n\n\n_SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count())\n\n\nclass savepoint(object):\n \"\"\"Context manager to wrap a block of code inside a SAVEPOINT.\n If the block exists without errors, the SAVEPOINT is released\n and the changes are committed. If an error occurs, all of the\n changes are rolled back:\n\n cur = con.cursor()\n with savepoint(cur):\n cur.execute(...)\n \"\"\"\n def __init__(self, cursor):\n if cursor.connection.isolation_level is not None:\n isolation_level = cursor.connection.isolation_level\n msg = (\n f'isolation_level must be None, got: {isolation_level!r}\\n'\n '\\n'\n 'For explicit transaction handling, the connection must '\n 'be operating in \"autocommit\" mode. Turn on autocommit '\n 'mode by setting \"con.isolation_level = None\".'\n )\n raise sqlite3.OperationalError(msg)\n\n self.name = next(_SAVEPOINT_NAME_GENERATOR)\n self.cursor = cursor\n\n def __enter__(self):\n self.cursor.execute(f'SAVEPOINT {self.name}')\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is None:\n self.cursor.execute(f'RELEASE {self.name}')\n else:\n self.cursor.execute(f'ROLLBACK TO {self.name}')\n\n\n@contextmanager\ndef transaction(path_or_connection, mode=None):\n \"\"\"A context manager that yields a cursor that runs in an\n isolated transaction. If the context manager exits without\n errors, the transaction is committed. If an exception is\n raised, all changes are rolled-back.\n \"\"\"\n if isinstance(path_or_connection, sqlite3.Connection):\n connection = path_or_connection\n connection_close = lambda: None # Don't close already-existing cursor.\n else:\n connection = connect(path_or_connection, mode=mode)\n connection_close = connection.close\n\n cursor = connection.cursor()\n try:\n with savepoint(cursor):\n yield cursor\n finally:\n cursor.close()\n connection_close()\n\n"},"path":{"kind":"string","value":"toron/_node_schema.py"},"size":{"kind":"number","value":18111,"string":"18,111"},"nl_text":{"kind":"string","value":"Context manager to wrap a block of code inside a SAVEPOINT.\nIf the block exists without errors, the SAVEPOINT is released\nand the changes are committed. 
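A short usage sketch for the schema module above; the import path is inferred from the file path toron/_node_schema.py and 'example.toron' is a placeholder filename, so treat both as assumptions:

from toron._node_schema import connect, transaction  # assumed import path

# connect() creates the schema on first use (mode 'rwc' is the default) and
# registers the validation triggers; transaction() wraps the work in a SAVEPOINT
# and rolls it back if the block raises.
con = connect('example.toron')
with transaction(con) as cur:
    cur.execute(
        "INSERT INTO weight (name, type_info) VALUES (?, ?)",
        ('population', '{"category": "census"}'),  # type_info must be a JSON object with text values
    )
con.close()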
Don't close already-existing cursor."},"nl_size":{"kind":"number","value":6512,"string":"6,512"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.679119348526001,"string":"0.679119"}}},{"rowIdx":7835,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 5 09:10:56 2018\n\n@author: gtucker\n\"\"\"\n\nimport numpy as np\nimport datetime\nfrom grainhill import GrainFacetSimulator\nfrom grainhill import SlopeMeasurer\nimport landlab\nfrom landlab.io.native_landlab import save_grid\nimport os\n\n\ndef create_folder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Error: Creating directory ' + directory)\n\n\nparams = {\n 'grid_size' : (111, 81),\n 'report_interval' : 5.0, \n 'output_interval' : 1.0e99, \n 'disturbance_rate' : 1.0e-4,\n 'weathering_rate' : 0.0,\n 'dissolution_rate': 0.0,\n 'friction_coef' : 1.0,\n 'fault_x' : -0.01, \n 'cell_width' : 0.5, \n 'grav_accel' : 9.8,\n }\n\n\n# Open a file to record output:\nd = datetime.datetime.today()\ntoday_str = str(d.year) + str(d.month).zfill(2) + str(d.day).zfill(2)\nresults_file = open('results_v_vs_w' + today_str + '.csv', 'w')\nresults_file.write('Landlab version,' + landlab.__version__ + ',\\n')\n\n\n# Print header in file\nresults_file.write('Uplift interval (yr),Weathering rate '\n + 'parameter (1/yr),Gradient (m/m),'\n + 'Slope angle (deg)\\n')\n\n\n# Sweep through a range of dissolution rate parameters\nfor uplift_interval_exp in np.arange(2, 5.2, 0.2):\n for weath_exp in np.arange(-5, -1.8, 0.2):\n\n weath_rate = 10.0**weath_exp\n uplift_interval = 10.0**uplift_interval_exp\n params['uplift_interval'] = uplift_interval\n params['weathering_rate'] = weath_rate\n\n # Set run duration long enough for uplift of 150 rows\n params['run_duration'] = 100 * uplift_interval\n params['plot_interval'] = 10 * uplift_interval\n\n print('Uplift interval: ' + str(params['uplift_interval']) + ' 1/y')\n print('Weathering rate: ' + str(params['weathering_rate']) + ' 1/y')\n\n opname = ('tau' + str(int(round(10 * uplift_interval_exp))) + 'w' + str(int(round(10 * weath_exp))))\n create_folder(opname)\n params['plot_file_name'] = opname + '/' + opname\n\n gfs = GrainFacetSimulator(**params)\n gfs.run()\n\n sm = SlopeMeasurer(gfs)\n sm.pick_rock_surface()\n (m, b) = sm.fit_straight_line_to_surface()\n angle = np.degrees(np.arctan(m))\n\n results_file.write(str(uplift_interval) + ',' + str(weath_rate) + ','\n + str(m) + ',' + str(angle) + '\\n')\n results_file.flush()\n\n save_grid(gfs.grid, opname + '/' + opname + '.grid', clobber=True)\n\nresults_file.close()\n"},"path":{"kind":"string","value":"ModelRunScripts/SensitivityAnalysisDandV/run_v_w.py"},"size":{"kind":"number","value":2601,"string":"2,601"},"nl_text":{"kind":"string","value":"Created on Thu Jul 5 09:10:56 2018\n\n@author: gtucker\n\n!/usr/bin/env python2 -*- coding: utf-8 -*- Open a file to record output: Print header in file Sweep through a range of dissolution rate parameters Set run duration long enough for uplift of 150 rows"},"nl_size":{"kind":"number","value":254,"string":"254"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7189329862594604,"string":"0.718933"}}},{"rowIdx":7836,"cells":{"content":{"kind":"string","value":"from Jumpscale import j\nimport re\n\n# ACTIONS\n## R = Regex Replace\n## RI = Regex Replace case insensitive\n\nDO = \"\"\"\nRI| j.application.JSBase$ | 
j.baseclasses.object\nRI| j.data.cache. | j.core.cache.\nRI| j.data.text. | j.core.text.\nRI| from jumpscale import j | from Jumpscale import j \nRI| j.application.jsbase_get_class() | j.baseclasses.object\nRI| .base_class_config | .JSBaseClassConfig\nRI| .base_class_configs | .JSBaseClassConfigs\nRI| j.logging. | j.logger.\nRI | Jumpscale.logging. | Jumpscale.core.logging.\nRI| self._location | self.__jslocation__\nRI| j.data.serializer. | j.data.serializers.\nRI| self.prefab.core.file_write | j.sal.fs.writeFile\nRI| self.prefab.core.run | j.sal.process.execute\nRI| self.prefab.core.createDir | j.sal.fs.createDir\nRI| self.prefab.core.file_download | self.prefab.core.file_download\nRI| self.prefab.system.package.install | j.builders.system.package.ensure\n\n\"\"\"\n\nERRORS = \"\"\"\nconfigmanager._base_class_config\n\"\"\"\n\nJSBASE = j.baseclasses.object\n\n\nclass FixerReplacer(j.baseclasses.object):\n def __init__(self):\n JSBASE.__init__(self)\n self.rules = []\n\n for rule in DO.split(\"\\n\"):\n if rule.strip() == \"\":\n continue\n if rule.strip().startswith(\"#\"):\n continue\n cmd, from_, to_ = rule.split(\"|\")\n if cmd.lower().strip() == \"ri\":\n self.rules.append(ReplaceIgnoreCase(from_, to_))\n elif cmd.lower().strip() == \"r\":\n self.rules.append(ReplaceNormal(from_, to_))\n else:\n raise j.exceptions.Base(\"unknown rule:%s\" % rule)\n\n def line_process(self, line):\n changed = False\n # if \"\\t\" in line:\n # line = line.replace(\"\\t\",\" \")\n # changed = True\n for rule in self.rules:\n line1 = rule.replace(line)\n if line1 != line:\n changed = True\n line = line1\n return changed, line\n\n def file_process(self, path, write=False, root=\"\"):\n out = \"\"\n nr = 0\n\n for line in j.sal.fs.readFile(path).split(\"\\n\"):\n nr += 1\n changed, line2 = self.line_process(line)\n if changed:\n path2 = j.sal.fs.pathRemoveDirPart(path, root)\n if path2 not in self.changes:\n self.changes[path2] = {}\n changes = self.changes[path2]\n changes[\"line\"] = nr\n changes[\"from\"] = line\n changes[\"to..\"] = line2\n out += \"%s\\n\" % line2\n else:\n out += \"%s\\n\" % line\n if len(self.changes) > 0 and write:\n j.sal.fs.writeFile(path, out)\n\n def dir_process(self, path, extensions=[\"py\", \"txt\", \"md\"], recursive=True, write=False):\n path = j.sal.fs.pathNormalize(path)\n self.changes = {}\n for ext in extensions:\n for p in j.sal.fs.listFilesInDir(path, recursive=recursive, filter=\"*.%s\" % ext, followSymlinks=False):\n self._log_debug(\"process file:%s\" % p)\n self.file_process(root=path, path=p, write=write)\n print(j.data.serializers.yaml.dumps(self.changes))\n\n\nclass ReplaceIgnoreCase:\n def __init__(self, from_, to_, prepend=\"\", append=\"\"):\n self.from_ = from_.strip()\n self.to_ = to_.strip()\n self.regex = re.compile(re.escape(prepend + self.from_ + append), re.IGNORECASE | re.VERBOSE)\n\n def replace(self, txt):\n m = self.regex.search(txt)\n\n if m:\n found = m.string[m.start() : m.end()]\n txt2 = txt.replace(found, self.to_)\n return txt2\n else:\n return txt\n\n\nclass ReplaceNormal(ReplaceIgnoreCase):\n def __init__(self, from_, to_, prepend=\"\", append=\"\"):\n ReplaceIgnoreCase.__init__(self, from_, to_, re.VERBOSE)\n self.regex = re.compile(re.escape(prepend + self.from_ + append))\n"},"path":{"kind":"string","value":"sandbox/lib/jumpscale/JumpscaleLibs/tools/fixer/FixerReplace.py"},"size":{"kind":"number","value":3927,"string":"3,927"},"nl_text":{"kind":"string","value":"ACTIONS R = Regex Replace RI = Regex Replace case insensitive if \"\\t\" in 
line: line = line.replace(\"\\t\",\" \") changed = True"},"nl_size":{"kind":"number","value":134,"string":"134"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6762816309928894,"string":"0.676282"}}},{"rowIdx":7837,"cells":{"content":{"kind":"string","value":"\"\"\"Client for Triton Inference Server using REST API.\n\nReferences:\n-\nhttps://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest\n-\nhttps://github.com/triton-inference-server/client/tree/master/src/python/examples\n-\nhttps://github.com/triton-inference-server/client/blob/master/src/python/library/tritonclient/http/__init__.py\n\"\"\"\n\nimport json\nimport time\nimport threading\nimport distribution\nimport clients.base_rest_client\nimport clients.utils\nimport tensorflow.compat.v1 as tf\nimport requests as r\nimport numpy as np\nimport tritonclient.http as triton_httpclient\nimport tritonclient.utils as triton_utils\nfrom tensorflow.python.framework import dtypes\n\n\nclass TritonRest(clients.base_rest_client.BaseRestClient):\n\n def generate_rest_request_from_dictionary(self, row_dict):\n triton_request_inputs = []\n for key, value in row_dict.items():\n t = clients.utils.get_type(value, self._default_float_type,\n self._default_int_type)\n if t == np.object_:\n value = clients.utils.map_multi_dimensional_list(\n value, lambda s: s.encode(\"utf-8\"))\n numpy_value = np.array(value, dtype=t)\n triton_request_input = triton_httpclient.InferInput(\n key, list(numpy_value.shape), triton_utils.np_to_triton_dtype(t))\n triton_request_input.set_data_from_numpy(\n numpy_value, binary_data=True) # binary_data=True by default\n triton_request_inputs.append(triton_request_input)\n # https://github.com/triton-inference-server/client/blob/530bcac5f1574aa2222930076200544eb274245c/src/python/library/tritonclient/http/__init__.py#L81\n # Returns tuple - request and request len to pass in Infer-Header-Content-Length header\n (request, json_size) = triton_httpclient._get_inference_request(\n inputs=triton_request_inputs,\n request_id=\"\",\n outputs=None,\n sequence_id=0,\n sequence_start=0,\n sequence_end=0,\n priority=0,\n timeout=None)\n\n headers = {}\n if json_size:\n headers[\"Inference-Header-Content-Length\"] = str(json_size)\n return (request, headers)\n\n def get_requests_from_dictionary(self, path):\n rows = []\n with tf.gfile.GFile(path, \"r\") as f:\n for line in f:\n row_dict = eval(line)\n rows.append(self.generate_rest_request_from_dictionary(row_dict))\n return rows\n\n def get_requests_from_tfrecord(self, path, count, batch_size):\n raise NotImplementedError()\n\n def get_requests_from_file(self, path):\n raise NotImplementedError()\n\n def get_uri(self):\n if self._host.startswith(\"http\"):\n return self._host\n else:\n # https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest\n if self._model_version:\n return f\"http://{self._host}:{self._port}/v2/models/{self._model_name}/versions/{self._model_version}/infer\"\n else:\n return f\"http://{self._host}:{self._port}/v2/models/{self._model_name}/infer\"\n"},"path":{"kind":"string","value":"clients/triton_rest.py"},"size":{"kind":"number","value":3001,"string":"3,001"},"nl_text":{"kind":"string","value":"Client for Triton Inference Server using REST 
API.\n\nReferences:\n-\nhttps://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest\n-\nhttps://github.com/triton-inference-server/client/tree/master/src/python/examples\n-\nhttps://github.com/triton-inference-server/client/blob/master/src/python/library/tritonclient/http/__init__.py\n\n binary_data=True by default https://github.com/triton-inference-server/client/blob/530bcac5f1574aa2222930076200544eb274245c/src/python/library/tritonclient/http/__init__.pyL81 Returns tuple - request and request len to pass in Infer-Header-Content-Length header https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.mdhttprest"},"nl_size":{"kind":"number","value":715,"string":"715"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.5379958748817444,"string":"0.537996"}}},{"rowIdx":7838,"cells":{"content":{"kind":"string","value":"# encoding: utf-8\n\"\"\"\nThis module defines the things that are used in setup.py for building JupyterLab\nThis includes:\n * Functions for finding things like packages, package data, etc.\n * A function for checking dependencies.\n\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport io\nimport json\nimport os\nimport pipes\nimport sys\nimport shutil\nimport tempfile\nimport os.path as osp\nfrom os.path import join as pjoin\n\nfrom distutils import log\nfrom distutils.cmd import Command\nfrom distutils.version import LooseVersion\nfrom setuptools.command.egg_info import egg_info\nfrom setuptools.command.bdist_egg import bdist_egg\nfrom subprocess import check_call\n\n\nif sys.platform == 'win32':\n from subprocess import list2cmdline\nelse:\n def list2cmdline(cmd_list):\n return ' '.join(map(pipes.quote, cmd_list))\n\n# the name of the project\nname = 'jupyterlab'\n\n\nhere = osp.dirname(osp.abspath(__file__))\nis_repo = osp.exists(pjoin(here, '.git'))\n\nversion_ns = {}\nwith io.open(pjoin(here, name, '_version.py'), encoding=\"utf8\") as f:\n exec(f.read(), {}, version_ns)\n\n\ndef run(cmd, *args, **kwargs):\n \"\"\"Echo a command before running it\"\"\"\n log.info('> ' + list2cmdline(cmd))\n kwargs['shell'] = (sys.platform == 'win32')\n return check_call(cmd, *args, **kwargs)\n\n\n#---------------------------------------------------------------------------\n# Find packages\n#---------------------------------------------------------------------------\n\ndef find_packages():\n \"\"\"\n Find all of the packages.\n \"\"\"\n packages = []\n for dir, subdirs, files in os.walk('jupyterlab'):\n if 'node_modules' in subdirs:\n subdirs.remove('node_modules')\n package = dir.replace(osp.sep, '.')\n if '__init__.py' not in files:\n # not a package\n continue\n packages.append(package)\n return packages\n\n\n#---------------------------------------------------------------------------\n# Find package data\n#---------------------------------------------------------------------------\n\ndef find_package_data():\n \"\"\"\n Find package_data.\n \"\"\"\n theme_dirs = []\n for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'themes')):\n slice_len = len('jupyterlab' + os.sep)\n theme_dirs.append(pjoin(dir[slice_len:], '*'))\n\n schema_dirs = []\n for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'schemas')):\n slice_len = len('jupyterlab' + os.sep)\n schema_dirs.append(pjoin(dir[slice_len:], '*'))\n\n return {\n 'jupyterlab': ['build/*', '*.js', 'package.app.json',\n 'yarn.lock', 'yarn.app.lock', '.yarnrc'\n ] + 
theme_dirs + schema_dirs\n }\n\n\ndef find_data_files():\n \"\"\"\n Find data_files.\n \"\"\"\n if not os.path.exists(pjoin('jupyterlab', 'build')):\n return []\n\n files = []\n\n static_files = os.listdir(pjoin('jupyterlab', 'build'))\n files.append(('share/jupyter/lab/static',\n ['jupyterlab/build/%s' % f for f in static_files]))\n\n for dir, subdirs, fnames in os.walk(pjoin('jupyterlab', 'schemas')):\n dir = dir.replace(os.sep, '/')\n schema_files = []\n for fname in fnames:\n schema_files.append('%s/%s' % (dir, fname))\n slice_len = len('jupyterlab/')\n files.append(('share/jupyter/lab/%s' % dir[slice_len:], schema_files))\n\n for dir, subdirs, fnames in os.walk(pjoin('jupyterlab', 'themes')):\n dir = dir.replace(os.sep, '/')\n themes_files = []\n for fname in fnames:\n themes_files.append('%s/%s' % (dir, fname))\n slice_len = len('jupyterlab/')\n files.append(('share/jupyter/lab/%s' % dir[slice_len:], themes_files))\n\n return files\n\n\ndef js_prerelease(command, strict=False):\n \"\"\"decorator for building minified js/css prior to another command\"\"\"\n class DecoratedCommand(command):\n\n def run(self):\n jsdeps = self.distribution.get_command_obj('jsdeps')\n if not is_repo and all(osp.exists(t) for t in jsdeps.targets):\n # sdist, nothing to do\n command.run(self)\n return\n\n try:\n self.distribution.run_command('jsdeps')\n except Exception as e:\n missing = [t for t in jsdeps.targets if not osp.exists(t)]\n if strict or missing:\n log.warn('js check failed')\n if missing:\n log.error('missing files: %s' % missing)\n raise e\n else:\n log.warn('js check failed (not a problem)')\n log.warn(str(e))\n command.run(self)\n return DecoratedCommand\n\n\ndef update_package_data(distribution):\n \"\"\"update build_py options to get package_data changes\"\"\"\n build_py = distribution.get_command_obj('build_py')\n build_py.finalize_options()\n\n\nclass CheckAssets(Command):\n description = 'check for required assets'\n\n user_options = []\n\n # Representative files that should exist after a successful build\n targets = [\n pjoin(here, 'jupyterlab', 'build', 'release_data.json'),\n pjoin(here, 'jupyterlab', 'build', 'main.bundle.js'),\n pjoin(here, 'jupyterlab', 'schemas', '@jupyterlab',\n 'shortcuts-extension', 'plugin.json'),\n pjoin(here, 'jupyterlab', 'themes', '@jupyterlab',\n 'theme-light-extension',\n 'images', 'jupyterlab.svg')\n ]\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n for t in self.targets:\n if not osp.exists(t):\n msg = 'Missing file: %s' % t\n raise ValueError(msg)\n\n target = pjoin(here, 'jupyterlab', 'build', 'release_data.json')\n with open(target) as fid:\n data = json.load(fid)\n\n if (LooseVersion(data['version']) !=\n LooseVersion(version_ns['__version__'])):\n msg = 'Release assets version mismatch, please run npm publish'\n raise ValueError(msg)\n\n # update package data in case this created new files\n update_package_data(self.distribution)\n\n\nclass bdist_egg_disabled(bdist_egg):\n \"\"\"Disabled version of bdist_egg\n Prevents setup.py install performing setuptools' default easy_install,\n which it should never ever do.\n \"\"\"\n def run(self):\n sys.exit(\"Aborting implicit building of eggs. 
Use `pip install .` to install from source.\")\n\n\nclass custom_egg_info(egg_info):\n \"\"\"Prune JavaScript folders from egg_info to avoid locking up pip.\n \"\"\"\n\n def run(self):\n folders = ['examples', 'packages', 'test', 'node_modules']\n folders = [f for f in folders if os.path.exists(pjoin(here, f))]\n tempdir = tempfile.mkdtemp()\n for folder in folders:\n shutil.move(pjoin(here, folder), tempdir)\n value = egg_info.run(self)\n for folder in folders:\n shutil.move(pjoin(tempdir, folder), here)\n shutil.rmtree(tempdir)\n return value\n"},"path":{"kind":"string","value":"setupbase.py"},"size":{"kind":"number","value":7123,"string":"7,123"},"nl_text":{"kind":"string","value":"Disabled version of bdist_egg\nPrevents setup.py install performing setuptools' default easy_install,\nwhich it should never ever do.\nPrune JavaScript folders from egg_info to avoid locking up pip.\n \nFind data_files.\nFind package_data.\nFind all of the packages.\ndecorator for building minified js/css prior to another command\nEcho a command before running it\nupdate build_py options to get package_data changes\nThis module defines the things that are used in setup.py for building JupyterLab\nThis includes:\n * Functions for finding things like packages, package data, etc.\n * A function for checking dependencies.\n\n encoding: utf-8 Copyright (c) Jupyter Development Team. Distributed under the terms of the Modified BSD License. the name of the project--------------------------------------------------------------------------- Find packages--------------------------------------------------------------------------- not a package--------------------------------------------------------------------------- Find package data--------------------------------------------------------------------------- sdist, nothing to do Representative files that should exist after a successful build update package data in case this created new files"},"nl_size":{"kind":"number","value":1241,"string":"1,241"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.683698832988739,"string":"0.683699"}}},{"rowIdx":7839,"cells":{"content":{"kind":"string","value":"# # SPDX-License-Identifier: MIT\n# from augur.augurplugin import AugurPlugin\n# from augur.application import Application\n\n# class HousekeeperPlugin(AugurPlugin):\n# \"\"\"\n# This plugin serves as an example as to how to load plugins into Augur\n# \"\"\"\n# def __init__(self, augur_app):\n# super().__init__(augur_app)\n# self.__housekeeper = self.__call__()\n\n# def __call__(self):\n# from .housekeeper import Housekeeper\n# return Housekeeper(\n# user=self._augur.read_config('Database', 'user', 'AUGUR_DB_USER', 'root'),\n# password=self._augur.read_config('Database', 'password', 'AUGUR_DB_PASS', 'password'),\n# host=self._augur.read_config('Database', 'host', 'AUGUR_DB_HOST', '127.0.0.1'),\n# port=self._augur.read_config('Database', 'port', 'AUGUR_DB_PORT', '3306'),\n# dbname=self._augur.read_config('Database', 'database', 'AUGUR_DB_NAME', 'msr14')\n# )\n\n\n# HousekeeperPlugin.augur_plugin_meta = {\n# 'name': 'housekeeper',\n# 'datasource': True\n# }\n# Application.register_plugin(HousekeeperPlugin)\n\n# __all__ = ['HousekeeperPlugin']"},"path":{"kind":"string","value":"augur/housekeeper/__init__.py"},"size":{"kind":"number","value":1173,"string":"1,173"},"nl_text":{"kind":"string","value":"SPDX-License-Identifier: MIT from augur.augurplugin import AugurPlugin from augur.application import Application class 
HousekeeperPlugin(AugurPlugin): \"\"\" This plugin serves as an example as to how to load plugins into Augur \"\"\" def __init__(self, augur_app): super().__init__(augur_app) self.__housekeeper = self.__call__() def __call__(self): from .housekeeper import Housekeeper return Housekeeper( user=self._augur.read_config('Database', 'user', 'AUGUR_DB_USER', 'root'), password=self._augur.read_config('Database', 'password', 'AUGUR_DB_PASS', 'password'), host=self._augur.read_config('Database', 'host', 'AUGUR_DB_HOST', '127.0.0.1'), port=self._augur.read_config('Database', 'port', 'AUGUR_DB_PORT', '3306'), dbname=self._augur.read_config('Database', 'database', 'AUGUR_DB_NAME', 'msr14') ) HousekeeperPlugin.augur_plugin_meta = { 'name': 'housekeeper', 'datasource': True } Application.register_plugin(HousekeeperPlugin) __all__ = ['HousekeeperPlugin']"},"nl_size":{"kind":"number","value":1116,"string":"1,116"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.30077236890792847,"string":"0.300772"}}},{"rowIdx":7840,"cells":{"content":{"kind":"string","value":"\"\"\" Video Link: https://youtu.be/1s-Tj65AKZA \"\"\"\nfrom seleniumbase import __version__\nfrom seleniumbase import BaseCase\n\n\nclass HackTests(BaseCase):\n def test_all_your_base_are_belong_to_us(self):\n # First make sure that seleniumbase 1.65.0 or newer is installed\n version = __version__.split(\".\")\n if version[0] == \"1\" and int(version[1]) < 65:\n raise Exception(\n \"This test requires minimum seleniumbase version: 1.65.0\"\n )\n self.set_window_size(1220, 740)\n ayb = \"ALL YOUR BASE\"\n abtu = \"ARE BELONG TO US\"\n aybabtu = \"%s %s\" % (ayb, abtu)\n sb_banner_logo = \"//seleniumbase.io/cdn/img/sb_logo_10.png\"\n sb_dashboard_logo = \"//seleniumbase.io/img/dash_pie_3.png\"\n yt_chip = \"#chips yt-chip-cloud-chip-renderer:nth-of-type\"\n wiki = \"https://en.wikipedia.org/wiki/All_your_base_are_belong_to_us\"\n\n self.open(wiki)\n self.click_if_visible('button[aria-label=\"Close\"]')\n self.set_text_content(\"h1#firstHeading\", aybabtu)\n self.set_text_content(\"#ca-history a\", aybabtu)\n self.set_text_content('#n-mainpage-description a', \"ALL\")\n self.set_text_content('#n-contents a', \"YOUR\")\n self.set_text_content('#n-currentevents a', \"BASE\")\n self.set_text_content('#n-randompage a', \"ARE\")\n self.set_text_content('#n-aboutsite a', \"BELONG\")\n self.set_text_content('#n-contactpage a', \"TO\")\n self.set_text_content('#n-sitesupport a', \"US\")\n self.set_text_content('.tocsection-1 span.toctext', \"ALL\")\n self.set_text_content('.tocsection-2 span.toctext', \"YOUR\")\n self.set_text_content('.tocsection-3 span.toctext', \"BASE\")\n self.set_text_content('.tocsection-4 span.toctext', \"ARE\")\n self.set_text_content('.tocsection-5 span.toctext', \"BELONG\")\n self.set_text_content('.tocsection-6 span.toctext', \"TO\")\n self.set_text_content('.tocsection-7 span.toctext', \"US\")\n self.highlight(\"h1#firstHeading\", loops=2, scroll=False)\n self.highlight(\"#ca-history a\", loops=2, scroll=False)\n self.highlight(\"nav#p-navigation\", loops=2, scroll=False)\n self.highlight(\"div#toc\", loops=2, scroll=False)\n self.highlight('.tocsection-1 span.toctext', loops=1, scroll=False)\n self.highlight('.tocsection-2 span.toctext', loops=1, scroll=False)\n self.highlight('.tocsection-3 span.toctext', loops=2, scroll=False)\n self.highlight('.tocsection-4 span.toctext', loops=1, scroll=False)\n self.highlight('.tocsection-5 span.toctext', loops=1, scroll=False)\n 
self.highlight('.tocsection-6 span.toctext', loops=1, scroll=False)\n self.highlight('.tocsection-7 span.toctext', loops=2, scroll=False)\n zoom_in = 'div.thumbinner{zoom: 1.4;-moz-transform: scale(1.4);}'\n self.add_css_style(zoom_in)\n self.highlight(\"div.thumbinner\", loops=8, scroll=False)\n\n self.open(\"https://www.apple.com/store\")\n self.set_text_content(\"div.rs-shop-subheader\", aybabtu)\n self.set_text_content('#shelf-1 a[href*=\"mac\"]', \"ALL\")\n self.set_text_content('#shelf-1 a[href*=\"iphone\"]', \"YOUR\")\n self.set_text_content('#shelf-1 a[href*=\"ipad\"]', \"BASE\")\n self.set_text_content('#shelf-1 a[href*=\"watch\"]', \"ARE\")\n self.set_text_content('#shelf-1 a[href*=\"airpods\"]', \"BELONG\")\n self.set_text_content('#shelf-1 a[href*=\"airtag\"]', \"TO\")\n self.set_text_content('#shelf-1 a[href*=\"tv\"]', \"US\")\n self.set_text_content('#shelf-1 a[href*=\"homepod\"]', \".\")\n self.set_text_content(\"h2\", aybabtu + \". \")\n self.highlight(\"div.rs-shop-subheader\", loops=6, scroll=False)\n self.highlight(\"#shelf-1\", loops=2, scroll=False)\n self.highlight('#shelf-1 a[href*=\"mac\"]', loops=1, scroll=False)\n self.highlight('#shelf-1 a[href*=\"iphone\"]', loops=1, scroll=False)\n self.highlight('#shelf-1 a[href*=\"ipad\"]', loops=3, scroll=False)\n self.highlight('#shelf-1 a[href*=\"watch\"]', loops=1, scroll=False)\n self.highlight('#shelf-1 a[href*=\"airpods\"]', loops=1, scroll=False)\n self.highlight('#shelf-1 a[href*=\"airtag\"]', loops=1, scroll=False)\n self.highlight('#shelf-1 a[href*=\"tv\"]', loops=3, scroll=False)\n self.highlight(\"h2\", loops=9, scroll=False)\n\n self.open(\"https://google.com/ncr\")\n self.set_text_content('a[href*=\"about.google\"]', ayb)\n self.set_text_content('a[href*=\"store.google\"]', abtu)\n self.set_text_content('a[href*=\"mail.google.com\"]', ayb)\n self.set_text_content('a[href*=\"google.com/img\"]', abtu)\n self.set_attributes('[value=\"Google Search\"]', \"value\", ayb)\n self.set_attributes('[value=\"I\\'m Feeling Lucky\"]', \"value\", abtu)\n zoom_in = 'a{zoom: 1.2;-moz-transform: scale(1.2);}'\n self.add_css_style(zoom_in)\n zoom_in = (\n '[value=\"ALL YOUR BASE\"]{zoom: 1.3;-moz-transform: scale(1.3);}'\n '[value=\"ARE BELONG TO US\"]{zoom: 1.3;-moz-transform: scale(1.3);}'\n )\n self.add_css_style(zoom_in)\n self.highlight('a[href*=\"about.google\"]', loops=3)\n self.highlight('a[href*=\"store.google\"]', loops=3)\n self.highlight('a[href*=\"mail.google.com\"]', loops=3)\n self.highlight('a[href*=\"google.com/img\"]', loops=3)\n self.highlight('form[role=\"search\"]', loops=8)\n\n self.open(\"https://twitter.com/\")\n if not self.is_element_visible('a[href*=\"w/signup\"] span'):\n self.refresh()\n if self.is_element_visible('a[href*=\"w/signup\"] span'):\n self.set_text_content('a[href*=\"w/signup\"] span', aybabtu)\n self.highlight('a[href*=\"w/signup\"] span', loops=6, scroll=False)\n self.highlight('a[href*=\"w/signup\"]', loops=6, scroll=False)\n\n self.open(\"https://www.youtube.com/\")\n self.set_text_content('%s(1)' % yt_chip, \"ALL\")\n self.set_text_content('%s(2)' % yt_chip, \"YOUR\")\n self.set_text_content('%s(3)' % yt_chip, \"BASE\")\n self.set_text_content('%s(4)' % yt_chip, \"ARE\")\n self.set_text_content('%s(5)' % yt_chip, \"BELONG\")\n self.set_text_content('%s(6)' % yt_chip, \"TO\")\n self.set_text_content('%s(7)' % yt_chip, \"US\")\n self.set_text_content('%s(8)' % yt_chip, \"!\")\n self.set_text_content('%s(9)' % yt_chip, \"!\")\n self.set_text_content('%s(10)' % yt_chip, \"!\")\n 
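        # yt_chip is the selector prefix '#chips yt-chip-cloud-chip-renderer:nth-of-type',
        # so '%s(1)' % yt_chip targets the first filter chip on the YouTube home
        # page, '%s(2)' the second, and so on. The click_if_visible() calls below
        # only click when the element is actually present, so a missing consent
        # or promo dialog does not fail the test.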
self.click_if_visible(\"#dismiss-button\")\n self.click_if_visible('button[aria-label=\"Close\"]')\n self.highlight(\"#scroll-container\", loops=5, scroll=False)\n self.highlight('%s(1)' % yt_chip, loops=1, scroll=False)\n self.highlight('%s(2)' % yt_chip, loops=1, scroll=False)\n self.highlight('%s(3)' % yt_chip, loops=3, scroll=False)\n self.highlight('%s(4)' % yt_chip, loops=1, scroll=False)\n self.highlight('%s(5)' % yt_chip, loops=1, scroll=False)\n self.highlight('%s(6)' % yt_chip, loops=1, scroll=False)\n self.highlight('%s(7)' % yt_chip, loops=3, scroll=False)\n self.highlight(\"#scroll-container\", loops=7, scroll=False)\n\n self.open(\"https://github.com/features/actions\")\n self.set_text_content('a[href=\"/team\"]', ayb)\n self.set_text_content('a[href=\"/enterprise\"]', abtu)\n self.set_text_content('h1 span:nth-child(1)', ayb)\n self.set_text_content('h1 span:nth-of-type(2)', \"ARE\")\n self.set_text_content('h1 span:nth-of-type(3)', \"BELONG\")\n self.set_text_content('h1 span:nth-of-type(4)', \"TO\")\n self.set_text_content('h1 span:nth-of-type(5)', \"US\")\n self.type('input[name=\"q\"]', aybabtu.lower())\n self.click(\"h1\", scroll=False)\n self.highlight(\"nav\", loops=5, scroll=False)\n self.highlight('input[name=\"q\"]', loops=5, scroll=False)\n self.highlight(\"h1\", loops=8, scroll=False)\n\n self.open(\"https://dev.to/top/infinity\")\n self.click_if_visible('button[aria-label=\"Close campaign banner\"]')\n self.set_text_content('nav a[data-text=\"Relevant\"]', \"ALL\")\n self.set_text_content('nav a[data-text=\"Latest\"]', \"YOUR\")\n self.set_text_content('nav a[data-text=\"Top\"]', \"BASE\")\n self.set_text_content('nav a[data-text=\"Week\"]', \"ARE\")\n self.set_text_content('nav a[data-text=\"Month\"]', \"BELONG\")\n self.set_text_content('nav a[data-text=\"Year\"]', \"TO\")\n self.set_text_content('nav a[data-text=\"Infinity\"]', \"US\")\n self.set_text_content('aside a[class*=\"tful\"]', aybabtu)\n self.set_text_content('aside a[aria-label=\"Create new account\"]', ayb)\n self.set_text_content('aside a[aria-label=\"Log in\"]', abtu)\n self.set_text_content('aside a[class*=\"tful\"]:nth-child(2)', aybabtu)\n self.set_text_content('aside a[class*=\"tful\"]:nth-child(3)', aybabtu)\n self.set_text_content('aside a[class*=\"tful\"]:nth-child(4)', aybabtu)\n self.set_text_content('aside a[class*=\"tful\"]:nth-child(5)', aybabtu)\n self.set_attribute(\"a.crayons-avatar img\", \"src\", sb_dashboard_logo)\n self.set_text_content('.profile-preview-card button', \"SeleniumBase\")\n self.set_text_content('h2.crayons-story__title a', aybabtu)\n self.type('input[name=\"q\"]', aybabtu)\n self.highlight('input[name=\"q\"]', loops=4, scroll=False)\n self.highlight('[aria-label=\"Primary sidebar\"] div div', scroll=False)\n self.highlight('nav a[data-text=\"Relevant\"]', loops=1, scroll=False)\n self.highlight('nav a[data-text=\"Latest\"]', loops=1, scroll=False)\n self.highlight('nav a[data-text=\"Top\"]', loops=2, scroll=False)\n self.highlight('nav a[data-text=\"Week\"]', loops=1, scroll=False)\n self.highlight('nav a[data-text=\"Month\"]', loops=1, scroll=False)\n self.highlight('nav a[data-text=\"Year\"]', loops=1, scroll=False)\n self.highlight('nav a[data-text=\"Infinity\"]', loops=2, scroll=False)\n self.highlight('aside[id*=\"sidebar\"] section', loops=5, scroll=False)\n self.highlight(\"div.crayons-story__body\", loops=7, scroll=False)\n\n self.open(\"https://azure.microsoft.com/en-us/services/playfab/\")\n self.set_text_content(\"h1\", aybabtu)\n 
self.set_text_content('a[aria-label*=\"Try PlayF\"]', ayb)\n self.set_text_content('a[aria-label*=\"Sign in to\"]', abtu)\n self.set_text_content('span:contains(\"Chat with Sales\")', aybabtu)\n self.highlight(\"h1\", loops=6, scroll=False)\n self.highlight('a[aria-label*=\"Try PlayF\"]', loops=4, scroll=False)\n self.highlight('a[aria-label*=\"Sign in to\"]', loops=4, scroll=False)\n self.highlight('button#live-engage-btn', loops=6, scroll=False)\n\n self.open(\"https://www.snapchat.com/\")\n self.set_text_content(\"h1\", ayb)\n self.set_text_content(\"form .button-large span span\", abtu)\n zoom_in = 'a.button-large span{zoom: 1.2;-moz-transform: scale(1.2);}'\n self.add_css_style(zoom_in)\n self.highlight(\"h1\", loops=6, scroll=False)\n self.highlight(\"form .button-large span span\", loops=8, scroll=False)\n\n self.open(\"https://store.steampowered.com/\")\n self.set_text_content('div.content a[href*=\"/about/\"]', \" \")\n self.set_text_content('div.content a[href*=\"help.steam\"]', aybabtu)\n self.set_text_content(\"#foryou_tab a\", \"ALL\")\n self.set_text_content(\"#noteworthy_tab a\", \"YOUR BASE\")\n self.set_text_content(\"#genre_tab a\", \"ARE\")\n self.set_text_content('span:contains(\"Points Shop\")', \"BELONG\")\n self.set_text_content('span:contains(\"News\")', \"TO\")\n self.set_text_content('span:contains(\"Labs\")', \"US\")\n self.set_value(\"input#store_nav_search_term\", ayb + \" . . . .\")\n self.highlight('div.content a[href*=\"help.steam\"]', loops=6)\n self.highlight('#store_nav_area', loops=2, scroll=False)\n self.highlight(\"#foryou_tab a\", loops=1, scroll=False)\n self.highlight(\"#noteworthy_tab a\", loops=3, scroll=False)\n self.highlight(\"#genre_tab a\", loops=1, scroll=False)\n self.highlight('span:contains(\"BELONG\")', loops=1, scroll=False)\n self.highlight('span:contains(\"TO\")', loops=1, scroll=False)\n self.highlight('span:contains(\"US\")', loops=2, scroll=False)\n self.js_click('input[id*=\"nav_search\"]')\n self.highlight('input[id*=\"nav_search\"]', loops=6, scroll=False)\n\n self.open(\"https://xkcd.com/286/\")\n self.set_text_content('a[href=\"/archive\"]', \"ALL\")\n self.set_text_content('a[href*=\"what-if\"]', \"YOUR\")\n self.set_text_content('a[href*=\"//blag.\"]', \"BASE\")\n self.set_text_content('a[href*=\"/about\"]', abtu)\n self.remove_element('li:contains(\"Feed\")')\n self.remove_element('li:contains(\"TW\")')\n self.remove_element('li:contains(\"Books\")')\n self.remove_element('li:contains(\"What\")')\n self.remove_element('li:contains(\"WI\")')\n self.set_attributes(\"#news img\", \"src\", sb_banner_logo)\n self.set_text_content('#ctitle', aybabtu)\n self.set_text_content('a[rel=\"prev\"]', \"All\")\n self.set_text_content('a[href*=\"random\"]', \"Your\")\n self.set_text_content('a[rel=\"next\"]', \"Base\")\n self.highlight(\"#topLeft ul\", loops=5, scroll=False)\n self.highlight('a[href=\"/archive\"]', loops=1, scroll=False)\n self.highlight('a[href*=\"what-if\"]', loops=1, scroll=False)\n self.highlight('a[href*=\"//blag.\"]', loops=2, scroll=False)\n self.highlight('a[href*=\"/about\"]', loops=5, scroll=False)\n self.highlight('a[rel=\"prev\"]', loops=1, scroll=False)\n self.highlight('a[href*=\"random\"]', loops=1, scroll=False)\n self.highlight('a[rel=\"next\"]', loops=3, scroll=False)\n self.highlight(\"#ctitle\", loops=7, scroll=False)\n\n self.open(\"https://www.nintendo.com/whatsnew/\")\n self.set_text_content('button[aria-label=\"Search\"]', aybabtu)\n self.set_text_content('button[data-section=\"newsevents\"]', 
aybabtu)\n self.set_text_content(\"h2\", aybabtu)\n self.highlight('div.search-flex', loops=4, scroll=False)\n self.highlight('button[data-section*=\"news\"]', loops=4, scroll=False)\n self.highlight(\"h2\", loops=6, scroll=False)\n\n self.open(\"https://support.gog.com/hc/en-us?product=gog\")\n self.set_text_content(\"div.intro-title\", aybabtu)\n self.set_text_content(\"h4\", aybabtu)\n self.highlight(\"div.intro-title\", loops=8, scroll=False)\n self.highlight(\"h4\", loops=8, scroll=False)\n\n self.open(\"https://slack.com/help/articles/204714258-Giphy-for-Slack\")\n self.set_text_content(\"h1\", aybabtu)\n self.set_text_content('a[prettyslug=\"getting-started\"]', \"ALL\")\n self.set_text_content('a[prettyslug=\"using-slack\"]', \"YOUR\")\n self.set_text_content('a[prettyslug=\"your-profile\"]', \"BASE\")\n self.set_text_content('a[prettyslug=\"connect-tools\"]', \"ARE\")\n self.set_text_content('a[prettyslug=\"administration\"]', \"BELONG\")\n self.set_text_content('a[prettyslug=\"tutorials\"]', \"TO US\")\n self.highlight(\"h1\", loops=4, scroll=False)\n self.highlight(\"div#global_menu\", loops=2, scroll=False)\n self.highlight('a[prettyslug*=\"g-started\"]', loops=1, scroll=False)\n self.highlight('a[prettyslug=\"using-slack\"]', loops=1, scroll=False)\n self.highlight('a[prettyslug=\"your-profile\"]', loops=2, scroll=False)\n self.highlight('a[prettyslug=\"connect-tools\"]', loops=1, scroll=False)\n self.highlight('a[prettyslug=\"administration\"]', loops=1, scroll=False)\n self.highlight('a[prettyslug=\"tutorials\"]', loops=2, scroll=False)\n\n self.open(\"https://kubernetes.io/\")\n self.set_text_content('nav a[href=\"/docs/\"]', \"ALL\")\n self.set_text_content('nav a[href=\"/blog/\"]', \"YOUR\")\n self.set_text_content('nav a[href=\"/training/\"]', \"BASE\")\n self.set_text_content('nav a[href=\"/partners/\"]', \"ARE\")\n self.set_text_content('nav a[href=\"/community/\"]', \"BELONG\")\n self.set_text_content('nav a[href=\"/case-studies/\"]', \"TO\")\n self.set_text_content('nav #navbarDropdown', \"US\")\n self.set_text_content('nav #navbarDropdownMenuLink', \".\")\n if self.is_element_visible(\"h1\"):\n self.set_text_content(\"h1\", aybabtu)\n self.highlight(\"nav ul.navbar-nav\", loops=3, scroll=False)\n self.highlight('nav a[href=\"/docs/\"]', loops=1, scroll=False)\n self.highlight('nav a[href=\"/blog/\"]', loops=1, scroll=False)\n self.highlight('nav a[href=\"/training/\"]', loops=2, scroll=False)\n self.highlight('nav a[href=\"/partners/\"]', loops=1, scroll=False)\n self.highlight('nav a[href=\"/community/\"]', loops=1, scroll=False)\n self.highlight('nav a[href=\"/case-studies/\"]', loops=1, scroll=False)\n self.highlight('nav #navbarDropdown', loops=2, scroll=False)\n if self.is_element_visible(\"h1\"):\n self.highlight('h1', loops=6, scroll=False)\n\n self.open(\"https://www.selenium.dev/\")\n self.set_attributes(\"a.dropdown-toggle\", \"class\", \"nav-link\")\n self.set_text_content('li a:contains(\"About\")', \"ALL\")\n self.set_text_content('li a:contains(\"Downloads\")', \"YOUR\")\n self.set_text_content('li a:contains(\"Documentation\")', \"BASE\")\n self.set_text_content('li a:contains(\"Projects\")', \"ARE\")\n self.set_text_content('li a:contains(\"Support\")', \"BELONG\")\n self.set_text_content('li a:contains(\"Blog\")', \"TO\")\n self.set_text_content('li a:contains(\"English\")', \"US\")\n self.set_text_content(\"div.lead\", aybabtu)\n self.set_text_content(\"h2\", aybabtu)\n zoom_in = 'div.lead{zoom: 1.25;-moz-transform: scale(1.25);}'\n 
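        # add_css_style() injects this rule into the current page so div.lead is
        # rendered scaled up; the -moz-transform fallback covers Firefox, which
        # ignores the non-standard zoom property.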
self.add_css_style(zoom_in)\n self.highlight(\"div#main_navbar\", loops=1, scroll=False)\n self.highlight('li a:contains(\"ALL\")', loops=1, scroll=False)\n self.highlight('li a:contains(\"YOUR\")', loops=1, scroll=False)\n self.highlight('li a:contains(\"BASE\")', loops=2, scroll=False)\n self.highlight('li a:contains(\"ARE\")', loops=1, scroll=False)\n self.highlight('li a:contains(\"BELONG\")', loops=1, scroll=False)\n self.highlight('li a:contains(\"TO\")', loops=1, scroll=False)\n self.highlight('li a:contains(\"US\")', loops=2, scroll=False)\n self.highlight(\"div.lead\", loops=6, scroll=False)\n self.highlight(\"h2\", loops=8, scroll=False)\n\n self.open(\"https://www.python.org/\")\n self.set_text_content('a[class=\"donate-button\"]', ayb)\n self.set_text_content(\"#about a\", \"ALL\")\n self.set_text_content(\"#downloads a\", \"YOUR\")\n self.set_text_content(\"#documentation a\", \"BASE\")\n self.set_text_content(\"#community a\", \"ARE\")\n self.set_text_content(\"#success-stories a\", \"BELONG\")\n self.set_text_content(\"#news a\", \"TO\")\n self.set_text_content(\"#events a\", \"US\")\n self.highlight('a[class=\"donate-button\"]', loops=4, scroll=False)\n self.highlight(\"nav#mainnav\", loops=5, scroll=False)\n self.highlight(\"#about a\", loops=1, scroll=False)\n self.highlight(\"#downloads a\", loops=1, scroll=False)\n self.highlight(\"#documentation a\", loops=2, scroll=False)\n self.highlight(\"#community a\", loops=1, scroll=False)\n self.highlight(\"#success-stories a\", loops=1, scroll=False)\n self.highlight(\"#news a\", loops=1, scroll=False)\n self.highlight(\"#events a\", loops=2, scroll=False)\n\n self.open(\"https://docs.pytest.org/\")\n self.set_text_content(\"h1\", \"pytest: \" + aybabtu)\n self.highlight(\"h1\", loops=10, scroll=False)\n\n self.open(\"https://wordpress.com/\")\n self.set_text_content('a[title=\"Plans & Pricing\"]', aybabtu)\n self.set_text_content('a[title=\"Get Started\"]', ayb)\n self.set_text_content(\"p.no-widows\", aybabtu)\n self.set_text_content(\"a#lpc-button\", \"Automate with SeleniumBase\")\n self.highlight('a[title=\"Plans & Pricing\"]', loops=6, scroll=False)\n self.highlight('a[title=\"Get Started\"]', loops=4, scroll=False)\n self.highlight(\"p.no-widows\", loops=8, scroll=False)\n self.highlight(\"a#lpc-button\", loops=4, scroll=False)\n\n self.open(\"https://seleniumbase.com/\")\n self.set_text_content(\"h1\", aybabtu)\n self.highlight(\"h1\", loops=10, scroll=False)\n\n self.open(\"https://pypi.org/\")\n self.set_text_content('a[href=\"/sponsors/\"]', aybabtu)\n self.set_text_content(\"h1\", aybabtu)\n self.set_value(\"input#search\", aybabtu, scroll=False)\n self.highlight('a[href=\"/sponsors/\"]', loops=6, scroll=False)\n self.highlight(\"h1\", loops=6, scroll=False)\n self.highlight(\"input#search\", loops=8, scroll=False)\n\n self.open(\"https://www.atlassian.com/software/jira\")\n self.set_text_content('a[href*=\"jira/pricing\"]', ayb)\n self.set_text_content('a[href*=\"jira/enterprise\"]', abtu)\n self.set_text_content('a[href=\"/software/jira/features\"]', \"\")\n self.set_text_content('a[href=\"/software/jira/guides\"]', \"\")\n self.set_text_content(\"h1\", ayb)\n self.set_text_content('div.xs-none-bottom a[href*=\"free\"]', abtu)\n self.highlight(\"ul.imkt-navbar__link-list\", loops=2, scroll=False)\n self.highlight('a[href*=\"jira/pricing\"]', loops=3, scroll=False)\n self.highlight('a[href*=\"jira/enterprise\"]', loops=3, scroll=False)\n self.highlight(\"h1\", loops=3, scroll=False)\n 
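        # highlight() flashes a colored outline around the matched element for the
        # given number of loops; scroll=False keeps the current viewport instead of
        # scrolling the element into view first.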
self.highlight('div.xs-none-bottom a[href*=\"free\"]', scroll=False)\n\n self.open(\"https://status.iboss.com/ibcloud/app/cloudStatus.html\")\n self.set_text_content('div[translate*=\"cloudStatus\"]', ayb)\n self.set_text_content('div[translate*=\"maintenance\"]', \"ARE\")\n self.set_text_content('div[translate*=\"advisory\"]', \"BELONG\")\n self.set_text_content('div[translate*=\"incident\"]', \"TO US\")\n self.set_text_content(\"h1\", \"Cloud Status - \" + aybabtu)\n self.highlight(\"nav div.ibcloud-header-contents\", loops=3)\n self.highlight('div[translate*=\"cloudStatus\"]', loops=4)\n self.highlight('div[translate*=\"maintenance\"]', loops=1)\n self.highlight('div[translate*=\"advisory\"]', loops=1)\n self.highlight('div[translate*=\"incident\"]', loops=3)\n self.highlight(\"h1\", loops=9, scroll=False)\n\n self.open(\"https://git-scm.com/\")\n self.set_text_content(\"span#tagline\", aybabtu)\n self.set_text_content(\"#nav-about h3\", ayb)\n self.set_text_content(\"#nav-documentation h3\", abtu)\n self.highlight(\"span#tagline\", loops=8, scroll=False)\n self.highlight(\"#nav-about h3\", loops=5, scroll=False)\n self.highlight(\"#nav-documentation h3\", loops=6, scroll=False)\n\n self.open(\"https://teamtreehouse.com/\")\n self.set_text_content(\"li.nav-item-free-trial\", aybabtu)\n self.set_text_content(\"h1\", aybabtu)\n self.set_text_content(\"h2\", aybabtu)\n self.set_text_content(\"p.homepage-signup-form-banner\", aybabtu)\n self.highlight(\"li.nav-item-free-trial\", loops=6, scroll=False)\n self.highlight(\"h1\", loops=6, scroll=False)\n self.highlight('p[class*=\"signup-form\"]', loops=8, scroll=False)\n\n self.open(\"https://pragprog.com/\")\n self.set_text_content(\"header p\", aybabtu)\n zoom_in = 'header p{zoom: 1.35;-moz-transform: scale(1.35);}'\n self.add_css_style(zoom_in)\n self.highlight(\"header p\", loops=10, scroll=False)\n\n self.open(\"https://seleniumbase.io/\")\n self.set_text_content(\"h1\", aybabtu)\n self.highlight(\"h1\", loops=10, scroll=False)\n"},"path":{"kind":"string","value":"examples/hack_the_planet.py"},"size":{"kind":"number","value":23602,"string":"23,602"},"nl_text":{"kind":"string","value":"Video Link: https://youtu.be/1s-Tj65AKZA \n\n First make sure that seleniumbase 1.65.0 or newer is installed"},"nl_size":{"kind":"number","value":106,"string":"106"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7373580932617188,"string":"0.737358"}}},{"rowIdx":7841,"cells":{"content":{"kind":"string","value":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.model_selection import KFold\r\nimport plotly.express as px\r\nfrom plotly.subplots import make_subplots\r\nimport plotly.graph_objects as go\r\n\r\n\r\n# import data and preprocess it\r\ndef preprocessing(file_name: str):\r\n\r\n # data import\r\n fish_df = pd.read_csv(file_name)\r\n fish_df = pd.get_dummies(fish_df, columns=['Species'], prefix='Species')\r\n\r\n return fish_df\r\n\r\n\r\n# train-test split by a percentage.\r\n# input: dataframe, label column name, split ration, and random state\r\n# returns: x_train, x_test, y_train, y_test\r\ndef split_df(user_df: pd.DataFrame, label_name: str, split_ratio=0.8, random_value=42):\r\n\r\n x_train = user_df.sample(frac=split_ratio, random_state=random_value)\r\n x_test = user_df.drop(x_train.index)\r\n\r\n return x_train.drop(label_name, axis=1), x_test.drop(label_name, axis=1), pd.DataFrame(\r\n x_train[label_name]), 
pd.DataFrame(x_test[label_name])\r\n\r\n\r\n# Create as arrays of trees in a given size and depth\r\ndef create_random_forest(forest_size: int, max_depth: int, random_state_local: int):\r\n\r\n random_forest = []\r\n for i in range(0, forest_size, 1):\r\n\r\n random_forest.append(DecisionTreeRegressor(criterion='friedman_mse', max_depth=max_depth,\r\n random_state=random_state_local))\r\n\r\n return random_forest\r\n\r\n\r\n# train trees in a forest by fitting each tree to the previous tree's error\r\n# input: forest of trees, initial training guess, x and y databases, alpha coefficient.\r\n# returns: trained forest, initial average value, r_matrix of solutions and mse_list of the results (mean square error)\r\ndef train_forest(random_forest: list, initial_average_weight: float, x_df: pd.DataFrame, y_df: pd.DataFrame,\r\n alpha: float = 0.1):\r\n\r\n # initial average weight and residuals to be used in the 1st tree\r\n predictions = np.ones(len(y_df))*initial_average_weight\r\n residuals = np.array(y_df['Weight'])-predictions\r\n residuals_matrix = [residuals]\r\n\r\n # calculates the first mse value\r\n mse_list = [(np.square(residuals)).sum()/len(predictions)]\r\n\r\n for tree in random_forest:\r\n\r\n # train the current stump\r\n tree.fit(x_df, residuals)\r\n\r\n # predict results based on its training error\r\n residuals = tree.predict(x_df)\r\n\r\n # record residuals and calculate mse\r\n residuals_matrix.append(residuals)\r\n mse_list.append((np.square(residuals)).sum()/len(predictions))\r\n\r\n # update predictions and calculate new residuals\r\n predictions = predictions + alpha * residuals\r\n residuals = np.array(y_df['Weight']) - predictions\r\n\r\n return random_forest, predictions, residuals_matrix, mse_list\r\n\r\n\r\n# predict test database by the trained random forest\r\n# input: forest of trees, initial training guess, x and y databases.\r\n# returns: mse_list of the forest (mean square error)\r\ndef test_forest(random_forest: list, initial_average_weight: float, x_df: pd.DataFrame, y_df: pd.DataFrame,\r\n alpha: float = 0.1):\r\n\r\n predictions = np.ones(len(y_df))*initial_average_weight\r\n mse_list = [(np.square(np.array(y_df['Weight']) - predictions)).sum()/len(predictions)]\r\n\r\n for tree in random_forest:\r\n\r\n predictions = predictions + alpha * tree.predict(x_df)\r\n mse_list.append((np.square(np.array(y_df['Weight']) - predictions)).sum()//len(predictions))\r\n\r\n return predictions, mse_list\r\n\r\n\r\ndef main():\r\n\r\n # data import and preprocessing\r\n fish_df = preprocessing(\"Fish.csv\")\r\n\r\n # splitting of the data\r\n x_train, x_test, y_train, y_test = split_df(fish_df, 'Weight', 0.8, 42)\r\n\r\n # setting up a random forest:\r\n #forest_size_list = [4, 5, 6, 7, 8] # variable calibrated by KFold train-validate\r\n forest_size = 20\r\n # max_depth_list = [1, 2, 3, 4, 5] # variable calibrated by KFold train-validate\r\n max_depth = 3\r\n random_state_local = 42\r\n random_forest = create_random_forest(forest_size, max_depth, random_state_local)\r\n\r\n #%% Train\r\n #alpha_list = [0.1, 0.3, 0.5, 0.7, 0.9] # variable calibrated by KFold train-validate\r\n alpha = 0.5 # gradiant coefficient\r\n \r\n kf = KFold(n_splits=2, shuffle=True, random_state=42)\r\n for train_index, test_index in kf.split(x_train, y_train):\r\n\r\n X_train, X_validate = x_train.iloc[train_index], x_train.iloc[test_index]\r\n Y_train, Y_validate = y_train.iloc[train_index], y_train.iloc[test_index]\r\n\r\n # first guess\r\n initial_average_weight = 
np.average(Y_train['Weight'].tolist())\r\n\r\n # train forest\r\n random_forest, predictions_train, r_matrix, mse_list_train = train_forest(random_forest, initial_average_weight,\r\n X_train, Y_train, alpha)\r\n\r\n # validate\r\n predictions_validate, mse_list_validate = test_forest(random_forest, initial_average_weight, X_validate,\r\n Y_validate, alpha)\r\n\r\n results = pd.DataFrame(data=np.arange(0, forest_size+1, 1), columns=['tree_intervals'])\r\n results['Train'] = mse_list_train\r\n results['Validation'] = mse_list_validate\r\n fig = px.scatter(results, x='tree_intervals', y=['Train', 'Validation'], size='tree_intervals')\r\n fig.update_layout(xaxis_title=\"Amount of Intervals (num.)\", yaxis_title=\"mean square error\")\r\n fig.show()\r\n\r\n #%% Test\r\n predictions_test, mse_list_test = test_forest(random_forest, initial_average_weight, x_test, y_test, alpha)\r\n\r\n # %% plot success rate vs tree intervals\r\n fig = make_subplots(rows=1, cols=3, subplot_titles=('Train', 'Validation', 'Test'),\r\n x_title='Amount of Intervals (num.)', y_title='mean square error')\r\n\r\n results = pd.DataFrame(data=np.arange(0, forest_size+1, 1), columns=['tree_intervals'])\r\n results['Train'] = mse_list_train\r\n fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Train'], name='Train'), row=1, col=1)\r\n\r\n results = pd.DataFrame(data=np.arange(0, forest_size + 1, 1), columns=['tree_intervals'])\r\n results['Validation'] = mse_list_validate\r\n fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Validation'], name='Validation'), row=1, col=2)\r\n\r\n results = pd.DataFrame(data=np.arange(0, forest_size + 1, 1), columns=['tree_intervals'])\r\n results['Test'] = mse_list_test\r\n fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Test'], name='Test'), row=1, col=3)\r\n\r\n fig.update_layout(title_text=\"Random Forest Gradient Boosting\")\r\n fig.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"},"path":{"kind":"string","value":"gradient-boosting/main.py"},"size":{"kind":"number","value":6766,"string":"6,766"},"nl_text":{"kind":"string","value":"import data and preprocess it data import train-test split by a percentage. input: dataframe, label column name, split ration, and random state returns: x_train, x_test, y_train, y_test Create as arrays of trees in a given size and depth train trees in a forest by fitting each tree to the previous tree's error input: forest of trees, initial training guess, x and y databases, alpha coefficient. returns: trained forest, initial average value, r_matrix of solutions and mse_list of the results (mean square error) initial average weight and residuals to be used in the 1st tree calculates the first mse value train the current stump predict results based on its training error record residuals and calculate mse update predictions and calculate new residuals predict test database by the trained random forest input: forest of trees, initial training guess, x and y databases. 
returns: mse_list of the forest (mean square error) data import and preprocessing splitting of the data setting up a random forest:forest_size_list = [4, 5, 6, 7, 8] variable calibrated by KFold train-validate max_depth_list = [1, 2, 3, 4, 5] variable calibrated by KFold train-validate%% Trainalpha_list = [0.1, 0.3, 0.5, 0.7, 0.9] variable calibrated by KFold train-validate gradiant coefficient first guess train forest validate%% Test %% plot success rate vs tree intervals"},"nl_size":{"kind":"number","value":1359,"string":"1,359"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8060593605041504,"string":"0.806059"}}},{"rowIdx":7842,"cells":{"content":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http:# www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nDJANGO_APPS = [ \"kafka\" ]\nREQUIRES_HADOOP = False\nMENU_INDEX = 100\nNICE_NAME = \"Kafka\"\nICON = \"kafka/art/icon_kafka_24.png\"\nIS_URL_NAMESPACED = True\n\nPROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))\nMETRICS_INI = os.path.join(PROJECT_ROOT, 'metrics.ini')\n"},"path":{"kind":"string","value":"kafka/src/kafka/settings.py"},"size":{"kind":"number","value":1057,"string":"1,057"},"nl_text":{"kind":"string","value":"Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http: www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License."},"nl_size":{"kind":"number","value":748,"string":"748"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8867126107215881,"string":"0.886713"}}},{"rowIdx":7843,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nSUCCESSFUL_TERMINAL_STATUSES = ('complete', )\nUNSUCCESSFUL_TERMINAL_STATUSES = ('cancelled', 'unsuccessful')\nCONTRACT_REQUIRED_FIELDS = [\n 'awardID', 'contractID', 'items', 'suppliers',\n 'value', 'dateSigned',\n #'documents'\n]\nCONTRACT_NOT_REQUIRED_FIELDS = [\n 'contractNumber', 'title', 'title_en', 'title_ru',\n 'description', 'description_en', 'description_ru'\n]\n"},"path":{"kind":"string","value":"openregistry/convoy/loki/constants.py"},"size":{"kind":"number","value":403,"string":"403"},"nl_text":{"kind":"string","value":"-*- coding: utf-8 -*-'documents'"},"nl_size":{"kind":"number","value":32,"string":"32"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.9070906639099121,"string":"0.907091"}}},{"rowIdx":7844,"cells":{"content":{"kind":"string","value":"\"\"\"\nPlugin for Czech TV (Ceska televize).\n\nFollowing channels are working:\n * CT1 - https://www.ceskatelevize.cz/porady/ct1/\n * CT2 - https://www.ceskatelevize.cz/porady/ct2/\n * CT24 - https://ct24.ceskatelevize.cz/#live\n * CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/\n * CT Decko - https://decko.ceskatelevize.cz/zive\n * CT Art - https://www.ceskatelevize.cz/porady/art/\n\nAdditionally, videos from iVysilani archive should work as well.\n\"\"\"\nimport json\nimport logging\nimport re\nfrom html import unescape as html_unescape\nfrom urllib.parse import quote\n\nfrom streamlink.plugin import Plugin, PluginError, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream import DASHStream, HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r'https?://([\\w-]+\\.)*ceskatelevize\\.cz'\n))\nclass Ceskatelevize(Plugin):\n ajax_url = 'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist'\n _player_re = re.compile(\n r'ivysilani/embed/iFramePlayer[^\"]+'\n )\n _hash_re = re.compile(\n r'hash:\"([0-9a-z]+)\"'\n )\n _playlist_info_re = re.compile(\n r'{\"type\":\"([a-z]+)\",\"id\":\"([0-9]+)\"'\n )\n _playlist_url_schema = validate.Schema({\n validate.optional(\"streamingProtocol\"): validate.text,\n \"url\": validate.any(\n validate.url(),\n \"Error\",\n \"error_region\"\n )\n })\n _playlist_schema = validate.Schema({\n \"playlist\": [{\n validate.optional(\"type\"): validate.text,\n \"streamUrls\": {\n \"main\": validate.url(),\n }\n }]\n })\n\n def _get_streams(self):\n self.session.http.headers.update({'User-Agent': useragents.IPAD})\n self.session.http.verify = False\n log.warning('SSL certificate verification is disabled.')\n # fetch requested url and find playlist info\n response = self.session.http.get(self.url)\n info = self._find_playlist_info(response)\n\n if not info:\n # do next try with new API\n def _fallback_api(*args, **kwargs):\n self.api2 = CeskatelevizeAPI2(self.session, self.url, *args, **kwargs)\n return self.api2._get_streams()\n\n # playlist info not found, let's try to find player url\n player_url = self._find_player_url(response)\n if not player_url:\n log.debug('Cannot find playlist info or player url, do next try with new API')\n return _fallback_api(res=response)\n\n # get player url and try to find 
playlist info in it\n response = self.session.http.get(player_url)\n info = self._find_playlist_info(response)\n if not info:\n log.debug('Cannot find playlist info in the player url, do next try with new API')\n return _fallback_api()\n\n log.trace('{0!r}'.format(info))\n\n data = {\n 'playlist[0][type]': info['type'],\n 'playlist[0][id]': info['id'],\n 'requestUrl': '/ivysilani/embed/iFramePlayer.php',\n 'requestSource': 'iVysilani',\n 'type': 'html'\n }\n headers = {\n 'x-addr': '127.0.0.1',\n }\n\n # fetch playlist url\n response = self.session.http.post(\n self.ajax_url,\n data=data,\n headers=headers\n )\n json_data = self.session.http.json(response, schema=self._playlist_url_schema)\n log.trace('{0!r}'.format(json_data))\n\n if json_data['url'] in ['Error', 'error_region']:\n log.error('This stream is not available')\n return\n\n # fetch playlist\n response = self.session.http.post(json_data['url'])\n json_data = self.session.http.json(response, schema=self._playlist_schema)\n log.trace('{0!r}'.format(json_data))\n playlist = json_data['playlist'][0]['streamUrls']['main']\n return HLSStream.parse_variant_playlist(self.session, playlist)\n\n @classmethod\n def _find_playlist_info(cls, response):\n \"\"\"\n Finds playlist info (type, id) in HTTP response.\n\n :param response: Response object.\n :returns: Dictionary with type and id.\n \"\"\"\n values = {}\n matches = cls._playlist_info_re.search(response.text)\n if matches:\n values['type'] = matches.group(1)\n values['id'] = matches.group(2)\n\n return values\n\n @classmethod\n def _find_player_url(cls, response):\n \"\"\"\n Finds embedded player url in HTTP response.\n\n :param response: Response object.\n :returns: Player url (str).\n \"\"\"\n url = ''\n matches = cls._player_re.search(response.text)\n if matches:\n tmp_url = matches.group(0).replace('&amp;', '&')\n if 'hash' not in tmp_url:\n # there's no hash in the URL, try to find it\n matches = cls._hash_re.search(response.text)\n if matches:\n url = tmp_url + '&hash=' + matches.group(1)\n else:\n url = tmp_url\n\n return 'http://ceskatelevize.cz/' + url\n\n\nclass CeskatelevizeAPI2:\n _player_api = 'https://playlist.ceskatelevize.cz/'\n _url_re = re.compile(r'http(s)?://([^.]*.)?ceskatelevize.cz')\n _playlist_info_re = re.compile(r'{\\s*\"type\":\\s*\"([a-z]+)\",\\s*\"id\":\\s*\"(\\w+)\"')\n _playlist_schema = validate.Schema({\n \"CODE\": validate.contains(\"OK\"),\n \"RESULT\": {\n \"playlist\": [{\n \"streamUrls\": {\n \"main\": validate.url(),\n }\n }]\n }\n })\n _ctcomp_re = re.compile(r'data-ctcomp=\"Video\"\\sdata-video-id=\"(?P[^\"]*)\"\\sdata-ctcomp-data=\"(?P[^\"]+)\">')\n _ctcomp_schema = validate.Schema(\n validate.text,\n validate.transform(_ctcomp_re.findall),\n validate.transform(lambda vl: [{\"video-id\": v[0], \"ctcomp-data\": json.loads(html_unescape(v[1]))} for v in vl])\n )\n _playlist_info_schema = validate.Schema({\n \"type\": validate.text,\n \"id\": validate.any(validate.text, int),\n \"key\": validate.text,\n \"date\": validate.text,\n \"requestSource\": validate.text,\n \"drm\": int,\n validate.optional(\"canBePlay\"): int,\n validate.optional(\"assetId\"): validate.text,\n \"quality\": validate.text,\n validate.optional(\"region\"): int\n })\n\n def __init__(self, session, url, res=None):\n self.session = session\n self.url = url\n self.response = res\n\n def _get_streams(self):\n if self.response is None:\n infos = self.session.http.get(self.url, schema=self._ctcomp_schema)\n else:\n infos = self.session.http.json(self.response, 
schema=self._ctcomp_schema)\n if not infos:\n # playlist infos not found\n raise PluginError('Cannot find playlist infos!')\n\n vod_prio = len(infos) == 2\n for info in infos:\n try:\n pl = info['ctcomp-data']['source']['playlist'][0]\n except KeyError:\n raise PluginError('Cannot find playlist info!')\n\n pl = self._playlist_info_schema.validate(pl)\n if vod_prio and pl['type'] != 'VOD':\n continue\n\n log.trace('{0!r}'.format(info))\n if pl['type'] == 'LIVE':\n data = {\n \"contentType\": \"live\",\n \"items\": [{\n \"id\": pl[\"id\"],\n \"assetId\": pl[\"assetId\"],\n \"key\": pl[\"key\"],\n \"playerType\": \"dash\",\n \"date\": pl[\"date\"],\n \"requestSource\": pl[\"requestSource\"],\n \"drm\": pl[\"drm\"],\n \"quality\": pl[\"quality\"],\n }]\n }\n elif pl['type'] == 'VOD':\n data = {\n \"contentType\": \"vod\",\n \"items\": [{\n \"id\": pl[\"id\"],\n \"key\": pl[\"key\"],\n \"playerType\": \"dash\",\n \"date\": pl[\"date\"],\n \"requestSource\": pl[\"requestSource\"],\n \"drm\": pl[\"drm\"],\n \"canBePlay\": pl[\"canBePlay\"],\n \"quality\": pl[\"quality\"],\n \"region\": pl[\"region\"]\n }]\n }\n\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n }\n\n data = json.dumps(data)\n response = self.session.http.post(\n self._player_api,\n data=\"data={}\".format(quote(data)),\n headers=headers\n )\n json_data = self.session.http.json(response, schema=self._playlist_schema)\n log.trace('{0!r}'.format(json_data))\n playlist = json_data['RESULT']['playlist'][0]['streamUrls']['main']\n yield from DASHStream.parse_manifest(self.session, playlist).items()\n\n\n__plugin__ = Ceskatelevize\n"},"path":{"kind":"string","value":"src/streamlink/plugins/ceskatelevize.py"},"size":{"kind":"number","value":9161,"string":"9,161"},"nl_text":{"kind":"string","value":"Finds embedded player url in HTTP response.\n\n:param response: Response object.\n:returns: Player url (str).\nFinds playlist info (type, id) in HTTP response.\n\n:param response: Response object.\n:returns: Dictionary with type and id.\nPlugin for Czech TV (Ceska televize).\n\nFollowing channels are working:\n * CT1 - https://www.ceskatelevize.cz/porady/ct1/\n * CT2 - https://www.ceskatelevize.cz/porady/ct2/\n * CT24 - https://ct24.ceskatelevize.cz/#live\n * CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/\n * CT Decko - https://decko.ceskatelevize.cz/zive\n * CT Art - https://www.ceskatelevize.cz/porady/art/\n\nAdditionally, videos from iVysilani archive should work as well.\n\n fetch requested url and find playlist info do next try with new API playlist info not found, let's try to find player url get player url and try to find playlist info in it fetch playlist url fetch playlist there's no hash in the URL, try to find it playlist infos not found"},"nl_size":{"kind":"number","value":974,"string":"974"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7592093348503113,"string":"0.759209"}}},{"rowIdx":7845,"cells":{"content":{"kind":"string","value":"#basic example of dict synat\nmy_dict = {'key1':'value1','key2':'value2','key3':'value3'}\nprint(my_dict)\nprint(my_dict['key3'])\n\n#xmpl 2\nprices = {'apple':100,'banana':60,'gavava':90,'rice':50}\nprint(prices['rice'])\n"},"path":{"kind":"string","value":"python_basics/Dictionary/dict.py"},"size":{"kind":"number","value":215,"string":"215"},"nl_text":{"kind":"string","value":"basic example of dict synatxmpl 
2"},"nl_size":{"kind":"number","value":33,"string":"33"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.4142121374607086,"string":"0.414212"}}},{"rowIdx":7846,"cells":{"content":{"kind":"string","value":"import numpy as np\nfrom time import sleep\nimport struct\nimport matplotlib.pyplot as plt\n\n\n# input raw samples from MCU\n# in_data = 'out/data_raw.txt'\nin_data = 'out/8bit.txt'\nfs = 5000\nin_bits = 8\n\n# load file\nraw = np.loadtxt(in_data)\n\n# Stats\nprint(\"Max=%d Min=%d Mean=%d swing=%d %.1fbits\" % \\\n (np.max(raw), np.min(raw), np.mean(raw),\n np.max(raw) - np.min(raw), np.log2(np.max(raw) - np.min(raw))))\n\n# generate different bit audio\ndata_depth = {}\nprint(raw)\ndata_depth['16bit'] = 2**(in_bits-16)*(raw / (2**(in_bits-16))).astype('int')\nprint(data_depth['16bit'])\ndata_depth['10bit'] = 2**(in_bits-10)*(raw / (2**(in_bits-10))).astype('int')\ndata_depth['8bit'] = 2**(in_bits-8)*(raw / (2**(in_bits-8))).astype('int')\ndata_depth['7bit'] = 2**(in_bits-7)*(raw / (2**(in_bits-7))).astype('int')\ndata_depth['6bit'] = 2**(in_bits-6)*(raw / (2**(in_bits-6))).astype('int')\ndata_depth['2bit'] = 2**(in_bits-2)*(raw / (2**(in_bits-2))).astype('int')\n\n# normalize and zero mean all \nfor key in data_depth:\n data_depth[key] = data_depth[key] - np.mean(data_depth[key])\n data_depth[key] = data_depth[key] / np.max(np.abs(data_depth[key]))\n\n# write audio files\nfrom scipy.io.wavfile import write\nfor key in data_depth:\n write('out/test'+key+'.wav', fs, data_depth[key])\n\n# plot some\nt = np.arange(0, len(raw)/fs, 1/fs)\nfig, axs = plt.subplots(1, 1)\n\naxs.step(t, data_depth['16bit'], label='16bit')\naxs.step(t, data_depth['8bit'], label='8bit')\naxs.step(t, data_depth['7bit'], label='7bit')\naxs.step(t, data_depth['6bit'], label='6bit')\naxs.step(t, data_depth['2bit'], label='2bit')\n# axs.set_xlim(0, 6e-3)\n# axs.set_ylim(-1, 1)\naxs.set_xlabel('time [s]')\naxs.set_ylabel('mic data')\naxs.grid(True)\naxs.legend()\n\nfig.tight_layout()\nplt.show()\n\n\n"},"path":{"kind":"string","value":"audio/edison/audio/bit_depth_analyze.py"},"size":{"kind":"number","value":1741,"string":"1,741"},"nl_text":{"kind":"string","value":"input raw samples from MCU in_data = 'out/data_raw.txt' load file Stats generate different bit audio normalize and zero mean all write audio files plot some axs.set_xlim(0, 6e-3) axs.set_ylim(-1, 1)"},"nl_size":{"kind":"number","value":199,"string":"199"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.4981168508529663,"string":"0.498117"}}},{"rowIdx":7847,"cells":{"content":{"kind":"string","value":"#!/home/pi/Documents/Codigos/API_Estacao/bin/python3\n\n\"\"\"Simple FTDI EEPROM configurator.\n\"\"\"\n\n# Copyright (c) 2019-2020, Emmanuel Blot \n# All rights reserved.\n#\n# SPDX-License-Identifier: BSD-3-Clause\n\nfrom argparse import ArgumentParser, FileType\nfrom io import StringIO\nfrom logging import Formatter, StreamHandler, DEBUG, ERROR\nfrom sys import modules, stderr\nfrom textwrap import fill\nfrom traceback import format_exc\nfrom pyftdi import FtdiLogger\nfrom pyftdi.eeprom import FtdiEeprom\nfrom pyftdi.ftdi import Ftdi\nfrom pyftdi.misc import add_custom_devices, hexdump\n\n#pylint: disable-msg=too-many-locals\n#pylint: disable-msg=too-many-branches\n#pylint: disable-msg=too-many-statements\n\n\ndef main():\n \"\"\"Main routine\"\"\"\n debug = False\n try:\n argparser = ArgumentParser(description=modules[__name__].__doc__)\n argparser.add_argument('device', 
nargs='?', default='ftdi:///?',\n help='serial port device name')\n argparser.add_argument('-x', '--hexdump', action='store_true',\n help='dump EEPROM content as ASCII')\n argparser.add_argument('-X', '--hexblock', type=int,\n help='dump EEPROM as indented hexa blocks')\n argparser.add_argument('-i', '--input', type=FileType('rt'),\n help='input ini file to load EEPROM content')\n argparser.add_argument('-l', '--load', default='all',\n choices=('all', 'raw', 'values'),\n help='section(s) to load from input file')\n argparser.add_argument('-o', '--output', type=FileType('wt'),\n help='output ini file to save EEPROM content')\n argparser.add_argument('-s', '--serial-number',\n help='set serial number')\n argparser.add_argument('-m', '--manufacturer',\n help='set manufacturer name')\n argparser.add_argument('-p', '--product',\n help='set product name')\n argparser.add_argument('-c', '--config', action='append',\n help='change/configure a property '\n 'as key=value pair')\n argparser.add_argument('-e', '--erase', action='store_true',\n help='erase the whole EEPROM content')\n argparser.add_argument('-u', '--update', action='store_true',\n help='perform actual update, use w/ care')\n argparser.add_argument('-P', '--vidpid', action='append',\n help='specify a custom VID:PID device ID, '\n 'may be repeated')\n argparser.add_argument('-V', '--virtual', type=FileType('r'),\n help='use a virtual device, specified as YaML')\n argparser.add_argument('-v', '--verbose', action='count', default=0,\n help='increase verbosity')\n argparser.add_argument('-d', '--debug', action='store_true',\n help='enable debug mode')\n args = argparser.parse_args()\n debug = args.debug\n\n if not args.device:\n argparser.error('Serial device not specified')\n\n loglevel = max(DEBUG, ERROR - (10 * args.verbose))\n loglevel = min(ERROR, loglevel)\n if debug:\n formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '\n '%(message)s', '%H:%M:%S')\n else:\n formatter = Formatter('%(message)s')\n FtdiLogger.set_formatter(formatter)\n FtdiLogger.set_level(loglevel)\n FtdiLogger.log.addHandler(StreamHandler(stderr))\n\n if args.virtual:\n #pylint: disable-msg=import-outside-toplevel\n from pyftdi.usbtools import UsbTools\n # Force PyUSB to use PyFtdi test framework for USB backends\n UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )\n # Ensure the virtual backend can be found and is loaded\n backend = UsbTools.find_backend()\n loader = backend.create_loader()()\n loader.load(args.virtual)\n\n try:\n add_custom_devices(Ftdi, args.vidpid, force_hex=True)\n except ValueError as exc:\n argparser.error(str(exc))\n\n eeprom = FtdiEeprom()\n eeprom.open(args.device)\n if args.erase:\n eeprom.erase()\n if args.input:\n eeprom.load_config(args.input, args.load)\n if args.serial_number:\n eeprom.set_serial_number(args.serial_number)\n if args.manufacturer:\n eeprom.set_manufacturer_name(args.manufacturer)\n if args.product:\n eeprom.set_product_name(args.product)\n for conf in args.config or []:\n if conf == '?':\n helpstr = ', '.join(sorted(eeprom.properties))\n print(fill(helpstr, initial_indent=' ',\n subsequent_indent=' '))\n exit(1)\n for sep in ':=':\n if sep in conf:\n name, value = conf.split(sep, 1)\n if not value:\n argparser.error('Configuration %s without value' %\n conf)\n helpio = StringIO()\n eeprom.set_property(name, value, helpio)\n helpstr = helpio.getvalue()\n if helpstr:\n print(fill(helpstr, initial_indent=' ',\n subsequent_indent=' '))\n exit(1)\n break\n else:\n argparser.error('Missing name:value separator 
in %s' % conf)\n if args.hexdump:\n print(hexdump(eeprom.data))\n if args.hexblock is not None:\n indent = ' ' * args.hexblock\n for pos in range(0, len(eeprom.data), 16):\n hexa = ' '.join(['%02x' % x for x in eeprom.data[pos:pos+16]])\n print(indent, hexa, sep='')\n if args.update:\n if eeprom.commit(False):\n eeprom.reset_device()\n if args.verbose > 0:\n eeprom.dump_config()\n if args.output:\n eeprom.save_config(args.output)\n\n except (ImportError, IOError, NotImplementedError, ValueError) as exc:\n print('\\nError: %s' % exc, file=stderr)\n if debug:\n print(format_exc(chain=False), file=stderr)\n exit(1)\n except KeyboardInterrupt:\n exit(2)\n\n\nif __name__ == '__main__':\n main()\n"},"path":{"kind":"string","value":"bin/ftconf.py"},"size":{"kind":"number","value":6686,"string":"6,686"},"nl_text":{"kind":"string","value":"Main routine\nSimple FTDI EEPROM configurator.\n\n!/home/pi/Documents/Codigos/API_Estacao/bin/python3 Copyright (c) 2019-2020, Emmanuel Blot All rights reserved. SPDX-License-Identifier: BSD-3-Clausepylint: disable-msg=too-many-localspylint: disable-msg=too-many-branchespylint: disable-msg=too-many-statementspylint: disable-msg=import-outside-toplevel Force PyUSB to use PyFtdi test framework for USB backends Ensure the virtual backend can be found and is loaded"},"nl_size":{"kind":"number","value":486,"string":"486"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.5533414483070374,"string":"0.553341"}}},{"rowIdx":7848,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python3\n######################################################\n## Calibrating the extrinsics between T265 and D4xx ##\n## Based on this example: https://github.com/IntelRealSense/librealsense/pull/4355\n## with changes and modifications.\n######################################################\n\n######################################################\n#\n# General steps:\n# 1. Mount the two cameras rigidly\n# 2. Print any one of the checkerboards from: https://markhedleyjones.com/projects/calibration-checkerboard-collection\n# - The default settings in this script are for: https://markhedleyjones.com/storage/checkerboards/Checkerboard-A4-25mm-8x6.pdf\n# - Measure the actual printed grid size of the squares and modify size.\n# 3. Modify the script:\n# - Change grid_H, grid_W and size according to the actual printed checkerboard.\n# - Change the path and file_name if necessary (ex: use this script as standalone).\n# 4. Run the script online:\n# - python calibrate_extrinsics.py\n# 5. 
The results include intrinsics (save file) and extrinsics (terminal output)\n# \n######################################################\n\nfrom __future__ import print_function\n\nimport pyrealsense2 as rs\nimport numpy as np\nnp.set_printoptions(suppress=True,precision=5)\nimport cv2\nassert cv2.__version__[0] >= '3', 'The fisheye module requires opencv version >= 3.0.0'\nimport os\nimport shutil\nimport json\nimport argparse\nimport glob\nfrom collections import OrderedDict\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--SN_T265', help='serial number of T265')\nparser.add_argument('--SN_D4xx', help='serial number of D4xx')\nparser.add_argument('--path', default=\"calibration_results\", help='image path')\nparser.add_argument('--file_name', default=\"/intrinsics.json\", help='intrinsics calibration file name')\nparser.add_argument('--save_tmp', default=False, help='save the temporary files of this program, useful for debugging purposes')\nparser.add_argument('--grid_H', default=8, help='grid height (inner corners)')\nparser.add_argument('--grid_W', default=6, help='grid width (inner corners)')\nparser.add_argument('--size', default=0.0282, help='grid side length')\nparser.add_argument('--calibrate', default=False, help='run calibration (only)', action='store_true')\nparser.add_argument('--visualize', default=True, help='with GUI', action='store_true')\nargs = parser.parse_args()\nCHECKERBOARD = (args.grid_H, args.grid_W)\nSIDE_LENGTH = args.size\n\ntmp_folder = args.path + \"/tmp\"\n\ndef add_camera_calibration(intrinsics, streams = None):\n cam = {}\n cam['center_px'] = [intrinsics.ppx, intrinsics.ppy]\n cam['focal_length_px'] = [intrinsics.fx, intrinsics.fy]\n cam['distortion'] = {}\n cam['distortion']['type'] = 'kannalabrandt4'\n cam['distortion']['k'] = intrinsics.coeffs[:4]\n if streams:\n ext = streams[\"cam1\"].get_extrinsics_to(streams[\"pose\"]) # w.r.t.\n #print(ext)\n cam[\"extrinsics\"] = {}\n cam[\"extrinsics\"][\"T\"] = ext.translation\n #print(ext.rotation)\n cam[\"extrinsics\"][\"R\"] = ext.rotation\n return cam\n\ndef save_intrinsics(directory, file_name, intrinsics, streams):\n D = OrderedDict() # in order (cam1,cam2)\n D['cameras'] = []\n D['cameras'].append(add_camera_calibration(intrinsics[\"cam1\"], streams))\n D['cameras'].append(add_camera_calibration(intrinsics[\"cam2\"]))\n\n if not os.path.exists(directory):\n os.mkdir(directory)\n with open(directory + file_name, 'w') as f:\n json.dump(D, f, indent=4)\n print(\"Intrinsics output written to \" + directory + file_name)\n\n\ndef read_calibration(cam, extrinsics = False):\n #print(\"read_calibration\")\n # intrinsics\n K = np.array([[cam['focal_length_px'][0], 0, cam['center_px'][0]],\n [ 0, cam['focal_length_px'][1], cam['center_px'][1]],\n [ 0, 0, 1]])\n D = np.array(cam['distortion']['k'])\n\n if extrinsics:\n H = np.eye(4)\n H[:3,:3] = np.reshape(cam[\"extrinsics\"][\"R\"],(3,3))\n H[:3,3] = cam[\"extrinsics\"][\"T\"]\n #print(H)\n return (K, D, H)\n return (K, D)\n\ndef load_calibration(directory, file_name):\n with open(directory + file_name, 'r') as f:\n D = json.load(f)\n\n (K1, D1, H1) = read_calibration(D['cameras'][0], True)\n (K2, D2) = read_calibration(D['cameras'][1])\n return (K1, D1, K2, D2, H1)\n\ndef find_realsense_serial_no(type):\n\n camera_name = ['Intel RealSense T265', 'Intel RealSense D435']\n\n # Get realsense pipeline handle\n pipe = rs.pipeline()\n\n # Find the T265\n devices = rs.context().devices\n for i in range(len(devices)):\n if 
(devices[i].get_info(rs.camera_info.name) == camera_name[type]):\n print('Found one connected ' + camera_name[type] + ' with serial no:', devices[i].get_info(rs.camera_info.serial_number))\n return devices[i].get_info(rs.camera_info.serial_number)\n\n print('No ' + camera_name[type] + ' found, please check connection or input serial manually')\n return None\n\nif not args.calibrate:\n # Obtain the serial number of the cameras, either automatically or from user's input\n print(\"Trying to connect devices...\")\n serial_t265 = None\n serial_d4xx = None\n\n if (not args.SN_T265):\n serial_t265 = find_realsense_serial_no(0)\n else:\n serial_t265 = args.SN_T265\n\n if (not args.SN_D4xx):\n serial_d4xx = find_realsense_serial_no(1)\n else:\n serial_d4xx = args.SN_D4xx\n\n if (not serial_t265) or (not serial_d4xx):\n print(\"Specify serial numbers --SN_T265 and --SN_D4xx (for online calibration, or --calibrate for prerecorded images with --path path to folder)\")\n exit()\n\n # cam 1\n pipe1 = rs.pipeline()\n cfg1 = rs.config()\n cfg1.enable_device(serial_t265)\n pipe1.start(cfg1)\n\n # cam 2\n pipe2 = rs.pipeline()\n cfg2 = rs.config()\n cfg2.enable_device(serial_d4xx)\n cfg2.enable_all_streams()\n pipe2_profile = pipe2.start(cfg2)\n sensor_depth = pipe2_profile.get_device().first_depth_sensor()\n sensor_depth.set_option(rs.option.emitter_enabled, 0) # turn OFF projector\n\n try:\n # Retreive the stream and intrinsic properties for both cameras\n profile1 = pipe1.get_active_profile()\n profile2 = pipe2.get_active_profile()\n # future improvements: make both stream configureable\n streams = {\"cam1\" : profile1.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(),\n \"pose\" : profile1.get_stream(rs.stream.pose),\n \"cam2\" : profile2.get_stream(rs.stream.infrared, 1).as_video_stream_profile()} # IR1\n #\"cam2\" : profile1.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()} # test\n intrinsics = {\"cam1\" : streams[\"cam1\"].get_intrinsics(),\n \"cam2\" : streams[\"cam2\"].get_intrinsics()}\n #print(\"cam1:\", intrinsics[\"cam1\"])\n #print(\"cam2:\", intrinsics[\"right\"])\n\n save_intrinsics(args.path, args.file_name, intrinsics, streams)\n\n # capture images\n i = 0\n print(\"Press 's' to save image.\\nPress 'q' or 'c' to quit recording and start the calibration.\")\n while True:\n # cam 1\n frames1 = pipe1.wait_for_frames()\n f_fe1 = frames1.get_fisheye_frame(1) # left fisheye\n f_fe2 = frames1.get_fisheye_frame(2) # right fisheye\n if not f_fe1 or not f_fe2:\n continue\n img_fe1 = np.asanyarray(f_fe1.get_data())\n img_fe2 = np.asanyarray(f_fe2.get_data())\n\n # cam 2\n frames2 = pipe2.wait_for_frames()\n f_ir1 = frames2.get_infrared_frame(1) # left infrared\n f_ir2 = frames2.get_infrared_frame(2) # right infrared\n f_color = frames2.get_color_frame()\n if not f_ir1 or not f_ir2 or not f_color:\n continue\n img_ir1 = np.asanyarray(f_ir1.get_data())\n img_ir2 = np.asanyarray(f_ir2.get_data())\n img_color = np.asanyarray(f_color.get_data())\n\n # TODO: configure streams\n img1 = img_fe1\n img2 = img_ir1\n\n # display\n cv2.imshow('cam1', img1)\n cv2.imshow('cam2', img2)\n\n # save or quit\n k = cv2.waitKey(1)\n if k == ord('s'):\n print(\"'s' key pressed. 
Saving temp images..\")\n if not os.path.exists(tmp_folder):\n os.mkdir(tmp_folder)\n cv2.imwrite(tmp_folder + '/fe1_' + str(i) + '.png', img_fe1)\n cv2.imwrite(tmp_folder + '/fe2_' + str(i) + '.png', img_fe2)\n cv2.imwrite(tmp_folder + '/ir1_' + str(i) + '.png', img_ir1)\n # cv2.imwrite(tmp_folder+ '/ir2_' + str(i) + '.png', img_ir2)\n cv2.imwrite(tmp_folder + '/color_' + str(i) + '.png', img_color)\n print(\"Saved temp images in temp folder \" + tmp_folder)\n i = i+1\n\n if k == ord('q') or k == ord('c'):\n break\n\n finally:\n pipe1.stop()\n pipe2.stop()\n\n\n# calibrate\nprint(\"Calibrate extrinsics now...\")\n\n# arrays to store detections\nP3 = [] # w.r.t. target frame\nP2_1 = [] # in image #1\nP2_2 = [] # in image #2\n\n# TODO: configure streams\nimages1 = glob.glob(tmp_folder + '/fe1_*')\n#images2 = glob.glob(tmp_folder + '/fe2_*') # test\nimages2 = glob.glob(tmp_folder + '/ir1_*')\nimages1.sort()\nimages2.sort()\n#print(images1)\n#print(images2)\n\nif len(images1) == len(images2) == 0:\n print(\"No images found. Exit.\")\n exit(0)\n\n\ntry:\n for i, fname in enumerate(images1):\n img1 = cv2.imread(images1[i])\n img2 = cv2.imread(images2[i])\n\n gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n # detect\n ret1, corners1 = cv2.findChessboardCorners(gray1, CHECKERBOARD, None)\n ret2, corners2 = cv2.findChessboardCorners(gray2, CHECKERBOARD, None)\n\n if ret1 and ret2:\n # subpixel refinement\n criteria_sub = (cv2.TermCriteria_COUNT + cv2.TERM_CRITERIA_EPS, 10, 1e-1)\n rt = cv2.cornerSubPix(gray1, corners1, (7, 7), (-1, -1), criteria_sub)\n P2_1.append(corners1)\n if args.visualize:\n ret1 = cv2.drawChessboardCorners(img1, CHECKERBOARD, corners1, ret1)\n cv2.imshow(\"img1\", img1)\n cv2.waitKey(200)\n\n rt = cv2.cornerSubPix(gray2, corners2, (7, 7), (-1, -1), criteria_sub)\n P2_2.append(corners2)\n if args.visualize:\n ret2 = cv2.drawChessboardCorners(img2, CHECKERBOARD, corners2, ret2)\n cv2.imshow(\"img2\", img2)\n cv2.waitKey(200)\nexcept cv2.error as e:\n print(\"Error: \", e)\n\n# calibration (stereo extrinsics)\nR = np.zeros((1, 1, 3), dtype=np.float64)\nT = np.zeros((1, 1, 3), dtype=np.float64)\n\nN = len(P2_1) # number of successful detections\n\np3d = np.zeros( (CHECKERBOARD[0]*CHECKERBOARD[1], 1, 3) , np.float64)\np3d[:,0, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)\n\n# fisheye.stereoCalibrate needs different data structures/dimensions than cv2.stereoCalibrate, i.e. (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2/3)!\nP3 = np.array([p3d]*N, dtype=np.float64)\nP2_1 = np.asarray(P2_1, dtype=np.float64)\nP2_2 = np.asarray(P2_2, dtype=np.float64)\n\nP3 = np.reshape(P3, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 3))*SIDE_LENGTH\nP2_1 = np.reshape(P2_1, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2))\nP2_2 = np.reshape(P2_2, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2))\n\n(K1, D1, K2, D2, H1) = load_calibration(args.path, args.file_name)\n\ntry:\n (rms, _, _, _, _, R, T) = \\\n cv2.fisheye.stereoCalibrate(\n P3,\n P2_1,\n P2_2,\n K1,\n D1,\n K2,\n D2,\n (0,0), # only used to initialize intrinsics when no intrinsics provided\n R,\n T,\n cv2.fisheye.CALIB_FIX_INTRINSIC # extrinsics only\n )\nexcept cv2.error as e:\n print(\"Error: \", e)\n print(\"Please make sure that the checkerboard exists in the images. See tmp images in \" + tmp_folder + \" to debug.\")\n exit()\n\nprint(\"RMS:\", rms)\n\nH_cam2_cam1 = np.eye(4)\nH_cam2_cam1[:3,:3] = R\nH_cam2_cam1[:3,3] = T.flatten()\n\n# w.r.t. 
pose\nH_ir1_fe1 = H_cam2_cam1 # TODO: configure\nH_pose_fe1 = H1\n\nH_pose_ir1 = H_pose_fe1.dot( np.linalg.inv(H_ir1_fe1) )\nprint(\"H (ir1 wrt pose) =\", H_pose_ir1)\n\nfn = args.path + \"/H.txt\"\nnp.savetxt(fn, H_pose_ir1, fmt='%.9f')\nprint(\"Extrinsic output written to\", fn)\n\nif not args.save_tmp:\n if os.path.isdir(tmp_folder):\n shutil.rmtree(tmp_folder, ignore_errors=True)\n print(\"Temporary files deleted. If you wish to keep the tmp files, use --save_tmp True.\")"},"path":{"kind":"string","value":"robot/src/vision_to_mavros/scripts/calibrate_extrinsics.py"},"size":{"kind":"number","value":12847,"string":"12,847"},"nl_text":{"kind":"string","value":"!/usr/bin/env python3 Calibrating the extrinsics between T265 and D4xx Based on this example: https://github.com/IntelRealSense/librealsense/pull/4355 with changes and modifications. General steps: 1. Mount the two cameras rigidly 2. Print any one of the checkerboards from: https://markhedleyjones.com/projects/calibration-checkerboard-collection - The default settings in this script are for: https://markhedleyjones.com/storage/checkerboards/Checkerboard-A4-25mm-8x6.pdf - Measure the actual printed grid size of the squares and modify size. 3. Modify the script: - Change grid_H, grid_W and size according to the actual printed checkerboard. - Change the path and file_name if necessary (ex: use this script as standalone). 4. Run the script online: - python calibrate_extrinsics.py 5. The results include intrinsics (save file) and extrinsics (terminal output) w.r.t.print(ext)print(ext.rotation) in order (cam1,cam2)print(\"read_calibration\") intrinsicsprint(H) Get realsense pipeline handle Find the T265 Obtain the serial number of the cameras, either automatically or from user's input cam 1 cam 2 turn OFF projector Retreive the stream and intrinsic properties for both cameras future improvements: make both stream configureable IR1\"cam2\" : profile1.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()} testprint(\"cam1:\", intrinsics[\"cam1\"])print(\"cam2:\", intrinsics[\"right\"]) capture images cam 1 left fisheye right fisheye cam 2 left infrared right infrared TODO: configure streams display save or quit cv2.imwrite(tmp_folder+ '/ir2_' + str(i) + '.png', img_ir2) calibrate arrays to store detections w.r.t. target frame in image 1 in image 2 TODO: configure streamsimages2 = glob.glob(tmp_folder + '/fe2_*') testprint(images1)print(images2) detect subpixel refinement calibration (stereo extrinsics) number of successful detections fisheye.stereoCalibrate needs different data structures/dimensions than cv2.stereoCalibrate, i.e. (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2/3)! only used to initialize intrinsics when no intrinsics provided extrinsics only w.r.t. 
pose TODO: configure"},"nl_size":{"kind":"number","value":2113,"string":"2,113"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6467118859291077,"string":"0.646712"}}},{"rowIdx":7849,"cells":{"content":{"kind":"string","value":"\"\"\"Implementation of Rule L044.\"\"\"\nfrom typing import Optional\n\nfrom sqlfluff.core.rules.analysis.select_crawler import Query, SelectCrawler\nfrom sqlfluff.core.parser import BaseSegment\nfrom sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext\nfrom sqlfluff.core.rules.doc_decorators import document_groups\nfrom sqlfluff.core.rules.functional import sp\n\n\nclass RuleFailure(Exception):\n \"\"\"Exception class for reporting lint failure inside deeply nested code.\"\"\"\n\n def __init__(self, anchor: BaseSegment):\n self.anchor: BaseSegment = anchor\n\n\n@document_groups\nclass Rule_L044(BaseRule):\n \"\"\"Query produces an unknown number of result columns.\n\n **Anti-pattern**\n\n Querying all columns using ``*`` produces a query result where the number\n or ordering of columns changes if the upstream table's schema changes.\n This should generally be avoided because it can cause slow performance,\n cause important schema changes to go undetected, or break production code.\n For example:\n\n * If a query does ``SELECT t.*`` and is expected to return columns ``a``, ``b``,\n and ``c``, the actual columns returned will be wrong/different if columns\n are added to or deleted from the input table.\n * ``UNION`` and ``DIFFERENCE`` clauses require the inputs have the same number\n of columns (and compatible types).\n * ``JOIN`` queries may break due to new column name conflicts, e.g. the\n query references a column ``c`` which initially existed in only one input\n table but a column of the same name is added to another table.\n * ``CREATE TABLE (<>) AS SELECT *``\n\n\n .. code-block:: sql\n\n WITH cte AS (\n SELECT * FROM foo\n )\n\n SELECT * FROM cte\n UNION\n SELECT a, b FROM t\n\n **Best practice**\n\n Somewhere along the \"path\" to the source data, specify columns explicitly.\n\n .. code-block:: sql\n\n WITH cte AS (\n SELECT * FROM foo\n )\n\n SELECT a, b FROM cte\n UNION\n SELECT a, b FROM t\n\n \"\"\"\n\n groups = (\"all\",)\n _works_on_unparsable = False\n\n def _handle_alias(self, selectable, alias_info, query):\n select_info_target = SelectCrawler.get(\n query, alias_info.from_expression_element\n )[0]\n if isinstance(select_info_target, str):\n # It's an alias to an external table whose\n # number of columns could vary without our\n # knowledge. Thus, warn.\n self.logger.debug(\n f\"Query target {select_info_target} is external. Generating warning.\"\n )\n raise RuleFailure(selectable.selectable)\n else:\n # Handle nested SELECT.\n self._analyze_result_columns(select_info_target)\n\n def _analyze_result_columns(self, query: Query):\n \"\"\"Given info on a list of SELECTs, determine whether to warn.\"\"\"\n # Recursively walk from the given query (select_info_list) to any\n # wildcard columns in the select targets. If every wildcard evdentually\n # resolves to a query without wildcards, all is well. 
Otherwise, warn.\n if not query.selectables:\n return # pragma: no cover\n for selectable in query.selectables:\n self.logger.debug(f\"Analyzing query: {selectable.selectable.raw}\")\n for wildcard in selectable.get_wildcard_info():\n if wildcard.tables:\n for wildcard_table in wildcard.tables:\n self.logger.debug(\n f\"Wildcard: {wildcard.segment.raw} has target \"\n \"{wildcard_table}\"\n )\n # Is it an alias?\n alias_info = selectable.find_alias(wildcard_table)\n if alias_info:\n # Found the alias matching the wildcard. Recurse,\n # analyzing the query associated with that alias.\n self._handle_alias(selectable, alias_info, query)\n else:\n # Not an alias. Is it a CTE?\n cte = query.lookup_cte(wildcard_table)\n if cte:\n # Wildcard refers to a CTE. Analyze it.\n self._analyze_result_columns(cte)\n else:\n # Not CTE, not table alias. Presumably an\n # external table. Warn.\n self.logger.debug(\n f\"Query target {wildcard_table} is external. \"\n \"Generating warning.\"\n )\n raise RuleFailure(selectable.selectable)\n else:\n # No table was specified with the wildcard. Assume we're\n # querying from a nested select in FROM.\n query_list = SelectCrawler.get(\n query, query.selectables[0].selectable\n )\n for o in query_list:\n if isinstance(o, Query):\n self._analyze_result_columns(o)\n return\n self.logger.debug(\n f'Query target \"{query.selectables[0].selectable.raw}\" has no '\n \"targets. Generating warning.\"\n )\n raise RuleFailure(query.selectables[0].selectable)\n\n def _eval(self, context: RuleContext) -> Optional[LintResult]:\n \"\"\"Outermost query should produce known number of columns.\"\"\"\n start_types = [\"select_statement\", \"set_expression\", \"with_compound_statement\"]\n if context.segment.is_type(\n *start_types\n ) and not context.functional.parent_stack.any(sp.is_type(*start_types)):\n crawler = SelectCrawler(context.segment, context.dialect)\n\n # Begin analysis at the outer query.\n if crawler.query_tree:\n try:\n return self._analyze_result_columns(crawler.query_tree)\n except RuleFailure as e:\n return LintResult(anchor=e.anchor)\n return None\n"},"path":{"kind":"string","value":"src/sqlfluff/rules/L044.py"},"size":{"kind":"number","value":6370,"string":"6,370"},"nl_text":{"kind":"string","value":"Exception class for reporting lint failure inside deeply nested code.\nQuery produces an unknown number of result columns.\n\n**Anti-pattern**\n\nQuerying all columns using ``*`` produces a query result where the number\nor ordering of columns changes if the upstream table's schema changes.\nThis should generally be avoided because it can cause slow performance,\ncause important schema changes to go undetected, or break production code.\nFor example:\n\n* If a query does ``SELECT t.*`` and is expected to return columns ``a``, ``b``,\n and ``c``, the actual columns returned will be wrong/different if columns\n are added to or deleted from the input table.\n* ``UNION`` and ``DIFFERENCE`` clauses require the inputs have the same number\n of columns (and compatible types).\n* ``JOIN`` queries may break due to new column name conflicts, e.g. the\n query references a column ``c`` which initially existed in only one input\n table but a column of the same name is added to another table.\n* ``CREATE TABLE (<>) AS SELECT *``\n\n\n.. code-block:: sql\n\n WITH cte AS (\n SELECT * FROM foo\n )\n\n SELECT * FROM cte\n UNION\n SELECT a, b FROM t\n\n**Best practice**\n\nSomewhere along the \"path\" to the source data, specify columns explicitly.\n\n.. 
code-block:: sql\n\n WITH cte AS (\n SELECT * FROM foo\n )\n\n SELECT a, b FROM cte\n UNION\n SELECT a, b FROM t\nGiven info on a list of SELECTs, determine whether to warn.\nOutermost query should produce known number of columns.\nImplementation of Rule L044.\n\n It's an alias to an external table whose number of columns could vary without our knowledge. Thus, warn. Handle nested SELECT. Recursively walk from the given query (select_info_list) to any wildcard columns in the select targets. If every wildcard evdentually resolves to a query without wildcards, all is well. Otherwise, warn. pragma: no cover Is it an alias? Found the alias matching the wildcard. Recurse, analyzing the query associated with that alias. Not an alias. Is it a CTE? Wildcard refers to a CTE. Analyze it. Not CTE, not table alias. Presumably an external table. Warn. No table was specified with the wildcard. Assume we're querying from a nested select in FROM. Begin analysis at the outer query."},"nl_size":{"kind":"number","value":2248,"string":"2,248"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8242278099060059,"string":"0.824228"}}},{"rowIdx":7850,"cells":{"content":{"kind":"string","value":"import os, paramiko, time, schedule, smtplib, ssl\nfrom datetime import datetime\nfrom email.message import EmailMessage\n\nhost='localhost'\nport='5432'\nuser='postgres'\npassword='admin'\ndatabase='testdb'\n\n#chemin de sauvegarde locale\nlocal_dir = 'C:\\\\Users\\\\Kamla\\\\projets\\\\auto-backup-sqldb\\\\backup\\\\'\n#local_dir = 'Chemin vers le dossier de la base de donnees a sauvegarder\\\\'\n\n#chemin de sauvegarde distant\nremote_dir = '/C:/Users/vmwin10/Documents/ftpfile/'\n\ndef job():\n print(\"Backup working...\")\n \n filestamp = time.strftime('%Y-%m-%dT%H-%M-%S.%z')\n \n #nom pour le fichier sql qui serra genere par pg_dump\n database_remote = database+\"_\"+filestamp+\".bak.sql\"\n \n PASS=\"set PGPASSWORD=%s\" % (password)\n #lancement de la commande mysqldump qui va faire une sauvegarde en local\n #les fichiers sont sauvegarder dans le respertoire 'backup'\n os.system(\"(cd backup) && (\"+PASS+\") && (pg_dump -h %s -p %s -U %s -f %s -C -d %s)\" % (host, port, user, database_remote, database))\n \n print(\"Database dumped to \"+database_remote)\n \n # debut du SFTP\n ssh_client=paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n #on se connecte a la machine dans laquelle serra sauvegarde le le fichier backup\n ssh_client.connect(hostname='192.168.126.2',username='vmwin10',password='vmwin10')\n\n ftp_client=ssh_client.open_sftp()\n \n #envoie du fichier local vers le remote\n ftp_client.put(local_dir+database_remote,remote_dir+database_remote)\n ftp_client.close()\n print(\"Successfull Backup\")\n \n # A chaque backup un email est envoye\n msg = EmailMessage()\n msg.set_content(\"Un backup vient d'etre effectue\")\n msg[\"Subject\"] = \"Email de Backup\"\n msg[\"From\"] = \"ksb.cmr@gmail.com\"\n msg[\"To\"] = \"test@mail.com\"\n context=ssl.create_default_context()\n with smtplib.SMTP(\"smtp.gmail.com\", port=587) as smtp:\n smtp.starttls(context=context)\n smtp.login(msg[\"From\"], \"password\")\n smtp.send_message(msg) \n \n# le backup se fait chaque 
1h\n\nschedule.every(3).seconds.do(job)\n#schedule.every(15).minutes.do(job)\n#schedule.every().hour.do(job)\n#schedule.every().day.at(\"10:30\").do(job)\n#schedule.every(10).to(10).minutes.do(job)\n#schedule.every().monday.do(job)\n#schedule.every().wednesday.at(\"15:00\").do(job)\n#schedule.every().minute.at(\":15\").do(job)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n"},"path":{"kind":"string","value":"pgsqlbackup.py"},"size":{"kind":"number","value":2432,"string":"2,432"},"nl_text":{"kind":"string","value":"chemin de sauvegarde localelocal_dir = 'Chemin vers le dossier de la base de donnees a sauvegarder\\\\'chemin de sauvegarde distantnom pour le fichier sql qui serra genere par pg_dumplancement de la commande mysqldump qui va faire une sauvegarde en localles fichiers sont sauvegarder dans le respertoire 'backup' debut du SFTPon se connecte a la machine dans laquelle serra sauvegarde le le fichier backupenvoie du fichier local vers le remote A chaque backup un email est envoye le backup se fait chaque 1hschedule.every(15).minutes.do(job)schedule.every().hour.do(job)schedule.every().day.at(\"10:30\").do(job)schedule.every(10).to(10).minutes.do(job)schedule.every().monday.do(job)schedule.every().wednesday.at(\"15:00\").do(job)schedule.every().minute.at(\":15\").do(job)"},"nl_size":{"kind":"number","value":767,"string":"767"},"nl_language":{"kind":"string","value":"fr"},"nl_language_score":{"kind":"number","value":0.8325746059417725,"string":"0.832575"}}},{"rowIdx":7851,"cells":{"content":{"kind":"string","value":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nfrom datetime import date\nfrom pathlib import Path\n\nROOT_DIR = Path(__file__).resolve(strict=True).parent.parent\nPACKAGE_DIR = ROOT_DIR / \"email_service\"\nDOCS_DIR = ROOT_DIR / \"email_service\"\nversion_file_path = PACKAGE_DIR / \"version.py\"\n\ncode_obj = compile(version_file_path.read_text(), version_file_path, \"exec\")\n__version__ = dict()\nexec(code_obj, __version__)\nversion = __version__[\"__version__\"]\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Email Service\"\ncopyright = \"\"\"2021, Aditya Raman\"\"\"\nauthor = \"Aditya Raman\"\n\n# The full version, including alpha/beta/rc tags\nversion = release = f\"v{version}\"\ntoday = str(date.today())\nlanguage = \"en\"\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx_rtd_theme\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.viewcode\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\" # alternate: \"alabaster\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n# These paths are either relative to html_static_path\n# or fully qualified paths (eg. https://...)\n# html_css_files = []\n#\n# html_style = \"\"\n\nmaster_doc = \"index\"\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n \"papersize\": \"a4paper\",\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n \"preamble\": \"\\\\addto\\\\captionsenglish{\\\\renewcommand{\\\\contentsname}{Table of contents}}\",\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\nlatex_show_urls = \"footnote\"\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\nadd_function_parentheses = False\nshow_authors = True\n"},"path":{"kind":"string","value":"docs/conf.py"},"size":{"kind":"number","value":3252,"string":"3,252"},"nl_text":{"kind":"string","value":"Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: https://www.sphinx-doc.org/en/master/usage/configuration.html -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- Project information ----------------------------------------------------- The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. alternate: \"alabaster\" Add any paths that contain custom static files (such as style sheets) here, relative to this directory. 
They are copied after the builtin static files, so a file named \"default.css\" will overwrite the builtin \"default.css\". These paths are either relative to html_static_path or fully qualified paths (eg. https://...) html_css_files = [] html_style = \"\" The paper size ('letterpaper' or 'a4paper'). The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. Latex figure (float) alignment 'figure_align': 'htbp', If true, the current module name will be prepended to all description unit titles (such as .. function::)."},"nl_size":{"kind":"number","value":1996,"string":"1,996"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6835110783576965,"string":"0.683511"}}},{"rowIdx":7852,"cells":{"content":{"kind":"string","value":"# NOTE - Still seems to be a leak here somewhere\n# gateway count doesnt hit zero. Hence the print statements!\n\nimport sys\n\nsys.coinit_flags = 0 # Must be free-threaded!\nimport win32api, pythoncom, time\nimport pywintypes\nimport os\nimport winerror\nimport win32com\nimport win32com.client.connect\nfrom win32com.test.util import CheckClean\nfrom win32com.client import constants, DispatchBaseClass, CastTo, VARIANT\nfrom win32com.test.util import RegisterPythonServer\nfrom pywin32_testutil import str2memory\nimport datetime\nimport decimal\nimport win32timezone\n\nimportMsg = \"**** PyCOMTest is not installed ***\\n PyCOMTest is a Python test specific COM client and server.\\n It is likely this server is not installed on this machine\\n To install the server, you must get the win32com sources\\n and build it using MS Visual C++\"\n\nerror = Exception\n\n# This test uses a Python implemented COM server - ensure correctly registered.\nRegisterPythonServer(\n os.path.join(os.path.dirname(__file__), \"..\", \"servers\", \"test_pycomtest.py\"),\n \"Python.Test.PyCOMTest\",\n)\n\nfrom win32com.client import gencache\n\ntry:\n gencache.EnsureModule(\"{6BCDCB60-5605-11D0-AE5F-CADD4C000000}\", 0, 1, 1)\nexcept pythoncom.com_error:\n print(\"The PyCOMTest module can not be located or generated.\")\n print(importMsg)\n raise RuntimeError(importMsg)\n\n# We had a bg where RegisterInterfaces would fail if gencache had\n# already been run - exercise that here\nfrom win32com import universal\n\nuniversal.RegisterInterfaces(\"{6BCDCB60-5605-11D0-AE5F-CADD4C000000}\", 0, 1, 1)\n\nverbose = 0\n\n# convert a normal int to a long int - used to avoid, eg, '1L' for py3k\n# friendliness\ndef ensure_long(int_val):\n if sys.version_info > (3,):\n # py3k - no such thing as a 'long'\n return int_val\n # on py2x, we just use an expression that results in a long\n return 0x100000000 - 0x100000000 + int_val\n\n\ndef check_get_set(func, arg):\n got = func(arg)\n if got != arg:\n raise error(\"%s failed - expected %r, got %r\" % (func, arg, got))\n\n\ndef check_get_set_raises(exc, func, arg):\n try:\n got = func(arg)\n except exc as e:\n pass # what we expect!\n else:\n raise error(\n \"%s with arg %r didn't raise %s - returned %r\" % (func, arg, exc, got)\n )\n\n\ndef progress(*args):\n if verbose:\n for arg in args:\n print(arg, end=\" \")\n print()\n\n\ndef TestApplyResult(fn, args, result):\n try:\n fnName = str(fn).split()[1]\n except:\n fnName = str(fn)\n progress(\"Testing \", fnName)\n pref = \"function \" + fnName\n rc = fn(*args)\n if rc != result:\n raise error(\"%s failed - result not %r but %r\" % (pref, result, rc))\n\n\ndef TestConstant(constName, pyConst):\n try:\n comConst = getattr(constants, 
constName)\n except:\n raise error(\"Constant %s missing\" % (constName,))\n if comConst != pyConst:\n raise error(\n \"Constant value wrong for %s - got %s, wanted %s\"\n % (constName, comConst, pyConst)\n )\n\n\n# Simple handler class. This demo only fires one event.\nclass RandomEventHandler:\n def _Init(self):\n self.fireds = {}\n\n def OnFire(self, no):\n try:\n self.fireds[no] = self.fireds[no] + 1\n except KeyError:\n self.fireds[no] = 0\n\n def OnFireWithNamedParams(self, no, a_bool, out1, out2):\n # This test exists mainly to help with an old bug, where named\n # params would come in reverse.\n Missing = pythoncom.Missing\n if no is not Missing:\n # We know our impl called 'OnFire' with the same ID\n assert no in self.fireds\n assert no + 1 == out1, \"expecting 'out1' param to be ID+1\"\n assert no + 2 == out2, \"expecting 'out2' param to be ID+2\"\n # The middle must be a boolean.\n assert a_bool is Missing or type(a_bool) == bool, \"middle param not a bool\"\n return out1 + 2, out2 + 2\n\n def _DumpFireds(self):\n if not self.fireds:\n print(\"ERROR: Nothing was received!\")\n for firedId, no in self.fireds.items():\n progress(\"ID %d fired %d times\" % (firedId, no))\n\n\n# A simple handler class that derives from object (ie, a \"new style class\") -\n# only relevant for Python 2.x (ie, the 2 classes should be identical in 3.x)\nclass NewStyleRandomEventHandler(object):\n def _Init(self):\n self.fireds = {}\n\n def OnFire(self, no):\n try:\n self.fireds[no] = self.fireds[no] + 1\n except KeyError:\n self.fireds[no] = 0\n\n def OnFireWithNamedParams(self, no, a_bool, out1, out2):\n # This test exists mainly to help with an old bug, where named\n # params would come in reverse.\n Missing = pythoncom.Missing\n if no is not Missing:\n # We know our impl called 'OnFire' with the same ID\n assert no in self.fireds\n assert no + 1 == out1, \"expecting 'out1' param to be ID+1\"\n assert no + 2 == out2, \"expecting 'out2' param to be ID+2\"\n # The middle must be a boolean.\n assert a_bool is Missing or type(a_bool) == bool, \"middle param not a bool\"\n return out1 + 2, out2 + 2\n\n def _DumpFireds(self):\n if not self.fireds:\n print(\"ERROR: Nothing was received!\")\n for firedId, no in self.fireds.items():\n progress(\"ID %d fired %d times\" % (firedId, no))\n\n\n# Test everything which can be tested using both the \"dynamic\" and \"generated\"\n# COM objects (or when there are very subtle differences)\ndef TestCommon(o, is_generated):\n progress(\"Getting counter\")\n counter = o.GetSimpleCounter()\n TestCounter(counter, is_generated)\n\n progress(\"Checking default args\")\n rc = o.TestOptionals()\n if rc[:-1] != (\"def\", 0, 1) or abs(rc[-1] - 3.14) > 0.01:\n print(rc)\n raise error(\"Did not get the optional values correctly\")\n rc = o.TestOptionals(\"Hi\", 2, 3, 1.1)\n if rc[:-1] != (\"Hi\", 2, 3) or abs(rc[-1] - 1.1) > 0.01:\n print(rc)\n raise error(\"Did not get the specified optional values correctly\")\n rc = o.TestOptionals2(0)\n if rc != (0, \"\", 1):\n print(rc)\n raise error(\"Did not get the optional2 values correctly\")\n rc = o.TestOptionals2(1.1, \"Hi\", 2)\n if rc[1:] != (\"Hi\", 2) or abs(rc[0] - 1.1) > 0.01:\n print(rc)\n raise error(\"Did not get the specified optional2 values correctly\")\n\n progress(\"Checking getting/passing IUnknown\")\n check_get_set(o.GetSetUnknown, o)\n progress(\"Checking getting/passing IDispatch\")\n # This might be called with either the interface or the CoClass - but these\n # functions always return from the interface.\n 
expected_class = o.__class__\n # CoClass instances have `default_interface`\n expected_class = getattr(expected_class, \"default_interface\", expected_class)\n if not isinstance(o.GetSetDispatch(o), expected_class):\n raise error(\"GetSetDispatch failed: %r\" % (o.GetSetDispatch(o),))\n progress(\"Checking getting/passing IDispatch of known type\")\n expected_class = o.__class__\n expected_class = getattr(expected_class, \"default_interface\", expected_class)\n if o.GetSetInterface(o).__class__ != expected_class:\n raise error(\"GetSetDispatch failed\")\n\n progress(\"Checking misc args\")\n check_get_set(o.GetSetVariant, 4)\n check_get_set(o.GetSetVariant, \"foo\")\n check_get_set(o.GetSetVariant, o)\n\n # signed/unsigned.\n check_get_set(o.GetSetInt, 0)\n check_get_set(o.GetSetInt, -1)\n check_get_set(o.GetSetInt, 1)\n\n check_get_set(o.GetSetUnsignedInt, 0)\n check_get_set(o.GetSetUnsignedInt, 1)\n check_get_set(o.GetSetUnsignedInt, 0x80000000)\n if o.GetSetUnsignedInt(-1) != 0xFFFFFFFF:\n # -1 is a special case - we accept a negative int (silently converting to\n # unsigned) but when getting it back we convert it to a long.\n raise error(\"unsigned -1 failed\")\n\n check_get_set(o.GetSetLong, 0)\n check_get_set(o.GetSetLong, -1)\n check_get_set(o.GetSetLong, 1)\n\n check_get_set(o.GetSetUnsignedLong, 0)\n check_get_set(o.GetSetUnsignedLong, 1)\n check_get_set(o.GetSetUnsignedLong, 0x80000000)\n # -1 is a special case - see above.\n if o.GetSetUnsignedLong(-1) != 0xFFFFFFFF:\n raise error(\"unsigned -1 failed\")\n\n # We want to explicitly test > 32 bits. py3k has no 'maxint' and\n # 'maxsize+1' is no good on 64bit platforms as its 65 bits!\n big = 2147483647 # sys.maxint on py2k\n for l in big, big + 1, 1 << 65:\n check_get_set(o.GetSetVariant, l)\n\n progress(\"Checking structs\")\n r = o.GetStruct()\n assert r.int_value == 99 and str(r.str_value) == \"Hello from C++\"\n assert o.DoubleString(\"foo\") == \"foofoo\"\n\n progress(\"Checking var args\")\n o.SetVarArgs(\"Hi\", \"There\", \"From\", \"Python\", 1)\n if o.GetLastVarArgs() != (\"Hi\", \"There\", \"From\", \"Python\", 1):\n raise error(\"VarArgs failed -\" + str(o.GetLastVarArgs()))\n\n progress(\"Checking arrays\")\n l = []\n TestApplyResult(o.SetVariantSafeArray, (l,), len(l))\n l = [1, 2, 3, 4]\n TestApplyResult(o.SetVariantSafeArray, (l,), len(l))\n TestApplyResult(\n o.CheckVariantSafeArray,\n (\n (\n 1,\n 2,\n 3,\n 4,\n ),\n ),\n 1,\n )\n\n # and binary\n TestApplyResult(o.SetBinSafeArray, (str2memory(\"foo\\0bar\"),), 7)\n\n progress(\"Checking properties\")\n o.LongProp = 3\n if o.LongProp != 3 or o.IntProp != 3:\n raise error(\"Property value wrong - got %d/%d\" % (o.LongProp, o.IntProp))\n o.LongProp = o.IntProp = -3\n if o.LongProp != -3 or o.IntProp != -3:\n raise error(\"Property value wrong - got %d/%d\" % (o.LongProp, o.IntProp))\n # This number fits in an unsigned long. Attempting to set it to a normal\n # long will involve overflow, which is to be expected. 
But we do\n # expect it to work in a property explicitly a VT_UI4.\n check = 3 * 10 ** 9\n o.ULongProp = check\n if o.ULongProp != check:\n raise error(\n \"Property value wrong - got %d (expected %d)\" % (o.ULongProp, check)\n )\n\n TestApplyResult(o.Test, (\"Unused\", 99), 1) # A bool function\n TestApplyResult(o.Test, (\"Unused\", -1), 1) # A bool function\n TestApplyResult(o.Test, (\"Unused\", 1 == 1), 1) # A bool function\n TestApplyResult(o.Test, (\"Unused\", 0), 0)\n TestApplyResult(o.Test, (\"Unused\", 1 == 0), 0)\n\n assert o.DoubleString(\"foo\") == \"foofoo\"\n\n TestConstant(\"ULongTest1\", ensure_long(0xFFFFFFFF))\n TestConstant(\"ULongTest2\", ensure_long(0x7FFFFFFF))\n TestConstant(\"LongTest1\", ensure_long(-0x7FFFFFFF))\n TestConstant(\"LongTest2\", ensure_long(0x7FFFFFFF))\n TestConstant(\"UCharTest\", 255)\n TestConstant(\"CharTest\", -1)\n # 'Hello World', but the 'r' is the \"Registered\" sign (\\xae)\n TestConstant(\"StringTest\", \"Hello Wo\\xaeld\")\n\n progress(\"Checking dates and times\")\n # For now *all* times passed must be tz-aware.\n now = win32timezone.now()\n # but conversion to and from a VARIANT loses sub-second...\n now = now.replace(microsecond=0)\n later = now + datetime.timedelta(seconds=1)\n TestApplyResult(o.EarliestDate, (now, later), now)\n\n # The below used to fail with `ValueError: microsecond must be in 0..999999` - see #1655\n # https://planetcalc.com/7027/ says that float is: Sun, 25 Mar 1951 7:23:49 am\n assert o.MakeDate(18712.308206013888) == datetime.datetime.fromisoformat(\n \"1951-03-25 07:23:49+00:00\"\n )\n\n progress(\"Checking currency\")\n # currency.\n pythoncom.__future_currency__ = 1\n if o.CurrencyProp != 0:\n raise error(\"Expecting 0, got %r\" % (o.CurrencyProp,))\n for val in (\"1234.5678\", \"1234.56\", \"1234\"):\n o.CurrencyProp = decimal.Decimal(val)\n if o.CurrencyProp != decimal.Decimal(val):\n raise error(\"%s got %r\" % (val, o.CurrencyProp))\n v1 = decimal.Decimal(\"1234.5678\")\n TestApplyResult(o.DoubleCurrency, (v1,), v1 * 2)\n\n v2 = decimal.Decimal(\"9012.3456\")\n TestApplyResult(o.AddCurrencies, (v1, v2), v1 + v2)\n\n TestTrickyTypesWithVariants(o, is_generated)\n progress(\"Checking win32com.client.VARIANT\")\n TestPyVariant(o, is_generated)\n\n\ndef TestTrickyTypesWithVariants(o, is_generated):\n # Test tricky stuff with type handling and generally only works with\n # \"generated\" support but can be worked around using VARIANT.\n if is_generated:\n got = o.TestByRefVariant(2)\n else:\n v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_VARIANT, 2)\n o.TestByRefVariant(v)\n got = v.value\n if got != 4:\n raise error(\"TestByRefVariant failed\")\n\n if is_generated:\n got = o.TestByRefString(\"Foo\")\n else:\n v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_BSTR, \"Foo\")\n o.TestByRefString(v)\n got = v.value\n if got != \"FooFoo\":\n raise error(\"TestByRefString failed\")\n\n # check we can pass ints as a VT_UI1\n vals = [1, 2, 3, 4]\n if is_generated:\n arg = vals\n else:\n arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_UI1, vals)\n TestApplyResult(o.SetBinSafeArray, (arg,), len(vals))\n\n # safearrays of doubles and floats\n vals = [0, 1.1, 2.2, 3.3]\n if is_generated:\n arg = vals\n else:\n arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R8, vals)\n TestApplyResult(o.SetDoubleSafeArray, (arg,), len(vals))\n\n if is_generated:\n arg = vals\n else:\n arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R4, vals)\n TestApplyResult(o.SetFloatSafeArray, (arg,), len(vals))\n\n vals = [1.1, 2.2, 3.3, 4.4]\n expected = 
(1.1 * 2, 2.2 * 2, 3.3 * 2, 4.4 * 2)\n if is_generated:\n TestApplyResult(o.ChangeDoubleSafeArray, (vals,), expected)\n else:\n arg = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_ARRAY | pythoncom.VT_R8, vals)\n o.ChangeDoubleSafeArray(arg)\n if arg.value != expected:\n raise error(\"ChangeDoubleSafeArray got the wrong value\")\n\n if is_generated:\n got = o.DoubleInOutString(\"foo\")\n else:\n v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_BSTR, \"foo\")\n o.DoubleInOutString(v)\n got = v.value\n assert got == \"foofoo\", got\n\n val = decimal.Decimal(\"1234.5678\")\n if is_generated:\n got = o.DoubleCurrencyByVal(val)\n else:\n v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_CY, val)\n o.DoubleCurrencyByVal(v)\n got = v.value\n assert got == val * 2\n\n\ndef TestDynamic():\n progress(\"Testing Dynamic\")\n import win32com.client.dynamic\n\n o = win32com.client.dynamic.DumbDispatch(\"PyCOMTest.PyCOMTest\")\n TestCommon(o, False)\n\n counter = win32com.client.dynamic.DumbDispatch(\"PyCOMTest.SimpleCounter\")\n TestCounter(counter, False)\n\n # Dynamic doesn't know this should be an int, so we get a COM\n # TypeMismatch error.\n try:\n check_get_set_raises(ValueError, o.GetSetInt, \"foo\")\n raise error(\"no exception raised\")\n except pythoncom.com_error as exc:\n if exc.hresult != winerror.DISP_E_TYPEMISMATCH:\n raise\n\n arg1 = VARIANT(pythoncom.VT_R4 | pythoncom.VT_BYREF, 2.0)\n arg2 = VARIANT(pythoncom.VT_BOOL | pythoncom.VT_BYREF, True)\n arg3 = VARIANT(pythoncom.VT_I4 | pythoncom.VT_BYREF, 4)\n o.TestInOut(arg1, arg2, arg3)\n assert arg1.value == 4.0, arg1\n assert arg2.value == False\n assert arg3.value == 8\n\n # damn - props with params don't work for dynamic objects :(\n # o.SetParamProp(0, 1)\n # if o.ParamProp(0) != 1:\n # raise RuntimeError, o.paramProp(0)\n\n\ndef TestGenerated():\n # Create an instance of the server.\n from win32com.client.gencache import EnsureDispatch\n\n o = EnsureDispatch(\"PyCOMTest.PyCOMTest\")\n TestCommon(o, True)\n\n counter = EnsureDispatch(\"PyCOMTest.SimpleCounter\")\n TestCounter(counter, True)\n\n # This dance lets us get a CoClass even though it's not explicitly registered.\n # This is `CoPyComTest`\n from win32com.client.CLSIDToClass import GetClass\n\n coclass_o = GetClass(\"{8EE0C520-5605-11D0-AE5F-CADD4C000000}\")()\n TestCommon(coclass_o, True)\n\n # Test the regression reported in #1753\n assert bool(coclass_o)\n\n # This is `CoSimpleCounter` and the counter tests should work.\n coclass = GetClass(\"{B88DD310-BAE8-11D0-AE86-76F2C1000000}\")()\n TestCounter(coclass, True)\n\n # XXX - this is failing in dynamic tests, but should work fine.\n i1, i2 = o.GetMultipleInterfaces()\n if not isinstance(i1, DispatchBaseClass) or not isinstance(i2, DispatchBaseClass):\n # Yay - is now an instance returned!\n raise error(\n \"GetMultipleInterfaces did not return instances - got '%s', '%s'\" % (i1, i2)\n )\n del i1\n del i2\n\n # Generated knows to only pass a 32bit int, so should fail.\n check_get_set_raises(OverflowError, o.GetSetInt, 0x80000000)\n check_get_set_raises(OverflowError, o.GetSetLong, 0x80000000)\n\n # Generated knows this should be an int, so raises ValueError\n check_get_set_raises(ValueError, o.GetSetInt, \"foo\")\n check_get_set_raises(ValueError, o.GetSetLong, \"foo\")\n\n # Pass some non-sequence objects to our array decoder, and watch it fail.\n try:\n o.SetVariantSafeArray(\"foo\")\n raise error(\"Expected a type error\")\n except TypeError:\n pass\n try:\n o.SetVariantSafeArray(666)\n raise error(\"Expected a type error\")\n except 
TypeError:\n pass\n\n o.GetSimpleSafeArray(None)\n TestApplyResult(o.GetSimpleSafeArray, (None,), tuple(range(10)))\n resultCheck = tuple(range(5)), tuple(range(10)), tuple(range(20))\n TestApplyResult(o.GetSafeArrays, (None, None, None), resultCheck)\n\n l = []\n TestApplyResult(o.SetIntSafeArray, (l,), len(l))\n l = [1, 2, 3, 4]\n TestApplyResult(o.SetIntSafeArray, (l,), len(l))\n ll = [1, 2, 3, 0x100000000]\n TestApplyResult(o.SetLongLongSafeArray, (ll,), len(ll))\n TestApplyResult(o.SetULongLongSafeArray, (ll,), len(ll))\n\n # Tell the server to do what it does!\n TestApplyResult(o.Test2, (constants.Attr2,), constants.Attr2)\n TestApplyResult(o.Test3, (constants.Attr2,), constants.Attr2)\n TestApplyResult(o.Test4, (constants.Attr2,), constants.Attr2)\n TestApplyResult(o.Test5, (constants.Attr2,), constants.Attr2)\n\n TestApplyResult(o.Test6, (constants.WideAttr1,), constants.WideAttr1)\n TestApplyResult(o.Test6, (constants.WideAttr2,), constants.WideAttr2)\n TestApplyResult(o.Test6, (constants.WideAttr3,), constants.WideAttr3)\n TestApplyResult(o.Test6, (constants.WideAttr4,), constants.WideAttr4)\n TestApplyResult(o.Test6, (constants.WideAttr5,), constants.WideAttr5)\n\n TestApplyResult(o.TestInOut, (2.0, True, 4), (4.0, False, 8))\n\n o.SetParamProp(0, 1)\n if o.ParamProp(0) != 1:\n raise RuntimeError(o.paramProp(0))\n\n # Make sure CastTo works - even though it is only casting it to itself!\n o2 = CastTo(o, \"IPyCOMTest\")\n if o != o2:\n raise error(\"CastTo should have returned the same object\")\n\n # Do the connection point thing...\n # Create a connection object.\n progress(\"Testing connection points\")\n o2 = win32com.client.DispatchWithEvents(o, RandomEventHandler)\n TestEvents(o2, o2)\n o2 = win32com.client.DispatchWithEvents(o, NewStyleRandomEventHandler)\n TestEvents(o2, o2)\n # and a plain \"WithEvents\".\n handler = win32com.client.WithEvents(o, RandomEventHandler)\n TestEvents(o, handler)\n handler = win32com.client.WithEvents(o, NewStyleRandomEventHandler)\n TestEvents(o, handler)\n progress(\"Finished generated .py test.\")\n\n\ndef TestEvents(o, handler):\n sessions = []\n handler._Init()\n try:\n for i in range(3):\n session = o.Start()\n sessions.append(session)\n time.sleep(0.5)\n finally:\n # Stop the servers\n for session in sessions:\n o.Stop(session)\n handler._DumpFireds()\n handler.close()\n\n\ndef _TestPyVariant(o, is_generated, val, checker=None):\n if is_generated:\n vt, got = o.GetVariantAndType(val)\n else:\n # Gotta supply all 3 args with the last 2 being explicit variants to\n # get the byref behaviour.\n var_vt = VARIANT(pythoncom.VT_UI2 | pythoncom.VT_BYREF, 0)\n var_result = VARIANT(pythoncom.VT_VARIANT | pythoncom.VT_BYREF, 0)\n o.GetVariantAndType(val, var_vt, var_result)\n vt = var_vt.value\n got = var_result.value\n if checker is not None:\n checker(got)\n return\n # default checking.\n assert vt == val.varianttype, (vt, val.varianttype)\n # Handle our safe-array test - if the passed value is a list of variants,\n # compare against the actual values.\n if type(val.value) in (tuple, list):\n check = [v.value if isinstance(v, VARIANT) else v for v in val.value]\n # pythoncom always returns arrays as tuples.\n got = list(got)\n else:\n check = val.value\n assert type(check) == type(got), (type(check), type(got))\n assert check == got, (check, got)\n\n\ndef _TestPyVariantFails(o, is_generated, val, exc):\n try:\n _TestPyVariant(o, is_generated, val)\n raise error(\"Setting %r didn't raise %s\" % (val, exc))\n except exc:\n pass\n\n\ndef 
TestPyVariant(o, is_generated):\n _TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_UI1, 1))\n _TestPyVariant(\n o, is_generated, VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_UI4, [1, 2, 3])\n )\n _TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_BSTR, \"hello\"))\n _TestPyVariant(\n o,\n is_generated,\n VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_BSTR, [\"hello\", \"there\"]),\n )\n\n def check_dispatch(got):\n assert isinstance(got._oleobj_, pythoncom.TypeIIDs[pythoncom.IID_IDispatch])\n\n _TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_DISPATCH, o), check_dispatch)\n _TestPyVariant(\n o, is_generated, VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_DISPATCH, [o])\n )\n # an array of variants each with a specific type.\n v = VARIANT(\n pythoncom.VT_ARRAY | pythoncom.VT_VARIANT,\n [\n VARIANT(pythoncom.VT_UI4, 1),\n VARIANT(pythoncom.VT_UI4, 2),\n VARIANT(pythoncom.VT_UI4, 3),\n ],\n )\n _TestPyVariant(o, is_generated, v)\n\n # and failures\n _TestPyVariantFails(o, is_generated, VARIANT(pythoncom.VT_UI1, \"foo\"), ValueError)\n\n\ndef TestCounter(counter, bIsGenerated):\n # Test random access into container\n progress(\"Testing counter\", repr(counter))\n import random\n\n for i in range(50):\n num = int(random.random() * len(counter))\n try:\n # XXX - this appears broken by commit 08a14d4deb374eaa06378509cf44078ad467b9dc -\n # We shouldn't need to do generated differently than dynamic.\n if bIsGenerated:\n ret = counter.Item(num + 1)\n else:\n ret = counter[num]\n if ret != num + 1:\n raise error(\n \"Random access into element %d failed - return was %s\"\n % (num, repr(ret))\n )\n except IndexError:\n raise error(\"** IndexError accessing collection element %d\" % num)\n\n num = 0\n if bIsGenerated:\n counter.SetTestProperty(1)\n counter.TestProperty = 1 # Note this has a second, default arg.\n counter.SetTestProperty(1, 2)\n if counter.TestPropertyWithDef != 0:\n raise error(\"Unexpected property set value!\")\n if counter.TestPropertyNoDef(1) != 1:\n raise error(\"Unexpected property set value!\")\n else:\n pass\n # counter.TestProperty = 1\n\n counter.LBound = 1\n counter.UBound = 10\n if counter.LBound != 1 or counter.UBound != 10:\n print(\"** Error - counter did not keep its properties\")\n\n if bIsGenerated:\n bounds = counter.GetBounds()\n if bounds[0] != 1 or bounds[1] != 10:\n raise error(\"** Error - counter did not give the same properties back\")\n counter.SetBounds(bounds[0], bounds[1])\n\n for item in counter:\n num = num + 1\n if num != len(counter):\n raise error(\"*** Length of counter and loop iterations dont match ***\")\n if num != 10:\n raise error(\"*** Unexpected number of loop iterations ***\")\n\n try:\n counter = iter(counter)._iter_.Clone() # Test Clone() and enum directly\n except AttributeError:\n # *sob* - sometimes this is a real iterator and sometimes not :/\n progress(\"Finished testing counter (but skipped the iterator stuff\")\n return\n counter.Reset()\n num = 0\n for item in counter:\n num = num + 1\n if num != 10:\n raise error(\"*** Unexpected number of loop iterations - got %d ***\" % num)\n progress(\"Finished testing counter\")\n\n\ndef TestLocalVTable(ob):\n # Python doesn't fully implement this interface.\n if ob.DoubleString(\"foo\") != \"foofoo\":\n raise error(\"couldn't foofoo\")\n\n\n###############################\n##\n## Some vtable tests of the interface\n##\ndef TestVTable(clsctx=pythoncom.CLSCTX_ALL):\n # Any vtable interfaces marked as dual *should* be able to be\n # correctly implemented as IDispatch.\n ob = 
win32com.client.Dispatch(\"Python.Test.PyCOMTest\")\n TestLocalVTable(ob)\n # Now test it via vtable - use some C++ code to help here as Python can't do it directly yet.\n tester = win32com.client.Dispatch(\"PyCOMTest.PyCOMTest\")\n testee = pythoncom.CoCreateInstance(\n \"Python.Test.PyCOMTest\", None, clsctx, pythoncom.IID_IUnknown\n )\n # check we fail gracefully with None passed.\n try:\n tester.TestMyInterface(None)\n except pythoncom.com_error as details:\n pass\n # and a real object.\n tester.TestMyInterface(testee)\n\n\ndef TestVTable2():\n # We once crashed creating our object with the native interface as\n # the first IID specified. We must do it _after_ the tests, so that\n # Python has already had the gateway registered from last run.\n ob = win32com.client.Dispatch(\"Python.Test.PyCOMTest\")\n iid = pythoncom.InterfaceNames[\"IPyCOMTest\"]\n clsid = \"Python.Test.PyCOMTest\"\n clsctx = pythoncom.CLSCTX_SERVER\n try:\n testee = pythoncom.CoCreateInstance(clsid, None, clsctx, iid)\n except TypeError:\n # Python can't actually _use_ this interface yet, so this is\n # \"expected\". Any COM error is not.\n pass\n\n\ndef TestVTableMI():\n clsctx = pythoncom.CLSCTX_SERVER\n ob = pythoncom.CoCreateInstance(\n \"Python.Test.PyCOMTestMI\", None, clsctx, pythoncom.IID_IUnknown\n )\n # This inherits from IStream.\n ob.QueryInterface(pythoncom.IID_IStream)\n # This implements IStorage, specifying the IID as a string\n ob.QueryInterface(pythoncom.IID_IStorage)\n # IDispatch should always work\n ob.QueryInterface(pythoncom.IID_IDispatch)\n\n iid = pythoncom.InterfaceNames[\"IPyCOMTest\"]\n try:\n ob.QueryInterface(iid)\n except TypeError:\n # Python can't actually _use_ this interface yet, so this is\n # \"expected\". Any COM error is not.\n pass\n\n\ndef TestQueryInterface(long_lived_server=0, iterations=5):\n tester = win32com.client.Dispatch(\"PyCOMTest.PyCOMTest\")\n if long_lived_server:\n # Create a local server\n t0 = win32com.client.Dispatch(\n \"Python.Test.PyCOMTest\", clsctx=pythoncom.CLSCTX_LOCAL_SERVER\n )\n # Request custom interfaces a number of times\n prompt = [\n \"Testing QueryInterface without long-lived local-server #%d of %d...\",\n \"Testing QueryInterface with long-lived local-server #%d of %d...\",\n ]\n\n for i in range(iterations):\n progress(prompt[long_lived_server != 0] % (i + 1, iterations))\n tester.TestQueryInterface()\n\n\nclass Tester(win32com.test.util.TestCase):\n def testVTableInProc(self):\n # We used to crash running this the second time - do it a few times\n for i in range(3):\n progress(\"Testing VTables in-process #%d...\" % (i + 1))\n TestVTable(pythoncom.CLSCTX_INPROC_SERVER)\n\n def testVTableLocalServer(self):\n for i in range(3):\n progress(\"Testing VTables out-of-process #%d...\" % (i + 1))\n TestVTable(pythoncom.CLSCTX_LOCAL_SERVER)\n\n def testVTable2(self):\n for i in range(3):\n TestVTable2()\n\n def testVTableMI(self):\n for i in range(3):\n TestVTableMI()\n\n def testMultiQueryInterface(self):\n TestQueryInterface(0, 6)\n # When we use the custom interface in the presence of a long-lived\n # local server, i.e. a local server that is already running when\n # we request an instance of our COM object, and remains afterwards,\n # then after repeated requests to create an instance of our object\n # the custom interface disappears -- i.e. QueryInterface fails with\n # E_NOINTERFACE. Set the upper range of the following test to 2 to\n # pass this test, i.e. 
TestQueryInterface(1,2)\n TestQueryInterface(1, 6)\n\n def testDynamic(self):\n TestDynamic()\n\n def testGenerated(self):\n TestGenerated()\n\n\nif __name__ == \"__main__\":\n # XXX - todo - Complete hack to crank threading support.\n # Should NOT be necessary\n def NullThreadFunc():\n pass\n\n import _thread\n\n _thread.start_new(NullThreadFunc, ())\n\n if \"-v\" in sys.argv:\n verbose = 1\n\n win32com.test.util.testmain()\n"},"path":{"kind":"string","value":"env/Lib/site-packages/win32com/test/testPyComTest.py"},"size":{"kind":"number","value":29446,"string":"29,446"},"nl_text":{"kind":"string","value":"NOTE - Still seems to be a leak here somewhere gateway count doesnt hit zero. Hence the print statements! Must be free-threaded! This test uses a Python implemented COM server - ensure correctly registered. We had a bg where RegisterInterfaces would fail if gencache had already been run - exercise that here convert a normal int to a long int - used to avoid, eg, '1L' for py3k friendliness py3k - no such thing as a 'long' on py2x, we just use an expression that results in a long what we expect! Simple handler class. This demo only fires one event. This test exists mainly to help with an old bug, where named params would come in reverse. We know our impl called 'OnFire' with the same ID The middle must be a boolean. A simple handler class that derives from object (ie, a \"new style class\") - only relevant for Python 2.x (ie, the 2 classes should be identical in 3.x) This test exists mainly to help with an old bug, where named params would come in reverse. We know our impl called 'OnFire' with the same ID The middle must be a boolean. Test everything which can be tested using both the \"dynamic\" and \"generated\" COM objects (or when there are very subtle differences) This might be called with either the interface or the CoClass - but these functions always return from the interface. CoClass instances have `default_interface` signed/unsigned. -1 is a special case - we accept a negative int (silently converting to unsigned) but when getting it back we convert it to a long. -1 is a special case - see above. We want to explicitly test > 32 bits. py3k has no 'maxint' and 'maxsize+1' is no good on 64bit platforms as its 65 bits! sys.maxint on py2k and binary This number fits in an unsigned long. Attempting to set it to a normal long will involve overflow, which is to be expected. But we do expect it to work in a property explicitly a VT_UI4. A bool function A bool function A bool function 'Hello World', but the 'r' is the \"Registered\" sign (\\xae) For now *all* times passed must be tz-aware. but conversion to and from a VARIANT loses sub-second... The below used to fail with `ValueError: microsecond must be in 0..999999` - see 1655 https://planetcalc.com/7027/ says that float is: Sun, 25 Mar 1951 7:23:49 am currency. Test tricky stuff with type handling and generally only works with \"generated\" support but can be worked around using VARIANT. check we can pass ints as a VT_UI1 safearrays of doubles and floats Dynamic doesn't know this should be an int, so we get a COM TypeMismatch error. damn - props with params don't work for dynamic objects :( o.SetParamProp(0, 1) if o.ParamProp(0) != 1: raise RuntimeError, o.paramProp(0) Create an instance of the server. This dance lets us get a CoClass even though it's not explicitly registered. This is `CoPyComTest` Test the regression reported in 1753 This is `CoSimpleCounter` and the counter tests should work. 
XXX - this is failing in dynamic tests, but should work fine. Yay - is now an instance returned! Generated knows to only pass a 32bit int, so should fail. Generated knows this should be an int, so raises ValueError Pass some non-sequence objects to our array decoder, and watch it fail. Tell the server to do what it does! Make sure CastTo works - even though it is only casting it to itself! Do the connection point thing... Create a connection object. and a plain \"WithEvents\". Stop the servers Gotta supply all 3 args with the last 2 being explicit variants to get the byref behaviour. default checking. Handle our safe-array test - if the passed value is a list of variants, compare against the actual values. pythoncom always returns arrays as tuples. an array of variants each with a specific type. and failures Test random access into container XXX - this appears broken by commit 08a14d4deb374eaa06378509cf44078ad467b9dc - We shouldn't need to do generated differently than dynamic. Note this has a second, default arg. counter.TestProperty = 1 Test Clone() and enum directly *sob* - sometimes this is a real iterator and sometimes not :/ Python doesn't fully implement this interface. Some vtable tests of the interface Any vtable interfaces marked as dual *should* be able to be correctly implemented as IDispatch. Now test it via vtable - use some C++ code to help here as Python can't do it directly yet. check we fail gracefully with None passed. and a real object. We once crashed creating our object with the native interface as the first IID specified. We must do it _after_ the tests, so that Python has already had the gateway registered from last run. Python can't actually _use_ this interface yet, so this is \"expected\". Any COM error is not. This inherits from IStream. This implements IStorage, specifying the IID as a string IDispatch should always work Python can't actually _use_ this interface yet, so this is \"expected\". Any COM error is not. Create a local server Request custom interfaces a number of times We used to crash running this the second time - do it a few times When we use the custom interface in the presence of a long-lived local server, i.e. a local server that is already running when we request an instance of our COM object, and remains afterwards, then after repeated requests to create an instance of our object the custom interface disappears -- i.e. QueryInterface fails with E_NOINTERFACE. Set the upper range of the following test to 2 to pass this test, i.e. TestQueryInterface(1,2) XXX - todo - Complete hack to crank threading support. Should NOT be necessary"},"nl_size":{"kind":"number","value":5515,"string":"5,515"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8858829736709595,"string":"0.885883"}}},{"rowIdx":7853,"cells":{"content":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name\n# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements\n\"\"\"Read individual image files and perform augmentations.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport random\nimport logging\nimport json\nimport warnings\nimport numpy as np\n\ntry:\n import cv2\nexcept ImportError:\n cv2 = None\n\nfrom ..base import numeric_types\nfrom .. import ndarray as nd\nfrom ..ndarray import _internal\nfrom ..ndarray._internal import _cvimresize as imresize\nfrom ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder\nfrom .. import io\nfrom .. import recordio\n\n\ndef imread(filename, *args, **kwargs):\n \"\"\"Read and decode an image to an NDArray.\n\n Note: `imread` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n filename : str\n Name of the image file to be loaded.\n flag : {0, 1}, default 1\n 1 for three channel color output. 0 for grayscale output.\n to_rgb : bool, default True\n True for RGB formatted output (MXNet default).\n False for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> mx.img.imread(\"flower.jpg\")\n \n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> mx.img.imread(\"flower.jpg\", flag=0)\n \n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> mx.img.imread(\"flower.jpg\", to_rgb=0)\n \n \"\"\"\n return _internal._cvimread(filename, *args, **kwargs)\n\n\ndef imdecode(buf, *args, **kwargs):\n \"\"\"Decode an image to an NDArray.\n\n Note: `imdecode` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n buf : str/bytes or numpy.ndarray\n Binary image data as string or numpy ndarray.\n flag : int, optional, default=1\n 1 for three channel color output. 0 for grayscale output.\n to_rgb : int, optional, default=1\n 1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n \n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, flag=0)\n >>> image\n \n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... 
str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, to_rgb=0)\n >>> image\n \n \"\"\"\n if not isinstance(buf, nd.NDArray):\n buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)\n return _internal._cvimdecode(buf, *args, **kwargs)\n\n\ndef scale_down(src_size, size):\n \"\"\"Scales down crop size if it's larger than image size.\n\n If width/height of the crop is larger than the width/height of the image,\n sets the width/height to the width/height of the image.\n\n Parameters\n ----------\n src_size : tuple of int\n Size of the image in (width, height) format.\n size : tuple of int\n Size of the crop in (width, height) format.\n\n Returns\n -------\n tuple of int\n A tuple containing the scaled crop size in (width, height) format.\n\n Example\n --------\n >>> src_size = (640,480)\n >>> size = (720,120)\n >>> new_size = mx.img.scale_down(src_size, size)\n >>> new_size\n (640,106)\n \"\"\"\n w, h = size\n sw, sh = src_size\n if sh < h:\n w, h = float(w * sh) / h, sh\n if sw < w:\n w, h = sw, float(h * sw) / w\n return int(w), int(h)\n\n\ndef _get_interp_method(interp, sizes=()):\n \"\"\"Get the interpolation method for resize functions.\n The major purpose of this function is to wrap a random interp method selection\n and a auto-estimation method.\n\n Parameters\n ----------\n interp : int\n interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n sizes : tuple of int\n (old_height, old_width, new_height, new_width), if None provided, auto(9)\n will return Area(2) anyway.\n\n Returns\n -------\n int\n interp method from 0 to 4\n \"\"\"\n if interp == 9:\n if sizes:\n assert len(sizes) == 4\n oh, ow, nh, nw = sizes\n if nh > oh and nw > ow:\n return 2\n elif nh < oh and nw < ow:\n return 3\n else:\n return 1\n else:\n return 2\n if interp == 10:\n return random.randint(0, 4)\n if interp not in (0, 1, 2, 3, 4):\n raise ValueError('Unknown interp method %d' % interp)\n return interp\n\n\ndef resize_short(src, size, interp=2):\n \"\"\"Resizes shorter edge to size.\n\n Note: `resize_short` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with OpenCV for `resize_short` to work.\n\n Resizes the original image by setting the shorter edge to size\n and setting the longer edge accordingly.\n Resizing function is called from OpenCV.\n\n Parameters\n ----------\n src : NDArray\n The original image.\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method used for resizing the image.\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based 
(resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n\n Returns\n -------\n NDArray\n An 'NDArray' containing the resized image.\n\n Example\n -------\n >>> with open(\"flower.jpeg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n \n >>> size = 640\n >>> new_image = mx.img.resize_short(image, size)\n >>> new_image\n \n \"\"\"\n h, w, _ = src.shape\n if h > w:\n new_h, new_w = size * h // w, size\n else:\n new_h, new_w = size, size * w // h\n return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))\n\n\ndef fixed_crop(src, x0, y0, w, h, size=None, interp=2):\n \"\"\"Crop src at fixed location, and (optionally) resize it to size.\n\n Parameters\n ----------\n src : NDArray\n Input image\n x0 : int\n Left boundary of the cropping area\n y0 : int\n Top boundary of the cropping area\n w : int\n Width of the cropping area\n h : int\n Height of the cropping area\n size : tuple of (w, h)\n Optional, resize to new size after cropping\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n \"\"\"\n out = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))\n if size is not None and (w, h) != size:\n sizes = (h, w, size[1], size[0])\n out = imresize(out, *size, interp=_get_interp_method(interp, sizes))\n return out\n\n\ndef random_crop(src, size, interp=2):\n \"\"\"Randomly crop `src` with `size` (width, height).\n Upsample result if `src` is smaller than `size`.\n\n Parameters\n ----------\n src: Source image `NDArray`\n size: Size of the crop formatted as (width, height). If the `size` is larger\n than the image, then the source image is upsampled to `size` and returned.\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n Example\n -------\n >>> im = mx.nd.array(cv2.imread(\"flower.jpg\"))\n >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))\n >>> print cropped_im\n \n >>> print rect\n (20, 21, 100, 100)\n \"\"\"\n\n h, w, _ = src.shape\n new_w, new_h = scale_down((w, h), size)\n\n x0 = random.randint(0, w - new_w)\n y0 = random.randint(0, h - new_h)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n\ndef center_crop(src, size, interp=2):\n \"\"\"Crops the image `src` to the given `size` by trimming on all four\n sides and preserving the center of the image. 
Upsamples if `src` is smaller\n than `size`.\n\n .. note:: This requires MXNet to be compiled with USE_OPENCV.\n\n Parameters\n ----------\n src : NDArray\n Binary source image data.\n size : list or tuple of int\n The desired output image size.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n The cropped image.\n Tuple\n (x, y, width, height) where x, y are the positions of the crop in the\n original image and width, height the dimensions of the crop.\n\n Example\n -------\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.image.imdecode(str_image)\n >>> image\n \n >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))\n >>> cropped_image\n \n >>> x, y, width, height\n (1241, 910, 1000, 500)\n \"\"\"\n\n h, w, _ = src.shape\n new_w, new_h = scale_down((w, h), size)\n\n x0 = int((w - new_w) / 2)\n y0 = int((h - new_h) / 2)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n\ndef color_normalize(src, mean, std=None):\n \"\"\"Normalize src with mean and std.\n\n Parameters\n ----------\n src : NDArray\n Input image\n mean : NDArray\n RGB mean to be subtracted\n std : NDArray\n RGB standard deviation to be divided\n\n Returns\n -------\n NDArray\n An `NDArray` containing the normalized image.\n \"\"\"\n if mean is not None:\n src -= mean\n if std is not None:\n src /= std\n return src\n\n\ndef random_size_crop(src, size, area, ratio, interp=2, **kwargs):\n \"\"\"Randomly crop src with size. Randomize area and aspect ratio.\n\n Parameters\n ----------\n src : NDArray\n Input image\n size : tuple of (int, int)\n Size of the crop formatted as (width, height).\n area : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\n ratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n \"\"\"\n h, w, _ = src.shape\n src_area = h * w\n\n if 'min_area' in kwargs:\n warnings.warn('`min_area` is deprecated. 
Please use `area` instead.',\n DeprecationWarning)\n area = kwargs.pop('min_area')\n assert not kwargs, \"unexpected keyword arguments for `random_size_crop`.\"\n\n if isinstance(area, numeric_types):\n area = (area, 1.0)\n for _ in range(10):\n target_area = random.uniform(area[0], area[1]) * src_area\n new_ratio = random.uniform(*ratio)\n\n new_w = int(round(np.sqrt(target_area * new_ratio)))\n new_h = int(round(np.sqrt(target_area / new_ratio)))\n\n if random.random() < 0.5:\n new_h, new_w = new_w, new_h\n\n if new_w <= w and new_h <= h:\n x0 = random.randint(0, w - new_w)\n y0 = random.randint(0, h - new_h)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n # fall back to center_crop\n return center_crop(src, size, interp)\n\n\nclass Augmenter(object):\n \"\"\"Image Augmenter base class\"\"\"\n def __init__(self, **kwargs):\n self._kwargs = kwargs\n for k, v in self._kwargs.items():\n if isinstance(v, nd.NDArray):\n v = v.asnumpy()\n if isinstance(v, np.ndarray):\n v = v.tolist()\n self._kwargs[k] = v\n\n def dumps(self):\n \"\"\"Saves the Augmenter to string\n\n Returns\n -------\n str\n JSON formatted string that describes the Augmenter.\n \"\"\"\n return json.dumps([self.__class__.__name__.lower(), self._kwargs])\n\n def __call__(self, src):\n \"\"\"Abstract implementation body\"\"\"\n raise NotImplementedError(\"Must override implementation.\")\n\n\nclass SequentialAug(Augmenter):\n \"\"\"Composing a sequential augmenter list.\n\n Parameters\n ----------\n ts : list of augmenters\n A series of augmenters to be applied in sequential order.\n \"\"\"\n def __init__(self, ts):\n super(SequentialAug, self).__init__()\n self.ts = ts\n\n def dumps(self):\n \"\"\"Override the default to avoid duplicate dump.\"\"\"\n return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n for aug in self.ts:\n src = aug(src)\n return src\n\n\nclass ResizeAug(Augmenter):\n \"\"\"Make resize shorter edge to size augmenter.\n\n Parameters\n ----------\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(ResizeAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return resize_short(src, self.size, self.interp)\n\n\nclass ForceResizeAug(Augmenter):\n \"\"\"Force resize to size regardless of aspect ratio\n\n Parameters\n ----------\n size : tuple of (int, int)\n The desired size as in (width, height)\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(ForceResizeAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n sizes = (src.shape[0], src.shape[1], self.size[1], self.size[0])\n return imresize(src, *self.size, interp=_get_interp_method(self.interp, sizes))\n\n\nclass RandomCropAug(Augmenter):\n \"\"\"Make random crop augmenter\n\n Parameters\n ----------\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method. 
See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(RandomCropAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return random_crop(src, self.size, self.interp)[0]\n\n\nclass RandomSizedCropAug(Augmenter):\n \"\"\"Make random crop with random resizing and random aspect ratio jitter augmenter.\n\n Parameters\n ----------\n size : tuple of (int, int)\n Size of the crop formatted as (width, height).\n area : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\n ratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n \"\"\"\n def __init__(self, size, area, ratio, interp=2, **kwargs):\n super(RandomSizedCropAug, self).__init__(size=size, area=area,\n ratio=ratio, interp=interp)\n self.size = size\n if 'min_area' in kwargs:\n warnings.warn('`min_area` is deprecated. Please use `area` instead.',\n DeprecationWarning)\n self.area = kwargs.pop('min_area')\n else:\n self.area = area\n self.ratio = ratio\n self.interp = interp\n assert not kwargs, \"unexpected keyword arguments for `RandomSizedCropAug`.\"\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0]\n\n\nclass CenterCropAug(Augmenter):\n \"\"\"Make center crop augmenter.\n\n Parameters\n ----------\n size : list or tuple of int\n The desired output image size.\n interp : int, optional, default=2\n Interpolation method. 
See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(CenterCropAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return center_crop(src, self.size, self.interp)[0]\n\n\nclass RandomOrderAug(Augmenter):\n \"\"\"Apply list of augmenters in random order\n\n Parameters\n ----------\n ts : list of augmenters\n A series of augmenters to be applied in random order\n \"\"\"\n def __init__(self, ts):\n super(RandomOrderAug, self).__init__()\n self.ts = ts\n\n def dumps(self):\n \"\"\"Override the default to avoid duplicate dump.\"\"\"\n return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n random.shuffle(self.ts)\n for t in self.ts:\n src = t(src)\n return src\n\n\nclass BrightnessJitterAug(Augmenter):\n \"\"\"Random brightness jitter augmentation.\n\n Parameters\n ----------\n brightness : float\n The brightness jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, brightness):\n super(BrightnessJitterAug, self).__init__(brightness=brightness)\n self.brightness = brightness\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-self.brightness, self.brightness)\n src *= alpha\n return src\n\n\nclass ContrastJitterAug(Augmenter):\n \"\"\"Random contrast jitter augmentation.\n\n Parameters\n ----------\n contrast : float\n The contrast jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, contrast):\n super(ContrastJitterAug, self).__init__(contrast=contrast)\n self.contrast = contrast\n self.coef = nd.array([[[0.299, 0.587, 0.114]]])\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-self.contrast, self.contrast)\n gray = src * self.coef\n gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)\n src *= alpha\n src += gray\n return src\n\n\nclass SaturationJitterAug(Augmenter):\n \"\"\"Random saturation jitter augmentation.\n\n Parameters\n ----------\n saturation : float\n The saturation jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, saturation):\n super(SaturationJitterAug, self).__init__(saturation=saturation)\n self.saturation = saturation\n self.coef = nd.array([[[0.299, 0.587, 0.114]]])\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-self.saturation, self.saturation)\n gray = src * self.coef\n gray = nd.sum(gray, axis=2, keepdims=True)\n gray *= (1.0 - alpha)\n src *= alpha\n src += gray\n return src\n\n\nclass HueJitterAug(Augmenter):\n \"\"\"Random hue jitter augmentation.\n\n Parameters\n ----------\n hue : float\n The hue jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, hue):\n super(HueJitterAug, self).__init__(hue=hue)\n self.hue = hue\n self.tyiq = np.array([[0.299, 0.587, 0.114],\n [0.596, -0.274, -0.321],\n [0.211, -0.523, 0.311]])\n self.ityiq = np.array([[1.0, 0.956, 0.621],\n [1.0, -0.272, -0.647],\n [1.0, -1.107, 1.705]])\n\n def __call__(self, src):\n \"\"\"Augmenter body.\n Using approximate linear transfomation described in:\n https://beesbuzz.biz/code/hsv_color_transforms.php\n \"\"\"\n alpha = random.uniform(-self.hue, self.hue)\n u = np.cos(alpha * np.pi)\n w = np.sin(alpha * np.pi)\n bt = np.array([[1.0, 0.0, 0.0],\n [0.0, u, -w],\n [0.0, w, u]])\n t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T\n src = nd.dot(src, nd.array(t))\n return src\n\n\nclass ColorJitterAug(RandomOrderAug):\n \"\"\"Apply random brightness, contrast and 
saturation jitter in random order.\n\n Parameters\n ----------\n brightness : float\n The brightness jitter ratio range, [0, 1]\n contrast : float\n The contrast jitter ratio range, [0, 1]\n saturation : float\n The saturation jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, brightness, contrast, saturation):\n ts = []\n if brightness > 0:\n ts.append(BrightnessJitterAug(brightness))\n if contrast > 0:\n ts.append(ContrastJitterAug(contrast))\n if saturation > 0:\n ts.append(SaturationJitterAug(saturation))\n super(ColorJitterAug, self).__init__(ts)\n\n\nclass LightingAug(Augmenter):\n \"\"\"Add PCA based noise.\n\n Parameters\n ----------\n alphastd : float\n Noise level\n eigval : 3x1 np.array\n Eigen values\n eigvec : 3x3 np.array\n Eigen vectors\n \"\"\"\n def __init__(self, alphastd, eigval, eigvec):\n super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = np.random.normal(0, self.alphastd, size=(3,))\n rgb = np.dot(self.eigvec * alpha, self.eigval)\n src += nd.array(rgb)\n return src\n\n\nclass ColorNormalizeAug(Augmenter):\n \"\"\"Mean and std normalization.\n\n Parameters\n ----------\n mean : NDArray\n RGB mean to be subtracted\n std : NDArray\n RGB standard deviation to be divided\n \"\"\"\n def __init__(self, mean, std):\n super(ColorNormalizeAug, self).__init__(mean=mean, std=std)\n self.mean = mean if mean is None or isinstance(mean, nd.NDArray) else nd.array(mean)\n self.std = std if std is None or isinstance(std, nd.NDArray) else nd.array(std)\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return color_normalize(src, self.mean, self.std)\n\n\nclass RandomGrayAug(Augmenter):\n \"\"\"Randomly convert to gray image.\n\n Parameters\n ----------\n p : float\n Probability to convert to grayscale\n \"\"\"\n def __init__(self, p):\n super(RandomGrayAug, self).__init__(p=p)\n self.p = p\n self.mat = nd.array([[0.21, 0.21, 0.21],\n [0.72, 0.72, 0.72],\n [0.07, 0.07, 0.07]])\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n if random.random() < self.p:\n src = nd.dot(src, self.mat)\n return src\n\n\nclass HorizontalFlipAug(Augmenter):\n \"\"\"Random horizontal flip.\n\n Parameters\n ----------\n p : float\n Probability to flip image horizontally\n \"\"\"\n def __init__(self, p):\n super(HorizontalFlipAug, self).__init__(p=p)\n self.p = p\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n if random.random() < self.p:\n src = nd.flip(src, axis=1)\n return src\n\n\nclass CastAug(Augmenter):\n \"\"\"Cast to float32\"\"\"\n def __init__(self, typ='float32'):\n super(CastAug, self).__init__(type=typ)\n self.typ = typ\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n src = src.astype(self.typ)\n return src\n\n\ndef CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,\n mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,\n pca_noise=0, rand_gray=0, inter_method=2):\n \"\"\"Creates an augmenter list.\n\n Parameters\n ----------\n data_shape : tuple of int\n Shape for output data\n resize : int\n Resize shorter edge if larger than 0 at the begining\n rand_crop : bool\n Whether to enable random cropping other than center crop\n rand_resize : bool\n Whether to enable random sized cropping, require rand_crop to be enabled\n rand_gray : float\n [0, 1], probability to convert to grayscale for all channels, the number\n of channels 
will not be reduced to 1\n rand_mirror : bool\n Whether to apply horizontal flip to image with probability 0.5\n mean : np.ndarray or None\n Mean pixel values for [r, g, b]\n std : np.ndarray or None\n Standard deviations for [r, g, b]\n brightness : float\n Brightness jittering range (percent)\n contrast : float\n Contrast jittering range (percent)\n saturation : float\n Saturation jittering range (percent)\n hue : float\n Hue jittering range (percent)\n pca_noise : float\n Pca noise level (percent)\n inter_method : int, default=2(Area-based)\n Interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n\n Examples\n --------\n >>> # An example of creating multiple augmenters\n >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,\n ... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,\n ... saturation=0.125, pca_noise=0.05, inter_method=10)\n >>> # dump the details\n >>> for aug in augs:\n ... aug.dumps()\n \"\"\"\n auglist = []\n\n if resize > 0:\n auglist.append(ResizeAug(resize, inter_method))\n\n crop_size = (data_shape[2], data_shape[1])\n if rand_resize:\n assert rand_crop\n auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))\n elif rand_crop:\n auglist.append(RandomCropAug(crop_size, inter_method))\n else:\n auglist.append(CenterCropAug(crop_size, inter_method))\n\n if rand_mirror:\n auglist.append(HorizontalFlipAug(0.5))\n\n auglist.append(CastAug())\n\n if brightness or contrast or saturation:\n auglist.append(ColorJitterAug(brightness, contrast, saturation))\n\n if hue:\n auglist.append(HueJitterAug(hue))\n\n if pca_noise > 0:\n eigval = np.array([55.46, 4.794, 1.148])\n eigvec = np.array([[-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203]])\n auglist.append(LightingAug(pca_noise, eigval, eigvec))\n\n if rand_gray > 0:\n auglist.append(RandomGrayAug(rand_gray))\n\n if mean is True:\n mean = nd.array([123.68, 116.28, 103.53])\n elif mean is not None:\n assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]\n\n if std is True:\n std = nd.array([58.395, 57.12, 57.375])\n elif std is not None:\n assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]\n\n if mean is not None or std is not None:\n auglist.append(ColorNormalizeAug(mean, std))\n\n return auglist\n\n\nclass ImageIter(io.DataIter):\n \"\"\"Image data iterator with a large number of augmentation choices.\n This iterator supports reading from both .rec files and raw image files.\n\n To load input images from .rec files, use `path_imgrec` parameter and to load from raw image\n files, use `path_imglist` and `path_root` parameters.\n\n To use data partition (for distributed training) or shuffling, specify 
`path_imgidx` parameter.\n\n Parameters\n ----------\n batch_size : int\n Number of examples per batch.\n data_shape : tuple\n Data shape in (channels, height, width) format.\n For now, only RGB image with 3 channels is supported.\n label_width : int, optional\n Number of labels per example. The default label width is 1.\n path_imgrec : str\n Path to image record file (.rec).\n Created with tools/im2rec.py or bin/im2rec.\n path_imglist : str\n Path to image list (.lst).\n Created with tools/im2rec.py or with custom script.\n Format: Tab separated record of index, one or more labels and relative_path_from_root.\n imglist: list\n A list of images with the label(s).\n Each item is a list [imagelabel: float or list of float, imgpath].\n path_root : str\n Root folder of image files.\n path_imgidx : str\n Path to image index file. Needed for partition and shuffling when using .rec source.\n shuffle : bool\n Whether to shuffle all images at the start of each iteration or not.\n Can be slow for HDD.\n part_index : int\n Partition index.\n num_parts : int\n Total number of partitions.\n data_name : str\n Data name for provided symbols.\n label_name : str\n Label name for provided symbols.\n dtype : str\n Label data type. Default: float32. Other options: int32, int64, float64\n last_batch_handle : str, optional\n How to handle the last batch.\n This parameter can be 'pad'(default), 'discard' or 'roll_over'.\n If 'pad', the last batch will be padded with data starting from the begining\n If 'discard', the last batch will be discarded\n If 'roll_over', the remaining elements will be rolled over to the next iteration\n kwargs : ...\n More arguments for creating augmenter. See mx.image.CreateAugmenter.\n \"\"\"\n\n def __init__(self, batch_size, data_shape, label_width=1,\n path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,\n shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,\n data_name='data', label_name='softmax_label', dtype='float32',\n last_batch_handle='pad', **kwargs):\n super(ImageIter, self).__init__()\n assert path_imgrec or path_imglist or (isinstance(imglist, list))\n assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'\n num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)\n logging.info('Using %s threads for decoding...', str(num_threads))\n logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'\n ' larger number to use more threads.')\n class_name = self.__class__.__name__\n if path_imgrec:\n logging.info('%s: loading recordio %s...',\n class_name, path_imgrec)\n if path_imgidx:\n self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type\n self.imgidx = list(self.imgrec.keys)\n else:\n self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type\n self.imgidx = None\n else:\n self.imgrec = None\n\n if path_imglist:\n logging.info('%s: loading image list %s...', class_name, path_imglist)\n with open(path_imglist) as fin:\n imglist = {}\n imgkeys = []\n for line in iter(fin.readline, ''):\n line = line.strip().split('\\t')\n label = nd.array(line[1:-1], dtype=dtype)\n key = int(line[0])\n imglist[key] = (label, line[-1])\n imgkeys.append(key)\n self.imglist = imglist\n elif isinstance(imglist, list):\n logging.info('%s: loading image list...', class_name)\n result = {}\n imgkeys = []\n index = 1\n for img in imglist:\n key = str(index) # pylint: disable=redefined-variable-type\n index += 1\n if len(img) > 
2:\n label = nd.array(img[:-1], dtype=dtype)\n elif isinstance(img[0], numeric_types):\n label = nd.array([img[0]], dtype=dtype)\n else:\n label = nd.array(img[0], dtype=dtype)\n result[key] = (label, img[-1])\n imgkeys.append(str(key))\n self.imglist = result\n else:\n self.imglist = None\n self.path_root = path_root\n\n self.check_data_shape(data_shape)\n self.provide_data = [(data_name, (batch_size,) + data_shape)]\n if label_width > 1:\n self.provide_label = [(label_name, (batch_size, label_width))]\n else:\n self.provide_label = [(label_name, (batch_size,))]\n self.batch_size = batch_size\n self.data_shape = data_shape\n self.label_width = label_width\n self.shuffle = shuffle\n if self.imgrec is None:\n self.seq = imgkeys\n elif shuffle or num_parts > 1:\n assert self.imgidx is not None\n self.seq = self.imgidx\n else:\n self.seq = None\n\n if num_parts > 1:\n assert part_index < num_parts\n N = len(self.seq)\n C = N // num_parts\n self.seq = self.seq[part_index * C:(part_index + 1) * C]\n if aug_list is None:\n self.auglist = CreateAugmenter(data_shape, **kwargs)\n else:\n self.auglist = aug_list\n self.cur = 0\n self._allow_read = True\n self.last_batch_handle = last_batch_handle\n self.num_image = len(self.seq) if self.seq is not None else None\n self._cache_data = None\n self._cache_label = None\n self._cache_idx = None\n self.reset()\n\n def reset(self):\n \"\"\"Resets the iterator to the beginning of the data.\"\"\"\n if self.seq is not None and self.shuffle:\n random.shuffle(self.seq)\n if self.last_batch_handle != 'roll_over' or \\\n self._cache_data is None:\n if self.imgrec is not None:\n self.imgrec.reset()\n self.cur = 0\n if self._allow_read is False:\n self._allow_read = True\n\n def hard_reset(self):\n \"\"\"Resets the iterator and ignore roll over data\"\"\"\n if self.seq is not None and self.shuffle:\n random.shuffle(self.seq)\n if self.imgrec is not None:\n self.imgrec.reset()\n self.cur = 0\n self._allow_read = True\n self._cache_data = None\n self._cache_label = None\n self._cache_idx = None\n\n def next_sample(self):\n \"\"\"Helper function for reading in next sample.\"\"\"\n if self._allow_read is False:\n raise StopIteration\n if self.seq is not None:\n if self.cur < self.num_image:\n idx = self.seq[self.cur]\n else:\n if self.last_batch_handle != 'discard':\n self.cur = 0\n raise StopIteration\n self.cur += 1\n if self.imgrec is not None:\n s = self.imgrec.read_idx(idx)\n header, img = recordio.unpack(s)\n if self.imglist is None:\n return header.label, img\n else:\n return self.imglist[idx][0], img\n else:\n label, fname = self.imglist[idx]\n return label, self.read_image(fname)\n else:\n s = self.imgrec.read()\n if s is None:\n if self.last_batch_handle != 'discard':\n self.imgrec.reset()\n raise StopIteration\n header, img = recordio.unpack(s)\n return header.label, img\n\n def _batchify(self, batch_data, batch_label, start=0):\n \"\"\"Helper function for batchifying data\"\"\"\n i = start\n batch_size = self.batch_size\n try:\n while i < batch_size:\n label, s = self.next_sample()\n data = self.imdecode(s)\n try:\n self.check_valid_image(data)\n except RuntimeError as e:\n logging.debug('Invalid image, skipping: %s', str(e))\n continue\n data = self.augmentation_transform(data)\n assert i < batch_size, 'Batch size must be multiples of augmenter output length'\n batch_data[i] = self.postprocess_data(data)\n batch_label[i] = label\n i += 1\n except StopIteration:\n if not i:\n raise StopIteration\n return i\n\n def next(self):\n \"\"\"Returns the next batch 
of data.\"\"\"\n batch_size = self.batch_size\n c, h, w = self.data_shape\n # if last batch data is rolled over\n if self._cache_data is not None:\n # check both the data and label have values\n assert self._cache_label is not None, \"_cache_label didn't have values\"\n assert self._cache_idx is not None, \"_cache_idx didn't have values\"\n batch_data = self._cache_data\n batch_label = self._cache_label\n i = self._cache_idx\n # clear the cache data\n else:\n batch_data = nd.empty((batch_size, c, h, w))\n batch_label = nd.empty(self.provide_label[0][1])\n i = self._batchify(batch_data, batch_label)\n # calculate the padding\n pad = batch_size - i\n # handle padding for the last batch\n if pad != 0:\n if self.last_batch_handle == 'discard':\n raise StopIteration\n # if the option is 'roll_over', throw StopIteration and cache the data\n elif self.last_batch_handle == 'roll_over' and \\\n self._cache_data is None:\n self._cache_data = batch_data\n self._cache_label = batch_label\n self._cache_idx = i\n raise StopIteration\n else:\n _ = self._batchify(batch_data, batch_label, i)\n if self.last_batch_handle == 'pad':\n self._allow_read = False\n else:\n self._cache_data = None\n self._cache_label = None\n self._cache_idx = None\n return io.DataBatch([batch_data], [batch_label], pad=pad)\n\n def check_data_shape(self, data_shape):\n \"\"\"Checks if the input data shape is valid\"\"\"\n if not len(data_shape) == 3:\n raise ValueError('data_shape should have length 3, with dimensions CxHxW')\n if not data_shape[0] == 3:\n raise ValueError('This iterator expects inputs to have 3 channels.')\n\n def check_valid_image(self, data):\n \"\"\"Checks if the input data is valid\"\"\"\n if len(data[0].shape) == 0:\n raise RuntimeError('Data shape is wrong')\n\n def imdecode(self, s):\n \"\"\"Decodes a string or byte string to an NDArray.\n See mx.img.imdecode for more details.\"\"\"\n def locate():\n \"\"\"Locate the image file/index if decode fails.\"\"\"\n if self.seq is not None:\n idx = self.seq[(self.cur % self.num_image) - 1]\n else:\n idx = (self.cur % self.num_image) - 1\n if self.imglist is not None:\n _, fname = self.imglist[idx]\n msg = \"filename: {}\".format(fname)\n else:\n msg = \"index: {}\".format(idx)\n return \"Broken image \" + msg\n try:\n img = imdecode(s)\n except Exception as e:\n raise RuntimeError(\"{}, {}\".format(locate(), e))\n return img\n\n def read_image(self, fname):\n \"\"\"Reads an input image `fname` and returns the decoded raw bytes.\n Example usage:\n ----------\n >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.\n \"\"\"\n with open(os.path.join(self.path_root, fname), 'rb') as fin:\n img = fin.read()\n return img\n\n def augmentation_transform(self, data):\n \"\"\"Transforms input data with specified augmentation.\"\"\"\n for aug in self.auglist:\n data = aug(data)\n return data\n\n def postprocess_data(self, datum):\n \"\"\"Final postprocessing step before image is loaded into the batch.\"\"\"\n return nd.transpose(datum, axes=(2, 0, 1))\n"},"path":{"kind":"string","value":"python/mxnet/image/image.py"},"size":{"kind":"number","value":45108,"string":"45,108"},"nl_text":{"kind":"string","value":"Image Augmenter base class\nRandom brightness jitter augmentation.\n\nParameters\n----------\nbrightness : float\n The brightness jitter ratio range, [0, 1]\nCast to float32\nMake center crop augmenter.\n\nParameters\n----------\nsize : list or tuple of int\n The desired output image size.\ninterp : int, optional, default=2\n Interpolation method. 
See resize_short for details.\nApply random brightness, contrast and saturation jitter in random order.\n\nParameters\n----------\nbrightness : float\n The brightness jitter ratio range, [0, 1]\ncontrast : float\n The contrast jitter ratio range, [0, 1]\nsaturation : float\n The saturation jitter ratio range, [0, 1]\nMean and std normalization.\n\nParameters\n----------\nmean : NDArray\n RGB mean to be subtracted\nstd : NDArray\n RGB standard deviation to be divided\nRandom contrast jitter augmentation.\n\nParameters\n----------\ncontrast : float\n The contrast jitter ratio range, [0, 1]\nForce resize to size regardless of aspect ratio\n\nParameters\n----------\nsize : tuple of (int, int)\n The desired size as in (width, height)\ninterp : int, optional, default=2\n Interpolation method. See resize_short for details.\nRandom horizontal flip.\n\nParameters\n----------\np : float\n Probability to flip image horizontally\nRandom hue jitter augmentation.\n\nParameters\n----------\nhue : float\n The hue jitter ratio range, [0, 1]\nImage data iterator with a large number of augmentation choices.\nThis iterator supports reading from both .rec files and raw image files.\n\nTo load input images from .rec files, use `path_imgrec` parameter and to load from raw image\nfiles, use `path_imglist` and `path_root` parameters.\n\nTo use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.\n\nParameters\n----------\nbatch_size : int\n Number of examples per batch.\ndata_shape : tuple\n Data shape in (channels, height, width) format.\n For now, only RGB image with 3 channels is supported.\nlabel_width : int, optional\n Number of labels per example. The default label width is 1.\npath_imgrec : str\n Path to image record file (.rec).\n Created with tools/im2rec.py or bin/im2rec.\npath_imglist : str\n Path to image list (.lst).\n Created with tools/im2rec.py or with custom script.\n Format: Tab separated record of index, one or more labels and relative_path_from_root.\nimglist: list\n A list of images with the label(s).\n Each item is a list [imagelabel: float or list of float, imgpath].\npath_root : str\n Root folder of image files.\npath_imgidx : str\n Path to image index file. Needed for partition and shuffling when using .rec source.\nshuffle : bool\n Whether to shuffle all images at the start of each iteration or not.\n Can be slow for HDD.\npart_index : int\n Partition index.\nnum_parts : int\n Total number of partitions.\ndata_name : str\n Data name for provided symbols.\nlabel_name : str\n Label name for provided symbols.\ndtype : str\n Label data type. Default: float32. Other options: int32, int64, float64\nlast_batch_handle : str, optional\n How to handle the last batch.\n This parameter can be 'pad'(default), 'discard' or 'roll_over'.\n If 'pad', the last batch will be padded with data starting from the begining\n If 'discard', the last batch will be discarded\n If 'roll_over', the remaining elements will be rolled over to the next iteration\nkwargs : ...\n More arguments for creating augmenter. See mx.image.CreateAugmenter.\nAdd PCA based noise.\n\nParameters\n----------\nalphastd : float\n Noise level\neigval : 3x1 np.array\n Eigen values\neigvec : 3x3 np.array\n Eigen vectors\nMake random crop augmenter\n\nParameters\n----------\nsize : int\n The length to be set for the shorter edge.\ninterp : int, optional, default=2\n Interpolation method. 
See resize_short for details.\nRandomly convert to gray image.\n\nParameters\n----------\np : float\n Probability to convert to grayscale\nApply list of augmenters in random order\n\nParameters\n----------\nts : list of augmenters\n A series of augmenters to be applied in random order\nMake random crop with random resizing and random aspect ratio jitter augmenter.\n\nParameters\n----------\nsize : tuple of (int, int)\n Size of the crop formatted as (width, height).\narea : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\nratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\ninterp: int, optional, default=2\n Interpolation method. See resize_short for details.\nMake resize shorter edge to size augmenter.\n\nParameters\n----------\nsize : int\n The length to be set for the shorter edge.\ninterp : int, optional, default=2\n Interpolation method. See resize_short for details.\nRandom saturation jitter augmentation.\n\nParameters\n----------\nsaturation : float\n The saturation jitter ratio range, [0, 1]\nComposing a sequential augmenter list.\n\nParameters\n----------\nts : list of augmenters\n A series of augmenters to be applied in sequential order.\nCreates an augmenter list.\n\nParameters\n----------\ndata_shape : tuple of int\n Shape for output data\nresize : int\n Resize shorter edge if larger than 0 at the begining\nrand_crop : bool\n Whether to enable random cropping other than center crop\nrand_resize : bool\n Whether to enable random sized cropping, require rand_crop to be enabled\nrand_gray : float\n [0, 1], probability to convert to grayscale for all channels, the number\n of channels will not be reduced to 1\nrand_mirror : bool\n Whether to apply horizontal flip to image with probability 0.5\nmean : np.ndarray or None\n Mean pixel values for [r, g, b]\nstd : np.ndarray or None\n Standard deviations for [r, g, b]\nbrightness : float\n Brightness jittering range (percent)\ncontrast : float\n Contrast jittering range (percent)\nsaturation : float\n Saturation jittering range (percent)\nhue : float\n Hue jittering range (percent)\npca_noise : float\n Pca noise level (percent)\ninter_method : int, default=2(Area-based)\n Interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n\nExamples\n--------\n>>> # An example of creating multiple augmenters\n>>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,\n... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,\n... saturation=0.125, pca_noise=0.05, inter_method=10)\n>>> # dump the details\n>>> for aug in augs:\n... 
aug.dumps()\nAbstract implementation body\nAugmenter body\nAugmenter body\nAugmenter body\nAugmenter body\nAugmenter body\nAugmenter body\nAugmenter body\nAugmenter body\nAugmenter body\nAugmenter body\nAugmenter body.\nUsing approximate linear transfomation described in:\nhttps://beesbuzz.biz/code/hsv_color_transforms.php\nAugmenter body\nAugmenter body\nAugmenter body\nAugmenter body\nAugmenter body\nHelper function for batchifying data\nGet the interpolation method for resize functions.\nThe major purpose of this function is to wrap a random interp method selection\nand a auto-estimation method.\n\nParameters\n----------\ninterp : int\n interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\nsizes : tuple of int\n (old_height, old_width, new_height, new_width), if None provided, auto(9)\n will return Area(2) anyway.\n\nReturns\n-------\nint\n interp method from 0 to 4\nTransforms input data with specified augmentation.\nCrops the image `src` to the given `size` by trimming on all four\nsides and preserving the center of the image. Upsamples if `src` is smaller\nthan `size`.\n\n.. note:: This requires MXNet to be compiled with USE_OPENCV.\n\nParameters\n----------\nsrc : NDArray\n Binary source image data.\nsize : list or tuple of int\n The desired output image size.\ninterp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\nReturns\n-------\nNDArray\n The cropped image.\nTuple\n (x, y, width, height) where x, y are the positions of the crop in the\n original image and width, height the dimensions of the crop.\n\nExample\n-------\n>>> with open(\"flower.jpg\", 'rb') as fp:\n... 
str_image = fp.read()\n...\n>>> image = mx.image.imdecode(str_image)\n>>> image\n\n>>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))\n>>> cropped_image\n\n>>> x, y, width, height\n(1241, 910, 1000, 500)\nChecks if the input data shape is valid\nChecks if the input data is valid\nNormalize src with mean and std.\n\nParameters\n----------\nsrc : NDArray\n Input image\nmean : NDArray\n RGB mean to be subtracted\nstd : NDArray\n RGB standard deviation to be divided\n\nReturns\n-------\nNDArray\n An `NDArray` containing the normalized image.\nSaves the Augmenter to string\n\nReturns\n-------\nstr\n JSON formatted string that describes the Augmenter.\nOverride the default to avoid duplicate dump.\nOverride the default to avoid duplicate dump.\nCrop src at fixed location, and (optionally) resize it to size.\n\nParameters\n----------\nsrc : NDArray\n Input image\nx0 : int\n Left boundary of the cropping area\ny0 : int\n Top boundary of the cropping area\nw : int\n Width of the cropping area\nh : int\n Height of the cropping area\nsize : tuple of (w, h)\n Optional, resize to new size after cropping\ninterp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\nReturns\n-------\nNDArray\n An `NDArray` containing the cropped image.\nResets the iterator and ignore roll over data\nDecode an image to an NDArray.\n\nNote: `imdecode` uses OpenCV (not the CV2 Python library).\nMXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\nParameters\n----------\nbuf : str/bytes or numpy.ndarray\n Binary image data as string or numpy ndarray.\nflag : int, optional, default=1\n 1 for three channel color output. 0 for grayscale output.\nto_rgb : int, optional, default=1\n 1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).\nout : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\nReturns\n-------\nNDArray\n An `NDArray` containing the image.\n\nExample\n-------\n>>> with open(\"flower.jpg\", 'rb') as fp:\n... str_image = fp.read()\n...\n>>> image = mx.img.imdecode(str_image)\n>>> image\n\n\nSet `flag` parameter to 0 to get grayscale output\n\n>>> with open(\"flower.jpg\", 'rb') as fp:\n... str_image = fp.read()\n...\n>>> image = mx.img.imdecode(str_image, flag=0)\n>>> image\n\n\nSet `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n>>> with open(\"flower.jpg\", 'rb') as fp:\n... str_image = fp.read()\n...\n>>> image = mx.img.imdecode(str_image, to_rgb=0)\n>>> image\n\nDecodes a string or byte string to an NDArray.\nSee mx.img.imdecode for more details.\nRead and decode an image to an NDArray.\n\nNote: `imread` uses OpenCV (not the CV2 Python library).\nMXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\nParameters\n----------\nfilename : str\n Name of the image file to be loaded.\nflag : {0, 1}, default 1\n 1 for three channel color output. 0 for grayscale output.\nto_rgb : bool, default True\n True for RGB formatted output (MXNet default).\n False for BGR formatted output (OpenCV default).\nout : NDArray, optional\n Output buffer. 
Use `None` for automatic allocation.\n\nReturns\n-------\nNDArray\n An `NDArray` containing the image.\n\nExample\n-------\n>>> mx.img.imread(\"flower.jpg\")\n\n\nSet `flag` parameter to 0 to get grayscale output\n\n>>> mx.img.imread(\"flower.jpg\", flag=0)\n\n\nSet `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n>>> mx.img.imread(\"flower.jpg\", to_rgb=0)\n\nLocate the image file/index if decode fails.\nReturns the next batch of data.\nHelper function for reading in next sample.\nFinal postprocessing step before image is loaded into the batch.\nRandomly crop `src` with `size` (width, height).\nUpsample result if `src` is smaller than `size`.\n\nParameters\n----------\nsrc: Source image `NDArray`\nsize: Size of the crop formatted as (width, height). If the `size` is larger\n than the image, then the source image is upsampled to `size` and returned.\ninterp: int, optional, default=2\n Interpolation method. See resize_short for details.\nReturns\n-------\nNDArray\n An `NDArray` containing the cropped image.\nTuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\nExample\n-------\n>>> im = mx.nd.array(cv2.imread(\"flower.jpg\"))\n>>> cropped_im, rect = mx.image.random_crop(im, (100, 100))\n>>> print cropped_im\n\n>>> print rect\n(20, 21, 100, 100)\nRandomly crop src with size. Randomize area and aspect ratio.\n\nParameters\n----------\nsrc : NDArray\n Input image\nsize : tuple of (int, int)\n Size of the crop formatted as (width, height).\narea : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\nratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\ninterp: int, optional, default=2\n Interpolation method. See resize_short for details.\nReturns\n-------\nNDArray\n An `NDArray` containing the cropped image.\nTuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\nReads an input image `fname` and returns the decoded raw bytes.\nExample usage:\n----------\n>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.\nResets the iterator to the beginning of the data.\nResizes shorter edge to size.\n\nNote: `resize_short` uses OpenCV (not the CV2 Python library).\nMXNet must have been built with OpenCV for `resize_short` to work.\n\nResizes the original image by setting the shorter edge to size\nand setting the longer edge accordingly.\nResizing function is called from OpenCV.\n\nParameters\n----------\nsrc : NDArray\n The original image.\nsize : int\n The length to be set for the shorter edge.\ninterp : int, optional, default=2\n Interpolation method used for resizing the image.\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. 
(used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n\nReturns\n-------\nNDArray\n An 'NDArray' containing the resized image.\n\nExample\n-------\n>>> with open(\"flower.jpeg\", 'rb') as fp:\n... str_image = fp.read()\n...\n>>> image = mx.img.imdecode(str_image)\n>>> image\n\n>>> size = 640\n>>> new_image = mx.img.resize_short(image, size)\n>>> new_image\n\nScales down crop size if it's larger than image size.\n\nIf width/height of the crop is larger than the width/height of the image,\nsets the width/height to the width/height of the image.\n\nParameters\n----------\nsrc_size : tuple of int\n Size of the image in (width, height) format.\nsize : tuple of int\n Size of the crop in (width, height) format.\n\nReturns\n-------\ntuple of int\n A tuple containing the scaled crop size in (width, height) format.\n\nExample\n--------\n>>> src_size = (640,480)\n>>> size = (720,120)\n>>> new_size = mx.img.scale_down(src_size, size)\n>>> new_size\n(640,106)\nRead individual image files and perform augmentations.\n\n Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements fall back to center_crop pylint: disable=redefined-variable-type pylint: disable=redefined-variable-type pylint: disable=redefined-variable-type if last batch data is rolled over check both the data and label have values clear the cache data calculate the padding handle padding for the last batch if the option is 'roll_over', throw StopIteration and cache the data"},"nl_size":{"kind":"number","value":19440,"string":"19,440"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6377875208854675,"string":"0.637788"}}},{"rowIdx":7854,"cells":{"content":{"kind":"string","value":"# coding: utf-8\n\n\"\"\"\n OpenShift API (with Kubernetes)\n\n OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. 
All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \\\"watch to old error\\\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information. 
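The LIST-then-WATCH contract described above (take a snapshot LIST, watch from the returned resourceVersion, and re-list when the server reports that the watch window has expired) can be exercised with a short client-side loop. The sketch below is illustrative only: it assumes the standard kubernetes Python client is installed and a kubeconfig is available, and it watches pods in a namespace rather than any OpenShift-specific resource.

# Sketch: consistent LIST followed by WATCH, re-listing on HTTP 410
# (the "watch too old" case described above). Names here are illustrative.
from kubernetes import client, config, watch
from kubernetes.client.rest import ApiException

def follow_pods(namespace="default"):
    config.load_kube_config()                    # or config.load_incluster_config()
    v1 = client.CoreV1Api()
    while True:
        snapshot = v1.list_namespaced_pod(namespace)      # snapshot read
        version = snapshot.metadata.resource_version      # consistent point in storage
        try:
            # Every update *after* the snapshot's resourceVersion is observed.
            stream = watch.Watch().stream(v1.list_namespaced_pod, namespace,
                                          resource_version=version)
            for event in stream:
                print(event["type"], event["object"].metadata.name)
        except ApiException as exc:
            if exc.status == 410:    # watch window expired: re-list and resume
                continue
            raise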
\n\n OpenAPI spec version: v3.6.0-alpha.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport unittest\n\nimport openshift.client\nfrom kubernetes.client.rest import ApiException\nfrom openshift.client.models.v1beta1_cpu_target_utilization import V1beta1CPUTargetUtilization\n\n\nclass TestV1beta1CPUTargetUtilization(unittest.TestCase):\n \"\"\" V1beta1CPUTargetUtilization unit test stubs \"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testV1beta1CPUTargetUtilization(self):\n \"\"\"\n Test V1beta1CPUTargetUtilization\n \"\"\"\n model = openshift.client.models.v1beta1_cpu_target_utilization.V1beta1CPUTargetUtilization()\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"path":{"kind":"string","value":"openshift/test/test_v1beta1_cpu_target_utilization.py"},"size":{"kind":"number","value":4258,"string":"4,258"},"nl_text":{"kind":"string","value":"V1beta1CPUTargetUtilization unit test stubs \nTest V1beta1CPUTargetUtilization\nOpenShift API (with Kubernetes)\n\nOpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. 
This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information. \n\nOpenAPI spec version: v3.6.0-alpha.0\n\nGenerated by: https://github.com/swagger-api/swagger-codegen.git\n\n coding: utf-8"},"nl_size":{"kind":"number","value":3605,"string":"3,605"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8775695562362671,"string":"0.87757"}}},{"rowIdx":7855,"cells":{"content":{"kind":"string","value":"import warnings\nimport mmcv\nimport numpy as np\nimport torch\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.core.anchor.builder import ANCHOR_GENERATORS\nfrom mmdet.core.anchor import AnchorGenerator\n\n@ANCHOR_GENERATORS.register_module(force=True)\nclass SSDAnchorGenerator(AnchorGenerator):\n \"\"\"Anchor generator for SSD\n\n Args:\n strides (list[int] | list[tuple[int, int]]): Strides of anchors\n in multiple feature levels.\n ratios (list[float]): The list of ratios between the height and width\n of anchors in a single level.\n basesize_ratio_range (tuple(float)): Ratio range of anchors.\n input_size (int): Size of feature map, 300 for SSD300,\n 512 for SSD512.\n scale_major (bool): Whether to multiply scales first when generating\n base anchors. If true, the anchors in the same row will have the\n same scales. 
It is always set to be False in SSD.\n \"\"\"\n\n def __init__(self,\n strides,\n ratios,\n basesize_ratio_range,\n input_size=300,\n scale_major=True):\n assert len(strides) == len(ratios)\n assert mmcv.is_tuple_of(basesize_ratio_range, float)\n\n self.strides = [_pair(stride) for stride in strides]\n self.input_size = max(input_size) if isinstance(input_size, (list,tuple)) else input_size\n self.centers = [(stride[0] / 2., stride[1] / 2.)\n for stride in self.strides]\n self.basesize_ratio_range = basesize_ratio_range\n\n # calculate anchor ratios and sizes\n min_ratio, max_ratio = basesize_ratio_range\n min_ratio = int(min_ratio * 100)\n max_ratio = int(max_ratio * 100)\n step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))\n min_sizes = []\n max_sizes = []\n for ratio in range(int(min_ratio), int(max_ratio) + 1, step):\n min_sizes.append(int(self.input_size * ratio / 100))\n max_sizes.append(int(self.input_size * (ratio + step) / 100))\n if self.input_size == 300:\n if basesize_ratio_range[0] == 0.15: # SSD300 COCO\n min_sizes.insert(0, int(self.input_size * 7 / 100))\n max_sizes.insert(0, int(self.input_size * 15 / 100))\n elif basesize_ratio_range[0] == 0.2: # SSD300 VOC\n min_sizes.insert(0, int(self.input_size * 10 / 100))\n max_sizes.insert(0, int(self.input_size * 20 / 100))\n else:\n min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))\n max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))\n warnings.warn(\n 'according to original SSD, basesize_ratio_range[0] should be either 0.15'\n 'or 0.2 when input_size is 300, got '\n f'{basesize_ratio_range[0]}.')\n elif self.input_size == 512:\n if basesize_ratio_range[0] == 0.1: # SSD512 COCO\n min_sizes.insert(0, int(self.input_size * 4 / 100))\n max_sizes.insert(0, int(self.input_size * 10 / 100))\n elif basesize_ratio_range[0] == 0.15: # SSD512 VOC\n min_sizes.insert(0, int(self.input_size * 7 / 100))\n max_sizes.insert(0, int(self.input_size * 15 / 100))\n else:\n min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))\n max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))\n warnings.warn('according to original SSD, basesize_ratio_range[0] should be either 0.1'\n 'or 0.15 when input_size is 512, got'\n f' {basesize_ratio_range[0]}.')\n else:\n if basesize_ratio_range[0] == 0.1: # SSD512 COCO\n min_sizes.insert(0, int(self.input_size * 4 / 100))\n max_sizes.insert(0, int(self.input_size * 10 / 100))\n else:\n min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))\n max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))\n\n anchor_ratios = []\n anchor_scales = []\n for k in range(len(self.strides)):\n scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]\n anchor_ratio = [1.]\n for r in ratios[k]:\n anchor_ratio += [1 / r, r] # 4 or 6 ratio\n anchor_ratios.append(torch.Tensor(anchor_ratio))\n anchor_scales.append(torch.Tensor(scales))\n\n self.base_sizes = min_sizes\n self.scales = anchor_scales\n self.ratios = anchor_ratios\n self.scale_major = scale_major\n self.center_offset = 0\n self.base_anchors = self.gen_base_anchors()\n # added for proto export\n self.min_sizes = min_sizes\n self.max_sizes = max_sizes\n\n def gen_base_anchors(self):\n \"\"\"Generate base anchors.\n\n Returns:\n list(torch.Tensor): Base anchors of a feature grid in multiple \\\n feature levels.\n \"\"\"\n multi_level_base_anchors = []\n for i, base_size in enumerate(self.base_sizes):\n base_anchors = self.gen_single_level_base_anchors(\n 
base_size,\n scales=self.scales[i],\n ratios=self.ratios[i],\n center=self.centers[i])\n indices = list(range(len(self.ratios[i])))\n indices.insert(1, len(indices))\n base_anchors = torch.index_select(base_anchors, 0,\n torch.LongTensor(indices))\n multi_level_base_anchors.append(base_anchors)\n return multi_level_base_anchors\n\n def __repr__(self):\n \"\"\"str: a string that describes the module\"\"\"\n indent_str = ' '\n repr_str = self.__class__.__name__ + '(\\n'\n repr_str += f'{indent_str}strides={self.strides},\\n'\n repr_str += f'{indent_str}scales={self.scales},\\n'\n repr_str += f'{indent_str}scale_major={self.scale_major},\\n'\n repr_str += f'{indent_str}input_size={self.input_size},\\n'\n repr_str += f'{indent_str}scales={self.scales},\\n'\n repr_str += f'{indent_str}ratios={self.ratios},\\n'\n repr_str += f'{indent_str}num_levels={self.num_levels},\\n'\n repr_str += f'{indent_str}base_sizes={self.base_sizes},\\n'\n repr_str += f'{indent_str}basesize_ratio_range='\n repr_str += f'{self.basesize_ratio_range})'\n return repr_str\n\n"},"path":{"kind":"string","value":"xmmdet/core/anchor/anchor_generator.py"},"size":{"kind":"number","value":6591,"string":"6,591"},"nl_text":{"kind":"string","value":"Anchor generator for SSD\n\nArgs:\n strides (list[int] | list[tuple[int, int]]): Strides of anchors\n in multiple feature levels.\n ratios (list[float]): The list of ratios between the height and width\n of anchors in a single level.\n basesize_ratio_range (tuple(float)): Ratio range of anchors.\n input_size (int): Size of feature map, 300 for SSD300,\n 512 for SSD512.\n scale_major (bool): Whether to multiply scales first when generating\n base anchors. If true, the anchors in the same row will have the\n same scales. It is always set to be False in SSD.\nstr: a string that describes the module\nGenerate base anchors.\n\nReturns:\n list(torch.Tensor): Base anchors of a feature grid in multiple feature levels.\n\n calculate anchor ratios and sizes SSD300 COCO SSD300 VOC SSD512 COCO SSD512 VOC SSD512 COCO 4 or 6 ratio added for proto export"},"nl_size":{"kind":"number","value":902,"string":"902"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6131839752197266,"string":"0.613184"}}},{"rowIdx":7856,"cells":{"content":{"kind":"string","value":"from __future__ import absolute_import\n\n\"\"\"This module offers a display and interaction frontend with Qt.\n\nIt will try importing PySide first, and if that fails PyQt. 
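Stepping back to the SSDAnchorGenerator record above: the min/max anchor sizes it derives from basesize_ratio_range can be reproduced with a few lines of plain NumPy. The sketch below mirrors the arithmetic in __init__ for the SSD300 COCO setting (input_size=300, basesize_ratio_range=(0.15, 0.9), six feature levels) and is a standalone check, not part of mmdet.

# Sketch: reproduce the SSD300 (COCO) min/max anchor sizes computed in
# SSDAnchorGenerator.__init__ above. Pure NumPy, no mmdet dependency.
import numpy as np

def ssd300_coco_anchor_sizes(input_size=300, basesize_ratio_range=(0.15, 0.9), num_levels=6):
    min_ratio, max_ratio = (int(r * 100) for r in basesize_ratio_range)
    step = int(np.floor(max_ratio - min_ratio) / (num_levels - 2))
    min_sizes, max_sizes = [], []
    for ratio in range(min_ratio, max_ratio + 1, step):
        min_sizes.append(int(input_size * ratio / 100))
        max_sizes.append(int(input_size * (ratio + step) / 100))
    # Extra small-object level prepended for the COCO branch (7% - 15%).
    min_sizes.insert(0, int(input_size * 7 / 100))
    max_sizes.insert(0, int(input_size * 15 / 100))
    return min_sizes, max_sizes

print(ssd300_coco_anchor_sizes())
# -> ([21, 45, 99, 153, 207, 261], [45, 99, 153, 207, 261, 315])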
The code will\nconstantly be tested with both bindings.\"\"\"\n\nfrom .displaywidgets import DisplayWidget, NewDisplayWidget\nfrom .control import ControlWidget\n#from .mainwin import ZasimMainWindow\ndisplay_objects = []\n\nclass ZasimDisplay(object):\n\n simulator = None\n \"\"\"The `Simulator` object for this display.\"\"\"\n\n display = None\n \"\"\"The `BaseDisplayWidget` in use.\"\"\"\n\n window = None\n \"\"\"The `ZasimMainWindow` instance in use.\"\"\"\n\n control = None\n \"\"\"The `ControlWidget` in use.\"\"\"\n\n def __init__(self, simulator):\n \"\"\"Instantiate a Display (thas is: a window with a display widget and\n simulation controls) from a simulator.\n\n :param simulator: The simulator to use.\"\"\"\n\n self.simulator = simulator\n\n if not self.display:\n if 'tiles' in self.simulator.palette_info:\n self.display = NewDisplayWidget(self.simulator)\n else:\n self.display = DisplayWidget(self.simulator)\n\n if self.control is None:\n self.control = ControlWidget(self.simulator)\n\n from .mainwin import ZasimMainWindow\n self.window = ZasimMainWindow(self.simulator, self.display, self.control)\n\n display_objects.append(self.window)\n self.window.show()\n\n def set_scale(self, scale):\n \"\"\"Sets the scale of the display component.\"\"\"\n self.display.set_scale(scale)\n\n"},"path":{"kind":"string","value":"zasim/gui/display.py"},"size":{"kind":"number","value":1565,"string":"1,565"},"nl_text":{"kind":"string","value":"Instantiate a Display (thas is: a window with a display widget and\nsimulation controls) from a simulator.\n\n:param simulator: The simulator to use.\nSets the scale of the display component.\n\nfrom .mainwin import ZasimMainWindow"},"nl_size":{"kind":"number","value":225,"string":"225"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6565561294555664,"string":"0.656556"}}},{"rowIdx":7857,"cells":{"content":{"kind":"string","value":"import numpy\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.utils import type_check\n\n\nclass ResizeImages3D(function_node.FunctionNode):\n\n def __init__(self, output_shape):\n self.out_H = output_shape[0]\n self.out_W = output_shape[1]\n self.out_D = output_shape[2]\n\n def check_type_forward(self, in_types):\n n_in = in_types.size()\n type_check.expect(n_in == 1)\n\n x_type = in_types[0]\n type_check.expect(\n x_type.dtype.char == 'f',\n x_type.ndim == 5\n )\n\n def forward(self, inputs):\n x, = inputs\n xp = cuda.get_array_module(x)\n\n B, C, H, W, D = x.shape\n\n u_1d = xp.linspace(0, W - 1, num=self.out_W)\n v_1d = xp.linspace(0, H - 1, num=self.out_H)\n t_1d = xp.linspace(0, D - 1, num=self.out_D)\n grid = xp.meshgrid(u_1d, v_1d, t_1d)\n u = grid[0].ravel()\n v = grid[1].ravel()\n t = grid[2].ravel()\n\n u0 = xp.floor(u).astype(numpy.int32)\n u0 = u0.clip(0, W - 2)\n u1 = u0 + 1\n v0 = xp.floor(v).astype(numpy.int32)\n v0 = v0.clip(0, H - 2)\n v1 = v0 + 1\n t0 = xp.floor(t).astype(numpy.int32)\n t0 = t0.clip(0, D - 2)\n t1 = t0 + 1\n\n # weights\n w1 = (u1 - u) * (v1 - v) * (t1 - t)\n w2 = (u - u0) * (v1 - v) * (t1 - t)\n w3 = (u1 - u) * (v - v0) * (t1 - t)\n w4 = (u - u0) * (v - v0) * (t1 - t)\n w5 = (u1 - u) * (v1 - v) * (t - t0)\n w6 = (u - u0) * (v1 - v) * (t - t0)\n w7 = (u1 - u) * (v - v0) * (t - t0)\n w8 = (u - u0) * (v - v0) * (t - t0)\n w1 = w1.astype(x.dtype)\n w2 = w2.astype(x.dtype)\n w3 = w3.astype(x.dtype)\n w4 = w4.astype(x.dtype)\n w5 = w5.astype(x.dtype)\n w6 = w6.astype(x.dtype)\n w7 = w7.astype(x.dtype)\n w8 = 
w8.astype(x.dtype)\n\n y = (w1[None, None, :] * x[:, :, v0, u0, t0] +\n w2[None, None, :] * x[:, :, v0, u1, t0] +\n w3[None, None, :] * x[:, :, v1, u0, t0] +\n w4[None, None, :] * x[:, :, v1, u1, t0] +\n w5[None, None, :] * x[:, :, v0, u0, t1] +\n w6[None, None, :] * x[:, :, v0, u1, t1] +\n w7[None, None, :] * x[:, :, v1, u0, t1] +\n w8[None, None, :] * x[:, :, v1, u1, t1])\n y = y.reshape(B, C, self.out_H, self.out_W, self.out_D)\n return y,\n\n def backward(self, indexes, grad_outputs):\n return ResizeImagesGrad3D(\n self.inputs[0].shape,\n (self.out_H, self.out_W, self.out_D)).apply(grad_outputs)\n\n\nclass ResizeImagesGrad3D(function_node.FunctionNode):\n\n def __init__(self, input_shape, output_shape):\n self.out_H = output_shape[0]\n self.out_W = output_shape[1]\n self.out_D = output_shape[2]\n self.input_shape = input_shape\n\n def check_type_forward(self, in_types):\n n_in = in_types.size()\n type_check.expect(n_in == 1)\n\n x_type = in_types[0]\n type_check.expect(\n x_type.dtype.char == 'f',\n x_type.ndim == 5\n )\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n gy, = inputs\n\n B, C, H, W, D = self.input_shape\n\n u_1d = xp.linspace(0, W - 1, num=self.out_W)\n v_1d = xp.linspace(0, H - 1, num=self.out_H)\n t_1d = xp.linspace(0, D - 1, num=self.out_D)\n grid = xp.meshgrid(u_1d, v_1d, t_1d)\n u = grid[0].ravel()\n v = grid[1].ravel()\n t = grid[2].ravel()\n\n u0 = xp.floor(u).astype(numpy.int32)\n u0 = u0.clip(0, W - 2)\n u1 = u0 + 1\n v0 = xp.floor(v).astype(numpy.int32)\n v0 = v0.clip(0, H - 2)\n v1 = v0 + 1\n t0 = xp.floor(t).astype(numpy.int32)\n t0 = t0.clip(0, D - 2)\n t1 = t0 + 1\n\n # weights\n wu0 = u - u0\n wu1 = u1 - u\n wv0 = v - v0\n wv1 = v1 - v\n wt0 = t - t0\n wt1 = t1 - t\n wu0 = wu0.astype(gy.dtype)\n wu1 = wu1.astype(gy.dtype)\n wv0 = wv0.astype(gy.dtype)\n wv1 = wv1.astype(gy.dtype)\n wt0 = wt0.astype(gy.dtype)\n wt1 = wt1.astype(gy.dtype)\n\n # --- gx\n if xp is numpy:\n scatter_add = numpy.add.at\n else:\n scatter_add = cuda.cupyx.scatter_add\n\n gx = xp.zeros(self.input_shape, dtype=gy.dtype)\n gy = gy.reshape(B, C, -1)\n scatter_add(gx, (slice(None), slice(None), v0, u0, t0),\n gy * wu1 * wv1 * wt1)\n scatter_add(gx, (slice(None), slice(None), v0, u1, t0),\n gy * wu0 * wv1 * wt1)\n scatter_add(gx, (slice(None), slice(None), v1, u0, t0),\n gy * wu1 * wv0 * wt1)\n scatter_add(gx, (slice(None), slice(None), v1, u1, t0),\n gy * wu0 * wv0 * wt1)\n scatter_add(gx, (slice(None), slice(None), v0, u0, t1),\n gy * wu1 * wv1 * wt0)\n scatter_add(gx, (slice(None), slice(None), v0, u1, t1),\n gy * wu0 * wv1 * wt0)\n scatter_add(gx, (slice(None), slice(None), v1, u0, t1),\n gy * wu1 * wv0 * wt0)\n scatter_add(gx, (slice(None), slice(None), v1, u1, t1),\n gy * wu0 * wv0 * wt0)\n return gx,\n\n def backward(self, indexes, grad_outputs):\n return ResizeImages3D(\n (self.out_H, self.out_W, self.out_D)).apply(grad_outputs)\n\n\ndef resize_images_3d(x, output_shape):\n \"\"\"Resize images to the given shape.\n This function resizes 3D data to :obj:`output_shape`.\n Currently, only bilinear interpolation is supported as the sampling method.\n Notatition: here is a notation for dimensionalities.\n - :math:`n` is the batch size.\n - :math:`c_I` is the number of the input channels.\n - :math:`h`, :math:`w` and :math:`d` are the height, width and depth of the\n input image, respectively.\n - :math:`h_O`, :math:`w_O` and :math:`d_0` are the height, width and depth\n of the output image.\n Args:\n x (~chainer.Variable):\n Input variable of shape :math:`(n, c_I, 
h, w, d)`.\n output_shape (tuple):\n This is a tuple of length 3 whose values are :obj:`(h_O, w_O, d_O)`.\n Returns:\n ~chainer.Variable: Resized image whose shape is \\\n :math:`(n, c_I, h_O, w_O, d_O)`.\n \"\"\"\n return ResizeImages3D(output_shape).apply((x,))[0]\n"},"path":{"kind":"string","value":"src/links/model/resize_images_3d.py"},"size":{"kind":"number","value":6408,"string":"6,408"},"nl_text":{"kind":"string","value":"Resize images to the given shape.\nThis function resizes 3D data to :obj:`output_shape`.\nCurrently, only bilinear interpolation is supported as the sampling method.\nNotatition: here is a notation for dimensionalities.\n- :math:`n` is the batch size.\n- :math:`c_I` is the number of the input channels.\n- :math:`h`, :math:`w` and :math:`d` are the height, width and depth of the\n input image, respectively.\n- :math:`h_O`, :math:`w_O` and :math:`d_0` are the height, width and depth\n of the output image.\nArgs:\n x (~chainer.Variable):\n Input variable of shape :math:`(n, c_I, h, w, d)`.\n output_shape (tuple):\n This is a tuple of length 3 whose values are :obj:`(h_O, w_O, d_O)`.\nReturns:\n ~chainer.Variable: Resized image whose shape is :math:`(n, c_I, h_O, w_O, d_O)`.\n\n weights weights --- gx"},"nl_size":{"kind":"number","value":823,"string":"823"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8066570162773132,"string":"0.806657"}}},{"rowIdx":7858,"cells":{"content":{"kind":"string","value":"# coding=utf-8\r\n\r\n'''\r\nauthor: ShiLei Miao\r\nanalyses and build model about NBA\r\n'''\r\n\r\nimport numpy as np\r\nfrom numpy import *\r\nimport pandas as pd\r\nfrom pandas import *\r\nimport os\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.cross_validation import KFold\r\nfrom sklearn import metrics\r\n\r\n\r\nos.chdir(r'E:\\PycharmProjects\\Rong360\\dta')\r\n\r\ndef loadDataSetT(path):\r\n data = pd.read_csv(path)\r\n dataSet = data.values[0:,2:]\r\n dataLabel = data.values[0:,1:2] \r\n return dataSet,dataLabel\r\n\r\ndef transLabel(Mat_Labels):\r\n labels = []\r\n for item in Mat_Labels:\r\n labels.append(item[0])\r\n labels = array(labels)\r\n return labels\r\n\r\n\r\n\r\ndef P_YYYY(N_train, target_train, N_test, target_test):\r\n clf = RandomForestClassifier(n_estimators=300, random_state=520341, max_depth=9,\\\r\n min_samples_split=3, class_weight='balanced_subsample')\r\n clf = clf.fit(N_train, target_train)\r\n\r\n pred = clf.predict_proba(N_test)\r\n pred = DataFrame(pred)[0].values\r\n N_auc = metrics.roc_auc_score(target_test, 1 - pred)\r\n print N_auc\r\n print '\\n'\r\n return N_auc, clf\r\n\r\ndef preds_calculate(Mat_Train,Mat_Labels):\r\n kf = KFold(len(Mat_Train), n_folds=10)\r\n NN_auc = []\r\n for train_index, test_index in kf:\r\n X_train, X_test = Mat_Train[train_index], Mat_Train[test_index]\r\n y_train, y_test = Mat_Labels[train_index], Mat_Labels[test_index]\r\n N_auc, clf = P_YYYY(X_train, y_train, X_test, y_test)\r\n NN_auc.append(N_auc)\r\n mean_auc = mean(NN_auc)\r\n print 'AUC均值:',mean_auc\r\n return mean_auc, clf\r\n\r\n\r\n\r\n# 训练集\r\nS_train_user_info = pd.read_csv(r'Generate_dta\\S_train_user_info.csv')\r\nN_train_user_info = pd.read_csv(r'Generate_dta\\N_train_user_info.csv').drop(['lable'],axis=1)\r\nrelation1_train = pd.read_csv(r'Generate_dta\\0909relation1_train.csv')\r\nrelation2_train = pd.read_csv(r'Generate_dta\\0909relation2_train.csv')\r\nN_train_consumption1 = 
pd.read_csv(r'Generate_dta\\N_train_consumption1.csv').drop(['lable'],axis=1)\r\nt_consumption = pd.read_csv(r'Generate_dta\\t_consumption.csv')\r\n\r\n#rong_tag 没有使用 【下面的数据是one-hot后的特征】\r\nrong_tag_train = pd.read_csv(r'Generate_dta\\N_rong_tag_train.csv').drop(['lable'],axis=1)\r\nN_rong_tag_train_var = pd.read_excel(r'Stat_importance_var.xls')\r\nN_rong_tag_train_var = N_rong_tag_train_var[N_rong_tag_train_var['Importance']>10]\r\nN_rong_tag_train = rong_tag_train.reindex(columns = N_rong_tag_train_var['Feature'].values)\r\nN_rong_tag_train['user_id'] = rong_tag_train['user_id']\r\nN_rong_tag_train = N_rong_tag_train.replace([None], [-1])\r\n\r\ntrain = merge(S_train_user_info,N_train_user_info,how=\"left\", left_on='user_id', right_on='user_id')\r\ntrain = merge(train,relation1_train,how=\"left\", left_on='user_id', right_on='user_id')\r\ntrain = merge(train,relation2_train,how=\"left\", left_on='user_id', right_on='user_id')\r\ntrain = merge(train,N_train_consumption1,how=\"left\", left_on='user_id', right_on='user_id')\r\ntrain = merge(train,t_consumption,how=\"left\", left_on='user_id', right_on='user_id')\r\n\r\n\r\ntrain = train.replace([None], [-1])\r\ntrain['category_null'] = (train<0).sum(axis=1)\r\n\r\n## 在统计的train跟test缺失的情况后,选择剔除用户的特征缺失个数为187的【基本都是product_id=2】\r\ntrain = train[train['category_null'] < 187]\r\ntrain = DataFrame(train.values,columns=train.columns)\r\n\r\ntrain = merge(train,N_rong_tag_train,how=\"left\", left_on='user_id', right_on='user_id')\r\n\r\n\r\nMat_Train = train.drop(['user_id','lable','category_null'],axis=1)\r\nMat_Train = array(Mat_Train)\r\nMat_Label = train['lable'].astype(int)\r\n\r\nmean_auc, clf = preds_calculate(Mat_Train,Mat_Label)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"path":{"kind":"string","value":"Procedure/2_M1/train/m2-cv-rf.py"},"size":{"kind":"number","value":3732,"string":"3,732"},"nl_text":{"kind":"string","value":"coding=utf-8 训练集rong_tag 没有使用 【下面的数据是one-hot后的特征】 在统计的train跟test缺失的情况后,选择剔除用户的特征缺失个数为187的【基本都是product_id=2】"},"nl_size":{"kind":"number","value":107,"string":"107"},"nl_language":{"kind":"string","value":"zh"},"nl_language_score":{"kind":"number","value":0.9808525443077087,"string":"0.980853"}}},{"rowIdx":7859,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Post(models.Model):\n\tstatus_ITEMS = (\n \t(1, '上线'),\n\t\t(2, '草稿'),\n\t\t(3, '删除'),\n\t)\n\ttitle = models.CharField(max_length=50, verbose_name='标题')\n\tdesc = models.CharField(max_length=255, blank=True, verbose_name='摘要')\n\tcategory = models.ForeignKey('Category', verbose_name='分类')\n\ttags = models.ManyToManyField('Tag', related_name=\"posts\", verbose_name='标签')\n\tcontent = models.TextField(verbose_name='内容', help_text='注:目前仅支持Markdown格式')\n\tstatus = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')\n\towner = models.ForeignKey(User, verbose_name='作者')\n\tcreated_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')\n\tlasted_update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间')\n\t\n\tdef status_show(self):\n\t\treturn '当前状态:%s'%(self.status)\n\tstatus_show.short_description = '展示站台' \n\tdef __unicode__(self):\n\t\treturn self.title\n\t\n\tclass Meta:\n\t\tverbose_name = verbose_name_plural = '文章'\n\n \n\n\n\nclass Category(models.Model):\n\tstatus_ITEMS = (\n\t\t(1, '可用'),\n\t\t(2, '删除'),\n\n\t)\n\tname = 
models.CharField(max_length=50,verbose_name='名称')\n\tstatus = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')\n\towner = models.ForeignKey(User, verbose_name='作者')\n\tis_nav = models.BooleanField(default=False, verbose_name=\"是否为导航\")\n\tcreated_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')\n# parent = models.ForeignKey('Category', verbose_name='分类')\n\t\n\tdef __unicode__(self):\n\t\treturn self.name\n\t \n\tclass Meta:\n\t\tverbose_name = verbose_name_plural = '分类'\n\n\nclass Tag(models.Model):\n\tstatus_ITEMS= (\n\t\t(1, '正常'),\n\t\t(2, '删除'),\n\t)\n\n\tname = models.CharField(max_length=50,verbose_name='名称')\n\tstatus = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')\n\towner = models.ForeignKey(User, verbose_name='作者')\n\tcreated_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')\n\t\n\tdef __unicode__(self):\n\t\treturn self.name\n\t\n\tclass Meta:\n\t\tverbose_name = verbose_name_plural = '标签'\n\n\n"},"path":{"kind":"string","value":"typeidea/blog/models.py"},"size":{"kind":"number","value":2307,"string":"2,307"},"nl_text":{"kind":"string","value":"-*- coding: utf-8 -*- parent = models.ForeignKey('Category', verbose_name='分类')"},"nl_size":{"kind":"number","value":82,"string":"82"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.4261758029460907,"string":"0.426176"}}},{"rowIdx":7860,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python2.7\n\n# Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT)\n# Bespoke Link to Instruments and Small Satellites (BLISS)\n#\n# Copyright 2016, by the California Institute of Technology. ALL RIGHTS\n# RESERVED. United States Government Sponsorship acknowledged. Any\n# commercial use must be negotiated with the Office of Technology Transfer\n# at the California Institute of Technology.\n#\n# This software may be subject to U.S. export control laws. By accepting\n# this software, the user agrees to comply with all applicable U.S. export\n# laws and regulations. 
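Returning to the blog models just listed: a typical read path filters published posts ('上线', status code 1) in navigation categories and follows the foreign keys efficiently. This is only a sketch and assumes a configured Django project with these models living in an app named blog.

# Sketch: fetch published posts in navigation categories, newest first.
# Assumes the models above are registered in an app called "blog".
from blog.models import Post

def latest_published(tag_name=None, limit=10):
    qs = (Post.objects
          .filter(status=1, category__is_nav=True)   # 1 == '上线' (published)
          .select_related('category', 'owner')       # avoid N+1 queries on the FKs
          .prefetch_related('tags')                  # one extra query for the M2M
          .order_by('-created_time'))
    if tag_name:
        qs = qs.filter(tags__name=tag_name)          # filter through the Tag M2M
    return qs[:limit]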
User has the responsibility to obtain export licenses,\n# or other export authority as may be required before exporting such\n# information to foreign countries or providing access to foreign persons.\n\nimport time\nimport datetime\nimport mock\nimport os\nimport os.path\nimport nose\nimport nose.tools\n\nimport ait.core\nfrom ait.core import dmc\n\nLEAPSECOND_DATA_RESPONSE = '''#\n# Updated through IERS Bulletin C55\n# File expires on: 28 December 2018\n#\n#@\t3754944000\n#\n2272060800\t10\t# 1 Jan 1972\n2287785600\t11\t# 1 Jul 1972\n2303683200\t12\t# 1 Jan 1973\n2335219200\t13\t# 1 Jan 1974\n2366755200\t14\t# 1 Jan 1975\n2398291200\t15\t# 1 Jan 1976\n2429913600\t16\t# 1 Jan 1977\n2461449600\t17\t# 1 Jan 1978\n2492985600\t18\t# 1 Jan 1979\n2524521600\t19\t# 1 Jan 1980\n2571782400\t20\t# 1 Jul 1981\n2603318400\t21\t# 1 Jul 1982\n2634854400\t22\t# 1 Jul 1983\n2698012800\t23\t# 1 Jul 1985\n2776982400\t24\t# 1 Jan 1988\n2840140800\t25\t# 1 Jan 1990\n2871676800\t26\t# 1 Jan 1991\n2918937600\t27\t# 1 Jul 1992\n2950473600\t28\t# 1 Jul 1993\n2982009600\t29\t# 1 Jul 1994\n3029443200\t30\t# 1 Jan 1996\n3076704000\t31\t# 1 Jul 1997\n'''\n\nclass MockResponse:\n def __init__(self, text, status_code):\n self.text = text\n self.status_code = status_code\n\ndef test_getTimestampUTC():\n expected = time.strftime('%Y-%j', time.gmtime())\n\n actual = time.strftime('%Y-%j', time.gmtime(dmc.getTimestampUTC()[0]))\n\n assert actual == expected\n\ndef test_getUTCDatetimeDOY_w_days():\n days = 1\n t = datetime.datetime.utcnow() + datetime.timedelta(days=days)\n timestamp = t.timetuple()\n exp_year = timestamp.tm_year\n exp_day = '%03d' % timestamp.tm_yday\n\n dtime = dmc.getUTCDatetimeDOY(days=days).split('T')[0].split('-')\n assert str(exp_year) == dtime[0]\n assert str(exp_day) == dtime[1]\n\ndef test_leap_second_attrs():\n ait.config.leapseconds._config['filename'] = os.path.join(\n os.path.dirname(__file__), \"testdata\", \"dmc\", \"leapseconds.dat\"\n )\n\n ls = dmc.LeapSeconds\n ls._load_leap_second_data()\n assert ls.leapseconds == ls._data['leapseconds']\n assert ls.valid_date == ls._data['valid']\n assert ls.get_current_GPS_offset() == ls.leapseconds[-1][-1]\n\n@nose.tools.raises(ValueError)\ndef test_leap_second_by_date_invalid_gps_date():\n ait.config.leapseconds._config['filename'] = os.path.join(\n os.path.dirname(__file__), \"testdata\", \"dmc\", \"leapseconds.dat\"\n )\n\n dmc.LeapSeconds._load_leap_second_data()\n dmc.LeapSeconds.get_GPS_offset_for_date(datetime.datetime(1980, 1, 1))\n\ndef test_leap_second_by_date():\n ait.config.leapseconds._config['filename'] = os.path.join(\n os.path.dirname(__file__), \"testdata\", \"dmc\", \"leapseconds.dat\"\n )\n\n ls = dmc.LeapSeconds\n ls._load_leap_second_data()\n assert ls.get_GPS_offset_for_date(datetime.datetime(1981, 1, 1)) == 0\n assert ls.get_GPS_offset_for_date(datetime.datetime(1981, 7, 1)) == 1\n assert ls.get_GPS_offset_for_date(datetime.datetime(1982, 7, 1)) == 2\n assert ls.get_GPS_offset_for_date(datetime.datetime(1983, 7, 1)) == 3\n assert ls.get_GPS_offset_for_date(datetime.datetime(1985, 7, 1)) == 4\n assert ls.get_GPS_offset_for_date(datetime.datetime(1988, 1, 1)) == 5\n assert ls.get_GPS_offset_for_date(datetime.datetime(1990, 1, 1)) == 6\n assert ls.get_GPS_offset_for_date(datetime.datetime(1991, 1, 1)) == 7\n assert ls.get_GPS_offset_for_date(datetime.datetime(1992, 7, 1)) == 8\n assert ls.get_GPS_offset_for_date(datetime.datetime(1993, 7, 1)) == 9\n assert ls.get_GPS_offset_for_date(datetime.datetime(1994, 7, 1)) == 10\n assert 
ls.get_GPS_offset_for_date(datetime.datetime(1996, 1, 1)) == 11\n assert ls.get_GPS_offset_for_date(datetime.datetime(1997, 7, 1)) == 12\n assert ls.get_GPS_offset_for_date(datetime.datetime(1999, 1, 1)) == 13\n assert ls.get_GPS_offset_for_date(datetime.datetime(2006, 1, 1)) == 14\n assert ls.get_GPS_offset_for_date(datetime.datetime(2009, 1, 1)) == 15\n assert ls.get_GPS_offset_for_date(datetime.datetime(2012, 7, 1)) == 16\n assert ls.get_GPS_offset_for_date(datetime.datetime(2015, 7, 1)) == 17\n assert ls.get_GPS_offset_for_date(datetime.datetime(2017, 1, 1)) == 18\n\n # Make sure not supplying a date returns the offset for the current date\n assert (ls.get_GPS_offset_for_date(datetime.datetime.utcnow()) ==\n ls.get_GPS_offset_for_date())\n\ndef test_leap_second_data_load():\n ait.config.leapseconds._config['filename'] = os.path.join(\n os.path.dirname(__file__), \"testdata\", \"dmc\", \"leapseconds.dat\"\n )\n\n assert type(dmc.LeapSeconds.leapseconds) == type([])\n assert dmc.LeapSeconds.leapseconds[0] == (datetime.datetime(1981, 7, 1), 1)\n assert type(dmc.LeapSeconds.valid_date) == type(datetime.datetime.now())\n\n@nose.tools.raises(ValueError)\n@mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 400)))\ndef test_failed_leapsecond_load_and_update():\n ait.config.leapseconds._config['filename'] = os.path.join(\n os.path.dirname(__file__), \"invalidpath\", \"leapseconds.dat\"\n )\n\n dmc.LeapSeconds._data = None\n dmc.LeapSeconds._load_leap_second_data()\n\n@mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 200)))\ndef test_update_leap_second_data():\n ait.config.leapseconds._config['filename'] = os.path.join(\n os.path.dirname(__file__), \"testdata\", \"dmc\", \"tmp_leapseconds.out\"\n )\n\n dmc.LeapSeconds._data = None\n dmc.LeapSeconds._update_leap_second_data()\n\n assert type(dmc.LeapSeconds.leapseconds) == type([])\n assert dmc.LeapSeconds.leapseconds[0] == (datetime.datetime(1981, 7, 1), 1)\n assert type(dmc.LeapSeconds.valid_date) == type(datetime.datetime.now())\n\n assert os.path.isfile(ait.config.leapseconds.filename)\n os.remove(ait.config.leapseconds.filename)\n\n@nose.tools.raises(ValueError)\n@mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 400)))\ndef test_unable_to_pull_leapsecond_data():\n ait.config.leapseconds._config['filename'] = os.path.join(\n os.path.dirname(__file__), \"testdata\", \"dmc\", \"tmp_leapseconds.out\"\n )\n\n dmc.LeapSeconds._data = None\n dmc.LeapSeconds._update_leap_second_data()\n\nif __name__ == '__main__':\n nose.main()\n"},"path":{"kind":"string","value":"ait/core/test/test_dmc.py"},"size":{"kind":"number","value":6860,"string":"6,860"},"nl_text":{"kind":"string","value":"!/usr/bin/env python2.7 Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT) Bespoke Link to Instruments and Small Satellites (BLISS) Copyright 2016, by the California Institute of Technology. ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged. Any commercial use must be negotiated with the Office of Technology Transfer at the California Institute of Technology. This software may be subject to U.S. export control laws. By accepting this software, the user agrees to comply with all applicable U.S. export laws and regulations. 
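The GPS offset behaviour exercised by these assertions reduces to a lookup in a date-sorted table: return the offset of the most recent leap-second entry on or before the requested date, and reject dates before the first entry. A minimal standalone sketch of that lookup follows; the table subset and the helper name are illustrative and not AIT's API.

# Sketch: date -> GPS-UTC leap-second offset via bisection over a sorted table.
# Only a subset of entries is shown; get_offset is illustrative, not ait.core.dmc.
import bisect
import datetime

LEAPSECONDS = [                      # (effective date, GPS-UTC offset)
    (datetime.datetime(1981, 7, 1), 1),
    (datetime.datetime(1982, 7, 1), 2),
    (datetime.datetime(1983, 7, 1), 3),
    (datetime.datetime(2015, 7, 1), 17),
    (datetime.datetime(2017, 1, 1), 18),
]

def get_offset(when=None):
    when = when or datetime.datetime.utcnow()
    dates = [entry[0] for entry in LEAPSECONDS]
    idx = bisect.bisect_right(dates, when)
    if idx == 0:
        raise ValueError("date precedes the first leap-second entry")
    return LEAPSECONDS[idx - 1][1]

assert get_offset(datetime.datetime(1982, 7, 1)) == 2
assert get_offset(datetime.datetime(2016, 6, 1)) == 17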
User has the responsibility to obtain export licenses, or other export authority as may be required before exporting such information to foreign countries or providing access to foreign persons. Make sure not supplying a date returns the offset for the current date"},"nl_size":{"kind":"number","value":837,"string":"837"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.9020805954933167,"string":"0.902081"}}},{"rowIdx":7861,"cells":{"content":{"kind":"string","value":"# Copyright 2018 the GPflow authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numpy.random import randn\nimport tensorflow as tf\nimport pytest\nimport gpflow\nfrom gpflow import logdensities, settings\nfrom gpflow.test_util import session_tf\nfrom scipy.stats import multivariate_normal as mvn\nfrom numpy.testing import assert_allclose\n\n\nrng = np.random.RandomState(1)\n\n\n@pytest.mark.parametrize(\"x\", [randn(4,10), randn(4,1)])\n@pytest.mark.parametrize(\"mu\", [randn(4,10), randn(4,1)])\n@pytest.mark.parametrize(\"cov_sqrt\", [randn(4,4), np.eye(4)])\ndef test_multivariate_normal(session_tf, x, mu, cov_sqrt):\n cov = np.dot(cov_sqrt, cov_sqrt.T)\n L = np.linalg.cholesky(cov)\n\n x_tf = tf.placeholder(settings.float_type)\n mu_tf = tf.placeholder(settings.float_type)\n gp_result = logdensities.multivariate_normal(\n x_tf, mu_tf, tf.convert_to_tensor(L))\n\n gp_result = session_tf.run(gp_result, feed_dict={x_tf: x, mu_tf: mu})\n\n if mu.shape[1] > 1:\n if x.shape[1] > 1:\n sp_result = [mvn.logpdf(x[:,i], mu[:,i], cov) for i in range(mu.shape[1])]\n else:\n sp_result = [mvn.logpdf(x.ravel(), mu[:, i], cov) for i in range(mu.shape[1])]\n else:\n sp_result = mvn.logpdf(x.T, mu.ravel(), cov)\n assert_allclose(gp_result, sp_result)\n\ndef test_shape_asserts(session_tf):\n A = np.random.randn(5)\n B = np.random.randn(5)\n L = np.tril(np.random.randn(5, 5))\n\n # Static shape check:\n with pytest.raises(ValueError):\n tA = tf.identity(A)\n tB = tf.identity(B)\n tL = tf.identity(L)\n res = logdensities.multivariate_normal(tA, tB, tL)\n\n # Dynamic shape check:\n # the following results in a segfault before PR#964\n with pytest.raises(tf.errors.InvalidArgumentError):\n vA = tf.placeholder(tf.float64)\n vB = tf.placeholder(tf.float64)\n vL = tf.placeholder(tf.float64)\n res = logdensities.multivariate_normal(vA, vB, vL)\n session_tf.run(res, {vA: A, vB: B, vL: L})\n"},"path":{"kind":"string","value":"tests/test_logdensities.py"},"size":{"kind":"number","value":2521,"string":"2,521"},"nl_text":{"kind":"string","value":"Copyright 2018 the GPflow authors. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Static shape check: Dynamic shape check: the following results in a segfault before PR964"},"nl_size":{"kind":"number","value":642,"string":"642"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.857075035572052,"string":"0.857075"}}},{"rowIdx":7862,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nwsproto/handshake\n~~~~~~~~~~~~~~~~~~\n\nAn implementation of WebSocket handshakes.\n\"\"\"\nfrom collections import deque\nfrom typing import Deque, Dict, Generator, List, Optional, Union\n\nimport h11\n\nfrom .connection import Connection, ConnectionState, ConnectionType\nfrom .events import AcceptConnection, Event, RejectConnection, RejectData, Request\nfrom .extensions import Extension\nfrom .typing import Headers\nfrom .utilities import (\n generate_accept_token,\n generate_nonce,\n LocalProtocolError,\n normed_header_dict,\n RemoteProtocolError,\n split_comma_header,\n)\n\n# RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake\nWEBSOCKET_VERSION = b\"13\"\n\n\nclass H11Handshake:\n \"\"\"A Handshake implementation for HTTP/1.1 connections.\"\"\"\n\n def __init__(self, connection_type: ConnectionType) -> None:\n self.client = connection_type is ConnectionType.CLIENT\n self._state = ConnectionState.CONNECTING\n\n if self.client:\n self._h11_connection = h11.Connection(h11.CLIENT)\n else:\n self._h11_connection = h11.Connection(h11.SERVER)\n\n self._connection: Optional[Connection] = None\n self._events: Deque[Event] = deque()\n self._initiating_request: Optional[Request] = None\n self._nonce: Optional[bytes] = None\n\n @property\n def state(self) -> ConnectionState:\n return self._state\n\n @property\n def connection(self) -> Optional[Connection]:\n \"\"\"Return the established connection.\n\n This will either return the connection or raise a\n LocalProtocolError if the connection has not yet been\n established.\n\n :rtype: h11.Connection\n \"\"\"\n return self._connection\n\n def initiate_upgrade_connection(self, headers: Headers, path: str) -> None:\n \"\"\"Initiate an upgrade connection.\n\n This should be used if the request has already be received and\n parsed.\n\n :param list headers: HTTP headers represented as a list of 2-tuples.\n :param str path: A URL path.\n \"\"\"\n if self.client:\n raise LocalProtocolError(\n \"Cannot initiate an upgrade connection when acting as the client\"\n )\n upgrade_request = h11.Request(method=b\"GET\", target=path, headers=headers)\n h11_client = h11.Connection(h11.CLIENT)\n self.receive_data(h11_client.send(upgrade_request))\n\n def send(self, event: Event) -> bytes:\n \"\"\"Send an event to the remote.\n\n This will return the bytes to send based on the event or raise\n a LocalProtocolError if the event is not valid given the\n state.\n\n :returns: Data to send to the WebSocket peer.\n :rtype: bytes\n \"\"\"\n data = b\"\"\n if isinstance(event, Request):\n data += self._initiate_connection(event)\n elif isinstance(event, AcceptConnection):\n data += self._accept(event)\n elif isinstance(event, RejectConnection):\n data += self._reject(event)\n elif isinstance(event, RejectData):\n 
data += self._send_reject_data(event)\n else:\n raise LocalProtocolError(\n \"Event {} cannot be sent during the handshake\".format(event)\n )\n return data\n\n def receive_data(self, data: bytes) -> None:\n \"\"\"Receive data from the remote.\n\n A list of events that the remote peer triggered by sending\n this data can be retrieved with :meth:`events`.\n\n :param bytes data: Data received from the WebSocket peer.\n \"\"\"\n self._h11_connection.receive_data(data)\n while True:\n try:\n event = self._h11_connection.next_event()\n except h11.RemoteProtocolError:\n raise RemoteProtocolError(\n \"Bad HTTP message\", event_hint=RejectConnection()\n )\n if (\n isinstance(event, h11.ConnectionClosed)\n or event is h11.NEED_DATA\n or event is h11.PAUSED\n ):\n break\n\n if self.client:\n if isinstance(event, h11.InformationalResponse):\n if event.status_code == 101:\n self._events.append(self._establish_client_connection(event))\n else:\n self._events.append(\n RejectConnection(\n headers=event.headers,\n status_code=event.status_code,\n has_body=False,\n )\n )\n self._state = ConnectionState.CLOSED\n elif isinstance(event, h11.Response):\n self._state = ConnectionState.REJECTING\n self._events.append(\n RejectConnection(\n headers=event.headers,\n status_code=event.status_code,\n has_body=True,\n )\n )\n elif isinstance(event, h11.Data):\n self._events.append(\n RejectData(data=event.data, body_finished=False)\n )\n elif isinstance(event, h11.EndOfMessage):\n self._events.append(RejectData(data=b\"\", body_finished=True))\n self._state = ConnectionState.CLOSED\n else:\n if isinstance(event, h11.Request):\n self._events.append(self._process_connection_request(event))\n\n def events(self) -> Generator[Event, None, None]:\n \"\"\"Return a generator that provides any events that have been generated\n by protocol activity.\n\n :returns: a generator that yields H11 events.\n \"\"\"\n while self._events:\n yield self._events.popleft()\n\n ############ Server mode methods\n\n def _process_connection_request(self, event: h11.Request) -> Request:\n if event.method != b\"GET\":\n raise RemoteProtocolError(\n \"Request method must be GET\", event_hint=RejectConnection()\n )\n connection_tokens = None\n extensions: List[str] = []\n host = None\n key = None\n subprotocols: List[str] = []\n upgrade = b\"\"\n version = None\n headers: Headers = []\n for name, value in event.headers:\n name = name.lower()\n if name == b\"connection\":\n connection_tokens = split_comma_header(value)\n elif name == b\"host\":\n host = value.decode(\"ascii\")\n continue # Skip appending to headers\n elif name == b\"sec-websocket-extensions\":\n extensions = split_comma_header(value)\n continue # Skip appending to headers\n elif name == b\"sec-websocket-key\":\n key = value\n elif name == b\"sec-websocket-protocol\":\n subprotocols = split_comma_header(value)\n continue # Skip appending to headers\n elif name == b\"sec-websocket-version\":\n version = value\n elif name == b\"upgrade\":\n upgrade = value\n headers.append((name, value))\n if connection_tokens is None or not any(\n token.lower() == \"upgrade\" for token in connection_tokens\n ):\n raise RemoteProtocolError(\n \"Missing header, 'Connection: Upgrade'\", event_hint=RejectConnection()\n )\n if version != WEBSOCKET_VERSION:\n raise RemoteProtocolError(\n \"Missing header, 'Sec-WebSocket-Version'\",\n event_hint=RejectConnection(\n headers=[(b\"Sec-WebSocket-Version\", WEBSOCKET_VERSION)],\n status_code=426,\n ),\n )\n if key is None:\n raise RemoteProtocolError(\n 
\"Missing header, 'Sec-WebSocket-Key'\", event_hint=RejectConnection()\n )\n if upgrade.lower() != b\"websocket\":\n raise RemoteProtocolError(\n \"Missing header, 'Upgrade: WebSocket'\", event_hint=RejectConnection()\n )\n if version is None:\n raise RemoteProtocolError(\n \"Missing header, 'Sec-WebSocket-Version'\", event_hint=RejectConnection()\n )\n\n self._initiating_request = Request(\n extensions=extensions,\n extra_headers=headers,\n host=host,\n subprotocols=subprotocols,\n target=event.target.decode(\"ascii\"),\n )\n return self._initiating_request\n\n def _accept(self, event: AcceptConnection) -> bytes:\n request_headers = normed_header_dict(self._initiating_request.extra_headers)\n\n nonce = request_headers[b\"sec-websocket-key\"]\n accept_token = generate_accept_token(nonce)\n\n headers = [\n (b\"Upgrade\", b\"WebSocket\"),\n (b\"Connection\", b\"Upgrade\"),\n (b\"Sec-WebSocket-Accept\", accept_token),\n ]\n\n if event.subprotocol is not None:\n if event.subprotocol not in self._initiating_request.subprotocols:\n raise LocalProtocolError(\n \"unexpected subprotocol {}\".format(event.subprotocol)\n )\n headers.append(\n (b\"Sec-WebSocket-Protocol\", event.subprotocol.encode(\"ascii\"))\n )\n\n if event.extensions:\n accepts = server_extensions_handshake( # type: ignore\n self._initiating_request.extensions, event.extensions\n )\n if accepts:\n headers.append((b\"Sec-WebSocket-Extensions\", accepts))\n\n response = h11.InformationalResponse(\n status_code=101, headers=headers + event.extra_headers\n )\n self._connection = Connection(\n ConnectionType.CLIENT if self.client else ConnectionType.SERVER,\n event.extensions,\n )\n self._state = ConnectionState.OPEN\n return self._h11_connection.send(response)\n\n def _reject(self, event: RejectConnection) -> bytes:\n if self.state != ConnectionState.CONNECTING:\n raise LocalProtocolError(\n \"Connection cannot be rejected in state %s\" % self.state\n )\n\n headers = event.headers\n if not event.has_body:\n headers.append((b\"content-length\", b\"0\"))\n response = h11.Response(status_code=event.status_code, headers=headers)\n data = self._h11_connection.send(response)\n self._state = ConnectionState.REJECTING\n if not event.has_body:\n data += self._h11_connection.send(h11.EndOfMessage())\n self._state = ConnectionState.CLOSED\n return data\n\n def _send_reject_data(self, event: RejectData) -> bytes:\n if self.state != ConnectionState.REJECTING:\n raise LocalProtocolError(\n \"Cannot send rejection data in state {}\".format(self.state)\n )\n\n data = self._h11_connection.send(h11.Data(data=event.data))\n if event.body_finished:\n data += self._h11_connection.send(h11.EndOfMessage())\n self._state = ConnectionState.CLOSED\n return data\n\n ############ Client mode methods\n\n def _initiate_connection(self, request: Request) -> bytes:\n self._initiating_request = request\n self._nonce = generate_nonce()\n\n headers = [\n (b\"Host\", request.host.encode(\"ascii\")),\n (b\"Upgrade\", b\"WebSocket\"),\n (b\"Connection\", b\"Upgrade\"),\n (b\"Sec-WebSocket-Key\", self._nonce),\n (b\"Sec-WebSocket-Version\", WEBSOCKET_VERSION),\n ]\n\n if request.subprotocols:\n headers.append(\n (\n b\"Sec-WebSocket-Protocol\",\n (\", \".join(request.subprotocols)).encode(\"ascii\"),\n )\n )\n\n if request.extensions:\n offers = {e.name: e.offer() for e in request.extensions} # type: ignore\n extensions = []\n for name, params in offers.items():\n name = name.encode(\"ascii\")\n if params is True:\n extensions.append(name)\n elif params:\n 
extensions.append(\n b\"%s; %s\" % (name, params.encode(\"ascii\")) # type: ignore\n )\n if extensions:\n headers.append((b\"Sec-WebSocket-Extensions\", b\", \".join(extensions)))\n\n upgrade = h11.Request(\n method=b\"GET\",\n target=request.target.encode(\"ascii\"),\n headers=headers + request.extra_headers,\n )\n return self._h11_connection.send(upgrade)\n\n def _establish_client_connection(\n self, event: h11.InformationalResponse\n ) -> AcceptConnection: # noqa: MC0001\n accept = None\n connection_tokens = None\n accepts: List[str] = []\n subprotocol = None\n upgrade = b\"\"\n headers: Headers = []\n for name, value in event.headers:\n name = name.lower()\n if name == b\"connection\":\n connection_tokens = split_comma_header(value)\n continue # Skip appending to headers\n elif name == b\"sec-websocket-extensions\":\n accepts = split_comma_header(value)\n continue # Skip appending to headers\n elif name == b\"sec-websocket-accept\":\n accept = value\n continue # Skip appending to headers\n elif name == b\"sec-websocket-protocol\":\n subprotocol = value\n continue # Skip appending to headers\n elif name == b\"upgrade\":\n upgrade = value\n continue # Skip appending to headers\n headers.append((name, value))\n\n if connection_tokens is None or not any(\n token.lower() == \"upgrade\" for token in connection_tokens\n ):\n raise RemoteProtocolError(\n \"Missing header, 'Connection: Upgrade'\", event_hint=RejectConnection()\n )\n if upgrade.lower() != b\"websocket\":\n raise RemoteProtocolError(\n \"Missing header, 'Upgrade: WebSocket'\", event_hint=RejectConnection()\n )\n accept_token = generate_accept_token(self._nonce)\n if accept != accept_token:\n raise RemoteProtocolError(\"Bad accept token\", event_hint=RejectConnection())\n if subprotocol is not None:\n subprotocol = subprotocol.decode(\"ascii\")\n if subprotocol not in self._initiating_request.subprotocols:\n raise RemoteProtocolError(\n \"unrecognized subprotocol {}\".format(subprotocol),\n event_hint=RejectConnection(),\n )\n extensions = client_extensions_handshake( # type: ignore\n accepts, self._initiating_request.extensions\n )\n\n self._connection = Connection(\n ConnectionType.CLIENT if self.client else ConnectionType.SERVER,\n extensions,\n self._h11_connection.trailing_data[0],\n )\n self._state = ConnectionState.OPEN\n return AcceptConnection(\n extensions=extensions, extra_headers=headers, subprotocol=subprotocol\n )\n\n def __repr__(self) -> str:\n return \"{}(client={}, state={})\".format(\n self.__class__.__name__, self.client, self.state\n )\n\n\ndef server_extensions_handshake(\n requested: List[str], supported: List[Extension]\n) -> Optional[bytes]:\n \"\"\"Agree on the extensions to use returning an appropriate header value.\n\n This returns None if there are no agreed extensions\n \"\"\"\n accepts: Dict[str, Union[bool, bytes]] = {}\n for offer in requested:\n name = offer.split(\";\", 1)[0].strip()\n for extension in supported:\n if extension.name == name:\n accept = extension.accept(offer)\n if accept is True:\n accepts[extension.name] = True\n elif accept is not False and accept is not None:\n accepts[extension.name] = accept.encode(\"ascii\") # type: ignore\n\n if accepts:\n extensions: List[bytes] = []\n for name, params in accepts.items():\n name = name.encode(\"ascii\") # type: ignore\n if params is True:\n extensions.append(name) # type: ignore\n else:\n if params == b\"\":\n extensions.append(b\"%s\" % (name))\n else:\n extensions.append(b\"%s; %s\" % (name, params))\n return b\", 
\".join(extensions)\n\n return None\n\n\ndef client_extensions_handshake(\n accepted: List[str], supported: List[Extension]\n) -> List[Extension]:\n # This raises RemoteProtocolError is the accepted extension is not\n # supported.\n extensions = []\n for accept in accepted:\n name = accept.split(\";\", 1)[0].strip()\n for extension in supported:\n if extension.name == name:\n extension.finalize(accept)\n extensions.append(extension)\n break\n else:\n raise RemoteProtocolError(\n \"unrecognized extension {}\".format(name), event_hint=RejectConnection()\n )\n return extensions\n"},"path":{"kind":"string","value":"wsproto/handshake.py"},"size":{"kind":"number","value":17527,"string":"17,527"},"nl_text":{"kind":"string","value":"A Handshake implementation for HTTP/1.1 connections.\nReturn the established connection.\n\nThis will either return the connection or raise a\nLocalProtocolError if the connection has not yet been\nestablished.\n\n:rtype: h11.Connection\nReturn a generator that provides any events that have been generated\nby protocol activity.\n\n:returns: a generator that yields H11 events.\nInitiate an upgrade connection.\n\nThis should be used if the request has already be received and\nparsed.\n\n:param list headers: HTTP headers represented as a list of 2-tuples.\n:param str path: A URL path.\nReceive data from the remote.\n\nA list of events that the remote peer triggered by sending\nthis data can be retrieved with :meth:`events`.\n\n:param bytes data: Data received from the WebSocket peer.\nSend an event to the remote.\n\nThis will return the bytes to send based on the event or raise\na LocalProtocolError if the event is not valid given the\nstate.\n\n:returns: Data to send to the WebSocket peer.\n:rtype: bytes\nAgree on the extensions to use returning an appropriate header value.\n\nThis returns None if there are no agreed extensions\nwsproto/handshake\n~~~~~~~~~~~~~~~~~~\n\nAn implementation of WebSocket handshakes.\n\n -*- coding: utf-8 -*- RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake Server mode methods Skip appending to headers Skip appending to headers Skip appending to headers type: ignore Client mode methods type: ignore type: ignore noqa: MC0001 Skip appending to headers Skip appending to headers Skip appending to headers Skip appending to headers Skip appending to headers type: ignore type: ignore type: ignore type: ignore This raises RemoteProtocolError is the accepted extension is not supported."},"nl_size":{"kind":"number","value":1707,"string":"1,707"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8176034092903137,"string":"0.817603"}}},{"rowIdx":7863,"cells":{"content":{"kind":"string","value":"import numpy as np\nfrom astropy.io import fits\nfrom scipy.interpolate import interp1d\n\n\n# Fitting Sline3\n\n\ndef fit_spline3(y, x, order=3, nsum=3):\n\n\n y_resampled = [np.median(y[i:i + nsum]) for i in range(0, len(y) - len(y) % nsum, nsum)]\n x_resampled = np.linspace(0, len(y), len(y_resampled))\n\n # Fitting\n f = interp1d(x_resampled, y_resampled, kind=order, bounds_error=True)\n\n # Return function to be constructed with any other x array\n return f\n\n\n# Local Minima and Maxima\ndef local_minmax(data, nmin=2, nmax=2):\n # Identifying indices of local minima-maxima points\n id_min = (np.gradient(np.sign(np.gradient(data))) > 0).nonzero()[0] # index of local min\n id_max = (np.gradient(np.sign(np.gradient(data))) < 0).nonzero()[0] # index of local max\n\n # Taking values at min/max points\n 
list_min, list_max = data[id_min], data[id_max]\n\n # Sorting minima-maxima values (bigger --> lower)\n list_min, id_min = (list(p) for p in zip(*sorted(zip(list_min, id_min), reverse=False)))\n list_max, id_max = (list(p) for p in zip(*sorted(zip(list_max, id_max), reverse=True)))\n\n # Taking the desired number of local minima-maxima points\n list_min, list_max, id_min, id_max = list_min[0:nmin], list_max[0:nmax], id_min[0:nmin], id_max[0:nmax]\n\n return list_min, list_max, id_min, id_max\n\n\ndef trim_slitedge(flat, plot=True):\n # Getting input data\n ccddata = fits.getdata(flat, ignore_missing_end=True)\n\n # Collapse flat in the dispersion direction\n flat_collapsed = fits.getdata(flat, ignore_missing_end=True).sum(axis=1) / ccddata.shape[1]\n lines = np.arange(0, flat_collapsed.size, 1)\n\n # Excluding first pixels in the spatial direction\n cut = 3\n c_flat = flat_collapsed[cut:-cut]\n c_lines = np.arange(0, c_flat.size, 1)\n\n # Fittin cubic spline. It's working very well with order=5, nsum=2\n func_splin3 = fit_spline3(c_flat, c_lines, order=5, nsum=2)\n smooth_flat = func_splin3(c_lines)\n\n # Compute 1st and flat smoothed\n dy = np.gradient(smooth_flat)\n dy2 = np.gradient(dy)\n\n # Regions to compute local minina-maxima\n # Region one: it represent first 40 percent of all data\n # Region two: ... last 40%\n pixa, pixb = int(len(c_flat) * 0.4), int(len(c_flat) * 0.6)\n dy2_one, dy2_two = dy2[0:pixa], dy2[pixb:]\n\n # Reg. 1: Compute local min/max of the 2nd derivative\n list_min_1, list_max_1, id_min_1, id_max_1 = local_minmax(dy2_one, nmin=1, nmax=1)\n list_min_2, list_max_2, id_min_2, id_max_2 = local_minmax(dy2_two, nmin=1, nmax=1)\n\n # Indice have to be reshifted to the original indices of the function dy2\n id_min_2 = np.array(id_min_2) + pixb\n\n # Slit edges are the local maxima/minima 1/2 [accounting the cutted pixels]\n slit_1, slit_2 = int(np.array(id_min_1) + cut), int(np.array(id_min_2) + cut)\n\n print slit_1, slit_2\n\n if plot is True:\n import matplotlib.pyplot as plt\n c_lines += cut\n plt.plot(lines, flat_collapsed, 'k-', label='Flat Collapsed')\n plt.plot(lines[slit_1:slit_2], flat_collapsed[slit_1:slit_2], 'r-', label = 'Cutted Flat')\n plt.plot(c_lines, dy, 'g-', label=\"Dy/dx\")\n plt.plot(c_lines, dy2, 'y-', label=\"Dy2/dx\")\n plt.plot(slit_1, list_min_1, 'bo', label='Slit Edge 1 ')\n plt.plot(slit_2, list_min_2, 'ro', label='Slit Edge 2')\n plt.xlim(lines.min() - 50, lines.max() + 50)\n plt.legend(loc='best')\n plt.show()\n\n return slit_1, slit_2\n\nflat = '/home/davidsanm/PyCharmProjects/GoodmanDataReduction/2016-03-20/RED/master_flat_600.fits'\ntrim_slitedge(flat, plot = True)"},"path":{"kind":"string","value":"trim_slitedge.py"},"size":{"kind":"number","value":3601,"string":"3,601"},"nl_text":{"kind":"string","value":"Fitting Sline3 Fitting Return function to be constructed with any other x array Local Minima and Maxima Identifying indices of local minima-maxima points index of local min index of local max Taking values at min/max points Sorting minima-maxima values (bigger --> lower) Taking the desired number of local minima-maxima points Getting input data Collapse flat in the dispersion direction Excluding first pixels in the spatial direction Fittin cubic spline. It's working very well with order=5, nsum=2 Compute 1st and flat smoothed Regions to compute local minina-maxima Region one: it represent first 40 percent of all data Region two: ... last 40% Reg. 
1: Compute local min/max of the 2nd derivative Indice have to be reshifted to the original indices of the function dy2 Slit edges are the local maxima/minima 1/2 [accounting the cutted pixels]"},"nl_size":{"kind":"number","value":847,"string":"847"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7401696443557739,"string":"0.74017"}}},{"rowIdx":7864,"cells":{"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\n# $Id: __init__.py,v 1.8.2.2 2007/05/22 21:06:52 customdesigned Exp $\n#\n# This file is part of the pydns project.\n# Homepage: http://pydns.sourceforge.net\n#\n# This code is covered by the standard Python License.\n#\n\n# __init__.py for DNS class.\n\n__version__ = '2.3.1'\n\nimport Type,Opcode,Status,Class\nfrom Base import DnsRequest, DNSError\nfrom Lib import DnsResult\nfrom Base import *\nfrom Lib import *\nError=DNSError\nfrom lazy import *\nRequest = DnsRequest\nResult = DnsResult\nfrom Serialization import Serialize,DeSerialize\n\n#\n# $Log: __init__.py,v $\n# Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned\n# utf-8 in __init__.py\n#\n# Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned\n# Release 2.3.1\n#\n# Revision 1.8 2002/05/06 06:17:49 anthonybaxter\n# found that the old README file called itself release 2.2. So make\n# this one 2.3...\n#\n# Revision 1.7 2002/05/06 06:16:15 anthonybaxter\n# make some sort of reasonable version string. releasewards ho!\n#\n# Revision 1.6 2002/03/19 13:05:02 anthonybaxter\n# converted to class based exceptions (there goes the python1.4 compatibility :)\n#\n# removed a quite gross use of 'eval()'.\n#\n# Revision 1.5 2002/03/19 12:41:33 anthonybaxter\n# tabnannied and reindented everything. 4 space indent, no tabs.\n# yay.\n#\n# Revision 1.4 2001/11/26 17:57:51 stroeder\n# Added __version__\n#\n# Revision 1.3 2001/08/09 09:08:55 anthonybaxter\n# added identifying header to top of each file\n#\n# Revision 1.2 2001/07/19 06:57:07 anthony\n# cvs keywords added\n#\n#\n"},"path":{"kind":"string","value":"tools/hipdnsproxy/DNS/__init__.py"},"size":{"kind":"number","value":1527,"string":"1,527"},"nl_text":{"kind":"string","value":"-*- encoding: utf-8 -*- $Id: __init__.py,v 1.8.2.2 2007/05/22 21:06:52 customdesigned Exp $ This file is part of the pydns project. Homepage: http://pydns.sourceforge.net This code is covered by the standard Python License. __init__.py for DNS class. $Log: __init__.py,v $ Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned utf-8 in __init__.py Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned Release 2.3.1 Revision 1.8 2002/05/06 06:17:49 anthonybaxter found that the old README file called itself release 2.2. So make this one 2.3... Revision 1.7 2002/05/06 06:16:15 anthonybaxter make some sort of reasonable version string. releasewards ho! Revision 1.6 2002/03/19 13:05:02 anthonybaxter converted to class based exceptions (there goes the python1.4 compatibility :) removed a quite gross use of 'eval()'. Revision 1.5 2002/03/19 12:41:33 anthonybaxter tabnannied and reindented everything. 4 space indent, no tabs. yay. 
Revision 1.4 2001/11/26 17:57:51 stroeder Added __version__ Revision 1.3 2001/08/09 09:08:55 anthonybaxter added identifying header to top of each file Revision 1.2 2001/07/19 06:57:07 anthony cvs keywords added"},"nl_size":{"kind":"number","value":1159,"string":"1,159"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7005176544189453,"string":"0.700518"}}},{"rowIdx":7865,"cells":{"content":{"kind":"string","value":"import os\nimport sys\nimport setuptools\n\n# To prevent importing about and thereby breaking the coverage info we use this\n# exec hack\nabout = {}\nwith open('python_utils/__about__.py') as fp:\n exec(fp.read(), about)\n\n\nif os.path.isfile('README.rst'):\n long_description = open('README.rst').read()\nelse:\n long_description = 'See http://pypi.python.org/pypi/python-utils/'\n\n\nneeds_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv)\npytest_runner = ['pytest-runner'] if needs_pytest else []\n\n\nif __name__ == '__main__':\n setuptools.setup(\n name='python-utils',\n version=about['__version__'],\n author=about['__author__'],\n author_email=about['__author_email__'],\n description=about['__description__'],\n url=about['__url__'],\n license='BSD',\n packages=setuptools.find_packages(),\n long_description=long_description,\n install_requires=['six'],\n tests_require=['pytest'],\n setup_requires=[] + pytest_runner,\n classifiers=['License :: OSI Approved :: BSD License'],\n )\n\n"},"path":{"kind":"string","value":"setup.py"},"size":{"kind":"number","value":1077,"string":"1,077"},"nl_text":{"kind":"string","value":"To prevent importing about and thereby breaking the coverage info we use this exec hack"},"nl_size":{"kind":"number","value":87,"string":"87"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.841132402420044,"string":"0.841132"}}},{"rowIdx":7866,"cells":{"content":{"kind":"string","value":"import logging as log\nimport cv2\nimport sys\nimport numpy as np\n\n\nclass LandmarksDetectionModel:\n '''\n Class for the Face Landmarks Detection Model.\n\n Load and configure inference plugins for the specified target devices,\n and performs either synchronous or asynchronous modes for the\n specified infer requests.\n '''\n\n def __init__(self, model_name, device='CPU', extensions=None, async_infer=True):\n '''\n Set instance variables.\n '''\n self.plugin = None\n self.network = None\n self.exec_network = None\n self.infer_request_handle = None\n\n self.input_blob = None\n self.input_shape = None\n self.output_blob = None\n self.output_shape = None\n\n self.model_name = model_name\n self.device = device\n self.extensions = extensions\n self.async_infer = async_infer\n\n def load_model(self, plugin):\n '''\n This method is for loading the model (in IR format) to the device specified by the user.\n Default device is CPU.\n '''\n\n # Get model\n model_structure = self.model_name + '.xml'\n model_weights = self.model_name + '.bin'\n\n # Initialize the plugin - load the inference engine API\n # Plugin is the one already created for the Face Detection model\n self.plugin = plugin\n\n # Add a CPU extension, if applicable\n if self.extensions and 'CPU' in self.device:\n self.plugin.add_extension(self.extensions, self.device)\n\n # Read the IR as IENetwork\n try:\n self.network = self.plugin.read_network(model=model_structure, weights=model_weights)\n except:\n raise ValueError(\"Could not initialise the network. 
Have you entered the correct model path?\")\n\n # Check if model and CPU plugin are supported\n if self.device == 'CPU':\n self.check_model()\n\n # Load the IENetwork into the plugin\n self.exec_network = self.plugin.load_network(network=self.network, device_name=self.device, num_requests=1)\n\n # Get the input and output layers\n self.input_blob = next(iter(self.network.inputs))\n self.input_shape = self.network.inputs[self.input_blob].shape\n self.output_blob = next(iter(self.network.outputs))\n self.output_shape = self.network.outputs[self.output_blob].shape\n return\n\n def predict(self, image):\n '''\n This method is meant for running predictions on the input image.\n '''\n if np.all(np.array(image.shape)):\n # Create input image to feed into the network\n net_input = {self.input_blob: self.preprocess_input(image)}\n\n # Start inference. Infer mode (async/sync) is input by user\n if self.async_infer:\n self.infer_request_handle = self.exec_network.start_async(request_id=0, inputs=net_input)\n # Wait for the result of the inference\n if self.exec_network.requests[0].wait(-1) == 0:\n # Get result of the inference request\n outputs = self.infer_request_handle.outputs[self.output_blob]\n eyes_coords, crop_left, crop_right = self.preprocess_output(outputs, image)\n\n else:\n self.infer_request_handle = self.exec_network.infer(inputs=net_input)\n # Get result of the inference request\n outputs = self.infer_request_handle[self.output_blob]\n eyes_coords, crop_left, crop_right = self.preprocess_output(outputs, image)\n\n else:\n eyes_coords = []\n crop_left = []\n crop_right = []\n\n return eyes_coords, crop_left, crop_right\n\n def check_model(self):\n '''\n This method check whether the model (along with the plugin) is support on the CPU device.\n If anything is missing (such as a CPU extension), let the user know and exit the programm.\n '''\n\n supported_layers = self.plugin.query_network(network=self.network, device_name='CPU')\n unsupported_layers = [l for l in self.network.layers.keys() if l not in supported_layers]\n\n if len(unsupported_layers) != 0:\n log.error(\"Unsupported layers found: {}\".format(unsupported_layers))\n if self.extensions:\n log.error(\"The extensions specified do not support some layers. 
Please specify a new extension.\")\n else:\n log.error(\n \"Please try to specify an extension library path by using the --extensions command line argument.\")\n sys.exit(1)\n return\n\n def preprocess_input(self, image):\n '''\n Method to process inputs before feeding them into the model for inference.\n '''\n image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))\n image = image.transpose((2, 0, 1))\n image = image.reshape(1, *image.shape)\n return image\n\n def preprocess_output(self, outputs, image):\n '''\n Method to process outputs before feeding them into the next model for\n inference or for the last step of the app.\n '''\n\n w = image.shape[1]\n h = image.shape[0]\n outputs = outputs[0]\n\n xl, yl = int(outputs[0][0][0] * w), int(outputs[1][0][0] * h)\n xr, yr = int(outputs[2][0][0] * w), int(outputs[3][0][0] * h)\n\n eyes_coords = [xl, yl, xr, yr]\n\n # Using the fact that eyes take 1/5 of your face width\n # define bounding boxes around the eyes according to this\n square_size = int(w / 10)\n left_eye_box = [xl - square_size, yl - square_size, xl + square_size, yl + square_size]\n right_eye_box = [xr - square_size, yr - square_size, xr + square_size, yr + square_size]\n\n crop_left = image[left_eye_box[1]:left_eye_box[3], left_eye_box[0]:left_eye_box[2]]\n crop_right = image[right_eye_box[1]:right_eye_box[3], right_eye_box[0]:right_eye_box[2]]\n\n return eyes_coords, crop_left, crop_right\n"},"path":{"kind":"string","value":"src/facial_landmarks_detection.py"},"size":{"kind":"number","value":6044,"string":"6,044"},"nl_text":{"kind":"string","value":"Class for the Face Landmarks Detection Model.\n\nLoad and configure inference plugins for the specified target devices,\nand performs either synchronous or asynchronous modes for the\nspecified infer requests.\nSet instance variables.\nThis method check whether the model (along with the plugin) is support on the CPU device.\nIf anything is missing (such as a CPU extension), let the user know and exit the programm.\nThis method is for loading the model (in IR format) to the device specified by the user.\nDefault device is CPU.\nThis method is meant for running predictions on the input image.\nMethod to process inputs before feeding them into the model for inference.\nMethod to process outputs before feeding them into the next model for\ninference or for the last step of the app.\n\n Get model Initialize the plugin - load the inference engine API Plugin is the one already created for the Face Detection model Add a CPU extension, if applicable Read the IR as IENetwork Check if model and CPU plugin are supported Load the IENetwork into the plugin Get the input and output layers Create input image to feed into the network Start inference. 
Infer mode (async/sync) is input by user Wait for the result of the inference Get result of the inference request Get result of the inference request Using the fact that eyes take 1/5 of your face width define bounding boxes around the eyes according to this"},"nl_size":{"kind":"number","value":1395,"string":"1,395"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8491008877754211,"string":"0.849101"}}},{"rowIdx":7867,"cells":{"content":{"kind":"string","value":"\"\"\"\nASGI config for FYP project.\n\nIt exposes the ASGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/howto/deployment/asgi/\n\"\"\"\n\nimport os\n\nfrom django.core.asgi import get_asgi_application\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FYP.settings')\n\napplication = get_asgi_application()\n"},"path":{"kind":"string","value":"src/FYP/FYP/asgi.py"},"size":{"kind":"number","value":383,"string":"383"},"nl_text":{"kind":"string","value":"ASGI config for FYP project.\n\nIt exposes the ASGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/howto/deployment/asgi/"},"nl_size":{"kind":"number","value":209,"string":"209"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7294002771377563,"string":"0.7294"}}},{"rowIdx":7868,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nfrom IPython import get_ipython\nfrom IPython.display import (\n display,\n Javascript,\n)\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import (\n Magics,\n magics_class,\n cell_magic,\n)\nfrom IPython.utils.importstring import import_item\n\n\nimport yaml\n\n__version__ = \"0.2.0\"\n\n\n@magics_class\nclass YAMLMagics(Magics):\n \"\"\"\n Write and load YAML in the IPython Notebook. Uses SafeLoader by default.\n\n Example:\n\n %%yaml x -lyaml.Loader\n foo:\n bar: baz\n\n \"\"\"\n\n def __init__(self, shell):\n super(YAMLMagics, self).__init__(shell)\n\n @cell_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument(\n \"var_name\",\n default=None,\n nargs=\"?\",\n help=\"\"\"Name of local variable to set to parsed value\"\"\"\n )\n @magic_arguments.argument(\n \"-l\", \"--loader\",\n default=\"yaml.SafeLoader\",\n help=\"\"\"Dotted-notation class to use for loading\"\"\"\n )\n def yaml(self, line, cell):\n line = line.strip()\n args = magic_arguments.parse_argstring(self.yaml, line)\n\n display(Javascript(\n \"\"\"\n require(\n [\n \"notebook/js/codecell\",\n \"codemirror/mode/yaml/yaml\"\n ],\n function(cc){\n cc.CodeCell.options_default.highlight_modes.magic_yaml = {\n reg: [\"^%%yaml\"]\n }\n }\n );\n \"\"\"))\n\n loader = get_ipython().user_global_ns.get(args.loader, None)\n if loader is None:\n loader = import_item(args.loader)\n\n try:\n val = yaml.load(cell, Loader=loader)\n except yaml.YAMLError as err:\n print(err)\n return\n\n if args.var_name is not None:\n get_ipython().user_ns[args.var_name] = val\n else:\n return val\n\n\ndef load_ipython_extension(ip):\n ip = get_ipython()\n ip.register_magics(YAMLMagics)\n"},"path":{"kind":"string","value":"yamlmagic.py"},"size":{"kind":"number","value":2090,"string":"2,090"},"nl_text":{"kind":"string","value":"Write and load YAML in the IPython Notebook. 
Uses SafeLoader by default.\n\nExample:\n\n %%yaml x -lyaml.Loader\n foo:\n bar: baz\n\n -*- coding: utf-8 -*-"},"nl_size":{"kind":"number","value":160,"string":"160"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.5752121806144714,"string":"0.575212"}}},{"rowIdx":7869,"cells":{"content":{"kind":"string","value":"import h5py\nimport pickle\nimport numpy as np\n# import read_affect_data as r\n# from tqdm import tqdm\nimport random\n\nfrom PIL import Image, ImageOps, ImageEnhance\nimport colorsys\n\n\n# def read_h5_data_set(path):\n# f = h5py.File(path, 'r')\n# time_stamps = list(f[list(f.keys())[0]].keys())\n# d = {time : dict() for time in time_stamps}\n# for feature in list(f.keys()):\n# if hasattr(f[feature], 'keys'):\n\n# for time in tqdm(list(f[feature].keys())):\n# k = list(f[feature][time].keys())[0]\n# d[time][feature] = np.array(f[feature][time][k])\n# return d\n\n\n# def read_pkl_data_set(path):\n# f = r.load_pickle(path)\n# time_stamps = list(f[list(f.keys())[0]].keys())\n# d = {time : dict() for time in time_stamps}\n# for feature in list(f.keys()):\n# if hasattr(f[feature], 'keys'):\n\n# for time in tqdm(list(f[feature].keys())):\n# if hasattr(f[feature][time], 'keys'):\n# for k in list(f[feature][time].keys()):\n# d[time][feature] = np.array(f[feature][time][k])\n# return d\n\n\n##############################################################################\n# Visual\ndef visual_robustness(tests, noise_level=0.3, gray=True, contrast=True, s_and_p=True, gaus=True, rot=True, crop=True):\n noises = []\n if gray:\n noises.append(grayscale)\n if contrast:\n noises.append(low_contrast)\n if s_and_p:\n noises.append(salt_and_pepper)\n if gaus:\n noises.append(gaussian)\n if rot:\n noises.append(rotate)\n if crop:\n noises.append(random_crop)\n robustness_tests = []\n for i in range(len(tests)):\n img = Image.fromarray(tests[i])\n for noise in noises:\n img = noise(img, noise_level)\n robustness_tests.append(np.array(img))\n return robustness_tests\n\n\ndef grayscale(img, p):\n if np.random.sample() <= p:\n return ImageOps.grayscale(img)\n else:\n return img\n\n\ndef low_contrast(img, factor):\n if np.random.sample() <= p:\n enhancer = ImageEnhance.Contrast(img)\n return enhancer.enhance(factor)\n else:\n return img\n\n\ndef inversion(img, p):\n if np.random.sample() <= p:\n return ImageOps.invert(img)\n else:\n return img\n\n\ndef WB(img, p):\n if np.random.sample() <= p:\n kelvin_table = {1000: (255, 56, 0), 1500: (255, 109, 0), 2000: (255, 137, 18), 2500: (255, 161, 72), 3000: (255, 180, 107), 3500: (255, 196, 137), 4000: (255, 209, 163), 4500: (255, 219, 186), 5000: (255, 228, 206), 5500: (\n 255, 236, 224), 6000: (255, 243, 239), 6500: (255, 249, 253), 7000: (245, 243, 255), 7500: (235, 238, 255), 8000: (227, 233, 255), 8500: (220, 229, 255), 9000: (214, 225, 255), 9500: (208, 222, 255), 10000: (204, 219, 255)}\n temp = np.random.choice(kelvin_table.keys())\n r, g, b = kelvin_table[temp]\n matrix = (r / 255.0, 0.0, 0.0, 0.0,\n 0.0, g / 255.0, 0.0, 0.0,\n 0.0, 0.0, b / 255.0, 0.0)\n return img.convert('RGB', matrix)\n else:\n return img\n\n\ndef colorize(img, p):\n if np.random.sample() <= p:\n color = np.random.choice(['red', 'blue', 'green'])\n layer = Image.new('RGB', img.size, color)\n return Image.blend(img, layer, 0.3)\n else:\n return img\n\n\ndef salt_and_pepper(img, p):\n if np.random.sample() <= p:\n output = np.copy(np.array(img))\n nb_salt = np.ceil(p*output.size*0.5)\n coords = [np.random.randint(0, i-1, int(nb_salt))\n for 
i in output.shape]\n for i in coords:\n output[i] = 1\n nb_pepper = np.ceil(p*output.size*0.5)\n coords = [np.random.randint(0, i-1, int(nb_pepper))\n for i in output.shape]\n for i in coords:\n output[i] = 0\n return Image.fromarray(output)\n else:\n return img\n\n\ndef gaussian(img, p):\n if np.random.sample() <= p:\n height, width = np.array(img).shape\n gauss = np.random.normal(0, p, (height, width))\n return Image.fromarray((np.array(img)+gauss).astype('uint8'))\n else:\n return img\n\n\ndef rotate(img, p):\n if np.random.sample() <= p:\n angle = np.random.random_sample()*40-20\n return img.rotate(angle, Image.BILINEAR)\n else:\n return img\n\n\ndef horizontal_flip(img, p):\n if np.random.sample() <= p:\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n else:\n return img\n\n\ndef random_crop(img, p):\n if np.random.sample() <= p:\n dim = np.array(img).shape\n height = dim[0]\n width = dim[1]\n cropped_height = height / 5\n cropped_width = width / 5\n init_height = np.random.random_sample() * cropped_height\n init_width = np.random.random_sample() * cropped_width\n end_height = height - cropped_height + init_height\n end_width = width - cropped_width + init_width\n return img.crop((init_width, init_height, end_width, end_height)).resize((height, width))\n else:\n return img\n\n\ndef periodic(img, periodic_noise_filename=\"periodic_noise\"):\n height = img.height\n width = img.width\n output = []\n for i in range(6):\n noise = Image.open(\"{}_{}.png\".format(\n periodic_noise_filename, i+1)).convert(\"RGBA\")\n noise = random_crop(rotate(noise.resize(\n (width*2, height*2)), np.random.random_sample()*360, 'white'), height, width)\n output.append(Image.blend(img.convert(\"RGBA\"), noise, 0.3))\n return output\n\n\n##############################################################################\n# Text\ndef text_robustness(tests, noise_level=0.3, swap=True, rand_mid=True, typo=True, sticky=True, omit=True):\n noises = []\n if swap:\n noises.append(swap_letter)\n if rand_mid:\n noises.append(random_mid)\n if typo:\n noises.append(qwerty_typo)\n if sticky:\n noises.append(sticky_keys)\n if omit:\n noises.append(omission)\n robustness_tests = []\n for i in range(len(tests)):\n newtext = []\n text = tests[i].lower().split()\n for word in text:\n if len(word) > 3 and np.random.sample() <= noise_level:\n mode = np.random.randint(len(noises))\n newtext.append(noises[mode](word))\n else:\n newtext.append(word)\n robustness_tests.append(' '.join(newtext))\n return np.array(robustness_tests)\n\n\ndef last_char(word):\n for i in range(len(word)):\n if word[len(word)-1-i].isalpha():\n return len(word) - 1 - i\n\n\ndef swap_letter(word):\n # swap two random adjacent letters\n last = last_char(word)\n pos = np.random.randint(last-2) + 1\n return word[:pos] + word[pos+1] + word[pos] + word[pos+2:]\n\n\ndef random_mid(word):\n # randomly permute the middle chunk of a word (all letters except the first and last letter)\n last = last_char(word)\n mid = [char for char in word[1:last]]\n np.random.shuffle(mid)\n return word[0]+''.join(mid)+word[last:]\n\n\ndef qwerty_typo(word, num_typo=1):\n # randomly replace num_typo number of letters of a word to a one adjacent to it on qwerty keyboard\n qwerty = {'q': ['w'], 'w': ['q', 'e', 's'], 'e': ['w', 'r', 'd'], 'r': ['e', 't', 'f'], 't': ['r', 'g', 'y'], 'y': ['t', 'u', 'h'], 'u': ['y', 'i', 'j'], 'i': ['u', 'o', 'k'], 'o': ['i', 'p', 'l'], 'p': ['o'], 'a': ['q', 's', 'z'], 's': ['a', 'w', 'd', 'x', 'z'], 'd': ['s', 'e', 'f', 'x', 'c'], 'f': ['d', 'r', 'g', 'c', 
'v'], 'g': [\n 'f', 't', 'h', 'v', 'b'], 'h': ['g', 'y', 'j', 'b', 'n'], 'j': ['h', 'u', 'k', 'n', 'm'], 'k': ['j', 'i', 'l', 'm'], 'l': ['k', 'o'], 'z': ['a', 's', 'x'], 'x': ['z', 's', 'd', 'c'], 'c': ['x', 'd', 'f', 'v'], 'v': ['c', 'f', 'g', 'b'], 'b': ['v', 'g', 'h', 'n'], 'n': ['b', 'h', 'm', 'j'], 'm': ['n', 'j', 'k']}\n last = last_char(word)\n typos = np.arange(last+1)\n np.random.shuffle(typos)\n for i in range(num_typo):\n typo = qwerty[word[typos[i]]]\n key = typo[np.random.randint(len(typo))]\n word = word[:typos[i]] + key + word[typos[i]+1:]\n return word\n\n\ndef sticky_keys(word, num_sticky=1):\n # randomly repeat num_sticky number of letters of a word\n last = last_char(word)\n sticky = np.arange(last+1)\n np.random.shuffle(sticky)\n for i in range(num_sticky):\n word = word[:sticky[i]] + word[sticky[i]] + word[sticky[i]:]\n return word\n\n\ndef omission(word, num_omit=1):\n # randomly omit num_omit number of letters of a word\n last = last_char(word)\n for i in range(num_omit):\n omit = np.random.randint(last-1) + 1\n word = word[:omit] + word[omit+1:]\n last -= 1\n return word\n\n##############################################################################\n# Audio\n\n\ndef audio_robustness(tests, noise_level=0.3, noises=None):\n if noises == None:\n noises = [additive_white_gaussian_noise,\n audio_random_dropout, audio_structured_dropout]\n robustness_tests = np.zeros(tests.shape)\n for i in range(len(tests)):\n if np.random.sample() <= noise_level:\n mode = np.random.randint(len(noises))\n robustness_tests[i] = noises[mode](tests[i], noise_level)\n return robustness_tests\n\n\ndef additive_white_gaussian_noise(signal, noise_level):\n # SNR = 10 * log((RMS of signal)^2 / (RMS of noise)^2)\n # RMS_s = np.sqrt(np.mean(signal*signal))\n # RMS_n = np.sqrt(RMS_s*RMS_s / (np.power(10, SNR/10)))\n noise = np.random.normal(0, noise_level, signal.shape[0])\n return signal + noise\n\n\ndef audio_structured_dropout(sig, p, step=10):\n # each consecutive time steps are chosen with probability p to be dropped\n res = [sig[i] for i in range(len(sig))]\n for i in range(len(res)-step+1):\n if (res[i] != 0) and np.random.random_sample() < p:\n for j in range(step):\n res[i+j] = 0\n return res\n\n\ndef audio_random_dropout(sig, p):\n return audio_structured_dropout(sig, 1, p)\n\n\n##############################################################################\n# Time-Series\ndef timeseries_robustness(tests, noise_level=0.3, noise=True, rand_drop=True, struct_drop=True, modality_map=None):\n robust_tests = np.array(tests)\n if noise:\n robust_tests = white_noise(robust_tests, noise_level)\n if rand_drop:\n robust_tests = random_drop(robust_tests, noise_level)\n if struct_drop:\n robust_tests = structured_drop(robust_tests, noise_level, modality_map)\n return robust_tests\n\n\n# add noise sampled from zero-mean Gaussian with standard deviation p at every time step\ndef white_noise(data, p):\n for i in range(len(data)):\n for time in range(len(data[i])):\n data[i][time] += np.random.normal(0, p)\n return data\n\n# each entry is dropped independently with probability p\n\n\ndef random_drop(data, p):\n for i in range(len(data)):\n for time in range(len(data[i])):\n for feature in range(len(data[i][time])):\n if np.random.random_sample() < p:\n data[i][time][feature] = 0\n # else:\n # result = dict()\n # for time in data:\n # for feature in data[time]:\n # if np.random.random_sample() < p:\n # result[time][feature] = np.zeros(data[time][feature].shape)\n # else:\n # result[time][feature] = 
data[time][feature]\n return data\n\n\n# independently for each modality, each time step is chosen with probability p\n# at which all feature dimensions are dropped\ndef structured_drop(data, p, modality_map):\n for i in range(len(data)):\n for time in range(len(data[i])):\n if np.random.random_sample() < p:\n data[i][time] = np.zeros(data[i][time].shape)\n # else:\n # result = dict()\n # for time in data:\n # for modality in modality_map.keys():\n # if np.random.random_sample() < p:\n # for feature in modality_map[modality]:\n # result[time][feature] = np.zeros(data[time][feature].shape)\n # else:\n # for feature in modality_map[modality]:\n # result[time][feature] = data[time][feature]\n return data\n\n\n##############################################################################\n# Tabular\ndef add_tabular_noise(tests, noise_level=0.3, drop=True, swap=True):\n robust_tests = np.array(tests)\n if drop:\n robust_tests = drop_entry(robust_tests, noise_level)\n if swap:\n robust_tests = swap_entry(robust_tests, noise_level)\n return robust_tests\n\n\ndef drop_entry(data, p):\n for i in range(len(data)):\n for j in range(len(data[i])):\n if np.random.random_sample() < p:\n data[i][j] = 0\n else:\n data[i][j] = data[i][j]\n return data\n\n\ndef swap_entry(data, p):\n for i in range(len(data)):\n for j in range(1, len(data[i])):\n if np.random.random_sample() < p:\n data[i][j] = data[i][j-1]\n data[i][j-1] = data[i][j]\n return data\n\n\nif __name__ == '__main__':\n print('='*5 + 'Multi Affect' + '='*5)\n print('1. CMU-MOSI, Aligned')\n print('2. CMU-MOSI, Unaligned')\n print('3. CMU-MOSEI, Aligned')\n print('4. CMU-MOSEI, Unaligned')\n print('5. CMU-POM, Aligned')\n print('6. CMU-POM, Unaligned')\n print('7. UR-Funny')\n print('8. Sarcasm')\n print('9. 
Deception')\n\n opt = int(input('Input option: '))\n print('='*22)\n if opt == 1:\n data = read_h5_data_set('./mosi/mosi.hdf5')\n modality_map = {'vision': ['FACET_4.2', 'OpenFace_1'], 'text': [\n 'words'], 'vocal': ['COVAREP', 'OpenSmile_emobase2010']}\n elif opt == 2:\n print(\"To be implemented!\")\n # data = read_h5_data_set('./mosi/mosi_unalign.hdf5')\n elif opt == 3:\n data = read_h5_data_set('./mosei/mosei.hdf5')\n modality_map = {'vision': ['OpenFace_2'],\n 'text': ['words'], 'vocal': ['COVAREP']}\n elif opt == 4:\n print(\"To be implemented!\")\n # data = read_h5_data_set('./mosei/mosei_unalign.hdf5')\n elif opt == 5:\n data = read_h5_data_set('./pom/pom.hdf5')\n modality_map = {'vision': ['FACET_4.2', 'OpenFace2'], 'text': [\n 'words'], 'vocal': ['COVAREP']}\n elif opt == 6:\n print(\"To be implemented!\")\n # data = read_h5_data_set('./pom/pom_unalign.hdf5')\n elif opt == 7:\n data = read_pkl_data_set('./urfunny/urfunny.pkl')\n # time = data[list(data.keys())[0]]\n # k = data[list(data[time].keys())[0]]\n \n elif opt == 8:\n print(\"To be implemented!\")\n # display_sarcasm_data_set('./sarcasm/sarcasm.pkl')\n elif opt == 9:\n print(\"To be implemented!\")\n # display_pkl_data_set('./deception/deception.pkl')\n else:\n print('Wrong Input!')\n"},"path":{"kind":"string","value":"deprecated/robustness_tests_draft.py"},"size":{"kind":"number","value":14949,"string":"14,949"},"nl_text":{"kind":"string","value":"import read_affect_data as r from tqdm import tqdm def read_h5_data_set(path): f = h5py.File(path, 'r') time_stamps = list(f[list(f.keys())[0]].keys()) d = {time : dict() for time in time_stamps} for feature in list(f.keys()): if hasattr(f[feature], 'keys'): for time in tqdm(list(f[feature].keys())): k = list(f[feature][time].keys())[0] d[time][feature] = np.array(f[feature][time][k]) return d def read_pkl_data_set(path): f = r.load_pickle(path) time_stamps = list(f[list(f.keys())[0]].keys()) d = {time : dict() for time in time_stamps} for feature in list(f.keys()): if hasattr(f[feature], 'keys'): for time in tqdm(list(f[feature].keys())): if hasattr(f[feature][time], 'keys'): for k in list(f[feature][time].keys()): d[time][feature] = np.array(f[feature][time][k]) return d Visual Text swap two random adjacent letters randomly permute the middle chunk of a word (all letters except the first and last letter) randomly replace num_typo number of letters of a word to a one adjacent to it on qwerty keyboard randomly repeat num_sticky number of letters of a word randomly omit num_omit number of letters of a word Audio SNR = 10 * log((RMS of signal)^2 / (RMS of noise)^2) RMS_s = np.sqrt(np.mean(signal*signal)) RMS_n = np.sqrt(RMS_s*RMS_s / (np.power(10, SNR/10))) each consecutive time steps are chosen with probability p to be dropped Time-Series add noise sampled from zero-mean Gaussian with standard deviation p at every time step each entry is dropped independently with probability p else: result = dict() for time in data: for feature in data[time]: if np.random.random_sample() < p: result[time][feature] = np.zeros(data[time][feature].shape) else: result[time][feature] = data[time][feature] independently for each modality, each time step is chosen with probability p at which all feature dimensions are dropped else: result = dict() for time in data: for modality in modality_map.keys(): if np.random.random_sample() < p: for feature in modality_map[modality]: result[time][feature] = np.zeros(data[time][feature].shape) else: for feature in modality_map[modality]: result[time][feature] = 
data[time][feature] Tabular data = read_h5_data_set('./mosi/mosi_unalign.hdf5') data = read_h5_data_set('./mosei/mosei_unalign.hdf5') data = read_h5_data_set('./pom/pom_unalign.hdf5') time = data[list(data.keys())[0]] k = data[list(data[time].keys())[0]] display_sarcasm_data_set('./sarcasm/sarcasm.pkl') display_pkl_data_set('./deception/deception.pkl')"},"nl_size":{"kind":"number","value":2823,"string":"2,823"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.597830057144165,"string":"0.59783"}}},{"rowIdx":7870,"cells":{"content":{"kind":"string","value":"import os, sys\nimport ROOT\nfrom ROOT import TH1F,TH2F,TFile,TTree,TCanvas, TProfile, TNtuple, gErrorIgnoreLevel, kInfo, kWarning\nfrom tqdm import tqdm\nfrom particle import Particle, PDGID\n\ntqdm_disable = False\nROOT.gErrorIgnoreLevel = kWarning;\n\nFile = TFile(\"/home/kshi/Zprime/Zp_data_Ntuple/WmTo3l_ZpM45.root\",\"READ\")\ntree = File.Get(\"Ana/passedEvents\")\n\nnEntries = tree.GetEntries()\n\nW, p, none, other = 0, 0, 0, 0\nothers = []\n\nfor i in tqdm(range(0, nEntries)):\n tree.GetEntry(i)\n\n #for j in range(0,tree.lep_matchedR03_MomMomId.size()):\n # if abs(tree.lep_matchedR03_MomMomId[j])>=11 and abs(tree.lep_matchedR03_MomMomId[j])<=18:\n # print \"Event:\" + str(tree.Event) + \", Lepton \" + str(j) + \" MomMomid is: \" + lepton#str(tree.lep_matchedR03_MomMomId[j])\n\n #for j in range(0,tree.lep_matchedR03_PdgId.size()):\n # if (abs(tree.lep_matchedR03_PdgId[j])<11 or abs(tree.lep_matchedR03_PdgId[j]>18)) and tree.lep_matchedR03_PdgId[j]!=0:\n # print \"Event:\" + str(tree.Event) + \" has lepton id of \" + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name\n\n #for j in range(0,tree.GENlep_id.size()):\n # if PDGID(tree.GENlep_id[j]).is_valid==False:\n # print \"Invalid lep id \" + str(tree.GENlep_id[j])\n # if PDGID(tree.GENlep_MomId[j]).is_valid==False:\n # print \"Invalid lep mom id \" + str(tree.GENlep_MomId[j])\n # if PDGID(tree.GENlep_MomMomId[j]).is_valid==False:\n # print \"Invalid lep mom mom id \" + str(tree.GENlep_MomMomId[j])\n # else:\n # print \"Event:\" + str(tree.Event) + \", Lepton \" + str(j) + \" is a \" + Particle.from_pdgid(tree.GENlep_id[j]).name + \" that came from a \" + Particle.from_pdgid(tree.GENlep_MomId[j]).name + \" which came from a \" + Particle.from_pdgid(tree.GENlep_MomMomId[j]).name\n\n for j in range(0,tree.lep_matchedR03_PdgId.size()):\n #if PDGID(tree.lep_matchedR03_PdgId[j]).is_valid==False:\n # print \"Invalid lep id \" + str(tree.lep_matchedR03_PdgId[j])\n #if PDGID(tree.lep_matchedR03_MomId[j]).is_valid==False:\n # print \"Invalid lep mom id \" + str(tree.lep_matchedR03_MomId[j])\n #if PDGID(tree.lep_matchedR03_MomMomId[j]).is_valid==False:\n # print \"Invalid lep mom mom id \" + str(tree.lep_matchedR03_MomMomId[j])\n ##if tree.lep_matchedR03_PdgId[j]!=999888 and tree.lep_matchedR03_MomId!=999888 and tree.lep_matchedR03_MomMomId[j]!=999888:\n ## print \"Event:\" + str(tree.Event) + \", Lepton \" + str(j) + \" is a \" + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + \" that came from a \" + Particle.from_pdgid(tree.lep_matchedR03_MomId[j]).name + \" which came from a \" + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name\n #elif tree.lep_matchedR03_MomId[j]==999888:\n # print \"Event:\" + str(tree.Event) + \", Lepton \" + str(j) + \" is a \" + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + \" that came from a \" + str(tree.lep_matchedR03_MomId[j]) + \" which came from a \" + 
Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name\n \n if tree.lep_matchedR03_MomId[j]==999888:\n if abs(tree.lep_matchedR03_MomMomId[j])==24:\n W+=1\n elif abs(tree.lep_matchedR03_MomMomId[j])==2212:\n p+=1\n elif abs(tree.lep_matchedR03_MomMomId[j])==0:\n none+=1\n else:\n other+=1\n others.append(tree.lep_matchedR03_MomMomId[j])\n\nprint \"Sources of Z':\"\nprint \"W = \" + str(W) + \", p = \" + str(p) + \", none = \" + str(none) + \", other = \" + str(other)\nfor i in range(0, len(others)):\n print \"Other MomMomId: \" + str(others[i])\n"},"path":{"kind":"string","value":"Wto3l/mom_counting.py"},"size":{"kind":"number","value":3474,"string":"3,474"},"nl_text":{"kind":"string","value":"for j in range(0,tree.lep_matchedR03_MomMomId.size()): if abs(tree.lep_matchedR03_MomMomId[j])>=11 and abs(tree.lep_matchedR03_MomMomId[j])<=18: print \"Event:\" + str(tree.Event) + \", Lepton \" + str(j) + \" MomMomid is: \" + leptonstr(tree.lep_matchedR03_MomMomId[j])for j in range(0,tree.lep_matchedR03_PdgId.size()): if (abs(tree.lep_matchedR03_PdgId[j])<11 or abs(tree.lep_matchedR03_PdgId[j]>18)) and tree.lep_matchedR03_PdgId[j]!=0: print \"Event:\" + str(tree.Event) + \" has lepton id of \" + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).namefor j in range(0,tree.GENlep_id.size()): if PDGID(tree.GENlep_id[j]).is_valid==False: print \"Invalid lep id \" + str(tree.GENlep_id[j]) if PDGID(tree.GENlep_MomId[j]).is_valid==False: print \"Invalid lep mom id \" + str(tree.GENlep_MomId[j]) if PDGID(tree.GENlep_MomMomId[j]).is_valid==False: print \"Invalid lep mom mom id \" + str(tree.GENlep_MomMomId[j]) else: print \"Event:\" + str(tree.Event) + \", Lepton \" + str(j) + \" is a \" + Particle.from_pdgid(tree.GENlep_id[j]).name + \" that came from a \" + Particle.from_pdgid(tree.GENlep_MomId[j]).name + \" which came from a \" + Particle.from_pdgid(tree.GENlep_MomMomId[j]).nameif PDGID(tree.lep_matchedR03_PdgId[j]).is_valid==False: print \"Invalid lep id \" + str(tree.lep_matchedR03_PdgId[j])if PDGID(tree.lep_matchedR03_MomId[j]).is_valid==False: print \"Invalid lep mom id \" + str(tree.lep_matchedR03_MomId[j])if PDGID(tree.lep_matchedR03_MomMomId[j]).is_valid==False: print \"Invalid lep mom mom id \" + str(tree.lep_matchedR03_MomMomId[j])if tree.lep_matchedR03_PdgId[j]!=999888 and tree.lep_matchedR03_MomId!=999888 and tree.lep_matchedR03_MomMomId[j]!=999888: print \"Event:\" + str(tree.Event) + \", Lepton \" + str(j) + \" is a \" + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + \" that came from a \" + Particle.from_pdgid(tree.lep_matchedR03_MomId[j]).name + \" which came from a \" + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).nameelif tree.lep_matchedR03_MomId[j]==999888: print \"Event:\" + str(tree.Event) + \", Lepton \" + str(j) + \" is a \" + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + \" that came from a \" + str(tree.lep_matchedR03_MomId[j]) + \" which came from a \" + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name"},"nl_size":{"kind":"number","value":2277,"string":"2,277"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6195318698883057,"string":"0.619532"}}},{"rowIdx":7871,"cells":{"content":{"kind":"string","value":"import logging\nfrom korbit.client.korbit_client import KorbitClient\n\nlogging.basicConfig(level=logging.INFO)\n\nproperties_sandbox_file = '../properties_sandbox_test.json'\ncontext_sandbox_file = '../context_sandbox.json'\n\nkbclient = KorbitClient(properties_sandbox_file, 
context_sandbox_file)\n\nprint(kbclient.getUserInfo())\n\n# 매수 Buy\n# print( kbclient.buy(price=300000, coin_amount=1) )\n# # 매도 Sell\n# print( kbclient.sell(price=300000, coin_amount=1) )\nprint( kbclient.getOpenOrders() )\n\n\n# Wallet Test\nwallet = kbclient.getWallet()\nbalance = wallet['balance']\npending_orders = wallet['pendingOrders']\navailable = wallet['available']\n\nprint(balance)\nprint(pending_orders)\nprint(available)"},"path":{"kind":"string","value":"test/korbit/client/korbit_client_tests.py"},"size":{"kind":"number","value":694,"string":"694"},"nl_text":{"kind":"string","value":"매수 Buy print( kbclient.buy(price=300000, coin_amount=1) ) 매도 Sell print( kbclient.sell(price=300000, coin_amount=1) ) Wallet Test"},"nl_size":{"kind":"number","value":130,"string":"130"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.47219768166542053,"string":"0.472198"}}},{"rowIdx":7872,"cells":{"content":{"kind":"string","value":"import asyncio\nimport socket\n\nfrom stor.server.server import StorServer\nfrom stor.types.peer_info import PeerInfo\n\n\ndef start_reconnect_task(server: StorServer, peer_info_arg: PeerInfo, log, auth: bool):\n \"\"\"\n Start a background task that checks connection and reconnects periodically to a peer.\n \"\"\"\n # If peer_info_arg is already an address, use it, otherwise resolve it here.\n if peer_info_arg.is_valid():\n peer_info = peer_info_arg\n else:\n peer_info = PeerInfo(socket.gethostbyname(peer_info_arg.host), peer_info_arg.port)\n\n async def connection_check():\n while True:\n peer_retry = True\n for _, connection in server.all_connections.items():\n if connection.get_peer_info() == peer_info or connection.get_peer_info() == peer_info_arg:\n peer_retry = False\n if peer_retry:\n log.info(f\"Reconnecting to peer {peer_info}\")\n try:\n await server.start_client(peer_info, None, auth=auth)\n except Exception as e:\n log.info(f\"Failed to connect to {peer_info} {e}\")\n await asyncio.sleep(3)\n\n return asyncio.create_task(connection_check())\n"},"path":{"kind":"string","value":"stor/server/reconnect_task.py"},"size":{"kind":"number","value":1235,"string":"1,235"},"nl_text":{"kind":"string","value":"Start a background task that checks connection and reconnects periodically to a peer.\n\n If peer_info_arg is already an address, use it, otherwise resolve it here."},"nl_size":{"kind":"number","value":162,"string":"162"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8542771935462952,"string":"0.854277"}}},{"rowIdx":7873,"cells":{"content":{"kind":"string","value":"\"\"\"\nGenerates code metrics for a given project. 
Whereas code_metrics.py operates\non a single stream of source code input, this program walks a project tree and\ngenerates reports based on all of the source code found.\n\nTODO: project config should be supplied as input, not imported\n\"\"\"\n\nimport os, shutil\nimport code_metrics, metrics_formatter, stats, config\n\ndef find_available_filename(filename):\n\tif not os.path.exists(filename):\n\t\treturn filename\n\tattempts = 1\n\tfilename += str(attempts)\n\twhile os.path.exists(filename):\n\t\tattempts += 1\n\t\tif (attempts > 999):\n\t\t\tprint('error: could not find available filename', filename)\n\t\t\texit()\n\t\tfilename = filename[:len(filename)-1] + str(attempts)\n\treturn filename\n\ndef is_code_file(path):\n\tfilename, file_ext = os.path.splitext(path)\n\treturn file_ext in config.code_filename_extensions\n\ndef find_files(root_path, filter):\n\tresult = []\n\tfor root, dirs, files in os.walk(root_path):\n\t\tfor file_name in files:\n\t\t\tif not filter(file_name):\n\t\t\t\tcontinue\n\t\t\tpath = os.path.join(root, file_name)\n\t\t\tresult.append(path)\n\treturn result\n\ndef add_project_totals(project_report, file_reports):\n\tproject_report['file_count'] = len(file_reports)\n\tproject_report['function_count'] = 0\n\tproject_report['line_count'] = 0\n\tproject_report['lines_ending_in_whitespace_count'] = 0\n\tproject_report['line_length_distribution'] = {}\n\tproject_report['line_indent_distribution'] = {}\n\n\tfor filename, file_report in file_reports.items():\n\t\tif file_report == {}:\n\t\t\tcontinue\n\t\tproject_report['function_count'] += len(file_report['functions'])\n\t\tproject_report['line_count'] += file_report['line_count']\n\n\t\t# TODO: figure out how to aggregate project stats like this\n\t\t#project_report['lines_ending_in_whitespace_count'] += file_report['lines_ending_in_whitespace_count']\n\t\t#stats.merge_into_distribution(project_report['line_length_distribution'], file_report['line_length_distribution'])\n\t\t#stats.merge_into_distribution(project_report['line_indent_distribution'], file_report['line_indent_distribution'])\n\ndef report(project_root):\n\tfile_reports = {}\n\tfor path in find_files(project_root, is_code_file):\n\t\ttarget_lang = code_metrics.file_ext_lang(path)\n\t\twith open(path, 'r') as input_file:\n\t\t\ttry:\n\t\t\t\tfile_reports[path] = code_metrics.report(path, input_file.read(), target_lang)\n\t\t\texcept IOError:\n\t\t\t\tcontinue\n\tproject_report = {\n\t\t'source_path': project_root,\n\t\t'files': file_reports\n\t}\n\tadd_project_totals(project_report, file_reports)\n\treturn project_report\n\ndef write_report_file(report, path, target_dir):\n\tif report == {}:\n\t\treturn\n\n\tfilename = metrics_formatter.convert_path_to_report_filename(path)\n\tout_file_path = target_dir + '/' + filename\n\tout_file_path = find_available_filename(out_file_path)\n\n\twith open(out_file_path, 'w') as output_file:\n\t\tmetrics_formatter.write_report(report, 'html', output_file)\n\ndef write_report(project_report, target_dir):\n\tif os.path.exists(target_dir):\n\t\tprint('error: cannot create output dir', target_dir)\n\t\texit()\n\tos.mkdir(target_dir)\n\n\twith open(target_dir + '/' + 'index.html', 'w') as output_file:\n\t\tmetrics_formatter.write_project_index(project_report, 'html', output_file)\n\n\tfor path, report in project_report['files'].items():\n\t\twrite_report_file(report, path, target_dir)\n\nif __name__ == '__main__':\n\t# TODO: make output format configurable\n\toutput_dir = config.project_report_output_dir # TODO: also accept 
command line flag\n\toutput_dir = find_available_filename(output_dir)\n\twrite_report(report(config.project_root), output_dir)\n\tshutil.copy('Chart.min.js', output_dir)\n"},"path":{"kind":"string","value":"project_metrics.py"},"size":{"kind":"number","value":3550,"string":"3,550"},"nl_text":{"kind":"string","value":"Generates code metrics for a given project. Whereas code_metrics.py operates\non a single stream of source code input, this program walks a project tree and\ngenerates reports based on all of the source code found.\n\nTODO: project config should be supplied as input, not imported\n\n TODO: figure out how to aggregate project stats like thisproject_report['lines_ending_in_whitespace_count'] += file_report['lines_ending_in_whitespace_count']stats.merge_into_distribution(project_report['line_length_distribution'], file_report['line_length_distribution'])stats.merge_into_distribution(project_report['line_indent_distribution'], file_report['line_indent_distribution']) TODO: make output format configurable TODO: also accept command line flag"},"nl_size":{"kind":"number","value":739,"string":"739"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.5915559530258179,"string":"0.591556"}}},{"rowIdx":7874,"cells":{"content":{"kind":"string","value":"\"\"\"\nswitchboard.manager\n~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2015 Kyle Adams.\n:license: Apache License 2.0, see LICENSE for more details.\n\"\"\"\n\nimport logging\n\nimport sqlalchemy as sqla\n\nfrom .base import ModelDict\nfrom .models import (\n Model,\n Switch,\n DISABLED, SELECTIVE, GLOBAL, INHERIT,\n INCLUDE, EXCLUDE,\n)\nfrom .proxy import SwitchProxy\nfrom .settings import settings, Settings\nfrom .store import SQLAlchemyStore\n\n\nlog = logging.getLogger(__name__)\n# These are (mostly) read-only module variables since we want it shared among\n# any and all threads. 
The only exception to read-only is when they are
# populated on Switchboard startup (i.e., operator.register()).
registry = {}
registry_by_namespace = {}


def nested_config(config):
    cfg = {}
    token = 'switchboard.'
    for k, v in config.iteritems():
        if k.startswith(token):
            cfg[k.replace(token, '')] = v
    return cfg


def configure(config={}, nested=False, cache=None):
    """Useful for when you need to control Switchboard's setup."""
    if nested:
        config = nested_config(config)
    # Re-read settings to make sure we have everything.
    Settings.init(cache=cache, **config)
    operator.cache = cache

    # Establish the connection to the database.
    timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
    dburl = settings.SWITCHBOARD_DBURL
    if dburl:
        engine = sqla.create_engine(
            dburl, connect_args={'connect_timeout': timeout})
        Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
    # Register the builtins.
    __import__('switchboard.builtins')


class SwitchManager(ModelDict):
    DISABLED = DISABLED
    SELECTIVE = SELECTIVE
    GLOBAL = GLOBAL
    INHERIT = INHERIT

    INCLUDE = INCLUDE
    EXCLUDE = EXCLUDE

    def __init__(self, *args, **kwargs):
        # Inject args and kwargs that are known quantities; the SwitchManager
        # will always deal with the Switch model and so on.
        new_args = [Switch]
        new_args.extend(args)
        kwargs['key'] = 'key'
        kwargs['value'] = 'value'
        self.result_cache = None
        self.context = {}
        super(SwitchManager, self).__init__(*new_args, **kwargs)

    def __unicode__(self):
        return "<%s: %s (%s)>" % (self.__class__.__name__,
                                  getattr(self, 'model', ''),
                                  registry.values())

    def __getitem__(self, key):
        """
        Returns a SwitchProxy, rather than a Switch. It allows us to
        easily extend the Switches method and automatically include our
        manager instance.
        """
        return SwitchProxy(self, super(SwitchManager, self).__getitem__(key))

    def with_result_cache(func):
        """
        Decorator specifically for is_active. If self.result_cache is set to a {}
        the is_active results will be cached for each set of params.
        """
        def inner(self, *args, **kwargs):
            dic = self.result_cache
            cache_key = None
            if dic is not None:
                cache_key = (args, tuple(kwargs.items()))
                try:
                    result = dic.get(cache_key)
                except TypeError as e:  # not hashable
                    log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s',
                              args[0], e, repr(cache_key)[:200])
                    cache_key = None
                else:
                    if result is not None:
                        return result
            result = func(self, *args, **kwargs)
            if cache_key is not None:
                dic[cache_key] = result
            return result
        return inner

    @with_result_cache
    def is_active(self, key, *instances, **kwargs):
        """
        Returns ``True`` if any of ``instances`` match an active switch.
        Otherwise returns ``False``.

        >>> operator.is_active('my_feature', request) #doctest: +SKIP
        """
        try:
            default = kwargs.pop('default', False)

            # Check all parents for a disabled state
            parts = key.split(':')
            if len(parts) > 1:
                child_kwargs = kwargs.copy()
                child_kwargs['default'] = None
                result = self.is_active(':'.join(parts[:-1]), *instances,
                                        **child_kwargs)

                if result is False:
                    return result
                elif result is True:
                    default = result

            try:
                switch = self[key]
            except KeyError:
                # switch is not defined, defer to parent
                return default

            if switch.status == GLOBAL:
                return True
            elif switch.status == DISABLED:
                return False
            elif switch.status == INHERIT:
                return default

            conditions = switch.value
            # If no conditions are set, we inherit from parents
            if not conditions:
                return default

            instances = list(instances) if instances else []
            instances.extend(self.context.values())

            # check each switch to see if it can execute
            return_value = False

            for namespace, condition in conditions.iteritems():
                condition_set = registry_by_namespace.get(namespace)
                if not condition_set:
                    continue
                result = condition_set.has_active_condition(condition,
                                                            instances)
                if result is False:
                    return False
                elif result is True:
                    return_value = True
        except:
            log.exception('Error checking if switch "%s" is active', key)
            return_value = False

        # there were no matching conditions, so it must not be enabled
        return return_value

    def register(self, condition_set):
        """
        Registers a condition set with the manager.

        >>> condition_set = MyConditionSet() #doctest: +SKIP
        >>> operator.register(condition_set) #doctest: +SKIP
        """
        if callable(condition_set):
            condition_set = condition_set()
        registry[condition_set.get_id()] = condition_set
        registry_by_namespace[condition_set.get_namespace()] = condition_set

    def unregister(self, condition_set):
        """
        Unregisters a condition set with the manager.

        >>> operator.unregister(condition_set) #doctest: +SKIP
        """
        if callable(condition_set):
            condition_set = condition_set()
        registry.pop(condition_set.get_id(), None)
        registry_by_namespace.pop(condition_set.get_namespace(), None)

    def get_condition_set_by_id(self, switch_id):
        """
        Given the identifier of a condition set (described in
        ConditionSet.get_id()), returns the registered instance.
        """
        return registry[switch_id]

    def get_condition_sets(self):
        """
        Returns a generator yielding all currently registered
        ConditionSet instances.
        """
        return registry.itervalues()

    def get_all_conditions(self):
        """
        Returns a generator which yields groups of lists of conditions.

        >>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
        >>>     print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
        """
        cs = self.get_condition_sets()
        for condition_set in sorted(cs, key=lambda x: x.get_group_label()):
            group = unicode(condition_set.get_group_label())
            for field in condition_set.fields.itervalues():
                yield condition_set.get_id(), group, field

    def as_request(self, user=None, ip_address=None):
        from .helpers import MockRequest
        return MockRequest(user, ip_address)


auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True)
operator = SwitchManager(auto_create=auto_create)

# (end of switchboard/manager.py)
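# A minimal usage sketch for the manager above, based only on its docstrings.
# The import path is taken from the record's file path; MyConditionSet,
# `request` and show_new_banner() are assumptions for illustration.
from switchboard.manager import operator

operator.register(MyConditionSet)        # hypothetical ConditionSet subclass
operator.context['user'] = request.user  # extra instances passed to every check

if operator.is_active('homepage:new_banner', request, default=False):
    show_new_banner()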
# file: openreview/util.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

#----------------------------------------------------------------------------
# Created By Rodrigo Wilkens
# Last update 27/March/2022
# version ='1.0'
# ---------------------------------------------------------------------------

def join_institution(institution):
    if len(institution)==0:
        return None
    if len(institution)==1:
        return institution[0]
    res = ", ".join(institution[:-1])
    res += " and " + institution[-1]
    return res


def get_user(or_id, client_acl, force_institution=False):
    c = None
    try:
        c = client_acl.get_profile(or_id)
    except:
        print("\nERROR: or_id not found", or_id)
        return {"first_name":or_id, "last_name":or_id, "name":or_id, "username":or_id, "emails":or_id, "institution":"NA"}, True
    try:
        if or_id[0] == "~":
            emails = client_acl.search_profiles(ids=[or_id])
            assert len(emails) >= 1
        else:
            emails = client_acl.search_profiles(ids=[c.id])
            assert len(emails) >= 1
            # emails = [or_id]
    except:
        print("\nERROR: or_id not associated to an email", or_id)
        return {"first_name":or_id, "last_name":or_id, "name":or_id, "username":or_id, "emails":or_id, "institution":"NA"}, True
    # try:
    if True:
        c = c.content
        namePrefered = None
        for name in c["names"]:
            if namePrefered==None or ('preferred' in name and name['preferred']):
                namePrefered = name
        name = " ".join([namePrefered['first'] if type(namePrefered['first'])==str else '',
                         namePrefered['middle'] if namePrefered['middle']!=None else '',
                         namePrefered['last'] if namePrefered['last']!=None else '']).replace("  ", " ")
        first_name = namePrefered['first'].strip() if type(namePrefered['first'])==str else ''
        middle_name = namePrefered['middle'].strip() if namePrefered['middle']!=None else ''
        last_name = namePrefered['last'].strip() if namePrefered['last']!=None else ''
        username = namePrefered['username'].strip()
        if len(first_name)>2:
            first_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in first_name.split(" ")])
        if len(middle_name)>2:
            middle_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in middle_name.split(" ")])
        if len(last_name)>2:
            last_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in last_name.split(" ")])

        if 'preferredEmail' in emails[0].content:
            emails = emails[0].content['preferredEmail']
        else:
            emails = emails[0].content['emails'][0]
        emails = emails.replace("_", "\\_")

        institution = []
        if 'history' in c:
            for h in c['history']:
                if 'end' not in h or h['end'] == None:
                    institution.append(h['institution']["name"])
        ret = {"first_name":first_name, "last_name":last_name, "name":name, "username":username, "emails":emails}
        institution = join_institution(institution)
        if institution:
            ret["institution"] = institution
        else:
            if force_institution:
                ret["institution"] = "NA"
    if len(middle_name)>0:
        ret["middle_name"] = middle_name
    if "gscholar" in c:
        ret["google_scholar_id"] = c["gscholar"]
    if 'dblp' in c:
        ret['dblp_id'] = c['dblp']
    if 'homepage' in c:
        ret['homepage'] = c['homepage']
    if 'orcid' in c:
        ret['orcid'] = c['orcid']
    if 'semanticScholar' in c:
        ret["semantic_scholar_id"] = c['semanticScholar']
    return ret, False
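# Usage sketch for get_user() above. The import path, credentials and the
# profile id are placeholders; the function only needs an openreview.Client-like
# object exposing get_profile() and search_profiles().
import openreview
from openreview.util import get_user

client = openreview.Client(baseurl='https://api.openreview.net',
                           username='me@example.com', password='...')
profile, failed = get_user('~Jane_Doe1', client, force_institution=True)
if not failed:
    print(profile['name'], profile.get('institution'))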
# file: examples/example_reportwriter/example_reportwriter.py
from pdf_reports import ReportWriter

# DEFINE A WRITER WITH DEFAULT TEMPLATE AND VALUES
report_writer = ReportWriter(
    default_stylesheets=["style.css"],
    default_template="template.pug",
    title="My default title",
    version="0.1.2"
)

# THEN LATER IN YOUR CODE:
html = report_writer.pug_to_html(my_name="Zulko", my_organization="EGF")
report_writer.write_report(html, "example_reportwriter.pdf")


# file: pkg/codegen/testing/test/testdata/plain-schema-gh6957/python/pulumi_xyz/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities

__all__ = [
    'Foo',
]

@pulumi.input_type
class Foo:
    def __init__(__self__, *,
                 a: Optional[bool] = None):
        if a is not None:
            pulumi.set(__self__, "a", a)

    @property
    @pulumi.getter
    def a(self) -> Optional[bool]:
        return pulumi.get(self, "a")

    @a.setter
    def a(self, value: Optional[bool]):
        pulumi.set(self, "a", value)
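# Quick sketch of how the generated input type above is used (requires the
# pulumi SDK, since Foo stores its field through pulumi.set/pulumi.get).
foo = Foo(a=True)
assert foo.a is True
foo.a = False   # the @a.setter writes the value back through pulumi.set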
# file: model/utils/config.py
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict

__C = edict()
# Consumers can get config by:
#   from fast_rcnn_config import cfg
cfg = __C

#
# Training options
#

__C.TRAIN = edict()

# Online hard negative mining
__C.TRAIN.HARD_POSITIVE_MINING = True
__C.TRAIN.HARD_NEGATIVE_MINING = True
__C.TRAIN.BG_THRESH_LOW = 0.0

__C.TRAIN.ORIG_SIZE = False

# Initial learning rate
__C.TRAIN.LEARNING_RATE = 0.001

# Momentum
__C.TRAIN.MOMENTUM = 0.9

# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0005

# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1

# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = [30000]

# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 50
# Iteration intervals for save check point
__C.TRAIN.CHECKPOINT = 500
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True

# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False

# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False

# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False

# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False

# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3

# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180

# Scale to use during training (can list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,800)

# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1200

# Trim size for input images to create minibatch
__C.TRAIN.TRIM_HEIGHT = 600
__C.TRAIN.TRIM_WIDTH = 600

# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1

# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 256

# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25

# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5

# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.0

# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True

# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True

# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5

# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000

# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: [_]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
# __C.TRAIN.SNAPSHOT_INFIX = ''

# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
# __C.TRAIN.USE_PREFETCH = False

# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)

# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'gt'

# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.

# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.ANCHOR_POSITIVE_OVERLAP = 0.5
# IOU < thresh: negative example
__C.TRAIN.ANCHOR_NEGATIVE_OVERLAP = 0.3
# If an anchor statisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.25
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 384
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 4
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Whether to use all ground truth bounding boxes for training,
# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
__C.TRAIN.USE_ALL_GT = True

# Whether to tune the batch normalization parameters during training
__C.TRAIN.BN_TRAIN = False

#
# Testing options
#
__C.TEST = edict()

# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (1200,)

# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1600

__C.TEST.ORIG_SIZE = False
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3

# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False

# Test using bounding-box regressors
__C.TEST.BBOX_REG = True

# Propose boxes
__C.TEST.HAS_RPN = False

# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'

## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.3
## Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000

## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300

# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16

# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'

# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000

#
# ResNet options
#

__C.RESNET = edict()

# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False

# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1

#
# MobileNet options
#

__C.MOBILENET = edict()

# Whether to regularize the depth-wise filters during training
__C.MOBILENET.REGU_DEPTH = False

# Number of fixed layers during training, by default the first of all 14 layers is fixed
# Range: 0 (none) to 12 (all)
__C.MOBILENET.FIXED_LAYERS = 5

# Weight decay for the mobilenet weights
__C.MOBILENET.WEIGHT_DECAY = 0.00004

# Depth multiplier
__C.MOBILENET.DEPTH_MULTIPLIER = 1.

#
# MISC
#

# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1. / 16.

# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])

# For reproducibility
__C.RNG_SEED = 3

# A small number that's used many times
__C.EPS = 1e-14

# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))

# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))

# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'

# Place outputs under an experiments directory
__C.EXP_DIR = 'default'

# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True

# Default GPU device id
__C.GPU_ID = 0

__C.POOLING_MODE = 'crop'

# Size of the pooled region after RoI pooling
__C.POOLING_SIZE = 7

# Maximal number of gt rois in an image during Training
__C.MAX_NUM_GT_BOXES = 20

# Anchor scales for RPN
__C.ANCHOR_SCALES = [8, 16, 32]

# Anchor ratios for RPN
__C.ANCHOR_RATIOS = [0.5, 1, 2]

# Feature stride for RPN
__C.FEAT_STRIDE = [16, ]

__C.CUDA = False

__C.CROP_RESIZE_WITH_MAX_POOL = True

import pdb


def get_output_dir(imdb_name, net_name=None, output_dir='output'):
    """Return the directory where experimental artifacts are placed.
    If the directory does not exist, it is created.

    A canonical path is built using the name from an imdb and a network
    (if not None).
    """
    outdir = osp.abspath(osp.join(cfg.ROOT_DIR, output_dir, cfg.EXP_DIR, imdb_name))
    if net_name is not None:
        outdir = osp.join(outdir, net_name)

    if not os.path.exists(outdir):
        os.makedirs(outdir)
    return outdir


def get_output_tb_dir(imdb, weights_filename):
    """Return the directory where tensorflow summaries are placed.
    If the directory does not exist, it is created.
    A canonical path is built using the name from an imdb and a network
    (if not None).
    """
    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
    if weights_filename is None:
        weights_filename = 'default'
    outdir = osp.join(outdir, weights_filename)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    return outdir


def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.
    """
    if type(a) is not edict:
        return

    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))

        # the types must match, too
        old_type = type(b[k])
        if old_type is not type(v):
            if isinstance(b[k], np.ndarray):
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) '
                                  'for config key: {}').format(type(b[k]),
                                                               type(v), k))

        # recursively merge dicts
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except:
                print(('Error under config key: {}'.format(k)))
                raise
        else:
            b[k] = v


def cfg_from_file(filename):
    """Load a config file and merge it into the default options."""
    import yaml
    with open(filename, 'r') as f:
        yaml_cfg = edict(yaml.load(f))

    _merge_a_into_b(yaml_cfg, __C)


def cfg_from_list(cfg_list):
    """Set config keys via list (e.g., from command line)."""
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        for subkey in key_list[:-1]:
            assert subkey in d
            d = d[subkey]
        subkey = key_list[-1]
        assert subkey in d
        try:
            value = literal_eval(v)
        except:
            # handle the case when v is a string literal
            value = v
        assert type(value) == type(d[subkey]), \
            'type {} does not match original type {}'.format(
                type(value), type(d[subkey]))
        d[subkey] = value
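# Usage sketch for the helpers above, using only names defined in this module;
# the import path is taken from the record's file path and the dataset/net
# names are placeholders.
from model.utils.config import cfg, cfg_from_list, get_output_dir

cfg_from_list(['TRAIN.BATCH_SIZE', '128', 'TRAIN.LEARNING_RATE', '0.01'])
print(cfg.TRAIN.BATCH_SIZE)                                    # -> 128
print(get_output_dir('voc_2007_trainval', net_name='res101'))  # creates the dir if missing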
# file: ADMCode/snuz/ars/logz.py
# Code in this file is copied and adapted from
# https://github.com/berkeleydeeprlcourse

import json

"""

Some simple logging functionality, inspired by rllab's logging.
Assumes that each diagnostic gets logged each iteration

Call logz.configure_output_dir() to start logging to a
tab-separated-values file (some_folder_name/log.txt)

"""

import os.path as osp, shutil, time, atexit, os, subprocess

color2num = dict(
    gray=30,
    red=31,
    green=32,
    yellow=33,
    blue=34,
    magenta=35,
    cyan=36,
    white=37,
    crimson=38
)

def colorize(string, color, bold=False, highlight=False):
    attr = []
    num = color2num[color]
    if highlight: num += 10
    attr.append(str(num))
    if bold: attr.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)

class G(object):
    output_dir = None
    output_file = None
    first_row = True
    log_headers = []
    log_current_row = {}

def configure_output_dir(d=None):
    """
    Set output directory to d, or to /tmp/somerandomnumber if d is None
    """
    G.first_row = True
    G.log_headers = []
    G.log_current_row = {}

    G.output_dir = d or "/tmp/experiments/%i"%int(time.time())
    if not osp.exists(G.output_dir):
        os.makedirs(G.output_dir)
    G.output_file = open(osp.join(G.output_dir, "log.txt"), 'w')
    atexit.register(G.output_file.close)
    print(colorize("Logging data to %s"%G.output_file.name, 'green', bold=True))

def log_tabular(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    """
    if G.first_row:
        G.log_headers.append(key)
    else:
        assert key in G.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration"%key
    assert key not in G.log_current_row, "You already set %s this iteration. Maybe you forgot to call dump_tabular()"%key
    G.log_current_row[key] = val


def save_params(params):
    with open(osp.join(G.output_dir, "params.json"), 'w') as out:
        out.write(json.dumps(params, separators=(',\n','\t:\t'), sort_keys=True))


def dump_tabular():
    """
    Write all of the diagnostics from the current iteration
    """
    vals = []
    key_lens = [len(key) for key in G.log_headers]
    max_key_len = max(15,max(key_lens))
    keystr = '%'+'%d'%max_key_len
    fmt = "| " + keystr + "s | %15s |"
    n_slashes = 22 + max_key_len
    print("-"*n_slashes)
    for key in G.log_headers:
        val = G.log_current_row.get(key, "")
        if hasattr(val, "__float__"): valstr = "%8.3g"%val
        else: valstr = val
        print(fmt%(key, valstr))
        vals.append(val)
    print("-"*n_slashes)
    if G.output_file is not None:
        if G.first_row:
            G.output_file.write("\t".join(G.log_headers))
            G.output_file.write("\n")
        G.output_file.write("\t".join(map(str,vals)))
        G.output_file.write("\n")
        G.output_file.flush()
    G.log_current_row.clear()
    G.first_row=False
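# The call pattern described in the module docstring above; the directory name
# and the logged values are placeholders.
import logz

logz.configure_output_dir('/tmp/experiments/demo')
logz.save_params({'lr': 1e-3, 'seed': 0})
for itr in range(3):
    logz.log_tabular('Iteration', itr)
    logz.log_tabular('AverageReturn', 100.0 + itr)
    logz.dump_tabular()   # prints a table and appends one row to log.txt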
# file: config/config.py
# !/usr/bin/env python
# -*-coding: utf-8 -*-

__author__ = 'wtq'

LOG_PATH = "monitor_logging.log"

REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379

# Sampling interval and pause length
MONITOR_INTERVAL = 1
MONITOR_PEROID = 3

# Network interface whose read/write rate is monitored
NET_NAME = 'eth0'

# Names of the machines in the system, used to compute the system-wide average load
SYSTEM_MACHINE_NAME = ["storage1", "storage2"]

# Machine(s) used to count client connections, usually the master
CLIENT_LINK_MACNHIE = ["storage1"]

DISK_ALL_SPACE = 100
CPU_KERNEL_NUMS = 32
MEM_ALL_SPACE = 100

FASTDFSPORT = '8000'
REDIS_SYSTEM_KEY = 'system'

FASTDFS_PEROID = 3
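# Sketch of how a collector might consume the constants above. Only REDIS_HOST,
# REDIS_PORT, REDIS_SYSTEM_KEY and CPU_KERNEL_NUMS come from the config itself;
# the import path and the hash-field layout are assumptions.
import redis
from config import config as cfg

r = redis.StrictRedis(host=cfg.REDIS_HOST, port=cfg.REDIS_PORT)
r.hset(cfg.REDIS_SYSTEM_KEY, 'cpu_kernels', cfg.CPU_KERNEL_NUMS)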
# file: ohi.py
#!/usr/bin/env python
# (works in both Python 2 and Python 3)

# Offline HTML Indexer v1.32 (c) 2013-15,2020 Silas S. Brown.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is a Python program for creating large indices of
# HTML text which can be queried using simple Javascript
# that works on many mobile phone browsers without needing
# an Internet connection or a Web server. This is useful if
# you want to load a dictionary or other reference onto your
# phone (or computer) for use when connectivity is not
# available.
# The input HTML should be interspersed with anchors like
# this: <a name="xyz"></a> where xyz is the index heading
# for the following text. There should be one such anchor
# before each entry and an extra anchor at the end of the
# text; everything before the first anchor is counted as the
# "header" and everything after the last as the "footer". If
# these are empty, a default "mobile friendly" HTML header
# and footer specifying UTF-8 encoding will be
# added. Anchors may be linked from other entries; these
# links are changed as necessary.
# Opening any of the resulting HTML files should display a
# textbox that lets you type the first few letters of the
# word you wish to look up; the browser will then jump to
# whatever heading is alphabetically nearest to the typed-in
# text.

# Configuration
# -------------

infile = None # None = standard input, or set a "filename"
outdir = "." # current directory by default
alphabet = "abcdefghijklmnopqrstuvwxyz" # set to None for all characters and case-sensitive
ignore_text_in_parentheses = True # or False, for parentheses in index headings
more_sensible_punctuation_sort_order = True

remove_utf8_diacritics = True # or False, for removing diacritics in index headings (not in main text);
# assumes UTF-8. (Letters with diacritics will be treated as though they did not have any.)

max_filesize = 64*1024 # of each HTML file
# (max_filesize can be exceeded by 1 very large entry)

# Where to find history:
# on GitHub at https://github.com/ssb22/indexer
# and on GitLab at https://gitlab.com/ssb22/indexer
# and on BitBucket https://bitbucket.org/ssb22/indexer
# and at https://gitlab.developers.cam.ac.uk/ssb22/indexer
# and in China: https://gitee.com/ssb22/indexer

# ---------------------------------------------------------------

import re,sys,os,time
if type("")==type(u""): izip = zip # Python 3
else: from itertools import izip # Python 2

if infile:
    sys.stderr.write("Reading from "+infile+"... ")
    infile = open(infile)
else:
    sys.stderr.write("Reading from standard input... ")
    infile = sys.stdin
fragments = re.split(r'<a name="([^"]*)"></a>',infile.read())
# odd indices should be the tag names, even should be the HTML in between
assert len(fragments)>3, "Couldn't find 2 or more hash tags (were they formatted correctly?)"
assert len(fragments)%2, "re.split not returning groups??"
header,footer = fragments[0],fragments[-1]
if not header.strip(): header=""""""
if not footer.strip(): footer = ""
fragments = fragments[1:-1]
sys.stderr.write("%d entries\n" % len(fragments))
def alphaOnly(x):
    if ignore_text_in_parentheses: x=re.sub(r"\([^)]*\)[;, ]*","",x)
    if alphabet: x=''.join(c for c in x.lower() if c in alphabet)
    return re.sub(r"^[@,;]*","",x) # see ohi_latex.py
if more_sensible_punctuation_sort_order:
    _ao1 = alphaOnly
    alphaOnly = lambda x: _ao1(re.sub('([;,]);+',r'\1',x.replace('-',' ').replace(',','~COM~').replace(';',',').replace('~COM~',';').replace(' ',';'))) # gives ; < , == space (useful if ; is used to separate definitions and , is used before extra words to be added at the start; better set space EQUAL to comma, not higher, or will end up in wrong place if user inputs something forgetting the comma)
    if alphabet:
        for c in '@,;':
            if not c in alphabet: alphabet += c
if remove_utf8_diacritics:
    _ao = alphaOnly ; import unicodedata
    def S(s):
        if type(u"")==type(""): return s # Python 3
        else: return s.encode('utf-8') # Python 2
    def U(s):
        if type(s)==type(u""): return s
        return s.decode('utf-8')
    alphaOnly = lambda x: _ao(S(u''.join((c for c in unicodedata.normalize('NFD',U(x)) if not unicodedata.category(c).startswith('M')))))
fragments = list(zip(map(alphaOnly,fragments[::2]), fragments[1::2]))
fragments.sort()
class ChangedLetters:
    def __init__(self): self.lastText = ""
    def __call__(self,text):
        "Find shortest prefix of text that differentiates it from previous item (empty string if no difference)"
        assert text >= self.lastText, "input must have been properly sorted"
        i = 0
        for c1,c2 in izip(self.lastText+chr(0),text):
            i += 1
            if not c1==c2:
                self.lastText = text
                return text[:i]
        assert text==self.lastText, repr(text)+"!="+repr(self.lastText)
        return "" # no difference from lastText
changedLetters = ChangedLetters() ; f2 = []
fragments.reverse()
sys.stderr.write("Minimizing prefixes... ")
while fragments:
    x,y = fragments.pop()
    x = changedLetters(x)
    if f2 and not x: f2[-1] = (f2[-1][0], f2[-1][1]+y) # combine effectively-identical ones
    else: f2.append((x,y))
sys.stderr.write("done\n")
fragments = f2
def tag(n):
    if n: return '<a name="%s"></a>' % n
    else: return ''
def old_javascript_array(array):
    "in case the browser doesn't support JSON, and to save some separator bytes"
    array = list(array) # in case it was an iterator
    sepChar = ord(' ')
    chars_used = set(''.join(array))
    assert '"' not in chars_used and '\\' not in chars_used and '<' not in chars_used and '&' not in chars_used, "Can't use special chars (unless you change this code to escape them)"
    while True:
        if chr(sepChar) not in chars_used and not chr(sepChar) in r'\"<&': break
        sepChar += 1
    assert sepChar < 127, "can't find a suitable separator char (hard-code the array instead?)"
    return '"'+chr(sepChar).join(array)+'".split("'+chr(sepChar)+'")'
js_binchop = """function(a,i) {
function inner(a,i,lo,hi) {
var mid=lo+Math.floor((hi-lo)/2);
if(mid==lo || a[mid]==i) return a[mid];
if(a[mid] > i) return inner(a,i,lo,mid);
return inner(a,i,mid,hi);
} return inner(a,i,0,a.length);
}"""
js_binchop_dx = js_binchop.replace("return a[mid]","return mid")
def js_hashjump(hashtags): return """""" % (js_binchop,old_javascript_array(hashtags)) # (the h!=n test is needed to avoid loop on some browsers e.g. PocketIE7)
# #_h and #_f are special hashes for header and footer, used for "Next page" and "Previous page" links
# (HTML5 defaults type to text/javascript, as do all pre-HTML5 browsers including NN2's 'script language="javascript"' thing, so we might as well save a few bytes)

__lastStartEnd = None
def htmlDoc(start,end,docNo):
    "Returns an HTML document containing fragments[start:end]. docNo is used to generate previous/next page links as appropriate. Caches its return value in case called again with same start,end (in which case docNo is ignored on second call)."
    global __lastStartEnd,__lastDoc
    if not (start,end) == __lastStartEnd:
        __lastStartEnd = (start,end)
        __lastDoc = header+js_hashjump(x for x,y in fragments[start:end] if x)
        if start:
            assert docNo, "Document 0 should start at 0"
            __lastDoc += '<p><a name="_h" href="%d.html#_f">Previous page</a></p>' % (docNo-1,)
    __lastDoc += ''.join(tag(x)+y for x,y in fragments[start:end])
    if end < len(fragments): __lastDoc += '<p><a name="_f" href="%d.html#_h">Next page</a></p>' % (docNo+1,)
    return __lastDoc + footer

def findEnd(start,docNo):
    "Given 'start' (an index into 'fragments'), find an 'end' that produces the largest possible htmlDoc less than max_filesize. docNo is used to generate previous/next page links as appropriate."
    eTry = len(fragments)-start
    sLen = sum(len(x)+len(y) for x,y in fragments[start:start+eTry])
    if sLen > max_filesize:
        eTry = int(eTry / int(sLen / max_filesize)) # rough start point
    while eTry > 1 and len(htmlDoc(start,start+eTry,docNo)) > max_filesize:
        eTry = int(eTry/2)
    if eTry < 1: eTry = 1
    while eTry < len(fragments)-start and len(htmlDoc(start,start+eTry,docNo)) < max_filesize: eTry += 1
    return start + max(1,eTry-1)
def allRanges():
    start = docNo = 0
    while start < len(fragments):
        end = findEnd(start,docNo)
        sys.stderr.write("\rSegmenting (%d/%d)" % (end,len(fragments)))
        yield start,end
        start = end ; docNo += 1
sys.stderr.write("Segmenting")
startsList = []
for start,end in allRanges():
    open(("%s%s%d.html" % (outdir,os.sep,len(startsList))),"w").write(htmlDoc(start,end,len(startsList)))
    startsList.append(start)
if alphabet:
    assert not '"' in alphabet and not '\\' in alphabet and not '&' in alphabet and not '<' in alphabet, "Can't use special characters in alphabet (unless js_alphabet is modified to quote them)"
    js_alphabet = """var a=val.toLowerCase(),i; val="";
for(i=0; i < a.length; i++) { var c=a.charAt(i); if("%s".indexOf(c)>-1) val += c }
""" % alphabet # TODO: what if user types letters with diacritics, when remove_utf8_diacritics is set?
else: js_alphabet = ""
if more_sensible_punctuation_sort_order: js_alphabet = "val = val.replace(/-/g,' ').replace(/,/g,'~COM~').replace(/;/g,',').replace(/~COM~/g,';').replace(/ /g,';').replace(/([;,]);+/g,'$1');" + js_alphabet

def hashReload(footer):
    # If a footer refers to index.html#example, need to
    # make sure the hash script runs when clicking there
    # from the index page itself.
    strToFind = '
 Lookup: 
%s""" % (hashReload(linkSub(header)),js_alphabet,js_binchop_dx,old_javascript_array(fragments[s][0] for s in startsList),hashReload(linkSub(footer))))
sys.stderr.write(" %d files\n" % (len(startsList)+1))
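# Sketch of preparing input for the indexer above: one anchor per entry plus a
# trailing anchor, as the header comment requires. The file name is a placeholder;
# the generated HTML would normally be piped into ohi.py on standard input.
entries = {'apple': 'A fruit.', 'zebra': 'An animal.'}
parts = []
for word in sorted(entries):
    parts.append('<a name="%s"></a><p><b>%s</b>: %s</p>' % (word, word, entries[word]))
parts.append('<a name="zzz"></a>')   # final anchor closes the last entry
with open('dictionary_input.html', 'w') as f:
    f.write(''.join(parts))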
# file: config/settings.py
import environ
from pathlib import Path

env = environ.Env(
    # Sets debug to False if it cannot find .env
    DEBUG=(bool, False)
)

environ.Env.read_env()


# GENERAL
# ------------------------------------------------------------------------------
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY
SECRET_KEY = env.str('SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DEBUG')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = tuple(env.list('ALLOWED_HOSTS'))

# APPS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'whitenoise.runserver_nostatic',
    'django.contrib.staticfiles',
    'django.contrib.sites',

    # Third-party
    'allauth',
    'allauth.account',
    'crispy_forms',
    'debug_toolbar',

    # Local
    'accounts',
    'pages',
    'snacks',
]

# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"

# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# INTERNATIONALIZATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/topics/i18n/
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'UTC'
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-USE_I18N
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True


# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(BASE_DIR.joinpath('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(BASE_DIR.joinpath('static'))]
# http://whitenoise.evans.io/en/stable/django.html#add-compression-and-caching-support
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"

# DJANGO-CRISPY-FORMS CONFIGS
# ------------------------------------------------------------------------------
# https://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"

# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# DJANGO-DEBUG-TOOLBAR CONFIGS
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html
# https://docs.djangoproject.com/en/dev/ref/settings/#internal-ips
INTERNAL_IPS = ['127.0.0.1']

# CUSTOM USER MODEL CONFIGS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/topics/auth/customizing/#substituting-a-custom-user-model
AUTH_USER_MODEL = 'accounts.CustomUser'

# DJANGO-ALLAUTH CONFIGS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'home'
# https://django-allauth.readthedocs.io/en/latest/views.html#logout-account-logout
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
# https://django-allauth.readthedocs.io/en/latest/installation.html?highlight=backends
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
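# The settings above read SECRET_KEY, DEBUG and ALLOWED_HOSTS through environ.Env,
# so a .env file must exist where read_env() can find it. A minimal development
# .env could be generated like this (values are placeholders).
from pathlib import Path

Path('.env').write_text(
    'SECRET_KEY=change-me\n'
    'DEBUG=True\n'
    'ALLOWED_HOSTS=localhost,127.0.0.1\n'
)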
https://django-allauth.readthedocs.io/en/latest/installation.html?highlight=backends https://django-allauth.readthedocs.io/en/latest/configuration.html"},"nl_size":{"kind":"number","value":3486,"string":"3,486"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.4159325957298279,"string":"0.415933"}}},{"rowIdx":7883,"cells":{"content":{"kind":"string","value":"# model settings\nmodel = dict(\n type='CenterNet',\n pretrained='modelzoo://resnet18',\n backbone=dict(\n type='ResNet',\n depth=18,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_eval=False,\n add_summay_every_n_step=200,\n style='pytorch'),\n neck=dict(type='None'),\n bbox_head=dict(\n type='CXTHead',\n inplanes=(64, 128, 256, 512),\n head_conv=128,\n wh_conv=64,\n use_deconv=False,\n norm_after_upsample=False,\n hm_head_conv_num=2,\n wh_head_conv_num=2,\n ct_head_conv_num=1,\n fovea_hm=False,\n num_classes=81,\n use_exp_wh=False,\n wh_offset_base=16,\n wh_area_process='norm',\n shortcut_cfg=(1, 2, 3),\n shortcut_attention=(False, False, False),\n norm_cfg=dict(type='BN'),\n norm_wh=False,\n avg_wh_weightv3=False,\n center_ratio=0.2,\n hm_init_value=None,\n giou_weight=5.,\n merge_weight=1.,\n hm_weight=1.,\n ct_weight=1.))\ncudnn_benchmark = True\n# training and testing settings\ntrain_cfg = dict(\n vis_every_n_iters=100,\n debug=False)\ntest_cfg = dict(\n score_thr=0.05,\n max_per_img=100)\n# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile', to_float32=True),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='PhotoMetricDistortion',\n brightness_delta=32,\n contrast_range=(0.5, 1.5),\n saturation_range=(0.5, 1.5),\n hue_delta=18),\n dict(\n type='Expand',\n mean=img_norm_cfg['mean'],\n to_rgb=img_norm_cfg['to_rgb'],\n ratio_range=(1, 4)),\n dict(\n type='MinIoURandomCrop',\n min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),\n min_crop_size=0.3),\n dict(type='Resize', img_scale=(512, 512), keep_ratio=False),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(512, 512),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=False),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ndata = dict(\n imgs_per_gpu=16,\n workers_per_gpu=4,\n train=dict(\n type=dataset_type,\n ann_file=data_root + 'annotations/instances_train2017.json',\n img_prefix=data_root + 'train2017/',\n pipeline=train_pipeline),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'annotations/instances_val2017.json',\n img_prefix=data_root + 'val2017/',\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n ann_file=data_root + 'annotations/instances_val2017.json',\n img_prefix=data_root + 'val2017/',\n pipeline=test_pipeline))\n# optimizer\noptimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0003,\n paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=1.0 / 5,\n step=[18, 22])\ncheckpoint_config = 
dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=18)\nbbox_head_hist_config = dict(\n model_type=['ConvModule', 'DeformConvPack'],\n sub_modules=['bbox_head'],\n save_every_n_steps=200)\n# yapf:disable\nlog_config = dict(interval=20)\n# yapf:enable\n# runtime settings\ntotal_epochs = 24\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = 'eft18_o16_v1norm_3lr_alpha2_wd3e4_s123_nos_2x'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\n"},"path":{"kind":"string","value":"configs/centernext/eft18_o16_v1norm_3lr_alpha2_wd3e4_s123_nos_2x.py"},"size":{"kind":"number","value":4096,"string":"4,096"},"nl_text":{"kind":"string","value":"model settings training and testing settings dataset settings optimizer learning policy yapf:disable yapf:enable runtime settings"},"nl_size":{"kind":"number","value":129,"string":"129"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7788625359535217,"string":"0.778863"}}},{"rowIdx":7884,"cells":{"content":{"kind":"string","value":"import multiprocessing as mp\nimport itertools\nimport traceback\nimport pickle\n\nimport numpy as np\n\nfrom numba import cuda\nfrom numba.cuda.testing import (skip_on_cudasim, skip_under_cuda_memcheck,\n ContextResettingTestCase, ForeignArray)\nimport unittest\n\n\ndef core_ipc_handle_test(the_work, result_queue):\n try:\n arr = the_work()\n # Catch anything going wrong in the worker function\n except: # noqa: E722\n # FAILED. propagate the exception as a string\n succ = False\n out = traceback.format_exc()\n else:\n # OK. send the ndarray back\n succ = True\n out = arr\n result_queue.put((succ, out))\n\n\ndef base_ipc_handle_test(handle, size, result_queue):\n def the_work():\n dtype = np.dtype(np.intp)\n with cuda.open_ipc_array(handle, shape=size // dtype.itemsize,\n dtype=dtype) as darr:\n # copy the data to host\n return darr.copy_to_host()\n\n core_ipc_handle_test(the_work, result_queue)\n\n\ndef serialize_ipc_handle_test(handle, result_queue):\n def the_work():\n dtype = np.dtype(np.intp)\n darr = handle.open_array(cuda.current_context(),\n shape=handle.size // dtype.itemsize,\n dtype=dtype)\n # copy the data to host\n arr = darr.copy_to_host()\n handle.close()\n return arr\n\n core_ipc_handle_test(the_work, result_queue)\n\n\ndef ipc_array_test(ipcarr, result_queue):\n try:\n with ipcarr as darr:\n arr = darr.copy_to_host()\n try:\n # should fail to reopen\n with ipcarr:\n pass\n except ValueError as e:\n if str(e) != 'IpcHandle is already opened':\n raise AssertionError('invalid exception message')\n else:\n raise AssertionError('did not raise on reopen')\n # Catch any exception so we can propagate it\n except: # noqa: E722\n # FAILED. propagate the exception as a string\n succ = False\n out = traceback.format_exc()\n else:\n # OK. 
send the ndarray back\n succ = True\n out = arr\n result_queue.put((succ, out))\n\n\n@skip_under_cuda_memcheck('Hangs cuda-memcheck')\n@skip_on_cudasim('Ipc not available in CUDASIM')\nclass TestIpcMemory(ContextResettingTestCase):\n def test_ipc_handle(self):\n # prepare data for IPC\n arr = np.arange(10, dtype=np.intp)\n devarr = cuda.to_device(arr)\n\n # create IPC handle\n ctx = cuda.current_context()\n ipch = ctx.get_ipc_handle(devarr.gpu_data)\n\n # manually prepare for serialization as bytes\n handle_bytes = bytes(ipch.handle)\n size = ipch.size\n\n # spawn new process for testing\n ctx = mp.get_context('spawn')\n result_queue = ctx.Queue()\n args = (handle_bytes, size, result_queue)\n proc = ctx.Process(target=base_ipc_handle_test, args=args)\n proc.start()\n succ, out = result_queue.get()\n if not succ:\n self.fail(out)\n else:\n np.testing.assert_equal(arr, out)\n proc.join(3)\n\n def variants(self):\n # Test with no slicing and various different slices\n indices = (None, slice(3, None), slice(3, 8), slice(None, 8))\n # Test with a Numba DeviceNDArray, or an array from elsewhere through\n # the CUDA Array Interface\n foreigns = (False, True)\n return itertools.product(indices, foreigns)\n\n def check_ipc_handle_serialization(self, index_arg=None, foreign=False):\n # prepare data for IPC\n arr = np.arange(10, dtype=np.intp)\n devarr = cuda.to_device(arr)\n if index_arg is not None:\n devarr = devarr[index_arg]\n if foreign:\n devarr = cuda.as_cuda_array(ForeignArray(devarr))\n expect = devarr.copy_to_host()\n\n # create IPC handle\n ctx = cuda.current_context()\n ipch = ctx.get_ipc_handle(devarr.gpu_data)\n\n # pickle\n buf = pickle.dumps(ipch)\n ipch_recon = pickle.loads(buf)\n self.assertIs(ipch_recon.base, None)\n self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))\n self.assertEqual(ipch_recon.size, ipch.size)\n\n # spawn new process for testing\n ctx = mp.get_context('spawn')\n result_queue = ctx.Queue()\n args = (ipch, result_queue)\n proc = ctx.Process(target=serialize_ipc_handle_test, args=args)\n proc.start()\n succ, out = result_queue.get()\n if not succ:\n self.fail(out)\n else:\n np.testing.assert_equal(expect, out)\n proc.join(3)\n\n def test_ipc_handle_serialization(self):\n for index, foreign, in self.variants():\n with self.subTest(index=index, foreign=foreign):\n self.check_ipc_handle_serialization(index, foreign)\n\n def check_ipc_array(self, index_arg=None, foreign=False):\n # prepare data for IPC\n arr = np.arange(10, dtype=np.intp)\n devarr = cuda.to_device(arr)\n # Slice\n if index_arg is not None:\n devarr = devarr[index_arg]\n if foreign:\n devarr = cuda.as_cuda_array(ForeignArray(devarr))\n expect = devarr.copy_to_host()\n ipch = devarr.get_ipc_handle()\n\n # spawn new process for testing\n ctx = mp.get_context('spawn')\n result_queue = ctx.Queue()\n args = (ipch, result_queue)\n proc = ctx.Process(target=ipc_array_test, args=args)\n proc.start()\n succ, out = result_queue.get()\n if not succ:\n self.fail(out)\n else:\n np.testing.assert_equal(expect, out)\n proc.join(3)\n\n def test_ipc_array(self):\n for index, foreign, in self.variants():\n with self.subTest(index=index, foreign=foreign):\n self.check_ipc_array(index, foreign)\n\n\ndef staged_ipc_handle_test(handle, device_num, result_queue):\n def the_work():\n with cuda.gpus[device_num]:\n this_ctx = cuda.devices.get_context()\n deviceptr = handle.open_staged(this_ctx)\n arrsize = handle.size // np.dtype(np.intp).itemsize\n hostarray = np.zeros(arrsize, dtype=np.intp)\n 
cuda.driver.device_to_host(\n hostarray, deviceptr, size=handle.size,\n )\n handle.close()\n return hostarray\n\n core_ipc_handle_test(the_work, result_queue)\n\n\ndef staged_ipc_array_test(ipcarr, device_num, result_queue):\n try:\n with cuda.gpus[device_num]:\n with ipcarr as darr:\n arr = darr.copy_to_host()\n try:\n # should fail to reopen\n with ipcarr:\n pass\n except ValueError as e:\n if str(e) != 'IpcHandle is already opened':\n raise AssertionError('invalid exception message')\n else:\n raise AssertionError('did not raise on reopen')\n # Catch any exception so we can propagate it\n except: # noqa: E722\n # FAILED. propagate the exception as a string\n succ = False\n out = traceback.format_exc()\n else:\n # OK. send the ndarray back\n succ = True\n out = arr\n result_queue.put((succ, out))\n\n\n@skip_under_cuda_memcheck('Hangs cuda-memcheck')\n@skip_on_cudasim('Ipc not available in CUDASIM')\nclass TestIpcStaged(ContextResettingTestCase):\n def test_staged(self):\n # prepare data for IPC\n arr = np.arange(10, dtype=np.intp)\n devarr = cuda.to_device(arr)\n\n # spawn new process for testing\n mpctx = mp.get_context('spawn')\n result_queue = mpctx.Queue()\n\n # create IPC handle\n ctx = cuda.current_context()\n ipch = ctx.get_ipc_handle(devarr.gpu_data)\n # pickle\n buf = pickle.dumps(ipch)\n ipch_recon = pickle.loads(buf)\n self.assertIs(ipch_recon.base, None)\n self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))\n self.assertEqual(ipch_recon.size, ipch.size)\n\n # Test on every CUDA devices\n for device_num in range(len(cuda.gpus)):\n args = (ipch, device_num, result_queue)\n proc = mpctx.Process(target=staged_ipc_handle_test, args=args)\n proc.start()\n succ, out = result_queue.get()\n proc.join(3)\n if not succ:\n self.fail(out)\n else:\n np.testing.assert_equal(arr, out)\n\n def test_ipc_array(self):\n for device_num in range(len(cuda.gpus)):\n # prepare data for IPC\n arr = np.random.random(10)\n devarr = cuda.to_device(arr)\n ipch = devarr.get_ipc_handle()\n\n # spawn new process for testing\n ctx = mp.get_context('spawn')\n result_queue = ctx.Queue()\n args = (ipch, device_num, result_queue)\n proc = ctx.Process(target=staged_ipc_array_test, args=args)\n proc.start()\n succ, out = result_queue.get()\n proc.join(3)\n if not succ:\n self.fail(out)\n else:\n np.testing.assert_equal(arr, out)\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"path":{"kind":"string","value":"numba/cuda/tests/cudapy/test_ipc.py"},"size":{"kind":"number","value":9385,"string":"9,385"},"nl_text":{"kind":"string","value":"Catch anything going wrong in the worker function noqa: E722 FAILED. propagate the exception as a string OK. send the ndarray back copy the data to host copy the data to host should fail to reopen Catch any exception so we can propagate it noqa: E722 FAILED. propagate the exception as a string OK. send the ndarray back prepare data for IPC create IPC handle manually prepare for serialization as bytes spawn new process for testing Test with no slicing and various different slices Test with a Numba DeviceNDArray, or an array from elsewhere through the CUDA Array Interface prepare data for IPC create IPC handle pickle spawn new process for testing prepare data for IPC Slice spawn new process for testing should fail to reopen Catch any exception so we can propagate it noqa: E722 FAILED. propagate the exception as a string OK. 
send the ndarray back prepare data for IPC spawn new process for testing create IPC handle pickle Test on every CUDA devices prepare data for IPC spawn new process for testing"},"nl_size":{"kind":"number","value":1009,"string":"1,009"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7996553182601929,"string":"0.799655"}}},{"rowIdx":7885,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python2\n\n# Copyright 2016 Vimal Manohar\n# 2016 Johns Hopkins University (author: Daniel Povey)\n# Apache 2.0\n\nfrom __future__ import print_function\nimport argparse\nimport logging\nimport sys\nfrom collections import defaultdict\n\n\"\"\"\nThis script reads and writes the 'ctm-edits' file that is\nproduced by get_ctm_edits.py.\n\nIt modifies the ctm-edits so that non-scored words\nare not counted as errors: for instance, if there are things like\n[COUGH] and [NOISE] in the transcript, deletions, insertions and\nsubstitutions involving them are allowed, and we modify the reference\nto correspond to the hypothesis.\n\nIf you supply the directory (the one that corresponds to\nhow you decoded the data) to this script, it assumes that the \ndirectory contains phones/align_lexicon.int, and it uses this to work\nout a reasonable guess of the non-scored phones, based on which have\na single-word pronunciation that maps to a silence phone.\nIt then uses the words.txt to work out the written form of those words.\n\nAlternatively, you may specify a file containing the non-scored words one\nper line, with the --non-scored-words option.\n\nNon-scored words that were deleted (i.e. they were in the ref but not the\nhyp) are simply removed from the ctm. For non-scored words that\nwere inserted or substituted, we change the reference word to match the\nhyp word, but instead of marking the operation as 'cor' (correct), we\nmark it as 'fix' (fixed), so that it will not be positively counted as a correct\nword for purposes of finding the optimal segment boundaries.\n\ne.g.\n \n[note: the will always be 1].\n\nAJJacobs_2007P-0001605-0003029 1 0 0.09 1.0 sil\nAJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor\nAJJacobs_2007P-0001605-0003029 1 0.24 0.25 thought 1.0 thought cor\nAJJacobs_2007P-0001605-0003029 1 0.49 0.14 i'd 1.0 i'd cor\nAJJacobs_2007P-0001605-0003029 1 0.63 0.22 tell 1.0 tell cor\nAJJacobs_2007P-0001605-0003029 1 0.85 0.11 you 1.0 you cor\nAJJacobs_2007P-0001605-0003029 1 0.96 0.05 a 1.0 a cor\nAJJacobs_2007P-0001605-0003029 1 1.01 0.24 little 1.0 little cor\nAJJacobs_2007P-0001605-0003029 1 1.25 0.5 about 1.0 about cor\nAJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 [UH] cor\n\"\"\"\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s - '\n '%(funcName)s - %(levelname)s ] %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nparser = argparse.ArgumentParser(\n description = \"This program modifies the reference in the ctm-edits which \"\n \"is output by steps/cleanup/internal/get_ctm_edits.py, to allow insertions, deletions and \"\n \"substitutions of non-scored words, and [if --allow-repetitions=true], \"\n \"duplications of single words or pairs of scored words (to account for dysfluencies \"\n \"that were not transcribed). 
Note: deletions and substitutions of non-scored words \"\n \"after the reference is corrected, will be marked as operation 'fix' rather than \"\n \"'cor' (correct) so that the downstream processing knows that this was not in \"\n \"the original reference. Also by defaults tags non-scored words as such when \"\n \"they are correct; see the --tag-non-scored option.\")\n\nparser.add_argument(\"--verbose\", type = int, default = 1,\n choices=[0,1,2,3],\n help = \"Verbose level, higher = more verbose output\")\nparser.add_argument(\"--allow-repetitions\", type = str, default = 'true',\n choices=['true','false'],\n help = \"If true, allow repetitions in the transcript of one or \"\n \"two-word sequences: for instance if the ref says 'i' but \"\n \"the hyp says 'i i', or the ref says 'but then' and the hyp says \"\n \"'but then but then', fix the reference accordingly. Intervening \"\n \"non-scored words are allowed between the repetitions. These \"\n \"fixes will be marked as 'cor', not as 'fix', since there is \"\n \"generally no way to tell which repetition was the 'real' one \"\n \"(and since we're generally confident that such things were \"\n \"actually uttered).\")\nparser.add_argument(\"non_scored_words_in\", metavar = \"\",\n help=\"Filename of file containing a list of non-scored words, \"\n \"one per line. See steps/cleanup/get_nonscored_words.py.\")\nparser.add_argument(\"ctm_edits_in\", metavar = \"\",\n help = \"Filename of input ctm-edits file. \"\n \"Use /dev/stdin for standard input.\")\nparser.add_argument(\"ctm_edits_out\", metavar = \"\",\n help = \"Filename of output ctm-edits file. \"\n \"Use /dev/stdout for standard output.\")\n\nargs = parser.parse_args()\n\n\n\ndef ReadNonScoredWords(non_scored_words_file):\n global non_scored_words\n try:\n f = open(non_scored_words_file)\n except:\n sys.exit(\"modify_ctm_edits.py: error opening file: \"\n \"--non-scored-words=\" + non_scored_words_file)\n for line in f.readlines():\n a = line.split()\n if not len(line.split()) == 1:\n sys.exit(\"modify_ctm_edits.py: bad line in non-scored-words \"\n \"file {0}: {1}\".format(non_scored_words_file, line))\n non_scored_words.add(a[0])\n f.close()\n\n\n\n# The ctm-edits file format is as follows [note: file-id is really utterance-id\n# in this context].\n# \n# e.g.:\n# AJJacobs_2007P-0001605-0003029 1 0 0.09 1.0 sil\n# AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor\n# ...\n# This function processes a single line of ctm-edits input for fixing\n# \"non-scored\" words. The input 'a' is the split line as an array of fields.\n# It modifies the object 'a'. 
This function returns the modified array,\n# and please note that it is destructive of its input 'a'.\n# If it returnso the empty array then the line is to be deleted.\ndef ProcessLineForNonScoredWords(a):\n global num_lines, num_correct_lines, ref_change_stats\n try:\n assert len(a) == 8\n num_lines += 1\n # we could do:\n # [ file, channel, start, duration, hyp_word, confidence, ref_word, edit_type ] = a\n duration = a[3]\n hyp_word = a[4]\n ref_word = a[6]\n edit_type = a[7]\n if edit_type == 'ins':\n assert ref_word == ''\n if hyp_word in non_scored_words:\n # insert this non-scored word into the reference.\n ref_change_stats[ref_word + ' -> ' + hyp_word] += 1\n ref_word = hyp_word\n edit_type = 'fix'\n elif edit_type == 'del':\n assert hyp_word == '' and float(duration) == 0.0\n if ref_word in non_scored_words:\n ref_change_stats[ref_word + ' -> ' + hyp_word] += 1\n return []\n elif edit_type == 'sub':\n assert hyp_word != ''\n if hyp_word in non_scored_words and ref_word in non_scored_words:\n # we also allow replacing one non-scored word with another.\n ref_change_stats[ref_word + ' -> ' + hyp_word] += 1\n ref_word = hyp_word\n edit_type = 'fix'\n else:\n assert edit_type == 'cor' or edit_type == 'sil'\n num_correct_lines += 1\n\n a[4] = hyp_word\n a[6] = ref_word\n a[7] = edit_type\n return a\n\n except Exception:\n logger.error(\"bad line in ctm-edits input: \"\n \"{0}\".format(a))\n raise RuntimeError\n\n# This function processes the split lines of one utterance (as a\n# list of lists of fields), to allow repetitions of words, so if the\n# reference says 'i' but the hyp says 'i i', or the ref says\n# 'you know' and the hyp says 'you know you know', we change the\n# ref to match.\n# It returns the modified list-of-lists [but note that the input\n# is actually modified].\ndef ProcessUtteranceForRepetitions(split_lines_of_utt):\n global non_scored_words, repetition_stats\n # The array 'selected_lines' will contain the indexes of of selected\n # elements of 'split_lines_of_utt'. Consider split_line =\n # split_lines_of_utt[i]. If the hyp and ref words in split_line are both\n # either '' or non-scoreable words, we discard the index.\n # Otherwise we put it into selected_lines.\n selected_line_indexes = []\n # selected_edits will contain, for each element of selected_line_indexes, the\n # corresponding edit_type from the original utterance previous to\n # this function call ('cor', 'ins', etc.).\n #\n # As a special case, if there was a substitution ('sub') where the\n # reference word was a non-scored word and the hyp word was a real word,\n # we mark it in this array as 'ins', because for purposes of this algorithm\n # it behaves the same as an insertion.\n #\n # Whenever we do any operation that will change the reference, we change\n # all the selected_edits in the array to None so that they won't match\n # any further operations.\n selected_edits = []\n # selected_hyp_words will contain, for each element of selected_line_indexes, the\n # corresponding hyp_word.\n selected_hyp_words = []\n\n for i in range(len(split_lines_of_utt)):\n split_line = split_lines_of_utt[i]\n hyp_word = split_line[4]\n ref_word = split_line[6]\n # keep_this_line will be True if we are going to keep this line in the\n # 'selected lines' for further processing of repetitions. 
We only\n # eliminate lines involving non-scored words or epsilon in both hyp\n # and reference position\n # [note: epsilon in hyp position for non-empty segments indicates\n # optional-silence, and it does make sense to make this 'invisible',\n # just like non-scored words, for the purposes of this code.]\n keep_this_line = True\n if (hyp_word == '' or hyp_word in non_scored_words) and \\\n (ref_word == '' or ref_word in non_scored_words):\n keep_this_line = False\n if keep_this_line:\n selected_line_indexes.append(i)\n edit_type = split_line[7]\n if edit_type == 'sub' and ref_word in non_scored_words:\n assert not hyp_word in non_scored_words\n # For purposes of this algorithm, substitution of, say,\n # '[COUGH]' by 'hello' behaves like an insertion of 'hello',\n # since we're willing to remove the '[COUGH]' from the\n # transript.\n edit_type = 'ins'\n selected_edits.append(edit_type)\n selected_hyp_words.append(hyp_word)\n\n # indexes_to_fix will be a list of indexes into 'selected_indexes' where we\n # plan to fix the ref to match the hyp.\n indexes_to_fix = []\n\n # This loop scans for, and fixes, two-word insertions that follow,\n # or precede, the corresponding correct words.\n for i in range(0, len(selected_line_indexes) - 3):\n this_indexes = selected_line_indexes[i:i+4]\n this_hyp_words = selected_hyp_words[i:i+4]\n\n if this_hyp_words[0] == this_hyp_words[2] and \\\n this_hyp_words[1] == this_hyp_words[3] and \\\n this_hyp_words[0] != this_hyp_words[1]:\n # if the hyp words were of the form [ 'a', 'b', 'a', 'b' ]...\n this_edits = selected_edits[i:i+4]\n if this_edits == [ 'cor', 'cor', 'ins', 'ins' ] or \\\n this_edits == [ 'ins', 'ins', 'cor', 'cor' ]:\n if this_edits[0] == 'cor':\n indexes_to_fix += [ i+2, i+3 ]\n else:\n indexes_to_fix += [ i, i+1 ]\n\n # the next line prevents this region of the text being used\n # in any further edits.\n selected_edits[i:i+4] = [ None, None, None, None ]\n word_pair = this_hyp_words[0] + ' ' + this_hyp_words[1]\n # e.g. 
word_pair = 'hi there'\n # add 2 because these stats are of words.\n repetition_stats[word_pair] += 2\n # the next line prevents this region of the text being used\n # in any further edits.\n selected_edits[i:i+4] = [ None, None, None, None ]\n\n # This loop scans for, and fixes, one-word insertions that follow,\n # or precede, the corresponding correct words.\n for i in range(0, len(selected_line_indexes) - 1):\n this_indexes = selected_line_indexes[i:i+2]\n this_hyp_words = selected_hyp_words[i:i+2]\n\n if this_hyp_words[0] == this_hyp_words[1]:\n # if the hyp words were of the form [ 'a', 'a' ]...\n this_edits = selected_edits[i:i+2]\n if this_edits == [ 'cor', 'ins' ] or this_edits == [ 'ins', 'cor' ]:\n if this_edits[0] == 'cor':\n indexes_to_fix.append(i+1)\n else:\n indexes_to_fix.append(i)\n repetition_stats[this_hyp_words[0]] += 1\n # the next line prevents this region of the text being used\n # in any further edits.\n selected_edits[i:i+2] = [ None, None ]\n\n for i in indexes_to_fix:\n j = selected_line_indexes[i]\n split_line = split_lines_of_utt[j]\n ref_word = split_line[6]\n hyp_word = split_line[4]\n assert ref_word == '' or ref_word in non_scored_words\n # we replace reference with the decoded word, which will be a\n # repetition.\n split_line[6] = hyp_word\n split_line[7] = 'cor'\n\n return split_lines_of_utt\n\n\n# note: split_lines_of_utt is a list of lists, one per line, each containing the\n# sequence of fields.\n# Returns the same format of data after processing.\ndef ProcessUtterance(split_lines_of_utt):\n new_split_lines_of_utt = []\n for split_line in split_lines_of_utt:\n new_split_line = ProcessLineForNonScoredWords(split_line)\n if new_split_line != []:\n new_split_lines_of_utt.append(new_split_line)\n if args.allow_repetitions == 'true':\n new_split_lines_of_utt = ProcessUtteranceForRepetitions(new_split_lines_of_utt)\n return new_split_lines_of_utt\n\n\ndef ProcessData():\n try:\n f_in = open(args.ctm_edits_in)\n except:\n sys.exit(\"modify_ctm_edits.py: error opening ctm-edits input \"\n \"file {0}\".format(args.ctm_edits_in))\n try:\n f_out = open(args.ctm_edits_out, 'w')\n except:\n sys.exit(\"modify_ctm_edits.py: error opening ctm-edits output \"\n \"file {0}\".format(args.ctm_edits_out))\n num_lines_processed = 0\n\n\n # Most of what we're doing in the lines below is splitting the input lines\n # and grouping them per utterance, before giving them to ProcessUtterance()\n # and then printing the modified lines.\n first_line = f_in.readline()\n if first_line == '':\n sys.exit(\"modify_ctm_edits.py: empty input\")\n split_pending_line = first_line.split()\n if len(split_pending_line) == 0:\n sys.exit(\"modify_ctm_edits.py: bad input line \" + first_line)\n cur_utterance = split_pending_line[0]\n split_lines_of_cur_utterance = []\n\n while True:\n if len(split_pending_line) == 0 or split_pending_line[0] != cur_utterance:\n split_lines_of_cur_utterance = ProcessUtterance(split_lines_of_cur_utterance)\n for split_line in split_lines_of_cur_utterance:\n print(' '.join(split_line), file = f_out)\n split_lines_of_cur_utterance = []\n if len(split_pending_line) == 0:\n break\n else:\n cur_utterance = split_pending_line[0]\n\n split_lines_of_cur_utterance.append(split_pending_line)\n next_line = f_in.readline()\n split_pending_line = next_line.split()\n if len(split_pending_line) == 0:\n if next_line != '':\n sys.exit(\"modify_ctm_edits.py: got an empty or whitespace input line\")\n try:\n f_out.close()\n except:\n sys.exit(\"modify_ctm_edits.py: error closing 
ctm-edits output \"\n \"(broken pipe or full disk?)\")\n\ndef PrintNonScoredStats():\n if args.verbose < 1:\n return\n if num_lines == 0:\n print(\"modify_ctm_edits.py: processed no input.\", file = sys.stderr)\n num_lines_modified = sum(ref_change_stats.values())\n num_incorrect_lines = num_lines - num_correct_lines\n percent_lines_incorrect= '%.2f' % (num_incorrect_lines * 100.0 / num_lines)\n percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines);\n if num_incorrect_lines > 0:\n percent_of_incorrect_modified = '%.2f' % (num_lines_modified * 100.0 /\n num_incorrect_lines)\n else:\n percent_of_incorrect_modified = float('nan')\n print(\"modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), \"\n \"of which {2} were changed fixing the reference for non-scored words \"\n \"({3}% of lines, or {4}% of incorrect lines)\".format(\n num_lines, percent_lines_incorrect, num_lines_modified,\n percent_modified, percent_of_incorrect_modified),\n file = sys.stderr)\n\n keys = sorted(ref_change_stats.keys(), reverse=True,\n key = lambda x: ref_change_stats[x])\n num_keys_to_print = 40 if args.verbose >= 2 else 10\n\n print(\"modify_ctm_edits.py: most common edits (as percentages \"\n \"of all such edits) are:\\n\" +\n ('\\n'.join([ '%s [%.2f%%]' % (k, ref_change_stats[k]*100.0/num_lines_modified)\n for k in keys[0:num_keys_to_print]]))\n + '\\n...'if num_keys_to_print < len(keys) else '',\n file = sys.stderr)\n\n\ndef PrintRepetitionStats():\n if args.verbose < 1 or sum(repetition_stats.values()) == 0:\n return\n num_lines_modified = sum(repetition_stats.values())\n num_incorrect_lines = num_lines - num_correct_lines\n percent_lines_incorrect= '%.2f' % (num_incorrect_lines * 100.0 / num_lines)\n percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines);\n if num_incorrect_lines > 0:\n percent_of_incorrect_modified = '%.2f' % (num_lines_modified * 100.0 /\n num_incorrect_lines)\n else:\n percent_of_incorrect_modified = float('nan')\n print(\"modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), \"\n \"of which {2} were changed fixing the reference for repetitions ({3}% of \"\n \"lines, or {4}% of incorrect lines)\".format(\n num_lines, percent_lines_incorrect, num_lines_modified,\n percent_modified, percent_of_incorrect_modified),\n file = sys.stderr)\n\n keys = sorted(repetition_stats.keys(), reverse=True,\n key = lambda x: repetition_stats[x])\n num_keys_to_print = 40 if args.verbose >= 2 else 10\n\n print(\"modify_ctm_edits.py: most common repetitions inserted into reference (as percentages \"\n \"of all words fixed in this way) are:\\n\" +\n ('\\n'.join([ '%s [%.2f%%]' % (k, repetition_stats[k]*100.0/num_lines_modified)\n for k in keys[0:num_keys_to_print]]))\n + '\\n...' 
if num_keys_to_print < len(keys) else '',\n file = sys.stderr)\n\n\nnon_scored_words = set()\nReadNonScoredWords(args.non_scored_words_in)\n\nnum_lines = 0\nnum_correct_lines = 0\n# ref_change_stats will be a map from a string like\n# 'foo -> bar' to an integer count; it keeps track of how much we changed\n# the reference.\nref_change_stats = defaultdict(int)\n# repetition_stats will be a map from strings like\n# 'a', or 'a b' (the repeated strings), to an integer count; like\n# ref_change_stats, it keeps track of how many changes we made\n# in allowing repetitions.\nrepetition_stats = defaultdict(int)\n\nProcessData()\nPrintNonScoredStats()\nPrintRepetitionStats()\n"},"path":{"kind":"string","value":"egs/wsj/s5/steps/cleanup/internal/modify_ctm_edits.py"},"size":{"kind":"number","value":20279,"string":"20,279"},"nl_text":{"kind":"string","value":"!/usr/bin/env python2 Copyright 2016 Vimal Manohar 2016 Johns Hopkins University (author: Daniel Povey) Apache 2.0 The ctm-edits file format is as follows [note: file-id is really utterance-id in this context]. e.g.: AJJacobs_2007P-0001605-0003029 1 0 0.09 1.0 sil AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor ... This function processes a single line of ctm-edits input for fixing \"non-scored\" words. The input 'a' is the split line as an array of fields. It modifies the object 'a'. This function returns the modified array, and please note that it is destructive of its input 'a'. If it returnso the empty array then the line is to be deleted. we could do: [ file, channel, start, duration, hyp_word, confidence, ref_word, edit_type ] = a insert this non-scored word into the reference. we also allow replacing one non-scored word with another. This function processes the split lines of one utterance (as a list of lists of fields), to allow repetitions of words, so if the reference says 'i' but the hyp says 'i i', or the ref says 'you know' and the hyp says 'you know you know', we change the ref to match. It returns the modified list-of-lists [but note that the input is actually modified]. The array 'selected_lines' will contain the indexes of of selected elements of 'split_lines_of_utt'. Consider split_line = split_lines_of_utt[i]. If the hyp and ref words in split_line are both either '' or non-scoreable words, we discard the index. Otherwise we put it into selected_lines. selected_edits will contain, for each element of selected_line_indexes, the corresponding edit_type from the original utterance previous to this function call ('cor', 'ins', etc.). As a special case, if there was a substitution ('sub') where the reference word was a non-scored word and the hyp word was a real word, we mark it in this array as 'ins', because for purposes of this algorithm it behaves the same as an insertion. Whenever we do any operation that will change the reference, we change all the selected_edits in the array to None so that they won't match any further operations. selected_hyp_words will contain, for each element of selected_line_indexes, the corresponding hyp_word. keep_this_line will be True if we are going to keep this line in the 'selected lines' for further processing of repetitions. We only eliminate lines involving non-scored words or epsilon in both hyp and reference position [note: epsilon in hyp position for non-empty segments indicates optional-silence, and it does make sense to make this 'invisible', just like non-scored words, for the purposes of this code.] 
For purposes of this algorithm, substitution of, say, '[COUGH]' by 'hello' behaves like an insertion of 'hello', since we're willing to remove the '[COUGH]' from the transript. indexes_to_fix will be a list of indexes into 'selected_indexes' where we plan to fix the ref to match the hyp. This loop scans for, and fixes, two-word insertions that follow, or precede, the corresponding correct words. if the hyp words were of the form [ 'a', 'b', 'a', 'b' ]... the next line prevents this region of the text being used in any further edits. e.g. word_pair = 'hi there' add 2 because these stats are of words. the next line prevents this region of the text being used in any further edits. This loop scans for, and fixes, one-word insertions that follow, or precede, the corresponding correct words. if the hyp words were of the form [ 'a', 'a' ]... the next line prevents this region of the text being used in any further edits. we replace reference with the decoded word, which will be a repetition. note: split_lines_of_utt is a list of lists, one per line, each containing the sequence of fields. Returns the same format of data after processing. Most of what we're doing in the lines below is splitting the input lines and grouping them per utterance, before giving them to ProcessUtterance() and then printing the modified lines. ref_change_stats will be a map from a string like 'foo -> bar' to an integer count; it keeps track of how much we changed the reference. repetition_stats will be a map from strings like 'a', or 'a b' (the repeated strings), to an integer count; like ref_change_stats, it keeps track of how many changes we made in allowing repetitions."},"nl_size":{"kind":"number","value":4397,"string":"4,397"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8637376427650452,"string":"0.863738"}}},{"rowIdx":7886,"cells":{"content":{"kind":"string","value":"# Copyright (C) 2010-2011 Richard Lincoln\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"An extension to the Core and Topology package that models information on the electrical characteristics of Transmission and Distribution networks. 
This package is used by network applications such as State Estimation, Load Flow and Optimal Power Flow.\n\"\"\"\n\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.Fuse import Fuse\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.EnergyConsumer import EnergyConsumer\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.Switch import Switch\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.Disconnector import Disconnector\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.ACLineSegment import ACLineSegment\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.SynchronousMachine import SynchronousMachine\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.BusbarSection import BusbarSection\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.LoadBreakSwitch import LoadBreakSwitch\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerTank import TransformerTank\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.GroundDisconnector import GroundDisconnector\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.PowerTransformerEnd import PowerTransformerEnd\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.Junction import Junction\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.SeriesCompensator import SeriesCompensator\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.Breaker import Breaker\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerTankEnd import TransformerTankEnd\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.Sectionaliser import Sectionaliser\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.DCLineSegment import DCLineSegment\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.Line import Line\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.Conductor import Conductor\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.PowerTransformer import PowerTransformer\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.Ground import Ground\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerEnd import TransformerEnd\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.ShuntCompensator import ShuntCompensator\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.EnergySource import EnergySource\nfrom CIM15.CDPSM.Connectivity.IEC61970.Wires.Jumper import Jumper\n\nnsURI = \"http://iec.ch/TC57/2010/CIM-schema-cim15?profile=http://iec.ch/TC57/2011/iec61968-13/CDPSM/Connectivity#Wires\"\nnsPrefix = \"cimWires\"\n\n"},"path":{"kind":"string","value":"CIM15/CDPSM/Connectivity/IEC61970/Wires/__init__.py"},"size":{"kind":"number","value":3466,"string":"3,466"},"nl_text":{"kind":"string","value":"An extension to the Core and Topology package that models information on the electrical characteristics of Transmission and Distribution networks. This package is used by network applications such as State Estimation, Load Flow and Optimal Power Flow.\n\n Copyright (C) 2010-2011 Richard Lincoln Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."},"nl_size":{"kind":"number","value":1314,"string":"1,314"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8853253722190857,"string":"0.885325"}}},{"rowIdx":7887,"cells":{"content":{"kind":"string","value":"from django.apps import AppConfig\n\n\nclass BooksConfig(AppConfig):\n name = 'bookstudio.books'\n verbose_name = 'books'\n\n def ready(self):\n \"\"\"Override this to put in:\n Users system checks\n Users signal registration\n \"\"\"\n pass\n"},"path":{"kind":"string","value":"bookstudio/books/apps.py"},"size":{"kind":"number","value":276,"string":"276"},"nl_text":{"kind":"string","value":"Override this to put in:\nUsers system checks\nUsers signal registration"},"nl_size":{"kind":"number","value":70,"string":"70"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7086131572723389,"string":"0.708613"}}},{"rowIdx":7888,"cells":{"content":{"kind":"string","value":"# pylint: disable=too-few-public-methods, no-member\n\"\"\"API for scheduling learning rate.\"\"\"\nfrom .. import symbol as sym\n\nclass LRScheduler(object):\n \"\"\"Base class of a learning rate scheduler.\n\n A scheduler returns a new learning rate based on the number of updates that have\n been performed.\n\n Parameters\n ----------\n base_lr : float, optional\n The initial learning rate.\n \"\"\"\n def __init__(self, base_lr=0.01, name='LRScheduler'):\n self.name = name\n self.base_lr = base_lr\n\n def __call__(self, num_update):\n \"\"\"Return a new learning rate based on number of updates.\n\n Parameters\n ----------\n num_update: nnvm Symbol\n the number of updates applied to weight.\n \"\"\"\n raise NotImplementedError(\"__call__ method must be overridden.\")\n\nclass FactorScheduler(LRScheduler):\n \"\"\"Reduce the learning rate by a factor for every *n* steps.\n\n It returns a new learning rate by::\n\n base_lr * pow(factor, num_update/step)\n\n Parameters\n ----------\n step : int\n Changes the learning rate for every n updates.\n factor : float, optional\n The factor to change the learning rate.\n stop_factor_lr : float, optional\n Stop updating the learning rate if it is less than this value.\n \"\"\"\n def __init__(self, step, factor=1, stop_factor_lr=1e-8, name='FactorScheduler', **kwargs):\n super(FactorScheduler, self).__init__(name=name, **kwargs)\n if step < 1:\n raise ValueError(\"Schedule step must be greater or equal than 1 round\")\n if factor > 1.0:\n raise ValueError(\"Factor must be no more than 1 to make lr reduce\")\n self.step = step\n self.factor = factor\n self.stop_factor_lr = stop_factor_lr\n\n def __call__(self, num_update):\n updated_lr = self.base_lr * self.factor ** (num_update / self.step)\n return sym.clip(updated_lr, a_min=self.stop_factor_lr, a_max=self.base_lr)\n"},"path":{"kind":"string","value":"nnvm/python/nnvm/compiler/lr_scheduler.py"},"size":{"kind":"number","value":1985,"string":"1,985"},"nl_text":{"kind":"string","value":"Reduce the learning rate by a factor for every *n* steps.\n\nIt returns a new learning rate by::\n\n base_lr * pow(factor, num_update/step)\n\nParameters\n----------\nstep : int\n Changes the learning rate for every n updates.\nfactor : float, optional\n The factor to change the learning rate.\nstop_factor_lr : float, 
optional\n Stop updating the learning rate if it is less than this value.\nBase class of a learning rate scheduler.\n\nA scheduler returns a new learning rate based on the number of updates that have\nbeen performed.\n\nParameters\n----------\nbase_lr : float, optional\n The initial learning rate.\nReturn a new learning rate based on number of updates.\n\nParameters\n----------\nnum_update: nnvm Symbol\n the number of updates applied to weight.\nAPI for scheduling learning rate.\n\n pylint: disable=too-few-public-methods, no-member"},"nl_size":{"kind":"number","value":844,"string":"844"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6930851936340332,"string":"0.693085"}}},{"rowIdx":7889,"cells":{"content":{"kind":"string","value":"import sys\nimport os\nimport re\nimport tempfile\n\nimport auto_editor\nimport auto_editor.vanparse as vanparse\nfrom auto_editor.utils.log import Log\nfrom auto_editor.ffwrapper import FFmpeg\n\ndef grep_options(parser):\n parser.add_argument('--no-filename', action='store_true',\n help='Never print filenames with output lines.')\n parser.add_argument('--max-count', '-m', type=int, default=None,\n help='Stop reading a file after NUM matching lines.')\n parser.add_argument('--count', '-c', action='store_true',\n help='Suppress normal output; instead print count of matching lines for each file.')\n parser.add_argument('--ignore-case', '-i', action='store_true',\n help='Ignore case distinctions for the PATTERN.')\n parser.add_argument('--timecode', action='store_true',\n help=\"Print the match's timecode.\")\n parser.add_argument('--time', action='store_true',\n help=\"Print when the match happens. (Ignore ending).\")\n parser.add_argument('--ffmpeg-location', default=None,\n help='Point to your custom ffmpeg file.')\n parser.add_argument('--my-ffmpeg', action='store_true',\n help='Use the ffmpeg on your PATH instead of the one packaged.')\n parser.add_argument('--help', '-h', action='store_true',\n help='Print info about the program or an option and exit.')\n parser.add_required('input', nargs='*', help='The path to a file you want inspected.')\n return parser\n\n# stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string\ndef cleanhtml(raw_html: str) -> str:\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n\n\ndef grep_core(\n media_file: str, add_prefix: bool, ffmpeg: FFmpeg, args, log: Log, TEMP: str\n) -> None:\n\n \"\"\"\n We're using the WEBVTT subtitle format. 
It's better than srt\n because it doesn't emit line numbers and the time code is in\n (hh:mm:ss.sss) instead of (dd:hh:mm:ss,sss)\n \"\"\"\n\n out_file = os.path.join(TEMP, 'media.vtt')\n ffmpeg.run(['-i', media_file, out_file])\n\n count = 0\n\n flags = 0\n if args.ignore_case:\n flags = re.IGNORECASE\n\n prefix = ''\n if add_prefix:\n prefix = '{}:'.format(os.path.splitext(os.path.basename(media_file))[0])\n\n if args.max_count is None:\n args.max_count = float('inf')\n\n timecode = ''\n line_number = -1\n with open(out_file, 'r') as file:\n while True:\n line = file.readline()\n line_number += 1\n if line_number == 0:\n continue\n\n if not line or count >= args.max_count:\n break\n\n if line.strip() == '':\n continue\n\n if re.match(r'\\d*:\\d\\d.\\d*\\s-->\\s\\d*:\\d\\d.\\d*', line):\n if args.time:\n timecode = line.split('-->')[0].strip() + ' '\n else:\n timecode = line.strip() + '; '\n continue\n\n line = cleanhtml(line)\n match = re.search(args.input[0], line, flags)\n line = line.strip()\n\n if match:\n count += 1\n if not args.count:\n if args.timecode or args.time:\n print(prefix + timecode + line)\n else:\n print(prefix + line)\n\n if args.count:\n print(prefix + str(count))\n\n\ndef main(sys_args=sys.argv[1:]):\n parser = vanparse.ArgumentParser('grep', auto_editor.version,\n description='Read and match subtitle tracks in media files.',\n )\n parser = grep_options(parser)\n\n TEMP = tempfile.mkdtemp()\n log = Log(temp=TEMP)\n\n try:\n args = parser.parse_args(sys_args)\n except vanparse.ParserError as e:\n log.error(str(e))\n\n ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, debug=False)\n\n media_files = args.input[1:]\n\n add_prefix = (len(media_files) > 1 or os.path.isdir(media_files[0])) and not args.no_filename\n\n for media_file in media_files:\n if not os.path.exists(media_file):\n log.error(f'{media_file}: File does not exist.')\n\n if os.path.isdir(media_file):\n for _, _, files in os.walk(media_file):\n for file in files:\n if file == '.DS_Store':\n continue\n\n grep_core(os.path.join(media_file, file), add_prefix, ffmpeg, args,\n log, TEMP)\n else:\n grep_core(media_file, add_prefix, ffmpeg, args, log, TEMP)\n\n log.cleanup()\n\n\nif __name__ == '__main__':\n main()\n"},"path":{"kind":"string","value":"auto_editor/subcommands/grep.py"},"size":{"kind":"number","value":4597,"string":"4,597"},"nl_text":{"kind":"string","value":"We're using the WEBVTT subtitle format. It's better than srt\nbecause it doesn't emit line numbers and the time code is in\n(hh:mm:ss.sss) instead of (dd:hh:mm:ss,sss)\n\n stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string"},"nl_size":{"kind":"number","value":249,"string":"249"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7641016840934753,"string":"0.764102"}}},{"rowIdx":7890,"cells":{"content":{"kind":"string","value":"\"\"\"\nCreate a blueprint with endpoints for logins from configured identity providers.\n\nThe identity providers include, for example, Google, Shibboleth, or another\nfence instance. 
See the other files in this directory for the definitions of\nthe endpoints for each provider.\n\"\"\"\n\nfrom authlib.common.urls import add_params_to_uri\nimport flask\nimport requests\n\nfrom cdislogging import get_logger\n\nfrom fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback\nfrom fence.blueprints.login.cognito import CognitoLogin, CognitoCallback\nfrom fence.blueprints.login.fence_login import FenceLogin, FenceCallback\nfrom fence.blueprints.login.google import GoogleLogin, GoogleCallback\nfrom fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback\nfrom fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback\nfrom fence.blueprints.login.okta import OktaLogin, OktaCallback\nfrom fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback\nfrom fence.blueprints.login.ras import RASLogin, RASCallback\nfrom fence.blueprints.login.synapse import SynapseLogin, SynapseCallback\nfrom fence.errors import InternalError\nfrom fence.resources.audit.utils import enable_audit_logging\nfrom fence.restful import RestfulApi\nfrom fence.config import config\n\nlogger = get_logger(__name__)\n\n# Mapping from IDP ID to the name in the URL on the blueprint (see below).\nIDP_URL_MAP = {\n \"fence\": \"fence\",\n \"google\": \"google\",\n \"shibboleth\": \"shib\",\n \"orcid\": \"orcid\",\n \"synapse\": \"synapse\",\n \"microsoft\": \"microsoft\",\n \"okta\": \"okta\",\n \"cognito\": \"cognito\",\n \"ras\": \"ras\",\n \"cilogon\": \"cilogon\",\n}\n\n\ndef absolute_login_url(provider_id, fence_idp=None, shib_idp=None):\n \"\"\"\n Args:\n provider_id (str): provider to log in with; an IDP_URL_MAP key.\n fence_idp (str, optional): if provider_id is \"fence\"\n (multi-tenant Fence setup), fence_idp can be any of the\n providers supported by the other Fence. If not specified,\n will default to NIH login.\n shib_idp (str, optional): if provider_id is \"fence\" and\n fence_idp is \"shibboleth\", shib_idp can be any Shibboleth/\n InCommon provider. If not specified, will default to NIH\n login.\n\n Returns:\n str: login URL for this provider, including extra query\n parameters if fence_idp and/or shib_idp are specified.\n \"\"\"\n try:\n base_url = config[\"BASE_URL\"].rstrip(\"/\")\n login_url = base_url + \"/login/{}\".format(IDP_URL_MAP[provider_id])\n except KeyError as e:\n raise InternalError(\"identity provider misconfigured: {}\".format(str(e)))\n\n params = {}\n if fence_idp:\n params[\"idp\"] = fence_idp\n if shib_idp:\n params[\"shib_idp\"] = shib_idp\n login_url = add_params_to_uri(login_url, params)\n\n return login_url\n\n\ndef provider_info(login_details):\n \"\"\"\n Args:\n login_details (dict):\n { name, desc, idp, fence_idp, shib_idps, secondary }\n - \"idp\": a configured provider.\n Multiple options can be configured with the same idp.\n - if provider_id is \"fence\", \"fence_idp\" can be any of the\n providers supported by the other Fence. If not specified, will\n default to NIH login.\n - if provider_id is \"fence\" and fence_idp is \"shibboleth\", a\n list of \"shib_idps\" can be configured for InCommon login. 
If\n not specified, will default to NIH login.\n - Optional parameters: \"desc\" (description) and \"secondary\"\n (boolean - can be used by the frontend to display secondary\n buttons differently).\n\n Returns:\n dict: { name, desc, idp, urls, secondary }\n - urls: list of { name, url } dictionaries\n \"\"\"\n info = {\n # \"id\" deprecated, replaced by \"idp\"\n \"id\": login_details[\"idp\"],\n \"idp\": login_details[\"idp\"],\n \"name\": login_details[\"name\"],\n # \"url\" deprecated, replaced by \"urls\"\n \"url\": absolute_login_url(login_details[\"idp\"]),\n \"desc\": login_details.get(\"desc\", None),\n \"secondary\": login_details.get(\"secondary\", False),\n }\n\n # for Fence multi-tenant login\n fence_idp = None\n if login_details[\"idp\"] == \"fence\":\n fence_idp = login_details.get(\"fence_idp\")\n\n # handle Shibboleth IDPs: InCommon login can either be configured\n # directly in this Fence, or through multi-tenant Fence\n if (\n login_details[\"idp\"] == \"shibboleth\" or fence_idp == \"shibboleth\"\n ) and \"shib_idps\" in login_details:\n # get list of all available shib IDPs\n if not hasattr(flask.current_app, \"all_shib_idps\"):\n flask.current_app.all_shib_idps = get_all_shib_idps()\n\n requested_shib_idps = login_details[\"shib_idps\"]\n if requested_shib_idps == \"*\":\n shib_idps = flask.current_app.all_shib_idps\n elif isinstance(requested_shib_idps, list):\n # get the display names for each requested shib IDP\n shib_idps = []\n for requested_shib_idp in requested_shib_idps:\n shib_idp = next(\n (\n available_shib_idp\n for available_shib_idp in flask.current_app.all_shib_idps\n if available_shib_idp[\"idp\"] == requested_shib_idp\n ),\n None,\n )\n if not shib_idp:\n raise InternalError(\n 'Requested shib_idp \"{}\" does not exist'.format(\n requested_shib_idp\n )\n )\n shib_idps.append(shib_idp)\n else:\n raise InternalError(\n 'fence provider misconfigured: \"shib_idps\" must be a list or \"*\", got {}'.format(\n requested_shib_idps\n )\n )\n\n info[\"urls\"] = [\n {\n \"name\": shib_idp[\"name\"],\n \"url\": absolute_login_url(\n login_details[\"idp\"], fence_idp, shib_idp[\"idp\"]\n ),\n }\n for shib_idp in shib_idps\n ]\n\n # non-Shibboleth provider\n else:\n info[\"urls\"] = [\n {\n \"name\": login_details[\"name\"],\n \"url\": absolute_login_url(login_details[\"idp\"], fence_idp),\n }\n ]\n\n return info\n\n\ndef get_login_providers_info():\n # default login option\n if config.get(\"DEFAULT_LOGIN_IDP\"):\n default_idp = config[\"DEFAULT_LOGIN_IDP\"]\n elif \"default\" in config.get(\"ENABLED_IDENTITY_PROVIDERS\", {}):\n # fall back on ENABLED_IDENTITY_PROVIDERS.default\n default_idp = config[\"ENABLED_IDENTITY_PROVIDERS\"][\"default\"]\n else:\n logger.warning(\"DEFAULT_LOGIN_IDP not configured\")\n default_idp = None\n\n # other login options\n if config[\"LOGIN_OPTIONS\"]:\n login_options = config[\"LOGIN_OPTIONS\"]\n elif \"providers\" in config.get(\"ENABLED_IDENTITY_PROVIDERS\", {}):\n # fall back on \"providers\" and convert to \"login_options\" format\n enabled_providers = config[\"ENABLED_IDENTITY_PROVIDERS\"][\"providers\"]\n login_options = [\n {\n \"name\": details.get(\"name\"),\n \"idp\": idp,\n \"desc\": details.get(\"desc\"),\n \"secondary\": details.get(\"secondary\"),\n }\n for idp, details in enabled_providers.items()\n ]\n else:\n logger.warning(\"LOGIN_OPTIONS not configured or empty\")\n login_options = []\n\n try:\n all_provider_info = [\n provider_info(login_details) for login_details in login_options\n ]\n except KeyError as e:\n 
raise InternalError(\"LOGIN_OPTIONS misconfigured: cannot find key {}\".format(e))\n\n # if several login_options are defined for this default IDP, will\n # default to the first one:\n default_provider_info = next(\n (info for info in all_provider_info if info[\"idp\"] == default_idp), None\n )\n if not default_provider_info:\n raise InternalError(\n \"default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS\".format(\n default_idp\n )\n )\n\n return default_provider_info, all_provider_info\n\n\ndef make_login_blueprint():\n \"\"\"\n Return:\n flask.Blueprint: the blueprint used for ``/login`` endpoints\n\n Raises:\n ValueError: if app is not amenably configured\n \"\"\"\n\n blueprint = flask.Blueprint(\"login\", __name__)\n blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])\n\n @blueprint.route(\"\", methods=[\"GET\"])\n def default_login():\n \"\"\"\n The default root login route.\n \"\"\"\n default_provider_info, all_provider_info = get_login_providers_info()\n return flask.jsonify(\n {\"default_provider\": default_provider_info, \"providers\": all_provider_info}\n )\n\n # Add identity provider login routes for IDPs enabled in the config.\n configured_idps = config[\"OPENID_CONNECT\"].keys()\n\n if \"fence\" in configured_idps:\n blueprint_api.add_resource(FenceLogin, \"/fence\", strict_slashes=False)\n blueprint_api.add_resource(FenceCallback, \"/fence/login\", strict_slashes=False)\n\n if \"google\" in configured_idps:\n blueprint_api.add_resource(GoogleLogin, \"/google\", strict_slashes=False)\n blueprint_api.add_resource(\n GoogleCallback, \"/google/login\", strict_slashes=False\n )\n\n if \"orcid\" in configured_idps:\n blueprint_api.add_resource(ORCIDLogin, \"/orcid\", strict_slashes=False)\n blueprint_api.add_resource(ORCIDCallback, \"/orcid/login\", strict_slashes=False)\n\n if \"ras\" in configured_idps:\n blueprint_api.add_resource(RASLogin, \"/ras\", strict_slashes=False)\n # note that the callback endpoint is \"/ras/callback\", not \"/ras/login\" like other IDPs\n blueprint_api.add_resource(RASCallback, \"/ras/callback\", strict_slashes=False)\n\n if \"synapse\" in configured_idps:\n blueprint_api.add_resource(SynapseLogin, \"/synapse\", strict_slashes=False)\n blueprint_api.add_resource(\n SynapseCallback, \"/synapse/login\", strict_slashes=False\n )\n\n if \"microsoft\" in configured_idps:\n blueprint_api.add_resource(MicrosoftLogin, \"/microsoft\", strict_slashes=False)\n blueprint_api.add_resource(\n MicrosoftCallback, \"/microsoft/login\", strict_slashes=False\n )\n\n if \"okta\" in configured_idps:\n blueprint_api.add_resource(OktaLogin, \"/okta\", strict_slashes=False)\n blueprint_api.add_resource(OktaCallback, \"/okta/login\", strict_slashes=False)\n\n if \"cognito\" in configured_idps:\n blueprint_api.add_resource(CognitoLogin, \"/cognito\", strict_slashes=False)\n blueprint_api.add_resource(\n CognitoCallback, \"/cognito/login\", strict_slashes=False\n )\n\n if \"shibboleth\" in configured_idps:\n blueprint_api.add_resource(ShibbolethLogin, \"/shib\", strict_slashes=False)\n blueprint_api.add_resource(\n ShibbolethCallback, \"/shib/login\", strict_slashes=False\n )\n\n if \"cilogon\" in configured_idps:\n blueprint_api.add_resource(CilogonLogin, \"/cilogon\", strict_slashes=False)\n blueprint_api.add_resource(\n CilogonCallback, \"/cilogon/login\", strict_slashes=False\n )\n\n return blueprint\n\n\ndef get_all_shib_idps():\n \"\"\"\n Get the list of all existing Shibboleth IDPs.\n This function 
only returns the information we need to generate login URLs.\n\n Returns:\n list: list of {\"idp\": \"\", \"name\": \"\"} dictionaries\n \"\"\"\n url = config[\"OPENID_CONNECT\"].get(\"fence\", {}).get(\"shibboleth_discovery_url\")\n if not url:\n raise InternalError(\n \"Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured\"\n )\n res = requests.get(url)\n assert (\n res.status_code == 200\n ), \"Unable to get list of Shibboleth IDPs from {}\".format(url)\n\n all_shib_idps = []\n for shib_idp in res.json():\n if \"entityID\" not in shib_idp:\n logger.warning(\n f\"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP.\"\n )\n continue\n idp = shib_idp[\"entityID\"]\n if len(shib_idp.get(\"DisplayNames\", [])) > 0:\n name = get_shib_idp_en_name(shib_idp[\"DisplayNames\"])\n else:\n logger.warning(\n f\"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name.\"\n )\n name = idp\n all_shib_idps.append(\n {\n \"idp\": idp,\n \"name\": name,\n }\n )\n return all_shib_idps\n\n\ndef get_shib_idp_en_name(names):\n \"\"\"\n Returns a name in English for a Shibboleth IDP, or the first available\n name if no English name was provided.\n\n Args:\n names (list): list of {\"lang\": \"\", \"value\": \"\"} dictionaries\n Example:\n [\n {\n \"value\": \"University of Chicago\",\n \"lang\": \"en\"\n },\n {\n \"value\": \"Universidad de Chicago\",\n \"lang\": \"es\"\n }\n ]\n\n Returns:\n str: Display name to use for this Shibboleth IDP\n \"\"\"\n for name in names:\n if name.get(\"lang\") == \"en\":\n return name[\"value\"]\n return names[0][\"value\"]\n"},"path":{"kind":"string","value":"fence/blueprints/login/__init__.py"},"size":{"kind":"number","value":13602,"string":"13,602"},"nl_text":{"kind":"string","value":"Args:\n provider_id (str): provider to log in with; an IDP_URL_MAP key.\n fence_idp (str, optional): if provider_id is \"fence\"\n (multi-tenant Fence setup), fence_idp can be any of the\n providers supported by the other Fence. If not specified,\n will default to NIH login.\n shib_idp (str, optional): if provider_id is \"fence\" and\n fence_idp is \"shibboleth\", shib_idp can be any Shibboleth/\n InCommon provider. If not specified, will default to NIH\n login.\n\nReturns:\n str: login URL for this provider, including extra query\n parameters if fence_idp and/or shib_idp are specified.\nThe default root login route.\nGet the list of all existing Shibboleth IDPs.\nThis function only returns the information we need to generate login URLs.\n\nReturns:\n list: list of {\"idp\": \"\", \"name\": \"\"} dictionaries\nReturns a name in English for a Shibboleth IDP, or the first available\nname if no English name was provided.\n\nArgs:\n names (list): list of {\"lang\": \"\", \"value\": \"\"} dictionaries\n Example:\n [\n {\n \"value\": \"University of Chicago\",\n \"lang\": \"en\"\n },\n {\n \"value\": \"Universidad de Chicago\",\n \"lang\": \"es\"\n }\n ]\n\nReturns:\n str: Display name to use for this Shibboleth IDP\nReturn:\n flask.Blueprint: the blueprint used for ``/login`` endpoints\n\nRaises:\n ValueError: if app is not amenably configured\nArgs:\n login_details (dict):\n { name, desc, idp, fence_idp, shib_idps, secondary }\n - \"idp\": a configured provider.\n Multiple options can be configured with the same idp.\n - if provider_id is \"fence\", \"fence_idp\" can be any of the\n providers supported by the other Fence. 
If not specified, will\n default to NIH login.\n - if provider_id is \"fence\" and fence_idp is \"shibboleth\", a\n list of \"shib_idps\" can be configured for InCommon login. If\n not specified, will default to NIH login.\n - Optional parameters: \"desc\" (description) and \"secondary\"\n (boolean - can be used by the frontend to display secondary\n buttons differently).\n\nReturns:\n dict: { name, desc, idp, urls, secondary }\n - urls: list of { name, url } dictionaries\nCreate a blueprint with endpoints for logins from configured identity providers.\n\nThe identity providers include, for example, Google, Shibboleth, or another\nfence instance. See the other files in this directory for the definitions of\nthe endpoints for each provider.\n\n Mapping from IDP ID to the name in the URL on the blueprint (see below). \"id\" deprecated, replaced by \"idp\" \"url\" deprecated, replaced by \"urls\" for Fence multi-tenant login handle Shibboleth IDPs: InCommon login can either be configured directly in this Fence, or through multi-tenant Fence get list of all available shib IDPs get the display names for each requested shib IDP non-Shibboleth provider default login option fall back on ENABLED_IDENTITY_PROVIDERS.default other login options fall back on \"providers\" and convert to \"login_options\" format if several login_options are defined for this default IDP, will default to the first one: Add identity provider login routes for IDPs enabled in the config. note that the callback endpoint is \"/ras/callback\", not \"/ras/login\" like other IDPs"},"nl_size":{"kind":"number","value":3319,"string":"3,319"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6877502799034119,"string":"0.68775"}}},{"rowIdx":7891,"cells":{"content":{"kind":"string","value":"#! 
/usr/bin/python\n\"\"\"\nMonitoring functions for xrootd cache server, producing classads\nthat can be handed to condor\n\"\"\"\n\nimport os\nimport math\nimport time\nimport errno\nimport struct\nimport collections\n\nimport six\nfrom six.moves import urllib\n\nimport classad\nimport XRootD.client\n\n__all__ = ['collect_cache_stats']\n\n# these paths in the cache are to be treated as top level \"VOs\" for stats collection\nvo_paths = [ '/user', '/pnfs/fnal.gov/usr' ]\n\ndef _split_path(path):\n \"\"\" Split a path into a list of directory names \"\"\"\n if path[0] != '/':\n raise Exception(\"Not absolute path\")\n result = []\n while path != '/':\n path, tail = os.path.split(path)\n if tail: result.append(tail)\n return list(reversed(result))\n\ndef _is_prefix(lhs, rhs):\n \"\"\" return True if the first list is a prefix of the second \"\"\"\n rhs = list(rhs)\n while rhs:\n if lhs == rhs: return True\n rhs.pop()\n return False\n\ndef scan_cache_dirs(rootdir):\n \"\"\" Scan the top level directory of the cache.\n Walks the path looking for directories that are not in vo_paths.\n For each of these generate a cache summary\n \"\"\"\n\n results = {}\n try:\n root_components = _split_path(rootdir)\n for dirpath, dirnames, filenames in os.walk(rootdir, topdown=True):\n # get the path components as a list, removing the rootdir part\n dirpath_components = _split_path(dirpath)[len(root_components):]\n for name in list(dirnames):\n path_components = dirpath_components + [name]\n for p in [ _split_path(p) for p in vo_paths]:\n # if this directory is in vo_paths, keep recursing\n if _is_prefix( path_components, p):\n break\n else:\n # if nothing is in vo_paths, get the stats and remove from dirnames\n # so this walk goes no further\n vo_name = os.path.join('/', *path_components)\n try:\n results[vo_name] = scan_vo_dir(os.path.join(dirpath, name))\n except (OSError, IOError) as ex:\n results[vo_name] = {'scan_vo_dir_error': str(ex) }\n dirnames.remove(name)\n return results\n except (OSError, IOError) as ex:\n return { 'scan_cache_dirs_error' : { 'message' : str(ex) } } # error message?\n\n\ndef scan_vo_dir(vodir):\n \"\"\" Scan a VO directory (assumed to be the whole directory tree after the top level \"\"\"\n\n now = time.time()\n totalsize = 0\n nfiles = 0\n naccesses = 0\n accesses = collections.defaultdict(int)\n most_recent_access = 0\n bad_cinfo_files = 0\n for root, dirs, files in os.walk(vodir):\n fnames = set(files)\n # Somebody might add a file ending in .cinfo in the cache\n # so look for the f, f.cinfo pair\n for f, cinfo in ((f, f + '.cinfo') for f in fnames if f + '.cinfo' in fnames):\n try:\n st = os.stat(os.path.join(root, f))\n except OSError as ex:\n if ex.errno == errno.ENOENT:\n # must have just been deleted\n continue\n else: raise\n try:\n access_info = read_cinfo(os.path.join(root, cinfo), now)\n except OSError as ex:\n if ex.errno == errno.ENOENT:\n continue\n else:\n bad_cinfo_files += 1\n access_info = { \"naccesses\" : 0, \"last_access\": 0, \"by_hour\" : {} }\n except ReadCInfoError as ex:\n bad_cinfo_files += 1\n access_info = ex.access_info\n\n nfiles += 1\n file_size = st.st_blocks*512 # allow for sparse files\n totalsize += file_size\n naccesses += access_info[\"naccesses\"]\n most_recent_access = max(most_recent_access, access_info[\"last_access\"])\n\n for h in access_info[\"by_hour\"]:\n accesses[\"naccesses_hr_\" + h] += access_info[\"by_hour\"][h]\n accesses[\"bytes_hr_\" + h] += access_info[\"bytes_hr\"][h]\n\n result = classad.ClassAd({\n \"used_bytes\" : 
totalsize,\n \"nfiles\" : nfiles,\n \"naccesses\" : naccesses,\n \"bad_cinfo_files\" : bad_cinfo_files\n })\n result.update(accesses)\n if most_recent_access > 0:\n result[\"most_recent_access_time\"] = most_recent_access\n return result\n\n\n# Parsing the cinfo files\n\n# The header (not a c struct; consecutive separate values with no padding)\n# version + buffer size + file size (blocks)\n# int + long long + long long\n_header_fmt = '=iqq'\n_header_fmt_size = struct.calcsize(_header_fmt)\n\n# then the number of accesses\n# int\n_int_fmt = '@q'\n_int_fmt_size = struct.calcsize(_int_fmt)\n\n# each access contains a struct (native size + padding)\n# AttachTime + DetachTime + BytesDisk + BytesRam + BytesMissed\n# time_t + long long + long long + long long + long long\n_status_fmt = '@qqqqq'\n_status_fmt_size = struct.calcsize(_status_fmt)\n\nclass ReadCInfoError(Exception):\n def __init__(self, *args):\n Exception.__init__(self, *args)\n if len(args) > 1:\n self.access_info = args[1]\n else:\n self.access_info = {}\n\ndef read_cinfo(cinfo_file, now):\n \"\"\" Try to extract useful info from the cinfo file \"\"\"\n\n result = { \"naccesses\": 0,\n \"last_access\": 0,\n \"by_hour\" : { \"01\": 0, \"12\": 0, \"24\": 0 },\n \"bytes_hr\" : { \"01\": 0, \"12\": 0, \"24\": 0 },\n }\n\n cf = open(cinfo_file, 'rb')\n\n # read and unpack the header\n buf = cf.read(_header_fmt_size)\n if len(buf) < _header_fmt_size:\n # a mangled file\n raise ReadCInfoError(\"%s header too short\" % cinfo_file, result)\n\n version, buffer_size, file_size = struct.unpack(_header_fmt, buf)\n\n # we only understand version 2\n if version != 2:\n raise ReadCInfoError(\"%s unknown version: %s\" % (cinfo_file, version), result)\n\n # Get the size of the state vector and skip over it\n # buff_synced uses 1 bit per bufferSize block of bytes\n # Length is rounded up to the nearest byte\n buff_synced_len = int(math.ceil(float(file_size)/buffer_size/8))\n\n # If the file_size is zero, state vector length is 1\n # (Difference is due to Python's integer division returning the floor)\n if file_size == 0:\n buff_synced_len = 1\n\n cf.read(buff_synced_len)\n\n # Go past cksum (char[16]) and creationTime (time_t)\n cf.read(16 + 8)\n\n # now the access count (an int)\n buf = cf.read(_int_fmt_size)\n if len(buf) < _int_fmt_size:\n raise ReadCInfoError(\"%s: invalid access field\" % cinfo_file, result)\n\n access_count, = struct.unpack(_int_fmt, buf)\n\n result[\"naccesses\"] = access_count\n\n if access_count < 0:\n raise ReadCInfoError(\"%s: invalid access count: %s\" % (cinfo_file, access_count), result)\n elif access_count == 0:\n return result\n\n # read the access times\n\n hr_01 = now - 60*60\n hr_12 = now - 12*60*60\n hr_24 = now - 24*60*60\n\n # Read AStat structs\n try:\n for buf in iter(lambda: cf.read(_status_fmt_size), b''):\n access_time, _, bytes_disk, bytes_ram, _ = struct.unpack(_status_fmt, buf)\n result[\"last_access\"] = access_time\n\n #print access_time, bytes_disk, bytes_ram\n #print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(access_time))\n\n intervals = list()\n if access_time >= hr_01: intervals.append('01')\n if access_time >= hr_12: intervals.append('12')\n if access_time >= hr_24: intervals.append('24')\n else:\n # no longer interested\n next\n\n for interval in intervals:\n result[\"by_hour\"][interval] += 1\n result[\"bytes_hr\"][interval] += bytes_disk + bytes_ram\n except struct.error as ex:\n # return what we've got\n raise ReadCInfoError(\"%s unable to decode access time data: %s\" % (cinfo_file, 
str(ex)), result)\n\n return result\n\n\ndef test_xrootd_server(url):\n \"\"\" Contact the xrootd server to check if it's alive\n \"\"\"\n try:\n myclient = XRootD.client.FileSystem(url)\n startt = time.time()\n response, _ = myclient.ping(timeout=10)\n elapsed = time.time() - startt\n\n if response.fatal:\n status = \"fatal\"\n elif response.error:\n status = \"error\"\n elif response.ok:\n status = \"ok\"\n else:\n status = \"unknown\"\n\n result = {\"ping_response_status\" : status, \"ping_response_code\" : response.code,\n \"ping_response_message\" : response.message, \"ping_elapsed_time\" : elapsed}\n\n return result\n\n except Exception as ex: # more specific exception would be better\n return {\"ping_response_status\" : \"failed\", \"ping_response_code\" : -1,\n \"ping_response_message\" : str(ex), \"ping_elapsed_time\" : 0.0}\n\n\ndef get_cache_info(rootdir, cache_max_fs_fraction):\n \"\"\"Get information about the cache itself\"\"\"\n result = {}\n try:\n stat = os.statvfs(rootdir)\n\n total_size = int(stat.f_blocks*stat.f_bsize*cache_max_fs_fraction)\n free_size = int(total_size - (stat.f_blocks-stat.f_bfree)*stat.f_bsize)\n\n result['total_cache_bytes'] = total_size\n result['free_cache_bytes'] = free_size\n result['free_cache_fraction'] = 1 - float(stat.f_blocks-stat.f_bfree)/int(stat.f_blocks*cache_max_fs_fraction)\n\n return result\n except (OSError, IOError) as ex:\n return {}\n\n\ndef collect_cache_stats(url, rootdir, cache_max_fs_fraction=1.0):\n \"\"\" Collect stats on the cache server \"\"\"\n start_time = time.time()\n\n parsed_url = urllib.parse.urlparse(url)\n\n # Python 2.6's urlparse returns a ParseResult object whereas\n # Python 2.4's urlparse returns a tuple that doesn't handle\n # root:// properly\n try:\n if parsed_url.scheme not in ('root', 'xroot'):\n raise Exception(\"URL '%s' is not an xrootd url\" % url)\n\n hostname = parsed_url.netloc\n except AttributeError:\n if parsed_url[0] not in ('root', 'xroot'):\n raise Exception(\"URL '%s' is not an xrootd url\" % url)\n\n hostname = parsed_url[2][2:] # Avoid the '//' prefix\n\n result = {'MyType' : 'Machine', 'Name': 'xrootd@%s' % hostname, 'stats_time' : int(start_time)}\n result.update(test_xrootd_server(url))\n result.update(get_cache_info(rootdir, cache_max_fs_fraction))\n\n stats_per_vo = scan_cache_dirs(rootdir)\n # add up the sizes\n totals = dict()\n most_recent_access = 0\n result['VO'] = {}\n for vo, vostats in stats_per_vo.items():\n for k, v in vostats.items():\n if k == \"most_recent_access_time\":\n most_recent_access = max(most_recent_access, v)\n else:\n try:\n totals[k] += v\n except KeyError:\n totals[k] = v\n result['VO'][vo] = vostats\n result['used_cache_bytes'] = totals.pop(\"used_bytes\", 0)\n for k, v in totals.items():\n result[\"total_\" + k] = v\n if most_recent_access > 0:\n result[\"most_recent_access_time\"] = most_recent_access\n\n result['time_to_collect_stats'] = time.time() - start_time\n return classad.ClassAd(result)\n\n\nif __name__ == '__main__':\n import sys\n args = sys.argv[1:]\n if len(args) > 2:\n args[2] = float(args[2])\n elif len(args) == 2:\n args.append(0.99) # max cache fraction\n print(collect_cache_stats(*args))\n"},"path":{"kind":"string","value":"src/xrootd_cache_stats.py"},"size":{"kind":"number","value":11746,"string":"11,746"},"nl_text":{"kind":"string","value":"return True if the first list is a prefix of the second \nSplit a path into a list of directory names \nCollect stats on the cache server \nGet information about the cache itself\nTry to extract 
useful info from the cinfo file \nScan the top level directory of the cache.\nWalks the path looking for directories that are not in vo_paths.\nFor each of these generate a cache summary\nScan a VO directory (assumed to be the whole directory tree after the top level \nContact the xrootd server to check if it's alive\n \nMonitoring functions for xrootd cache server, producing classads\nthat can be handed to condor\n\n! /usr/bin/python these paths in the cache are to be treated as top level \"VOs\" for stats collection get the path components as a list, removing the rootdir part if this directory is in vo_paths, keep recursing if nothing is in vo_paths, get the stats and remove from dirnames so this walk goes no further error message? Somebody might add a file ending in .cinfo in the cache so look for the f, f.cinfo pair must have just been deleted allow for sparse files Parsing the cinfo files The header (not a c struct; consecutive separate values with no padding) version + buffer size + file size (blocks) int + long long + long long then the number of accesses int each access contains a struct (native size + padding) AttachTime + DetachTime + BytesDisk + BytesRam + BytesMissed time_t + long long + long long + long long + long long read and unpack the header a mangled file we only understand version 2 Get the size of the state vector and skip over it buff_synced uses 1 bit per bufferSize block of bytes Length is rounded up to the nearest byte If the file_size is zero, state vector length is 1 (Difference is due to Python's integer division returning the floor) Go past cksum (char[16]) and creationTime (time_t) now the access count (an int) read the access times Read AStat structsprint access_time, bytes_disk, bytes_ramprint time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(access_time)) no longer interested return what we've got more specific exception would be better Python 2.6's urlparse returns a ParseResult object whereas Python 2.4's urlparse returns a tuple that doesn't handle root:// properly Avoid the '//' prefix add up the sizes max cache fraction"},"nl_size":{"kind":"number","value":2289,"string":"2,289"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7733702063560486,"string":"0.77337"}}},{"rowIdx":7892,"cells":{"content":{"kind":"string","value":"'''\ndShell output classes\n\n@author: tparker\n'''\nimport os\nimport sys\nimport logging\nimport struct\nimport datetime\nimport dshell\nimport util\n\n\nclass Output(object):\n\n '''\n dShell output base class, extended by output types\n '''\n\n _DEFAULT_FORMAT = ''\n _DEFAULT_TIMEFORMAT = '%Y-%m-%d %H:%M:%S'\n _DEFAULT_DELIM = ' '\n _NULL = None\n\n # true if you want to remove extra fields from the parsed record\n _FILTER_EXTRA = False\n\n def __init__(self, *a, **kw):\n '''\n base output class constructor\n configuration kwords:\n logger= to pass in a logger\n format='format string' to override default formatstring for output class\n pcap = filename to write pcap\n '''\n # setup the logger\n self.logger = kw.get('logger', logging)\n\n # parse the format string\n self.setformat(kw.get('format', self._DEFAULT_FORMAT))\n self.timeformat = (kw.get('timeformat', self._DEFAULT_TIMEFORMAT))\n self.delim = (kw.get('delim', self._DEFAULT_DELIM))\n\n if 'pcap' in kw:\n self.pcapwriter = PCAPWriter(kw['pcap'])\n else:\n self.pcapwriter = None\n\n # this is up to the output plugin to process\n # by default stuffs extra fields and data into 'extra' field\n # if _FILTER_EXTRA is true\n self.extra = 
kw.get('extra', False)\n\n # create the default session writer\n if 'session' in kw:\n self.sessionwriter = SessionWriter(**kw)\n else:\n self.sessionwriter = None\n\n # write a message to the log\n def log(self, msg, level=logging.INFO, *args, **kw):\n '''write a message to the log\n passes all args and kwargs thru to logging\n except for level= is used to set logging level'''\n self.logger.log(level, msg, *args, **kw)\n\n def setformat(self, formatstr=None, typemap=None):\n '''parse a format string and extract the field info\n if no string given, reverts to default for class\n will set self.fields to be a list of (name,type,spec) tuples\n self.fieldnames to a list of fieldnames\n and self.fieldmap to a list of key=in value=out mappings\n format string can also map in field to out field with %(in:out)spectype\n or specify an explicit out type with %(in:out)specintype:outtype\n (note this breaks compatibility with text formatting,\n but useful for db or other output modules)\n a typemap of [intype]=outtype (or [in]=(newintype,outtype)\n can be used to map and replace types\n '''\n if formatstr:\n self.format = formatstr + \"\\n\"\n else:\n self.format = self._DEFAULT_FORMAT + \"\\n\"\n self.fields = [] # will be a (name,type,length) tuple\n self.fieldnames = []\n self.fieldmap = {}\n # get all the field names\n e = 0\n while True:\n # find the next format spec of %(...)\n s = self.format.find('%', e) + 1\n if s < 1 or self.format[s] != '(':\n break # not %(...\n e = self.format.find(')', s)\n if e < 0:\n break # didn't find a closing paren\n # get text between parens as field name\n fname = self.format[s + 1:e]\n # len/precision specs will be 0-9 between ) and type char\n fspec = ''\n for i in xrange(e + 1, len(self.format)):\n if self.format[i] in '1234567890.+-# lLh':\n fspec += self.format[i]\n else:\n break # this char is not a spec char, it is the type char\n ftype = self.format[i]\n i += 1\n # is the field type a intype:outtype def?\n if i < len(self.format) and self.format[i] == ':':\n e = self.format.find(' ', i) # find the end whitespace\n # split on: to get input:output mapping\n ftype, outtype = self.format[i - 1:e].split(':')\n else:\n outtype = None # output will be same as input type\n e = i # start at next char on loop\n try: # field name to column mapping\n fname, fmap = fname.split(':')\n except:\n fmap = fname # no mapping\n if typemap and ftype in typemap and not outtype:\n try:\n (ftype, outtype) = typemap[ftype]\n except:\n outtype = typemap[ftype]\n # append the field name,type,spec,mapping\n self.fields.append((fname, ftype, fspec))\n self.fieldnames.append(fname)\n if outtype:\n self.fieldmap[fname] = (fmap, outtype) # map of in to out,type\n\n def parse(self, *args, **kw):\n '''parse the input args/kwargs into a record dict according to format string\n - timestamps are formatted to date/time strings\n - fields not in the input will be defined but blank\n - extra fields in the record will be formatted into a\n \"name=value name2=value2...\" string and put in 'extra'\n - args will go into 'data'\n - format keyword can contain a new format string to use (this also sets format for future output)\n '''\n # convert timestamps to proper format\n for ts in [k for k in kw if k == 'ts' or k.endswith('time')]:\n dt = ts[:-4] + 'datetime' # ts->datetime , Xtime -> Xdatetime\n kw[dt] = datetime.datetime.fromtimestamp(\n float(kw[ts])).strftime(self.timeformat) # format properly\n if kw.get('direction') is 'cs':\n kw['dir_arrow'] = '->'\n elif kw.get('direction') is 'sc':\n 
kw['dir_arrow'] = '<-'\n else:\n kw['dir_arrow'] = '--'\n if 'format' in kw:\n self.setformat(kw['format']) # change the format string?\n del kw['format']\n # create the record initialized to the _NULL value\n rec = dict((f, self._NULL) for f in self.fieldnames)\n # populate record from datadict if datadict key is a field\n if self._FILTER_EXTRA:\n rec.update(\n dict((f, kw[f]) for f in self.fieldnames if (f in kw and kw[f] != None)))\n # place extra datadict keys into the extra field (and exclude the\n # addr tuple)\n if self.extra:\n rec['extra'] = self.delim.join(['%s=%s' % (f, kw[f]) for f in sorted(\n kw.keys()) if f not in self.fieldnames and f != 'addr'])\n else: # not filtering extra, just lump them in as fields\n rec.update(kw)\n # populate the data field\n if args:\n rec['data'] = self.delim.join(map(str, args))\n return rec\n\n def dump(self, pkt=None, **kw): # pass packets to pcap\n '''dump raw packet data to an output\n override this if you want a format other than pcap'''\n pktdata = str(pkt) # might be string, might be a dpkt object\n pktlen = kw.get('len', len(pktdata))\n if self.pcapwriter:\n self.pcapwriter.write(pktlen, pktdata, kw['ts'])\n else:\n self.log(util.hexPlusAscii(str(pkt)), level=logging.DEBUG)\n\n # close the PCAP output\n def close(self):\n if self.pcapwriter:\n self.pcapwriter.close()\n\n def dispatch(self, m, *args, **kwargs):\n '''dispatch from Q pop'''\n if m == 'write':\n self.write(*args, **kwargs)\n if m == 'alert':\n self.alert(*args, **kwargs)\n if m == 'dump':\n self.dump(*args, **kwargs)\n\n\nclass FileOutput(Output):\n\n def __init__(self, *args, **kw):\n '''configuration for fileoutput:\n fh=\n file=filename to write to\n mode=mode to open file as, default 'w'\n '''\n # do base init first\n Output.__init__(self, *args, **kw)\n # get the output filehandle or file\n f = None\n if 'fh' in kw:\n self.fh = kw['fh']\n return\n elif 'file' in kw:\n f = kw['file']\n elif args:\n f = args[0]\n if f:\n if 'mode' in kw:\n mode = kw['mode']\n else:\n mode = 'w'\n if mode == 'noclobber':\n mode = 'w'\n try:\n while os.stat(f):\n p = f.split('-')\n try:\n p, n = p[:-1], int(p[-1])\n except ValueError:\n n = 0\n f = '-'.join(p + ['%04d' % (int(n) + 1)])\n except OSError:\n pass # file not found\n self.fh = open(f, mode)\n else:\n self.fh = sys.stdout\n\n def write(self, obj, **kw):\n '''write session data to the session output or stdout'''\n if self.sessionwriter:\n self.sessionwriter.write(obj, **kw)\n elif self.fh:\n self.fh.write(str(obj))\n\n def close(self):\n '''close output if not stdout'''\n if self.fh != sys.stdout:\n self.fh.close()\n Output.close(self)\n\n\nclass TextOutput(FileOutput):\n\n '''formatted text output to file or stdout'''\n\n _DEFAULT_FORMAT = \"%(decoder)s %(datetime)s %(sip)16s:%(sport)-5s %(dir_arrow)s %(dip)16s:%(dport)-5s ** %(data)s **\"\n _NULL = ''\n\n _FILTER_EXTRA = True\n\n def __init__(self, *args, **kw):\n if 'extra' in kw:\n self._DEFAULT_FORMAT += \" [ %(extra)s ]\"\n FileOutput.__init__(self, *args, **kw)\n\n def alert(self, *args, **kw):\n '''write an alert record\n we pass in the decoder object and args/dict'''\n rec = self.parse(*args, **kw)\n if rec:\n self.fh.write(self.format % rec)\n\n\nclass DBOutput(Output):\n\n '''format strings as used by the DBOutput module to create tables and map fields\n these follow the usual %(name)type and in most cases a custom format string will work\n defualt type maps are:\n s,r = VARCHAR (if field len given) /TEXT (if no len)\n c = CHAR(1)\n x,X,o = VARCHAR\n d,i,u = INTEGER\n 
e,E,f,F,g,G = DECIMAL\n with the following extra: (using these breaks text format string compatibility)\n b = boolean\n t = timestamp\n D = datetime\n T = this field selects table\n (following are postgres-only)\n A = inet\n H = host\n N = cidr\n M = macaddr\n format string can also map field to column with %(field:column)type\n or specify an explicit column type with %(field:column)pytype:DBTYPE\n (note this also breaks compatibility with text format strings)\n '''\n\n _DEFAULT_FORMAT = \"%(decoder)T %(ts:timestamp)t %(sip)s %(sport)s %(dip)s %(dport)s %(data:alert)s\"\n _NULL = None\n # format type to (type,coltype) map\n _TYPEMAP = {'s': 'VARCHAR', 'r': 'VARCHAR', 'c': 'CHAR(1)',\n 'x': 'VARCHAR', 'X': 'VARCHAR', 'o': 'VARCHAR',\n 'd': 'INTEGER', 'i': 'INTEGER', 'u': 'INTEGER',\n 'e': 'DECIMAL', 'E': 'DECIMAL',\n 'f': 'DECIMAL', 'F': 'DECIMAL',\n 'g': 'DECIMAL', 'G': 'DECIMAL',\n # 'b' isn't a python type, so (ftype,DBTYPE) tuple for value formats input as ftype\n 'b': ('d', 'BOOLEAN'),\n # not standard across database types!\n 't': ('f', 'TIMESTAMP'), 'D': ('s', 'DATETIME'),\n 'A': ('s', 'INET'), 'H': ('s', 'HOST'), 'N': ('s', 'CIDR'), 'M': ('s', 'MACADDR')} # these are postgres specific\n\n # acceptable params to pass to db module connect method\n _DBCONNPARAMS = ['host', 'user', 'passwd',\n 'password', 'db', 'database', 'port', 'charset']\n\n # map of db type to insert placeholder. '%s' is the default, but sqlite3 doesn't like it\n # you can override this with the 'placeholder' config keyword\n _DBTYPE_PLACEHOLDER_MAP = {'sqlite3': '?'}\n\n def __init__(self, *args, **kw):\n '''configuration:\n config=db config .ini file name to parse\n\n config keywords:\n\n dbtype=database type, selects DB API module to load\n in conf file use [dbtype] section name instead\n\n host,user,passwd,password,db,database,port will be passed to db module if present\n\n table=db table to use if not specified by a field\n\n insert_param=character to use as parameter placeholder for INSERT\n (sqlite3=?, default=%%s)\n\n format_types=types to format before insert (default=x)\n ('s' to pad strings, 'x' to convert to hex, 'f' to format floats, 'fx' for hex and floats...)\n '''\n self.dbconfig = kw.copy()\n # if we were passed a config.ini file, parse it and add the k/v pairs\n # to the config\n if 'config' in self.dbconfig:\n import ConfigParser\n config = ConfigParser.ConfigParser()\n config.read(self.dbconfig['config'])\n sections = config.sections()\n if len(sections) > 0:\n self.dbconfig['dbtype'] = sections[0]\n for k, v in config.items(sections[0], raw=True):\n self.dbconfig[k] = v\n # import the db module\n self.db = __import__(self.dbconfig['dbtype'])\n # create a connection, using a dict filtered to db conn params\n self.dbconn = self.db.connect(\n *args, **dict((k, self.dbconfig[k]) for k in self._DBCONNPARAMS if k in self.dbconfig))\n # do the base init last to catch the format string, etc.. 
(as it may\n # have come from the config file)\n Output.__init__(self, *args, **self.dbconfig)\n\n def createtable(self, table=None):\n '''creates a table based on the format string'''\n if not table and 'table' in self.dbconfig:\n table = self.dbconfig['table']\n try:\n cursor = self.dbconn.cursor()\n sqlfields = []\n for fname, ftype, fspec in [f for f in self.fields if f[1] != 'T']:\n ctype = self.fieldmap[fname][1]\n # if no width spec, use TEXT instead of VARCHAR and hope the db\n # likes it\n if ctype == 'VARCHAR' and not fspec:\n ctype = 'TEXT'\n fdef = self.fieldmap[fname][0] + ' ' + ctype\n if fspec:\n # try to conver python format spec to something SQL will\n # take\n fdef += '(' + \\\n fspec.strip('+-# lLh').replace('.', ',') + ')'\n sqlfields.append(fdef)\n sql = 'CREATE TABLE \"' + table + '\" (' + ','.join(sqlfields) + ')'\n self.log(sql, logging.DEBUG)\n return cursor.execute(sql)\n except:\n raise\n\n def close(self):\n '''closes database connection'''\n self.dbconn.close()\n Output.close(self)\n\n def alert(self, *args, **kw):\n '''write an output record\n we pass in the decoder object and args/dict'''\n rec = self.parse(self, *args, **kw)\n if rec:\n self.insert(rec)\n\n def setformat(self, formatstr=None):\n '''calls main setformat and then builds the insert SQL'''\n # what is the insert param?? some databases use %s, some use ?\n # try to map it or take the placeholder keyword from config\n ph = self.dbconfig.get('insert_param',\n self._DBTYPE_PLACEHOLDER_MAP.get(\n self.dbconfig['dbtype'], '%%s')\n )\n # these are the types we need to format before passing to the db\n self.format_types = self.dbconfig.get('format_types', 'x')\n Output.setformat(self, formatstr, typemap=self._TYPEMAP)\n # build all fields we map (except for [T]able select)\n self.tablefield = 'decoder' # default to decodername\n for fname, ftype, fspec in self.fields:\n if ftype == 'T':\n self.tablefield = fname\n sqlfields = [self.fieldmap[fname][0]\n for (fname, ftype, fspec) in self.fields if fname in self.fieldmap]\n self.insertsql = 'INSERT INTO \"%%s\" (%s) VALUES (%s)' % (\n ','.join(sqlfields), ','.join([ph] * len(sqlfields)))\n\n def insert(self, rec, table=None):\n ''' inserts rec dict using self.format into table (if given, else default or specified by field)\n if insert fails, tries to create table and insert again before raising exception '''\n if not table:\n if 'table' in self.dbconfig:\n table = self.dbconfig['table']\n elif rec[self.tablefield]:\n table = rec[self.tablefield]\n try:\n sqlvalues = []\n cursor = self.dbconn.cursor()\n for fname, ftype, fspec in self.fields:\n if fname in self.fieldmap:\n # do we preformat this data?\n if ftype in self.format_types:\n sqlvalues.append(('%' + fspec + ftype) % rec[fname])\n else:\n sqlvalues.append(rec[fname])\n # create a INSERT INTO table (fields) VALUES (?,?,?) 
for execute\n sql = self.insertsql % table\n self.log(sql + ' %s' % sqlvalues, logging.DEBUG)\n except:\n raise\n # try once, if it fails, try to create table and retry\n # throws on second failure or create table failure\n fail = False\n while True:\n try:\n cursor.execute(sql, sqlvalues)\n self.dbconn.commit()\n break # success\n except Exception, e:\n self.log(e, level=logging.WARNING)\n if fail:\n raise\n else:\n fail = True\n try:\n self.createtable(table)\n except:\n raise\n\n\nclass PCAPWriter(FileOutput):\n\n '''writes a pcap file'''\n\n def __init__(self, *args, **kw):\n FileOutput.__init__(self, *args, **kw)\n if self.fh:\n self.fh.write(\n struct.pack('IHHIIII', 0xa1b2c3d4, 2, 4, 0, 0, 65535, 1))\n\n # overrides Output.write to write session as PCAP\n # data flow is Output.dump->pcapwriter.write\n def write(self, pktlen, pktdata, ts):\n if self.fh:\n self.fh.write(\n struct.pack('II', int(ts), int((ts - int(ts)) * 1000000)))\n # captured length, original length\n self.fh.write(struct.pack('II', len(pktdata), pktlen))\n self.fh.write(pktdata)\n\n\nclass SessionWriter(Output):\n\n '''writes the session to one or more files'''\n\n def __init__(self, session=None, **kw):\n self.file = kw.get('session', session)\n self.dir = kw.get('direction', 'both')\n self.mode = kw.get('mode', 'a')\n self.timeformat = (kw.get('timeformat', self._DEFAULT_TIMEFORMAT))\n self.fieldnames = []\n\n def write(self, obj, **kwargs):\n out = None\n kw = dict(**kwargs)\n # if a session object with info() and data() methods (conn or blob, but\n # not packet)\n try:\n kw.update(**obj.info()) # get object info\n kw = self.parse(**kw)\n if self.dir == 'both':\n ds = [None]\n elif self.dir == 'split':\n ds = ['cs', 'sc']\n else:\n ds = [self.dir]\n for d in ds:\n kw.update(direction=d if d else 'both') # set direction\n # format filename and open\n out = FileOutput(self.file % kw, mode=self.mode)\n # write obj data for direction\n out.fh.write(obj.data(direction=d))\n out.close()\n except: # if not a session object\n # build filename from kw\n out = FileOutput(self.file % kw, mode=self.mode)\n out.fh.write(str(obj))\n out.close()\n\n\nclass QueueOutput(Output):\n\n '''pipes pickled packets to parent process'''\n\n def __init__(self, q, **kwargs):\n self.queue = q\n Output.__init__(self, **kwargs)\n\n def write(self, *args, **kw): self.dispatch('write', *args, **kw)\n\n def alert(self, *args, **kw): self.dispatch('alert', *args, **kw)\n\n def dump(self, *args, **kw): self.dispatch('dump', *args, **kw)\n\n def dispatch(self, m, *args, **kw): # takes (method,...) to Q\n self.queue.put((m, args, kw))\n\n def close(self):\n self.queue.close()\n Output.close(self)\n\n\n# default output module\nobj = TextOutput\n"},"path":{"kind":"string","value":"lib/output/output.py"},"size":{"kind":"number","value":21310,"string":"21,310"},"nl_text":{"kind":"string","value":"true if you want to remove extra fields from the parsed record setup the logger parse the format string this is up to the output plugin to process by default stuffs extra fields and data into 'extra' field if _FILTER_EXTRA is true create the default session writer write a message to the log will be a (name,type,length) tuple get all the field names find the next format spec of %(...) not %(... didn't find a closing paren get text between parens as field name len/precision specs will be 0-9 between ) and type char this char is not a spec char, it is the type char is the field type a intype:outtype def? 
find the end whitespace split on: to get input:output mapping output will be same as input type start at next char on loop field name to column mapping no mapping append the field name,type,spec,mapping map of in to out,type convert timestamps to proper format ts->datetime , Xtime -> Xdatetime format properly change the format string? create the record initialized to the _NULL value populate record from datadict if datadict key is a field place extra datadict keys into the extra field (and exclude the addr tuple) not filtering extra, just lump them in as fields populate the data field pass packets to pcap might be string, might be a dpkt object close the PCAP output do base init first get the output filehandle or file file not found format type to (type,coltype) map 'b' isn't a python type, so (ftype,DBTYPE) tuple for value formats input as ftype not standard across database types! these are postgres specific acceptable params to pass to db module connect method map of db type to insert placeholder. '%s' is the default, but sqlite3 doesn't like it you can override this with the 'placeholder' config keyword if we were passed a config.ini file, parse it and add the k/v pairs to the config import the db module create a connection, using a dict filtered to db conn params do the base init last to catch the format string, etc.. (as it may have come from the config file) if no width spec, use TEXT instead of VARCHAR and hope the db likes it try to conver python format spec to something SQL will take what is the insert param?? some databases use %s, some use ? try to map it or take the placeholder keyword from config these are the types we need to format before passing to the db build all fields we map (except for [T]able select) default to decodername do we preformat this data? create a INSERT INTO table (fields) VALUES (?,?,?) for execute try once, if it fails, try to create table and retry throws on second failure or create table failure success overrides Output.write to write session as PCAP data flow is Output.dump->pcapwriter.write captured length, original length if a session object with info() and data() methods (conn or blob, but not packet) get object info set direction format filename and open write obj data for direction if not a session object build filename from kw takes (method,...) to Q default output module"},"nl_size":{"kind":"number","value":2967,"string":"2,967"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7619408965110779,"string":"0.761941"}}},{"rowIdx":7893,"cells":{"content":{"kind":"string","value":"\"\"\"Auto-generated file, do not edit by hand. 
MQ metadata\"\"\"\nfrom ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata\n\nPHONE_METADATA_MQ = PhoneMetadata(id='MQ', country_code=596, international_prefix='00',\n general_desc=PhoneNumberDesc(national_number_pattern='[56]\\\\d{8}', possible_number_pattern='\\\\d{9}'),\n fixed_line=PhoneNumberDesc(national_number_pattern='596(?:0[2-5]|[12]0|3[05-9]|4[024-8]|[5-7]\\\\d|89|9[4-8])\\\\d{4}', possible_number_pattern='\\\\d{9}', example_number='596301234'),\n mobile=PhoneNumberDesc(national_number_pattern='696(?:[0-479]\\\\d|5[01]|8[0-689])\\\\d{4}', possible_number_pattern='\\\\d{9}', example_number='696201234'),\n toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),\n premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),\n shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),\n personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),\n voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),\n pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),\n uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),\n voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),\n no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),\n national_prefix='0',\n national_prefix_for_parsing='0',\n number_format=[NumberFormat(pattern='(\\\\d{3})(\\\\d{2})(\\\\d{2})(\\\\d{2})', format=u'\\\\1 \\\\2 \\\\3 \\\\4', national_prefix_formatting_rule=u'0\\\\1')])\n"},"path":{"kind":"string","value":"python/phonenumbers/data/region_MQ.py"},"size":{"kind":"number","value":1706,"string":"1,706"},"nl_text":{"kind":"string","value":"Auto-generated file, do not edit by hand. MQ metadata"},"nl_size":{"kind":"number","value":53,"string":"53"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7867065072059631,"string":"0.786707"}}},{"rowIdx":7894,"cells":{"content":{"kind":"string","value":"# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi SDK Generator. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom ... import _utilities\nfrom . 
import outputs\nfrom ._inputs import *\n\n__all__ = ['VpnSiteArgs', 'VpnSite']\n\n@pulumi.input_type\nclass VpnSiteArgs:\n def __init__(__self__, *,\n resource_group_name: pulumi.Input[str],\n address_space: Optional[pulumi.Input['AddressSpaceArgs']] = None,\n bgp_properties: Optional[pulumi.Input['BgpSettingsArgs']] = None,\n device_properties: Optional[pulumi.Input['DevicePropertiesArgs']] = None,\n id: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n is_security_site: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n site_key: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n virtual_wan: Optional[pulumi.Input['SubResourceArgs']] = None,\n vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]] = None,\n vpn_site_name: Optional[pulumi.Input[str]] = None):\n \"\"\"\n The set of arguments for constructing a VpnSite resource.\n :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.\n :param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges.\n :param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties.\n :param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties.\n :param pulumi.Input[str] id: Resource ID.\n :param pulumi.Input[str] ip_address: The ip-address for the vpn-site.\n :param pulumi.Input[bool] is_security_site: IsSecuritySite flag.\n :param pulumi.Input[str] location: Resource location.\n :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n :param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs.\n :param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links.\n :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.\n \"\"\"\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if address_space is not None:\n pulumi.set(__self__, \"address_space\", address_space)\n if bgp_properties is not None:\n pulumi.set(__self__, \"bgp_properties\", bgp_properties)\n if device_properties is not None:\n pulumi.set(__self__, \"device_properties\", device_properties)\n if id is not None:\n pulumi.set(__self__, \"id\", id)\n if ip_address is not None:\n pulumi.set(__self__, \"ip_address\", ip_address)\n if is_security_site is not None:\n pulumi.set(__self__, \"is_security_site\", is_security_site)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if site_key is not None:\n pulumi.set(__self__, \"site_key\", site_key)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if virtual_wan is not None:\n pulumi.set(__self__, \"virtual_wan\", virtual_wan)\n if vpn_site_links is not None:\n pulumi.set(__self__, \"vpn_site_links\", vpn_site_links)\n if vpn_site_name is not None:\n pulumi.set(__self__, \"vpn_site_name\", vpn_site_name)\n\n @property\n @pulumi.getter(name=\"resourceGroupName\")\n def resource_group_name(self) -> pulumi.Input[str]:\n \"\"\"\n The resource group name of the VpnSite.\n \"\"\"\n return pulumi.get(self, \"resource_group_name\")\n\n @resource_group_name.setter\n def resource_group_name(self, value: pulumi.Input[str]):\n pulumi.set(self, \"resource_group_name\", value)\n\n @property\n 
@pulumi.getter(name=\"addressSpace\")\n def address_space(self) -> Optional[pulumi.Input['AddressSpaceArgs']]:\n \"\"\"\n The AddressSpace that contains an array of IP address ranges.\n \"\"\"\n return pulumi.get(self, \"address_space\")\n\n @address_space.setter\n def address_space(self, value: Optional[pulumi.Input['AddressSpaceArgs']]):\n pulumi.set(self, \"address_space\", value)\n\n @property\n @pulumi.getter(name=\"bgpProperties\")\n def bgp_properties(self) -> Optional[pulumi.Input['BgpSettingsArgs']]:\n \"\"\"\n The set of bgp properties.\n \"\"\"\n return pulumi.get(self, \"bgp_properties\")\n\n @bgp_properties.setter\n def bgp_properties(self, value: Optional[pulumi.Input['BgpSettingsArgs']]):\n pulumi.set(self, \"bgp_properties\", value)\n\n @property\n @pulumi.getter(name=\"deviceProperties\")\n def device_properties(self) -> Optional[pulumi.Input['DevicePropertiesArgs']]:\n \"\"\"\n The device properties.\n \"\"\"\n return pulumi.get(self, \"device_properties\")\n\n @device_properties.setter\n def device_properties(self, value: Optional[pulumi.Input['DevicePropertiesArgs']]):\n pulumi.set(self, \"device_properties\", value)\n\n @property\n @pulumi.getter\n def id(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Resource ID.\n \"\"\"\n return pulumi.get(self, \"id\")\n\n @id.setter\n def id(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"id\", value)\n\n @property\n @pulumi.getter(name=\"ipAddress\")\n def ip_address(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n The ip-address for the vpn-site.\n \"\"\"\n return pulumi.get(self, \"ip_address\")\n\n @ip_address.setter\n def ip_address(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"ip_address\", value)\n\n @property\n @pulumi.getter(name=\"isSecuritySite\")\n def is_security_site(self) -> Optional[pulumi.Input[bool]]:\n \"\"\"\n IsSecuritySite flag.\n \"\"\"\n return pulumi.get(self, \"is_security_site\")\n\n @is_security_site.setter\n def is_security_site(self, value: Optional[pulumi.Input[bool]]):\n pulumi.set(self, \"is_security_site\", value)\n\n @property\n @pulumi.getter\n def location(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Resource location.\n \"\"\"\n return pulumi.get(self, \"location\")\n\n @location.setter\n def location(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"location\", value)\n\n @property\n @pulumi.getter(name=\"siteKey\")\n def site_key(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n The key for vpn-site that can be used for connections.\n \"\"\"\n return pulumi.get(self, \"site_key\")\n\n @site_key.setter\n def site_key(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"site_key\", value)\n\n @property\n @pulumi.getter\n def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n \"\"\"\n Resource tags.\n \"\"\"\n return pulumi.get(self, \"tags\")\n\n @tags.setter\n def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):\n pulumi.set(self, \"tags\", value)\n\n @property\n @pulumi.getter(name=\"virtualWan\")\n def virtual_wan(self) -> Optional[pulumi.Input['SubResourceArgs']]:\n \"\"\"\n The VirtualWAN to which the vpnSite belongs.\n \"\"\"\n return pulumi.get(self, \"virtual_wan\")\n\n @virtual_wan.setter\n def virtual_wan(self, value: Optional[pulumi.Input['SubResourceArgs']]):\n pulumi.set(self, \"virtual_wan\", value)\n\n @property\n @pulumi.getter(name=\"vpnSiteLinks\")\n def vpn_site_links(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]:\n \"\"\"\n List of 
all vpn site links.\n \"\"\"\n return pulumi.get(self, \"vpn_site_links\")\n\n @vpn_site_links.setter\n def vpn_site_links(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]):\n pulumi.set(self, \"vpn_site_links\", value)\n\n @property\n @pulumi.getter(name=\"vpnSiteName\")\n def vpn_site_name(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n The name of the VpnSite being created or updated.\n \"\"\"\n return pulumi.get(self, \"vpn_site_name\")\n\n @vpn_site_name.setter\n def vpn_site_name(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"vpn_site_name\", value)\n\n\nclass VpnSite(pulumi.CustomResource):\n @overload\n def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,\n bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,\n device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,\n id: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n is_security_site: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n site_key: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,\n vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,\n vpn_site_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n \"\"\"\n VpnSite Resource.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.\n :param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.\n :param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties.\n :param pulumi.Input[str] id: Resource ID.\n :param pulumi.Input[str] ip_address: The ip-address for the vpn-site.\n :param pulumi.Input[bool] is_security_site: IsSecuritySite flag.\n :param pulumi.Input[str] location: Resource location.\n :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.\n :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs.\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links.\n :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.\n \"\"\"\n ...\n @overload\n def __init__(__self__,\n resource_name: str,\n args: VpnSiteArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n \"\"\"\n VpnSite Resource.\n\n :param str resource_name: The name of the resource.\n :param VpnSiteArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n def __init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(VpnSiteArgs, pulumi.ResourceOptions, *args, 
**kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.__dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,\n bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,\n device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,\n id: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n is_security_site: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n site_key: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,\n vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,\n vpn_site_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = VpnSiteArgs.__new__(VpnSiteArgs)\n\n __props__.__dict__[\"address_space\"] = address_space\n __props__.__dict__[\"bgp_properties\"] = bgp_properties\n __props__.__dict__[\"device_properties\"] = device_properties\n __props__.__dict__[\"id\"] = id\n __props__.__dict__[\"ip_address\"] = ip_address\n __props__.__dict__[\"is_security_site\"] = is_security_site\n __props__.__dict__[\"location\"] = location\n if resource_group_name is None and not opts.urn:\n raise TypeError(\"Missing required property 'resource_group_name'\")\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"site_key\"] = site_key\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"virtual_wan\"] = virtual_wan\n __props__.__dict__[\"vpn_site_links\"] = vpn_site_links\n __props__.__dict__[\"vpn_site_name\"] = vpn_site_name\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"type\"] = None\n alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_=\"azure-nextgen:network/v20200301:VpnSite\"), pulumi.Alias(type_=\"azure-native:network:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20180401:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20180401:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20180601:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20180601:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20180701:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20180701:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20180801:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20180801:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20181001:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20181001:VpnSite\"), 
pulumi.Alias(type_=\"azure-native:network/v20181101:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20181101:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20181201:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20181201:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20190201:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20190201:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20190401:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20190401:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20190601:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20190601:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20190701:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20190701:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20190801:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20190801:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20190901:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20190901:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20191101:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20191101:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20191201:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20191201:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20200401:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20200401:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20200501:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20200501:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20200601:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20200601:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20200701:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20200701:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20200801:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20200801:VpnSite\"), pulumi.Alias(type_=\"azure-native:network/v20201101:VpnSite\"), pulumi.Alias(type_=\"azure-nextgen:network/v20201101:VpnSite\")])\n opts = pulumi.ResourceOptions.merge(opts, alias_opts)\n super(VpnSite, __self__).__init__(\n 'azure-native:network/v20200301:VpnSite',\n resource_name,\n __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnSite':\n \"\"\"\n Get an existing VpnSite resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = VpnSiteArgs.__new__(VpnSiteArgs)\n\n __props__.__dict__[\"address_space\"] = None\n __props__.__dict__[\"bgp_properties\"] = None\n __props__.__dict__[\"device_properties\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"ip_address\"] = None\n __props__.__dict__[\"is_security_site\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"site_key\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"virtual_wan\"] = None\n __props__.__dict__[\"vpn_site_links\"] = None\n return VpnSite(resource_name, opts=opts, __props__=__props__)\n\n 
@property\n @pulumi.getter(name=\"addressSpace\")\n def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:\n \"\"\"\n The AddressSpace that contains an array of IP address ranges.\n \"\"\"\n return pulumi.get(self, \"address_space\")\n\n @property\n @pulumi.getter(name=\"bgpProperties\")\n def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:\n \"\"\"\n The set of bgp properties.\n \"\"\"\n return pulumi.get(self, \"bgp_properties\")\n\n @property\n @pulumi.getter(name=\"deviceProperties\")\n def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]:\n \"\"\"\n The device properties.\n \"\"\"\n return pulumi.get(self, \"device_properties\")\n\n @property\n @pulumi.getter\n def etag(self) -> pulumi.Output[str]:\n \"\"\"\n A unique read-only string that changes whenever the resource is updated.\n \"\"\"\n return pulumi.get(self, \"etag\")\n\n @property\n @pulumi.getter(name=\"ipAddress\")\n def ip_address(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n The ip-address for the vpn-site.\n \"\"\"\n return pulumi.get(self, \"ip_address\")\n\n @property\n @pulumi.getter(name=\"isSecuritySite\")\n def is_security_site(self) -> pulumi.Output[Optional[bool]]:\n \"\"\"\n IsSecuritySite flag.\n \"\"\"\n return pulumi.get(self, \"is_security_site\")\n\n @property\n @pulumi.getter\n def location(self) -> pulumi.Output[str]:\n \"\"\"\n Resource location.\n \"\"\"\n return pulumi.get(self, \"location\")\n\n @property\n @pulumi.getter\n def name(self) -> pulumi.Output[str]:\n \"\"\"\n Resource name.\n \"\"\"\n return pulumi.get(self, \"name\")\n\n @property\n @pulumi.getter(name=\"provisioningState\")\n def provisioning_state(self) -> pulumi.Output[str]:\n \"\"\"\n The provisioning state of the VPN site resource.\n \"\"\"\n return pulumi.get(self, \"provisioning_state\")\n\n @property\n @pulumi.getter(name=\"siteKey\")\n def site_key(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n The key for vpn-site that can be used for connections.\n \"\"\"\n return pulumi.get(self, \"site_key\")\n\n @property\n @pulumi.getter\n def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n \"\"\"\n Resource tags.\n \"\"\"\n return pulumi.get(self, \"tags\")\n\n @property\n @pulumi.getter\n def type(self) -> pulumi.Output[str]:\n \"\"\"\n Resource type.\n \"\"\"\n return pulumi.get(self, \"type\")\n\n @property\n @pulumi.getter(name=\"virtualWan\")\n def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:\n \"\"\"\n The VirtualWAN to which the vpnSite belongs.\n \"\"\"\n return pulumi.get(self, \"virtual_wan\")\n\n @property\n @pulumi.getter(name=\"vpnSiteLinks\")\n def vpn_site_links(self) -> pulumi.Output[Optional[Sequence['outputs.VpnSiteLinkResponse']]]:\n \"\"\"\n List of all vpn site links.\n \"\"\"\n return pulumi.get(self, \"vpn_site_links\")\n\n"},"path":{"kind":"string","value":"sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py"},"size":{"kind":"number","value":23051,"string":"23,051"},"nl_text":{"kind":"string","value":"The set of arguments for constructing a VpnSite resource.\n:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.\n:param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges.\n:param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties.\n:param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties.\n:param pulumi.Input[str] id: 
Resource ID.\n:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.\n:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.\n:param pulumi.Input[str] location: Resource location.\n:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.\n:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n:param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs.\n:param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links.\n:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.\nVpnSite Resource.\n\n:param str resource_name: The name of the resource.\n:param pulumi.ResourceOptions opts: Options for the resource.\n:param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.\n:param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.\n:param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties.\n:param pulumi.Input[str] id: Resource ID.\n:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.\n:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.\n:param pulumi.Input[str] location: Resource location.\n:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.\n:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.\n:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs.\n:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links.\n:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.\nVpnSite Resource.\n\n:param str resource_name: The name of the resource.\n:param VpnSiteArgs args: The arguments to use to populate this resource's properties.\n:param pulumi.ResourceOptions opts: Options for the resource.\nThe AddressSpace that contains an array of IP address ranges.\nThe AddressSpace that contains an array of IP address ranges.\nThe set of bgp properties.\nThe set of bgp properties.\nThe device properties.\nThe device properties.\nA unique read-only string that changes whenever the resource is updated.\nGet an existing VpnSite resource's state with the given name, id, and optional extra\nproperties used to qualify the lookup.\n\n:param str resource_name: The unique name of the resulting resource.\n:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n:param pulumi.ResourceOptions opts: Options for the resource.\nResource ID.\nThe ip-address for the vpn-site.\nThe ip-address for the vpn-site.\nIsSecuritySite flag.\nIsSecuritySite flag.\nResource location.\nResource location.\nResource name.\nThe provisioning state of the VPN site resource.\nThe resource group name of the VpnSite.\nThe key for vpn-site that can be used for connections.\nThe key for vpn-site that can be used for connections.\nResource tags.\nResource tags.\nResource type.\nThe VirtualWAN to which the vpnSite belongs.\nThe VirtualWAN to which the vpnSite belongs.\nList of all vpn site links.\nList of all vpn site links.\nThe name of the VpnSite being created or updated.\n\n coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. 
*** *** Do not edit by hand unless you're certain you know what you are doing! ***"},"nl_size":{"kind":"number","value":4029,"string":"4,029"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.595940113067627,"string":"0.59594"}}},{"rowIdx":7895,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright [2009-2018] EMBL-European Bioinformatics Institute\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom pathlib import Path\n\nimport click\n\nfrom rnacentral_pipeline.rnacentral import attempted, r2dt\n\n\n@click.group(\"r2dt\")\ndef cli():\n \"\"\"\n A group of commands for parsing data from secondary structures into an\n importable format.\n \"\"\"\n pass\n\n\n@cli.command(\"process-svgs\")\n@click.option(\"--allow-missing\", is_flag=True, default=False)\n@click.argument(\"model_info\", type=click.File(\"r\"))\n@click.argument(\"directory\", type=click.Path())\n@click.argument(\"output\", type=click.File(\"w\"))\ndef process_svgs(model_info, directory, output, allow_missing=False):\n \"\"\"\n Process all SVG secondary structures in the given directory and produce a\n single data file that can be imported into the database.\n \"\"\"\n r2dt.write(model_info, directory, output, allow_missing=allow_missing)\n\n\n@cli.group(\"should-show\")\ndef should_show():\n \"\"\"\n Some commands relating to building a model for should show as well as\n running it.\n \"\"\"\n\n\n@should_show.command(\"convert-sheet\")\n@click.argument(\"filename\", type=click.File(\"r\"))\n@click.argument(\"output\", type=click.File(\"w\"))\ndef convert_sheet(filename, output):\n \"\"\"\n This command is to convert a downloaded google sheet csv into a csv that can\n be used for training data. Often we will build a spreadsheet of example URS\n and then use that to build a training set. It is nice since you can embedd\n an SVG in google sheets so it is fast for us to compare several of them.\n\n In order to move that back into the training data you can download that\n sheet as a CSV and then run this command on it to build the CSV that is used\n in training. It requires there be a 'urs' and 'Labeled Should show' column\n to build the CSV. The values in labeled should show must be true/false\n (ignoring case).\n \"\"\"\n r2dt.write_converted_sheet(filename, output)\n\n\n@should_show.command(\"fetch-data\")\n@click.option(\"--db-url\", envvar=\"PGDATABASE\")\n@click.argument(\"filename\", type=click.File(\"r\"))\n@click.argument(\"output\", type=click.File(\"w\"))\ndef fetch_training_data(filename, output, db_url=None):\n \"\"\"\n This builds a CSV file of training data to use for the model building. 
I\n keep it separate so I can build a training csv and play with it interactivly\n before committing the final modeling building logic to the pipeline.\n \"\"\"\n r2dt.write_training_data(filename, db_url, output)\n\n\n@should_show.command(\"inspect-data\")\n@click.option(\"--db-url\", envvar=\"PGDATABASE\")\n@click.argument(\"filename\", type=click.File(\"r\"))\n@click.argument(\"output\", type=click.File(\"w\"))\ndef fetch_inspect_data(filename, output, db_url=None):\n \"\"\"\n This is the command to use when trying to fetch more examples to add to the\n training set. This will fetch some information that is useful for a person\n to evaluate a diagram and decide if it should be true/false in the training\n set.\n \"\"\"\n r2dt.write_training_data(filename, db_url, output)\n\n\n@should_show.command(\"build-model\")\n@click.option(\"--db-url\", envvar=\"PGDATABASE\")\n@click.argument(\"training-info\", type=click.File(\"r\"))\n@click.argument(\"model\", type=click.Path())\ndef build_model(training_info, model, db_url=None):\n \"\"\"\n This builds a model given then training information. The training\n information should be a csv file of:\n URS,flag\n The flag must be 1 or 0 to indicate if the URS should be shown or not. THis\n will fetch the data like the fetch-data command but will then build a model\n and write it out the the output file directly.\n \"\"\"\n r2dt.build_model(training_info, db_url, Path(model))\n\n\n@should_show.command(\"compute\")\n@click.option(\"--db-url\", envvar=\"PGDATABASE\")\n@click.argument(\"model\", type=click.Path())\n@click.argument(\"filename\", type=click.File(\"r\"))\n@click.argument(\"output\", type=click.File(\"w\"))\ndef write_should_show(model, filename, output, db_url=None):\n \"\"\"\n This computes the should show values for the data in the given file and a\n file listing urs ids to use. The data needed for the URS will be fetched\n from the database. 
This is meant to operate on large batches, like\n relabeling the entire database.\n \"\"\"\n r2dt.write_should_show(model, filename, db_url, output)\n\n\n@cli.group(\"model-info\")\ndef model_info():\n \"\"\"\n Commands for parsing and generating data files we can import into the\n database as model info files.\n \"\"\"\n pass\n\n\n@model_info.command(\"crw\")\n@click.argument(\"filename\", type=click.File(\"r\"))\n@click.argument(\"output\", default=\"-\", type=click.File(\"w\"))\ndef crw_model_info(filename, output):\n \"\"\"\n Parse the CRW metadata file and produce\n \"\"\"\n r2dt.write_crw(filename, output)\n\n\n@model_info.command(\"ribovision\")\n@click.argument(\"filename\", type=click.File(\"r\"))\n@click.argument(\"output\", default=\"-\", type=click.File(\"w\"))\ndef ribovision_model_info(filename, output):\n \"\"\"\n Parse the metadata.tsv file from R2DT for Ribovision models to\n produce something we can put in our database.\n \"\"\"\n r2dt.write_ribovision(filename, output)\n\n\n@model_info.command(\"gtrnadb\")\n@click.argument(\"filename\", type=click.File(\"r\"))\n@click.argument(\"output\", default=\"-\", type=click.File(\"w\"))\ndef gtrnadb_model_info(filename, output):\n \"\"\"\n Parse the metadata.tsv file from R2DT for gtrnadb models to\n produce something we can put in our database.\n \"\"\"\n r2dt.write_gtrnadb(filename, output)\n\n\n@model_info.command(\"rnase-p\")\n@click.argument(\"filename\", type=click.File(\"r\"))\n@click.argument(\"output\", default=\"-\", type=click.File(\"w\"))\ndef rnase_p_model_info(filename, output):\n \"\"\"\n Parse the metadata.tsv file from R2DT for Ribovision models to\n produce something we can put in our database.\n \"\"\"\n r2dt.write_rnase_p(filename, output)\n\n\n@cli.command(\"create-attempted\")\n@click.argument(\"filename\", type=click.File(\"r\"))\n@click.argument(\"output\", default=\"-\", type=click.File(\"w\"))\ndef r2dt_create_attempted(filename, output):\n attempted.r2dt(filename, output)\n\n\n@cli.command(\"publish\")\n@click.option(\"--suffix\", default=\"\")\n@click.option(\"--allow-missing\", is_flag=True, default=False)\n@click.argument(\"model_info\", type=click.File(\"r\"))\n@click.argument(\n \"directory\",\n type=click.Path(\n writable=False,\n dir_okay=True,\n file_okay=False,\n ),\n)\n@click.argument(\n \"output\",\n type=click.Path(\n writable=True,\n dir_okay=True,\n file_okay=False,\n ),\n)\ndef r2dt_publish(model_info, directory, output, allow_missing, suffix=\"\"):\n r2dt.publish(\n model_info, directory, output, allow_missing=allow_missing, suffix=suffix\n )\n\n\n@cli.command(\"prepare-s3\")\n@click.option(\"--allow-missing\", is_flag=True, default=False)\n@click.argument(\"model_info\", type=click.File(\"r\"))\n@click.argument(\n \"directory\",\n type=click.Path(\n writable=False,\n dir_okay=True,\n file_okay=False,\n ),\n)\n@click.argument(\n \"output\",\n type=click.Path(\n writable=True,\n dir_okay=True,\n file_okay=False,\n ),\n)\n@click.argument(\"file_list\", type=click.Path())\ndef r2dt_prepare_s3(model_info, directory, output, file_list, allow_missing):\n file_list = Path(file_list)\n output = Path(output)\n r2dt.prepare_s3(\n model_info, directory, output, file_list, allow_missing=allow_missing\n )\n"},"path":{"kind":"string","value":"rnacentral_pipeline/cli/r2dt.py"},"size":{"kind":"number","value":7915,"string":"7,915"},"nl_text":{"kind":"string","value":"This builds a model given then training information. 
The training\ninformation should be a csv file of:\n URS,flag\nThe flag must be 1 or 0 to indicate if the URS should be shown or not. THis\nwill fetch the data like the fetch-data command but will then build a model\nand write it out the the output file directly.\nA group of commands for parsing data from secondary structures into an\nimportable format.\nThis command is to convert a downloaded google sheet csv into a csv that can\nbe used for training data. Often we will build a spreadsheet of example URS\nand then use that to build a training set. It is nice since you can embedd\nan SVG in google sheets so it is fast for us to compare several of them.\n\nIn order to move that back into the training data you can download that\nsheet as a CSV and then run this command on it to build the CSV that is used\nin training. It requires there be a 'urs' and 'Labeled Should show' column\nto build the CSV. The values in labeled should show must be true/false\n(ignoring case).\nParse the CRW metadata file and produce\nThis is the command to use when trying to fetch more examples to add to the\ntraining set. This will fetch some information that is useful for a person\nto evaluate a diagram and decide if it should be true/false in the training\nset.\nThis builds a CSV file of training data to use for the model building. I\nkeep it separate so I can build a training csv and play with it interactivly\nbefore committing the final modeling building logic to the pipeline.\nParse the metadata.tsv file from R2DT for gtrnadb models to\nproduce something we can put in our database.\nCommands for parsing and generating data files we can import into the\ndatabase as model info files.\nProcess all SVG secondary structures in the given directory and produce a\nsingle data file that can be imported into the database.\nParse the metadata.tsv file from R2DT for Ribovision models to\nproduce something we can put in our database.\nParse the metadata.tsv file from R2DT for Ribovision models to\nproduce something we can put in our database.\nSome commands relating to building a model for should show as well as\nrunning it.\nThis computes the should show values for the data in the given file and a\nfile listing urs ids to use. The data needed for the URS will be fetched\nfrom the database. 
This is meant to operate on large batches, like\nrelabeling the entire database.\nCopyright [2009-2018] EMBL-European Bioinformatics Institute\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n -*- coding: utf-8 -*-"},"nl_size":{"kind":"number","value":2995,"string":"2,995"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.895942211151123,"string":"0.895942"}}},{"rowIdx":7896,"cells":{"content":{"kind":"string","value":"import os\nimport pyudev\nimport psutil\nimport logging\nimport time\n\nfrom arm.ripper import music_brainz\nfrom arm.ui import db\nfrom arm.config.config import cfg\nfrom flask_login import LoginManager, current_user, login_user, UserMixin # noqa: F401\nfrom prettytable import PrettyTable\n\nhidden_attribs = (\"OMDB_API_KEY\", \"EMBY_USERID\", \"EMBY_PASSWORD\", \"EMBY_API_KEY\", \"PB_KEY\", \"IFTTT_KEY\", \"PO_KEY\",\n \"PO_USER_KEY\", \"PO_APP_KEY\", \"ARM_API_KEY\", \"TMDB_API_KEY\")\nHIDDEN_VALUE = \"\"\n\n\nclass Job(db.Model):\n job_id = db.Column(db.Integer, primary_key=True)\n arm_version = db.Column(db.String(20))\n crc_id = db.Column(db.String(63))\n logfile = db.Column(db.String(256))\n start_time = db.Column(db.DateTime)\n stop_time = db.Column(db.DateTime)\n job_length = db.Column(db.String(12))\n status = db.Column(db.String(32))\n stage = db.Column(db.String(63))\n no_of_titles = db.Column(db.Integer)\n title = db.Column(db.String(256))\n title_auto = db.Column(db.String(256))\n title_manual = db.Column(db.String(256))\n year = db.Column(db.String(4))\n year_auto = db.Column(db.String(4))\n year_manual = db.Column(db.String(4))\n video_type = db.Column(db.String(20))\n video_type_auto = db.Column(db.String(20))\n video_type_manual = db.Column(db.String(20))\n imdb_id = db.Column(db.String(15))\n imdb_id_auto = db.Column(db.String(15))\n imdb_id_manual = db.Column(db.String(15))\n poster_url = db.Column(db.String(256))\n poster_url_auto = db.Column(db.String(256))\n poster_url_manual = db.Column(db.String(256))\n devpath = db.Column(db.String(15))\n mountpoint = db.Column(db.String(20))\n hasnicetitle = db.Column(db.Boolean)\n errors = db.Column(db.Text)\n disctype = db.Column(db.String(20)) # dvd/bluray/data/music/unknown\n label = db.Column(db.String(256))\n path = db.Column(db.String(256))\n ejected = db.Column(db.Boolean)\n updated = db.Column(db.Boolean)\n pid = db.Column(db.Integer)\n pid_hash = db.Column(db.Integer)\n tracks = db.relationship('Track', backref='job', lazy='dynamic')\n config = db.relationship('Config', uselist=False, backref=\"job\")\n\n def __init__(self, devpath):\n \"\"\"Return a disc object\"\"\"\n self.devpath = devpath\n self.mountpoint = \"/mnt\" + devpath\n self.hasnicetitle = False\n self.video_type = \"unknown\"\n self.ejected = False\n self.updated = False\n if cfg['VIDEOTYPE'] != \"auto\":\n self.video_type = cfg['VIDEOTYPE']\n self.parse_udev()\n self.get_pid()\n\n def parse_udev(self):\n \"\"\"Parse udev for properties of current disc\"\"\"\n context = pyudev.Context()\n device = 
pyudev.Devices.from_device_file(context, self.devpath)\n self.disctype = \"unknown\"\n\n for key, value in device.items():\n if key == \"ID_FS_LABEL\":\n self.label = value\n if value == \"iso9660\":\n self.disctype = \"data\"\n elif key == \"ID_CDROM_MEDIA_BD\":\n self.disctype = \"bluray\"\n elif key == \"ID_CDROM_MEDIA_DVD\":\n self.disctype = \"dvd\"\n elif key == \"ID_CDROM_MEDIA_TRACK_COUNT_AUDIO\":\n self.disctype = \"music\"\n else:\n pass\n\n def get_pid(self):\n pid = os.getpid()\n p = psutil.Process(pid)\n self.pid = pid\n self.pid_hash = hash(p)\n\n def get_disc_type(self, found_hvdvd_ts):\n if self.disctype == \"music\":\n logging.debug(\"Disc is music.\")\n self.label = music_brainz.main(self)\n elif os.path.isdir(self.mountpoint + \"/VIDEO_TS\"):\n logging.debug(f\"Found: {self.mountpoint}/VIDEO_TS\")\n self.disctype = \"dvd\"\n elif os.path.isdir(self.mountpoint + \"/video_ts\"):\n logging.debug(f\"Found: {self.mountpoint}/video_ts\")\n self.disctype = \"dvd\"\n elif os.path.isdir(self.mountpoint + \"/BDMV\"):\n logging.debug(f\"Found: {self.mountpoint}/BDMV\")\n self.disctype = \"bluray\"\n elif os.path.isdir(self.mountpoint + \"/HVDVD_TS\"):\n logging.debug(f\"Found: {self.mountpoint}/HVDVD_TS\")\n # do something here\n elif found_hvdvd_ts:\n logging.debug(\"Found file: HVDVD_TS\")\n # do something here too\n else:\n logging.debug(\"Did not find valid dvd/bd files. Changing disctype to 'data'\")\n self.disctype = \"data\"\n\n def identify_audio_cd(self):\n \"\"\"\n Get the title for audio cds to use for the logfile name.\n\n Needs the job class passed into it so it can be forwarded to mb\n\n return - only the logfile - setup_logging() adds the full path\n \"\"\"\n # Use the music label if we can find it - defaults to music_cd.log\n disc_id = music_brainz.get_disc_id(self)\n mb_title = music_brainz.get_title(disc_id, self)\n if mb_title == \"not identified\":\n self.label = self.title = \"not identified\"\n logfile = \"music_cd.log\"\n new_log_file = f\"music_cd_{round(time.time() * 100)}.log\"\n else:\n logfile = f\"{mb_title}.log\"\n new_log_file = f\"{mb_title}_{round(time.time() * 100)}.log\"\n\n temp_log_full = os.path.join(cfg['LOGPATH'], logfile)\n logfile = new_log_file if os.path.isfile(temp_log_full) else logfile\n return logfile\n\n def __str__(self):\n \"\"\"Returns a string of the object\"\"\"\n\n s = self.__class__.__name__ + \": \"\n for attr, value in self.__dict__.items():\n s = s + \"(\" + str(attr) + \"=\" + str(value) + \") \"\n\n return s\n\n def pretty_table(self):\n \"\"\"Returns a string of the prettytable\"\"\"\n x = PrettyTable()\n x.field_names = [\"Config\", \"Value\"]\n x._max_width = {\"Config\": 50, \"Value\": 60}\n for attr, value in self.__dict__.items():\n if attr == \"config\":\n x.add_row([str(attr), str(value.pretty_table())])\n else:\n x.add_row([str(attr), str(value)])\n return str(x.get_string())\n\n def get_d(self):\n r = {}\n for key, value in self.__dict__.items():\n if '_sa_instance_state' not in key:\n r[str(key)] = str(value)\n return r\n\n def __repr__(self):\n return ''.format(self.label)\n\n def eject(self):\n \"\"\"Eject disc if it hasn't previously been ejected\"\"\"\n if not self.ejected:\n self.ejected = True\n try:\n if os.system(\"umount \" + self.devpath):\n logging.debug(\"we unmounted disc\" + self.devpath)\n if os.system(\"eject \" + self.devpath):\n logging.debug(\"we ejected disc\" + self.devpath)\n self.ejected = True\n else:\n logging.debug(\"failed to eject\" + self.devpath)\n except Exception as e:\n 
logging.debug(self.devpath + \" couldn't be ejected \" + str(e))\n\n\nclass Track(db.Model):\n track_id = db.Column(db.Integer, primary_key=True)\n job_id = db.Column(db.Integer, db.ForeignKey('job.job_id'))\n track_number = db.Column(db.String(4))\n length = db.Column(db.Integer)\n aspect_ratio = db.Column(db.String(20))\n fps = db.Column(db.Float)\n main_feature = db.Column(db.Boolean)\n basename = db.Column(db.String(256))\n filename = db.Column(db.String(256))\n orig_filename = db.Column(db.String(256))\n new_filename = db.Column(db.String(256))\n ripped = db.Column(db.Boolean)\n status = db.Column(db.String(32))\n error = db.Column(db.Text)\n source = db.Column(db.String(32))\n\n def __init__(self, job_id, track_number, length, aspect_ratio, fps, main_feature, source, basename, filename):\n \"\"\"Return a track object\"\"\"\n self.job_id = job_id\n self.track_number = track_number\n self.length = length\n self.aspect_ratio = aspect_ratio\n self.fps = fps\n self.main_feature = main_feature\n self.source = source\n self.basename = basename\n self.filename = filename\n self.ripped = False\n\n def __repr__(self):\n return ''.format(self.track_number)\n\n\nclass Config(db.Model):\n CONFIG_ID = db.Column(db.Integer, primary_key=True)\n job_id = db.Column(db.Integer, db.ForeignKey('job.job_id'))\n ARM_CHECK_UDF = db.Column(db.Boolean)\n GET_VIDEO_TITLE = db.Column(db.Boolean)\n SKIP_TRANSCODE = db.Column(db.Boolean)\n VIDEOTYPE = db.Column(db.String(25))\n MINLENGTH = db.Column(db.String(6))\n MAXLENGTH = db.Column(db.String(6))\n MANUAL_WAIT = db.Column(db.Boolean)\n MANUAL_WAIT_TIME = db.Column(db.Integer)\n RAW_PATH = db.Column(db.String(255))\n TRANSCODE_PATH = db.Column(db.String(255))\n COMPLETED_PATH = db.Column(db.String(255))\n EXTRAS_SUB = db.Column(db.String(255))\n INSTALLPATH = db.Column(db.String(255))\n LOGPATH = db.Column(db.String(255))\n LOGLEVEL = db.Column(db.String(255))\n LOGLIFE = db.Column(db.Integer)\n DBFILE = db.Column(db.String(255))\n WEBSERVER_IP = db.Column(db.String(25))\n WEBSERVER_PORT = db.Column(db.Integer)\n SET_MEDIA_PERMISSIONS = db.Column(db.Boolean)\n CHMOD_VALUE = db.Column(db.Integer)\n SET_MEDIA_OWNER = db.Column(db.Boolean)\n CHOWN_USER = db.Column(db.String(50))\n CHOWN_GROUP = db.Column(db.String(50))\n RIPMETHOD = db.Column(db.String(25))\n MKV_ARGS = db.Column(db.String(25))\n DELRAWFILES = db.Column(db.Boolean)\n HASHEDKEYS = db.Column(db.Boolean)\n HB_PRESET_DVD = db.Column(db.String(256))\n HB_PRESET_BD = db.Column(db.String(256))\n DEST_EXT = db.Column(db.String(10))\n HANDBRAKE_CLI = db.Column(db.String(25))\n MAINFEATURE = db.Column(db.Boolean)\n HB_ARGS_DVD = db.Column(db.String(256))\n HB_ARGS_BD = db.Column(db.String(256))\n EMBY_REFRESH = db.Column(db.Boolean)\n EMBY_SERVER = db.Column(db.String(25))\n EMBY_PORT = db.Column(db.String(6))\n EMBY_CLIENT = db.Column(db.String(25))\n EMBY_DEVICE = db.Column(db.String(50))\n EMBY_DEVICEID = db.Column(db.String(128))\n EMBY_USERNAME = db.Column(db.String(50))\n EMBY_USERID = db.Column(db.String(128))\n EMBY_PASSWORD = db.Column(db.String(128))\n EMBY_API_KEY = db.Column(db.String(64))\n NOTIFY_RIP = db.Column(db.Boolean)\n NOTIFY_TRANSCODE = db.Column(db.Boolean)\n PB_KEY = db.Column(db.String(64))\n IFTTT_KEY = db.Column(db.String(64))\n IFTTT_EVENT = db.Column(db.String(25))\n PO_USER_KEY = db.Column(db.String(64))\n PO_APP_KEY = db.Column(db.String(64))\n OMDB_API_KEY = db.Column(db.String(64))\n\n def __init__(self, c, job_id):\n self.__dict__.update(c)\n self.job_id = job_id\n\n def 
list_params(self):\n \"\"\"Returns a string of the object\"\"\"\n s = self.__class__.__name__ + \": \"\n for attr, value in self.__dict__.items():\n if s:\n s = s + \"\\n\"\n if str(attr) in hidden_attribs and value:\n value = HIDDEN_VALUE\n s = s + str(attr) + \":\" + str(value)\n\n return s\n\n def __str__(self):\n \"\"\"Returns a string of the object\"\"\"\n s = self.__class__.__name__ + \": \"\n for attr, value in self.__dict__.items():\n if str(attr) in hidden_attribs and value:\n value = HIDDEN_VALUE\n s = s + \"(\" + str(attr) + \"=\" + str(value) + \") \"\n\n return s\n\n def pretty_table(self):\n \"\"\"Returns a string of the prettytable\"\"\"\n x = PrettyTable()\n x.field_names = [\"Config\", \"Value\"]\n x._max_width = {\"Config\": 20, \"Value\": 30}\n for attr, value in self.__dict__.items():\n if str(attr) in hidden_attribs and value:\n value = HIDDEN_VALUE\n x.add_row([str(attr), str(value)])\n return str(x.get_string())\n\n def get_d(self):\n r = {}\n for key, value in self.__dict__.items():\n if str(key) not in hidden_attribs:\n r[str(key)] = str(value)\n return r\n\n\nclass User(db.Model, UserMixin):\n user_id = db.Column(db.Integer, index=True, primary_key=True)\n email = db.Column(db.String(64))\n password = db.Column(db.String(128))\n hash = db.Column(db.String(256))\n\n def __init__(self, email=None, password=None, hashed=None):\n self.email = email\n self.password = password\n self.hash = hashed\n\n def __repr__(self):\n return '' % (self.email)\n\n def get_id(self):\n return self.user_id\n\n\nclass AlembicVersion(db.Model):\n version_num = db.Column(db.String(36), autoincrement=False, primary_key=True)\n\n def __init__(self, version=None):\n self.version_num = version\n\n\nclass UISettings(db.Model):\n id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n use_icons = db.Column(db.Boolean)\n save_remote_images = db.Column(db.Boolean)\n bootstrap_skin = db.Column(db.String(64))\n language = db.Column(db.String(4))\n index_refresh = db.Column(db.Integer)\n database_limit = db.Column(db.Integer)\n\n def __init__(self, use_icons=None, save_remote_images=None, bootstrap_skin=None, language=None, index_refresh=None,\n database_limit=None):\n self.use_icons = use_icons\n self.save_remote_images = save_remote_images\n self.bootstrap_skin = bootstrap_skin\n self.language = language\n self.index_refresh = index_refresh\n self.database_limit = database_limit\n\n def __repr__(self):\n return '' % self.id\n\n def __str__(self):\n \"\"\"Returns a string of the object\"\"\"\n\n s = self.__class__.__name__ + \": \"\n for attr, value in self.__dict__.items():\n s = s + \"(\" + str(attr) + \"=\" + str(value) + \") \"\n\n return s\n\n def get_d(self):\n r = {}\n for key, value in self.__dict__.items():\n if '_sa_instance_state' not in key:\n r[str(key)] = str(value)\n return r\n"},"path":{"kind":"string","value":"arm/models/models.py"},"size":{"kind":"number","value":14015,"string":"14,015"},"nl_text":{"kind":"string","value":"Return a disc object\nReturn a track object\nReturns a string of the object\nReturns a string of the object\nReturns a string of the object\nEject disc if it hasn't previously been ejected\nGet the title for audio cds to use for the logfile name.\n\nNeeds the job class passed into it so it can be forwarded to mb\n\nreturn - only the logfile - setup_logging() adds the full path\nReturns a string of the object\nParse udev for properties of current disc\nReturns a string of the prettytable\nReturns a string of the prettytable\n\n noqa: F401 
dvd/bluray/data/music/unknown do something here do something here too Use the music label if we can find it - defaults to music_cd.log"},"nl_size":{"kind":"number","value":662,"string":"662"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6283080577850342,"string":"0.628308"}}},{"rowIdx":7897,"cells":{"content":{"kind":"string","value":"# Copyright 2020 XEBIALABS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# https://stackoverflow.com/questions/16910955/programmatically-configure-logback-appender?noredirect=1\n#\nimport ch.qos.logback.core.Appender as LogAppender\nimport ch.qos.logback.core.util.COWArrayList as COWArrayList\nimport ch.qos.logback.classic.encoder.PatternLayoutEncoder as PatternLayoutEncoder\nimport ch.qos.logback.core.FileAppender as FileAppender\n\nimport org.slf4j.LoggerFactory as LoggerFactory\nimport ch.qos.logback.classic.Level as logLevels\nimport json\n\ndef getLogAppenders( loggerName=\"console\" ):\n loggerMap = []\n myLogger = LoggerFactory.getLogger(\"logmanager\")\n loggerContext = LoggerFactory.getILoggerFactory()\n\n myLogger.error(\"===================\")\n appenderMap = {}\n for logger in loggerContext.getLoggerList():\n appenderList = logger.iteratorForAppenders()\n while appenderList.hasNext():\n appender = appenderList.next()\n logger.error(\"Logger %s\" % appender.getName())\n if appender.getName() not in appenderMap.keys():\n loggerMap.append({\"name\": appender.getName(), \"appender\": \"NA\"})\n myLogger.error(\"Appender %s: %s\" % (appender.getName(), \"NA\"))\n myLogger.error(\"===================\")\n return loggerMap\n\ndef createLogAppender( name, file ):\n lc = LoggerFactory.getILoggerFactory()\n ple = PatternLayoutEncoder()\n ple.setPattern(\"%date %level [%thread] %logger{10} [%file:%line] %msg%n\")\n ple.setContext(lc)\n ple.start()\n fileAppender = FileAppender()\n fileAppender.setFile(file)\n fileAppender.setEncoder(ple)\n fileAppender.setContext(lc)\n fileAppender.start()\n\n logger = LoggerFactory.getLogger(string)\n logger.addAppender(fileAppender)\n #logger.setLevel(logLevels.DEBUG)\n # set to true if root should log too\n logger.setAdditive(True)\n return logger\n\n\nmyLogger = LoggerFactory.getLogger(\"logmanager\")\nverb = \"GET\"\n\nif (request):\n if (request.query):\n if (request.query['verb']):\n verb = request.query['verb']\n\nif( verb == \"create\"):\n string = request.query['string']\n file = request.query['file']\n myLogger.info(\"Setting %s to %s\" % (string, file))\n createLogAppender(string, 
file)\n\nloggerMap = getLogAppenders()\nmyLogger.error(\"%s\" % json.dumps(loggerMap, indent=4, sort_keys=True))\n\nresponse.entity = {\"status\": \"OK\", \"data\":loggerMap }\n"},"path":{"kind":"string","value":"src/main/resources/restapi/logger/getLogAppenders.py"},"size":{"kind":"number","value":3408,"string":"3,408"},"nl_text":{"kind":"string","value":"Copyright 2020 XEBIALABS Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. https://stackoverflow.com/questions/16910955/programmatically-configure-logback-appender?noredirect=1logger.setLevel(logLevels.DEBUG) set to true if root should log too"},"nl_size":{"kind":"number","value":1214,"string":"1,214"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8371472358703613,"string":"0.837147"}}},{"rowIdx":7898,"cells":{"content":{"kind":"string","value":"# coding: utf-8\n\n\"\"\"\n App Center Client\n\n Microsoft Visual Studio App Center API # noqa: E501\n\n OpenAPI spec version: preview\n Contact: benedetto.abbenanti@gmail.com\n Project Repository: https://github.com/b3nab/appcenter-sdks\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport appcenter_sdk\nfrom DistributionGroupAppsDeleteRequest.clsDistributionGroupAppsDeleteRequest import DistributionGroupAppsDeleteRequest # noqa: E501\nfrom appcenter_sdk.rest import ApiException\n\n\nclass TestDistributionGroupAppsDeleteRequest(unittest.TestCase):\n \"\"\"DistributionGroupAppsDeleteRequest unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testDistributionGroupAppsDeleteRequest(self):\n \"\"\"Test DistributionGroupAppsDeleteRequest\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = appcenter_sdk.models.clsDistributionGroupAppsDeleteRequest.DistributionGroupAppsDeleteRequest() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"path":{"kind":"string","value":"sdks/python/test/test_DistributionGroupAppsDeleteRequest.py"},"size":{"kind":"number","value":1084,"string":"1,084"},"nl_text":{"kind":"string","value":"DistributionGroupAppsDeleteRequest unit test stubs\nTest DistributionGroupAppsDeleteRequest\nApp Center Client\n\nMicrosoft Visual Studio App Center API # noqa: E501\n\nOpenAPI spec version: preview\nContact: benedetto.abbenanti@gmail.com\nProject Repository: https://github.com/b3nab/appcenter-sdks\n\n coding: utf-8 noqa: E501 FIXME: construct object with mandatory attributes with example values model = 
appcenter_sdk.models.clsDistributionGroupAppsDeleteRequest.DistributionGroupAppsDeleteRequest() noqa: E501"},"nl_size":{"kind":"number","value":506,"string":"506"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.5095123648643494,"string":"0.509512"}}},{"rowIdx":7899,"cells":{"content":{"kind":"string","value":"# Copyright 2008-2015 Nokia Solutions and Networks\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .tags import TagPatterns\n\n\nclass Criticality(object):\n\n def __init__(self, critical_tags=None, non_critical_tags=None):\n self.critical_tags = self._get_tag_patterns(critical_tags)\n self.non_critical_tags = self._get_tag_patterns(non_critical_tags)\n\n def _get_tag_patterns(self, tags):\n return TagPatterns(tags) if not isinstance(tags, TagPatterns) else tags\n\n def tag_is_critical(self, tag):\n return self.critical_tags.match(tag)\n\n def tag_is_non_critical(self, tag):\n return self.non_critical_tags.match(tag)\n\n def test_is_critical(self, test):\n if self.critical_tags and not self.critical_tags.match(test.tags):\n return False\n return not self.non_critical_tags.match(test.tags)\n\n def __bool__(self):\n return bool(self.critical_tags or self.non_critical_tags)\n\n #PY2\n def __nonzero__(self):\n return self.__bool__()\n"},"path":{"kind":"string","value":"src/robot/model/criticality.py"},"size":{"kind":"number","value":1527,"string":"1,527"},"nl_text":{"kind":"string","value":"Copyright 2008-2015 Nokia Solutions and Networks Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.PY2"},"nl_size":{"kind":"number","value":582,"string":"582"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8578140139579773,"string":"0.857814"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":78,"numItemsPerPage":100,"numTotalItems":8000,"offset":7800,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1ODM5ODIzNSwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9sYW5ndWFnZV9pZF9iaWdjb2RlIiwiZXhwIjoxNzU4NDAxODM1LCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.8h-E2GKuOMU_DExvb6dm6y4xdr51Eth9zzyg0U4iTuU_QrpjO0xFUGNUtTD2vr51AUnY9DFznYnIA3mqtT_DCA","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
content
stringlengths
27
928k
path
stringlengths
4
230
size
int64
27
928k
nl_text
stringlengths
21
396k
nl_size
int64
21
396k
nl_language
stringlengths
2
3
nl_language_score
float64
0.04
1
# MIT LICENSE # # Copyright 1997 - 2020 by IXIA Keysight # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from ixnetwork_restpy.base import Base from ixnetwork_restpy.files import Files class SwitchConfigLearnedInformation(Base): """NOT DEFINED The SwitchConfigLearnedInformation class encapsulates a list of switchConfigLearnedInformation resources that are managed by the system. A list of resources can be retrieved from the server using the SwitchConfigLearnedInformation.find() method. """ __slots__ = () _SDM_NAME = 'switchConfigLearnedInformation' _SDM_ATT_MAP = { 'ConfigFlags': 'configFlags', 'DataPathId': 'dataPathId', 'DataPathIdAsHex': 'dataPathIdAsHex', 'ErrorCode': 'errorCode', 'ErrorType': 'errorType', 'Latency': 'latency', 'LocalIp': 'localIp', 'MissSendLength': 'missSendLength', 'NegotiatedVersion': 'negotiatedVersion', 'RemoteIp': 'remoteIp', 'ReplyState': 'replyState', } def __init__(self, parent): super(SwitchConfigLearnedInformation, self).__init__(parent) @property def ConfigFlags(self): """ Returns ------- - str: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['ConfigFlags']) @property def DataPathId(self): """ Returns ------- - str: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['DataPathId']) @property def DataPathIdAsHex(self): """ Returns ------- - str: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['DataPathIdAsHex']) @property def ErrorCode(self): """ Returns ------- - str: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['ErrorCode']) @property def ErrorType(self): """ Returns ------- - str: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['ErrorType']) @property def Latency(self): """ Returns ------- - number: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['Latency']) @property def LocalIp(self): """ Returns ------- - str: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['LocalIp']) @property def MissSendLength(self): """ Returns ------- - number: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['MissSendLength']) @property def NegotiatedVersion(self): """ Returns ------- - str: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['NegotiatedVersion']) @property def RemoteIp(self): """ Returns ------- - str: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['RemoteIp']) @property def ReplyState(self): """ Returns ------- - str: NOT DEFINED """ return self._get_attribute(self._SDM_ATT_MAP['ReplyState']) def find(self, ConfigFlags=None, DataPathId=None, 
DataPathIdAsHex=None, ErrorCode=None, ErrorType=None, Latency=None, LocalIp=None, MissSendLength=None, NegotiatedVersion=None, RemoteIp=None, ReplyState=None): """Finds and retrieves switchConfigLearnedInformation resources from the server. All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve switchConfigLearnedInformation resources from the server. To retrieve an exact match ensure the parameter value starts with ^ and ends with $ By default the find method takes no parameters and will retrieve all switchConfigLearnedInformation resources from the server. Args ---- - ConfigFlags (str): NOT DEFINED - DataPathId (str): NOT DEFINED - DataPathIdAsHex (str): NOT DEFINED - ErrorCode (str): NOT DEFINED - ErrorType (str): NOT DEFINED - Latency (number): NOT DEFINED - LocalIp (str): NOT DEFINED - MissSendLength (number): NOT DEFINED - NegotiatedVersion (str): NOT DEFINED - RemoteIp (str): NOT DEFINED - ReplyState (str): NOT DEFINED Returns ------- - self: This instance with matching switchConfigLearnedInformation resources retrieved from the server available through an iterator or index Raises ------ - ServerError: The server has encountered an uncategorized error condition """ return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self, href): """Retrieves a single instance of switchConfigLearnedInformation data from the server. Args ---- - href (str): An href to the instance to be retrieved Returns ------- - self: This instance with the switchConfigLearnedInformation resources from the server available through an iterator or index Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ return self._read(href)
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/switchconfiglearnedinformation_e983ca5da0eadbba93d1ce1d2903a5b7.py
6,457
NOT DEFINED The SwitchConfigLearnedInformation class encapsulates a list of switchConfigLearnedInformation resources that are managed by the system. A list of resources can be retrieved from the server using the SwitchConfigLearnedInformation.find() method. Returns ------- - str: NOT DEFINED Returns ------- - str: NOT DEFINED Returns ------- - str: NOT DEFINED Returns ------- - str: NOT DEFINED Returns ------- - str: NOT DEFINED Returns ------- - number: NOT DEFINED Returns ------- - str: NOT DEFINED Returns ------- - number: NOT DEFINED Returns ------- - str: NOT DEFINED Returns ------- - str: NOT DEFINED Returns ------- - str: NOT DEFINED Finds and retrieves switchConfigLearnedInformation resources from the server. All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve switchConfigLearnedInformation resources from the server. To retrieve an exact match ensure the parameter value starts with ^ and ends with $ By default the find method takes no parameters and will retrieve all switchConfigLearnedInformation resources from the server. Args ---- - ConfigFlags (str): NOT DEFINED - DataPathId (str): NOT DEFINED - DataPathIdAsHex (str): NOT DEFINED - ErrorCode (str): NOT DEFINED - ErrorType (str): NOT DEFINED - Latency (number): NOT DEFINED - LocalIp (str): NOT DEFINED - MissSendLength (number): NOT DEFINED - NegotiatedVersion (str): NOT DEFINED - RemoteIp (str): NOT DEFINED - ReplyState (str): NOT DEFINED Returns ------- - self: This instance with matching switchConfigLearnedInformation resources retrieved from the server available through an iterator or index Raises ------ - ServerError: The server has encountered an uncategorized error condition Retrieves a single instance of switchConfigLearnedInformation data from the server. Args ---- - href (str): An href to the instance to be retrieved Returns ------- - self: This instance with the switchConfigLearnedInformation resources from the server available through an iterator or index Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition MIT LICENSE Copyright 1997 - 2020 by IXIA Keysight Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
3,266
en
0.692968
# -*- coding: utf-8 -*- """Cisco DNA Center Claim a Device to a Site data model. Copyright (c) 2019-2021 Cisco Systems. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import ( absolute_import, division, print_function, unicode_literals, ) import fastjsonschema import json from dnacentersdk.exceptions import MalformedRequest from builtins import * class JSONSchemaValidator5889Fb844939A13B(object): """Claim a Device to a Site request schema definition.""" def __init__(self): super(JSONSchemaValidator5889Fb844939A13B, self).__init__() self._validator = fastjsonschema.compile(json.loads( '''{ "properties": { "deviceId": { "type": [ "string", "null" ] }, "siteId": { "type": [ "string", "null" ] }, "type": { "enum": [ "Default", "AccessPoint", "StackSwitch", "Sensor", "MobilityExpress", null ], "type": [ "string", "null" ] } }, "type": "object" }'''.replace("\n" + ' ' * 16, '') )) def validate(self, request): try: self._validator(request) except fastjsonschema.exceptions.JsonSchemaException as e: raise MalformedRequest( '{} is invalid. Reason: {}'.format(request, e.message) )
dnacentersdk/models/validators/v1_2_10/jsd_5889fb844939a13b.py
2,704
Claim a Device to a Site request schema definition. Cisco DNA Center Claim a Device to a Site data model. Copyright (c) 2019-2021 Cisco Systems. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*- coding: utf-8 -*-
1,193
en
0.869151
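The validator above compiles the request schema with fastjsonschema and re-raises schema violations as MalformedRequest. A short usage sketch, assuming the dnacentersdk package shown in the snippet is installed and the class is importable:

from dnacentersdk.exceptions import MalformedRequest

validator = JSONSchemaValidator5889Fb844939A13B()

# A request matching the schema passes silently.
validator.validate({"deviceId": "abc123", "siteId": "site-1", "type": "Default"})

# A value outside the "type" enum is rejected and surfaced as MalformedRequest.
try:
    validator.validate({"type": "NotAValidType"})
except MalformedRequest as err:
    print(err)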
#!/usr/bin/python import sys import cgi import cgitb import sqlite3 reload(sys) sys.setdefaultencoding('utf-8') cgitb.enable() # html print("Content-type: text/html\n") print('<meta charset="utf-8">') print("<html><head>") print('''<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>''') print("<title>BRITE REU Candidates</title>") print('''<link rel="stylesheet" href="https://bioed.bu.edu/students_21/group_proj/group_K/css/nav.css"> <link rel="stylesheet" href="https://bioed.bu.edu/students_21/group_proj/group_K/css/appadmin.css"> </head>''') print("<body>") print('''<div id="bg-image">''') print('''<div id="topnav"> <a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/show_applicant_admin.py">Applicant List</a> <a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/stats_admin.py">Applicant Statistics</a> <a href="#assign users">Assign Users</a> <a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/input_projects.py">Input Faculty Projects</a> <a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/review_summary_admin.py">View All Past Reviews</a> <a class="active" href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/assign_candidate.py">Assign Candidates to Faculty</a> <a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/can_pref.py">Candidate Preferences</a> <a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/match.py">Match Candidates to Faculty</a> <a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/finalmatch.py">Final Matches</a> <a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/help_admin.py">Help</a> <a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/about_admin.py">About/Contact</a> </div>''') print("<h3>Select Checkboxes to Assign Candidates to Faculty Members</h3>") print("<h4>To Remove an Assignment, Uncheck the Checkbox</h4>") #query to get candidate data for the rows query1 = "SELECT cid, firstname, lastname FROM Applicant join Candidate on Applicant.aid=Candidate.cid;" #query to get the faculty and project names for the table headers query2 = 'SELECT pid, uid, fname || " " || lname || ":\n" || project_name FROM Project JOIN User using(uid) ORDER BY(lname);' #query to get all current candidate-faculty pairs in the database query3 = 'SELECT cid || "_" || pid, assigned_at FROM Assignment ORDER BY(cid);' #start connection connection = sqlite3.connect('db/BRITEREU.db') c = connection.cursor() try: #execute query 1 c.execute(query1) #get results to above standard query results1 = c.fetchall() except Exception: print("<p><font color=red><b>Error Query 1</b></font></p>") try: #execute query 2 c.execute(query2) #get results to above standard query results2 = c.fetchall() except Exception: print("<p><font color=red><b>Error Query 2</b></font></p>") try: #execute query 3 c.execute(query3) #get results to above standard query results3 = c.fetchall() except Exception: print("<p><font color=red><b>Error Query 3</b></font></p>") c.close() connection.close() #get all the candidate-faculty pair ids currently in the database which will be used in the section that checks and uses form data cfids = [cf[0] for cf in results3] #retrieve form data form = cgi.FieldStorage() #if form is empty, then it's possible that everything is to be deleted from the Assignment table #if not form: # if results3: # truncateStatement = "DELETE FROM Assignment;" # connection = 
sqlite3.connect('db/BRITEREU.db') # c = connection.cursor() # c.execute(truncateStatement) # connection.commit() #check what checkboxes are checked #if checkbox was selected that was not previously selected - insert those pairs into the Assignment table #if checkbox is no longer selected - delete those pairs from the Assignment table if form: res3 = [pair for pair in cfids] pairlist = form.getlist("cf") #find pairs that are in the selected list (pairlist) and not in the current database list (res3) tobe_inserted = list(set(pairlist) - set(res3)) tobe_inserted = [tuple(i.split("_")) for i in tobe_inserted] #find pairs that are not in the selected list(pairlist) and are in the current database list (res3) tobe_removed = list(set(res3) - set(pairlist)) tobe_removed = [tuple(map(int, i.split("_"))) for i in tobe_removed] if tobe_inserted or tobe_removed: connection = sqlite3.connect('db/BRITEREU.db') c = connection.cursor() for pair in tobe_inserted: insertStatement = "INSERT INTO Assignment(cid, pid) VALUES (%s, %s);" % pair c.execute(insertStatement) connection.commit() for pair in tobe_removed: deleteStatement = 'DELETE FROM Assignment WHERE cid ="%s" and pid ="%s";' % pair c.execute(deleteStatement) connection.commit() c.close() connection.close() #query the database again to now get all updated pairs query4 = 'SELECT cid || "_" || pid, assigned_at FROM Assignment ORDER BY(cid);' connection = sqlite3.connect('db/BRITEREU.db') c = connection.cursor() try: #execute query 1 c.execute(query4) #get results to above standard query results4 = c.fetchall() except Exception: print("<p><font color=red><b>Error Query 4</b></font></p>") #form action for user to submit checkboxes selections print('''<form name="form1" id="form1" action="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/assign_candidate.py" method="post" >''') print('<table id=Candidate class="dataframe">') print("<tr><th>Candidate ID</th><th>Candidate Name</th>") #gets list of faculty #adds all the faculty who are in the database as columns for faculty in results2: print("<th>%s</th>") % faculty[2] print("</tr>") #get the Project IDs for the projects so that you concatenate to the CID to formulate a value pair pids = [faculty[0] for faculty in results2] #added proper URL for reference to reviewer page #print the candidate table with a checkbox for each faculty member for row in results1: print('''<tr><td><a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/reviewer.py?AID=%s">%s</a></td><td>%s %s</td>''') % (row[0], row[0], row[1], row[2]) for f in pids: for cf_pair in results4: if (str(row[0])+"_"+str(f)) in cf_pair: print('<td><input title="%s GMT" type="checkbox" name="cf" value=%s checked="checked" />rank</td>') % (cf_pair[1], (str(row[0])+"_"+str(f))) break else: print('<td><input type="checkbox" name="cf" value=%s /></td>') % (str(row[0])+"_"+str(f)) print("</tr>") #add submit button for assigning faculty to candidates print('<input type="submit" value="Assign Candidates" /><br /><br />') #end form print("</form>") #filtering section for the table print("</table>") print('''<script src="https://bioed.bu.edu/students_21/group_proj/group_K/tablefilter/tablefilter.js"></script>''') print('''<script data-config=""> var filtersConfig = { base_path: 'https://bioed.bu.edu/students_21/divyas3/tablefilter/', auto_filter: { delay: 110 //milliseconds }, filters_row_index: 1, state: true, alternate_rows: true, rows_counter: true, btn_reset: true, status_bar: true, msg_filter: 'Filtering...' 
}; var tf = new TableFilter(Candidate, filtersConfig); tf.init(); </script>''') print("</body> </html>")
assign_candidate.py
7,818
!/usr/bin/python htmlquery to get candidate data for the rowsquery to get the faculty and project names for the table headersquery to get all current candidate-faculty pairs in the databasestart connectionexecute query 1 get results to above standard queryexecute query 2get results to above standard queryexecute query 3get results to above standard queryget all the candidate-faculty pair ids currently in the database which will be used in the section that checks and uses form dataretrieve form dataif form is empty, then it's possible that everything is to be deleted from the Assignment tableif not form: if results3: truncateStatement = "DELETE FROM Assignment;" connection = sqlite3.connect('db/BRITEREU.db') c = connection.cursor() c.execute(truncateStatement) connection.commit()check what checkboxes are checkedif checkbox was selected that was not previously selected - insert those pairs into the Assignment tableif checkbox is no longer selected - delete those pairs from the Assignment tablefind pairs that are in the selected list (pairlist) and not in the current database list (res3)find pairs that are not in the selected list(pairlist) and are in the current database list (res3)query the database again to now get all updated pairsexecute query 1 get results to above standard queryform action for user to submit checkboxes selections gets list of facultyadds all the faculty who are in the database as columnsget the Project IDs for the projects so that you concatenate to the CID to formulate a value pairadded proper URL for reference to reviewer pageprint the candidate table with a checkbox for each faculty memberadd submit button for assigning faculty to candidatesend formfiltering section for the table
1,781
en
0.828944
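The CGI script above interpolates form values directly into its INSERT and DELETE statements with % formatting, which is fragile and injection-prone. A parameterized variant of just that step is sketched below, reusing the Assignment table and the tobe_inserted / tobe_removed pair lists computed in the script; everything else would stay unchanged.

import sqlite3

connection = sqlite3.connect('db/BRITEREU.db')
c = connection.cursor()

# (cid, pid) pairs built earlier in the script
c.executemany("INSERT INTO Assignment(cid, pid) VALUES (?, ?);", tobe_inserted)
c.executemany("DELETE FROM Assignment WHERE cid = ? AND pid = ?;", tobe_removed)

connection.commit()
c.close()
connection.close()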
"""Test the UniFi Protect switch platform.""" # pylint: disable=protected-access from __future__ import annotations from unittest.mock import AsyncMock, Mock import pytest from pyunifiprotect.data import ( Camera, Light, RecordingMode, SmartDetectObjectType, VideoMode, ) from homeassistant.components.unifiprotect.const import DEFAULT_ATTRIBUTION from homeassistant.components.unifiprotect.switch import ( CAMERA_SWITCHES, LIGHT_SWITCHES, ProtectSwitchEntityDescription, ) from homeassistant.const import ATTR_ATTRIBUTION, ATTR_ENTITY_ID, STATE_OFF, Platform from homeassistant.core import HomeAssistant from homeassistant.helpers import entity_registry as er from .conftest import ( MockEntityFixture, assert_entity_counts, enable_entity, ids_from_device_description, ) CAMERA_SWITCHES_BASIC = [ d for d in CAMERA_SWITCHES if d.name != "Detections: Face" and d.name != "Detections: Package" and d.name != "SSH Enabled" ] CAMERA_SWITCHES_NO_EXTRA = [ d for d in CAMERA_SWITCHES_BASIC if d.name not in ("High FPS", "Privacy Mode") ] @pytest.fixture(name="light") async def light_fixture( hass: HomeAssistant, mock_entry: MockEntityFixture, mock_light: Light ): """Fixture for a single light for testing the switch platform.""" # disable pydantic validation so mocking can happen Light.__config__.validate_assignment = False light_obj = mock_light.copy(deep=True) light_obj._api = mock_entry.api light_obj.name = "Test Light" light_obj.is_ssh_enabled = False light_obj.light_device_settings.is_indicator_enabled = False mock_entry.api.bootstrap.reset_objects() mock_entry.api.bootstrap.lights = { light_obj.id: light_obj, } await hass.config_entries.async_setup(mock_entry.entry.entry_id) await hass.async_block_till_done() assert_entity_counts(hass, Platform.SWITCH, 2, 1) yield light_obj Light.__config__.validate_assignment = True @pytest.fixture(name="camera") async def camera_fixture( hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera ): """Fixture for a single camera for testing the switch platform.""" # disable pydantic validation so mocking can happen Camera.__config__.validate_assignment = False camera_obj = mock_camera.copy(deep=True) camera_obj._api = mock_entry.api camera_obj.channels[0]._api = mock_entry.api camera_obj.channels[1]._api = mock_entry.api camera_obj.channels[2]._api = mock_entry.api camera_obj.name = "Test Camera" camera_obj.recording_settings.mode = RecordingMode.DETECTIONS camera_obj.feature_flags.has_led_status = True camera_obj.feature_flags.has_hdr = True camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT, VideoMode.HIGH_FPS] camera_obj.feature_flags.has_privacy_mask = True camera_obj.feature_flags.has_speaker = True camera_obj.feature_flags.has_smart_detect = True camera_obj.feature_flags.smart_detect_types = [ SmartDetectObjectType.PERSON, SmartDetectObjectType.VEHICLE, ] camera_obj.is_ssh_enabled = False camera_obj.led_settings.is_enabled = False camera_obj.hdr_mode = False camera_obj.video_mode = VideoMode.DEFAULT camera_obj.remove_privacy_zone() camera_obj.speaker_settings.are_system_sounds_enabled = False camera_obj.osd_settings.is_name_enabled = False camera_obj.osd_settings.is_date_enabled = False camera_obj.osd_settings.is_logo_enabled = False camera_obj.osd_settings.is_debug_enabled = False camera_obj.smart_detect_settings.object_types = [] mock_entry.api.bootstrap.reset_objects() mock_entry.api.bootstrap.cameras = { camera_obj.id: camera_obj, } await hass.config_entries.async_setup(mock_entry.entry.entry_id) await hass.async_block_till_done() 
assert_entity_counts(hass, Platform.SWITCH, 12, 11) yield camera_obj Camera.__config__.validate_assignment = True @pytest.fixture(name="camera_none") async def camera_none_fixture( hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera ): """Fixture for a single camera for testing the switch platform.""" # disable pydantic validation so mocking can happen Camera.__config__.validate_assignment = False camera_obj = mock_camera.copy(deep=True) camera_obj._api = mock_entry.api camera_obj.channels[0]._api = mock_entry.api camera_obj.channels[1]._api = mock_entry.api camera_obj.channels[2]._api = mock_entry.api camera_obj.name = "Test Camera" camera_obj.recording_settings.mode = RecordingMode.DETECTIONS camera_obj.feature_flags.has_led_status = False camera_obj.feature_flags.has_hdr = False camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT] camera_obj.feature_flags.has_privacy_mask = False camera_obj.feature_flags.has_speaker = False camera_obj.feature_flags.has_smart_detect = False camera_obj.is_ssh_enabled = False camera_obj.osd_settings.is_name_enabled = False camera_obj.osd_settings.is_date_enabled = False camera_obj.osd_settings.is_logo_enabled = False camera_obj.osd_settings.is_debug_enabled = False mock_entry.api.bootstrap.reset_objects() mock_entry.api.bootstrap.cameras = { camera_obj.id: camera_obj, } await hass.config_entries.async_setup(mock_entry.entry.entry_id) await hass.async_block_till_done() assert_entity_counts(hass, Platform.SWITCH, 5, 4) yield camera_obj Camera.__config__.validate_assignment = True @pytest.fixture(name="camera_privacy") async def camera_privacy_fixture( hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera ): """Fixture for a single camera for testing the switch platform.""" # disable pydantic validation so mocking can happen Camera.__config__.validate_assignment = False camera_obj = mock_camera.copy(deep=True) camera_obj._api = mock_entry.api camera_obj.channels[0]._api = mock_entry.api camera_obj.channels[1]._api = mock_entry.api camera_obj.channels[2]._api = mock_entry.api camera_obj.name = "Test Camera" camera_obj.recording_settings.mode = RecordingMode.NEVER camera_obj.feature_flags.has_led_status = False camera_obj.feature_flags.has_hdr = False camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT] camera_obj.feature_flags.has_privacy_mask = True camera_obj.feature_flags.has_speaker = False camera_obj.feature_flags.has_smart_detect = False camera_obj.add_privacy_zone() camera_obj.is_ssh_enabled = False camera_obj.osd_settings.is_name_enabled = False camera_obj.osd_settings.is_date_enabled = False camera_obj.osd_settings.is_logo_enabled = False camera_obj.osd_settings.is_debug_enabled = False mock_entry.api.bootstrap.reset_objects() mock_entry.api.bootstrap.cameras = { camera_obj.id: camera_obj, } await hass.config_entries.async_setup(mock_entry.entry.entry_id) await hass.async_block_till_done() assert_entity_counts(hass, Platform.SWITCH, 6, 5) yield camera_obj Camera.__config__.validate_assignment = True async def test_switch_setup_light( hass: HomeAssistant, mock_entry: MockEntityFixture, light: Light, ): """Test switch entity setup for light devices.""" entity_registry = er.async_get(hass) description = LIGHT_SWITCHES[1] unique_id, entity_id = ids_from_device_description( Platform.SWITCH, light, description ) entity = entity_registry.async_get(entity_id) assert entity assert entity.unique_id == unique_id state = hass.states.get(entity_id) assert state assert state.state == STATE_OFF assert 
state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION description = LIGHT_SWITCHES[0] unique_id = f"{light.id}_{description.key}" entity_id = f"switch.test_light_{description.name.lower().replace(' ', '_')}" entity = entity_registry.async_get(entity_id) assert entity assert entity.disabled is True assert entity.unique_id == unique_id await enable_entity(hass, mock_entry.entry.entry_id, entity_id) state = hass.states.get(entity_id) assert state assert state.state == STATE_OFF assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION async def test_switch_setup_camera_all( hass: HomeAssistant, mock_entry: MockEntityFixture, camera: Camera, ): """Test switch entity setup for camera devices (all enabled feature flags).""" entity_registry = er.async_get(hass) for description in CAMERA_SWITCHES_BASIC: unique_id, entity_id = ids_from_device_description( Platform.SWITCH, camera, description ) entity = entity_registry.async_get(entity_id) assert entity assert entity.unique_id == unique_id state = hass.states.get(entity_id) assert state assert state.state == STATE_OFF assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION description = CAMERA_SWITCHES[0] description_entity_name = ( description.name.lower().replace(":", "").replace(" ", "_") ) unique_id = f"{camera.id}_{description.key}" entity_id = f"switch.test_camera_{description_entity_name}" entity = entity_registry.async_get(entity_id) assert entity assert entity.disabled is True assert entity.unique_id == unique_id await enable_entity(hass, mock_entry.entry.entry_id, entity_id) state = hass.states.get(entity_id) assert state assert state.state == STATE_OFF assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION async def test_switch_setup_camera_none( hass: HomeAssistant, mock_entry: MockEntityFixture, camera_none: Camera, ): """Test switch entity setup for camera devices (no enabled feature flags).""" entity_registry = er.async_get(hass) for description in CAMERA_SWITCHES_BASIC: if description.ufp_required_field is not None: continue unique_id, entity_id = ids_from_device_description( Platform.SWITCH, camera_none, description ) entity = entity_registry.async_get(entity_id) assert entity assert entity.unique_id == unique_id state = hass.states.get(entity_id) assert state assert state.state == STATE_OFF assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION description = CAMERA_SWITCHES[0] description_entity_name = ( description.name.lower().replace(":", "").replace(" ", "_") ) unique_id = f"{camera_none.id}_{description.key}" entity_id = f"switch.test_camera_{description_entity_name}" entity = entity_registry.async_get(entity_id) assert entity assert entity.disabled is True assert entity.unique_id == unique_id await enable_entity(hass, mock_entry.entry.entry_id, entity_id) state = hass.states.get(entity_id) assert state assert state.state == STATE_OFF assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION async def test_switch_light_status(hass: HomeAssistant, light: Light): """Tests status light switch for lights.""" description = LIGHT_SWITCHES[1] light.__fields__["set_status_light"] = Mock() light.set_status_light = AsyncMock() _, entity_id = ids_from_device_description(Platform.SWITCH, light, description) await hass.services.async_call( "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True ) light.set_status_light.assert_called_once_with(True) await hass.services.async_call( "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True ) 
light.set_status_light.assert_called_with(False) async def test_switch_camera_ssh( hass: HomeAssistant, camera: Camera, mock_entry: MockEntityFixture ): """Tests SSH switch for cameras.""" description = CAMERA_SWITCHES[0] camera.__fields__["set_ssh"] = Mock() camera.set_ssh = AsyncMock() _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description) await enable_entity(hass, mock_entry.entry.entry_id, entity_id) await hass.services.async_call( "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True ) camera.set_ssh.assert_called_once_with(True) await hass.services.async_call( "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True ) camera.set_ssh.assert_called_with(False) @pytest.mark.parametrize("description", CAMERA_SWITCHES_NO_EXTRA) async def test_switch_camera_simple( hass: HomeAssistant, camera: Camera, description: ProtectSwitchEntityDescription ): """Tests all simple switches for cameras.""" assert description.ufp_set_method is not None camera.__fields__[description.ufp_set_method] = Mock() setattr(camera, description.ufp_set_method, AsyncMock()) set_method = getattr(camera, description.ufp_set_method) _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description) await hass.services.async_call( "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True ) set_method.assert_called_once_with(True) await hass.services.async_call( "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True ) set_method.assert_called_with(False) async def test_switch_camera_highfps(hass: HomeAssistant, camera: Camera): """Tests High FPS switch for cameras.""" description = CAMERA_SWITCHES[3] camera.__fields__["set_video_mode"] = Mock() camera.set_video_mode = AsyncMock() _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description) await hass.services.async_call( "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True ) camera.set_video_mode.assert_called_once_with(VideoMode.HIGH_FPS) await hass.services.async_call( "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True ) camera.set_video_mode.assert_called_with(VideoMode.DEFAULT) async def test_switch_camera_privacy(hass: HomeAssistant, camera: Camera): """Tests Privacy Mode switch for cameras.""" description = CAMERA_SWITCHES[4] camera.__fields__["set_privacy"] = Mock() camera.set_privacy = AsyncMock() _, entity_id = ids_from_device_description(Platform.SWITCH, camera, description) await hass.services.async_call( "switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True ) camera.set_privacy.assert_called_once_with(True, 0, RecordingMode.NEVER) await hass.services.async_call( "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True ) camera.set_privacy.assert_called_with( False, camera.mic_volume, camera.recording_settings.mode ) async def test_switch_camera_privacy_already_on( hass: HomeAssistant, camera_privacy: Camera ): """Tests Privacy Mode switch for cameras with privacy mode defaulted on.""" description = CAMERA_SWITCHES[4] camera_privacy.__fields__["set_privacy"] = Mock() camera_privacy.set_privacy = AsyncMock() _, entity_id = ids_from_device_description( Platform.SWITCH, camera_privacy, description ) await hass.services.async_call( "switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True ) camera_privacy.set_privacy.assert_called_once_with(False, 100, RecordingMode.ALWAYS)
tests/components/unifiprotect/test_switch.py
15,576
Test the UniFi Protect switch platform. pylint: disable=protected-access disable pydantic validation so mocking can happen disable pydantic validation so mocking can happen disable pydantic validation so mocking can happen disable pydantic validation so mocking can happen
274
en
0.499516
#!/usr/bin/env python3 ''' Model for Riemannian feature calculation and classification for EEG data ''' import numpy as np from sklearn.svm import LinearSVC, SVC from riemannian_multiscale import RiemannianMultiscale, QuantizedRiemannianMultiscale from filters import load_filterbank from utilities import quantize __author__ = "Michael Hersche, Tino Rellstab and Tibor Schneider" __email__ = "[email protected],[email protected]" DATA_PATH = "dataset/" # QUANTIZED = True # ONLY_2HZ_BANDS = True class RiemannianModel(): """ Riemannian Model """ def __init__(self, svm_kernel='linear', svm_c=0.1, fs=250, bands=None, time_windows=None, riem_opt='Riemann', rho=0.1, filter_type='butter', filter_order=2, random_state=None): """ Constructor Args: Parameters ---------- svm_kernel: str {'linear', 'sigmoid', 'rbf'} kernel used for classifier svm_c: float regularization parameter for the classifier fs: int sampling rate of the data bands: list of int bandwidths used in filterbanks (default: [2, 4, 8, 16, 32]) time_windows: list of list of ints, shape = (N, 2) time windows used, in seconds (default: [[2,5, 6]]) riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"} type of riemannian used rho: float Normalization parameter for the covariance matrix of the riemannian filter_type: str {"butter", "fir"} Type of the filter filter_order: int Order of the filter random_state: int or None random seed used in the SVM """ # setup classifier if svm_kernel == 'linear': self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001) else: self.classifier = SVC(C=svm_c, kernel=svm_kernel, degree=10, gamma='auto', cache_size=10000, random_state=random_state) # setup Filterbank if bands is None: bandwidths = np.array([2, 4, 8, 16, 32]) else: bandwidths = np.array(bands) filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=filter_type) # setup Time Windows if time_windows is None: time_windows = (np.array([[2.5, 6]]) * fs).astype(int) # time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) else: time_windows = (np.array(time_windows) * fs).astype(int) # setup riemannian self.riemannian = RiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt, rho=rho, vectorized=True) # store dimensionality self.no_bands = filter_bank.shape[0] self.no_time_windows = time_windows.shape[0] self.no_riem = None self.no_features = None def fit(self, samples, labels): """ Training Parameters ---------- samples: np.array, size=(N, C, T) training samples labels: np.array, size=(N) training labels """ # extract the number of eatures assert len(samples.shape) == 3 no_channels = samples.shape[1] self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow self.no_features = self.no_riem * self.no_bands * self.no_time_windows # fit and extract training features from the riemannian features = self.riemannian.fit(samples) self.classifier.fit(features, labels) def score(self, samples, labels): """ Measure the performance, returns success rate Parameters ---------- samples: np.array, size=(N, C, T) training samples labels: np.array, size=(N) training labels Returns ------- float: score of the model """ features = self.riemannian.features(samples) return self.classifier.score(features, labels) def predict(self, samples): """ Predict some data Parameters ---------- samples: np.array, size=(N, C, T) training samples Returns ------- np.array, size=[N]: prediction """ features = self.riemannian.features(samples) 
return self.classifier.predict(features) class QuantizedRiemannianModel(): """ QuantizedRiemannian Model """ def __init__(self, svm_c=0.1, fs=250, bands=None, riem_opt='Riemann', rho=0.1, filter_order=2, random_state=None, num_bits=8, bitshift_scale=True): """ Constructor Parameters ---------- svm_c: float regularization parameter for the classifier fs: int sampling rate of the data bands: list of int bandwidths used in filterbanks (default: [2, 4, 8, 16, 32]) riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"} type of riemannian used rho: float Normalization parameter for the covariance matrix of the riemannian filter_order: int Order of the filter random_state: int or None random seed used in the SVM num_bits: int Number of bits used for quantization bitshift_scale: bool if True, make sure that all scale factors between one part and the next is a bitshift """ self.num_bits = num_bits self.bitshift_scale = bitshift_scale # setup classifier self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001) # setup Filterbank if bands is None: bandwidths = np.array([2, 4, 8, 16, 32]) else: bandwidths = np.array(bands) filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype="butter") # setup Time Windows time_windows = (np.array([[2.5, 6]]) * fs).astype(int) # time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) # !!!!! # setup riemannian self.riemannian = QuantizedRiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt, rho=rho, vectorized=True, num_bits=num_bits, bitshift_scale=bitshift_scale) # prepare quantized weights and biases self.scale_weight = 0 self.scale_bias = 0 # store dimensionality self.no_bands = filter_bank.shape[0] self.no_time_windows = time_windows.shape[0] self.no_riem = None self.no_features = None def fit(self, samples, labels): """ Training Parameters ---------- samples: np.array, size=(N, C, T) training samples labels: np.array, size=(N) training labels """ # extract the number of eatures assert len(samples.shape) == 3 no_channels = samples.shape[1] self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow self.no_features = self.no_riem * self.no_bands * self.no_time_windows # prepare scale factors self.riemannian.prepare_quantization(samples) # fit and extract training features from the riemannian features = self.riemannian.fit(samples) self.classifier.fit(features, labels) # quantize the classifier self.scale_weight = max(self.scale_weight, np.abs(self.classifier.coef_).max()) weights = quantize(self.classifier.coef_, self.scale_weight, self.num_bits, do_round=True) self.classifier.coef_ = weights # do not quantize the bias, this one will be added in 32 bit, and quantization does not # matter here... 
# self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max()) # bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits, # do_round=True) # self.classifier.intercept_ = bias def score(self, samples, labels): """ Measure the performance, returns success rate Parameters ---------- samples: np.array, size=(N, C, T) training samples labels: np.array, size=(N) training labels Returns ------- float: score of the model """ features = self.riemannian.features(samples) return self.classifier.score(features, labels) def predict(self, samples): """ Predict some data Parameters ---------- samples: np.array, size=(N, C, T) training samples Returns ------- np.array, size=[N]: prediction """ features = self.riemannian.features(samples) return self.classifier.predict(features) def predict_with_intermediate(self, sample, verbose=True): """ Predict some data Parameters ---------- samples: np.array, size=(C, T) training sample Returns ------- ordered dictionary including every intermediate result and the output """ if verbose: print("Predict sample with intermediate matrices") assert len(sample.shape) == 2 result = self.riemannian.onetrial_feature_with_intermediate(sample) features = next(reversed(result.values())) features = features.reshape(1, -1) result["svm_result"] = self.classifier.decision_function(features) result["prediction"] = self.classifier.predict(features) return result def get_data_dict(self): """ Returns a nested dictionary containing all necessary data """ return {"num_bits": self.num_bits, "bitshift_scale": self.bitshift_scale, "SVM": {"weights": self.classifier.coef_, "weight_scale": self.scale_weight, "bias": self.classifier.intercept_}, "riemannian": self.riemannian.get_data_dict()}
multiscale_bci_python/riemannian_model.py
10,590
QuantizedRiemannian Model Riemannian Model Constructor Args: Parameters ---------- svm_kernel: str {'linear', 'sigmoid', 'rbf'} kernel used for classifier svm_c: float regularization parameter for the classifier fs: int sampling rate of the data bands: list of int bandwidths used in filterbanks (default: [2, 4, 8, 16, 32]) time_windows: list of list of ints, shape = (N, 2) time windows used, in seconds (default: [[2,5, 6]]) riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"} type of riemannian used rho: float Normalization parameter for the covariance matrix of the riemannian filter_type: str {"butter", "fir"} Type of the filter filter_order: int Order of the filter random_state: int or None random seed used in the SVM Constructor Parameters ---------- svm_c: float regularization parameter for the classifier fs: int sampling rate of the data bands: list of int bandwidths used in filterbanks (default: [2, 4, 8, 16, 32]) riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"} type of riemannian used rho: float Normalization parameter for the covariance matrix of the riemannian filter_order: int Order of the filter random_state: int or None random seed used in the SVM num_bits: int Number of bits used for quantization bitshift_scale: bool if True, make sure that all scale factors between one part and the next is a bitshift Training Parameters ---------- samples: np.array, size=(N, C, T) training samples labels: np.array, size=(N) training labels Training Parameters ---------- samples: np.array, size=(N, C, T) training samples labels: np.array, size=(N) training labels Returns a nested dictionary containing all necessary data Predict some data Parameters ---------- samples: np.array, size=(N, C, T) training samples Returns ------- np.array, size=[N]: prediction Predict some data Parameters ---------- samples: np.array, size=(N, C, T) training samples Returns ------- np.array, size=[N]: prediction Predict some data Parameters ---------- samples: np.array, size=(C, T) training sample Returns ------- ordered dictionary including every intermediate result and the output Measure the performance, returns success rate Parameters ---------- samples: np.array, size=(N, C, T) training samples labels: np.array, size=(N) training labels Returns ------- float: score of the model Measure the performance, returns success rate Parameters ---------- samples: np.array, size=(N, C, T) training samples labels: np.array, size=(N) training labels Returns ------- float: score of the model Model for Riemannian feature calculation and classification for EEG data !/usr/bin/env python3 QUANTIZED = True ONLY_2HZ_BANDS = True setup classifier setup Filterbank setup Time Windows time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) setup riemannian store dimensionality extract the number of eatures Total number of CSP feature per band and timewindow fit and extract training features from the riemannian setup classifier setup Filterbank setup Time Windows time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) !!!!! setup riemannian prepare quantized weights and biases store dimensionality extract the number of eatures Total number of CSP feature per band and timewindow prepare scale factors fit and extract training features from the riemannian quantize the classifier do not quantize the bias, this one will be added in 32 bit, and quantization does not matter here... 
self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max()) bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits, do_round=True) self.classifier.intercept_ = bias
4,013
en
0.49274
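The RiemannianModel class above exposes a scikit-learn-like fit/score/predict surface. A hypothetical end-to-end sketch follows; the random arrays are placeholders for real EEG epochs, and the import path is assumed from the file name shown above.

import numpy as np
from riemannian_model import RiemannianModel  # module shown above; import path assumed

# Placeholder epochs with shape (trials, channels, samples). The default time
# window is 2.5-6 s at fs = 250 Hz, so each trial needs at least 1500 samples.
X_train = np.random.randn(40, 22, 1750)
y_train = np.random.randint(0, 4, size=40)
X_test = np.random.randn(10, 22, 1750)
y_test = np.random.randint(0, 4, size=10)

model = RiemannianModel(svm_kernel='linear', svm_c=0.1, fs=250, riem_opt='Riemann')
model.fit(X_train, y_train)
print("accuracy:", model.score(X_test, y_test))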
import logging import os import random import time from functools import lru_cache import cv2 import numpy as np import imgreco.main from Arknights.helper import logger from addons.activity import ActivityAddOn, get_stage_map from addons.base import BaseAddOn, pil2cv, crop_cv_by_rect, show_img from addons.common_cache import load_game_data from imgreco.ocr.cnocr import ocr_and_correct icon1 = cv2.imread(os.path.join(os.path.realpath(os.path.dirname(__file__)), 'icon1.png'), cv2.IMREAD_GRAYSCALE) icon2 = cv2.imread(os.path.join(os.path.realpath(os.path.dirname(__file__)), 'icon2.png'), cv2.IMREAD_GRAYSCALE) @lru_cache(maxsize=1) def get_activity_infos(): return load_game_data('activity_table')['basicInfo'] @lru_cache() def get_available_activity(display_type=None): activity_infos = get_activity_infos() name_set = set() for aid, info in activity_infos.items(): if info.get('displayType') in {'SIDESTORY', 'BRANCHLINE'}: if info['displayType'] == 'BRANCHLINE' or info.get('isReplicate'): raw_name = info['name'][:-3] if info.get('isReplicate') else info['name'] if display_type is None or display_type == info['displayType']: name_set.add(raw_name) return name_set def get_activity_name(activity): name = activity['name'] if activity['isReplicate']: return name[:-3] return name def crop_image_only_outside(gray_img, raw_img, threshold=128, padding=3): mask = gray_img > threshold m, n = gray_img.shape mask0, mask1 = mask.any(0), mask.any(1) col_start, col_end = mask0.argmax(), n - mask0[::-1].argmax() row_start, row_end = mask1.argmax(), m - mask1[::-1].argmax() return raw_img[row_start - padding:row_end + padding, col_start - padding:col_end + padding] class StartSpStageAddon(BaseAddOn): def __init__(self, helper=None): super(StartSpStageAddon, self).__init__(helper) self.scale = self.helper.viewport[1] / 720 if self.helper.viewport != (1280, 720): logger.warning('It may produce some weird effects when the resolution is not 1280x720.') def apply_scale(self, value): if self.scale == 1: return value return int(value * self.scale) def run(self, stage_code: str, repeat_times: int = 1000, try_current_activity=True): stage_code = stage_code.upper() if try_current_activity: try: return ActivityAddOn(self.helper).run(stage_code, repeat_times) except: pass stage_code_map, zone_linear_map = get_stage_map() if stage_code not in stage_code_map: raise RuntimeError(f'无效的关卡: {stage_code}') stage = stage_code_map[stage_code] activity_id = stage['zoneId'].split('_')[0] activity_infos = get_activity_infos() activity = activity_infos[activity_id] logger.debug(f'stage: {stage}, activity: {activity}') self.enter_activity(activity) stage_linear = zone_linear_map[stage['zoneId']] self.helper.find_and_tap_stage_by_ocr(None, stage_code, stage_linear) return self.helper.module_battle_slim(None, repeat_times) def enter_activity(self, activity): vh = self.vh act_name = get_activity_name(activity) if act_name not in get_available_activity(): raise RuntimeError(f'无效的活动: {act_name}') self.open_terminal() if activity['displayType'] == 'BRANCHLINE': self.tap_branch_line() else: self.tap_side_story() crop_flag = activity['displayType'] == 'SIDESTORY' act_pos_map = self.get_all_act_pos(crop_flag) if act_name not in act_pos_map: if activity['displayType'] == 'BRANCHLINE': raise RuntimeError(f'找不到相应活动: {act_name}') last_acts = act_pos_map.keys() while True: origin_x = random.randint(int(5.833 * vh), int(24.861 * vh)) origin_y = random.randint(int(57.222 * vh), int(77.917 * vh)) move = -random.randint(int(vh // 5), int(vh // 4)) 
self.helper.adb.touch_swipe2((origin_x, origin_y), (random.randint(-20, 20), move), random.randint(900, 1200)) act_pos_map = self.get_all_act_pos(crop_flag) if act_name in act_pos_map: break if last_acts == act_pos_map.keys(): raise RuntimeError(f'找不到相应活动: {act_name}') last_acts = act_pos_map.keys() logger.info(f'switch to {act_name}') self.click(act_pos_map[act_name], 1) self.tap_enter_activity() def tap_back(self): vw, vh = self.vw, self.vh self.helper.tap_rect((2.222 * vh, 1.944 * vh, 22.361 * vh, 8.333 * vh)) time.sleep(0.5) def get_all_act_pos(self, crop=False): act_map = {} screen = self.screenshot() cv_screen = pil2cv(screen) for icon in [icon1, icon2]: act_map.update(self.get_act_pos_by_icon(cv_screen, icon, crop)) logger.info(act_map) return act_map def get_act_pos_by_icon(self, cv_screen, icon, crop=False): vh, vw = self.vh, self.vw raw_screen = cv_screen.copy() if self.scale != 1: cv_screen = cv2.resize(cv_screen, (int(self.helper.viewport[0] / self.scale), 720)) roi = crop_cv_by_rect(cv_screen, (0, 0, 10.000 * vh, 100.000 * vh)) roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY) result = cv2.matchTemplate(roi, icon, cv2.TM_CCOEFF_NORMED) loc = np.where(result >= 0.8) tag_set = set() tag_set2 = set() res = {} dbg_screen = raw_screen.copy() available_activity = get_available_activity() for pt in zip(*loc[::-1]): pos_key = (pt[0] // 100, pt[1] // 100) pos_key2 = (int(pt[0] / 100 + 0.5), int(pt[1] / 100 + 0.5)) if pos_key in tag_set or pos_key2 in tag_set2: continue tag_set.add(pos_key) tag_set2.add(pos_key2) if icon1 is icon: x, y = (int(pt[0]) + 35, int(pt[1]) - 6) tw, th = map(self.apply_scale, (180, 40)) else: x, y = (int(pt[0]) + 35, int(pt[1]) - 3) tw, th = map(self.apply_scale, (150, 30)) l, t = map(self.apply_scale, (x, y)) tag_img = raw_screen[t:t + th, l:l + tw] if crop: gray_tag = cv2.cvtColor(tag_img, cv2.COLOR_RGB2GRAY) tag_img = crop_image_only_outside(gray_tag, tag_img, 160) factor = 2.5 - self.scale if factor > 1: # print(factor) tag_img = cv2.resize(tag_img, (0, 0), fx=factor, fy=factor, interpolation=cv2.INTER_LINEAR) # show_img(tag_img) # conv-lite-fc has better accuracy, but it is slower than densenet-lite-fc. name = ocr_and_correct(tag_img, available_activity, model_name='densenet-lite-fc', log_level=logging.INFO) if name: res[name] = (int(l + 85 * self.scale), int(t + 20 * self.scale)) cv2.rectangle(dbg_screen, (l, t), (l + tw, t + th), (255, 255, 0), 2) # show_img(dbg_screen) return res def tap_side_story(self): vh, vw = self.vh, self.vw logger.info('open side story view') self.helper.tap_rect((44.297 * vw, 88.611 * vh, 56.406 * vw, 98.750 * vh)) time.sleep(1) def tap_branch_line(self): logger.info('open branch line view') vh, vw = self.vh, self.vw self.helper.tap_rect((29.375 * vw, 88.611 * vh, 41.719 * vw, 98.750 * vh)) time.sleep(1) def tap_enter_activity(self): logger.info('enter activity') vh, vw = self.vh, self.vw self.helper.tap_rect((100 * vw - 24.583 * vh, 69.167 * vh, 100 * vw - 8.750 * vh, 75.556 * vh)) time.sleep(1) def open_terminal(self): self.helper.back_to_main() logger.info('open terminal') self.helper.tap_quadrilateral(imgreco.main.get_ballte_corners(self.screenshot())) time.sleep(1) if __name__ == '__main__': StartSpStageAddon().run('CB-10', 0, False) # StartSpStageAddon().get_all_act_pos()
addons/start_sp_stage/__init__.py
8,440
print(factor) show_img(tag_img) conv-lite-fc has better accuracy, but it is slower than densenet-lite-fc. show_img(dbg_screen) StartSpStageAddon().get_all_act_pos()
164
en
0.664941
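get_act_pos_by_icon above locates activity entries by grayscale template matching with a 0.8 score threshold and a coarse grid key to drop near-duplicate hits. The same pattern, reduced to a standalone sketch with placeholder file names:

import cv2
import numpy as np

screen = cv2.imread('screenshot.png', cv2.IMREAD_GRAYSCALE)   # placeholder inputs
icon = cv2.imread('icon1.png', cv2.IMREAD_GRAYSCALE)

result = cv2.matchTemplate(screen, icon, cv2.TM_CCOEFF_NORMED)
hits = np.where(result >= 0.8)

seen = set()
for x, y in zip(*hits[::-1]):      # np.where returns (rows, cols); reverse to (x, y)
    key = (x // 100, y // 100)     # bucket nearby hits so each icon counts once
    if key in seen:
        continue
    seen.add(key)
    print('icon found near', x, y)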
import os
import sys

sys.path.append(os.path.dirname(__file__))


class AbstractSystemMeter:
    """Common system meter interface for all resource monitoring.

    For each system resource to monitor, a wrapper class will be written as a
    subclass of this one. This way we have a common "interface" for all system
    resources to test. This approach is chosen since Python has no real
    interfaces like Java or C-Sharp.
    """

    def __init__(self, resource_name):
        self.resource_name = resource_name

    def measure(self, func):
        self._start()
        func()
        return self._stop()

    def _start(self):
        raise NotImplementedError("The method is not implemented yet.")

    def _stop(self):
        raise NotImplementedError("The method is not implemented yet.")
measure/system/AbstractSystemMeter.py
800
Common system meter interface for all resource monitoring. For each system resource to monitor, a wrapper class will be written as a subclass of this one. This way we have a common "interface" for all system resources to test. This approach is chosen since Python has no real interfaces like Java or C-Sharp.
310
en
0.928172
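AbstractSystemMeter defines measure() around two abstract hooks, _start() and _stop(). A hypothetical subclass shows how that interface is meant to be filled in (the wall-clock meter here is an illustration, not part of the original module):

import time

class WallClockMeter(AbstractSystemMeter):
    def __init__(self):
        super().__init__("wall_clock")

    def _start(self):
        self._t0 = time.perf_counter()

    def _stop(self):
        return time.perf_counter() - self._t0

elapsed = WallClockMeter().measure(lambda: sum(range(1_000_000)))
print(f"wall clock seconds: {elapsed:.4f}")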
#!/usr/bin/python from __future__ import division import sys import math import cmath import numpy as np from numpy import genfromtxt import csv from decimal import Decimal import os import random from lyrics import * # BEATLES: Bundle of Essential and Assistive Tools Library for Electronic Structure # A tribute to the Beatles # # Updated June 14, 2020 by Hassan Harb # # / | \ # / | \ # /O O | O O\ # //|\ /|\ /|\ /|\\ # /=/ \=/ \= / \=/ \=\ # / == == == == == \ # / == == == == == \ # (The original Beatles) # (ASCII retrieved from https://www.asciiart.eu/music/musicians/beatles ) # ######################################################################### # # NBasGrab: reads in a name of .fchk file # output: -Number of basis functions # -Charge # -Multiplicity # -Number of Atoms # -Cartesian Coordinates # -Atomic Symbols # -SCF Energy # -Total Energy (needs to be added) # Section 1: Reading from gaussian formatted checkpoint file def NBasGrab(filename): NBasis = 0 NElem = 0 SCFEnergy = 0.0 Charge = 0 Multiplicity = 0 NAtoms = 0 temp = 1 with open(filename, 'r') as origin: for line in origin: if "Number of basis functions" in line: words = line.split() for i in words: for letter in i: if(letter.isdigit()): NBasis = NBasis*10 + int(letter) if "Charge " in line: words = line.split() for i in words: for letter in i: if(letter=="-"): temp = -1 if(letter.isdigit()): Charge = Charge*10 + int(letter) Charge = Charge*temp if "Multiplicity" in line: words = line.split() for i in words: for letter in i: if(letter.isdigit()): Multiplicity = Multiplicity*10 + int(letter) if "Number of atoms" in line: words = line.split() for i in words: for letter in i: if(letter.isdigit()): NAtoms = NAtoms*10 + int(letter) if "SCF Energy" in line: words = line.split() # print "SCF Energy = ", words[3], " Hartree" SCFEnergy = float(words[3]) # print "SCF Energy (float) = ", SCFEnergy # if "Total Energy" in line: # words = line.split() # TotalEnergy = float(words[3]) # print "Total Energy = ", TotalEnergy, " Hartree" NElem = NBasis*NBasis # print "Number of Basis Functions (subroutine) = ", NBasis, "\n" # print "Charge (subroutine) = ", Charge, "\n" return NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy # GeomGet: reads in the file name, number of atoms # Output: -One dimensional vector (NAtoms * 3) that includes the cartesian coordinates of each atom # def GeomGet(filename,NAtoms): p = 0 r = 0 n = 1 NElements = NAtoms * 3 RawCart = np.zeros(NElements) if (NElements%5 == 0): n = 0 RawCartLines = int(NElements/5) + n # print "Raw Cart lines = ", RawCartLines # print "Number of Atoms =", NAtoms # print "Number of coordinates =", NElements with open(filename,'r') as origin: for i, line in enumerate(origin): if "Current cartesian coordinates" in line: i = i + 1 pointer = i # print "Cartesian Coordinates starts at line :", pointer endpointer = pointer + RawCartLines - 1 # print "Cartesian Coordinates ends at line :", endpointer for m in range(0,endpointer - pointer +1): nextline = origin.next() nextline = nextline.split() for p in range(p,len(nextline)): RawCart[r] = nextline[p] r = r + 1 p = 0 # print "Raw Cart (subroutine) = ", RawCart RawCart = RawCart/1.88973 # print "Raw Cart (converted to Angstroms) = ", RawCart return RawCart # GetAtoms: Reads in file name, number of atoms # output: -One dimensional vector (NAtoms) that contains the atomic numbers of the atoms # def GetAtoms(filename1,NAtoms): p = 0 r = 0 n = 1 AtomicNum = np.zeros(NAtoms) if (NAtoms%6 ==0): n = 0 AtomLines = int(NAtoms/6) + n with 
open(filename1,'r') as origin: for i, line in enumerate(origin): if "Atomic numbers" in line: i = i + 1 pointer = i endpointer = pointer + AtomLines -1 for m in range(0, endpointer - pointer + 1): nextline = origin.next() nextline = nextline.split() for p in range(p,len(nextline)): AtomicNum[r] = nextline[p] r = r + 1 p = 0 return AtomicNum # MatGrab: Reads in filename, NBasis, user-defined switch # Output: -Alpha MO Coefficients (Done) # -Beta MO Coefficients (Done) # -Alpha Density Matrix (Done) # -Beta Density Matrix (Done) # -Alpha MO Energies (Done) # -Beta MO Energies (Done) # # Switch: 1 = Alpha MO Coefficients # -1 = Beta MO Coefficients # 2 = Alpha and Beta Density Matrices # 3 = Alpha MO Energies # -3 = Beta MO Energies # def MatGrab(filename,NBasis,switch): if (switch == 1): filename1 = filename MOElements = NBasis * NBasis MOlines = int(MOElements/5) + 1 if (NBasis%5 == 0): MOlines = MOlines - 1 p = 0 r = 0 AOE = 0 MOrawa = np.zeros(NBasis*NBasis) with open(filename1,'r') as origin: for i, line in enumerate(origin): if "Alpha Orbital Energies" in line: AOE = i if "Alpha MO coefficients" in line: i=i+1 AMO=i # print "Alpha MO coefficients starts at line :", i j=i+MOlines-1 # print "Alpha MO coefficients ends at line :", j for m in range(0,j-i+1): nextline = origin.next() nextline = nextline.split() for p in range(p,len(nextline)): MOrawa[r] = nextline[p] r = r+1 p = 0 # print "MO Raw = ", MOrawa return MOrawa if (switch == -1): filename1 = filename MOElements = NBasis * NBasis MOlines = int(MOElements/5) + 1 if (NBasis%5 == 0): MOlines = MOlines - 1 p = 0 r = 0 BOE = 0 BMO = 0 MOrawb = np.zeros(NBasis*NBasis) with open(filename1,'r') as origin: for i, line in enumerate(origin): if "Beta Orbital Energies" in line: BOE = i if "Beta MO coefficients" in line: i=i+1 BMO=i j=i+MOlines-1 for m in range(0,j-i+1): nextline = origin.next() nextline = nextline.split() for p in range(p,len(nextline)): MOrawb[r] = nextline[p] r = r+1 p = 0 # print "MO Raw = ", MOrawb return MOrawb if (switch == 2): filename1 = filename PElements = int(NBasis*(NBasis+1)/2) Plines = int(PElements/5) + 1 TotalPraw = np.zeros(PElements) SpinPraw = np.zeros(PElements) with open(filename1,'r') as origin: for i, line in enumerate(origin): if "Total SCF Density" in line: i=i+1 r = 0 p = 0 # print "Total SCF Density starts at line :", i j=i+Plines-1 # print "Total SCF Density ends at line :", j for m in range(0,j-i+1): nextline = origin.next() nextline = nextline.split() for p in range(0,len(nextline)): if (r != PElements): TotalPraw[r] = nextline[p] r = r+1 p = 0 # HH + : Bug ... :( with open(filename1,'r') as origin: for i, line in enumerate(origin): if "Spin SCF Density" in line: # print "Found Spin density!" i=i+1 r = 0 p = 0 # print "Spin SCF Density starts at line: ", i j=i+Plines-1 # print "Spin SCF Density ends at line: ", j for m in range(0,j-i+1): nextline = origin.next() nextline = nextline.split() for p in range(p,len(nextline)): if (r != PElements): SpinPraw[r] = nextline[p] r = r+1 p = 0 # HH - : End of bug (hopefully!) 
PalphaRaw = (np.add(TotalPraw,SpinPraw)) * 0.5 PbetaRaw = (np.subtract(TotalPraw,SpinPraw)) * 0.5 Palpha = symmetrize(PalphaRaw) Pbeta = symmetrize(PbetaRaw) return Palpha, Pbeta if (switch == 3): filename1 = filename AlphaMO = np.zeros(NBasis) AlphaMOlines = int(NBasis/5) + 1 if (NBasis % 5 == 0): AlphaMOlines = AlphaMOlines - 1 with open(filename1,'r') as origin: for i, line in enumerate(origin): if "Alpha Orbital Energies" in line: i = i + 1 r = 0 p = 0 # print "Alpha MO Energies starts at line: ", i j = i + AlphaMOlines - 1 # print "Alpha MO Energies ends at line: ", j for m in range(0,j-i+1): nextline = origin.next() nextline = nextline.split() for p in range(p,len(nextline)): AlphaMO[r] = nextline[p] r = r + 1 p = 0 # print "Alpha MO energies = ", AlphaMO return AlphaMO if (switch == -3): filename1 = filename BetaMO = np.zeros(NBasis) BetaMOlines = int(NBasis/5) + 1 if (NBasis % 5 == 0): BetaMOlines = BetaMOlines - 1 with open(filename1,'r') as origin: for i, line in enumerate(origin): if "Beta Orbital Energies" in line: i = i + 1 r = 0 p = 0 # print "Beta MO Energies starts at line: ", i j = i + BetaMOlines - 1 # print "Beta MO Energies ends at line: ", j for m in range(0,j-i+1): nextline = origin.next() nextline = nextline.split() for p in range(p,len(nextline)): BetaMO[r] = nextline[p] r = r + 1 p = 0 # print "Beta MO energies = ", BetaMO return BetaMO # sci_notation: reads in a number # output: prints the number in the desired scientific notation. note that this function has a different output than the one found in nio.py # def sci_notation(n): a = '%.8f' % n return '%.8f' % Decimal(n.real) # fchk_notation: reads in a number # output: prints the number in the desired notation for fchk files # def fchk_notation(n): a = '%.8E' % n return '%.8E' % Decimal(n.real) # AtomicSymbol: Reads in atomic number of the element # Output: -Atomic Symbol # def AtomicSymbol(AtomicNumber): p = AtomicNumber - 1 PTlist = ['H','He','Li','Be','B','C','N','O','F','Ne','Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca','Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn','Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ah','Cd','In','Sn','Sb','Te','I','Xe','Cs','Ba','La','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb','Lu','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hb','Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th','Pa','U','Np','Pu','Am','Cm','Bk','Cf','Es','Fm','Md','No','Lr','Rf','Db','Sg','Bh','Hs','Mt','Ds','Rg','Cn','Uut','Fl','Uup','Lv','Uus','Uuo'] # print "There are currently ", len(PTlist), " atoms defined" return PTlist[p] # Symmetrize: Reads in a packed symmetric column matrix into NBasis x NBasis square matrix # Output: -Matrix(NBasis,NBasis) # def symmetrize(a): Nbas = int((np.sqrt(8*len(a)+1)-1)/2) b = np.zeros((Nbas,Nbas)) n = 0 for i in range(0,Nbas): for j in range(0,i+1): b[i,j]=a[n] b[j,i]=a[n] n=n+1 return b # Column2Square: Reads in a packed column matrix, number of basis functions. # Output: -Matrix(NBasis,NBasis) def column2square(A,NBasis): C = np.zeros((NBasis,NBasis)) t=0 for i in range(0,NBasis): for j in range(0,NBasis): C[j,i]=float(A[t]) t=t+1 return C # GetOverlap: Reads in packed column matrix, number of basis functions. 
# Output: -Overlap Matrix (NBasis,NBasis) def GetOverlap(A,NBasis): C = column2square(A,NBasis) CInv = np.linalg.inv(C) S = np.dot(np.transpose(CInv),CInv) return S # PrintSI: Reads in filename, user-defined switch # Output: -SCF Energy, Charge, Multiplicity, Geometry # # Switch: 1 = print to new file (filename1-SI.txt) # -1 = print to screen # def PrintSI(filename1,switch): NBasis, NElementsGrab, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename1) AtomicNum = GetAtoms(filename1,NAtoms) RawCart = GeomGet(filename1,NAtoms) Cart = np.resize(RawCart,(NAtoms,3)) filename2 = os.path.splitext(filename1)[0] + "-SI.txt" filename1 = os.path.splitext(filename1)[0] if (switch == 1): with open(filename2,'w') as f2: f2.write("SI info for ") f2.write(filename1) f2.write("\n\n") f2.write("SCF Energy = ") f2.write(str(SCFEnergy)) f2.write(" Hartree") f2.write("\n\n") f2.write(str(Charge)) f2.write(" ") f2.write(str(Multiplicity)) f2.write("\n") for i in range(0,NAtoms): h = i + 1 z = AtomicNum[i] Atom = AtomicSymbol(int(z)) f2.write(Atom) f2.write(" ") for j in range(0,3): if (Cart[i,j] >= 0): f2.write(" ") f2.write(str(sci_notation(Cart[i,j]))) f2.write(" ") f2.write("\n") f2.write(" ") f2.write("\n\n") return filename2 if (switch == -1): print "SCF Energy = ", SCFEnergy, " Hartree\n" print "Charge = ", Charge, "\n" print "Multiplicity = ", Multiplicity, "\n" print "Cartesian Geometry:\n" for i in range(0,NAtoms): h = i + 1 z = AtomicNum[i] Atom = AtomicSymbol(int(z)) print Atom, sci_notation(Cart[i,0]), sci_notation(Cart[i,1]), sci_notation(Cart[i,2]) print "\n" # CalcNO: Reads in filename, NBasis # Output: Natural Orbitals eigenvalues and eigenvectors (both alpha and beta) # def CalcNO(filename,NBasis): Palpha, Pbeta = MatGrab(filename,NBasis,2) C = MatGrab(filename,NBasis,1) S = GetOverlap(C,NBasis) Svals, Svecs = np.linalg.eig(S) Sval_minhalf = (np.diag(Svals**(0.5))) Shalf = np.dot(Svecs,np.dot(Sval_minhalf,np.transpose(Svecs))) NOvalsA, NOvecsA = np.linalg.eig(np.dot(Shalf,np.dot(Shalf,Palpha))) NOvalsB, NOvecsB = np.linalg.eig(np.dot(Shalf,np.dot(Shalf,Pbeta))) NOvalsA = NOvalsA.real NOvalsB = NOvalsB.real NOvecsA = NOvecsA.real NOvecsB = NOvecsB.real NOvecsA = np.dot(np.linalg.inv(Shalf),NOvecsA) NOvecsB = np.dot(np.linalg.inv(Shalf),NOvecsB) return NOvecsA, NOvecsB, NOvalsA, NOvalsB # NElec: Reads in filename # Output: Total number of electrons, Alpha Electrons, Beta Electrons # def NElec(filename): NElec = 0 NAlpha = 0 NBeta = 0 with open(filename, 'r') as origin: for line in origin: if "Number of electrons" in line: words = line.split() for i in words: for letter in i: if(letter.isdigit()): NElec = NElec*10 + int(letter) if "Number of alpha electrons" in line: words = line.split() for i in words: for letter in i: if(letter.isdigit()): NAlpha = NAlpha*10 + int(letter) if "Number of beta electrons" in line: words = line.split() for i in words: for letter in i: if(letter.isdigit()): NBeta = NBeta*10 + int(letter) return NElec, NAlpha, NBeta # OrbTransform: Reads in Alpha Density Matrix, Beta Density Matrix, Overlap Matrix, n # Output: New Density Matrices: P' = S**(1-n).P.S**(n) # def OrbTransform(Pa,Pb,S,n): Svals, Svecs = np.linalg.eig(S) Sval1 = np.diag(Svals**(n)) Sval2 = np.diag(Svals**(1-n)) Sdag1 = np.dot(Svecs,np.dot(Sval1,np.transpose(Svecs))) Sdag2 = np.dot(Svecs,np.dot(Sval2,np.transpose(Svecs))) PdagAlpha = np.dot(Sdag1,np.dot(Pa,Sdag2)) PdagBeta = np.dot(Sdag1,np.dot(Pb,Sdag2)) # print "OrbTransform Subroutine test:\n" # print "PdagAlpha = ", PdagAlpha, "\n" # print 
"PdagBeta = ", PdagBeta, "\n" OvalsA, OvecsA = np.linalg.eig(PdagAlpha) OvalsB, OvecsB = np.linalg.eig(PdagBeta) # print "OVals A = ", OvalsA, "\n" # print "OVecs A = ", OvecsA, "\n" # print "OVals B = ", OvalsB, "\n" # print "OVecs B = ", OvecsB, "\n" return PdagAlpha, PdagBeta, OvecsA, OvecsB, OvalsA, OvalsB # CartoZmat: Transforms Cartesian coordinates to z-matrix form # Input: NAtoms, RawCart, AtomicNum # Output: z-matrix printed on the screen # # Note that there are three other functions here, Dist, Angle, and Torsion. # They are used to calculate the appropriate parameters for the z-matrix # switch = 1 : print z-matrix to screen # switch = -1 : print z-matrix to new textfile def DistAB(e1,e2): R = 0.0 for i in range(len(e1)): R = R + (e1[i]-e2[i])**(2) R = R**(0.5) return R def AngleABC(e1,e2,e3): eab_x = (e2[0] - e1[0]) / DistAB(e1,e2) eab_y = (e2[1] - e1[1]) / DistAB(e1,e2) eab_z = (e2[2] - e1[2]) / DistAB(e1,e2) ebc_x = - (e3[0] - e2[0]) / DistAB(e2,e3) ebc_y = - (e3[1] - e2[1]) / DistAB(e2,e3) ebc_z = - (e3[2] - e2[2]) / DistAB(e2,e3) eab = [eab_x, eab_y, eab_z] ebc = [ebc_x, ebc_y, ebc_z] cos_angle = np.dot(eab,ebc) angle = np.arccos(cos_angle) / 3.1415926535 * 180 return eab, ebc, angle def TorsionABCD(e1,e2,e3,e4): eab_x = (e2[0] - e1[0]) / DistAB(e1,e2) eab_y = (e2[1] - e1[1]) / DistAB(e1,e2) eab_z = (e2[2] - e1[2]) / DistAB(e1,e2) ebc_x = (e3[0] - e2[0]) / DistAB(e2,e3) ebc_y = (e3[1] - e2[1]) / DistAB(e2,e3) ebc_z = (e3[2] - e2[2]) / DistAB(e2,e3) ecd_x = (e4[0] - e3[0]) / DistAB(e3,e4) ecd_y = (e4[1] - e3[1]) / DistAB(e3,e4) ecd_z = (e4[2] - e3[2]) / DistAB(e3,e4) eab = [eab_x, eab_y, eab_z] ebc = [ebc_x, ebc_y, ebc_z] ecd = [ecd_x, ecd_y, ecd_z] n1 = np.cross(eab,ebc) / (np.linalg.norm(np.cross(eab,ebc))) n2 = np.cross(ebc,ecd) / (np.linalg.norm(np.cross(ebc,ecd))) u1 = n2 u3 = ebc/np.linalg.norm(ebc) u2 = np.cross(u3,u1) cos_angle = np.dot(n1,n2) sin_angle = np.dot(n1,u2) angle = -math.atan2(sin_angle,cos_angle) / 3.1415926535 * 180 return angle def CartoZmat(RawCart,NAtoms,AtomicNum,filename2,switch): if (switch == 1): Cart = np.resize(RawCart,(NAtoms,3)) # print "Cartesian = ", Cart # print "Atoms list = ", AtomicNum for i in range(len(AtomicNum)): Symbol = AtomicSymbol(int(AtomicNum[i])) if (i > 2): e4 = [Cart[i,0],Cart[i,1],Cart[i,2]] e3 = [Cart[2,0],Cart[2,1],Cart[2,2]] e2 = [Cart[1,0],Cart[1,1],Cart[1,2]] e1 = [Cart[0,0],Cart[0,1],Cart[0,2]] R = DistAB(e4,e1) eab, ebc, A = AngleABC(e2,e1,e4) D = TorsionABCD(e4,e1,e2,e3) print Symbol, 1 , R , 2, A , 3, D elif (i > 1): e4 = [Cart[i,0],Cart[i,1],Cart[i,2]] e2 = [Cart[1,0],Cart[1,1],Cart[1,2]] e1 = [Cart[0,0],Cart[0,1],Cart[0,2]] R = DistAB(e4,e1) eab, ebc, A = AngleABC(e2,e1,e4) print Symbol, 1 , R , 2, A elif (i > 0): e4 = [Cart[i,0],Cart[i,1],Cart[i,2]] e1 = [Cart[0,0],Cart[0,1],Cart[0,2]] R = DistAB(e4,e1) print Symbol, 1, R elif (i == 0): print Symbol elif (switch == -1): Cart = np.resize(RawCart,(NAtoms,3)) #open new file filename = os.path.splitext(filename2)[0] + "-zmat.txt" with open(filename,'w') as f2: NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename2) f2.write("Z-Matrix file for ") f2.write(filename2) f2.write("\n\n") f2.write(str(Charge)) f2.write(" ") f2.write(str(Multiplicity)) f2.write("\n") for i in range(len(AtomicNum)): Symbol = AtomicSymbol(int(AtomicNum[i])) if (i > 2): e4 = [Cart[i,0],Cart[i,1],Cart[i,2]] e3 = [Cart[2,0],Cart[2,1],Cart[2,2]] e2 = [Cart[1,0],Cart[1,1],Cart[1,2]] e1 = [Cart[0,0],Cart[0,1],Cart[0,2]] R = DistAB(e4,e1) eab, ebc, A = AngleABC(e2,e1,e4) D = 
TorsionABCD(e4,e1,e2,e3) f2.write(Symbol) f2.write(" 1 ") f2.write(str(R)) f2.write(" 2 ") f2.write( str(A)) f2.write(" 3 ") f2.write(str(D)) f2.write("\n") elif (i > 1): e4 = [Cart[i,0],Cart[i,1],Cart[i,2]] e2 = [Cart[1,0],Cart[1,1],Cart[1,2]] e1 = [Cart[0,0],Cart[0,1],Cart[0,2]] R = DistAB(e4,e1) eab, ebc, A = AngleABC(e2,e1,e4) f2.write(str(Symbol)) f2.write(" 1 ") f2.write (str(R)) f2.write(" 2 ") f2.write(str(A)) f2.write("\n") elif (i > 0): e4 = [Cart[i,0],Cart[i,1],Cart[i,2]] e1 = [Cart[0,0],Cart[0,1],Cart[0,2]] R = DistAB(e4,e1) f2.write(Symbol) f2.write(" 1 ") f2.write(str(R)) f2.write("\n") elif (i == 0): f2.write(Symbol) f2.write("\n") # print "test test" # Section 2: Reading from gaussian matrix files # MatGrab2: Reads in matrices from gaussian matrix file # # Switch: 1 : Alpha Core Hamiltonian # -1 : Beta Core Hamiltonian # 2 : Alpha Fock Matrix # -2 : Beta Fock Matrix # 3 : Dipole matrix elements (x,y,z) [IN PROGRESS] def MatGrab2(filename,NBasis,switch): print "Reading from Matrix file\n" if (switch == 1): print "Reading Alpha Core Hamiltonian Matrix:\n" NElements = int(NBasis*(NBasis + 1)/2) print "Looking for ", NElements, " elements of the core hamilonian\n" CoreHRawa = np.zeros(NElements) p = 0 n = 0 r = 0 with open(filename,'r') as origin: for i, line in enumerate(origin): if "CORE HAMILTONIAN ALPHA" in line : while (p < (NElements)): NLines = NBasis - 5*r if (NLines < 0): print "Done Reading Core Hamolitonian" j = i+3 i = i + 4 end = j + NLines - 1 nextline = origin.next() for m in range(i,i+NLines): nextline = origin.next() words = nextline.split() for j in range(1,len(words)): CoreHRawa[p] = float(words[j].replace('D','E')) p = p + 1 r = r + 1 i = m - 2 return CoreHRawa if (switch == -1): print "Reading Beta Core Hamiltonian Matrix:\n" NElements = int(NBasis*(NBasis + 1)/2) print "Looking for ", NElements, " elements of the core hamilonian\n" CoreHRawb = np.zeros(NElements) p = 0 n = 0 r = 0 with open(filename,'r') as origin: for i, line in enumerate(origin): if "CORE HAMILTONIAN BETA" in line : while (p < (NElements)): NLines = NBasis - 5*r if (NLines < 0): print "Done Reading Core Hamolitonian" j = i+3 i = i + 4 end = j + NLines - 1 nextline = origin.next() for m in range(i,i+NLines): nextline = origin.next() words = nextline.split() for j in range(1,len(words)): CoreHRawb[p] = float(words[j].replace('D','E')) p = p + 1 r = r + 1 i = m - 2 return CoreHRawb if (switch == 2): print "Reading Alpha Fock Matrix:\n" NElements = int(NBasis*(NBasis + 1)/2) print "Looking for ", NElements, " elements of the fock matrix\n" FockRawA = np.zeros(NElements) p = 0 n = 0 r = 0 with open(filename,'r') as origin: for i, line in enumerate(origin): if "ALPHA FOCK MATRIX" in line : while (p < (NElements)): NLines = NBasis - 5*r if (NLines < 0): print "Done Reading fock matrix" j = i+3 i = i + 4 end = j + NLines - 1 nextline = origin.next() for m in range(i,i+NLines): nextline = origin.next() words = nextline.split() for j in range(1,len(words)): FockRawA[p] = float(words[j].replace('D','E')) p = p + 1 r = r + 1 i = m - 2 return FockRawA if (switch == -2): print "Reading Beta Fock Matrix:\n" NElements = int(NBasis*(NBasis + 1)/2) print "Looking for ", NElements, " elements of the fock matrix\n" FockRawB = np.zeros(NElements) p = 0 n = 0 r = 0 with open(filename,'r') as origin: for i, line in enumerate(origin): if "BETA FOCK MATRIX" in line : while (p < (NElements)): NLines = NBasis - 5*r if (NLines < 0): print "Done Reading fock matrix" j = i+3 i = i + 4 end = j + NLines - 1 nextline = 
origin.next() for m in range(i,i+NLines): nextline = origin.next() words = nextline.split() for j in range(1,len(words)): FockRawB[p] = float(words[j].replace('D','E')) p = p + 1 r = r + 1 i = m - 2 return FockRawB if (switch == 3): # print "Reading Dipole integrals, matrix x\n" NElements = int(NBasis*(NBasis +1)/2) # print "Looking for ", NElements, " elements of the Dipole integrals matrix x\n" DipX_Raw = np.zeros(NElements) p = 0 n = 0 r = 0 with open(filename,'r') as origin: for i, line in enumerate(origin): if " DIPOLE INTEGRALS, matrix 1" in line: while (p < NElements): NLines = NBasis - 5*r if (NLines < 0): # print "Done reading Dipole X matrix\n" j = i+3 i = i + 4 end = j + NLines -1 nextline = origin.next() words = nextline.split() for m in range(i,i+NLines): nextline = origin.next() words = nextline.split() for j in range(1,len(words)): DipX_Raw[p] = float(words[j].replace('D','E')) p = p + 1 r = r + 1 i = m - 2 # print "Dip X raw = ", DipX_Raw # print "Reading Dipole integrals, matrix y\n" NElements = int(NBasis*(NBasis +1)/2) print "Looking for ", NElements, " elements of the Dipole integrals matrix y\n" DipY_Raw = np.zeros(NElements) p = 0 n = 0 r = 0 with open(filename,'r') as origin: for i, line in enumerate(origin): if " DIPOLE INTEGRALS, matrix 2" in line: while (p < NElements): NLines = NBasis - 5*r if (NLines < 0): # print "Done reading Dipole Y matrix\n" j = i+3 i = i + 4 end = j + NLines -1 nextline = origin.next() words = nextline.split() for m in range(i,i+NLines): nextline = origin.next() words = nextline.split() for j in range(1,len(words)): DipY_Raw[p] = float(words[j].replace('D','E')) p = p + 1 r = r + 1 i = m - 2 # print "Dip Y raw = ", DipY_Raw # print "Looking for ", NElements, " elements of the Dipole integrals matrix z\n" DipZ_Raw = np.zeros(NElements) p = 0 n = 0 r = 0 with open(filename,'r') as origin: for i, line in enumerate(origin): if " DIPOLE INTEGRALS, matrix 3" in line: while (p < NElements): NLines = NBasis - 5*r if (NLines < 0): print "Done reading Dipole Z matrix\n" j = i+3 i = i + 4 end = j + NLines -1 nextline = origin.next() words = nextline.split() for m in range(i,i+NLines): nextline = origin.next() words = nextline.split() for j in range(1,len(words)): DipZ_Raw[p] = float(words[j].replace('D','E')) p = p + 1 r = r + 1 i = m - 2 # print "Dip Z raw = ", DipZ_Raw return symmetrizeMat(DipX_Raw), symmetrizeMat(DipY_Raw), symmetrizeMat(DipZ_Raw) # SymmetrizeMat: Reads in packed matrix (recovered from Matrix file) and prints out NBasis x NBasis matrix # Input: Packed lower triangular A # Output: N x N Matrix def symmetrizeMat(a): NBasis = int((np.sqrt(8*len(a)+1)-1)/2) NewMat = np.zeros((NBasis,NBasis)) NElements = len(a) t = 0 l = 0 start = 0 loop = NBasis nBlock = int(NBasis/5) nRem = NBasis%5 # print "nBlock = ", nBlock # print "nRem = ", nRem i = start j = start if (nBlock == 0): nBlock =1 while (l < nBlock): # print "retrieving block ", l for i in range (start,loop): for j in range(start,start+5): if (j<=i): # print "i,j = ",i,j NewMat[i,j] = a[t] NewMat[j,i] = a[t] # print "A[t]= ", a[t] t = t + 1 start = start + 5 l = l + 1 # print "t = ", t # print "values of i and j after nBlock loop is over: ", i, j j = j + 1 start = j # print "NBasis - nRem = ", NBasis -nRem i = NBasis - nRem while (i < NBasis): j = start while (j <= i): # print "i,j = ",i,j NewMat[i,j] = a[t] NewMat[j,i] = a[t] # print "A[t]= ", a[t] t = t + 1 j = j + 1 i = i + 1 # print "final value of t = ", t return NewMat # ERIRead: reads in regular 2e integrals from formatted 
matrix file # Note that to get these integrals, use SCF=Conventional and int=NoRaff (saves integrals to disk and prints out regular 2e integrals) # Input: matrix filename # Output: 2D Matrix, two columns: Column 1 = compound index, Column 2 = integral value # # Two small functions are defined here: swap(a,b) and Fourindex(a,b,c,d) def swap(a,b): return b,a def Fourindex(a,b,c,d): a = int(a) b = int(b) c = int(c) d = int(d) if (a < b): a, b = swap(a,b) if (c < d): c, d = swap(c,d) e = int(a*(a+1)/2 + b) f = int(c*(c+1)/2 + d) if (e<f): e,f = swap(e,f) g = e*(e +1)/2 + f return int(g) def ERIRead(filename,NBasis): NElements = 0 p = 0 print "Reading ERIs from Gaussian Matrix File" print "Subroutine can only read regular 2e integrals (NO RAFINETTI)" with open(filename,'r') as origin: for i, line in enumerate(origin): if "Label REGULAR 2E INTEGRALS" in line: print "Found 2e integrals!" words = line.split() print "Total number of elements = ", words[9] NElements = int(words[9]) print "NElements = ", NElements eri_raw = np.zeros((NElements,5)) while (p < NElements): nextline = origin.next() words = nextline.split() eri_raw[p,0] = words[1] eri_raw[p,1] = words[3] eri_raw[p,2] = words[5] eri_raw[p,3] = words[7] eri_raw[p,4] = float(words[9].replace('D','E')) # print "(",int(eri_raw[p,0]),int(eri_raw[p,1]),"|",int(eri_raw[p,2]),int(eri_raw[p,3]),") = ", eri_raw[p,4] p = p + 1 # print "ERI RAW = ", eri_raw NTotal = Fourindex(NBasis,NBasis,NBasis,NBasis) + 1 eri_array = np.zeros(NTotal) eri_compact = np.zeros((NElements,2)) print "Total length of sparse 1D vector =", NTotal print "Now forming compound indices" for i in range(0,NElements): eri_compact[i,0] = Fourindex(eri_raw[i,0], eri_raw[i,1], eri_raw[i,2], eri_raw[i,3]) eri_compact[i,1] = eri_raw[i,4] eri_array[int(eri_compact[i,0])] = eri_compact[i,1] # print "mu nu lambda sigma = ", int(eri_compact[i,0]), ", int = ", eri_compact[i,1], "One D array Value =", eri_array[eri_compact[i,0]] return eri_array # OVParse breaks down the MO coefficient matrix (NBasis x NBasis) into an occupied (NBasis x NOcc) and a virtual (NBasis x (Nbasis-NOcc)) matrices # Input: A: MO Coefficient (NBasis x NBasis) # NBasis # NOcc = number of electrons # # Output: A_Occ: rectangular NBasis x NOcc matrix: Columns of occupied MOs # A_Virt: rectangular NBasis x (NBasis - NOcc) matrix: Columns of virtual MOs ## Note TO SELF: Needs to be tested more, was only tested on H2 and V jobs. def OVParse(A,NBasis,NOcc): A_Occ = np.zeros((NBasis,NOcc)) A_Virt = np.zeros((NBasis,NBasis-NOcc)) for i in range(0,NOcc): A_Occ[:,i] = A[:,i] for j in range(0,NBasis-NOcc): A_Virt[:,j] = A[:,j+NOcc] return A_Occ, A_Virt # Biorthog: Calculates the overlap between two sets of MO Coefficients, prints out the final value of the overlap # Input: A, B: MO Coefficients, can either be full or parsed (using OVParse subroutine) # S: AO overlap matrix # # Output: the final value of the overlap # # Option: switch: 1 : print all relevant matrices # -1 : Dont print any matrices # def Biorthog(A,B,S,switch): # eqn numbers based on personal notes D = np.dot(np.transpose(B),np.dot(S,A)) # eq. 1 u, d, v = np.linalg.svd(D,full_matrices=True) # eq. 
2 DtD = np.dot(np.transpose(D),D) l, V = np.linalg.eig(DtD) U = np.dot(D,V) if (switch==1): print "D = ", D print "DtD = ", DtD print "lambdas = ", l print "Eig Vecs of DtD = ", V print "Determinants = ", np.linalg.det(u), np.linalg.det(v) print "u = ", u print "v = ", v overlap = np.linalg.det(u)*np.prod(d)*np.linalg.det(v) return d, u, v, D # PickColumn: Subroutine that selects a specific column from a two dimensional matrix (NBasis,NBasis), outputs an array (NBasis,1) # Input: A: Two dimensional matrix # NBasis: Number of basis functions for A # i: the position of the column to be selected # # Output: One dimensional array (NBasis,1) that is the i-th column of matrix A # def PickColumn(A,NBasis,i): A_Column = np.zeros((NBasis,1)) for j in range(0,NBasis): A_Column[j,0] = A[j,i] return A_Column # WriteMOs: Subroutine that replaces the MO coefficients and orbital energies in a fchk file # Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbtial energies alpha, orbital energies beta, number of basis functions # # Output: None. New file will be generated (filename3) that has the new Orbital coefficients and energies # def WriteMOs(filename1,filename3,V1,V2,e1,e2,NBasis): MOlines = int(len(V1)/5) + 1 p = 0 r = 0 AOE = 0 with open(filename1,'r') as origin: for i, line in enumerate(origin): if "Alpha Orbital Energies" in line: AOE = i if "Alpha MO coefficients" in line: i=i+1 AMO=i j=i+MOlines-1 for m in range(0,j-i+1): nextline = origin.next() nextline = nextline.split() for p in range(p,len(nextline)): r = r+1 p = 0 if "Beta Orbital Energies" in line: BOE = i if "Beta MO coefficients" in line: r = 0 i=i+1 BMO = i j=i+MOlines-1 for m in range(0,j-i+1): nextline = origin.next() nextline = nextline.split() for p in range(p,len(nextline)): r = r+1 p = 0 pointer=0 counter=1 with open(filename1,'r') as origin: data = origin.readlines() if "Alpha Orbital Energies" in line: AOE = i BOE = AOE + int(NBasis/5) + 1 with open(filename3,'w') as f2: print "Writing results to new output file: ", filename3, " ... " while (pointer < AOE+1): f2.write(data[pointer]) pointer = pointer+1 for j in range(0,NBasis): f2.write(" ") if (e1[j] >= 0): f2.write(" ") f2.write(str(fchk_notation(e1[j].real))) if (counter%5 == 0): f2.write("\n") counter=0 counter=counter+1 counter =1 BOE = AOE + (int(NBasis/5)+2) if (NBasis%5 != 0): f2.write("\n") if (NBasis%5 == 0): BOE = BOE - 1 f2.write(data[BOE]) for j in range(0,NBasis): f2.write(" ") if (e2[j] >= 0): f2.write(" ") f2.write(str(fchk_notation(e2[j].real))) if (counter%5 ==0): f2.write("\n") counter=0 counter = counter+1 counter =1 AMO = BOE + (int(NBasis/5)+2) if (NBasis%5 != 0): f2.write("\n") if (NBasis%5 == 0): AMO = AMO - 1 f2.write(data[AMO]) for i in range(0,NBasis): for j in range(0,NBasis): f2.write(" ") if (V1[j,i] >= 0): f2.write(" ") f2.write(str(fchk_notation(V1[j,i].real))) if (counter%5 ==0): f2.write("\n") counter=0 counter = counter + 1 counter = 1 BMO = AMO + (int(NBasis*NBasis/5))+2 if (NBasis%5 != 0): f2.write("\n") if (NBasis%5 == 0): BMO = BMO - 1 f2.write(data[BMO]) for i in range(0,NBasis): for j in range(0,NBasis): f2.write(" ") if (V2[j,i] >= 0): f2.write(" ") f2.write(str(fchk_notation(V2[j,i].real))) if (counter%5 ==0): f2.write("\n") counter=0 counter = counter + 1 counter = 1 if (NBasis%5 != 0): f2.write("\n") pointer = BMO + (int(NBasis*NBasis/5))+2 while (pointer < len(data)): f2.write(data[pointer]) pointer = pointer+1 print "Done." 
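# --- Illustration (not part of the original BEATLES.py) ---------------------------
# The packed-storage conventions used above are easiest to see on a tiny case:
# symmetrize() expects the lower triangle of a symmetric NBasis x NBasis matrix as a
# flat vector of length NBasis*(NBasis+1)/2, and the Fourindex() helper used by
# ERIRead folds the permutational symmetry of (mu nu | lambda sigma) into a single
# compound index. pack_lower_triangle() and the 3x3 test matrix are added here purely
# for illustration; call fourindex_check() to run the assertions.
def pack_lower_triangle(M):
    # Inverse of symmetrize(): pack a symmetric matrix row by row, lower triangle only.
    n = M.shape[0]
    return np.array([M[i, j] for i in range(n) for j in range(i + 1)])

def fourindex_check():
    A = np.array([[1.0, 2.0, 4.0],
                  [2.0, 3.0, 5.0],
                  [4.0, 5.0, 6.0]])
    packed = pack_lower_triangle(A)              # [1, 2, 3, 4, 5, 6]
    assert np.allclose(symmetrize(packed), A)    # symmetrize() rebuilds the square matrix
    # Fourindex() returns the same compound index for all equivalent orderings:
    assert Fourindex(2, 1, 4, 3) == Fourindex(1, 2, 3, 4) == Fourindex(4, 3, 2, 1)
# -----------------------------------------------------------------------------------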
# OVMerge: Does the opposite of OVParse, merges back the Occ and Virt components of the MO Coefficient matrix # Input : A (Occ Matrix), B(Vir Matrix), Number of occupied orbitals, NBasis # # Output : V = Full MO Coefficient Matrix # # (this subroutine has the exact opposite functionality of OVParse) # def OVMerge(A,B,NOcc,NBasis): V = np.zeros((NBasis,NBasis)) for i in range(0,NOcc): V[:,i] = A[:,i] for j in range(NOcc,NBasis): V[:,j] = B[:,j-NOcc] return V # DistanceMatrix: Calculates distances between all atoms in a molecule # Input : fchk file name # # Output : Returns Distance Matrix and Atomic Symbol array. # # Unfinished part: generate and return a distance matrix (NAtoms x NAtoms) # def DistanceMatrix(filename): NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename) Atomic_Numbers = GetAtoms(filename,NAtoms) Atomic_Symbol = [""]*NAtoms for i in range(0,NAtoms): Atomic_Symbol[i] = AtomicSymbol(int(Atomic_Numbers[i])) RawCart = GeomGet(filename,NAtoms) Cart = np.resize(RawCart,(NAtoms,3)) Distance_Matrix = np.zeros((NAtoms,NAtoms)) for i in range(0,NAtoms): for j in range(i+1,NAtoms): e2 = [Cart[j,0],Cart[j,1],Cart[j,2]] e1 = [Cart[i,0],Cart[i,1],Cart[i,2]] Distance_Matrix[i,j] = np.around(DistAB(e1,e2),decimals=2) Distance_Matrix[j,i] = np.around(DistAB(e1,e2),decimals=2) return Distance_Matrix, Atomic_Symbol # PrintLyrics: A Function made just for fun, prints out a random quote from the Beatles songs # Input: None, but reads in the lyrics.py library file (partially complete) # # Output: None, prints lyrics. # def PrintLyrics(): n = random.randint(1,32) LyricsLibrary(n) # GetAtomicWeights: Grabs the "real atomic weights" from the fchk file # Input: filename, Number of Atoms # # Output: One dimensional array, AtomicWeight, of dimensions NAtoms. # def GetAtomicWeights(filename1,NAtoms): p = 0 r = 0 n = 1 AtomicWeight = np.zeros(NAtoms) if (NAtoms%5 ==0): n = 0 AtomLines = int(NAtoms/5) + n with open(filename1,'r') as origin: for i, line in enumerate(origin): if "Real atomic weights" in line: i = i + 1 pointer = i endpointer = pointer + AtomLines -1 for m in range(0, endpointer - pointer + 1): nextline = origin.next() nextline = nextline.split() for p in range(p,len(nextline)): AtomicWeight[r] = nextline[p] r = r + 1 p = 0 AtomicWeight = np.around(AtomicWeight,decimals=3) return AtomicWeight # WriteMOsQChem: Subroutine that replaces the MO coefficients and orbital energies in a fchk file (QChem Version) # Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbtial energies alpha, orbital energies beta, number of basis functions # # Output: None. New file will be generated (filename3) that has the new Orbital coefficients and energies # def WriteMOsQChem(filename1,filename3,V1,V2,e1,e2,NBasis): MOlines = int(len(V1)/5) + 1 p = 0 r = 0 with open(filename1,'r') as origin: for i, line in enumerate(origin): if "Alpha Orbital Energies" in line: AOE = i+1 AOE_header = line if "Alpha MO coefficients" in line: AMO = i+1 AMO_header = line if "Beta Orbital Energies" in line: BOE = i+1 BOE_header = line if "Beta MO coefficients" in line: BMO = i+1 BMO_header = line pointer=0 counter=1 Start_point = min(AMO,BMO,AOE,BOE) with open(filename1,'r') as origin: data = origin.readlines() with open(filename3,'w') as f2: print "Writing results to new output file: ", filename3, " ... 
" while (pointer < Start_point-1): f2.write(data[pointer]) pointer = pointer+1 print "pointer at line = ", pointer f2.write(AOE_header) for j in range(0,NBasis): f2.write(" ") if (e1[j] >= 0): f2.write(" ") f2.write(str(fchk_notation(e1[j].real))) if (counter%5 == 0): f2.write("\n") counter=0 counter=counter+1 counter =1 BOE = AOE + (int(NBasis/5)+2) if (NBasis%5 != 0): f2.write("\n") if (NBasis%5 == 0): BOE = BOE - 1 f2.write(BOE_header) # f2.write("Beta Orbital Energies\n") for j in range(0,NBasis): f2.write(" ") if (e2[j] >= 0): f2.write(" ") f2.write(str(fchk_notation(e2[j].real))) if (counter%5 ==0): f2.write("\n") counter=0 counter = counter+1 counter =1 AMO = BOE + (int(NBasis/5)+2) if (NBasis%5 != 0): f2.write("\n") if (NBasis%5 == 0): AMO = AMO - 1 # f2.write("Alpha MO coefficients\n") f2.write(AMO_header) for i in range(0,NBasis): for j in range(0,NBasis): f2.write(" ") if (V1[j,i] >= 0): f2.write(" ") f2.write(str(fchk_notation(V1[j,i].real))) if (counter%5 ==0): f2.write("\n") counter=0 counter = counter + 1 counter = 1 BMO = AMO + (int(NBasis*NBasis/5))+2 if (NBasis%5 != 0): f2.write("\n") if (NBasis%5 == 0): BMO = BMO - 1 # f2.write("Beta MO Coefficients\n") f2.write(BMO_header) # f2.write(data[BMO]) for i in range(0,NBasis): for j in range(0,NBasis): f2.write(" ") if (V2[j,i] >= 0): f2.write(" ") f2.write(str(fchk_notation(V2[j,i].real))) if (counter%5 ==0): f2.write("\n") counter=0 counter = counter + 1 counter = 1 if (NBasis%5 != 0): f2.write("\n") pointer = BMO + (int(NBasis*NBasis/5))+2 # while (pointer < len(data)): # f2.write(data[pointer]) # pointer = pointer+1 print "Done." # ContractMat: Subroutine that reads in two square matrices (NBasis x NBasis) and returns their contraction (scalar) # Input: Matrices A and B (dimensions: NBasis x NBasis), NBasis # # Output: scalar m = Sum_(mu,nu) A_(mu,nu) * B_(mu,nu) # def ContractMat(A,B,NBasis): value = 0.0 for i in range(0,NBasis): for j in range(0,NBasis): value = value + A[i,j]*B[i,j] return value # Work in progress: Basis set reader: def ReadBasisSet(filename): NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename) print "Number of Basis functions =", NBasis print "Number of atoms =", NAtoms Atomic_Numbers = GetAtoms(filename,NAtoms) print "Atomic Numbers =", Atomic_Numbers Atomic_Symbol = [""]*NAtoms for i in range(0,NAtoms): Atomic_Symbol[i] = AtomicSymbol(int(Atomic_Numbers[i])) print "Atomic Symbols =", Atomic_Symbol
BEATLES.py
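The internal-coordinate helpers in BEATLES.py (DistAB, AngleABC, TorsionABCD) reduce to elementary vector algebra, which is easy to sanity-check numerically. Below is a minimal self-contained sketch, not part of the library, using a made-up right-angle geometry; only numpy is assumed, and the torsion case is left out.

import numpy as np

def dist(p, q):
    # Euclidean distance, the same quantity DistAB computes.
    return float(np.linalg.norm(np.subtract(p, q)))

def angle_abc(a, b, c):
    # Angle at the middle point b in degrees, the same angle AngleABC returns.
    u = np.subtract(a, b) / dist(a, b)
    v = np.subtract(c, b) / dist(c, b)
    return float(np.degrees(np.arccos(np.clip(np.dot(u, v), -1.0, 1.0))))

O  = [0.0, 0.0, 0.0]   # made-up coordinates, chosen so the answers are obvious
H1 = [1.0, 0.0, 0.0]
H2 = [0.0, 1.0, 0.0]
print(dist(O, H1))           # 1.0
print(angle_abc(H1, O, H2))  # 90.0, matching the angle AngleABC(H1, O, H2) computes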
"""Utility functions related to file operations.""" import copy import logging import os import subprocess import sys from argparse import Namespace from collections import OrderedDict from contextlib import contextmanager from pathlib import Path from tempfile import NamedTemporaryFile from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Set, Union # import wcmatch import wcmatch.pathlib from wcmatch.wcmatch import RECURSIVE, WcMatch from ansiblelint.config import BASE_KINDS, options from ansiblelint.constants import FileType if TYPE_CHECKING: # https://github.com/PyCQA/pylint/issues/3979 BasePathLike = os.PathLike[Any] # pylint: disable=unsubscriptable-object else: BasePathLike = os.PathLike _logger = logging.getLogger(__package__) def normpath(path: Union[str, BasePathLike]) -> str: """ Normalize a path in order to provide a more consistent output. Currently it generates a relative path but in the future we may want to make this user configurable. """ # conversion to string in order to allow receiving non string objects relpath = os.path.relpath(str(path)) abspath = os.path.abspath(str(path)) # we avoid returning relative paths that endup at root level if abspath in relpath: return abspath return relpath @contextmanager def cwd(path: Union[str, BasePathLike]) -> Iterator[None]: """Context manager for temporary changing current working directory.""" old_pwd = os.getcwd() os.chdir(path) try: yield finally: os.chdir(old_pwd) def expand_path_vars(path: str) -> str: """Expand the environment or ~ variables in a path string.""" # It may be possible for function to be called with a Path object path = str(path).strip() path = os.path.expanduser(path) path = os.path.expandvars(path) return path def expand_paths_vars(paths: List[str]) -> List[str]: """Expand the environment or ~ variables in a list.""" paths = [expand_path_vars(p) for p in paths] return paths def kind_from_path(path: Path, base: bool = False) -> FileType: """Determine the file kind based on its name. When called with base=True, it will return the base file type instead of the explicit one. That is expected to return 'yaml' for any yaml files. """ # pathlib.Path.match patterns are very limited, they do not support *a*.yml # glob.glob supports **/foo.yml but not multiple extensions pathex = wcmatch.pathlib.PurePath(path.absolute().resolve()) kinds = options.kinds if not base else BASE_KINDS for entry in kinds: for k, v in entry.items(): if pathex.globmatch( v, flags=( wcmatch.pathlib.GLOBSTAR | wcmatch.pathlib.BRACE | wcmatch.pathlib.DOTGLOB ), ): return str(k) # type: ignore if base: # Unknown base file type is default return "" if path.is_dir(): return "role" if str(path) == '/dev/stdin': return "playbook" # Unknown file types report a empty string (evaluated as False) return "" class Lintable: """Defines a file/folder that can be linted. Providing file content when creating the object allow creation of in-memory instances that do not need files to be present on disk. 
""" def __init__( self, name: Union[str, Path], content: Optional[str] = None, kind: Optional[FileType] = None, ): """Create a Lintable instance.""" # Filename is effective file on disk, for stdin is a namedtempfile self.filename: str = str(name) self.dir: str = "" self.kind: Optional[FileType] = None if isinstance(name, str): self.name = normpath(name) self.path = Path(self.name) else: self.name = str(name) self.path = name self._content = content # if the lintable is part of a role, we save role folder name self.role = "" parts = self.path.parent.parts if 'roles' in parts: role = self.path while role.parent.name != "roles" and role.name: role = role.parent if role.exists: self.role = role.name if str(self.path) in ['/dev/stdin', '-']: # pylint: disable=consider-using-with self.file = NamedTemporaryFile(mode="w+", suffix="playbook.yml") self.filename = self.file.name self._content = sys.stdin.read() self.file.write(self._content) self.file.flush() self.path = Path(self.file.name) self.name = 'stdin' self.kind = 'playbook' self.dir = '/' else: self.kind = kind or kind_from_path(self.path) # We store absolute directory in dir if not self.dir: if self.kind == "role": self.dir = str(self.path.resolve()) else: self.dir = str(self.path.parent.resolve()) # determine base file kind (yaml, xml, ini, ...) self.base_kind = kind_from_path(self.path, base=True) def __getitem__(self, key: Any) -> Any: """Provide compatibility subscriptable support.""" if key == 'path': return str(self.path) if key == 'type': return str(self.kind) raise NotImplementedError() def get(self, key: Any, default: Any = None) -> Any: """Provide compatibility subscriptable support.""" try: return self.__getitem__(key) except NotImplementedError: return default @property def content(self) -> str: """Retried file content, from internal cache or disk.""" if self._content is None: with open(self.path, mode='r', encoding='utf-8') as f: self._content = f.read() return self._content def __hash__(self) -> int: """Return a hash value of the lintables.""" return hash((self.name, self.kind)) def __eq__(self, other: object) -> bool: """Identify whether the other object represents the same rule match.""" if isinstance(other, Lintable): return bool(self.name == other.name and self.kind == other.kind) return False def __repr__(self) -> str: """Return user friendly representation of a lintable.""" return f"{self.name} ({self.kind})" def discover_lintables(options: Namespace) -> Dict[str, Any]: """Find all files that we know how to lint.""" # git is preferred as it also considers .gitignore git_command = ['git', 'ls-files', '-z'] out = None try: out = subprocess.check_output( git_command, stderr=subprocess.STDOUT, universal_newlines=True ).split("\x00")[:-1] _logger.info("Discovered files to lint using: %s", ' '.join(git_command)) except subprocess.CalledProcessError as exc: if not (exc.returncode == 128 and 'fatal: not a git repository' in exc.output): _logger.warning( "Failed to discover lintable files using git: %s", exc.output.rstrip('\n'), ) except FileNotFoundError as exc: if options.verbosity: _logger.warning("Failed to locate command: %s", exc) if out is None: exclude_pattern = "|".join(options.exclude_paths) _logger.info("Looking up for files, excluding %s ...", exclude_pattern) out = WcMatch('.', exclude_pattern=exclude_pattern, flags=RECURSIVE).match() return OrderedDict.fromkeys(sorted(out)) def guess_project_dir() -> str: """Return detected project dir or user home directory.""" try: result = subprocess.run( ["git", "rev-parse", 
"--show-toplevel"], stderr=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True, check=False, ) except FileNotFoundError: # if git is absent we use home directory return str(Path.home()) if result.returncode != 0: return str(Path.home()) return result.stdout.splitlines()[0] def expand_dirs_in_lintables(lintables: Set[Lintable]) -> None: """Return all recognized lintables within given directory.""" should_expand = False for item in lintables: if item.path.is_dir(): should_expand = True break if should_expand: # this relies on git and we do not want to call unless needed all_files = discover_lintables(options) for item in copy.copy(lintables): if item.path.is_dir(): for filename in all_files: if filename.startswith(str(item.path)): lintables.add(Lintable(filename))
src/ansiblelint/file_utils.py
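A short usage sketch for the module above. The file name and YAML snippet are made up, and it assumes ansible-lint is installed so that ansiblelint.file_utils imports cleanly; the point is that passing content keeps the Lintable entirely in memory, as the class docstring describes.

from ansiblelint.file_utils import Lintable, expand_path_vars

# In-memory lintable: no file named demo-playbook.yml has to exist on disk because
# the content is supplied directly.
lintable = Lintable(
    "demo-playbook.yml",
    content="- hosts: all\n  tasks: []\n",
    kind="playbook",
)
print(lintable)          # demo-playbook.yml (playbook)
print(lintable.content)  # served from the internal cache; the file is never opened

# expand_path_vars only expands ~ and environment variables; it does not resolve
# relative paths or check that the result exists.
print(expand_path_vars("~/projects/$USER"))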
# -*- coding: utf-8 -*-
import gzip
import bz2

import numpy as np


def advanced_open(filepath, *args, **kwargs):
    """ Open function interface for files with different extensions.

    Parameters
    ----------
    filepath: str
        File path with extension.
    args: list
        Non-key arguments
    kwargs: dict
        Key arguments

    Returns
    -------
    """
    open_fn = open
    if filepath.endswith('.gz'):
        open_fn = gzip.open
    elif filepath.endswith('.bz2'):
        open_fn = bz2.open
    return open_fn(filepath, mode="rt", *args, **kwargs)


def load_kg_file(filepath, separator="\t", as_stream=False):
    """ Import knowledge graph from file

    Parameters
    ----------
    filepath: str
        File path
    separator: str
        File column separator

    Returns
    -------
    iterator
        The knowledge graph triplets obtained from the files with size [?, 3]
    """
    kg_triples = []
    with advanced_open(filepath) as file_content:
        for line in file_content:
            kg_triples.append(line.strip().split(separator))
    return np.array(kg_triples)


def load_kg_file_as_stream(filepath, separator="\t"):
    """ Import knowledge graph from file as a stream

    Parameters
    ----------
    filepath: str
        File path
    separator: str
        File column separator

    Returns
    -------
    generator
        The knowledge graph triplets obtained from the files with size [?, 3]
    """
    with advanced_open(filepath) as file_content:
        for line in file_content:
            yield line.strip().split(separator)
benchmarking/libkge/libkge/io/base.py
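A usage sketch for the loaders in base.py above. The import path libkge.io is an assumption about the package layout, and the gzipped TSV is a throwaway file created only for the example; advanced_open selects gzip.open from the .gz extension, so no extra flags are needed.

import gzip
import os
import tempfile

from libkge.io import load_kg_file, load_kg_file_as_stream  # assumed package layout

# Write a tiny gzipped TSV of (head, relation, tail) triples.
path = os.path.join(tempfile.mkdtemp(), "toy_kg.tsv.gz")
with gzip.open(path, "wt") as f:
    f.write("alice\tknows\tbob\n")
    f.write("bob\tlives_in\tparis\n")

triples = load_kg_file(path)
print(triples.shape)                          # (2, 3)
for head, rel, tail in load_kg_file_as_stream(path):   # generator, one triple at a time
    print(head, rel, tail)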
import os import requests import datetime from django.http import HttpResponseRedirect, HttpResponse, JsonResponse, Http404 from django.template import loader from django.contrib.auth import login from django.conf import settings from django.http import Http404 from django.utils import timezone from requests import status_codes from rest_framework.authtoken.models import Token from rest_framework.decorators import api_view from rest_framework.response import Response from rest_framework.permissions import AllowAny, IsAuthenticated, IsAuthenticatedOrReadOnly from rest_framework import viewsets, status from channels.layers import get_channel_layer from asgiref.sync import async_to_sync from constance import config import constance.settings from tau.twitch.models import TwitchAPIScope, TwitchEventSubSubscription from tau.users.models import User from .forms import ChannelNameForm, FirstRunForm from .utils import cleanup_remote_webhooks, cleanup_webhooks, log_request, check_access_token_expired, refresh_access_token, teardown_all_acct_webhooks, teardown_webhooks from tau.twitch.models import TwitchHelixEndpoint @api_view(['POST']) def irc_message_view(request): channel_layer = get_channel_layer() async_to_sync(channel_layer.group_send)('twitchchat', { 'type': 'twitchchat.event', 'data': request.data }) return Response({}, status=status.HTTP_201_CREATED) @api_view(['GET', 'POST', 'PUT', 'PATCH', 'DELETE']) def helix_view(request, helix_path=None): if check_access_token_expired(): refresh_access_token() try: endpoint_instance = TwitchHelixEndpoint.objects.get( endpoint=helix_path, method=request.method ) if endpoint_instance.token_type == 'OA': token = config.TWITCH_ACCESS_TOKEN else: token = config.TWITCH_APP_ACCESS_TOKEN except TwitchHelixEndpoint.DoesNotExist: token = config.TWITCH_ACCESS_TOKEN body = request.data client_id = os.environ.get('TWITCH_APP_ID', None) headers = { 'Authorization': 'Bearer {}'.format(token), 'Client-Id': client_id } url = f'https://api.twitch.tv/helix/' \ f'{helix_path}' uri = request.build_absolute_uri() url_params = '' if uri.count('?') > 0: url_params = uri.split('?', 1)[1] if url_params != '': url += f'?{url_params}' if request.method == 'GET': data = requests.get( url, headers=headers ) elif request.method == 'POST': data = requests.post( url, data=body, headers=headers ) elif request.method == 'PUT': data = requests.put( url, data=body, headers=headers ) print(data) elif request.method == 'PATCH': data = requests.patch( url, data=body, headers=headers ) elif request.method == 'DELETE': data = requests.delete( url, headers=headers ) try: if(settings.DEBUG_TWITCH_CALLS): log_request(data) stream_data = data.json() except ValueError: stream_data = None return Response(stream_data, status=data.status_code) def home_view(request): user_count = User.objects.all().exclude(username='worker_process').count() if user_count == 0: return HttpResponseRedirect('/first-run/') # elif not request.user.is_authenticated: # return HttpResponseRedirect('/accounts/login/') elif config.CHANNEL == '': return HttpResponseRedirect('/set-channel/') elif config.SCOPE_UPDATED_NEEDED: return HttpResponseRedirect('/refresh-token-scope/') else: # # template = loader.get_template('home.html') # template = loader.get_template('dashboard/index.html') # return HttpResponse(template.render({'config': config}, request)) return HttpResponseRedirect('/dashboard') def first_run_view(request): user_count = User.objects.all().exclude(username='worker_process').count() if user_count > 0: # If users 
already exist, it is not first run return HttpResponseRedirect('/') # reject creating a new super-user if request.method == 'POST': form = FirstRunForm(request.POST) if form.is_valid(): user = User.objects.create_user( form.cleaned_data['username'], password=form.cleaned_data['password1'] ) user.is_superuser=True user.is_staff=True user.save() login(request, user) return HttpResponseRedirect('/') else: template = loader.get_template('registration/first-run.html') return HttpResponse(template.render({}, request)) else: template = loader.get_template('registration/first-run.html') return HttpResponse(template.render({}, request)) def get_channel_name_view(request): if request.method == 'POST': port = os.environ.get('PORT', 8000) form = ChannelNameForm(request.POST) if form.is_valid(): # Process the data config.CHANNEL = form.cleaned_data['channel_name'] scope=' '.join(settings.TOKEN_SCOPES) client_id = os.environ.get('TWITCH_APP_ID', None) url = f'https://id.twitch.tv/oauth2/authorize?' \ f'client_id={client_id}&' \ f'redirect_uri={settings.BASE_URL}/twitch-callback/&' \ f'response_type=code&' \ f'scope={scope}&' \ f'force_verify=true' return HttpResponseRedirect(url) else: # Show some error page pass else: template = loader.get_template('registration/twitch-channel-setup.html') return HttpResponse(template.render({}, request)) def refresh_token_scope(request): client_id = os.environ.get('TWITCH_APP_ID', None) helix_scopes = list( TwitchAPIScope.objects.filter( required=True ).values_list('scope', flat=True) ) eventsub_scopes = list( TwitchEventSubSubscription.objects.filter( active=True ).values_list('scope_required', flat=True) ) scopes = list(set(settings.TOKEN_SCOPES + eventsub_scopes + helix_scopes)) scopes = list(filter(lambda x: (x is not None), scopes)) scope=' '.join(scopes) url = f'https://id.twitch.tv/oauth2/authorize?' 
\ f'client_id={client_id}&' \ f'redirect_uri={settings.BASE_URL}/twitch-callback/&' \ f'response_type=code&' \ f'scope={scope}&' \ f'force_verify=true' return HttpResponseRedirect(url) @api_view() def get_tau_token(request): if not request.user.is_authenticated: return JsonResponse({'error': 'You must be logged into access this endpoint.'}) else: token = Token.objects.get(user=request.user) return JsonResponse({'token': token.key}) @api_view(['GET']) def get_public_url(request): if not request.user.is_authenticated: return JsonResponse({'error': 'You must be logged into access this endpoint.'}) else: public_url = config.PUBLIC_URL return JsonResponse({'public_url': public_url}) @api_view(['POST']) def refresh_tau_token(request): if not request.user.is_authenticated: return JsonResponse({'error': 'You must be logged into access this endpoint.'}) else: token = Token.objects.get(user=request.user) token.delete() token = Token.objects.create(user=request.user) return JsonResponse({'token': token.key}) @api_view(['POST']) def reset_webhooks(request): if not request.user.is_authenticated: return JsonResponse({'error': 'You must be logged into access this endpoint.'}) data = request.data if data['type'] == 'all': teardown_all_acct_webhooks() elif data['type'] == 'remote': token = Token.objects.get(user=request.user) cleanup_remote_webhooks() elif data['type'] == 'broken': token = Token.objects.get(user=request.user) cleanup_webhooks() else: return JsonResponse({'webhooks_reset': False, 'error': 'Proper type not found.'}) config.FORCE_WEBHOOK_REFRESH = True return JsonResponse({'webhooks_reset': True}) def process_twitch_callback_view(request): port = os.environ.get('PORT', 8000) params = request.GET auth_code = params['code'] client_id = os.environ.get('TWITCH_APP_ID', None) client_secret = os.environ.get('TWITCH_CLIENT_SECRET', None) auth_r = requests.post('https://id.twitch.tv/oauth2/token', data = { 'client_id': client_id, 'client_secret': client_secret, 'code': auth_code, 'grant_type': 'authorization_code', 'redirect_uri': f'{settings.BASE_URL}/twitch-callback/' }) response_data = auth_r.json() if(settings.DEBUG_TWITCH_CALLS): log_request(auth_r) config.TWITCH_ACCESS_TOKEN = response_data['access_token'] config.TWITCH_REFRESH_TOKEN = response_data['refresh_token'] expiration = timezone.now() + datetime.timedelta(seconds=response_data['expires_in']) config.TWITCH_ACCESS_TOKEN_EXPIRATION = expiration scope=' '.join(settings.TOKEN_SCOPES) app_auth_r = requests.post('https://id.twitch.tv/oauth2/token', data = { 'client_id': client_id, 'client_secret': client_secret, 'grant_type': 'client_credentials', 'scope': scope }) if(settings.DEBUG_TWITCH_CALLS): log_request(app_auth_r) app_auth_data = app_auth_r.json() config.TWITCH_APP_ACCESS_TOKEN = app_auth_data['access_token'] config.SCOPE_UPDATED_NEEDED = False config.SCOPES_REFRESHED = True headers = { 'Authorization': 'Bearer {}'.format(config.TWITCH_ACCESS_TOKEN), 'Client-Id': client_id } user_r = requests.get('https://api.twitch.tv/helix/users', headers=headers) if(settings.DEBUG_TWITCH_CALLS): log_request(user_r) user_data = user_r.json() channel_id = user_data['data'][0]['id'] config.CHANNEL_ID = channel_id return HttpResponseRedirect('/') class HeartbeatViewSet(viewsets.ViewSet): permission_classes = (IsAuthenticatedOrReadOnly, ) def list(self, request, *args, **kwargs): response = {'message': 'pong'} return Response(response) class TAUSettingsViewSet(viewsets.ViewSet): permission_classes = (IsAuthenticated, ) valid_keys = ['USE_IRC'] def 
list(self, request, *args, **kwargs): response = {key.lower(): getattr(config, key) for key in self.valid_keys} return Response(response) def retrieve(self, request, pk=None): if pk.upper() in self.valid_keys: return Response({pk: getattr(config, pk.upper())}) else: raise Http404 def update(self, request, pk=None): if pk.upper() in self.valid_keys: data = request.data setattr(config, pk.upper(), data['value']) return Response({pk: data['value']}) else: raise Http404 class ServiceStatusViewSet(viewsets.ViewSet): permission_classes = (IsAuthenticated, ) def update(self, request, pk=None): if pk.startswith('STATUS_') and hasattr(config, pk): data = request.data new_status = data['status'] setattr(config, pk, new_status) return Response({ pk: new_status }) elif pk == 'SET_ALL': status_keys = filter( lambda x: x.startswith('STATUS_'), constance.settings.CONFIG.keys() ) data = request.data new_status = data['status'] for key in status_keys: setattr(config, key, new_status) return Response({ 'reset': 'complete' }) else: raise Http404("Config does not exist")
tau/core/views.py
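helix_view above builds the same two headers for every proxied call, an OAuth Bearer token plus the application's Client-Id, and process_twitch_callback_view uses them directly against https://api.twitch.tv/helix/users. Below is a standalone sketch of that request pattern; the credential values are placeholders rather than real configuration, and with valid credentials the /users endpoint returns the authenticated user's record.

import requests

TWITCH_ACCESS_TOKEN = "<oauth user access token>"   # placeholder, not a real token
TWITCH_APP_ID = "<twitch application client id>"    # placeholder

headers = {
    "Authorization": f"Bearer {TWITCH_ACCESS_TOKEN}",
    "Client-Id": TWITCH_APP_ID,
}
resp = requests.get("https://api.twitch.tv/helix/users", headers=headers)
print(resp.status_code, resp.json())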
def find_words(string, word_set):
    if string == "" or not word_set:
        return None
    if string in word_set:  # O(1)
        return [string]

    # "bedbathbeyondunk"
    # {'bed', 'bath', 'bedbath', 'and', 'beyond'}
    tmp = ""        # bedbathbeyondunk
    out = []        # []
    retro = False   # True
    i = 0
    while i < len(string):  # i = 15
        if not retro:
            tmp += string[i]
        if tmp in word_set:
            out.append(tmp)
            tmp = ""
        if i == len(string) - 1 and tmp != "":
            if not out:
                return None
            tmp = out.pop() + tmp
            retro = True
            i -= 1
        i += 1
    return out


assert find_words(
    "bedbathandbeyond",
    set(['bed', 'bath', 'bedbath', 'and', 'beyond'])
) == ['bed', 'bath', 'and', 'beyond']

assert find_words(
    "thequickbrownfox",
    set(['quick', 'brown', 'the', 'fox'])
) == ['the', 'quick', 'brown', 'fox']

assert find_words(
    "thequickbrownfoxa",
    set(['quick', 'brown', 'the', 'fox'])
) == None
reconstruct-words.py
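For comparison only, and not part of reconstruct-words.py: the same problem solved with the standard dynamic-programming word-break recurrence. It may choose a different but equally valid segmentation than the greedy version above (for example 'bedbath' instead of 'bed' + 'bath'), so the checks below verify validity rather than one exact answer.

def find_words_dp(string, word_set):
    n = len(string)
    # best[i] holds some segmentation of string[:i] as a list of words, or None.
    best = [None] * (n + 1)
    best[0] = []
    for i in range(1, n + 1):
        for j in range(i):
            if best[j] is not None and string[j:i] in word_set:
                best[i] = best[j] + [string[j:i]]
                break
    return best[n]


words = {'bed', 'bath', 'bedbath', 'and', 'beyond'}
seg = find_words_dp("bedbathandbeyond", words)
assert "".join(seg) == "bedbathandbeyond" and all(w in words for w in seg)
assert find_words_dp("thequickbrownfoxa", {'quick', 'brown', 'the', 'fox'}) is None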
import json import yaml from pathlib import Path from brownie import * from substrateinterface import Keypair from hashlib import blake2b import base58 def get_derivative_account(root_account, index): seed_bytes = b'modlpy/utilisuba' root_account_bytes = bytes.fromhex(Keypair(root_account).public_key[2:]) index_bytes = int(index).to_bytes(2, 'little') entropy = blake2b(seed_bytes + root_account_bytes + index_bytes, digest_size=32).digest() input_bytes = bytes([42]) + entropy checksum = blake2b(b'SS58PRE' + input_bytes).digest() return base58.b58encode(input_bytes + checksum[:2]).decode() class Contracts: user = None proxy_admin = None lido = None vksm = None oracle_master = None wstksm = None auth_manager = None controller = None ledgers = None validators = None def __init__(self, _user, _proxy_admin, _lido, _vksm, _oracle_master, _wstksm, _auth_manager, _controller, _ledgers, _validators): self.user = _user self.proxy_admin = _proxy_admin self.lido = _lido self.vksm = _vksm self.oracle_master = _oracle_master self.wstksm = _wstksm self.auth_manager = _auth_manager self.controller = _controller self.ledgers = _ledgers self.validators = _validators NETWORK="kusama" def load_deployments(network): path = './deployments/' + network + '.json' if Path(path).is_file(): with open(path) as file: return json.load(file) else: return {} def load_deployment_config(network): with open('./deployment-config.yml') as file: return yaml.safe_load(file)['networks'][network] CONFIG = load_deployment_config(NETWORK) DEPLOYMENTS = load_deployments(NETWORK) def gen_ledger_account(index): sovereign = CONFIG['sovereign_account'] root_index = CONFIG['root_derivative_index'] controller = get_derivative_account(sovereign, root_index) return get_derivative_account(controller, index) #contracts = run('./scripts/prepare_env.py') from brownie console --network=moonbase def main(): user = accounts.load(CONFIG['deployer']) proxy_admin = ProxyAdminMock.at(DEPLOYMENTS['ProxyAdmin']) lido = Lido.at(DEPLOYMENTS['Lido']) vksm = vKSM_mock.at(CONFIG['precompiles']['vksm']) oracle_master = OracleMaster.at(DEPLOYMENTS['OracleMaster']) wstksm = WstKSM.at(DEPLOYMENTS['WstKSM']) auth_manager = AuthManager.at(DEPLOYMENTS['AuthManager']) controller = Controller.at(DEPLOYMENTS['Controller']) ledgers = [ Ledger.at(addr) for addr in lido.getLedgerAddresses() ] # current validators in moonbase validator_1 = Keypair("5CX2ov8tmW6nZwy6Eouzc7VxFHcAyZioNm5QjEUYc7zjbS66").public_key validator_2 = Keypair("5FRiNmoi9HFGFrY3K9xsSCeewRtA2pcXTZVZrwLacPCfvHum").public_key validator_3 = Keypair("5EcdgHV81hu6YpPucSMrWbdQRBUr18XypiiGsgQ7HREYdrWG").public_key validator_4 = Keypair("5FCEmzonc34D2SXXv2CMsDoFWCVivH2a2Mwe32t9BT1TcpAD").public_key validator_5 = Keypair("5Ehgvgk1LERD5aTEWw6HLdKZurBqcRYbHXvrAtTgYPhUpr1R").public_key validators = [validator_1, validator_2, validator_3, validator_4, validator_5] # 5CxXVE7pHqzR4kzfz6nop529odm8eVemFFtStruyNQvdTopo # 5GxgDNMhbvMhuJzXC2voX5nKUyNaNQFCZxgnoa18eGiBBZwt # 5Cqb9WXVQQF73a1dcJEBFS2bWrukaC6dmzjeWZeJHj3NMwvB return Contracts(user, proxy_admin, lido, vksm, oracle_master, wstksm, auth_manager, controller, ledgers, validators)
scripts/prepare_env.py
3,484
261
en
0.235365
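For context on prepare_env.py above: gen_ledger_account applies the same pallet-utility derivation twice, sovereign account to controller (at root_derivative_index), then controller to per-ledger account. A minimal sketch of calling the script's own helper directly is shown below; one of the validator addresses from the file is reused purely as a stand-in for the real sovereign account, and the indices are made up.

# Sketch only: hypothetical use of get_derivative_account from this script.
# The address is a stand-in, not the sovereign account from deployment-config.yml.
sovereign = "5CX2ov8tmW6nZwy6Eouzc7VxFHcAyZioNm5QjEUYc7zjbS66"

controller = get_derivative_account(sovereign, 0)   # derivative index 0 (root_derivative_index in the real config)
ledger_0 = get_derivative_account(controller, 0)    # first ledger account under that controller
print(controller, ledger_0)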
import datetime from dateutil.parser import parse from decimal import Decimal import re import importlib from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned from django.utils import datetime_safe from tastypie.bundle import Bundle from tastypie.exceptions import ApiFieldError, NotFound from tastypie.utils import dict_strip_unicode_keys, make_aware class NOT_PROVIDED: def __str__(self): return 'No default provided.' DATE_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2}).*?$') DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$') # All the ApiField variants. class ApiField(object): """The base implementation of a field used by the resources.""" dehydrated_type = 'string' help_text = '' def __init__(self, attribute=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, unique=False, help_text=None): """ Sets up the field. This is generally called when the containing ``Resource`` is initialized. Optionally accepts an ``attribute``, which should be a string of either an instance attribute or callable off the object during the ``dehydrate`` or push data onto an object during the ``hydrate``. Defaults to ``None``, meaning data will be manually accessed. Optionally accepts a ``default``, which provides default data when the object being ``dehydrated``/``hydrated`` has no data on the field. Defaults to ``NOT_PROVIDED``. Optionally accepts a ``null``, which indicated whether or not a ``None`` is allowable data on the field. Defaults to ``False``. Optionally accepts a ``blank``, which indicated whether or not data may be omitted on the field. Defaults to ``False``. Optionally accepts a ``readonly``, which indicates whether the field is used during the ``hydrate`` or not. Defaults to ``False``. Optionally accepts a ``unique``, which indicates if the field is a unique identifier for the object. Optionally accepts ``help_text``, which lets you provide a human-readable description of the field exposed at the schema level. Defaults to the per-Field definition. """ # Track what the index thinks this field is called. self.instance_name = None self._resource = None self.attribute = attribute self._default = default self.null = null self.blank = blank self.readonly = readonly self.value = None self.unique = unique if help_text: self.help_text = help_text def contribute_to_class(self, cls, name): # Do the least we can here so that we don't hate ourselves in the # morning. self.instance_name = name self._resource = cls def has_default(self): """Returns a boolean of whether this field has a default value.""" return self._default is not NOT_PROVIDED @property def default(self): """Returns the default value for the field.""" if callable(self._default): return self._default() return self._default def dehydrate(self, bundle): """ Takes data from the provided object and prepares it for the resource. """ if self.attribute is not None: # Check for `__` in the field for looking through the relation. attrs = self.attribute.split('__') current_object = bundle.obj for attr in attrs: previous_object = current_object current_object = getattr(current_object, attr, None) if current_object is None: if self.has_default(): current_object = self._default # Fall out of the loop, given any further attempts at # accesses will fail miserably. break elif self.null: current_object = None # Fall out of the loop, given any further attempts at # accesses will fail miserably. 
break else: raise ApiFieldError("The object '%r' has an empty attribute '%s' and doesn't allow a default or null value." % (previous_object, attr)) if callable(current_object): current_object = current_object() return self.convert(current_object) if self.has_default(): return self.convert(self.default) else: return None def convert(self, value): """ Handles conversion between the data found and the type of the field. Extending classes should override this method and provide correct data coercion. """ return value def hydrate(self, bundle): """ Takes data stored in the bundle for the field and returns it. Used for taking simple data and building a instance object. """ if self.readonly: return None if not bundle.data.has_key(self.instance_name): is_related = getattr(self, 'is_related', False) is_m2m = getattr(self, 'is_m2m', False) if is_related and not is_m2m: # We've got an FK (or alike field) & a possible parent object. # Check for it. if bundle.related_obj and bundle.related_name in (self.attribute, self.instance_name): return bundle.related_obj # Functor for safely checking if bundle.obj has a non-None property def has_non_null_attr(obj, name): try: return getattr(obj, name, None) is not None except: if is_related: return None else: raise if self.blank: return None elif self.attribute and has_non_null_attr(bundle.obj, self.attribute): return getattr(bundle.obj, self.attribute) elif self.instance_name and has_non_null_attr(bundle.obj, self.instance_name): return getattr(bundle.obj, self.instance_name) elif self.has_default(): if callable(self._default): return self._default() return self._default elif self.null: return None else: raise ApiFieldError("The '%s' field has no data and doesn't allow a default or null value." % self.instance_name) bundle_val = bundle.data[self.instance_name] if bundle_val is None and not self.null: raise ApiFieldError("The '%s' field doesn't allow a null value." % self.instance_name) else: return bundle_val def set_value_on_bundle_obj(self, bundle, value): """ Overrideable hook for writing a value into the object on a bundle. Enables the use of custom setters in your app code if setattr() is too raw for your fancy ORM model. """ try: setattr(bundle.obj, self.attribute, value) except Exception, e: raise ApiFieldError("The '%s' field couldn't set value '%s': %s" % (self.instance_name, value, e)) class CharField(ApiField): """ A text field of arbitrary length. Covers both ``models.CharField`` and ``models.TextField``. """ dehydrated_type = 'string' help_text = 'Unicode string data. Ex: "Hello World"' def convert(self, value): if value is None: return None return unicode(value) class FileField(ApiField): """ A file-related field. Covers both ``models.FileField`` and ``models.ImageField``. """ dehydrated_type = 'string' help_text = 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"' def convert(self, value): if value is None: return None try: # Try to return the URL if it's a ``File``, falling back to the string # itself if it's been overridden or is a default. return getattr(value, 'url', value) except ValueError: return None class IntegerField(ApiField): """ An integer field. Covers ``models.IntegerField``, ``models.PositiveIntegerField``, ``models.PositiveSmallIntegerField`` and ``models.SmallIntegerField``. """ dehydrated_type = 'integer' help_text = 'Integer data. Ex: 2673' def convert(self, value): if value is None: return None return int(value) class FloatField(ApiField): """ A floating point field. 
""" dehydrated_type = 'float' help_text = 'Floating point numeric data. Ex: 26.73' def convert(self, value): if value is None: return None return float(value) class DecimalField(ApiField): """ A decimal field. """ dehydrated_type = 'decimal' help_text = 'Fixed precision numeric data. Ex: 26.73' def convert(self, value): if value is None: return None return Decimal(value) def hydrate(self, bundle): value = super(DecimalField, self).hydrate(bundle) if value and not isinstance(value, Decimal): value = Decimal(value) return value class BooleanField(ApiField): """ A boolean field. Covers both ``models.BooleanField`` and ``models.NullBooleanField``. """ dehydrated_type = 'boolean' help_text = 'Boolean data. Ex: True' def convert(self, value): if value is None: return None return bool(value) class ListField(ApiField): """ A list field. """ dehydrated_type = 'list' help_text = "A list of data. Ex: ['abc', 26.73, 8]" def convert(self, value): if value is None: return None return list(value) class DictField(ApiField): """ A dictionary field. """ dehydrated_type = 'dict' help_text = "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}" def convert(self, value): if value is None: return None return dict(value) class DateField(ApiField): """ A date field. """ dehydrated_type = 'date' help_text = 'A date as a string. Ex: "2010-11-10"' def convert(self, value): if value is None: return None if isinstance(value, basestring): match = DATE_REGEX.search(value) if match: data = match.groupdict() return datetime_safe.date(int(data['year']), int(data['month']), int(data['day'])) else: raise ApiFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value)) return value def hydrate(self, bundle): value = super(DateField, self).hydrate(bundle) if value and not hasattr(value, 'year'): try: # Try to rip a date/datetime out of it. value = make_aware(parse(value)) if hasattr(value, 'hour'): value = value.date() except ValueError: pass return value class DateTimeField(ApiField): """ A datetime field. """ dehydrated_type = 'datetime' help_text = 'A date & time as a string. Ex: "2010-11-10T03:07:43"' def convert(self, value): if value is None: return None if isinstance(value, basestring): match = DATETIME_REGEX.search(value) if match: data = match.groupdict() return make_aware(datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second']))) else: raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value)) return value def hydrate(self, bundle): value = super(DateTimeField, self).hydrate(bundle) if value and not hasattr(value, 'year'): try: # Try to rip a date/datetime out of it. value = make_aware(parse(value)) except ValueError: pass return value class RelatedField(ApiField): """ Provides access to data that is related within the database. The ``RelatedField`` base class is not intended for direct use but provides functionality that ``ToOneField`` and ``ToManyField`` build upon. The contents of this field actually point to another ``Resource``, rather than the related object. This allows the field to represent its data in different ways. The abstractions based around this are "leaky" in that, unlike the other fields provided by ``tastypie``, these fields don't handle arbitrary objects very well. The subclasses use Django's ORM layer to make things go, though there is no ORM-specific code at this level. 
""" dehydrated_type = 'related' is_related = True self_referential = False help_text = 'A related resource. Can be either a URI or set of nested resource data.' def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None): """ Builds the field and prepares it to access to related data. The ``to`` argument should point to a ``Resource`` class, NOT to a ``Model``. Required. The ``attribute`` argument should specify what field/callable points to the related data on the instance object. Required. Optionally accepts a ``related_name`` argument. Currently unused, as unlike Django's ORM layer, reverse relations between ``Resource`` classes are not automatically created. Defaults to ``None``. Optionally accepts a ``null``, which indicated whether or not a ``None`` is allowable data on the field. Defaults to ``False``. Optionally accepts a ``blank``, which indicated whether or not data may be omitted on the field. Defaults to ``False``. Optionally accepts a ``readonly``, which indicates whether the field is used during the ``hydrate`` or not. Defaults to ``False``. Optionally accepts a ``full``, which indicates how the related ``Resource`` will appear post-``dehydrate``. If ``False``, the related ``Resource`` will appear as a URL to the endpoint of that resource. If ``True``, the result of the sub-resource's ``dehydrate`` will be included in full. Optionally accepts a ``unique``, which indicates if the field is a unique identifier for the object. Optionally accepts ``help_text``, which lets you provide a human-readable description of the field exposed at the schema level. Defaults to the per-Field definition. """ self.instance_name = None self._resource = None self.to = to self.attribute = attribute self.related_name = related_name self._default = default self.null = null self.blank = blank self.readonly = readonly self.full = full self.api_name = None self.resource_name = None self.unique = unique self._to_class = None if self.to == 'self': self.self_referential = True self._to_class = self.__class__ if help_text: self.help_text = help_text def contribute_to_class(self, cls, name): super(RelatedField, self).contribute_to_class(cls, name) # Check if we're self-referential and hook it up. # We can't do this quite like Django because there's no ``AppCache`` # here (which I think we should avoid as long as possible). if self.self_referential or self.to == 'self': self._to_class = cls def get_related_resource(self, related_instance=None): """ Instantiates the related resource. """ instance = self.to_class(api_name=self.api_name) instance.api_name = self.api_name return instance @property def to_class(self): # We need to be lazy here, because when the metaclass constructs the # Resources, other classes may not exist yet. # That said, memoize this so we never have to relookup/reimport. if self._to_class: return self._to_class if not isinstance(self.to, basestring): self._to_class = self.to return self._to_class # It's a string. Let's figure it out. if '.' in self.to: # Try to import. module_bits = self.to.split('.') module_path, class_name = '.'.join(module_bits[:-1]), module_bits[-1] module = importlib.import_module(module_path) else: # We've got a bare class name here, which won't work (No AppCache # to rely on). Try to throw a useful error. raise ImportError("Tastypie requires a Python-style path (<module.module.Class>) to lazy load related resources. Only given '%s'." 
% self.to) self._to_class = getattr(module, class_name, None) if self._to_class is None: raise ImportError("Module '%s' does not appear to have a class called '%s'." % (module_path, class_name)) return self._to_class def dehydrate_related(self, bundle, related_resource, related_instance): """ Based on the ``full_resource``, returns either the endpoint or the data from ``full_dehydrate`` for the related resource. """ if not self.full: # Be a good netizen. return related_resource.get_resource_uri(bundle) else: # ZOMG extra data and big payloads. bundle = related_resource.build_bundle(obj=related_instance, request=bundle.request) return related_resource.full_dehydrate(bundle) def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None): """ Given a URI is provided, the related resource is attempted to be loaded based on the identifiers in the URI. """ try: obj = fk_resource.get_via_uri(uri, request=request) bundle = fk_resource.build_bundle(obj=obj, request=request) return fk_resource.full_dehydrate(bundle) except ObjectDoesNotExist: raise ApiFieldError("Could not find the provided object via resource URI '%s'." % uri) def resource_from_data(self, fk_resource, data, request=None, related_obj=None, related_name=None): """ Given a dictionary-like structure is provided, a fresh related resource is created using that data. """ # Try to hydrate the data provided. data = dict_strip_unicode_keys(data) fk_bundle = fk_resource.build_bundle(data=data, request=request) if related_obj: fk_bundle.related_obj = related_obj fk_bundle.related_name = related_name # We need to check to see if updates are allowed on the FK # resource. If not, we'll just return a populated bundle instead # of mistakenly updating something that should be read-only. if not fk_resource.can_update(): # If the resource already exists and the client specified where to find it, we look it up. if 'resource_uri' in data: obj = fk_resource.get_via_uri(data['resource_uri'], request=request) fk_bundle.install_existing_obj( obj ) return fk_bundle # If the resource supports creation, then we can full_hydrate() and create a new instance. elif fk_resource.can_create(): return fk_resource.full_hydrate(fk_bundle) else: raise ApiFieldError("Resource %s does not support being created via POST" % fk_resource._meta.resource_name) try: return fk_resource.obj_update(fk_bundle, **data) except NotFound: try: # Attempt lookup by primary key lookup_kwargs = dict((k, v) for k, v in data.iteritems() if getattr(fk_resource, k).unique) if not lookup_kwargs: raise NotFound() return fk_resource.obj_update(fk_bundle, **lookup_kwargs) except NotFound: fk_bundle = fk_resource.full_hydrate(fk_bundle) fk_resource.is_valid(fk_bundle, request) return fk_bundle except MultipleObjectsReturned: return fk_resource.full_hydrate(fk_bundle) def resource_from_pk(self, fk_resource, obj, request=None, related_obj=None, related_name=None): """ Given an object with a ``pk`` attribute, the related resource is attempted to be loaded via that PK. """ bundle = fk_resource.build_bundle(obj=obj, request=request) return fk_resource.full_dehydrate(bundle) def build_related_resource(self, value, request=None, related_obj=None, related_name=None): """ Returns a bundle of data built by the related resource, usually via ``hydrate`` with the data provided. Accepts either a URI, a data dictionary (or dictionary-like structure) or an object with a ``pk``. 
""" self.fk_resource = self.to_class(api_name=self.api_name) kwargs = { 'request': request, 'related_obj': related_obj, 'related_name': related_name, } if isinstance(value, basestring): # We got a URI. Load the object and assign it. return self.resource_from_uri(self.fk_resource, value, **kwargs) elif isinstance(value, Bundle): # We got a valid bundle object, the RelatedField had full=True return value elif isinstance(value, dict): # We've got a data dictionary. # Since this leads to creation, this is the only one of these # methods that might care about "parent" data. return self.resource_from_data(self.fk_resource, value, **kwargs) elif hasattr(value, 'pk'): # We've got an object with a primary key. return self.resource_from_pk(self.fk_resource, value, **kwargs) else: raise ApiFieldError("The '%s' field was given data that was not a URI, not a dictionary-alike and does not have a 'pk' attribute: %s." % (self.instance_name, value)) class ToOneField(RelatedField): """ Provides access to related data via foreign key. This subclass requires Django's ORM layer to work properly. """ help_text = 'A single related resource. Can be either a URI or set of nested resource data.' def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None): super(ToOneField, self).__init__( to, attribute, related_name=related_name, default=default, null=null, blank=blank, readonly=readonly, full=full, unique=unique, help_text=help_text ) self.fk_resource = None def dehydrate(self, bundle): foreign_obj = None if isinstance(self.attribute, basestring): attrs = self.attribute.split('__') foreign_obj = bundle.obj for attr in attrs: previous_obj = foreign_obj try: foreign_obj = getattr(foreign_obj, attr, None) except ObjectDoesNotExist: foreign_obj = None elif callable(self.attribute): foreign_obj = self.attribute(bundle) if not foreign_obj: if not self.null: raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr)) return None self.fk_resource = self.get_related_resource(foreign_obj) fk_bundle = Bundle(obj=foreign_obj, request=bundle.request) return self.dehydrate_related(fk_bundle, self.fk_resource, foreign_obj) def hydrate(self, bundle): value = super(ToOneField, self).hydrate(bundle) if value is None: return value return self.build_related_resource(value, request=bundle.request) class ForeignKey(ToOneField): """ A convenience subclass for those who prefer to mirror ``django.db.models``. """ pass class OneToOneField(ToOneField): """ A convenience subclass for those who prefer to mirror ``django.db.models``. """ pass class ToManyField(RelatedField): """ Provides access to related data via a join table. This subclass requires Django's ORM layer to work properly. Note that the ``hydrate`` portions of this field are quite different than any other field. ``hydrate_m2m`` actually handles the data and relations. This is due to the way Django implements M2M relationships. """ is_m2m = True help_text = 'Many related resources. Can be either a list of URIs or list of individually nested resource data.' 
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None): super(ToManyField, self).__init__( to, attribute, related_name=related_name, default=default, null=null, blank=blank, readonly=readonly, full=full, unique=unique, help_text=help_text ) self.m2m_bundles = [] def dehydrate(self, bundle): if not bundle.obj or not bundle.obj.pk: if not self.null: raise ApiFieldError("The model '%r' does not have a primary key and can not be used in a ToMany context." % bundle.obj) return [] the_m2ms = None previous_obj = bundle.obj attr = self.attribute if isinstance(self.attribute, basestring): attrs = self.attribute.split('__') the_m2ms = bundle.obj for attr in attrs: previous_obj = the_m2ms try: the_m2ms = getattr(the_m2ms, attr, None) except ObjectDoesNotExist: the_m2ms = None if not the_m2ms: break elif callable(self.attribute): the_m2ms = self.attribute(bundle) if not the_m2ms: if not self.null: raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr)) return [] self.m2m_resources = [] m2m_dehydrated = [] # TODO: Also model-specific and leaky. Relies on there being a # ``Manager`` there. for m2m in the_m2ms.all(): m2m_resource = self.get_related_resource(m2m) m2m_bundle = Bundle(obj=m2m, request=bundle.request) self.m2m_resources.append(m2m_resource) m2m_dehydrated.append(self.dehydrate_related(m2m_bundle, m2m_resource, m2m)) return m2m_dehydrated def hydrate(self, bundle): pass def hydrate_m2m(self, bundle): if self.readonly: return None if bundle.data.get(self.instance_name) is None: if self.blank: return [] elif self.null: return [] else: raise ApiFieldError("The '%s' field has no data and doesn't allow a null value." % self.instance_name) m2m_hydrated = [] for value in bundle.data.get(self.instance_name): if value is None: continue kwargs = { 'request': bundle.request, } if self.related_name: kwargs['related_obj'] = bundle.obj kwargs['related_name'] = self.related_name m2m_hydrated.append(self.build_related_resource(value, **kwargs)) return m2m_hydrated class ManyToManyField(ToManyField): """ A convenience subclass for those who prefer to mirror ``django.db.models``. """ pass class OneToManyField(ToManyField): """ A convenience subclass for those who prefer to mirror ``django.db.models``. """ pass class TimeField(ApiField): dehydrated_type = 'time' help_text = 'A time as string. Ex: "20:05:23"' def dehydrate(self, obj): return self.convert(super(TimeField, self).dehydrate(obj)) def convert(self, value): if isinstance(value, basestring): return self.to_time(value) return value def to_time(self, s): try: dt = parse(s) except ValueError, e: raise ApiFieldError(str(e)) else: return datetime.time(dt.hour, dt.minute, dt.second) def hydrate(self, bundle): value = super(TimeField, self).hydrate(bundle) if value and not isinstance(value, datetime.time): value = self.to_time(value) return value
tastypie/fields.py
29,911
2,053
en
0.933934
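The ApiField.convert docstring in tastypie/fields.py above says that extending classes should override it to provide the correct data coercion. A minimal sketch of a custom field in that style is shown below; UUIDField is illustrative, not part of tastypie, and it follows the same pattern as CharField and IntegerField in the file (Python 2-era API).

# Illustrative only: a custom field built on the ApiField base class defined above.
import uuid

class UUIDField(ApiField):
    """A field that normalises values to canonical UUID strings."""
    dehydrated_type = 'string'
    help_text = 'A UUID as a string. Ex: "5e2adf05-d5a4-4b4b-9d85-da6994dc1c55"'

    def convert(self, value):
        if value is None:
            return None
        # Accept uuid.UUID instances or strings and emit the canonical form.
        return str(uuid.UUID(str(value)))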
# coding: utf-8 """ UltraCart Rest API V2 UltraCart REST API Version 2 OpenAPI spec version: 2.0.0 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class CouponFreeItemAndShippingWithSubtotal(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'currency_code': 'str', 'items': 'list[str]', 'limit': 'int', 'shipping_methods': 'list[str]', 'subtotal_amount': 'float' } attribute_map = { 'currency_code': 'currency_code', 'items': 'items', 'limit': 'limit', 'shipping_methods': 'shipping_methods', 'subtotal_amount': 'subtotal_amount' } def __init__(self, currency_code=None, items=None, limit=None, shipping_methods=None, subtotal_amount=None): """ CouponFreeItemAndShippingWithSubtotal - a model defined in Swagger """ self._currency_code = None self._items = None self._limit = None self._shipping_methods = None self._subtotal_amount = None self.discriminator = None if currency_code is not None: self.currency_code = currency_code if items is not None: self.items = items if limit is not None: self.limit = limit if shipping_methods is not None: self.shipping_methods = shipping_methods if subtotal_amount is not None: self.subtotal_amount = subtotal_amount @property def currency_code(self): """ Gets the currency_code of this CouponFreeItemAndShippingWithSubtotal. The ISO-4217 three letter currency code the customer is viewing prices in :return: The currency_code of this CouponFreeItemAndShippingWithSubtotal. :rtype: str """ return self._currency_code @currency_code.setter def currency_code(self, currency_code): """ Sets the currency_code of this CouponFreeItemAndShippingWithSubtotal. The ISO-4217 three letter currency code the customer is viewing prices in :param currency_code: The currency_code of this CouponFreeItemAndShippingWithSubtotal. :type: str """ if currency_code is not None and len(currency_code) > 3: raise ValueError("Invalid value for `currency_code`, length must be less than or equal to `3`") self._currency_code = currency_code @property def items(self): """ Gets the items of this CouponFreeItemAndShippingWithSubtotal. A list of items that are eligible for this discount_price. :return: The items of this CouponFreeItemAndShippingWithSubtotal. :rtype: list[str] """ return self._items @items.setter def items(self, items): """ Sets the items of this CouponFreeItemAndShippingWithSubtotal. A list of items that are eligible for this discount_price. :param items: The items of this CouponFreeItemAndShippingWithSubtotal. :type: list[str] """ self._items = items @property def limit(self): """ Gets the limit of this CouponFreeItemAndShippingWithSubtotal. The limit of free items that may be received when purchasing multiple items :return: The limit of this CouponFreeItemAndShippingWithSubtotal. :rtype: int """ return self._limit @limit.setter def limit(self, limit): """ Sets the limit of this CouponFreeItemAndShippingWithSubtotal. The limit of free items that may be received when purchasing multiple items :param limit: The limit of this CouponFreeItemAndShippingWithSubtotal. 
:type: int """ self._limit = limit @property def shipping_methods(self): """ Gets the shipping_methods of this CouponFreeItemAndShippingWithSubtotal. One or more shipping methods that may be free :return: The shipping_methods of this CouponFreeItemAndShippingWithSubtotal. :rtype: list[str] """ return self._shipping_methods @shipping_methods.setter def shipping_methods(self, shipping_methods): """ Sets the shipping_methods of this CouponFreeItemAndShippingWithSubtotal. One or more shipping methods that may be free :param shipping_methods: The shipping_methods of this CouponFreeItemAndShippingWithSubtotal. :type: list[str] """ self._shipping_methods = shipping_methods @property def subtotal_amount(self): """ Gets the subtotal_amount of this CouponFreeItemAndShippingWithSubtotal. The amount of subtotal required to receive the discount percent :return: The subtotal_amount of this CouponFreeItemAndShippingWithSubtotal. :rtype: float """ return self._subtotal_amount @subtotal_amount.setter def subtotal_amount(self, subtotal_amount): """ Sets the subtotal_amount of this CouponFreeItemAndShippingWithSubtotal. The amount of subtotal required to receive the discount percent :param subtotal_amount: The subtotal_amount of this CouponFreeItemAndShippingWithSubtotal. :type: float """ self._subtotal_amount = subtotal_amount def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, CouponFreeItemAndShippingWithSubtotal): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
ultracart/models/coupon_free_item_and_shipping_with_subtotal.py
7,294
2,799
en
0.68995
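A small sketch of how the generated CouponFreeItemAndShippingWithSubtotal model above is typically used: construct it with keyword arguments and serialise it with to_dict(). The values are made up for illustration.

# Hypothetical usage of the generated model; all values are made up.
coupon = CouponFreeItemAndShippingWithSubtotal(
    currency_code="USD",            # must be at most 3 characters per the setter
    items=["ITEM-A", "ITEM-B"],
    limit=1,
    shipping_methods=["Ground"],
    subtotal_amount=50.0,
)
print(coupon.to_dict())
# e.g. {'currency_code': 'USD', 'items': ['ITEM-A', 'ITEM-B'], 'limit': 1, ...}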
import RPi.GPIO as GPIO
import time, sys, datetime, json, requests
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects

'''
Configure raspberry
'''
GPIO.setmode(GPIO.BCM)
inpt = 13
GPIO.setup(inpt, GPIO.IN)

'''
Configure some global variables
'''
current_input = GPIO.input(inpt)  # This is used to compare to the new_input later.
total_rotations = 0               # This is a counter. It gets reset after the number of seconds in rotation_downtime.
cup_movements = 200               # This is how many rotations occur as a cup of liquid passes through.
rotation_downtime = 5             # Sets the cut-off time for establishing a water-flow event.
last_movement_time = time.time() + rotation_downtime  # This is used to determine if a new water-flow event should be created.
record_data = False               # A flag used to trigger database insert.
data = []

print('Control C to exit')


def commit_data(data):
    '''
    This passes data to the database as a single row.
    It then resets/empties data.
    '''
    url = 'http://localhost:1880/sensor'
    headers = {
        'Accepts': 'application/json'
    }
    print(f"1: {data[0]}")
    send_jsn = json.dumps({"Movements": data[0][1],
                           "Cups": data[0][2],
                           "Gallons": data[0][3],
                           "Liters": data[0][4]})
    try:
        response = requests.post(url, data=send_jsn, headers=headers)
        print(response.text)
    except (ConnectionError, Timeout, TooManyRedirects) as e:
        print(e)
    data = []
    return data


def prep_and_send(data, total_rotations):
    '''
    Calculates measurements (cups and gallons).
    Prepares the data into a database-friendly tuple.
    Appends that tuple to a list.
    It then tries to connect to database. If it is not successful then it does
    nothing but saves the data; it will try to send the list of data-tuples the
    next time there is a water-flow event.
    Once the connection is successful data is emptied in commit_data().
    '''
    total_cups = total_rotations/cup_movements
    total_gallons = total_cups/16
    total_liters = total_gallons*3.78541
    now = datetime.datetime.now()
    print('{}: Movements: {}. \nCups: {}. \nGallons: {}. \nLiters: {}'.format(now, total_rotations, total_cups, total_gallons, total_liters))
    current_data = (
        now,
        round(total_rotations, 2),
        round(total_cups, 2),
        round(total_gallons, 2),
        round(total_liters, 2),
    )
    data.append(current_data)
    print(f"datos: {data}")
    data = commit_data(data)
    return data


while True:
    '''
    This is what actually runs the whole time.
    It first checks to see if new_input is different from current_input.
    This would be the case if there was a rotation.
    Once it detects that the input is different it knows water is flowing.
    It starts tracking the total_rotations and when the last rotation occurred.
    After each rotation it refreshes the value of the last rotation time.
    It waits a few seconds (rotation_downtime) after the last rotation time to make sure the water has stopped.
    Once the water stops it passes the total_rotations to prep_and_send().
    It also passes 'data' which is any previous water-flow events that were not
    successfully sent at the time they were recorded.
    '''
    new_input = GPIO.input(inpt)
    if new_input != current_input:
        total_rotations += 1
        if time.time() <= last_movement_time:  # if it hasn't been more than 10 seconds
            record_data = True
            current_input = new_input
            last_movement_time = time.time() + rotation_downtime
        else:  # flow starts
            last_movement_time = time.time() + rotation_downtime
    elif record_data == True and time.time() > last_movement_time:  # if it's been x seconds since last change
        data = prep_and_send(data, total_rotations)
        record_data = False
        total_rotations = 0
        last_movement_time = time.time() + rotation_downtime
        current_input = new_input
    try:
        None
        # print('New input: ', new_input, '. Current input: ', current_input, '. Movements: ', total_rotations)
    except KeyboardInterrupt:
        print('\nCTRL C - Exiting nicely')
        GPIO.cleanup()
        sys.exit()
software/read-sensor-python/waterFlow/waterFlowMeter.py
4,411
1,026
en
0.912425
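The unit conversion inside prep_and_send in the water-flow script above can be isolated as a pure function, which makes the constants easier to check. This is a sketch, not part of the original script; it reuses the script's own assumptions of 200 pulses per cup and 16 cups per US gallon.

# Sketch only: the same conversion prep_and_send performs, pulled out as a pure function.
def rotations_to_volume(rotations, pulses_per_cup=200):
    """Convert flow-sensor pulses to (cups, gallons, liters) using the script's constants."""
    cups = rotations / pulses_per_cup
    gallons = cups / 16            # 16 cups per US gallon
    liters = gallons * 3.78541     # liters per US gallon
    return cups, gallons, liters

print(rotations_to_volume(3200))   # (16.0, 1.0, 3.78541)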
import re import traceback import subprocess from serviceDB import ServiceDB class NeadmServiceWrapper: _service_list_cmd = ['/opt/nedge/neadm/neadm', 'service', 'list'] # _status_cmd = ['/opt/nedge/neadm/fake-neadm-status.sh'] _service_list_header = re.compile("^.*TYPE.*NAME.*SERVERID.*STATUS.*$") # unit_id key well be added during parsing of each line _service_list_names = ['type', 'name', 'sid', 'status'] def __init__(self, db): self.exit_code = 0 self.db = ServiceDB(db) def get_exit_code(self): return self.exit_code def get_raw_output(self, command): try: output = subprocess.check_output(command, stderr=subprocess.STDOUT) self.exit_code = 0 return output except subprocess.CalledProcessError as ex: self.exit_code = ex.returncode return ex.output except Exception as e: self.exit_code = 1 return "Failed to start {0} command.' \ Exeption {1}".format(command, e.output) def get_all_services(self): output = self.get_raw_output(NeadmServiceWrapper._service_list_cmd) # print(output) result = NeadmServiceList() # error exit code if self.exit_code: result.exit_code = self.exit_code result.output = output return result output_array = output.split('\n') for line in output_array: # print(line) if NeadmServiceWrapper._service_list_header.match(line): continue params = line.split() # print(params) # print(len(params)) if len(params) < 4: continue service_record = {} for name in NeadmServiceWrapper._service_list_names: service_record[name] = params[ NeadmServiceWrapper._service_list_names.index(name)] # check ServiceDB for sid and unit_id already joined # add unit_id key db_record = self.db.find(sid=service_record['sid'], service_name=service_record['name']) if len(db_record) == 1: service_record['unit_id'] = db_record[0]['unit_id'] else: service_record['unit_id'] = '' # print(node) result.append(service_record) # print(status) return result def exec_cmd(self, cmd_name, cmd): try: print("\t{0} cmd is {1}".format(cmd_name, ' '.join(cmd))) subprocess.check_output(cmd) except Exception as ex: raise Exception('in {0}\nMessage:{1}\nTrace: {2}'.format( self.__class__.__name__, ex.message, traceback.format_exc())) # is node included into service nodes list def is_node_exist(self, service_name, sid): services = self.get_all_services() return services.is_already_in_service(service_name, sid) # is iscsi service already created def is_service_exist(self, service_name): services = self.get_all_services() return services.is_service_exist(service_name) # create new iscsi(cinder) service by name def create_iscsi_service(self, service_name): cmd = ['/opt/nedge/neadm/neadm', 'service', 'create', 'iscsi', service_name] if not self.is_service_exist(service_name): self.exec_cmd('create_iscsi_service', cmd) else: print("create_iscsi_service: Service {} already exist!".format( service_name)) # create new swift service by name def create_swift_service(self, service_name): cmd = ['/opt/nedge/neadm/neadm', 'service', 'create', 'swift', service_name] if not self.is_service_exist(service_name): self.exec_cmd('create_swift_service', cmd) else: print("create_swift_service: Service {} already exist!".format( service_name)) # remove iscsi service by name def delete_service(self, service_name): cmd = ['/opt/nedge/neadm/neadm', 'service', 'delete', service_name] if self.is_service_exist(service_name): self.exec_cmd('delete_service', cmd) else: print("remove_iscsi_service: {0} service does not exist".format( service_name)) def is_service_enabled(self, service_name): services = self.get_all_services() return 
services.is_service_enabled(service_name) # serve command, apply swift servie to cluster def serve_service(self, service_name, cluster_name): cmd = ['/opt/nedge/neadm/neadm', 'service', 'serve', service_name, cluster_name] if not self.is_service_exist(service_name): print("serve_service: Service {} does not exist".format( service_name)) return self.exec_cmd('serve_service', cmd) # enable service if exist def enable_service(self, service_name): cmd = ['/opt/nedge/neadm/neadm', 'service', 'enable', service_name] if not self.is_service_exist(service_name): print("enable_service: Service {} does not exist".format( service_name)) return if not self.is_service_enabled(service_name): self.exec_cmd('enable_service', cmd) else: print("enable_service: Service {} already enabled".format( service_name)) def disable_service(self, service_name): cmd = ['/opt/nedge/neadm/neadm', 'service', 'disable', service_name] if not self.is_service_exist(service_name): print("disable_service: Service {} does not exist".format( service_name)) return if self.is_service_enabled(service_name): self.exec_cmd('disable_service', cmd) else: print("disable_service: Service {} already disabled".format( service_name)) def add_node_to_service(self, service_name, sid, unit_id): cmd = ['/opt/nedge/neadm/neadm', 'service', 'add', service_name, sid] if not self.is_node_exist(service_name, sid): self.exec_cmd('add_node_to_service', cmd) # add node to persistent db # self.db.add(sid, unit_id, service_name) else: print("\tadd_node_to_service:" "Node {0} already exist as service node".format(sid)) self.db.add(sid, unit_id, service_name) def get_service_node_count(self, service_name): services = self.get_all_services() return len(services.get_service_nodes(service_name)) def remove_node_by_unit_id(self, unit_id): service = self.db.find(unit_id=unit_id) if len(service) > 0: sid = service[0]['sid'] service_name = service[0]['service'] self.remove_node_from_service(service_name, sid, unit_id) else: print("Can't find service by unit_id:{}".format(unit_id)) def disable_service_by_unit_id(self, unit_id): service = self.db.find(unit_id=unit_id) if len(service) > 0: service_name = service[0]['service'] print("service to disable is :{}".format(service_name)) self.disable_service(service_name) else: print("Can't find service by unit_id:{}".format(unit_id)) def remove_node_from_service(self, service_name, sid, unit_id): cmd = ['/opt/nedge/neadm/neadm', 'service', 'remove', service_name, sid] if self.is_node_exist(service_name, sid): self.exec_cmd('remove_node_from_service', cmd) node_count = self.get_service_node_count(service_name) if node_count == 0: self.delete_service(service_name) else: print("\tremove_node_from_service: " "Node {} does not exist to remove".format(sid)) # remove from persistent db self.db.remove(sid, unit_id) def print_services(self): service_list = self.get_all_services() service_list.show() class NeadmServiceList: def __init__(self): # service records array self.service_records = [] self.exit_code = 0 self.output = "" def is_correct(self): return True if self.exit_code == 0 else False def get_all(self): return self.service_records def get_service_nodes(self, service_name): return filter(lambda service: service['name'] == service_name and service['sid'] != '-', self.service_records) def get_iscsi_nodes(self): return filter(lambda service: service['type'] == 'iscsi' and service['sid'] != '-', self.service_records) def get_iscsi_nodes_by_service_name(self, service_name): return filter(lambda service: service['type'] == 'iscsi' 
and service['name'] == service_name and service['sid'] != '-', self.service_records) def get_swift_nodes(self): return filter(lambda service: service['type'] == 'swift' and service['sid'] != '-', self.service_records) def get_swift_nodes_by_service_name(self, service_name): return filter(lambda service: service['type'] == 'swift' and service['name'] == service_name and service['sid'] != '-', self.service_records) # is node present into whole services list def is_already_listed(self, sid): return True if filter(lambda service: service['sid'] == sid, self.service_records) else False # is node presented in service already def is_already_in_service(self, service_name, sid): return True if filter(lambda service: service['sid'] == sid and service['name'] == service_name, self.service_records) else False def is_service_exist(self, service_name): return True if filter(lambda service: service['name'] == service_name, self.service_records) else False def is_service_enabled(self, service_name): nodes = self.get_service_nodes(service_name) print(nodes) if len(nodes) > 0: if nodes[0]['status'] == 'enabled': return True return False def append(self, service_record): self.service_records.append(service_record) # def show(self): # print('TYPE\t\tNAME\t\t\tID\t\t\tSTATE\t\t\tUNIT_ID') # for record in self.service_records: # print("{0:<{col0}}{1:<{col1}}{2:<{col2}}"+ # "{3:<{col3}}{4:<{col4}}".format( # record['type'], # record['name'], # record['sid'], # record['status'], # record['unit_id'], # col0=8, # col1=20, # col2=36, # col3=12, # col4=16)) # print("")
nexentaedge/neadmServiceWrapper.py
11,309
1,194
en
0.695027
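A hedged sketch of how NeadmServiceWrapper above appears intended to be driven, based only on the methods it defines. The database handle and names below are hypothetical (ServiceDB's constructor argument is assumed to be a path-like handle), and every call ultimately shells out to /opt/nedge/neadm, so this only runs on a host where that CLI is installed.

# Hypothetical driver; 'services.db', the service name and the node identifiers are made up.
wrapper = NeadmServiceWrapper('services.db')
wrapper.create_iscsi_service('cinder-iscsi')
wrapper.add_node_to_service('cinder-iscsi', sid='ABCDEF0123456789', unit_id='cinder/0')
wrapper.enable_service('cinder-iscsi')
wrapper.print_services()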
#!/usr/bin/env python3
#
# Copyright (c) 2019 LG Electronics, Inc.
#
# This software contains code licensed as described in LICENSE.
#

import os
import lgsvl
import random
import time
from pathlib import Path
import json

sim = lgsvl.Simulator(os.environ.get("SIMULATOR_HOST", "127.0.0.1"), 8181)

layer_mask = 0
layer_mask |= 1 << 0  # 0 is the layer for the road (default)

if sim.current_scene == "SanFrancisco":
    sim.reset()
else:
    sim.load("SanFrancisco")

# if sim.current_scene == "Testbed":
#     sim.reset()
# else:
#     sim.load("Testbed")

spawns = sim.get_spawn()
spawns[0].position.x = 705.6
spawns[0].position.y = 10.1
spawns[0].position.z = -308.7
spawns[0].rotation.y -= 95

forward = lgsvl.utils.transform_to_forward(spawns[0])
right = lgsvl.utils.transform_to_right(spawns[0])

state = lgsvl.AgentState()
# state.transform.position = spawns[0].position
state.transform.position = spawns[0].position
state.transform.rotation = spawns[0].rotation
ego = sim.add_agent("SingleLiDAR (Autoware)", lgsvl.AgentType.EGO, state)
ego.connect_bridge(os.environ.get("BRIDGE_HOST", "127.0.0.1"), 9090)

# ------- Stand vehicle -------#
# set stand vehicle's initial position
pose_arr = [
    (-3, 5),
    (-3, 10),
    (-3, 15),
    (-3, 20),
    (-5, 25),
    (3, 30),
    (-1, 40),
    (-6, 33)
]

sv_state_arr = []
for (x, y) in pose_arr:
    sv_state_arr.append(lgsvl.AgentState())
    sv_state_arr[-1].transform.position = spawns[0].position + y * forward + x * right
    sv_state_arr[-1].transform.rotation = spawns[0].rotation
    _ = sim.add_agent("Sedan", lgsvl.AgentType.NPC, sv_state_arr[-1])

# for i in range(30):
#     sv_state_arr.append(lgsvl.AgentState())
#     sv_state_arr[-1].transform.position = spawns[0].position + (150 + i * 7) * forward + 3.5 * right
#     sv_state_arr[-1].transform.rotation = spawns[0].rotation
#     _ = sim.add_agent("Sedan", lgsvl.AgentType.NPC, sv_state_arr[-1])

# for i in range(30):
#     sv_state_arr.append(lgsvl.AgentState())
#     sv_state_arr[-1].transform.position = spawns[0].position + (150 + i * 7) * forward - 6 * right
#     sv_state_arr[-1].transform.rotation = spawns[0].rotation
#     _ = sim.add_agent("Sedan", lgsvl.AgentType.NPC, sv_state_arr[-1])

sim.run()
autoware.ai/autoware_files/lgsvl_file/scripts/testbed_scenario/sanfrancisco.py
2,198
923
en
0.317903
""" Space object. Refer: https://developer.twitter.com/en/docs/twitter-api/data-dictionary/object-model/space """ from dataclasses import dataclass, field from typing import List, Optional from .base import BaseModel @dataclass class Space(BaseModel): """ A class representing the space object. """ id: Optional[str] = field(default=None) state: Optional[str] = field(default=None) created_at: Optional[str] = field(default=None, repr=False) host_ids: Optional[List[str]] = field(default=None, repr=False) lang: Optional[str] = field(default=None, repr=False) is_ticketed: Optional[bool] = field(default=None, repr=False) invited_user_ids: Optional[List[str]] = field(default=None, repr=False) participant_count: Optional[int] = field(default=None, repr=False) scheduled_start: Optional[str] = field(default=None, repr=False) speaker_ids: Optional[List[str]] = field(default=None, repr=False) started_at: Optional[str] = field(default=None, repr=False) title: Optional[str] = field(default=None, repr=False) updated_at: Optional[str] = field(default=None, repr=False)
pytwitter/models/space.py
1,146
145
en
0.542582
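Because the Space model above is a plain dataclass with every attribute optional, it can be constructed directly from whichever fields a Spaces API response provides. A small sketch with made-up values follows, assuming it runs in a context where pytwitter's BaseModel is importable.

# Sketch with made-up values; only fields defined on the dataclass are used.
space = Space(
    id="1DXxyRYNejbKM",
    state="live",
    title="Example audio space",
    participant_count=420,
    host_ids=["2244994945"],
)
print(space)          # repr shows only id and state; the other fields use repr=False
print(space.title)    # "Example audio space"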
"""Make / Download Telegram Sticker Packs without installing Third Party applications Available Commands: .kangsticker [Optional Emoji] .packinfo .getsticker""" from telethon import events from io import BytesIO from PIL import Image import asyncio import datetime from collections import defaultdict import math import os import requests import zipfile from telethon.errors.rpcerrorlist import StickersetInvalidError from telethon.errors import MessageNotModifiedError from telethon.tl.functions.account import UpdateNotifySettingsRequest from telethon.tl.functions.messages import GetStickerSetRequest from telethon.tl.types import ( DocumentAttributeFilename, DocumentAttributeSticker, InputMediaUploadedDocument, InputPeerNotifySettings, InputStickerSetID, InputStickerSetShortName, MessageMediaPhoto ) from uniborg.util import admin_cmd @borg.on(admin_cmd(pattern="kangsticker ?(.*)")) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to a photo to add to my personal sticker pack.") return reply_message = await event.get_reply_message() sticker_emoji = "🔥" input_str = event.pattern_match.group(1) if input_str: sticker_emoji = input_str me = borg.me userid = event.from_id packname = f"{userid}'s @MC0917 Pack" packshortname = f"MC_0917_{userid}" # format: Uni_Borg_userid is_a_s = is_it_animated_sticker(reply_message) file_ext_ns_ion = "@MC0917_Sticker.png" file = await borg.download_file(reply_message.media) uploaded_sticker = None if is_a_s: file_ext_ns_ion = "AnimatedSticker.tgs" uploaded_sticker = await borg.upload_file(file, file_name=file_ext_ns_ion) packname = f"{userid}'s @AnimatedStickersGroup" packshortname = f"MC_0917_{userid}_as" # format: Uni_Borg_userid elif not is_message_image(reply_message): await event.edit("Invalid message type") return else: with BytesIO(file) as mem_file, BytesIO() as sticker: resize_image(mem_file, sticker) sticker.seek(0) uploaded_sticker = await borg.upload_file(sticker, file_name=file_ext_ns_ion) await event.edit("Processing this sticker. Please Wait!") async with borg.conversation("@Stickers") as bot_conv: now = datetime.datetime.now() dt = now + datetime.timedelta(minutes=1) if not await stickerset_exists(bot_conv, packshortname): await silently_send_message(bot_conv, "/cancel") if is_a_s: response = await silently_send_message(bot_conv, "/newanimated") else: response = await silently_send_message(bot_conv, "/newpack") if "Yay!" not in response.text: await event.edit(f"**FAILED**! @Stickers replied: {response.text}") return response = await silently_send_message(bot_conv, packname) if not response.text.startswith("Alright!"): await event.edit(f"**FAILED**! @Stickers replied: {response.text}") return w = await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if "Sorry" in response.text: await event.edit(f"**FAILED**! @Stickers replied: {response.text}") return await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/publish") response = await silently_send_message(bot_conv, f"<{packname}>") await silently_send_message(bot_conv, "/skip") response = await silently_send_message(bot_conv, packshortname) if response.text == "Sorry, this short name is already taken.": await event.edit(f"**FAILED**! 
@Stickers replied: {response.text}") return else: await silently_send_message(bot_conv, "/cancel") await silently_send_message(bot_conv, "/addsticker") await silently_send_message(bot_conv, packshortname) await bot_conv.send_file( file=uploaded_sticker, allow_cache=False, force_document=True ) response = await bot_conv.get_response() if "Sorry" in response.text: await event.edit(f"**FAILED**! @Stickers replied: {response.text}") return await silently_send_message(bot_conv, sticker_emoji) await silently_send_message(bot_conv, "/done") await event.edit(f"sticker added! Your pack can be found [here](t.me/addstickers/{packshortname})") @borg.on(admin_cmd(pattern="packinfo")) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to any sticker to get it's pack info.") return rep_msg = await event.get_reply_message() if not rep_msg.document: await event.edit("Reply to any sticker to get it's pack info.") return stickerset_attr_s = rep_msg.document.attributes stickerset_attr = find_instance(stickerset_attr_s, DocumentAttributeSticker) if not stickerset_attr.stickerset: await event.edit("sticker does not belong to a pack.") return get_stickerset = await borg( GetStickerSetRequest( InputStickerSetID( id=stickerset_attr.stickerset.id, access_hash=stickerset_attr.stickerset.access_hash ) ) ) pack_emojis = [] for document_sticker in get_stickerset.packs: if document_sticker.emoticon not in pack_emojis: pack_emojis.append(document_sticker.emoticon) await event.edit(f"**Sticker Title:** `{get_stickerset.set.title}\n`" f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n" f"**Official:** `{get_stickerset.set.official}`\n" f"**Archived:** `{get_stickerset.set.archived}`\n" f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n" f"**Emojis In Pack:** {' '.join(pack_emojis)}") @borg.on(admin_cmd(pattern="getsticker ?(.*)")) async def _(event): if event.fwd_from: return input_str = event.pattern_match.group(1) if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) if event.reply_to_msg_id: reply_message = await event.get_reply_message() # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 if not reply_message.sticker: return sticker = reply_message.sticker sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker) if not sticker_attrib.stickerset: await event.reply("This sticker is not part of a pack") return is_a_s = is_it_animated_sticker(reply_message) file_ext_ns_ion = "webp" file_caption = "https://t.me/RoseSupportChat/33801" if is_a_s: file_ext_ns_ion = "tgs" file_caption = "Forward the ZIP file to @AnimatedStickersRoBot to get lottIE JSON containing the vector information." 
sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset)) pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, sticker_set.set.short_name, "pack.txt") if os.path.isfile(pack_file): os.remove(pack_file) # Sticker emojis are retrieved as a mapping of # <emoji>: <list of document ids that have this emoji> # So we need to build a mapping of <document id>: <list of emoji> # Thanks, Durov emojis = defaultdict(str) for pack in sticker_set.packs: for document_id in pack.documents: emojis[document_id] += pack.emoticon async def download(sticker, emojis, path, file): await borg.download_media(sticker, file=os.path.join(path, file)) with open(pack_file, "a") as f: f.write(f"{{'image_file': '{file}','emojis':{emojis[sticker.id]}}},") pending_tasks = [ asyncio.ensure_future( download(document, emojis, Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name, f"{i:03d}.{file_ext_ns_ion}") ) for i, document in enumerate(sticker_set.documents) ] await event.edit(f"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}...") num_tasks = len(pending_tasks) while 1: done, pending_tasks = await asyncio.wait(pending_tasks, timeout=2.5, return_when=asyncio.FIRST_COMPLETED) try: await event.edit( f"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}") except MessageNotModifiedError: pass if not pending_tasks: break await event.edit("Downloading to my local completed") # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name zipf = zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED) zipdir(directory_name, zipf) zipf.close() await borg.send_file( event.chat_id, directory_name + ".zip", caption=file_caption, force_document=True, allow_cache=False, reply_to=event.message.id, progress_callback=progress ) try: os.remove(directory_name + ".zip") os.remove(directory_name) except: pass await event.edit("task Completed") await asyncio.sleep(3) await event.delete() else: await event.edit("TODO: Not Implemented") # Helpers def is_it_animated_sticker(message): try: if message.media and message.media.document: mime_type = message.media.document.mime_type if "tgsticker" in mime_type: return True else: return False else: return False except: return False def is_message_image(message): if message.media: if isinstance(message.media, MessageMediaPhoto): return True if message.media.document: if message.media.document.mime_type.split("/")[0] == "image": return True return False return False async def silently_send_message(conv, text): await conv.send_message(text) response = await conv.get_response() await conv.mark_read(message=response) return response async def stickerset_exists(conv, setname): try: await borg(GetStickerSetRequest(InputStickerSetShortName(setname))) response = await silently_send_message(conv, "/addsticker") if response.text == "Invalid pack selected.": await silently_send_message(conv, "/cancel") return False await silently_send_message(conv, "/cancel") return True except StickersetInvalidError: return False def resize_image(image, save_locaton): """ Copyright Rhyse Simpson: https://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py """ im = Image.open(image) maxsize = (512, 512) if (im.width and im.height) < 512: size1 = im.width size2 = im.height if im.width > im.height: scale = 512 / size1 size1new = 512 size2new = size2 * scale else: scale = 512 / size2 size1new = size1 * scale size2new = 512 
size1new = math.floor(size1new) size2new = math.floor(size2new) sizenew = (size1new, size2new) im = im.resize(sizenew) else: im.thumbnail(maxsize) im.save(save_locaton, "PNG") def progress(current, total): logger.info("Uploaded: {} of {}\nCompleted {}".format(current, total, (current / total) * 100)) def find_instance(items, class_or_tuple): for item in items: if isinstance(item, class_or_tuple): return item return None def zipdir(path, ziph): # ziph is zipfile handle for root, dirs, files in os.walk(path): for file in files: ziph.write(os.path.join(root, file)) os.remove(os.path.join(root, file))
stdplugins/stickers.py
12,714
Copyright Rhyse Simpson: https://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py Make / Download Telegram Sticker Packs without installing Third Party applications Available Commands: .kangsticker [Optional Emoji] .packinfo .getsticker format: Uni_Borg_userid format: Uni_Borg_userid https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 Sticker emojis are retrieved as a mapping of <emoji>: <list of document ids that have this emoji> So we need to build a mapping of <document id>: <list of emoji> Thanks, Durov https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7 Helpers ziph is zipfile handle
638
en
0.682723
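A standalone sketch of the resize rule that the plugin's resize_image() helper implements: Telegram static stickers are PNGs whose longest side is 512 px, so small images are scaled up to hit 512 on the larger side and big images are shrunk with thumbnail(). This is an illustration of the same idea (the function name to_sticker_png is made up), not the plugin's exact code path.

from io import BytesIO
from PIL import Image

def to_sticker_png(src_bytes: bytes) -> BytesIO:
    im = Image.open(BytesIO(src_bytes))
    if max(im.size) < 512:
        # upscale so the larger side becomes exactly 512, keeping the aspect ratio
        scale = 512 / max(im.size)
        im = im.resize((int(im.width * scale), int(im.height * scale)))
    else:
        # downscale in place; thumbnail() never enlarges an image
        im.thumbnail((512, 512))
    out = BytesIO()
    im.save(out, "PNG")
    out.seek(0)
    return out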
#!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test processing of unrequested blocks. Setup: two nodes, node0+node1, not connected to each other. Node1 will have nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks. We have one P2PInterface connection to node0 called test_node, and one to node1 called min_work_node. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. The tip should advance for node0, but node1 should skip processing due to nMinimumChainWork. Node1 is unused in tests 3-7: 3. Mine a block that forks from the genesis block, and deliver to test_node. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more or equal work to the tip. 4a,b. Send another two blocks that build on the forking block. Node0 should process the second block but be stuck on the shorter chain, because it's missing an intermediate block. 4c.Send 288 more blocks on the longer chain (the number of blocks ahead we currently store). Node0 should process all but the last block (too far ahead in height). 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that Node0 has the missing height 2 block and send a getdata. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. 8. Create a fork which is invalid at a height longer than the current chain (ie to which the node will try to reorg) but which has headers built on top of the invalid block. Check that we get disconnected if we send more headers on the chain the node now knows to be invalid. 9. Test Node1 is able to sync when connected to node0 (which should have sufficient work on its chain). """ from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import time from test_framework.blocktools import create_block, create_coinbase, create_transaction class AcceptBlockTest(BitcoinTestFramework): def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("BITCOIND", "uexd"), help="uexd binary to test") def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[], ["-minimumchainwork=0x10"]] def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from peers which are not whitelisted, while Node1 will be used for # the whitelisted case. # Node2 will be used for non-whitelisted peers to test the interaction # with nMinimumChainWork. self.setup_nodes() def run_test(self): # Setup the p2p connections and start up the network thread. # test_node connects to node0 (not whitelisted) test_node = self.nodes[0].add_p2p_connection(P2PInterface()) # min_work_node connects to node1 (whitelisted) min_work_node = self.nodes[1].add_p2p_connection(P2PInterface()) network_thread_start() # Test logic begins here test_node.wait_for_verack() min_work_node.wait_for_verack() # 1. Have nodes mine a block (leave IBD) [ n.generate(1) for n in self.nodes ] tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ] # 2. 
Send one block that builds on each tip. # This should be accepted by node0 blocks_h2 = [] # the height 2 blocks on each node's chain block_time = int(time.time()) + 1 for i in range(2): blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time)) blocks_h2[i].solve() block_time += 1 test_node.send_message(msg_block(blocks_h2[0])) min_work_node.send_message(msg_block(blocks_h2[1])) for x in [test_node, min_work_node]: x.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 1) self.log.info("First height 2 block accepted by node0; correctly rejected by node1") # 3. Send another block that builds on genesis. block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time) block_time += 1 block_h1f.solve() test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h1f.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash) # 4. Send another two block that build on the fork. block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time) block_time += 1 block_h2f.solve() test_node.send_message(msg_block(block_h2f)) test_node.sync_with_ping() # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h2f.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) # But this block should be accepted by node since it has equal work. self.nodes[0].getblock(block_h2f.hash) self.log.info("Second height 2 block accepted, but not reorg'ed to") # 4b. Now send another block that builds on the forking chain. block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1) block_h3.solve() test_node.send_message(msg_block(block_h3)) test_node.sync_with_ping() # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h3.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) self.nodes[0].getblock(block_h3.hash) # But this block should be accepted by node since it has more work. self.nodes[0].getblock(block_h3.hash) self.log.info("Unrequested more-work block accepted") # 4c. 
Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node (as long as its not missing any headers) tip = block_h3 all_blocks = [] for i in range(288): next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1) next_block.solve() all_blocks.append(next_block) tip = next_block # Now send the block at height 5 and check that it wasn't accepted (missing header) test_node.send_message(msg_block(all_blocks[1])) test_node.sync_with_ping() assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash) assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash) # The block at height 5 should be accepted if we provide the missing header, though headers_message = msg_headers() headers_message.headers.append(CBlockHeader(all_blocks[0])) test_node.send_message(headers_message) test_node.send_message(msg_block(all_blocks[1])) test_node.sync_with_ping() self.nodes[0].getblock(all_blocks[1].hash) # Now send the blocks in all_blocks for i in range(288): test_node.send_message(msg_block(all_blocks[i])) test_node.sync_with_ping() # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead for x in all_blocks[:-1]: self.nodes[0].getblock(x.hash) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash) # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). # The node should have requested the blocks at some point, so # disconnect/reconnect first self.nodes[0].disconnect_p2ps() self.nodes[1].disconnect_p2ps() network_thread_join() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) network_thread_start() test_node.wait_for_verack() test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) self.log.info("Unrequested block that would complete more-work chain was ignored") # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). with mininode_lock: # Clear state so we can check the getdata request test_node.last_message.pop("getdata", None) test_node.send_message(msg_inv([CInv(2, block_h3.sha256)])) test_node.sync_with_ping() with mininode_lock: getdata = test_node.last_message["getdata"] # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, block_h1f.sha256) self.log.info("Inv at tip triggered getdata for unprocessed block") # 7. Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) self.nodes[0].getblock(all_blocks[286].hash) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash) self.log.info("Successfully reorged to longer chain from non-whitelisted peer") # 8. 
Create a chain which is invalid at a height longer than the # current chain, but which has more blocks on top of that block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1) block_289f.solve() block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1) block_290f.solve() block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1) # block_291 spends a coinbase below maturity! block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1)) block_291.hashMerkleRoot = block_291.calc_merkle_root() block_291.solve() block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1) block_292.solve() # Now send all the headers on the chain and enough blocks to trigger reorg headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_289f)) headers_message.headers.append(CBlockHeader(block_290f)) headers_message.headers.append(CBlockHeader(block_291)) headers_message.headers.append(CBlockHeader(block_292)) test_node.send_message(headers_message) test_node.sync_with_ping() tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_292.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash) test_node.send_message(msg_block(block_289f)) test_node.send_message(msg_block(block_290f)) test_node.sync_with_ping() self.nodes[0].getblock(block_289f.hash) self.nodes[0].getblock(block_290f.hash) test_node.send_message(msg_block(block_291)) # At this point we've sent an obviously-bogus block, wait for full processing # without assuming whether we will be disconnected or not try: # Only wait a short while so the test doesn't take forever if we do get # disconnected test_node.sync_with_ping(timeout=1) except AssertionError: test_node.wait_for_disconnect() self.nodes[0].disconnect_p2ps() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) network_thread_start() test_node.wait_for_verack() # We should have failed reorg and switched back to 290 (but have block 291) assert_equal(self.nodes[0].getblockcount(), 290) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1) # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1) block_293.solve() headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_293)) test_node.send_message(headers_message) test_node.wait_for_disconnect() # 9. Connect node1 to node0 and ensure it is able to sync connect_nodes(self.nodes[0], 1) sync_blocks([self.nodes[0], self.nodes[1]]) self.log.info("Successfully synced nodes 1 and 0") if __name__ == '__main__': AcceptBlockTest().main()
test/functional/p2p_unrequested_blocks.py
14,257
Test processing of unrequested blocks. Setup: two nodes, node0+node1, not connected to each other. Node1 will have nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks. We have one P2PInterface connection to node0 called test_node, and one to node1 called min_work_node. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. The tip should advance for node0, but node1 should skip processing due to nMinimumChainWork. Node1 is unused in tests 3-7: 3. Mine a block that forks from the genesis block, and deliver to test_node. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more or equal work to the tip. 4a,b. Send another two blocks that build on the forking block. Node0 should process the second block but be stuck on the shorter chain, because it's missing an intermediate block. 4c.Send 288 more blocks on the longer chain (the number of blocks ahead we currently store). Node0 should process all but the last block (too far ahead in height). 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that Node0 has the missing height 2 block and send a getdata. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. 8. Create a fork which is invalid at a height longer than the current chain (ie to which the node will try to reorg) but which has headers built on top of the invalid block. Check that we get disconnected if we send more headers on the chain the node now knows to be invalid. 9. Test Node1 is able to sync when connected to node0 (which should have sufficient work on its chain). !/usr/bin/env python3 Copyright (c) 2015-2017 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Node0 will be used to test behavior of processing unrequested blocks from peers which are not whitelisted, while Node1 will be used for the whitelisted case. Node2 will be used for non-whitelisted peers to test the interaction with nMinimumChainWork. Setup the p2p connections and start up the network thread. test_node connects to node0 (not whitelisted) min_work_node connects to node1 (whitelisted) Test logic begins here 1. Have nodes mine a block (leave IBD) 2. Send one block that builds on each tip. This should be accepted by node0 the height 2 blocks on each node's chain 3. Send another block that builds on genesis. 4. Send another two block that build on the fork. Since the earlier block was not processed by node, the new block can't be fully validated. But this block should be accepted by node since it has equal work. 4b. Now send another block that builds on the forking chain. Since the earlier block was not processed by node, the new block can't be fully validated. But this block should be accepted by node since it has more work. 4c. 
Now mine 288 more blocks and deliver; all should be processed but the last (height-too-high) on node (as long as its not missing any headers) Now send the block at height 5 and check that it wasn't accepted (missing header) The block at height 5 should be accepted if we provide the missing header, though Now send the blocks in all_blocks Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead 5. Test handling of unrequested block on the node that didn't process Should still not be processed (even though it has a child that has more work). The node should have requested the blocks at some point, so disconnect/reconnect first 6. Try to get node to request the missing block. Poke the node with an inv for block at height 3 and see if that triggers a getdata on block 2 (it should if block 2 is missing). Clear state so we can check the getdata request Check that the getdata includes the right block 7. Send the missing block for the third time (now it is requested) 8. Create a chain which is invalid at a height longer than the current chain, but which has more blocks on top of that block_291 spends a coinbase below maturity! Now send all the headers on the chain and enough blocks to trigger reorg At this point we've sent an obviously-bogus block, wait for full processing without assuming whether we will be disconnected or not Only wait a short while so the test doesn't take forever if we do get disconnected We should have failed reorg and switched back to 290 (but have block 291) Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected 9. Connect node1 to node0 and ensure it is able to sync
4,893
en
0.929681
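A sketch of the block-building pattern the test above relies on: each block is created from the previous tip with create_block()/create_coinbase(), solved (proof of work), and only then delivered to the node under test. The helper names come from the test's own imports (test_framework.blocktools, test_framework.mininode); build_fork is an illustrative wrapper, and having a running test framework with a genesis tip is assumed.

import time
from test_framework.blocktools import create_block, create_coinbase
from test_framework.mininode import msg_block

def build_fork(tip_hash, start_height, length, block_time=None):
    """Return `length` solved blocks extending tip_hash, without sending them."""
    block_time = block_time or int(time.time()) + 1
    blocks = []
    for height in range(start_height, start_height + length):
        block = create_block(tip_hash, create_coinbase(height), block_time)
        block.solve()                      # grind the nonce until the header passes PoW
        blocks.append(block)
        tip_hash, block_time = block.sha256, block_time + 1
    return blocks

# usage inside a test (test_node is a P2PInterface connection, as in the file above):
#   for b in build_fork(genesis_hash, 1, 3):
#       test_node.send_message(msg_block(b))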
import networkx
import random


def regularize_graph(graph, d):
    regularized = True
    for node_id in list(graph.nodes()):
        if graph.in_degree(node_id) != d or graph.out_degree(node_id) != d:
            regularized = False
            break
    while not regularized:
        lost_in_degree_ids = []
        full_in_degree_ids = []
        for node_id in list(graph.nodes()):
            if graph.in_degree(node_id) < d:
                lost_in_degree_ids.append(node_id)
            elif graph.in_degree(node_id) == d:
                full_in_degree_ids.append(node_id)
            else:
                raise Exception('In degree too large')
        lost_in_degree_ids = random.sample(lost_in_degree_ids, len(lost_in_degree_ids))
        lost_outdegree_ids = []
        full_outdegree_ids = []
        for node_id in list(graph.nodes()):
            if graph.out_degree(node_id) < d:
                lost_outdegree_ids.append(node_id)
            elif graph.out_degree(node_id) == d:
                full_outdegree_ids.append(node_id)
            else:
                raise Exception('Out degree too large')
        lost_outdegree_ids = random.sample(lost_outdegree_ids, len(lost_outdegree_ids))
        if len(lost_in_degree_ids) != len(lost_outdegree_ids):
            raise Exception('Number of missing in and out degrees do not match')
        for i in range(len(lost_in_degree_ids)):
            full_in_degree_ids = random.sample(full_in_degree_ids, len(full_in_degree_ids))
            full_outdegree_ids = random.sample(full_outdegree_ids, len(full_outdegree_ids))
            lost_in_degree_id = lost_in_degree_ids[i]
            lost_outdegree_id = lost_outdegree_ids[i]
            # Find appropriate (full_outdegree_id, full_in_degree_id) pair
            full_in_degree_id = -1
            full_outdegree_id = -1
            for fod_id in full_outdegree_ids:
                if fod_id != lost_in_degree_id:
                    suc_ids = list(graph.successors(fod_id))
                    for suc_id in suc_ids:
                        if (suc_id in full_in_degree_ids) and (suc_id != lost_outdegree_id):
                            full_in_degree_id = suc_id
                            full_outdegree_id = fod_id
                            break
                if full_in_degree_id != -1 and full_outdegree_id != -1:
                    break
            # Patch
            graph.remove_edge(full_outdegree_id, full_in_degree_id)
            graph.add_edge(full_outdegree_id, lost_in_degree_id)
            graph.add_edge(lost_outdegree_id, full_in_degree_id)
        regularized = True
        for node_id in list(graph.nodes()):
            if graph.in_degree(node_id) != d or graph.out_degree(node_id) != d:
                regularized = False
                break
    return graph
exact-2-regular-k/regularize_graph.py
2,438
Find appropriate (full_outdegree_id, full_in_degree_id) pair Patch
66
en
0.516314
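A small usage sketch for regularize_graph() above, assuming networkx is installed and the function is importable from the module listed. It starts from a directed d-regular circulant graph, removes one edge so exactly one node is short an out-edge and one node is short an in-edge, and lets regularize_graph rewire the graph back to d-regular.

import networkx as nx
from regularize_graph import regularize_graph  # module path as listed above

n, d = 10, 2
g = nx.DiGraph()
g.add_nodes_from(range(n))
for u in range(n):
    for k in range(1, d + 1):
        g.add_edge(u, (u + k) % n)   # circulant: every node has out-degree d and in-degree d

g.remove_edge(0, 1)                  # node 0 loses an out-edge, node 1 loses an in-edge
g = regularize_graph(g, d)

assert all(g.in_degree(v) == d and g.out_degree(v) == d for v in g.nodes())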
#!/usr/bin/env python # Brief: This node subscribes to /tracked_humans and publishes the predicted goal to humans based on their trajectory # Author: Phani Teja Singamaneni import numpy as np import rospy import tf from geometry_msgs.msg import Point, PoseStamped from human_msgs.msg import TrackedHumans, TrackedHuman, TrackedSegmentType from human_path_prediction.msg import PredictedGoal from scipy.stats import multivariate_normal from std_srvs.srv import SetBool, Trigger, TriggerResponse EPS = 1e-12 class PredictGoal(object): def __init__(self, human_num=1): self.human_num = human_num # laas_adream self.goals_x = [1.5, 7.0, 9.0, 10.5, 1.5, 10.3, 8.5] self.goals_y = [2.0, 8.0, 12.5, 15.0, 15.0, 1.5, -4.5] self.goal_num = 7 # maze # self.goals_x = [1.5,1.5,1.5,1.5,1.5,7.5,25,42,42,41.5,42,37,22,15.5,28.5,37,23.5,10.5,15.5,31.5,20,25.5,7] # self.goals_y = [45,15,30,60,87,87,81.5,81.5,66,41.5,22,3,3,12.5,12.5,20.5,21.5,28.5,39.5,47,53,59,59] self.predicted_goal = PoseStamped() self.last_idx = 0 self.changed = False self.current_poses = [[] for i in range(self.human_num)] self.prev_poses = [[] for i in range(self.human_num)] self.mv_nd = multivariate_normal(mean=0,cov=0.1) self.theta_phi = [[0]*self.goal_num for i in range(self.human_num)] self.window_size = 10 self.probability_goal = [np.array([1.0/self.goal_num]*self.goal_num) for i in range(self.human_num)] self.probability_goal_window = [np.array([[1.0/self.goal_num]*self.goal_num]*self.window_size) for i in range(self.human_num)] self.done = False self.itr = 0 NODE_NAME = "human_goal_predict" rospy.init_node(NODE_NAME) self.humans_sub_ = rospy.Subscriber("/tracked_humans",TrackedHumans,self.tracked_humansCB) self.goal_pub_ = rospy.Publisher(NODE_NAME+"/predicted_goal",PredictedGoal, queue_size=2) self.goal_srv_ = rospy.Service("goal_changed", Trigger, self.goal_changed) rospy.spin() def tracked_humansCB(self,msg): self.prev_poses = self.current_poses self.current_poses = [[] for i in range(self.human_num)] for human in msg.humans: for segment in human.segments: if segment.type == TrackedSegmentType.TORSO: self.current_poses[human.track_id-1].append(segment.pose.pose) if not self.done: self.prev_poses = self.current_poses for i in range(0,len(self.current_poses[0])): diff = np.linalg.norm([self.current_poses[0][i].position.x - self.prev_poses[0][i].position.x, self.current_poses[0][i].position.y - self.prev_poses[0][i].position.y]) if diff > EPS or not self.done: dist = [] for j in range(0,len(self.goals_x)): vec1 = np.array([self.goals_x[j],self.goals_y[j],0.0]) - np.array([self.current_poses[0][i].position.x,self.current_poses[0][i].position.y,0.0]) #Vector from current position to a goal rotation = (self.current_poses[0][i].orientation.x,self.current_poses[0][i].orientation.y,self.current_poses[0][i].orientation.z,self.current_poses[0][i].orientation.w) roll,pitch,yaw = tf.transformations.euler_from_quaternion(rotation) unit_vec = np.array([np.cos(yaw), np.sin(yaw),0.0]) self.theta_phi[i][j] = (np.arccos(np.dot(vec1,unit_vec)/np.linalg.norm(vec1))) dist.append(np.linalg.norm([self.current_poses[0][i].position.x - self.goals_x[j],self.current_poses[0][i].position.y - self.goals_y[j]])) self.probability_goal_window[i][self.itr] = self.mv_nd.pdf(np.array(self.theta_phi[i])); self.probability_goal[i] = np.array([1.0]*self.goal_num) for k in range(0,len(self.probability_goal_window[i])): gf = np.exp((k-self.window_size)/5) self.probability_goal[i] = np.power(self.probability_goal_window[i][k],gf)* np.array(self.probability_goal[i]) # Linear 
prediction of goal for ln in range(0,len(self.goals_x)): self.probability_goal[i][ln] = (1/dist[ln])*self.probability_goal[i][ln]; self.probability_goal[i] = (self.probability_goal[i]-np.min(self.probability_goal[i]))/(np.max(self.probability_goal[i])-np.min(self.probability_goal[i])) self.itr = self.itr + 1 if self.itr == self.window_size: self.itr = 0 self.done = True self.predict_goal() def predict_goal(self): idx = 0 max_prob = 0.0 p_goal = PredictedGoal() for i in range(0,len(self.current_poses[0])): for j in range(0,len(self.goals_x)): if(max_prob<self.probability_goal[i][j]): idx = j max_prob = self.probability_goal[i][j] self.predicted_goal.header.stamp = rospy.Time.now() self.predicted_goal.header.frame_id = 'map' self.predicted_goal.pose.position.x = self.goals_x[idx] self.predicted_goal.pose.position.y = self.goals_y[idx] self.predicted_goal.pose.position.z = 0.0 self.predicted_goal.pose.orientation = self.current_poses[0][i].orientation if self.last_idx != idx: p_goal.changed = True self.changed = True self.last_idx = idx p_goal.goal = self.predicted_goal self.goal_pub_.publish(p_goal) def goal_changed(self,req): if self.changed: self.changed = False return TriggerResponse(True,"Goal Changed") return TriggerResponse(False, "Goal not changed") if __name__ == '__main__': predict_srv = PredictGoal(60)
human_path_prediction/scripts/predict_goal.py
5,916
!/usr/bin/env python Brief: This node subscribes to /tracked_humans and publishes the predicted goal to humans based on their trajectory Author: Phani Teja Singamaneni laas_adream maze self.goals_x = [1.5,1.5,1.5,1.5,1.5,7.5,25,42,42,41.5,42,37,22,15.5,28.5,37,23.5,10.5,15.5,31.5,20,25.5,7] self.goals_y = [45,15,30,60,87,87,81.5,81.5,66,41.5,22,3,3,12.5,12.5,20.5,21.5,28.5,39.5,47,53,59,59]Vector from current position to a goal Linear prediction of goal
457
en
0.882588
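A compressed sketch of the goal-scoring rule used in tracked_humansCB above, stripped of the ROS plumbing. The names score_goals and window are illustrative, but the ingredients mirror the node's code: a Gaussian pdf over the angle between the human's heading and the direction to each goal, an exp((k - W)/5) weighting over a sliding window, a 1/distance factor, and a final min-max normalisation.

import numpy as np
from scipy.stats import multivariate_normal

mv_nd = multivariate_normal(mean=0, cov=0.1)

def score_goals(pos, yaw, goals, window):
    """pos: (x, y); yaw: heading in rad; goals: (G, 2) array; window: list of past pdf rows."""
    vec = goals - np.asarray(pos)                    # vectors from the human to each goal
    dist = np.linalg.norm(vec, axis=1)
    heading = np.array([np.cos(yaw), np.sin(yaw)])
    theta = np.arccos(vec @ heading / dist)          # angle between heading and goal direction
    window.append(mv_nd.pdf(theta))                  # well-aligned goals get the highest pdf

    prob = np.ones(len(goals))
    W = len(window)
    for k, row in enumerate(window):
        prob *= np.power(row, np.exp((k - W) / 5))   # older frames contribute less
    prob /= dist                                     # prefer nearer goals
    return (prob - prob.min()) / (prob.max() - prob.min())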
# Copyright (c) 2016 Google Inc. (under http://www.apache.org/licenses/LICENSE-2.0)

def f1(x):
    return 1

def f1(x):
    return 'foo'

def f2(x):
    pass

def f2(x,y):
    pass

def f3(x):
    return 1+x

def f3(x):
    return 'asd'+x
pytype/tools/merge_pyi/test_data/redefine.py
236
Copyright (c) 2016 Google Inc. (under http://www.apache.org/licenses/LICENSE-2.0)
81
en
0.451736
# -*- coding: utf-8 -*- """Tests for various magic functions. Needs to be run by nose (to make ipython session available). """ import io import os import re import sys import warnings from unittest import TestCase from importlib import invalidate_caches from io import StringIO import nose.tools as nt import shlex from IPython import get_ipython from IPython.core import magic from IPython.core.error import UsageError from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, register_line_magic, register_cell_magic) from IPython.core.magics import execution, script, code, logging, osm from IPython.testing import decorators as dec from IPython.testing import tools as tt from IPython.utils.io import capture_output from IPython.utils.tempdir import (TemporaryDirectory, TemporaryWorkingDirectory) from IPython.utils.process import find_cmd _ip = get_ipython() @magic.magics_class class DummyMagics(magic.Magics): pass def test_extract_code_ranges(): instr = "1 3 5-6 7-9 10:15 17: :10 10- -13 :" expected = [(0, 1), (2, 3), (4, 6), (6, 9), (9, 14), (16, None), (None, 9), (9, None), (None, 13), (None, None)] actual = list(code.extract_code_ranges(instr)) nt.assert_equal(actual, expected) def test_extract_symbols(): source = """import foo\na = 10\ndef b():\n return 42\n\n\nclass A: pass\n\n\n""" symbols_args = ["a", "b", "A", "A,b", "A,a", "z"] expected = [([], ['a']), (["def b():\n return 42\n"], []), (["class A: pass\n"], []), (["class A: pass\n", "def b():\n return 42\n"], []), (["class A: pass\n"], ['a']), ([], ['z'])] for symbols, exp in zip(symbols_args, expected): nt.assert_equal(code.extract_symbols(source, symbols), exp) def test_extract_symbols_raises_exception_with_non_python_code(): source = ("=begin A Ruby program :)=end\n" "def hello\n" "puts 'Hello world'\n" "end") with nt.assert_raises(SyntaxError): code.extract_symbols(source, "hello") def test_magic_not_found(): # magic not found raises UsageError with nt.assert_raises(UsageError): _ip.magic('doesntexist') # ensure result isn't success when a magic isn't found result = _ip.run_cell('%doesntexist') assert isinstance(result.error_in_exec, UsageError) def test_cell_magic_not_found(): # magic not found raises UsageError with nt.assert_raises(UsageError): _ip.run_cell_magic('doesntexist', 'line', 'cell') # ensure result isn't success when a magic isn't found result = _ip.run_cell('%%doesntexist') assert isinstance(result.error_in_exec, UsageError) def test_magic_error_status(): def fail(shell): 1/0 _ip.register_magic_function(fail) result = _ip.run_cell('%fail') assert isinstance(result.error_in_exec, ZeroDivisionError) def test_config(): """ test that config magic does not raise can happen if Configurable init is moved too early into Magics.__init__ as then a Config object will be registered as a magic. """ ## should not raise. _ip.magic('config') def test_config_available_configs(): """ test that config magic prints available configs in unique and sorted order. """ with capture_output() as captured: _ip.magic('config') stdout = captured.stdout config_classes = stdout.strip().split('\n')[1:] nt.assert_list_equal(config_classes, sorted(set(config_classes))) def test_config_print_class(): """ test that config with a classname prints the class's options. 
""" with capture_output() as captured: _ip.magic('config TerminalInteractiveShell') stdout = captured.stdout if not re.match("TerminalInteractiveShell.* options", stdout.splitlines()[0]): print(stdout) raise AssertionError("1st line of stdout not like " "'TerminalInteractiveShell.* options'") def test_rehashx(): # clear up everything _ip.alias_manager.clear_aliases() del _ip.db['syscmdlist'] _ip.magic('rehashx') # Practically ALL ipython development systems will have more than 10 aliases nt.assert_true(len(_ip.alias_manager.aliases) > 10) for name, cmd in _ip.alias_manager.aliases: # we must strip dots from alias names nt.assert_not_in('.', name) # rehashx must fill up syscmdlist scoms = _ip.db['syscmdlist'] nt.assert_true(len(scoms) > 10) def test_magic_parse_options(): """Test that we don't mangle paths when parsing magic options.""" ip = get_ipython() path = 'c:\\x' m = DummyMagics(ip) opts = m.parse_options('-f %s' % path,'f:')[0] # argv splitting is os-dependent if os.name == 'posix': expected = 'c:x' else: expected = path nt.assert_equal(opts['f'], expected) def test_magic_parse_long_options(): """Magic.parse_options can handle --foo=bar long options""" ip = get_ipython() m = DummyMagics(ip) opts, _ = m.parse_options('--foo --bar=bubble', 'a', 'foo', 'bar=') nt.assert_in('foo', opts) nt.assert_in('bar', opts) nt.assert_equal(opts['bar'], "bubble") @dec.skip_without('sqlite3') def doctest_hist_f(): """Test %hist -f with temporary filename. In [9]: import tempfile In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-') In [11]: %hist -nl -f $tfile 3 In [13]: import os; os.unlink(tfile) """ @dec.skip_without('sqlite3') def doctest_hist_r(): """Test %hist -r XXX - This test is not recording the output correctly. For some reason, in testing mode the raw history isn't getting populated. No idea why. Disabling the output checking for now, though at least we do run it. In [1]: 'hist' in _ip.lsmagic() Out[1]: True In [2]: x=1 In [3]: %hist -rl 2 x=1 # random %hist -r 2 """ @dec.skip_without('sqlite3') def doctest_hist_op(): """Test %hist -op In [1]: class b(float): ...: pass ...: In [2]: class s(object): ...: def __str__(self): ...: return 's' ...: In [3]: In [4]: class r(b): ...: def __repr__(self): ...: return 'r' ...: In [5]: class sr(s,r): pass ...: In [6]: In [7]: bb=b() In [8]: ss=s() In [9]: rr=r() In [10]: ssrr=sr() In [11]: 4.5 Out[11]: 4.5 In [12]: str(ss) Out[12]: 's' In [13]: In [14]: %hist -op >>> class b: ... pass ... >>> class s(b): ... def __str__(self): ... return 's' ... >>> >>> class r(b): ... def __repr__(self): ... return 'r' ... >>> class sr(s,r): pass >>> >>> bb=b() >>> ss=s() >>> rr=r() >>> ssrr=sr() >>> 4.5 4.5 >>> str(ss) 's' >>> """ def test_hist_pof(): ip = get_ipython() ip.run_cell(u"1+2", store_history=True) #raise Exception(ip.history_manager.session_number) #raise Exception(list(ip.history_manager._get_range_session())) with TemporaryDirectory() as td: tf = os.path.join(td, 'hist.py') ip.run_line_magic('history', '-pof %s' % tf) assert os.path.isfile(tf) @dec.skip_without('sqlite3') def test_macro(): ip = get_ipython() ip.history_manager.reset() # Clear any existing history. 
cmds = ["a=1", "def b():\n return a**2", "print(a,b())"] for i, cmd in enumerate(cmds, start=1): ip.history_manager.store_inputs(i, cmd) ip.magic("macro test 1-3") nt.assert_equal(ip.user_ns["test"].value, "\n".join(cmds)+"\n") # List macros nt.assert_in("test", ip.magic("macro")) @dec.skip_without('sqlite3') def test_macro_run(): """Test that we can run a multi-line macro successfully.""" ip = get_ipython() ip.history_manager.reset() cmds = ["a=10", "a+=1", "print(a)", "%macro test 2-3"] for cmd in cmds: ip.run_cell(cmd, store_history=True) nt.assert_equal(ip.user_ns["test"].value, "a+=1\nprint(a)\n") with tt.AssertPrints("12"): ip.run_cell("test") with tt.AssertPrints("13"): ip.run_cell("test") def test_magic_magic(): """Test %magic""" ip = get_ipython() with capture_output() as captured: ip.magic("magic") stdout = captured.stdout nt.assert_in('%magic', stdout) nt.assert_in('IPython', stdout) nt.assert_in('Available', stdout) @dec.skipif_not_numpy def test_numpy_reset_array_undec(): "Test '%reset array' functionality" _ip.ex('import numpy as np') _ip.ex('a = np.empty(2)') nt.assert_in('a', _ip.user_ns) _ip.magic('reset -f array') nt.assert_not_in('a', _ip.user_ns) def test_reset_out(): "Test '%reset out' magic" _ip.run_cell("parrot = 'dead'", store_history=True) # test '%reset -f out', make an Out prompt _ip.run_cell("parrot", store_history=True) nt.assert_true('dead' in [_ip.user_ns[x] for x in ('_','__','___')]) _ip.magic('reset -f out') nt.assert_false('dead' in [_ip.user_ns[x] for x in ('_','__','___')]) nt.assert_equal(len(_ip.user_ns['Out']), 0) def test_reset_in(): "Test '%reset in' magic" # test '%reset -f in' _ip.run_cell("parrot", store_history=True) nt.assert_true('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')]) _ip.magic('%reset -f in') nt.assert_false('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')]) nt.assert_equal(len(set(_ip.user_ns['In'])), 1) def test_reset_dhist(): "Test '%reset dhist' magic" _ip.run_cell("tmp = [d for d in _dh]") # copy before clearing _ip.magic('cd ' + os.path.dirname(nt.__file__)) _ip.magic('cd -') nt.assert_true(len(_ip.user_ns['_dh']) > 0) _ip.magic('reset -f dhist') nt.assert_equal(len(_ip.user_ns['_dh']), 0) _ip.run_cell("_dh = [d for d in tmp]") #restore def test_reset_in_length(): "Test that '%reset in' preserves In[] length" _ip.run_cell("print 'foo'") _ip.run_cell("reset -f in") nt.assert_equal(len(_ip.user_ns['In']), _ip.displayhook.prompt_count+1) def test_tb_syntaxerror(): """test %tb after a SyntaxError""" ip = get_ipython() ip.run_cell("for") # trap and validate stdout save_stdout = sys.stdout try: sys.stdout = StringIO() ip.run_cell("%tb") out = sys.stdout.getvalue() finally: sys.stdout = save_stdout # trim output, and only check the last line last_line = out.rstrip().splitlines()[-1].strip() nt.assert_equal(last_line, "SyntaxError: invalid syntax") def test_time(): ip = get_ipython() with tt.AssertPrints("Wall time: "): ip.run_cell("%time None") ip.run_cell("def f(kmjy):\n" " %time print (2*kmjy)") with tt.AssertPrints("Wall time: "): with tt.AssertPrints("hihi", suppress=False): ip.run_cell("f('hi')") @dec.skip_win32 def test_time2(): ip = get_ipython() with tt.AssertPrints("CPU times: user "): ip.run_cell("%time None") def test_time3(): """Erroneous magic function calls, issue gh-3334""" ip = get_ipython() ip.user_ns.pop('run', None) with tt.AssertNotPrints("not found", channel='stderr'): ip.run_cell("%%time\n" "run = 0\n" "run += 1") def test_doctest_mode(): "Toggle doctest_mode twice, it should be a no-op and 
run without error" _ip.magic('doctest_mode') _ip.magic('doctest_mode') def test_parse_options(): """Tests for basic options parsing in magics.""" # These are only the most minimal of tests, more should be added later. At # the very least we check that basic text/unicode calls work OK. m = DummyMagics(_ip) nt.assert_equal(m.parse_options('foo', '')[1], 'foo') nt.assert_equal(m.parse_options(u'foo', '')[1], u'foo') def test_dirops(): """Test various directory handling operations.""" # curpath = lambda :os.path.splitdrive(os.getcwd())[1].replace('\\','/') curpath = os.getcwd startdir = os.getcwd() ipdir = os.path.realpath(_ip.ipython_dir) try: _ip.magic('cd "%s"' % ipdir) nt.assert_equal(curpath(), ipdir) _ip.magic('cd -') nt.assert_equal(curpath(), startdir) _ip.magic('pushd "%s"' % ipdir) nt.assert_equal(curpath(), ipdir) _ip.magic('popd') nt.assert_equal(curpath(), startdir) finally: os.chdir(startdir) def test_cd_force_quiet(): """Test OSMagics.cd_force_quiet option""" _ip.config.OSMagics.cd_force_quiet = True osmagics = osm.OSMagics(shell=_ip) startdir = os.getcwd() ipdir = os.path.realpath(_ip.ipython_dir) try: with tt.AssertNotPrints(ipdir): osmagics.cd('"%s"' % ipdir) with tt.AssertNotPrints(startdir): osmagics.cd('-') finally: os.chdir(startdir) def test_xmode(): # Calling xmode three times should be a no-op xmode = _ip.InteractiveTB.mode for i in range(4): _ip.magic("xmode") nt.assert_equal(_ip.InteractiveTB.mode, xmode) def test_reset_hard(): monitor = [] class A(object): def __del__(self): monitor.append(1) def __repr__(self): return "<A instance>" _ip.user_ns["a"] = A() _ip.run_cell("a") nt.assert_equal(monitor, []) _ip.magic("reset -f") nt.assert_equal(monitor, [1]) class TestXdel(tt.TempFileMixin): def test_xdel(self): """Test that references from %run are cleared by xdel.""" src = ("class A(object):\n" " monitor = []\n" " def __del__(self):\n" " self.monitor.append(1)\n" "a = A()\n") self.mktmp(src) # %run creates some hidden references... _ip.magic("run %s" % self.fname) # ... as does the displayhook. _ip.run_cell("a") monitor = _ip.user_ns["A"].monitor nt.assert_equal(monitor, []) _ip.magic("xdel a") # Check that a's __del__ method has been called. 
nt.assert_equal(monitor, [1]) def doctest_who(): """doctest for %who In [1]: %reset -f In [2]: alpha = 123 In [3]: beta = 'beta' In [4]: %who int alpha In [5]: %who str beta In [6]: %whos Variable Type Data/Info ---------------------------- alpha int 123 beta str beta In [7]: %who_ls Out[7]: ['alpha', 'beta'] """ def test_whos(): """Check that whos is protected against objects where repr() fails.""" class A(object): def __repr__(self): raise Exception() _ip.user_ns['a'] = A() _ip.magic("whos") def doctest_precision(): """doctest for %precision In [1]: f = get_ipython().display_formatter.formatters['text/plain'] In [2]: %precision 5 Out[2]: '%.5f' In [3]: f.float_format Out[3]: '%.5f' In [4]: %precision %e Out[4]: '%e' In [5]: f(3.1415927) Out[5]: '3.141593e+00' """ def test_psearch(): with tt.AssertPrints("dict.fromkeys"): _ip.run_cell("dict.fr*?") def test_timeit_shlex(): """test shlex issues with timeit (#1109)""" _ip.ex("def f(*a,**kw): pass") _ip.magic('timeit -n1 "this is a bug".count(" ")') _ip.magic('timeit -r1 -n1 f(" ", 1)') _ip.magic('timeit -r1 -n1 f(" ", 1, " ", 2, " ")') _ip.magic('timeit -r1 -n1 ("a " + "b")') _ip.magic('timeit -r1 -n1 f("a " + "b")') _ip.magic('timeit -r1 -n1 f("a " + "b ")') def test_timeit_special_syntax(): "Test %%timeit with IPython special syntax" @register_line_magic def lmagic(line): ip = get_ipython() ip.user_ns['lmagic_out'] = line # line mode test _ip.run_line_magic('timeit', '-n1 -r1 %lmagic my line') nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line') # cell mode test _ip.run_cell_magic('timeit', '-n1 -r1', '%lmagic my line2') nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2') def test_timeit_return(): """ test whether timeit -o return object """ res = _ip.run_line_magic('timeit','-n10 -r10 -o 1') assert(res is not None) def test_timeit_quiet(): """ test quiet option of timeit magic """ with tt.AssertNotPrints("loops"): _ip.run_cell("%timeit -n1 -r1 -q 1") def test_timeit_return_quiet(): with tt.AssertNotPrints("loops"): res = _ip.run_line_magic('timeit', '-n1 -r1 -q -o 1') assert (res is not None) def test_timeit_invalid_return(): with nt.assert_raises_regex(SyntaxError, "outside function"): _ip.run_line_magic('timeit', 'return') @dec.skipif(execution.profile is None) def test_prun_special_syntax(): "Test %%prun with IPython special syntax" @register_line_magic def lmagic(line): ip = get_ipython() ip.user_ns['lmagic_out'] = line # line mode test _ip.run_line_magic('prun', '-q %lmagic my line') nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line') # cell mode test _ip.run_cell_magic('prun', '-q', '%lmagic my line2') nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2') @dec.skipif(execution.profile is None) def test_prun_quotes(): "Test that prun does not clobber string escapes (GH #1302)" _ip.magic(r"prun -q x = '\t'") nt.assert_equal(_ip.user_ns['x'], '\t') def test_extension(): # Debugging information for failures of this test print('sys.path:') for p in sys.path: print(' ', p) print('CWD', os.getcwd()) nt.assert_raises(ImportError, _ip.magic, "load_ext daft_extension") daft_path = os.path.join(os.path.dirname(__file__), "daft_extension") sys.path.insert(0, daft_path) try: _ip.user_ns.pop('arq', None) invalidate_caches() # Clear import caches _ip.magic("load_ext daft_extension") nt.assert_equal(_ip.user_ns['arq'], 185) _ip.magic("unload_ext daft_extension") assert 'arq' not in _ip.user_ns finally: sys.path.remove(daft_path) def test_notebook_export_json(): _ip = get_ipython() _ip.history_manager.reset() # Clear any existing history. 
cmds = [u"a=1", u"def b():\n return a**2", u"print('noël, été', b())"] for i, cmd in enumerate(cmds, start=1): _ip.history_manager.store_inputs(i, cmd) with TemporaryDirectory() as td: outfile = os.path.join(td, "nb.ipynb") _ip.magic("notebook -e %s" % outfile) class TestEnv(TestCase): def test_env(self): env = _ip.magic("env") self.assertTrue(isinstance(env, dict)) def test_env_get_set_simple(self): env = _ip.magic("env var val1") self.assertEqual(env, None) self.assertEqual(os.environ['var'], 'val1') self.assertEqual(_ip.magic("env var"), 'val1') env = _ip.magic("env var=val2") self.assertEqual(env, None) self.assertEqual(os.environ['var'], 'val2') def test_env_get_set_complex(self): env = _ip.magic("env var 'val1 '' 'val2") self.assertEqual(env, None) self.assertEqual(os.environ['var'], "'val1 '' 'val2") self.assertEqual(_ip.magic("env var"), "'val1 '' 'val2") env = _ip.magic('env var=val2 val3="val4') self.assertEqual(env, None) self.assertEqual(os.environ['var'], 'val2 val3="val4') def test_env_set_bad_input(self): self.assertRaises(UsageError, lambda: _ip.magic("set_env var")) def test_env_set_whitespace(self): self.assertRaises(UsageError, lambda: _ip.magic("env var A=B")) class CellMagicTestCase(TestCase): def check_ident(self, magic): # Manually called, we get the result out = _ip.run_cell_magic(magic, 'a', 'b') nt.assert_equal(out, ('a','b')) # Via run_cell, it goes into the user's namespace via displayhook _ip.run_cell('%%' + magic +' c\nd\n') nt.assert_equal(_ip.user_ns['_'], ('c','d\n')) def test_cell_magic_func_deco(self): "Cell magic using simple decorator" @register_cell_magic def cellm(line, cell): return line, cell self.check_ident('cellm') def test_cell_magic_reg(self): "Cell magic manually registered" def cellm(line, cell): return line, cell _ip.register_magic_function(cellm, 'cell', 'cellm2') self.check_ident('cellm2') def test_cell_magic_class(self): "Cell magics declared via a class" @magics_class class MyMagics(Magics): @cell_magic def cellm3(self, line, cell): return line, cell _ip.register_magics(MyMagics) self.check_ident('cellm3') def test_cell_magic_class2(self): "Cell magics declared via a class, #2" @magics_class class MyMagics2(Magics): @cell_magic('cellm4') def cellm33(self, line, cell): return line, cell _ip.register_magics(MyMagics2) self.check_ident('cellm4') # Check that nothing is registered as 'cellm33' c33 = _ip.find_cell_magic('cellm33') nt.assert_equal(c33, None) def test_file(): """Basic %%writefile""" ip = get_ipython() with TemporaryDirectory() as td: fname = os.path.join(td, 'file1') ip.run_cell_magic("writefile", fname, u'\n'.join([ 'line1', 'line2', ])) with open(fname) as f: s = f.read() nt.assert_in('line1\n', s) nt.assert_in('line2', s) @dec.skip_win32 def test_file_single_quote(): """Basic %%writefile with embedded single quotes""" ip = get_ipython() with TemporaryDirectory() as td: fname = os.path.join(td, '\'file1\'') ip.run_cell_magic("writefile", fname, u'\n'.join([ 'line1', 'line2', ])) with open(fname) as f: s = f.read() nt.assert_in('line1\n', s) nt.assert_in('line2', s) @dec.skip_win32 def test_file_double_quote(): """Basic %%writefile with embedded double quotes""" ip = get_ipython() with TemporaryDirectory() as td: fname = os.path.join(td, '"file1"') ip.run_cell_magic("writefile", fname, u'\n'.join([ 'line1', 'line2', ])) with open(fname) as f: s = f.read() nt.assert_in('line1\n', s) nt.assert_in('line2', s) def test_file_var_expand(): """%%writefile $filename""" ip = get_ipython() with TemporaryDirectory() as td: fname = 
os.path.join(td, 'file1') ip.user_ns['filename'] = fname ip.run_cell_magic("writefile", '$filename', u'\n'.join([ 'line1', 'line2', ])) with open(fname) as f: s = f.read() nt.assert_in('line1\n', s) nt.assert_in('line2', s) def test_file_unicode(): """%%writefile with unicode cell""" ip = get_ipython() with TemporaryDirectory() as td: fname = os.path.join(td, 'file1') ip.run_cell_magic("writefile", fname, u'\n'.join([ u'liné1', u'liné2', ])) with io.open(fname, encoding='utf-8') as f: s = f.read() nt.assert_in(u'liné1\n', s) nt.assert_in(u'liné2', s) def test_file_amend(): """%%writefile -a amends files""" ip = get_ipython() with TemporaryDirectory() as td: fname = os.path.join(td, 'file2') ip.run_cell_magic("writefile", fname, u'\n'.join([ 'line1', 'line2', ])) ip.run_cell_magic("writefile", "-a %s" % fname, u'\n'.join([ 'line3', 'line4', ])) with open(fname) as f: s = f.read() nt.assert_in('line1\n', s) nt.assert_in('line3\n', s) def test_file_spaces(): """%%file with spaces in filename""" ip = get_ipython() with TemporaryWorkingDirectory() as td: fname = "file name" ip.run_cell_magic("file", '"%s"'%fname, u'\n'.join([ 'line1', 'line2', ])) with open(fname) as f: s = f.read() nt.assert_in('line1\n', s) nt.assert_in('line2', s) def test_script_config(): ip = get_ipython() ip.config.ScriptMagics.script_magics = ['whoda'] sm = script.ScriptMagics(shell=ip) nt.assert_in('whoda', sm.magics['cell']) @dec.skip_win32 def test_script_out(): ip = get_ipython() ip.run_cell_magic("script", "--out output sh", "echo 'hi'") nt.assert_equal(ip.user_ns['output'], 'hi\n') @dec.skip_win32 def test_script_err(): ip = get_ipython() ip.run_cell_magic("script", "--err error sh", "echo 'hello' >&2") nt.assert_equal(ip.user_ns['error'], 'hello\n') @dec.skip_win32 def test_script_out_err(): ip = get_ipython() ip.run_cell_magic("script", "--out output --err error sh", "echo 'hi'\necho 'hello' >&2") nt.assert_equal(ip.user_ns['output'], 'hi\n') nt.assert_equal(ip.user_ns['error'], 'hello\n') @dec.skip_win32 def test_script_bg_out(): ip = get_ipython() ip.run_cell_magic("script", "--bg --out output sh", "echo 'hi'") nt.assert_equal(ip.user_ns['output'].read(), b'hi\n') ip.user_ns['output'].close() @dec.skip_win32 def test_script_bg_err(): ip = get_ipython() ip.run_cell_magic("script", "--bg --err error sh", "echo 'hello' >&2") nt.assert_equal(ip.user_ns['error'].read(), b'hello\n') ip.user_ns['error'].close() @dec.skip_win32 def test_script_bg_out_err(): ip = get_ipython() ip.run_cell_magic("script", "--bg --out output --err error sh", "echo 'hi'\necho 'hello' >&2") nt.assert_equal(ip.user_ns['output'].read(), b'hi\n') nt.assert_equal(ip.user_ns['error'].read(), b'hello\n') ip.user_ns['output'].close() ip.user_ns['error'].close() def test_script_defaults(): ip = get_ipython() for cmd in ['sh', 'bash', 'perl', 'ruby']: try: find_cmd(cmd) except Exception: pass else: nt.assert_in(cmd, ip.magics_manager.magics['cell']) @magics_class class FooFoo(Magics): """class with both %foo and %%foo magics""" @line_magic('foo') def line_foo(self, line): "I am line foo" pass @cell_magic("foo") def cell_foo(self, line, cell): "I am cell foo, not line foo" pass def test_line_cell_info(): """%%foo and %foo magics are distinguishable to inspect""" ip = get_ipython() ip.magics_manager.register(FooFoo) oinfo = ip.object_inspect('foo') nt.assert_true(oinfo['found']) nt.assert_true(oinfo['ismagic']) oinfo = ip.object_inspect('%%foo') nt.assert_true(oinfo['found']) nt.assert_true(oinfo['ismagic']) nt.assert_equal(oinfo['docstring'], 
FooFoo.cell_foo.__doc__) oinfo = ip.object_inspect('%foo') nt.assert_true(oinfo['found']) nt.assert_true(oinfo['ismagic']) nt.assert_equal(oinfo['docstring'], FooFoo.line_foo.__doc__) def test_multiple_magics(): ip = get_ipython() foo1 = FooFoo(ip) foo2 = FooFoo(ip) mm = ip.magics_manager mm.register(foo1) nt.assert_true(mm.magics['line']['foo'].__self__ is foo1) mm.register(foo2) nt.assert_true(mm.magics['line']['foo'].__self__ is foo2) def test_alias_magic(): """Test %alias_magic.""" ip = get_ipython() mm = ip.magics_manager # Basic operation: both cell and line magics are created, if possible. ip.run_line_magic('alias_magic', 'timeit_alias timeit') nt.assert_in('timeit_alias', mm.magics['line']) nt.assert_in('timeit_alias', mm.magics['cell']) # --cell is specified, line magic not created. ip.run_line_magic('alias_magic', '--cell timeit_cell_alias timeit') nt.assert_not_in('timeit_cell_alias', mm.magics['line']) nt.assert_in('timeit_cell_alias', mm.magics['cell']) # Test that line alias is created successfully. ip.run_line_magic('alias_magic', '--line env_alias env') nt.assert_equal(ip.run_line_magic('env', ''), ip.run_line_magic('env_alias', '')) # Test that line alias with parameters passed in is created successfully. ip.run_line_magic('alias_magic', '--line history_alias history --params ' + shlex.quote('3')) nt.assert_in('history_alias', mm.magics['line']) def test_save(): """Test %save.""" ip = get_ipython() ip.history_manager.reset() # Clear any existing history. cmds = [u"a=1", u"def b():\n return a**2", u"print(a, b())"] for i, cmd in enumerate(cmds, start=1): ip.history_manager.store_inputs(i, cmd) with TemporaryDirectory() as tmpdir: file = os.path.join(tmpdir, "testsave.py") ip.run_line_magic("save", "%s 1-10" % file) with open(file) as f: content = f.read() nt.assert_equal(content.count(cmds[0]), 1) nt.assert_in('coding: utf-8', content) ip.run_line_magic("save", "-a %s 1-10" % file) with open(file) as f: content = f.read() nt.assert_equal(content.count(cmds[0]), 2) nt.assert_in('coding: utf-8', content) def test_store(): """Test %store.""" ip = get_ipython() ip.run_line_magic('load_ext', 'storemagic') # make sure the storage is empty ip.run_line_magic('store', '-z') ip.user_ns['var'] = 42 ip.run_line_magic('store', 'var') ip.user_ns['var'] = 39 ip.run_line_magic('store', '-r') nt.assert_equal(ip.user_ns['var'], 42) ip.run_line_magic('store', '-d var') ip.user_ns['var'] = 39 ip.run_line_magic('store' , '-r') nt.assert_equal(ip.user_ns['var'], 39) def _run_edit_test(arg_s, exp_filename=None, exp_lineno=-1, exp_contents=None, exp_is_temp=None): ip = get_ipython() M = code.CodeMagics(ip) last_call = ['',''] opts,args = M.parse_options(arg_s,'prxn:') filename, lineno, is_temp = M._find_edit_target(ip, args, opts, last_call) if exp_filename is not None: nt.assert_equal(exp_filename, filename) if exp_contents is not None: with io.open(filename, 'r', encoding='utf-8') as f: contents = f.read() nt.assert_equal(exp_contents, contents) if exp_lineno != -1: nt.assert_equal(exp_lineno, lineno) if exp_is_temp is not None: nt.assert_equal(exp_is_temp, is_temp) def test_edit_interactive(): """%edit on interactively defined objects""" ip = get_ipython() n = ip.execution_count ip.run_cell(u"def foo(): return 1", store_history=True) try: _run_edit_test("foo") except code.InteractivelyDefined as e: nt.assert_equal(e.index, n) else: raise AssertionError("Should have raised InteractivelyDefined") def test_edit_cell(): """%edit [cell id]""" ip = get_ipython() ip.run_cell(u"def foo(): return 1", 
store_history=True) # test _run_edit_test("1", exp_contents=ip.user_ns['In'][1], exp_is_temp=True) def test_bookmark(): ip = get_ipython() ip.run_line_magic('bookmark', 'bmname') with tt.AssertPrints('bmname'): ip.run_line_magic('bookmark', '-l') ip.run_line_magic('bookmark', '-d bmname') def test_ls_magic(): ip = get_ipython() json_formatter = ip.display_formatter.formatters['application/json'] json_formatter.enabled = True lsmagic = ip.magic('lsmagic') with warnings.catch_warnings(record=True) as w: j = json_formatter(lsmagic) nt.assert_equal(sorted(j), ['cell', 'line']) nt.assert_equal(w, []) # no warnings def test_strip_initial_indent(): def sii(s): lines = s.splitlines() return '\n'.join(code.strip_initial_indent(lines)) nt.assert_equal(sii(" a = 1\nb = 2"), "a = 1\nb = 2") nt.assert_equal(sii(" a\n b\nc"), "a\n b\nc") nt.assert_equal(sii("a\n b"), "a\n b") def test_logging_magic_quiet_from_arg(): _ip.config.LoggingMagics.quiet = False lm = logging.LoggingMagics(shell=_ip) with TemporaryDirectory() as td: try: with tt.AssertNotPrints(re.compile("Activating.*")): lm.logstart('-q {}'.format( os.path.join(td, "quiet_from_arg.log"))) finally: _ip.logger.logstop() def test_logging_magic_quiet_from_config(): _ip.config.LoggingMagics.quiet = True lm = logging.LoggingMagics(shell=_ip) with TemporaryDirectory() as td: try: with tt.AssertNotPrints(re.compile("Activating.*")): lm.logstart(os.path.join(td, "quiet_from_config.log")) finally: _ip.logger.logstop() def test_logging_magic_not_quiet(): _ip.config.LoggingMagics.quiet = False lm = logging.LoggingMagics(shell=_ip) with TemporaryDirectory() as td: try: with tt.AssertPrints(re.compile("Activating.*")): lm.logstart(os.path.join(td, "not_quiet.log")) finally: _ip.logger.logstop() def test_time_no_var_expand(): _ip.user_ns['a'] = 5 _ip.user_ns['b'] = [] _ip.magic('time b.append("{a}")') assert _ip.user_ns['b'] == ['{a}'] # this is slow, put at the end for local testing. def test_timeit_arguments(): "Test valid timeit arguments, should not cause SyntaxError (GH #1269)" if sys.version_info < (3,7): _ip.magic("timeit ('#')") else: # 3.7 optimize no-op statement like above out, and complain there is # nothing in the for loop. _ip.magic("timeit a=('#')")
env/lib/python3.6/site-packages/IPython/core/tests/test_magic.py
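The FooFoo fixture in the tests above exercises IPython's custom-magic API. A minimal standalone sketch of the same registration pattern, assuming it is run inside an IPython session (the EchoMagics name is illustrative and not part of the test suite):

from IPython.core.magic import Magics, magics_class, line_magic, cell_magic
from IPython import get_ipython

@magics_class
class EchoMagics(Magics):
    """Illustrative class exposing a line magic and a cell magic under one name."""

    @line_magic('echo')
    def echo_line(self, line):
        "Line form: returns its argument string."
        return line

    @cell_magic('echo')
    def echo_cell(self, line, cell):
        "Cell form: returns the cell body."
        return cell

ip = get_ipython()          # None when not running under IPython
if ip is not None:
    ip.magics_manager.register(EchoMagics)
    # %echo hello   -> 'hello'
    # %%echo        -> returns the cell contents; the two forms stay
    #                  distinguishable via ip.object_inspect('%echo') / ('%%echo')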
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Dict, Tuple

from ethtx.models.semantics_model import (
    ParameterSemantics,
    EventSemantics,
    FunctionSemantics,
    TransformationSemantics,
)


def _decode_parameters_list(raw_parameters_list: list) -> List[ParameterSemantics]:
    parameters_list = []

    if not raw_parameters_list:
        return parameters_list

    for raw_parameter_semantics in raw_parameters_list:

        if "indexed" in raw_parameter_semantics:
            indexed = raw_parameter_semantics["indexed"]
        else:
            indexed = False

        if "dynamic" in raw_parameter_semantics:
            dynamic = raw_parameter_semantics["dynamic"]
        else:
            dynamic = False

        if raw_parameter_semantics["type"] == "tuple":
            components = _decode_parameters_list(raw_parameter_semantics["components"])
        else:
            components = []

        parameters_list.append(
            ParameterSemantics(
                raw_parameter_semantics["name"],
                raw_parameter_semantics["type"],
                components,
                indexed,
                dynamic,
            )
        )

    return parameters_list


def decode_events_and_functions(
    abi: dict,
) -> Tuple[Dict[str, EventSemantics], Dict[str, FunctionSemantics]]:
    events = dict()
    for signature, raw_event_semantics in abi.get("events", {}).items():
        parameters = _decode_parameters_list(raw_event_semantics.get("parameters"))
        events[signature] = EventSemantics(
            signature,
            raw_event_semantics["anonymous"],
            raw_event_semantics["name"],
            parameters,
        )

    functions = dict()
    for signature, raw_function_semantics in abi.get("functions", {}).items():
        if raw_function_semantics:
            inputs = _decode_parameters_list(raw_function_semantics.get("inputs"))
            outputs = _decode_parameters_list(raw_function_semantics.get("outputs"))
            name = raw_function_semantics["name"]
        else:
            inputs = outputs = []
            name = signature

        functions[signature] = FunctionSemantics(signature, name, inputs, outputs)

    return events, functions


def decode_transformations(
    raw_transformations: dict,
) -> Dict[str, Dict[str, TransformationSemantics]]:
    transformations = dict()
    if raw_transformations:
        for signature, transformation in raw_transformations.items():
            transformations[signature] = dict()
            for parameter_name, parameter_transformation in transformation.get(
                "arguments", dict()
            ).items():
                transformations[signature][parameter_name] = TransformationSemantics(
                    parameter_transformation.get("name"),
                    parameter_transformation.get("type"),
                    parameter_transformation.get("value"),
                )
    return transformations
ethtx/decoders/decoders/semantics.py
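A rough usage sketch for the decoders above, assuming the installed ethtx version matches this module; the ABI dict below is hand-written to match the keys the functions read and is not taken from ethtx documentation:

from ethtx.decoders.decoders.semantics import (
    decode_events_and_functions,
    decode_transformations,
)

# Hypothetical raw ABI shaped the way the decoders read it:
# "events" maps signature -> {anonymous, name, parameters},
# "functions" maps signature -> {name, inputs, outputs}.
abi = {
    "events": {
        "0xddf252ad": {
            "anonymous": False,
            "name": "Transfer",
            "parameters": [
                {"name": "src", "type": "address", "indexed": True},
                {"name": "dst", "type": "address", "indexed": True},
                {"name": "value", "type": "uint256"},
            ],
        }
    },
    "functions": {
        "0xa9059cbb": {
            "name": "transfer",
            "inputs": [
                {"name": "dst", "type": "address"},
                {"name": "value", "type": "uint256"},
            ],
            "outputs": [{"name": "", "type": "bool"}],
        }
    },
}

events, functions = decode_events_and_functions(abi)

# Optional display transformations keyed by function signature and argument name.
transformations = decode_transformations(
    {"0xa9059cbb": {"arguments": {"value": {"name": "value", "type": "amount"}}}}
)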
""" Support for Xiaomi Yeelight Wifi color bulb. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/light.yeelight/ """ import logging import voluptuous as vol from homeassistant.util.color import ( color_temperature_mired_to_kelvin as mired_to_kelvin, color_temperature_kelvin_to_mired as kelvin_to_mired) from homeassistant.const import CONF_DEVICES, CONF_NAME from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_COLOR_TEMP, ATTR_FLASH, FLASH_SHORT, FLASH_LONG, ATTR_EFFECT, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_TRANSITION, SUPPORT_COLOR_TEMP, SUPPORT_FLASH, SUPPORT_EFFECT, Light, PLATFORM_SCHEMA, ATTR_ENTITY_ID, DOMAIN) import homeassistant.helpers.config_validation as cv import homeassistant.util.color as color_util REQUIREMENTS = ['yeelight==0.4.0'] _LOGGER = logging.getLogger(__name__) LEGACY_DEVICE_TYPE_MAP = { 'color1': 'rgb', 'mono1': 'white', 'strip1': 'strip', 'bslamp1': 'bedside', 'ceiling1': 'ceiling', } DEFAULT_NAME = 'Yeelight' DEFAULT_TRANSITION = 350 CONF_TRANSITION = 'transition' CONF_SAVE_ON_CHANGE = 'save_on_change' CONF_MODE_MUSIC = 'use_music_mode' DATA_KEY = 'light.yeelight' DEVICE_SCHEMA = vol.Schema({ vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_TRANSITION, default=DEFAULT_TRANSITION): cv.positive_int, vol.Optional(CONF_MODE_MUSIC, default=False): cv.boolean, vol.Optional(CONF_SAVE_ON_CHANGE, default=True): cv.boolean, }) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}, }) SUPPORT_YEELIGHT = (SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION | SUPPORT_FLASH) SUPPORT_YEELIGHT_RGB = (SUPPORT_YEELIGHT | SUPPORT_COLOR | SUPPORT_EFFECT | SUPPORT_COLOR_TEMP) YEELIGHT_MIN_KELVIN = YEELIGHT_MAX_KELVIN = 2700 YEELIGHT_RGB_MIN_KELVIN = 1700 YEELIGHT_RGB_MAX_KELVIN = 6500 EFFECT_DISCO = "Disco" EFFECT_TEMP = "Slow Temp" EFFECT_STROBE = "Strobe epilepsy!" 
EFFECT_STROBE_COLOR = "Strobe color" EFFECT_ALARM = "Alarm" EFFECT_POLICE = "Police" EFFECT_POLICE2 = "Police2" EFFECT_CHRISTMAS = "Christmas" EFFECT_RGB = "RGB" EFFECT_RANDOM_LOOP = "Random Loop" EFFECT_FAST_RANDOM_LOOP = "Fast Random Loop" EFFECT_SLOWDOWN = "Slowdown" EFFECT_WHATSAPP = "WhatsApp" EFFECT_FACEBOOK = "Facebook" EFFECT_TWITTER = "Twitter" EFFECT_STOP = "Stop" YEELIGHT_EFFECT_LIST = [ EFFECT_DISCO, EFFECT_TEMP, EFFECT_STROBE, EFFECT_STROBE_COLOR, EFFECT_ALARM, EFFECT_POLICE, EFFECT_POLICE2, EFFECT_CHRISTMAS, EFFECT_RGB, EFFECT_RANDOM_LOOP, EFFECT_FAST_RANDOM_LOOP, EFFECT_SLOWDOWN, EFFECT_WHATSAPP, EFFECT_FACEBOOK, EFFECT_TWITTER, EFFECT_STOP] SERVICE_SET_MODE = 'yeelight_set_mode' ATTR_MODE = 'mode' YEELIGHT_SERVICE_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, }) def _cmd(func): """Define a wrapper to catch exceptions from the bulb.""" def _wrap(self, *args, **kwargs): import yeelight try: _LOGGER.debug("Calling %s with %s %s", func, args, kwargs) return func(self, *args, **kwargs) except yeelight.BulbException as ex: _LOGGER.error("Error when calling %s: %s", func, ex) return _wrap def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the Yeelight bulbs.""" from yeelight.enums import PowerMode if DATA_KEY not in hass.data: hass.data[DATA_KEY] = {} lights = [] if discovery_info is not None: _LOGGER.debug("Adding autodetected %s", discovery_info['hostname']) device_type = discovery_info['device_type'] device_type = LEGACY_DEVICE_TYPE_MAP.get(device_type, device_type) # Not using hostname, as it seems to vary. name = "yeelight_%s_%s" % (device_type, discovery_info['properties']['mac']) host = discovery_info['host'] device = {'name': name, 'ipaddr': host} light = YeelightLight(device, DEVICE_SCHEMA({})) lights.append(light) hass.data[DATA_KEY][host] = light else: for host, device_config in config[CONF_DEVICES].items(): device = {'name': device_config[CONF_NAME], 'ipaddr': host} light = YeelightLight(device, device_config) lights.append(light) hass.data[DATA_KEY][host] = light add_devices(lights, True) def service_handler(service): """Dispatch service calls to target entities.""" params = {key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID} entity_ids = service.data.get(ATTR_ENTITY_ID) if entity_ids: target_devices = [dev for dev in hass.data[DATA_KEY].values() if dev.entity_id in entity_ids] else: target_devices = hass.data[DATA_KEY].values() for target_device in target_devices: if service.service == SERVICE_SET_MODE: target_device.set_mode(**params) service_schema_set_mode = YEELIGHT_SERVICE_SCHEMA.extend({ vol.Required(ATTR_MODE): vol.In([mode.name.lower() for mode in PowerMode]) }) hass.services.register( DOMAIN, SERVICE_SET_MODE, service_handler, schema=service_schema_set_mode) class YeelightLight(Light): """Representation of a Yeelight light.""" def __init__(self, device, config): """Initialize the Yeelight light.""" self.config = config self._name = device['name'] self._ipaddr = device['ipaddr'] self._supported_features = SUPPORT_YEELIGHT self._available = False self._bulb_device = None self._brightness = None self._color_temp = None self._is_on = None self._hs = None @property def available(self) -> bool: """Return if bulb is available.""" return self._available @property def supported_features(self) -> int: """Flag supported features.""" return self._supported_features @property def effect_list(self): """Return the list of supported effects.""" return YEELIGHT_EFFECT_LIST @property def color_temp(self) 
-> int: """Return the color temperature.""" return self._color_temp @property def name(self) -> str: """Return the name of the device if any.""" return self._name @property def is_on(self) -> bool: """Return true if device is on.""" return self._is_on @property def brightness(self) -> int: """Return the brightness of this light between 1..255.""" return self._brightness @property def min_mireds(self): """Return minimum supported color temperature.""" if self.supported_features & SUPPORT_COLOR_TEMP: return kelvin_to_mired(YEELIGHT_RGB_MAX_KELVIN) return kelvin_to_mired(YEELIGHT_MAX_KELVIN) @property def max_mireds(self): """Return maximum supported color temperature.""" if self.supported_features & SUPPORT_COLOR_TEMP: return kelvin_to_mired(YEELIGHT_RGB_MIN_KELVIN) return kelvin_to_mired(YEELIGHT_MIN_KELVIN) def _get_hs_from_properties(self): rgb = self._properties.get('rgb', None) color_mode = self._properties.get('color_mode', None) if not rgb or not color_mode: return None color_mode = int(color_mode) if color_mode == 2: # color temperature temp_in_k = mired_to_kelvin(self._color_temp) return color_util.color_temperature_to_hs(temp_in_k) if color_mode == 3: # hsv hue = int(self._properties.get('hue')) sat = int(self._properties.get('sat')) return (hue / 360 * 65536, sat / 100 * 255) rgb = int(rgb) blue = rgb & 0xff green = (rgb >> 8) & 0xff red = (rgb >> 16) & 0xff return color_util.color_RGB_to_hs(red, green, blue) @property def hs_color(self) -> tuple: """Return the color property.""" return self._hs @property def _properties(self) -> dict: return self._bulb.last_properties @property def _bulb(self) -> 'yeelight.Bulb': import yeelight if self._bulb_device is None: try: self._bulb_device = yeelight.Bulb(self._ipaddr) self._bulb_device.get_properties() # force init for type self._available = True except yeelight.BulbException as ex: self._available = False _LOGGER.error("Failed to connect to bulb %s, %s: %s", self._ipaddr, self._name, ex) return self._bulb_device def set_music_mode(self, mode) -> None: """Set the music mode on or off.""" if mode: self._bulb.start_music() else: self._bulb.stop_music() def update(self) -> None: """Update properties from the bulb.""" import yeelight try: self._bulb.get_properties() if self._bulb_device.bulb_type == yeelight.BulbType.Color: self._supported_features = SUPPORT_YEELIGHT_RGB self._is_on = self._properties.get('power') == 'on' bright = self._properties.get('bright', None) if bright: self._brightness = round(255 * (int(bright) / 100)) temp_in_k = self._properties.get('ct', None) if temp_in_k: self._color_temp = kelvin_to_mired(int(temp_in_k)) self._hs = self._get_hs_from_properties() self._available = True except yeelight.BulbException as ex: if self._available: # just inform once _LOGGER.error("Unable to update bulb status: %s", ex) self._available = False @_cmd def set_brightness(self, brightness, duration) -> None: """Set bulb brightness.""" if brightness: _LOGGER.debug("Setting brightness: %s", brightness) self._bulb.set_brightness(brightness / 255 * 100, duration=duration) @_cmd def set_rgb(self, rgb, duration) -> None: """Set bulb's color.""" if rgb and self.supported_features & SUPPORT_COLOR: _LOGGER.debug("Setting RGB: %s", rgb) self._bulb.set_rgb(rgb[0], rgb[1], rgb[2], duration=duration) @_cmd def set_colortemp(self, colortemp, duration) -> None: """Set bulb's color temperature.""" if colortemp and self.supported_features & SUPPORT_COLOR_TEMP: temp_in_k = mired_to_kelvin(colortemp) _LOGGER.debug("Setting color temp: %s K", temp_in_k) 
self._bulb.set_color_temp(temp_in_k, duration=duration) @_cmd def set_default(self) -> None: """Set current options as default.""" self._bulb.set_default() @_cmd def set_flash(self, flash) -> None: """Activate flash.""" if flash: from yeelight import (RGBTransition, SleepTransition, Flow, BulbException) if self._bulb.last_properties["color_mode"] != 1: _LOGGER.error("Flash supported currently only in RGB mode.") return transition = int(self.config[CONF_TRANSITION]) if flash == FLASH_LONG: count = 1 duration = transition * 5 if flash == FLASH_SHORT: count = 1 duration = transition * 2 red, green, blue = color_util.color_hs_to_RGB(*self._hs) transitions = list() transitions.append( RGBTransition(255, 0, 0, brightness=10, duration=duration)) transitions.append(SleepTransition( duration=transition)) transitions.append( RGBTransition(red, green, blue, brightness=self.brightness, duration=duration)) flow = Flow(count=count, transitions=transitions) try: self._bulb.start_flow(flow) except BulbException as ex: _LOGGER.error("Unable to set flash: %s", ex) @_cmd def set_effect(self, effect) -> None: """Activate effect.""" if effect: from yeelight import (Flow, BulbException) from yeelight.transitions import (disco, temp, strobe, pulse, strobe_color, alarm, police, police2, christmas, rgb, randomloop, slowdown) if effect == EFFECT_STOP: self._bulb.stop_flow() return if effect == EFFECT_DISCO: flow = Flow(count=0, transitions=disco()) if effect == EFFECT_TEMP: flow = Flow(count=0, transitions=temp()) if effect == EFFECT_STROBE: flow = Flow(count=0, transitions=strobe()) if effect == EFFECT_STROBE_COLOR: flow = Flow(count=0, transitions=strobe_color()) if effect == EFFECT_ALARM: flow = Flow(count=0, transitions=alarm()) if effect == EFFECT_POLICE: flow = Flow(count=0, transitions=police()) if effect == EFFECT_POLICE2: flow = Flow(count=0, transitions=police2()) if effect == EFFECT_CHRISTMAS: flow = Flow(count=0, transitions=christmas()) if effect == EFFECT_RGB: flow = Flow(count=0, transitions=rgb()) if effect == EFFECT_RANDOM_LOOP: flow = Flow(count=0, transitions=randomloop()) if effect == EFFECT_FAST_RANDOM_LOOP: flow = Flow(count=0, transitions=randomloop(duration=250)) if effect == EFFECT_SLOWDOWN: flow = Flow(count=0, transitions=slowdown()) if effect == EFFECT_WHATSAPP: flow = Flow(count=2, transitions=pulse(37, 211, 102)) if effect == EFFECT_FACEBOOK: flow = Flow(count=2, transitions=pulse(59, 89, 152)) if effect == EFFECT_TWITTER: flow = Flow(count=2, transitions=pulse(0, 172, 237)) try: self._bulb.start_flow(flow) except BulbException as ex: _LOGGER.error("Unable to set effect: %s", ex) def turn_on(self, **kwargs) -> None: """Turn the bulb on.""" import yeelight brightness = kwargs.get(ATTR_BRIGHTNESS) colortemp = kwargs.get(ATTR_COLOR_TEMP) hs_color = kwargs.get(ATTR_HS_COLOR) rgb = color_util.color_hs_to_RGB(*hs_color) if hs_color else None flash = kwargs.get(ATTR_FLASH) effect = kwargs.get(ATTR_EFFECT) duration = int(self.config[CONF_TRANSITION]) # in ms if ATTR_TRANSITION in kwargs: # passed kwarg overrides config duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s try: self._bulb.turn_on(duration=duration) except yeelight.BulbException as ex: _LOGGER.error("Unable to turn the bulb on: %s", ex) return if self.config[CONF_MODE_MUSIC] and not self._bulb.music_mode: try: self.set_music_mode(self.config[CONF_MODE_MUSIC]) except yeelight.BulbException as ex: _LOGGER.error("Unable to turn on music mode," "consider disabling it: %s", ex) try: # values checked for none in methods 
self.set_rgb(rgb, duration) self.set_colortemp(colortemp, duration) self.set_brightness(brightness, duration) self.set_flash(flash) self.set_effect(effect) except yeelight.BulbException as ex: _LOGGER.error("Unable to set bulb properties: %s", ex) return # save the current state if we had a manual change. if self.config[CONF_SAVE_ON_CHANGE] and (brightness or colortemp or rgb): try: self.set_default() except yeelight.BulbException as ex: _LOGGER.error("Unable to set the defaults: %s", ex) return def turn_off(self, **kwargs) -> None: """Turn off.""" import yeelight duration = int(self.config[CONF_TRANSITION]) # in ms if ATTR_TRANSITION in kwargs: # passed kwarg overrides config duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s try: self._bulb.turn_off(duration=duration) except yeelight.BulbException as ex: _LOGGER.error("Unable to turn the bulb off: %s", ex) def set_mode(self, mode: str): """Set a power mode.""" import yeelight try: self._bulb.set_power_mode(yeelight.enums.PowerMode[mode.upper()]) except yeelight.BulbException as ex: _LOGGER.error("Unable to set the power mode: %s", ex)
homeassistant/components/light/yeelight.py
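The Kelvin limits above are converted to Home Assistant's mired scale via homeassistant.util.color; the underlying relation is the standard reciprocal mired = 1,000,000 / K, so the maximum Kelvin yields min_mireds and the minimum Kelvin yields max_mireds. A standalone sketch of that arithmetic (constants copied from the component, the local helper stands in for the imported one):

def kelvin_to_mired(kelvin):
    # Reciprocal relation between color temperature in Kelvin and mireds.
    return 1e6 / kelvin

YEELIGHT_RGB_MIN_KELVIN = 1700
YEELIGHT_RGB_MAX_KELVIN = 6500

print(round(kelvin_to_mired(YEELIGHT_RGB_MAX_KELVIN)))   # ~154 mireds -> min_mireds
print(round(kelvin_to_mired(YEELIGHT_RGB_MIN_KELVIN)))   # ~588 mireds -> max_mireds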
import numpy as np
import sys


class RBF():
    def __init__(self, Input, Output, Ptypes, Nclasses):
        self.input = Input
        self.hidden = Ptypes * Nclasses
        self.output = Output
        self.ptypes = Ptypes
        self.nclasses = Nclasses
        self.protos = 0
        self.weights = 0
        self.spread = 0

    def createPrototypes(self, data):
        groups = np.random.randint(0, data.shape[0], size=(self.hidden))
        prototypes = np.zeros((self.hidden, data.shape[1]))
        i = 0
        for element in groups:
            prototypes[i] = data[element, :]
            i += 1
        self.protos = prototypes

    def sigma(self):
        temp = 0
        for i in range(self.hidden):
            for j in range(self.hidden):
                distance = np.square(np.linalg.norm(self.protos[i] - self.protos[j]))
                if distance > temp:
                    temp = distance
        self.spread = temp / np.sqrt(self.hidden)

    def train(self, data, classes):
        self.createPrototypes(data)
        self.sigma()
        hidden_out = np.zeros(shape=(0, self.hidden))
        for data in data:
            output = []
            for proto in self.protos:
                distance = np.square(np.linalg.norm(data - proto))
                neuron_output = np.exp(-(distance) / (np.square(self.spread)))
                output.append(neuron_output)
            hidden_out = np.vstack([hidden_out, np.array(output)])
        self.weights = np.dot(np.linalg.pinv(hidden_out), classes)

    def test(self, data, classes):
        right = 0
        for i in range(len(data)):
            d = data[i]
            output = []
            for proto in self.protos:
                distance = np.square(np.linalg.norm(d - proto))
                neuron_output = np.exp(-(distance) / np.square(self.spread))
                output.append(neuron_output)
            network_output = np.dot(np.array(output), self.weights)
            print("Expected: ", classes[i].argmax(axis=0) + 1)
            print("Result: ", network_output.argmax(axis=0) + 1)
            print()
            if network_output.argmax(axis=0) + 1 == classes[i].argmax(axis=0) + 1:
                right += 1
        print("Accuracy(%): ", (right * 100) / len(data))


def read_iris(percentage):
    dataset = np.loadtxt('iris.data', delimiter=',', skiprows=0)
    np.random.shuffle(dataset)
    q = int(dataset.shape[0] * percentage) + 2
    X_training = dataset[0:q, 0:4]
    Y_training = dataset[0:q, 4]
    X_test = dataset[q:150, 0:4]
    Y_test = dataset[q:150, 4]
    return X_training, Y_training, X_test, Y_test


def process_iris_data(data):
    p_data = np.zeros((data.shape[0], data.shape[1]))
    max_col1 = np.amax(data[:, 0])
    max_col2 = np.amax(data[:, 1])
    max_col3 = np.amax(data[:, 2])
    max_col4 = np.amax(data[:, 3])
    for n in range(len(data)):
        p_data[n, 0] = data[n, 0] / max_col1
        p_data[n, 1] = data[n, 1] / max_col2
        p_data[n, 2] = data[n, 2] / max_col3
        p_data[n, 3] = data[n, 3] / max_col4
    return p_data


def process_iris_labels(labels, operation):
    if operation == 0:
        p_labels = np.zeros((labels.shape[0], 3))
        for n in range(len(labels)):
            p_labels[n, int(labels[n])] = 1
        return p_labels
    else:
        p_labels = np.argmax(labels, axis=1)
        return p_labels


if __name__ == '__main__':
    # input params
    # percentage
    parameters = (sys.argv)
    print(parameters)
    x1, y1, x2, y2 = read_iris(float(parameters[1]))
    xp = process_iris_data(x1)
    yp = process_iris_labels(y1, 0)
    nn = RBF(xp.shape[1], y1.shape[0], xp.shape[1], 3)
    nn.train(xp, yp)
    xp = process_iris_data(x2)
    yp = process_iris_labels(y2, 0)
    nn.test(xp, yp)
Assignment 3/rbf.py
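A minimal sketch of driving the RBF class on synthetic data rather than iris.data, assuming the file is importable as a module named rbf (name taken from the path above; the blob data and class counts are made up for illustration):

import numpy as np
from rbf import RBF   # hypothetical module name derived from rbf.py

rng = np.random.default_rng(0)
n_per_class, n_classes = 30, 3

# Three well-separated 2-D Gaussian blobs, with one-hot encoded labels.
X = np.vstack([rng.normal(loc=c * 3.0, scale=0.5, size=(n_per_class, 2))
               for c in range(n_classes)])
Y = np.zeros((n_per_class * n_classes, n_classes))
for c in range(n_classes):
    Y[c * n_per_class:(c + 1) * n_per_class, c] = 1

net = RBF(Input=2, Output=n_classes, Ptypes=4, Nclasses=n_classes)
net.train(X, Y)   # fits output weights via the pseudo-inverse of the hidden activations
net.test(X, Y)    # prints per-sample predictions and overall accuracy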
#-------------------------------------------------------------------------------
# Name:        GUI Calculator
# Purpose:     Simple calculator with GUI using tkinter
#
# Author:      Daniel Campos
#
# Created:     Monday Dec 1st, 2014
#-------------------------------------------------------------------------------
from tkinter import *
import math


class Calculator:
    '''GUI for the calculator'''
    def __init__(self, master):
        self.master = master
        self.stringContents = ''
        self.displayStr = StringVar(self.stringContents)
        self.display = Label(master, textvariable=self.displayStr, width=25,
                             anchor=E, relief=SUNKEN).grid(row=0, columnspan=4)
        self.seven = Button(master, width=3, text='7', command=lambda: self.addSymbol('7')).grid(row=1, column=0)
        self.eight = Button(master, width=3, text='8', command=lambda: self.addSymbol('8')).grid(row=1, column=1)
        self.nine = Button(master, width=3, text='9', command=lambda: self.addSymbol('9')).grid(row=1, column=2)
        self.div = Button(master, width=3, text='/', command=lambda: self.addSymbol('/')).grid(row=1, column=3)
        self.master.bind('7', self.addKeyboardSymbol)
        self.master.bind('8', self.addKeyboardSymbol)
        self.master.bind('9', self.addKeyboardSymbol)
        self.master.bind('/', self.addKeyboardSymbol)
        self.four = Button(master, width=3, text='4', command=lambda: self.addSymbol('4')).grid(row=3, column=0)
        self.five = Button(master, width=3, text='5', command=lambda: self.addSymbol('5')).grid(row=3, column=1)
        self.six = Button(master, width=3, text='6', command=lambda: self.addSymbol('6')).grid(row=3, column=2)
        self.times = Button(master, width=3, text='*', command=lambda: self.addSymbol('*')).grid(row=3, column=3)
        self.master.bind('4', self.addKeyboardSymbol)
        self.master.bind('5', self.addKeyboardSymbol)
        self.master.bind('6', self.addKeyboardSymbol)
        self.master.bind('*', self.addKeyboardSymbol)
        self.one = Button(master, width=3, text='1', command=lambda: self.addSymbol('1')).grid(row=4, column=0)
        self.two = Button(master, width=3, text='2', command=lambda: self.addSymbol('2')).grid(row=4, column=1)
        self.three = Button(master, width=3, text='3', command=lambda: self.addSymbol('3')).grid(row=4, column=2)
        self.minus = Button(master, width=3, text='-', command=lambda: self.addSymbol('-')).grid(row=4, column=3)
        self.master.bind('1', self.addKeyboardSymbol)
        self.master.bind('2', self.addKeyboardSymbol)
        self.master.bind('3', self.addKeyboardSymbol)
        self.master.bind('-', self.addKeyboardSymbol)
        self.zero = Button(master, width=3, text='0', command=lambda: self.addSymbol('0')).grid(row=5, column=0)
        self.point = Button(master, width=3, text='.', command=lambda: self.addSymbol('.')).grid(row=5, column=1)
        self.equals = Button(master, width=3, text='=', command=lambda: self.evaluate()).grid(row=5, column=2)
        self.plus = Button(master, width=3, text='+', command=lambda: self.addSymbol('+')).grid(row=5, column=3)
        self.master.bind('0', self.addKeyboardSymbol)
        self.master.bind('.', self.addKeyboardSymbol)
        self.master.bind('<Return>', self.evaluate)
        self.master.bind('+', self.addKeyboardSymbol)
        self.c = Button(master, width=3, text='C', command=lambda: self.clear()).grid(row=6, column=0)
        self.d = Button(master, width=3, text='D', command=lambda: self.backSpace()).grid(row=6, column=1)
        self.lparren = Button(master, width=3, text='(', command=lambda: self.addSymbol('(')).grid(row=6, column=2)
        self.rparren = Button(master, width=3, text=')', command=lambda: self.addSymbol(')')).grid(row=6, column=3)
        self.master.bind('C', self.clear)
        self.master.bind('c', self.clear)
        self.master.bind('<BackSpace>', self.backSpace)
        self.master.bind('(', self.addKeyboardSymbol)
        self.master.bind(')', self.addKeyboardSymbol)

    def addSymbol(self, char):
        '''Displays the clicked char on the display'''
        self.stringContents += char
        self.displayStr.set(self.stringContents)

    def addKeyboardSymbol(self, event):
        '''Displays the typed char on the display'''
        self.stringContents += str(repr(event.char))[1:-1]
        self.displayStr.set(self.stringContents)

    def evaluate(self, evt=None):
        '''Evaluates the expression'''
        try:
            self.displayStr.set(eval(self.stringContents))
            self.stringContents = str(eval(self.stringContents))
        except Exception as e:
            self.displayStr.set('Error')
            self.stringContents = ''

    def clear(self, evt=None):
        '''Clears the expression'''
        self.stringContents = ''
        self.displayStr.set(self.stringContents)

    def backSpace(self, evt=None):
        '''Backspace on expression'''
        self.stringContents = self.stringContents[:-1]
        self.displayStr.set(self.stringContents)


def Main():
    master = Tk()
    calculator = Calculator(master)
    calculator.master.title('Calculator')
    calculator.master.resizable(False, False)
    master.mainloop()


if __name__ == '__main__':
    Main()
ProgrammingInPython/proj08_daniel_campos.py
"""
URLConf for Satchmo Contacts.
"""

from django.conf.urls.defaults import patterns
from signals_ahoy.signals import collect_urls
from satchmo_store import contact
from satchmo_store.shop.satchmo_settings import get_satchmo_setting

ssl = get_satchmo_setting('SSL', default_value=False)

urlpatterns = patterns('satchmo_store.contact.views',
    (r'^$', 'view', {}, 'satchmo_account_info'),
    (r'^update/$', 'update', {}, 'satchmo_profile_update'),
    (r'^ajax_state/$', 'ajax_get_state', {'SSL': ssl}, 'satchmo_contact_ajax_state'),
)

collect_urls.send(sender=contact, patterns=urlpatterns)
satchmo/apps/satchmo_store/contact/urls.py
import sys
import requests
import argparse
import json
import os
import configparser
import arrow
from colorama import init
import traceback


def get_color(color_code):
    return '\x1b[%sm' % color_code


def parse_brief(brief):
    sentences = None
    if args.news:
        sentences = json.loads(
            requests.get(
                "https://corpus.vocabulary.com/api/1.0/examples.json?maxResults=5&query="
                + args.word).text)['result']['sentences']
    word = WORD_COLOR + brief['wordOut'] + ": "
    if 'relation' in brief['lemma']:
        # "%s为%s的%s" prints "<word> is the <relation> of <lemma>" in Chinese.
        word += TEXT_COLOR + (
            "%s为%s的%s" % (brief['wordOut'], brief['lemma']['lemma'],
                          brief['lemma']['relation']))
    print(word)
    pron = ""
    if 'usPron' in brief:
        # "美音" = US pronunciation
        pron += HINT_COLOR + " 美音 " + TEXT_COLOR + "/%s/" % brief['usPron']['ps']
    if 'ukPron' in brief:
        # "英音" = UK pronunciation
        pron += HINT_COLOR + " 英音 " + TEXT_COLOR + "/%s/" % brief['ukPron']['ps']
    if pron:
        print(pron)
    if 'chnDefinitions' in brief:
        # "中文释义" = Chinese definitions
        print(SECTION_COLOR + "中文释义")
        for chn_def in brief['chnDefinitions']:
            if 'pos' in chn_def:
                print(" " + HINT_COLOR + chn_def['pos'].ljust(8)
                      + TEXT_COLOR + chn_def['meaning'])
            else:
                print(" " + "".ljust(8) + TEXT_COLOR + chn_def['meaning'])
    if 'engDefinitions' in brief:
        # "英文释义" = English definitions
        print(SECTION_COLOR + "英文释义")
        for eng_def in brief['engDefinitions']:
            if 'pos' in eng_def:
                print(" " + HINT_COLOR + eng_def['pos'].ljust(8)
                      + TEXT_COLOR + eng_def['meaning'])
            else:
                print(" " + "".ljust(8) + TEXT_COLOR + eng_def['meaning'])
    if sentences:
        # "新闻例句" = example sentences from news
        print(SECTION_COLOR + "新闻例句")
        for i, sentence in enumerate(sentences):
            print(TEXT_COLOR, "".ljust(4) + (str(i + 1) + ".").ljust(3)
                  + sentence['sentence'])
            print(SOURCE_COLOR, "".ljust(7) + sentence['volume']['corpus']['name']
                  + "".ljust(4)
                  + arrow.get(sentence['volume']['dateAdded']).format("MMM DD, YYYY"))


def parse_source(sentence_group):
    if 'source' not in sentence_group:
        # Default source: Oxford Advanced Learner's English-Chinese Dictionary.
        return "牛津高阶英汉双解词典"
    else:
        return sourceDict[sentence_group['source']]


def parse_detail(detail):
    parse_brief(detail['wordBrief'])
    if 'sentenceLists' in detail:
        # "双语例句" = bilingual example sentences
        print(SECTION_COLOR + "双语例句")
        for sentenceGroup in detail['sentenceLists']:
            count = 1
            print("".ljust(4) + HINT_COLOR + parse_source(sentenceGroup))
            for sentence in sentenceGroup['sentences']:
                print(TEXT_COLOR + "".ljust(8) + ("%s." % str(count)).ljust(3)
                      + sentence['eng'])
                print("".ljust(8) + "".ljust(3) + sentence['chn'])
                if count >= default_sent:
                    break
                count += 1


init()

# Chinese names of the bilingual dictionary sources (Cambridge, Longman,
# Collins and the iCIBA online dictionary).
sourceDict = {"CAMBRIDGE": "剑桥高阶英汉双解词典",
              "LONGMAN": "朗文当代高级英语词典",
              "COLLINS": "柯林斯英汉双解大词典",
              "ONLINE": "金山词霸"}

parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('word', type=str, help="The word you want to query")
parser.add_argument('--detail', '-d', action='store', default=0, const=2,
                    nargs='?', type=int, dest='detail',
                    help="Show the detailed meaning of the word")
parser.add_argument('--brief', '-b', action='store_true', default=True,
                    help="Show the brief meaning of the word")
parser.add_argument('--news', '-n', action='store_true', default=False,
                    help="Whether show sentence examples from news")
args = parser.parse_args()

if getattr(sys, 'frozen', False):
    # we are running in a bundle
    bundle_dir = os.path.split(sys.executable)[0]
else:
    # we are running in a normal Python environment
    bundle_dir = os.path.dirname(os.path.abspath(__file__))

config_path = os.path.join(bundle_dir, "color.ini")
config = configparser.ConfigParser()
config.read(config_path)

WORD_COLOR = get_color(
    config.getint('COLOR', 'word_color') if config.getint('COLOR', 'word_color') else 91)
HINT_COLOR = get_color(
    config.getint('COLOR', 'hint_color') if config.getint('COLOR', 'hint_color') else 92)
SECTION_COLOR = get_color(
    config.getint('COLOR', 'section_color') if config.getint('COLOR', 'section_color') else 93)
TEXT_COLOR = get_color(
    config.getint('COLOR', 'text_color') if config.getint('COLOR', 'text_color') else 97)
SOURCE_COLOR = get_color(
    config.getint('COLOR', 'source_color') if config.getint('COLOR', 'source_color') else 90)
ENDPOINT = config.get("CONFIG", "endpoint")

detail = json.loads(
    requests.get(ENDPOINT + "/word/detail?json=true&word=" + args.word).text)
default_sent = args.detail

try:
    if args.detail:
        parse_detail(detail)
    else:
        parse_brief(detail['wordBrief'])
except Exception as e:
    traceback.print_exc()
    # "该单词不存在" = "the word does not exist"
    print("该单词不存在")
win_python/idict.py
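The coloring above boils down to ANSI SGR escape sequences; a tiny standalone illustration of the same get_color scheme, independent of color.ini:

def get_color(color_code):
    # "\x1b[<n>m" is an ANSI SGR escape sequence; 90-97 select bright
    # foreground colors and 0 resets all attributes.
    return '\x1b[%sm' % color_code

RESET = get_color(0)
print(get_color(91) + 'word' + RESET + ' ' + get_color(92) + 'hint' + RESET)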
# Python import unittest from copy import deepcopy from unittest.mock import Mock # ATS from ats.topology import Device # Genie from genie.libs.ops.igmp.iosxe.igmp import Igmp from genie.libs.ops.igmp.iosxe.tests.igmp_output import IgmpOutput # Parser from genie.libs.parser.iosxe.show_igmp import ShowIpIgmpInterface, \ ShowIpIgmpGroupsDetail, \ ShowIpIgmpSsmMapping # iosxe show_vrf from genie.libs.parser.iosxe.show_vrf import ShowVrfDetail outputs = {} outputs['show ip igmp interface'] = IgmpOutput.ShowIpIgmpInterface_default outputs['show ip igmp vrf VRF1 interface'] = IgmpOutput.ShowIpIgmpInterface_VRF1 outputs['show ip igmp groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_default outputs['show ip igmp vrf VRF1 groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_VRF1 outputs['show ip igmp ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_default_1 outputs['show ip igmp ssm-mapping 239.2.2.2'] = IgmpOutput.ShowIpIgmpSsmMapping_default_2 outputs['show ip igmp ssm-mapping 239.3.3.3'] = IgmpOutput.ShowIpIgmpSsmMapping_default_3 outputs['show ip igmp ssm-mapping 239.4.4.4'] = IgmpOutput.ShowIpIgmpSsmMapping_default_4 outputs['show ip igmp ssm-mapping 239.5.5.5'] = IgmpOutput.ShowIpIgmpSsmMapping_default_5 outputs['show ip igmp ssm-mapping 239.6.6.6'] = IgmpOutput.ShowIpIgmpSsmMapping_default_6 outputs['show ip igmp ssm-mapping 239.7.7.7'] = IgmpOutput.ShowIpIgmpSsmMapping_default_7 outputs['show ip igmp ssm-mapping 239.8.8.8'] = IgmpOutput.ShowIpIgmpSsmMapping_default_8 outputs['show ip igmp ssm-mapping 239.9.9.9'] = IgmpOutput.ShowIpIgmpSsmMapping_default_9 outputs['show ip igmp ssm-mapping 224.0.1.40'] = IgmpOutput.ShowIpIgmpSsmMapping_default_10 outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_1 outputs['show ip igmp vrf VRF1 ssm-mapping 239.2.2.2'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_2 outputs['show ip igmp vrf VRF1 ssm-mapping 239.3.3.3'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_3 outputs['show ip igmp vrf VRF1 ssm-mapping 239.4.4.4'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_4 outputs['show ip igmp vrf VRF1 ssm-mapping 239.5.5.5'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_5 outputs['show ip igmp vrf VRF1 ssm-mapping 239.6.6.6'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_6 outputs['show ip igmp vrf VRF1 ssm-mapping 239.7.7.7'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_7 outputs['show ip igmp vrf VRF1 ssm-mapping 239.8.8.8'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_8 outputs['show ip igmp vrf VRF1 ssm-mapping 224.0.1.40'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_10 def mapper(key): return outputs[key] class test_igmp(unittest.TestCase): def setUp(self): self.device = Device(name='aDevice') self.device.os = 'iosxe' self.device.mapping={} self.device.mapping['cli']='cli' # Give the device as a connection type # This is done in order to call the parser on the output provided self.device.connectionmgr.connections['cli'] = self.device def test_complete_output(self): self.maxDiff = None igmp = Igmp(device=self.device) # Get outputs igmp.maker.outputs[ShowVrfDetail] = \ {'': IgmpOutput.ShowVrfDetail} # Return outputs above as inputs to parser when called self.device.execute = Mock() self.device.execute.side_effect = mapper # Learn the feature igmp.learn() # Verify Ops was created successfully self.assertEqual(igmp.info, IgmpOutput.Igmp_info) def test_empty_output(self): self.maxDiff = None igmp = Igmp(device=self.device) # Get outputs igmp.maker.outputs[ShowVrfDetail] = \ {'': {}} # Return outputs above as inputs to parser when called self.device.execute = 
Mock() outputs['show ip igmp interface'] = '' outputs['show ip igmp vrf VRF1 interface'] = '' outputs['show ip igmp groups detail'] = '' outputs['show ip igmp vrf VRF1 groups detail'] = '' outputs['show ip igmp ssm-mapping 239.1.1.1'] = '' outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = '' self.device.execute.side_effect = mapper # Learn the feature igmp.learn() # revert the outputs outputs['show ip igmp interface'] = IgmpOutput.ShowIpIgmpInterface_default outputs['show ip igmp vrf VRF1 interface'] = IgmpOutput.ShowIpIgmpInterface_VRF1 outputs['show ip igmp groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_default outputs['show ip igmp vrf VRF1 groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_VRF1 outputs['show ip igmp ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_default_1 outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_1 # Check no attribute not found with self.assertRaises(AttributeError): igmp.info['vrfs'] def test_selective_attribute(self): self.maxDiff = None igmp = Igmp(device=self.device) # Get outputs igmp.maker.outputs[ShowVrfDetail] = \ {'': IgmpOutput.ShowVrfDetail} # Return outputs above as inputs to parser when called self.device.execute = Mock() self.device.execute.side_effect = mapper # Learn the feature igmp.learn() # Check specific attribute values # info - default vrf self.assertEqual(igmp.info['vrfs']['default']['max_groups'], 20) # info - vrf VRF1 self.assertEqual(igmp.info['vrfs']['VRF1']['interfaces']\ ['GigabitEthernet2']['querier'], '20.1.2.1') def test_incomplete_output(self): self.maxDiff = None igmp = Igmp(device=self.device) # Get outputs igmp.maker.outputs[ShowVrfDetail] = \ {'': IgmpOutput.ShowVrfDetail} # Return outputs above as inputs to parser when called self.device.execute = Mock() # overwrite output with empty output outputs['show ip igmp vrf VRF1 groups detail'] = '''\ show ip igmp vrf VRF1 groups detail ''' self.device.execute.side_effect = mapper # Learn the feature igmp.learn() # Delete missing specific attribute values expect_dict = deepcopy(IgmpOutput.Igmp_info) del(expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['join_group']) del(expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['static_group']) del(expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['group']) del(expect_dict['vrfs']['VRF1']['ssm_map']) # Verify Ops was created successfully self.assertEqual(igmp.info, expect_dict) if __name__ == '__main__': unittest.main()
pkgs/ops-pkg/src/genie/libs/ops/igmp/iosxe/tests/test_igmp.py
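The test feeds canned CLI output to the device through unittest.mock's side_effect hook; the same dispatch pattern in isolation (the command strings and canned outputs below are placeholders):

from unittest.mock import Mock

outputs = {
    'show ip igmp interface': 'canned default-VRF output',
    'show ip igmp vrf VRF1 interface': 'canned VRF1 output',
}

def mapper(key):
    return outputs[key]

# Mock(side_effect=fn) makes device.execute(cmd) return fn(cmd), so each
# command string is answered with its canned output.
device = Mock()
device.execute = Mock(side_effect=mapper)

print(device.execute('show ip igmp interface'))           # canned default-VRF output
print(device.execute('show ip igmp vrf VRF1 interface'))  # canned VRF1 output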
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#

import json
import struct
import re
import base64
import httplib
import sys

settings = {}


class BitcoinRPC:
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        self.OBJID += 1
        obj = {'version': '1.1',
               'method': method,
               'id': self.OBJID}
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          {'Authorization': self.authhdr,
                           'Content-type': 'application/json'})

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblock(self, hash, verbose=True):
        return self.rpc('getblock', [hash, verbose])

    def getblockhash(self, index):
        return self.rpc('getblockhash', [index])


def get_block_hashes(settings):
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])

    for height in xrange(settings['min_height'], settings['max_height'] + 1):
        hash = rpc.getblockhash(height)
        print(hash)


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: linearize-hashes.py CONFIG-FILE"
        sys.exit(1)

    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 4242
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 319000
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    get_block_hashes(settings)
contrib/linearize/linearize-hashes.py
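The script expects a key=value config file; a hypothetical example covering the keys parsed above (the credentials are placeholders and the height range mirrors the script's defaults):

# Write a sample config, then run:
#   python linearize-hashes.py linearize.cfg > hashlist.txt
sample_cfg = """\
# RPC credentials (placeholders)
rpcuser=someuser
rpcpassword=somepassword
host=127.0.0.1
port=4242
# block range to list
min_height=0
max_height=319000
"""

with open('linearize.cfg', 'w') as f:
    f.write(sample_cfg)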
#!/usr/bin/env python """ Functions for estimating quantities from nested sampling runs. Each estimator function should have arguments: .. code-block:: python def estimator_func(self, ns_run, logw=None, simulate=False): ... Any additional arguments required for the function should be keyword arguments. The ``logw`` argument allows the log weights for the points in the run to be provided - this is useful if many estimators are being calculated from the same run as it allows ``logw`` to only be calculated once. If it is not specified, ``logw`` is calculated from the run when required. The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used if the function needs to calculate ``logw``. """ import functools import numpy as np import scipy import nestcheck.ns_run_utils # Estimators # ---------- def count_samples(ns_run, **kwargs): r"""Number of samples in run. Unlike most estimators this does not require log weights, but for convenience will not throw an error if they are specified. Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). Returns ------- int """ kwargs.pop('logw', None) kwargs.pop('simulate', None) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) return ns_run['logl'].shape[0] def logz(ns_run, logw=None, simulate=False): r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`. Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. Returns ------- float """ if logw is None: logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate) return scipy.special.logsumexp(logw) def evidence(ns_run, logw=None, simulate=False): r"""Bayesian evidence :math:`\log \mathcal{Z}`. Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. Returns ------- float """ if logw is None: logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate) return np.exp(scipy.special.logsumexp(logw)) def param_mean(ns_run, logw=None, simulate=False, param_ind=0, handle_indexerror=False): """Mean of a single parameter (single component of theta). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. param_ind: int, optional Index of parameter for which the mean should be calculated. This corresponds to the column of ns_run['theta'] which contains the parameter. handle_indexerror: bool, optional Make the function function return nan rather than raising an IndexError if param_ind >= ndim. This is useful when applying the same list of estimators to data sets of different dimensions. 
Returns ------- float """ if logw is None: logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate) w_relative = np.exp(logw - logw.max()) try: return (np.sum(w_relative * ns_run['theta'][:, param_ind]) / np.sum(w_relative)) except IndexError: if handle_indexerror: return np.nan else: raise def param_cred(ns_run, logw=None, simulate=False, probability=0.5, param_ind=0): """One-tailed credible interval on the value of a single parameter (component of theta). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. probability: float, optional Quantile to estimate - must be in open interval (0, 1). For example, use 0.5 for the median and 0.84 for the upper 84% quantile. Passed to weighted_quantile. param_ind: int, optional Index of parameter for which the credible interval should be calculated. This corresponds to the column of ns_run['theta'] which contains the parameter. Returns ------- float """ if logw is None: logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate) w_relative = np.exp(logw - logw.max()) # protect against overflow return weighted_quantile(probability, ns_run['theta'][:, param_ind], w_relative) def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0): """Mean of the square of single parameter (second moment of its posterior distribution). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. param_ind: int, optional Index of parameter for which the second moment should be calculated. This corresponds to the column of ns_run['theta'] which contains the parameter. Returns ------- float """ if logw is None: logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate) w_relative = np.exp(logw - logw.max()) # protect against overflow w_relative /= np.sum(w_relative) return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2)) def r_mean(ns_run, logw=None, simulate=False): """Mean of the radial coordinate (magnitude of theta vector). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. Returns ------- float """ if logw is None: logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate) w_relative = np.exp(logw - logw.max()) r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1)) return np.sum(w_relative * r) / np.sum(w_relative) def r_cred(ns_run, logw=None, simulate=False, probability=0.5): """One-tailed credible interval on the value of the radial coordinate (magnitude of theta vector). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. probability: float, optional Quantile to estimate - must be in open interval (0, 1). For example, use 0.5 for the median and 0.84 for the upper 84% quantile. Passed to weighted_quantile. 
Returns ------- float """ if logw is None: logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate) w_relative = np.exp(logw - logw.max()) # protect against overflow r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1)) return weighted_quantile(probability, r, w_relative) # Helper functions # ---------------- def get_latex_name(func_in, **kwargs): """ Produce a latex formatted name for each function for use in labelling results. Parameters ---------- func_in: function kwargs: dict, optional Kwargs for function. Returns ------- latex_name: str Latex formatted name for the function. """ if isinstance(func_in, functools.partial): func = func_in.func assert not set(func_in.keywords) & set(kwargs), ( 'kwargs={0} and func_in.keywords={1} contain repeated keys' .format(kwargs, func_in.keywords)) kwargs.update(func_in.keywords) else: func = func_in param_ind = kwargs.pop('param_ind', 0) probability = kwargs.pop('probability', 0.5) kwargs.pop('handle_indexerror', None) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) ind_str = r'{\hat{' + str(param_ind + 1) + '}}' latex_name_dict = { 'count_samples': r'samples', 'logz': r'$\mathrm{log} \mathcal{Z}$', 'evidence': r'$\mathcal{Z}$', 'r_mean': r'$\overline{|\theta|}$', 'param_mean': r'$\overline{\theta_' + ind_str + '}$', 'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'} # Add credible interval names if probability == 0.5: cred_str = r'$\mathrm{median}(' else: # format percent without trailing zeros percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.') cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}(' latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$' latex_name_dict['r_cred'] = cred_str + r'|\theta|)$' try: return latex_name_dict[func.__name__] except KeyError as err: err.args = err.args + ('get_latex_name not yet set up for ' + func.__name__,) raise def weighted_quantile(probability, values, weights): """ Get quantile estimate for input probability given weighted samples using linear interpolation. Parameters ---------- probability: float Quantile to estimate - must be in open interval (0, 1). For example, use 0.5 for the median and 0.84 for the upper 84% quantile. values: 1d numpy array Sample values. weights: 1d numpy array Corresponding sample weights (same shape as values). Returns ------- quantile: float """ assert 1 > probability > 0, ( 'credible interval prob= ' + str(probability) + ' not in (0, 1)') assert values.shape == weights.shape assert values.ndim == 1 assert weights.ndim == 1 sorted_inds = np.argsort(values) quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds]) quantiles /= np.sum(weights) return np.interp(probability, quantiles, values[sorted_inds])
nestcheck/estimators.py
11,030
Number of samples in run. Unlike most estimators this does not require log weights, but for convenience will not throw an error if they are specified. Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). Returns ------- int Bayesian evidence :math:`\log \mathcal{Z}`. Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. Returns ------- float Produce a latex formatted name for each function for use in labelling results. Parameters ---------- func_in: function kwargs: dict, optional Kwargs for function. Returns ------- latex_name: str Latex formatted name for the function. Natural log of Bayesian evidence :math:`\log \mathcal{Z}`. Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. Returns ------- float One-tailed credible interval on the value of a single parameter (component of theta). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. probability: float, optional Quantile to estimate - must be in open interval (0, 1). For example, use 0.5 for the median and 0.84 for the upper 84% quantile. Passed to weighted_quantile. param_ind: int, optional Index of parameter for which the credible interval should be calculated. This corresponds to the column of ns_run['theta'] which contains the parameter. Returns ------- float Mean of a single parameter (single component of theta). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. param_ind: int, optional Index of parameter for which the mean should be calculated. This corresponds to the column of ns_run['theta'] which contains the parameter. handle_indexerror: bool, optional Make the function function return nan rather than raising an IndexError if param_ind >= ndim. This is useful when applying the same list of estimators to data sets of different dimensions. Returns ------- float Mean of the square of single parameter (second moment of its posterior distribution). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. param_ind: int, optional Index of parameter for which the second moment should be calculated. This corresponds to the column of ns_run['theta'] which contains the parameter. Returns ------- float One-tailed credible interval on the value of the radial coordinate (magnitude of theta vector). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. 
simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. probability: float, optional Quantile to estimate - must be in open interval (0, 1). For example, use 0.5 for the median and 0.84 for the upper 84% quantile. Passed to weighted_quantile. Returns ------- float Mean of the radial coordinate (magnitude of theta vector). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. Returns ------- float Get quantile estimate for input probability given weighted samples using linear interpolation. Parameters ---------- probability: float Quantile to estimate - must be in open interval (0, 1). For example, use 0.5 for the median and 0.84 for the upper 84% quantile. values: 1d numpy array Sample values. weights: 1d numpy array Corresponding sample weights (same shape as values). Returns ------- quantile: float Functions for estimating quantities from nested sampling runs. Each estimator function should have arguments: .. code-block:: python def estimator_func(self, ns_run, logw=None, simulate=False): ... Any additional arguments required for the function should be keyword arguments. The ``logw`` argument allows the log weights for the points in the run to be provided - this is useful if many estimators are being calculated from the same run as it allows ``logw`` to only be calculated once. If it is not specified, ``logw`` is calculated from the run when required. The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used if the function needs to calculate ``logw``. !/usr/bin/env python Estimators ---------- protect against overflow protect against overflow protect against overflow Helper functions ---------------- Add credible interval names format percent without trailing zeros
5,795
en
0.554408
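A minimal sketch of how the estimator functions above can be exercised: build a toy run dict holding only a 'theta' sample array and pass precomputed log weights, so nestcheck.ns_run_utils.get_logw is never called. The equal-weight toy data and the import line are assumptions for illustration, not part of the file above.

import numpy as np
from nestcheck import estimators  # the module listed above (nestcheck/estimators.py)

# Toy "run": 1000 samples in 2 dimensions with equal weights. A real nested
# sampling run dict also carries 'logl', 'nlive_array', etc., but supplying
# logw directly means those keys are not needed here.
rng = np.random.default_rng(0)
ns_run = {'theta': rng.normal(size=(1000, 2))}
logw = np.zeros(1000)

print(estimators.param_mean(ns_run, logw=logw, param_ind=0))       # weighted mean of theta_1
print(estimators.param_cred(ns_run, logw=logw, probability=0.84))  # upper 84% quantile
print(estimators.r_mean(ns_run, logw=logw))                        # mean of |theta|
print(estimators.get_latex_name(estimators.param_cred, probability=0.84))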
"""Database schema functions and information for Toron node files. Toron nodes are stored as individual files. The file format is managed, internally, as a relational database. The schema for this database is shown below as a simplified ERD (entity relationship diagram). SQL foreign key relationships are represented with hyphen and pipe characters ('-' and '|'). Other, more complex relationships are represented with bullet points ('•') and these are enforced at the application layer: +------------------+ +---------------------+ | relation | | edge | +------------------+ +---------------------+ | relation_id | •••• <Other Node> | edge_id |------->| edge_id | • | name | ••••••| other_element_id |<••••• | type_info | • •••| element_id |<-+ +--------------+ | description | • • | proportion | | | quantity | | user_properties | • • | mapping_level | | +--------------+ | other_uuid | • • +------------------+ | | quantity_id | | other_filename_hint | • • | +->| _location_id | | other_element_hash |<•• • | | | attributes | | is_complete |<••••• +-----------------+ | | value | +---------------------+ | | +--------------+ | | +------------+ | +--------------+ | +---------------+ | element | | | location | | | structure | +------------+ | +--------------+ | +---------------+ +------| element_id |--+ | _location_id |--+ | _structure_id | | | label_a |••••>| label_a |<••••| label_a | | | label_b |••••>| label_b |<••••| label_b | | | label_c |••••>| label_c |<••••| label_c | | | ... |••••>| ... |<••••| ... | | +------------+ +--------------+ +---------------+ | | +-------------------+ +----------+ | | element_weight | +-------------+ | property | | +-------------------+ | weight | +----------+ | | element_weight_id | +-------------+ | key | | | weight_id |<----| weight_id | | value | +->| element_id |••• | name | +----------+ | value | • | type_info | +-------------------+ • | description | ••>| is_complete | +-------------+ """ import itertools import os import re import sqlite3 from contextlib import contextmanager from json import loads as _loads from urllib.parse import quote as urllib_parse_quote from ._exceptions import ToronError sqlite3.register_converter('TEXT_JSON', _loads) sqlite3.register_converter('TEXT_ATTRIBUTES', _loads) def _is_sqlite_json1_enabled(): """Check if SQLite implementation includes JSON1 extension.""" # The inclusion of JSON functions is optional when compiling SQLite. # In versions 3.38.0 and newer, JSON functions are included by # default but can be disabled (opt-out policy). For older versions # of SQLite, JSON functions are available on an opt-in basis. It is # necessary to test for their presence rathern than referencing the # SQLite version number. 
# # For more information, see: # https://www.sqlite.org/json1.html#compiling_in_json_support con = sqlite3.connect(':memory:') try: con.execute("SELECT json_valid('123')") except sqlite3.OperationalError: return False finally: con.close() return True SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled() _schema_script = """ PRAGMA foreign_keys = ON; CREATE TABLE edge( edge_id INTEGER PRIMARY KEY, name TEXT NOT NULL, type_info TEXT_ATTRIBUTES NOT NULL, description TEXT, user_properties TEXT_USERPROPERTIES, other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL, other_filename_hint TEXT NOT NULL, other_element_hash TEXT, is_complete INTEGER CHECK (is_complete IN (0, 1)), UNIQUE (name, other_uuid) ); CREATE TABLE relation( relation_id INTEGER PRIMARY KEY, edge_id INTEGER, other_element_id INTEGER NOT NULL, element_id INTEGER, proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL, mapping_level INTEGER NOT NULL, FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE, FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED, UNIQUE (edge_id, other_element_id, element_id) ); CREATE TABLE element( element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */ /* label columns added programmatically */ ); CREATE TABLE location( _location_id INTEGER PRIMARY KEY /* label columns added programmatically */ ); CREATE TABLE structure( _structure_id INTEGER PRIMARY KEY /* label columns added programmatically */ ); CREATE TABLE quantity( quantity_id INTEGER PRIMARY KEY, _location_id INTEGER, attributes TEXT_ATTRIBUTES NOT NULL, value NUMERIC NOT NULL, FOREIGN KEY(_location_id) REFERENCES location(_location_id) ); CREATE TABLE weight( weight_id INTEGER PRIMARY KEY, name TEXT NOT NULL, type_info TEXT_ATTRIBUTES NOT NULL, description TEXT, is_complete INTEGER CHECK (is_complete IN (0, 1)), UNIQUE (name) ); CREATE TABLE element_weight( element_weight_id INTEGER PRIMARY KEY, weight_id INTEGER, element_id INTEGER, value REAL NOT NULL, FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE, FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED, UNIQUE (element_id, weight_id) ); CREATE TABLE property( key TEXT PRIMARY KEY NOT NULL, value TEXT_JSON ); INSERT INTO property VALUES ('schema_version', '1'); """ def _is_wellformed_json(x): """Return 1 if *x* is well-formed JSON or return 0 if *x* is not well-formed. This function should be registered with SQLite (via the create_function() method) when the JSON1 extension is not available. This function mimics the JSON1 json_valid() function, see: https://www.sqlite.org/json1.html#jvalid """ try: _loads(x) except (ValueError, TypeError): return 0 return 1 def _make_trigger_for_json(insert_or_update, table, column): """Return a SQL statement for creating a temporary trigger. The trigger is used to validate the contents of TEXT_JSON type columns. The trigger will pass without error if the JSON is wellformed. 
""" if insert_or_update.upper() not in {'INSERT', 'UPDATE'}: msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}" raise ValueError(msg) if SQLITE_JSON1_ENABLED: when_clause = f""" NEW.{column} IS NOT NULL AND json_valid(NEW.{column}) = 0 """.rstrip() else: when_clause = f""" NEW.{column} IS NOT NULL AND is_wellformed_json(NEW.{column}) = 0 """.rstrip() return f''' CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column} BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW WHEN{when_clause} BEGIN SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON'); END; ''' def _is_wellformed_user_properties(x): """Check if *x* is a wellformed TEXT_USERPROPERTIES value. A wellformed TEXT_USERPROPERTIES value is a string containing a JSON formatted object. Returns 1 if *x* is valid or 0 if it's not. This function should be registered as an application-defined SQL function and used in queries when SQLite's JSON1 extension is not enabled. """ try: obj = _loads(x) except (ValueError, TypeError): return 0 if isinstance(obj, dict): return 1 return 0 def _make_trigger_for_user_properties(insert_or_update, table, column): """Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES values. This trigger is used to check values before they are saved in the database. A wellformed TEXT_USERPROPERTIES value is a string containing a JSON formatted object. The trigger will pass without error if the value is wellformed. """ if SQLITE_JSON1_ENABLED: user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')" else: user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0' return f''' CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column} BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW WHEN NEW.{column} IS NOT NULL AND {user_properties_check} BEGIN SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object'); END; ''' def _is_wellformed_attributes(x): """Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column value else returns 0. TEXT_ATTRIBUTES should be flat, JSON object strings. This function should be registered with SQLite (via the create_function() method) when the JSON1 extension is not available. """ try: obj = _loads(x) except (ValueError, TypeError): return 0 if not isinstance(obj, dict): return 0 for value in obj.values(): if not isinstance(value, str): return 0 return 1 def _make_trigger_for_attributes(insert_or_update, table, column): """Return a SQL statement for creating a temporary trigger. The trigger is used to validate the contents of TEXT_ATTRIBUTES type columns. The trigger will pass without error if the JSON is a wellformed "object" containing "text" values. 
The trigger will raise an error if the value is: * not wellformed JSON * not an "object" type * an "object" type that contains one or more "integer", "real", "true", "false", "null", "object" or "array" types """ if insert_or_update.upper() not in {'INSERT', 'UPDATE'}: msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}" raise ValueError(msg) if SQLITE_JSON1_ENABLED: when_clause = f""" NEW.{column} IS NOT NULL AND (json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object' OR (SELECT COUNT(*) FROM json_each(NEW.{column}) WHERE json_each.type != 'text') != 0) """.rstrip() else: when_clause = f""" NEW.{column} IS NOT NULL AND is_wellformed_attributes(NEW.{column}) = 0 """.rstrip() return f''' CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column} BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW WHEN{when_clause} BEGIN SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values'); END; ''' def _add_functions_and_triggers(connection): """Create triggers and application-defined functions *connection*. Note: This function must not be executed on an empty connection. The table schema must exist before triggers can be created. """ if not SQLITE_JSON1_ENABLED: try: connection.create_function( 'is_wellformed_json', 1, _is_wellformed_json, deterministic=True) connection.create_function( 'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True) connection.create_function( 'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True) except TypeError: connection.create_function('is_wellformed_json', 1, _is_wellformed_json) connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties) connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes) connection.execute(_make_trigger_for_json('INSERT', 'property', 'value')) connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value')) connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties')) connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties')) jsonflatobj_columns = [ ('edge', 'type_info'), ('quantity', 'attributes'), ('weight', 'type_info'), ] for table, column in jsonflatobj_columns: connection.execute(_make_trigger_for_attributes('INSERT', table, column)) connection.execute(_make_trigger_for_attributes('UPDATE', table, column)) def _path_to_sqlite_uri(path): """Convert a path into a SQLite compatible URI. Unlike pathlib's URI handling, SQLite accepts relative URI paths. For details, see: https://www.sqlite.org/uri.html#the_uri_path """ if os.name == 'nt': # Windows if re.match(r'^[a-zA-Z]:', path): path = os.path.abspath(path) # If drive-letter, must be absolute. drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter. 
path = path[2:] else: drive_prefix = '' path = path.replace('\\', '/') path = urllib_parse_quote(path) path = f'{drive_prefix}{path}' else: path = urllib_parse_quote(path) path = re.sub('/+', '/', path) return f'file:{path}' def connect(path, mode='rwc'): """Returns a sqlite3 connection to a Toron node file.""" uri_path = _path_to_sqlite_uri(path) uri_path = f'{uri_path}?mode={mode}' try: get_connection = lambda: sqlite3.connect( database=uri_path, detect_types=sqlite3.PARSE_DECLTYPES, isolation_level=None, uri=True, ) if os.path.exists(path): con = get_connection() else: con = get_connection() con.executescript(_schema_script) # Create database schema. except sqlite3.OperationalError as err: msg = str(err).replace('database file', f'node file {path!r}') raise ToronError(msg) try: _add_functions_and_triggers(con) except (sqlite3.OperationalError, sqlite3.DatabaseError): # Raises OperationalError when *path* is a database with an unknown # schema and DatabaseError when *path* is a file but not a database. con.close() raise ToronError(f'Path is not a Toron node: {path!r}') cur = con.execute("SELECT value FROM property WHERE key='schema_version'") schema_version, *_ = cur.fetchone() or (None,) cur.close() if schema_version != 1: # When schema version is unsupported. msg = f'Unsupported Toron node format: schema version {schema_version!r}' raise ToronError(msg) return con _SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count()) class savepoint(object): """Context manager to wrap a block of code inside a SAVEPOINT. If the block exists without errors, the SAVEPOINT is released and the changes are committed. If an error occurs, all of the changes are rolled back: cur = con.cursor() with savepoint(cur): cur.execute(...) """ def __init__(self, cursor): if cursor.connection.isolation_level is not None: isolation_level = cursor.connection.isolation_level msg = ( f'isolation_level must be None, got: {isolation_level!r}\n' '\n' 'For explicit transaction handling, the connection must ' 'be operating in "autocommit" mode. Turn on autocommit ' 'mode by setting "con.isolation_level = None".' ) raise sqlite3.OperationalError(msg) self.name = next(_SAVEPOINT_NAME_GENERATOR) self.cursor = cursor def __enter__(self): self.cursor.execute(f'SAVEPOINT {self.name}') def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is None: self.cursor.execute(f'RELEASE {self.name}') else: self.cursor.execute(f'ROLLBACK TO {self.name}') @contextmanager def transaction(path_or_connection, mode=None): """A context manager that yields a cursor that runs in an isolated transaction. If the context manager exits without errors, the transaction is committed. If an exception is raised, all changes are rolled-back. """ if isinstance(path_or_connection, sqlite3.Connection): connection = path_or_connection connection_close = lambda: None # Don't close already-existing cursor. else: connection = connect(path_or_connection, mode=mode) connection_close = connection.close cursor = connection.cursor() try: with savepoint(cursor): yield cursor finally: cursor.close() connection_close()
toron/_node_schema.py
18,111
Context manager to wrap a block of code inside a SAVEPOINT. If the block exists without errors, the SAVEPOINT is released and the changes are committed. If an error occurs, all of the changes are rolled back: cur = con.cursor() with savepoint(cur): cur.execute(...) Create triggers and application-defined functions *connection*. Note: This function must not be executed on an empty connection. The table schema must exist before triggers can be created. Check if SQLite implementation includes JSON1 extension. Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column value else returns 0. TEXT_ATTRIBUTES should be flat, JSON object strings. This function should be registered with SQLite (via the create_function() method) when the JSON1 extension is not available. Return 1 if *x* is well-formed JSON or return 0 if *x* is not well-formed. This function should be registered with SQLite (via the create_function() method) when the JSON1 extension is not available. This function mimics the JSON1 json_valid() function, see: https://www.sqlite.org/json1.html#jvalid Check if *x* is a wellformed TEXT_USERPROPERTIES value. A wellformed TEXT_USERPROPERTIES value is a string containing a JSON formatted object. Returns 1 if *x* is valid or 0 if it's not. This function should be registered as an application-defined SQL function and used in queries when SQLite's JSON1 extension is not enabled. Return a SQL statement for creating a temporary trigger. The trigger is used to validate the contents of TEXT_ATTRIBUTES type columns. The trigger will pass without error if the JSON is a wellformed "object" containing "text" values. The trigger will raise an error if the value is: * not wellformed JSON * not an "object" type * an "object" type that contains one or more "integer", "real", "true", "false", "null", "object" or "array" types Return a SQL statement for creating a temporary trigger. The trigger is used to validate the contents of TEXT_JSON type columns. The trigger will pass without error if the JSON is wellformed. Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES values. This trigger is used to check values before they are saved in the database. A wellformed TEXT_USERPROPERTIES value is a string containing a JSON formatted object. The trigger will pass without error if the value is wellformed. Convert a path into a SQLite compatible URI. Unlike pathlib's URI handling, SQLite accepts relative URI paths. For details, see: https://www.sqlite.org/uri.html#the_uri_path Returns a sqlite3 connection to a Toron node file. A context manager that yields a cursor that runs in an isolated transaction. If the context manager exits without errors, the transaction is committed. If an exception is raised, all changes are rolled-back. Database schema functions and information for Toron node files. Toron nodes are stored as individual files. The file format is managed, internally, as a relational database. The schema for this database is shown below as a simplified ERD (entity relationship diagram). SQL foreign key relationships are represented with hyphen and pipe characters ('-' and '|'). 
Other, more complex relationships are represented with bullet points ('•') and these are enforced at the application layer: +------------------+ +---------------------+ | relation | | edge | +------------------+ +---------------------+ | relation_id | •••• <Other Node> | edge_id |------->| edge_id | • | name | ••••••| other_element_id |<••••• | type_info | • •••| element_id |<-+ +--------------+ | description | • • | proportion | | | quantity | | user_properties | • • | mapping_level | | +--------------+ | other_uuid | • • +------------------+ | | quantity_id | | other_filename_hint | • • | +->| _location_id | | other_element_hash |<•• • | | | attributes | | is_complete |<••••• +-----------------+ | | value | +---------------------+ | | +--------------+ | | +------------+ | +--------------+ | +---------------+ | element | | | location | | | structure | +------------+ | +--------------+ | +---------------+ +------| element_id |--+ | _location_id |--+ | _structure_id | | | label_a |••••>| label_a |<••••| label_a | | | label_b |••••>| label_b |<••••| label_b | | | label_c |••••>| label_c |<••••| label_c | | | ... |••••>| ... |<••••| ... | | +------------+ +--------------+ +---------------+ | | +-------------------+ +----------+ | | element_weight | +-------------+ | property | | +-------------------+ | weight | +----------+ | | element_weight_id | +-------------+ | key | | | weight_id |<----| weight_id | | value | +->| element_id |••• | name | +----------+ | value | • | type_info | +-------------------+ • | description | ••>| is_complete | +-------------+ The inclusion of JSON functions is optional when compiling SQLite. In versions 3.38.0 and newer, JSON functions are included by default but can be disabled (opt-out policy). For older versions of SQLite, JSON functions are available on an opt-in basis. It is necessary to test for their presence rathern than referencing the SQLite version number. For more information, see: https://www.sqlite.org/json1.htmlcompiling_in_json_support Windows If drive-letter, must be absolute. Must not url-quote colon after drive-letter. Create database schema. Raises OperationalError when *path* is a database with an unknown schema and DatabaseError when *path* is a file but not a database. When schema version is unsupported. Don't close already-existing cursor.
6,512
en
0.679119
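A minimal usage sketch for the schema module above, assuming it is importable as toron._node_schema (per the path shown) and using a throwaway file name: connect() builds the schema on first use and transaction() wraps the work in a SAVEPOINT guarded by the JSON validation triggers.

from toron._node_schema import connect, transaction  # module listed above

con = connect('example.toron')  # hypothetical file name; schema is created on first use

# transaction() yields a cursor inside a SAVEPOINT; the temporary triggers
# reject values that are not well-formed JSON for TEXT_JSON columns.
with transaction(con) as cur:
    cur.execute("INSERT INTO property VALUES ('greeting', '\"hello\"')")

row = con.execute("SELECT value FROM property WHERE key='greeting'").fetchone()
print(row)  # the registered TEXT_JSON converter parses the value -> ('hello',)
con.close()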
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 09:10:56 2018

@author: gtucker
"""

import numpy as np
import datetime
from grainhill import GrainFacetSimulator
from grainhill import SlopeMeasurer
import landlab
from landlab.io.native_landlab import save_grid
import os


def create_folder(directory):
    try:
        if not os.path.exists(directory):
            os.makedirs(directory)
    except OSError:
        print('Error: Creating directory ' + directory)


params = {
    'grid_size': (111, 81),
    'report_interval': 5.0,
    'output_interval': 1.0e99,
    'disturbance_rate': 1.0e-4,
    'weathering_rate': 0.0,
    'dissolution_rate': 0.0,
    'friction_coef': 1.0,
    'fault_x': -0.01,
    'cell_width': 0.5,
    'grav_accel': 9.8,
}

# Open a file to record output:
d = datetime.datetime.today()
today_str = str(d.year) + str(d.month).zfill(2) + str(d.day).zfill(2)
results_file = open('results_v_vs_w' + today_str + '.csv', 'w')
results_file.write('Landlab version,' + landlab.__version__ + ',\n')

# Print header in file
results_file.write('Uplift interval (yr),Weathering rate '
                   + 'parameter (1/yr),Gradient (m/m),'
                   + 'Slope angle (deg)\n')

# Sweep through a range of uplift interval and weathering rate parameters
for uplift_interval_exp in np.arange(2, 5.2, 0.2):
    for weath_exp in np.arange(-5, -1.8, 0.2):

        weath_rate = 10.0**weath_exp
        uplift_interval = 10.0**uplift_interval_exp
        params['uplift_interval'] = uplift_interval
        params['weathering_rate'] = weath_rate

        # Set run duration long enough for uplift of 150 rows
        params['run_duration'] = 100 * uplift_interval
        params['plot_interval'] = 10 * uplift_interval

        print('Uplift interval: ' + str(params['uplift_interval']) + ' 1/y')
        print('Weathering rate: ' + str(params['weathering_rate']) + ' 1/y')

        opname = ('tau' + str(int(round(10 * uplift_interval_exp)))
                  + 'w' + str(int(round(10 * weath_exp))))
        create_folder(opname)
        params['plot_file_name'] = opname + '/' + opname

        gfs = GrainFacetSimulator(**params)
        gfs.run()

        sm = SlopeMeasurer(gfs)
        sm.pick_rock_surface()
        (m, b) = sm.fit_straight_line_to_surface()
        angle = np.degrees(np.arctan(m))

        results_file.write(str(uplift_interval) + ',' + str(weath_rate) + ','
                           + str(m) + ',' + str(angle) + '\n')
        results_file.flush()

        save_grid(gfs.grid, opname + '/' + opname + '.grid', clobber=True)

results_file.close()
ModelRunScripts/SensitivityAnalysisDandV/run_v_w.py
2,601
Created on Thu Jul 5 09:10:56 2018 @author: gtucker !/usr/bin/env python2 -*- coding: utf-8 -*- Open a file to record output: Print header in file Sweep through a range of uplift interval and weathering rate parameters Set run duration long enough for uplift of 150 rows
254
en
0.718933
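The script above steps exponents linearly and converts them with 10**exp, so the sweep is log-spaced in both parameters; a small sketch of just that grid (ranges quoted approximately):

import numpy as np

# Exponents stepped linearly, parameter values spaced logarithmically.
uplift_intervals = 10.0 ** np.arange(2, 5.2, 0.2)    # roughly 1e2 to 1e5 yr
weathering_rates = 10.0 ** np.arange(-5, -1.8, 0.2)  # roughly 1e-5 to 1e-2 1/yr
print(uplift_intervals.size * weathering_rates.size, 'parameter combinations')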
from Jumpscale import j import re # ACTIONS ## R = Regex Replace ## RI = Regex Replace case insensitive DO = """ RI| j.application.JSBase$ | j.baseclasses.object RI| j.data.cache. | j.core.cache. RI| j.data.text. | j.core.text. RI| from jumpscale import j | from Jumpscale import j RI| j.application.jsbase_get_class() | j.baseclasses.object RI| .base_class_config | .JSBaseClassConfig RI| .base_class_configs | .JSBaseClassConfigs RI| j.logging. | j.logger. RI | Jumpscale.logging. | Jumpscale.core.logging. RI| self._location | self.__jslocation__ RI| j.data.serializer. | j.data.serializers. RI| self.prefab.core.file_write | j.sal.fs.writeFile RI| self.prefab.core.run | j.sal.process.execute RI| self.prefab.core.createDir | j.sal.fs.createDir RI| self.prefab.core.file_download | self.prefab.core.file_download RI| self.prefab.system.package.install | j.builders.system.package.ensure """ ERRORS = """ configmanager._base_class_config """ JSBASE = j.baseclasses.object class FixerReplacer(j.baseclasses.object): def __init__(self): JSBASE.__init__(self) self.rules = [] for rule in DO.split("\n"): if rule.strip() == "": continue if rule.strip().startswith("#"): continue cmd, from_, to_ = rule.split("|") if cmd.lower().strip() == "ri": self.rules.append(ReplaceIgnoreCase(from_, to_)) elif cmd.lower().strip() == "r": self.rules.append(ReplaceNormal(from_, to_)) else: raise j.exceptions.Base("unknown rule:%s" % rule) def line_process(self, line): changed = False # if "\t" in line: # line = line.replace("\t"," ") # changed = True for rule in self.rules: line1 = rule.replace(line) if line1 != line: changed = True line = line1 return changed, line def file_process(self, path, write=False, root=""): out = "" nr = 0 for line in j.sal.fs.readFile(path).split("\n"): nr += 1 changed, line2 = self.line_process(line) if changed: path2 = j.sal.fs.pathRemoveDirPart(path, root) if path2 not in self.changes: self.changes[path2] = {} changes = self.changes[path2] changes["line"] = nr changes["from"] = line changes["to.."] = line2 out += "%s\n" % line2 else: out += "%s\n" % line if len(self.changes) > 0 and write: j.sal.fs.writeFile(path, out) def dir_process(self, path, extensions=["py", "txt", "md"], recursive=True, write=False): path = j.sal.fs.pathNormalize(path) self.changes = {} for ext in extensions: for p in j.sal.fs.listFilesInDir(path, recursive=recursive, filter="*.%s" % ext, followSymlinks=False): self._log_debug("process file:%s" % p) self.file_process(root=path, path=p, write=write) print(j.data.serializers.yaml.dumps(self.changes)) class ReplaceIgnoreCase: def __init__(self, from_, to_, prepend="", append=""): self.from_ = from_.strip() self.to_ = to_.strip() self.regex = re.compile(re.escape(prepend + self.from_ + append), re.IGNORECASE | re.VERBOSE) def replace(self, txt): m = self.regex.search(txt) if m: found = m.string[m.start() : m.end()] txt2 = txt.replace(found, self.to_) return txt2 else: return txt class ReplaceNormal(ReplaceIgnoreCase): def __init__(self, from_, to_, prepend="", append=""): ReplaceIgnoreCase.__init__(self, from_, to_, re.VERBOSE) self.regex = re.compile(re.escape(prepend + self.from_ + append))
sandbox/lib/jumpscale/JumpscaleLibs/tools/fixer/FixerReplace.py
3,927
ACTIONS R = Regex Replace RI = Regex Replace case insensitive if "\t" in line: line = line.replace("\t"," ") changed = True
134
en
0.676282
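A usage sketch for FixerReplacer, assuming a working Jumpscale environment; the import path is guessed from the file location and the directory argument is a placeholder.

from JumpscaleLibs.tools.fixer.FixerReplace import FixerReplacer  # hypothetical import path

fixer = FixerReplacer()

# Rewrite a single line using the RI rules defined in DO.
changed, line = fixer.line_process("j.logging.debug('hello')")
print(changed, line)  # True  j.logger.debug('hello')

# Dry run over a directory: changes are reported as YAML, and no file is
# modified until write=True is passed.
fixer.dir_process("/tmp/mycode", extensions=["py"], write=False)  # placeholder path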
"""Client for Triton Inference Server using REST API. References: - https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest - https://github.com/triton-inference-server/client/tree/master/src/python/examples - https://github.com/triton-inference-server/client/blob/master/src/python/library/tritonclient/http/__init__.py """ import json import time import threading import distribution import clients.base_rest_client import clients.utils import tensorflow.compat.v1 as tf import requests as r import numpy as np import tritonclient.http as triton_httpclient import tritonclient.utils as triton_utils from tensorflow.python.framework import dtypes class TritonRest(clients.base_rest_client.BaseRestClient): def generate_rest_request_from_dictionary(self, row_dict): triton_request_inputs = [] for key, value in row_dict.items(): t = clients.utils.get_type(value, self._default_float_type, self._default_int_type) if t == np.object_: value = clients.utils.map_multi_dimensional_list( value, lambda s: s.encode("utf-8")) numpy_value = np.array(value, dtype=t) triton_request_input = triton_httpclient.InferInput( key, list(numpy_value.shape), triton_utils.np_to_triton_dtype(t)) triton_request_input.set_data_from_numpy( numpy_value, binary_data=True) # binary_data=True by default triton_request_inputs.append(triton_request_input) # https://github.com/triton-inference-server/client/blob/530bcac5f1574aa2222930076200544eb274245c/src/python/library/tritonclient/http/__init__.py#L81 # Returns tuple - request and request len to pass in Infer-Header-Content-Length header (request, json_size) = triton_httpclient._get_inference_request( inputs=triton_request_inputs, request_id="", outputs=None, sequence_id=0, sequence_start=0, sequence_end=0, priority=0, timeout=None) headers = {} if json_size: headers["Inference-Header-Content-Length"] = str(json_size) return (request, headers) def get_requests_from_dictionary(self, path): rows = [] with tf.gfile.GFile(path, "r") as f: for line in f: row_dict = eval(line) rows.append(self.generate_rest_request_from_dictionary(row_dict)) return rows def get_requests_from_tfrecord(self, path, count, batch_size): raise NotImplementedError() def get_requests_from_file(self, path): raise NotImplementedError() def get_uri(self): if self._host.startswith("http"): return self._host else: # https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest if self._model_version: return f"http://{self._host}:{self._port}/v2/models/{self._model_name}/versions/{self._model_version}/infer" else: return f"http://{self._host}:{self._port}/v2/models/{self._model_name}/infer"
clients/triton_rest.py
3,001
Client for Triton Inference Server using REST API. References: - https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest - https://github.com/triton-inference-server/client/tree/master/src/python/examples - https://github.com/triton-inference-server/client/blob/master/src/python/library/tritonclient/http/__init__.py binary_data=True by default https://github.com/triton-inference-server/client/blob/530bcac5f1574aa2222930076200544eb274245c/src/python/library/tritonclient/http/__init__.pyL81 Returns tuple - request and request len to pass in Infer-Header-Content-Length header https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.mdhttprest
715
en
0.537996
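A sketch of the same request-building path outside the class, posting the serialized body to a v2 infer endpoint with requests. The host, model name and tensor name are placeholders, and the real client sends its requests through clients.base_rest_client, which is not shown here.

import numpy as np
import requests
import tritonclient.http as triton_httpclient
import tritonclient.utils as triton_utils

# Build one InferInput the same way generate_rest_request_from_dictionary does.
data = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
inp = triton_httpclient.InferInput("input_1", list(data.shape),
                                   triton_utils.np_to_triton_dtype(np.float32))
inp.set_data_from_numpy(data, binary_data=True)

# Serialize the request and record the JSON header length, as in the class above.
body, json_size = triton_httpclient._get_inference_request(
    inputs=[inp], request_id="", outputs=None, sequence_id=0,
    sequence_start=0, sequence_end=0, priority=0, timeout=None)
headers = {"Inference-Header-Content-Length": str(json_size)} if json_size else {}

# POST to the v2 REST endpoint (placeholder host/model; assumes a server is running).
url = "http://localhost:8000/v2/models/my_model/infer"
resp = requests.post(url, data=body, headers=headers)
print(resp.status_code)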
# encoding: utf-8 """ This module defines the things that are used in setup.py for building JupyterLab This includes: * Functions for finding things like packages, package data, etc. * A function for checking dependencies. """ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import io import json import os import pipes import sys import shutil import tempfile import os.path as osp from os.path import join as pjoin from distutils import log from distutils.cmd import Command from distutils.version import LooseVersion from setuptools.command.egg_info import egg_info from setuptools.command.bdist_egg import bdist_egg from subprocess import check_call if sys.platform == 'win32': from subprocess import list2cmdline else: def list2cmdline(cmd_list): return ' '.join(map(pipes.quote, cmd_list)) # the name of the project name = 'jupyterlab' here = osp.dirname(osp.abspath(__file__)) is_repo = osp.exists(pjoin(here, '.git')) version_ns = {} with io.open(pjoin(here, name, '_version.py'), encoding="utf8") as f: exec(f.read(), {}, version_ns) def run(cmd, *args, **kwargs): """Echo a command before running it""" log.info('> ' + list2cmdline(cmd)) kwargs['shell'] = (sys.platform == 'win32') return check_call(cmd, *args, **kwargs) #--------------------------------------------------------------------------- # Find packages #--------------------------------------------------------------------------- def find_packages(): """ Find all of the packages. """ packages = [] for dir, subdirs, files in os.walk('jupyterlab'): if 'node_modules' in subdirs: subdirs.remove('node_modules') package = dir.replace(osp.sep, '.') if '__init__.py' not in files: # not a package continue packages.append(package) return packages #--------------------------------------------------------------------------- # Find package data #--------------------------------------------------------------------------- def find_package_data(): """ Find package_data. """ theme_dirs = [] for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'themes')): slice_len = len('jupyterlab' + os.sep) theme_dirs.append(pjoin(dir[slice_len:], '*')) schema_dirs = [] for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'schemas')): slice_len = len('jupyterlab' + os.sep) schema_dirs.append(pjoin(dir[slice_len:], '*')) return { 'jupyterlab': ['build/*', '*.js', 'package.app.json', 'yarn.lock', 'yarn.app.lock', '.yarnrc' ] + theme_dirs + schema_dirs } def find_data_files(): """ Find data_files. 
""" if not os.path.exists(pjoin('jupyterlab', 'build')): return [] files = [] static_files = os.listdir(pjoin('jupyterlab', 'build')) files.append(('share/jupyter/lab/static', ['jupyterlab/build/%s' % f for f in static_files])) for dir, subdirs, fnames in os.walk(pjoin('jupyterlab', 'schemas')): dir = dir.replace(os.sep, '/') schema_files = [] for fname in fnames: schema_files.append('%s/%s' % (dir, fname)) slice_len = len('jupyterlab/') files.append(('share/jupyter/lab/%s' % dir[slice_len:], schema_files)) for dir, subdirs, fnames in os.walk(pjoin('jupyterlab', 'themes')): dir = dir.replace(os.sep, '/') themes_files = [] for fname in fnames: themes_files.append('%s/%s' % (dir, fname)) slice_len = len('jupyterlab/') files.append(('share/jupyter/lab/%s' % dir[slice_len:], themes_files)) return files def js_prerelease(command, strict=False): """decorator for building minified js/css prior to another command""" class DecoratedCommand(command): def run(self): jsdeps = self.distribution.get_command_obj('jsdeps') if not is_repo and all(osp.exists(t) for t in jsdeps.targets): # sdist, nothing to do command.run(self) return try: self.distribution.run_command('jsdeps') except Exception as e: missing = [t for t in jsdeps.targets if not osp.exists(t)] if strict or missing: log.warn('js check failed') if missing: log.error('missing files: %s' % missing) raise e else: log.warn('js check failed (not a problem)') log.warn(str(e)) command.run(self) return DecoratedCommand def update_package_data(distribution): """update build_py options to get package_data changes""" build_py = distribution.get_command_obj('build_py') build_py.finalize_options() class CheckAssets(Command): description = 'check for required assets' user_options = [] # Representative files that should exist after a successful build targets = [ pjoin(here, 'jupyterlab', 'build', 'release_data.json'), pjoin(here, 'jupyterlab', 'build', 'main.bundle.js'), pjoin(here, 'jupyterlab', 'schemas', '@jupyterlab', 'shortcuts-extension', 'plugin.json'), pjoin(here, 'jupyterlab', 'themes', '@jupyterlab', 'theme-light-extension', 'images', 'jupyterlab.svg') ] def initialize_options(self): pass def finalize_options(self): pass def run(self): for t in self.targets: if not osp.exists(t): msg = 'Missing file: %s' % t raise ValueError(msg) target = pjoin(here, 'jupyterlab', 'build', 'release_data.json') with open(target) as fid: data = json.load(fid) if (LooseVersion(data['version']) != LooseVersion(version_ns['__version__'])): msg = 'Release assets version mismatch, please run npm publish' raise ValueError(msg) # update package data in case this created new files update_package_data(self.distribution) class bdist_egg_disabled(bdist_egg): """Disabled version of bdist_egg Prevents setup.py install performing setuptools' default easy_install, which it should never ever do. """ def run(self): sys.exit("Aborting implicit building of eggs. Use `pip install .` to install from source.") class custom_egg_info(egg_info): """Prune JavaScript folders from egg_info to avoid locking up pip. """ def run(self): folders = ['examples', 'packages', 'test', 'node_modules'] folders = [f for f in folders if os.path.exists(pjoin(here, f))] tempdir = tempfile.mkdtemp() for folder in folders: shutil.move(pjoin(here, folder), tempdir) value = egg_info.run(self) for folder in folders: shutil.move(pjoin(tempdir, folder), here) shutil.rmtree(tempdir) return value
setupbase.py
7,123
Disabled version of bdist_egg Prevents setup.py install performing setuptools' default easy_install, which it should never ever do. Prune JavaScript folders from egg_info to avoid locking up pip. Find data_files. Find package_data. Find all of the packages. decorator for building minified js/css prior to another command Echo a command before running it update build_py options to get package_data changes This module defines the things that are used in setup.py for building JupyterLab This includes: * Functions for finding things like packages, package data, etc. * A function for checking dependencies. encoding: utf-8 Copyright (c) Jupyter Development Team. Distributed under the terms of the Modified BSD License. the name of the project--------------------------------------------------------------------------- Find packages--------------------------------------------------------------------------- not a package--------------------------------------------------------------------------- Find package data--------------------------------------------------------------------------- sdist, nothing to do Representative files that should exist after a successful build update package data in case this created new files
1,241
en
0.683699
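A sketch of how a setup.py might wire the helpers above together; the command names follow the module's own conventions (js_prerelease looks up a command named 'jsdeps'), but the exact wiring and metadata are assumptions rather than JupyterLab's actual setup configuration.

from setuptools import setup
from setuptools.command.build_py import build_py
from setuptools.command.sdist import sdist

import setupbase

setup(
    name=setupbase.name,
    version=setupbase.version_ns['__version__'],
    packages=setupbase.find_packages(),
    package_data=setupbase.find_package_data(),
    data_files=setupbase.find_data_files(),
    cmdclass={
        'build_py': setupbase.js_prerelease(build_py),
        'sdist': setupbase.js_prerelease(sdist, strict=True),
        'jsdeps': setupbase.CheckAssets,            # asset check invoked by js_prerelease
        'egg_info': setupbase.custom_egg_info,      # prune JS folders during egg_info
        'bdist_egg': setupbase.bdist_egg_disabled,  # block implicit egg builds
    },
)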
#
# SPDX-License-Identifier: MIT
# from augur.augurplugin import AugurPlugin
# from augur.application import Application

# class HousekeeperPlugin(AugurPlugin):
#     """
#     This plugin serves as an example as to how to load plugins into Augur
#     """
#     def __init__(self, augur_app):
#         super().__init__(augur_app)
#         self.__housekeeper = self.__call__()

#     def __call__(self):
#         from .housekeeper import Housekeeper
#         return Housekeeper(
#             user=self._augur.read_config('Database', 'user', 'AUGUR_DB_USER', 'root'),
#             password=self._augur.read_config('Database', 'password', 'AUGUR_DB_PASS', 'password'),
#             host=self._augur.read_config('Database', 'host', 'AUGUR_DB_HOST', '127.0.0.1'),
#             port=self._augur.read_config('Database', 'port', 'AUGUR_DB_PORT', '3306'),
#             dbname=self._augur.read_config('Database', 'database', 'AUGUR_DB_NAME', 'msr14')
#         )

# HousekeeperPlugin.augur_plugin_meta = {
#     'name': 'housekeeper',
#     'datasource': True
# }

# Application.register_plugin(HousekeeperPlugin)

# __all__ = ['HousekeeperPlugin']
augur/housekeeper/__init__.py
1,173
SPDX-License-Identifier: MIT from augur.augurplugin import AugurPlugin from augur.application import Application class HousekeeperPlugin(AugurPlugin): """ This plugin serves as an example as to how to load plugins into Augur """ def __init__(self, augur_app): super().__init__(augur_app) self.__housekeeper = self.__call__() def __call__(self): from .housekeeper import Housekeeper return Housekeeper( user=self._augur.read_config('Database', 'user', 'AUGUR_DB_USER', 'root'), password=self._augur.read_config('Database', 'password', 'AUGUR_DB_PASS', 'password'), host=self._augur.read_config('Database', 'host', 'AUGUR_DB_HOST', '127.0.0.1'), port=self._augur.read_config('Database', 'port', 'AUGUR_DB_PORT', '3306'), dbname=self._augur.read_config('Database', 'database', 'AUGUR_DB_NAME', 'msr14') ) HousekeeperPlugin.augur_plugin_meta = { 'name': 'housekeeper', 'datasource': True } Application.register_plugin(HousekeeperPlugin) __all__ = ['HousekeeperPlugin']
1,116
en
0.300772
""" Video Link: https://youtu.be/1s-Tj65AKZA """ from seleniumbase import __version__ from seleniumbase import BaseCase class HackTests(BaseCase): def test_all_your_base_are_belong_to_us(self): # First make sure that seleniumbase 1.65.0 or newer is installed version = __version__.split(".") if version[0] == "1" and int(version[1]) < 65: raise Exception( "This test requires minimum seleniumbase version: 1.65.0" ) self.set_window_size(1220, 740) ayb = "ALL YOUR BASE" abtu = "ARE BELONG TO US" aybabtu = "%s %s" % (ayb, abtu) sb_banner_logo = "//seleniumbase.io/cdn/img/sb_logo_10.png" sb_dashboard_logo = "//seleniumbase.io/img/dash_pie_3.png" yt_chip = "#chips yt-chip-cloud-chip-renderer:nth-of-type" wiki = "https://en.wikipedia.org/wiki/All_your_base_are_belong_to_us" self.open(wiki) self.click_if_visible('button[aria-label="Close"]') self.set_text_content("h1#firstHeading", aybabtu) self.set_text_content("#ca-history a", aybabtu) self.set_text_content('#n-mainpage-description a', "ALL") self.set_text_content('#n-contents a', "YOUR") self.set_text_content('#n-currentevents a', "BASE") self.set_text_content('#n-randompage a', "ARE") self.set_text_content('#n-aboutsite a', "BELONG") self.set_text_content('#n-contactpage a', "TO") self.set_text_content('#n-sitesupport a', "US") self.set_text_content('.tocsection-1 span.toctext', "ALL") self.set_text_content('.tocsection-2 span.toctext', "YOUR") self.set_text_content('.tocsection-3 span.toctext', "BASE") self.set_text_content('.tocsection-4 span.toctext', "ARE") self.set_text_content('.tocsection-5 span.toctext', "BELONG") self.set_text_content('.tocsection-6 span.toctext', "TO") self.set_text_content('.tocsection-7 span.toctext', "US") self.highlight("h1#firstHeading", loops=2, scroll=False) self.highlight("#ca-history a", loops=2, scroll=False) self.highlight("nav#p-navigation", loops=2, scroll=False) self.highlight("div#toc", loops=2, scroll=False) self.highlight('.tocsection-1 span.toctext', loops=1, scroll=False) self.highlight('.tocsection-2 span.toctext', loops=1, scroll=False) self.highlight('.tocsection-3 span.toctext', loops=2, scroll=False) self.highlight('.tocsection-4 span.toctext', loops=1, scroll=False) self.highlight('.tocsection-5 span.toctext', loops=1, scroll=False) self.highlight('.tocsection-6 span.toctext', loops=1, scroll=False) self.highlight('.tocsection-7 span.toctext', loops=2, scroll=False) zoom_in = 'div.thumbinner{zoom: 1.4;-moz-transform: scale(1.4);}' self.add_css_style(zoom_in) self.highlight("div.thumbinner", loops=8, scroll=False) self.open("https://www.apple.com/store") self.set_text_content("div.rs-shop-subheader", aybabtu) self.set_text_content('#shelf-1 a[href*="mac"]', "ALL") self.set_text_content('#shelf-1 a[href*="iphone"]', "YOUR") self.set_text_content('#shelf-1 a[href*="ipad"]', "BASE") self.set_text_content('#shelf-1 a[href*="watch"]', "ARE") self.set_text_content('#shelf-1 a[href*="airpods"]', "BELONG") self.set_text_content('#shelf-1 a[href*="airtag"]', "TO") self.set_text_content('#shelf-1 a[href*="tv"]', "US") self.set_text_content('#shelf-1 a[href*="homepod"]', ".") self.set_text_content("h2", aybabtu + ". 
") self.highlight("div.rs-shop-subheader", loops=6, scroll=False) self.highlight("#shelf-1", loops=2, scroll=False) self.highlight('#shelf-1 a[href*="mac"]', loops=1, scroll=False) self.highlight('#shelf-1 a[href*="iphone"]', loops=1, scroll=False) self.highlight('#shelf-1 a[href*="ipad"]', loops=3, scroll=False) self.highlight('#shelf-1 a[href*="watch"]', loops=1, scroll=False) self.highlight('#shelf-1 a[href*="airpods"]', loops=1, scroll=False) self.highlight('#shelf-1 a[href*="airtag"]', loops=1, scroll=False) self.highlight('#shelf-1 a[href*="tv"]', loops=3, scroll=False) self.highlight("h2", loops=9, scroll=False) self.open("https://google.com/ncr") self.set_text_content('a[href*="about.google"]', ayb) self.set_text_content('a[href*="store.google"]', abtu) self.set_text_content('a[href*="mail.google.com"]', ayb) self.set_text_content('a[href*="google.com/img"]', abtu) self.set_attributes('[value="Google Search"]', "value", ayb) self.set_attributes('[value="I\'m Feeling Lucky"]', "value", abtu) zoom_in = 'a{zoom: 1.2;-moz-transform: scale(1.2);}' self.add_css_style(zoom_in) zoom_in = ( '[value="ALL YOUR BASE"]{zoom: 1.3;-moz-transform: scale(1.3);}' '[value="ARE BELONG TO US"]{zoom: 1.3;-moz-transform: scale(1.3);}' ) self.add_css_style(zoom_in) self.highlight('a[href*="about.google"]', loops=3) self.highlight('a[href*="store.google"]', loops=3) self.highlight('a[href*="mail.google.com"]', loops=3) self.highlight('a[href*="google.com/img"]', loops=3) self.highlight('form[role="search"]', loops=8) self.open("https://twitter.com/") if not self.is_element_visible('a[href*="w/signup"] span'): self.refresh() if self.is_element_visible('a[href*="w/signup"] span'): self.set_text_content('a[href*="w/signup"] span', aybabtu) self.highlight('a[href*="w/signup"] span', loops=6, scroll=False) self.highlight('a[href*="w/signup"]', loops=6, scroll=False) self.open("https://www.youtube.com/") self.set_text_content('%s(1)' % yt_chip, "ALL") self.set_text_content('%s(2)' % yt_chip, "YOUR") self.set_text_content('%s(3)' % yt_chip, "BASE") self.set_text_content('%s(4)' % yt_chip, "ARE") self.set_text_content('%s(5)' % yt_chip, "BELONG") self.set_text_content('%s(6)' % yt_chip, "TO") self.set_text_content('%s(7)' % yt_chip, "US") self.set_text_content('%s(8)' % yt_chip, "!") self.set_text_content('%s(9)' % yt_chip, "!") self.set_text_content('%s(10)' % yt_chip, "!") self.click_if_visible("#dismiss-button") self.click_if_visible('button[aria-label="Close"]') self.highlight("#scroll-container", loops=5, scroll=False) self.highlight('%s(1)' % yt_chip, loops=1, scroll=False) self.highlight('%s(2)' % yt_chip, loops=1, scroll=False) self.highlight('%s(3)' % yt_chip, loops=3, scroll=False) self.highlight('%s(4)' % yt_chip, loops=1, scroll=False) self.highlight('%s(5)' % yt_chip, loops=1, scroll=False) self.highlight('%s(6)' % yt_chip, loops=1, scroll=False) self.highlight('%s(7)' % yt_chip, loops=3, scroll=False) self.highlight("#scroll-container", loops=7, scroll=False) self.open("https://github.com/features/actions") self.set_text_content('a[href="/team"]', ayb) self.set_text_content('a[href="/enterprise"]', abtu) self.set_text_content('h1 span:nth-child(1)', ayb) self.set_text_content('h1 span:nth-of-type(2)', "ARE") self.set_text_content('h1 span:nth-of-type(3)', "BELONG") self.set_text_content('h1 span:nth-of-type(4)', "TO") self.set_text_content('h1 span:nth-of-type(5)', "US") self.type('input[name="q"]', aybabtu.lower()) self.click("h1", scroll=False) self.highlight("nav", loops=5, scroll=False) 
self.highlight('input[name="q"]', loops=5, scroll=False) self.highlight("h1", loops=8, scroll=False) self.open("https://dev.to/top/infinity") self.click_if_visible('button[aria-label="Close campaign banner"]') self.set_text_content('nav a[data-text="Relevant"]', "ALL") self.set_text_content('nav a[data-text="Latest"]', "YOUR") self.set_text_content('nav a[data-text="Top"]', "BASE") self.set_text_content('nav a[data-text="Week"]', "ARE") self.set_text_content('nav a[data-text="Month"]', "BELONG") self.set_text_content('nav a[data-text="Year"]', "TO") self.set_text_content('nav a[data-text="Infinity"]', "US") self.set_text_content('aside a[class*="tful"]', aybabtu) self.set_text_content('aside a[aria-label="Create new account"]', ayb) self.set_text_content('aside a[aria-label="Log in"]', abtu) self.set_text_content('aside a[class*="tful"]:nth-child(2)', aybabtu) self.set_text_content('aside a[class*="tful"]:nth-child(3)', aybabtu) self.set_text_content('aside a[class*="tful"]:nth-child(4)', aybabtu) self.set_text_content('aside a[class*="tful"]:nth-child(5)', aybabtu) self.set_attribute("a.crayons-avatar img", "src", sb_dashboard_logo) self.set_text_content('.profile-preview-card button', "SeleniumBase") self.set_text_content('h2.crayons-story__title a', aybabtu) self.type('input[name="q"]', aybabtu) self.highlight('input[name="q"]', loops=4, scroll=False) self.highlight('[aria-label="Primary sidebar"] div div', scroll=False) self.highlight('nav a[data-text="Relevant"]', loops=1, scroll=False) self.highlight('nav a[data-text="Latest"]', loops=1, scroll=False) self.highlight('nav a[data-text="Top"]', loops=2, scroll=False) self.highlight('nav a[data-text="Week"]', loops=1, scroll=False) self.highlight('nav a[data-text="Month"]', loops=1, scroll=False) self.highlight('nav a[data-text="Year"]', loops=1, scroll=False) self.highlight('nav a[data-text="Infinity"]', loops=2, scroll=False) self.highlight('aside[id*="sidebar"] section', loops=5, scroll=False) self.highlight("div.crayons-story__body", loops=7, scroll=False) self.open("https://azure.microsoft.com/en-us/services/playfab/") self.set_text_content("h1", aybabtu) self.set_text_content('a[aria-label*="Try PlayF"]', ayb) self.set_text_content('a[aria-label*="Sign in to"]', abtu) self.set_text_content('span:contains("Chat with Sales")', aybabtu) self.highlight("h1", loops=6, scroll=False) self.highlight('a[aria-label*="Try PlayF"]', loops=4, scroll=False) self.highlight('a[aria-label*="Sign in to"]', loops=4, scroll=False) self.highlight('button#live-engage-btn', loops=6, scroll=False) self.open("https://www.snapchat.com/") self.set_text_content("h1", ayb) self.set_text_content("form .button-large span span", abtu) zoom_in = 'a.button-large span{zoom: 1.2;-moz-transform: scale(1.2);}' self.add_css_style(zoom_in) self.highlight("h1", loops=6, scroll=False) self.highlight("form .button-large span span", loops=8, scroll=False) self.open("https://store.steampowered.com/") self.set_text_content('div.content a[href*="/about/"]', " ") self.set_text_content('div.content a[href*="help.steam"]', aybabtu) self.set_text_content("#foryou_tab a", "ALL") self.set_text_content("#noteworthy_tab a", "YOUR BASE") self.set_text_content("#genre_tab a", "ARE") self.set_text_content('span:contains("Points Shop")', "BELONG") self.set_text_content('span:contains("News")', "TO") self.set_text_content('span:contains("Labs")', "US") self.set_value("input#store_nav_search_term", ayb + " . . . 
.") self.highlight('div.content a[href*="help.steam"]', loops=6) self.highlight('#store_nav_area', loops=2, scroll=False) self.highlight("#foryou_tab a", loops=1, scroll=False) self.highlight("#noteworthy_tab a", loops=3, scroll=False) self.highlight("#genre_tab a", loops=1, scroll=False) self.highlight('span:contains("BELONG")', loops=1, scroll=False) self.highlight('span:contains("TO")', loops=1, scroll=False) self.highlight('span:contains("US")', loops=2, scroll=False) self.js_click('input[id*="nav_search"]') self.highlight('input[id*="nav_search"]', loops=6, scroll=False) self.open("https://xkcd.com/286/") self.set_text_content('a[href="/archive"]', "ALL") self.set_text_content('a[href*="what-if"]', "YOUR") self.set_text_content('a[href*="//blag."]', "BASE") self.set_text_content('a[href*="/about"]', abtu) self.remove_element('li:contains("Feed")') self.remove_element('li:contains("TW")') self.remove_element('li:contains("Books")') self.remove_element('li:contains("What")') self.remove_element('li:contains("WI")') self.set_attributes("#news img", "src", sb_banner_logo) self.set_text_content('#ctitle', aybabtu) self.set_text_content('a[rel="prev"]', "All") self.set_text_content('a[href*="random"]', "Your") self.set_text_content('a[rel="next"]', "Base") self.highlight("#topLeft ul", loops=5, scroll=False) self.highlight('a[href="/archive"]', loops=1, scroll=False) self.highlight('a[href*="what-if"]', loops=1, scroll=False) self.highlight('a[href*="//blag."]', loops=2, scroll=False) self.highlight('a[href*="/about"]', loops=5, scroll=False) self.highlight('a[rel="prev"]', loops=1, scroll=False) self.highlight('a[href*="random"]', loops=1, scroll=False) self.highlight('a[rel="next"]', loops=3, scroll=False) self.highlight("#ctitle", loops=7, scroll=False) self.open("https://www.nintendo.com/whatsnew/") self.set_text_content('button[aria-label="Search"]', aybabtu) self.set_text_content('button[data-section="newsevents"]', aybabtu) self.set_text_content("h2", aybabtu) self.highlight('div.search-flex', loops=4, scroll=False) self.highlight('button[data-section*="news"]', loops=4, scroll=False) self.highlight("h2", loops=6, scroll=False) self.open("https://support.gog.com/hc/en-us?product=gog") self.set_text_content("div.intro-title", aybabtu) self.set_text_content("h4", aybabtu) self.highlight("div.intro-title", loops=8, scroll=False) self.highlight("h4", loops=8, scroll=False) self.open("https://slack.com/help/articles/204714258-Giphy-for-Slack") self.set_text_content("h1", aybabtu) self.set_text_content('a[prettyslug="getting-started"]', "ALL") self.set_text_content('a[prettyslug="using-slack"]', "YOUR") self.set_text_content('a[prettyslug="your-profile"]', "BASE") self.set_text_content('a[prettyslug="connect-tools"]', "ARE") self.set_text_content('a[prettyslug="administration"]', "BELONG") self.set_text_content('a[prettyslug="tutorials"]', "TO US") self.highlight("h1", loops=4, scroll=False) self.highlight("div#global_menu", loops=2, scroll=False) self.highlight('a[prettyslug*="g-started"]', loops=1, scroll=False) self.highlight('a[prettyslug="using-slack"]', loops=1, scroll=False) self.highlight('a[prettyslug="your-profile"]', loops=2, scroll=False) self.highlight('a[prettyslug="connect-tools"]', loops=1, scroll=False) self.highlight('a[prettyslug="administration"]', loops=1, scroll=False) self.highlight('a[prettyslug="tutorials"]', loops=2, scroll=False) self.open("https://kubernetes.io/") self.set_text_content('nav a[href="/docs/"]', "ALL") self.set_text_content('nav a[href="/blog/"]', 
"YOUR") self.set_text_content('nav a[href="/training/"]', "BASE") self.set_text_content('nav a[href="/partners/"]', "ARE") self.set_text_content('nav a[href="/community/"]', "BELONG") self.set_text_content('nav a[href="/case-studies/"]', "TO") self.set_text_content('nav #navbarDropdown', "US") self.set_text_content('nav #navbarDropdownMenuLink', ".") if self.is_element_visible("h1"): self.set_text_content("h1", aybabtu) self.highlight("nav ul.navbar-nav", loops=3, scroll=False) self.highlight('nav a[href="/docs/"]', loops=1, scroll=False) self.highlight('nav a[href="/blog/"]', loops=1, scroll=False) self.highlight('nav a[href="/training/"]', loops=2, scroll=False) self.highlight('nav a[href="/partners/"]', loops=1, scroll=False) self.highlight('nav a[href="/community/"]', loops=1, scroll=False) self.highlight('nav a[href="/case-studies/"]', loops=1, scroll=False) self.highlight('nav #navbarDropdown', loops=2, scroll=False) if self.is_element_visible("h1"): self.highlight('h1', loops=6, scroll=False) self.open("https://www.selenium.dev/") self.set_attributes("a.dropdown-toggle", "class", "nav-link") self.set_text_content('li a:contains("About")', "ALL") self.set_text_content('li a:contains("Downloads")', "YOUR") self.set_text_content('li a:contains("Documentation")', "BASE") self.set_text_content('li a:contains("Projects")', "ARE") self.set_text_content('li a:contains("Support")', "BELONG") self.set_text_content('li a:contains("Blog")', "TO") self.set_text_content('li a:contains("English")', "US") self.set_text_content("div.lead", aybabtu) self.set_text_content("h2", aybabtu) zoom_in = 'div.lead{zoom: 1.25;-moz-transform: scale(1.25);}' self.add_css_style(zoom_in) self.highlight("div#main_navbar", loops=1, scroll=False) self.highlight('li a:contains("ALL")', loops=1, scroll=False) self.highlight('li a:contains("YOUR")', loops=1, scroll=False) self.highlight('li a:contains("BASE")', loops=2, scroll=False) self.highlight('li a:contains("ARE")', loops=1, scroll=False) self.highlight('li a:contains("BELONG")', loops=1, scroll=False) self.highlight('li a:contains("TO")', loops=1, scroll=False) self.highlight('li a:contains("US")', loops=2, scroll=False) self.highlight("div.lead", loops=6, scroll=False) self.highlight("h2", loops=8, scroll=False) self.open("https://www.python.org/") self.set_text_content('a[class="donate-button"]', ayb) self.set_text_content("#about a", "ALL") self.set_text_content("#downloads a", "YOUR") self.set_text_content("#documentation a", "BASE") self.set_text_content("#community a", "ARE") self.set_text_content("#success-stories a", "BELONG") self.set_text_content("#news a", "TO") self.set_text_content("#events a", "US") self.highlight('a[class="donate-button"]', loops=4, scroll=False) self.highlight("nav#mainnav", loops=5, scroll=False) self.highlight("#about a", loops=1, scroll=False) self.highlight("#downloads a", loops=1, scroll=False) self.highlight("#documentation a", loops=2, scroll=False) self.highlight("#community a", loops=1, scroll=False) self.highlight("#success-stories a", loops=1, scroll=False) self.highlight("#news a", loops=1, scroll=False) self.highlight("#events a", loops=2, scroll=False) self.open("https://docs.pytest.org/") self.set_text_content("h1", "pytest: " + aybabtu) self.highlight("h1", loops=10, scroll=False) self.open("https://wordpress.com/") self.set_text_content('a[title="Plans & Pricing"]', aybabtu) self.set_text_content('a[title="Get Started"]', ayb) self.set_text_content("p.no-widows", aybabtu) self.set_text_content("a#lpc-button", 
"Automate with SeleniumBase") self.highlight('a[title="Plans & Pricing"]', loops=6, scroll=False) self.highlight('a[title="Get Started"]', loops=4, scroll=False) self.highlight("p.no-widows", loops=8, scroll=False) self.highlight("a#lpc-button", loops=4, scroll=False) self.open("https://seleniumbase.com/") self.set_text_content("h1", aybabtu) self.highlight("h1", loops=10, scroll=False) self.open("https://pypi.org/") self.set_text_content('a[href="/sponsors/"]', aybabtu) self.set_text_content("h1", aybabtu) self.set_value("input#search", aybabtu, scroll=False) self.highlight('a[href="/sponsors/"]', loops=6, scroll=False) self.highlight("h1", loops=6, scroll=False) self.highlight("input#search", loops=8, scroll=False) self.open("https://www.atlassian.com/software/jira") self.set_text_content('a[href*="jira/pricing"]', ayb) self.set_text_content('a[href*="jira/enterprise"]', abtu) self.set_text_content('a[href="/software/jira/features"]', "") self.set_text_content('a[href="/software/jira/guides"]', "") self.set_text_content("h1", ayb) self.set_text_content('div.xs-none-bottom a[href*="free"]', abtu) self.highlight("ul.imkt-navbar__link-list", loops=2, scroll=False) self.highlight('a[href*="jira/pricing"]', loops=3, scroll=False) self.highlight('a[href*="jira/enterprise"]', loops=3, scroll=False) self.highlight("h1", loops=3, scroll=False) self.highlight('div.xs-none-bottom a[href*="free"]', scroll=False) self.open("https://status.iboss.com/ibcloud/app/cloudStatus.html") self.set_text_content('div[translate*="cloudStatus"]', ayb) self.set_text_content('div[translate*="maintenance"]', "ARE") self.set_text_content('div[translate*="advisory"]', "BELONG") self.set_text_content('div[translate*="incident"]', "TO US") self.set_text_content("h1", "Cloud Status - " + aybabtu) self.highlight("nav div.ibcloud-header-contents", loops=3) self.highlight('div[translate*="cloudStatus"]', loops=4) self.highlight('div[translate*="maintenance"]', loops=1) self.highlight('div[translate*="advisory"]', loops=1) self.highlight('div[translate*="incident"]', loops=3) self.highlight("h1", loops=9, scroll=False) self.open("https://git-scm.com/") self.set_text_content("span#tagline", aybabtu) self.set_text_content("#nav-about h3", ayb) self.set_text_content("#nav-documentation h3", abtu) self.highlight("span#tagline", loops=8, scroll=False) self.highlight("#nav-about h3", loops=5, scroll=False) self.highlight("#nav-documentation h3", loops=6, scroll=False) self.open("https://teamtreehouse.com/") self.set_text_content("li.nav-item-free-trial", aybabtu) self.set_text_content("h1", aybabtu) self.set_text_content("h2", aybabtu) self.set_text_content("p.homepage-signup-form-banner", aybabtu) self.highlight("li.nav-item-free-trial", loops=6, scroll=False) self.highlight("h1", loops=6, scroll=False) self.highlight('p[class*="signup-form"]', loops=8, scroll=False) self.open("https://pragprog.com/") self.set_text_content("header p", aybabtu) zoom_in = 'header p{zoom: 1.35;-moz-transform: scale(1.35);}' self.add_css_style(zoom_in) self.highlight("header p", loops=10, scroll=False) self.open("https://seleniumbase.io/") self.set_text_content("h1", aybabtu) self.highlight("h1", loops=10, scroll=False)
examples/hack_the_planet.py
23,602
Video Link: https://youtu.be/1s-Tj65AKZA First make sure that seleniumbase 1.65.0 or newer is installed
106
en
0.737358
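The hack_the_planet.py record above drives real pages with SeleniumBase calls such as open(), set_text_content(), and highlight(), and its notes ask for seleniumbase 1.65.0 or newer. As a minimal, hedged sketch using only calls that appear in that file (the class and test names here are made up for illustration):

from seleniumbase import BaseCase

class BannerDemo(BaseCase):
    def test_rewrite_and_highlight(self):
        # open a page, rewrite a visible heading, then flash it on screen
        self.open("https://seleniumbase.io/")
        self.set_text_content("h1", "ALL YOUR BASE ARE BELONG TO US")
        self.highlight("h1", loops=8, scroll=False)

Saved as, say, test_banner_demo.py (a hypothetical file name), it would be run with pytest, which is how SeleniumBase test classes are normally executed.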
import numpy as np import pandas as pd from sklearn.tree import DecisionTreeRegressor from sklearn.model_selection import KFold import plotly.express as px from plotly.subplots import make_subplots import plotly.graph_objects as go # import data and preprocess it def preprocessing(file_name: str): # data import fish_df = pd.read_csv(file_name) fish_df = pd.get_dummies(fish_df, columns=['Species'], prefix='Species') return fish_df # train-test split by a percentage. # input: dataframe, label column name, split ration, and random state # returns: x_train, x_test, y_train, y_test def split_df(user_df: pd.DataFrame, label_name: str, split_ratio=0.8, random_value=42): x_train = user_df.sample(frac=split_ratio, random_state=random_value) x_test = user_df.drop(x_train.index) return x_train.drop(label_name, axis=1), x_test.drop(label_name, axis=1), pd.DataFrame( x_train[label_name]), pd.DataFrame(x_test[label_name]) # Create as arrays of trees in a given size and depth def create_random_forest(forest_size: int, max_depth: int, random_state_local: int): random_forest = [] for i in range(0, forest_size, 1): random_forest.append(DecisionTreeRegressor(criterion='friedman_mse', max_depth=max_depth, random_state=random_state_local)) return random_forest # train trees in a forest by fitting each tree to the previous tree's error # input: forest of trees, initial training guess, x and y databases, alpha coefficient. # returns: trained forest, initial average value, r_matrix of solutions and mse_list of the results (mean square error) def train_forest(random_forest: list, initial_average_weight: float, x_df: pd.DataFrame, y_df: pd.DataFrame, alpha: float = 0.1): # initial average weight and residuals to be used in the 1st tree predictions = np.ones(len(y_df))*initial_average_weight residuals = np.array(y_df['Weight'])-predictions residuals_matrix = [residuals] # calculates the first mse value mse_list = [(np.square(residuals)).sum()/len(predictions)] for tree in random_forest: # train the current stump tree.fit(x_df, residuals) # predict results based on its training error residuals = tree.predict(x_df) # record residuals and calculate mse residuals_matrix.append(residuals) mse_list.append((np.square(residuals)).sum()/len(predictions)) # update predictions and calculate new residuals predictions = predictions + alpha * residuals residuals = np.array(y_df['Weight']) - predictions return random_forest, predictions, residuals_matrix, mse_list # predict test database by the trained random forest # input: forest of trees, initial training guess, x and y databases. 
# returns: mse_list of the forest (mean square error) def test_forest(random_forest: list, initial_average_weight: float, x_df: pd.DataFrame, y_df: pd.DataFrame, alpha: float = 0.1): predictions = np.ones(len(y_df))*initial_average_weight mse_list = [(np.square(np.array(y_df['Weight']) - predictions)).sum()/len(predictions)] for tree in random_forest: predictions = predictions + alpha * tree.predict(x_df) mse_list.append((np.square(np.array(y_df['Weight']) - predictions)).sum()//len(predictions)) return predictions, mse_list def main(): # data import and preprocessing fish_df = preprocessing("Fish.csv") # splitting of the data x_train, x_test, y_train, y_test = split_df(fish_df, 'Weight', 0.8, 42) # setting up a random forest: #forest_size_list = [4, 5, 6, 7, 8] # variable calibrated by KFold train-validate forest_size = 20 # max_depth_list = [1, 2, 3, 4, 5] # variable calibrated by KFold train-validate max_depth = 3 random_state_local = 42 random_forest = create_random_forest(forest_size, max_depth, random_state_local) #%% Train #alpha_list = [0.1, 0.3, 0.5, 0.7, 0.9] # variable calibrated by KFold train-validate alpha = 0.5 # gradiant coefficient kf = KFold(n_splits=2, shuffle=True, random_state=42) for train_index, test_index in kf.split(x_train, y_train): X_train, X_validate = x_train.iloc[train_index], x_train.iloc[test_index] Y_train, Y_validate = y_train.iloc[train_index], y_train.iloc[test_index] # first guess initial_average_weight = np.average(Y_train['Weight'].tolist()) # train forest random_forest, predictions_train, r_matrix, mse_list_train = train_forest(random_forest, initial_average_weight, X_train, Y_train, alpha) # validate predictions_validate, mse_list_validate = test_forest(random_forest, initial_average_weight, X_validate, Y_validate, alpha) results = pd.DataFrame(data=np.arange(0, forest_size+1, 1), columns=['tree_intervals']) results['Train'] = mse_list_train results['Validation'] = mse_list_validate fig = px.scatter(results, x='tree_intervals', y=['Train', 'Validation'], size='tree_intervals') fig.update_layout(xaxis_title="Amount of Intervals (num.)", yaxis_title="mean square error") fig.show() #%% Test predictions_test, mse_list_test = test_forest(random_forest, initial_average_weight, x_test, y_test, alpha) # %% plot success rate vs tree intervals fig = make_subplots(rows=1, cols=3, subplot_titles=('Train', 'Validation', 'Test'), x_title='Amount of Intervals (num.)', y_title='mean square error') results = pd.DataFrame(data=np.arange(0, forest_size+1, 1), columns=['tree_intervals']) results['Train'] = mse_list_train fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Train'], name='Train'), row=1, col=1) results = pd.DataFrame(data=np.arange(0, forest_size + 1, 1), columns=['tree_intervals']) results['Validation'] = mse_list_validate fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Validation'], name='Validation'), row=1, col=2) results = pd.DataFrame(data=np.arange(0, forest_size + 1, 1), columns=['tree_intervals']) results['Test'] = mse_list_test fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Test'], name='Test'), row=1, col=3) fig.update_layout(title_text="Random Forest Gradient Boosting") fig.show() if __name__ == '__main__': main()
gradient-boosting/main.py
6,766
import data and preprocess it data import train-test split by a percentage. input: dataframe, label column name, split ration, and random state returns: x_train, x_test, y_train, y_test Create as arrays of trees in a given size and depth train trees in a forest by fitting each tree to the previous tree's error input: forest of trees, initial training guess, x and y databases, alpha coefficient. returns: trained forest, initial average value, r_matrix of solutions and mse_list of the results (mean square error) initial average weight and residuals to be used in the 1st tree calculates the first mse value train the current stump predict results based on its training error record residuals and calculate mse update predictions and calculate new residuals predict test database by the trained random forest input: forest of trees, initial training guess, x and y databases. returns: mse_list of the forest (mean square error) data import and preprocessing splitting of the data setting up a random forest:forest_size_list = [4, 5, 6, 7, 8] variable calibrated by KFold train-validate max_depth_list = [1, 2, 3, 4, 5] variable calibrated by KFold train-validate%% Trainalpha_list = [0.1, 0.3, 0.5, 0.7, 0.9] variable calibrated by KFold train-validate gradiant coefficient first guess train forest validate%% Test %% plot success rate vs tree intervals
1,359
en
0.806059
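The gradient-boosting record above fits each tree to the residuals of the current prediction and then moves the prediction by alpha times the tree's output. A minimal sketch of that core loop (the function and variable names are illustrative, not taken from the original file):

import numpy as np
from sklearn.tree import DecisionTreeRegressor

def boost(x, y, n_trees=20, max_depth=3, alpha=0.5):
    # start from the mean target, then fit each tree to the current residuals
    pred = np.full(len(y), y.mean(), dtype=float)
    trees = []
    for _ in range(n_trees):
        residuals = y - pred
        tree = DecisionTreeRegressor(max_depth=max_depth).fit(x, residuals)
        pred = pred + alpha * tree.predict(x)
        trees.append(tree)
    return trees, pred

Here y is assumed to be a 1-D NumPy array; the original script works with a pandas DataFrame column instead.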
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

DJANGO_APPS = ["kafka"]
REQUIRES_HADOOP = False

MENU_INDEX = 100
NICE_NAME = "Kafka"
ICON = "kafka/art/icon_kafka_24.png"
IS_URL_NAMESPACED = True

PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
METRICS_INI = os.path.join(PROJECT_ROOT, 'metrics.ini')
kafka/src/kafka/settings.py
1,057
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http: www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
748
en
0.886713
# -*- coding: utf-8 -*-
SUCCESSFUL_TERMINAL_STATUSES = ('complete',)
UNSUCCESSFUL_TERMINAL_STATUSES = ('cancelled', 'unsuccessful')

CONTRACT_REQUIRED_FIELDS = [
    'awardID', 'contractID', 'items', 'suppliers',
    'value', 'dateSigned',
    # 'documents'
]
CONTRACT_NOT_REQUIRED_FIELDS = [
    'contractNumber', 'title', 'title_en', 'title_ru',
    'description', 'description_en', 'description_ru'
]
openregistry/convoy/loki/constants.py
403
-*- coding: utf-8 -*-'documents'
32
en
0.907091
""" Plugin for Czech TV (Ceska televize). Following channels are working: * CT1 - https://www.ceskatelevize.cz/porady/ct1/ * CT2 - https://www.ceskatelevize.cz/porady/ct2/ * CT24 - https://ct24.ceskatelevize.cz/#live * CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/ * CT Decko - https://decko.ceskatelevize.cz/zive * CT Art - https://www.ceskatelevize.cz/porady/art/ Additionally, videos from iVysilani archive should work as well. """ import json import logging import re from html import unescape as html_unescape from urllib.parse import quote from streamlink.plugin import Plugin, PluginError, pluginmatcher from streamlink.plugin.api import useragents, validate from streamlink.stream import DASHStream, HLSStream log = logging.getLogger(__name__) @pluginmatcher(re.compile( r'https?://([\w-]+\.)*ceskatelevize\.cz' )) class Ceskatelevize(Plugin): ajax_url = 'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist' _player_re = re.compile( r'ivysilani/embed/iFramePlayer[^"]+' ) _hash_re = re.compile( r'hash:"([0-9a-z]+)"' ) _playlist_info_re = re.compile( r'{"type":"([a-z]+)","id":"([0-9]+)"' ) _playlist_url_schema = validate.Schema({ validate.optional("streamingProtocol"): validate.text, "url": validate.any( validate.url(), "Error", "error_region" ) }) _playlist_schema = validate.Schema({ "playlist": [{ validate.optional("type"): validate.text, "streamUrls": { "main": validate.url(), } }] }) def _get_streams(self): self.session.http.headers.update({'User-Agent': useragents.IPAD}) self.session.http.verify = False log.warning('SSL certificate verification is disabled.') # fetch requested url and find playlist info response = self.session.http.get(self.url) info = self._find_playlist_info(response) if not info: # do next try with new API def _fallback_api(*args, **kwargs): self.api2 = CeskatelevizeAPI2(self.session, self.url, *args, **kwargs) return self.api2._get_streams() # playlist info not found, let's try to find player url player_url = self._find_player_url(response) if not player_url: log.debug('Cannot find playlist info or player url, do next try with new API') return _fallback_api(res=response) # get player url and try to find playlist info in it response = self.session.http.get(player_url) info = self._find_playlist_info(response) if not info: log.debug('Cannot find playlist info in the player url, do next try with new API') return _fallback_api() log.trace('{0!r}'.format(info)) data = { 'playlist[0][type]': info['type'], 'playlist[0][id]': info['id'], 'requestUrl': '/ivysilani/embed/iFramePlayer.php', 'requestSource': 'iVysilani', 'type': 'html' } headers = { 'x-addr': '127.0.0.1', } # fetch playlist url response = self.session.http.post( self.ajax_url, data=data, headers=headers ) json_data = self.session.http.json(response, schema=self._playlist_url_schema) log.trace('{0!r}'.format(json_data)) if json_data['url'] in ['Error', 'error_region']: log.error('This stream is not available') return # fetch playlist response = self.session.http.post(json_data['url']) json_data = self.session.http.json(response, schema=self._playlist_schema) log.trace('{0!r}'.format(json_data)) playlist = json_data['playlist'][0]['streamUrls']['main'] return HLSStream.parse_variant_playlist(self.session, playlist) @classmethod def _find_playlist_info(cls, response): """ Finds playlist info (type, id) in HTTP response. :param response: Response object. :returns: Dictionary with type and id. 
""" values = {} matches = cls._playlist_info_re.search(response.text) if matches: values['type'] = matches.group(1) values['id'] = matches.group(2) return values @classmethod def _find_player_url(cls, response): """ Finds embedded player url in HTTP response. :param response: Response object. :returns: Player url (str). """ url = '' matches = cls._player_re.search(response.text) if matches: tmp_url = matches.group(0).replace('&amp;', '&') if 'hash' not in tmp_url: # there's no hash in the URL, try to find it matches = cls._hash_re.search(response.text) if matches: url = tmp_url + '&hash=' + matches.group(1) else: url = tmp_url return 'http://ceskatelevize.cz/' + url class CeskatelevizeAPI2: _player_api = 'https://playlist.ceskatelevize.cz/' _url_re = re.compile(r'http(s)?://([^.]*.)?ceskatelevize.cz') _playlist_info_re = re.compile(r'{\s*"type":\s*"([a-z]+)",\s*"id":\s*"(\w+)"') _playlist_schema = validate.Schema({ "CODE": validate.contains("OK"), "RESULT": { "playlist": [{ "streamUrls": { "main": validate.url(), } }] } }) _ctcomp_re = re.compile(r'data-ctcomp="Video"\sdata-video-id="(?P<val1>[^"]*)"\sdata-ctcomp-data="(?P<val2>[^"]+)">') _ctcomp_schema = validate.Schema( validate.text, validate.transform(_ctcomp_re.findall), validate.transform(lambda vl: [{"video-id": v[0], "ctcomp-data": json.loads(html_unescape(v[1]))} for v in vl]) ) _playlist_info_schema = validate.Schema({ "type": validate.text, "id": validate.any(validate.text, int), "key": validate.text, "date": validate.text, "requestSource": validate.text, "drm": int, validate.optional("canBePlay"): int, validate.optional("assetId"): validate.text, "quality": validate.text, validate.optional("region"): int }) def __init__(self, session, url, res=None): self.session = session self.url = url self.response = res def _get_streams(self): if self.response is None: infos = self.session.http.get(self.url, schema=self._ctcomp_schema) else: infos = self.session.http.json(self.response, schema=self._ctcomp_schema) if not infos: # playlist infos not found raise PluginError('Cannot find playlist infos!') vod_prio = len(infos) == 2 for info in infos: try: pl = info['ctcomp-data']['source']['playlist'][0] except KeyError: raise PluginError('Cannot find playlist info!') pl = self._playlist_info_schema.validate(pl) if vod_prio and pl['type'] != 'VOD': continue log.trace('{0!r}'.format(info)) if pl['type'] == 'LIVE': data = { "contentType": "live", "items": [{ "id": pl["id"], "assetId": pl["assetId"], "key": pl["key"], "playerType": "dash", "date": pl["date"], "requestSource": pl["requestSource"], "drm": pl["drm"], "quality": pl["quality"], }] } elif pl['type'] == 'VOD': data = { "contentType": "vod", "items": [{ "id": pl["id"], "key": pl["key"], "playerType": "dash", "date": pl["date"], "requestSource": pl["requestSource"], "drm": pl["drm"], "canBePlay": pl["canBePlay"], "quality": pl["quality"], "region": pl["region"] }] } headers = { "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8", } data = json.dumps(data) response = self.session.http.post( self._player_api, data="data={}".format(quote(data)), headers=headers ) json_data = self.session.http.json(response, schema=self._playlist_schema) log.trace('{0!r}'.format(json_data)) playlist = json_data['RESULT']['playlist'][0]['streamUrls']['main'] yield from DASHStream.parse_manifest(self.session, playlist).items() __plugin__ = Ceskatelevize
src/streamlink/plugins/ceskatelevize.py
9,161
Finds embedded player url in HTTP response. :param response: Response object. :returns: Player url (str). Finds playlist info (type, id) in HTTP response. :param response: Response object. :returns: Dictionary with type and id. Plugin for Czech TV (Ceska televize). Following channels are working: * CT1 - https://www.ceskatelevize.cz/porady/ct1/ * CT2 - https://www.ceskatelevize.cz/porady/ct2/ * CT24 - https://ct24.ceskatelevize.cz/#live * CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/ * CT Decko - https://decko.ceskatelevize.cz/zive * CT Art - https://www.ceskatelevize.cz/porady/art/ Additionally, videos from iVysilani archive should work as well. fetch requested url and find playlist info do next try with new API playlist info not found, let's try to find player url get player url and try to find playlist info in it fetch playlist url fetch playlist there's no hash in the URL, try to find it playlist infos not found
974
en
0.759209
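The ceskatelevize plugin above ultimately hands a resolved playlist to HLSStream.parse_variant_playlist or DASHStream.parse_manifest; from the user's side the plugin is chosen automatically by Streamlink's URL matcher. A minimal, hedged sketch of consuming it through the public API (the URL is just one of the examples from the plugin's docstring):

import streamlink

streams = streamlink.streams("https://www.ceskatelevize.cz/porady/ct1/")
if streams:
    best = streams["best"]   # highest-quality variant selected by Streamlink
    fd = best.open()         # file-like object yielding raw stream data
    chunk = fd.read(1024)
    fd.close()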
# basic example of dict syntax
my_dict = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
print(my_dict)
print(my_dict['key3'])

# example 2
prices = {'apple': 100, 'banana': 60, 'guava': 90, 'rice': 50}
print(prices['rice'])
python_basics/Dictionary/dict.py
215
basic example of dict syntax example 2
33
en
0.414212
import numpy as np
from time import sleep
import struct
import matplotlib.pyplot as plt

# input raw samples from MCU
# in_data = 'out/data_raw.txt'
in_data = 'out/8bit.txt'
fs = 5000
in_bits = 8

# load file
raw = np.loadtxt(in_data)

# Stats
print("Max=%d Min=%d Mean=%d swing=%d %.1fbits" % \
    (np.max(raw), np.min(raw), np.mean(raw),
     np.max(raw) - np.min(raw), np.log2(np.max(raw) - np.min(raw))))

# generate different bit audio
data_depth = {}
print(raw)
data_depth['16bit'] = 2**(in_bits-16)*(raw / (2**(in_bits-16))).astype('int')
print(data_depth['16bit'])
data_depth['10bit'] = 2**(in_bits-10)*(raw / (2**(in_bits-10))).astype('int')
data_depth['8bit'] = 2**(in_bits-8)*(raw / (2**(in_bits-8))).astype('int')
data_depth['7bit'] = 2**(in_bits-7)*(raw / (2**(in_bits-7))).astype('int')
data_depth['6bit'] = 2**(in_bits-6)*(raw / (2**(in_bits-6))).astype('int')
data_depth['2bit'] = 2**(in_bits-2)*(raw / (2**(in_bits-2))).astype('int')

# normalize and zero mean all
for key in data_depth:
    data_depth[key] = data_depth[key] - np.mean(data_depth[key])
    data_depth[key] = data_depth[key] / np.max(np.abs(data_depth[key]))

# write audio files
from scipy.io.wavfile import write
for key in data_depth:
    write('out/test'+key+'.wav', fs, data_depth[key])

# plot some
t = np.arange(0, len(raw)/fs, 1/fs)
fig, axs = plt.subplots(1, 1)
axs.step(t, data_depth['16bit'], label='16bit')
axs.step(t, data_depth['8bit'], label='8bit')
axs.step(t, data_depth['7bit'], label='7bit')
axs.step(t, data_depth['6bit'], label='6bit')
axs.step(t, data_depth['2bit'], label='2bit')
# axs.set_xlim(0, 6e-3)
# axs.set_ylim(-1, 1)
axs.set_xlabel('time [s]')
axs.set_ylabel('mic data')
axs.grid(True)
axs.legend()
fig.tight_layout()
plt.show()
audio/edison/audio/bit_depth_analyze.py
1,741
input raw samples from MCU in_data = 'out/data_raw.txt' load file Stats generate different bit audio normalize and zero mean all write audio files plot some axs.set_xlim(0, 6e-3) axs.set_ylim(-1, 1)
199
en
0.498117
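The bit-depth script above emulates lower resolutions by dividing out the least-significant bits, truncating, and scaling back up. A small sketch of that requantization step in isolation (the helper name is invented for illustration):

import numpy as np

def requantize(raw, in_bits, out_bits):
    # drop (in_bits - out_bits) least-significant bits but keep the original scale
    step = 2 ** (in_bits - out_bits)
    return step * (raw / step).astype(int)

samples = np.array([0, 130, 255])
print(requantize(samples, in_bits=8, out_bits=6))   # -> [  0 128 252], multiples of 4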
#!/home/pi/Documents/Codigos/API_Estacao/bin/python3 """Simple FTDI EEPROM configurator. """ # Copyright (c) 2019-2020, Emmanuel Blot <[email protected]> # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause from argparse import ArgumentParser, FileType from io import StringIO from logging import Formatter, StreamHandler, DEBUG, ERROR from sys import modules, stderr from textwrap import fill from traceback import format_exc from pyftdi import FtdiLogger from pyftdi.eeprom import FtdiEeprom from pyftdi.ftdi import Ftdi from pyftdi.misc import add_custom_devices, hexdump #pylint: disable-msg=too-many-locals #pylint: disable-msg=too-many-branches #pylint: disable-msg=too-many-statements def main(): """Main routine""" debug = False try: argparser = ArgumentParser(description=modules[__name__].__doc__) argparser.add_argument('device', nargs='?', default='ftdi:///?', help='serial port device name') argparser.add_argument('-x', '--hexdump', action='store_true', help='dump EEPROM content as ASCII') argparser.add_argument('-X', '--hexblock', type=int, help='dump EEPROM as indented hexa blocks') argparser.add_argument('-i', '--input', type=FileType('rt'), help='input ini file to load EEPROM content') argparser.add_argument('-l', '--load', default='all', choices=('all', 'raw', 'values'), help='section(s) to load from input file') argparser.add_argument('-o', '--output', type=FileType('wt'), help='output ini file to save EEPROM content') argparser.add_argument('-s', '--serial-number', help='set serial number') argparser.add_argument('-m', '--manufacturer', help='set manufacturer name') argparser.add_argument('-p', '--product', help='set product name') argparser.add_argument('-c', '--config', action='append', help='change/configure a property ' 'as key=value pair') argparser.add_argument('-e', '--erase', action='store_true', help='erase the whole EEPROM content') argparser.add_argument('-u', '--update', action='store_true', help='perform actual update, use w/ care') argparser.add_argument('-P', '--vidpid', action='append', help='specify a custom VID:PID device ID, ' 'may be repeated') argparser.add_argument('-V', '--virtual', type=FileType('r'), help='use a virtual device, specified as YaML') argparser.add_argument('-v', '--verbose', action='count', default=0, help='increase verbosity') argparser.add_argument('-d', '--debug', action='store_true', help='enable debug mode') args = argparser.parse_args() debug = args.debug if not args.device: argparser.error('Serial device not specified') loglevel = max(DEBUG, ERROR - (10 * args.verbose)) loglevel = min(ERROR, loglevel) if debug: formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s ' '%(message)s', '%H:%M:%S') else: formatter = Formatter('%(message)s') FtdiLogger.set_formatter(formatter) FtdiLogger.set_level(loglevel) FtdiLogger.log.addHandler(StreamHandler(stderr)) if args.virtual: #pylint: disable-msg=import-outside-toplevel from pyftdi.usbtools import UsbTools # Force PyUSB to use PyFtdi test framework for USB backends UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', ) # Ensure the virtual backend can be found and is loaded backend = UsbTools.find_backend() loader = backend.create_loader()() loader.load(args.virtual) try: add_custom_devices(Ftdi, args.vidpid, force_hex=True) except ValueError as exc: argparser.error(str(exc)) eeprom = FtdiEeprom() eeprom.open(args.device) if args.erase: eeprom.erase() if args.input: eeprom.load_config(args.input, args.load) if args.serial_number: eeprom.set_serial_number(args.serial_number) if 
args.manufacturer: eeprom.set_manufacturer_name(args.manufacturer) if args.product: eeprom.set_product_name(args.product) for conf in args.config or []: if conf == '?': helpstr = ', '.join(sorted(eeprom.properties)) print(fill(helpstr, initial_indent=' ', subsequent_indent=' ')) exit(1) for sep in ':=': if sep in conf: name, value = conf.split(sep, 1) if not value: argparser.error('Configuration %s without value' % conf) helpio = StringIO() eeprom.set_property(name, value, helpio) helpstr = helpio.getvalue() if helpstr: print(fill(helpstr, initial_indent=' ', subsequent_indent=' ')) exit(1) break else: argparser.error('Missing name:value separator in %s' % conf) if args.hexdump: print(hexdump(eeprom.data)) if args.hexblock is not None: indent = ' ' * args.hexblock for pos in range(0, len(eeprom.data), 16): hexa = ' '.join(['%02x' % x for x in eeprom.data[pos:pos+16]]) print(indent, hexa, sep='') if args.update: if eeprom.commit(False): eeprom.reset_device() if args.verbose > 0: eeprom.dump_config() if args.output: eeprom.save_config(args.output) except (ImportError, IOError, NotImplementedError, ValueError) as exc: print('\nError: %s' % exc, file=stderr) if debug: print(format_exc(chain=False), file=stderr) exit(1) except KeyboardInterrupt: exit(2) if __name__ == '__main__': main()
bin/ftconf.py
6,686
Main routine Simple FTDI EEPROM configurator. !/home/pi/Documents/Codigos/API_Estacao/bin/python3 Copyright (c) 2019-2020, Emmanuel Blot <[email protected]> All rights reserved. SPDX-License-Identifier: BSD-3-Clausepylint: disable-msg=too-many-localspylint: disable-msg=too-many-branchespylint: disable-msg=too-many-statementspylint: disable-msg=import-outside-toplevel Force PyUSB to use PyFtdi test framework for USB backends Ensure the virtual backend can be found and is loaded
486
en
0.553341
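ftconf.py above is a thin command-line wrapper around pyftdi's FtdiEeprom object (open, set_serial_number, dump_config, commit). A minimal, hedged sketch of the same calls used directly, without the argument parsing (the device URL and serial number are placeholders):

from pyftdi.eeprom import FtdiEeprom

eeprom = FtdiEeprom()
eeprom.open('ftdi:///1')      # first detected FTDI device
eeprom.dump_config()          # print the decoded EEPROM content
# eeprom.set_serial_number('FT000001')
# eeprom.commit(False)        # real write; the tool above guards this behind --update
eeprom.close()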
#!/usr/bin/env python3 ###################################################### ## Calibrating the extrinsics between T265 and D4xx ## ## Based on this example: https://github.com/IntelRealSense/librealsense/pull/4355 ## with changes and modifications. ###################################################### ###################################################### # # General steps: # 1. Mount the two cameras rigidly # 2. Print any one of the checkerboards from: https://markhedleyjones.com/projects/calibration-checkerboard-collection # - The default settings in this script are for: https://markhedleyjones.com/storage/checkerboards/Checkerboard-A4-25mm-8x6.pdf # - Measure the actual printed grid size of the squares and modify size. # 3. Modify the script: # - Change grid_H, grid_W and size according to the actual printed checkerboard. # - Change the path and file_name if necessary (ex: use this script as standalone). # 4. Run the script online: # - python calibrate_extrinsics.py # 5. The results include intrinsics (save file) and extrinsics (terminal output) # ###################################################### from __future__ import print_function import pyrealsense2 as rs import numpy as np np.set_printoptions(suppress=True,precision=5) import cv2 assert cv2.__version__[0] >= '3', 'The fisheye module requires opencv version >= 3.0.0' import os import shutil import json import argparse import glob from collections import OrderedDict parser = argparse.ArgumentParser() parser.add_argument('--SN_T265', help='serial number of T265') parser.add_argument('--SN_D4xx', help='serial number of D4xx') parser.add_argument('--path', default="calibration_results", help='image path') parser.add_argument('--file_name', default="/intrinsics.json", help='intrinsics calibration file name') parser.add_argument('--save_tmp', default=False, help='save the temporary files of this program, useful for debugging purposes') parser.add_argument('--grid_H', default=8, help='grid height (inner corners)') parser.add_argument('--grid_W', default=6, help='grid width (inner corners)') parser.add_argument('--size', default=0.0282, help='grid side length') parser.add_argument('--calibrate', default=False, help='run calibration (only)', action='store_true') parser.add_argument('--visualize', default=True, help='with GUI', action='store_true') args = parser.parse_args() CHECKERBOARD = (args.grid_H, args.grid_W) SIDE_LENGTH = args.size tmp_folder = args.path + "/tmp" def add_camera_calibration(intrinsics, streams = None): cam = {} cam['center_px'] = [intrinsics.ppx, intrinsics.ppy] cam['focal_length_px'] = [intrinsics.fx, intrinsics.fy] cam['distortion'] = {} cam['distortion']['type'] = 'kannalabrandt4' cam['distortion']['k'] = intrinsics.coeffs[:4] if streams: ext = streams["cam1"].get_extrinsics_to(streams["pose"]) # w.r.t. 
#print(ext) cam["extrinsics"] = {} cam["extrinsics"]["T"] = ext.translation #print(ext.rotation) cam["extrinsics"]["R"] = ext.rotation return cam def save_intrinsics(directory, file_name, intrinsics, streams): D = OrderedDict() # in order (cam1,cam2) D['cameras'] = [] D['cameras'].append(add_camera_calibration(intrinsics["cam1"], streams)) D['cameras'].append(add_camera_calibration(intrinsics["cam2"])) if not os.path.exists(directory): os.mkdir(directory) with open(directory + file_name, 'w') as f: json.dump(D, f, indent=4) print("Intrinsics output written to " + directory + file_name) def read_calibration(cam, extrinsics = False): #print("read_calibration") # intrinsics K = np.array([[cam['focal_length_px'][0], 0, cam['center_px'][0]], [ 0, cam['focal_length_px'][1], cam['center_px'][1]], [ 0, 0, 1]]) D = np.array(cam['distortion']['k']) if extrinsics: H = np.eye(4) H[:3,:3] = np.reshape(cam["extrinsics"]["R"],(3,3)) H[:3,3] = cam["extrinsics"]["T"] #print(H) return (K, D, H) return (K, D) def load_calibration(directory, file_name): with open(directory + file_name, 'r') as f: D = json.load(f) (K1, D1, H1) = read_calibration(D['cameras'][0], True) (K2, D2) = read_calibration(D['cameras'][1]) return (K1, D1, K2, D2, H1) def find_realsense_serial_no(type): camera_name = ['Intel RealSense T265', 'Intel RealSense D435'] # Get realsense pipeline handle pipe = rs.pipeline() # Find the T265 devices = rs.context().devices for i in range(len(devices)): if (devices[i].get_info(rs.camera_info.name) == camera_name[type]): print('Found one connected ' + camera_name[type] + ' with serial no:', devices[i].get_info(rs.camera_info.serial_number)) return devices[i].get_info(rs.camera_info.serial_number) print('No ' + camera_name[type] + ' found, please check connection or input serial manually') return None if not args.calibrate: # Obtain the serial number of the cameras, either automatically or from user's input print("Trying to connect devices...") serial_t265 = None serial_d4xx = None if (not args.SN_T265): serial_t265 = find_realsense_serial_no(0) else: serial_t265 = args.SN_T265 if (not args.SN_D4xx): serial_d4xx = find_realsense_serial_no(1) else: serial_d4xx = args.SN_D4xx if (not serial_t265) or (not serial_d4xx): print("Specify serial numbers --SN_T265 and --SN_D4xx (for online calibration, or --calibrate for prerecorded images with --path path to folder)") exit() # cam 1 pipe1 = rs.pipeline() cfg1 = rs.config() cfg1.enable_device(serial_t265) pipe1.start(cfg1) # cam 2 pipe2 = rs.pipeline() cfg2 = rs.config() cfg2.enable_device(serial_d4xx) cfg2.enable_all_streams() pipe2_profile = pipe2.start(cfg2) sensor_depth = pipe2_profile.get_device().first_depth_sensor() sensor_depth.set_option(rs.option.emitter_enabled, 0) # turn OFF projector try: # Retreive the stream and intrinsic properties for both cameras profile1 = pipe1.get_active_profile() profile2 = pipe2.get_active_profile() # future improvements: make both stream configureable streams = {"cam1" : profile1.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(), "pose" : profile1.get_stream(rs.stream.pose), "cam2" : profile2.get_stream(rs.stream.infrared, 1).as_video_stream_profile()} # IR1 #"cam2" : profile1.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()} # test intrinsics = {"cam1" : streams["cam1"].get_intrinsics(), "cam2" : streams["cam2"].get_intrinsics()} #print("cam1:", intrinsics["cam1"]) #print("cam2:", intrinsics["right"]) save_intrinsics(args.path, args.file_name, intrinsics, streams) # capture images i = 0 print("Press 
's' to save image.\nPress 'q' or 'c' to quit recording and start the calibration.") while True: # cam 1 frames1 = pipe1.wait_for_frames() f_fe1 = frames1.get_fisheye_frame(1) # left fisheye f_fe2 = frames1.get_fisheye_frame(2) # right fisheye if not f_fe1 or not f_fe2: continue img_fe1 = np.asanyarray(f_fe1.get_data()) img_fe2 = np.asanyarray(f_fe2.get_data()) # cam 2 frames2 = pipe2.wait_for_frames() f_ir1 = frames2.get_infrared_frame(1) # left infrared f_ir2 = frames2.get_infrared_frame(2) # right infrared f_color = frames2.get_color_frame() if not f_ir1 or not f_ir2 or not f_color: continue img_ir1 = np.asanyarray(f_ir1.get_data()) img_ir2 = np.asanyarray(f_ir2.get_data()) img_color = np.asanyarray(f_color.get_data()) # TODO: configure streams img1 = img_fe1 img2 = img_ir1 # display cv2.imshow('cam1', img1) cv2.imshow('cam2', img2) # save or quit k = cv2.waitKey(1) if k == ord('s'): print("'s' key pressed. Saving temp images..") if not os.path.exists(tmp_folder): os.mkdir(tmp_folder) cv2.imwrite(tmp_folder + '/fe1_' + str(i) + '.png', img_fe1) cv2.imwrite(tmp_folder + '/fe2_' + str(i) + '.png', img_fe2) cv2.imwrite(tmp_folder + '/ir1_' + str(i) + '.png', img_ir1) # cv2.imwrite(tmp_folder+ '/ir2_' + str(i) + '.png', img_ir2) cv2.imwrite(tmp_folder + '/color_' + str(i) + '.png', img_color) print("Saved temp images in temp folder " + tmp_folder) i = i+1 if k == ord('q') or k == ord('c'): break finally: pipe1.stop() pipe2.stop() # calibrate print("Calibrate extrinsics now...") # arrays to store detections P3 = [] # w.r.t. target frame P2_1 = [] # in image #1 P2_2 = [] # in image #2 # TODO: configure streams images1 = glob.glob(tmp_folder + '/fe1_*') #images2 = glob.glob(tmp_folder + '/fe2_*') # test images2 = glob.glob(tmp_folder + '/ir1_*') images1.sort() images2.sort() #print(images1) #print(images2) if len(images1) == len(images2) == 0: print("No images found. Exit.") exit(0) try: for i, fname in enumerate(images1): img1 = cv2.imread(images1[i]) img2 = cv2.imread(images2[i]) gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) # detect ret1, corners1 = cv2.findChessboardCorners(gray1, CHECKERBOARD, None) ret2, corners2 = cv2.findChessboardCorners(gray2, CHECKERBOARD, None) if ret1 and ret2: # subpixel refinement criteria_sub = (cv2.TermCriteria_COUNT + cv2.TERM_CRITERIA_EPS, 10, 1e-1) rt = cv2.cornerSubPix(gray1, corners1, (7, 7), (-1, -1), criteria_sub) P2_1.append(corners1) if args.visualize: ret1 = cv2.drawChessboardCorners(img1, CHECKERBOARD, corners1, ret1) cv2.imshow("img1", img1) cv2.waitKey(200) rt = cv2.cornerSubPix(gray2, corners2, (7, 7), (-1, -1), criteria_sub) P2_2.append(corners2) if args.visualize: ret2 = cv2.drawChessboardCorners(img2, CHECKERBOARD, corners2, ret2) cv2.imshow("img2", img2) cv2.waitKey(200) except cv2.error as e: print("Error: ", e) # calibration (stereo extrinsics) R = np.zeros((1, 1, 3), dtype=np.float64) T = np.zeros((1, 1, 3), dtype=np.float64) N = len(P2_1) # number of successful detections p3d = np.zeros( (CHECKERBOARD[0]*CHECKERBOARD[1], 1, 3) , np.float64) p3d[:,0, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2) # fisheye.stereoCalibrate needs different data structures/dimensions than cv2.stereoCalibrate, i.e. (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2/3)! 
P3 = np.array([p3d]*N, dtype=np.float64) P2_1 = np.asarray(P2_1, dtype=np.float64) P2_2 = np.asarray(P2_2, dtype=np.float64) P3 = np.reshape(P3, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 3))*SIDE_LENGTH P2_1 = np.reshape(P2_1, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2)) P2_2 = np.reshape(P2_2, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2)) (K1, D1, K2, D2, H1) = load_calibration(args.path, args.file_name) try: (rms, _, _, _, _, R, T) = \ cv2.fisheye.stereoCalibrate( P3, P2_1, P2_2, K1, D1, K2, D2, (0,0), # only used to initialize intrinsics when no intrinsics provided R, T, cv2.fisheye.CALIB_FIX_INTRINSIC # extrinsics only ) except cv2.error as e: print("Error: ", e) print("Please make sure that the checkerboard exists in the images. See tmp images in " + tmp_folder + " to debug.") exit() print("RMS:", rms) H_cam2_cam1 = np.eye(4) H_cam2_cam1[:3,:3] = R H_cam2_cam1[:3,3] = T.flatten() # w.r.t. pose H_ir1_fe1 = H_cam2_cam1 # TODO: configure H_pose_fe1 = H1 H_pose_ir1 = H_pose_fe1.dot( np.linalg.inv(H_ir1_fe1) ) print("H (ir1 wrt pose) =", H_pose_ir1) fn = args.path + "/H.txt" np.savetxt(fn, H_pose_ir1, fmt='%.9f') print("Extrinsic output written to", fn) if not args.save_tmp: if os.path.isdir(tmp_folder): shutil.rmtree(tmp_folder, ignore_errors=True) print("Temporary files deleted. If you wish to keep the tmp files, use --save_tmp True.")
robot/src/vision_to_mavros/scripts/calibrate_extrinsics.py
12,847
!/usr/bin/env python3 Calibrating the extrinsics between T265 and D4xx Based on this example: https://github.com/IntelRealSense/librealsense/pull/4355 with changes and modifications. General steps: 1. Mount the two cameras rigidly 2. Print any one of the checkerboards from: https://markhedleyjones.com/projects/calibration-checkerboard-collection - The default settings in this script are for: https://markhedleyjones.com/storage/checkerboards/Checkerboard-A4-25mm-8x6.pdf - Measure the actual printed grid size of the squares and modify size. 3. Modify the script: - Change grid_H, grid_W and size according to the actual printed checkerboard. - Change the path and file_name if necessary (ex: use this script as standalone). 4. Run the script online: - python calibrate_extrinsics.py 5. The results include intrinsics (save file) and extrinsics (terminal output) w.r.t.print(ext)print(ext.rotation) in order (cam1,cam2)print("read_calibration") intrinsicsprint(H) Get realsense pipeline handle Find the T265 Obtain the serial number of the cameras, either automatically or from user's input cam 1 cam 2 turn OFF projector Retreive the stream and intrinsic properties for both cameras future improvements: make both stream configureable IR1"cam2" : profile1.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()} testprint("cam1:", intrinsics["cam1"])print("cam2:", intrinsics["right"]) capture images cam 1 left fisheye right fisheye cam 2 left infrared right infrared TODO: configure streams display save or quit cv2.imwrite(tmp_folder+ '/ir2_' + str(i) + '.png', img_ir2) calibrate arrays to store detections w.r.t. target frame in image 1 in image 2 TODO: configure streamsimages2 = glob.glob(tmp_folder + '/fe2_*') testprint(images1)print(images2) detect subpixel refinement calibration (stereo extrinsics) number of successful detections fisheye.stereoCalibrate needs different data structures/dimensions than cv2.stereoCalibrate, i.e. (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2/3)! only used to initialize intrinsics when no intrinsics provided extrinsics only w.r.t. pose TODO: configure
2,113
en
0.646712
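The calibration script above boils down to detecting checkerboard corners in saved image pairs and feeding them to cv2.fisheye.stereoCalibrate. A hedged sketch of the detection half for a single image (the file path is a placeholder matching the temp files the script writes):

import cv2

CHECKERBOARD = (8, 6)   # inner corners, as in the script's defaults

img = cv2.imread("calibration_results/tmp/fe1_0.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
found, corners = cv2.findChessboardCorners(gray, CHECKERBOARD, None)
if found:
    criteria = (cv2.TERM_CRITERIA_COUNT + cv2.TERM_CRITERIA_EPS, 10, 0.1)
    corners = cv2.cornerSubPix(gray, corners, (7, 7), (-1, -1), criteria)
    cv2.drawChessboardCorners(img, CHECKERBOARD, corners, found)
    cv2.imshow("corners", img)
    cv2.waitKey(0)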
"""Implementation of Rule L044.""" from typing import Optional from sqlfluff.core.rules.analysis.select_crawler import Query, SelectCrawler from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.doc_decorators import document_groups from sqlfluff.core.rules.functional import sp class RuleFailure(Exception): """Exception class for reporting lint failure inside deeply nested code.""" def __init__(self, anchor: BaseSegment): self.anchor: BaseSegment = anchor @document_groups class Rule_L044(BaseRule): """Query produces an unknown number of result columns. **Anti-pattern** Querying all columns using ``*`` produces a query result where the number or ordering of columns changes if the upstream table's schema changes. This should generally be avoided because it can cause slow performance, cause important schema changes to go undetected, or break production code. For example: * If a query does ``SELECT t.*`` and is expected to return columns ``a``, ``b``, and ``c``, the actual columns returned will be wrong/different if columns are added to or deleted from the input table. * ``UNION`` and ``DIFFERENCE`` clauses require the inputs have the same number of columns (and compatible types). * ``JOIN`` queries may break due to new column name conflicts, e.g. the query references a column ``c`` which initially existed in only one input table but a column of the same name is added to another table. * ``CREATE TABLE (<<column schema>>) AS SELECT *`` .. code-block:: sql WITH cte AS ( SELECT * FROM foo ) SELECT * FROM cte UNION SELECT a, b FROM t **Best practice** Somewhere along the "path" to the source data, specify columns explicitly. .. code-block:: sql WITH cte AS ( SELECT * FROM foo ) SELECT a, b FROM cte UNION SELECT a, b FROM t """ groups = ("all",) _works_on_unparsable = False def _handle_alias(self, selectable, alias_info, query): select_info_target = SelectCrawler.get( query, alias_info.from_expression_element )[0] if isinstance(select_info_target, str): # It's an alias to an external table whose # number of columns could vary without our # knowledge. Thus, warn. self.logger.debug( f"Query target {select_info_target} is external. Generating warning." ) raise RuleFailure(selectable.selectable) else: # Handle nested SELECT. self._analyze_result_columns(select_info_target) def _analyze_result_columns(self, query: Query): """Given info on a list of SELECTs, determine whether to warn.""" # Recursively walk from the given query (select_info_list) to any # wildcard columns in the select targets. If every wildcard evdentually # resolves to a query without wildcards, all is well. Otherwise, warn. if not query.selectables: return # pragma: no cover for selectable in query.selectables: self.logger.debug(f"Analyzing query: {selectable.selectable.raw}") for wildcard in selectable.get_wildcard_info(): if wildcard.tables: for wildcard_table in wildcard.tables: self.logger.debug( f"Wildcard: {wildcard.segment.raw} has target " "{wildcard_table}" ) # Is it an alias? alias_info = selectable.find_alias(wildcard_table) if alias_info: # Found the alias matching the wildcard. Recurse, # analyzing the query associated with that alias. self._handle_alias(selectable, alias_info, query) else: # Not an alias. Is it a CTE? cte = query.lookup_cte(wildcard_table) if cte: # Wildcard refers to a CTE. Analyze it. self._analyze_result_columns(cte) else: # Not CTE, not table alias. Presumably an # external table. Warn. 
self.logger.debug( f"Query target {wildcard_table} is external. " "Generating warning." ) raise RuleFailure(selectable.selectable) else: # No table was specified with the wildcard. Assume we're # querying from a nested select in FROM. query_list = SelectCrawler.get( query, query.selectables[0].selectable ) for o in query_list: if isinstance(o, Query): self._analyze_result_columns(o) return self.logger.debug( f'Query target "{query.selectables[0].selectable.raw}" has no ' "targets. Generating warning." ) raise RuleFailure(query.selectables[0].selectable) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Outermost query should produce known number of columns.""" start_types = ["select_statement", "set_expression", "with_compound_statement"] if context.segment.is_type( *start_types ) and not context.functional.parent_stack.any(sp.is_type(*start_types)): crawler = SelectCrawler(context.segment, context.dialect) # Begin analysis at the outer query. if crawler.query_tree: try: return self._analyze_result_columns(crawler.query_tree) except RuleFailure as e: return LintResult(anchor=e.anchor) return None
src/sqlfluff/rules/L044.py
6,370
Exception class for reporting lint failure inside deeply nested code. Query produces an unknown number of result columns. **Anti-pattern** Querying all columns using ``*`` produces a query result where the number or ordering of columns changes if the upstream table's schema changes. This should generally be avoided because it can cause slow performance, cause important schema changes to go undetected, or break production code. For example: * If a query does ``SELECT t.*`` and is expected to return columns ``a``, ``b``, and ``c``, the actual columns returned will be wrong/different if columns are added to or deleted from the input table. * ``UNION`` and ``DIFFERENCE`` clauses require the inputs have the same number of columns (and compatible types). * ``JOIN`` queries may break due to new column name conflicts, e.g. the query references a column ``c`` which initially existed in only one input table but a column of the same name is added to another table. * ``CREATE TABLE (<<column schema>>) AS SELECT *`` .. code-block:: sql WITH cte AS ( SELECT * FROM foo ) SELECT * FROM cte UNION SELECT a, b FROM t **Best practice** Somewhere along the "path" to the source data, specify columns explicitly. .. code-block:: sql WITH cte AS ( SELECT * FROM foo ) SELECT a, b FROM cte UNION SELECT a, b FROM t Given info on a list of SELECTs, determine whether to warn. Outermost query should produce known number of columns. Implementation of Rule L044. It's an alias to an external table whose number of columns could vary without our knowledge. Thus, warn. Handle nested SELECT. Recursively walk from the given query (select_info_list) to any wildcard columns in the select targets. If every wildcard evdentually resolves to a query without wildcards, all is well. Otherwise, warn. pragma: no cover Is it an alias? Found the alias matching the wildcard. Recurse, analyzing the query associated with that alias. Not an alias. Is it a CTE? Wildcard refers to a CTE. Analyze it. Not CTE, not table alias. Presumably an external table. Warn. No table was specified with the wildcard. Assume we're querying from a nested select in FROM. Begin analysis at the outer query.
2,248
en
0.824228
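Rule L044 above fires whenever a wildcard cannot be traced back to a known column list. A minimal, hedged way to see it through the simple Python API (assuming a sqlfluff version that still numbers the rule L044):

import sqlfluff

# the outer SELECT * targets an external table, so the column count is unknown
violations = sqlfluff.lint("SELECT * FROM external_tbl\n", dialect="ansi", rules=["L044"])
print(violations)   # should report one L044 violation for the wildcard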
import os, paramiko, time, schedule, smtplib, ssl
from datetime import datetime
from email.message import EmailMessage

host = 'localhost'
port = '5432'
user = 'postgres'
password = 'admin'
database = 'testdb'

# local backup path
local_dir = 'C:\\Users\\Kamla\\projets\\auto-backup-sqldb\\backup\\'
# local_dir = 'path to the folder of the database to back up\\'
# remote backup path
remote_dir = '/C:/Users/vmwin10/Documents/ftpfile/'

def job():
    print("Backup working...")
    filestamp = time.strftime('%Y-%m-%dT%H-%M-%S.%z')
    # name of the sql file that pg_dump will generate
    database_remote = database + "_" + filestamp + ".bak.sql"
    PASS = "set PGPASSWORD=%s" % (password)
    # run the pg_dump command, which makes a local backup;
    # the files are saved in the 'backup' directory
    os.system("(cd backup) && (" + PASS + ") && (pg_dump -h %s -p %s -U %s -f %s -C -d %s)" % (host, port, user, database_remote, database))
    print("Database dumped to " + database_remote)

    # start of the SFTP transfer
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # connect to the machine where the backup file will be stored
    ssh_client.connect(hostname='192.168.126.2', username='vmwin10', password='vmwin10')
    ftp_client = ssh_client.open_sftp()
    # upload the local file to the remote host
    ftp_client.put(local_dir + database_remote, remote_dir + database_remote)
    ftp_client.close()
    print("Successfull Backup")

    # an email is sent after every backup
    msg = EmailMessage()
    msg.set_content("Un backup vient d'etre effectue")
    msg["Subject"] = "Email de Backup"
    msg["From"] = "[email protected]"
    msg["To"] = "[email protected]"
    context = ssl.create_default_context()
    with smtplib.SMTP("smtp.gmail.com", port=587) as smtp:
        smtp.starttls(context=context)
        smtp.login(msg["From"], "password")
        smtp.send_message(msg)

# the backup currently runs every 3 seconds; alternative schedules are commented out below
schedule.every(3).seconds.do(job)
# schedule.every(15).minutes.do(job)
# schedule.every().hour.do(job)
# schedule.every().day.at("10:30").do(job)
# schedule.every(10).to(10).minutes.do(job)
# schedule.every().monday.do(job)
# schedule.every().wednesday.at("15:00").do(job)
# schedule.every().minute.at(":15").do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
pgsqlbackup.py
2,432
local backup path local_dir = 'path to the folder of the database to back up\\' remote backup path name of the sql file that pg_dump will generate run the pg_dump command, which makes a local backup; the files are saved in the 'backup' directory start of the SFTP transfer connect to the machine where the backup file will be stored upload the local file to the remote host an email is sent after every backup the backup currently runs every 3 seconds; alternative schedules are commented out below schedule.every(15).minutes.do(job) schedule.every().hour.do(job) schedule.every().day.at("10:30").do(job) schedule.every(10).to(10).minutes.do(job) schedule.every().monday.do(job) schedule.every().wednesday.at("15:00").do(job) schedule.every().minute.at(":15").do(job)
767
fr
0.832575
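The backup script above relies on the schedule package's fluent interface; the active call runs the job every 3 seconds while the realistic intervals are left commented out. A tiny sketch of switching to the hourly variant mentioned there (the job body is a stand-in for the dump/upload/email routine):

import schedule, time

def job():
    print("Backup working...")      # stand-in for the real backup routine

schedule.every().hour.do(job)       # one of the commented-out options in the script

while True:
    schedule.run_pending()
    time.sleep(1)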
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. from datetime import date from pathlib import Path ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent PACKAGE_DIR = ROOT_DIR / "email_service" DOCS_DIR = ROOT_DIR / "email_service" version_file_path = PACKAGE_DIR / "version.py" code_obj = compile(version_file_path.read_text(), version_file_path, "exec") __version__ = dict() exec(code_obj, __version__) version = __version__["__version__"] # -- Project information ----------------------------------------------------- project = "Email Service" copyright = """2021, Aditya Raman""" author = "Aditya Raman" # The full version, including alpha/beta/rc tags version = release = f"v{version}" today = str(date.today()) language = "en" # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx_rtd_theme", "sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx.ext.viewcode", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # alternate: "alabaster" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # These paths are either relative to html_static_path # or fully qualified paths (eg. https://...) # html_css_files = [] # # html_style = "" master_doc = "index" latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # "papersize": "a4paper", # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # "preamble": "\\addto\\captionsenglish{\\renewcommand{\\contentsname}{Table of contents}}", # Latex figure (float) alignment # # 'figure_align': 'htbp', } latex_show_urls = "footnote" # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False add_function_parentheses = False show_authors = True
docs/conf.py
3,252
Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: https://www.sphinx-doc.org/en/master/usage/configuration.html -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- Project information ----------------------------------------------------- The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. alternate: "alabaster" Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". These paths are either relative to html_static_path or fully qualified paths (eg. https://...) html_css_files = [] html_style = "" The paper size ('letterpaper' or 'a4paper'). The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. Latex figure (float) alignment 'figure_align': 'htbp', If true, the current module name will be prepended to all description unit titles (such as .. function::).
1,996
en
0.683511
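The conf.py above enables sphinx.ext.autodoc together with sphinx.ext.napoleon, so API pages are rendered from Google- or NumPy-style docstrings in the package. The module below is purely illustrative: the module name, function, and behaviour are invented for this sketch and are not taken from the email_service package; it only shows the docstring shape that this configuration would pick up, for example through an .. automodule:: directive in the documentation sources.

# illustrative_module.py -- a hypothetical module for the autodoc + napoleon setup above.
# Everything here is invented for illustration; only the docstring style matters.


def send_message(recipient: str, subject: str, body: str) -> bool:
    """Send a single plain-text message.

    Args:
        recipient: Destination email address.
        subject: Subject line of the message.
        body: Plain-text body of the message.

    Returns:
        True if the message was accepted for delivery, False otherwise.

    Raises:
        ValueError: If ``recipient`` is empty.
    """
    if not recipient:
        raise ValueError("recipient must not be empty")
    # Actual delivery is out of scope for this sketch.
    return True

With sphinx.ext.viewcode also enabled, the rendered page links each documented object back to its highlighted source.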
# NOTE - Still seems to be a leak here somewhere # gateway count doesnt hit zero. Hence the print statements! import sys sys.coinit_flags = 0 # Must be free-threaded! import win32api, pythoncom, time import pywintypes import os import winerror import win32com import win32com.client.connect from win32com.test.util import CheckClean from win32com.client import constants, DispatchBaseClass, CastTo, VARIANT from win32com.test.util import RegisterPythonServer from pywin32_testutil import str2memory import datetime import decimal import win32timezone importMsg = "**** PyCOMTest is not installed ***\n PyCOMTest is a Python test specific COM client and server.\n It is likely this server is not installed on this machine\n To install the server, you must get the win32com sources\n and build it using MS Visual C++" error = Exception # This test uses a Python implemented COM server - ensure correctly registered. RegisterPythonServer( os.path.join(os.path.dirname(__file__), "..", "servers", "test_pycomtest.py"), "Python.Test.PyCOMTest", ) from win32com.client import gencache try: gencache.EnsureModule("{6BCDCB60-5605-11D0-AE5F-CADD4C000000}", 0, 1, 1) except pythoncom.com_error: print("The PyCOMTest module can not be located or generated.") print(importMsg) raise RuntimeError(importMsg) # We had a bg where RegisterInterfaces would fail if gencache had # already been run - exercise that here from win32com import universal universal.RegisterInterfaces("{6BCDCB60-5605-11D0-AE5F-CADD4C000000}", 0, 1, 1) verbose = 0 # convert a normal int to a long int - used to avoid, eg, '1L' for py3k # friendliness def ensure_long(int_val): if sys.version_info > (3,): # py3k - no such thing as a 'long' return int_val # on py2x, we just use an expression that results in a long return 0x100000000 - 0x100000000 + int_val def check_get_set(func, arg): got = func(arg) if got != arg: raise error("%s failed - expected %r, got %r" % (func, arg, got)) def check_get_set_raises(exc, func, arg): try: got = func(arg) except exc as e: pass # what we expect! else: raise error( "%s with arg %r didn't raise %s - returned %r" % (func, arg, exc, got) ) def progress(*args): if verbose: for arg in args: print(arg, end=" ") print() def TestApplyResult(fn, args, result): try: fnName = str(fn).split()[1] except: fnName = str(fn) progress("Testing ", fnName) pref = "function " + fnName rc = fn(*args) if rc != result: raise error("%s failed - result not %r but %r" % (pref, result, rc)) def TestConstant(constName, pyConst): try: comConst = getattr(constants, constName) except: raise error("Constant %s missing" % (constName,)) if comConst != pyConst: raise error( "Constant value wrong for %s - got %s, wanted %s" % (constName, comConst, pyConst) ) # Simple handler class. This demo only fires one event. class RandomEventHandler: def _Init(self): self.fireds = {} def OnFire(self, no): try: self.fireds[no] = self.fireds[no] + 1 except KeyError: self.fireds[no] = 0 def OnFireWithNamedParams(self, no, a_bool, out1, out2): # This test exists mainly to help with an old bug, where named # params would come in reverse. Missing = pythoncom.Missing if no is not Missing: # We know our impl called 'OnFire' with the same ID assert no in self.fireds assert no + 1 == out1, "expecting 'out1' param to be ID+1" assert no + 2 == out2, "expecting 'out2' param to be ID+2" # The middle must be a boolean. 
assert a_bool is Missing or type(a_bool) == bool, "middle param not a bool" return out1 + 2, out2 + 2 def _DumpFireds(self): if not self.fireds: print("ERROR: Nothing was received!") for firedId, no in self.fireds.items(): progress("ID %d fired %d times" % (firedId, no)) # A simple handler class that derives from object (ie, a "new style class") - # only relevant for Python 2.x (ie, the 2 classes should be identical in 3.x) class NewStyleRandomEventHandler(object): def _Init(self): self.fireds = {} def OnFire(self, no): try: self.fireds[no] = self.fireds[no] + 1 except KeyError: self.fireds[no] = 0 def OnFireWithNamedParams(self, no, a_bool, out1, out2): # This test exists mainly to help with an old bug, where named # params would come in reverse. Missing = pythoncom.Missing if no is not Missing: # We know our impl called 'OnFire' with the same ID assert no in self.fireds assert no + 1 == out1, "expecting 'out1' param to be ID+1" assert no + 2 == out2, "expecting 'out2' param to be ID+2" # The middle must be a boolean. assert a_bool is Missing or type(a_bool) == bool, "middle param not a bool" return out1 + 2, out2 + 2 def _DumpFireds(self): if not self.fireds: print("ERROR: Nothing was received!") for firedId, no in self.fireds.items(): progress("ID %d fired %d times" % (firedId, no)) # Test everything which can be tested using both the "dynamic" and "generated" # COM objects (or when there are very subtle differences) def TestCommon(o, is_generated): progress("Getting counter") counter = o.GetSimpleCounter() TestCounter(counter, is_generated) progress("Checking default args") rc = o.TestOptionals() if rc[:-1] != ("def", 0, 1) or abs(rc[-1] - 3.14) > 0.01: print(rc) raise error("Did not get the optional values correctly") rc = o.TestOptionals("Hi", 2, 3, 1.1) if rc[:-1] != ("Hi", 2, 3) or abs(rc[-1] - 1.1) > 0.01: print(rc) raise error("Did not get the specified optional values correctly") rc = o.TestOptionals2(0) if rc != (0, "", 1): print(rc) raise error("Did not get the optional2 values correctly") rc = o.TestOptionals2(1.1, "Hi", 2) if rc[1:] != ("Hi", 2) or abs(rc[0] - 1.1) > 0.01: print(rc) raise error("Did not get the specified optional2 values correctly") progress("Checking getting/passing IUnknown") check_get_set(o.GetSetUnknown, o) progress("Checking getting/passing IDispatch") # This might be called with either the interface or the CoClass - but these # functions always return from the interface. expected_class = o.__class__ # CoClass instances have `default_interface` expected_class = getattr(expected_class, "default_interface", expected_class) if not isinstance(o.GetSetDispatch(o), expected_class): raise error("GetSetDispatch failed: %r" % (o.GetSetDispatch(o),)) progress("Checking getting/passing IDispatch of known type") expected_class = o.__class__ expected_class = getattr(expected_class, "default_interface", expected_class) if o.GetSetInterface(o).__class__ != expected_class: raise error("GetSetDispatch failed") progress("Checking misc args") check_get_set(o.GetSetVariant, 4) check_get_set(o.GetSetVariant, "foo") check_get_set(o.GetSetVariant, o) # signed/unsigned. check_get_set(o.GetSetInt, 0) check_get_set(o.GetSetInt, -1) check_get_set(o.GetSetInt, 1) check_get_set(o.GetSetUnsignedInt, 0) check_get_set(o.GetSetUnsignedInt, 1) check_get_set(o.GetSetUnsignedInt, 0x80000000) if o.GetSetUnsignedInt(-1) != 0xFFFFFFFF: # -1 is a special case - we accept a negative int (silently converting to # unsigned) but when getting it back we convert it to a long. 
raise error("unsigned -1 failed") check_get_set(o.GetSetLong, 0) check_get_set(o.GetSetLong, -1) check_get_set(o.GetSetLong, 1) check_get_set(o.GetSetUnsignedLong, 0) check_get_set(o.GetSetUnsignedLong, 1) check_get_set(o.GetSetUnsignedLong, 0x80000000) # -1 is a special case - see above. if o.GetSetUnsignedLong(-1) != 0xFFFFFFFF: raise error("unsigned -1 failed") # We want to explicitly test > 32 bits. py3k has no 'maxint' and # 'maxsize+1' is no good on 64bit platforms as its 65 bits! big = 2147483647 # sys.maxint on py2k for l in big, big + 1, 1 << 65: check_get_set(o.GetSetVariant, l) progress("Checking structs") r = o.GetStruct() assert r.int_value == 99 and str(r.str_value) == "Hello from C++" assert o.DoubleString("foo") == "foofoo" progress("Checking var args") o.SetVarArgs("Hi", "There", "From", "Python", 1) if o.GetLastVarArgs() != ("Hi", "There", "From", "Python", 1): raise error("VarArgs failed -" + str(o.GetLastVarArgs())) progress("Checking arrays") l = [] TestApplyResult(o.SetVariantSafeArray, (l,), len(l)) l = [1, 2, 3, 4] TestApplyResult(o.SetVariantSafeArray, (l,), len(l)) TestApplyResult( o.CheckVariantSafeArray, ( ( 1, 2, 3, 4, ), ), 1, ) # and binary TestApplyResult(o.SetBinSafeArray, (str2memory("foo\0bar"),), 7) progress("Checking properties") o.LongProp = 3 if o.LongProp != 3 or o.IntProp != 3: raise error("Property value wrong - got %d/%d" % (o.LongProp, o.IntProp)) o.LongProp = o.IntProp = -3 if o.LongProp != -3 or o.IntProp != -3: raise error("Property value wrong - got %d/%d" % (o.LongProp, o.IntProp)) # This number fits in an unsigned long. Attempting to set it to a normal # long will involve overflow, which is to be expected. But we do # expect it to work in a property explicitly a VT_UI4. check = 3 * 10 ** 9 o.ULongProp = check if o.ULongProp != check: raise error( "Property value wrong - got %d (expected %d)" % (o.ULongProp, check) ) TestApplyResult(o.Test, ("Unused", 99), 1) # A bool function TestApplyResult(o.Test, ("Unused", -1), 1) # A bool function TestApplyResult(o.Test, ("Unused", 1 == 1), 1) # A bool function TestApplyResult(o.Test, ("Unused", 0), 0) TestApplyResult(o.Test, ("Unused", 1 == 0), 0) assert o.DoubleString("foo") == "foofoo" TestConstant("ULongTest1", ensure_long(0xFFFFFFFF)) TestConstant("ULongTest2", ensure_long(0x7FFFFFFF)) TestConstant("LongTest1", ensure_long(-0x7FFFFFFF)) TestConstant("LongTest2", ensure_long(0x7FFFFFFF)) TestConstant("UCharTest", 255) TestConstant("CharTest", -1) # 'Hello World', but the 'r' is the "Registered" sign (\xae) TestConstant("StringTest", "Hello Wo\xaeld") progress("Checking dates and times") # For now *all* times passed must be tz-aware. now = win32timezone.now() # but conversion to and from a VARIANT loses sub-second... now = now.replace(microsecond=0) later = now + datetime.timedelta(seconds=1) TestApplyResult(o.EarliestDate, (now, later), now) # The below used to fail with `ValueError: microsecond must be in 0..999999` - see #1655 # https://planetcalc.com/7027/ says that float is: Sun, 25 Mar 1951 7:23:49 am assert o.MakeDate(18712.308206013888) == datetime.datetime.fromisoformat( "1951-03-25 07:23:49+00:00" ) progress("Checking currency") # currency. 
pythoncom.__future_currency__ = 1 if o.CurrencyProp != 0: raise error("Expecting 0, got %r" % (o.CurrencyProp,)) for val in ("1234.5678", "1234.56", "1234"): o.CurrencyProp = decimal.Decimal(val) if o.CurrencyProp != decimal.Decimal(val): raise error("%s got %r" % (val, o.CurrencyProp)) v1 = decimal.Decimal("1234.5678") TestApplyResult(o.DoubleCurrency, (v1,), v1 * 2) v2 = decimal.Decimal("9012.3456") TestApplyResult(o.AddCurrencies, (v1, v2), v1 + v2) TestTrickyTypesWithVariants(o, is_generated) progress("Checking win32com.client.VARIANT") TestPyVariant(o, is_generated) def TestTrickyTypesWithVariants(o, is_generated): # Test tricky stuff with type handling and generally only works with # "generated" support but can be worked around using VARIANT. if is_generated: got = o.TestByRefVariant(2) else: v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_VARIANT, 2) o.TestByRefVariant(v) got = v.value if got != 4: raise error("TestByRefVariant failed") if is_generated: got = o.TestByRefString("Foo") else: v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_BSTR, "Foo") o.TestByRefString(v) got = v.value if got != "FooFoo": raise error("TestByRefString failed") # check we can pass ints as a VT_UI1 vals = [1, 2, 3, 4] if is_generated: arg = vals else: arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_UI1, vals) TestApplyResult(o.SetBinSafeArray, (arg,), len(vals)) # safearrays of doubles and floats vals = [0, 1.1, 2.2, 3.3] if is_generated: arg = vals else: arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R8, vals) TestApplyResult(o.SetDoubleSafeArray, (arg,), len(vals)) if is_generated: arg = vals else: arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R4, vals) TestApplyResult(o.SetFloatSafeArray, (arg,), len(vals)) vals = [1.1, 2.2, 3.3, 4.4] expected = (1.1 * 2, 2.2 * 2, 3.3 * 2, 4.4 * 2) if is_generated: TestApplyResult(o.ChangeDoubleSafeArray, (vals,), expected) else: arg = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_ARRAY | pythoncom.VT_R8, vals) o.ChangeDoubleSafeArray(arg) if arg.value != expected: raise error("ChangeDoubleSafeArray got the wrong value") if is_generated: got = o.DoubleInOutString("foo") else: v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_BSTR, "foo") o.DoubleInOutString(v) got = v.value assert got == "foofoo", got val = decimal.Decimal("1234.5678") if is_generated: got = o.DoubleCurrencyByVal(val) else: v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_CY, val) o.DoubleCurrencyByVal(v) got = v.value assert got == val * 2 def TestDynamic(): progress("Testing Dynamic") import win32com.client.dynamic o = win32com.client.dynamic.DumbDispatch("PyCOMTest.PyCOMTest") TestCommon(o, False) counter = win32com.client.dynamic.DumbDispatch("PyCOMTest.SimpleCounter") TestCounter(counter, False) # Dynamic doesn't know this should be an int, so we get a COM # TypeMismatch error. try: check_get_set_raises(ValueError, o.GetSetInt, "foo") raise error("no exception raised") except pythoncom.com_error as exc: if exc.hresult != winerror.DISP_E_TYPEMISMATCH: raise arg1 = VARIANT(pythoncom.VT_R4 | pythoncom.VT_BYREF, 2.0) arg2 = VARIANT(pythoncom.VT_BOOL | pythoncom.VT_BYREF, True) arg3 = VARIANT(pythoncom.VT_I4 | pythoncom.VT_BYREF, 4) o.TestInOut(arg1, arg2, arg3) assert arg1.value == 4.0, arg1 assert arg2.value == False assert arg3.value == 8 # damn - props with params don't work for dynamic objects :( # o.SetParamProp(0, 1) # if o.ParamProp(0) != 1: # raise RuntimeError, o.paramProp(0) def TestGenerated(): # Create an instance of the server. 
from win32com.client.gencache import EnsureDispatch o = EnsureDispatch("PyCOMTest.PyCOMTest") TestCommon(o, True) counter = EnsureDispatch("PyCOMTest.SimpleCounter") TestCounter(counter, True) # This dance lets us get a CoClass even though it's not explicitly registered. # This is `CoPyComTest` from win32com.client.CLSIDToClass import GetClass coclass_o = GetClass("{8EE0C520-5605-11D0-AE5F-CADD4C000000}")() TestCommon(coclass_o, True) # Test the regression reported in #1753 assert bool(coclass_o) # This is `CoSimpleCounter` and the counter tests should work. coclass = GetClass("{B88DD310-BAE8-11D0-AE86-76F2C1000000}")() TestCounter(coclass, True) # XXX - this is failing in dynamic tests, but should work fine. i1, i2 = o.GetMultipleInterfaces() if not isinstance(i1, DispatchBaseClass) or not isinstance(i2, DispatchBaseClass): # Yay - is now an instance returned! raise error( "GetMultipleInterfaces did not return instances - got '%s', '%s'" % (i1, i2) ) del i1 del i2 # Generated knows to only pass a 32bit int, so should fail. check_get_set_raises(OverflowError, o.GetSetInt, 0x80000000) check_get_set_raises(OverflowError, o.GetSetLong, 0x80000000) # Generated knows this should be an int, so raises ValueError check_get_set_raises(ValueError, o.GetSetInt, "foo") check_get_set_raises(ValueError, o.GetSetLong, "foo") # Pass some non-sequence objects to our array decoder, and watch it fail. try: o.SetVariantSafeArray("foo") raise error("Expected a type error") except TypeError: pass try: o.SetVariantSafeArray(666) raise error("Expected a type error") except TypeError: pass o.GetSimpleSafeArray(None) TestApplyResult(o.GetSimpleSafeArray, (None,), tuple(range(10))) resultCheck = tuple(range(5)), tuple(range(10)), tuple(range(20)) TestApplyResult(o.GetSafeArrays, (None, None, None), resultCheck) l = [] TestApplyResult(o.SetIntSafeArray, (l,), len(l)) l = [1, 2, 3, 4] TestApplyResult(o.SetIntSafeArray, (l,), len(l)) ll = [1, 2, 3, 0x100000000] TestApplyResult(o.SetLongLongSafeArray, (ll,), len(ll)) TestApplyResult(o.SetULongLongSafeArray, (ll,), len(ll)) # Tell the server to do what it does! TestApplyResult(o.Test2, (constants.Attr2,), constants.Attr2) TestApplyResult(o.Test3, (constants.Attr2,), constants.Attr2) TestApplyResult(o.Test4, (constants.Attr2,), constants.Attr2) TestApplyResult(o.Test5, (constants.Attr2,), constants.Attr2) TestApplyResult(o.Test6, (constants.WideAttr1,), constants.WideAttr1) TestApplyResult(o.Test6, (constants.WideAttr2,), constants.WideAttr2) TestApplyResult(o.Test6, (constants.WideAttr3,), constants.WideAttr3) TestApplyResult(o.Test6, (constants.WideAttr4,), constants.WideAttr4) TestApplyResult(o.Test6, (constants.WideAttr5,), constants.WideAttr5) TestApplyResult(o.TestInOut, (2.0, True, 4), (4.0, False, 8)) o.SetParamProp(0, 1) if o.ParamProp(0) != 1: raise RuntimeError(o.paramProp(0)) # Make sure CastTo works - even though it is only casting it to itself! o2 = CastTo(o, "IPyCOMTest") if o != o2: raise error("CastTo should have returned the same object") # Do the connection point thing... # Create a connection object. progress("Testing connection points") o2 = win32com.client.DispatchWithEvents(o, RandomEventHandler) TestEvents(o2, o2) o2 = win32com.client.DispatchWithEvents(o, NewStyleRandomEventHandler) TestEvents(o2, o2) # and a plain "WithEvents". 
handler = win32com.client.WithEvents(o, RandomEventHandler) TestEvents(o, handler) handler = win32com.client.WithEvents(o, NewStyleRandomEventHandler) TestEvents(o, handler) progress("Finished generated .py test.") def TestEvents(o, handler): sessions = [] handler._Init() try: for i in range(3): session = o.Start() sessions.append(session) time.sleep(0.5) finally: # Stop the servers for session in sessions: o.Stop(session) handler._DumpFireds() handler.close() def _TestPyVariant(o, is_generated, val, checker=None): if is_generated: vt, got = o.GetVariantAndType(val) else: # Gotta supply all 3 args with the last 2 being explicit variants to # get the byref behaviour. var_vt = VARIANT(pythoncom.VT_UI2 | pythoncom.VT_BYREF, 0) var_result = VARIANT(pythoncom.VT_VARIANT | pythoncom.VT_BYREF, 0) o.GetVariantAndType(val, var_vt, var_result) vt = var_vt.value got = var_result.value if checker is not None: checker(got) return # default checking. assert vt == val.varianttype, (vt, val.varianttype) # Handle our safe-array test - if the passed value is a list of variants, # compare against the actual values. if type(val.value) in (tuple, list): check = [v.value if isinstance(v, VARIANT) else v for v in val.value] # pythoncom always returns arrays as tuples. got = list(got) else: check = val.value assert type(check) == type(got), (type(check), type(got)) assert check == got, (check, got) def _TestPyVariantFails(o, is_generated, val, exc): try: _TestPyVariant(o, is_generated, val) raise error("Setting %r didn't raise %s" % (val, exc)) except exc: pass def TestPyVariant(o, is_generated): _TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_UI1, 1)) _TestPyVariant( o, is_generated, VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_UI4, [1, 2, 3]) ) _TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_BSTR, "hello")) _TestPyVariant( o, is_generated, VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_BSTR, ["hello", "there"]), ) def check_dispatch(got): assert isinstance(got._oleobj_, pythoncom.TypeIIDs[pythoncom.IID_IDispatch]) _TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_DISPATCH, o), check_dispatch) _TestPyVariant( o, is_generated, VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_DISPATCH, [o]) ) # an array of variants each with a specific type. v = VARIANT( pythoncom.VT_ARRAY | pythoncom.VT_VARIANT, [ VARIANT(pythoncom.VT_UI4, 1), VARIANT(pythoncom.VT_UI4, 2), VARIANT(pythoncom.VT_UI4, 3), ], ) _TestPyVariant(o, is_generated, v) # and failures _TestPyVariantFails(o, is_generated, VARIANT(pythoncom.VT_UI1, "foo"), ValueError) def TestCounter(counter, bIsGenerated): # Test random access into container progress("Testing counter", repr(counter)) import random for i in range(50): num = int(random.random() * len(counter)) try: # XXX - this appears broken by commit 08a14d4deb374eaa06378509cf44078ad467b9dc - # We shouldn't need to do generated differently than dynamic. if bIsGenerated: ret = counter.Item(num + 1) else: ret = counter[num] if ret != num + 1: raise error( "Random access into element %d failed - return was %s" % (num, repr(ret)) ) except IndexError: raise error("** IndexError accessing collection element %d" % num) num = 0 if bIsGenerated: counter.SetTestProperty(1) counter.TestProperty = 1 # Note this has a second, default arg. 
counter.SetTestProperty(1, 2) if counter.TestPropertyWithDef != 0: raise error("Unexpected property set value!") if counter.TestPropertyNoDef(1) != 1: raise error("Unexpected property set value!") else: pass # counter.TestProperty = 1 counter.LBound = 1 counter.UBound = 10 if counter.LBound != 1 or counter.UBound != 10: print("** Error - counter did not keep its properties") if bIsGenerated: bounds = counter.GetBounds() if bounds[0] != 1 or bounds[1] != 10: raise error("** Error - counter did not give the same properties back") counter.SetBounds(bounds[0], bounds[1]) for item in counter: num = num + 1 if num != len(counter): raise error("*** Length of counter and loop iterations dont match ***") if num != 10: raise error("*** Unexpected number of loop iterations ***") try: counter = iter(counter)._iter_.Clone() # Test Clone() and enum directly except AttributeError: # *sob* - sometimes this is a real iterator and sometimes not :/ progress("Finished testing counter (but skipped the iterator stuff") return counter.Reset() num = 0 for item in counter: num = num + 1 if num != 10: raise error("*** Unexpected number of loop iterations - got %d ***" % num) progress("Finished testing counter") def TestLocalVTable(ob): # Python doesn't fully implement this interface. if ob.DoubleString("foo") != "foofoo": raise error("couldn't foofoo") ############################### ## ## Some vtable tests of the interface ## def TestVTable(clsctx=pythoncom.CLSCTX_ALL): # Any vtable interfaces marked as dual *should* be able to be # correctly implemented as IDispatch. ob = win32com.client.Dispatch("Python.Test.PyCOMTest") TestLocalVTable(ob) # Now test it via vtable - use some C++ code to help here as Python can't do it directly yet. tester = win32com.client.Dispatch("PyCOMTest.PyCOMTest") testee = pythoncom.CoCreateInstance( "Python.Test.PyCOMTest", None, clsctx, pythoncom.IID_IUnknown ) # check we fail gracefully with None passed. try: tester.TestMyInterface(None) except pythoncom.com_error as details: pass # and a real object. tester.TestMyInterface(testee) def TestVTable2(): # We once crashed creating our object with the native interface as # the first IID specified. We must do it _after_ the tests, so that # Python has already had the gateway registered from last run. ob = win32com.client.Dispatch("Python.Test.PyCOMTest") iid = pythoncom.InterfaceNames["IPyCOMTest"] clsid = "Python.Test.PyCOMTest" clsctx = pythoncom.CLSCTX_SERVER try: testee = pythoncom.CoCreateInstance(clsid, None, clsctx, iid) except TypeError: # Python can't actually _use_ this interface yet, so this is # "expected". Any COM error is not. pass def TestVTableMI(): clsctx = pythoncom.CLSCTX_SERVER ob = pythoncom.CoCreateInstance( "Python.Test.PyCOMTestMI", None, clsctx, pythoncom.IID_IUnknown ) # This inherits from IStream. ob.QueryInterface(pythoncom.IID_IStream) # This implements IStorage, specifying the IID as a string ob.QueryInterface(pythoncom.IID_IStorage) # IDispatch should always work ob.QueryInterface(pythoncom.IID_IDispatch) iid = pythoncom.InterfaceNames["IPyCOMTest"] try: ob.QueryInterface(iid) except TypeError: # Python can't actually _use_ this interface yet, so this is # "expected". Any COM error is not. 
pass def TestQueryInterface(long_lived_server=0, iterations=5): tester = win32com.client.Dispatch("PyCOMTest.PyCOMTest") if long_lived_server: # Create a local server t0 = win32com.client.Dispatch( "Python.Test.PyCOMTest", clsctx=pythoncom.CLSCTX_LOCAL_SERVER ) # Request custom interfaces a number of times prompt = [ "Testing QueryInterface without long-lived local-server #%d of %d...", "Testing QueryInterface with long-lived local-server #%d of %d...", ] for i in range(iterations): progress(prompt[long_lived_server != 0] % (i + 1, iterations)) tester.TestQueryInterface() class Tester(win32com.test.util.TestCase): def testVTableInProc(self): # We used to crash running this the second time - do it a few times for i in range(3): progress("Testing VTables in-process #%d..." % (i + 1)) TestVTable(pythoncom.CLSCTX_INPROC_SERVER) def testVTableLocalServer(self): for i in range(3): progress("Testing VTables out-of-process #%d..." % (i + 1)) TestVTable(pythoncom.CLSCTX_LOCAL_SERVER) def testVTable2(self): for i in range(3): TestVTable2() def testVTableMI(self): for i in range(3): TestVTableMI() def testMultiQueryInterface(self): TestQueryInterface(0, 6) # When we use the custom interface in the presence of a long-lived # local server, i.e. a local server that is already running when # we request an instance of our COM object, and remains afterwards, # then after repeated requests to create an instance of our object # the custom interface disappears -- i.e. QueryInterface fails with # E_NOINTERFACE. Set the upper range of the following test to 2 to # pass this test, i.e. TestQueryInterface(1,2) TestQueryInterface(1, 6) def testDynamic(self): TestDynamic() def testGenerated(self): TestGenerated() if __name__ == "__main__": # XXX - todo - Complete hack to crank threading support. # Should NOT be necessary def NullThreadFunc(): pass import _thread _thread.start_new(NullThreadFunc, ()) if "-v" in sys.argv: verbose = 1 win32com.test.util.testmain()
env/Lib/site-packages/win32com/test/testPyComTest.py
29,446
NOTE - Still seems to be a leak here somewhere gateway count doesnt hit zero. Hence the print statements! Must be free-threaded! This test uses a Python implemented COM server - ensure correctly registered. We had a bg where RegisterInterfaces would fail if gencache had already been run - exercise that here convert a normal int to a long int - used to avoid, eg, '1L' for py3k friendliness py3k - no such thing as a 'long' on py2x, we just use an expression that results in a long what we expect! Simple handler class. This demo only fires one event. This test exists mainly to help with an old bug, where named params would come in reverse. We know our impl called 'OnFire' with the same ID The middle must be a boolean. A simple handler class that derives from object (ie, a "new style class") - only relevant for Python 2.x (ie, the 2 classes should be identical in 3.x) This test exists mainly to help with an old bug, where named params would come in reverse. We know our impl called 'OnFire' with the same ID The middle must be a boolean. Test everything which can be tested using both the "dynamic" and "generated" COM objects (or when there are very subtle differences) This might be called with either the interface or the CoClass - but these functions always return from the interface. CoClass instances have `default_interface` signed/unsigned. -1 is a special case - we accept a negative int (silently converting to unsigned) but when getting it back we convert it to a long. -1 is a special case - see above. We want to explicitly test > 32 bits. py3k has no 'maxint' and 'maxsize+1' is no good on 64bit platforms as its 65 bits! sys.maxint on py2k and binary This number fits in an unsigned long. Attempting to set it to a normal long will involve overflow, which is to be expected. But we do expect it to work in a property explicitly a VT_UI4. A bool function A bool function A bool function 'Hello World', but the 'r' is the "Registered" sign (\xae) For now *all* times passed must be tz-aware. but conversion to and from a VARIANT loses sub-second... The below used to fail with `ValueError: microsecond must be in 0..999999` - see 1655 https://planetcalc.com/7027/ says that float is: Sun, 25 Mar 1951 7:23:49 am currency. Test tricky stuff with type handling and generally only works with "generated" support but can be worked around using VARIANT. check we can pass ints as a VT_UI1 safearrays of doubles and floats Dynamic doesn't know this should be an int, so we get a COM TypeMismatch error. damn - props with params don't work for dynamic objects :( o.SetParamProp(0, 1) if o.ParamProp(0) != 1: raise RuntimeError, o.paramProp(0) Create an instance of the server. This dance lets us get a CoClass even though it's not explicitly registered. This is `CoPyComTest` Test the regression reported in 1753 This is `CoSimpleCounter` and the counter tests should work. XXX - this is failing in dynamic tests, but should work fine. Yay - is now an instance returned! Generated knows to only pass a 32bit int, so should fail. Generated knows this should be an int, so raises ValueError Pass some non-sequence objects to our array decoder, and watch it fail. Tell the server to do what it does! Make sure CastTo works - even though it is only casting it to itself! Do the connection point thing... Create a connection object. and a plain "WithEvents". Stop the servers Gotta supply all 3 args with the last 2 being explicit variants to get the byref behaviour. default checking. 
Handle our safe-array test - if the passed value is a list of variants, compare against the actual values. pythoncom always returns arrays as tuples. an array of variants each with a specific type. and failures Test random access into container XXX - this appears broken by commit 08a14d4deb374eaa06378509cf44078ad467b9dc - We shouldn't need to do generated differently than dynamic. Note this has a second, default arg. counter.TestProperty = 1 Test Clone() and enum directly *sob* - sometimes this is a real iterator and sometimes not :/ Python doesn't fully implement this interface. Some vtable tests of the interface Any vtable interfaces marked as dual *should* be able to be correctly implemented as IDispatch. Now test it via vtable - use some C++ code to help here as Python can't do it directly yet. check we fail gracefully with None passed. and a real object. We once crashed creating our object with the native interface as the first IID specified. We must do it _after_ the tests, so that Python has already had the gateway registered from last run. Python can't actually _use_ this interface yet, so this is "expected". Any COM error is not. This inherits from IStream. This implements IStorage, specifying the IID as a string IDispatch should always work Python can't actually _use_ this interface yet, so this is "expected". Any COM error is not. Create a local server Request custom interfaces a number of times We used to crash running this the second time - do it a few times When we use the custom interface in the presence of a long-lived local server, i.e. a local server that is already running when we request an instance of our COM object, and remains afterwards, then after repeated requests to create an instance of our object the custom interface disappears -- i.e. QueryInterface fails with E_NOINTERFACE. Set the upper range of the following test to 2 to pass this test, i.e. TestQueryInterface(1,2) XXX - todo - Complete hack to crank threading support. Should NOT be necessary
5,515
en
0.885883
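The test file above drives a purpose-built COM server through win32com.client in both its late-bound (dynamic) and makepy-generated forms, and attaches event sinks with DispatchWithEvents / WithEvents. As a rough, self-contained illustration of that event-sink pattern against a commonly available server rather than the test server: the sketch below assumes Microsoft Excel is installed, and "Excel.Application" and the NewWorkbook event are Excel's names, not anything defined in the test file.

# Sketch of the DispatchWithEvents pattern, pointed at Excel rather than the
# PyCOMTest server. Requires pywin32 and an installed copy of Excel (assumption).
import time

import pythoncom
import win32com.client


class ApplicationEvents:
    """Event sink: handler names are the COM event names prefixed with 'On'."""

    def OnNewWorkbook(self, workbook):
        print("new workbook created:", workbook.Name)


def main():
    app = win32com.client.DispatchWithEvents("Excel.Application", ApplicationEvents)
    app.Visible = True
    app.Workbooks.Add()  # triggers the NewWorkbook event
    # Pump COM messages briefly so queued events reach the Python handler.
    for _ in range(20):
        pythoncom.PumpWaitingMessages()
        time.sleep(0.1)
    app.DisplayAlerts = False
    app.Quit()


if __name__ == "__main__":
    main()

The handler is passed as a plain class, not an instance; DispatchWithEvents merges it with the generated event base class, which is the same mechanism the test exercises with RandomEventHandler.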
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name # pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements """Read individual image files and perform augmentations.""" from __future__ import absolute_import, print_function import os import random import logging import json import warnings import numpy as np try: import cv2 except ImportError: cv2 = None from ..base import numeric_types from .. import ndarray as nd from ..ndarray import _internal from ..ndarray._internal import _cvimresize as imresize from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder from .. import io from .. import recordio def imread(filename, *args, **kwargs): """Read and decode an image to an NDArray. Note: `imread` uses OpenCV (not the CV2 Python library). MXNet must have been built with USE_OPENCV=1 for `imdecode` to work. Parameters ---------- filename : str Name of the image file to be loaded. flag : {0, 1}, default 1 1 for three channel color output. 0 for grayscale output. to_rgb : bool, default True True for RGB formatted output (MXNet default). False for BGR formatted output (OpenCV default). out : NDArray, optional Output buffer. Use `None` for automatic allocation. Returns ------- NDArray An `NDArray` containing the image. Example ------- >>> mx.img.imread("flower.jpg") <NDArray 224x224x3 @cpu(0)> Set `flag` parameter to 0 to get grayscale output >>> mx.img.imread("flower.jpg", flag=0) <NDArray 224x224x1 @cpu(0)> Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR) >>> mx.img.imread("flower.jpg", to_rgb=0) <NDArray 224x224x3 @cpu(0)> """ return _internal._cvimread(filename, *args, **kwargs) def imdecode(buf, *args, **kwargs): """Decode an image to an NDArray. Note: `imdecode` uses OpenCV (not the CV2 Python library). MXNet must have been built with USE_OPENCV=1 for `imdecode` to work. Parameters ---------- buf : str/bytes or numpy.ndarray Binary image data as string or numpy ndarray. flag : int, optional, default=1 1 for three channel color output. 0 for grayscale output. to_rgb : int, optional, default=1 1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default). out : NDArray, optional Output buffer. Use `None` for automatic allocation. Returns ------- NDArray An `NDArray` containing the image. Example ------- >>> with open("flower.jpg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image) >>> image <NDArray 224x224x3 @cpu(0)> Set `flag` parameter to 0 to get grayscale output >>> with open("flower.jpg", 'rb') as fp: ... str_image = fp.read() ... 
>>> image = mx.img.imdecode(str_image, flag=0) >>> image <NDArray 224x224x1 @cpu(0)> Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR) >>> with open("flower.jpg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image, to_rgb=0) >>> image <NDArray 224x224x3 @cpu(0)> """ if not isinstance(buf, nd.NDArray): buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8) return _internal._cvimdecode(buf, *args, **kwargs) def scale_down(src_size, size): """Scales down crop size if it's larger than image size. If width/height of the crop is larger than the width/height of the image, sets the width/height to the width/height of the image. Parameters ---------- src_size : tuple of int Size of the image in (width, height) format. size : tuple of int Size of the crop in (width, height) format. Returns ------- tuple of int A tuple containing the scaled crop size in (width, height) format. Example -------- >>> src_size = (640,480) >>> size = (720,120) >>> new_size = mx.img.scale_down(src_size, size) >>> new_size (640,106) """ w, h = size sw, sh = src_size if sh < h: w, h = float(w * sh) / h, sh if sw < w: w, h = sw, float(h * sw) / w return int(w), int(h) def _get_interp_method(interp, sizes=()): """Get the interpolation method for resize functions. The major purpose of this function is to wrap a random interp method selection and a auto-estimation method. Parameters ---------- interp : int interpolation method for all resizing operations Possible values: 0: Nearest Neighbors Interpolation. 1: Bilinear interpolation. 2: Area-based (resampling using pixel area relation). It may be a preferred method for image decimation, as it gives moire-free results. But when the image is zoomed, it is similar to the Nearest Neighbors method. (used by default). 3: Bicubic interpolation over 4x4 pixel neighborhood. 4: Lanczos interpolation over 8x8 pixel neighborhood. 9: Cubic for enlarge, area for shrink, bilinear for others 10: Random select from interpolation method metioned above. Note: When shrinking an image, it will generally look best with AREA-based interpolation, whereas, when enlarging an image, it will generally look best with Bicubic (slow) or Bilinear (faster but still looks OK). More details can be found in the documentation of OpenCV, please refer to http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. sizes : tuple of int (old_height, old_width, new_height, new_width), if None provided, auto(9) will return Area(2) anyway. Returns ------- int interp method from 0 to 4 """ if interp == 9: if sizes: assert len(sizes) == 4 oh, ow, nh, nw = sizes if nh > oh and nw > ow: return 2 elif nh < oh and nw < ow: return 3 else: return 1 else: return 2 if interp == 10: return random.randint(0, 4) if interp not in (0, 1, 2, 3, 4): raise ValueError('Unknown interp method %d' % interp) return interp def resize_short(src, size, interp=2): """Resizes shorter edge to size. Note: `resize_short` uses OpenCV (not the CV2 Python library). MXNet must have been built with OpenCV for `resize_short` to work. Resizes the original image by setting the shorter edge to size and setting the longer edge accordingly. Resizing function is called from OpenCV. Parameters ---------- src : NDArray The original image. size : int The length to be set for the shorter edge. interp : int, optional, default=2 Interpolation method used for resizing the image. Possible values: 0: Nearest Neighbors Interpolation. 1: Bilinear interpolation. 
2: Area-based (resampling using pixel area relation). It may be a preferred method for image decimation, as it gives moire-free results. But when the image is zoomed, it is similar to the Nearest Neighbors method. (used by default). 3: Bicubic interpolation over 4x4 pixel neighborhood. 4: Lanczos interpolation over 8x8 pixel neighborhood. 9: Cubic for enlarge, area for shrink, bilinear for others 10: Random select from interpolation method metioned above. Note: When shrinking an image, it will generally look best with AREA-based interpolation, whereas, when enlarging an image, it will generally look best with Bicubic (slow) or Bilinear (faster but still looks OK). More details can be found in the documentation of OpenCV, please refer to http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. Returns ------- NDArray An 'NDArray' containing the resized image. Example ------- >>> with open("flower.jpeg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image) >>> image <NDArray 2321x3482x3 @cpu(0)> >>> size = 640 >>> new_image = mx.img.resize_short(image, size) >>> new_image <NDArray 2321x3482x3 @cpu(0)> """ h, w, _ = src.shape if h > w: new_h, new_w = size * h // w, size else: new_h, new_w = size, size * w // h return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w))) def fixed_crop(src, x0, y0, w, h, size=None, interp=2): """Crop src at fixed location, and (optionally) resize it to size. Parameters ---------- src : NDArray Input image x0 : int Left boundary of the cropping area y0 : int Top boundary of the cropping area w : int Width of the cropping area h : int Height of the cropping area size : tuple of (w, h) Optional, resize to new size after cropping interp : int, optional, default=2 Interpolation method. See resize_short for details. Returns ------- NDArray An `NDArray` containing the cropped image. """ out = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2]))) if size is not None and (w, h) != size: sizes = (h, w, size[1], size[0]) out = imresize(out, *size, interp=_get_interp_method(interp, sizes)) return out def random_crop(src, size, interp=2): """Randomly crop `src` with `size` (width, height). Upsample result if `src` is smaller than `size`. Parameters ---------- src: Source image `NDArray` size: Size of the crop formatted as (width, height). If the `size` is larger than the image, then the source image is upsampled to `size` and returned. interp: int, optional, default=2 Interpolation method. See resize_short for details. Returns ------- NDArray An `NDArray` containing the cropped image. Tuple A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the original image and (width, height) are the dimensions of the cropped image. Example ------- >>> im = mx.nd.array(cv2.imread("flower.jpg")) >>> cropped_im, rect = mx.image.random_crop(im, (100, 100)) >>> print cropped_im <NDArray 100x100x1 @cpu(0)> >>> print rect (20, 21, 100, 100) """ h, w, _ = src.shape new_w, new_h = scale_down((w, h), size) x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size, interp) return out, (x0, y0, new_w, new_h) def center_crop(src, size, interp=2): """Crops the image `src` to the given `size` by trimming on all four sides and preserving the center of the image. Upsamples if `src` is smaller than `size`. .. note:: This requires MXNet to be compiled with USE_OPENCV. Parameters ---------- src : NDArray Binary source image data. 
size : list or tuple of int The desired output image size. interp : int, optional, default=2 Interpolation method. See resize_short for details. Returns ------- NDArray The cropped image. Tuple (x, y, width, height) where x, y are the positions of the crop in the original image and width, height the dimensions of the crop. Example ------- >>> with open("flower.jpg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.image.imdecode(str_image) >>> image <NDArray 2321x3482x3 @cpu(0)> >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500)) >>> cropped_image <NDArray 500x1000x3 @cpu(0)> >>> x, y, width, height (1241, 910, 1000, 500) """ h, w, _ = src.shape new_w, new_h = scale_down((w, h), size) x0 = int((w - new_w) / 2) y0 = int((h - new_h) / 2) out = fixed_crop(src, x0, y0, new_w, new_h, size, interp) return out, (x0, y0, new_w, new_h) def color_normalize(src, mean, std=None): """Normalize src with mean and std. Parameters ---------- src : NDArray Input image mean : NDArray RGB mean to be subtracted std : NDArray RGB standard deviation to be divided Returns ------- NDArray An `NDArray` containing the normalized image. """ if mean is not None: src -= mean if std is not None: src /= std return src def random_size_crop(src, size, area, ratio, interp=2, **kwargs): """Randomly crop src with size. Randomize area and aspect ratio. Parameters ---------- src : NDArray Input image size : tuple of (int, int) Size of the crop formatted as (width, height). area : float in (0, 1] or tuple of (float, float) If tuple, minimum area and maximum area to be maintained after cropping If float, minimum area to be maintained after cropping, maximum area is set to 1.0 ratio : tuple of (float, float) Aspect ratio range as (min_aspect_ratio, max_aspect_ratio) interp: int, optional, default=2 Interpolation method. See resize_short for details. Returns ------- NDArray An `NDArray` containing the cropped image. Tuple A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the original image and (width, height) are the dimensions of the cropped image. """ h, w, _ = src.shape src_area = h * w if 'min_area' in kwargs: warnings.warn('`min_area` is deprecated. Please use `area` instead.', DeprecationWarning) area = kwargs.pop('min_area') assert not kwargs, "unexpected keyword arguments for `random_size_crop`." if isinstance(area, numeric_types): area = (area, 1.0) for _ in range(10): target_area = random.uniform(area[0], area[1]) * src_area new_ratio = random.uniform(*ratio) new_w = int(round(np.sqrt(target_area * new_ratio))) new_h = int(round(np.sqrt(target_area / new_ratio))) if random.random() < 0.5: new_h, new_w = new_w, new_h if new_w <= w and new_h <= h: x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size, interp) return out, (x0, y0, new_w, new_h) # fall back to center_crop return center_crop(src, size, interp) class Augmenter(object): """Image Augmenter base class""" def __init__(self, **kwargs): self._kwargs = kwargs for k, v in self._kwargs.items(): if isinstance(v, nd.NDArray): v = v.asnumpy() if isinstance(v, np.ndarray): v = v.tolist() self._kwargs[k] = v def dumps(self): """Saves the Augmenter to string Returns ------- str JSON formatted string that describes the Augmenter. 
""" return json.dumps([self.__class__.__name__.lower(), self._kwargs]) def __call__(self, src): """Abstract implementation body""" raise NotImplementedError("Must override implementation.") class SequentialAug(Augmenter): """Composing a sequential augmenter list. Parameters ---------- ts : list of augmenters A series of augmenters to be applied in sequential order. """ def __init__(self, ts): super(SequentialAug, self).__init__() self.ts = ts def dumps(self): """Override the default to avoid duplicate dump.""" return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]] def __call__(self, src): """Augmenter body""" for aug in self.ts: src = aug(src) return src class ResizeAug(Augmenter): """Make resize shorter edge to size augmenter. Parameters ---------- size : int The length to be set for the shorter edge. interp : int, optional, default=2 Interpolation method. See resize_short for details. """ def __init__(self, size, interp=2): super(ResizeAug, self).__init__(size=size, interp=interp) self.size = size self.interp = interp def __call__(self, src): """Augmenter body""" return resize_short(src, self.size, self.interp) class ForceResizeAug(Augmenter): """Force resize to size regardless of aspect ratio Parameters ---------- size : tuple of (int, int) The desired size as in (width, height) interp : int, optional, default=2 Interpolation method. See resize_short for details. """ def __init__(self, size, interp=2): super(ForceResizeAug, self).__init__(size=size, interp=interp) self.size = size self.interp = interp def __call__(self, src): """Augmenter body""" sizes = (src.shape[0], src.shape[1], self.size[1], self.size[0]) return imresize(src, *self.size, interp=_get_interp_method(self.interp, sizes)) class RandomCropAug(Augmenter): """Make random crop augmenter Parameters ---------- size : int The length to be set for the shorter edge. interp : int, optional, default=2 Interpolation method. See resize_short for details. """ def __init__(self, size, interp=2): super(RandomCropAug, self).__init__(size=size, interp=interp) self.size = size self.interp = interp def __call__(self, src): """Augmenter body""" return random_crop(src, self.size, self.interp)[0] class RandomSizedCropAug(Augmenter): """Make random crop with random resizing and random aspect ratio jitter augmenter. Parameters ---------- size : tuple of (int, int) Size of the crop formatted as (width, height). area : float in (0, 1] or tuple of (float, float) If tuple, minimum area and maximum area to be maintained after cropping If float, minimum area to be maintained after cropping, maximum area is set to 1.0 ratio : tuple of (float, float) Aspect ratio range as (min_aspect_ratio, max_aspect_ratio) interp: int, optional, default=2 Interpolation method. See resize_short for details. """ def __init__(self, size, area, ratio, interp=2, **kwargs): super(RandomSizedCropAug, self).__init__(size=size, area=area, ratio=ratio, interp=interp) self.size = size if 'min_area' in kwargs: warnings.warn('`min_area` is deprecated. Please use `area` instead.', DeprecationWarning) self.area = kwargs.pop('min_area') else: self.area = area self.ratio = ratio self.interp = interp assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`." def __call__(self, src): """Augmenter body""" return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0] class CenterCropAug(Augmenter): """Make center crop augmenter. Parameters ---------- size : list or tuple of int The desired output image size. 
interp : int, optional, default=2 Interpolation method. See resize_short for details. """ def __init__(self, size, interp=2): super(CenterCropAug, self).__init__(size=size, interp=interp) self.size = size self.interp = interp def __call__(self, src): """Augmenter body""" return center_crop(src, self.size, self.interp)[0] class RandomOrderAug(Augmenter): """Apply list of augmenters in random order Parameters ---------- ts : list of augmenters A series of augmenters to be applied in random order """ def __init__(self, ts): super(RandomOrderAug, self).__init__() self.ts = ts def dumps(self): """Override the default to avoid duplicate dump.""" return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]] def __call__(self, src): """Augmenter body""" random.shuffle(self.ts) for t in self.ts: src = t(src) return src class BrightnessJitterAug(Augmenter): """Random brightness jitter augmentation. Parameters ---------- brightness : float The brightness jitter ratio range, [0, 1] """ def __init__(self, brightness): super(BrightnessJitterAug, self).__init__(brightness=brightness) self.brightness = brightness def __call__(self, src): """Augmenter body""" alpha = 1.0 + random.uniform(-self.brightness, self.brightness) src *= alpha return src class ContrastJitterAug(Augmenter): """Random contrast jitter augmentation. Parameters ---------- contrast : float The contrast jitter ratio range, [0, 1] """ def __init__(self, contrast): super(ContrastJitterAug, self).__init__(contrast=contrast) self.contrast = contrast self.coef = nd.array([[[0.299, 0.587, 0.114]]]) def __call__(self, src): """Augmenter body""" alpha = 1.0 + random.uniform(-self.contrast, self.contrast) gray = src * self.coef gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray) src *= alpha src += gray return src class SaturationJitterAug(Augmenter): """Random saturation jitter augmentation. Parameters ---------- saturation : float The saturation jitter ratio range, [0, 1] """ def __init__(self, saturation): super(SaturationJitterAug, self).__init__(saturation=saturation) self.saturation = saturation self.coef = nd.array([[[0.299, 0.587, 0.114]]]) def __call__(self, src): """Augmenter body""" alpha = 1.0 + random.uniform(-self.saturation, self.saturation) gray = src * self.coef gray = nd.sum(gray, axis=2, keepdims=True) gray *= (1.0 - alpha) src *= alpha src += gray return src class HueJitterAug(Augmenter): """Random hue jitter augmentation. Parameters ---------- hue : float The hue jitter ratio range, [0, 1] """ def __init__(self, hue): super(HueJitterAug, self).__init__(hue=hue) self.hue = hue self.tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321], [0.211, -0.523, 0.311]]) self.ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647], [1.0, -1.107, 1.705]]) def __call__(self, src): """Augmenter body. Using approximate linear transfomation described in: https://beesbuzz.biz/code/hsv_color_transforms.php """ alpha = random.uniform(-self.hue, self.hue) u = np.cos(alpha * np.pi) w = np.sin(alpha * np.pi) bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]]) t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T src = nd.dot(src, nd.array(t)) return src class ColorJitterAug(RandomOrderAug): """Apply random brightness, contrast and saturation jitter in random order. 
Parameters ---------- brightness : float The brightness jitter ratio range, [0, 1] contrast : float The contrast jitter ratio range, [0, 1] saturation : float The saturation jitter ratio range, [0, 1] """ def __init__(self, brightness, contrast, saturation): ts = [] if brightness > 0: ts.append(BrightnessJitterAug(brightness)) if contrast > 0: ts.append(ContrastJitterAug(contrast)) if saturation > 0: ts.append(SaturationJitterAug(saturation)) super(ColorJitterAug, self).__init__(ts) class LightingAug(Augmenter): """Add PCA based noise. Parameters ---------- alphastd : float Noise level eigval : 3x1 np.array Eigen values eigvec : 3x3 np.array Eigen vectors """ def __init__(self, alphastd, eigval, eigvec): super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec) self.alphastd = alphastd self.eigval = eigval self.eigvec = eigvec def __call__(self, src): """Augmenter body""" alpha = np.random.normal(0, self.alphastd, size=(3,)) rgb = np.dot(self.eigvec * alpha, self.eigval) src += nd.array(rgb) return src class ColorNormalizeAug(Augmenter): """Mean and std normalization. Parameters ---------- mean : NDArray RGB mean to be subtracted std : NDArray RGB standard deviation to be divided """ def __init__(self, mean, std): super(ColorNormalizeAug, self).__init__(mean=mean, std=std) self.mean = mean if mean is None or isinstance(mean, nd.NDArray) else nd.array(mean) self.std = std if std is None or isinstance(std, nd.NDArray) else nd.array(std) def __call__(self, src): """Augmenter body""" return color_normalize(src, self.mean, self.std) class RandomGrayAug(Augmenter): """Randomly convert to gray image. Parameters ---------- p : float Probability to convert to grayscale """ def __init__(self, p): super(RandomGrayAug, self).__init__(p=p) self.p = p self.mat = nd.array([[0.21, 0.21, 0.21], [0.72, 0.72, 0.72], [0.07, 0.07, 0.07]]) def __call__(self, src): """Augmenter body""" if random.random() < self.p: src = nd.dot(src, self.mat) return src class HorizontalFlipAug(Augmenter): """Random horizontal flip. Parameters ---------- p : float Probability to flip image horizontally """ def __init__(self, p): super(HorizontalFlipAug, self).__init__(p=p) self.p = p def __call__(self, src): """Augmenter body""" if random.random() < self.p: src = nd.flip(src, axis=1) return src class CastAug(Augmenter): """Cast to float32""" def __init__(self, typ='float32'): super(CastAug, self).__init__(type=typ) self.typ = typ def __call__(self, src): """Augmenter body""" src = src.astype(self.typ) return src def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False, mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0, pca_noise=0, rand_gray=0, inter_method=2): """Creates an augmenter list. 
Parameters ---------- data_shape : tuple of int Shape for output data resize : int Resize shorter edge if larger than 0 at the begining rand_crop : bool Whether to enable random cropping other than center crop rand_resize : bool Whether to enable random sized cropping, require rand_crop to be enabled rand_gray : float [0, 1], probability to convert to grayscale for all channels, the number of channels will not be reduced to 1 rand_mirror : bool Whether to apply horizontal flip to image with probability 0.5 mean : np.ndarray or None Mean pixel values for [r, g, b] std : np.ndarray or None Standard deviations for [r, g, b] brightness : float Brightness jittering range (percent) contrast : float Contrast jittering range (percent) saturation : float Saturation jittering range (percent) hue : float Hue jittering range (percent) pca_noise : float Pca noise level (percent) inter_method : int, default=2(Area-based) Interpolation method for all resizing operations Possible values: 0: Nearest Neighbors Interpolation. 1: Bilinear interpolation. 2: Area-based (resampling using pixel area relation). It may be a preferred method for image decimation, as it gives moire-free results. But when the image is zoomed, it is similar to the Nearest Neighbors method. (used by default). 3: Bicubic interpolation over 4x4 pixel neighborhood. 4: Lanczos interpolation over 8x8 pixel neighborhood. 9: Cubic for enlarge, area for shrink, bilinear for others 10: Random select from interpolation method metioned above. Note: When shrinking an image, it will generally look best with AREA-based interpolation, whereas, when enlarging an image, it will generally look best with Bicubic (slow) or Bilinear (faster but still looks OK). Examples -------- >>> # An example of creating multiple augmenters >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True, ... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05, ... saturation=0.125, pca_noise=0.05, inter_method=10) >>> # dump the details >>> for aug in augs: ... aug.dumps() """ auglist = [] if resize > 0: auglist.append(ResizeAug(resize, inter_method)) crop_size = (data_shape[2], data_shape[1]) if rand_resize: assert rand_crop auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method)) elif rand_crop: auglist.append(RandomCropAug(crop_size, inter_method)) else: auglist.append(CenterCropAug(crop_size, inter_method)) if rand_mirror: auglist.append(HorizontalFlipAug(0.5)) auglist.append(CastAug()) if brightness or contrast or saturation: auglist.append(ColorJitterAug(brightness, contrast, saturation)) if hue: auglist.append(HueJitterAug(hue)) if pca_noise > 0: eigval = np.array([55.46, 4.794, 1.148]) eigvec = np.array([[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]) auglist.append(LightingAug(pca_noise, eigval, eigvec)) if rand_gray > 0: auglist.append(RandomGrayAug(rand_gray)) if mean is True: mean = nd.array([123.68, 116.28, 103.53]) elif mean is not None: assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3] if std is True: std = nd.array([58.395, 57.12, 57.375]) elif std is not None: assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3] if mean is not None or std is not None: auglist.append(ColorNormalizeAug(mean, std)) return auglist class ImageIter(io.DataIter): """Image data iterator with a large number of augmentation choices. This iterator supports reading from both .rec files and raw image files. 
To load input images from .rec files, use `path_imgrec` parameter and to load from raw image files, use `path_imglist` and `path_root` parameters. To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter. Parameters ---------- batch_size : int Number of examples per batch. data_shape : tuple Data shape in (channels, height, width) format. For now, only RGB image with 3 channels is supported. label_width : int, optional Number of labels per example. The default label width is 1. path_imgrec : str Path to image record file (.rec). Created with tools/im2rec.py or bin/im2rec. path_imglist : str Path to image list (.lst). Created with tools/im2rec.py or with custom script. Format: Tab separated record of index, one or more labels and relative_path_from_root. imglist: list A list of images with the label(s). Each item is a list [imagelabel: float or list of float, imgpath]. path_root : str Root folder of image files. path_imgidx : str Path to image index file. Needed for partition and shuffling when using .rec source. shuffle : bool Whether to shuffle all images at the start of each iteration or not. Can be slow for HDD. part_index : int Partition index. num_parts : int Total number of partitions. data_name : str Data name for provided symbols. label_name : str Label name for provided symbols. dtype : str Label data type. Default: float32. Other options: int32, int64, float64 last_batch_handle : str, optional How to handle the last batch. This parameter can be 'pad'(default), 'discard' or 'roll_over'. If 'pad', the last batch will be padded with data starting from the begining If 'discard', the last batch will be discarded If 'roll_over', the remaining elements will be rolled over to the next iteration kwargs : ... More arguments for creating augmenter. See mx.image.CreateAugmenter. 
""" def __init__(self, batch_size, data_shape, label_width=1, path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None, shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None, data_name='data', label_name='softmax_label', dtype='float32', last_batch_handle='pad', **kwargs): super(ImageIter, self).__init__() assert path_imgrec or path_imglist or (isinstance(imglist, list)) assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported' num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1) logging.info('Using %s threads for decoding...', str(num_threads)) logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a' ' larger number to use more threads.') class_name = self.__class__.__name__ if path_imgrec: logging.info('%s: loading recordio %s...', class_name, path_imgrec) if path_imgidx: self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type self.imgidx = list(self.imgrec.keys) else: self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type self.imgidx = None else: self.imgrec = None if path_imglist: logging.info('%s: loading image list %s...', class_name, path_imglist) with open(path_imglist) as fin: imglist = {} imgkeys = [] for line in iter(fin.readline, ''): line = line.strip().split('\t') label = nd.array(line[1:-1], dtype=dtype) key = int(line[0]) imglist[key] = (label, line[-1]) imgkeys.append(key) self.imglist = imglist elif isinstance(imglist, list): logging.info('%s: loading image list...', class_name) result = {} imgkeys = [] index = 1 for img in imglist: key = str(index) # pylint: disable=redefined-variable-type index += 1 if len(img) > 2: label = nd.array(img[:-1], dtype=dtype) elif isinstance(img[0], numeric_types): label = nd.array([img[0]], dtype=dtype) else: label = nd.array(img[0], dtype=dtype) result[key] = (label, img[-1]) imgkeys.append(str(key)) self.imglist = result else: self.imglist = None self.path_root = path_root self.check_data_shape(data_shape) self.provide_data = [(data_name, (batch_size,) + data_shape)] if label_width > 1: self.provide_label = [(label_name, (batch_size, label_width))] else: self.provide_label = [(label_name, (batch_size,))] self.batch_size = batch_size self.data_shape = data_shape self.label_width = label_width self.shuffle = shuffle if self.imgrec is None: self.seq = imgkeys elif shuffle or num_parts > 1: assert self.imgidx is not None self.seq = self.imgidx else: self.seq = None if num_parts > 1: assert part_index < num_parts N = len(self.seq) C = N // num_parts self.seq = self.seq[part_index * C:(part_index + 1) * C] if aug_list is None: self.auglist = CreateAugmenter(data_shape, **kwargs) else: self.auglist = aug_list self.cur = 0 self._allow_read = True self.last_batch_handle = last_batch_handle self.num_image = len(self.seq) if self.seq is not None else None self._cache_data = None self._cache_label = None self._cache_idx = None self.reset() def reset(self): """Resets the iterator to the beginning of the data.""" if self.seq is not None and self.shuffle: random.shuffle(self.seq) if self.last_batch_handle != 'roll_over' or \ self._cache_data is None: if self.imgrec is not None: self.imgrec.reset() self.cur = 0 if self._allow_read is False: self._allow_read = True def hard_reset(self): """Resets the iterator and ignore roll over data""" if self.seq is not None and self.shuffle: random.shuffle(self.seq) if self.imgrec is not None: self.imgrec.reset() self.cur = 0 self._allow_read 
= True self._cache_data = None self._cache_label = None self._cache_idx = None def next_sample(self): """Helper function for reading in next sample.""" if self._allow_read is False: raise StopIteration if self.seq is not None: if self.cur < self.num_image: idx = self.seq[self.cur] else: if self.last_batch_handle != 'discard': self.cur = 0 raise StopIteration self.cur += 1 if self.imgrec is not None: s = self.imgrec.read_idx(idx) header, img = recordio.unpack(s) if self.imglist is None: return header.label, img else: return self.imglist[idx][0], img else: label, fname = self.imglist[idx] return label, self.read_image(fname) else: s = self.imgrec.read() if s is None: if self.last_batch_handle != 'discard': self.imgrec.reset() raise StopIteration header, img = recordio.unpack(s) return header.label, img def _batchify(self, batch_data, batch_label, start=0): """Helper function for batchifying data""" i = start batch_size = self.batch_size try: while i < batch_size: label, s = self.next_sample() data = self.imdecode(s) try: self.check_valid_image(data) except RuntimeError as e: logging.debug('Invalid image, skipping: %s', str(e)) continue data = self.augmentation_transform(data) assert i < batch_size, 'Batch size must be multiples of augmenter output length' batch_data[i] = self.postprocess_data(data) batch_label[i] = label i += 1 except StopIteration: if not i: raise StopIteration return i def next(self): """Returns the next batch of data.""" batch_size = self.batch_size c, h, w = self.data_shape # if last batch data is rolled over if self._cache_data is not None: # check both the data and label have values assert self._cache_label is not None, "_cache_label didn't have values" assert self._cache_idx is not None, "_cache_idx didn't have values" batch_data = self._cache_data batch_label = self._cache_label i = self._cache_idx # clear the cache data else: batch_data = nd.empty((batch_size, c, h, w)) batch_label = nd.empty(self.provide_label[0][1]) i = self._batchify(batch_data, batch_label) # calculate the padding pad = batch_size - i # handle padding for the last batch if pad != 0: if self.last_batch_handle == 'discard': raise StopIteration # if the option is 'roll_over', throw StopIteration and cache the data elif self.last_batch_handle == 'roll_over' and \ self._cache_data is None: self._cache_data = batch_data self._cache_label = batch_label self._cache_idx = i raise StopIteration else: _ = self._batchify(batch_data, batch_label, i) if self.last_batch_handle == 'pad': self._allow_read = False else: self._cache_data = None self._cache_label = None self._cache_idx = None return io.DataBatch([batch_data], [batch_label], pad=pad) def check_data_shape(self, data_shape): """Checks if the input data shape is valid""" if not len(data_shape) == 3: raise ValueError('data_shape should have length 3, with dimensions CxHxW') if not data_shape[0] == 3: raise ValueError('This iterator expects inputs to have 3 channels.') def check_valid_image(self, data): """Checks if the input data is valid""" if len(data[0].shape) == 0: raise RuntimeError('Data shape is wrong') def imdecode(self, s): """Decodes a string or byte string to an NDArray. 
See mx.img.imdecode for more details.""" def locate(): """Locate the image file/index if decode fails.""" if self.seq is not None: idx = self.seq[(self.cur % self.num_image) - 1] else: idx = (self.cur % self.num_image) - 1 if self.imglist is not None: _, fname = self.imglist[idx] msg = "filename: {}".format(fname) else: msg = "index: {}".format(idx) return "Broken image " + msg try: img = imdecode(s) except Exception as e: raise RuntimeError("{}, {}".format(locate(), e)) return img def read_image(self, fname): """Reads an input image `fname` and returns the decoded raw bytes. Example usage: ---------- >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes. """ with open(os.path.join(self.path_root, fname), 'rb') as fin: img = fin.read() return img def augmentation_transform(self, data): """Transforms input data with specified augmentation.""" for aug in self.auglist: data = aug(data) return data def postprocess_data(self, datum): """Final postprocessing step before image is loaded into the batch.""" return nd.transpose(datum, axes=(2, 0, 1))
python/mxnet/image/image.py
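A minimal usage sketch for the API above, wiring CreateAugmenter into an ImageIter; the .rec/.idx paths and the 224x224 shape are placeholders, not taken from the source.

import mxnet as mx

# Build an augmenter list: resize the shorter edge, random crop + mirror,
# color jitter, then mean/std normalization (same knobs as documented above).
augs = mx.image.CreateAugmenter(data_shape=(3, 224, 224), resize=256,
                                rand_crop=True, rand_mirror=True,
                                mean=True, std=True,
                                brightness=0.125, contrast=0.125, saturation=0.125)

# Feed the augmenter list to an ImageIter reading from a RecordIO pack.
train_iter = mx.image.ImageIter(batch_size=32,
                                data_shape=(3, 224, 224),
                                path_imgrec='data/train.rec',   # placeholder path
                                path_imgidx='data/train.idx',   # placeholder path
                                shuffle=True,
                                aug_list=augs)

for batch in train_iter:
    data, label = batch.data[0], batch.label[0]   # (32, 3, 224, 224) and (32,)
    break
train_iter.reset()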
# coding: utf-8 """ OpenShift API (with Kubernetes) OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information. 
OpenAPI spec version: v3.6.0-alpha.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import os import sys import unittest import openshift.client from kubernetes.client.rest import ApiException from openshift.client.models.v1beta1_cpu_target_utilization import V1beta1CPUTargetUtilization class TestV1beta1CPUTargetUtilization(unittest.TestCase): """ V1beta1CPUTargetUtilization unit test stubs """ def setUp(self): pass def tearDown(self): pass def testV1beta1CPUTargetUtilization(self): """ Test V1beta1CPUTargetUtilization """ model = openshift.client.models.v1beta1_cpu_target_utilization.V1beta1CPUTargetUtilization() if __name__ == '__main__': unittest.main()
openshift/test/test_v1beta1_cpu_target_utilization.py
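A hedged round-trip sketch for the generated model exercised by the stub above; target_percentage is assumed from the upstream extensions/v1beta1 HPA schema and should be checked against the generated client before use.

import unittest

from openshift.client.models.v1beta1_cpu_target_utilization import V1beta1CPUTargetUtilization


class TestV1beta1CPUTargetUtilizationRoundTrip(unittest.TestCase):

    def test_round_trip(self):
        # assumed field name: target_percentage
        model = V1beta1CPUTargetUtilization(target_percentage=80)
        self.assertEqual(model.target_percentage, 80)
        # swagger-codegen models expose to_dict()/to_str() helpers
        self.assertIn('target_percentage', model.to_dict())


if __name__ == '__main__':
    unittest.main()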
import warnings import mmcv import numpy as np import torch from torch.nn.modules.utils import _pair from mmdet.core.anchor.builder import ANCHOR_GENERATORS from mmdet.core.anchor import AnchorGenerator @ANCHOR_GENERATORS.register_module(force=True) class SSDAnchorGenerator(AnchorGenerator): """Anchor generator for SSD Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. basesize_ratio_range (tuple(float)): Ratio range of anchors. input_size (int): Size of feature map, 300 for SSD300, 512 for SSD512. scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. It is always set to be False in SSD. """ def __init__(self, strides, ratios, basesize_ratio_range, input_size=300, scale_major=True): assert len(strides) == len(ratios) assert mmcv.is_tuple_of(basesize_ratio_range, float) self.strides = [_pair(stride) for stride in strides] self.input_size = max(input_size) if isinstance(input_size, (list,tuple)) else input_size self.centers = [(stride[0] / 2., stride[1] / 2.) for stride in self.strides] self.basesize_ratio_range = basesize_ratio_range # calculate anchor ratios and sizes min_ratio, max_ratio = basesize_ratio_range min_ratio = int(min_ratio * 100) max_ratio = int(max_ratio * 100) step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2)) min_sizes = [] max_sizes = [] for ratio in range(int(min_ratio), int(max_ratio) + 1, step): min_sizes.append(int(self.input_size * ratio / 100)) max_sizes.append(int(self.input_size * (ratio + step) / 100)) if self.input_size == 300: if basesize_ratio_range[0] == 0.15: # SSD300 COCO min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) elif basesize_ratio_range[0] == 0.2: # SSD300 VOC min_sizes.insert(0, int(self.input_size * 10 / 100)) max_sizes.insert(0, int(self.input_size * 20 / 100)) else: min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4)) max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0])) warnings.warn( 'according to original SSD, basesize_ratio_range[0] should be either 0.15' 'or 0.2 when input_size is 300, got ' f'{basesize_ratio_range[0]}.') elif self.input_size == 512: if basesize_ratio_range[0] == 0.1: # SSD512 COCO min_sizes.insert(0, int(self.input_size * 4 / 100)) max_sizes.insert(0, int(self.input_size * 10 / 100)) elif basesize_ratio_range[0] == 0.15: # SSD512 VOC min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) else: min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4)) max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0])) warnings.warn('according to original SSD, basesize_ratio_range[0] should be either 0.1' 'or 0.15 when input_size is 512, got' f' {basesize_ratio_range[0]}.') else: if basesize_ratio_range[0] == 0.1: # SSD512 COCO min_sizes.insert(0, int(self.input_size * 4 / 100)) max_sizes.insert(0, int(self.input_size * 10 / 100)) else: min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4)) max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0])) anchor_ratios = [] anchor_scales = [] for k in range(len(self.strides)): scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] anchor_ratio = [1.] 
for r in ratios[k]: anchor_ratio += [1 / r, r] # 4 or 6 ratio anchor_ratios.append(torch.Tensor(anchor_ratio)) anchor_scales.append(torch.Tensor(scales)) self.base_sizes = min_sizes self.scales = anchor_scales self.ratios = anchor_ratios self.scale_major = scale_major self.center_offset = 0 self.base_anchors = self.gen_base_anchors() # added for proto export self.min_sizes = min_sizes self.max_sizes = max_sizes def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. """ multi_level_base_anchors = [] for i, base_size in enumerate(self.base_sizes): base_anchors = self.gen_single_level_base_anchors( base_size, scales=self.scales[i], ratios=self.ratios[i], center=self.centers[i]) indices = list(range(len(self.ratios[i]))) indices.insert(1, len(indices)) base_anchors = torch.index_select(base_anchors, 0, torch.LongTensor(indices)) multi_level_base_anchors.append(base_anchors) return multi_level_base_anchors def __repr__(self): """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}input_size={self.input_size},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}num_levels={self.num_levels},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}basesize_ratio_range=' repr_str += f'{self.basesize_ratio_range})' return repr_str
xmmdet/core/anchor/anchor_generator.py
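A minimal sketch instantiating the generator above with the usual SSD300-COCO settings; the strides/ratios values are the common mmdet configuration, not taken from this file, and the import path simply mirrors the file location shown.

from xmmdet.core.anchor.anchor_generator import SSDAnchorGenerator

anchor_generator = SSDAnchorGenerator(
    strides=[8, 16, 32, 64, 100, 300],
    ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]],
    basesize_ratio_range=(0.15, 0.9),
    input_size=300,
    scale_major=False)

# Base anchors are generated in __init__; one (num_anchors, 4) tensor per level,
# e.g. 4 anchors on the first/last levels and 6 on the middle ones.
for level, anchors in enumerate(anchor_generator.base_anchors):
    print(level, anchors.shape)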
from __future__ import absolute_import

"""This module offers a display and interaction frontend with Qt.

It will try importing PySide first, and if that fails PyQt. The code
will constantly be tested with both bindings."""

from .displaywidgets import DisplayWidget, NewDisplayWidget
from .control import ControlWidget
#from .mainwin import ZasimMainWindow

display_objects = []


class ZasimDisplay(object):

    simulator = None
    """The `Simulator` object for this display."""

    display = None
    """The `BaseDisplayWidget` in use."""

    window = None
    """The `ZasimMainWindow` instance in use."""

    control = None
    """The `ControlWidget` in use."""

    def __init__(self, simulator):
        """Instantiate a Display (that is: a window with a display widget
        and simulation controls) from a simulator.

        :param simulator: The simulator to use."""
        self.simulator = simulator

        if not self.display:
            if 'tiles' in self.simulator.palette_info:
                self.display = NewDisplayWidget(self.simulator)
            else:
                self.display = DisplayWidget(self.simulator)

        if self.control is None:
            self.control = ControlWidget(self.simulator)

        from .mainwin import ZasimMainWindow
        self.window = ZasimMainWindow(self.simulator, self.display, self.control)
        display_objects.append(self.window)

        self.window.show()

    def set_scale(self, scale):
        """Sets the scale of the display component."""
        self.display.set_scale(scale)
zasim/gui/display.py
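A hedged sketch of driving ZasimDisplay; make_simulator() stands in for whatever zasim simulator constructor is actually used, and the Qt import follows the PySide-first convention mentioned in the module docstring.

import sys

from PySide.QtGui import QApplication   # the package tries PySide first, then PyQt

from zasim.gui.display import ZasimDisplay

app = QApplication(sys.argv)

simulator = make_simulator()        # hypothetical factory returning a zasim Simulator
display = ZasimDisplay(simulator)   # opens a ZasimMainWindow with display widget + controls
display.set_scale(4)                # enlarge the rendering of each cell

sys.exit(app.exec_())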
import numpy from chainer.backends import cuda from chainer import function_node from chainer.utils import type_check class ResizeImages3D(function_node.FunctionNode): def __init__(self, output_shape): self.out_H = output_shape[0] self.out_W = output_shape[1] self.out_D = output_shape[2] def check_type_forward(self, in_types): n_in = in_types.size() type_check.expect(n_in == 1) x_type = in_types[0] type_check.expect( x_type.dtype.char == 'f', x_type.ndim == 5 ) def forward(self, inputs): x, = inputs xp = cuda.get_array_module(x) B, C, H, W, D = x.shape u_1d = xp.linspace(0, W - 1, num=self.out_W) v_1d = xp.linspace(0, H - 1, num=self.out_H) t_1d = xp.linspace(0, D - 1, num=self.out_D) grid = xp.meshgrid(u_1d, v_1d, t_1d) u = grid[0].ravel() v = grid[1].ravel() t = grid[2].ravel() u0 = xp.floor(u).astype(numpy.int32) u0 = u0.clip(0, W - 2) u1 = u0 + 1 v0 = xp.floor(v).astype(numpy.int32) v0 = v0.clip(0, H - 2) v1 = v0 + 1 t0 = xp.floor(t).astype(numpy.int32) t0 = t0.clip(0, D - 2) t1 = t0 + 1 # weights w1 = (u1 - u) * (v1 - v) * (t1 - t) w2 = (u - u0) * (v1 - v) * (t1 - t) w3 = (u1 - u) * (v - v0) * (t1 - t) w4 = (u - u0) * (v - v0) * (t1 - t) w5 = (u1 - u) * (v1 - v) * (t - t0) w6 = (u - u0) * (v1 - v) * (t - t0) w7 = (u1 - u) * (v - v0) * (t - t0) w8 = (u - u0) * (v - v0) * (t - t0) w1 = w1.astype(x.dtype) w2 = w2.astype(x.dtype) w3 = w3.astype(x.dtype) w4 = w4.astype(x.dtype) w5 = w5.astype(x.dtype) w6 = w6.astype(x.dtype) w7 = w7.astype(x.dtype) w8 = w8.astype(x.dtype) y = (w1[None, None, :] * x[:, :, v0, u0, t0] + w2[None, None, :] * x[:, :, v0, u1, t0] + w3[None, None, :] * x[:, :, v1, u0, t0] + w4[None, None, :] * x[:, :, v1, u1, t0] + w5[None, None, :] * x[:, :, v0, u0, t1] + w6[None, None, :] * x[:, :, v0, u1, t1] + w7[None, None, :] * x[:, :, v1, u0, t1] + w8[None, None, :] * x[:, :, v1, u1, t1]) y = y.reshape(B, C, self.out_H, self.out_W, self.out_D) return y, def backward(self, indexes, grad_outputs): return ResizeImagesGrad3D( self.inputs[0].shape, (self.out_H, self.out_W, self.out_D)).apply(grad_outputs) class ResizeImagesGrad3D(function_node.FunctionNode): def __init__(self, input_shape, output_shape): self.out_H = output_shape[0] self.out_W = output_shape[1] self.out_D = output_shape[2] self.input_shape = input_shape def check_type_forward(self, in_types): n_in = in_types.size() type_check.expect(n_in == 1) x_type = in_types[0] type_check.expect( x_type.dtype.char == 'f', x_type.ndim == 5 ) def forward(self, inputs): xp = cuda.get_array_module(*inputs) gy, = inputs B, C, H, W, D = self.input_shape u_1d = xp.linspace(0, W - 1, num=self.out_W) v_1d = xp.linspace(0, H - 1, num=self.out_H) t_1d = xp.linspace(0, D - 1, num=self.out_D) grid = xp.meshgrid(u_1d, v_1d, t_1d) u = grid[0].ravel() v = grid[1].ravel() t = grid[2].ravel() u0 = xp.floor(u).astype(numpy.int32) u0 = u0.clip(0, W - 2) u1 = u0 + 1 v0 = xp.floor(v).astype(numpy.int32) v0 = v0.clip(0, H - 2) v1 = v0 + 1 t0 = xp.floor(t).astype(numpy.int32) t0 = t0.clip(0, D - 2) t1 = t0 + 1 # weights wu0 = u - u0 wu1 = u1 - u wv0 = v - v0 wv1 = v1 - v wt0 = t - t0 wt1 = t1 - t wu0 = wu0.astype(gy.dtype) wu1 = wu1.astype(gy.dtype) wv0 = wv0.astype(gy.dtype) wv1 = wv1.astype(gy.dtype) wt0 = wt0.astype(gy.dtype) wt1 = wt1.astype(gy.dtype) # --- gx if xp is numpy: scatter_add = numpy.add.at else: scatter_add = cuda.cupyx.scatter_add gx = xp.zeros(self.input_shape, dtype=gy.dtype) gy = gy.reshape(B, C, -1) scatter_add(gx, (slice(None), slice(None), v0, u0, t0), gy * wu1 * wv1 * wt1) scatter_add(gx, (slice(None), slice(None), v0, u1, 
t0), gy * wu0 * wv1 * wt1) scatter_add(gx, (slice(None), slice(None), v1, u0, t0), gy * wu1 * wv0 * wt1) scatter_add(gx, (slice(None), slice(None), v1, u1, t0), gy * wu0 * wv0 * wt1) scatter_add(gx, (slice(None), slice(None), v0, u0, t1), gy * wu1 * wv1 * wt0) scatter_add(gx, (slice(None), slice(None), v0, u1, t1), gy * wu0 * wv1 * wt0) scatter_add(gx, (slice(None), slice(None), v1, u0, t1), gy * wu1 * wv0 * wt0) scatter_add(gx, (slice(None), slice(None), v1, u1, t1), gy * wu0 * wv0 * wt0) return gx, def backward(self, indexes, grad_outputs): return ResizeImages3D( (self.out_H, self.out_W, self.out_D)).apply(grad_outputs) def resize_images_3d(x, output_shape): """Resize images to the given shape. This function resizes 3D data to :obj:`output_shape`. Currently, only bilinear interpolation is supported as the sampling method. Notatition: here is a notation for dimensionalities. - :math:`n` is the batch size. - :math:`c_I` is the number of the input channels. - :math:`h`, :math:`w` and :math:`d` are the height, width and depth of the input image, respectively. - :math:`h_O`, :math:`w_O` and :math:`d_0` are the height, width and depth of the output image. Args: x (~chainer.Variable): Input variable of shape :math:`(n, c_I, h, w, d)`. output_shape (tuple): This is a tuple of length 3 whose values are :obj:`(h_O, w_O, d_O)`. Returns: ~chainer.Variable: Resized image whose shape is \ :math:`(n, c_I, h_O, w_O, d_O)`. """ return ResizeImages3D(output_shape).apply((x,))[0]
src/links/model/resize_images_3d.py
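A small usage sketch for resize_images_3d as defined above; the import path simply mirrors the file location shown.

import numpy as np
import chainer

from src.links.model.resize_images_3d import resize_images_3d

# Two single-channel 8x8x8 volumes, upsampled to 16x16x16.
x = chainer.Variable(np.random.rand(2, 1, 8, 8, 8).astype(np.float32))
y = resize_images_3d(x, (16, 16, 16))
print(y.shape)        # (2, 1, 16, 16, 16)

# Gradients flow back through ResizeImagesGrad3D.
loss = chainer.functions.sum(y)
loss.backward()
print(x.grad.shape)   # (2, 1, 8, 8, 8)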
# coding=utf-8
'''
author: ShiLei Miao
analyses and build model about NBA
'''

import numpy as np
from numpy import *
import pandas as pd
from pandas import *
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import KFold
from sklearn import metrics

os.chdir(r'E:\PycharmProjects\Rong360\dta')


def loadDataSetT(path):
    data = pd.read_csv(path)
    dataSet = data.values[0:, 2:]
    dataLabel = data.values[0:, 1:2]
    return dataSet, dataLabel


def transLabel(Mat_Labels):
    labels = []
    for item in Mat_Labels:
        labels.append(item[0])
    labels = array(labels)
    return labels


def P_YYYY(N_train, target_train, N_test, target_test):
    clf = RandomForestClassifier(n_estimators=300, random_state=520341, max_depth=9,
                                 min_samples_split=3, class_weight='balanced_subsample')
    clf = clf.fit(N_train, target_train)
    pred = clf.predict_proba(N_test)
    pred = DataFrame(pred)[0].values
    N_auc = metrics.roc_auc_score(target_test, 1 - pred)
    print N_auc
    print '\n'
    return N_auc, clf


def preds_calculate(Mat_Train, Mat_Labels):
    kf = KFold(len(Mat_Train), n_folds=10)
    NN_auc = []
    for train_index, test_index in kf:
        X_train, X_test = Mat_Train[train_index], Mat_Train[test_index]
        y_train, y_test = Mat_Labels[train_index], Mat_Labels[test_index]
        N_auc, clf = P_YYYY(X_train, y_train, X_test, y_test)
        NN_auc.append(N_auc)
    mean_auc = mean(NN_auc)
    print 'mean AUC:', mean_auc
    return mean_auc, clf


# Training set
S_train_user_info = pd.read_csv(r'Generate_dta\S_train_user_info.csv')
N_train_user_info = pd.read_csv(r'Generate_dta\N_train_user_info.csv').drop(['lable'], axis=1)
relation1_train = pd.read_csv(r'Generate_dta\0909relation1_train.csv')
relation2_train = pd.read_csv(r'Generate_dta\0909relation2_train.csv')
N_train_consumption1 = pd.read_csv(r'Generate_dta\N_train_consumption1.csv').drop(['lable'], axis=1)
t_consumption = pd.read_csv(r'Generate_dta\t_consumption.csv')

# rong_tag is not used directly; the data below are the one-hot encoded features
rong_tag_train = pd.read_csv(r'Generate_dta\N_rong_tag_train.csv').drop(['lable'], axis=1)
N_rong_tag_train_var = pd.read_excel(r'Stat_importance_var.xls')
N_rong_tag_train_var = N_rong_tag_train_var[N_rong_tag_train_var['Importance'] > 10]
N_rong_tag_train = rong_tag_train.reindex(columns=N_rong_tag_train_var['Feature'].values)
N_rong_tag_train['user_id'] = rong_tag_train['user_id']
N_rong_tag_train = N_rong_tag_train.replace([None], [-1])

train = merge(S_train_user_info, N_train_user_info, how="left", left_on='user_id', right_on='user_id')
train = merge(train, relation1_train, how="left", left_on='user_id', right_on='user_id')
train = merge(train, relation2_train, how="left", left_on='user_id', right_on='user_id')
train = merge(train, N_train_consumption1, how="left", left_on='user_id', right_on='user_id')
train = merge(train, t_consumption, how="left", left_on='user_id', right_on='user_id')
train = train.replace([None], [-1])

train['category_null'] = (train < 0).sum(axis=1)
# After comparing missing values between train and test, drop users with 187
# missing feature values (almost all of them have product_id = 2)
train = train[train['category_null'] < 187]
train = DataFrame(train.values, columns=train.columns)

train = merge(train, N_rong_tag_train, how="left", left_on='user_id', right_on='user_id')

Mat_Train = train.drop(['user_id', 'lable', 'category_null'], axis=1)
Mat_Train = array(Mat_Train)
Mat_Label = train['lable'].astype(int)

mean_auc, clf = preds_calculate(Mat_Train, Mat_Label)
Procedure/2_M1/train/m2-cv-rf.py
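The same 10-fold cross-validation / AUC pattern as the script above, sketched against the current scikit-learn API (sklearn.model_selection instead of the removed sklearn.cross_validation); X and y are placeholders for the merged feature matrix and the 'lable' column.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score


def cross_validated_auc(X, y, n_splits=10):
    aucs = []
    for train_idx, test_idx in KFold(n_splits=n_splits).split(X):
        clf = RandomForestClassifier(n_estimators=300, max_depth=9,
                                     min_samples_split=3,
                                     class_weight='balanced_subsample',
                                     random_state=520341)
        clf.fit(X[train_idx], y[train_idx])
        pred = clf.predict_proba(X[test_idx])[:, 1]   # P(positive class)
        aucs.append(roc_auc_score(y[test_idx], pred))
    return float(np.mean(aucs))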
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.contrib.auth.models import User
from django.db import models


class Post(models.Model):
    status_ITEMS = (
        (1, '上线'),
        (2, '草稿'),
        (3, '删除'),
    )

    title = models.CharField(max_length=50, verbose_name='标题')
    desc = models.CharField(max_length=255, blank=True, verbose_name='摘要')
    category = models.ForeignKey('Category', verbose_name='分类')
    tags = models.ManyToManyField('Tag', related_name="posts", verbose_name='标签')
    content = models.TextField(verbose_name='内容', help_text='注:目前仅支持Markdown格式')
    status = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')
    owner = models.ForeignKey(User, verbose_name='作者')
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    lasted_update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间')

    def status_show(self):
        return '当前状态:%s' % (self.status)
    status_show.short_description = '展示站台'

    def __unicode__(self):
        return self.title

    class Meta:
        verbose_name = verbose_name_plural = '文章'


class Category(models.Model):
    status_ITEMS = (
        (1, '可用'),
        (2, '删除'),
    )

    name = models.CharField(max_length=50, verbose_name='名称')
    status = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')
    owner = models.ForeignKey(User, verbose_name='作者')
    is_nav = models.BooleanField(default=False, verbose_name="是否为导航")
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    # parent = models.ForeignKey('Category', verbose_name='分类')

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = verbose_name_plural = '分类'


class Tag(models.Model):
    status_ITEMS = (
        (1, '正常'),
        (2, '删除'),
    )

    name = models.CharField(max_length=50, verbose_name='名称')
    status = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')
    owner = models.ForeignKey(User, verbose_name='作者')
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = verbose_name_plural = '标签'
typeidea/blog/models.py
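A hedged sketch of using the models above from a Django shell; the user lookup and field values are placeholders.

from django.contrib.auth.models import User

from blog.models import Category, Post, Tag

owner = User.objects.first()
category = Category.objects.create(name='Python', owner=owner, is_nav=True)
tag = Tag.objects.create(name='django', owner=owner)

post = Post.objects.create(title='Hello', desc='first post',
                           content='Written in **Markdown**',
                           category=category, owner=owner, status=1)
post.tags.add(tag)

# Only online posts (status == 1), newest first.
online_posts = Post.objects.filter(status=1).order_by('-created_time')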
#!/usr/bin/env python2.7 # Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT) # Bespoke Link to Instruments and Small Satellites (BLISS) # # Copyright 2016, by the California Institute of Technology. ALL RIGHTS # RESERVED. United States Government Sponsorship acknowledged. Any # commercial use must be negotiated with the Office of Technology Transfer # at the California Institute of Technology. # # This software may be subject to U.S. export control laws. By accepting # this software, the user agrees to comply with all applicable U.S. export # laws and regulations. User has the responsibility to obtain export licenses, # or other export authority as may be required before exporting such # information to foreign countries or providing access to foreign persons. import time import datetime import mock import os import os.path import nose import nose.tools import ait.core from ait.core import dmc LEAPSECOND_DATA_RESPONSE = '''# # Updated through IERS Bulletin C55 # File expires on: 28 December 2018 # #@ 3754944000 # 2272060800 10 # 1 Jan 1972 2287785600 11 # 1 Jul 1972 2303683200 12 # 1 Jan 1973 2335219200 13 # 1 Jan 1974 2366755200 14 # 1 Jan 1975 2398291200 15 # 1 Jan 1976 2429913600 16 # 1 Jan 1977 2461449600 17 # 1 Jan 1978 2492985600 18 # 1 Jan 1979 2524521600 19 # 1 Jan 1980 2571782400 20 # 1 Jul 1981 2603318400 21 # 1 Jul 1982 2634854400 22 # 1 Jul 1983 2698012800 23 # 1 Jul 1985 2776982400 24 # 1 Jan 1988 2840140800 25 # 1 Jan 1990 2871676800 26 # 1 Jan 1991 2918937600 27 # 1 Jul 1992 2950473600 28 # 1 Jul 1993 2982009600 29 # 1 Jul 1994 3029443200 30 # 1 Jan 1996 3076704000 31 # 1 Jul 1997 ''' class MockResponse: def __init__(self, text, status_code): self.text = text self.status_code = status_code def test_getTimestampUTC(): expected = time.strftime('%Y-%j', time.gmtime()) actual = time.strftime('%Y-%j', time.gmtime(dmc.getTimestampUTC()[0])) assert actual == expected def test_getUTCDatetimeDOY_w_days(): days = 1 t = datetime.datetime.utcnow() + datetime.timedelta(days=days) timestamp = t.timetuple() exp_year = timestamp.tm_year exp_day = '%03d' % timestamp.tm_yday dtime = dmc.getUTCDatetimeDOY(days=days).split('T')[0].split('-') assert str(exp_year) == dtime[0] assert str(exp_day) == dtime[1] def test_leap_second_attrs(): ait.config.leapseconds._config['filename'] = os.path.join( os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat" ) ls = dmc.LeapSeconds ls._load_leap_second_data() assert ls.leapseconds == ls._data['leapseconds'] assert ls.valid_date == ls._data['valid'] assert ls.get_current_GPS_offset() == ls.leapseconds[-1][-1] @nose.tools.raises(ValueError) def test_leap_second_by_date_invalid_gps_date(): ait.config.leapseconds._config['filename'] = os.path.join( os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat" ) dmc.LeapSeconds._load_leap_second_data() dmc.LeapSeconds.get_GPS_offset_for_date(datetime.datetime(1980, 1, 1)) def test_leap_second_by_date(): ait.config.leapseconds._config['filename'] = os.path.join( os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat" ) ls = dmc.LeapSeconds ls._load_leap_second_data() assert ls.get_GPS_offset_for_date(datetime.datetime(1981, 1, 1)) == 0 assert ls.get_GPS_offset_for_date(datetime.datetime(1981, 7, 1)) == 1 assert ls.get_GPS_offset_for_date(datetime.datetime(1982, 7, 1)) == 2 assert ls.get_GPS_offset_for_date(datetime.datetime(1983, 7, 1)) == 3 assert ls.get_GPS_offset_for_date(datetime.datetime(1985, 7, 1)) == 4 assert ls.get_GPS_offset_for_date(datetime.datetime(1988, 1, 
1)) == 5 assert ls.get_GPS_offset_for_date(datetime.datetime(1990, 1, 1)) == 6 assert ls.get_GPS_offset_for_date(datetime.datetime(1991, 1, 1)) == 7 assert ls.get_GPS_offset_for_date(datetime.datetime(1992, 7, 1)) == 8 assert ls.get_GPS_offset_for_date(datetime.datetime(1993, 7, 1)) == 9 assert ls.get_GPS_offset_for_date(datetime.datetime(1994, 7, 1)) == 10 assert ls.get_GPS_offset_for_date(datetime.datetime(1996, 1, 1)) == 11 assert ls.get_GPS_offset_for_date(datetime.datetime(1997, 7, 1)) == 12 assert ls.get_GPS_offset_for_date(datetime.datetime(1999, 1, 1)) == 13 assert ls.get_GPS_offset_for_date(datetime.datetime(2006, 1, 1)) == 14 assert ls.get_GPS_offset_for_date(datetime.datetime(2009, 1, 1)) == 15 assert ls.get_GPS_offset_for_date(datetime.datetime(2012, 7, 1)) == 16 assert ls.get_GPS_offset_for_date(datetime.datetime(2015, 7, 1)) == 17 assert ls.get_GPS_offset_for_date(datetime.datetime(2017, 1, 1)) == 18 # Make sure not supplying a date returns the offset for the current date assert (ls.get_GPS_offset_for_date(datetime.datetime.utcnow()) == ls.get_GPS_offset_for_date()) def test_leap_second_data_load(): ait.config.leapseconds._config['filename'] = os.path.join( os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat" ) assert type(dmc.LeapSeconds.leapseconds) == type([]) assert dmc.LeapSeconds.leapseconds[0] == (datetime.datetime(1981, 7, 1), 1) assert type(dmc.LeapSeconds.valid_date) == type(datetime.datetime.now()) @nose.tools.raises(ValueError) @mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 400))) def test_failed_leapsecond_load_and_update(): ait.config.leapseconds._config['filename'] = os.path.join( os.path.dirname(__file__), "invalidpath", "leapseconds.dat" ) dmc.LeapSeconds._data = None dmc.LeapSeconds._load_leap_second_data() @mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 200))) def test_update_leap_second_data(): ait.config.leapseconds._config['filename'] = os.path.join( os.path.dirname(__file__), "testdata", "dmc", "tmp_leapseconds.out" ) dmc.LeapSeconds._data = None dmc.LeapSeconds._update_leap_second_data() assert type(dmc.LeapSeconds.leapseconds) == type([]) assert dmc.LeapSeconds.leapseconds[0] == (datetime.datetime(1981, 7, 1), 1) assert type(dmc.LeapSeconds.valid_date) == type(datetime.datetime.now()) assert os.path.isfile(ait.config.leapseconds.filename) os.remove(ait.config.leapseconds.filename) @nose.tools.raises(ValueError) @mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 400))) def test_unable_to_pull_leapsecond_data(): ait.config.leapseconds._config['filename'] = os.path.join( os.path.dirname(__file__), "testdata", "dmc", "tmp_leapseconds.out" ) dmc.LeapSeconds._data = None dmc.LeapSeconds._update_leap_second_data() if __name__ == '__main__': nose.main()
ait/core/test/test_dmc.py
6,860
!/usr/bin/env python2.7 Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT) Bespoke Link to Instruments and Small Satellites (BLISS) Copyright 2016, by the California Institute of Technology. ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged. Any commercial use must be negotiated with the Office of Technology Transfer at the California Institute of Technology. This software may be subject to U.S. export control laws. By accepting this software, the user agrees to comply with all applicable U.S. export laws and regulations. User has the responsibility to obtain export licenses, or other export authority as may be required before exporting such information to foreign countries or providing access to foreign persons. Make sure not supplying a date returns the offset for the current date
837
en
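The test file above exercises AIT's leap-second table (a GPS-UTC offset lookup keyed on date). As a rough, self-contained sketch of that idea only — not the ait.core.dmc implementation — the lookup can be done with a sorted table and bisect; the table entries below are a hand-typed subset and the function name is made up:

import bisect
import datetime

# Hypothetical table: (date the leap second takes effect, cumulative GPS-UTC offset).
# GPS time started in sync with UTC in 1980, so offsets begin after that date.
LEAP_TABLE = [
    (datetime.datetime(1981, 7, 1), 1),
    (datetime.datetime(1982, 7, 1), 2),
    (datetime.datetime(1983, 7, 1), 3),
    (datetime.datetime(1985, 7, 1), 4),
    (datetime.datetime(1988, 1, 1), 5),
]

def gps_offset_for_date(when=None):
    """Return the cumulative leap-second offset in effect at `when`."""
    if when is None:
        when = datetime.datetime.utcnow()
    if when < LEAP_TABLE[0][0]:
        raise ValueError("date predates the first tabulated leap second")
    dates = [d for d, _ in LEAP_TABLE]
    index = bisect.bisect_right(dates, when) - 1
    return LEAP_TABLE[index][1]

assert gps_offset_for_date(datetime.datetime(1982, 7, 1)) == 2
assert gps_offset_for_date(datetime.datetime(1984, 1, 1)) == 3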
0.902081
# Copyright 2018 the GPflow authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from numpy.random import randn import tensorflow as tf import pytest import gpflow from gpflow import logdensities, settings from gpflow.test_util import session_tf from scipy.stats import multivariate_normal as mvn from numpy.testing import assert_allclose rng = np.random.RandomState(1) @pytest.mark.parametrize("x", [randn(4,10), randn(4,1)]) @pytest.mark.parametrize("mu", [randn(4,10), randn(4,1)]) @pytest.mark.parametrize("cov_sqrt", [randn(4,4), np.eye(4)]) def test_multivariate_normal(session_tf, x, mu, cov_sqrt): cov = np.dot(cov_sqrt, cov_sqrt.T) L = np.linalg.cholesky(cov) x_tf = tf.placeholder(settings.float_type) mu_tf = tf.placeholder(settings.float_type) gp_result = logdensities.multivariate_normal( x_tf, mu_tf, tf.convert_to_tensor(L)) gp_result = session_tf.run(gp_result, feed_dict={x_tf: x, mu_tf: mu}) if mu.shape[1] > 1: if x.shape[1] > 1: sp_result = [mvn.logpdf(x[:,i], mu[:,i], cov) for i in range(mu.shape[1])] else: sp_result = [mvn.logpdf(x.ravel(), mu[:, i], cov) for i in range(mu.shape[1])] else: sp_result = mvn.logpdf(x.T, mu.ravel(), cov) assert_allclose(gp_result, sp_result) def test_shape_asserts(session_tf): A = np.random.randn(5) B = np.random.randn(5) L = np.tril(np.random.randn(5, 5)) # Static shape check: with pytest.raises(ValueError): tA = tf.identity(A) tB = tf.identity(B) tL = tf.identity(L) res = logdensities.multivariate_normal(tA, tB, tL) # Dynamic shape check: # the following results in a segfault before PR#964 with pytest.raises(tf.errors.InvalidArgumentError): vA = tf.placeholder(tf.float64) vB = tf.placeholder(tf.float64) vL = tf.placeholder(tf.float64) res = logdensities.multivariate_normal(vA, vB, vL) session_tf.run(res, {vA: A, vB: B, vL: L})
tests/test_logdensities.py
2,521
Copyright 2018 the GPflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Static shape check: Dynamic shape check: the following results in a segfault before PR964
642
en
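The GPflow test above compares logdensities.multivariate_normal against scipy. A minimal NumPy-only sketch of the quantity being checked, assuming the covariance is supplied through its Cholesky factor L (so log|Σ| = 2·Σ log Lᵢᵢ and the quadratic form reduces to ‖L⁻¹(x−μ)‖²):

import numpy as np
from scipy.linalg import solve_triangular
from scipy.stats import multivariate_normal

def mvn_logpdf_cholesky(x, mu, L):
    """Log density of N(mu, L @ L.T) evaluated at x, given the lower Cholesky factor L."""
    d = x.shape[0]
    alpha = solve_triangular(L, x - mu, lower=True)  # alpha = L^{-1} (x - mu)
    return (-0.5 * d * np.log(2.0 * np.pi)
            - np.sum(np.log(np.diag(L)))             # 0.5 * log|Sigma|
            - 0.5 * np.dot(alpha, alpha))            # 0.5 * (x-mu)^T Sigma^{-1} (x-mu)

rng = np.random.RandomState(0)
cov_sqrt = rng.randn(4, 4)
cov = cov_sqrt @ cov_sqrt.T
L = np.linalg.cholesky(cov)
x, mu = rng.randn(4), rng.randn(4)

np.testing.assert_allclose(
    mvn_logpdf_cholesky(x, mu, L),
    multivariate_normal.logpdf(x, mu, cov),
)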
0.857075
# -*- coding: utf-8 -*- """ wsproto/handshake ~~~~~~~~~~~~~~~~~~ An implementation of WebSocket handshakes. """ from collections import deque from typing import Deque, Dict, Generator, List, Optional, Union import h11 from .connection import Connection, ConnectionState, ConnectionType from .events import AcceptConnection, Event, RejectConnection, RejectData, Request from .extensions import Extension from .typing import Headers from .utilities import ( generate_accept_token, generate_nonce, LocalProtocolError, normed_header_dict, RemoteProtocolError, split_comma_header, ) # RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake WEBSOCKET_VERSION = b"13" class H11Handshake: """A Handshake implementation for HTTP/1.1 connections.""" def __init__(self, connection_type: ConnectionType) -> None: self.client = connection_type is ConnectionType.CLIENT self._state = ConnectionState.CONNECTING if self.client: self._h11_connection = h11.Connection(h11.CLIENT) else: self._h11_connection = h11.Connection(h11.SERVER) self._connection: Optional[Connection] = None self._events: Deque[Event] = deque() self._initiating_request: Optional[Request] = None self._nonce: Optional[bytes] = None @property def state(self) -> ConnectionState: return self._state @property def connection(self) -> Optional[Connection]: """Return the established connection. This will either return the connection or raise a LocalProtocolError if the connection has not yet been established. :rtype: h11.Connection """ return self._connection def initiate_upgrade_connection(self, headers: Headers, path: str) -> None: """Initiate an upgrade connection. This should be used if the request has already be received and parsed. :param list headers: HTTP headers represented as a list of 2-tuples. :param str path: A URL path. """ if self.client: raise LocalProtocolError( "Cannot initiate an upgrade connection when acting as the client" ) upgrade_request = h11.Request(method=b"GET", target=path, headers=headers) h11_client = h11.Connection(h11.CLIENT) self.receive_data(h11_client.send(upgrade_request)) def send(self, event: Event) -> bytes: """Send an event to the remote. This will return the bytes to send based on the event or raise a LocalProtocolError if the event is not valid given the state. :returns: Data to send to the WebSocket peer. :rtype: bytes """ data = b"" if isinstance(event, Request): data += self._initiate_connection(event) elif isinstance(event, AcceptConnection): data += self._accept(event) elif isinstance(event, RejectConnection): data += self._reject(event) elif isinstance(event, RejectData): data += self._send_reject_data(event) else: raise LocalProtocolError( "Event {} cannot be sent during the handshake".format(event) ) return data def receive_data(self, data: bytes) -> None: """Receive data from the remote. A list of events that the remote peer triggered by sending this data can be retrieved with :meth:`events`. :param bytes data: Data received from the WebSocket peer. 
""" self._h11_connection.receive_data(data) while True: try: event = self._h11_connection.next_event() except h11.RemoteProtocolError: raise RemoteProtocolError( "Bad HTTP message", event_hint=RejectConnection() ) if ( isinstance(event, h11.ConnectionClosed) or event is h11.NEED_DATA or event is h11.PAUSED ): break if self.client: if isinstance(event, h11.InformationalResponse): if event.status_code == 101: self._events.append(self._establish_client_connection(event)) else: self._events.append( RejectConnection( headers=event.headers, status_code=event.status_code, has_body=False, ) ) self._state = ConnectionState.CLOSED elif isinstance(event, h11.Response): self._state = ConnectionState.REJECTING self._events.append( RejectConnection( headers=event.headers, status_code=event.status_code, has_body=True, ) ) elif isinstance(event, h11.Data): self._events.append( RejectData(data=event.data, body_finished=False) ) elif isinstance(event, h11.EndOfMessage): self._events.append(RejectData(data=b"", body_finished=True)) self._state = ConnectionState.CLOSED else: if isinstance(event, h11.Request): self._events.append(self._process_connection_request(event)) def events(self) -> Generator[Event, None, None]: """Return a generator that provides any events that have been generated by protocol activity. :returns: a generator that yields H11 events. """ while self._events: yield self._events.popleft() ############ Server mode methods def _process_connection_request(self, event: h11.Request) -> Request: if event.method != b"GET": raise RemoteProtocolError( "Request method must be GET", event_hint=RejectConnection() ) connection_tokens = None extensions: List[str] = [] host = None key = None subprotocols: List[str] = [] upgrade = b"" version = None headers: Headers = [] for name, value in event.headers: name = name.lower() if name == b"connection": connection_tokens = split_comma_header(value) elif name == b"host": host = value.decode("ascii") continue # Skip appending to headers elif name == b"sec-websocket-extensions": extensions = split_comma_header(value) continue # Skip appending to headers elif name == b"sec-websocket-key": key = value elif name == b"sec-websocket-protocol": subprotocols = split_comma_header(value) continue # Skip appending to headers elif name == b"sec-websocket-version": version = value elif name == b"upgrade": upgrade = value headers.append((name, value)) if connection_tokens is None or not any( token.lower() == "upgrade" for token in connection_tokens ): raise RemoteProtocolError( "Missing header, 'Connection: Upgrade'", event_hint=RejectConnection() ) if version != WEBSOCKET_VERSION: raise RemoteProtocolError( "Missing header, 'Sec-WebSocket-Version'", event_hint=RejectConnection( headers=[(b"Sec-WebSocket-Version", WEBSOCKET_VERSION)], status_code=426, ), ) if key is None: raise RemoteProtocolError( "Missing header, 'Sec-WebSocket-Key'", event_hint=RejectConnection() ) if upgrade.lower() != b"websocket": raise RemoteProtocolError( "Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection() ) if version is None: raise RemoteProtocolError( "Missing header, 'Sec-WebSocket-Version'", event_hint=RejectConnection() ) self._initiating_request = Request( extensions=extensions, extra_headers=headers, host=host, subprotocols=subprotocols, target=event.target.decode("ascii"), ) return self._initiating_request def _accept(self, event: AcceptConnection) -> bytes: request_headers = normed_header_dict(self._initiating_request.extra_headers) nonce = 
request_headers[b"sec-websocket-key"] accept_token = generate_accept_token(nonce) headers = [ (b"Upgrade", b"WebSocket"), (b"Connection", b"Upgrade"), (b"Sec-WebSocket-Accept", accept_token), ] if event.subprotocol is not None: if event.subprotocol not in self._initiating_request.subprotocols: raise LocalProtocolError( "unexpected subprotocol {}".format(event.subprotocol) ) headers.append( (b"Sec-WebSocket-Protocol", event.subprotocol.encode("ascii")) ) if event.extensions: accepts = server_extensions_handshake( # type: ignore self._initiating_request.extensions, event.extensions ) if accepts: headers.append((b"Sec-WebSocket-Extensions", accepts)) response = h11.InformationalResponse( status_code=101, headers=headers + event.extra_headers ) self._connection = Connection( ConnectionType.CLIENT if self.client else ConnectionType.SERVER, event.extensions, ) self._state = ConnectionState.OPEN return self._h11_connection.send(response) def _reject(self, event: RejectConnection) -> bytes: if self.state != ConnectionState.CONNECTING: raise LocalProtocolError( "Connection cannot be rejected in state %s" % self.state ) headers = event.headers if not event.has_body: headers.append((b"content-length", b"0")) response = h11.Response(status_code=event.status_code, headers=headers) data = self._h11_connection.send(response) self._state = ConnectionState.REJECTING if not event.has_body: data += self._h11_connection.send(h11.EndOfMessage()) self._state = ConnectionState.CLOSED return data def _send_reject_data(self, event: RejectData) -> bytes: if self.state != ConnectionState.REJECTING: raise LocalProtocolError( "Cannot send rejection data in state {}".format(self.state) ) data = self._h11_connection.send(h11.Data(data=event.data)) if event.body_finished: data += self._h11_connection.send(h11.EndOfMessage()) self._state = ConnectionState.CLOSED return data ############ Client mode methods def _initiate_connection(self, request: Request) -> bytes: self._initiating_request = request self._nonce = generate_nonce() headers = [ (b"Host", request.host.encode("ascii")), (b"Upgrade", b"WebSocket"), (b"Connection", b"Upgrade"), (b"Sec-WebSocket-Key", self._nonce), (b"Sec-WebSocket-Version", WEBSOCKET_VERSION), ] if request.subprotocols: headers.append( ( b"Sec-WebSocket-Protocol", (", ".join(request.subprotocols)).encode("ascii"), ) ) if request.extensions: offers = {e.name: e.offer() for e in request.extensions} # type: ignore extensions = [] for name, params in offers.items(): name = name.encode("ascii") if params is True: extensions.append(name) elif params: extensions.append( b"%s; %s" % (name, params.encode("ascii")) # type: ignore ) if extensions: headers.append((b"Sec-WebSocket-Extensions", b", ".join(extensions))) upgrade = h11.Request( method=b"GET", target=request.target.encode("ascii"), headers=headers + request.extra_headers, ) return self._h11_connection.send(upgrade) def _establish_client_connection( self, event: h11.InformationalResponse ) -> AcceptConnection: # noqa: MC0001 accept = None connection_tokens = None accepts: List[str] = [] subprotocol = None upgrade = b"" headers: Headers = [] for name, value in event.headers: name = name.lower() if name == b"connection": connection_tokens = split_comma_header(value) continue # Skip appending to headers elif name == b"sec-websocket-extensions": accepts = split_comma_header(value) continue # Skip appending to headers elif name == b"sec-websocket-accept": accept = value continue # Skip appending to headers elif name == b"sec-websocket-protocol": 
subprotocol = value continue # Skip appending to headers elif name == b"upgrade": upgrade = value continue # Skip appending to headers headers.append((name, value)) if connection_tokens is None or not any( token.lower() == "upgrade" for token in connection_tokens ): raise RemoteProtocolError( "Missing header, 'Connection: Upgrade'", event_hint=RejectConnection() ) if upgrade.lower() != b"websocket": raise RemoteProtocolError( "Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection() ) accept_token = generate_accept_token(self._nonce) if accept != accept_token: raise RemoteProtocolError("Bad accept token", event_hint=RejectConnection()) if subprotocol is not None: subprotocol = subprotocol.decode("ascii") if subprotocol not in self._initiating_request.subprotocols: raise RemoteProtocolError( "unrecognized subprotocol {}".format(subprotocol), event_hint=RejectConnection(), ) extensions = client_extensions_handshake( # type: ignore accepts, self._initiating_request.extensions ) self._connection = Connection( ConnectionType.CLIENT if self.client else ConnectionType.SERVER, extensions, self._h11_connection.trailing_data[0], ) self._state = ConnectionState.OPEN return AcceptConnection( extensions=extensions, extra_headers=headers, subprotocol=subprotocol ) def __repr__(self) -> str: return "{}(client={}, state={})".format( self.__class__.__name__, self.client, self.state ) def server_extensions_handshake( requested: List[str], supported: List[Extension] ) -> Optional[bytes]: """Agree on the extensions to use returning an appropriate header value. This returns None if there are no agreed extensions """ accepts: Dict[str, Union[bool, bytes]] = {} for offer in requested: name = offer.split(";", 1)[0].strip() for extension in supported: if extension.name == name: accept = extension.accept(offer) if accept is True: accepts[extension.name] = True elif accept is not False and accept is not None: accepts[extension.name] = accept.encode("ascii") # type: ignore if accepts: extensions: List[bytes] = [] for name, params in accepts.items(): name = name.encode("ascii") # type: ignore if params is True: extensions.append(name) # type: ignore else: if params == b"": extensions.append(b"%s" % (name)) else: extensions.append(b"%s; %s" % (name, params)) return b", ".join(extensions) return None def client_extensions_handshake( accepted: List[str], supported: List[Extension] ) -> List[Extension]: # This raises RemoteProtocolError is the accepted extension is not # supported. extensions = [] for accept in accepted: name = accept.split(";", 1)[0].strip() for extension in supported: if extension.name == name: extension.finalize(accept) extensions.append(extension) break else: raise RemoteProtocolError( "unrecognized extension {}".format(name), event_hint=RejectConnection() ) return extensions
wsproto/handshake.py
17,527
A Handshake implementation for HTTP/1.1 connections. Return the established connection. This will either return the connection or raise a LocalProtocolError if the connection has not yet been established. :rtype: h11.Connection Return a generator that provides any events that have been generated by protocol activity. :returns: a generator that yields H11 events. Initiate an upgrade connection. This should be used if the request has already be received and parsed. :param list headers: HTTP headers represented as a list of 2-tuples. :param str path: A URL path. Receive data from the remote. A list of events that the remote peer triggered by sending this data can be retrieved with :meth:`events`. :param bytes data: Data received from the WebSocket peer. Send an event to the remote. This will return the bytes to send based on the event or raise a LocalProtocolError if the event is not valid given the state. :returns: Data to send to the WebSocket peer. :rtype: bytes Agree on the extensions to use returning an appropriate header value. This returns None if there are no agreed extensions wsproto/handshake ~~~~~~~~~~~~~~~~~~ An implementation of WebSocket handshakes. -*- coding: utf-8 -*- RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake Server mode methods Skip appending to headers Skip appending to headers Skip appending to headers type: ignore Client mode methods type: ignore type: ignore noqa: MC0001 Skip appending to headers Skip appending to headers Skip appending to headers Skip appending to headers Skip appending to headers type: ignore type: ignore type: ignore type: ignore This raises RemoteProtocolError is the accepted extension is not supported.
1,707
en
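A hedged usage sketch of the H11Handshake class shown above, driving a client and a server instance against each other in memory (assumes wsproto and its h11 dependency are installed; the host and target values are placeholders):

# In-process handshake round trip; no sockets involved.
from wsproto.connection import ConnectionType
from wsproto.events import AcceptConnection, Request
from wsproto.handshake import H11Handshake

client = H11Handshake(ConnectionType.CLIENT)
server = H11Handshake(ConnectionType.SERVER)

# Client emits the HTTP/1.1 upgrade request as raw bytes.
request_bytes = client.send(Request(host="example.invalid", target="/chat"))

# Server parses it and surfaces a Request event.
server.receive_data(request_bytes)
request_event = next(server.events())
assert isinstance(request_event, Request)

# Server accepts; client parses the 101 response and reports AcceptConnection.
accept_bytes = server.send(AcceptConnection())
client.receive_data(accept_bytes)
accept_event = next(client.events())
assert isinstance(accept_event, AcceptConnection)
print(client.state, server.state)  # both should now be OPEN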
0.817603
import numpy as np
from astropy.io import fits
from scipy.interpolate import interp1d


# Fitting Sline3
def fit_spline3(y, x, order=3, nsum=3):
    y_resampled = [np.median(y[i:i + nsum]) for i in range(0, len(y) - len(y) % nsum, nsum)]
    x_resampled = np.linspace(0, len(y), len(y_resampled))
    # Fitting
    f = interp1d(x_resampled, y_resampled, kind=order, bounds_error=True)
    # Return function to be constructed with any other x array
    return f


# Local Minima and Maxima
def local_minmax(data, nmin=2, nmax=2):
    # Identifying indices of local minima-maxima points
    id_min = (np.gradient(np.sign(np.gradient(data))) > 0).nonzero()[0]  # index of local min
    id_max = (np.gradient(np.sign(np.gradient(data))) < 0).nonzero()[0]  # index of local max
    # Taking values at min/max points
    list_min, list_max = data[id_min], data[id_max]
    # Sorting minima-maxima values (bigger --> lower)
    list_min, id_min = (list(p) for p in zip(*sorted(zip(list_min, id_min), reverse=False)))
    list_max, id_max = (list(p) for p in zip(*sorted(zip(list_max, id_max), reverse=True)))
    # Taking the desired number of local minima-maxima points
    list_min, list_max, id_min, id_max = list_min[0:nmin], list_max[0:nmax], id_min[0:nmin], id_max[0:nmax]
    return list_min, list_max, id_min, id_max


def trim_slitedge(flat, plot=True):
    # Getting input data
    ccddata = fits.getdata(flat, ignore_missing_end=True)
    # Collapse flat in the dispersion direction
    flat_collapsed = fits.getdata(flat, ignore_missing_end=True).sum(axis=1) / ccddata.shape[1]
    lines = np.arange(0, flat_collapsed.size, 1)
    # Excluding first pixels in the spatial direction
    cut = 3
    c_flat = flat_collapsed[cut:-cut]
    c_lines = np.arange(0, c_flat.size, 1)
    # Fittin cubic spline. It's working very well with order=5, nsum=2
    func_splin3 = fit_spline3(c_flat, c_lines, order=5, nsum=2)
    smooth_flat = func_splin3(c_lines)
    # Compute 1st and flat smoothed
    dy = np.gradient(smooth_flat)
    dy2 = np.gradient(dy)
    # Regions to compute local minina-maxima
    # Region one: it represent first 40 percent of all data
    # Region two: ... last 40%
    pixa, pixb = int(len(c_flat) * 0.4), int(len(c_flat) * 0.6)
    dy2_one, dy2_two = dy2[0:pixa], dy2[pixb:]
    # Reg. 1: Compute local min/max of the 2nd derivative
    list_min_1, list_max_1, id_min_1, id_max_1 = local_minmax(dy2_one, nmin=1, nmax=1)
    list_min_2, list_max_2, id_min_2, id_max_2 = local_minmax(dy2_two, nmin=1, nmax=1)
    # Indice have to be reshifted to the original indices of the function dy2
    id_min_2 = np.array(id_min_2) + pixb
    # Slit edges are the local maxima/minima 1/2 [accounting the cutted pixels]
    slit_1, slit_2 = int(np.array(id_min_1) + cut), int(np.array(id_min_2) + cut)
    print(slit_1, slit_2)

    if plot is True:
        import matplotlib.pyplot as plt
        c_lines += cut
        plt.plot(lines, flat_collapsed, 'k-', label='Flat Collapsed')
        plt.plot(lines[slit_1:slit_2], flat_collapsed[slit_1:slit_2], 'r-', label='Cutted Flat')
        plt.plot(c_lines, dy, 'g-', label="Dy/dx")
        plt.plot(c_lines, dy2, 'y-', label="Dy2/dx")
        plt.plot(slit_1, list_min_1, 'bo', label='Slit Edge 1 ')
        plt.plot(slit_2, list_min_2, 'ro', label='Slit Edge 2')
        plt.xlim(lines.min() - 50, lines.max() + 50)
        plt.legend(loc='best')
        plt.show()

    return slit_1, slit_2


flat = '/home/davidsanm/PyCharmProjects/GoodmanDataReduction/2016-03-20/RED/master_flat_600.fits'
trim_slitedge(flat, plot=True)

trim_slitedge.py
3,601
Fitting Sline3 Fitting Return function to be constructed with any other x array Local Minima and Maxima Identifying indices of local minima-maxima points index of local min index of local max Taking values at min/max points Sorting minima-maxima values (bigger --> lower) Taking the desired number of local minima-maxima points Getting input data Collapse flat in the dispersion direction Excluding first pixels in the spatial direction Fittin cubic spline. It's working very well with order=5, nsum=2 Compute 1st and flat smoothed Regions to compute local minina-maxima Region one: it represent first 40 percent of all data Region two: ... last 40% Reg. 1: Compute local min/max of the 2nd derivative Indice have to be reshifted to the original indices of the function dy2 Slit edges are the local maxima/minima 1/2 [accounting the cutted pixels]
847
en
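The slit-edge finder above locates extrema of the collapsed flat through sign changes of the gradient. A small self-contained sketch of that local_minmax idea on a synthetic signal (function name and test signal are made up):

import numpy as np

def local_extrema(y):
    """Indices of local minima and maxima found from sign changes of the gradient."""
    dy_sign = np.sign(np.gradient(y))
    curvature = np.gradient(dy_sign)
    minima = np.nonzero(curvature > 0)[0]  # slope goes from negative to positive
    maxima = np.nonzero(curvature < 0)[0]  # slope goes from positive to negative
    return minima, maxima

x = np.linspace(0, 4 * np.pi, 200)
y = np.sin(x)
minima, maxima = local_extrema(y)
print(x[minima], x[maxima])  # index clusters around the minima/maxima of sin(x)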
0.74017
# -*- encoding: utf-8 -*- # $Id: __init__.py,v 1.8.2.2 2007/05/22 21:06:52 customdesigned Exp $ # # This file is part of the pydns project. # Homepage: http://pydns.sourceforge.net # # This code is covered by the standard Python License. # # __init__.py for DNS class. __version__ = '2.3.1' import Type,Opcode,Status,Class from Base import DnsRequest, DNSError from Lib import DnsResult from Base import * from Lib import * Error=DNSError from lazy import * Request = DnsRequest Result = DnsResult from Serialization import Serialize,DeSerialize # # $Log: __init__.py,v $ # Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned # utf-8 in __init__.py # # Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned # Release 2.3.1 # # Revision 1.8 2002/05/06 06:17:49 anthonybaxter # found that the old README file called itself release 2.2. So make # this one 2.3... # # Revision 1.7 2002/05/06 06:16:15 anthonybaxter # make some sort of reasonable version string. releasewards ho! # # Revision 1.6 2002/03/19 13:05:02 anthonybaxter # converted to class based exceptions (there goes the python1.4 compatibility :) # # removed a quite gross use of 'eval()'. # # Revision 1.5 2002/03/19 12:41:33 anthonybaxter # tabnannied and reindented everything. 4 space indent, no tabs. # yay. # # Revision 1.4 2001/11/26 17:57:51 stroeder # Added __version__ # # Revision 1.3 2001/08/09 09:08:55 anthonybaxter # added identifying header to top of each file # # Revision 1.2 2001/07/19 06:57:07 anthony # cvs keywords added # #
tools/hipdnsproxy/DNS/__init__.py
1,527
-*- encoding: utf-8 -*- $Id: __init__.py,v 1.8.2.2 2007/05/22 21:06:52 customdesigned Exp $ This file is part of the pydns project. Homepage: http://pydns.sourceforge.net This code is covered by the standard Python License. __init__.py for DNS class. $Log: __init__.py,v $ Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned utf-8 in __init__.py Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned Release 2.3.1 Revision 1.8 2002/05/06 06:17:49 anthonybaxter found that the old README file called itself release 2.2. So make this one 2.3... Revision 1.7 2002/05/06 06:16:15 anthonybaxter make some sort of reasonable version string. releasewards ho! Revision 1.6 2002/03/19 13:05:02 anthonybaxter converted to class based exceptions (there goes the python1.4 compatibility :) removed a quite gross use of 'eval()'. Revision 1.5 2002/03/19 12:41:33 anthonybaxter tabnannied and reindented everything. 4 space indent, no tabs. yay. Revision 1.4 2001/11/26 17:57:51 stroeder Added __version__ Revision 1.3 2001/08/09 09:08:55 anthonybaxter added identifying header to top of each file Revision 1.2 2001/07/19 06:57:07 anthony cvs keywords added
1,159
en
0.700518
import os
import sys
import setuptools

# To prevent importing about and thereby breaking the coverage info we use this
# exec hack
about = {}
with open('python_utils/__about__.py') as fp:
    exec(fp.read(), about)

if os.path.isfile('README.rst'):
    long_description = open('README.rst').read()
else:
    long_description = 'See http://pypi.python.org/pypi/python-utils/'

needs_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []

if __name__ == '__main__':
    setuptools.setup(
        name='python-utils',
        version=about['__version__'],
        author=about['__author__'],
        author_email=about['__author_email__'],
        description=about['__description__'],
        url=about['__url__'],
        license='BSD',
        packages=setuptools.find_packages(),
        long_description=long_description,
        install_requires=['six'],
        tests_require=['pytest'],
        setup_requires=[] + pytest_runner,
        classifiers=['License :: OSI Approved :: BSD License'],
    )
setup.py
1,077
To prevent importing about and thereby breaking the coverage info we use this exec hack
87
en
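The setup.py above reads package metadata by exec-ing python_utils/__about__.py into a dict instead of importing the package (which would break coverage). A tiny sketch of the same pattern, with the metadata inlined as a string stand-in for the real file:

about = {}
metadata_source = '__version__ = "1.2.3"\n__author__ = "Jane Doe"\n'  # stand-in for __about__.py
exec(metadata_source, about)
print(about['__version__'], about['__author__'])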
0.841132
import logging as log import cv2 import sys import numpy as np class LandmarksDetectionModel: ''' Class for the Face Landmarks Detection Model. Load and configure inference plugins for the specified target devices, and performs either synchronous or asynchronous modes for the specified infer requests. ''' def __init__(self, model_name, device='CPU', extensions=None, async_infer=True): ''' Set instance variables. ''' self.plugin = None self.network = None self.exec_network = None self.infer_request_handle = None self.input_blob = None self.input_shape = None self.output_blob = None self.output_shape = None self.model_name = model_name self.device = device self.extensions = extensions self.async_infer = async_infer def load_model(self, plugin): ''' This method is for loading the model (in IR format) to the device specified by the user. Default device is CPU. ''' # Get model model_structure = self.model_name + '.xml' model_weights = self.model_name + '.bin' # Initialize the plugin - load the inference engine API # Plugin is the one already created for the Face Detection model self.plugin = plugin # Add a CPU extension, if applicable if self.extensions and 'CPU' in self.device: self.plugin.add_extension(self.extensions, self.device) # Read the IR as IENetwork try: self.network = self.plugin.read_network(model=model_structure, weights=model_weights) except: raise ValueError("Could not initialise the network. Have you entered the correct model path?") # Check if model and CPU plugin are supported if self.device == 'CPU': self.check_model() # Load the IENetwork into the plugin self.exec_network = self.plugin.load_network(network=self.network, device_name=self.device, num_requests=1) # Get the input and output layers self.input_blob = next(iter(self.network.inputs)) self.input_shape = self.network.inputs[self.input_blob].shape self.output_blob = next(iter(self.network.outputs)) self.output_shape = self.network.outputs[self.output_blob].shape return def predict(self, image): ''' This method is meant for running predictions on the input image. ''' if np.all(np.array(image.shape)): # Create input image to feed into the network net_input = {self.input_blob: self.preprocess_input(image)} # Start inference. Infer mode (async/sync) is input by user if self.async_infer: self.infer_request_handle = self.exec_network.start_async(request_id=0, inputs=net_input) # Wait for the result of the inference if self.exec_network.requests[0].wait(-1) == 0: # Get result of the inference request outputs = self.infer_request_handle.outputs[self.output_blob] eyes_coords, crop_left, crop_right = self.preprocess_output(outputs, image) else: self.infer_request_handle = self.exec_network.infer(inputs=net_input) # Get result of the inference request outputs = self.infer_request_handle[self.output_blob] eyes_coords, crop_left, crop_right = self.preprocess_output(outputs, image) else: eyes_coords = [] crop_left = [] crop_right = [] return eyes_coords, crop_left, crop_right def check_model(self): ''' This method check whether the model (along with the plugin) is support on the CPU device. If anything is missing (such as a CPU extension), let the user know and exit the programm. 
''' supported_layers = self.plugin.query_network(network=self.network, device_name='CPU') unsupported_layers = [l for l in self.network.layers.keys() if l not in supported_layers] if len(unsupported_layers) != 0: log.error("Unsupported layers found: {}".format(unsupported_layers)) if self.extensions: log.error("The extensions specified do not support some layers. Please specify a new extension.") else: log.error( "Please try to specify an extension library path by using the --extensions command line argument.") sys.exit(1) return def preprocess_input(self, image): ''' Method to process inputs before feeding them into the model for inference. ''' image = cv2.resize(image, (self.input_shape[3], self.input_shape[2])) image = image.transpose((2, 0, 1)) image = image.reshape(1, *image.shape) return image def preprocess_output(self, outputs, image): ''' Method to process outputs before feeding them into the next model for inference or for the last step of the app. ''' w = image.shape[1] h = image.shape[0] outputs = outputs[0] xl, yl = int(outputs[0][0][0] * w), int(outputs[1][0][0] * h) xr, yr = int(outputs[2][0][0] * w), int(outputs[3][0][0] * h) eyes_coords = [xl, yl, xr, yr] # Using the fact that eyes take 1/5 of your face width # define bounding boxes around the eyes according to this square_size = int(w / 10) left_eye_box = [xl - square_size, yl - square_size, xl + square_size, yl + square_size] right_eye_box = [xr - square_size, yr - square_size, xr + square_size, yr + square_size] crop_left = image[left_eye_box[1]:left_eye_box[3], left_eye_box[0]:left_eye_box[2]] crop_right = image[right_eye_box[1]:right_eye_box[3], right_eye_box[0]:right_eye_box[2]] return eyes_coords, crop_left, crop_right
src/facial_landmarks_detection.py
6,044
Class for the Face Landmarks Detection Model. Load and configure inference plugins for the specified target devices, and performs either synchronous or asynchronous modes for the specified infer requests. Set instance variables. This method check whether the model (along with the plugin) is support on the CPU device. If anything is missing (such as a CPU extension), let the user know and exit the programm. This method is for loading the model (in IR format) to the device specified by the user. Default device is CPU. This method is meant for running predictions on the input image. Method to process inputs before feeding them into the model for inference. Method to process outputs before feeding them into the next model for inference or for the last step of the app. Get model Initialize the plugin - load the inference engine API Plugin is the one already created for the Face Detection model Add a CPU extension, if applicable Read the IR as IENetwork Check if model and CPU plugin are supported Load the IENetwork into the plugin Get the input and output layers Create input image to feed into the network Start inference. Infer mode (async/sync) is input by user Wait for the result of the inference Get result of the inference request Get result of the inference request Using the fact that eyes take 1/5 of your face width define bounding boxes around the eyes according to this
1,395
en
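preprocess_output above maps the model's normalized landmark outputs to pixel coordinates and cuts square eye crops sized at a tenth of the frame width. A NumPy-only sketch of that geometry, with the input shapes and values assumed for the demo:

import numpy as np

def eye_crops(landmarks, frame):
    """landmarks: normalized coords [x_left, y_left, x_right, y_right] in [0, 1]."""
    h, w = frame.shape[:2]
    xl, yl = int(landmarks[0] * w), int(landmarks[1] * h)
    xr, yr = int(landmarks[2] * w), int(landmarks[3] * h)
    half = w // 10  # eyes span roughly a fifth of the face width, so half a box each side
    left = frame[max(yl - half, 0):yl + half, max(xl - half, 0):xl + half]
    right = frame[max(yr - half, 0):yr + half, max(xr - half, 0):xr + half]
    return (xl, yl, xr, yr), left, right

frame = np.zeros((384, 640, 3), dtype=np.uint8)  # dummy BGR face crop
coords, left_eye, right_eye = eye_crops(np.array([0.3, 0.4, 0.7, 0.4]), frame)
print(coords, left_eye.shape, right_eye.shape)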
0.849101
""" ASGI config for FYP project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FYP.settings') application = get_asgi_application()
src/FYP/FYP/asgi.py
383
ASGI config for FYP project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
209
en
0.7294
# -*- coding: utf-8 -*- from __future__ import print_function from IPython import get_ipython from IPython.display import ( display, Javascript, ) from IPython.core import magic_arguments from IPython.core.magic import ( Magics, magics_class, cell_magic, ) from IPython.utils.importstring import import_item import yaml __version__ = "0.2.0" @magics_class class YAMLMagics(Magics): """ Write and load YAML in the IPython Notebook. Uses SafeLoader by default. Example: %%yaml x -lyaml.Loader foo: bar: baz """ def __init__(self, shell): super(YAMLMagics, self).__init__(shell) @cell_magic @magic_arguments.magic_arguments() @magic_arguments.argument( "var_name", default=None, nargs="?", help="""Name of local variable to set to parsed value""" ) @magic_arguments.argument( "-l", "--loader", default="yaml.SafeLoader", help="""Dotted-notation class to use for loading""" ) def yaml(self, line, cell): line = line.strip() args = magic_arguments.parse_argstring(self.yaml, line) display(Javascript( """ require( [ "notebook/js/codecell", "codemirror/mode/yaml/yaml" ], function(cc){ cc.CodeCell.options_default.highlight_modes.magic_yaml = { reg: ["^%%yaml"] } } ); """)) loader = get_ipython().user_global_ns.get(args.loader, None) if loader is None: loader = import_item(args.loader) try: val = yaml.load(cell, Loader=loader) except yaml.YAMLError as err: print(err) return if args.var_name is not None: get_ipython().user_ns[args.var_name] = val else: return val def load_ipython_extension(ip): ip = get_ipython() ip.register_magics(YAMLMagics)
yamlmagic.py
2,090
Write and load YAML in the IPython Notebook. Uses SafeLoader by default. Example: %%yaml x -lyaml.Loader foo: bar: baz -*- coding: utf-8 -*-
160
en
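What the %%yaml magic above ultimately does is parse the cell body with a YAML loader (SafeLoader by default) and bind the result to the named variable. A plain-Python sketch of that step outside the notebook; in a notebook the rough equivalent would be %load_ext yamlmagic followed by a %%yaml cfg cell, assuming the module is importable:

import yaml

cell = """
database:
  host: localhost
  port: 5432
"""

# What `%%yaml cfg` does under the hood: parse with SafeLoader and bind the result.
cfg = yaml.load(cell, Loader=yaml.SafeLoader)
assert cfg["database"]["port"] == 5432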
0.575212
import h5py import pickle import numpy as np # import read_affect_data as r # from tqdm import tqdm import random from PIL import Image, ImageOps, ImageEnhance import colorsys # def read_h5_data_set(path): # f = h5py.File(path, 'r') # time_stamps = list(f[list(f.keys())[0]].keys()) # d = {time : dict() for time in time_stamps} # for feature in list(f.keys()): # if hasattr(f[feature], 'keys'): # for time in tqdm(list(f[feature].keys())): # k = list(f[feature][time].keys())[0] # d[time][feature] = np.array(f[feature][time][k]) # return d # def read_pkl_data_set(path): # f = r.load_pickle(path) # time_stamps = list(f[list(f.keys())[0]].keys()) # d = {time : dict() for time in time_stamps} # for feature in list(f.keys()): # if hasattr(f[feature], 'keys'): # for time in tqdm(list(f[feature].keys())): # if hasattr(f[feature][time], 'keys'): # for k in list(f[feature][time].keys()): # d[time][feature] = np.array(f[feature][time][k]) # return d ############################################################################## # Visual def visual_robustness(tests, noise_level=0.3, gray=True, contrast=True, s_and_p=True, gaus=True, rot=True, crop=True): noises = [] if gray: noises.append(grayscale) if contrast: noises.append(low_contrast) if s_and_p: noises.append(salt_and_pepper) if gaus: noises.append(gaussian) if rot: noises.append(rotate) if crop: noises.append(random_crop) robustness_tests = [] for i in range(len(tests)): img = Image.fromarray(tests[i]) for noise in noises: img = noise(img, noise_level) robustness_tests.append(np.array(img)) return robustness_tests def grayscale(img, p): if np.random.sample() <= p: return ImageOps.grayscale(img) else: return img def low_contrast(img, factor): if np.random.sample() <= p: enhancer = ImageEnhance.Contrast(img) return enhancer.enhance(factor) else: return img def inversion(img, p): if np.random.sample() <= p: return ImageOps.invert(img) else: return img def WB(img, p): if np.random.sample() <= p: kelvin_table = {1000: (255, 56, 0), 1500: (255, 109, 0), 2000: (255, 137, 18), 2500: (255, 161, 72), 3000: (255, 180, 107), 3500: (255, 196, 137), 4000: (255, 209, 163), 4500: (255, 219, 186), 5000: (255, 228, 206), 5500: ( 255, 236, 224), 6000: (255, 243, 239), 6500: (255, 249, 253), 7000: (245, 243, 255), 7500: (235, 238, 255), 8000: (227, 233, 255), 8500: (220, 229, 255), 9000: (214, 225, 255), 9500: (208, 222, 255), 10000: (204, 219, 255)} temp = np.random.choice(kelvin_table.keys()) r, g, b = kelvin_table[temp] matrix = (r / 255.0, 0.0, 0.0, 0.0, 0.0, g / 255.0, 0.0, 0.0, 0.0, 0.0, b / 255.0, 0.0) return img.convert('RGB', matrix) else: return img def colorize(img, p): if np.random.sample() <= p: color = np.random.choice(['red', 'blue', 'green']) layer = Image.new('RGB', img.size, color) return Image.blend(img, layer, 0.3) else: return img def salt_and_pepper(img, p): if np.random.sample() <= p: output = np.copy(np.array(img)) nb_salt = np.ceil(p*output.size*0.5) coords = [np.random.randint(0, i-1, int(nb_salt)) for i in output.shape] for i in coords: output[i] = 1 nb_pepper = np.ceil(p*output.size*0.5) coords = [np.random.randint(0, i-1, int(nb_pepper)) for i in output.shape] for i in coords: output[i] = 0 return Image.fromarray(output) else: return img def gaussian(img, p): if np.random.sample() <= p: height, width = np.array(img).shape gauss = np.random.normal(0, p, (height, width)) return Image.fromarray((np.array(img)+gauss).astype('uint8')) else: return img def rotate(img, p): if np.random.sample() <= p: angle = np.random.random_sample()*40-20 
return img.rotate(angle, Image.BILINEAR) else: return img def horizontal_flip(img, p): if np.random.sample() <= p: return img.transpose(Image.FLIP_LEFT_RIGHT) else: return img def random_crop(img, p): if np.random.sample() <= p: dim = np.array(img).shape height = dim[0] width = dim[1] cropped_height = height / 5 cropped_width = width / 5 init_height = np.random.random_sample() * cropped_height init_width = np.random.random_sample() * cropped_width end_height = height - cropped_height + init_height end_width = width - cropped_width + init_width return img.crop((init_width, init_height, end_width, end_height)).resize((height, width)) else: return img def periodic(img, periodic_noise_filename="periodic_noise"): height = img.height width = img.width output = [] for i in range(6): noise = Image.open("{}_{}.png".format( periodic_noise_filename, i+1)).convert("RGBA") noise = random_crop(rotate(noise.resize( (width*2, height*2)), np.random.random_sample()*360, 'white'), height, width) output.append(Image.blend(img.convert("RGBA"), noise, 0.3)) return output ############################################################################## # Text def text_robustness(tests, noise_level=0.3, swap=True, rand_mid=True, typo=True, sticky=True, omit=True): noises = [] if swap: noises.append(swap_letter) if rand_mid: noises.append(random_mid) if typo: noises.append(qwerty_typo) if sticky: noises.append(sticky_keys) if omit: noises.append(omission) robustness_tests = [] for i in range(len(tests)): newtext = [] text = tests[i].lower().split() for word in text: if len(word) > 3 and np.random.sample() <= noise_level: mode = np.random.randint(len(noises)) newtext.append(noises[mode](word)) else: newtext.append(word) robustness_tests.append(' '.join(newtext)) return np.array(robustness_tests) def last_char(word): for i in range(len(word)): if word[len(word)-1-i].isalpha(): return len(word) - 1 - i def swap_letter(word): # swap two random adjacent letters last = last_char(word) pos = np.random.randint(last-2) + 1 return word[:pos] + word[pos+1] + word[pos] + word[pos+2:] def random_mid(word): # randomly permute the middle chunk of a word (all letters except the first and last letter) last = last_char(word) mid = [char for char in word[1:last]] np.random.shuffle(mid) return word[0]+''.join(mid)+word[last:] def qwerty_typo(word, num_typo=1): # randomly replace num_typo number of letters of a word to a one adjacent to it on qwerty keyboard qwerty = {'q': ['w'], 'w': ['q', 'e', 's'], 'e': ['w', 'r', 'd'], 'r': ['e', 't', 'f'], 't': ['r', 'g', 'y'], 'y': ['t', 'u', 'h'], 'u': ['y', 'i', 'j'], 'i': ['u', 'o', 'k'], 'o': ['i', 'p', 'l'], 'p': ['o'], 'a': ['q', 's', 'z'], 's': ['a', 'w', 'd', 'x', 'z'], 'd': ['s', 'e', 'f', 'x', 'c'], 'f': ['d', 'r', 'g', 'c', 'v'], 'g': [ 'f', 't', 'h', 'v', 'b'], 'h': ['g', 'y', 'j', 'b', 'n'], 'j': ['h', 'u', 'k', 'n', 'm'], 'k': ['j', 'i', 'l', 'm'], 'l': ['k', 'o'], 'z': ['a', 's', 'x'], 'x': ['z', 's', 'd', 'c'], 'c': ['x', 'd', 'f', 'v'], 'v': ['c', 'f', 'g', 'b'], 'b': ['v', 'g', 'h', 'n'], 'n': ['b', 'h', 'm', 'j'], 'm': ['n', 'j', 'k']} last = last_char(word) typos = np.arange(last+1) np.random.shuffle(typos) for i in range(num_typo): typo = qwerty[word[typos[i]]] key = typo[np.random.randint(len(typo))] word = word[:typos[i]] + key + word[typos[i]+1:] return word def sticky_keys(word, num_sticky=1): # randomly repeat num_sticky number of letters of a word last = last_char(word) sticky = np.arange(last+1) np.random.shuffle(sticky) for i in range(num_sticky): word = 
word[:sticky[i]] + word[sticky[i]] + word[sticky[i]:] return word def omission(word, num_omit=1): # randomly omit num_omit number of letters of a word last = last_char(word) for i in range(num_omit): omit = np.random.randint(last-1) + 1 word = word[:omit] + word[omit+1:] last -= 1 return word ############################################################################## # Audio def audio_robustness(tests, noise_level=0.3, noises=None): if noises == None: noises = [additive_white_gaussian_noise, audio_random_dropout, audio_structured_dropout] robustness_tests = np.zeros(tests.shape) for i in range(len(tests)): if np.random.sample() <= noise_level: mode = np.random.randint(len(noises)) robustness_tests[i] = noises[mode](tests[i], noise_level) return robustness_tests def additive_white_gaussian_noise(signal, noise_level): # SNR = 10 * log((RMS of signal)^2 / (RMS of noise)^2) # RMS_s = np.sqrt(np.mean(signal*signal)) # RMS_n = np.sqrt(RMS_s*RMS_s / (np.power(10, SNR/10))) noise = np.random.normal(0, noise_level, signal.shape[0]) return signal + noise def audio_structured_dropout(sig, p, step=10): # each consecutive time steps are chosen with probability p to be dropped res = [sig[i] for i in range(len(sig))] for i in range(len(res)-step+1): if (res[i] != 0) and np.random.random_sample() < p: for j in range(step): res[i+j] = 0 return res def audio_random_dropout(sig, p): return audio_structured_dropout(sig, 1, p) ############################################################################## # Time-Series def timeseries_robustness(tests, noise_level=0.3, noise=True, rand_drop=True, struct_drop=True, modality_map=None): robust_tests = np.array(tests) if noise: robust_tests = white_noise(robust_tests, noise_level) if rand_drop: robust_tests = random_drop(robust_tests, noise_level) if struct_drop: robust_tests = structured_drop(robust_tests, noise_level, modality_map) return robust_tests # add noise sampled from zero-mean Gaussian with standard deviation p at every time step def white_noise(data, p): for i in range(len(data)): for time in range(len(data[i])): data[i][time] += np.random.normal(0, p) return data # each entry is dropped independently with probability p def random_drop(data, p): for i in range(len(data)): for time in range(len(data[i])): for feature in range(len(data[i][time])): if np.random.random_sample() < p: data[i][time][feature] = 0 # else: # result = dict() # for time in data: # for feature in data[time]: # if np.random.random_sample() < p: # result[time][feature] = np.zeros(data[time][feature].shape) # else: # result[time][feature] = data[time][feature] return data # independently for each modality, each time step is chosen with probability p # at which all feature dimensions are dropped def structured_drop(data, p, modality_map): for i in range(len(data)): for time in range(len(data[i])): if np.random.random_sample() < p: data[i][time] = np.zeros(data[i][time].shape) # else: # result = dict() # for time in data: # for modality in modality_map.keys(): # if np.random.random_sample() < p: # for feature in modality_map[modality]: # result[time][feature] = np.zeros(data[time][feature].shape) # else: # for feature in modality_map[modality]: # result[time][feature] = data[time][feature] return data ############################################################################## # Tabular def add_tabular_noise(tests, noise_level=0.3, drop=True, swap=True): robust_tests = np.array(tests) if drop: robust_tests = drop_entry(robust_tests, noise_level) if swap: robust_tests = 
swap_entry(robust_tests, noise_level) return robust_tests def drop_entry(data, p): for i in range(len(data)): for j in range(len(data[i])): if np.random.random_sample() < p: data[i][j] = 0 else: data[i][j] = data[i][j] return data def swap_entry(data, p): for i in range(len(data)): for j in range(1, len(data[i])): if np.random.random_sample() < p: data[i][j] = data[i][j-1] data[i][j-1] = data[i][j] return data if __name__ == '__main__': print('='*5 + 'Multi Affect' + '='*5) print('1. CMU-MOSI, Aligned') print('2. CMU-MOSI, Unaligned') print('3. CMU-MOSEI, Aligned') print('4. CMU-MOSEI, Unaligned') print('5. CMU-POM, Aligned') print('6. CMU-POM, Unaligned') print('7. UR-Funny') print('8. Sarcasm') print('9. Deception') opt = int(input('Input option: ')) print('='*22) if opt == 1: data = read_h5_data_set('./mosi/mosi.hdf5') modality_map = {'vision': ['FACET_4.2', 'OpenFace_1'], 'text': [ 'words'], 'vocal': ['COVAREP', 'OpenSmile_emobase2010']} elif opt == 2: print("To be implemented!") # data = read_h5_data_set('./mosi/mosi_unalign.hdf5') elif opt == 3: data = read_h5_data_set('./mosei/mosei.hdf5') modality_map = {'vision': ['OpenFace_2'], 'text': ['words'], 'vocal': ['COVAREP']} elif opt == 4: print("To be implemented!") # data = read_h5_data_set('./mosei/mosei_unalign.hdf5') elif opt == 5: data = read_h5_data_set('./pom/pom.hdf5') modality_map = {'vision': ['FACET_4.2', 'OpenFace2'], 'text': [ 'words'], 'vocal': ['COVAREP']} elif opt == 6: print("To be implemented!") # data = read_h5_data_set('./pom/pom_unalign.hdf5') elif opt == 7: data = read_pkl_data_set('./urfunny/urfunny.pkl') # time = data[list(data.keys())[0]] # k = data[list(data[time].keys())[0]] elif opt == 8: print("To be implemented!") # display_sarcasm_data_set('./sarcasm/sarcasm.pkl') elif opt == 9: print("To be implemented!") # display_pkl_data_set('./deception/deception.pkl') else: print('Wrong Input!')
deprecated/robustness_tests_draft.py
14,949
import read_affect_data as r from tqdm import tqdm def read_h5_data_set(path): f = h5py.File(path, 'r') time_stamps = list(f[list(f.keys())[0]].keys()) d = {time : dict() for time in time_stamps} for feature in list(f.keys()): if hasattr(f[feature], 'keys'): for time in tqdm(list(f[feature].keys())): k = list(f[feature][time].keys())[0] d[time][feature] = np.array(f[feature][time][k]) return d def read_pkl_data_set(path): f = r.load_pickle(path) time_stamps = list(f[list(f.keys())[0]].keys()) d = {time : dict() for time in time_stamps} for feature in list(f.keys()): if hasattr(f[feature], 'keys'): for time in tqdm(list(f[feature].keys())): if hasattr(f[feature][time], 'keys'): for k in list(f[feature][time].keys()): d[time][feature] = np.array(f[feature][time][k]) return d Visual Text swap two random adjacent letters randomly permute the middle chunk of a word (all letters except the first and last letter) randomly replace num_typo number of letters of a word to a one adjacent to it on qwerty keyboard randomly repeat num_sticky number of letters of a word randomly omit num_omit number of letters of a word Audio SNR = 10 * log((RMS of signal)^2 / (RMS of noise)^2) RMS_s = np.sqrt(np.mean(signal*signal)) RMS_n = np.sqrt(RMS_s*RMS_s / (np.power(10, SNR/10))) each consecutive time steps are chosen with probability p to be dropped Time-Series add noise sampled from zero-mean Gaussian with standard deviation p at every time step each entry is dropped independently with probability p else: result = dict() for time in data: for feature in data[time]: if np.random.random_sample() < p: result[time][feature] = np.zeros(data[time][feature].shape) else: result[time][feature] = data[time][feature] independently for each modality, each time step is chosen with probability p at which all feature dimensions are dropped else: result = dict() for time in data: for modality in modality_map.keys(): if np.random.random_sample() < p: for feature in modality_map[modality]: result[time][feature] = np.zeros(data[time][feature].shape) else: for feature in modality_map[modality]: result[time][feature] = data[time][feature] Tabular data = read_h5_data_set('./mosi/mosi_unalign.hdf5') data = read_h5_data_set('./mosei/mosei_unalign.hdf5') data = read_h5_data_set('./pom/pom_unalign.hdf5') time = data[list(data.keys())[0]] k = data[list(data[time].keys())[0]] display_sarcasm_data_set('./sarcasm/sarcasm.pkl') display_pkl_data_set('./deception/deception.pkl')
2,823
en
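The time-series perturbations above (white noise plus independent random dropout) loop element by element; a vectorized NumPy sketch of the same two corruptions, with the (batch, time, features) layout assumed:

import numpy as np

def perturb_timeseries(data, noise_level=0.3, rng=None):
    """data: array of shape (batch, time, features). Returns a noisy, randomly-dropped copy."""
    rng = np.random.default_rng(rng)
    noisy = data + rng.normal(0.0, noise_level, size=data.shape)  # additive white noise
    drop_mask = rng.random(data.shape) < noise_level              # drop entries independently
    noisy[drop_mask] = 0.0
    return noisy

batch = np.ones((2, 5, 3))
print(perturb_timeseries(batch, noise_level=0.5, rng=0))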
0.59783
import os, sys import ROOT from ROOT import TH1F,TH2F,TFile,TTree,TCanvas, TProfile, TNtuple, gErrorIgnoreLevel, kInfo, kWarning from tqdm import tqdm from particle import Particle, PDGID tqdm_disable = False ROOT.gErrorIgnoreLevel = kWarning; File = TFile("/home/kshi/Zprime/Zp_data_Ntuple/WmTo3l_ZpM45.root","READ") tree = File.Get("Ana/passedEvents") nEntries = tree.GetEntries() W, p, none, other = 0, 0, 0, 0 others = [] for i in tqdm(range(0, nEntries)): tree.GetEntry(i) #for j in range(0,tree.lep_matchedR03_MomMomId.size()): # if abs(tree.lep_matchedR03_MomMomId[j])>=11 and abs(tree.lep_matchedR03_MomMomId[j])<=18: # print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " MomMomid is: " + lepton#str(tree.lep_matchedR03_MomMomId[j]) #for j in range(0,tree.lep_matchedR03_PdgId.size()): # if (abs(tree.lep_matchedR03_PdgId[j])<11 or abs(tree.lep_matchedR03_PdgId[j]>18)) and tree.lep_matchedR03_PdgId[j]!=0: # print "Event:" + str(tree.Event) + " has lepton id of " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name #for j in range(0,tree.GENlep_id.size()): # if PDGID(tree.GENlep_id[j]).is_valid==False: # print "Invalid lep id " + str(tree.GENlep_id[j]) # if PDGID(tree.GENlep_MomId[j]).is_valid==False: # print "Invalid lep mom id " + str(tree.GENlep_MomId[j]) # if PDGID(tree.GENlep_MomMomId[j]).is_valid==False: # print "Invalid lep mom mom id " + str(tree.GENlep_MomMomId[j]) # else: # print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.GENlep_id[j]).name + " that came from a " + Particle.from_pdgid(tree.GENlep_MomId[j]).name + " which came from a " + Particle.from_pdgid(tree.GENlep_MomMomId[j]).name for j in range(0,tree.lep_matchedR03_PdgId.size()): #if PDGID(tree.lep_matchedR03_PdgId[j]).is_valid==False: # print "Invalid lep id " + str(tree.lep_matchedR03_PdgId[j]) #if PDGID(tree.lep_matchedR03_MomId[j]).is_valid==False: # print "Invalid lep mom id " + str(tree.lep_matchedR03_MomId[j]) #if PDGID(tree.lep_matchedR03_MomMomId[j]).is_valid==False: # print "Invalid lep mom mom id " + str(tree.lep_matchedR03_MomMomId[j]) ##if tree.lep_matchedR03_PdgId[j]!=999888 and tree.lep_matchedR03_MomId!=999888 and tree.lep_matchedR03_MomMomId[j]!=999888: ## print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + " that came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomId[j]).name + " which came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name #elif tree.lep_matchedR03_MomId[j]==999888: # print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + " that came from a " + str(tree.lep_matchedR03_MomId[j]) + " which came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name if tree.lep_matchedR03_MomId[j]==999888: if abs(tree.lep_matchedR03_MomMomId[j])==24: W+=1 elif abs(tree.lep_matchedR03_MomMomId[j])==2212: p+=1 elif abs(tree.lep_matchedR03_MomMomId[j])==0: none+=1 else: other+=1 others.append(tree.lep_matchedR03_MomMomId[j]) print "Sources of Z':" print "W = " + str(W) + ", p = " + str(p) + ", none = " + str(none) + ", other = " + str(other) for i in range(0, len(others)): print "Other MomMomId: " + str(others[i])
Wto3l/mom_counting.py
3,474
for j in range(0,tree.lep_matchedR03_MomMomId.size()): if abs(tree.lep_matchedR03_MomMomId[j])>=11 and abs(tree.lep_matchedR03_MomMomId[j])<=18: print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " MomMomid is: " + leptonstr(tree.lep_matchedR03_MomMomId[j])for j in range(0,tree.lep_matchedR03_PdgId.size()): if (abs(tree.lep_matchedR03_PdgId[j])<11 or abs(tree.lep_matchedR03_PdgId[j]>18)) and tree.lep_matchedR03_PdgId[j]!=0: print "Event:" + str(tree.Event) + " has lepton id of " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).namefor j in range(0,tree.GENlep_id.size()): if PDGID(tree.GENlep_id[j]).is_valid==False: print "Invalid lep id " + str(tree.GENlep_id[j]) if PDGID(tree.GENlep_MomId[j]).is_valid==False: print "Invalid lep mom id " + str(tree.GENlep_MomId[j]) if PDGID(tree.GENlep_MomMomId[j]).is_valid==False: print "Invalid lep mom mom id " + str(tree.GENlep_MomMomId[j]) else: print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.GENlep_id[j]).name + " that came from a " + Particle.from_pdgid(tree.GENlep_MomId[j]).name + " which came from a " + Particle.from_pdgid(tree.GENlep_MomMomId[j]).nameif PDGID(tree.lep_matchedR03_PdgId[j]).is_valid==False: print "Invalid lep id " + str(tree.lep_matchedR03_PdgId[j])if PDGID(tree.lep_matchedR03_MomId[j]).is_valid==False: print "Invalid lep mom id " + str(tree.lep_matchedR03_MomId[j])if PDGID(tree.lep_matchedR03_MomMomId[j]).is_valid==False: print "Invalid lep mom mom id " + str(tree.lep_matchedR03_MomMomId[j])if tree.lep_matchedR03_PdgId[j]!=999888 and tree.lep_matchedR03_MomId!=999888 and tree.lep_matchedR03_MomMomId[j]!=999888: print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + " that came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomId[j]).name + " which came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).nameelif tree.lep_matchedR03_MomId[j]==999888: print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + " that came from a " + str(tree.lep_matchedR03_MomId[j]) + " which came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name
2,277
en
0.619532
import logging

from korbit.client.korbit_client import KorbitClient

logging.basicConfig(level=logging.INFO)

properties_sandbox_file = '../properties_sandbox_test.json'
context_sandbox_file = '../context_sandbox.json'

kbclient = KorbitClient(properties_sandbox_file, context_sandbox_file)
print(kbclient.getUserInfo())

# 매수 Buy
# print( kbclient.buy(price=300000, coin_amount=1) )
#
# 매도 Sell
# print( kbclient.sell(price=300000, coin_amount=1) )

print( kbclient.getOpenOrders() )

# Wallet Test
wallet = kbclient.getWallet()
balance = wallet['balance']
pending_orders = wallet['pendingOrders']
available = wallet['available']
print(balance)
print(pending_orders)
print(available)
test/korbit/client/korbit_client_tests.py
694
매수 Buy print( kbclient.buy(price=300000, coin_amount=1) ) 매도 Sell print( kbclient.sell(price=300000, coin_amount=1) ) Wallet Test
130
en
0.472198
import asyncio
import socket

from stor.server.server import StorServer
from stor.types.peer_info import PeerInfo


def start_reconnect_task(server: StorServer, peer_info_arg: PeerInfo, log, auth: bool):
    """
    Start a background task that checks connection and reconnects periodically to a peer.
    """
    # If peer_info_arg is already an address, use it, otherwise resolve it here.
    if peer_info_arg.is_valid():
        peer_info = peer_info_arg
    else:
        peer_info = PeerInfo(socket.gethostbyname(peer_info_arg.host), peer_info_arg.port)

    async def connection_check():
        while True:
            peer_retry = True
            for _, connection in server.all_connections.items():
                if connection.get_peer_info() == peer_info or connection.get_peer_info() == peer_info_arg:
                    peer_retry = False
            if peer_retry:
                log.info(f"Reconnecting to peer {peer_info}")
                try:
                    await server.start_client(peer_info, None, auth=auth)
                except Exception as e:
                    log.info(f"Failed to connect to {peer_info} {e}")
            await asyncio.sleep(3)

    return asyncio.create_task(connection_check())
stor/server/reconnect_task.py
1,235
Start a background task that checks connection and reconnects periodically to a peer. If peer_info_arg is already an address, use it, otherwise resolve it here.
162
en
0.854277
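A self-contained sketch of how start_reconnect_task from the file above behaves, using a stub in place of a real StorServer (the StubServer class, its fields, and the peer address are invented here purely for illustration; only PeerInfo and the helper come from the stor package, which is assumed to be installed):

import asyncio
import logging

from stor.server.reconnect_task import start_reconnect_task
from stor.types.peer_info import PeerInfo


class StubServer:
    """Pretends every connection attempt fails, so the task keeps retrying."""
    all_connections = {}

    async def start_client(self, peer_info, handler, auth=False):
        raise ConnectionRefusedError("peer not reachable")


async def main():
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("reconnect")
    peer = PeerInfo("127.0.0.1", 8444)   # placeholder address
    # The helper returns an asyncio.Task; keep a reference so it is not garbage-collected.
    task = start_reconnect_task(StubServer(), peer, log, auth=False)
    await asyncio.sleep(10)              # observe a few 3-second retry cycles
    task.cancel()

asyncio.run(main())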
""" Generates code metrics for a given project. Whereas code_metrics.py operates on a single stream of source code input, this program walks a project tree and generates reports based on all of the source code found. TODO: project config should be supplied as input, not imported """ import os, shutil import code_metrics, metrics_formatter, stats, config def find_available_filename(filename): if not os.path.exists(filename): return filename attempts = 1 filename += str(attempts) while os.path.exists(filename): attempts += 1 if (attempts > 999): print('error: could not find available filename', filename) exit() filename = filename[:len(filename)-1] + str(attempts) return filename def is_code_file(path): filename, file_ext = os.path.splitext(path) return file_ext in config.code_filename_extensions def find_files(root_path, filter): result = [] for root, dirs, files in os.walk(root_path): for file_name in files: if not filter(file_name): continue path = os.path.join(root, file_name) result.append(path) return result def add_project_totals(project_report, file_reports): project_report['file_count'] = len(file_reports) project_report['function_count'] = 0 project_report['line_count'] = 0 project_report['lines_ending_in_whitespace_count'] = 0 project_report['line_length_distribution'] = {} project_report['line_indent_distribution'] = {} for filename, file_report in file_reports.items(): if file_report == {}: continue project_report['function_count'] += len(file_report['functions']) project_report['line_count'] += file_report['line_count'] # TODO: figure out how to aggregate project stats like this #project_report['lines_ending_in_whitespace_count'] += file_report['lines_ending_in_whitespace_count'] #stats.merge_into_distribution(project_report['line_length_distribution'], file_report['line_length_distribution']) #stats.merge_into_distribution(project_report['line_indent_distribution'], file_report['line_indent_distribution']) def report(project_root): file_reports = {} for path in find_files(project_root, is_code_file): target_lang = code_metrics.file_ext_lang(path) with open(path, 'r') as input_file: try: file_reports[path] = code_metrics.report(path, input_file.read(), target_lang) except IOError: continue project_report = { 'source_path': project_root, 'files': file_reports } add_project_totals(project_report, file_reports) return project_report def write_report_file(report, path, target_dir): if report == {}: return filename = metrics_formatter.convert_path_to_report_filename(path) out_file_path = target_dir + '/' + filename out_file_path = find_available_filename(out_file_path) with open(out_file_path, 'w') as output_file: metrics_formatter.write_report(report, 'html', output_file) def write_report(project_report, target_dir): if os.path.exists(target_dir): print('error: cannot create output dir', target_dir) exit() os.mkdir(target_dir) with open(target_dir + '/' + 'index.html', 'w') as output_file: metrics_formatter.write_project_index(project_report, 'html', output_file) for path, report in project_report['files'].items(): write_report_file(report, path, target_dir) if __name__ == '__main__': # TODO: make output format configurable output_dir = config.project_report_output_dir # TODO: also accept command line flag output_dir = find_available_filename(output_dir) write_report(report(config.project_root), output_dir) shutil.copy('Chart.min.js', output_dir)
project_metrics.py
3,550
Generates code metrics for a given project. Whereas code_metrics.py operates on a single stream of source code input, this program walks a project tree and generates reports based on all of the source code found. TODO: project config should be supplied as input, not imported TODO: figure out how to aggregate project stats like thisproject_report['lines_ending_in_whitespace_count'] += file_report['lines_ending_in_whitespace_count']stats.merge_into_distribution(project_report['line_length_distribution'], file_report['line_length_distribution'])stats.merge_into_distribution(project_report['line_indent_distribution'], file_report['line_indent_distribution']) TODO: make output format configurable TODO: also accept command line flag
739
en
0.591556
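The script above avoids clobbering existing output by probing for a free name. A tiny illustration of find_available_filename's naming scheme, assuming project_metrics.py and its sibling modules (code_metrics, config, ...) are importable from the working directory; the paths here are examples only:

import os, tempfile

from project_metrics import find_available_filename

tmpdir = tempfile.mkdtemp()
taken = os.path.join(tmpdir, "report")
open(taken, "w").close()                 # "report" already exists
print(find_available_filename(taken))    # -> ".../report1", the first free name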
""" switchboard.manager ~~~~~~~~~~~~~~~~ :copyright: (c) 2015 Kyle Adams. :license: Apache License 2.0, see LICENSE for more details. """ import logging import sqlalchemy as sqla from .base import ModelDict from .models import ( Model, Switch, DISABLED, SELECTIVE, GLOBAL, INHERIT, INCLUDE, EXCLUDE, ) from .proxy import SwitchProxy from .settings import settings, Settings from .store import SQLAlchemyStore log = logging.getLogger(__name__) # These are (mostly) read-only module variables since we want it shared among # any and all threads. The only exception to read-only is when they are # populated on Switchboard startup (i.e., operator.register()). registry = {} registry_by_namespace = {} def nested_config(config): cfg = {} token = 'switchboard.' for k, v in config.iteritems(): if k.startswith(token): cfg[k.replace(token, '')] = v return cfg def configure(config={}, nested=False, cache=None): """Useful for when you need to control Switchboard's setup.""" if nested: config = nested_config(config) # Re-read settings to make sure we have everything. Settings.init(cache=cache, **config) operator.cache = cache # Establish the connection to the database. timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10) dburl = settings.SWITCHBOARD_DBURL if dburl: engine = sqla.create_engine( dburl, connect_args={'connect_timeout': timeout}) Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE) # Register the builtins. __import__('switchboard.builtins') class SwitchManager(ModelDict): DISABLED = DISABLED SELECTIVE = SELECTIVE GLOBAL = GLOBAL INHERIT = INHERIT INCLUDE = INCLUDE EXCLUDE = EXCLUDE def __init__(self, *args, **kwargs): # Inject args and kwargs that are known quantities; the SwitchManager # will always deal with the Switch model and so on. new_args = [Switch] new_args.extend(args) kwargs['key'] = 'key' kwargs['value'] = 'value' self.result_cache = None self.context = {} super(SwitchManager, self).__init__(*new_args, **kwargs) def __unicode__(self): return "<%s: %s (%s)>" % (self.__class__.__name__, getattr(self, 'model', ''), registry.values()) def __getitem__(self, key): """ Returns a SwitchProxy, rather than a Switch. It allows us to easily extend the Switches method and automatically include our manager instance. """ return SwitchProxy(self, super(SwitchManager, self).__getitem__(key)) def with_result_cache(func): """ Decorator specifically for is_active. If self.result_cache is set to a {} the is_active results will be cached for each set of params. """ def inner(self, *args, **kwargs): dic = self.result_cache cache_key = None if dic is not None: cache_key = (args, tuple(kwargs.items())) try: result = dic.get(cache_key) except TypeError as e: # not hashable log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s', args[0], e, repr(cache_key)[:200]) cache_key = None else: if result is not None: return result result = func(self, *args, **kwargs) if cache_key is not None: dic[cache_key] = result return result return inner @with_result_cache def is_active(self, key, *instances, **kwargs): """ Returns ``True`` if any of ``instances`` match an active switch. Otherwise returns ``False``. 
>>> operator.is_active('my_feature', request) #doctest: +SKIP """ try: default = kwargs.pop('default', False) # Check all parents for a disabled state parts = key.split(':') if len(parts) > 1: child_kwargs = kwargs.copy() child_kwargs['default'] = None result = self.is_active(':'.join(parts[:-1]), *instances, **child_kwargs) if result is False: return result elif result is True: default = result try: switch = self[key] except KeyError: # switch is not defined, defer to parent return default if switch.status == GLOBAL: return True elif switch.status == DISABLED: return False elif switch.status == INHERIT: return default conditions = switch.value # If no conditions are set, we inherit from parents if not conditions: return default instances = list(instances) if instances else [] instances.extend(self.context.values()) # check each switch to see if it can execute return_value = False for namespace, condition in conditions.iteritems(): condition_set = registry_by_namespace.get(namespace) if not condition_set: continue result = condition_set.has_active_condition(condition, instances) if result is False: return False elif result is True: return_value = True except: log.exception('Error checking if switch "%s" is active', key) return_value = False # there were no matching conditions, so it must not be enabled return return_value def register(self, condition_set): """ Registers a condition set with the manager. >>> condition_set = MyConditionSet() #doctest: +SKIP >>> operator.register(condition_set) #doctest: +SKIP """ if callable(condition_set): condition_set = condition_set() registry[condition_set.get_id()] = condition_set registry_by_namespace[condition_set.get_namespace()] = condition_set def unregister(self, condition_set): """ Unregisters a condition set with the manager. >>> operator.unregister(condition_set) #doctest: +SKIP """ if callable(condition_set): condition_set = condition_set() registry.pop(condition_set.get_id(), None) registry_by_namespace.pop(condition_set.get_namespace(), None) def get_condition_set_by_id(self, switch_id): """ Given the identifier of a condition set (described in ConditionSet.get_id()), returns the registered instance. """ return registry[switch_id] def get_condition_sets(self): """ Returns a generator yielding all currently registered ConditionSet instances. """ return registry.itervalues() def get_all_conditions(self): """ Returns a generator which yields groups of lists of conditions. >>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP >>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP """ cs = self.get_condition_sets() for condition_set in sorted(cs, key=lambda x: x.get_group_label()): group = unicode(condition_set.get_group_label()) for field in condition_set.fields.itervalues(): yield condition_set.get_id(), group, field def as_request(self, user=None, ip_address=None): from .helpers import MockRequest return MockRequest(user, ip_address) auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True) operator = SwitchManager(auto_create=auto_create)
switchboard/manager.py
8,168
Returns a SwitchProxy, rather than a Switch. It allows us to easily extend the Switches method and automatically include our manager instance. Useful for when you need to control Switchboard's setup. Returns a generator which yields groups of lists of conditions. >>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP >>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP Given the identifier of a condition set (described in ConditionSet.get_id()), returns the registered instance. Returns a generator yielding all currently registered ConditionSet instances. Returns ``True`` if any of ``instances`` match an active switch. Otherwise returns ``False``. >>> operator.is_active('my_feature', request) #doctest: +SKIP Registers a condition set with the manager. >>> condition_set = MyConditionSet() #doctest: +SKIP >>> operator.register(condition_set) #doctest: +SKIP Unregisters a condition set with the manager. >>> operator.unregister(condition_set) #doctest: +SKIP Decorator specifically for is_active. If self.result_cache is set to a {} the is_active results will be cached for each set of params. switchboard.manager ~~~~~~~~~~~~~~~~ :copyright: (c) 2015 Kyle Adams. :license: Apache License 2.0, see LICENSE for more details. These are (mostly) read-only module variables since we want it shared among any and all threads. The only exception to read-only is when they are populated on Switchboard startup (i.e., operator.register()). Re-read settings to make sure we have everything. Establish the connection to the database. Register the builtins. Inject args and kwargs that are known quantities; the SwitchManager will always deal with the Switch model and so on. not hashable Check all parents for a disabled state switch is not defined, defer to parent If no conditions are set, we inherit from parents check each switch to see if it can execute there were no matching conditions, so it must not be enabled
1,973
en
0.824767
#!/usr/bin/env python # -*- coding: utf-8 -*- #---------------------------------------------------------------------------- # Created By Rodrigo Wilkens # Last update 27/March/2022 # version ='1.0' # --------------------------------------------------------------------------- def join_institution(institution): if len(institution)==0: return None if len(institution)==1: return institution[0] res = ", ".join(institution[:-1]) res += " and " + institution[-1] return res def get_user(or_id,client_acl, force_institution=False): c = None try: c = client_acl.get_profile(or_id) except: print("\nERROR: or_id not found", or_id) return {"first_name":or_id, "last_name":or_id,"name":or_id, "username":or_id, "emails":or_id, "institution":"NA"}, True try: if or_id[0] == "~": emails = client_acl.search_profiles(ids=[or_id]) assert len(emails) >= 1 else: emails = client_acl.search_profiles(ids=[c.id]) assert len(emails) >= 1 # emails = [or_id] except: print("\nERROR: or_id not associated to an email", or_id) return {"first_name":or_id, "last_name":or_id,"name":or_id, "username":or_id, "emails":or_id, "institution":"NA"}, True # try: if True: c = c.content namePrefered = None for name in c["names"]: if namePrefered==None or ('preferred' in name and name['preferred']): namePrefered = name name = " ".join([namePrefered['first'] if type(namePrefered['first'])==str else '', namePrefered['middle'] if namePrefered['middle']!=None else '', namePrefered['last'] if namePrefered['last']!=None else '' ]).replace(" ", " ") first_name = namePrefered['first'].strip() if type(namePrefered['first'])==str else '' middle_name = namePrefered['middle'].strip() if namePrefered['middle']!=None else '' last_name = namePrefered['last'].strip() if namePrefered['last']!=None else '' username = namePrefered['username'].strip() if len(first_name)>2: first_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in first_name.split(" ")]) if len(middle_name)>2: middle_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in middle_name.split(" ")]) if len(last_name)>2: last_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in last_name.split(" ")]) if 'preferredEmail' in emails[0].content: emails = emails[0].content['preferredEmail'] else: emails = emails[0].content['emails'][0] emails = emails.replace("_","\\_") institution = [] if 'history' in c: for h in c['history']: if 'end' not in h or h['end'] == None: institution.append(h['institution']["name"]) ret = {"first_name":first_name, "last_name":last_name,"name":name, "username":username, "emails":emails} institution = join_institution(institution) if institution: ret["institution"] = institution else: if force_institution: ret["institution"] = "NA" if len(middle_name)>0: ret["middle_name"]=middle_name if "gscholar" in c: ret["google_scholar_id"] = c["gscholar"] if 'dblp' in c: ret['dblp_id'] = c['dblp'] if 'homepage' in c: ret['homepage'] = c['homepage'] if 'orcid'in c: ret['orcid'] = c['orcid'] if 'semanticScholar' in c: ret["semantic_scholar_id"] = c['semanticScholar'] return ret, False
openreview/util.py
3,883
!/usr/bin/env python -*- coding: utf-8 -*----------------------------------------------------------------------------- Created By Rodrigo Wilkens Last update 27/March/2022 version ='1.0' --------------------------------------------------------------------------- emails = [or_id] try:
284
en
0.358508
from pdf_reports import ReportWriter

# DEFINE A WRITER WITH DEFAULT TEMPLATE AND VALUES
report_writer = ReportWriter(
    default_stylesheets=["style.css"],
    default_template="template.pug",
    title="My default title",
    version="0.1.2"
)

# THEN LATER IN YOUR CODE:
html = report_writer.pug_to_html(my_name="Zulko", my_organization="EGF")
report_writer.write_report(html, "example_reportwriter.pdf")
examples/example_reportwriter/example_reportwriter.py
408
DEFINE A WRITER WITH DEFAULT TEMPLATE AND VALUES THEN LATER IN YOUR CODE:
73
en
0.87841
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities

__all__ = [
    'Foo',
]

@pulumi.input_type
class Foo:
    def __init__(__self__, *,
                 a: Optional[bool] = None):
        if a is not None:
            pulumi.set(__self__, "a", a)

    @property
    @pulumi.getter
    def a(self) -> Optional[bool]:
        return pulumi.get(self, "a")

    @a.setter
    def a(self, value: Optional[bool]):
        pulumi.set(self, "a", value)
pkg/codegen/testing/test/testdata/plain-schema-gh6957/python/pulumi_xyz/_inputs.py
693
coding=utf-8 *** WARNING: this file was generated by test. *** *** Do not edit by hand unless you're certain you know what you are doing! ***
141
en
0.974495
import os import os.path as osp import numpy as np # `pip install easydict` if you don't have it from easydict import EasyDict as edict __C = edict() # Consumers can get config by: # from fast_rcnn_config import cfg cfg = __C # # Training options # __C.TRAIN = edict() # Online hard negative mining __C.TRAIN.HARD_POSITIVE_MINING = True __C.TRAIN.HARD_NEGATIVE_MINING = True __C.TRAIN.BG_THRESH_LOW = 0.0 __C.TRAIN.ORIG_SIZE = False # Initial learning rate __C.TRAIN.LEARNING_RATE = 0.001 # Momentum __C.TRAIN.MOMENTUM = 0.9 # Weight decay, for regularization __C.TRAIN.WEIGHT_DECAY = 0.0005 # Factor for reducing the learning rate __C.TRAIN.GAMMA = 0.1 # Step size for reducing the learning rate, currently only support one step __C.TRAIN.STEPSIZE = [30000] # Iteration intervals for showing the loss during training, on command line interface __C.TRAIN.DISPLAY = 50 # Iteration intervals for save check point __C.TRAIN.CHECKPOINT = 500 # Whether to double the learning rate for bias __C.TRAIN.DOUBLE_BIAS = True # Whether to initialize the weights with truncated normal distribution __C.TRAIN.TRUNCATED = False # Whether to have weight decay on bias as well __C.TRAIN.BIAS_DECAY = False # Whether to add ground truth boxes to the pool when sampling regions __C.TRAIN.USE_GT = False # Whether to use aspect-ratio grouping of training images, introduced merely for saving # GPU memory __C.TRAIN.ASPECT_GROUPING = False # The number of snapshots kept, older ones are deleted to save space __C.TRAIN.SNAPSHOT_KEPT = 3 # The time interval for saving tensorflow summaries __C.TRAIN.SUMMARY_INTERVAL = 180 # Scale to use during training (can list multiple scales) # The scale is the pixel size of an image's shortest side __C.TRAIN.SCALES = (600,800) # Max pixel size of the longest side of a scaled input image __C.TRAIN.MAX_SIZE = 1200 # Trim size for input images to create minibatch __C.TRAIN.TRIM_HEIGHT = 600 __C.TRAIN.TRIM_WIDTH = 600 # Images to use per minibatch __C.TRAIN.IMS_PER_BATCH = 1 # Minibatch size (number of regions of interest [ROIs]) __C.TRAIN.BATCH_SIZE = 256 # Fraction of minibatch that is labeled foreground (i.e. class > 0) __C.TRAIN.FG_FRACTION = 0.25 # Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH) __C.TRAIN.FG_THRESH = 0.5 # Overlap threshold for a ROI to be considered background (class = 0 if # overlap in [LO, HI)) __C.TRAIN.BG_THRESH_HI = 0.5 __C.TRAIN.BG_THRESH_LO = 0.0 # Use horizontally-flipped images during training? 
__C.TRAIN.USE_FLIPPED = True # Train bounding-box regressors __C.TRAIN.BBOX_REG = True # Overlap required between a ROI and ground-truth box in order for that ROI to # be used as a bounding-box regression training example __C.TRAIN.BBOX_THRESH = 0.5 # Iterations between snapshots __C.TRAIN.SNAPSHOT_ITERS = 5000 # solver.prototxt specifies the snapshot path prefix, this adds an optional # infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel __C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn' # __C.TRAIN.SNAPSHOT_INFIX = '' # Use a prefetch thread in roi_data_layer.layer # So far I haven't found this useful; likely more engineering work is required # __C.TRAIN.USE_PREFETCH = False # Normalize the targets (subtract empirical mean, divide by empirical stddev) __C.TRAIN.BBOX_NORMALIZE_TARGETS = True # Deprecated (inside weights) __C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0) # Normalize the targets using "precomputed" (or made up) means and stdevs # (BBOX_NORMALIZE_TARGETS must also be True) __C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True __C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0) __C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2) # Train using these proposals __C.TRAIN.PROPOSAL_METHOD = 'gt' # Make minibatches from images that have similar aspect ratios (i.e. both # tall and thin or both short and wide) in order to avoid wasting computation # on zero-padding. # Use RPN to detect objects __C.TRAIN.HAS_RPN = True # IOU >= thresh: positive example __C.TRAIN.ANCHOR_POSITIVE_OVERLAP = 0.5 # IOU < thresh: negative example __C.TRAIN.ANCHOR_NEGATIVE_OVERLAP = 0.3 # If an anchor statisfied by positive and negative conditions set to negative __C.TRAIN.RPN_CLOBBER_POSITIVES = False # Max number of foreground examples __C.TRAIN.RPN_FG_FRACTION = 0.25 # Total number of examples __C.TRAIN.RPN_BATCHSIZE = 384 # NMS threshold used on RPN proposals __C.TRAIN.RPN_NMS_THRESH = 0.7 # Number of top scoring boxes to keep before apply NMS to RPN proposals __C.TRAIN.RPN_PRE_NMS_TOP_N = 12000 # Number of top scoring boxes to keep after applying NMS to RPN proposals __C.TRAIN.RPN_POST_NMS_TOP_N = 2000 # Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale) __C.TRAIN.RPN_MIN_SIZE = 4 # Deprecated (outside weights) __C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0) # Give the positive RPN examples weight of p * 1 / {num positives} # and give negatives a weight of (1 - p) # Set to -1.0 to use uniform example weighting __C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0 # Whether to use all ground truth bounding boxes for training, # For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd'' __C.TRAIN.USE_ALL_GT = True # Whether to tune the batch normalization parameters during training __C.TRAIN.BN_TRAIN = False # # Testing options # __C.TEST = edict() # Scale to use during testing (can NOT list multiple scales) # The scale is the pixel size of an image's shortest side __C.TEST.SCALES = (1200,) # Max pixel size of the longest side of a scaled input image __C.TEST.MAX_SIZE = 1600 __C.TEST.ORIG_SIZE = False # Overlap threshold used for non-maximum suppression (suppress boxes with # IoU >= this threshold) __C.TEST.NMS = 0.3 # Experimental: treat the (K+1) units in the cls_score layer as linear # predictors (trained, eg, with one-vs-rest SVMs). 
__C.TEST.SVM = False # Test using bounding-box regressors __C.TEST.BBOX_REG = True # Propose boxes __C.TEST.HAS_RPN = False # Test using these proposals __C.TEST.PROPOSAL_METHOD = 'gt' ## NMS threshold used on RPN proposals __C.TEST.RPN_NMS_THRESH = 0.3 ## Number of top scoring boxes to keep before apply NMS to RPN proposals __C.TEST.RPN_PRE_NMS_TOP_N = 6000 ## Number of top scoring boxes to keep after applying NMS to RPN proposals __C.TEST.RPN_POST_NMS_TOP_N = 300 # Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale) __C.TEST.RPN_MIN_SIZE = 16 # Testing mode, default to be 'nms', 'top' is slower but better # See report for details __C.TEST.MODE = 'nms' # Only useful when TEST.MODE is 'top', specifies the number of top proposals to select __C.TEST.RPN_TOP_N = 5000 # # ResNet options # __C.RESNET = edict() # Option to set if max-pooling is appended after crop_and_resize. # if true, the region will be resized to a square of 2xPOOLING_SIZE, # then 2x2 max-pooling is applied; otherwise the region will be directly # resized to a square of POOLING_SIZE __C.RESNET.MAX_POOL = False # Number of fixed blocks during training, by default the first of all 4 blocks is fixed # Range: 0 (none) to 3 (all) __C.RESNET.FIXED_BLOCKS = 1 # # MobileNet options # __C.MOBILENET = edict() # Whether to regularize the depth-wise filters during training __C.MOBILENET.REGU_DEPTH = False # Number of fixed layers during training, by default the first of all 14 layers is fixed # Range: 0 (none) to 12 (all) __C.MOBILENET.FIXED_LAYERS = 5 # Weight decay for the mobilenet weights __C.MOBILENET.WEIGHT_DECAY = 0.00004 # Depth multiplier __C.MOBILENET.DEPTH_MULTIPLIER = 1. # # MISC # # The mapping from image coordinates to feature map coordinates might cause # some boxes that are distinct in image space to become identical in feature # coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor # for identifying duplicate boxes. # 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16 __C.DEDUP_BOXES = 1. / 16. # Pixel mean values (BGR order) as a (1, 1, 3) array # We use the same pixel mean for all networks even though it's not exactly what # they were trained with __C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]]) # For reproducibility __C.RNG_SEED = 3 # A small number that's used many times __C.EPS = 1e-14 # Root directory of project __C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..')) # Data directory __C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data')) # Name (or path to) the matlab executable __C.MATLAB = 'matlab' # Place outputs under an experiments directory __C.EXP_DIR = 'default' # Use GPU implementation of non-maximum suppression __C.USE_GPU_NMS = True # Default GPU device id __C.GPU_ID = 0 __C.POOLING_MODE = 'crop' # Size of the pooled region after RoI pooling __C.POOLING_SIZE = 7 # Maximal number of gt rois in an image during Training __C.MAX_NUM_GT_BOXES = 20 # Anchor scales for RPN __C.ANCHOR_SCALES = [8, 16, 32] # Anchor ratios for RPN __C.ANCHOR_RATIOS = [0.5, 1, 2] # Feature stride for RPN __C.FEAT_STRIDE = [16, ] __C.CUDA = False __C.CROP_RESIZE_WITH_MAX_POOL = True import pdb def get_output_dir(imdb_name, net_name=None,output_dir='output'): """Return the directory where experimental artifacts are placed. If the directory does not exist, it is created. A canonical path is built using the name from an imdb and a network (if not None). 
""" outdir = osp.abspath(osp.join(cfg.ROOT_DIR, output_dir, cfg.EXP_DIR, imdb_name)) if net_name is not None: outdir = osp.join(outdir, net_name) if not os.path.exists(outdir): os.makedirs(outdir) return outdir def get_output_tb_dir(imdb, weights_filename): """Return the directory where tensorflow summaries are placed. If the directory does not exist, it is created. A canonical path is built using the name from an imdb and a network (if not None). """ outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name)) if weights_filename is None: weights_filename = 'default' outdir = osp.join(outdir, weights_filename) if not os.path.exists(outdir): os.makedirs(outdir) return outdir def _merge_a_into_b(a, b): """Merge config dictionary a into config dictionary b, clobbering the options in b whenever they are also specified in a. """ if type(a) is not edict: return for k, v in a.items(): # a must specify keys that are in b if k not in b: raise KeyError('{} is not a valid config key'.format(k)) # the types must match, too old_type = type(b[k]) if old_type is not type(v): if isinstance(b[k], np.ndarray): v = np.array(v, dtype=b[k].dtype) else: raise ValueError(('Type mismatch ({} vs. {}) ' 'for config key: {}').format(type(b[k]), type(v), k)) # recursively merge dicts if type(v) is edict: try: _merge_a_into_b(a[k], b[k]) except: print(('Error under config key: {}'.format(k))) raise else: b[k] = v def cfg_from_file(filename): """Load a config file and merge it into the default options.""" import yaml with open(filename, 'r') as f: yaml_cfg = edict(yaml.load(f)) _merge_a_into_b(yaml_cfg, __C) def cfg_from_list(cfg_list): """Set config keys via list (e.g., from command line).""" from ast import literal_eval assert len(cfg_list) % 2 == 0 for k, v in zip(cfg_list[0::2], cfg_list[1::2]): key_list = k.split('.') d = __C for subkey in key_list[:-1]: assert subkey in d d = d[subkey] subkey = key_list[-1] assert subkey in d try: value = literal_eval(v) except: # handle the case when v is a string literal value = v assert type(value) == type(d[subkey]), \ 'type {} does not match original type {}'.format( type(value), type(d[subkey])) d[subkey] = value
model/utils/config.py
12,253
Merge config dictionary a into config dictionary b, clobbering the options in b whenever they are also specified in a. Load a config file and merge it into the default options. Set config keys via list (e.g., from command line). Return the directory where experimental artifacts are placed. If the directory does not exist, it is created. A canonical path is built using the name from an imdb and a network (if not None). Return the directory where tensorflow summaries are placed. If the directory does not exist, it is created. A canonical path is built using the name from an imdb and a network (if not None). `pip install easydict` if you don't have it Consumers can get config by: from fast_rcnn_config import cfg Training options Online hard negative mining Initial learning rate Momentum Weight decay, for regularization Factor for reducing the learning rate Step size for reducing the learning rate, currently only support one step Iteration intervals for showing the loss during training, on command line interface Iteration intervals for save check point Whether to double the learning rate for bias Whether to initialize the weights with truncated normal distribution Whether to have weight decay on bias as well Whether to add ground truth boxes to the pool when sampling regions Whether to use aspect-ratio grouping of training images, introduced merely for saving GPU memory The number of snapshots kept, older ones are deleted to save space The time interval for saving tensorflow summaries Scale to use during training (can list multiple scales) The scale is the pixel size of an image's shortest side Max pixel size of the longest side of a scaled input image Trim size for input images to create minibatch Images to use per minibatch Minibatch size (number of regions of interest [ROIs]) Fraction of minibatch that is labeled foreground (i.e. class > 0) Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH) Overlap threshold for a ROI to be considered background (class = 0 if overlap in [LO, HI)) Use horizontally-flipped images during training? Train bounding-box regressors Overlap required between a ROI and ground-truth box in order for that ROI to be used as a bounding-box regression training example Iterations between snapshots solver.prototxt specifies the snapshot path prefix, this adds an optional infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel __C.TRAIN.SNAPSHOT_INFIX = '' Use a prefetch thread in roi_data_layer.layer So far I haven't found this useful; likely more engineering work is required __C.TRAIN.USE_PREFETCH = False Normalize the targets (subtract empirical mean, divide by empirical stddev) Deprecated (inside weights) Normalize the targets using "precomputed" (or made up) means and stdevs (BBOX_NORMALIZE_TARGETS must also be True) Train using these proposals Make minibatches from images that have similar aspect ratios (i.e. both tall and thin or both short and wide) in order to avoid wasting computation on zero-padding. 
Use RPN to detect objects IOU >= thresh: positive example IOU < thresh: negative example If an anchor statisfied by positive and negative conditions set to negative Max number of foreground examples Total number of examples NMS threshold used on RPN proposals Number of top scoring boxes to keep before apply NMS to RPN proposals Number of top scoring boxes to keep after applying NMS to RPN proposals Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale) Deprecated (outside weights) Give the positive RPN examples weight of p * 1 / {num positives} and give negatives a weight of (1 - p) Set to -1.0 to use uniform example weighting Whether to use all ground truth bounding boxes for training, For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd'' Whether to tune the batch normalization parameters during training Testing options Scale to use during testing (can NOT list multiple scales) The scale is the pixel size of an image's shortest side Max pixel size of the longest side of a scaled input image Overlap threshold used for non-maximum suppression (suppress boxes with IoU >= this threshold) Experimental: treat the (K+1) units in the cls_score layer as linear predictors (trained, eg, with one-vs-rest SVMs). Test using bounding-box regressors Propose boxes Test using these proposals NMS threshold used on RPN proposals Number of top scoring boxes to keep before apply NMS to RPN proposals Number of top scoring boxes to keep after applying NMS to RPN proposals Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale) Testing mode, default to be 'nms', 'top' is slower but better See report for details Only useful when TEST.MODE is 'top', specifies the number of top proposals to select ResNet options Option to set if max-pooling is appended after crop_and_resize. if true, the region will be resized to a square of 2xPOOLING_SIZE, then 2x2 max-pooling is applied; otherwise the region will be directly resized to a square of POOLING_SIZE Number of fixed blocks during training, by default the first of all 4 blocks is fixed Range: 0 (none) to 3 (all) MobileNet options Whether to regularize the depth-wise filters during training Number of fixed layers during training, by default the first of all 14 layers is fixed Range: 0 (none) to 12 (all) Weight decay for the mobilenet weights Depth multiplier MISC The mapping from image coordinates to feature map coordinates might cause some boxes that are distinct in image space to become identical in feature coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor for identifying duplicate boxes. 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16 Pixel mean values (BGR order) as a (1, 1, 3) array We use the same pixel mean for all networks even though it's not exactly what they were trained with For reproducibility A small number that's used many times Root directory of project Data directory Name (or path to) the matlab executable Place outputs under an experiments directory Use GPU implementation of non-maximum suppression Default GPU device id Size of the pooled region after RoI pooling Maximal number of gt rois in an image during Training Anchor scales for RPN Anchor ratios for RPN Feature stride for RPN a must specify keys that are in b the types must match, too recursively merge dicts handle the case when v is a string literal
6,463
en
0.8614
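Since training and test scripts read these options through the shared cfg object, a short illustration of the override helper defined at the bottom of the file may help. It assumes the repo's package is importable as model.utils.config (matching the file path above); the override values are arbitrary examples:

from model.utils.config import cfg, cfg_from_list

print(cfg.TRAIN.LEARNING_RATE)    # 0.001, the default set above
print(cfg.ANCHOR_SCALES)          # [8, 16, 32]

# Override selected keys from a flat key/value list, e.g. parsed from argv.
cfg_from_list(['TRAIN.LEARNING_RATE', '0.0005', 'TRAIN.IMS_PER_BATCH', '2'])
print(cfg.TRAIN.LEARNING_RATE)    # 0.0005
print(cfg.TRAIN.IMS_PER_BATCH)    # 2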
# Code in this file is copied and adapted from
# https://github.com/berkeleydeeprlcourse

import json

"""
Some simple logging functionality, inspired by rllab's logging.
Assumes that each diagnostic gets logged each iteration

Call logz.configure_output_dir() to start logging to a
tab-separated-values file (some_folder_name/log.txt)
"""

import os.path as osp, shutil, time, atexit, os, subprocess

color2num = dict(
    gray=30,
    red=31,
    green=32,
    yellow=33,
    blue=34,
    magenta=35,
    cyan=36,
    white=37,
    crimson=38
)

def colorize(string, color, bold=False, highlight=False):
    attr = []
    num = color2num[color]
    if highlight:
        num += 10
    attr.append(str(num))
    if bold:
        attr.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)

class G(object):
    output_dir = None
    output_file = None
    first_row = True
    log_headers = []
    log_current_row = {}

def configure_output_dir(d=None):
    """
    Set output directory to d, or to /tmp/somerandomnumber if d is None
    """
    G.first_row = True
    G.log_headers = []
    G.log_current_row = {}
    G.output_dir = d or "/tmp/experiments/%i" % int(time.time())
    if not osp.exists(G.output_dir):
        os.makedirs(G.output_dir)
    G.output_file = open(osp.join(G.output_dir, "log.txt"), 'w')
    atexit.register(G.output_file.close)
    print(colorize("Logging data to %s" % G.output_file.name, 'green', bold=True))

def log_tabular(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    """
    if G.first_row:
        G.log_headers.append(key)
    else:
        assert key in G.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration" % key
    assert key not in G.log_current_row, "You already set %s this iteration. Maybe you forgot to call dump_tabular()" % key
    G.log_current_row[key] = val

def save_params(params):
    with open(osp.join(G.output_dir, "params.json"), 'w') as out:
        out.write(json.dumps(params, separators=(',\n', '\t:\t'), sort_keys=True))

def dump_tabular():
    """
    Write all of the diagnostics from the current iteration
    """
    vals = []
    key_lens = [len(key) for key in G.log_headers]
    max_key_len = max(15, max(key_lens))
    keystr = '%' + '%d' % max_key_len
    fmt = "| " + keystr + "s | %15s |"
    n_slashes = 22 + max_key_len
    print("-" * n_slashes)
    for key in G.log_headers:
        val = G.log_current_row.get(key, "")
        if hasattr(val, "__float__"):
            valstr = "%8.3g" % val
        else:
            valstr = val
        print(fmt % (key, valstr))
        vals.append(val)
    print("-" * n_slashes)
    if G.output_file is not None:
        if G.first_row:
            G.output_file.write("\t".join(G.log_headers))
            G.output_file.write("\n")
        G.output_file.write("\t".join(map(str, vals)))
        G.output_file.write("\n")
        G.output_file.flush()
    G.log_current_row.clear()
    G.first_row = False
ADMCode/snuz/ars/logz.py
3,011
Set output directory to d, or to /tmp/somerandomnumber if d is None Write all of the diagnostics from the current iteration Log a value of some diagnostic Call this once for each diagnostic quantity, each iteration Code in this file is copied and adapted from https://github.com/berkeleydeeprlcourse
302
en
0.858333
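As the module docstring above notes, a caller configures an output directory once and then logs each diagnostic every iteration. A minimal usage sketch, assuming the file is importable as logz and using made-up metric names and an example output path:

import random

import logz

logz.configure_output_dir("/tmp/logz_demo")           # creates the dir and opens log.txt
logz.save_params({"seed": 0, "n_iter": 3})            # writes params.json alongside the log

for itr in range(3):
    logz.log_tabular("Iteration", itr)                # same keys must appear every iteration
    logz.log_tabular("AverageReturn", random.random())
    logz.dump_tabular()                               # prints a table and appends a TSV row to log.txt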
# !/usr/bin/env python
# -*-coding: utf-8 -*-
__author__ = 'wtq'

LOG_PATH = "monitor_logging.log"

REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379

# Sampling interval and pause duration
MONITOR_INTERVAL = 1
MONITOR_PEROID = 3

# Network interface whose read/write rate is monitored
NET_NAME = 'eth0'

# Names of the machines in the system, used to compute the system-wide average load
SYSTEM_MACHINE_NAME = ["storage1", "storage2"]

# Machines used to count client connections, usually the master
CLIENT_LINK_MACNHIE = ["storage1"]

DISK_ALL_SPACE = 100
CPU_KERNEL_NUMS = 32
MEM_ALL_SPACE = 100

FASTDFSPORT = '8000'
REDIS_SYSTEM_KEY = 'system'
FASTDFS_PEROID = 3
config/config.py
631
!/usr/bin/env python -*-coding: utf-8 -*- 采集的间隔与间断时长 监控的读写速率的网卡 系统内各台机器的名字,以此来计算系统的平均负载信息 用来计算客户端链接数的机器名字,一般为master
115
zh
0.930899
#!/usr/bin/env python # (works in both Python 2 and Python 3) # Offline HTML Indexer v1.32 (c) 2013-15,2020 Silas S. Brown. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is a Python program for creating large indices of # HTML text which can be queried using simple Javascript # that works on many mobile phone browsers without needing # an Internet connection or a Web server. This is useful if # you want to load a dictionary or other reference onto your # phone (or computer) for use when connectivity is not # available. # The input HTML should be interspersed with anchors like # this: <a name="xyz"></a> where xyz is the index heading # for the following text. There should be one such anchor # before each entry and an extra anchor at the end of the # text; everything before the first anchor is counted as the # "header" and everything after the last as the "footer". If # these are empty, a default "mobile friendly" HTML header # and footer specifying UTF-8 encoding will be # added. Anchors may be linked from other entries; these # links are changed as necessary. # Opening any of the resulting HTML files should display a # textbox that lets you type the first few letters of the # word you wish to look up; the browser will then jump to # whatever heading is alphabetically nearest to the typed-in # text. # Configuration # ------------- infile = None # None = standard input, or set a "filename" outdir = "." # current directory by default alphabet = "abcdefghijklmnopqrstuvwxyz" # set to None for all characters and case-sensitive ignore_text_in_parentheses = True # or False, for parentheses in index headings more_sensible_punctuation_sort_order = True remove_utf8_diacritics = True # or False, for removing diacritics in index headings (not in main text); # assumes UTF-8. (Letters with diacritics will be treated as though they did not have any.) max_filesize = 64*1024 # of each HTML file # (max_filesize can be exceeded by 1 very large entry) # Where to find history: # on GitHub at https://github.com/ssb22/indexer # and on GitLab at https://gitlab.com/ssb22/indexer # and on BitBucket https://bitbucket.org/ssb22/indexer # and at https://gitlab.developers.cam.ac.uk/ssb22/indexer # and in China: https://gitee.com/ssb22/indexer # --------------------------------------------------------------- import re,sys,os,time if type("")==type(u""): izip = zip # Python 3 else: from itertools import izip # Python 2 if infile: sys.stderr.write("Reading from "+infile+"... ") infile = open(infile) else: sys.stderr.write("Reading from standard input... ") infile = sys.stdin fragments = re.split(r'<a name="([^"]*)"></a>',infile.read()) # odd indices should be the tag names, even should be the HTML in between assert len(fragments)>3, "Couldn't find 2 or more hash tags (were they formatted correctly?)" assert len(fragments)%2, "re.split not returning groups??" 
header,footer = fragments[0],fragments[-1] if not header.strip(): header="""<html><head><meta name="mobileoptimized" content="0"><meta name="viewport" content="width=device-width"><meta http-equiv="Content-Type" content="text/html; charset=utf-8"></head><body>""" if not footer.strip(): footer = "</body></html>" fragments = fragments[1:-1] sys.stderr.write("%d entries\n" % len(fragments)) def alphaOnly(x): if ignore_text_in_parentheses: x=re.sub(r"\([^)]*\)[;, ]*","",x) if alphabet: x=''.join(c for c in x.lower() if c in alphabet) return re.sub(r"^[@,;]*","",x) # see ohi_latex.py if more_sensible_punctuation_sort_order: _ao1 = alphaOnly alphaOnly = lambda x: _ao1(re.sub('([;,]);+',r'\1',x.replace('-',' ').replace(',','~COM~').replace(';',',').replace('~COM~',';').replace(' ',';'))) # gives ; < , == space (useful if ; is used to separate definitions and , is used before extra words to be added at the start; better set space EQUAL to comma, not higher, or will end up in wrong place if user inputs something forgetting the comma) if alphabet: for c in '@,;': if not c in alphabet: alphabet += c if remove_utf8_diacritics: _ao = alphaOnly ; import unicodedata def S(s): if type(u"")==type(""): return s # Python 3 else: return s.encode('utf-8') # Python 2 def U(s): if type(s)==type(u""): return s return s.decode('utf-8') alphaOnly = lambda x: _ao(S(u''.join((c for c in unicodedata.normalize('NFD',U(x)) if not unicodedata.category(c).startswith('M'))))) fragments = list(zip(map(alphaOnly,fragments[::2]), fragments[1::2])) fragments.sort() class ChangedLetters: def __init__(self): self.lastText = "" def __call__(self,text): "Find shortest prefix of text that differentiates it from previous item (empty string if no difference)" assert text >= self.lastText, "input must have been properly sorted" i = 0 for c1,c2 in izip(self.lastText+chr(0),text): i += 1 if not c1==c2: self.lastText = text return text[:i] assert text==self.lastText, repr(text)+"!="+repr(self.lastText) return "" # no difference from lastText changedLetters = ChangedLetters() ; f2 = [] fragments.reverse() sys.stderr.write("Minimizing prefixes... 
") while fragments: x,y = fragments.pop() x = changedLetters(x) if f2 and not x: f2[-1] = (f2[-1][0], f2[-1][1]+y) # combine effectively-identical ones else: f2.append((x,y)) sys.stderr.write("done\n") fragments = f2 def tag(n): if n: return '<a name="%s"></a>' % n else: return '' def old_javascript_array(array): "in case the browser doesn't support JSON, and to save some separator bytes" array = list(array) # in case it was an iterator sepChar = ord(' ') chars_used = set(''.join(array)) assert '"' not in chars_used and '\\' not in chars_used and '<' not in chars_used and '&' not in chars_used, "Can't use special chars (unless you change this code to escape them)" while True: if chr(sepChar) not in chars_used and not chr(sepChar) in r'\"<&': break sepChar += 1 assert sepChar < 127, "can't find a suitable separator char (hard-code the array instead?)" return '"'+chr(sepChar).join(array)+'".split("'+chr(sepChar)+'")' js_binchop = """function(a,i) { function inner(a,i,lo,hi) { var mid=lo+Math.floor((hi-lo)/2); if(mid==lo || a[mid]==i) return a[mid]; if(a[mid] > i) return inner(a,i,lo,mid); return inner(a,i,mid,hi); } return inner(a,i,0,a.length); }""" js_binchop_dx = js_binchop.replace("return a[mid]","return mid") def js_hashjump(hashtags): return """<script><!-- var h=location.hash; if(h.length > 1) { if(h!='#_h' && h!='#_f') { var n="#"+%s(%s,h.slice(1)); if (h!=n) location.hash=n; } } else location.href="index.html" //--></script>""" % (js_binchop,old_javascript_array(hashtags)) # (the h!=n test is needed to avoid loop on some browsers e.g. PocketIE7) # #_h and #_f are special hashes for header and footer, used for "Next page" and "Previous page" links # (HTML5 defaults type to text/javascript, as do all pre-HTML5 browsers including NN2's 'script language="javascript"' thing, so we might as well save a few bytes) __lastStartEnd = None def htmlDoc(start,end,docNo): "Returns an HTML document containing fragments[start:end]. docNo is used to generate previous/next page links as appropriate. Caches its return value in case called again with same start,end (in which case docNo is ignored on second call)." global __lastStartEnd,__lastDoc if not (start,end) == __lastStartEnd: __lastStartEnd = (start,end) __lastDoc = header+js_hashjump(x for x,y in fragments[start:end] if x) if start: assert docNo, "Document 0 should start at 0" __lastDoc += '<p><a name="_h" href="%d.html#_f">Previous page</a></p>' % (docNo-1,) __lastDoc += ''.join(tag(x)+y for x,y in fragments[start:end]) if end<len(fragments): __lastDoc += '<p><a name="_f" href="%d.html#_h">Next page</a></p>' % (docNo+1,) __lastDoc += footer return linkSub(__lastDoc) def linkSub(txt): return re.sub(r'(?i)<a href=("?)#',r'<a href=\1index.html#',txt) # (do link to index.html#whatever rather than directly, so link still works if docs change) def findEnd(start,docNo): "Given 'start' (an index into 'fragments'), find an 'end' that produces the largest possible htmlDoc less than max_filesize. docNo is used to generate previous/next page links as appropriate." 
eTry = len(fragments)-start assert eTry, "must start before the end" sLen = len(htmlDoc(start,start+eTry,docNo)) if sLen > max_filesize: eTry = int(eTry / int(sLen / max_filesize)) # rough start point while eTry > 1 and len(htmlDoc(start,start+eTry,docNo)) > max_filesize: eTry = int(eTry/2) if eTry < 1: eTry = 1 while eTry < len(fragments)-start and len(htmlDoc(start,start+eTry,docNo)) < max_filesize: eTry += 1 return start + max(1,eTry-1) def allRanges(): start = docNo = 0 while start < len(fragments): end = findEnd(start,docNo) sys.stderr.write("\rSegmenting (%d/%d)" % (end,len(fragments))) yield start,end start = end ; docNo += 1 sys.stderr.write("Segmenting") startsList = [] for start,end in allRanges(): open(("%s%s%d.html" % (outdir,os.sep,len(startsList))),"w").write(htmlDoc(start,end,len(startsList))) startsList.append(start) if alphabet: assert not '"' in alphabet and not '\\' in alphabet and not '&' in alphabet and not '<' in alphabet, "Can't use special characters in alphabet (unless js_alphabet is modified to quote them)" js_alphabet = """var a=val.toLowerCase(),i; val=""; for(i=0; i < a.length; i++) { var c=a.charAt(i); if("%s".indexOf(c)>-1) val += c } """ % alphabet # TODO: what if user types letters with diacritics, when remove_utf8_diacritics is set? else: js_alphabet = "" if more_sensible_punctuation_sort_order: js_alphabet = "val = val.replace(/-/g,' ').replace(/,/g,'~COM~').replace(/;/g,',').replace(/~COM~/g,';').replace(/ /g,';').replace(/([;,]);+/g,'$1');" + js_alphabet def hashReload(footer): # If a footer refers to index.html#example, need to # make sure the hash script runs when clicking there # from the index page itself. strToFind = '<a href="index.html#' # TODO: what if it's quoted differently and/or has extra attributes? (ohi.html does specify using " quoting though) while True: i = footer.lower().find(strToFind) if i==-1: return footer footer = footer[:i]+'<a onclick="document.forms[0].q.value=\''+footer[i+len(strToFind):footer.index('"',i+len(strToFind))]+'\';jump()" href="index.html#'+footer[i+len(strToFind):] open(outdir+os.sep+"index.html","w").write("""%s<script><!-- function jump() { var val=document.forms[0].q.value; %s location.href=%s(%s,val)+".html#"+val } if(navigator.userAgent.indexOf("Opera/9.50" /* sometimes found on WM6.1 phones from 2008 */) >= 0) document.write("<p><b>WARNING:</"+"b> Your version of Opera may have trouble jumping to anchors; please try Opera 10 or above.</"+"p>") //--></script><noscript><p><b>ERROR:</b> Javascript needs to be switched on for this form to work.</p></noscript> <form action="#" onSubmit="jump();return false">Lookup: <input type="text" name="q"><input type="submit" value="ok"></form><script><!-- if(location.hash.length > 1) { document.forms[0].q.value = location.hash.slice(1).replace(/(\+|%%20)/g,' '); jump(); } else document.forms[0].q.focus(); //--></script>%s""" % (hashReload(linkSub(header)),js_alphabet,js_binchop_dx,old_javascript_array(fragments[s][0] for s in startsList),hashReload(linkSub(footer)))) sys.stderr.write(" %d files\n" % (len(startsList)+1))
ohi.py
12,229
Find shortest prefix of text that differentiates it from previous item (empty string if no difference) Given 'start' (an index into 'fragments'), find an 'end' that produces the largest possible htmlDoc less than max_filesize. docNo is used to generate previous/next page links as appropriate. Returns an HTML document containing fragments[start:end]. docNo is used to generate previous/next page links as appropriate. Caches its return value in case called again with same start,end (in which case docNo is ignored on second call). in case the browser doesn't support JSON, and to save some separator bytes !/usr/bin/env python (works in both Python 2 and Python 3) Offline HTML Indexer v1.32 (c) 2013-15,2020 Silas S. Brown. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This is a Python program for creating large indices of HTML text which can be queried using simple Javascript that works on many mobile phone browsers without needing an Internet connection or a Web server. This is useful if you want to load a dictionary or other reference onto your phone (or computer) for use when connectivity is not available. The input HTML should be interspersed with anchors like this: <a name="xyz"></a> where xyz is the index heading for the following text. There should be one such anchor before each entry and an extra anchor at the end of the text; everything before the first anchor is counted as the "header" and everything after the last as the "footer". If these are empty, a default "mobile friendly" HTML header and footer specifying UTF-8 encoding will be added. Anchors may be linked from other entries; these links are changed as necessary. Opening any of the resulting HTML files should display a textbox that lets you type the first few letters of the word you wish to look up; the browser will then jump to whatever heading is alphabetically nearest to the typed-in text. Configuration ------------- None = standard input, or set a "filename" current directory by default set to None for all characters and case-sensitive or False, for parentheses in index headings or False, for removing diacritics in index headings (not in main text); assumes UTF-8. (Letters with diacritics will be treated as though they did not have any.) 
of each HTML file (max_filesize can be exceeded by 1 very large entry) Where to find history: on GitHub at https://github.com/ssb22/indexer and on GitLab at https://gitlab.com/ssb22/indexer and on BitBucket https://bitbucket.org/ssb22/indexer and at https://gitlab.developers.cam.ac.uk/ssb22/indexer and in China: https://gitee.com/ssb22/indexer --------------------------------------------------------------- Python 3 Python 2 odd indices should be the tag names, even should be the HTML in between see ohi_latex.py gives ; < , == space (useful if ; is used to separate definitions and , is used before extra words to be added at the start; better set space EQUAL to comma, not higher, or will end up in wrong place if user inputs something forgetting the comma) Python 3 Python 2 no difference from lastText combine effectively-identical ones in case it was an iterator (the h!=n test is needed to avoid loop on some browsers e.g. PocketIE7) _h and _f are special hashes for header and footer, used for "Next page" and "Previous page" links (HTML5 defaults type to text/javascript, as do all pre-HTML5 browsers including NN2's 'script language="javascript"' thing, so we might as well save a few bytes) (do link to index.htmlwhatever rather than directly, so link still works if docs change) rough start point TODO: what if user types letters with diacritics, when remove_utf8_diacritics is set? If a footer refers to index.htmlexample, need to make sure the hash script runs when clicking there from the index page itself. TODO: what if it's quoted differently and/or has extra attributes? (ohi.html does specify using " quoting though)
4,366
en
0.851421
import environ from pathlib import Path env = environ.Env( # Sets debug to False if it cannot find .env DEBUG=(bool, False) ) environ.Env.read_env() # GENERAL # ------------------------------------------------------------------------------ # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY SECRET_KEY = env.str('SECRET_KEY') # https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = env.bool('DEBUG') # https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts ALLOWED_HOSTS = tuple(env.list('ALLOWED_HOSTS')) # APPS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'whitenoise.runserver_nostatic', 'django.contrib.staticfiles', 'django.contrib.sites', # Third-party 'allauth', 'allauth.account', 'crispy_forms', 'debug_toolbar', # Local 'accounts', 'pages', 'snacks', ] # MIDDLEWARE # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#middleware MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] # URLS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf ROOT_URLCONF = "config.urls" # https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application WSGI_APPLICATION = "config.wsgi.application" # TEMPLATES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#templates TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': ['templates'], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] # DATABASES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # PASSWORDS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # INTERNATIONALIZATION # ------------------------------------------------------------------------------ # 
https://docs.djangoproject.com/en/dev/topics/i18n/ # https://docs.djangoproject.com/en/dev/ref/settings/#language-code LANGUAGE_CODE = 'en-us' # https://docs.djangoproject.com/en/dev/ref/settings/#time-zone TIME_ZONE = 'UTC' # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-USE_I18N USE_I18N = True # https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n USE_L10N = True # https://docs.djangoproject.com/en/dev/ref/settings/#use-tz USE_TZ = True # STATIC # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = str(BASE_DIR.joinpath('staticfiles')) # https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = '/static/' # https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS STATICFILES_DIRS = [str(BASE_DIR.joinpath('static'))] # http://whitenoise.evans.io/en/stable/django.html#add-compression-and-caching-support STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage" # DJANGO-CRISPY-FORMS CONFIGS # ------------------------------------------------------------------------------ # https://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs CRISPY_TEMPLATE_PACK = "bootstrap4" # EMAIL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#email-backend EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # DJANGO-DEBUG-TOOLBAR CONFIGS # ------------------------------------------------------------------------------ # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html # https://docs.djangoproject.com/en/dev/ref/settings/#internal-ips INTERNAL_IPS = ['127.0.0.1'] # CUSTOM USER MODEL CONFIGS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/topics/auth/customizing/#substituting-a-custom-user-model AUTH_USER_MODEL = 'accounts.CustomUser' # DJANGO-ALLAUTH CONFIGS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#site-id SITE_ID = 1 # https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url LOGIN_REDIRECT_URL = 'home' # https://django-allauth.readthedocs.io/en/latest/views.html#logout-account-logout ACCOUNT_LOGOUT_REDIRECT_URL = 'home' # https://django-allauth.readthedocs.io/en/latest/installation.html?highlight=backends AUTHENTICATION_BACKENDS = ( "django.contrib.auth.backends.ModelBackend", "allauth.account.auth_backends.AuthenticationBackend", ) # https://django-allauth.readthedocs.io/en/latest/configuration.html ACCOUNT_SESSION_REMEMBER = True ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False ACCOUNT_USERNAME_REQUIRED = False ACCOUNT_AUTHENTICATION_METHOD = 'email' ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_UNIQUE_EMAIL = True
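For reference, the environment variables this settings module reads via django-environ (names taken from the code above; the values here are placeholders) can be exercised like this:

import os
import environ

# Placeholder values for the variables read by the settings module above.
os.environ.setdefault("SECRET_KEY", "change-me")
os.environ.setdefault("DEBUG", "True")
os.environ.setdefault("ALLOWED_HOSTS", "localhost,127.0.0.1")

env = environ.Env(DEBUG=(bool, False))
print(env.str("SECRET_KEY"))      # 'change-me'
print(env.bool("DEBUG"))          # True
print(env.list("ALLOWED_HOSTS"))  # ['localhost', '127.0.0.1']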
config/settings.py
6,857
Sets debug to False if it cannot find .env GENERAL ------------------------------------------------------------------------------ Build paths inside the project like this: BASE_DIR / 'subdir'. https://docs.djangoproject.com/en/dev/ref/settings/std:setting-SECRET_KEY https://docs.djangoproject.com/en/dev/ref/settings/debug https://docs.djangoproject.com/en/dev/ref/settings/allowed-hosts APPS ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/installed-apps Third-party Local MIDDLEWARE ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/middleware URLS ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/root-urlconf https://docs.djangoproject.com/en/dev/ref/settings/wsgi-application TEMPLATES ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/templates DATABASES ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/databases PASSWORDS ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/auth-password-validators INTERNATIONALIZATION ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/topics/i18n/ https://docs.djangoproject.com/en/dev/ref/settings/language-code https://docs.djangoproject.com/en/dev/ref/settings/time-zone https://docs.djangoproject.com/en/dev/ref/settings/std:setting-USE_I18N https://docs.djangoproject.com/en/dev/ref/settings/use-l10n https://docs.djangoproject.com/en/dev/ref/settings/use-tz STATIC ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/static-root https://docs.djangoproject.com/en/dev/ref/settings/static-url https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/std:setting-STATICFILES_DIRS http://whitenoise.evans.io/en/stable/django.htmladd-compression-and-caching-support DJANGO-CRISPY-FORMS CONFIGS ------------------------------------------------------------------------------ https://django-crispy-forms.readthedocs.io/en/latest/install.htmltemplate-packs EMAIL ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/email-backend DJANGO-DEBUG-TOOLBAR CONFIGS ------------------------------------------------------------------------------ https://django-debug-toolbar.readthedocs.io/en/latest/installation.html https://docs.djangoproject.com/en/dev/ref/settings/internal-ips CUSTOM USER MODEL CONFIGS ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/topics/auth/customizing/substituting-a-custom-user-model DJANGO-ALLAUTH CONFIGS ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/site-id https://docs.djangoproject.com/en/dev/ref/settings/login-redirect-url https://django-allauth.readthedocs.io/en/latest/views.htmllogout-account-logout https://django-allauth.readthedocs.io/en/latest/installation.html?highlight=backends https://django-allauth.readthedocs.io/en/latest/configuration.html
3,486
en
0.415933
# model settings model = dict( type='CenterNet', pretrained='modelzoo://resnet18', backbone=dict( type='ResNet', depth=18, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_eval=False, add_summay_every_n_step=200, style='pytorch'), neck=dict(type='None'), bbox_head=dict( type='CXTHead', inplanes=(64, 128, 256, 512), head_conv=128, wh_conv=64, use_deconv=False, norm_after_upsample=False, hm_head_conv_num=2, wh_head_conv_num=2, ct_head_conv_num=1, fovea_hm=False, num_classes=81, use_exp_wh=False, wh_offset_base=16, wh_area_process='norm', shortcut_cfg=(1, 2, 3), shortcut_attention=(False, False, False), norm_cfg=dict(type='BN'), norm_wh=False, avg_wh_weightv3=False, center_ratio=0.2, hm_init_value=None, giou_weight=5., merge_weight=1., hm_weight=1., ct_weight=1.)) cudnn_benchmark = True # training and testing settings train_cfg = dict( vis_every_n_iters=100, debug=False) test_cfg = dict( score_thr=0.05, max_per_img=100) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(512, 512), keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(512, 512), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( imgs_per_gpu=16, workers_per_gpu=4, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0003, paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.)) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 5, step=[18, 22]) checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=18) bbox_head_hist_config = dict( model_type=['ConvModule', 'DeformConvPack'], sub_modules=['bbox_head'], save_every_n_steps=200) # yapf:disable log_config = dict(interval=20) # yapf:enable # runtime settings total_epochs = 24 dist_params = dict(backend='nccl') log_level = 'INFO' work_dir = 'eft18_o16_v1norm_3lr_alpha2_wd3e4_s123_nos_2x' load_from = None resume_from = None workflow = [('train', 1)]
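Because this config is plain Python that only defines dictionaries, it can be inspected without the detection framework itself, for example with the standard library (a sketch; it assumes the file exists at the path listed below):

import runpy

# Execute the config file and read back the dictionaries it defines.
cfg = runpy.run_path(
    "configs/centernext/eft18_o16_v1norm_3lr_alpha2_wd3e4_s123_nos_2x.py")
print(cfg["model"]["bbox_head"]["num_classes"])  # 81
print(cfg["optimizer"]["lr"])                    # 0.003
print(cfg["data"]["imgs_per_gpu"])               # 16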
configs/centernext/eft18_o16_v1norm_3lr_alpha2_wd3e4_s123_nos_2x.py
4,096
model settings training and testing settings dataset settings optimizer learning policy yapf:disable yapf:enable runtime settings
129
en
0.778863
import multiprocessing as mp import itertools import traceback import pickle import numpy as np from numba import cuda from numba.cuda.testing import (skip_on_cudasim, skip_under_cuda_memcheck, ContextResettingTestCase, ForeignArray) import unittest def core_ipc_handle_test(the_work, result_queue): try: arr = the_work() # Catch anything going wrong in the worker function except: # noqa: E722 # FAILED. propagate the exception as a string succ = False out = traceback.format_exc() else: # OK. send the ndarray back succ = True out = arr result_queue.put((succ, out)) def base_ipc_handle_test(handle, size, result_queue): def the_work(): dtype = np.dtype(np.intp) with cuda.open_ipc_array(handle, shape=size // dtype.itemsize, dtype=dtype) as darr: # copy the data to host return darr.copy_to_host() core_ipc_handle_test(the_work, result_queue) def serialize_ipc_handle_test(handle, result_queue): def the_work(): dtype = np.dtype(np.intp) darr = handle.open_array(cuda.current_context(), shape=handle.size // dtype.itemsize, dtype=dtype) # copy the data to host arr = darr.copy_to_host() handle.close() return arr core_ipc_handle_test(the_work, result_queue) def ipc_array_test(ipcarr, result_queue): try: with ipcarr as darr: arr = darr.copy_to_host() try: # should fail to reopen with ipcarr: pass except ValueError as e: if str(e) != 'IpcHandle is already opened': raise AssertionError('invalid exception message') else: raise AssertionError('did not raise on reopen') # Catch any exception so we can propagate it except: # noqa: E722 # FAILED. propagate the exception as a string succ = False out = traceback.format_exc() else: # OK. send the ndarray back succ = True out = arr result_queue.put((succ, out)) @skip_under_cuda_memcheck('Hangs cuda-memcheck') @skip_on_cudasim('Ipc not available in CUDASIM') class TestIpcMemory(ContextResettingTestCase): def test_ipc_handle(self): # prepare data for IPC arr = np.arange(10, dtype=np.intp) devarr = cuda.to_device(arr) # create IPC handle ctx = cuda.current_context() ipch = ctx.get_ipc_handle(devarr.gpu_data) # manually prepare for serialization as bytes handle_bytes = bytes(ipch.handle) size = ipch.size # spawn new process for testing ctx = mp.get_context('spawn') result_queue = ctx.Queue() args = (handle_bytes, size, result_queue) proc = ctx.Process(target=base_ipc_handle_test, args=args) proc.start() succ, out = result_queue.get() if not succ: self.fail(out) else: np.testing.assert_equal(arr, out) proc.join(3) def variants(self): # Test with no slicing and various different slices indices = (None, slice(3, None), slice(3, 8), slice(None, 8)) # Test with a Numba DeviceNDArray, or an array from elsewhere through # the CUDA Array Interface foreigns = (False, True) return itertools.product(indices, foreigns) def check_ipc_handle_serialization(self, index_arg=None, foreign=False): # prepare data for IPC arr = np.arange(10, dtype=np.intp) devarr = cuda.to_device(arr) if index_arg is not None: devarr = devarr[index_arg] if foreign: devarr = cuda.as_cuda_array(ForeignArray(devarr)) expect = devarr.copy_to_host() # create IPC handle ctx = cuda.current_context() ipch = ctx.get_ipc_handle(devarr.gpu_data) # pickle buf = pickle.dumps(ipch) ipch_recon = pickle.loads(buf) self.assertIs(ipch_recon.base, None) self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle)) self.assertEqual(ipch_recon.size, ipch.size) # spawn new process for testing ctx = mp.get_context('spawn') result_queue = ctx.Queue() args = (ipch, result_queue) proc = ctx.Process(target=serialize_ipc_handle_test, 
args=args) proc.start() succ, out = result_queue.get() if not succ: self.fail(out) else: np.testing.assert_equal(expect, out) proc.join(3) def test_ipc_handle_serialization(self): for index, foreign, in self.variants(): with self.subTest(index=index, foreign=foreign): self.check_ipc_handle_serialization(index, foreign) def check_ipc_array(self, index_arg=None, foreign=False): # prepare data for IPC arr = np.arange(10, dtype=np.intp) devarr = cuda.to_device(arr) # Slice if index_arg is not None: devarr = devarr[index_arg] if foreign: devarr = cuda.as_cuda_array(ForeignArray(devarr)) expect = devarr.copy_to_host() ipch = devarr.get_ipc_handle() # spawn new process for testing ctx = mp.get_context('spawn') result_queue = ctx.Queue() args = (ipch, result_queue) proc = ctx.Process(target=ipc_array_test, args=args) proc.start() succ, out = result_queue.get() if not succ: self.fail(out) else: np.testing.assert_equal(expect, out) proc.join(3) def test_ipc_array(self): for index, foreign, in self.variants(): with self.subTest(index=index, foreign=foreign): self.check_ipc_array(index, foreign) def staged_ipc_handle_test(handle, device_num, result_queue): def the_work(): with cuda.gpus[device_num]: this_ctx = cuda.devices.get_context() deviceptr = handle.open_staged(this_ctx) arrsize = handle.size // np.dtype(np.intp).itemsize hostarray = np.zeros(arrsize, dtype=np.intp) cuda.driver.device_to_host( hostarray, deviceptr, size=handle.size, ) handle.close() return hostarray core_ipc_handle_test(the_work, result_queue) def staged_ipc_array_test(ipcarr, device_num, result_queue): try: with cuda.gpus[device_num]: with ipcarr as darr: arr = darr.copy_to_host() try: # should fail to reopen with ipcarr: pass except ValueError as e: if str(e) != 'IpcHandle is already opened': raise AssertionError('invalid exception message') else: raise AssertionError('did not raise on reopen') # Catch any exception so we can propagate it except: # noqa: E722 # FAILED. propagate the exception as a string succ = False out = traceback.format_exc() else: # OK. 
send the ndarray back succ = True out = arr result_queue.put((succ, out)) @skip_under_cuda_memcheck('Hangs cuda-memcheck') @skip_on_cudasim('Ipc not available in CUDASIM') class TestIpcStaged(ContextResettingTestCase): def test_staged(self): # prepare data for IPC arr = np.arange(10, dtype=np.intp) devarr = cuda.to_device(arr) # spawn new process for testing mpctx = mp.get_context('spawn') result_queue = mpctx.Queue() # create IPC handle ctx = cuda.current_context() ipch = ctx.get_ipc_handle(devarr.gpu_data) # pickle buf = pickle.dumps(ipch) ipch_recon = pickle.loads(buf) self.assertIs(ipch_recon.base, None) self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle)) self.assertEqual(ipch_recon.size, ipch.size) # Test on every CUDA devices for device_num in range(len(cuda.gpus)): args = (ipch, device_num, result_queue) proc = mpctx.Process(target=staged_ipc_handle_test, args=args) proc.start() succ, out = result_queue.get() proc.join(3) if not succ: self.fail(out) else: np.testing.assert_equal(arr, out) def test_ipc_array(self): for device_num in range(len(cuda.gpus)): # prepare data for IPC arr = np.random.random(10) devarr = cuda.to_device(arr) ipch = devarr.get_ipc_handle() # spawn new process for testing ctx = mp.get_context('spawn') result_queue = ctx.Queue() args = (ipch, device_num, result_queue) proc = ctx.Process(target=staged_ipc_array_test, args=args) proc.start() succ, out = result_queue.get() proc.join(3) if not succ: self.fail(out) else: np.testing.assert_equal(arr, out) if __name__ == '__main__': unittest.main()
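Stripped of the unittest scaffolding, the IPC round-trip these tests exercise looks roughly like the sketch below; it uses only calls that appear in the test code above, needs a CUDA-capable GPU, and must use the 'spawn' start method:

import multiprocessing as mp
import numpy as np
from numba import cuda

def child(handle_bytes, size, queue):
    dtype = np.dtype(np.intp)
    # Reopen the exported device allocation in this process and copy it back.
    with cuda.open_ipc_array(handle_bytes, shape=size // dtype.itemsize,
                             dtype=dtype) as darr:
        queue.put(darr.copy_to_host())

if __name__ == '__main__':
    arr = np.arange(10, dtype=np.intp)
    devarr = cuda.to_device(arr)
    ipch = cuda.current_context().get_ipc_handle(devarr.gpu_data)
    ctx = mp.get_context('spawn')
    queue = ctx.Queue()
    proc = ctx.Process(target=child, args=(bytes(ipch.handle), ipch.size, queue))
    proc.start()
    np.testing.assert_equal(arr, queue.get())
    proc.join(3)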
numba/cuda/tests/cudapy/test_ipc.py
9,385
Catch anything going wrong in the worker function noqa: E722 FAILED. propagate the exception as a string OK. send the ndarray back copy the data to host copy the data to host should fail to reopen Catch any exception so we can propagate it noqa: E722 FAILED. propagate the exception as a string OK. send the ndarray back prepare data for IPC create IPC handle manually prepare for serialization as bytes spawn new process for testing Test with no slicing and various different slices Test with a Numba DeviceNDArray, or an array from elsewhere through the CUDA Array Interface prepare data for IPC create IPC handle pickle spawn new process for testing prepare data for IPC Slice spawn new process for testing should fail to reopen Catch any exception so we can propagate it noqa: E722 FAILED. propagate the exception as a string OK. send the ndarray back prepare data for IPC spawn new process for testing create IPC handle pickle Test on every CUDA devices prepare data for IPC spawn new process for testing
1,009
en
0.799655
#!/usr/bin/env python2 # Copyright 2016 Vimal Manohar # 2016 Johns Hopkins University (author: Daniel Povey) # Apache 2.0 from __future__ import print_function import argparse import logging import sys from collections import defaultdict """ This script reads and writes the 'ctm-edits' file that is produced by get_ctm_edits.py. It modifies the ctm-edits so that non-scored words are not counted as errors: for instance, if there are things like [COUGH] and [NOISE] in the transcript, deletions, insertions and substitutions involving them are allowed, and we modify the reference to correspond to the hypothesis. If you supply the <lang> directory (the one that corresponds to how you decoded the data) to this script, it assumes that the <lang> directory contains phones/align_lexicon.int, and it uses this to work out a reasonable guess of the non-scored phones, based on which have a single-word pronunciation that maps to a silence phone. It then uses the words.txt to work out the written form of those words. Alternatively, you may specify a file containing the non-scored words one per line, with the --non-scored-words option. Non-scored words that were deleted (i.e. they were in the ref but not the hyp) are simply removed from the ctm. For non-scored words that were inserted or substituted, we change the reference word to match the hyp word, but instead of marking the operation as 'cor' (correct), we mark it as 'fix' (fixed), so that it will not be positively counted as a correct word for purposes of finding the optimal segment boundaries. e.g. <file-id> <channel> <start-time> <duration> <conf> <hyp-word> <ref-word> <edit-type> [note: the <channel> will always be 1]. AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor AJJacobs_2007P-0001605-0003029 1 0.24 0.25 thought 1.0 thought cor AJJacobs_2007P-0001605-0003029 1 0.49 0.14 i'd 1.0 i'd cor AJJacobs_2007P-0001605-0003029 1 0.63 0.22 tell 1.0 tell cor AJJacobs_2007P-0001605-0003029 1 0.85 0.11 you 1.0 you cor AJJacobs_2007P-0001605-0003029 1 0.96 0.05 a 1.0 a cor AJJacobs_2007P-0001605-0003029 1 1.01 0.24 little 1.0 little cor AJJacobs_2007P-0001605-0003029 1 1.25 0.5 about 1.0 about cor AJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 [UH] cor """ logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) handler = logging.StreamHandler() handler.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s - ' '%(funcName)s - %(levelname)s ] %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) parser = argparse.ArgumentParser( description = "This program modifies the reference in the ctm-edits which " "is output by steps/cleanup/internal/get_ctm_edits.py, to allow insertions, deletions and " "substitutions of non-scored words, and [if --allow-repetitions=true], " "duplications of single words or pairs of scored words (to account for dysfluencies " "that were not transcribed). Note: deletions and substitutions of non-scored words " "after the reference is corrected, will be marked as operation 'fix' rather than " "'cor' (correct) so that the downstream processing knows that this was not in " "the original reference. 
Also by defaults tags non-scored words as such when " "they are correct; see the --tag-non-scored option.") parser.add_argument("--verbose", type = int, default = 1, choices=[0,1,2,3], help = "Verbose level, higher = more verbose output") parser.add_argument("--allow-repetitions", type = str, default = 'true', choices=['true','false'], help = "If true, allow repetitions in the transcript of one or " "two-word sequences: for instance if the ref says 'i' but " "the hyp says 'i i', or the ref says 'but then' and the hyp says " "'but then but then', fix the reference accordingly. Intervening " "non-scored words are allowed between the repetitions. These " "fixes will be marked as 'cor', not as 'fix', since there is " "generally no way to tell which repetition was the 'real' one " "(and since we're generally confident that such things were " "actually uttered).") parser.add_argument("non_scored_words_in", metavar = "<non-scored-words-file>", help="Filename of file containing a list of non-scored words, " "one per line. See steps/cleanup/get_nonscored_words.py.") parser.add_argument("ctm_edits_in", metavar = "<ctm-edits-in>", help = "Filename of input ctm-edits file. " "Use /dev/stdin for standard input.") parser.add_argument("ctm_edits_out", metavar = "<ctm-edits-out>", help = "Filename of output ctm-edits file. " "Use /dev/stdout for standard output.") args = parser.parse_args() def ReadNonScoredWords(non_scored_words_file): global non_scored_words try: f = open(non_scored_words_file) except: sys.exit("modify_ctm_edits.py: error opening file: " "--non-scored-words=" + non_scored_words_file) for line in f.readlines(): a = line.split() if not len(line.split()) == 1: sys.exit("modify_ctm_edits.py: bad line in non-scored-words " "file {0}: {1}".format(non_scored_words_file, line)) non_scored_words.add(a[0]) f.close() # The ctm-edits file format is as follows [note: file-id is really utterance-id # in this context]. # <file-id> <channel> <start-time> <duration> <conf> <hyp-word> <ref-word> <edit> # e.g.: # AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil # AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor # ... # This function processes a single line of ctm-edits input for fixing # "non-scored" words. The input 'a' is the split line as an array of fields. # It modifies the object 'a'. This function returns the modified array, # and please note that it is destructive of its input 'a'. # If it returnso the empty array then the line is to be deleted. def ProcessLineForNonScoredWords(a): global num_lines, num_correct_lines, ref_change_stats try: assert len(a) == 8 num_lines += 1 # we could do: # [ file, channel, start, duration, hyp_word, confidence, ref_word, edit_type ] = a duration = a[3] hyp_word = a[4] ref_word = a[6] edit_type = a[7] if edit_type == 'ins': assert ref_word == '<eps>' if hyp_word in non_scored_words: # insert this non-scored word into the reference. ref_change_stats[ref_word + ' -> ' + hyp_word] += 1 ref_word = hyp_word edit_type = 'fix' elif edit_type == 'del': assert hyp_word == '<eps>' and float(duration) == 0.0 if ref_word in non_scored_words: ref_change_stats[ref_word + ' -> ' + hyp_word] += 1 return [] elif edit_type == 'sub': assert hyp_word != '<eps>' if hyp_word in non_scored_words and ref_word in non_scored_words: # we also allow replacing one non-scored word with another. 
ref_change_stats[ref_word + ' -> ' + hyp_word] += 1 ref_word = hyp_word edit_type = 'fix' else: assert edit_type == 'cor' or edit_type == 'sil' num_correct_lines += 1 a[4] = hyp_word a[6] = ref_word a[7] = edit_type return a except Exception: logger.error("bad line in ctm-edits input: " "{0}".format(a)) raise RuntimeError # This function processes the split lines of one utterance (as a # list of lists of fields), to allow repetitions of words, so if the # reference says 'i' but the hyp says 'i i', or the ref says # 'you know' and the hyp says 'you know you know', we change the # ref to match. # It returns the modified list-of-lists [but note that the input # is actually modified]. def ProcessUtteranceForRepetitions(split_lines_of_utt): global non_scored_words, repetition_stats # The array 'selected_lines' will contain the indexes of of selected # elements of 'split_lines_of_utt'. Consider split_line = # split_lines_of_utt[i]. If the hyp and ref words in split_line are both # either '<eps>' or non-scoreable words, we discard the index. # Otherwise we put it into selected_lines. selected_line_indexes = [] # selected_edits will contain, for each element of selected_line_indexes, the # corresponding edit_type from the original utterance previous to # this function call ('cor', 'ins', etc.). # # As a special case, if there was a substitution ('sub') where the # reference word was a non-scored word and the hyp word was a real word, # we mark it in this array as 'ins', because for purposes of this algorithm # it behaves the same as an insertion. # # Whenever we do any operation that will change the reference, we change # all the selected_edits in the array to None so that they won't match # any further operations. selected_edits = [] # selected_hyp_words will contain, for each element of selected_line_indexes, the # corresponding hyp_word. selected_hyp_words = [] for i in range(len(split_lines_of_utt)): split_line = split_lines_of_utt[i] hyp_word = split_line[4] ref_word = split_line[6] # keep_this_line will be True if we are going to keep this line in the # 'selected lines' for further processing of repetitions. We only # eliminate lines involving non-scored words or epsilon in both hyp # and reference position # [note: epsilon in hyp position for non-empty segments indicates # optional-silence, and it does make sense to make this 'invisible', # just like non-scored words, for the purposes of this code.] keep_this_line = True if (hyp_word == '<eps>' or hyp_word in non_scored_words) and \ (ref_word == '<eps>' or ref_word in non_scored_words): keep_this_line = False if keep_this_line: selected_line_indexes.append(i) edit_type = split_line[7] if edit_type == 'sub' and ref_word in non_scored_words: assert not hyp_word in non_scored_words # For purposes of this algorithm, substitution of, say, # '[COUGH]' by 'hello' behaves like an insertion of 'hello', # since we're willing to remove the '[COUGH]' from the # transript. edit_type = 'ins' selected_edits.append(edit_type) selected_hyp_words.append(hyp_word) # indexes_to_fix will be a list of indexes into 'selected_indexes' where we # plan to fix the ref to match the hyp. indexes_to_fix = [] # This loop scans for, and fixes, two-word insertions that follow, # or precede, the corresponding correct words. 
for i in range(0, len(selected_line_indexes) - 3): this_indexes = selected_line_indexes[i:i+4] this_hyp_words = selected_hyp_words[i:i+4] if this_hyp_words[0] == this_hyp_words[2] and \ this_hyp_words[1] == this_hyp_words[3] and \ this_hyp_words[0] != this_hyp_words[1]: # if the hyp words were of the form [ 'a', 'b', 'a', 'b' ]... this_edits = selected_edits[i:i+4] if this_edits == [ 'cor', 'cor', 'ins', 'ins' ] or \ this_edits == [ 'ins', 'ins', 'cor', 'cor' ]: if this_edits[0] == 'cor': indexes_to_fix += [ i+2, i+3 ] else: indexes_to_fix += [ i, i+1 ] # the next line prevents this region of the text being used # in any further edits. selected_edits[i:i+4] = [ None, None, None, None ] word_pair = this_hyp_words[0] + ' ' + this_hyp_words[1] # e.g. word_pair = 'hi there' # add 2 because these stats are of words. repetition_stats[word_pair] += 2 # the next line prevents this region of the text being used # in any further edits. selected_edits[i:i+4] = [ None, None, None, None ] # This loop scans for, and fixes, one-word insertions that follow, # or precede, the corresponding correct words. for i in range(0, len(selected_line_indexes) - 1): this_indexes = selected_line_indexes[i:i+2] this_hyp_words = selected_hyp_words[i:i+2] if this_hyp_words[0] == this_hyp_words[1]: # if the hyp words were of the form [ 'a', 'a' ]... this_edits = selected_edits[i:i+2] if this_edits == [ 'cor', 'ins' ] or this_edits == [ 'ins', 'cor' ]: if this_edits[0] == 'cor': indexes_to_fix.append(i+1) else: indexes_to_fix.append(i) repetition_stats[this_hyp_words[0]] += 1 # the next line prevents this region of the text being used # in any further edits. selected_edits[i:i+2] = [ None, None ] for i in indexes_to_fix: j = selected_line_indexes[i] split_line = split_lines_of_utt[j] ref_word = split_line[6] hyp_word = split_line[4] assert ref_word == '<eps>' or ref_word in non_scored_words # we replace reference with the decoded word, which will be a # repetition. split_line[6] = hyp_word split_line[7] = 'cor' return split_lines_of_utt # note: split_lines_of_utt is a list of lists, one per line, each containing the # sequence of fields. # Returns the same format of data after processing. def ProcessUtterance(split_lines_of_utt): new_split_lines_of_utt = [] for split_line in split_lines_of_utt: new_split_line = ProcessLineForNonScoredWords(split_line) if new_split_line != []: new_split_lines_of_utt.append(new_split_line) if args.allow_repetitions == 'true': new_split_lines_of_utt = ProcessUtteranceForRepetitions(new_split_lines_of_utt) return new_split_lines_of_utt def ProcessData(): try: f_in = open(args.ctm_edits_in) except: sys.exit("modify_ctm_edits.py: error opening ctm-edits input " "file {0}".format(args.ctm_edits_in)) try: f_out = open(args.ctm_edits_out, 'w') except: sys.exit("modify_ctm_edits.py: error opening ctm-edits output " "file {0}".format(args.ctm_edits_out)) num_lines_processed = 0 # Most of what we're doing in the lines below is splitting the input lines # and grouping them per utterance, before giving them to ProcessUtterance() # and then printing the modified lines. 
first_line = f_in.readline() if first_line == '': sys.exit("modify_ctm_edits.py: empty input") split_pending_line = first_line.split() if len(split_pending_line) == 0: sys.exit("modify_ctm_edits.py: bad input line " + first_line) cur_utterance = split_pending_line[0] split_lines_of_cur_utterance = [] while True: if len(split_pending_line) == 0 or split_pending_line[0] != cur_utterance: split_lines_of_cur_utterance = ProcessUtterance(split_lines_of_cur_utterance) for split_line in split_lines_of_cur_utterance: print(' '.join(split_line), file = f_out) split_lines_of_cur_utterance = [] if len(split_pending_line) == 0: break else: cur_utterance = split_pending_line[0] split_lines_of_cur_utterance.append(split_pending_line) next_line = f_in.readline() split_pending_line = next_line.split() if len(split_pending_line) == 0: if next_line != '': sys.exit("modify_ctm_edits.py: got an empty or whitespace input line") try: f_out.close() except: sys.exit("modify_ctm_edits.py: error closing ctm-edits output " "(broken pipe or full disk?)") def PrintNonScoredStats(): if args.verbose < 1: return if num_lines == 0: print("modify_ctm_edits.py: processed no input.", file = sys.stderr) num_lines_modified = sum(ref_change_stats.values()) num_incorrect_lines = num_lines - num_correct_lines percent_lines_incorrect= '%.2f' % (num_incorrect_lines * 100.0 / num_lines) percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines); if num_incorrect_lines > 0: percent_of_incorrect_modified = '%.2f' % (num_lines_modified * 100.0 / num_incorrect_lines) else: percent_of_incorrect_modified = float('nan') print("modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), " "of which {2} were changed fixing the reference for non-scored words " "({3}% of lines, or {4}% of incorrect lines)".format( num_lines, percent_lines_incorrect, num_lines_modified, percent_modified, percent_of_incorrect_modified), file = sys.stderr) keys = sorted(ref_change_stats.keys(), reverse=True, key = lambda x: ref_change_stats[x]) num_keys_to_print = 40 if args.verbose >= 2 else 10 print("modify_ctm_edits.py: most common edits (as percentages " "of all such edits) are:\n" + ('\n'.join([ '%s [%.2f%%]' % (k, ref_change_stats[k]*100.0/num_lines_modified) for k in keys[0:num_keys_to_print]])) + '\n...'if num_keys_to_print < len(keys) else '', file = sys.stderr) def PrintRepetitionStats(): if args.verbose < 1 or sum(repetition_stats.values()) == 0: return num_lines_modified = sum(repetition_stats.values()) num_incorrect_lines = num_lines - num_correct_lines percent_lines_incorrect= '%.2f' % (num_incorrect_lines * 100.0 / num_lines) percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines); if num_incorrect_lines > 0: percent_of_incorrect_modified = '%.2f' % (num_lines_modified * 100.0 / num_incorrect_lines) else: percent_of_incorrect_modified = float('nan') print("modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), " "of which {2} were changed fixing the reference for repetitions ({3}% of " "lines, or {4}% of incorrect lines)".format( num_lines, percent_lines_incorrect, num_lines_modified, percent_modified, percent_of_incorrect_modified), file = sys.stderr) keys = sorted(repetition_stats.keys(), reverse=True, key = lambda x: repetition_stats[x]) num_keys_to_print = 40 if args.verbose >= 2 else 10 print("modify_ctm_edits.py: most common repetitions inserted into reference (as percentages " "of all words fixed in this way) are:\n" + ('\n'.join([ '%s [%.2f%%]' % (k, 
repetition_stats[k]*100.0/num_lines_modified) for k in keys[0:num_keys_to_print]])) + '\n...' if num_keys_to_print < len(keys) else '', file = sys.stderr) non_scored_words = set() ReadNonScoredWords(args.non_scored_words_in) num_lines = 0 num_correct_lines = 0 # ref_change_stats will be a map from a string like # 'foo -> bar' to an integer count; it keeps track of how much we changed # the reference. ref_change_stats = defaultdict(int) # repetition_stats will be a map from strings like # 'a', or 'a b' (the repeated strings), to an integer count; like # ref_change_stats, it keeps track of how many changes we made # in allowing repetitions. repetition_stats = defaultdict(int) ProcessData() PrintNonScoredStats() PrintRepetitionStats()
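To make the non-scored-word rule concrete, here is a standalone re-implementation (not the script itself) of the single case where an inserted non-scored word such as [UH] is copied into the reference and re-labelled 'fix'; the word list and input line are illustrative:

non_scored_words = {'[UH]', '[NOISE]', '[COUGH]'}  # illustrative list

def fix_nonscored_insertion(fields):
    # fields = [file, channel, start, duration, hyp_word, confidence, ref_word, edit_type]
    hyp, edit = fields[4], fields[7]
    if edit == 'ins' and hyp in non_scored_words:
        fields[6] = hyp      # copy the hyp word into the reference...
        fields[7] = 'fix'    # ...but mark it 'fix' rather than 'cor'
    return fields

line = "AJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 <eps> ins"
print(' '.join(fix_nonscored_insertion(line.split())))
# -> AJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 [UH] fix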
egs/wsj/s5/steps/cleanup/internal/modify_ctm_edits.py
20,279
!/usr/bin/env python2 Copyright 2016 Vimal Manohar 2016 Johns Hopkins University (author: Daniel Povey) Apache 2.0 The ctm-edits file format is as follows [note: file-id is really utterance-id in this context]. <file-id> <channel> <start-time> <duration> <conf> <hyp-word> <ref-word> <edit> e.g.: AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor ... This function processes a single line of ctm-edits input for fixing "non-scored" words. The input 'a' is the split line as an array of fields. It modifies the object 'a'. This function returns the modified array, and please note that it is destructive of its input 'a'. If it returnso the empty array then the line is to be deleted. we could do: [ file, channel, start, duration, hyp_word, confidence, ref_word, edit_type ] = a insert this non-scored word into the reference. we also allow replacing one non-scored word with another. This function processes the split lines of one utterance (as a list of lists of fields), to allow repetitions of words, so if the reference says 'i' but the hyp says 'i i', or the ref says 'you know' and the hyp says 'you know you know', we change the ref to match. It returns the modified list-of-lists [but note that the input is actually modified]. The array 'selected_lines' will contain the indexes of of selected elements of 'split_lines_of_utt'. Consider split_line = split_lines_of_utt[i]. If the hyp and ref words in split_line are both either '<eps>' or non-scoreable words, we discard the index. Otherwise we put it into selected_lines. selected_edits will contain, for each element of selected_line_indexes, the corresponding edit_type from the original utterance previous to this function call ('cor', 'ins', etc.). As a special case, if there was a substitution ('sub') where the reference word was a non-scored word and the hyp word was a real word, we mark it in this array as 'ins', because for purposes of this algorithm it behaves the same as an insertion. Whenever we do any operation that will change the reference, we change all the selected_edits in the array to None so that they won't match any further operations. selected_hyp_words will contain, for each element of selected_line_indexes, the corresponding hyp_word. keep_this_line will be True if we are going to keep this line in the 'selected lines' for further processing of repetitions. We only eliminate lines involving non-scored words or epsilon in both hyp and reference position [note: epsilon in hyp position for non-empty segments indicates optional-silence, and it does make sense to make this 'invisible', just like non-scored words, for the purposes of this code.] For purposes of this algorithm, substitution of, say, '[COUGH]' by 'hello' behaves like an insertion of 'hello', since we're willing to remove the '[COUGH]' from the transript. indexes_to_fix will be a list of indexes into 'selected_indexes' where we plan to fix the ref to match the hyp. This loop scans for, and fixes, two-word insertions that follow, or precede, the corresponding correct words. if the hyp words were of the form [ 'a', 'b', 'a', 'b' ]... the next line prevents this region of the text being used in any further edits. e.g. word_pair = 'hi there' add 2 because these stats are of words. the next line prevents this region of the text being used in any further edits. This loop scans for, and fixes, one-word insertions that follow, or precede, the corresponding correct words. if the hyp words were of the form [ 'a', 'a' ]... 
the next line prevents this region of the text being used in any further edits. we replace reference with the decoded word, which will be a repetition. note: split_lines_of_utt is a list of lists, one per line, each containing the sequence of fields. Returns the same format of data after processing. Most of what we're doing in the lines below is splitting the input lines and grouping them per utterance, before giving them to ProcessUtterance() and then printing the modified lines. ref_change_stats will be a map from a string like 'foo -> bar' to an integer count; it keeps track of how much we changed the reference. repetition_stats will be a map from strings like 'a', or 'a b' (the repeated strings), to an integer count; like ref_change_stats, it keeps track of how many changes we made in allowing repetitions.
4,397
en
0.863738
# Copyright (C) 2010-2011 Richard Lincoln # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """An extension to the Core and Topology package that models information on the electrical characteristics of Transmission and Distribution networks. This package is used by network applications such as State Estimation, Load Flow and Optimal Power Flow. """ from CIM15.CDPSM.Connectivity.IEC61970.Wires.Fuse import Fuse from CIM15.CDPSM.Connectivity.IEC61970.Wires.EnergyConsumer import EnergyConsumer from CIM15.CDPSM.Connectivity.IEC61970.Wires.Switch import Switch from CIM15.CDPSM.Connectivity.IEC61970.Wires.Disconnector import Disconnector from CIM15.CDPSM.Connectivity.IEC61970.Wires.ACLineSegment import ACLineSegment from CIM15.CDPSM.Connectivity.IEC61970.Wires.SynchronousMachine import SynchronousMachine from CIM15.CDPSM.Connectivity.IEC61970.Wires.BusbarSection import BusbarSection from CIM15.CDPSM.Connectivity.IEC61970.Wires.LoadBreakSwitch import LoadBreakSwitch from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerTank import TransformerTank from CIM15.CDPSM.Connectivity.IEC61970.Wires.GroundDisconnector import GroundDisconnector from CIM15.CDPSM.Connectivity.IEC61970.Wires.PowerTransformerEnd import PowerTransformerEnd from CIM15.CDPSM.Connectivity.IEC61970.Wires.Junction import Junction from CIM15.CDPSM.Connectivity.IEC61970.Wires.SeriesCompensator import SeriesCompensator from CIM15.CDPSM.Connectivity.IEC61970.Wires.Breaker import Breaker from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerTankEnd import TransformerTankEnd from CIM15.CDPSM.Connectivity.IEC61970.Wires.Sectionaliser import Sectionaliser from CIM15.CDPSM.Connectivity.IEC61970.Wires.DCLineSegment import DCLineSegment from CIM15.CDPSM.Connectivity.IEC61970.Wires.Line import Line from CIM15.CDPSM.Connectivity.IEC61970.Wires.Conductor import Conductor from CIM15.CDPSM.Connectivity.IEC61970.Wires.PowerTransformer import PowerTransformer from CIM15.CDPSM.Connectivity.IEC61970.Wires.Ground import Ground from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerEnd import TransformerEnd from CIM15.CDPSM.Connectivity.IEC61970.Wires.ShuntCompensator import ShuntCompensator from CIM15.CDPSM.Connectivity.IEC61970.Wires.EnergySource import EnergySource from CIM15.CDPSM.Connectivity.IEC61970.Wires.Jumper import Jumper nsURI = "http://iec.ch/TC57/2010/CIM-schema-cim15?profile=http://iec.ch/TC57/2011/iec61968-13/CDPSM/Connectivity#Wires" nsPrefix = "cimWires"
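Since this __init__ module only re-exports the wire classes, a caller would typically import them from the package directly, for example:

# Import a couple of the classes re-exported by the package above.
from CIM15.CDPSM.Connectivity.IEC61970.Wires import ACLineSegment, PowerTransformer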
CIM15/CDPSM/Connectivity/IEC61970/Wires/__init__.py
3,466
An extension to the Core and Topology package that models information on the electrical characteristics of Transmission and Distribution networks. This package is used by network applications such as State Estimation, Load Flow and Optimal Power Flow. Copyright (C) 2010-2011 Richard Lincoln Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1,314
en
0.885325
from django.apps import AppConfig


class BooksConfig(AppConfig):
    name = 'bookstudio.books'
    verbose_name = 'books'

    def ready(self):
        """Override this to put in:
            Users system checks
            Users signal registration
        """
        pass
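The docstring above suggests ready() is the place for signal registration; a hedged sketch of that common pattern (the signals module name is hypothetical, not part of this project):

from django.apps import AppConfig

class BooksConfig(AppConfig):
    name = 'bookstudio.books'
    verbose_name = 'books'

    def ready(self):
        # Import the app's signal handlers so their receivers get connected.
        from bookstudio.books import signals  # noqa: F401  (hypothetical module)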
bookstudio/books/apps.py
276
Override this to put in: Users system checks Users signal registration
70
en
0.708613
# pylint: disable=too-few-public-methods, no-member
"""API for scheduling learning rate."""
from .. import symbol as sym


class LRScheduler(object):
    """Base class of a learning rate scheduler.

    A scheduler returns a new learning rate based on the number of updates
    that have been performed.

    Parameters
    ----------
    base_lr : float, optional
        The initial learning rate.
    """
    def __init__(self, base_lr=0.01, name='LRScheduler'):
        self.name = name
        self.base_lr = base_lr

    def __call__(self, num_update):
        """Return a new learning rate based on number of updates.

        Parameters
        ----------
        num_update: nnvm Symbol
            the number of updates applied to weight.
        """
        raise NotImplementedError("__call__ method must be overridden.")


class FactorScheduler(LRScheduler):
    """Reduce the learning rate by a factor for every *n* steps.

    It returns a new learning rate by::

        base_lr * pow(factor, num_update/step)

    Parameters
    ----------
    step : int
        Changes the learning rate for every n updates.
    factor : float, optional
        The factor to change the learning rate.
    stop_factor_lr : float, optional
        Stop updating the learning rate if it is less than this value.
    """
    def __init__(self, step, factor=1, stop_factor_lr=1e-8, name='FactorScheduler', **kwargs):
        super(FactorScheduler, self).__init__(name=name, **kwargs)
        if step < 1:
            raise ValueError("Schedule step must be greater or equal than 1 round")
        if factor > 1.0:
            raise ValueError("Factor must be no more than 1 to make lr reduce")
        self.step = step
        self.factor = factor
        self.stop_factor_lr = stop_factor_lr

    def __call__(self, num_update):
        updated_lr = self.base_lr * self.factor ** (num_update / self.step)
        return sym.clip(updated_lr, a_min=self.stop_factor_lr, a_max=self.base_lr)
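As a plain-number illustration of the documented schedule base_lr * pow(factor, num_update/step), with arbitrarily chosen values and the lower clamp applied:

base_lr, factor, step, stop_factor_lr = 0.01, 0.5, 100, 1e-8
for num_update in (0, 100, 200, 300):
    lr = max(stop_factor_lr, base_lr * factor ** (num_update / step))
    print(num_update, lr)   # 0.01, 0.005, 0.0025, 0.00125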
nnvm/python/nnvm/compiler/lr_scheduler.py
1,985
Reduce the learning rate by a factor for every *n* steps. It returns a new learning rate by:: base_lr * pow(factor, num_update/step) Parameters ---------- step : int Changes the learning rate for every n updates. factor : float, optional The factor to change the learning rate. stop_factor_lr : float, optional Stop updating the learning rate if it is less than this value. Base class of a learning rate scheduler. A scheduler returns a new learning rate based on the number of updates that have been performed. Parameters ---------- base_lr : float, optional The initial learning rate. Return a new learning rate based on number of updates. Parameters ---------- num_update: nnvm Symbol the number of updates applied to weight. API for scheduling learning rate. pylint: disable=too-few-public-methods, no-member
844
en
0.693085
import sys import os import re import tempfile import auto_editor import auto_editor.vanparse as vanparse from auto_editor.utils.log import Log from auto_editor.ffwrapper import FFmpeg def grep_options(parser): parser.add_argument('--no-filename', action='store_true', help='Never print filenames with output lines.') parser.add_argument('--max-count', '-m', type=int, default=None, help='Stop reading a file after NUM matching lines.') parser.add_argument('--count', '-c', action='store_true', help='Suppress normal output; instead print count of matching lines for each file.') parser.add_argument('--ignore-case', '-i', action='store_true', help='Ignore case distinctions for the PATTERN.') parser.add_argument('--timecode', action='store_true', help="Print the match's timecode.") parser.add_argument('--time', action='store_true', help="Print when the match happens. (Ignore ending).") parser.add_argument('--ffmpeg-location', default=None, help='Point to your custom ffmpeg file.') parser.add_argument('--my-ffmpeg', action='store_true', help='Use the ffmpeg on your PATH instead of the one packaged.') parser.add_argument('--help', '-h', action='store_true', help='Print info about the program or an option and exit.') parser.add_required('input', nargs='*', help='The path to a file you want inspected.') return parser # stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string def cleanhtml(raw_html: str) -> str: cleanr = re.compile('<.*?>') cleantext = re.sub(cleanr, '', raw_html) return cleantext def grep_core( media_file: str, add_prefix: bool, ffmpeg: FFmpeg, args, log: Log, TEMP: str ) -> None: """ We're using the WEBVTT subtitle format. It's better than srt because it doesn't emit line numbers and the time code is in (hh:mm:ss.sss) instead of (dd:hh:mm:ss,sss) """ out_file = os.path.join(TEMP, 'media.vtt') ffmpeg.run(['-i', media_file, out_file]) count = 0 flags = 0 if args.ignore_case: flags = re.IGNORECASE prefix = '' if add_prefix: prefix = '{}:'.format(os.path.splitext(os.path.basename(media_file))[0]) if args.max_count is None: args.max_count = float('inf') timecode = '' line_number = -1 with open(out_file, 'r') as file: while True: line = file.readline() line_number += 1 if line_number == 0: continue if not line or count >= args.max_count: break if line.strip() == '': continue if re.match(r'\d*:\d\d.\d*\s-->\s\d*:\d\d.\d*', line): if args.time: timecode = line.split('-->')[0].strip() + ' ' else: timecode = line.strip() + '; ' continue line = cleanhtml(line) match = re.search(args.input[0], line, flags) line = line.strip() if match: count += 1 if not args.count: if args.timecode or args.time: print(prefix + timecode + line) else: print(prefix + line) if args.count: print(prefix + str(count)) def main(sys_args=sys.argv[1:]): parser = vanparse.ArgumentParser('grep', auto_editor.version, description='Read and match subtitle tracks in media files.', ) parser = grep_options(parser) TEMP = tempfile.mkdtemp() log = Log(temp=TEMP) try: args = parser.parse_args(sys_args) except vanparse.ParserError as e: log.error(str(e)) ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, debug=False) media_files = args.input[1:] add_prefix = (len(media_files) > 1 or os.path.isdir(media_files[0])) and not args.no_filename for media_file in media_files: if not os.path.exists(media_file): log.error(f'{media_file}: File does not exist.') if os.path.isdir(media_file): for _, _, files in os.walk(media_file): for file in files: if file == '.DS_Store': continue grep_core(os.path.join(media_file, 
file), add_prefix, ffmpeg, args, log, TEMP) else: grep_core(media_file, add_prefix, ffmpeg, args, log, TEMP) log.cleanup() if __name__ == '__main__': main()
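Given the argument handling above (the first positional argument is the pattern, the rest are media paths), the subcommand can be driven directly from Python; the media filename is a placeholder and ffmpeg must be available:

from auto_editor.subcommands.grep import main

# Print subtitle lines in example.mp4 that match the pattern 'hello';
# a WEBVTT cue header such as "00:01.000 --> 00:04.000" is what the
# timecode regex above recognises.
main(['hello', 'example.mp4'])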
auto_editor/subcommands/grep.py
4,597
We're using the WEBVTT subtitle format. It's better than srt because it doesn't emit line numbers and the time code is in (hh:mm:ss.sss) instead of (dd:hh:mm:ss,sss) stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
249
en
0.764102
""" Create a blueprint with endpoints for logins from configured identity providers. The identity providers include, for example, Google, Shibboleth, or another fence instance. See the other files in this directory for the definitions of the endpoints for each provider. """ from authlib.common.urls import add_params_to_uri import flask import requests from cdislogging import get_logger from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback from fence.blueprints.login.fence_login import FenceLogin, FenceCallback from fence.blueprints.login.google import GoogleLogin, GoogleCallback from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback from fence.blueprints.login.okta import OktaLogin, OktaCallback from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback from fence.blueprints.login.ras import RASLogin, RASCallback from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback from fence.errors import InternalError from fence.resources.audit.utils import enable_audit_logging from fence.restful import RestfulApi from fence.config import config logger = get_logger(__name__) # Mapping from IDP ID to the name in the URL on the blueprint (see below). IDP_URL_MAP = { "fence": "fence", "google": "google", "shibboleth": "shib", "orcid": "orcid", "synapse": "synapse", "microsoft": "microsoft", "okta": "okta", "cognito": "cognito", "ras": "ras", "cilogon": "cilogon", } def absolute_login_url(provider_id, fence_idp=None, shib_idp=None): """ Args: provider_id (str): provider to log in with; an IDP_URL_MAP key. fence_idp (str, optional): if provider_id is "fence" (multi-tenant Fence setup), fence_idp can be any of the providers supported by the other Fence. If not specified, will default to NIH login. shib_idp (str, optional): if provider_id is "fence" and fence_idp is "shibboleth", shib_idp can be any Shibboleth/ InCommon provider. If not specified, will default to NIH login. Returns: str: login URL for this provider, including extra query parameters if fence_idp and/or shib_idp are specified. """ try: base_url = config["BASE_URL"].rstrip("/") login_url = base_url + "/login/{}".format(IDP_URL_MAP[provider_id]) except KeyError as e: raise InternalError("identity provider misconfigured: {}".format(str(e))) params = {} if fence_idp: params["idp"] = fence_idp if shib_idp: params["shib_idp"] = shib_idp login_url = add_params_to_uri(login_url, params) return login_url def provider_info(login_details): """ Args: login_details (dict): { name, desc, idp, fence_idp, shib_idps, secondary } - "idp": a configured provider. Multiple options can be configured with the same idp. - if provider_id is "fence", "fence_idp" can be any of the providers supported by the other Fence. If not specified, will default to NIH login. - if provider_id is "fence" and fence_idp is "shibboleth", a list of "shib_idps" can be configured for InCommon login. If not specified, will default to NIH login. - Optional parameters: "desc" (description) and "secondary" (boolean - can be used by the frontend to display secondary buttons differently). 
Returns: dict: { name, desc, idp, urls, secondary } - urls: list of { name, url } dictionaries """ info = { # "id" deprecated, replaced by "idp" "id": login_details["idp"], "idp": login_details["idp"], "name": login_details["name"], # "url" deprecated, replaced by "urls" "url": absolute_login_url(login_details["idp"]), "desc": login_details.get("desc", None), "secondary": login_details.get("secondary", False), } # for Fence multi-tenant login fence_idp = None if login_details["idp"] == "fence": fence_idp = login_details.get("fence_idp") # handle Shibboleth IDPs: InCommon login can either be configured # directly in this Fence, or through multi-tenant Fence if ( login_details["idp"] == "shibboleth" or fence_idp == "shibboleth" ) and "shib_idps" in login_details: # get list of all available shib IDPs if not hasattr(flask.current_app, "all_shib_idps"): flask.current_app.all_shib_idps = get_all_shib_idps() requested_shib_idps = login_details["shib_idps"] if requested_shib_idps == "*": shib_idps = flask.current_app.all_shib_idps elif isinstance(requested_shib_idps, list): # get the display names for each requested shib IDP shib_idps = [] for requested_shib_idp in requested_shib_idps: shib_idp = next( ( available_shib_idp for available_shib_idp in flask.current_app.all_shib_idps if available_shib_idp["idp"] == requested_shib_idp ), None, ) if not shib_idp: raise InternalError( 'Requested shib_idp "{}" does not exist'.format( requested_shib_idp ) ) shib_idps.append(shib_idp) else: raise InternalError( 'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format( requested_shib_idps ) ) info["urls"] = [ { "name": shib_idp["name"], "url": absolute_login_url( login_details["idp"], fence_idp, shib_idp["idp"] ), } for shib_idp in shib_idps ] # non-Shibboleth provider else: info["urls"] = [ { "name": login_details["name"], "url": absolute_login_url(login_details["idp"], fence_idp), } ] return info def get_login_providers_info(): # default login option if config.get("DEFAULT_LOGIN_IDP"): default_idp = config["DEFAULT_LOGIN_IDP"] elif "default" in config.get("ENABLED_IDENTITY_PROVIDERS", {}): # fall back on ENABLED_IDENTITY_PROVIDERS.default default_idp = config["ENABLED_IDENTITY_PROVIDERS"]["default"] else: logger.warning("DEFAULT_LOGIN_IDP not configured") default_idp = None # other login options if config["LOGIN_OPTIONS"]: login_options = config["LOGIN_OPTIONS"] elif "providers" in config.get("ENABLED_IDENTITY_PROVIDERS", {}): # fall back on "providers" and convert to "login_options" format enabled_providers = config["ENABLED_IDENTITY_PROVIDERS"]["providers"] login_options = [ { "name": details.get("name"), "idp": idp, "desc": details.get("desc"), "secondary": details.get("secondary"), } for idp, details in enabled_providers.items() ] else: logger.warning("LOGIN_OPTIONS not configured or empty") login_options = [] try: all_provider_info = [ provider_info(login_details) for login_details in login_options ] except KeyError as e: raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e)) # if several login_options are defined for this default IDP, will # default to the first one: default_provider_info = next( (info for info in all_provider_info if info["idp"] == default_idp), None ) if not default_provider_info: raise InternalError( "default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format( default_idp ) ) return default_provider_info, all_provider_info def make_login_blueprint(): """ Return: 
flask.Blueprint: the blueprint used for ``/login`` endpoints Raises: ValueError: if app is not amenably configured """ blueprint = flask.Blueprint("login", __name__) blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging]) @blueprint.route("", methods=["GET"]) def default_login(): """ The default root login route. """ default_provider_info, all_provider_info = get_login_providers_info() return flask.jsonify( {"default_provider": default_provider_info, "providers": all_provider_info} ) # Add identity provider login routes for IDPs enabled in the config. configured_idps = config["OPENID_CONNECT"].keys() if "fence" in configured_idps: blueprint_api.add_resource(FenceLogin, "/fence", strict_slashes=False) blueprint_api.add_resource(FenceCallback, "/fence/login", strict_slashes=False) if "google" in configured_idps: blueprint_api.add_resource(GoogleLogin, "/google", strict_slashes=False) blueprint_api.add_resource( GoogleCallback, "/google/login", strict_slashes=False ) if "orcid" in configured_idps: blueprint_api.add_resource(ORCIDLogin, "/orcid", strict_slashes=False) blueprint_api.add_resource(ORCIDCallback, "/orcid/login", strict_slashes=False) if "ras" in configured_idps: blueprint_api.add_resource(RASLogin, "/ras", strict_slashes=False) # note that the callback endpoint is "/ras/callback", not "/ras/login" like other IDPs blueprint_api.add_resource(RASCallback, "/ras/callback", strict_slashes=False) if "synapse" in configured_idps: blueprint_api.add_resource(SynapseLogin, "/synapse", strict_slashes=False) blueprint_api.add_resource( SynapseCallback, "/synapse/login", strict_slashes=False ) if "microsoft" in configured_idps: blueprint_api.add_resource(MicrosoftLogin, "/microsoft", strict_slashes=False) blueprint_api.add_resource( MicrosoftCallback, "/microsoft/login", strict_slashes=False ) if "okta" in configured_idps: blueprint_api.add_resource(OktaLogin, "/okta", strict_slashes=False) blueprint_api.add_resource(OktaCallback, "/okta/login", strict_slashes=False) if "cognito" in configured_idps: blueprint_api.add_resource(CognitoLogin, "/cognito", strict_slashes=False) blueprint_api.add_resource( CognitoCallback, "/cognito/login", strict_slashes=False ) if "shibboleth" in configured_idps: blueprint_api.add_resource(ShibbolethLogin, "/shib", strict_slashes=False) blueprint_api.add_resource( ShibbolethCallback, "/shib/login", strict_slashes=False ) if "cilogon" in configured_idps: blueprint_api.add_resource(CilogonLogin, "/cilogon", strict_slashes=False) blueprint_api.add_resource( CilogonCallback, "/cilogon/login", strict_slashes=False ) return blueprint def get_all_shib_idps(): """ Get the list of all existing Shibboleth IDPs. This function only returns the information we need to generate login URLs. Returns: list: list of {"idp": "", "name": ""} dictionaries """ url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url") if not url: raise InternalError( "Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured" ) res = requests.get(url) assert ( res.status_code == 200 ), "Unable to get list of Shibboleth IDPs from {}".format(url) all_shib_idps = [] for shib_idp in res.json(): if "entityID" not in shib_idp: logger.warning( f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP." 
) continue idp = shib_idp["entityID"] if len(shib_idp.get("DisplayNames", [])) > 0: name = get_shib_idp_en_name(shib_idp["DisplayNames"]) else: logger.warning( f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name." ) name = idp all_shib_idps.append( { "idp": idp, "name": name, } ) return all_shib_idps def get_shib_idp_en_name(names): """ Returns a name in English for a Shibboleth IDP, or the first available name if no English name was provided. Args: names (list): list of {"lang": "", "value": ""} dictionaries Example: [ { "value": "University of Chicago", "lang": "en" }, { "value": "Universidad de Chicago", "lang": "es" } ] Returns: str: Display name to use for this Shibboleth IDP """ for name in names: if name.get("lang") == "en": return name["value"] return names[0]["value"]
fence/blueprints/login/__init__.py
13,602
Args: provider_id (str): provider to log in with; an IDP_URL_MAP key. fence_idp (str, optional): if provider_id is "fence" (multi-tenant Fence setup), fence_idp can be any of the providers supported by the other Fence. If not specified, will default to NIH login. shib_idp (str, optional): if provider_id is "fence" and fence_idp is "shibboleth", shib_idp can be any Shibboleth/ InCommon provider. If not specified, will default to NIH login. Returns: str: login URL for this provider, including extra query parameters if fence_idp and/or shib_idp are specified. The default root login route. Get the list of all existing Shibboleth IDPs. This function only returns the information we need to generate login URLs. Returns: list: list of {"idp": "", "name": ""} dictionaries Returns a name in English for a Shibboleth IDP, or the first available name if no English name was provided. Args: names (list): list of {"lang": "", "value": ""} dictionaries Example: [ { "value": "University of Chicago", "lang": "en" }, { "value": "Universidad de Chicago", "lang": "es" } ] Returns: str: Display name to use for this Shibboleth IDP Return: flask.Blueprint: the blueprint used for ``/login`` endpoints Raises: ValueError: if app is not amenably configured Args: login_details (dict): { name, desc, idp, fence_idp, shib_idps, secondary } - "idp": a configured provider. Multiple options can be configured with the same idp. - if provider_id is "fence", "fence_idp" can be any of the providers supported by the other Fence. If not specified, will default to NIH login. - if provider_id is "fence" and fence_idp is "shibboleth", a list of "shib_idps" can be configured for InCommon login. If not specified, will default to NIH login. - Optional parameters: "desc" (description) and "secondary" (boolean - can be used by the frontend to display secondary buttons differently). Returns: dict: { name, desc, idp, urls, secondary } - urls: list of { name, url } dictionaries Create a blueprint with endpoints for logins from configured identity providers. The identity providers include, for example, Google, Shibboleth, or another fence instance. See the other files in this directory for the definitions of the endpoints for each provider. Mapping from IDP ID to the name in the URL on the blueprint (see below). "id" deprecated, replaced by "idp" "url" deprecated, replaced by "urls" for Fence multi-tenant login handle Shibboleth IDPs: InCommon login can either be configured directly in this Fence, or through multi-tenant Fence get list of all available shib IDPs get the display names for each requested shib IDP non-Shibboleth provider default login option fall back on ENABLED_IDENTITY_PROVIDERS.default other login options fall back on "providers" and convert to "login_options" format if several login_options are defined for this default IDP, will default to the first one: Add identity provider login routes for IDPs enabled in the config. note that the callback endpoint is "/ras/callback", not "/ras/login" like other IDPs
3,319
en
0.68775
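The absolute_login_url docstring above describes how the login URL is assembled: BASE_URL plus the IDP_URL_MAP path, with optional "idp" and "shib_idp" query parameters for multi-tenant Fence. A minimal standalone sketch of that assembly (not Fence itself; the base URL and map values here are hypothetical):

```python
# Standalone sketch of the URL construction described above.
# BASE_URL and the map entries are made-up examples, not real config.
from urllib.parse import urlencode

IDP_URL_MAP = {"fence": "fence", "google": "google", "shibboleth": "shib"}
BASE_URL = "https://example-commons.org/user"  # hypothetical BASE_URL

def build_login_url(provider_id, fence_idp=None, shib_idp=None):
    path = IDP_URL_MAP[provider_id]  # a KeyError here means the provider is misconfigured
    url = f"{BASE_URL.rstrip('/')}/login/{path}"
    params = {}
    if fence_idp:
        params["idp"] = fence_idp
    if shib_idp:
        params["shib_idp"] = shib_idp
    return f"{url}?{urlencode(params)}" if params else url

# e.g. multi-tenant Fence login through an InCommon IDP:
# build_login_url("fence", fence_idp="shibboleth", shib_idp="urn:example:idp")
```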
#! /usr/bin/python """ Monitoring functions for xrootd cache server, producing classads that can be handed to condor """ import os import math import time import errno import struct import collections import six from six.moves import urllib import classad import XRootD.client __all__ = ['collect_cache_stats'] # these paths in the cache are to be treated as top level "VOs" for stats collection vo_paths = [ '/user', '/pnfs/fnal.gov/usr' ] def _split_path(path): """ Split a path into a list of directory names """ if path[0] != '/': raise Exception("Not absolute path") result = [] while path != '/': path, tail = os.path.split(path) if tail: result.append(tail) return list(reversed(result)) def _is_prefix(lhs, rhs): """ return True if the first list is a prefix of the second """ rhs = list(rhs) while rhs: if lhs == rhs: return True rhs.pop() return False def scan_cache_dirs(rootdir): """ Scan the top level directory of the cache. Walks the path looking for directories that are not in vo_paths. For each of these generate a cache summary """ results = {} try: root_components = _split_path(rootdir) for dirpath, dirnames, filenames in os.walk(rootdir, topdown=True): # get the path components as a list, removing the rootdir part dirpath_components = _split_path(dirpath)[len(root_components):] for name in list(dirnames): path_components = dirpath_components + [name] for p in [ _split_path(p) for p in vo_paths]: # if this directory is in vo_paths, keep recursing if _is_prefix( path_components, p): break else: # if nothing is in vo_paths, get the stats and remove from dirnames # so this walk goes no further vo_name = os.path.join('/', *path_components) try: results[vo_name] = scan_vo_dir(os.path.join(dirpath, name)) except (OSError, IOError) as ex: results[vo_name] = {'scan_vo_dir_error': str(ex) } dirnames.remove(name) return results except (OSError, IOError) as ex: return { 'scan_cache_dirs_error' : { 'message' : str(ex) } } # error message? 
def scan_vo_dir(vodir): """ Scan a VO directory (assumed to be the whole directory tree after the top level """ now = time.time() totalsize = 0 nfiles = 0 naccesses = 0 accesses = collections.defaultdict(int) most_recent_access = 0 bad_cinfo_files = 0 for root, dirs, files in os.walk(vodir): fnames = set(files) # Somebody might add a file ending in .cinfo in the cache # so look for the f, f.cinfo pair for f, cinfo in ((f, f + '.cinfo') for f in fnames if f + '.cinfo' in fnames): try: st = os.stat(os.path.join(root, f)) except OSError as ex: if ex.errno == errno.ENOENT: # must have just been deleted continue else: raise try: access_info = read_cinfo(os.path.join(root, cinfo), now) except OSError as ex: if ex.errno == errno.ENOENT: continue else: bad_cinfo_files += 1 access_info = { "naccesses" : 0, "last_access": 0, "by_hour" : {} } except ReadCInfoError as ex: bad_cinfo_files += 1 access_info = ex.access_info nfiles += 1 file_size = st.st_blocks*512 # allow for sparse files totalsize += file_size naccesses += access_info["naccesses"] most_recent_access = max(most_recent_access, access_info["last_access"]) for h in access_info["by_hour"]: accesses["naccesses_hr_" + h] += access_info["by_hour"][h] accesses["bytes_hr_" + h] += access_info["bytes_hr"][h] result = classad.ClassAd({ "used_bytes" : totalsize, "nfiles" : nfiles, "naccesses" : naccesses, "bad_cinfo_files" : bad_cinfo_files }) result.update(accesses) if most_recent_access > 0: result["most_recent_access_time"] = most_recent_access return result # Parsing the cinfo files # The header (not a c struct; consecutive separate values with no padding) # version + buffer size + file size (blocks) # int + long long + long long _header_fmt = '=iqq' _header_fmt_size = struct.calcsize(_header_fmt) # then the number of accesses # int _int_fmt = '@q' _int_fmt_size = struct.calcsize(_int_fmt) # each access contains a struct (native size + padding) # AttachTime + DetachTime + BytesDisk + BytesRam + BytesMissed # time_t + long long + long long + long long + long long _status_fmt = '@qqqqq' _status_fmt_size = struct.calcsize(_status_fmt) class ReadCInfoError(Exception): def __init__(self, *args): Exception.__init__(self, *args) if len(args) > 1: self.access_info = args[1] else: self.access_info = {} def read_cinfo(cinfo_file, now): """ Try to extract useful info from the cinfo file """ result = { "naccesses": 0, "last_access": 0, "by_hour" : { "01": 0, "12": 0, "24": 0 }, "bytes_hr" : { "01": 0, "12": 0, "24": 0 }, } cf = open(cinfo_file, 'rb') # read and unpack the header buf = cf.read(_header_fmt_size) if len(buf) < _header_fmt_size: # a mangled file raise ReadCInfoError("%s header too short" % cinfo_file, result) version, buffer_size, file_size = struct.unpack(_header_fmt, buf) # we only understand version 2 if version != 2: raise ReadCInfoError("%s unknown version: %s" % (cinfo_file, version), result) # Get the size of the state vector and skip over it # buff_synced uses 1 bit per bufferSize block of bytes # Length is rounded up to the nearest byte buff_synced_len = int(math.ceil(float(file_size)/buffer_size/8)) # If the file_size is zero, state vector length is 1 # (Difference is due to Python's integer division returning the floor) if file_size == 0: buff_synced_len = 1 cf.read(buff_synced_len) # Go past cksum (char[16]) and creationTime (time_t) cf.read(16 + 8) # now the access count (an int) buf = cf.read(_int_fmt_size) if len(buf) < _int_fmt_size: raise ReadCInfoError("%s: invalid access field" % cinfo_file, result) access_count, = 
struct.unpack(_int_fmt, buf) result["naccesses"] = access_count if access_count < 0: raise ReadCInfoError("%s: invalid access count: %s" % (cinfo_file, access_count), result) elif access_count == 0: return result # read the access times hr_01 = now - 60*60 hr_12 = now - 12*60*60 hr_24 = now - 24*60*60 # Read AStat structs try: for buf in iter(lambda: cf.read(_status_fmt_size), b''): access_time, _, bytes_disk, bytes_ram, _ = struct.unpack(_status_fmt, buf) result["last_access"] = access_time #print access_time, bytes_disk, bytes_ram #print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(access_time)) intervals = list() if access_time >= hr_01: intervals.append('01') if access_time >= hr_12: intervals.append('12') if access_time >= hr_24: intervals.append('24') else: # no longer interested next for interval in intervals: result["by_hour"][interval] += 1 result["bytes_hr"][interval] += bytes_disk + bytes_ram except struct.error as ex: # return what we've got raise ReadCInfoError("%s unable to decode access time data: %s" % (cinfo_file, str(ex)), result) return result def test_xrootd_server(url): """ Contact the xrootd server to check if it's alive """ try: myclient = XRootD.client.FileSystem(url) startt = time.time() response, _ = myclient.ping(timeout=10) elapsed = time.time() - startt if response.fatal: status = "fatal" elif response.error: status = "error" elif response.ok: status = "ok" else: status = "unknown" result = {"ping_response_status" : status, "ping_response_code" : response.code, "ping_response_message" : response.message, "ping_elapsed_time" : elapsed} return result except Exception as ex: # more specific exception would be better return {"ping_response_status" : "failed", "ping_response_code" : -1, "ping_response_message" : str(ex), "ping_elapsed_time" : 0.0} def get_cache_info(rootdir, cache_max_fs_fraction): """Get information about the cache itself""" result = {} try: stat = os.statvfs(rootdir) total_size = int(stat.f_blocks*stat.f_bsize*cache_max_fs_fraction) free_size = int(total_size - (stat.f_blocks-stat.f_bfree)*stat.f_bsize) result['total_cache_bytes'] = total_size result['free_cache_bytes'] = free_size result['free_cache_fraction'] = 1 - float(stat.f_blocks-stat.f_bfree)/int(stat.f_blocks*cache_max_fs_fraction) return result except (OSError, IOError) as ex: return {} def collect_cache_stats(url, rootdir, cache_max_fs_fraction=1.0): """ Collect stats on the cache server """ start_time = time.time() parsed_url = urllib.parse.urlparse(url) # Python 2.6's urlparse returns a ParseResult object whereas # Python 2.4's urlparse returns a tuple that doesn't handle # root:// properly try: if parsed_url.scheme not in ('root', 'xroot'): raise Exception("URL '%s' is not an xrootd url" % url) hostname = parsed_url.netloc except AttributeError: if parsed_url[0] not in ('root', 'xroot'): raise Exception("URL '%s' is not an xrootd url" % url) hostname = parsed_url[2][2:] # Avoid the '//' prefix result = {'MyType' : 'Machine', 'Name': 'xrootd@%s' % hostname, 'stats_time' : int(start_time)} result.update(test_xrootd_server(url)) result.update(get_cache_info(rootdir, cache_max_fs_fraction)) stats_per_vo = scan_cache_dirs(rootdir) # add up the sizes totals = dict() most_recent_access = 0 result['VO'] = {} for vo, vostats in stats_per_vo.items(): for k, v in vostats.items(): if k == "most_recent_access_time": most_recent_access = max(most_recent_access, v) else: try: totals[k] += v except KeyError: totals[k] = v result['VO'][vo] = vostats result['used_cache_bytes'] = 
totals.pop("used_bytes", 0) for k, v in totals.items(): result["total_" + k] = v if most_recent_access > 0: result["most_recent_access_time"] = most_recent_access result['time_to_collect_stats'] = time.time() - start_time return classad.ClassAd(result) if __name__ == '__main__': import sys args = sys.argv[1:] if len(args) > 2: args[2] = float(args[2]) elif len(args) == 2: args.append(0.99) # max cache fraction print(collect_cache_stats(*args))
src/xrootd_cache_stats.py
11,746
return True if the first list is a prefix of the second Split a path into a list of directory names Collect stats on the cache server Get information about the cache itself Try to extract useful info from the cinfo file Scan the top level directory of the cache. Walks the path looking for directories that are not in vo_paths. For each of these generate a cache summary Scan a VO directory (assumed to be the whole directory tree after the top level Contact the xrootd server to check if it's alive Monitoring functions for xrootd cache server, producing classads that can be handed to condor ! /usr/bin/python these paths in the cache are to be treated as top level "VOs" for stats collection get the path components as a list, removing the rootdir part if this directory is in vo_paths, keep recursing if nothing is in vo_paths, get the stats and remove from dirnames so this walk goes no further error message? Somebody might add a file ending in .cinfo in the cache so look for the f, f.cinfo pair must have just been deleted allow for sparse files Parsing the cinfo files The header (not a c struct; consecutive separate values with no padding) version + buffer size + file size (blocks) int + long long + long long then the number of accesses int each access contains a struct (native size + padding) AttachTime + DetachTime + BytesDisk + BytesRam + BytesMissed time_t + long long + long long + long long + long long read and unpack the header a mangled file we only understand version 2 Get the size of the state vector and skip over it buff_synced uses 1 bit per bufferSize block of bytes Length is rounded up to the nearest byte If the file_size is zero, state vector length is 1 (Difference is due to Python's integer division returning the floor) Go past cksum (char[16]) and creationTime (time_t) now the access count (an int) read the access times Read AStat structsprint access_time, bytes_disk, bytes_ramprint time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(access_time)) no longer interested return what we've got more specific exception would be better Python 2.6's urlparse returns a ParseResult object whereas Python 2.4's urlparse returns a tuple that doesn't handle root:// properly Avoid the '//' prefix add up the sizes max cache fraction
2,289
en
0.77337
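read_cinfo in the module above starts by unpacking a fixed binary header ('=iqq': version, buffer size, file size) before walking the access records. A minimal sketch of just that header read, under the same layout assumptions (this only mimics the header handling, not the full state-vector and access parsing):

```python
# Minimal sketch: read only the fixed .cinfo header described above.
# Assumes the version-2 layout: int version, long long buffer size,
# long long file size, packed with no padding ('=iqq').
import struct

_HEADER_FMT = '=iqq'
_HEADER_SIZE = struct.calcsize(_HEADER_FMT)

def read_cinfo_header(path):
    with open(path, 'rb') as fh:
        buf = fh.read(_HEADER_SIZE)
    if len(buf) < _HEADER_SIZE:
        raise ValueError(f"{path}: header too short")
    version, buffer_size, file_size = struct.unpack(_HEADER_FMT, buf)
    if version != 2:
        raise ValueError(f"{path}: unsupported cinfo version {version}")
    return version, buffer_size, file_size
```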
''' dShell output classes @author: tparker ''' import os import sys import logging import struct import datetime import dshell import util class Output(object): ''' dShell output base class, extended by output types ''' _DEFAULT_FORMAT = '' _DEFAULT_TIMEFORMAT = '%Y-%m-%d %H:%M:%S' _DEFAULT_DELIM = ' ' _NULL = None # true if you want to remove extra fields from the parsed record _FILTER_EXTRA = False def __init__(self, *a, **kw): ''' base output class constructor configuration kwords: logger=<existing logging object> to pass in a logger format='format string' to override default formatstring for output class pcap = filename to write pcap ''' # setup the logger self.logger = kw.get('logger', logging) # parse the format string self.setformat(kw.get('format', self._DEFAULT_FORMAT)) self.timeformat = (kw.get('timeformat', self._DEFAULT_TIMEFORMAT)) self.delim = (kw.get('delim', self._DEFAULT_DELIM)) if 'pcap' in kw: self.pcapwriter = PCAPWriter(kw['pcap']) else: self.pcapwriter = None # this is up to the output plugin to process # by default stuffs extra fields and data into 'extra' field # if _FILTER_EXTRA is true self.extra = kw.get('extra', False) # create the default session writer if 'session' in kw: self.sessionwriter = SessionWriter(**kw) else: self.sessionwriter = None # write a message to the log def log(self, msg, level=logging.INFO, *args, **kw): '''write a message to the log passes all args and kwargs thru to logging except for level= is used to set logging level''' self.logger.log(level, msg, *args, **kw) def setformat(self, formatstr=None, typemap=None): '''parse a format string and extract the field info if no string given, reverts to default for class will set self.fields to be a list of (name,type,spec) tuples self.fieldnames to a list of fieldnames and self.fieldmap to a list of key=in value=out mappings format string can also map in field to out field with %(in:out)spectype or specify an explicit out type with %(in:out)specintype:outtype (note this breaks compatibility with text formatting, but useful for db or other output modules) a typemap of [intype]=outtype (or [in]=(newintype,outtype) can be used to map and replace types ''' if formatstr: self.format = formatstr + "\n" else: self.format = self._DEFAULT_FORMAT + "\n" self.fields = [] # will be a (name,type,length) tuple self.fieldnames = [] self.fieldmap = {} # get all the field names e = 0 while True: # find the next format spec of %(...) s = self.format.find('%', e) + 1 if s < 1 or self.format[s] != '(': break # not %(... e = self.format.find(')', s) if e < 0: break # didn't find a closing paren # get text between parens as field name fname = self.format[s + 1:e] # len/precision specs will be 0-9 between ) and type char fspec = '' for i in xrange(e + 1, len(self.format)): if self.format[i] in '1234567890.+-# lLh': fspec += self.format[i] else: break # this char is not a spec char, it is the type char ftype = self.format[i] i += 1 # is the field type a intype:outtype def? 
if i < len(self.format) and self.format[i] == ':': e = self.format.find(' ', i) # find the end whitespace # split on: to get input:output mapping ftype, outtype = self.format[i - 1:e].split(':') else: outtype = None # output will be same as input type e = i # start at next char on loop try: # field name to column mapping fname, fmap = fname.split(':') except: fmap = fname # no mapping if typemap and ftype in typemap and not outtype: try: (ftype, outtype) = typemap[ftype] except: outtype = typemap[ftype] # append the field name,type,spec,mapping self.fields.append((fname, ftype, fspec)) self.fieldnames.append(fname) if outtype: self.fieldmap[fname] = (fmap, outtype) # map of in to out,type def parse(self, *args, **kw): '''parse the input args/kwargs into a record dict according to format string - timestamps are formatted to date/time strings - fields not in the input will be defined but blank - extra fields in the record will be formatted into a "name=value name2=value2..." string and put in 'extra' - args will go into 'data' - format keyword can contain a new format string to use (this also sets format for future output) ''' # convert timestamps to proper format for ts in [k for k in kw if k == 'ts' or k.endswith('time')]: dt = ts[:-4] + 'datetime' # ts->datetime , Xtime -> Xdatetime kw[dt] = datetime.datetime.fromtimestamp( float(kw[ts])).strftime(self.timeformat) # format properly if kw.get('direction') is 'cs': kw['dir_arrow'] = '->' elif kw.get('direction') is 'sc': kw['dir_arrow'] = '<-' else: kw['dir_arrow'] = '--' if 'format' in kw: self.setformat(kw['format']) # change the format string? del kw['format'] # create the record initialized to the _NULL value rec = dict((f, self._NULL) for f in self.fieldnames) # populate record from datadict if datadict key is a field if self._FILTER_EXTRA: rec.update( dict((f, kw[f]) for f in self.fieldnames if (f in kw and kw[f] != None))) # place extra datadict keys into the extra field (and exclude the # addr tuple) if self.extra: rec['extra'] = self.delim.join(['%s=%s' % (f, kw[f]) for f in sorted( kw.keys()) if f not in self.fieldnames and f != 'addr']) else: # not filtering extra, just lump them in as fields rec.update(kw) # populate the data field if args: rec['data'] = self.delim.join(map(str, args)) return rec def dump(self, pkt=None, **kw): # pass packets to pcap '''dump raw packet data to an output override this if you want a format other than pcap''' pktdata = str(pkt) # might be string, might be a dpkt object pktlen = kw.get('len', len(pktdata)) if self.pcapwriter: self.pcapwriter.write(pktlen, pktdata, kw['ts']) else: self.log(util.hexPlusAscii(str(pkt)), level=logging.DEBUG) # close the PCAP output def close(self): if self.pcapwriter: self.pcapwriter.close() def dispatch(self, m, *args, **kwargs): '''dispatch from Q pop''' if m == 'write': self.write(*args, **kwargs) if m == 'alert': self.alert(*args, **kwargs) if m == 'dump': self.dump(*args, **kwargs) class FileOutput(Output): def __init__(self, *args, **kw): '''configuration for fileoutput: fh=<existing open file handle> file=filename to write to mode=mode to open file as, default 'w' ''' # do base init first Output.__init__(self, *args, **kw) # get the output filehandle or file f = None if 'fh' in kw: self.fh = kw['fh'] return elif 'file' in kw: f = kw['file'] elif args: f = args[0] if f: if 'mode' in kw: mode = kw['mode'] else: mode = 'w' if mode == 'noclobber': mode = 'w' try: while os.stat(f): p = f.split('-') try: p, n = p[:-1], int(p[-1]) except ValueError: n = 0 f = '-'.join(p 
+ ['%04d' % (int(n) + 1)]) except OSError: pass # file not found self.fh = open(f, mode) else: self.fh = sys.stdout def write(self, obj, **kw): '''write session data to the session output or stdout''' if self.sessionwriter: self.sessionwriter.write(obj, **kw) elif self.fh: self.fh.write(str(obj)) def close(self): '''close output if not stdout''' if self.fh != sys.stdout: self.fh.close() Output.close(self) class TextOutput(FileOutput): '''formatted text output to file or stdout''' _DEFAULT_FORMAT = "%(decoder)s %(datetime)s %(sip)16s:%(sport)-5s %(dir_arrow)s %(dip)16s:%(dport)-5s ** %(data)s **" _NULL = '' _FILTER_EXTRA = True def __init__(self, *args, **kw): if 'extra' in kw: self._DEFAULT_FORMAT += " [ %(extra)s ]" FileOutput.__init__(self, *args, **kw) def alert(self, *args, **kw): '''write an alert record we pass in the decoder object and args/dict''' rec = self.parse(*args, **kw) if rec: self.fh.write(self.format % rec) class DBOutput(Output): '''format strings as used by the DBOutput module to create tables and map fields these follow the usual %(name)type and in most cases a custom format string will work defualt type maps are: s,r = VARCHAR (if field len given) /TEXT (if no len) c = CHAR(1) x,X,o = VARCHAR d,i,u = INTEGER e,E,f,F,g,G = DECIMAL with the following extra: (using these breaks text format string compatibility) b = boolean t = timestamp D = datetime T = this field selects table (following are postgres-only) A = inet H = host N = cidr M = macaddr format string can also map field to column with %(field:column)type or specify an explicit column type with %(field:column)pytype:DBTYPE (note this also breaks compatibility with text format strings) ''' _DEFAULT_FORMAT = "%(decoder)T %(ts:timestamp)t %(sip)s %(sport)s %(dip)s %(dport)s %(data:alert)s" _NULL = None # format type to (type,coltype) map _TYPEMAP = {'s': 'VARCHAR', 'r': 'VARCHAR', 'c': 'CHAR(1)', 'x': 'VARCHAR', 'X': 'VARCHAR', 'o': 'VARCHAR', 'd': 'INTEGER', 'i': 'INTEGER', 'u': 'INTEGER', 'e': 'DECIMAL', 'E': 'DECIMAL', 'f': 'DECIMAL', 'F': 'DECIMAL', 'g': 'DECIMAL', 'G': 'DECIMAL', # 'b' isn't a python type, so (ftype,DBTYPE) tuple for value formats input as ftype 'b': ('d', 'BOOLEAN'), # not standard across database types! 't': ('f', 'TIMESTAMP'), 'D': ('s', 'DATETIME'), 'A': ('s', 'INET'), 'H': ('s', 'HOST'), 'N': ('s', 'CIDR'), 'M': ('s', 'MACADDR')} # these are postgres specific # acceptable params to pass to db module connect method _DBCONNPARAMS = ['host', 'user', 'passwd', 'password', 'db', 'database', 'port', 'charset'] # map of db type to insert placeholder. '%s' is the default, but sqlite3 doesn't like it # you can override this with the 'placeholder' config keyword _DBTYPE_PLACEHOLDER_MAP = {'sqlite3': '?'} def __init__(self, *args, **kw): '''configuration: config=db config .ini file name to parse config keywords: dbtype=database type, selects DB API module to load in conf file use [dbtype] section name instead host,user,passwd,password,db,database,port will be passed to db module if present table=db table to use if not specified by a field insert_param=character to use as parameter placeholder for INSERT (sqlite3=?, default=%%s) format_types=types to format before insert (default=x) ('s' to pad strings, 'x' to convert to hex, 'f' to format floats, 'fx' for hex and floats...) 
''' self.dbconfig = kw.copy() # if we were passed a config.ini file, parse it and add the k/v pairs # to the config if 'config' in self.dbconfig: import ConfigParser config = ConfigParser.ConfigParser() config.read(self.dbconfig['config']) sections = config.sections() if len(sections) > 0: self.dbconfig['dbtype'] = sections[0] for k, v in config.items(sections[0], raw=True): self.dbconfig[k] = v # import the db module self.db = __import__(self.dbconfig['dbtype']) # create a connection, using a dict filtered to db conn params self.dbconn = self.db.connect( *args, **dict((k, self.dbconfig[k]) for k in self._DBCONNPARAMS if k in self.dbconfig)) # do the base init last to catch the format string, etc.. (as it may # have come from the config file) Output.__init__(self, *args, **self.dbconfig) def createtable(self, table=None): '''creates a table based on the format string''' if not table and 'table' in self.dbconfig: table = self.dbconfig['table'] try: cursor = self.dbconn.cursor() sqlfields = [] for fname, ftype, fspec in [f for f in self.fields if f[1] != 'T']: ctype = self.fieldmap[fname][1] # if no width spec, use TEXT instead of VARCHAR and hope the db # likes it if ctype == 'VARCHAR' and not fspec: ctype = 'TEXT' fdef = self.fieldmap[fname][0] + ' ' + ctype if fspec: # try to conver python format spec to something SQL will # take fdef += '(' + \ fspec.strip('+-# lLh').replace('.', ',') + ')' sqlfields.append(fdef) sql = 'CREATE TABLE "' + table + '" (' + ','.join(sqlfields) + ')' self.log(sql, logging.DEBUG) return cursor.execute(sql) except: raise def close(self): '''closes database connection''' self.dbconn.close() Output.close(self) def alert(self, *args, **kw): '''write an output record we pass in the decoder object and args/dict''' rec = self.parse(self, *args, **kw) if rec: self.insert(rec) def setformat(self, formatstr=None): '''calls main setformat and then builds the insert SQL''' # what is the insert param?? some databases use %s, some use ? # try to map it or take the placeholder keyword from config ph = self.dbconfig.get('insert_param', self._DBTYPE_PLACEHOLDER_MAP.get( self.dbconfig['dbtype'], '%%s') ) # these are the types we need to format before passing to the db self.format_types = self.dbconfig.get('format_types', 'x') Output.setformat(self, formatstr, typemap=self._TYPEMAP) # build all fields we map (except for [T]able select) self.tablefield = 'decoder' # default to decodername for fname, ftype, fspec in self.fields: if ftype == 'T': self.tablefield = fname sqlfields = [self.fieldmap[fname][0] for (fname, ftype, fspec) in self.fields if fname in self.fieldmap] self.insertsql = 'INSERT INTO "%%s" (%s) VALUES (%s)' % ( ','.join(sqlfields), ','.join([ph] * len(sqlfields))) def insert(self, rec, table=None): ''' inserts rec dict using self.format into table (if given, else default or specified by field) if insert fails, tries to create table and insert again before raising exception ''' if not table: if 'table' in self.dbconfig: table = self.dbconfig['table'] elif rec[self.tablefield]: table = rec[self.tablefield] try: sqlvalues = [] cursor = self.dbconn.cursor() for fname, ftype, fspec in self.fields: if fname in self.fieldmap: # do we preformat this data? if ftype in self.format_types: sqlvalues.append(('%' + fspec + ftype) % rec[fname]) else: sqlvalues.append(rec[fname]) # create a INSERT INTO table (fields) VALUES (?,?,?) 
for execute sql = self.insertsql % table self.log(sql + ' %s' % sqlvalues, logging.DEBUG) except: raise # try once, if it fails, try to create table and retry # throws on second failure or create table failure fail = False while True: try: cursor.execute(sql, sqlvalues) self.dbconn.commit() break # success except Exception, e: self.log(e, level=logging.WARNING) if fail: raise else: fail = True try: self.createtable(table) except: raise class PCAPWriter(FileOutput): '''writes a pcap file''' def __init__(self, *args, **kw): FileOutput.__init__(self, *args, **kw) if self.fh: self.fh.write( struct.pack('IHHIIII', 0xa1b2c3d4, 2, 4, 0, 0, 65535, 1)) # overrides Output.write to write session as PCAP # data flow is Output.dump->pcapwriter.write def write(self, pktlen, pktdata, ts): if self.fh: self.fh.write( struct.pack('II', int(ts), int((ts - int(ts)) * 1000000))) # captured length, original length self.fh.write(struct.pack('II', len(pktdata), pktlen)) self.fh.write(pktdata) class SessionWriter(Output): '''writes the session to one or more files''' def __init__(self, session=None, **kw): self.file = kw.get('session', session) self.dir = kw.get('direction', 'both') self.mode = kw.get('mode', 'a') self.timeformat = (kw.get('timeformat', self._DEFAULT_TIMEFORMAT)) self.fieldnames = [] def write(self, obj, **kwargs): out = None kw = dict(**kwargs) # if a session object with info() and data() methods (conn or blob, but # not packet) try: kw.update(**obj.info()) # get object info kw = self.parse(**kw) if self.dir == 'both': ds = [None] elif self.dir == 'split': ds = ['cs', 'sc'] else: ds = [self.dir] for d in ds: kw.update(direction=d if d else 'both') # set direction # format filename and open out = FileOutput(self.file % kw, mode=self.mode) # write obj data for direction out.fh.write(obj.data(direction=d)) out.close() except: # if not a session object # build filename from kw out = FileOutput(self.file % kw, mode=self.mode) out.fh.write(str(obj)) out.close() class QueueOutput(Output): '''pipes pickled packets to parent process''' def __init__(self, q, **kwargs): self.queue = q Output.__init__(self, **kwargs) def write(self, *args, **kw): self.dispatch('write', *args, **kw) def alert(self, *args, **kw): self.dispatch('alert', *args, **kw) def dump(self, *args, **kw): self.dispatch('dump', *args, **kw) def dispatch(self, m, *args, **kw): # takes (method,...) to Q self.queue.put((m, args, kw)) def close(self): self.queue.close() Output.close(self) # default output module obj = TextOutput
lib/output/output.py
21,310
true if you want to remove extra fields from the parsed record setup the logger parse the format string this is up to the output plugin to process by default stuffs extra fields and data into 'extra' field if _FILTER_EXTRA is true create the default session writer write a message to the log will be a (name,type,length) tuple get all the field names find the next format spec of %(...) not %(... didn't find a closing paren get text between parens as field name len/precision specs will be 0-9 between ) and type char this char is not a spec char, it is the type char is the field type a intype:outtype def? find the end whitespace split on: to get input:output mapping output will be same as input type start at next char on loop field name to column mapping no mapping append the field name,type,spec,mapping map of in to out,type convert timestamps to proper format ts->datetime , Xtime -> Xdatetime format properly change the format string? create the record initialized to the _NULL value populate record from datadict if datadict key is a field place extra datadict keys into the extra field (and exclude the addr tuple) not filtering extra, just lump them in as fields populate the data field pass packets to pcap might be string, might be a dpkt object close the PCAP output do base init first get the output filehandle or file file not found format type to (type,coltype) map 'b' isn't a python type, so (ftype,DBTYPE) tuple for value formats input as ftype not standard across database types! these are postgres specific acceptable params to pass to db module connect method map of db type to insert placeholder. '%s' is the default, but sqlite3 doesn't like it you can override this with the 'placeholder' config keyword if we were passed a config.ini file, parse it and add the k/v pairs to the config import the db module create a connection, using a dict filtered to db conn params do the base init last to catch the format string, etc.. (as it may have come from the config file) if no width spec, use TEXT instead of VARCHAR and hope the db likes it try to conver python format spec to something SQL will take what is the insert param?? some databases use %s, some use ? try to map it or take the placeholder keyword from config these are the types we need to format before passing to the db build all fields we map (except for [T]able select) default to decodername do we preformat this data? create a INSERT INTO table (fields) VALUES (?,?,?) for execute try once, if it fails, try to create table and retry throws on second failure or create table failure success overrides Output.write to write session as PCAP data flow is Output.dump->pcapwriter.write captured length, original length if a session object with info() and data() methods (conn or blob, but not packet) get object info set direction format filename and open write obj data for direction if not a session object build filename from kw takes (method,...) to Q default output module
2,967
en
0.761941
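Output.setformat above walks the format string by hand to pull out %(name)spec-type fields, with an optional in:out mapping inside the parentheses. A rough standalone regex sketch of the same extraction (not dShell's actual parser, which additionally handles type:DBTYPE suffixes and type maps):

```python
# Rough sketch: extract (input name, output name, spec, type) tuples from a
# dShell-style format string such as the _DEFAULT_FORMAT shown above.
import re

_SPEC_RE = re.compile(r'%\(([^)]+)\)([0-9.+\-# lLh]*)([a-zA-Z])')

def parse_format(fmt):
    fields = []
    for name, spec, ftype in _SPEC_RE.findall(fmt):
        in_name, _, out_name = name.partition(':')  # "ts:timestamp" maps ts -> timestamp
        fields.append((in_name, out_name or in_name, spec, ftype))
    return fields

fmt = "%(decoder)s %(datetime)s %(sip)16s:%(sport)-5s ** %(data)s **"
for in_name, out_name, spec, ftype in parse_format(fmt):
    print(in_name, out_name, repr(spec), ftype)
```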
"""Auto-generated file, do not edit by hand. MQ metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_MQ = PhoneMetadata(id='MQ', country_code=596, international_prefix='00', general_desc=PhoneNumberDesc(national_number_pattern='[56]\\d{8}', possible_number_pattern='\\d{9}'), fixed_line=PhoneNumberDesc(national_number_pattern='596(?:0[2-5]|[12]0|3[05-9]|4[024-8]|[5-7]\\d|89|9[4-8])\\d{4}', possible_number_pattern='\\d{9}', example_number='596301234'), mobile=PhoneNumberDesc(national_number_pattern='696(?:[0-479]\\d|5[01]|8[0-689])\\d{4}', possible_number_pattern='\\d{9}', example_number='696201234'), toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), national_prefix='0', national_prefix_for_parsing='0', number_format=[NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})(\\d{2})', format=u'\\1 \\2 \\3 \\4', national_prefix_formatting_rule=u'0\\1')])
python/phonenumbers/data/region_MQ.py
1,706
Auto-generated file, do not edit by hand. MQ metadata
53
en
0.786707
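This region file only defines data; it is consumed through the public phonenumbers API. A small usage example (assuming the phonenumbers package is installed; the expected values below follow from the patterns and example number in the metadata above):

```python
# Parsing and formatting the example Martinique fixed-line number embedded
# in the metadata above via the public phonenumbers API.
import phonenumbers

num = phonenumbers.parse("+596596301234")
assert num.country_code == 596

# Expected to be True, since 596301234 matches the fixed_line pattern above.
print(phonenumbers.is_valid_number(num))

# Expected "0596 30 12 34": the (\d{3})(\d{2})(\d{2})(\d{2}) format with the
# national prefix rule '0\1' applied.
print(phonenumbers.format_number(num, phonenumbers.PhoneNumberFormat.NATIONAL))
```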
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._inputs import * __all__ = ['VpnSiteArgs', 'VpnSite'] @pulumi.input_type class VpnSiteArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], address_space: Optional[pulumi.Input['AddressSpaceArgs']] = None, bgp_properties: Optional[pulumi.Input['BgpSettingsArgs']] = None, device_properties: Optional[pulumi.Input['DevicePropertiesArgs']] = None, id: Optional[pulumi.Input[str]] = None, ip_address: Optional[pulumi.Input[str]] = None, is_security_site: Optional[pulumi.Input[bool]] = None, location: Optional[pulumi.Input[str]] = None, site_key: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_wan: Optional[pulumi.Input['SubResourceArgs']] = None, vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]] = None, vpn_site_name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a VpnSite resource. :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite. :param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges. :param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties. :param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties. :param pulumi.Input[str] id: Resource ID. :param pulumi.Input[str] ip_address: The ip-address for the vpn-site. :param pulumi.Input[bool] is_security_site: IsSecuritySite flag. :param pulumi.Input[str] location: Resource location. :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. :param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs. :param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links. :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated. """ pulumi.set(__self__, "resource_group_name", resource_group_name) if address_space is not None: pulumi.set(__self__, "address_space", address_space) if bgp_properties is not None: pulumi.set(__self__, "bgp_properties", bgp_properties) if device_properties is not None: pulumi.set(__self__, "device_properties", device_properties) if id is not None: pulumi.set(__self__, "id", id) if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) if is_security_site is not None: pulumi.set(__self__, "is_security_site", is_security_site) if location is not None: pulumi.set(__self__, "location", location) if site_key is not None: pulumi.set(__self__, "site_key", site_key) if tags is not None: pulumi.set(__self__, "tags", tags) if virtual_wan is not None: pulumi.set(__self__, "virtual_wan", virtual_wan) if vpn_site_links is not None: pulumi.set(__self__, "vpn_site_links", vpn_site_links) if vpn_site_name is not None: pulumi.set(__self__, "vpn_site_name", vpn_site_name) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The resource group name of the VpnSite. 
""" return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="addressSpace") def address_space(self) -> Optional[pulumi.Input['AddressSpaceArgs']]: """ The AddressSpace that contains an array of IP address ranges. """ return pulumi.get(self, "address_space") @address_space.setter def address_space(self, value: Optional[pulumi.Input['AddressSpaceArgs']]): pulumi.set(self, "address_space", value) @property @pulumi.getter(name="bgpProperties") def bgp_properties(self) -> Optional[pulumi.Input['BgpSettingsArgs']]: """ The set of bgp properties. """ return pulumi.get(self, "bgp_properties") @bgp_properties.setter def bgp_properties(self, value: Optional[pulumi.Input['BgpSettingsArgs']]): pulumi.set(self, "bgp_properties", value) @property @pulumi.getter(name="deviceProperties") def device_properties(self) -> Optional[pulumi.Input['DevicePropertiesArgs']]: """ The device properties. """ return pulumi.get(self, "device_properties") @device_properties.setter def device_properties(self, value: Optional[pulumi.Input['DevicePropertiesArgs']]): pulumi.set(self, "device_properties", value) @property @pulumi.getter def id(self) -> Optional[pulumi.Input[str]]: """ Resource ID. """ return pulumi.get(self, "id") @id.setter def id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "id", value) @property @pulumi.getter(name="ipAddress") def ip_address(self) -> Optional[pulumi.Input[str]]: """ The ip-address for the vpn-site. """ return pulumi.get(self, "ip_address") @ip_address.setter def ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_address", value) @property @pulumi.getter(name="isSecuritySite") def is_security_site(self) -> Optional[pulumi.Input[bool]]: """ IsSecuritySite flag. """ return pulumi.get(self, "is_security_site") @is_security_site.setter def is_security_site(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "is_security_site", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Resource location. """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter(name="siteKey") def site_key(self) -> Optional[pulumi.Input[str]]: """ The key for vpn-site that can be used for connections. """ return pulumi.get(self, "site_key") @site_key.setter def site_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "site_key", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource tags. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="virtualWan") def virtual_wan(self) -> Optional[pulumi.Input['SubResourceArgs']]: """ The VirtualWAN to which the vpnSite belongs. """ return pulumi.get(self, "virtual_wan") @virtual_wan.setter def virtual_wan(self, value: Optional[pulumi.Input['SubResourceArgs']]): pulumi.set(self, "virtual_wan", value) @property @pulumi.getter(name="vpnSiteLinks") def vpn_site_links(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]: """ List of all vpn site links. 
""" return pulumi.get(self, "vpn_site_links") @vpn_site_links.setter def vpn_site_links(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]): pulumi.set(self, "vpn_site_links", value) @property @pulumi.getter(name="vpnSiteName") def vpn_site_name(self) -> Optional[pulumi.Input[str]]: """ The name of the VpnSite being created or updated. """ return pulumi.get(self, "vpn_site_name") @vpn_site_name.setter def vpn_site_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "vpn_site_name", value) class VpnSite(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None, bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None, device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None, id: Optional[pulumi.Input[str]] = None, ip_address: Optional[pulumi.Input[str]] = None, is_security_site: Optional[pulumi.Input[bool]] = None, location: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, site_key: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None, vpn_site_name: Optional[pulumi.Input[str]] = None, __props__=None): """ VpnSite Resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges. :param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties. :param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties. :param pulumi.Input[str] id: Resource ID. :param pulumi.Input[str] ip_address: The ip-address for the vpn-site. :param pulumi.Input[bool] is_security_site: IsSecuritySite flag. :param pulumi.Input[str] location: Resource location. :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite. :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links. :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated. """ ... @overload def __init__(__self__, resource_name: str, args: VpnSiteArgs, opts: Optional[pulumi.ResourceOptions] = None): """ VpnSite Resource. :param str resource_name: The name of the resource. :param VpnSiteArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(VpnSiteArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None, bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None, device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None, id: Optional[pulumi.Input[str]] = None, ip_address: Optional[pulumi.Input[str]] = None, is_security_site: Optional[pulumi.Input[bool]] = None, location: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, site_key: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None, vpn_site_name: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = VpnSiteArgs.__new__(VpnSiteArgs) __props__.__dict__["address_space"] = address_space __props__.__dict__["bgp_properties"] = bgp_properties __props__.__dict__["device_properties"] = device_properties __props__.__dict__["id"] = id __props__.__dict__["ip_address"] = ip_address __props__.__dict__["is_security_site"] = is_security_site __props__.__dict__["location"] = location if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["site_key"] = site_key __props__.__dict__["tags"] = tags __props__.__dict__["virtual_wan"] = virtual_wan __props__.__dict__["vpn_site_links"] = vpn_site_links __props__.__dict__["vpn_site_name"] = vpn_site_name __props__.__dict__["etag"] = None __props__.__dict__["name"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["type"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200301:VpnSite"), pulumi.Alias(type_="azure-native:network:VpnSite"), pulumi.Alias(type_="azure-nextgen:network:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VpnSite"), 
pulumi.Alias(type_="azure-native:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20201101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VpnSite")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(VpnSite, __self__).__init__( 'azure-native:network/v20200301:VpnSite', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnSite': """ Get an existing VpnSite resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = VpnSiteArgs.__new__(VpnSiteArgs) __props__.__dict__["address_space"] = None __props__.__dict__["bgp_properties"] = None __props__.__dict__["device_properties"] = None __props__.__dict__["etag"] = None __props__.__dict__["ip_address"] = None __props__.__dict__["is_security_site"] = None __props__.__dict__["location"] = None __props__.__dict__["name"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["site_key"] = None __props__.__dict__["tags"] = None __props__.__dict__["type"] = None __props__.__dict__["virtual_wan"] = None __props__.__dict__["vpn_site_links"] = None return VpnSite(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="addressSpace") def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]: """ The AddressSpace that contains an array of IP address ranges. """ return pulumi.get(self, "address_space") @property @pulumi.getter(name="bgpProperties") def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]: """ The set of bgp properties. """ return pulumi.get(self, "bgp_properties") @property @pulumi.getter(name="deviceProperties") def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]: """ The device properties. """ return pulumi.get(self, "device_properties") @property @pulumi.getter def etag(self) -> pulumi.Output[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="ipAddress") def ip_address(self) -> pulumi.Output[Optional[str]]: """ The ip-address for the vpn-site. """ return pulumi.get(self, "ip_address") @property @pulumi.getter(name="isSecuritySite") def is_security_site(self) -> pulumi.Output[Optional[bool]]: """ IsSecuritySite flag. """ return pulumi.get(self, "is_security_site") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ The provisioning state of the VPN site resource. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="siteKey") def site_key(self) -> pulumi.Output[Optional[str]]: """ The key for vpn-site that can be used for connections. """ return pulumi.get(self, "site_key") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter(name="virtualWan") def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: """ The VirtualWAN to which the vpnSite belongs. """ return pulumi.get(self, "virtual_wan") @property @pulumi.getter(name="vpnSiteLinks") def vpn_site_links(self) -> pulumi.Output[Optional[Sequence['outputs.VpnSiteLinkResponse']]]: """ List of all vpn site links. """ return pulumi.get(self, "vpn_site_links")
sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py
23,051
en
0.59594
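For context, the following is a minimal sketch of how this generated VpnSite resource might be used from a Pulumi program. It assumes the versioned module re-exports VpnSite (as Pulumi-generated SDKs normally do); the resource group name, location, IP address, address prefixes, and virtual WAN ID are placeholder values, and the nested inputs are passed as plain dicts, which Pulumi accepts for InputType arguments.

import pulumi
from pulumi_azure_native.network.v20200301 import VpnSite

# Placeholder values; a real program would reference an existing
# resource group and VirtualWan resource instead of literals.
site = VpnSite(
    "example-vpn-site",
    resource_group_name="example-rg",
    location="westeurope",
    ip_address="203.0.113.10",
    address_space={"address_prefixes": ["10.10.0.0/24"]},
    virtual_wan={"id": "/subscriptions/.../virtualWans/example-wan"},
    tags={"env": "test"},
)

# Export the provisioning state reported by Azure once the site is created.
pulumi.export("vpn_site_state", site.provisioning_state)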
# -*- coding: utf-8 -*-

"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from pathlib import Path

import click

from rnacentral_pipeline.rnacentral import attempted, r2dt


@click.group("r2dt")
def cli():
    """
    A group of commands for parsing data from secondary structures into an
    importable format.
    """
    pass


@cli.command("process-svgs")
@click.option("--allow-missing", is_flag=True, default=False)
@click.argument("model_info", type=click.File("r"))
@click.argument("directory", type=click.Path())
@click.argument("output", type=click.File("w"))
def process_svgs(model_info, directory, output, allow_missing=False):
    """
    Process all SVG secondary structures in the given directory and produce a
    single data file that can be imported into the database.
    """
    r2dt.write(model_info, directory, output, allow_missing=allow_missing)


@cli.group("should-show")
def should_show():
    """
    Commands for building the should-show model as well as running it.
    """


@should_show.command("convert-sheet")
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def convert_sheet(filename, output):
    """
    Convert a downloaded Google Sheet CSV into a CSV that can be used as
    training data. Often we will build a spreadsheet of example URS and then
    use that to build a training set. This is convenient because an SVG can be
    embedded in Google Sheets, which makes it fast to compare several of them.
    To move that data back into the training set, download the sheet as a CSV
    and run this command on it to build the CSV used in training. The sheet
    must have a 'urs' and a 'Labeled Should show' column, and the values in
    'Labeled Should show' must be true/false (ignoring case).
    """
    r2dt.write_converted_sheet(filename, output)


@should_show.command("fetch-data")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def fetch_training_data(filename, output, db_url=None):
    """
    Build a CSV file of training data to use for model building. This is kept
    separate so a training CSV can be built and explored interactively before
    committing the final model-building logic to the pipeline.
    """
    r2dt.write_training_data(filename, db_url, output)


@should_show.command("inspect-data")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def fetch_inspect_data(filename, output, db_url=None):
    """
    Use this command when fetching more examples to add to the training set.
    It fetches information that is useful for a person to evaluate a diagram
    and decide if it should be true/false in the training set.
    """
    r2dt.write_training_data(filename, db_url, output)


@should_show.command("build-model")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("training-info", type=click.File("r"))
@click.argument("model", type=click.Path())
def build_model(training_info, model, db_url=None):
    """
    Build a model from the given training information. The training
    information should be a CSV file of: URS,flag. The flag must be 1 or 0 to
    indicate if the URS should be shown or not. This fetches the data like the
    fetch-data command, but then builds a model and writes it directly to the
    output file.
    """
    r2dt.build_model(training_info, db_url, Path(model))


@should_show.command("compute")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("model", type=click.Path())
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def write_should_show(model, filename, output, db_url=None):
    """
    Compute the should-show values for the URS ids listed in the given file.
    The data needed for each URS is fetched from the database. This is meant
    to operate on large batches, like relabeling the entire database.
    """
    r2dt.write_should_show(model, filename, db_url, output)


@cli.group("model-info")
def model_info():
    """
    Commands for parsing and generating data files we can import into the
    database as model info files.
    """
    pass


@model_info.command("crw")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def crw_model_info(filename, output):
    """
    Parse the CRW metadata file and produce an importable file.
    """
    r2dt.write_crw(filename, output)


@model_info.command("ribovision")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def ribovision_model_info(filename, output):
    """
    Parse the metadata.tsv file from R2DT for RiboVision models to produce
    something we can put in our database.
    """
    r2dt.write_ribovision(filename, output)


@model_info.command("gtrnadb")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def gtrnadb_model_info(filename, output):
    """
    Parse the metadata.tsv file from R2DT for GtRNAdb models to produce
    something we can put in our database.
    """
    r2dt.write_gtrnadb(filename, output)


@model_info.command("rnase-p")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def rnase_p_model_info(filename, output):
    """
    Parse the metadata.tsv file from R2DT for RNase P models to produce
    something we can put in our database.
    """
    r2dt.write_rnase_p(filename, output)


@cli.command("create-attempted")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def r2dt_create_attempted(filename, output):
    attempted.r2dt(filename, output)


@cli.command("publish")
@click.option("--suffix", default="")
@click.option("--allow-missing", is_flag=True, default=False)
@click.argument("model_info", type=click.File("r"))
@click.argument(
    "directory",
    type=click.Path(
        writable=False,
        dir_okay=True,
        file_okay=False,
    ),
)
@click.argument(
    "output",
    type=click.Path(
        writable=True,
        dir_okay=True,
        file_okay=False,
    ),
)
def r2dt_publish(model_info, directory, output, allow_missing, suffix=""):
    r2dt.publish(
        model_info, directory, output, allow_missing=allow_missing, suffix=suffix
    )


@cli.command("prepare-s3")
@click.option("--allow-missing", is_flag=True, default=False)
@click.argument("model_info", type=click.File("r"))
@click.argument(
    "directory",
    type=click.Path(
        writable=False,
        dir_okay=True,
        file_okay=False,
    ),
)
@click.argument(
    "output",
    type=click.Path(
        writable=True,
        dir_okay=True,
        file_okay=False,
    ),
)
@click.argument("file_list", type=click.Path())
def r2dt_prepare_s3(model_info, directory, output, file_list, allow_missing):
    file_list = Path(file_list)
    output = Path(output)
    r2dt.prepare_s3(
        model_info, directory, output, file_list, allow_missing=allow_missing
    )
rnacentral_pipeline/cli/r2dt.py
7,915
en
0.895942
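A minimal sketch of exercising one of the click commands above without a shell, using click's test runner. The metadata file name is a placeholder, and it assumes the rnacentral_pipeline package is importable in the current environment.

from click.testing import CliRunner

from rnacentral_pipeline.cli.r2dt import cli

runner = CliRunner()
# Parse a (placeholder) CRW metadata file; the output argument defaults to "-",
# so the importable CSV is written to stdout and captured by the runner.
result = runner.invoke(cli, ["model-info", "crw", "crw-metadata.tsv"])
print(result.exit_code)
print(result.output)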
import os import pyudev import psutil import logging import time from arm.ripper import music_brainz from arm.ui import db from arm.config.config import cfg from flask_login import LoginManager, current_user, login_user, UserMixin # noqa: F401 from prettytable import PrettyTable hidden_attribs = ("OMDB_API_KEY", "EMBY_USERID", "EMBY_PASSWORD", "EMBY_API_KEY", "PB_KEY", "IFTTT_KEY", "PO_KEY", "PO_USER_KEY", "PO_APP_KEY", "ARM_API_KEY", "TMDB_API_KEY") HIDDEN_VALUE = "<hidden>" class Job(db.Model): job_id = db.Column(db.Integer, primary_key=True) arm_version = db.Column(db.String(20)) crc_id = db.Column(db.String(63)) logfile = db.Column(db.String(256)) start_time = db.Column(db.DateTime) stop_time = db.Column(db.DateTime) job_length = db.Column(db.String(12)) status = db.Column(db.String(32)) stage = db.Column(db.String(63)) no_of_titles = db.Column(db.Integer) title = db.Column(db.String(256)) title_auto = db.Column(db.String(256)) title_manual = db.Column(db.String(256)) year = db.Column(db.String(4)) year_auto = db.Column(db.String(4)) year_manual = db.Column(db.String(4)) video_type = db.Column(db.String(20)) video_type_auto = db.Column(db.String(20)) video_type_manual = db.Column(db.String(20)) imdb_id = db.Column(db.String(15)) imdb_id_auto = db.Column(db.String(15)) imdb_id_manual = db.Column(db.String(15)) poster_url = db.Column(db.String(256)) poster_url_auto = db.Column(db.String(256)) poster_url_manual = db.Column(db.String(256)) devpath = db.Column(db.String(15)) mountpoint = db.Column(db.String(20)) hasnicetitle = db.Column(db.Boolean) errors = db.Column(db.Text) disctype = db.Column(db.String(20)) # dvd/bluray/data/music/unknown label = db.Column(db.String(256)) path = db.Column(db.String(256)) ejected = db.Column(db.Boolean) updated = db.Column(db.Boolean) pid = db.Column(db.Integer) pid_hash = db.Column(db.Integer) tracks = db.relationship('Track', backref='job', lazy='dynamic') config = db.relationship('Config', uselist=False, backref="job") def __init__(self, devpath): """Return a disc object""" self.devpath = devpath self.mountpoint = "/mnt" + devpath self.hasnicetitle = False self.video_type = "unknown" self.ejected = False self.updated = False if cfg['VIDEOTYPE'] != "auto": self.video_type = cfg['VIDEOTYPE'] self.parse_udev() self.get_pid() def parse_udev(self): """Parse udev for properties of current disc""" context = pyudev.Context() device = pyudev.Devices.from_device_file(context, self.devpath) self.disctype = "unknown" for key, value in device.items(): if key == "ID_FS_LABEL": self.label = value if value == "iso9660": self.disctype = "data" elif key == "ID_CDROM_MEDIA_BD": self.disctype = "bluray" elif key == "ID_CDROM_MEDIA_DVD": self.disctype = "dvd" elif key == "ID_CDROM_MEDIA_TRACK_COUNT_AUDIO": self.disctype = "music" else: pass def get_pid(self): pid = os.getpid() p = psutil.Process(pid) self.pid = pid self.pid_hash = hash(p) def get_disc_type(self, found_hvdvd_ts): if self.disctype == "music": logging.debug("Disc is music.") self.label = music_brainz.main(self) elif os.path.isdir(self.mountpoint + "/VIDEO_TS"): logging.debug(f"Found: {self.mountpoint}/VIDEO_TS") self.disctype = "dvd" elif os.path.isdir(self.mountpoint + "/video_ts"): logging.debug(f"Found: {self.mountpoint}/video_ts") self.disctype = "dvd" elif os.path.isdir(self.mountpoint + "/BDMV"): logging.debug(f"Found: {self.mountpoint}/BDMV") self.disctype = "bluray" elif os.path.isdir(self.mountpoint + "/HVDVD_TS"): logging.debug(f"Found: {self.mountpoint}/HVDVD_TS") # do something here elif 
found_hvdvd_ts: logging.debug("Found file: HVDVD_TS") # do something here too else: logging.debug("Did not find valid dvd/bd files. Changing disctype to 'data'") self.disctype = "data" def identify_audio_cd(self): """ Get the title for audio cds to use for the logfile name. Needs the job class passed into it so it can be forwarded to mb return - only the logfile - setup_logging() adds the full path """ # Use the music label if we can find it - defaults to music_cd.log disc_id = music_brainz.get_disc_id(self) mb_title = music_brainz.get_title(disc_id, self) if mb_title == "not identified": self.label = self.title = "not identified" logfile = "music_cd.log" new_log_file = f"music_cd_{round(time.time() * 100)}.log" else: logfile = f"{mb_title}.log" new_log_file = f"{mb_title}_{round(time.time() * 100)}.log" temp_log_full = os.path.join(cfg['LOGPATH'], logfile) logfile = new_log_file if os.path.isfile(temp_log_full) else logfile return logfile def __str__(self): """Returns a string of the object""" s = self.__class__.__name__ + ": " for attr, value in self.__dict__.items(): s = s + "(" + str(attr) + "=" + str(value) + ") " return s def pretty_table(self): """Returns a string of the prettytable""" x = PrettyTable() x.field_names = ["Config", "Value"] x._max_width = {"Config": 50, "Value": 60} for attr, value in self.__dict__.items(): if attr == "config": x.add_row([str(attr), str(value.pretty_table())]) else: x.add_row([str(attr), str(value)]) return str(x.get_string()) def get_d(self): r = {} for key, value in self.__dict__.items(): if '_sa_instance_state' not in key: r[str(key)] = str(value) return r def __repr__(self): return '<Job {}>'.format(self.label) def eject(self): """Eject disc if it hasn't previously been ejected""" if not self.ejected: self.ejected = True try: if os.system("umount " + self.devpath): logging.debug("we unmounted disc" + self.devpath) if os.system("eject " + self.devpath): logging.debug("we ejected disc" + self.devpath) self.ejected = True else: logging.debug("failed to eject" + self.devpath) except Exception as e: logging.debug(self.devpath + " couldn't be ejected " + str(e)) class Track(db.Model): track_id = db.Column(db.Integer, primary_key=True) job_id = db.Column(db.Integer, db.ForeignKey('job.job_id')) track_number = db.Column(db.String(4)) length = db.Column(db.Integer) aspect_ratio = db.Column(db.String(20)) fps = db.Column(db.Float) main_feature = db.Column(db.Boolean) basename = db.Column(db.String(256)) filename = db.Column(db.String(256)) orig_filename = db.Column(db.String(256)) new_filename = db.Column(db.String(256)) ripped = db.Column(db.Boolean) status = db.Column(db.String(32)) error = db.Column(db.Text) source = db.Column(db.String(32)) def __init__(self, job_id, track_number, length, aspect_ratio, fps, main_feature, source, basename, filename): """Return a track object""" self.job_id = job_id self.track_number = track_number self.length = length self.aspect_ratio = aspect_ratio self.fps = fps self.main_feature = main_feature self.source = source self.basename = basename self.filename = filename self.ripped = False def __repr__(self): return '<Post {}>'.format(self.track_number) class Config(db.Model): CONFIG_ID = db.Column(db.Integer, primary_key=True) job_id = db.Column(db.Integer, db.ForeignKey('job.job_id')) ARM_CHECK_UDF = db.Column(db.Boolean) GET_VIDEO_TITLE = db.Column(db.Boolean) SKIP_TRANSCODE = db.Column(db.Boolean) VIDEOTYPE = db.Column(db.String(25)) MINLENGTH = db.Column(db.String(6)) MAXLENGTH = db.Column(db.String(6)) MANUAL_WAIT = 
db.Column(db.Boolean) MANUAL_WAIT_TIME = db.Column(db.Integer) RAW_PATH = db.Column(db.String(255)) TRANSCODE_PATH = db.Column(db.String(255)) COMPLETED_PATH = db.Column(db.String(255)) EXTRAS_SUB = db.Column(db.String(255)) INSTALLPATH = db.Column(db.String(255)) LOGPATH = db.Column(db.String(255)) LOGLEVEL = db.Column(db.String(255)) LOGLIFE = db.Column(db.Integer) DBFILE = db.Column(db.String(255)) WEBSERVER_IP = db.Column(db.String(25)) WEBSERVER_PORT = db.Column(db.Integer) SET_MEDIA_PERMISSIONS = db.Column(db.Boolean) CHMOD_VALUE = db.Column(db.Integer) SET_MEDIA_OWNER = db.Column(db.Boolean) CHOWN_USER = db.Column(db.String(50)) CHOWN_GROUP = db.Column(db.String(50)) RIPMETHOD = db.Column(db.String(25)) MKV_ARGS = db.Column(db.String(25)) DELRAWFILES = db.Column(db.Boolean) HASHEDKEYS = db.Column(db.Boolean) HB_PRESET_DVD = db.Column(db.String(256)) HB_PRESET_BD = db.Column(db.String(256)) DEST_EXT = db.Column(db.String(10)) HANDBRAKE_CLI = db.Column(db.String(25)) MAINFEATURE = db.Column(db.Boolean) HB_ARGS_DVD = db.Column(db.String(256)) HB_ARGS_BD = db.Column(db.String(256)) EMBY_REFRESH = db.Column(db.Boolean) EMBY_SERVER = db.Column(db.String(25)) EMBY_PORT = db.Column(db.String(6)) EMBY_CLIENT = db.Column(db.String(25)) EMBY_DEVICE = db.Column(db.String(50)) EMBY_DEVICEID = db.Column(db.String(128)) EMBY_USERNAME = db.Column(db.String(50)) EMBY_USERID = db.Column(db.String(128)) EMBY_PASSWORD = db.Column(db.String(128)) EMBY_API_KEY = db.Column(db.String(64)) NOTIFY_RIP = db.Column(db.Boolean) NOTIFY_TRANSCODE = db.Column(db.Boolean) PB_KEY = db.Column(db.String(64)) IFTTT_KEY = db.Column(db.String(64)) IFTTT_EVENT = db.Column(db.String(25)) PO_USER_KEY = db.Column(db.String(64)) PO_APP_KEY = db.Column(db.String(64)) OMDB_API_KEY = db.Column(db.String(64)) def __init__(self, c, job_id): self.__dict__.update(c) self.job_id = job_id def list_params(self): """Returns a string of the object""" s = self.__class__.__name__ + ": " for attr, value in self.__dict__.items(): if s: s = s + "\n" if str(attr) in hidden_attribs and value: value = HIDDEN_VALUE s = s + str(attr) + ":" + str(value) return s def __str__(self): """Returns a string of the object""" s = self.__class__.__name__ + ": " for attr, value in self.__dict__.items(): if str(attr) in hidden_attribs and value: value = HIDDEN_VALUE s = s + "(" + str(attr) + "=" + str(value) + ") " return s def pretty_table(self): """Returns a string of the prettytable""" x = PrettyTable() x.field_names = ["Config", "Value"] x._max_width = {"Config": 20, "Value": 30} for attr, value in self.__dict__.items(): if str(attr) in hidden_attribs and value: value = HIDDEN_VALUE x.add_row([str(attr), str(value)]) return str(x.get_string()) def get_d(self): r = {} for key, value in self.__dict__.items(): if str(key) not in hidden_attribs: r[str(key)] = str(value) return r class User(db.Model, UserMixin): user_id = db.Column(db.Integer, index=True, primary_key=True) email = db.Column(db.String(64)) password = db.Column(db.String(128)) hash = db.Column(db.String(256)) def __init__(self, email=None, password=None, hashed=None): self.email = email self.password = password self.hash = hashed def __repr__(self): return '<User %r>' % (self.email) def get_id(self): return self.user_id class AlembicVersion(db.Model): version_num = db.Column(db.String(36), autoincrement=False, primary_key=True) def __init__(self, version=None): self.version_num = version class UISettings(db.Model): id = db.Column(db.Integer, autoincrement=True, primary_key=True) use_icons = 
db.Column(db.Boolean) save_remote_images = db.Column(db.Boolean) bootstrap_skin = db.Column(db.String(64)) language = db.Column(db.String(4)) index_refresh = db.Column(db.Integer) database_limit = db.Column(db.Integer) def __init__(self, use_icons=None, save_remote_images=None, bootstrap_skin=None, language=None, index_refresh=None, database_limit=None): self.use_icons = use_icons self.save_remote_images = save_remote_images self.bootstrap_skin = bootstrap_skin self.language = language self.index_refresh = index_refresh self.database_limit = database_limit def __repr__(self): return '<UISettings %r>' % self.id def __str__(self): """Returns a string of the object""" s = self.__class__.__name__ + ": " for attr, value in self.__dict__.items(): s = s + "(" + str(attr) + "=" + str(value) + ") " return s def get_d(self): r = {} for key, value in self.__dict__.items(): if '_sa_instance_state' not in key: r[str(key)] = str(value) return r
arm/models/models.py
14,015
en
0.628308
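A hedged sketch of how these SQLAlchemy models are typically used from the ARM web UI code. It assumes an initialised Flask application object is importable from arm.ui alongside db (the app import is an assumption, not part of the file above), and that the database has been created.

from arm.ui import app, db  # 'app' is assumed to be exported here
from arm.models.models import Job, UISettings

with app.app_context():
    # Persist a default set of UI settings.
    ui = UISettings(use_icons=True, save_remote_images=False,
                    bootstrap_skin="spacelab", language="en_US",
                    index_refresh=2000, database_limit=200)
    db.session.add(ui)
    db.session.commit()

    # Print the most recent jobs with their configuration, API keys masked
    # by the hidden_attribs handling in pretty_table().
    for job in Job.query.order_by(Job.start_time.desc()).limit(5):
        print(job.pretty_table())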
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# https://stackoverflow.com/questions/16910955/programmatically-configure-logback-appender?noredirect=1
#
import ch.qos.logback.core.Appender as LogAppender
import ch.qos.logback.core.util.COWArrayList as COWArrayList
import ch.qos.logback.classic.encoder.PatternLayoutEncoder as PatternLayoutEncoder
import ch.qos.logback.core.FileAppender as FileAppender
import org.slf4j.LoggerFactory as LoggerFactory
import ch.qos.logback.classic.Level as logLevels
import json


def getLogAppenders(loggerName="console"):
    """Collect the appenders attached to every known logger."""
    loggerMap = []
    myLogger = LoggerFactory.getLogger("logmanager")
    loggerContext = LoggerFactory.getILoggerFactory()
    myLogger.error("===================")
    appenderMap = {}
    for logger in loggerContext.getLoggerList():
        appenderList = logger.iteratorForAppenders()
        while appenderList.hasNext():
            appender = appenderList.next()
            logger.error("Logger %s" % appender.getName())
            if appender.getName() not in appenderMap.keys():
                loggerMap.append({"name": appender.getName(), "appender": "NA"})
                myLogger.error("Appender %s: %s" % (appender.getName(), "NA"))
    myLogger.error("===================")
    return loggerMap


def createLogAppender(name, file):
    """Create a FileAppender writing to 'file' and attach it to the logger called 'name'."""
    lc = LoggerFactory.getILoggerFactory()
    ple = PatternLayoutEncoder()
    ple.setPattern("%date %level [%thread] %logger{10} [%file:%line] %msg%n")
    ple.setContext(lc)
    ple.start()
    fileAppender = FileAppender()
    fileAppender.setFile(file)
    fileAppender.setEncoder(ple)
    fileAppender.setContext(lc)
    fileAppender.start()
    # Use the 'name' parameter; the original called getLogger(string), which
    # only worked because a module-level 'string' happened to be set.
    logger = LoggerFactory.getLogger(name)
    logger.addAppender(fileAppender)
    # logger.setLevel(logLevels.DEBUG)
    # set to true if root should log too
    logger.setAdditive(True)
    return logger


# 'request' and 'response' are provided by the hosting REST API script context.
myLogger = LoggerFactory.getLogger("logmanager")
verb = "GET"
if request:
    if request.query:
        if request.query['verb']:
            verb = request.query['verb']
if verb == "create":
    string = request.query['string']
    file = request.query['file']
    myLogger.info("Setting %s to %s" % (string, file))
    createLogAppender(string, file)

loggerMap = getLogAppenders()
myLogger.error("%s" % json.dumps(loggerMap, indent=4, sort_keys=True))
response.entity = {"status": "OK", "data": loggerMap}
src/main/resources/restapi/logger/getLogAppenders.py
3,408
en
0.837147
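A hedged sketch of calling this script once it is registered as a custom REST endpoint in an XL Deploy/Release server. The URL path, host, and credentials below are placeholder assumptions, since the actual route depends on how the xl-rest-api extension for src/main/resources/restapi/logger/getLogAppenders.py is wired up.

import requests

# Placeholder endpoint and credentials; adjust to the real extension route.
resp = requests.get(
    "https://xl-server.example.com/api/extension/logger/getLogAppenders",
    params={"verb": "create", "string": "com.example.myapp", "file": "/tmp/myapp.log"},
    auth=("admin", "admin"),
)
print(resp.json())  # expected shape: {"status": "OK", "data": [...]}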
# coding: utf-8

"""
    App Center Client

    Microsoft Visual Studio App Center API  # noqa: E501

    OpenAPI spec version: preview
    Contact: [email protected]
    Project Repository: https://github.com/b3nab/appcenter-sdks
"""

from __future__ import absolute_import

import unittest

import appcenter_sdk
from DistributionGroupAppsDeleteRequest.clsDistributionGroupAppsDeleteRequest import DistributionGroupAppsDeleteRequest  # noqa: E501
from appcenter_sdk.rest import ApiException


class TestDistributionGroupAppsDeleteRequest(unittest.TestCase):
    """DistributionGroupAppsDeleteRequest unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testDistributionGroupAppsDeleteRequest(self):
        """Test DistributionGroupAppsDeleteRequest"""
        # FIXME: construct object with mandatory attributes with example values
        # model = appcenter_sdk.models.clsDistributionGroupAppsDeleteRequest.DistributionGroupAppsDeleteRequest()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
sdks/python/test/test_DistributionGroupAppsDeleteRequest.py
1,084
en
0.509512
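One way the FIXME in the stub above might eventually be filled in, assuming the generated model can be constructed without arguments (unverified; the constructor path follows the import already used by the stub, and the assertion should be replaced once mandatory attributes and example values are known).

import unittest

from DistributionGroupAppsDeleteRequest.clsDistributionGroupAppsDeleteRequest import DistributionGroupAppsDeleteRequest


class TestConstruction(unittest.TestCase):
    def test_default_construction(self):
        # Assumes a no-argument constructor; adjust when mandatory
        # attributes with example values are added.
        model = DistributionGroupAppsDeleteRequest()
        self.assertIsInstance(model, DistributionGroupAppsDeleteRequest)


if __name__ == "__main__":
    unittest.main()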
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .tags import TagPatterns


class Criticality(object):

    def __init__(self, critical_tags=None, non_critical_tags=None):
        self.critical_tags = self._get_tag_patterns(critical_tags)
        self.non_critical_tags = self._get_tag_patterns(non_critical_tags)

    def _get_tag_patterns(self, tags):
        return TagPatterns(tags) if not isinstance(tags, TagPatterns) else tags

    def tag_is_critical(self, tag):
        return self.critical_tags.match(tag)

    def tag_is_non_critical(self, tag):
        return self.non_critical_tags.match(tag)

    def test_is_critical(self, test):
        if self.critical_tags and not self.critical_tags.match(test.tags):
            return False
        return not self.non_critical_tags.match(test.tags)

    def __bool__(self):
        return bool(self.critical_tags or self.non_critical_tags)

    # PY2
    def __nonzero__(self):
        return self.__bool__()
src/robot/model/criticality.py
1,527
en
0.857814
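A minimal usage sketch for the Criticality helper above, run from a Robot Framework source tree where src/robot/model/criticality.py is importable as robot.model.criticality; the tag names are placeholders.

from robot.model.criticality import Criticality

crit = Criticality(critical_tags=["smoke", "regression"],
                   non_critical_tags=["wip"])

print(bool(crit))                       # truthy when any criticality tags are configured
print(crit.tag_is_critical("smoke"))    # matches the critical tag patterns
print(crit.tag_is_non_critical("wip"))  # matches the non-critical tag patterns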