\n '''\n return string.Template(MESSAGE_TEMPLATE).substitute(title=title, banner=banner, detail=detail)\n\n\nclass XORCipher(object):\n \"\"\"XOR Cipher Class\"\"\"\n def __init__(self, key):\n self.__key_gen = itertools.cycle(key).next\n\n def encrypt(self, data):\n return ''.join(chr(ord(x) ^ ord(self.__key_gen())) for x in data)\n\n\nclass XORFileObject(object):\n \"\"\"fileobj for xor\"\"\"\n def __init__(self, stream, key):\n self.__stream = stream\n self.__cipher = XORCipher(key)\n def __getattr__(self, attr):\n if attr not in ('__stream', '__key_gen'):\n return getattr(self.__stream, attr)\n def read(self, size=-1):\n return self.__cipher.encrypt(self.__stream.read(size))\n\n\ndef forward_socket(local, remote, timeout=60, tick=2, bufsize=8192, maxping=None, maxpong=None):\n try:\n timecount = timeout\n while 1:\n timecount -= tick\n if timecount <= 0:\n break\n (ins, _, errors) = select.select([local, remote], [], [local, remote], tick)\n if errors:\n break\n if ins:\n for sock in ins:\n data = sock.recv(bufsize)\n if data:\n if sock is remote:\n local.sendall(data)\n timecount = maxpong or timeout\n else:\n remote.sendall(data)\n timecount = maxping or timeout\n else:\n return\n except socket.error as e:\n if e.args[0] not in ('timed out', errno.ECONNABORTED, errno.ECONNRESET, errno.EBADF, errno.EPIPE, errno.ENOTCONN, errno.ETIMEDOUT):\n raise\n finally:\n if local:\n local.close()\n if remote:\n remote.close()\n\n\ndef application(environ, start_response):\n if environ['REQUEST_METHOD'] == 'GET':\n start_response('302 Found', [('Location', 'https://www.google.com')])\n raise StopIteration\n\n query_string = environ['QUERY_STRING']\n kwargs = dict(urlparse.parse_qsl(query_string))\n host = kwargs.pop('host')\n port = int(kwargs.pop('port'))\n timeout = int(kwargs.get('timeout') or TIMEOUT)\n\n logging.info('%s \"%s %s %s\" - -', environ['REMOTE_ADDR'], host, port, 'HTTP/1.1')\n\n if __password__ and __password__ != kwargs.get('password'):\n random_host = 'g%d%s' % (int(time.time()*100), environ['HTTP_HOST'])\n conn = httplib.HTTPConnection(random_host, timeout=timeout)\n conn.request('GET', '/')\n response = conn.getresponse(True)\n status_line = '%s %s' % (response.status, httplib.responses.get(response.status, 'OK'))\n start_response(status_line, response.getheaders())\n yield response.read()\n raise StopIteration\n\n if __hostsdeny__ and host.endswith(__hostsdeny__):\n start_response('403 Forbidden', [('Content-Type', 'text/html')])\n yield message_html('403 Forbidden Host', 'Hosts Deny(%s)' % host, detail='host=%r' % host)\n raise StopIteration\n\n wsgi_input = environ['wsgi.input']\n\n remote = socket.create_connection((host, port), timeout=timeout)\n if kwargs.get('ssl'):\n remote = ssl.wrap_socket(remote)\n\n while True:\n data = wsgi_input.read(8192)\n if not data:\n break\n remote.send(data)\n start_response('200 OK', [])\n forward_socket(wsgi_input.socket, remote)\n yield 'out'\n\n\nif __name__ == '__main__':\n import gevent.wsgi\n logging.basicConfig(level=logging.INFO, format='%(levelname)s - - %(asctime)s %(message)s', datefmt='[%b %d %H:%M:%S]')\n server = gevent.wsgi.WSGIServer(('', int(sys.argv[1])), application)\n logging.info('local paas_application serving at %s:%s', server.address[0], server.address[1])\n 
server.serve_forever()\n"},"repo_name":{"kind":"string","value":"JerryXia/fastgoagent"},"path":{"kind":"string","value":"goagent/server/paas/wsgi.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":5380,"string":"5,380"},"score":{"kind":"number","value":0.0048327137546468404,"string":"0.004833"}}},{"rowIdx":89630,"cells":{"text":{"kind":"string","value":"\"\"\"Helpers for components that manage entities.\"\"\"\nimport asyncio\nfrom datetime import timedelta\n\nfrom homeassistant import config as conf_util\nfrom homeassistant.setup import async_prepare_setup_platform\nfrom homeassistant.const import (\n ATTR_ENTITY_ID, CONF_SCAN_INTERVAL, CONF_ENTITY_NAMESPACE,\n DEVICE_DEFAULT_NAME)\nfrom homeassistant.core import callback, valid_entity_id\nfrom homeassistant.exceptions import HomeAssistantError, PlatformNotReady\nfrom homeassistant.loader import get_component\nfrom homeassistant.helpers import config_per_platform, discovery\nfrom homeassistant.helpers.entity import async_generate_entity_id\nfrom homeassistant.helpers.event import (\n async_track_time_interval, async_track_point_in_time)\nfrom homeassistant.helpers.service import extract_entity_ids\nfrom homeassistant.util import slugify\nfrom homeassistant.util.async import (\n run_callback_threadsafe, run_coroutine_threadsafe)\nimport homeassistant.util.dt as dt_util\n\nDEFAULT_SCAN_INTERVAL = timedelta(seconds=15)\nSLOW_SETUP_WARNING = 10\nSLOW_SETUP_MAX_WAIT = 60\nPLATFORM_NOT_READY_RETRIES = 10\n\n\nclass EntityComponent(object):\n \"\"\"Helper class that will help a component manage its entities.\"\"\"\n\n def __init__(self, logger, domain, hass,\n scan_interval=DEFAULT_SCAN_INTERVAL, group_name=None):\n \"\"\"Initialize an entity component.\"\"\"\n self.logger = logger\n self.hass = hass\n\n self.domain = domain\n self.entity_id_format = domain + '.{}'\n self.scan_interval = scan_interval\n self.group_name = group_name\n\n self.entities = {}\n self.config = None\n\n self._platforms = {\n 'core': EntityPlatform(self, domain, self.scan_interval, 0, None),\n }\n self.async_add_entities = self._platforms['core'].async_add_entities\n self.add_entities = self._platforms['core'].add_entities\n\n def setup(self, config):\n \"\"\"Set up a full entity component.\n\n This doesn't block the executor to protect from deadlocks.\n \"\"\"\n self.hass.add_job(self.async_setup(config))\n\n @asyncio.coroutine\n def async_setup(self, config):\n \"\"\"Set up a full entity component.\n\n Loads the platforms from the config and will listen for supported\n discovered platforms.\n\n This method must be run in the event loop.\n \"\"\"\n self.config = config\n\n # Look in config for Domain, Domain 2, Domain 3 etc and load them\n tasks = []\n for p_type, p_config in config_per_platform(config, self.domain):\n tasks.append(self._async_setup_platform(p_type, p_config))\n\n if tasks:\n yield from asyncio.wait(tasks, loop=self.hass.loop)\n\n # Generic discovery listener for loading platform dynamically\n # Refer to: homeassistant.components.discovery.load_platform()\n @callback\n def component_platform_discovered(platform, info):\n \"\"\"Handle the loading of a platform.\"\"\"\n self.hass.async_add_job(\n self._async_setup_platform(platform, {}, info))\n\n discovery.async_listen_platform(\n self.hass, self.domain, component_platform_discovered)\n\n def extract_from_service(self, service, expand_group=True):\n \"\"\"Extract all known entities from a service call.\n\n Will return all entities 
if no entities specified in call.\n Will return an empty list if entities specified but unknown.\n \"\"\"\n return run_callback_threadsafe(\n self.hass.loop, self.async_extract_from_service, service,\n expand_group\n ).result()\n\n @callback\n def async_extract_from_service(self, service, expand_group=True):\n \"\"\"Extract all known and available entities from a service call.\n\n Will return all entities if no entities specified in call.\n Will return an empty list if entities specified but unknown.\n\n This method must be run in the event loop.\n \"\"\"\n if ATTR_ENTITY_ID not in service.data:\n return [entity for entity in self.entities.values()\n if entity.available]\n\n return [self.entities[entity_id] for entity_id\n in extract_entity_ids(self.hass, service, expand_group)\n if entity_id in self.entities and\n self.entities[entity_id].available]\n\n @asyncio.coroutine\n def _async_setup_platform(self, platform_type, platform_config,\n discovery_info=None, tries=0):\n \"\"\"Set up a platform for this component.\n\n This method must be run in the event loop.\n \"\"\"\n platform = yield from async_prepare_setup_platform(\n self.hass, self.config, self.domain, platform_type)\n\n if platform is None:\n return\n\n # Config > Platform > Component\n scan_interval = (\n platform_config.get(CONF_SCAN_INTERVAL) or\n getattr(platform, 'SCAN_INTERVAL', None) or self.scan_interval)\n parallel_updates = getattr(\n platform, 'PARALLEL_UPDATES',\n int(not hasattr(platform, 'async_setup_platform')))\n\n entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE)\n\n key = (platform_type, scan_interval, entity_namespace)\n\n if key not in self._platforms:\n entity_platform = self._platforms[key] = EntityPlatform(\n self, platform_type, scan_interval, parallel_updates,\n entity_namespace)\n else:\n entity_platform = self._platforms[key]\n\n self.logger.info(\"Setting up %s.%s\", self.domain, platform_type)\n warn_task = self.hass.loop.call_later(\n SLOW_SETUP_WARNING, self.logger.warning,\n \"Setup of platform %s is taking over %s seconds.\", platform_type,\n SLOW_SETUP_WARNING)\n\n try:\n if getattr(platform, 'async_setup_platform', None):\n task = platform.async_setup_platform(\n self.hass, platform_config,\n entity_platform.async_schedule_add_entities, discovery_info\n )\n else:\n # This should not be replaced with hass.async_add_job because\n # we don't want to track this task in case it blocks startup.\n task = self.hass.loop.run_in_executor(\n None, platform.setup_platform, self.hass, platform_config,\n entity_platform.schedule_add_entities, discovery_info\n )\n yield from asyncio.wait_for(\n asyncio.shield(task, loop=self.hass.loop),\n SLOW_SETUP_MAX_WAIT, loop=self.hass.loop)\n yield from entity_platform.async_block_entities_done()\n self.hass.config.components.add(\n '{}.{}'.format(self.domain, platform_type))\n except PlatformNotReady:\n tries += 1\n wait_time = min(tries, 6) * 30\n self.logger.warning(\n 'Platform %s not ready yet. 
Retrying in %d seconds.',\n platform_type, wait_time)\n async_track_point_in_time(\n self.hass, self._async_setup_platform(\n platform_type, platform_config, discovery_info, tries),\n dt_util.utcnow() + timedelta(seconds=wait_time))\n except asyncio.TimeoutError:\n self.logger.error(\n \"Setup of platform %s is taking longer than %s seconds.\"\n \" Startup will proceed without waiting any longer.\",\n platform_type, SLOW_SETUP_MAX_WAIT)\n except Exception: # pylint: disable=broad-except\n self.logger.exception(\n \"Error while setting up platform %s\", platform_type)\n finally:\n warn_task.cancel()\n\n def add_entity(self, entity, platform=None, update_before_add=False):\n \"\"\"Add entity to component.\"\"\"\n return run_coroutine_threadsafe(\n self.async_add_entity(entity, platform, update_before_add),\n self.hass.loop\n ).result()\n\n @asyncio.coroutine\n def async_add_entity(self, entity, platform=None, update_before_add=False):\n \"\"\"Add entity to component.\n\n This method must be run in the event loop.\n \"\"\"\n if entity is None or entity in self.entities.values():\n return False\n\n entity.hass = self.hass\n\n # Update properties before we generate the entity_id\n if update_before_add:\n try:\n yield from entity.async_device_update(warning=False)\n except Exception: # pylint: disable=broad-except\n self.logger.exception(\"Error on device update!\")\n return False\n\n # Write entity_id to entity\n if getattr(entity, 'entity_id', None) is None:\n object_id = entity.name or DEVICE_DEFAULT_NAME\n\n if platform is not None and platform.entity_namespace is not None:\n object_id = '{} {}'.format(platform.entity_namespace,\n object_id)\n\n entity.entity_id = async_generate_entity_id(\n self.entity_id_format, object_id,\n self.entities.keys())\n\n # Make sure it is valid in case an entity set the value themselves\n if entity.entity_id in self.entities:\n raise HomeAssistantError(\n 'Entity id already exists: {}'.format(entity.entity_id))\n elif not valid_entity_id(entity.entity_id):\n raise HomeAssistantError(\n 'Invalid entity id: {}'.format(entity.entity_id))\n\n self.entities[entity.entity_id] = entity\n\n if hasattr(entity, 'async_added_to_hass'):\n yield from entity.async_added_to_hass()\n\n yield from entity.async_update_ha_state()\n\n return True\n\n def update_group(self):\n \"\"\"Set up and/or update component group.\"\"\"\n run_callback_threadsafe(\n self.hass.loop, self.async_update_group).result()\n\n @callback\n def async_update_group(self):\n \"\"\"Set up and/or update component group.\n\n This method must be run in the event loop.\n \"\"\"\n if self.group_name is not None:\n ids = sorted(self.entities,\n key=lambda x: self.entities[x].name or x)\n group = get_component('group')\n group.async_set_group(\n self.hass, slugify(self.group_name), name=self.group_name,\n visible=False, entity_ids=ids\n )\n\n def reset(self):\n \"\"\"Remove entities and reset the entity component to initial values.\"\"\"\n run_coroutine_threadsafe(self.async_reset(), self.hass.loop).result()\n\n @asyncio.coroutine\n def async_reset(self):\n \"\"\"Remove entities and reset the entity component to initial values.\n\n This method must be run in the event loop.\n \"\"\"\n tasks = [platform.async_reset() for platform\n in self._platforms.values()]\n\n if tasks:\n yield from asyncio.wait(tasks, loop=self.hass.loop)\n\n self._platforms = {\n 'core': self._platforms['core']\n }\n self.entities = {}\n self.config = None\n\n if self.group_name is not None:\n group = get_component('group')\n 
group.async_remove(self.hass, slugify(self.group_name))\n\n def prepare_reload(self):\n \"\"\"Prepare reloading this entity component.\"\"\"\n return run_coroutine_threadsafe(\n self.async_prepare_reload(), loop=self.hass.loop).result()\n\n @asyncio.coroutine\n def async_prepare_reload(self):\n \"\"\"Prepare reloading this entity component.\n\n This method must be run in the event loop.\n \"\"\"\n try:\n conf = yield from \\\n conf_util.async_hass_config_yaml(self.hass)\n except HomeAssistantError as err:\n self.logger.error(err)\n return None\n\n conf = conf_util.async_process_component_config(\n self.hass, conf, self.domain)\n\n if conf is None:\n return None\n\n yield from self.async_reset()\n return conf\n\n\nclass EntityPlatform(object):\n \"\"\"Keep track of entities for a single platform and stay in loop.\"\"\"\n\n def __init__(self, component, platform, scan_interval, parallel_updates,\n entity_namespace):\n \"\"\"Initialize the entity platform.\"\"\"\n self.component = component\n self.platform = platform\n self.scan_interval = scan_interval\n self.parallel_updates = None\n self.entity_namespace = entity_namespace\n self.platform_entities = []\n self._tasks = []\n self._async_unsub_polling = None\n self._process_updates = asyncio.Lock(loop=component.hass.loop)\n\n if parallel_updates:\n self.parallel_updates = asyncio.Semaphore(\n parallel_updates, loop=component.hass.loop)\n\n @asyncio.coroutine\n def async_block_entities_done(self):\n \"\"\"Wait until all entities add to hass.\"\"\"\n if self._tasks:\n pending = [task for task in self._tasks if not task.done()]\n self._tasks.clear()\n\n if pending:\n yield from asyncio.wait(pending, loop=self.component.hass.loop)\n\n def schedule_add_entities(self, new_entities, update_before_add=False):\n \"\"\"Add entities for a single platform.\"\"\"\n run_callback_threadsafe(\n self.component.hass.loop,\n self.async_schedule_add_entities, list(new_entities),\n update_before_add\n ).result()\n\n @callback\n def async_schedule_add_entities(self, new_entities,\n update_before_add=False):\n \"\"\"Add entities for a single platform async.\"\"\"\n self._tasks.append(self.component.hass.async_add_job(\n self.async_add_entities(\n new_entities, update_before_add=update_before_add)\n ))\n\n def add_entities(self, new_entities, update_before_add=False):\n \"\"\"Add entities for a single platform.\"\"\"\n # That avoid deadlocks\n if update_before_add:\n self.component.logger.warning(\n \"Call 'add_entities' with update_before_add=True \"\n \"only inside tests or you can run into a deadlock!\")\n\n run_coroutine_threadsafe(\n self.async_add_entities(list(new_entities), update_before_add),\n self.component.hass.loop).result()\n\n @asyncio.coroutine\n def async_add_entities(self, new_entities, update_before_add=False):\n \"\"\"Add entities for a single platform async.\n\n This method must be run in the event loop.\n \"\"\"\n # handle empty list from component/platform\n if not new_entities:\n return\n\n @asyncio.coroutine\n def async_process_entity(new_entity):\n \"\"\"Add entities to StateMachine.\"\"\"\n new_entity.parallel_updates = self.parallel_updates\n ret = yield from self.component.async_add_entity(\n new_entity, self, update_before_add=update_before_add\n )\n if ret:\n self.platform_entities.append(new_entity)\n\n tasks = [async_process_entity(entity) for entity in new_entities]\n\n yield from asyncio.wait(tasks, loop=self.component.hass.loop)\n self.component.async_update_group()\n\n if self._async_unsub_polling is not None or \\\n not 
any(entity.should_poll for entity\n in self.platform_entities):\n return\n\n self._async_unsub_polling = async_track_time_interval(\n self.component.hass, self._update_entity_states, self.scan_interval\n )\n\n @asyncio.coroutine\n def async_reset(self):\n \"\"\"Remove all entities and reset data.\n\n This method must be run in the event loop.\n \"\"\"\n if not self.platform_entities:\n return\n\n tasks = [entity.async_remove() for entity in self.platform_entities]\n\n yield from asyncio.wait(tasks, loop=self.component.hass.loop)\n\n if self._async_unsub_polling is not None:\n self._async_unsub_polling()\n self._async_unsub_polling = None\n\n @asyncio.coroutine\n def _update_entity_states(self, now):\n \"\"\"Update the states of all the polling entities.\n\n To protect from flooding the executor, we will update async entities\n in parallel and other entities sequential.\n\n This method must be run in the event loop.\n \"\"\"\n if self._process_updates.locked():\n self.component.logger.warning(\n \"Updating %s %s took longer than the scheduled update \"\n \"interval %s\", self.platform, self.component.domain,\n self.scan_interval)\n return\n\n with (yield from self._process_updates):\n tasks = []\n for entity in self.platform_entities:\n if not entity.should_poll:\n continue\n tasks.append(entity.async_update_ha_state(True))\n\n if tasks:\n yield from asyncio.wait(tasks, loop=self.component.hass.loop)\n"},"repo_name":{"kind":"string","value":"ewandor/home-assistant"},"path":{"kind":"string","value":"homeassistant/helpers/entity_component.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"size":{"kind":"number","value":17208,"string":"17,208"},"score":{"kind":"number","value":0.00005811250581125058,"string":"0.000058"}}},{"rowIdx":89631,"cells":{"text":{"kind":"string","value":"# encoding: utf-8\n\nfrom collections import namedtuple\nimport inspect\nimport keyword\nimport sys\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\ntry:\n import jedi\n has_jedi = True\nexcept ImportError:\n has_jedi = False\n\nfrom bpython import autocomplete\nfrom bpython._py3compat import py3\nfrom bpython.test import mock\n\nis_py34 = sys.version_info[:2] >= (3, 4)\nif is_py34:\n glob_function = 'glob.iglob'\nelse:\n glob_function = 'glob.glob'\n\n\nclass TestSafeEval(unittest.TestCase):\n def test_catches_syntax_error(self):\n with self.assertRaises(autocomplete.EvaluationError):\n autocomplete.safe_eval('1re', {})\n\n\nclass TestFormatters(unittest.TestCase):\n\n def test_filename(self):\n completer = autocomplete.FilenameCompletion()\n last_part_of_filename = completer.format\n self.assertEqual(last_part_of_filename('abc'), 'abc')\n self.assertEqual(last_part_of_filename('abc/'), 'abc/')\n self.assertEqual(last_part_of_filename('abc/efg'), 'efg')\n self.assertEqual(last_part_of_filename('abc/efg/'), 'efg/')\n self.assertEqual(last_part_of_filename('/abc'), 'abc')\n self.assertEqual(last_part_of_filename('ab.c/e.f.g/'), 'e.f.g/')\n\n def test_attribute(self):\n self.assertEqual(autocomplete.after_last_dot('abc.edf'), 'edf')\n\n\ndef completer(matches):\n mock_completer = autocomplete.BaseCompletionType()\n mock_completer.matches = mock.Mock(return_value=matches)\n return mock_completer\n\n\nclass TestGetCompleter(unittest.TestCase):\n\n def test_no_completers(self):\n self.assertTupleEqual(autocomplete.get_completer([], 0, ''),\n ([], None))\n\n def test_one_completer_without_matches_returns_empty_list_and_none(self):\n a = 
completer([])\n self.assertTupleEqual(autocomplete.get_completer([a], 0, ''),\n ([], None))\n\n def test_one_completer_returns_matches_and_completer(self):\n a = completer(['a'])\n self.assertTupleEqual(autocomplete.get_completer([a], 0, ''),\n (['a'], a))\n\n def test_two_completers_with_matches_returns_first_matches(self):\n a = completer(['a'])\n b = completer(['b'])\n self.assertEqual(autocomplete.get_completer([a, b], 0, ''), (['a'], a))\n\n def test_first_non_none_completer_matches_are_returned(self):\n a = completer([])\n b = completer(['a'])\n self.assertEqual(autocomplete.get_completer([a, b], 0, ''), ([], None))\n\n def test_only_completer_returns_None(self):\n a = completer(None)\n self.assertEqual(autocomplete.get_completer([a], 0, ''), ([], None))\n\n def test_first_completer_returns_None(self):\n a = completer(None)\n b = completer(['a'])\n self.assertEqual(autocomplete.get_completer([a, b], 0, ''), (['a'], b))\n\n\nclass TestCumulativeCompleter(unittest.TestCase):\n\n def completer(self, matches, ):\n mock_completer = autocomplete.BaseCompletionType()\n mock_completer.matches = mock.Mock(return_value=matches)\n return mock_completer\n\n def test_no_completers_fails(self):\n with self.assertRaises(ValueError):\n autocomplete.CumulativeCompleter([])\n\n def test_one_empty_completer_returns_empty(self):\n a = self.completer([])\n cumulative = autocomplete.CumulativeCompleter([a])\n self.assertEqual(cumulative.matches(3, 'abc'), set())\n\n def test_one_none_completer_returns_none(self):\n a = self.completer(None)\n cumulative = autocomplete.CumulativeCompleter([a])\n self.assertEqual(cumulative.matches(3, 'abc'), None)\n\n def test_two_completers_get_both(self):\n a = self.completer(['a'])\n b = self.completer(['b'])\n cumulative = autocomplete.CumulativeCompleter([a, b])\n self.assertEqual(cumulative.matches(3, 'abc'), set(['a', 'b']))\n\n\nclass TestFilenameCompletion(unittest.TestCase):\n\n def setUp(self):\n self.completer = autocomplete.FilenameCompletion()\n\n def test_locate_fails_when_not_in_string(self):\n self.assertEqual(self.completer.locate(4, \"abcd\"), None)\n\n def test_locate_succeeds_when_in_string(self):\n self.assertEqual(self.completer.locate(4, \"a'bc'd\"), (2, 4, 'bc'))\n\n def test_issue_491(self):\n self.assertNotEqual(self.completer.matches(9, '\"a[a.l-1]'), None)\n\n @mock.patch(glob_function, new=lambda text: [])\n def test_match_returns_none_if_not_in_string(self):\n self.assertEqual(self.completer.matches(2, 'abcd'), None)\n\n @mock.patch(glob_function, new=lambda text: [])\n def test_match_returns_empty_list_when_no_files(self):\n self.assertEqual(self.completer.matches(2, '\"a'), set())\n\n @mock.patch(glob_function, new=lambda text: ['abcde', 'aaaaa'])\n @mock.patch('os.path.expanduser', new=lambda text: text)\n @mock.patch('os.path.isdir', new=lambda text: False)\n @mock.patch('os.path.sep', new='/')\n def test_match_returns_files_when_files_exist(self):\n self.assertEqual(sorted(self.completer.matches(2, '\"x')),\n ['aaaaa', 'abcde'])\n\n @mock.patch(glob_function, new=lambda text: ['abcde', 'aaaaa'])\n @mock.patch('os.path.expanduser', new=lambda text: text)\n @mock.patch('os.path.isdir', new=lambda text: True)\n @mock.patch('os.path.sep', new='/')\n def test_match_returns_dirs_when_dirs_exist(self):\n self.assertEqual(sorted(self.completer.matches(2, '\"x')),\n ['aaaaa/', 'abcde/'])\n\n @mock.patch(glob_function,\n new=lambda text: ['/expand/ed/abcde', '/expand/ed/aaaaa'])\n @mock.patch('os.path.expanduser',\n new=lambda text: 
text.replace('~', '/expand/ed'))\n @mock.patch('os.path.isdir', new=lambda text: False)\n @mock.patch('os.path.sep', new='/')\n def test_tilde_stays_pretty(self):\n self.assertEqual(sorted(self.completer.matches(4, '\"~/a')),\n ['~/aaaaa', '~/abcde'])\n\n @mock.patch('os.path.sep', new='/')\n def test_formatting_takes_just_last_part(self):\n self.assertEqual(self.completer.format('/hello/there/'), 'there/')\n self.assertEqual(self.completer.format('/hello/there'), 'there')\n\n\nclass MockNumPy(object):\n \"\"\"This is a mock numpy object that raises an error when there is an atempt\n to convert it to a boolean.\"\"\"\n\n def __nonzero__(self):\n raise ValueError(\"The truth value of an array with more than one \"\n \"element is ambiguous. Use a.any() or a.all()\")\n\n\nclass TestDictKeyCompletion(unittest.TestCase):\n\n def test_set_of_keys_returned_when_matches_found(self):\n com = autocomplete.DictKeyCompletion()\n local = {'d': {\"ab\": 1, \"cd\": 2}}\n self.assertSetEqual(com.matches(2, \"d[\", locals_=local),\n set([\"'ab']\", \"'cd']\"]))\n\n def test_none_returned_when_eval_error(self):\n com = autocomplete.DictKeyCompletion()\n local = {'e': {\"ab\": 1, \"cd\": 2}}\n self.assertEqual(com.matches(2, \"d[\", locals_=local), None)\n\n def test_none_returned_when_not_dict_type(self):\n com = autocomplete.DictKeyCompletion()\n local = {'l': [\"ab\", \"cd\"]}\n self.assertEqual(com.matches(2, \"l[\", locals_=local), None)\n\n def test_none_returned_when_no_matches_left(self):\n com = autocomplete.DictKeyCompletion()\n local = {'d': {\"ab\": 1, \"cd\": 2}}\n self.assertEqual(com.matches(3, \"d[r\", locals_=local), None)\n\n def test_obj_that_does_not_allow_conversion_to_bool(self):\n com = autocomplete.DictKeyCompletion()\n local = {'mNumPy': MockNumPy()}\n self.assertEqual(com.matches(7, \"mNumPy[\", locals_=local), None)\n\n\nclass Foo(object):\n a = 10\n\n def __init__(self):\n self.b = 20\n\n def method(self, x):\n pass\n\n\nclass OldStyleFoo:\n a = 10\n\n def __init__(self):\n self.b = 20\n\n def method(self, x):\n pass\n\n\nskip_old_style = unittest.skipIf(py3,\n 'In Python 3 there are no old style classes')\n\n\nclass Properties(Foo):\n\n @property\n def asserts_when_called(self):\n raise AssertionError(\"getter method called\")\n\n\nclass Slots(object):\n __slots__ = ['a', 'b']\n\n\nclass TestAttrCompletion(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.com = autocomplete.AttrCompletion()\n\n def test_att_matches_found_on_instance(self):\n self.assertSetEqual(self.com.matches(2, 'a.', locals_={'a': Foo()}),\n set(['a.method', 'a.a', 'a.b']))\n\n @skip_old_style\n def test_att_matches_found_on_old_style_instance(self):\n self.assertSetEqual(self.com.matches(2, 'a.',\n locals_={'a': OldStyleFoo()}),\n set(['a.method', 'a.a', 'a.b']))\n self.assertIn(u'a.__dict__',\n self.com.matches(4, 'a.__',\n locals_={'a': OldStyleFoo()}))\n\n @skip_old_style\n def test_att_matches_found_on_old_style_class_object(self):\n self.assertIn(u'A.__dict__',\n self.com.matches(4, 'A.__', locals_={'A': OldStyleFoo}))\n\n @skip_old_style\n def test_issue536(self):\n class OldStyleWithBrokenGetAttr:\n def __getattr__(self, attr):\n raise Exception()\n\n locals_ = {'a': OldStyleWithBrokenGetAttr()}\n self.assertIn(u'a.__module__',\n self.com.matches(4, 'a.__', locals_=locals_))\n\n def test_descriptor_attributes_not_run(self):\n com = autocomplete.AttrCompletion()\n self.assertSetEqual(com.matches(2, 'a.', locals_={'a': Properties()}),\n set(['a.b', 'a.a', 'a.method',\n 
'a.asserts_when_called']))\n\n def test_slots_not_crash(self):\n com = autocomplete.AttrCompletion()\n self.assertSetEqual(com.matches(2, 'A.', locals_={'A': Slots}),\n set(['A.b', 'A.a', 'A.mro']))\n\n\nclass TestExpressionAttributeCompletion(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.com = autocomplete.ExpressionAttributeCompletion()\n\n def test_att_matches_found_on_instance(self):\n self.assertSetEqual(self.com.matches(5, 'a[0].',\n locals_={'a': [Foo()]}),\n set(['method', 'a', 'b']))\n\n @skip_old_style\n def test_att_matches_found_on_old_style_instance(self):\n self.assertSetEqual(self.com.matches(5, 'a[0].',\n locals_={'a': [OldStyleFoo()]}),\n set(['method', 'a', 'b']))\n\n def test_other_getitem_methods_not_called(self):\n class FakeList(object):\n def __getitem__(inner_self, i):\n self.fail(\"possibly side-effecting __getitem_ method called\")\n\n self.com.matches(5, 'a[0].', locals_={'a': FakeList()})\n\n def test_tuples_complete(self):\n self.assertSetEqual(self.com.matches(5, 'a[0].',\n locals_={'a': (Foo(),)}),\n set(['method', 'a', 'b']))\n\n @unittest.skip('TODO, subclasses do not complete yet')\n def test_list_subclasses_complete(self):\n class ListSubclass(list):\n pass\n self.assertSetEqual(self.com.matches(5, 'a[0].',\n locals_={'a': ListSubclass([Foo()])}),\n set(['method', 'a', 'b']))\n\n def test_getitem_not_called_in_list_subclasses_overriding_getitem(self):\n class FakeList(list):\n def __getitem__(inner_self, i):\n self.fail(\"possibly side-effecting __getitem_ method called\")\n\n self.com.matches(5, 'a[0].', locals_={'a': FakeList()})\n\n def test_literals_complete(self):\n self.assertSetEqual(self.com.matches(10, '[a][0][0].',\n locals_={'a': (Foo(),)}),\n set(['method', 'a', 'b']))\n\n def test_dictionaries_complete(self):\n self.assertSetEqual(self.com.matches(7, 'a[\"b\"].',\n locals_={'a': {'b': Foo()}}),\n set(['method', 'a', 'b']))\n\n\nclass TestMagicMethodCompletion(unittest.TestCase):\n\n def test_magic_methods_complete_after_double_underscores(self):\n com = autocomplete.MagicMethodCompletion()\n block = \"class Something(object)\\n def __\"\n self.assertSetEqual(com.matches(10, ' def __', current_block=block),\n set(autocomplete.MAGIC_METHODS))\n\n\nComp = namedtuple('Completion', ['name', 'complete'])\n\n\n@unittest.skipUnless(has_jedi, \"jedi required\")\nclass TestMultilineJediCompletion(unittest.TestCase):\n\n def test_returns_none_with_single_line(self):\n com = autocomplete.MultilineJediCompletion()\n self.assertEqual(com.matches(2, 'Va', current_block='Va', history=[]),\n None)\n\n def test_returns_non_with_blank_second_line(self):\n com = autocomplete.MultilineJediCompletion()\n self.assertEqual(com.matches(0, '', current_block='class Foo():\\n',\n history=['class Foo():']), None)\n\n def matches_from_completions(self, cursor, line, block, history,\n completions):\n with mock.patch('bpython.autocomplete.jedi.Script') as Script:\n script = Script.return_value\n script.completions.return_value = completions\n com = autocomplete.MultilineJediCompletion()\n return com.matches(cursor, line, current_block=block,\n history=history)\n\n def test_completions_starting_with_different_letters(self):\n matches = self.matches_from_completions(\n 2, ' a', 'class Foo:\\n a', ['adsf'],\n [Comp('Abc', 'bc'), Comp('Cbc', 'bc')])\n self.assertEqual(matches, None)\n\n def test_completions_starting_with_different_cases(self):\n matches = self.matches_from_completions(\n 2, ' a', 'class Foo:\\n a', ['adsf'],\n [Comp('Abc', 'bc'), Comp('ade', 
'de')])\n self.assertSetEqual(matches, set(['ade']))\n\n @unittest.skipUnless(is_py34, 'asyncio required')\n def test_issue_544(self):\n com = autocomplete.MultilineJediCompletion()\n code = '@asyncio.coroutine\\ndef'\n history = ('import asyncio', '@asyncio.coroutin')\n com.matches(3, 'def', current_block=code, history=history)\n\n\nclass TestGlobalCompletion(unittest.TestCase):\n\n def setUp(self):\n self.com = autocomplete.GlobalCompletion()\n\n def test_function(self):\n def function():\n pass\n\n self.assertEqual(self.com.matches(8, 'function',\n locals_={'function': function}),\n set(('function(', )))\n\n def test_completions_are_unicode(self):\n for m in self.com.matches(1, 'a', locals_={'abc': 10}):\n self.assertIsInstance(m, type(u''))\n\n @unittest.skipIf(py3, \"in Python 3 invalid identifiers are passed through\")\n def test_ignores_nonascii_encodable(self):\n self.assertEqual(self.com.matches(3, 'abc', locals_={'abcß': 10}),\n None)\n\n def test_mock_kwlist(self):\n with mock.patch.object(keyword, 'kwlist', new=['abcd']):\n self.assertEqual(self.com.matches(3, 'abc', locals_={}), None)\n\n def test_mock_kwlist_non_ascii(self):\n with mock.patch.object(keyword, 'kwlist', new=['abcß']):\n self.assertEqual(self.com.matches(3, 'abc', locals_={}), None)\n\n\nclass TestParameterNameCompletion(unittest.TestCase):\n def test_set_of_params_returns_when_matches_found(self):\n def func(apple, apricot, banana, carrot):\n pass\n if py3:\n argspec = list(inspect.getfullargspec(func))\n else:\n argspec = list(inspect.getargspec(func))\n\n argspec = [\"func\", argspec, False]\n com = autocomplete.ParameterNameCompletion()\n self.assertSetEqual(com.matches(1, \"a\", argspec=argspec),\n set(['apple=', 'apricot=']))\n self.assertSetEqual(com.matches(2, \"ba\", argspec=argspec),\n set(['banana=']))\n self.assertSetEqual(com.matches(3, \"car\", argspec=argspec),\n set(['carrot=']))\n"},"repo_name":{"kind":"string","value":"MarkWh1te/xueqiu_predict"},"path":{"kind":"string","value":"python3_env/lib/python3.4/site-packages/bpython/test/test_autocomplete.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":16423,"string":"16,423"},"score":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":89632,"cells":{"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"Page model for Automation/Ansible/Repositories\"\"\"\nimport attr\nfrom navmazing import NavigateToAttribute, NavigateToSibling\nfrom widgetastic.exceptions import NoSuchElementException\nfrom widgetastic.widget import Checkbox, Fillable, ParametrizedView, Text, View\nfrom widgetastic_manageiq import PaginationPane, ParametrizedSummaryTable, Table\nfrom widgetastic_patternfly import Button, Dropdown, Input\n\nfrom cfme.base.login import BaseLoggedInPage\nfrom cfme.common import Taggable, TagPageView\nfrom cfme.exceptions import ItemNotFound\nfrom cfme.modeling.base import BaseCollection, BaseEntity\nfrom cfme.utils.appliance.implementations.ui import navigator, navigate_to, CFMENavigateStep\nfrom cfme.utils.wait import wait_for\nfrom .playbooks import PlaybooksCollection\n\n\nclass RepositoryBaseView(BaseLoggedInPage):\n title = Text(locator='.//div[@id=\"main-content\"]//h1')\n\n @property\n def in_ansible_repositories(self):\n return (\n self.logged_in_as_current_user and\n self.navigation.currently_selected == [\"Automation\", \"Ansible\", \"Repositories\"]\n )\n\n\nclass RepositoryAllView(RepositoryBaseView):\n @View.nested\n class toolbar(View): # noqa\n 
configuration = Dropdown(\"Configuration\")\n policy = Dropdown(text='Policy')\n\n entities = Table(\".//div[@id='gtl_div']//table\")\n paginator = PaginationPane()\n\n @property\n def is_displayed(self):\n return self.in_ansible_repositories and self.title.text == \"Repositories\"\n\n\nclass RepositoryDetailsView(RepositoryBaseView):\n\n @View.nested\n class toolbar(View): # noqa\n refresh = Button(title=\"Refresh this page\")\n configuration = Dropdown(\"Configuration\")\n download = Button(title=\"Download summary in PDF format\")\n policy = Dropdown(text='Policy')\n\n @View.nested\n class entities(View): # noqa\n summary = ParametrizedView.nested(ParametrizedSummaryTable)\n\n @property\n def is_displayed(self):\n return (\n self.in_ansible_repositories and\n self.title.text == \"{} (Summary)\".format(self.context[\"object\"].name)\n )\n\n\nclass RepositoryFormView(RepositoryBaseView):\n name = Input(name=\"name\")\n description = Input(name=\"description\")\n url = Input(name=\"scm_url\")\n scm_credentials = Dropdown(\"Select credentials\")\n scm_branch = Input(name=\"scm_branch\")\n # SCM Update Options\n clean = Checkbox(name=\"clean\")\n delete_on_update = Checkbox(name=\"scm_delete_on_update\")\n update_on_launch = Checkbox(name=\"scm_update_on_launch\")\n\n cancel_button = Button(\"Cancel\")\n\n\nclass RepositoryAddView(RepositoryFormView):\n add_button = Button(\"Add\")\n\n @property\n def is_displayed(self):\n return (\n self.in_ansible_repositories and\n self.title.text == \"Add new Repository\"\n )\n\n\nclass RepositoryEditView(RepositoryFormView):\n save_button = Button(\"Save\")\n reset_button = Button(\"Reset\")\n\n @property\n def is_displayed(self):\n return (\n self.in_ansible_repositories and\n self.title.text == 'Edit Repository \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\n@attr.s\nclass Repository(BaseEntity, Fillable, Taggable):\n \"\"\"A class representing one Embedded Ansible repository in the UI.\"\"\"\n\n name = attr.ib()\n url = attr.ib()\n description = attr.ib(default=\"\")\n scm_credentials = attr.ib(default=None)\n scm_branch = attr.ib(default=False)\n clean = attr.ib(default=False)\n delete_on_update = attr.ib(default=False)\n update_on_launch = attr.ib(default=None)\n\n _collections = {'playbooks': PlaybooksCollection}\n\n @property\n def db_object(self):\n table = self.appliance.db.client[\"configuration_script_sources\"]\n return self.appliance.db.client.sessionmaker(autocommit=True).query(table).filter(\n table.name == self.name).first()\n\n @property\n def playbooks(self):\n return self.collections.playbooks\n\n @property\n def as_fill_value(self):\n \"\"\"For use when selecting this repo in the UI forms\"\"\"\n return self.name\n\n def update(self, updates):\n \"\"\"Update the repository in the UI.\n\n Args:\n updates (dict): :py:class:`dict` of the updates.\n \"\"\"\n original_updated_at = self.db_object.updated_at\n view = navigate_to(self, \"Edit\")\n changed = view.fill(updates)\n if changed:\n view.save_button.click()\n else:\n view.cancel_button.click()\n view = self.create_view(RepositoryAllView)\n assert view.is_displayed\n view.flash.assert_no_error()\n if changed:\n if self.appliance.version < \"5.9\":\n msg = 'Edit of Repository \"{}\" was successfully initialized.'\n else:\n msg = 'Edit of Repository \"{}\" was successfully initiated.'\n view.flash.assert_message(msg.format(updates.get(\"name\", self.name)))\n\n def _wait_until_changes_applied():\n changed_updated_at = self.db_object.updated_at\n return not original_updated_at == 
changed_updated_at\n\n wait_for(_wait_until_changes_applied, delay=10, timeout=\"5m\")\n else:\n view.flash.assert_message(\n 'Edit of Repository \"{}\" cancelled by the user.'.format(self.name))\n\n @property\n def exists(self):\n try:\n navigate_to(self, \"Details\")\n return True\n except ItemNotFound:\n return False\n\n def delete(self):\n \"\"\"Delete the repository in the UI.\"\"\"\n view = navigate_to(self, \"Details\")\n if self.appliance.version < \"5.9\":\n remove_str = \"Remove this Repository\"\n else:\n remove_str = \"Remove this Repository from Inventory\"\n view.toolbar.configuration.item_select(remove_str, handle_alert=True)\n repo_list_page = self.create_view(RepositoryAllView)\n assert repo_list_page.is_displayed\n repo_list_page.flash.assert_no_error()\n repo_list_page.flash.assert_message(\n 'Delete of Repository \"{}\" was successfully initiated.'.format(self.name))\n wait_for(\n lambda: not self.exists,\n delay=10,\n timeout=120,\n fail_func=repo_list_page.browser.selenium.refresh)\n\n def refresh(self):\n \"\"\"Perform a refresh to update the repository.\"\"\"\n view = navigate_to(self, \"Details\")\n view.toolbar.configuration.item_select(\"Refresh this Repository\", handle_alert=True)\n view.flash.assert_no_error()\n view.flash.assert_message(\"Embedded Ansible refresh has been successfully initiated\")\n\n\n@attr.s\nclass RepositoryCollection(BaseCollection):\n \"\"\"Collection object for the :py:class:`cfme.ansible.repositories.Repository`.\"\"\"\n\n ENTITY = Repository\n\n def create(self, name, url, description=None, scm_credentials=None, scm_branch=None,\n clean=None, delete_on_update=None, update_on_launch=None):\n \"\"\"Add an ansible repository in the UI and return a Repository object.\n\n Args:\n name (str): name of the repository\n url (str): url of the repository\n description (str): description of the repository\n scm_credentials (str): credentials of the repository\n scm_branch (str): branch name\n clean (bool): clean\n delete_on_update (bool): delete the repo at each update\n update_on_launch (bool): update the repo at each launch\n\n Returns: an instance of :py:class:`cfme.ansible.repositories.Repository`\n \"\"\"\n add_page = navigate_to(self, \"Add\")\n fill_dict = {\n \"name\": name,\n \"description\": description,\n \"url\": url,\n \"scm_credentials\": scm_credentials,\n \"scm_branch\": scm_branch,\n \"clean\": clean,\n \"delete_on_update\": delete_on_update,\n \"update_on_launch\": update_on_launch\n }\n add_page.fill(fill_dict)\n add_page.add_button.click()\n repo_list_page = self.create_view(RepositoryAllView)\n assert repo_list_page.is_displayed\n repo_list_page.flash.assert_no_error()\n if self.appliance.version < \"5.9.2\":\n initiated_message = 'Add of Repository \"{}\" was successfully initialized.'.format(name)\n else:\n initiated_message = 'Add of Repository \"{}\" was successfully initiated.'.format(name)\n repo_list_page.flash.assert_message(initiated_message)\n\n repository = self.instantiate(\n name,\n url,\n description=description,\n scm_credentials=scm_credentials,\n scm_branch=scm_branch,\n clean=clean,\n delete_on_update=delete_on_update,\n update_on_launch=update_on_launch)\n\n wait_for(lambda: repository.exists,\n fail_func=repo_list_page.browser.selenium.refresh,\n delay=5,\n timeout=900)\n\n return repository\n\n def all(self):\n \"\"\"Return all repositories of the appliance.\n\n Returns: a :py:class:`list` of :py:class:`cfme.ansible.repositories.Repository` instances\n \"\"\"\n table = 
self.appliance.db.client[\"configuration_script_sources\"]\n result = []\n for row in self.appliance.db.client.session.query(table):\n result.append(\n self.instantiate(\n row.name,\n row.scm_url,\n description=row.description,\n scm_branch=row.scm_branch,\n clean=row.scm_clean,\n delete_on_update=row.scm_delete_on_update,\n update_on_launch=row.scm_update_on_launch)\n )\n return result\n\n def delete(self, *repositories):\n \"\"\"Delete one or more ansible repositories in the UI.\n\n Args:\n repositories: a list of :py:class:`cfme.ansible.repositories.Repository`\n instances to delete\n\n Raises:\n ValueError: if some of the repositories were not found in the UI\n \"\"\"\n repositories = list(repositories)\n checked_repositories = []\n view = navigate_to(self.appliance.server, \"AnsibleRepositories\")\n view.paginator.uncheck_all()\n if not view.entities.is_displayed:\n raise ValueError(\"No repository found!\")\n for row in view.entities:\n for repository in repositories:\n if repository.name == row.name.text:\n checked_repositories.append(repository)\n row[0].check()\n break\n if set(repositories) == set(checked_repositories):\n break\n if set(repositories) != set(checked_repositories):\n raise ValueError(\"Some of the repositories were not found in the UI.\")\n view.toolbar.configuration.item_select(\"Remove selected Repositories\", handle_alert=True)\n view.flash.assert_no_error()\n for repository in checked_repositories:\n view.flash.assert_message(\n 'Delete of Repository \"{}\" was successfully initiated.'.format(repository.name))\n\n\n@navigator.register(RepositoryCollection, 'All')\nclass AnsibleRepositories(CFMENavigateStep):\n VIEW = RepositoryAllView\n prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')\n\n def step(self):\n self.view.navigation.select(\"Automation\", \"Ansible\", \"Repositories\")\n\n\n@navigator.register(Repository, 'Details')\nclass Details(CFMENavigateStep):\n VIEW = RepositoryDetailsView\n prerequisite = NavigateToAttribute('parent', 'All')\n\n def step(self):\n try:\n row = self.prerequisite_view.paginator.find_row_on_pages(\n table=self.prerequisite_view.entities,\n name=self.obj.name)\n row.click()\n except NoSuchElementException:\n raise ItemNotFound('Could not locate ansible repository table row with name {}'\n .format(self.obj.name))\n\n\n@navigator.register(RepositoryCollection, 'Add')\nclass Add(CFMENavigateStep):\n VIEW = RepositoryAddView\n prerequisite = NavigateToSibling(\"All\")\n\n def step(self):\n # workaround for disabled Dropdown\n dropdown = self.prerequisite_view.toolbar.configuration\n wait_for(\n dropdown.item_enabled,\n func_args=[\"Add New Repository\"],\n timeout=60,\n fail_func=self.prerequisite_view.browser.refresh\n )\n dropdown.item_select(\"Add New Repository\")\n\n\n@navigator.register(Repository, \"Edit\")\nclass Edit(CFMENavigateStep):\n VIEW = RepositoryEditView\n prerequisite = NavigateToSibling(\"Details\")\n\n def step(self):\n self.prerequisite_view.toolbar.configuration.item_select(\"Edit this Repository\")\n\n\n@navigator.register(Repository, 'EditTags')\nclass EditTagsFromListCollection(CFMENavigateStep):\n VIEW = TagPageView\n\n prerequisite = NavigateToAttribute('parent', 'All')\n\n def step(self):\n try:\n row = self.prerequisite_view.paginator.find_row_on_pages(\n table=self.prerequisite_view.entities,\n name=self.obj.name)\n row[0].click()\n except NoSuchElementException:\n raise ItemNotFound('Could not locate ansible repository table row with name {}'\n .format(self.obj.name))\n 
self.prerequisite_view.toolbar.policy.item_select('Edit Tags')\n"},"repo_name":{"kind":"string","value":"anurag03/integration_tests"},"path":{"kind":"string","value":"cfme/ansible/repositories.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"gpl-2.0"},"size":{"kind":"number","value":13574,"string":"13,574"},"score":{"kind":"number","value":0.001841756298806542,"string":"0.001842"}}},{"rowIdx":89633,"cells":{"text":{"kind":"string","value":"from color import Color\nfrom orientation import Orientation\nfrom tile import (\n Terrain,\n Harbor,\n)\n\n\nCENTER_TILE_TEMPLATE = [\n list(' + -- + '),\n list(' / \\ '),\n list('+ +'),\n list(' \\ / '),\n list(' + -- + '),\n]\n\nBORDER_TILE_TEMPLATE = [\n list(' | -- | '),\n list(' - - '),\n list(' | | '),\n list(' - - '),\n list(' | -- | '),\n]\n\nNUMBER_SPACES = [\n (2, 4), (2, 5)\n]\n\nPERIMETER_SPACES = [\n (0, 2), (0, 4),\n (0, 5), (0, 7),\n (1, 1), (1, 8),\n (2, 0), (2, 2),\n (2, 7), (2, 9),\n (3, 1), (3, 8),\n (4, 2), (4, 4),\n (4, 5), (4, 7),\n]\n\nRESOURCE_SPACES = [\n (1, 3), (1, 4),\n (1, 5), (1, 6),\n (2, 2), (2, 7),\n (3, 3), (3, 4),\n (3, 5), (3, 6),\n]\n\n# TODO: upforgrabs\n# Fix ports to work with all size boards\n# HARBOR_BRIDGE_SPACES = {\n# Orientation.NORTH_EAST: [(2, 7), (1, 6)],\n# Orientation.NORTH: [(1, 6), (1, 3)],\n# Orientation.NORTH_WEST: [(1, 3), (2, 2)],\n# Orientation.SOUTH_WEST: [(2, 2), (3, 3)],\n# Orientation.SOUTH: [(3, 3), (3, 6)],\n# Orientation.SOUTH_EAST: [(3, 6), (2, 7)],\n# }\n\n\ndef remove_border_characters(board, coordinate, diff, tile_grid):\n\n # First, calculate some helper values\n helper_value_one = board.size.width // 2\n helper_value_two = board.size.height - helper_value_one\n\n # Top vertical ticks\n if (\n coordinate.row == -1 or\n coordinate.column == -1\n ):\n tile_grid[0][2] = ' '\n tile_grid[0][7] = ' '\n\n # Top horizonal ticks\n else:\n tile_grid[0][4] = ' '\n tile_grid[0][5] = ' '\n\n # Bottom vertical ticks\n if (\n coordinate.row == board.size.height or\n coordinate.column == board.size.height\n ):\n tile_grid[4][2] = ' '\n tile_grid[4][7] = ' '\n\n # Bottom horizonal ticks\n else:\n tile_grid[4][4] = ' '\n tile_grid[4][5] = ' '\n\n # Upper left single tick\n if not (\n coordinate.column == -1 and\n coordinate.row < helper_value_one\n ):\n tile_grid[1][1] = ' '\n\n # Upper right single tick\n if not (\n coordinate.row == -1 and\n coordinate.column < helper_value_one\n ):\n tile_grid[1][8] = ' '\n\n # Bottom left single tick\n if not (\n coordinate.row == board.size.height and\n helper_value_two <= coordinate.column\n ):\n tile_grid[3][1] = ' '\n\n # Bottom right single tick\n if not (\n coordinate.column == board.size.height and\n helper_value_two <= coordinate.row\n ):\n tile_grid[3][8] = ' '\n\n # Left vertical ticks\n if abs(diff) <= helper_value_one or diff < 0:\n tile_grid[0][2] = ' '\n tile_grid[2][2] = ' '\n tile_grid[4][2] = ' '\n \n # Right vertical ticks\n if abs(diff) <= helper_value_one or 0 < diff:\n tile_grid[0][7] = ' '\n tile_grid[2][7] = ' '\n tile_grid[4][7] = ' '\n\n return tile_grid\n\n\ndef copy_grid(grid):\n return [[char for char in row] for row in grid]\n\n\ndef grid_to_str(grid):\n return '\\n'.join(''.join(row) for row in grid)\n\n\ndef str_to_grid(string):\n return [[c for c in line] for line in string.split('\\n')]\n\n\ndef get_tile_grid(tile, tile_grid):\n tile_grid = copy_grid(tile_grid)\n tile_grid = replace_numbers(tile, tile_grid)\n tile_grid = replace_perimeter(tile, tile_grid)\n tile_grid = replace_resources(tile, 
tile_grid)\n return tile_grid\n\n\ndef replace_numbers(tile, tile_grid):\n if isinstance(tile, Harbor):\n return tile_grid\n if not tile.number:\n return tile_grid\n if isinstance(tile, Terrain):\n number_string = str(tile.number).zfill(len(NUMBER_SPACES))\n tile_grid = copy_grid(tile_grid)\n for row, col in NUMBER_SPACES:\n index = col - min(NUMBER_SPACES)[1]\n tile_grid[row][col] = number_string[index]\n return tile_grid\n\n\ndef replace_perimeter(tile, tile_grid):\n tile_grid = copy_grid(tile_grid)\n for row, col in PERIMETER_SPACES:\n colored = Color.GRAY.apply(tile_grid[row][col])\n tile_grid[row][col] = colored\n # TODO: upforgrabs\n # Fix ports to work with all size boards\n # if isinstance(tile, Harbor) and tile.orientation:\n # spaces = HARBOR_BRIDGE_SPACES[tile.orientation]\n # for row, col in spaces:\n # char = '-'\n # if row != 2:\n # char = '\\\\' if (row == 1) == (col == 3) else '/'\n # tile_grid[row][col] = Color.GRAY.apply(char)\n return tile_grid\n\n\ndef replace_resources(tile, tile_grid):\n if isinstance(tile, Terrain):\n if not tile.resource:\n return tile_grid\n spaces = RESOURCE_SPACES\n if isinstance(tile, Harbor):\n # TODO: upforgrabs\n # Fix ports to work with all size boards\n # if not tile.orientation:\n # return tile_grid\n return tile_grid\n spaces = NUMBER_SPACES\n char = '?'\n if tile.resource:\n char = tile.resource.color.apply(tile.resource.char)\n tile_grid = copy_grid(tile_grid)\n for row, col in spaces:\n tile_grid[row][col] = char\n return tile_grid\n\n\nclass View(object):\n\n def __init__(self, board):\n self.board = board\n\n def __str__(self):\n return grid_to_str(self.get_board_grid())\n\n def get_board_grid(self):\n\n # Add two to the height and width of the\n\t\t# board to account for the perimeter tiles\n num_tiles_tall = self.board.size.height + 2\n num_tiles_wide = self.board.size.width + 2\n\n # The number of characters tall and wide for the tile grid\n tile_grid_height = len(CENTER_TILE_TEMPLATE)\n tile_grid_narrow = len(''.join(CENTER_TILE_TEMPLATE[0]).strip())\n tile_grid_wide = len(''.join(CENTER_TILE_TEMPLATE[2]).strip())\n\n # The number of characters tall and wide for the board grid\n total_grid_height = (tile_grid_height - 1) * num_tiles_tall + 1\n total_grid_width = (\n (num_tiles_wide // 2 + 1) * (tile_grid_wide - 1) +\n (num_tiles_wide // 2 ) * (tile_grid_narrow - 1) + 1\n )\n\n # Create a 2D array of empty spaces, large enough to\n # contain all characters for all tiles (but no larger)\n board_grid = [\n [' ' for i in range(total_grid_width)]\n for j in range(total_grid_height)\n ]\n\n # For all board tiles ...\n for coordinate, tile in self.board.tiles.items():\n\n # ... determine some intermediate values ...\n # Note: We add +1 here to account for perimeter tiles\n sum_ = (coordinate.row + 1) + (coordinate.column + 1)\n diff = (coordinate.row + 1) - (coordinate.column + 1)\n\n # ... and use them to figure the location of the upper\n # left corner of the tile grid within the board grid ...\n spaces_from_top = sum_ * (tile_grid_height // 2)\n spaces_from_left = (\n ((num_tiles_wide // 2) - diff) *\n ((tile_grid_wide + tile_grid_narrow) // 2 - 1)\n )\n\n # ... then retrieve the base tile grid for the tile ...\n template = (\n CENTER_TILE_TEMPLATE if\n isinstance(tile, Terrain) else\n remove_border_characters(\n board=self.board,\n coordinate=coordinate,\n diff=diff,\n tile_grid=copy_grid(BORDER_TILE_TEMPLATE),\n )\n )\n\n # ... 
and then replace the blank characters in the board\n # grid with the correct characters from the tile grid\n tile_grid = get_tile_grid(tile, template)\n for i, tile_line in enumerate(tile_grid):\n for j, char in enumerate(tile_line):\n if ' ' not in char:\n row = board_grid[spaces_from_top + i]\n row[spaces_from_left + j] = char\n\n # Trim extra columns off front and back of the grid\n board_grid = [row[2:-2] for row in board_grid]\n return board_grid\n"},"repo_name":{"kind":"string","value":"mackorone/catan"},"path":{"kind":"string","value":"src/view.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":8044,"string":"8,044"},"score":{"kind":"number","value":0.0011188463451019394,"string":"0.001119"}}},{"rowIdx":89634,"cells":{"text":{"kind":"string","value":"\n# Principal Component Analysis Code : \n\nfrom numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud\nfrom pylab import *\nimport numpy as np\nimport matplotlib.pyplot as pp\n#from enthought.mayavi import mlab\n\nimport scipy.ndimage as ni\n\nimport roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')\nimport rospy\n#import hrl_lib.mayavi2_util as mu\nimport hrl_lib.viz as hv\nimport hrl_lib.util as ut\nimport hrl_lib.matplotlib_util as mpu\nimport pickle\n\nfrom mvpa.clfs.knn import kNN\nfrom mvpa.datasets import Dataset\nfrom mvpa.clfs.transerror import TransferError\nfrom mvpa.misc.data_generators import normalFeatureDataset\nfrom mvpa.algorithms.cvtranserror import CrossValidatedTransferError\nfrom mvpa.datasets.splitters import NFoldSplitter\n\nimport sys\nsys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')\n\nfrom data_method_V import Fmat_original\n\ndef pca(X):\n\n #get dimensions\n num_data,dim = X.shape\n\n #center data\n mean_X = X.mean(axis=1)\n M = (X-mean_X) # subtract the mean (along columns)\n Mcov = cov(M)\n\n ###### Sanity Check ######\n i=0\n n=0\n while i < 123:\n j=0\n while j < 140:\n if X[i,j] != X[i,j]:\n print X[i,j]\n print i,j\n n=n+1\n j = j+1\n i=i+1\n\n print n\n ##########################\n\n print 'PCA - COV-Method used'\n val,vec = linalg.eig(Mcov)\n \n #return the projection matrix, the variance and the mean\n return vec,val,mean_X, M, Mcov\n\n\ndef my_mvpa(Y,num2):\n\n #Using PYMVPA\n PCA_data = np.array(Y)\n PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5\n clf = kNN(k=num2)\n terr = TransferError(clf)\n ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)\n cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])\n error = cvterr(ds1)\n return (1-error)*100\n\ndef result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):\n \n# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)\n\n W = eigvec_total[:,0:num_PC]\n m_W, n_W = np.shape(W)\n \n# Normalizes 
the data set with respect to its variance (Not an Integral part of PCA, but useful)\n length = len(eigval_total)\n s = np.matrix(np.zeros(length)).T\n i = 0\n while i < length:\n s[i] = sqrt(C[i,i])\n i = i+1\n Z = np.divide(B,s)\n m_Z, n_Z = np.shape(Z)\n \n#Projected Data:\n Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'\n m_Y, n_Y = np.shape(Y.T)\n return Y.T\n \n\nif __name__ == '__main__':\n\n Fmat = Fmat_original\n\n# Checking the Data-Matrix\n\n m_tot, n_tot = np.shape(Fmat)\n \n print 'Total_Matrix_Shape:',m_tot,n_tot\n \n eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)\n \n #print eigvec_total\n #print eigval_total\n #print mean_data_total\n m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))\n m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)\n m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))\n print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total\n print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total\n print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total\n\n#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used. \n\n perc_total = cumsum(eigval_total)/sum(eigval_total)\n num_PC=1\n while num_PC <=20:\n Proj = np.zeros((140,num_PC))\n Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)\n # PYMVPA:\n num=0\n cv_acc = np.zeros(21)\n while num <=20:\n cv_acc[num] = my_mvpa(Proj,num)\n num = num+1\n plot(np.arange(21),cv_acc,'-s')\n grid('True')\n hold('True')\n num_PC = num_PC+1\n legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))\n ylabel('Cross-Validation Accuracy')\n xlabel('k in k-NN Classifier')\n show()\n\n\n\n"},"repo_name":{"kind":"string","value":"tapomayukh/projects_in_python"},"path":{"kind":"string","value":"classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/best_kNN_PCA/objects/test11_cross_validate_objects_1200ms_scaled_method_v.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":4915,"string":"4,915"},"score":{"kind":"number","value":0.020752797558494403,"string":"0.020753"}}},{"rowIdx":89635,"cells":{"text":{"kind":"string","value":"\nimport logging\nimport pickle\nfrom time import time\nfrom hashlib import md5\nfrom base64 import urlsafe_b64encode\nfrom os import urandom\n\nimport redis\nfrom flask import Flask, request, render_template\n\nimport config\n\n\nlogging.basicConfig(level=logging.DEBUG)\n\napp = Flask(__name__,static_folder='public')\nr = redis.StrictRedis(\n host=config.REDIS_HOST,\n port=config.REDIS_PORT,\n db=config.REDIS_DB,\n password=config.REDIS_PASSWORD\n)\n\n\n@app.route('/set', methods=['post'])\ndef setPass():\n assert request.method == 'POST'\n password = request.form['pass']\n iv = request.form['iv']\n uuid = urlsafe_b64encode(md5(urandom(128)).digest())[:8].decode('utf-8')\n\n with r.pipeline() as pipe:\n data = {'status': 'ok', 'iv': iv, 'pass': password}\n pipe.set(uuid, pickle.dumps(data))\n pipe.expire(uuid, config.TTL)\n pipe.execute()\n\n return '/get/{}'.format(uuid)\n\n\n@app.route('/get/', methods=['get'])\ndef getPass(uuid):\n with r.pipeline() as pipe:\n raw_data = r.get(uuid)\n\n if not raw_data:\n return render_template('expired.html')\n\n 
data = pickle.loads(raw_data)\n if data['status'] == 'ok':\n new_data = {'status': 'withdrawn', 'time': int(time()), 'ip': request.remote_addr}\n r.set(uuid, pickle.dumps(new_data))\n return render_template('get.html', data=data['iv'] + '|' + data['pass'])\n\n if data['status'] == 'withdrawn':\n return render_template('withdrawn.html')\n\n\n@app.route('/', methods=['get'])\ndef index():\n ttl = int(config.TTL/60)\n return render_template('index.html', ttl=ttl)\n\nif __name__ == '__main__':\n try:\n port = config.APP_PORT\n except AttributeError:\n port = 5000\n\n try:\n host = config.APP_HOST\n except AttributeError:\n host = '127.0.0.1'\n\n app.run(host=host, port=port)\n"},"repo_name":{"kind":"string","value":"skooda/passIon"},"path":{"kind":"string","value":"index.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":1879,"string":"1,879"},"score":{"kind":"number","value":0.0021287919105907396,"string":"0.002129"}}},{"rowIdx":89636,"cells":{"text":{"kind":"string","value":"# Put libraries such as Divisi in the PYTHONPATH.\nimport sys, pickle, os\nsys.path = ['/stuff/openmind'] + sys.path\nfrom csc.divisi.cnet import *\nfrom csc.divisi.graphics import output_svg\nfrom vendor_db import iter_info\nfrom csamoa.corpus.models import *\nfrom csamoa.conceptnet.models import *\n\n# Load the OMCS language model\nen = Language.get('en')\nen_nl=get_nl('en')\n\n# Load OMCS stopwords\nsw = open('stopwords.txt', 'r')\nswords = [x.strip() for x in sw.readlines()]\n\n# Parameters\nfactor = 1\nwsize = 2\n\ndef check_concept(concept):\n try:\n Concept.get(concept, 'en')\n return True\n except:\n return False\n\ndef english_window(text):\n windows = []\n words = [x for x in text.lower().replace('&', 'and').split() if x not in swords]\n for x in range(len(words)-wsize+1):\n pair = \" \".join(words[x:x+wsize])\n if check_concept(pair): windows.append(pair)\n if check_concept(words[x]): windows.append(words[x])\n for c in range(wsize-1):\n if check_concept(words[c]): windows.append(words[c])\n return windows\n\nif 'vendor_only.pickle' in os.listdir('.'):\n print \"Loading saved matrix.\"\n matrix = pickle.load(open(\"vendor_only.pickle\"))\nelse:\n print \"Creating New Tensor\"\n matrix = SparseLabeledTensor(ndim=2)\n print \"Adding Vendors\"\n for co, englist in iter_info('CFB_Cities'):\n print co\n for phrase in englist:\n parts = english_window(phrase)\n print parts\n for part in parts:\n matrix[co, ('sells', part)] += factor\n matrix[part, ('sells_inv', co)] += factor\n pickle.dump(matrix, open(\"vendor_only.pickle\", 'w'))\n\nprint \"Normalizing.\"\nmatrix = matrix.normalized()\n\nprint \"Matrix constructed. 
Running SVD.\"\nsvd = matrix.svd(k=10)\nsvd.summarize()\n\noutput_svg(svd.u, \"vendorplain.svg\", xscale=3000, yscale=3000, min=0.03)\n"},"repo_name":{"kind":"string","value":"commonsense/divisi"},"path":{"kind":"string","value":"doc/demo/vendor_only_svd.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"gpl-3.0"},"size":{"kind":"number","value":1871,"string":"1,871"},"score":{"kind":"number","value":0.008017103153393906,"string":"0.008017"}}},{"rowIdx":89637,"cells":{"text":{"kind":"string","value":"import os\n\n\nbusybox_tar_path = os.path.join(os.path.dirname(__file__), '../../../data/busyboxlight.tar')\n\n# these are in correct ancestry order\nbusybox_ids = (\n '769b9341d937a3dba9e460f664b4f183a6cecdd62b337220a28b3deb50ee0a02',\n '48e5f45168b97799ad0aafb7e2fef9fac57b5f16f6db7f67ba2000eb947637eb',\n 'bf747efa0e2fa9f7c691588ce3938944c75607a7bb5e757f7369f86904d97c78',\n '511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158',\n)\n"},"repo_name":{"kind":"string","value":"rbarlow/pulp_docker"},"path":{"kind":"string","value":"plugins/test/unit/plugins/importers/data.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"gpl-2.0"},"size":{"kind":"number","value":450,"string":"450"},"score":{"kind":"number","value":0.0022222222222222222,"string":"0.002222"}}},{"rowIdx":89638,"cells":{"text":{"kind":"string","value":"import sys\nimport argparse\nfrom svtools.external_cmd import ExternalCmd\n\nclass BedpeSort(ExternalCmd):\n def __init__(self):\n super(BedpeSort, self).__init__('bedpesort', 'bin/bedpesort')\n\ndef description():\n return 'sort a BEDPE file'\n\ndef epilog():\n return 'To read in stdin and output to a file, use /dev/stdin or - as the first positional argument.'\n\ndef add_arguments_to_parser(parser):\n parser.add_argument('input', metavar='', nargs='?', help='BEDPE file to sort')\n parser.add_argument('output', metavar='', nargs='?', help='output file to write to')\n parser.set_defaults(entry_point=run_from_args)\n\ndef command_parser():\n parser = argparse.ArgumentParser(description=description())\n add_arguments_to_parser(parser)\n return parser\n\ndef run_from_args(args):\n opts = list()\n if args.input:\n opts.append(args.input)\n if args.output:\n opts.append(args.output)\n\n sort_cmd_runner = BedpeSort()\n sort_cmd_runner.run_cmd_with_options(opts)\n\nif __name__ == \"__main__\":\n parser = command_parser()\n args = parser.parse_args()\n sys.exit(args.entry_point(args))\n"},"repo_name":{"kind":"string","value":"hall-lab/svtools"},"path":{"kind":"string","value":"svtools/bedpesort.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":1152,"string":"1,152"},"score":{"kind":"number","value":0.008680555555555556,"string":"0.008681"}}},{"rowIdx":89639,"cells":{"text":{"kind":"string","value":"#!/usr/bin/env python\n#coding=utf-8\n\n'''\nCreated on 2010-4-27\nGPL License\n@author: sypxue@gmail.com\n'''\n\nimport urllib,pickle,StringIO\nfrom micolog_plugin import *\nfrom google.appengine.ext import db\nfrom model import OptionSet,Comment,Blog,Entry,Blog\nfrom google.appengine.api import urlfetch\n\nclass akismet(Plugin):\n\tdef __init__(self):\n\t\tPlugin.__init__(self,__file__)\n\t\tself.author=\"sypxue\"\n\t\tself.authoruri=\"http://sypxue.appspot.com\"\n\t\tself.uri=\"http://sypxue.appspot.com\"\n\t\tself.description=\"\"\"Wordpress自带的Akismet插件的micolog版本,现在已实现过滤Spam,提交Spam,解除Spam等功能,开启即可使用,也可输入自己的Akismet API Key使用 。Author: 
sypxue@gmail.com\"\"\"\n\t\tself.name=\"Akismet\"\n\t\tself.version=\"0.3.2\"\n\t\tself.AKISMET_VERSION = \"2.2.7\"\n\t\tself.AKISMET_default_Key = \"80e9452f5962\"\n\t\tself.register_action(\"pre_comment\",self.pre_comment)\n\t\tself.register_action(\"save_comment\",self.save_comment)\n\t\n\tdef comment_handler(self,comment,action,*arg1,**arg2):\n\t\t# rm 指示 是否自动过滤掉评论\n\t\trm=OptionSet.getValue(\"Akismet_AutoRemove\",False)\n\t\tif action=='pre' and rm!=True:\n\t\t\treturn\n\t\telif action=='save' and rm==True:\n\t\t\treturn\n\t\turl = arg2['blog'].baseurl\n\t\tuser_agent = os.environ.get('HTTP_USER_AGENT','')\n\t\treferrer = os.environ.get('HTTP_REFERER', 'unknown')\n\t\tAkismetItem = {\n\t\t\t'user_agent':user_agent,\n\t\t\t'referrer':referrer,\n\t\t\t'user_ip' : comment.ip,\n\t\t\t'comment_type' : 'comment', \n\t\t\t'comment_author' : comment.author.encode('utf-8'),\n\t\t\t'comment_author_email' : comment.email,\n\t\t\t'comment_author_url' : comment.weburl,\n\t\t\t'comment_content' : comment.content.encode('utf-8')\n\t\t}\n\t\tapikey = OptionSet.getValue(\"Akismet_code\",default=self.AKISMET_default_Key)\n\t\tif len(apikey)<5:\n\t\t\tapikey = self.AKISMET_default_Key\n\t\tm = AkismetManager(apikey,url)\n\t\tif m.IsSpam(AkismetItem):\n\t\t\tif rm==True:\n\t\t\t\traise Exception\n\t\t\tsComments=OptionSet.getValue(\"Akismet_Comments_v0.3\",[])\n\t\t\tif type(sComments)!=type([]):\n\t\t\t\tsComments=[]\n\t\t\tdb.Model.put(comment)\n\t\t\tsComments.append({'key':(str(comment.key()),str(comment.entry.key())),\n\t\t\t\t'other':{'user_agent':user_agent,'referrer':referrer,'url':url}})\n\t\t\tOptionSet.setValue(\"Akismet_Comments_v0.3\",\n\t\t\t\tsComments)\n\t\t\tcomment.entry.commentcount-=1\n\t\t\tcomment.entry.put()\n\t\t\te,comment.entry = comment.entry,None\n\t\t\ttry:\n\t\t\t\tdb.Model.put(comment)\n\t\t\t\tcomment.entry = e\n\t\t\texcept:\n\t\t\t\tpass\t\t\n\t\n\tdef pre_comment(self,comment,*arg1,**arg2):\n\t\tself.comment_handler(comment,'pre',*arg1,**arg2)\n\t\n\tdef save_comment(self,comment,*arg1,**arg2):\n\t\tself.comment_handler(comment,'save',*arg1,**arg2)\n\n\tdef filter(self,content,*arg1,**arg2):\n\t\tcode=OptionSet.getValue(\"Akismet_code\",default=\"\")\n\t\treturn content+str(code)\n\n\tdef SubmitAkismet(self,item,url,f):\n\t\tapikey = OptionSet.getValue(\"Akismet_code\",default=self.AKISMET_default_Key)\n\t\tif len(apikey)<5:\n\t\t\tapikey = self.AKISMET_default_Key\n\t\tm = AkismetManager(apikey,url)\n\t\ttry:\n\t\t\tif f==\"Ham\":\n\t\t\t\tm.SubmitHam(item)\n\t\t\telif f==\"Spam\":\n\t\t\t\tm.SubmitSpam(item)\n\t\texcept:\n\t\t\tpass\n\t\t\n\tdef get(self,page):\n\t\tcode=OptionSet.getValue(\"Akismet_code\",default=\"\")\n\t\tup=OptionSet.getValue(\"Akismet_Comments_v0.3\",default=[])\n\t\trm=OptionSet.getValue(\"Akismet_AutoRemove\",False)\n\t\tif type(up)!=type([]):\n\t\t\tup=[]\n\t\tdelkey = page.param('delkey')\n\t\trekey = page.param('rekey')\n\t\tif rekey or delkey:\n\t\t\tnewup = []\n\t\t\tfor i in up:\n\t\t\t\tcmtkey = i['key'][0];\n\t\t\t\tenykey = i['key'][1];\n\t\t\t\tif delkey and cmtkey==delkey:\n\t\t\t\t\tcm = Comment.get(cmtkey)\n\t\t\t\t\tdb.Model.delete(cm)\n\t\t\t\telif rekey and cmtkey==rekey:\n\t\t\t\t\tcm = Comment.get(cmtkey)\n\t\t\t\t\teny = Entry.get(enykey)\n\t\t\t\t\teny.commentcount+=1\n\t\t\t\t\teny.put()\n\t\t\t\t\tcm.entry = eny\n\t\t\t\t\tdb.Model.put(cm)\n\t\t\t\t\tself.SubmitAkismet({\n\t\t\t\t\t\t'user_agent':i['other']['user_agent'],\n\t\t\t\t\t\t'referrer':i['other']['referrer'],\n\t\t\t\t\t\t'user_ip' : 
cm.ip,\n\t\t\t\t\t\t'comment_type' : 'comment', \n\t\t\t\t\t\t'comment_author' : cm.author.encode('utf-8'),\n\t\t\t\t\t\t'comment_author_email' : cm.email,\n\t\t\t\t\t\t'comment_author_url' : cm.weburl,\n\t\t\t\t\t\t'comment_content' : cm.content.encode('utf-8')\n\t\t\t\t\t},i['other'].get('url',''),\"Ham\")\n\t\t\t\telse:\n\t\t\t\t\tnewup.append(i)\n\t\t\tif not len(up)==len(newup):\n\t\t\t\tOptionSet.setValue(\"Akismet_Comments_v0.3\",newup)\n\t\t\tup = newup\n\t\tcmts = [(Comment.get(i['key'][0]),Entry.get(i['key'][1])) for i in up]\n\t\tcomments = [u'%s%s%s%s%s%s删除 还原'%(i[0].date,\n\t\t\ti[0].author,i[0].content,i[0].email,i[0].ip,i[1].link,i[1].title,str(i[0].key()),str(i[0].key())) for i in cmts if i is not None and i[0] is not None]\n\t\tcomments = ''.join(comments)\n\t\tapikey = OptionSet.getValue(\"Akismet_code\",default=self.AKISMET_default_Key)\n\t\tif len(apikey)<5:\n\t\t\tapikey = self.AKISMET_default_Key\n\t\tapi = AkismetManager(apikey,Blog.all()[0].baseurl)\n\t\tif not code:\n\t\t\tstatus = ''\n\t\telif api.IsValidKey():\n\t\t\tstatus = 'True'\n\t\telse:\n\t\t\tstatus = 'False'\n\t\tif rm==True:\n\t\t\trmchecked='checked=\"checked\"'\n\t\telse:\n\t\t\trmchecked=''\n\t\treturn u'''

<h3>Akismet</h3>\n\t\t\t\t<form action=\"\" method=\"post\">\n\t\t\t\t\t<table>\n\t\t\t\t\t<tr><td>Akismet Api Key:</td>\n\t\t\t\t\t<td><input type=\"text\" name=\"code\" value=\"%s\"/> %s</td></tr>\n\t\t\t\t\t<tr><td>自动删除检测到的垃圾评论:</td>\n\t\t\t\t\t<td><input type=\"checkbox\" name=\"autorm\" value=\"1\" %s/></td></tr>\n\t\t\t\t\t<tr><td>删除一条正常的评论并提交Spam(输入评论的ID):</td>\n\t\t\t\t\t<td><input type=\"text\" name=\"spam\" value=\"\"/></td></tr>\n\t\t\t\t\t<tr><td colspan=\"2\"><input type=\"submit\"/></td></tr>\n\t\t\t\t\t</table>\n\t\t\t\t</form>\n\t\t\t\t<h3>被过滤的评论</h3>\n\t\t\t\t<table>\n\t\t\t\t<tr><th>日期</th><th>作者</th><th>内容</th><th>电子邮件</th><th>IP地址</th><th>文章/页面</th><th>选择操作</th></tr>\n\t\t\t\t%s\n\t\t\t\t</table>\n\t\t\t\t
'''%(code,status,rmchecked,comments)\n\t\n\tdef post(self,page):\n\t\tcode=page.param(\"code\")\n\t\tOptionSet.setValue(\"Akismet_code\",code)\n\t\trm=page.param('autorm')\n\t\tif rm and int(rm)==1:\n\t\t\trm=True\n\t\telse:\n\t\t\trm=False\n\t\toldrm = OptionSet.getValue(\"Akismet_AutoRemove\",False)\n\t\tif oldrm!=rm:\n\t\t\tOptionSet.setValue(\"Akismet_AutoRemove\",rm)\n\t\tspam=page.param(\"spam\")\n\t\tspam = len(spam)>0 and int(spam) or 0\n\t\tsOther = \"\"\n\t\tif spam>0:\n\t\t\tcm = Comment.get_by_id(spam)\n\t\t\ttry:\n\t\t\t\turl = Blog.all().fetch(1)[0].baseurl\n\t\t\t\tself.SubmitAkismet({\n\t\t\t\t\t'user_ip' : cm.ip,\n\t\t\t\t\t'comment_type' : 'comment', \n\t\t\t\t\t'comment_author' : cm.author.encode('utf-8'),\n\t\t\t\t\t'comment_author_email' : cm.email,\n\t\t\t\t\t'comment_author_url' : cm.weburl,\n\t\t\t\t\t'comment_content' : cm.content.encode('utf-8')\n\t\t\t\t},url,\"Spam\")\n\t\t\t\tsOther = u\"
评论已删除\"\n\t\t\t\tcm.delit()\n\t\t\texcept:\n\t\t\t\tsOther = u\"无法找到对应的评论项
\"\n\t\treturn sOther + self.get(page)\n\n\nclass AkismetManager():\n\tdef __init__(self,key,url):\n\t\tself.ApiKey = key\n\t\tself.Url = url\n\t\t\n\tdef ExecuteRequest(self,url,content,method=\"GET\"):\n\t\trequest = urlfetch.fetch(url,\n\t\t\tmethod='POST',\n\t\t\tpayload=content\n\t\t\t)\n\t\treturn request\n\t\t\n\tdef IsValidKey(self):\n\t\tcontent = \"key=%(key)s&blog=%(url)s&\"%{'key':self.ApiKey,'url':self.Url}\n\t\tresponse = self.ExecuteRequest(\"http://rest.akismet.com/1.1/verify-key\", \n\t\t\tcontent).content\n\t\tif response and response == 'valid':\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\t\n\tdef IsSpam(self,item=None):\n\t\tif not item:\n\t\t\traise Exception\n\t\tcontent = self.AddDefaultFields(urllib.urlencode(item))\n\t\tresponse = self.ExecuteRequest(\n\t\t\t\"http://%(key)s.rest.akismet.com/1.1/comment-check\"%{'key':self.ApiKey},\n\t\t\tcontent).content\n\t\tif response:\n\t\t\tresult = {'true':True,'false': False}\n\t\t\treturn result[response]\n\t\treturn False\n\t\t\n\tdef SubmitSpam(self,item):\n\t\tif not item:\n\t\t\traise Exception\n\t\tcontent = self.AddDefaultFields(urllib.urlencode(item))\n\t\tresponse = self.ExecuteRequest(\n\t\t\t\"http://%(key)s.rest.akismet.com/1.1/submit-spam\"%{'key':self.ApiKey},\n\t\t\tcontent).content\n\t\tif response == 'invalid':\n\t\t\traise Exception\n\t\telif len(response)>0:\n\t\t\traise Exception\n\t\t\n\tdef SubmitHam(self,item):\n\t\tif not item:\n\t\t\traise Exception\n\t\tcontent = self.AddDefaultFields(urllib.urlencode(item))\n\t\tresponse = self.ExecuteRequest(\n\t\t\t\"http://%(key)s.rest.akismet.com/1.1/submit-ham\"%{'key':self.ApiKey},\n\t\t\tcontent).content\n\t\tif response == 'invalid':\n\t\t\traise Exception\n\t\telif len(response)>0:\n\t\t\traise Exception\n\t\n\tdef AddDefaultFields(self,content):\n\t\treturn ''.join([\"key=%(key)s&blog=%(url)s&\"%{'key':self.ApiKey,'url':self.Url},content])\n"},"repo_name":{"kind":"string","value":"Alwnikrotikz/micolog2"},"path":{"kind":"string","value":"plugins/akismet.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"gpl-3.0"},"size":{"kind":"number","value":8593,"string":"8,593"},"score":{"kind":"number","value":0.05514838399615523,"string":"0.055148"}}},{"rowIdx":89640,"cells":{"text":{"kind":"string","value":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\ndef index(request):\n return render(request, 'todo/index.html')\n"},"repo_name":{"kind":"string","value":"deshmukhmayur/django-todo"},"path":{"kind":"string","value":"todo/views.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":166,"string":"166"},"score":{"kind":"number","value":0.006024096385542169,"string":"0.006024"}}},{"rowIdx":89641,"cells":{"text":{"kind":"string","value":"# NeoPixel driver for MicroPython on ESP8266\n# MIT license; Copyright (c) 2016 Damien P. 
George\n\nfrom esp import neopixel_write\n\n\nclass NeoPixel:\n ORDER = (1, 0, 2, 3)\n\n def __init__(self, pin, n, bpp=3):\n self.pin = pin\n self.n = n\n self.bpp = bpp\n self.buf = bytearray(n * bpp)\n self.pin.init(pin.OUT)\n\n def __setitem__(self, index, val):\n offset = index * self.bpp\n for i in range(self.bpp):\n self.buf[offset + self.ORDER[i]] = val[i]\n\n def __getitem__(self, index):\n offset = index * self.bpp\n return tuple(self.buf[offset + self.ORDER[i]]\n for i in range(self.bpp))\n\n def fill(self, color):\n for i in range(self.n):\n self[i] = color\n\n def write(self):\n neopixel_write(self.pin, self.buf, True)\n"},"repo_name":{"kind":"string","value":"swegener/micropython"},"path":{"kind":"string","value":"ports/esp8266/modules/neopixel.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":836,"string":"836"},"score":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":89642,"cells":{"text":{"kind":"string","value":"#!/usr/bin/env python\n\nimport os\nfrom gi.repository import Gtk\nfrom gi.repository import Vte\nfrom gi.repository import GLib\nfrom gi.repository import Keybinder\nfrom gi.repository import Gdk\n\n\nclass Tida(Gtk.Window):\n\t\"\"\"A micro-drop-down terminal like TILDA\"\"\"\n\tdef __init__(self, config=None):\n\t\tGtk.Window.__init__(self)\n\t\tself.init_config(config)\n\t\tself.init_icon()\n\t\tself.init_terminal()\n\t\tGtk.main()\n\t\t\n\tdef init_config(self, config=None):\n\t\t\"\"\"Initialise the program with config if exists, else set default values\"\"\"\n\t\tif config != None:\n\t\t\tself.set_default_size(config['width'], config['heigth'])\n\t\t\tself.set_decorated(config['decorated'])\n\t\t\tself.set_skip_taskbar_hint(config['skip_taskbar_hint'])\n\t\t\tself.set_keep_above(config['keep_above'])\n\t\t\tself.set_skip_pager_hint(config['skip_pager_hint'])\n\t\t\tself.set_modal(config['modal'])\n\t\t\t\t\t\t\n\t\t\ts = Gdk.Screen.get_default()\n\t\t\tc = (s.get_width() - self.get_size()[0]) / 2.\n\t\t\tself.move(int(c), 0)\n\t\telse:\n\t\t\tself.set_decorated(False)\n\t\t\tself.set_skip_taskbar_hint(True)\n\t\t\tself.set_keep_above(True)\n\t\t\tself.set_skip_pager_hint(False)\n\t\t\tself.set_modal(False)\n\t\t\tself.set_default_size(720, 300)\n\t\t\tself.move(323, 0)\n\t\tself.init_keybinder(config)\n\t\t\n\tdef init_icon(self):\n\t\t\"\"\"Initialise status icon\"\"\"\n\t\tself.status_icon = Gtk.StatusIcon()\n\t\tabs_file_name = os.path.join(os.path.dirname(__file__), \"terminal.png\")\n\t\tself.status_icon.set_from_file(abs_file_name)\n\t\tself.status_icon.set_title(\"StatusIcon TIDA\")\n\t\tself.status_icon.set_tooltip_text(\"TIDA :>\")\n\t\t\n\tdef init_terminal(self):\n\t\t\"\"\"Initialise and add new Vte Terminal to Window\"\"\"\n\t\tself.term = Vte.Terminal()\n\t\tself.term.set_scrollback_lines(-1)\n\t\tself.term.connect('child-exited', Gtk.main_quit)\n\t\tself.term.fork_command_full(Vte.PtyFlags.DEFAULT, os.environ['HOME'], ['/usr/bin/bash'], [], GLib.SpawnFlags.DO_NOT_REAP_CHILD, None, None)\n\t\t\n\t\tself.add(self.term)\n\t\tself.connect('delete-event', Gtk.main_quit)\n\t\t\n\t\t\n\tdef init_keybinder(self, config):\n\t\t\"\"\"Initialise keybinder and bind some keys (toggle, copy, paste)\"\"\"\n\t\tKeybinder.init()\n\t\tKeybinder.set_use_cooked_accelerators(False)\n\t\tself.bind_all_key(config['key_toggle_visibility'],\n\t\t\t\t\t\t\tconfig['key_copy_to_clipboard'],\n\t\t\t\t\t\t\tconfig['key_paste_from_clipboard'])\n\n\t\t\n\tdef bind_all_key(self, key_toggle, 
key_copy, key_paste):\n\t\t\"\"\"Bind all keys used with tida\"\"\"\n\t\tKeybinder.bind(key_toggle, self.callback_toggle_visibility, \"asd\")\n\t\tKeybinder.bind(key_copy, self.callback_copy, \"asd\")\n\t\tKeybinder.bind(key_paste, self.callback_paste, \"asd\")\n\n\t\n\tdef callback_copy(self, key, asd):\n\t\t\"\"\"Callback function used when press the shortcut for copy to clipboard\"\"\"\n\t\tif self.is_visible():\n\t\t\tself.term.copy_clipboard()\n\t\t\treturn True\n\t\treturn False\n\t\n\tdef callback_paste(self, key, asd):\n\t\t\"\"\"Callback function used when press the shortcut for paste from clipboard\"\"\"\n\t\tif self.is_visible():\n\t\t\tself.term.paste_clipboard()\n\t\t\treturn True\n\t\treturn False\n\t\t\t\n\tdef callback_toggle_visibility(self, key, asd):\n\t\t\"\"\"Callback function used when press the shortcut for toggle visibility of tida\"\"\"\n\t\tif self.is_visible():\n\t\t\tself.hide()\n\t\telse:\n\t\t\tself.show_all()\n\n"},"repo_name":{"kind":"string","value":"headlins/tida"},"path":{"kind":"string","value":"Tida.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"lgpl-3.0"},"size":{"kind":"number","value":3125,"string":"3,125"},"score":{"kind":"number","value":0.03296,"string":"0.03296"}}},{"rowIdx":89643,"cells":{"text":{"kind":"string","value":"import md5\nimport os\nimport sys\n\npath = sys.argv[1]\n\ndb_file = open(os.path.join(path,\"pics_mysql.txt\"),\"w\")\nfor file_name in os.listdir(path):\n if not file_name.lower().endswith(\".gif\"): continue\n \n with open(os.path.join(path,file_name),\"rb\") as fp:\n contents = fp.read()\n \n new_file_name = md5.new(contents).hexdigest() + \".gif\"\n \n print file_name + \" --> \" + new_file_name\n \n os.rename(os.path.join(path,file_name),os.path.join(path,new_file_name))\n \n db_file.write('INSERT INTO pics (name) VALUES (\"' + new_file_name + '\");\\n')\n\ndb_file.close()"},"repo_name":{"kind":"string","value":"0x1001/funornot"},"path":{"kind":"string","value":"utils/image_convert.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"gpl-2.0"},"size":{"kind":"number","value":593,"string":"593"},"score":{"kind":"number","value":0.025295109612141653,"string":"0.025295"}}},{"rowIdx":89644,"cells":{"text":{"kind":"string","value":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (c) 2012 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\n\nfrom nova import context\nfrom nova import db\nfrom nova import exception\nfrom nova.objects import instance\nfrom nova.objects import pci_device\nfrom nova.tests.objects import test_objects\n\ndev_dict = {\n 'compute_node_id': 1,\n 'address': 'a',\n 'product_id': 'p',\n 'vendor_id': 'v',\n 'status': 'available'}\n\n\nfake_db_dev = {\n 'created_at': None,\n 'updated_at': None,\n 'deleted_at': None,\n 'deleted': None,\n 'id': 1,\n 'compute_node_id': 1,\n 'address': 'a',\n 'vendor_id': 'v',\n 'product_id': 'p',\n 'dev_type': 't',\n 'status': 'available',\n 'dev_id': 'i',\n 'label': 'l',\n 'instance_uuid': None,\n 'extra_info': '{}',\n }\n\n\nfake_db_dev_1 = {\n 'created_at': None,\n 'updated_at': None,\n 'deleted_at': None,\n 'deleted': None,\n 'id': 2,\n 'compute_node_id': 1,\n 'address': 'a1',\n 'vendor_id': 'v1',\n 'product_id': 'p1',\n 'dev_type': 't',\n 'status': 'available',\n 'dev_id': 'i',\n 'label': 'l',\n 'instance_uuid': None,\n 'extra_info': '{}',\n }\n\n\nclass _TestPciDeviceObject(object):\n def _create_fake_instance(self):\n self.inst = instance.Instance()\n self.inst.uuid = 'fake-inst-uuid'\n self.inst.pci_devices = pci_device.PciDeviceList()\n\n def _create_fake_pci_device(self):\n ctxt = context.get_admin_context()\n self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')\n db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)\n self.mox.ReplayAll()\n self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')\n\n def test_create_pci_device(self):\n self.pci_device = pci_device.PciDevice.create(dev_dict)\n self.assertEqual(self.pci_device.product_id, 'p')\n self.assertEqual(self.pci_device.obj_what_changed(),\n set(['compute_node_id', 'product_id', 'vendor_id',\n 'status', 'address', 'extra_info']))\n\n def test_pci_device_extra_info(self):\n self.dev_dict = copy.copy(dev_dict)\n self.dev_dict['k1'] = 'v1'\n self.dev_dict['k2'] = 'v2'\n self.pci_device = pci_device.PciDevice.create(self.dev_dict)\n extra_value = self.pci_device.extra_info\n self.assertEqual(extra_value.get('k1'), 'v1')\n self.assertEqual(set(extra_value.keys()), set(('k1', 'k2')))\n self.assertEqual(self.pci_device.obj_what_changed(),\n set(['compute_node_id', 'address', 'product_id',\n 'vendor_id', 'status', 'extra_info']))\n\n def test_update_device(self):\n self.pci_device = pci_device.PciDevice.create(dev_dict)\n self.pci_device.obj_reset_changes()\n changes = {'product_id': 'p2', 'vendor_id': 'v2'}\n self.pci_device.update_device(changes)\n self.assertEqual(self.pci_device.vendor_id, 'v2')\n self.assertEqual(self.pci_device.obj_what_changed(),\n set(['vendor_id', 'product_id']))\n\n def test_update_device_same_value(self):\n self.pci_device = pci_device.PciDevice.create(dev_dict)\n self.pci_device.obj_reset_changes()\n changes = {'product_id': 'p', 'vendor_id': 'v2'}\n self.pci_device.update_device(changes)\n self.assertEqual(self.pci_device.product_id, 'p')\n self.assertEqual(self.pci_device.vendor_id, 'v2')\n self.assertEqual(self.pci_device.obj_what_changed(),\n set(['vendor_id', 'product_id']))\n\n def test_get_by_dev_addr(self):\n ctxt = context.get_admin_context()\n self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')\n db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)\n self.mox.ReplayAll()\n self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')\n self.assertEqual(self.pci_device.product_id, 'p')\n 
self.assertEqual(self.pci_device.obj_what_changed(), set())\n self.assertRemotes()\n\n def test_get_by_dev_id(self):\n ctxt = context.get_admin_context()\n self.mox.StubOutWithMock(db, 'pci_device_get_by_id')\n db.pci_device_get_by_id(ctxt, 1).AndReturn(fake_db_dev)\n self.mox.ReplayAll()\n self.pci_device = pci_device.PciDevice.get_by_dev_id(ctxt, 1)\n self.assertEqual(self.pci_device.product_id, 'p')\n self.assertEqual(self.pci_device.obj_what_changed(), set())\n self.assertRemotes()\n\n def test_claim_device(self):\n self._create_fake_instance()\n self.pci_device = pci_device.PciDevice.create(dev_dict)\n self.pci_device.claim(self.inst)\n self.assertEqual(self.pci_device.status, 'claimed')\n self.assertEqual(self.pci_device.instance_uuid,\n 'fake-inst-uuid')\n self.assertEqual(len(self.inst.pci_devices), 0)\n\n def test_claim_device_fail(self):\n self._create_fake_instance()\n self._create_fake_pci_device()\n self.pci_device.status = 'allocated'\n self.assertRaises(exception.PciDeviceInvalidStatus,\n self.pci_device.claim, self.inst)\n\n def test_allocate_device(self):\n self._create_fake_instance()\n self._create_fake_pci_device()\n self.pci_device.claim(self.inst)\n self.pci_device.allocate(self.inst)\n self.assertEqual(self.pci_device.status, 'allocated')\n self.assertEqual(self.pci_device.instance_uuid, 'fake-inst-uuid')\n self.assertEqual(len(self.inst.pci_devices), 1)\n self.assertEqual(self.inst.pci_devices[0]['vendor_id'], 'v')\n self.assertEqual(self.inst.pci_devices[0]['status'], 'allocated')\n\n def test_allocacte_device_fail_status(self):\n self._create_fake_instance()\n self._create_fake_pci_device()\n self.pci_device.status = 'removed'\n self.assertRaises(exception.PciDeviceInvalidStatus,\n self.pci_device.allocate,\n self.inst)\n\n def test_allocacte_device_fail_owner(self):\n self._create_fake_instance()\n self._create_fake_pci_device()\n inst_2 = instance.Instance()\n inst_2.uuid = 'fake-inst-uuid-2'\n self.pci_device.claim(self.inst)\n self.assertRaises(exception.PciDeviceInvalidOwner,\n self.pci_device.allocate, inst_2)\n\n def test_free_claimed_device(self):\n self._create_fake_instance()\n self._create_fake_pci_device()\n self.pci_device.claim(self.inst)\n self.pci_device.free(self.inst)\n self.assertEqual(self.pci_device.status, 'available')\n self.assertEqual(self.pci_device.instance_uuid, None)\n\n def test_free_allocated_device(self):\n self._create_fake_instance()\n self._create_fake_pci_device()\n self.pci_device.claim(self.inst)\n self.pci_device.allocate(self.inst)\n self.assertEqual(len(self.inst.pci_devices), 1)\n self.pci_device.free(self.inst)\n self.assertEqual(len(self.inst.pci_devices), 0)\n self.assertEqual(self.pci_device.status, 'available')\n self.assertEqual(self.pci_device.instance_uuid, None)\n\n def test_free_device_fail(self):\n self._create_fake_pci_device()\n self.pci_device.status = 'removed'\n self.assertRaises(exception.PciDeviceInvalidStatus,\n self.pci_device.free)\n\n def test_remove_device(self):\n self._create_fake_pci_device()\n self.pci_device.remove()\n self.assertEqual(self.pci_device.status, 'removed')\n self.assertEqual(self.pci_device.instance_uuid, None)\n\n def test_remove_device_fail(self):\n self._create_fake_instance()\n self._create_fake_pci_device()\n self.pci_device.claim(self.inst)\n self.assertRaises(exception.PciDeviceInvalidStatus,\n self.pci_device.remove)\n\n def test_save(self):\n ctxt = context.get_admin_context()\n self._create_fake_pci_device()\n return_dev = dict(fake_db_dev, status='available',\n 
instance_uuid='fake-uuid-3')\n self.pci_device.status = 'allocated'\n self.pci_device.instance_uuid = 'fake-uuid-2'\n expected_updates = dict(status='allocated',\n instance_uuid='fake-uuid-2')\n self.mox.StubOutWithMock(db, 'pci_device_update')\n db.pci_device_update(ctxt, 1, 'a',\n expected_updates).AndReturn(return_dev)\n self.mox.ReplayAll()\n self.pci_device.save(ctxt)\n self.assertEqual(self.pci_device.status, 'available')\n self.assertEqual(self.pci_device.instance_uuid,\n 'fake-uuid-3')\n self.assertRemotes()\n\n def test_save_no_extra_info(self):\n return_dev = dict(fake_db_dev, status='available',\n instance_uuid='fake-uuid-3')\n\n def _fake_update(ctxt, node_id, addr, updates):\n self.extra_info = updates.get('extra_info')\n return return_dev\n\n ctxt = context.get_admin_context()\n self.stubs.Set(db, 'pci_device_update', _fake_update)\n self.pci_device = pci_device.PciDevice.create(dev_dict)\n self.pci_device.save(ctxt)\n self.assertEqual(self.extra_info, '{}')\n\n def test_save_removed(self):\n ctxt = context.get_admin_context()\n self._create_fake_pci_device()\n self.pci_device.status = 'removed'\n self.mox.StubOutWithMock(db, 'pci_device_destroy')\n db.pci_device_destroy(ctxt, 1, 'a')\n self.mox.ReplayAll()\n self.pci_device.save(ctxt)\n self.assertEqual(self.pci_device.status, 'deleted')\n self.assertRemotes()\n\n def test_save_deleted(self):\n def _fake_destroy(ctxt, node_id, addr):\n self.called = True\n\n def _fake_update(ctxt, node_id, addr, updates):\n self.called = True\n ctxt = context.get_admin_context()\n self.stubs.Set(db, 'pci_device_destroy', _fake_destroy)\n self.stubs.Set(db, 'pci_device_update', _fake_update)\n self._create_fake_pci_device()\n self.pci_device.status = 'deleted'\n self.called = False\n self.pci_device.save(ctxt)\n self.assertEqual(self.called, False)\n\n\nclass TestPciDeviceObject(test_objects._LocalTest,\n _TestPciDeviceObject):\n pass\n\n\nclass TestPciDeviceObjectRemote(test_objects._RemoteTest,\n _TestPciDeviceObject):\n pass\n\n\nfake_pci_devs = [fake_db_dev, fake_db_dev_1]\n\n\nclass _TestPciDeviceListObject(object):\n def test_get_by_compute_node(self):\n ctxt = context.get_admin_context()\n self.mox.StubOutWithMock(db, 'pci_device_get_all_by_node')\n db.pci_device_get_all_by_node(ctxt, 1).AndReturn(fake_pci_devs)\n self.mox.ReplayAll()\n devs = pci_device.PciDeviceList.get_by_compute_node(ctxt, 1)\n for i in range(len(fake_pci_devs)):\n self.assertTrue(isinstance(devs[i], pci_device.PciDevice))\n self.assertEqual(fake_pci_devs[i]['vendor_id'], devs[i].vendor_id)\n self.assertRemotes()\n\n def test_get_by_instance_uuid(self):\n ctxt = context.get_admin_context()\n fake_db_1 = dict(fake_db_dev, address='a1',\n status='allocated', instance_uuid='1')\n fake_db_2 = dict(fake_db_dev, address='a2',\n status='allocated', instance_uuid='1')\n self.mox.StubOutWithMock(db, 'pci_device_get_all_by_instance_uuid')\n db.pci_device_get_all_by_instance_uuid(ctxt, '1').AndReturn(\n [fake_db_1, fake_db_2])\n self.mox.ReplayAll()\n devs = pci_device.PciDeviceList.get_by_instance_uuid(ctxt, '1')\n self.assertEqual(len(devs), 2)\n for i in range(len(fake_pci_devs)):\n self.assertTrue(isinstance(devs[i], pci_device.PciDevice))\n self.assertEqual(devs[0].vendor_id, 'v')\n self.assertEqual(devs[1].vendor_id, 'v')\n self.assertRemotes()\n\n\nclass TestPciDeviceListObject(test_objects._LocalTest,\n _TestPciDeviceListObject):\n pass\n\n\nclass TestPciDeviceListObjectRemote(test_objects._RemoteTest,\n _TestPciDeviceListObject):\n 
pass\n"},"repo_name":{"kind":"string","value":"TieWei/nova"},"path":{"kind":"string","value":"nova/tests/objects/test_pci_device.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"size":{"kind":"number","value":12888,"string":"12,888"},"score":{"kind":"number","value":0.00015518311607697082,"string":"0.000155"}}},{"rowIdx":89645,"cells":{"text":{"kind":"string","value":"import os\nfrom pathlib import Path\nimport shutil\n\nimport joblib\n\nimport hvc\n\nfrom config import rewrite_config\n\n\nHERE = Path(__file__).parent\nDATA_FOR_TESTS = HERE / \"..\" / \"data_for_tests\"\nTEST_CONFIGS = DATA_FOR_TESTS.joinpath(\"config.yml\").resolve()\nFEATURE_FILES_DST = DATA_FOR_TESTS.joinpath(\"feature_files\").resolve()\nMODEL_FILES_DST = DATA_FOR_TESTS.joinpath(\"model_files\").resolve()\n\nconfig_feature_file_pairs = {\n \"knn\": (\"test_select_knn_ftr_grp.config.yml\", \"knn.features\"),\n \"svm\": (\"test_select_svm.config.yml\", \"svm.features\"),\n \"flatwindow\": (\"test_select_flatwindow.config.yml\", \"flatwindow.features\"),\n}\n\n\ndef main():\n for model_name, (\n select_config,\n feature_filename,\n ) in config_feature_file_pairs.items():\n print(\"running {} to create model files\".format(select_config))\n # have to put tmp_output_dir into yaml file\n select_config = TEST_CONFIGS / select_config\n feature_file = sorted(FEATURE_FILES_DST.glob(feature_filename))\n if len(feature_file) != 1:\n raise ValueError(\n \"found more than one feature file with search {}:\\n{}\".format(\n feature_filename, feature_file\n )\n )\n else:\n # call `resolve` to get full path to model file, so pytest fixtures find it from inside tmp directories\n feature_file = feature_file[0].resolve()\n\n replace_dict = {\n \"feature_file\": (\"replace with feature_file\", str(feature_file)),\n \"output_dir\": (\"replace with tmp_output_dir\", str(MODEL_FILES_DST)),\n }\n\n select_config_rewritten = rewrite_config(\n select_config, str(MODEL_FILES_DST), replace_dict\n )\n select_output_before = [\n select_output_dir\n for select_output_dir in sorted(MODEL_FILES_DST.glob(\"*select*output*\"))\n if select_output_dir.is_dir()\n ]\n\n hvc.select(select_config_rewritten)\n\n select_output_after = [\n select_output_dir\n for select_output_dir in sorted(MODEL_FILES_DST.glob(\"*select*output*\"))\n if select_output_dir.is_dir()\n ]\n\n select_output_dir = [\n after for after in select_output_after if after not in select_output_before\n ]\n\n if len(select_output_dir) != 1:\n raise ValueError(\n \"incorrect number of outputs when looking for select \"\n \"ouput dirs:\\n{}\".format(select_output_dir)\n )\n else:\n select_output_dir = select_output_dir[0]\n\n # arbitrarily grab the last .model and associated .meta file\n model_file = sorted(select_output_dir.glob(\"*/*.model\"))[-1]\n # call `resolve` to get full path to model file, so pytest fixtures find it from inside tmp directories\n model_file_dst = MODEL_FILES_DST.joinpath(model_name + \".model\").resolve()\n shutil.move(src=model_file, dst=model_file_dst)\n meta_file = sorted(select_output_dir.glob(\"*/*.meta\"))[-1]\n meta_file_dst = MODEL_FILES_DST.joinpath(model_name + \".meta\")\n shutil.move(src=str(meta_file), dst=str(meta_file_dst))\n\n # need to change 'model_filename' in .meta file\n meta_file = joblib.load(meta_file_dst)\n meta_file[\"model_filename\"] = os.path.abspath(model_file_dst)\n joblib.dump(meta_file, meta_file_dst)\n\n # clean up -- delete all the other model files, directory, and config\n 
shutil.rmtree(select_output_dir)\n os.remove(select_config_rewritten)\n\n\nif __name__ == \"__main__\":\n main()\n"},"repo_name":{"kind":"string","value":"NickleDave/hybrid-vocal-classifier"},"path":{"kind":"string","value":"tests/scripts/remake_model_files.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"bsd-3-clause"},"size":{"kind":"number","value":3608,"string":"3,608"},"score":{"kind":"number","value":0.0019401330376940134,"string":"0.00194"}}},{"rowIdx":89646,"cells":{"text":{"kind":"string","value":"# Copyright 2015 FUJITSU LIMITED\n# (C) Copyright 2016 Hewlett Packard Enterprise Development LP\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing permissions and limitations under\n# the License.\n\n\nclass BaseRepo(object):\n def __init__(self, config):\n self._find_alarm_action_sql = \\\n \"\"\"SELECT id, type, name, address, period\n FROM alarm_action as aa\n JOIN notification_method as nm ON aa.action_id = nm.id\n WHERE aa.alarm_definition_id = %s and aa.alarm_state = %s\"\"\"\n self._find_alarm_state_sql = \\\n \"\"\"SELECT state\n FROM alarm\n WHERE alarm.id = %s\"\"\"\n self._insert_notification_types_sql = \\\n \"\"\"INSERT INTO notification_method_type (name) VALUES ( %s)\"\"\"\n self._find_all_notification_types_sql = \"\"\"SELECT name from notification_method_type \"\"\"\n self._get_notification_sql = \"\"\"SELECT name, type, address, period\n FROM notification_method\n WHERE id = %s\"\"\"\n"},"repo_name":{"kind":"string","value":"openstack/monasca-notification"},"path":{"kind":"string","value":"monasca_notification/common/repositories/base/base_repo.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"size":{"kind":"number","value":1519,"string":"1,519"},"score":{"kind":"number","value":0.0032916392363396972,"string":"0.003292"}}},{"rowIdx":89647,"cells":{"text":{"kind":"string","value":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"mixup: Beyond Empirical Risk Minimization.\n\nAdaption to SSL of MixUp: https://arxiv.org/abs/1710.09412\n\"\"\"\nimport functools\nimport os\n\nimport tensorflow as tf\nfrom absl import app\nfrom absl import flags\n\nfrom libml import data, utils, models\nfrom libml.utils import EasyDict\n\nFLAGS = flags.FLAGS\n\n\nclass Mixup(models.MultiModel):\n\n def augment(self, x, l, beta, **kwargs):\n del kwargs\n mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])\n mix = tf.maximum(mix, 1 - mix)\n xmix = x * mix + x[::-1] * (1 - mix)\n lmix = l * mix[:, :, 0, 0] + 
l[::-1] * (1 - mix[:, :, 0, 0])\n return xmix, lmix\n\n def model(self, batch, lr, wd, ema, **kwargs):\n hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]\n xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training\n x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')\n y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')\n l_in = tf.placeholder(tf.int32, [batch], 'labels')\n wd *= lr\n classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits\n\n def get_logits(x):\n logits = classifier(x, training=True)\n return logits\n\n x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)\n logits_x = get_logits(x)\n post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)\n labels_y = tf.stop_gradient(labels_y)\n logits_y = get_logits(y)\n\n loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)\n loss_xe = tf.reduce_mean(loss_xe)\n loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)\n loss_xeu = tf.reduce_mean(loss_xeu)\n tf.summary.scalar('losses/xe', loss_xe)\n tf.summary.scalar('losses/xeu', loss_xeu)\n\n ema = tf.train.ExponentialMovingAverage(decay=ema)\n ema_op = ema.apply(utils.model_vars())\n ema_getter = functools.partial(utils.getter_ema, ema)\n post_ops.append(ema_op)\n post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])\n\n train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)\n with tf.control_dependencies([train_op]):\n train_op = tf.group(*post_ops)\n\n return EasyDict(\n xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,\n classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.\n classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))\n\n\ndef main(argv):\n utils.setup_main()\n del argv # Unused.\n dataset = data.DATASETS()[FLAGS.dataset]()\n log_width = utils.ilog2(dataset.width)\n model = Mixup(\n os.path.join(FLAGS.train_dir, dataset.name),\n dataset,\n lr=FLAGS.lr,\n wd=FLAGS.wd,\n arch=FLAGS.arch,\n batch=FLAGS.batch,\n nclass=dataset.nclass,\n ema=FLAGS.ema,\n beta=FLAGS.beta,\n\n scales=FLAGS.scales or (log_width - 2),\n filters=FLAGS.filters,\n repeat=FLAGS.repeat)\n model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)\n\n\nif __name__ == '__main__':\n utils.setup_tf()\n flags.DEFINE_float('wd', 0.02, 'Weight decay.')\n flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')\n flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')\n flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')\n flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')\n flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')\n FLAGS.set_default('dataset', 'cifar10.3@250-5000')\n FLAGS.set_default('batch', 64)\n FLAGS.set_default('lr', 0.002)\n FLAGS.set_default('train_kimg', 1 << 16)\n app.run(main)\n"},"repo_name":{"kind":"string","value":"google-research/remixmatch"},"path":{"kind":"string","value":"mixup.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"size":{"kind":"number","value":4608,"string":"4,608"},"score":{"kind":"number","value":0.0026041666666666665,"string":"0.002604"}}},{"rowIdx":89648,"cells":{"text":{"kind":"string","value":"# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Python functions which run only within a Jupyter notebook.\"\"\"\n\n# internal imports\nimport IPython\n\nfrom magenta.music import midi_synth\n\n_DEFAULT_SAMPLE_RATE = 44100\n\n\ndef play_sequence(sequence,\n synth=midi_synth.synthesize,\n sample_rate=_DEFAULT_SAMPLE_RATE,\n **synth_args):\n \"\"\"Creates an interactive player for a synthesized note sequence.\n\n This function should only be called from a Jupyter notebook.\n\n Args:\n sequence: A music_pb2.NoteSequence to synthesize and play.\n synth: A synthesis function that takes a sequence and sample rate as input.\n sample_rate: The sample rate at which to synthesize.\n **synth_args: Additional keyword arguments to pass to the synth function.\n \"\"\"\n array_of_floats = synth(sequence, sample_rate=sample_rate, **synth_args)\n IPython.display.display(IPython.display.Audio(array_of_floats,\n rate=sample_rate))\n"},"repo_name":{"kind":"string","value":"YoshikawaMasashi/magenta"},"path":{"kind":"string","value":"magenta/music/notebook_utils.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"size":{"kind":"number","value":1557,"string":"1,557"},"score":{"kind":"number","value":0.0019267822736030828,"string":"0.001927"}}},{"rowIdx":89649,"cells":{"text":{"kind":"string","value":"from collections import defaultdict\nimport mock\nfrom searx.engines import bing_news\nfrom searx.testing import SearxTestCase\nimport lxml\n\n\nclass TestBingNewsEngine(SearxTestCase):\n\n def test_request(self):\n bing_news.supported_languages = ['en', 'fr']\n query = 'test_query'\n dicto = defaultdict(dict)\n dicto['pageno'] = 1\n dicto['language'] = 'fr-FR'\n dicto['time_range'] = ''\n params = bing_news.request(query, dicto)\n self.assertIn('url', params)\n self.assertIn(query, params['url'])\n self.assertIn('bing.com', params['url'])\n self.assertIn('fr', params['url'])\n\n dicto['language'] = 'all'\n params = bing_news.request(query, dicto)\n self.assertIn('en', params['url'])\n\n def test_no_url_in_request_year_time_range(self):\n dicto = defaultdict(dict)\n query = 'test_query'\n dicto['time_range'] = 'year'\n params = bing_news.request(query, dicto)\n self.assertEqual({}, params['url'])\n\n def test_response(self):\n self.assertRaises(AttributeError, bing_news.response, None)\n self.assertRaises(AttributeError, bing_news.response, [])\n self.assertRaises(AttributeError, bing_news.response, '')\n self.assertRaises(AttributeError, bing_news.response, '[]')\n\n response = mock.Mock(content='')\n self.assertEqual(bing_news.response(response), [])\n\n response = mock.Mock(content='')\n self.assertEqual(bing_news.response(response), [])\n\n html = \"\"\"\n\n \n python - Bing News\n https://www.bing.com:443/news/search?q=python&amp;setmkt=en-US&amp;first=1&amp;format=RSS\n Search results\n \n http://10.53.64.9/rsslogo.gif\n test\n https://www.bing.com:443/news/search?q=test&amp;setmkt=en-US&amp;first=1&amp;format=RSS\n \n Copyright\n \n 
Title\n https://www.bing.com/news/apiclick.aspx?ref=FexRss&amp;aid=&amp;tid=c237eccc50bd4758b106a5e3c94fce09&amp;url=http%3a%2f%2furl.of.article%2f&amp;c=xxxxxxxxx&amp;mkt=en-us\n Article Content\n Tue, 02 Jun 2015 13:37:00 GMT\n Infoworld\n http://a1.bing4.com/th?id=ON.13371337133713371337133713371337&amp;pid=News\n w={0}&amp;h={1}&amp;c=7\n \n 620\n 413\n \n \n Another Title\n https://www.bing.com/news/apiclick.aspx?ref=FexRss&amp;aid=&amp;tid=c237eccc50bd4758b106a5e3c94fce09&amp;url=http%3a%2f%2fanother.url.of.article%2f&amp;c=xxxxxxxxx&amp;mkt=en-us\n Another Article Content\n Tue, 02 Jun 2015 13:37:00 GMT\n \n \n\"\"\" # noqa\n response = mock.Mock(content=html.encode('utf-8'))\n results = bing_news.response(response)\n self.assertEqual(type(results), list)\n self.assertEqual(len(results), 2)\n self.assertEqual(results[0]['title'], 'Title')\n self.assertEqual(results[0]['url'], 'http://url.of.article/')\n self.assertEqual(results[0]['content'], 'Article Content')\n self.assertEqual(results[0]['img_src'], 'https://www.bing.com/th?id=ON.13371337133713371337133713371337')\n self.assertEqual(results[1]['title'], 'Another Title')\n self.assertEqual(results[1]['url'], 'http://another.url.of.article/')\n self.assertEqual(results[1]['content'], 'Another Article Content')\n self.assertNotIn('img_src', results[1])\n\n html = \"\"\"\n\n \n python - Bing News\n https://www.bing.com:443/news/search?q=python&amp;setmkt=en-US&amp;first=1&amp;format=RSS\n Search results\n \n http://10.53.64.9/rsslogo.gif\n test\n https://www.bing.com:443/news/search?q=test&amp;setmkt=en-US&amp;first=1&amp;format=RSS\n \n Copyright\n \n Title\n http://another.url.of.article/\n Article Content\n garbage\n Infoworld\n http://another.bing.com/image\n w={0}&amp;h={1}&amp;c=7\n \n 620\n 413\n \n \n\"\"\" # noqa\n response = mock.Mock(content=html.encode('utf-8'))\n results = bing_news.response(response)\n self.assertEqual(type(results), list)\n self.assertEqual(len(results), 1)\n self.assertEqual(results[0]['title'], 'Title')\n self.assertEqual(results[0]['url'], 'http://another.url.of.article/')\n self.assertEqual(results[0]['content'], 'Article Content')\n self.assertEqual(results[0]['img_src'], 'http://another.bing.com/image')\n\n html = \"\"\"\n\n \n python - Bing News\n https://www.bing.com:443/news/search?q=python&amp;setmkt=en-US&amp;first=1&amp;format=RSS\n Search results\n \n http://10.53.64.9/rsslogo.gif\n test\n https://www.bing.com:443/news/search?q=test&amp;setmkt=en-US&amp;first=1&amp;format=RSS\n \n \n\"\"\" # noqa\n\n response = mock.Mock(content=html.encode('utf-8'))\n results = bing_news.response(response)\n self.assertEqual(type(results), list)\n self.assertEqual(len(results), 0)\n\n html = \"\"\"gabarge\"\"\"\n response = mock.Mock(content=html.encode('utf-8'))\n self.assertRaises(lxml.etree.XMLSyntaxError, bing_news.response, response)\n"},"repo_name":{"kind":"string","value":"jcherqui/searx"},"path":{"kind":"string","value":"tests/unit/engines/test_bing_news.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"agpl-3.0"},"size":{"kind":"number","value":7039,"string":"7,039"},"score":{"kind":"number","value":0.00042619690296917176,"string":"0.000426"}}},{"rowIdx":89650,"cells":{"text":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as 
history_file:\n history = history_file.read()\n\nrequirements = [\n \"jsonschema\"\n # TODO: put package requirements here\n]\n\ntest_requirements = [\n \"jsonschema\"\n # TODO: put package test requirements here\n]\n\nsetup(\n name='pycorm',\n version='0.2.13',\n description=\"a pico orm that uses jsonschema\",\n long_description=readme + '\\n\\n' + history,\n author=\"Johannes Valbjørn\",\n author_email='johannes.valbjorn@gmail.com',\n url='https://github.com/sloev/pycorm',\n packages=[\n 'pycorm',\n ],\n package_dir={'pycorm':\n 'pycorm'},\n include_package_data=True,\n install_requires=requirements,\n license=\"MIT\",\n zip_safe=False,\n keywords='pycorm',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7'\n ],\n test_suite='tests',\n tests_require=test_requirements\n)\n"},"repo_name":{"kind":"string","value":"sloev/pycorm"},"path":{"kind":"string","value":"setup.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":1378,"string":"1,378"},"score":{"kind":"number","value":0.0007262164124909223,"string":"0.000726"}}},{"rowIdx":89651,"cells":{"text":{"kind":"string","value":"from sklearn2sql_heroku.tests.classification import generic as class_gen\n\n\nclass_gen.test_model(\"DummyClassifier\" , \"BreastCancer\" , \"mysql\")\n"},"repo_name":{"kind":"string","value":"antoinecarme/sklearn2sql_heroku"},"path":{"kind":"string","value":"tests/classification/BreastCancer/ws_BreastCancer_DummyClassifier_mysql_code_gen.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"bsd-3-clause"},"size":{"kind":"number","value":142,"string":"142"},"score":{"kind":"number","value":0.014084507042253521,"string":"0.014085"}}},{"rowIdx":89652,"cells":{"text":{"kind":"string","value":"import io\nimport logging\nfrom collections import defaultdict\n\nfrom numpy import linspace\nfrom scipy import interp\nfrom sklearn.metrics import (auc, average_precision_score,\n precision_recall_curve)\nfrom tabulate import tabulate\n\nfrom .test_statistic import ClassifierStatistic, TestStatistic\n\nlogger = logging.getLogger(__name__)\n\n\nclass precision_recall(ClassifierStatistic):\n \"\"\"\n Constructs a precision/recall statistics generator.\n See https://en.wikipedia.org/wiki/Precision_and_recall\n\n When applied to a test set, the `score()` method will return a dictionary\n with four fields:\n\n * auc: the area under the precision-recall curve\n * precisions: a list of precisions\n * recalls: a list of recalls\n * thresholds: a list of probability thresholds\n \"\"\"\n\n @classmethod\n def _single_class_stat(cls, scores, labels, comparison_label):\n y_proba = [s['probability'][comparison_label] for s in scores]\n\n y_true = [l == comparison_label for l in labels]\n precisions, recalls, thresholds = \\\n precision_recall_curve(y_true, y_proba)\n\n return {\n 'auc': average_precision_score(y_true, y_proba),\n 'precisions': list(precisions),\n 'recalls': list(recalls)\n }\n\n def merge(self, stats):\n individual_auc = defaultdict(list)\n label_sum_recalls = defaultdict(float)\n for stat in stats:\n for label, label_stat in stat.items():\n individual_auc[label].append(label_stat['auc'])\n precisions, recalls = \\\n label_stat['precisions'], label_stat['recalls']\n 
label_sum_recalls[label] += \\\n interp(linspace(0, 1, 100), precisions, recalls)\n\n merged_stat = {}\n for label, sum_recalls in label_sum_recalls.items():\n mean_recalls = sum_recalls / len(stats)\n interp_auc = auc(linspace(0, 1, 100), mean_recalls)\n logger.debug(\"interp_auc={0}, individual_auc={1}\"\n .format(interp_auc, individual_auc[label]))\n\n merged_stat[label] = {\n 'auc': interp_auc,\n 'precisions': list(linspace(0, 1, 100)),\n 'recalls': list(mean_recalls)\n }\n\n return merged_stat\n\n @classmethod\n def format(cls, stat, format=\"str\"):\n if format == \"str\":\n return cls.format_str(stat)\n elif format == \"json\":\n return {label: {'auc': round(ss['auc'], 3)}\n for label, ss in stat.items()}\n else:\n raise TypeError(\"Format '{0}' not available for {1}.\"\n .format(format, cls.__name__))\n\n @classmethod\n def format_str(cls, stats):\n formatted = io.StringIO()\n\n if 'auc' in stats and 'thresholds' in stats:\n # Single class\n formatted.write(\"PR-AUC: {0}\".format(round(stats['auc'], 3)))\n else:\n # multiple classes\n formatted.write(\"PR-AUC:\\n\")\n\n table_data = [(repr(label), round(stats[label]['auc'], 3))\n for label in sorted(stats.keys())]\n formatted.write(\"\".join([\"\\t\" + line + \"\\n\" for line in\n tabulate(table_data).split(\"\\n\")]))\n\n return formatted.getvalue()\n\nTestStatistic.register(\"precision_recall\", precision_recall)\nTestStatistic.register(\"pr\", precision_recall) # Backwards compatible\n"},"repo_name":{"kind":"string","value":"yafeunteun/wikipedia-spam-classifier"},"path":{"kind":"string","value":"revscoring/revscoring/scorer_models/test_statistics/precision_recall.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":3506,"string":"3,506"},"score":{"kind":"number","value":0.0011409013120365088,"string":"0.001141"}}},{"rowIdx":89653,"cells":{"text":{"kind":"string","value":"from datetime import datetime\nfrom casexml.apps.case.xml.generator import date_to_xml_string\n\nDUMMY_ID = \"foo\"\nDUMMY_USERNAME = \"mclovin\"\nDUMMY_PASSWORD = \"changeme\"\nDUMMY_PROJECT = \"domain\"\n\n\ndef dummy_user_xml(user=None):\n username = user.username if user else DUMMY_USERNAME\n password = user.password if user else DUMMY_PASSWORD\n user_id = user.user_id if user else DUMMY_ID\n date_joined = user.date_joined if user else datetime.utcnow()\n project = user.domain if user else DUMMY_PROJECT\n\n return \"\"\"\n \n {}\n {}\n {}\n {}\n \n \n \n \n {}\n arbitrary\n \n \"\"\".format(\n username,\n password,\n user_id,\n date_to_xml_string(date_joined),\n project\n )\n\nDUMMY_RESTORE_XML_TEMPLATE = (\"\"\"\n\n %(message)s\n \n %(restore_id)s\n \n %(user_xml)s\n %(case_xml)s\n\n\"\"\")\n\n\ndef dummy_restore_xml(restore_id, case_xml=\"\", items=None, user=None):\n return DUMMY_RESTORE_XML_TEMPLATE % {\n \"restore_id\": restore_id,\n \"items_xml\": '' if items is None else (' items=\"%s\"' % items),\n \"user_xml\": dummy_user_xml(user),\n \"case_xml\": case_xml,\n \"message\": \"Successfully restored account mclovin!\"\n }\n"},"repo_name":{"kind":"string","value":"dimagi/commcare-hq"},"path":{"kind":"string","value":"corehq/ex-submodules/casexml/apps/phone/tests/dummy.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"bsd-3-clause"},"size":{"kind":"number","value":1824,"string":"1,824"},"score":{"kind":"number","value":0.0005482456140350877,"string":"0.000548"}}},{"rowIdx":89654,"cells":{"text":{"kind":"string","value":"#!/usr/bin/env python\n# 
Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom google.api_core.client_options import ClientOptions\n\nimport os\nimport logging\nimport googleapiclient.discovery\n\nlogging.basicConfig()\n\n# In this sample, we will reply on 6 features only:\n# trip_miles trip_seconds fare\n# trip_start_month trip_start_hour trip_start_day\ninstances = [\n [1.1, 420, 625, 8, 16, 3],\n [0.3, 960, 1485, 3, 22, 2],\n [1.0, 300, 505, 1, 1, 1],\n]\n\nPROJECT_ID = os.getenv('PROJECT_ID')\nMODEL_NAME = os.getenv('MODEL_NAME')\nMODEL_VERSION = os.getenv('MODEL_VERSION')\nREGION = os.getenv('REGION')\n\nlogging.info('PROJECT_ID: %s', PROJECT_ID)\nlogging.info('MODEL_NAME: %s', MODEL_NAME)\nlogging.info('MODEL_VERSION: %s', MODEL_VERSION)\nlogging.info('REGION: %s', REGION)\n\nprefix = \"{}-ml\".format(REGION) if REGION else \"ml\"\napi_endpoint = \"https://{}.googleapis.com\".format(prefix)\nclient_options = ClientOptions(api_endpoint=api_endpoint)\n\n# Use Regional support\nservice = googleapiclient.discovery.build('ml', 'v1',\n cache_discovery=False,\n client_options=client_options)\n\nname = 'projects/{}/models/{}/versions/{}'.format(PROJECT_ID, MODEL_NAME,\n MODEL_VERSION)\n\nresponse = service.projects().predict(\n name=name,\n body={'instances': instances}\n).execute()\n\nif 'error' in response:\n logging.error(response['error'])\nelse:\n print(response['predictions'])\n"},"repo_name":{"kind":"string","value":"GoogleCloudPlatform/ai-platform-samples"},"path":{"kind":"string","value":"prediction/xgboost/structured/base/prediction/predict.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"size":{"kind":"number","value":2134,"string":"2,134"},"score":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":89655,"cells":{"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"File containing a Windows Registry plugin to parse the AMCache.hve file.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport pyregf\n\nfrom dfdatetime import filetime\nfrom dfdatetime import posix_time\nfrom dfwinreg import definitions as dfwinreg_definitions\nfrom plaso.containers import events\nfrom plaso.containers import time_events\nfrom plaso.lib import definitions\nfrom plaso.parsers import interface\nfrom plaso.parsers import manager\n\n\nclass AMCacheFileEventData(events.EventData):\n \"\"\"AMCache file event data.\n\n Attributes:\n company_name (str): company name that created product file belongs to.\n file_description (str): description of file.\n file_reference (str): file system file reference, for example 9-1 (MFT\n entry - sequence number).\n file_size (int): size of file in bytes.\n file_version (str): version of file.\n full_path (str): full path of file.\n language_code (int): language code of file.\n product_name (str): product name file belongs to.\n program_identifier (str): GUID of entry under Root/Program key file belongs\n to.\n sha1 (str): 
SHA-1 of file.\n \"\"\"\n\n DATA_TYPE = 'windows:registry:amcache'\n\n def __init__(self):\n \"\"\"Initializes event data.\"\"\"\n super(AMCacheFileEventData, self).__init__(data_type=self.DATA_TYPE)\n self.company_name = None\n self.file_description = None\n self.file_reference = None\n self.file_size = None\n self.file_version = None\n self.full_path = None\n self.language_code = None\n self.product_name = None\n self.program_identifier = None\n self.sha1 = None\n\nclass AMCacheProgramEventData(events.EventData):\n \"\"\"AMCache programs event data.\n\n Attributes:\n entry_type (str): type of entry (usually AddRemoveProgram).\n file_paths (str): file paths of installed program.\n files (str): list of files belonging to program.\n language_code (int): language_code of program.\n msi_package_code (str): MSI package code of program.\n msi_product_code (str): MSI product code of program.\n name (str): name of installed program.\n package_code (str): package code of program.\n product_code (str): product code of program.\n publisher (str): publisher of program.\n uninstall_key (str): unicode string of uninstall registry key for program.\n version (str): version of program.\n \"\"\"\n\n DATA_TYPE = 'windows:registry:amcache:programs'\n\n def __init__(self):\n \"\"\"Initializes event data.\"\"\"\n super(AMCacheProgramEventData, self).__init__(data_type=self.DATA_TYPE)\n self.entry_type = None\n self.file_paths = None\n self.files = None\n self.language_code = None\n self.msi_package_code = None\n self.msi_product_code = None\n self.name = None\n self.package_code = None\n self.product_code = None\n self.publisher = None\n self.uninstall_key = None\n self.version = None\n\nclass AMCacheParser(interface.FileObjectParser):\n \"\"\"AMCache Registry plugin for recently run programs.\"\"\"\n\n NAME = 'amcache'\n DATA_FORMAT = 'AMCache Windows NT Registry (AMCache.hve) file'\n\n # Contains: {value name: attribute name}\n _FILE_REFERENCE_KEY_VALUES = {\n '0': 'product_name',\n '1': 'company_name',\n '3': 'language_code',\n '5': 'file_version',\n '6': 'file_size',\n 'c': 'file_description',\n '15': 'full_path',\n '100': 'program_identifier',\n '101': 'sha1'}\n\n _AMCACHE_COMPILATION_TIME = 'f'\n _AMCACHE_FILE_MODIFICATION_TIME = '11'\n _AMCACHE_FILE_CREATION_TIME = '12'\n _AMCACHE_ENTRY_WRITE_TIME = '17'\n\n _AMCACHE_P_INSTALLATION_TIME = 'a'\n\n _AMCACHE_P_FILES = 'Files'\n\n _PRODUCT_KEY_VALUES = {\n '0': 'name',\n '1': 'version',\n '2': 'publisher',\n '3': 'language_code',\n '6': 'entry_type',\n '7': 'uninstall_key',\n 'd': 'file_paths',\n 'f': 'product_code',\n '10': 'package_code',\n '11': 'msi_product_code',\n '12': 'msi_package_code',\n }\n\n #TODO Add GetFormatSpecification when issues are fixed with adding\n # multiple parsers for the same file format (in this case regf files)\n # AddNewSignature ->\n # b'\\x41\\x00\\x6d\\x00\\x63\\x00\\x61\\x00\\x63\\x00\\x68\\x00\\x65', offset=88\n\n def _GetValueDataAsObject(self, parser_mediator, value):\n \"\"\"Retrieves the value data as an object.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n value (pyregf_value): value.\n\n Returns:\n object: data as a Python type or None if the value cannot be read.\n \"\"\"\n try:\n if value.type in (\n dfwinreg_definitions.REG_SZ,\n dfwinreg_definitions.REG_EXPAND_SZ,\n dfwinreg_definitions.REG_LINK):\n value_data = value.get_data_as_string()\n\n elif value.type in (\n dfwinreg_definitions.REG_DWORD,\n 
dfwinreg_definitions.REG_DWORD_BIG_ENDIAN,\n dfwinreg_definitions.REG_QWORD):\n value_data = value.get_data_as_integer()\n\n elif value.type == dfwinreg_definitions.REG_MULTI_SZ:\n value_data = list(value.get_data_as_multi_string())\n\n else:\n value_data = value.data\n\n except (IOError, OverflowError) as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to read data from value: {0:s} with error: {1!s}'.format(\n value.name, exception))\n return None\n\n return value_data\n\n def _ParseFileKey(self, parser_mediator, file_key):\n \"\"\"Parses a Root\\\\File key.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n file_key (pyregf.key): the File key.\n \"\"\"\n for volume_key in file_key.sub_keys:\n for file_reference_key in volume_key.sub_keys:\n self._ParseFileReferenceKey(parser_mediator, file_reference_key)\n\n def _ParseFileReferenceKey(self, parser_mediator, file_reference_key):\n \"\"\"Parses a file reference key (sub key of Root\\\\File\\\\%VOLUME%) for events.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n file_reference_key (pyregf.key): file reference key.\n \"\"\"\n event_data = AMCacheFileEventData()\n\n try:\n if '0000' in file_reference_key.name:\n # A NTFS file is a combination of MFT entry and sequence number.\n sequence_number, mft_entry = file_reference_key.name.split('0000')\n mft_entry = int(mft_entry, 16)\n sequence_number = int(sequence_number, 16)\n event_data.file_reference = '{0:d}-{1:d}'.format(\n mft_entry, sequence_number)\n else:\n # A FAT file is a single number.\n file_reference = int(file_reference_key.name, 16)\n event_data.file_reference = '{0:d}'.format(file_reference)\n\n except (ValueError, TypeError):\n pass\n\n for value_name, attribute_name in self._FILE_REFERENCE_KEY_VALUES.items():\n value = file_reference_key.get_value_by_name(value_name)\n if not value:\n continue\n\n value_data = self._GetValueDataAsObject(parser_mediator, value)\n if attribute_name == 'sha1' and value_data.startswith('0000'):\n # Strip off the 4 leading zero's from the sha1 hash.\n value_data = value_data[4:]\n\n setattr(event_data, attribute_name, value_data)\n\n amcache_time_value = file_reference_key.get_value_by_name(\n self._AMCACHE_ENTRY_WRITE_TIME)\n if amcache_time_value:\n amcache_time = filetime.Filetime(amcache_time_value.get_data_as_integer())\n event = time_events.DateTimeValuesEvent(\n amcache_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n creation_time_value = file_reference_key.get_value_by_name(\n self._AMCACHE_FILE_CREATION_TIME)\n if creation_time_value:\n creation_time = filetime.Filetime(\n creation_time_value.get_data_as_integer())\n event = time_events.DateTimeValuesEvent(\n creation_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n modification_time_value = file_reference_key.get_value_by_name(\n self._AMCACHE_FILE_MODIFICATION_TIME)\n if modification_time_value:\n modification_time = filetime.Filetime(\n modification_time_value.get_data_as_integer())\n event = time_events.DateTimeValuesEvent(\n modification_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n compilation_time_value = file_reference_key.get_value_by_name(\n self._AMCACHE_COMPILATION_TIME)\n if 
compilation_time_value:\n link_time = posix_time.PosixTime(\n compilation_time_value.get_data_as_integer())\n event = time_events.DateTimeValuesEvent(\n link_time, definitions.TIME_DESCRIPTION_CHANGE)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n def _ParseProgramKey(self, parser_mediator, program_key):\n \"\"\"Parses a program key (a sub key of Root\\\\Programs) for events.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n program_key (pyregf_key): program key.\n \"\"\"\n event_data = AMCacheProgramEventData()\n\n for value_name, attribute_name in self._PRODUCT_KEY_VALUES.items():\n value = program_key.get_value_by_name(value_name)\n if not value:\n continue\n\n value_data = self._GetValueDataAsObject(parser_mediator, value)\n setattr(event_data, attribute_name, value_data)\n\n installation_time_value = program_key.get_value_by_name(\n self._AMCACHE_P_INSTALLATION_TIME)\n if installation_time_value:\n installation_time = posix_time.PosixTime(\n installation_time_value.get_data_as_integer())\n event = time_events.DateTimeValuesEvent(\n installation_time, definitions.TIME_DESCRIPTION_INSTALLATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n def _ParseProgramsKey(self, parser_mediator, programs_key):\n \"\"\"Parses a Root\\\\Programs key.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n programs_key (pyregf.key): the Programs key.\n \"\"\"\n for program_key in programs_key.sub_keys:\n self._ParseProgramKey(parser_mediator, program_key)\n\n def ParseFileObject(self, parser_mediator, file_object):\n \"\"\"Parses an AMCache.hve file-like object for events.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n file_object (dfvfs.FileIO): file-like object.\n \"\"\"\n regf_file = pyregf.file()\n try:\n regf_file.open_file_object(file_object)\n except IOError:\n # The error is currently ignored -> see TODO above related to the\n # fixing of handling multiple parsers for the same file format.\n return\n\n root_key = regf_file.get_key_by_path('Root')\n if root_key:\n file_key = root_key.get_sub_key_by_path('File')\n if file_key:\n self._ParseFileKey(parser_mediator, file_key)\n\n programs_key = root_key.get_sub_key_by_path('Programs')\n if programs_key:\n self._ParseProgramsKey(parser_mediator, programs_key)\n\n regf_file.close()\n\n\nmanager.ParsersManager.RegisterParser(AMCacheParser)\n"},"repo_name":{"kind":"string","value":"rgayon/plaso"},"path":{"kind":"string","value":"plaso/parsers/amcache.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"size":{"kind":"number","value":11498,"string":"11,498"},"score":{"kind":"number","value":0.006783788484953905,"string":"0.006784"}}},{"rowIdx":89656,"cells":{"text":{"kind":"string","value":"from tornado.options import options, logging\n\nfrom itertools import product\nimport json\nimport tornado.web\nimport pymongo\nimport csv\n\nclass MongoDbLookupHandler(tornado.web.RequestHandler):\n\n def get(self, identity):\n logging.info(\"uri=%s [%s] [%s]\" % (self.request.uri, identity, self.request.arguments))\n\n ids = identity.split(\"/\")\n db_name = ids[1]\n collection = self.open_collection(db_name, ids[2])\n\n # TODO : Improve this logic to correctly parse arguments and convert to a proper mongo DB query\n args = 
self.request.arguments\n query = {}\n\n case_sensitive_lookups = frozenset(options.case_sensitive_lookups)\n normalize_fn = None\n if db_name in case_sensitive_lookups:\n normalize_fn = lambda x: x\n else:\n normalize_fn = lambda x: x.lower()\n\n for key in args.keys():\n if key != \"output\":\n iargs = args[key]\n if len(iargs) == 1:\n query[key] = normalize_fn(args[key][0])\n else:\n query[key] = {\"$in\": map(normalize_fn, args[key])}\n\n query_limit = options.mongo_lookup_query_limit\n json_items = []\n for idx, item in enumerate(collection.find(query)):\n if idx > query_limit:\n break\n\n json_item = self.jsonable_item(item)\n #json_item[\"uri\"] = self.request.uri + \"/\" + json_item[\"id\"]\n json_items.append(json_item)\n\n if self.get_argument(\"output\", \"json\") == \"tsv\":\n WriteTsv(self, json_items)\n self.set_status(200)\n return\n\n self.write({\"items\": json_items})\n self.set_status(200)\n return\n\n def jsonable_item(self, item):\n json_item = {}\n for k in item.iterkeys():\n if k == \"_id\":\n json_item[\"id\"] = str(item[\"_id\"])\n elif \"[]\" in k:\n json_item[k.replace(\"[]\", \"\")] = item[k]\n else:\n json_item[k] = item[k]\n return json_item\n\n def open_collection(self, db_name, collection_name):\n #if options.verbose:\n logging.info(\"open_collection(%s)\" % collection_name)\n\n connection = pymongo.Connection(options.mongo_lookup_uri)\n database = connection[db_name]\n return database[collection_name]\n\n\nclass MongoDbPairwiseLookupHandler(tornado.web.RequestHandler):\n def get(self, identity):\n logging.info(\"uri=%s [%s] [%s]\" % (self.request.uri, identity, self.request.arguments))\n\n args = self.request.arguments\n\n ids = identity.split(\"/\")\n\n feature_matrix_name = ids[1]\n gene_label_1 = args['gene1'][0]\n gene_label_2 = args['gene2'][0]\n cancer_label = args['cancer'][0].lower()\n\n # Get feature IDs\n fmx_collection = self.open_feature_matrix_collection(\"qed_lookups\", \"fmx_\" + feature_matrix_name)\n pairwise_collection = self.open_pairwise_collection(\"qed_lookups\", \"pw_\" + feature_matrix_name + \"_\" + cancer_label)\n\n features_1 = filter(self.feature_filter_fn, fmx_collection.find({\"cancer\": cancer_label, \"gene\": gene_label_1}))\n features_2 = filter(self.feature_filter_fn, fmx_collection.find({\"cancer\": cancer_label, \"gene\": gene_label_2}))\n feature_ids_1 = map(lambda f: f['id'], features_1)\n feature_ids_2 = map(lambda f: f['id'], features_2)\n\n # Get pairwise values\n pairwise_results = []\n for id1, id2 in product(feature_ids_1, feature_ids_2):\n pw = self.get_pairwise_result(pairwise_collection, id1, id2)\n if pw is not None:\n pairwise_results.append(pw)\n\n result = {\n \"features\": {\n gene_label_1: map(self.jsonable_item, features_1),\n gene_label_2: map(self.jsonable_item, features_2)\n },\n \"pairwise_results\": map(self.jsonable_item, pairwise_results)\n }\n\n log_msg = \"Features found: \"\n log_msg += gene_label_1 + \": \" + str(len(feature_ids_1))\n log_msg += \"\\t\" + gene_label_2 + \": \" + str(len(feature_ids_2))\n log_msg += \"\\tPairwise results: \" + str(len(pairwise_results))\n\n logging.info(log_msg)\n\n self.write(json.dumps(result))\n self.set_status(200)\n\n def feature_filter_fn(self, feature):\n fields = feature['id'].split(':')\n source = fields[1]\n\n if source == 'METH' or source == 'CNVR' or source == 'GEXP':\n return True\n elif source == 'GNAB' and fields[-1] == 'y_n_somatic':\n return True\n else:\n return False\n\n def jsonable_item(self, item):\n json_item = {}\n for k in 
item.iterkeys():\n if k == \"_id\":\n json_item[\"id\"] = str(item[\"_id\"])\n elif \"[]\" in k:\n json_item[k.replace(\"[]\", \"\")] = item[k]\n else:\n json_item[k] = item[k]\n return json_item\n\n def get_pairwise_result(self, collection, id1, id2):\n res1 = collection.find_one({\"target\": id1, \"predictor\": id2})\n res2 = collection.find_one({\"target\": id2, \"predictor\": id1})\n if res1 is not None:\n return res1\n elif res2 is not None:\n return res2\n else:\n return None\n\n def open_feature_matrix_collection(self, db_name, collection_name):\n logging.info(\"open_collection(%s)\" % collection_name)\n return self.open_collection(options.mongo_lookup_uri, db_name, collection_name)\n\n def open_pairwise_collection(self, db_name, collection_name):\n logging.info(\"open_collection(%s)\" % collection_name)\n return self.open_collection(options.mongo_pairwise_lookup_uri, db_name, collection_name)\n\n def open_collection(self, mongo_uri, db_name, collection_name):\n logging.info(\"open_collection(%s)\" % collection_name)\n\n connection = pymongo.Connection(mongo_uri)\n database = connection[db_name]\n return database[collection_name]\n\n\nclass MongoDbMutSigHandler(tornado.web.RequestHandler):\n def get(self, identity):\n logging.info(\"uri=%s [%s] [%s]\" % (self.request.uri, identity, self.request.arguments))\n\n args = self.request.arguments\n\n query = {}\n for key in args.keys():\n if key != \"cancer\":\n continue\n iargs = args[key]\n if len(iargs) == 1:\n query[key] = args[key][0].lower()\n else:\n query[key] = {\"$in\": map(lambda x: x.lower(), args[key])}\n\n if \"max_rank\" not in args:\n query[\"rank\"] = {\"$lt\": 21}\n else:\n query[\"rank\"] = {\"$lt\": int(args[\"max_rank\"][0]) + 1}\n\n collection = self.open_collection(\"qed_lookups\", \"mutsig_rankings\")\n items = []\n if \"cancer\" in query:\n items = collection.find(query)\n\n json_items = map(self.jsonable_item, items)\n if self.get_argument(\"output\", \"json\") == \"tsv\":\n WriteTsv(self, json_items)\n self.set_status(200)\n return\n\n self.write(json.dumps({ \"items\": json_items }))\n self.set_status(200)\n\n def jsonable_item(self, item):\n json_item = {}\n for k in item.iterkeys():\n if k == \"_id\":\n json_item[\"id\"] = str(item[\"_id\"])\n elif \"[]\" in k:\n json_item[k.replace(\"[]\", \"\")] = item[k]\n else:\n json_item[k] = item[k]\n return json_item\n\n def open_collection(self, db_name, collection_name):\n logging.info(\"open_collection(%s)\" % collection_name)\n\n connection = pymongo.Connection(options.mongo_lookup_uri)\n database = connection[db_name]\n return database[collection_name]\n\n\nclass MongoDbFeaturesByLocationHandler(tornado.web.RequestHandler):\n def get(self, identity):\n logging.info(\"uri=%s [%s] [%s]\" % (self.request.uri, identity, self.request.arguments))\n\n args = self.request.arguments\n ids = identity.split(\"/\")\n\n query = {\n \"chr\": str(args[\"chr\"][0]),\n \"start\": {\"$gt\": int(args[\"start\"][0])},\n \"end\": {\"$lt\": int(args[\"end\"][0])},\n \"cancer\": {\"$in\": map(lambda x: x.lower(), args[\"cancer\"])},\n \"source\": {\"$in\": map(lambda x: x.lower(), args[\"source\"])}\n }\n\n logging.info(\"query=%s\" % str(query))\n\n query_limit = options.mongo_lookup_query_limit\n collection = self.open_collection(ids[1], ids[2])\n\n items = []\n for idx, item in enumerate(collection.find(query, {'values':0})):\n if idx > query_limit: break\n items.append(item)\n\n self.write(json.dumps({ \"items\": map(self.jsonable_item, items) }))\n self.set_status(200)\n\n def 
jsonable_item(self, item):\n json_item = {}\n for k in item.iterkeys():\n if k == \"_id\":\n json_item[\"id\"] = str(item[\"_id\"])\n elif \"[]\" in k:\n json_item[k.replace(\"[]\", \"\")] = item[k]\n else:\n json_item[k] = item[k]\n return json_item\n\n def open_collection(self, db_name, collection_name):\n logging.info(\"open_collection(%s)\" % collection_name)\n\n connection = pymongo.Connection(options.mongo_lookup_uri)\n database = connection[db_name]\n return database[collection_name]\n\ndef WriteTsv(handler, items):\n handler.set_header(\"Content-Type\", \"text/tab-separated-values\")\n handler.set_header(\"Content-Disposition\", \"attachment; filename='data_export.tsv'\")\n\n tsvwriter = csv.writer(handler, delimiter='\\t')\n excludedheaders = [\"uri\",\"id\",\"p_ns_s\"]\n if len(items) > 0:\n colheaders = [a for a in items[0].keys() if a not in excludedheaders]\n tsvwriter.writerow(colheaders)\n for item in items:\n vals = []\n for colheader in colheaders:\n val = item[colheader]\n if isinstance(val, (list, tuple)):\n vals.append(len(val))\n else:\n vals.append(val)\n tsvwriter.writerow(vals)\n\n"},"repo_name":{"kind":"string","value":"cancerregulome/GeneSpot_1.0"},"path":{"kind":"string","value":"websvcs/endpoints/storage/mongodb_lookups.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":10153,"string":"10,153"},"score":{"kind":"number","value":0.002659312518467448,"string":"0.002659"}}},{"rowIdx":89657,"cells":{"text":{"kind":"string","value":"from ..core import mi, nmi\nfrom .base import (AlphaAngleBaseMetric, ContactBaseMetric, DihedralBaseMetric,\n BaseMetric)\n\nimport numpy as np\nfrom itertools import combinations_with_replacement as combinations\n\nfrom multiprocessing import Pool\nfrom contextlib import closing\n\n__all__ = ['AlphaAngleMutualInformation', 'ContactMutualInformation',\n 'DihedralMutualInformation']\n\n\nclass MutualInformationBase(BaseMetric):\n\n \"\"\"Base mutual information object\"\"\"\n\n def _partial_mutinf(self, p):\n i, j = p\n\n return self._est(self.n_bins,\n self.data[i].values,\n self.shuffled_data[j].values,\n rng=self.rng,\n method=self.method)\n\n def _exec(self):\n M = np.zeros((self.labels.size, self.labels.size))\n\n with closing(Pool(processes=self.n_threads)) as pool:\n values = pool.map(self._partial_mutinf,\n combinations(self.labels, 2))\n pool.terminate()\n\n idx = np.triu_indices_from(M)\n M[idx] = values\n\n return M + M.T - np.diag(M.diagonal())\n\n def __init__(self, normed=True, **kwargs):\n self._est = nmi if normed else mi\n self.partial_transform.__func__.__doc__ = \"\"\"\n Partial transform a mdtraj.Trajectory into an n_residue by n_residue\n matrix of mutual information scores.\n\n Parameters\n ----------\n traj : mdtraj.Trajectory\n Trajectory to transform\n shuffle : int\n Number of shuffle iterations (default: 0)\n verbose : bool\n Whether to display performance\n Returns\n -------\n result : np.ndarray, shape = (n_residue, n_residue)\n Mutual information matrix\n \"\"\"\n\n super(MutualInformationBase, self).__init__(**kwargs)\n\n\nclass AlphaAngleMutualInformation(AlphaAngleBaseMetric, MutualInformationBase):\n\n \"\"\"Mutual information calculations for alpha angles\"\"\"\n\n\nclass ContactMutualInformation(ContactBaseMetric, MutualInformationBase):\n\n \"\"\"Mutual information calculations for contacts\"\"\"\n\n\nclass DihedralMutualInformation(DihedralBaseMetric, MutualInformationBase):\n\n \"\"\"Mutual information calculations for dihedral 
angles\"\"\"\n"},"repo_name":{"kind":"string","value":"cxhernandez/mdentropy"},"path":{"kind":"string","value":"mdentropy/metrics/mutinf.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"gpl-3.0"},"size":{"kind":"number","value":2347,"string":"2,347"},"score":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":89658,"cells":{"text":{"kind":"string","value":"import model\n\nEmployeeColumns = [\"name\", \"role_id\", \"is_active\", \"street_address\", \"city\", \"state\", \"zip\", \"phone\"]\n\nclass StaffMember(object):\n\t\"\"\"\n\tRepresents a staff member\n\t\"\"\"\n\t\n\tdef __init__(self, name, roleId, isActive, street=None, city=None, state=None, zipCode=None, phone=None):\n\t\t\"\"\"\n\t\tCreates a new staff member\n\t\t\"\"\"\n\t\tself.name = name\n\t\tself.street = street\n\t\tself.city = city\n\t\tself.state = state\n\t\tself.zipCode = zipCode\n\t\tself.phone = phone\n\t\tself.roleId = roleId\n\t\tself.isActive = isActive\n\t\t\n\tdef __repr__(self):\n\t\treturn \" %s, %i, %s, %s, %s, %s, %s, %s\" % (self.name, self.roleId, self.isActive, self.street, self.city, self.state, self.zipCode, self.phone)\n\t\n\tdef __eq__(self, other):\n\t\treturn self.name == other.name \\\n\t\tand self.street == other.street \\\n\t\tand self.city == other.city \\\n\t\tand self.state == other.state \\\n\t\tand self.zipCode == other.zipCode \\\n\t\tand self.phone == other.phone \\\n\t\tand self.roleId == other.roleId \\\n\t\tand self.isActive == other.isActive\t\n\t\n\tdef fields(self):\n\t\t\"\"\"\n\t\tReturns a dictionary of all the classes fields\n\t\t\"\"\"\n\t\treturn model.getFieldMap(self)\n\t\t\n\tdef flush(self, connection, oldName=None):\n\t\t\"\"\"\n\t\tUpdates or creates the appointment in the database\n\t\t\"\"\"\n\t\tcursor = connection.cursor()\n\n\t\t#map the database fields to this objects attributes\n\t\tsqlMap = {\"name\":\"name\", \"role_id\":\"roleId\", \"is_active\":\"isActive\",\n\t\t\"street_address\":\"street\", \"city\":\"city\", \n\t\t\"zip\":\"zipCode\", \"phone\":\"phone\", \"state\":\"state\"}\n\t\t\n\t\t#map the data\n\t\tparams = model.createSqlParams(EmployeeColumns, sqlMap, self)\n\t\t\n\t\t#if a old name was given then do an update statement\n\t\tif oldName:\n\n\t\t\tquery = model.updateString(\"employee\", EmployeeColumns, \"name = %(oldName)s\")\n\t\t\tparams[\"oldName\"] = oldName\n\t\t\t\n\t\t#else do a create statement\n\t\telse:\n\t\t\tquery = model.insertString(\"employee\", EmployeeColumns)\n\t\t\t\n\t\tcursor.execute(query, params)\n\t\t\n\t\tconnection.commit()\n\t\t\n\t\tcursor.close()\n"},"repo_name":{"kind":"string","value":"jworr/scheduler"},"path":{"kind":"string","value":"model/staff.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"gpl-2.0"},"size":{"kind":"number","value":1931,"string":"1,931"},"score":{"kind":"number","value":0.05282237182806836,"string":"0.052822"}}},{"rowIdx":89659,"cells":{"text":{"kind":"string","value":"import random\nimport time\nimport logging\nimport sys\nfrom os.path import dirname\n\nsys.path.append(dirname(dirname(dirname(__file__))))\n\nimport hazelcast\n\n\ndef do_benchmark():\n THREAD_COUNT = 1\n ENTRY_COUNT = 10 * 1000\n VALUE_SIZE = 10000\n GET_PERCENTAGE = 40\n PUT_PERCENTAGE = 40\n\n logging.basicConfig(format='%(asctime)s%(msecs)03d [%(name)s] %(levelname)s: %(message)s', datefmt=\"%H:%M%:%S,\")\n logging.getLogger().setLevel(logging.INFO)\n logger = logging.getLogger(\"main\")\n\n config = hazelcast.ClientConfig()\n config.group_config.name 
= \"dev\"\n config.group_config.password = \"dev-pass\"\n\n try:\n from tests.hzrc.client import HzRemoteController\n\n rc = HzRemoteController('127.0.0.1', '9701')\n\n if not rc.ping():\n logger.info(\"Remote Controller Server not running... exiting.\")\n exit()\n logger.info(\"Remote Controller Server OK...\")\n rc_cluster = rc.createCluster(None, None)\n rc_member = rc.startMember(rc_cluster.id)\n config.network.addresses.append('{}:{}'.format(rc_member.host, rc_member.port))\n except (ImportError, NameError):\n config.network.addresses.append('127.0.0.1')\n\n client = hazelcast.HazelcastClient(config)\n my_map = client.get_map(\"default\")\n for i in range(0, 1000):\n key = int(random.random() * ENTRY_COUNT)\n operation = int(random.random() * 100)\n if operation < GET_PERCENTAGE:\n my_map.get(key)\n elif operation < GET_PERCENTAGE + PUT_PERCENTAGE:\n my_map.put(key, \"x\" * VALUE_SIZE)\n else:\n my_map.remove(key)\n\n\nif __name__ == '__main__':\n start = time.time()\n do_benchmark()\n time_taken = time.time() - start\n print(\"Took %s seconds\" % (time_taken))\n"},"repo_name":{"kind":"string","value":"hazelcast/hazelcast-python-client"},"path":{"kind":"string","value":"benchmarks/map_bench.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"size":{"kind":"number","value":1776,"string":"1,776"},"score":{"kind":"number","value":0.0016891891891891893,"string":"0.001689"}}},{"rowIdx":89660,"cells":{"text":{"kind":"string","value":"#\n# helpers.py\n#\n# Copyright (C) 2011, 2013, 2015 Uli Fouquet\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n\"\"\"\nHelpers for trivial jobs.\n\"\"\"\nimport base64\nimport cssutils\nimport logging\nimport os\nimport re\nimport shutil\nimport tempfile\nimport zipfile\nfrom bs4 import BeautifulSoup, UnicodeDammit\ntry:\n from cStringIO import StringIO # Python 2.x\nexcept ImportError: # pragma: no cover\n from io import StringIO # Python 3.x\nfrom pkg_resources import iter_entry_points\ntry:\n from urlparse import urlparse # Python 2.x\nexcept ImportError: # pragma: no cover\n from urllib.parse import urlparse # Python 3.x\nfrom six import string_types\n\n\ntry:\n basestring = basestring # Python 2.x\nexcept NameError: # pragma: no cover\n basestring = (str, bytes) # Python 3.x\n\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n \"\"\"Recursively copy an entire directory tree rooted at `src`. The\n destination directory, named by `dst`, might exist already; if\n not, thenit will be created as well as missing parent\n directories. 
Permissions and times of directories are copied with\n :func:`shutil.copystat`, individual files are copied using\n :func:`shutil.copy2`.\n\n If `symlinks` is true, symbolic links in the source tree are\n represented as symbolic links in the new tree; if false or\n omitted, the contents of the linked files are copied to the new\n tree.\n\n If ignore is given, it must be a callable that will receive as its\n arguments the directory being visited by :func:`shutil.copytree`,\n and a list of its contents, as returned by\n :func:`os.listdir`. Since :func:`copytree` is called recursively,\n the ignore callable will be called once for each directory that is\n copied. The callable must return a sequence of directory and file\n names relative to the current directory (i.e. a subset of the\n items in its second argument); these names will then be ignored in\n the copy process. :func:`shutil.ignore_patterns` can be used to\n create such a callable that ignores names based on glob-style\n patterns.\n\n If exception(s) occur, a :exc:`shutil.Error` is raised with a list\n of reasons.\n\n .. note:: This is a plain copy of the :func:`shutil.copytree`\n implementation as provided with Python >= 2.6. There is,\n however, one difference: this version will try to go on\n if the destination directory already exists.\n\n It is the callers responsibility to make sure that the\n `dst` directory is in a proper state for\n :func:`copytree`.\n \"\"\"\n if src in dst:\n raise ValueError(\"Cannot copy %s to %s: trees are nested\" % (\n src, dst))\n names = os.listdir(src)\n if ignore is not None:\n ignored_names = ignore(src, names)\n else:\n ignored_names = set()\n\n try:\n os.makedirs(dst)\n except os.error:\n pass\n errors = []\n for name in names:\n if name in ignored_names:\n continue\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n try:\n if symlinks and os.path.islink(srcname):\n linkto = os.readlink(srcname)\n os.symlink(linkto, dstname)\n elif os.path.isdir(srcname):\n copytree(srcname, dstname, symlinks, ignore)\n else:\n shutil.copy2(srcname, dstname)\n # XXX What about devices, sockets etc.?\n except (IOError, os.error) as why:\n errors.append((srcname, dstname, str(why)))\n # catch the Error from the recursive copytree so that we can\n # continue with other files\n except (shutil.Error) as why: # pragma: no cover\n errors.append((srcname, dstname, str(why)))\n try:\n shutil.copystat(src, dst)\n except (OSError) as why: # pragma: no cover\n errors.extend((src, dst, str(why)))\n if errors:\n raise shutil.Error(errors)\n\n\ndef copy_to_secure_location(src):\n \"\"\"Copy `src` to a temporay location.\n\n If `src` is a file, the complete directory containing this file\n will be copied. If `src` is a directory this directory will be\n copied.\n\n Returns the path of the newly created directory.\n\n To copy the filetree we use :func:`shutil.copytree` with no\n additional parameters. That means that symlinks won't be copied\n and other restrictions apply. 
See :func:`shutil.copytree` docs to\n check.\n \"\"\"\n if os.path.isfile(src):\n src = os.path.dirname(src)\n dst = tempfile.mkdtemp()\n copytree(src, dst)\n return dst\n\n\ndef get_entry_points(group):\n \"\"\"Get all entry point plugins registered for group `group`.\n\n The found entry points are returned as a dict with ```` as\n key and ```` as value where ```` is the name under\n which the respective plugin was registered with setuptools and\n ```` is the registered component itself.\n \"\"\"\n return dict(\n [(x.name, x.load())\n for x in iter_entry_points(group=group)])\n\n\ndef unzip(path, dst_dir):\n \"\"\"Unzip the files stored in zipfile `path` in `dst_dir`.\n\n `dst_dir` is the directory where all contents of the ZIP file is\n stored into.\n \"\"\"\n zf = zipfile.ZipFile(path)\n # Create all dirs\n dirs = sorted([name for name in zf.namelist() if name.endswith('/')])\n for dir in dirs:\n new_dir = os.path.join(dst_dir, dir)\n if not os.path.exists(new_dir):\n os.mkdir(new_dir)\n # Create all files\n for name in zf.namelist():\n if name.endswith('/'):\n continue\n outfile = open(os.path.join(dst_dir, name), 'wb')\n outfile.write(zf.read(name))\n outfile.flush()\n outfile.close()\n zf.close()\n return\n\n\ndef zip(path):\n \"\"\"Create a ZIP file out of `path`.\n\n If `path` points to a file then a ZIP archive is created with this\n file in compressed form in a newly created directory. The name of\n the created zipfile is the basename of the input file with a\n ``.zip`` extension appended.\n\n If `path` points to a directory then files and directories\n _inside_ this directory are added to the archive.\n\n Also empty directories are added although it cannot be guaranteed\n that these entries are recovered correctly later on with all tools\n and utilities on all platforms.\n\n .. note:: It is the callers responsibility to remove the directory\n the zipfile is created in after usage.\n \"\"\"\n if not os.path.isdir(path) and not os.path.isfile(path):\n raise ValueError('Must be an existing path or directory: %s' % path)\n\n new_dir = tempfile.mkdtemp()\n basename = os.path.basename(path)\n new_path = os.path.join(new_dir, basename) + '.zip'\n zout = zipfile.ZipFile(new_path, 'w', zipfile.ZIP_DEFLATED)\n\n if os.path.isfile(path):\n zout.write(path, basename)\n zout.close()\n return new_path\n\n for root, dirs, files in os.walk(path):\n for dir in dirs:\n # XXX: Maybe the wrong way to store directories?\n dir_path = os.path.join(root, dir)\n arc_name = dir_path[len(path) + 1:] + '/'\n info = zipfile.ZipInfo(arc_name)\n zout.writestr(info, '')\n for file in files:\n file_path = os.path.join(root, file)\n arc_name = file_path[len(path) + 1:]\n zout.write(file_path, arc_name)\n zout.close()\n return new_path\n\n\ndef remove_file_dir(path):\n \"\"\"Remove a directory.\n\n If `path` points to a file, the directory containing the file is\n removed. 
If `path` is a directory, this directory is removed.\n \"\"\"\n if not isinstance(path, string_types):\n return\n if not os.path.exists(path):\n return\n if os.path.isfile(path):\n path = os.path.dirname(path)\n assert path not in ['/', '/tmp'] # Safety belt\n shutil.rmtree(path)\n return\n\n\nRE_CSS_TAG = re.compile('(.+?)(\\.?\\s*){')\nRE_CSS_STMT_START = re.compile('\\s*(.*?{.*?)')\nRE_CURLY_OPEN = re.compile('{([^ ])')\nRE_CURLY_CLOSE = re.compile('([^ ])}')\nRE_EMPTY_COMMENTS = re.compile('/\\*\\s*\\*/')\n\nRE_CDATA_MASSAGE = '(((/\\*)?(.*?))?((/\\*)?]]>(\\*/)?))'\n\nMARKUP_MASSAGE = [\n (re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'),\n (re.compile(']*)>'),\n lambda x: '')\n ]\n\nCDATA_MASSAGE = MARKUP_MASSAGE\nCDATA_MASSAGE.extend([\n (re.compile(RE_CDATA_MASSAGE, re.M + re.S),\n lambda match: match.group(7))])\n\n\ndef extract_css(html_input, basename='sample.html', prettify_html=False):\n \"\"\"Scan `html_input` and replace all styles with single link to a CSS\n file.\n\n Returns tuple ``, ``.\n\n If the `html_input` contains any ``