{ // Get the span elements containing the Hugging Face text const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // Replace the alt attribute of logo images document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // Replace links in the navigation bar function replaceNavigationLinks() { // Already-replaced flag to prevent repeated runs if (window._navLinksReplaced) { return; } // Set of already-replaced links to prevent duplicate replacement const replacedLinks = new Set(); // Only search for and replace links within the navigation bar area const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // Find links within the navigation area const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // Skip links that have already been replaced if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // Replace the Spaces link - replace only once if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^\s*Spaces\s*$/i)) && linkText !== 'PDF TO Markdown' ) { link.textContent = 'PDF TO Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // Remove the Posts link else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^\s*Posts\s*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // Replace the Docs link - replace only once else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^\s*Docs\s*$/i)) && linkText !== 'Voice Cloning' ) { link.textContent = 'Voice Cloning'; link.href = 'https://vibevoice.info/'; replacedLinks.add(link); } // Remove the Enterprise link else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^\s*Enterprise\s*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // Find possibly nested Spaces and Posts text const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // Only look for text nodes within the navigation area findTextNodes(headerArea); // Replace the text nodes that were found textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'PDF TO Markdown'); } else if (text === 'Posts') { // Remove the Posts text node if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // Remove the Enterprise text node if (node.parentNode) { node.parentNode.removeChild(node); } } }); // Mark replacement as complete window._navLinksReplaced = true; } // Replace domains in code areas function replaceCodeDomains() { // Special handling for span.hljs-string and span.njs-string elements document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // Replace domains in spans with the hljs-string class (removing extra escape characters) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { 
span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // Replace domains in pre and code tags that contain git clone commands document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // Handle specific command-line examples document.querySelectorAll('pre, code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // Dedicated handling for git clone commands if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // Special handling for code snippets on the model download page document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // Special handling for code snippets in the model repository clone dialog try { // Find the dialog containing the "Clone this model repository" title const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // Find all code snippets and command examples in the dialog const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // Locate domains in clone commands more precisely document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // Handle errors without logging } } // Run the replacements once the DOM has finished loading if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // Only run the replacement when necessary - check again after 3 seconds setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] Re-checking navigation links after 3 seconds'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // Only run the replacement when necessary - check again after 3 seconds setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] Re-checking navigation links after 3 seconds'); replaceNavigationLinks(); } }, 3000); } // Add a MutationObserver to handle possible dynamic element loading const observer = new MutationObserver(mutations => { // Check whether the navigation area has changed const hasNavChanges = mutations.some(mutation => { // Check whether a header or nav element changed return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // Check whether it is a navigation element or one of its children if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // Check whether it is inside a navigation element let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // Only run the replacement when the navigation area has changed if (hasNavChanges) { // Reset the replacement state to allow replacing again window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // Start observing changes to document.body, including child nodes if (document.body) 
{ observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \", None))\n\nimport background_icons_rc\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":8153043657374581000,"string":"8,153,043,657,374,581,000"},"line_mean":{"kind":"number","value":65.2253787879,"string":"65.225379"},"line_max":{"kind":"number","value":188,"string":"188"},"alpha_frac":{"kind":"number","value":0.7106700603,"string":"0.71067"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109517,"cells":{"repo_name":{"kind":"string","value":"ama-jharrison/agdc"},"path":{"kind":"string","value":"agdc/deprecated/landsat_tiler.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"57143"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n#===============================================================================\n# Copyright 2015 Geoscience Australia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\n'''\nOriginal prototype script to reproject and tile ORTHO, NBAR & PQ datasets and \ncreate tile files and records in Datacube DB.\nRequires previous run of dbupdater.py to catalogue datasets.\n\nN.B: This functionality is now provided by landsat_ingester.py\n\nCreated on 05/10/2012\n\n@author: Alex Ip\n'''\nimport os\nimport sys\nimport argparse\nimport logging\nimport re\nimport psycopg2\nimport numpy\nimport shutil\nfrom osgeo import gdal,osr\nfrom math import floor,ceil\nfrom datetime import datetime\nfrom copy import copy\nimport time\nimport string\n \nfrom EOtools.utils import log_multiline\nfrom EOtools.execute import execute\n\nfrom agdc import DataCube\n\nTILE_OWNER = 'axi547:rs0' # Owner of tile files\n\n# Set top level standard output \nconsole_handler = logging.StreamHandler(sys.stdout)\nconsole_handler.setLevel(logging.INFO)\nconsole_formatter = logging.Formatter('%(message)s')\nconsole_handler.setFormatter(console_formatter)\n\nlogger = logging.getLogger(__name__)\nif not logger.level:\n logger.setLevel(logging.DEBUG) # Default logging level for all modules\n logger.addHandler(console_handler)\n \nclass LandsatTiler(DataCube):\n\n CONTIGUITY_BIT_INDEX = 8\n \n def getFileSizeMB(self, path):\n \"\"\"Gets the size of a file (megabytes).\n \n Arguments:\n path: file path\n \n Returns:\n File size (MB)\n \n Raises:\n OSError [Errno=2] if file does not exist\n \"\"\" \n return os.path.getsize(path) / (1024*1024)\n\n def parse_args(self):\n \"\"\"Overrides DataCube function to parse the command line arguments.\n \n Returns:\n argparse namespace object\n \"\"\"\n logger.debug(' Calling parse_args()')\n \n _arg_parser = argparse.ArgumentParser('datacube')\n \n _arg_parser.add_argument('-C', '--config', dest='config_file',\n default=os.path.join(self.agdc_root, 'agdc_default.conf'),\n help='DataCube 
configuration file')\n _arg_parser.add_argument('-d', '--debug', dest='debug',\n default=False, action='store_const', const=True,\n help='Debug mode flag')\n _arg_parser.add_argument('--refresh', dest='refresh',\n default=True, action='store_const', const=True,\n help='Refresh mode flag to force updating of existing records')\n _arg_parser.add_argument('-t', '--tile_type', dest='default_tile_type_id',\n required=False, default=None,\n help='Tile type ID of tile to be stacked')\n \n return _arg_parser.parse_args()\n \n def __init__(self, source_datacube=None, default_tile_type_id=1):\n \"\"\"Constructor\n Arguments:\n source_datacube: Optional DataCube object whose connection and data will be shared\n tile_type_id: Optional tile_type_id value (defaults to config file value = 1)\n \"\"\"\n if source_datacube:\n # Copy values from source_datacube and then override command line args\n self.__dict__ = copy(source_datacube.__dict__)\n \n args = self.parse_args()\n # Set instance attributes for every value in command line arguments file\n for attribute_name in args.__dict__.keys():\n attribute_value = args.__dict__[attribute_name]\n self.__setattr__(attribute_name, attribute_value)\n\n else:\n DataCube.__init__(self); # Call inherited constructor\n \n if self.debug:\n console_handler.setLevel(logging.DEBUG)\n\n # Turn autocommit OFF so that transaction can cover all queries for each dataset\n self.db_connection.autocommit = False\n self.db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)\n\n # Attempt to parse dates from command line arguments or config file\n try:\n self.default_tile_type_id = int(self.default_tile_type_id) \n except:\n self.default_tile_type_id = default_tile_type_id\n try:\n self.start_date = datetime.strptime(self.start_date, '%d/%m/%Y').date()\n except:\n self.start_date = None\n try:\n self.end_date = datetime.strptime(self.end_date, '%d/%m/%Y').date()\n except:\n self.end_date = None\n try:\n self.min_path = int(self.min_path) \n except:\n self.min_path = None\n try:\n self.max_path = int(self.max_path) \n except:\n self.max_path = None\n try:\n self.min_row = int(self.min_row) \n except:\n self.min_row = None\n try:\n self.max_row = int(self.max_row) \n except:\n self.max_row = None\n \n def create_tiles(self, start_date=None, end_date=None, min_path=None, max_path=None, min_row=None, max_row=None, tile_type_id=None):\n # Set default values to instance values\n start_date = start_date or self.start_date\n end_date = end_date or self.end_date\n min_path = min_path or self.min_path\n max_path = max_path or self.max_path\n min_row = min_row or self.min_row\n max_row = max_row or self.max_row\n tile_type_id = tile_type_id or self.default_tile_type_id\n \n tile_type_info = self.tile_type_dict[tile_type_id]\n def process_dataset(dataset_info):\n log_multiline(logger.debug, dataset_info, 'Dataset values', '\\t')\n \n def find_file(dataset_dir, file_pattern):\n# logger.debug('find_file(%s, %s) called', dataset_dir, file_pattern)\n assert os.path.isdir(dataset_dir), '%s is not a valid directory' % dataset_dir\n filelist = [filename for filename in os.listdir(dataset_dir) if re.match(file_pattern, filename)]\n# logger.debug('filelist = %s', filelist)\n assert len(filelist) == 1, 'Unable to find unique match for file pattern %s' % file_pattern\n return os.path.join(dataset_dir, filelist[0])\n \n def get_tile_index_range(dataset_filename):\n \"\"\"Returns integer (xmin, ymin, xmax, ymax) tuple for input GDAL dataset filename\"\"\"\n dataset = 
gdal.Open(dataset_filename)\n assert dataset, 'Unable to open dataset %s' % dataset_filename\n spatial_reference = osr.SpatialReference()\n spatial_reference.ImportFromWkt(dataset.GetProjection())\n geotrans = dataset.GetGeoTransform()\n logger.debug('geotransform = %s', geotrans)\n# latlong_spatial_reference = spatial_reference.CloneGeogCS()\n tile_spatial_reference = osr.SpatialReference()\n s = re.match('EPSG:(\\d+)', tile_type_info['crs'])\n if s:\n epsg_code = int(s.group(1))\n logger.debug('epsg_code = %d', epsg_code)\n assert tile_spatial_reference.ImportFromEPSG(epsg_code) == 0, 'Invalid EPSG code for tile projection'\n else:\n assert tile_spatial_reference.ImportFromWkt(tile_type_info['crs']), 'Invalid WKT for tile projection'\n \n logger.debug('Tile WKT = %s', tile_spatial_reference.ExportToWkt())\n \n coord_transform_to_tile = osr.CoordinateTransformation(spatial_reference, tile_spatial_reference)\n #Have looked at following with particular scene:\n #/g/data1/v10/NBAR/2009-03/LS5_TM_NBAR_P54_GANBAR01-002_110_078_20090319/scene01/LS5_TM_NBAR_P54_GANBAR01-002_110_078_20090319_B10.tif\n #MPH 04/04/2014 calculate four corners of the dataset, include cross terms of the geotransform\n xul, yul, _z = coord_transform_to_tile.TransformPoint(geotrans[0], geotrans[3], 0)\n xll, yll, _z = coord_transform_to_tile.TransformPoint(geotrans[0] + geotrans[2]*dataset.RasterYSize, geotrans[3] + geotrans[5]*dataset.RasterYSize, 0)\n xur, yur, _z = coord_transform_to_tile.TransformPoint(geotrans[0] + geotrans[1]*dataset.RasterXSize, geotrans[3] + geotrans[4]*dataset.RasterXSize, 0)\n xlr, ylr, _z = coord_transform_to_tile.TransformPoint(geotrans[0] + geotrans[1]*dataset.RasterXSize + geotrans[2]*dataset.RasterYSize, geotrans[3] + geotrans[4]*dataset.RasterXSize + geotrans[5]*dataset.RasterYSize, 0)\n xmin = min(xul, xll)\n xmax = max(xur, xlr)\n ymin = min(yll, ylr)\n ymax = max(yul, yur)\n\n logger.debug('Dataset vertex coordinates: UL = (%f, %f); LL = (%f, %f); UR = (%f, %f); LR = (%f, %f)', xul, yul, xll, yll, xur, yur, xlr, ylr)\n logger.debug('Dataset bounding box: UL = (%f, %f); LL = (%f, %f); UR = (%f, %f); LR = (%f, %f)', xmin, ymax, xmin, ymin, xmax, ymax, xmax, ymin)\n\n return (int(floor((xmin - tile_type_info['x_origin']) / tile_type_info['x_size'])), \n int(floor((ymin - tile_type_info['y_origin']) / tile_type_info['y_size'])), \n int(ceil((xmax - tile_type_info['x_origin']) / tile_type_info['x_size'])), \n int(ceil((ymax - tile_type_info['y_origin']) / tile_type_info['y_size'])))\n #Would return (-120, -27, 123, -25) on scene above\n def get_tiles_touched_by_acquisition(dataset_filename):\n \"\"\"For the quadrilateral defined by the acquisition footprint,\n return a list of overlapping tiles as [(xtile, ytile), ..., ]\"\"\"\n\n def find_intersection(X, Y):\n \"\"\"given a list of four x-coordinates, X, and a list of four y-coordinates, Y,\n determine if there is a point of intersection\"\"\"\n pvec = (X[0], Y[0])\n qvec = (X[2], Y[2])\n rvec = (X[1] - X[0], Y[1] - Y[0])\n svec = (X[3] - X[2], Y[3] - Y[2])\n rvec_cross_svec = rvec[0] * svec[1] - rvec[1] * svec[0]\n if rvec_cross_svec == 0:\n return False\n qminusp_cross_svec = (qvec[0] - pvec[0]) * svec[1] - (qvec[1] - pvec[1]) * svec[0]\n qminusp_cross_rvec = (qvec[0] - pvec[0]) * rvec[1] - (qvec[1] - pvec[1]) * rvec[0]\n tparameter = qminusp_cross_svec / rvec_cross_svec\n uparameter = qminusp_cross_rvec / rvec_cross_svec\n if tparameter > 0 and tparameter < 1 and uparameter > 0 and uparameter < 1:\n return True\n\n #get_tiles_touched_by_acquisition method starts here\n dataset = gdal.Open(dataset_filename)\n assert dataset, 'Unable to open dataset %s' % dataset_filename\n spatial_reference = osr.SpatialReference()\n spatial_reference.ImportFromWkt(dataset.GetProjection())\n geotrans = dataset.GetGeoTransform()\n logger.debug('geotransform = %s', geotrans)\n# latlong_spatial_reference = spatial_reference.CloneGeogCS()\n tile_spatial_reference = osr.SpatialReference()\n s = re.match('EPSG:(\\d+)', tile_type_info['crs'])\n if s:\n epsg_code = int(s.group(1))\n logger.debug('epsg_code = %d', epsg_code)\n assert tile_spatial_reference.ImportFromEPSG(epsg_code) == 0, 'Invalid EPSG code for tile projection'\n else:\n assert 
tile_spatial_reference.ImportFromWkt(tile_type_info['crs']), 'Invalid WKT for tile projection'\n \n logger.debug('Tile WKT = %s', tile_spatial_reference.ExportToWkt())\n coord_transform_to_tile = osr.CoordinateTransformation(spatial_reference, tile_spatial_reference)\n #Determine the bounding quadrilateral of the acquisition\n xul, yul, _z = coord_transform_to_tile.TransformPoint(geotrans[0], geotrans[3], 0)\n xll, yll, _z = coord_transform_to_tile.TransformPoint(geotrans[0] + geotrans[2]*dataset.RasterYSize, \n geotrans[3] + geotrans[5]*dataset.RasterYSize, 0)\n xur, yur, _z = coord_transform_to_tile.TransformPoint(geotrans[0] + geotrans[1]*dataset.RasterXSize,\n geotrans[3] + geotrans[4]*dataset.RasterXSize, 0)\n xlr, ylr, _z = coord_transform_to_tile.TransformPoint(geotrans[0] + geotrans[1]*dataset.RasterXSize + geotrans[2]*dataset.RasterYSize,\n geotrans[3] + geotrans[4]*dataset.RasterXSize + geotrans[5]*dataset.RasterYSize,0)\n acquisition_bbox = [(xul, yul), (xur, yur), (xlr, ylr), (xll, yll)]\n acquisition_vertex_number = len(acquisition_bbox)\n #Within this acqusition quadrilateral, we need to find all tiles with at least one vertex contained within the acquisition\n #There is an outer rectangle, which is the minimum containing rectangle for the acquisition footprint,\n #and an inner rectangle, which is the maximum rectagle contained by the acquisitiion footprint\n outer_xmin = min(xll, xul)\n outer_xmax = max(xlr, xur)\n outer_ymin = min(yll, ylr)\n outer_ymax = max(yul, yur)\n\n inner_xmin = max(xll, xul)\n inner_xmax = min(xlr, xur)\n inner_ymin = max(yll, ylr)\n inner_ymax = min(yul, yur)\n \n outer_xmin_index = int(floor((outer_xmin - tile_type_info['x_origin']) / tile_type_info['x_size']))\n outer_xmax_index = int(floor((outer_xmax - tile_type_info['x_origin']) / tile_type_info['x_size']))\n outer_ymin_index = int(floor((outer_ymin - tile_type_info['y_origin']) / tile_type_info['y_size']))\n outer_ymax_index = int(floor((outer_ymax - tile_type_info['y_origin']) / tile_type_info['y_size']))\n\n inner_xmin_index = int(floor((inner_xmin - tile_type_info['x_origin']) / tile_type_info['x_size']))\n inner_xmax_index = int(floor((inner_xmax - tile_type_info['x_origin']) / tile_type_info['x_size']))\n inner_ymin_index = int(floor((inner_ymin - tile_type_info['y_origin']) / tile_type_info['y_size']))\n inner_ymax_index = int(floor((inner_ymax - tile_type_info['y_origin']) / tile_type_info['y_size']))\n\n touched_tiles = []\n #inspect tiles from the outer rectangle\n for itile in range(outer_xmin_index, outer_xmax_index + 1):\n for jtile in range(outer_ymin_index, outer_ymax_index + 1):\n if itile >= inner_xmin_index and itile <= inner_xmax_index and jtile >= inner_ymin_index and jtile <= inner_ymax_index:\n touched_tiles.append([itile, jtile])\n continue\n #For each tile in the outer rectangle but not in the inner rectangle\n #define the upper-left vertexx\n (x0, y0) = (tile_type_info['x_origin'] + itile * tile_type_info['x_size'],\n tile_type_info['y_origin'] + (jtile + 1) * tile_type_info['y_size']) \n tile_bbox = [(x0, y0), (x0 + tile_type_info['x_size'], y0), \n (x0 + tile_type_info['x_size'], y0 - tile_type_info['y_size']),\n (x0, y0 - tile_type_info['y_size'])]\n tile_vertex_number = len(tile_bbox)\n intersection_exists = False\n for tile_vertex in range(tile_vertex_number):\n x1, y1 = tile_bbox[tile_vertex]\n x2, y2 = tile_bbox[(tile_vertex + 1) % tile_vertex_number]\n for acquisition_vertex in range(acquisition_vertex_number):\n x3, y3 = acquisition_bbox[acquisition_vertex]\n 
x4, y4 = acquisition_bbox[(acquisition_vertex + 1) % acquisition_vertex_number]\n #get intersection of the two lines (x1, y1)-to-(x2, y2) and (x3, y3)-to-(x4, y4)\n xcoords = [x1, x2, x3, x4]\n ycoords = [y1, y2, y3, y4]\n intersection_exists = find_intersection(xcoords,ycoords)\n if intersection_exists:\n touched_tiles.append([itile, jtile])\n break\n if intersection_exists:\n break\n return touched_tiles\n\n \n def find_tiles(x_index = None, y_index = None):\n \"\"\"Find any tile records for current dataset\n returns dict of tile information keyed by tile_id\n \"\"\"\n db_cursor2 = self.db_connection.cursor()\n\n sql = \"\"\"-- Check for any existing tiles\nselect\n tile_id,\n x_index,\n y_index,\n tile_type_id,\n tile_pathname,\n dataset_id,\n tile_class_id,\n tile_size\nfrom tile_footprint\ninner join tile using(x_index, y_index, tile_type_id)\nwhere (%(x_index)s is null or x_index = %(x_index)s)\n and (%(y_index)s is null or y_index = %(y_index)s)\n and tile_type_id = %(tile_type_id)s\n and (dataset_id = %(l1t_dataset_id)s\n or dataset_id = %(nbar_dataset_id)s\n or dataset_id = %(pqa_dataset_id)s)\n\n and ctime is not null -- TODO: Remove this after reload\n;\n\"\"\"\n params = {'x_index': x_index,\n 'y_index': y_index,\n 'tile_type_id': tile_type_info['tile_type_id'],\n 'l1t_dataset_id': dataset_info['l1t_dataset_id'],\n 'nbar_dataset_id': dataset_info['nbar_dataset_id'],\n 'pqa_dataset_id': dataset_info['pqa_dataset_id']}\n \n log_multiline(logger.debug, db_cursor2.mogrify(sql, params), 'SQL', '\\t')\n db_cursor2.execute(sql, params)\n tile_info = {}\n for record in db_cursor2:\n tile_info_dict = {\n 'x_index': record[1],\n 'y_index': record[2],\n 'tile_type_id': record[3],\n 'tile_pathname': record[4],\n 'dataset_id': record[5],\n 'tile_class_id': record[6],\n 'tile_size': record[7]\n }\n tile_info[record[0]] = tile_info_dict # Keyed by tile_id\n \n log_multiline(logger.debug, tile_info, 'tile_info', '\\t')\n return tile_info\n \n \n def get_vrt_band_list():\n \"\"\"Returns list of band information to create tiles\n \"\"\"\n logger.debug('get_vrt_band_list() called')\n vrt_band_list = []\n sensor_dict = self.bands[tile_type_id][(dataset_info['satellite_tag'], dataset_info['sensor_name'])]\n# log_multiline(logger.debug, sensor, 'Sensor', '\\t')\n for file_number in sorted(sensor_dict.keys()):\n band_info = sensor_dict[file_number]\n if band_info['level_name'] == 'NBAR':\n dataset_dir = dataset_info['nbar_dataset_path']\n dataset_id = dataset_info['nbar_dataset_id']\n processing_level = dataset_info['nbar_level_name']\n nodata_value = dataset_info['nbar_nodata_value']\n resampling_method = dataset_info['nbar_resampling_method']\n elif band_info['level_name'] == 'ORTHO':\n dataset_dir = dataset_info['l1t_dataset_path']\n dataset_id = dataset_info['l1t_dataset_id']\n processing_level = dataset_info['l1t_level_name']\n nodata_value = dataset_info['l1t_nodata_value']\n resampling_method = dataset_info['l1t_resampling_method']\n else:\n continue # Ignore any pan-chromatic and derived bands\n \n dataset_dir = os.path.join(dataset_dir, 'scene01')\n filename = find_file(dataset_dir, band_info['file_pattern'])\n vrt_band_list.append({'file_number': band_info['file_number'], \n 'filename': filename, \n 'name': band_info['band_name'],\n 'dataset_id': dataset_id,\n 'band_id': band_info['band_id'],\n 'processing_level': processing_level,\n 'nodata_value': nodata_value,\n 'resampling_method': resampling_method,\n 'tile_layer': band_info['tile_layer']})\n \n # Add Derived bands (only PQA at this 
stage)\n for band_level in ['PQA']:\n derived_bands = self.bands[tile_type_id][('DERIVED', band_level)]\n # log_multiline(logger.debug, derived_bands, 'derived_bands', '\\t')\n #TODO: Make this able to handle multiple layers\n band_info = [band_info for band_info in derived_bands.values() \n if band_info['level_name'] == band_level][0]\n file_pattern = band_info['file_pattern']\n dataset_dir = os.path.join(dataset_info['pqa_dataset_path'], 'scene01')\n dataset_id = dataset_info['pqa_dataset_id']\n filename = find_file(dataset_dir, file_pattern) \n processing_level = dataset_info['pqa_level_name']\n nodata_value = dataset_info['pqa_nodata_value'] # Should be None for PQA\n resampling_method = dataset_info['pqa_resampling_method']\n vrt_band_list.append({'file_number': None, \n 'filename': filename, \n 'name': band_info['band_name'],\n 'dataset_id': dataset_id,\n 'band_id': band_info['band_id'],\n 'processing_level': processing_level,\n 'nodata_value': nodata_value,\n 'resampling_method': resampling_method,\n 'tile_layer': 1})\n \n log_multiline(logger.debug, vrt_band_list, 'vrt_band_list = %s', '\\t')\n return vrt_band_list\n \n # process_dataset function starts here\n result = False\n db_cursor1 = self.db_connection.cursor()\n \n logger.info('Processing dataset %s', dataset_info['nbar_dataset_path'])\n \n vrt_band_stack_basename = '_'.join([dataset_info['satellite_tag'], \n re.sub('\\W', '', dataset_info['sensor_name']), \n dataset_info['start_datetime'].date().strftime('%Y%m%d'), \n '%03d' % dataset_info['x_ref'], \n '%03d' % dataset_info['y_ref']]\n ) + '.vrt'\n logger.debug('vrt_band_stack_basename = %s', vrt_band_stack_basename)\n \n tile_output_root = os.path.join(self.tile_root, tile_type_info['tile_directory'],\n dataset_info['satellite_tag'] + '_' + re.sub('\\W', '', dataset_info['sensor_name'])) \n logger.debug('tile_output_root = %s', tile_output_root)\n\n vrt_band_list = get_vrt_band_list()\n #Upper right obtainable as (dataset_info['ul_lon'], dataset_info['ul_lat']), but these coordinates only relate to tiles in the case of (1deg, 1deg) tiles\n #Otherwise, we must use the generic scene-to-tile coordinate transformation in get_tile_index_range\n #tile_index_range = get_tile_index_range(vrt_band_list[0]['filename']) # Find extents of first band dataset\n tiles_in_acquisition = get_tiles_touched_by_acquisition(vrt_band_list[0]['filename'])\n #The number of tile footprints touched by this acquisition\n tile_count = len(tiles_in_acquisition)\n # Check whether tiles exist for every band\n tile_record_count = len(find_tiles())\n logger.info('Found %d tile records in database for %d tiles', tile_record_count, tile_count * 3) # Count ORTHO, NBAR & PQA\n if tile_record_count == tile_count * 3:\n logger.info('All tiles already exist in database - skipping tile creation for %s', dataset_info['nbar_dataset_path'])\n return result\n \n try:\n \n #TODO: Create all new acquisition records and commit the transaction here \n \n # Use NBAR dataset name for dataset lock (could have been any other level)\n work_directory = os.path.join(self.temp_dir,\n os.path.basename(dataset_info['nbar_dataset_path'])\n )\n \n #TODO: Apply lock on path/row instead of on dataset to try to force the same node to process the full depth\n if not self.lock_object(work_directory):\n logger.info('Already processing %s - skipping', dataset_info['nbar_dataset_path'])\n return result\n \n if self.refresh and os.path.exists(work_directory):\n shutil.rmtree(work_directory)\n \n self.create_directory(work_directory)\n \n 
tile_has_data = {}\n for processing_level in ['PQA', 'ORTHO', 'NBAR']: # N.B: PQA must be first\n vrt_band_info_list = [vrt_band_info for vrt_band_info in vrt_band_list if vrt_band_info['processing_level'] == processing_level]\n nodata_value = vrt_band_info_list[0]['nodata_value'] # All the same for a given processing_level\n resampling_method = vrt_band_info_list[0]['resampling_method'] # All the same for a given processing_level\n \n vrt_band_stack_filename = os.path.join(work_directory,\n processing_level + '_' + vrt_band_stack_basename)\n \n if not os.path.exists(vrt_band_stack_filename) or self.check_object_locked(vrt_band_stack_filename):\n \n # Check whether this dataset is already been processed\n if not self.lock_object(vrt_band_stack_filename):\n logger.warning('Band stack %s already being processed - skipping.', vrt_band_stack_filename)\n continue\n \n logger.info('Creating %s band stack file %s', processing_level, vrt_band_stack_filename)\n command_string = 'gdalbuildvrt -separate'\n if not self.debug:\n command_string += ' -q'\n if nodata_value is not None:\n command_string += ' -srcnodata %d -vrtnodata %d' % (\n nodata_value, \n nodata_value) \n command_string += ' -overwrite %s %s' % (\n vrt_band_stack_filename,\n ' '.join([vrt_band_info['filename'] for vrt_band_info in vrt_band_info_list])\n )\n logger.debug('command_string = %s', command_string)\n \n result = execute(command_string=command_string)\n \n if result['stdout']:\n log_multiline(logger.info, result['stdout'], 'stdout from ' + command_string, '\\t') \n \n if result['returncode']:\n log_multiline(logger.error, result['stderr'], 'stderr from ' + command_string, '\\t')\n raise Exception('%s failed', command_string) \n \n band_stack_dataset = gdal.Open(vrt_band_stack_filename)\n assert band_stack_dataset, 'Unable to open VRT %s' % vrt_band_stack_filename\n band_stack_dataset.SetMetadata(\n {'satellite': dataset_info['satellite_tag'], \n 'sensor': dataset_info['sensor_name'], \n 'start_datetime': dataset_info['start_datetime'].isoformat(),\n 'end_datetime': dataset_info['end_datetime'].isoformat(),\n 'path': '%03d' % dataset_info['x_ref'],\n 'row': '%03d' % dataset_info['y_ref']}\n )\n \n for band_index in range(len(vrt_band_info_list)):\n band = band_stack_dataset.GetRasterBand(band_index + 1)\n band.SetMetadata({'name': vrt_band_info_list[band_index]['name'], \n 'filename': vrt_band_info_list[band_index]['filename']})\n \n # Need to set nodata values for each band - can't seem to do it in gdalbuildvrt\n nodata_value = vrt_band_info_list[band_index]['nodata_value']\n if nodata_value is not None:\n band.SetNoDataValue(nodata_value)\n \n band_stack_dataset.FlushCache()\n self.unlock_object(vrt_band_stack_filename)\n else:\n logger.info('Band stack %s already exists', vrt_band_stack_filename)\n band_stack_dataset = gdal.Open(vrt_band_stack_filename)\n \n logger.info('Processing %d %s Tiles', tile_count, processing_level)\n #MPH replace double-loop with single loop over tiles touched by acquisition\n for x_index, y_index in tiles_in_acquisition: #MPH\n #for x_index in range(tile_index_range[0], tile_index_range[2]):\n # for y_index in range(tile_index_range[1], tile_index_range[3]): \n #tile_extents to be used by gdalwarp -te flag. Works for our current crs EPSG 4326. In general, will need to get the tile's geotransform and\n #consider the max, min values in projected space (Xp, Yp). 
That is, need to calculate tile extents over the four vertices\n #Upper-left\n #xul = geotransform[0]\n #yul = geotransform[3]\n #Upper-right\n #xur = geotransform[0] + geotransform[1]*tile_type_info['x_pixels']\n #yur = geotransform[3] + geotransform[4]*tile_type_info['x_pixels'] \n #Lower-left\n #xll = geotransform[0] + geotransform[2]*tile_type_info['y_pixels']\n #yll = geotransform[3] + geotransform[5]*tile_type_info['y_pixels']\n #Lower-right\n #xlr = geotransform[0] + geotransform[1]*tile_type_info['x_pixels'] + geotransform[2]*tile_type_info['y_pixels']\n #ylr = geotransform[3] + geotransform[4]*tile_type_info['x_pixels'] + geotransform[5]*tile_type_info['y_pixels']\n #tile_extents[0] = min(xll, xul)\n #tile_extents[1] = min(yll, ylr)\n #tile_extents[2] = max(xur, xlr)\n #tile_extents[3] = max(yul, yur)\n \n tile_extents = (tile_type_info['x_origin'] + x_index * tile_type_info['x_size'], \n tile_type_info['y_origin'] + y_index * tile_type_info['y_size'], \n tile_type_info['x_origin'] + (x_index + 1) * tile_type_info['x_size'], \n tile_type_info['y_origin'] + (y_index + 1) * tile_type_info['y_size']) \n logger.debug('tile_extents = %s', tile_extents) \n tile_output_dir = os.path.join(tile_output_root, \n re.sub('\\+', '', '%+04d_%+04d' % (x_index, y_index)),\n '%04d' % dataset_info['start_datetime'].year\n ) \n \n self.create_directory(os.path.join(tile_output_dir, 'mosaic_cache'))\n \n tile_output_path = os.path.join(tile_output_dir,\n '_'.join([dataset_info['satellite_tag'], \n re.sub('\\W', '', dataset_info['sensor_name']),\n processing_level,\n re.sub('\\+', '', '%+04d_%+04d' % (x_index, y_index)),\n re.sub(':', '-', dataset_info['start_datetime'].isoformat())\n ]) + tile_type_info['file_extension']\n )\n \n # Check whether this tile has already been processed\n if not self.lock_object(tile_output_path):\n logger.warning('Tile %s already being processed - skipping.', tile_output_path)\n continue\n # Only generate tile file if PQA tile or tile contains data\n if tile_has_data.get((x_index, y_index)) is None or tile_has_data[(x_index, y_index)]: \n #Assuming tile has data, use gdalwarp to reproject from scene projection to datacube projection\n command_string = 'gdalwarp'\n if not self.debug:\n command_string += ' -q'\n command_string += ' -t_srs %s -te %f %f %f %f -tr %f %f -tap -tap -r %s' % (\n tile_type_info['crs'],\n tile_extents[0], tile_extents[1], tile_extents[2], tile_extents[3], \n tile_type_info['x_pixel_size'], tile_type_info['y_pixel_size'],\n resampling_method\n )\n \n if nodata_value is not None:\n command_string += ' -srcnodata %d -dstnodata %d' % (nodata_value, nodata_value)\n \n command_string += ' -of %s' % tile_type_info['file_format']\n \n if tile_type_info['format_options']:\n for format_option in tile_type_info['format_options'].split(','):\n command_string += ' -co %s' % format_option\n \n command_string += ' -overwrite %s %s' % (\n vrt_band_stack_filename,\n tile_output_path\n )\n \n logger.debug('command_string = %s', command_string)\n \n retry=True\n while retry:\n result = execute(command_string=command_string)\n\n if result['stdout']:\n log_multiline(logger.info, result['stdout'], 'stdout from ' + command_string, '\\t')\n\n if result['returncode']: # Return code is non-zero\n log_multiline(logger.error, result['stderr'], 'stderr from ' + command_string, '\\t')\n\n # Work-around for gdalwarp error writing LZW-compressed GeoTIFFs \n if (string.find(result['stderr'], 'LZW') > -1 # LZW-related error\n and tile_type_info['file_format'] == 'GTiff' # Output 
format is GeoTIFF\n and string.find(tile_type_info['format_options'], 'COMPRESS=LZW') > -1): # LZW compression requested\n \n temp_tile_path = os.path.join(os.path.dirname(vrt_band_stack_filename), \n os.path.basename(tile_output_path))\n\n # Write uncompressed tile to a temporary path\n command_string = string.replace(command_string, 'COMPRESS=LZW', 'COMPRESS=NONE')\n command_string = string.replace(command_string, tile_output_path, temp_tile_path)\n \n # Translate temporary uncompressed tile to final compressed tile\n command_string += '; gdal_translate -of GTiff'\n if tile_type_info['format_options']:\n for format_option in tile_type_info['format_options'].split(','):\n command_string += ' -co %s' % format_option\n command_string += ' %s %s' % (\n temp_tile_path,\n tile_output_path\n )\n else:\n raise Exception('%s failed', command_string)\n else:\n retry = False # No retry on success\n \n # Set tile metadata\n tile_dataset = gdal.Open(tile_output_path)\n assert tile_dataset, 'Unable to open tile dataset %s' % tile_output_path\n \n # Check whether PQA tile contains any contiguous data\n #MPHtemp\n if tile_has_data.get((x_index, y_index)) is None and processing_level == 'PQA':\n tile_has_data[(x_index, y_index)] = ((numpy.bitwise_and(tile_dataset.GetRasterBand(1).ReadAsArray(), \n 1 << LandsatTiler.CONTIGUITY_BIT_INDEX)) > 0).any()\n logger.debug('%s tile (%d, %d) has data = %s', processing_level, x_index, y_index, tile_has_data[(x_index, y_index)])\n\n #MPH check whether this processing_level has any data\n #if tile_has_data.get((x_index, y_index)) is None:\n # if processing_level == 'PQA':\n # tile_has_data[(x_index, y_index)] = ((numpy.bitwise_and(tile_dataset.GetRasterBand(1).ReadAsArray(), \n # 1 << LandsatTiler.CONTIGUITY_BIT_INDEX)) > 0).any()\n # else:\n # #pull in the multiple bands \n #Would need to look at NBAR and ORTHO to know if PQA contiguity bit is 1\n\n # Only bother setting metadata if tile has valid data\n if tile_has_data[(x_index, y_index)]:\n metadata = band_stack_dataset.GetMetadata()\n metadata['x_index'] = str(x_index)\n metadata['y_index'] = str(y_index)\n tile_dataset.SetMetadata(metadata)\n \n # Set tile band metadata\n for band_index in range(len(vrt_band_info_list)):\n scene_band = band_stack_dataset.GetRasterBand(band_index + 1)\n tile_band = tile_dataset.GetRasterBand(band_index + 1)\n tile_band.SetMetadata(scene_band.GetMetadata())\n \n # Need to set nodata values for each band - gdalwarp doesn't copy it across\n nodata_value = vrt_band_info_list[band_index]['nodata_value']\n if nodata_value is not None:\n tile_band.SetNoDataValue(nodata_value)\n \n \n logger.info('Processed %s Tile (%d, %d)', processing_level, x_index, y_index)\n else:\n logger.info('AAA Skipped empty %s Tile (%d, %d)', processing_level, x_index, y_index)\n else:\n logger.info('BBB Skipped empty %s Tile (%d, %d)', processing_level, x_index, y_index)\n \n \n # Change permissions on any recently created files\n command_string = 'chmod -R 775 %s; chmod -R 777 %s' % (tile_output_dir, \n os.path.join(tile_output_dir, 'mosaic_cache')\n )\n \n result = execute(command_string=command_string)\n \n if result['stdout']:\n log_multiline(logger.info, result['stdout'], 'stdout from ' + command_string, '\\t') \n \n # N.B: command may return errors for files not owned by user\n if result['returncode']:\n log_multiline(logger.warning, result['stderr'], 'stderr from ' + command_string, '\\t')\n# raise Exception('%s failed', command_string) \n \n self.unlock_object(tile_output_path)\n \n # Check whether 
tile contains any data \n if tile_has_data[(x_index, y_index)]: \n tile_class_id = 1 # Valid tile\n tile_size = self.getFileSizeMB(tile_output_path)\n else: # PQA tile contains no data \n # Remove empty PQA tile file\n tile_class_id = 2 # Dummy tile record with no file\n self.remove(tile_output_path)\n tile_size = 0 \n \n sql = \"\"\"-- Insert new tile_footprint record if necessary\n insert into tile_footprint (\n x_index, \n y_index, \n tile_type_id, \n x_min, \n y_min, \n x_max, \n y_max\n )\n select\n %(x_index)s, \n %(y_index)s, \n %(tile_type_id)s, \n %(x_min)s, \n %(y_min)s, \n %(x_max)s, \n %(y_max)s\n where not exists\n (select \n x_index, \n y_index, \n tile_type_id\n from tile_footprint\n where x_index = %(x_index)s \n and y_index = %(y_index)s \n and tile_type_id = %(tile_type_id)s);\n \n -- Update any existing tile record\n update tile\n set \n tile_pathname = %(tile_pathname)s,\n tile_class_id = %(tile_class_id)s,\n tile_size = %(tile_size)s,\n ctime = now()\n where \n x_index = %(x_index)s\n and y_index = %(y_index)s\n and tile_type_id = %(tile_type_id)s\n and dataset_id = %(dataset_id)s;\n \n -- Insert new tile record if necessary\n insert into tile (\n tile_id,\n x_index,\n y_index,\n tile_type_id,\n dataset_id,\n tile_pathname,\n tile_class_id,\n tile_size,\n ctime\n ) \n select\n nextval('tile_id_seq'::regclass),\n %(x_index)s,\n %(y_index)s,\n %(tile_type_id)s,\n %(dataset_id)s,\n %(tile_pathname)s,\n %(tile_class_id)s,\n %(tile_size)s,\n now()\n where not exists\n (select tile_id\n from tile\n where \n x_index = %(x_index)s\n and y_index = %(y_index)s\n and tile_type_id = %(tile_type_id)s\n and dataset_id = %(dataset_id)s\n );\n \"\"\" \n params = {'x_index': x_index,\n 'y_index': y_index,\n 'tile_type_id': tile_type_info['tile_type_id'],\n 'x_min': tile_extents[0], \n 'y_min': tile_extents[1], \n 'x_max': tile_extents[2], \n 'y_max': tile_extents[3],\n 'dataset_id': vrt_band_info_list[0]['dataset_id'], # All the same\n 'tile_pathname': tile_output_path,\n 'tile_class_id': tile_class_id,\n 'tile_size': tile_size\n }\n \n log_multiline(logger.debug, db_cursor1.mogrify(sql, params), 'SQL', '\\t')\n db_cursor1.execute(sql, params)\n #end loop over all tiles touched by acquisiton \n self.unlock_object(work_directory)\n \n #if not self.debug:\n # shutil.rmtree(work_directory)\n \n result = True\n self.db_connection.commit() \n logger.info('Dataset tiling completed - Transaction committed')\n return result\n except Exception, e:\n logger.error('Tiling operation failed: %s', e.message) # Keep on processing\n self.db_connection.rollback()\n if not self.debug:\n raise\n \n \n def process_scenes(): \n db_cursor = self.db_connection.cursor()\n \n sql = \"\"\"-- Find all scenes with L1T, NBAR and PQA level datasets with missing tiles\nselect * from (\n select distinct\n acquisition_id,\n l1t.dataset_id as l1t_dataset_id,\n l1t.dataset_path as l1t_dataset_path,\n l1t.level_name as l1t_level_name,\n l1t.nodata_value as l1t_nodata_value,\n l1t.resampling_method as l1t_resampling_method,\n l1t.tile_count as l1t_tile_count,\n nbar.dataset_id as nbar_dataset_id,\n nbar.dataset_path as nbar_dataset_path,\n nbar.level_name as nbar_level_name,\n nbar.nodata_value as nbar_nodata_value,\n nbar.resampling_method as nbar_resampling_method,\n nbar.tile_count as nbar_tile_count,\n pqa.dataset_id as pqa_dataset_id,\n pqa.dataset_path as pqa_dataset_path,\n pqa.level_name as pqa_level_name,\n pqa.nodata_value as pqa_nodata_value,\n pqa.resampling_method as pqa_resampling_method,\n pqa.tile_count as 
pqa_tile_count,\n satellite_tag,\n sensor_name,\n x_ref,\n y_ref,\n start_datetime,\n end_datetime,\n ll_lon,\n ll_lat,\n lr_lon,\n lr_lat,\n ul_lon,\n ul_lat,\n ur_lon,\n ur_lat,\n nbar.crs,\n nbar.ll_x,\n nbar.ll_y,\n nbar.lr_x,\n nbar.lr_y,\n nbar.ul_x,\n nbar.ul_y,\n nbar.ur_x,\n nbar.ur_y,\n nbar.x_pixels,\n nbar.y_pixels,\n -- TODO: Use dataset_footprint table so that this will not break for projected tile types\n (\n ceil(greatest((lr_lon + 360.0)::numeric %% 360.0::numeric, \n (ur_lon + 360.0)::numeric %% 360.0::numeric) / %(tile_x_size)s)\n -\n floor(least((ll_lon + 360.0)::numeric %% 360.0::numeric,\n (ul_lon + 360.0)::numeric %% 360.0::numeric) / %(tile_x_size)s)\n )\n *\n (\n ceil(greatest(ul_lat, ur_lat) / %(tile_y_size)s) \n -\n floor(least(ll_lat, lr_lat) / %(tile_y_size)s)\n ) as tiles_required\n from acquisition\n inner join (\n select\n acquisition_id,\n d.dataset_id,\n level_name,\n dataset_path,\n nodata_value,\n resampling_method,\n count(tile_id) as tile_count\n from dataset d\n inner join processing_level using(level_id)\n left join tile t on t.dataset_id = d.dataset_id and tile_type_id = 1\n and ctime is not null -- *** TODO: Remove this line after reload ***\n where level_name = 'ORTHO'\n group by 1,2,3,4,5,6\n ) l1t using(acquisition_id)\n inner join (\n select\n acquisition_id,\n d.dataset_id,\n level_name,\n dataset_path,\n nodata_value,\n resampling_method,\n -- Grab extra info from NBAR dataset - should be the same as in L1T & PQA datasets\n crs,\n ll_x,\n ll_y,\n lr_x,\n lr_y,\n ul_x,\n ul_y,\n ur_x,\n ur_y,\n x_pixels,\n y_pixels,\n count(tile_id) as tile_count\n from dataset d\n inner join processing_level using(level_id)\n left join tile t on t.dataset_id = d.dataset_id and tile_type_id = 1\n and ctime is not null -- *** TODO: Remove this line after reload ***\n where level_name = 'NBAR'\n group by 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17\n ) nbar using(acquisition_id)\n inner join (\n select\n acquisition_id,\n d.dataset_id,\n level_name,\n dataset_path,\n nodata_value,\n resampling_method,\n count(tile_id) as tile_count\n from dataset d\n inner join processing_level using(level_id)\n left join tile t on t.dataset_id = d.dataset_id and tile_type_id = 1\n and ctime is not null -- *** TODO: Remove this line after reload ***\n where level_name = 'PQA'\n group by 1,2,3,4,5,6\n ) pqa using(acquisition_id)\n inner join satellite sa using(satellite_id)\n inner join sensor se using(satellite_id, sensor_id)\n where (%(start_date)s is null or start_datetime >= %(start_date)s)\n and (%(end_date)s is null or end_datetime < cast(%(end_date)s as date) + 1)\n and (%(min_path)s is null or x_ref >= %(min_path)s)\n and (%(max_path)s is null or x_ref <= %(max_path)s)\n and (%(min_row)s is null or y_ref >= %(min_row)s)\n and (%(max_row)s is null or y_ref <= %(max_row)s)\n and (cloud_cover is null or cloud_cover < 98) -- Arbitrary threshold above which scene should be ignored\n) datasets\nwhere l1t_tile_count < tiles_required\n or nbar_tile_count < tiles_required\n or pqa_tile_count < tiles_required\norder by -- Order by path, row then descending date-times\n l1t_tile_count + nbar_tile_count + pqa_tile_count,\n x_ref,\n y_ref,\n start_datetime desc,\n end_datetime desc,\n satellite_tag,\n sensor_name;\n\"\"\" \n params = {'tile_type_id': tile_type_id,\n 'start_date': start_date,\n 'end_date': end_date,\n 'min_path': min_path,\n 'max_path': max_path,\n 'min_row': min_row,\n 'max_row': max_row,\n 'tile_x_size': tile_type_info['x_size'],\n 'tile_y_size': 
tile_type_info['y_size']\n }\n \n log_multiline(logger.debug, db_cursor.mogrify(sql, params), 'SQL', '\\t')\n \n # This mother of all queries creates a logjam at the DB server, so we only allow one instance a query at a time to submit it\n #TODO: Find a nicer way of dealing with this\n while not self.lock_object(os.path.basename(__file__) + ' dataset query'):\n print 'About to sleep because %s not locked' %(os.path.basename(__file__) + ' dataset query')\n time.sleep(10) \n try:\n db_cursor.execute(sql, params)\n finally:\n self.unlock_object(os.path.basename(__file__) +' dataset query')\n \n column_list = ['acquisition_id',\n 'l1t_dataset_id', \n 'l1t_dataset_path',\n 'l1t_level_name',\n 'l1t_nodata_value', \n 'l1t_resampling_method', \n 'l1t_tile_count', \n 'nbar_dataset_id',\n 'nbar_dataset_path',\n 'nbar_level_name',\n 'nbar_nodata_value', \n 'nbar_resampling_method', \n 'nbar_tile_count', \n 'pqa_dataset_id',\n 'pqa_dataset_path',\n 'pqa_level_name',\n 'pqa_nodata_value', \n 'pqa_resampling_method', \n 'pqa_tile_count', \n 'satellite_tag', \n 'sensor_name', \n 'x_ref', \n 'y_ref', \n 'start_datetime', \n 'end_datetime', \n 'll_lon',\n 'll_lat',\n 'lr_lon',\n 'lr_lat',\n 'ul_lon',\n 'ul_lat',\n 'ur_lon',\n 'ur_lat',\n 'crs',\n 'll_x',\n 'll_y',\n 'lr_x',\n 'lr_y',\n 'ul_x',\n 'ul_y',\n 'ur_x',\n 'ur_y',\n 'x_pixels',\n 'y_pixels']\n \n for record in db_cursor:\n dataset_info = {}\n for column_index in range(len(column_list)):\n dataset_info[column_list[column_index]] = record[column_index]\n \n # Ignore bad dataset and proceed to next one if not debugging\n if self.debug: \n process_dataset(dataset_info)\n else:\n try:\n process_dataset(dataset_info)\n except Exception, e:\n logger.warning(e.message)\n \n # Start of create_tiles function\n process_scenes()\n# create_composites()\n \nif __name__ == '__main__':\n landsat_tiler = LandsatTiler()\n \n #===========================================================================\n # # Sleep for a random number of seconds to avoid potential database lock-up with many instances starting up at the same time\n # # TODO: Find something better than this nasty work-around\n # if not landsat_tiler.debug:\n # time.sleep(random.randint(0, 30)) \n #===========================================================================\n landsat_tiler.create_tiles()\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-1938904243955361800,"string":"-1,938,904,243,955,361,800"},"line_mean":{"kind":"number","value":51.3287545788,"string":"51.328755"},"line_max":{"kind":"number","value":165,"string":"165"},"alpha_frac":{"kind":"number","value":0.4754913113,"string":"0.475491"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109518,"cells":{"repo_name":{"kind":"string","value":"best-coloc-ever/globibot"},"path":{"kind":"string","value":"bot/plugins/repost/plugin.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1779"},"content":{"kind":"string","value":"from globibot.lib.plugin import Plugin\n\nfrom collections import defaultdict\nfrom time import time\n\nimport re\n\nURL_PATTERN = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')\n\nclass Repost(Plugin):\n\n def load(self):\n self.shames = defaultdict(lambda: defaultdict(list))\n self.links = self.load_links()\n\n async def on_new(self, message):\n await self.process_message(message)\n\n async def on_edit(self, before, after):\n await self.process_message(after)\n\n async def 
process_message(self, message):\n for url in URL_PATTERN.findall(message.content):\n try:\n author_id, stamp = self.links[message.server.id][url]\n for emoji in ['🔔', '🇷', '🇪', '🇵', '🇴', '🇸', '🇹']:\n await self.bot.add_reaction(message, emoji)\n self.shames[message.server.id][message.author.id].append((url, time()))\n except KeyError:\n self.links[message.server.id][url] = (message.author.id, time())\n\n def load_links(self):\n links = defaultdict(dict)\n\n with self.transaction() as trans:\n trans.execute('''\n select author_id, stamp, server_id, content\n from log\n order by stamp asc\n ''')\n\n for author_id, stamp, server_id, content in trans.fetchall():\n for url in URL_PATTERN.findall(content):\n if url in links[str(server_id)]:\n self.shames[str(server_id)][str(author_id)].append((url, stamp.timestamp()))\n else:\n links[str(server_id)][url] = (str(author_id), stamp.timestamp())\n\n return links\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":5692639545437346000,"string":"5,692,639,545,437,346,000"},"line_mean":{"kind":"number","value":34.8775510204,"string":"34.877551"},"line_max":{"kind":"number","value":106,"string":"106"},"alpha_frac":{"kind":"number","value":0.542662116,"string":"0.542662"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109519,"cells":{"repo_name":{"kind":"string","value":"sensusaps/RoboBraille.Web.API"},"path":{"kind":"string","value":"WorkingDirectory/DaisyPipeline/transformers/ca_cnib_rtf2dtbook/rtf2xml-py/rtf2xml/check_brackets.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2889"},"content":{"kind":"string","value":"#########################################################################\n# #\n# #\n# copyright 2002 Paul Henry Tremblay #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #\n# General Public License for more details. 
#\n# #\n# You should have received a copy of the GNU General Public License #\n# along with this program; if not, write to the Free Software #\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA #\n# 02111-1307 USA #\n# #\n# #\n#########################################################################\n\nimport sys,os\nclass CheckBrackets:\n \"\"\"Check that brackets match up\"\"\"\n\n\n def __init__(self, bug_handler = None, file=None):\n self.__file=file\n self.__bug_handler = bug_handler\n self.__bracket_count=0\n self.__ob_count = 0\n self.__cb_count = 0\n self.__open_bracket_num = []\n\n def open_brack(self, line):\n num = line[-5:-1]\n self.__open_bracket_num.append(num)\n self.__bracket_count += 1\n\n def close_brack(self, line):\n num = line[-5:-1]\n ##self.__open_bracket_num.append(num)\n try:\n last_num = self.__open_bracket_num.pop()\n except:\n return 0\n if num != last_num:\n return 0\n self.__bracket_count -= 1\n return 1\n\n \n def check_brackets(self):\n read_obj = open(self.__file, 'r')\n line = 'dummy'\n line_count = 0\n while line:\n line_count += 1\n line = read_obj.readline()\n self.__token_info = line[:16]\n if self.__token_info == 'ob 1 and sys.argv[1]:\n msg = sys.argv[1]\n else:\n msg = _(\"The screen is locked by a system administrator.\")\n if len(sys.argv) > 2:\n unlock_secs = int(sys.argv[2])\n else:\n unlock_secs = None\n LockScreen(True).lock(msg, unlock_secs)\n Gtk.main()\n\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-6936315692895311000,"string":"-6,936,315,692,895,311,000"},"line_mean":{"kind":"number","value":31.1666666667,"string":"31.166667"},"line_max":{"kind":"number","value":76,"string":"76"},"alpha_frac":{"kind":"number","value":0.6107893935,"string":"0.610789"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109522,"cells":{"repo_name":{"kind":"string","value":"rst2pdf/rst2pdf"},"path":{"kind":"string","value":"rst2pdf/tests/input/sphinx-issue529/conf.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1322"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n# -- General configuration -----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['rst2pdf.pdfbuilder']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Foobar'\ncopyright = u'2009, Jason S'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.0.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.0.1'\n\n\n# -- Options for sphinx.ext.todo extension -----------------------------------\n\ntodo_include_todos = True\n\n\n# -- Options for PDF output --------------------------------------------------\n\n# Grouping the document tree into PDF files. List of tuples\n# (source start file, target name, title, author).\npdf_documents = [\n ('index', u'index', u'index', u'lorenzo'),\n]\n\n# A comma-separated list of custom stylesheets. 
Example:\npdf_stylesheets = ['sphinx']\n\n# If false, no index is generated.\npdf_use_index = False\n\n# If false, no coverpage is generated.\npdf_use_coverpage = False\n\npdf_invariant = True\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":7878673909229144000,"string":"7,878,673,909,229,144,000"},"line_mean":{"kind":"number","value":26.5416666667,"string":"26.541667"},"line_max":{"kind":"number","value":80,"string":"80"},"alpha_frac":{"kind":"number","value":0.6384266263,"string":"0.638427"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109523,"cells":{"repo_name":{"kind":"string","value":"FabianN/autopkg_recipies"},"path":{"kind":"string","value":"MSOfficeUpdates/MSOffice2016URLandUpdateInfoProvider.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"13414"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#\n# Copyright 2015 Allister Banks and Tim Sutton,\n# based on MSOffice2011UpdateInfoProvider by Greg Neagle\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Disabling 'no-env-member' for recipe processors\n#pylint:disable=e1101\n\"\"\"See docstring for MSOffice2016URLandUpdateInfoProvider class\"\"\"\n\nimport plistlib\nimport re\nimport urllib2\n\nfrom autopkglib import Processor, ProcessorError\n\n\n__all__ = [\"MSOffice2016URLandUpdateInfoProvider\"]\n\n# CULTURE_CODE defaulting to 'en-US' as the installers and updates seem to be\n# multilingual.\nCULTURE_CODE = \"0409\"\nBASE_URL = \"https://officecdn.microsoft.com/pr/%s/OfficeMac/%s.xml\"\n\n# These can be easily be found as \"Application ID\" in ~/Library/Preferences/com.microsoft.autoupdate2.plist on a \n# machine that has Microsoft AutoUpdate.app installed on it.\n#\n# Note that Skype, 'MSFB' has a '16' after it, AutoUpdate has a '03' after it while all the other products have '15'\n\nPROD_DICT = {\n 'Excel': {'id': 'XCEL15', 'path': '/Applications/Microsoft Excel.app'},\n 'OneNote': {'id': 'ONMC15', 'path': '/Applications/Microsoft OneNote.app'},\n 'Outlook': {'id': 'OPIM15', 'path': '/Applications/Microsoft Outlook.app'},\n 'PowerPoint': {'id': 'PPT315', 'path': '/Applications/Microsoft PowerPoint.app'},\n 'Word': {'id': 'MSWD15', 'path': '/Applications/Microsoft Word.app'},\n 'SkypeForBusiness': {'id': 'MSFB16', 'path': '/Applications/Skype for Business.app'},\n 'AutoUpdate': {\n 'id': 'MSau03',\n 'path': '/Library/Application Support/Microsoft/MAU2.0/Microsoft AutoUpdate.app'\n }\n}\nLOCALE_ID_INFO_URL = \"https://msdn.microsoft.com/en-us/goglobal/bb964664.aspx\"\nSUPPORTED_VERSIONS = [\"latest\", \"latest-delta\"]\nDEFAULT_VERSION = \"latest\"\nCHANNELS = {\n 'Production': 'C1297A47-86C4-4C1F-97FA-950631F94777',\n 'InsiderSlow': '1ac37578-5a24-40fb-892e-b89d85b6dfaa',\n 'InsiderFast': '4B2D7701-0A4F-49C8-B4CB-0C2D4043F51F',\n}\nDEFAULT_CHANNEL = \"Production\"\n\nclass MSOffice2016URLandUpdateInfoProvider(Processor):\n \"\"\"Provides a download URL for the most recent version of MS Office 2016.\"\"\"\n input_variables = {\n 
\"locale_id\": {\n \"required\": False,\n \"default\": \"1033\",\n \"description\": (\n \"Locale ID that determines the language \"\n \"that is retrieved from the metadata, currently only \"\n \"used by the update description. See %s \"\n \"for a list of locale codes. The default is en-US.\"\n % LOCALE_ID_INFO_URL)\n },\n \"product\": {\n \"required\": True,\n \"description\": \"Name of product to fetch, e.g. Excel.\",\n },\n \"version\": {\n \"required\": False,\n \"default\": DEFAULT_VERSION,\n \"description\": (\"Update type to fetch. Supported values are: \"\n \"'%s'. Defaults to %s.\"\n % (\"', '\".join(SUPPORTED_VERSIONS),\n DEFAULT_VERSION)),\n },\n \"munki_required_update_name\": {\n \"required\": False,\n \"default\": \"\",\n \"description\":\n (\"If the update is a delta, a 'requires' key will be set \"\n \"according to the minimum version defined in the MS \"\n \"metadata. If this key is set, this name will be used \"\n \"for the required item. If unset, NAME will be used.\")\n },\n \"channel\": {\n \"required\": False,\n \"default\": DEFAULT_CHANNEL,\n \"description\":\n (\"Update feed channel that will be checked for updates. \"\n \"Defaults to %s, acceptable values are either a custom \"\n \"UUID or one of: %s\" % (\n DEFAULT_CHANNEL,\n \", \".join(CHANNELS.keys())))\n }\n }\n output_variables = {\n \"additional_pkginfo\": {\n \"description\":\n \"Some pkginfo fields extracted from the Microsoft metadata.\",\n },\n \"description\": {\n \"description\":\n \"Description of the update from the manifest, in the language \"\n \"given by the locale_id input variable.\",\n },\n \"version\": {\n \"description\":\n (\"The version of the update as extracted from the Microsoft \"\n \"metadata.\")\n },\n \"minimum_os_version\": {\n \"description\":\n (\"The minimum os version required by the update as extracted \"\n \"from the Microsoft metadata.\")\n },\n \"minimum_version_for_delta\": {\n \"description\":\n (\"If this update is a delta, this value will be set to the \"\n \"minimum required application version to which this delta \"\n \"can be applied. Otherwise it will be an empty string.\")\n },\n \"url\": {\n \"description\": \"URL to the latest installer.\",\n },\n }\n description = __doc__\n min_delta_version = \"\"\n\n def sanity_check_expected_triggers(self, item):\n \"\"\"Raises an exeception if the Trigger Condition or\n Triggers for an update don't match what we expect.\n Protects us if these change in the future.\"\"\"\n # MS currently uses \"Registered File\" placeholders, which get replaced\n # with the bundle of a given application ID. In other words, this is\n # the bundle version of the app itself.\n if not item.get(\"Trigger Condition\") == [\"and\", \"Registered File\"]:\n raise ProcessorError(\n \"Unexpected Trigger Condition in item %s: %s\"\n % (item[\"Title\"], item[\"Trigger Condition\"]))\n\n def get_installs_items(self, item):\n \"\"\"Attempts to parse the Triggers to create an installs item using\n only manifest data, making the assumption that CFBundleVersion and\n CFBundleShortVersionString are equal. 
Skip SkypeForBusiness as its\n xml does not contain a 'Trigger Condition'\"\"\"\n if self.env[\"product\"] != 'SkypeForBusiness':\n self.sanity_check_expected_triggers(item)\n version = self.get_version(item)\n # Skipping CFBundleShortVersionString because it doesn't contain\n # anything more specific than major.minor (no build versions\n # distinguishing Insider builds for example)\n installs_item = {\n \"CFBundleVersion\": version,\n \"path\": PROD_DICT[self.env[\"product\"]]['path'],\n \"type\": \"application\",\n }\n return [installs_item]\n\n def get_version(self, item):\n \"\"\"Extracts the version of the update item.\"\"\"\n # If the 'Update Version' key exists we pull the \"full\" version string\n # easily from this\n if item.get(\"Update Version\"):\n self.output(\n \"Extracting version %s from metadata 'Update Version' key\" %\n item[\"Update Version\"])\n return item[\"Update Version\"]\n\n def get_installer_info(self):\n \"\"\"Gets info about an installer from MS metadata.\"\"\"\n # Get the channel UUID, matching against a custom UUID if one is given\n channel_input = self.env.get(\"channel\", DEFAULT_CHANNEL)\n rex = r\"^([0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12})$\"\n match_uuid = re.match(rex, channel_input)\n if not match_uuid and channel_input not in CHANNELS.keys():\n raise ProcessorError(\n \"'channel' input variable must be one of: %s or a custom \"\n \"uuid\" % (\", \".join(CHANNELS.keys())))\n if match_uuid:\n channel = match_uuid.groups()[0]\n else:\n channel = CHANNELS[channel_input]\n base_url = BASE_URL % (channel,\n CULTURE_CODE + PROD_DICT[self.env[\"product\"]]['id'])\n\n # Get metadata URL\n self.output(\"Requesting xml: %s\" % base_url)\n req = urllib2.Request(base_url)\n # Add the MAU User-Agent, since MAU feed server seems to explicitly\n # block a User-Agent of 'Python-urllib/2.7' - even a blank User-Agent\n # string passes.\n req.add_header(\n \"User-Agent\",\n \"Microsoft%20AutoUpdate/3.6.16080300 CFNetwork/760.6.3 Darwin/15.6.0 (x86_64)\")\n\n try:\n fdesc = urllib2.urlopen(req)\n data = fdesc.read()\n fdesc.close()\n except BaseException as err:\n raise ProcessorError(\"Can't download %s: %s\" % (base_url, err))\n\n metadata = plistlib.readPlistFromString(data)\n item = {}\n # According to MS, update feeds for a given 'channel' will only ever\n # have two items: a full and a delta. Delta updates will have a\n # 'FullUpdaterLocation' key, so filter by the array according to\n # which item has that key.\n if self.env[\"version\"] == \"latest\":\n item = [u for u in metadata if not u.get(\"FullUpdaterLocation\")]\n elif self.env[\"version\"] == \"latest-delta\":\n item = [u for u in metadata if u.get(\"FullUpdaterLocation\")]\n if not item:\n raise ProcessorError(\"Could not find an applicable update in \"\n \"update metadata.\")\n item = item[0]\n\n self.env[\"url\"] = item[\"Location\"]\n self.output(\"Found URL %s\" % self.env[\"url\"])\n self.output(\"Got update: '%s'\" % item[\"Title\"])\n # now extract useful info from the rest of the metadata that could\n # be used in a pkginfo\n pkginfo = {}\n # Get a copy of the description in our locale_id\n all_localizations = item.get(\"Localized\")\n lcid = self.env[\"locale_id\"]\n if lcid not in all_localizations:\n raise ProcessorError(\n \"Locale ID %s not found in manifest metadata. Available IDs: \"\n \"%s. 
See %s for more details.\" % (\n lcid,\n \", \".join(all_localizations.keys()),\n LOCALE_ID_INFO_URL))\n manifest_description = all_localizations[lcid]['Short Description']\n # Store the description in a separate output variable and in our pkginfo\n # directly.\n pkginfo[\"description\"] = \"%s\" % manifest_description\n self.env[\"description\"] = manifest_description\n\n # Minimum OS version key should exist always, but default to the current\n # minimum as of 16/11/03\n pkginfo[\"minimum_os_version\"] = item.get('Minimum OS', '10.10.5')\n installs_items = self.get_installs_items(item)\n if installs_items:\n pkginfo[\"installs\"] = installs_items\n\n # Extra work to do if this is a delta updater\n if self.env[\"version\"] == \"latest-delta\":\n try:\n rel_versions = item[\"Triggers\"][\"Registered File\"][\"VersionsRelative\"]\n except KeyError:\n raise ProcessorError(\"Can't find expected VersionsRelative\"\n \"keys for determining minimum update \"\n \"required for delta update.\")\n for expression in rel_versions:\n operator, ver_eval = expression.split()\n if operator == \">=\":\n self.min_delta_version = ver_eval\n break\n if not self.min_delta_version:\n raise ProcessorError(\"Not able to determine minimum required \"\n \"version for delta update.\")\n # Put minimum_update_version into installs item\n self.output(\"Adding minimum required version: %s\" %\n self.min_delta_version)\n pkginfo[\"installs\"][0][\"minimum_update_version\"] = \\\n self.min_delta_version\n required_update_name = self.env[\"NAME\"]\n if self.env[\"munki_required_update_name\"]:\n required_update_name = self.env[\"munki_required_update_name\"]\n # Add 'requires' array\n pkginfo[\"requires\"] = [\"%s-%s\" % (required_update_name,\n self.min_delta_version)]\n\n self.env[\"version\"] = self.get_version(item)\n self.env[\"minimum_os_version\"] = pkginfo[\"minimum_os_version\"]\n self.env[\"minimum_version_for_delta\"] = self.min_delta_version\n self.env[\"additional_pkginfo\"] = pkginfo\n self.env[\"url\"] = item[\"Location\"]\n self.output(\"Additional pkginfo: %s\" % self.env[\"additional_pkginfo\"])\n\n def main(self):\n \"\"\"Get information about an update\"\"\"\n if self.env[\"version\"] not in SUPPORTED_VERSIONS:\n raise ProcessorError(\"Invalid 'version': supported values are '%s'\"\n % \"', '\".join(SUPPORTED_VERSIONS))\n self.get_installer_info()\n\n\nif __name__ == \"__main__\":\n PROCESSOR = MSOffice2016URLandUpdateInfoProvider()\n PROCESSOR.execute_shell()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":8124691191780263000,"string":"8,124,691,191,780,263,000"},"line_mean":{"kind":"number","value":43.2706270627,"string":"43.270627"},"line_max":{"kind":"number","value":116,"string":"116"},"alpha_frac":{"kind":"number","value":0.5834203071,"string":"0.58342"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109524,"cells":{"repo_name":{"kind":"string","value":"lucventurini/mikado"},"path":{"kind":"string","value":"Mikado/preparation/annotation_parser.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"34495"},"content":{"kind":"string","value":"import multiprocessing\nfrom ..parsers import parser_factory\nfrom ..parsers.bam_parser import BamParser\nfrom ..utilities.log_utils import create_queue_logger\nfrom ..utilities import overlap\nimport logging\nimport logging.handlers\nfrom .. 
import exceptions\nfrom sys import intern\nimport rapidjson as json\nimport msgpack\nimport os\nfrom ..transcripts import Transcript\nfrom operator import itemgetter\nimport random\nimport struct\nimport zlib\n\n\n__author__ = 'Luca Venturini'\n\n# TODO: found_ids can be refactored out, in preference of a unique ID per file which is used as part of the label\n# in doing this, we prevent requiring the user to rename their inputs as there would be no repeated naming.\n\ndef __raise_redundant(row_id, name, label):\n\n if label == '':\n\n raise exceptions.RedundantNames(\n \"\"\"{0} has already been found in another file but is present in {1};\n this will cause unsolvable collisions. Please rerun preparation using\n labels to tag each file.\"\"\".format(\n row_id, name\n ))\n else:\n raise exceptions.RedundantNames(\n \"\"\"\"{0} has already been found in another file but is present in {1};\n this will cause unsolvable collisions. This happened even if\n you specified label {2}; please change them in order to ensure that\n no collisions happen.\"\"\".format(row_id, name, label))\n\n\ndef __raise_invalid(row_id, name, label):\n\n raise exceptions.InvalidAssembly(\n \"\"\"{0} is present multiple times in {1}{2}. This breaks the input parsing.\n Please ensure that the file is properly formatted, with unique IDs.\"\"\".format(\n row_id,\n name,\n \"(label: {0})\".format(label) if label != '' else \"\"))\n\n\ndef _create_split_tobject(tobject: dict, start, stop, num: int):\n \"\"\"\n Function to create a subset of the transcript, by keeping only the relevant exons\n :param tobject: dictionary of the features\n :param segments: the segments\n :param tid: original name\n :param num: progressive numbering of the transcript\n :return:\n \"\"\"\n\n newobj = tobject.copy()\n foundany = False\n for key in tobject:\n if key == \"features\":\n newobj[\"features\"] = dict.fromkeys(tobject[\"features\"])\n for feature in newobj[\"features\"]:\n newobj[\"features\"][feature] = []\n for ff in tobject[\"features\"][feature]:\n if overlap((start, stop), (ff[0], ff[1]), positive=True, flank=0) > 0:\n foundany = True\n newobj[\"features\"][feature].append(ff[:])\n else:\n newobj[key] = tobject[key]\n\n newobj[\"tid\"] = newobj[\"tid\"] + f\"_isplit.{num}\"\n newobj[\"parent\"] = \"{}.gene\".format(newobj[\"tid\"])\n return newobj, newobj[\"tid\"], start, stop\n\n\ndef _evaluate_tid(tid, tobject, logger, min_length, max_intron):\n if \"exon\" in tobject[\"features\"]:\n segments = tobject[\"features\"][\"exon\"][:]\n elif \"CDS\" in tobject[\"features\"]:\n segments = tobject[\"features\"][\"CDS\"][:]\n for feature in tobject[\"features\"]:\n if \"utr\" in feature.lower():\n segments.extend(tobject[\"features\"][feature])\n else:\n continue\n segments = sorted(segments, key=itemgetter(0))\n # Now check the exons\n exons = []\n if len(segments) == 0:\n logger.warning(\"No valid exon feature for %s, continuing\", tid)\n return []\n elif len(segments) == 1:\n exons = segments[0]\n else:\n current = segments[0]\n for pos in range(1, len(segments)):\n segment = segments[pos]\n if segment[0] > current[1] + 1:\n exons.append(current)\n current = segment\n elif segment[0] == current[1] + 1:\n current = (current[0], segment[1], None)\n else:\n logger.warning(\"Overlapping segments found in %s. 
Discarding it\", tid)\n return []\n exons.append(current)\n tobject[\"features\"][\"exon\"] = exons[:]\n else:\n raise KeyError(tobject[\"features\"])\n\n segments = sorted(segments, key=itemgetter(0))\n tlength = 0\n start, end = segments[0][0], segments[-1][1]\n introns = []\n num_segments = len(segments)\n for pos, segment in enumerate(segments):\n if pos < num_segments - 1:\n later = segments[pos + 1]\n intron = later[0] - (segment[1] + 1)\n introns.append((pos, intron))\n tlength += segment[1] + 1 - segment[0]\n\n # Discard transcript under a certain size\n if tlength < min_length:\n if tobject[\"is_reference\"] is True:\n logger.info(\"%s retained even if it is too short (%d) as it is a reference transcript.\",\n tid, tlength)\n else:\n logger.info(\"Discarding %s because its size (%d) is under the minimum of %d\",\n tid, tlength, min_length)\n return []\n\n # Discard transcripts with introns over the limit\n over = [intron for intron in introns if intron[1] > max_intron]\n if len(over) > 0:\n if tobject[\"is_reference\"] is True:\n logger.info(\n \"%s retained even if has %s introns the limit (%d, max: %d) as it is a reference transcript.\",\n tid, len(over), max([_[1] for _ in over]), max_intron)\n return [(tobject, tid, start, end)]\n else:\n logger.info(\n \"Splitting %s into %d transcripts because it has %d introns over the maximum of %d (longest: %d)\",\n tid, len(over) + 1, len(over), max_intron, max([_[1] for _ in over]))\n splitted = []\n current = 0\n for num, ointron in enumerate(over):\n final_pos = ointron[0]\n segs = segments[current:final_pos+1][:]\n current = final_pos + 1\n start, stop = segs[0][0], segs[-1][1]\n tlength = sum([_[1] + 1 - _[0] for _ in segs])\n if tlength < min_length:\n logger.info(\"Discarding fragment %s of %s because its length is beneath the minimum of %s (%s)\",\n num, tid, min_length, tlength)\n continue\n else:\n splitted.append(_create_split_tobject(tobject, start, stop, num))\n\n segs = segments[current:]\n start, stop = segs[0][0], segs[-1][1]\n tlength = sum([_[1] + 1 - _[0] for _ in segs])\n if tlength < min_length:\n logger.info(\"Discarding fragment %s of %s because its length is beneath the minimum of %s (%s)\",\n len(over), tid, min_length, tlength)\n else:\n splitted.append(_create_split_tobject(tobject, start, stop, len(over)))\n return splitted\n else:\n return [(tobject, tid, start, end)]\n\n\ndef load_into_storage(shelf_name, exon_lines, min_length, logger, strip_cds=True, max_intron=3*10**5):\n\n \"\"\"Function to load the exon_lines dictionary into the temporary storage.\"\"\"\n\n if os.path.exists(shelf_name) or any(_.startswith(os.path.basename(shelf_name))\n for _ in os.listdir(os.path.dirname(shelf_name))):\n logger.error(\"Shelf %s already exists (maybe from a previous aborted run?), dropping its contents\", shelf_name)\n for _ in (_ for _ in os.listdir(os.path.dirname(shelf_name))\n if _.startswith(os.path.basename(shelf_name))):\n if os.path.exists(_):\n os.remove(_)\n\n shelf = open(shelf_name, \"wb\")\n\n rows = []\n logger.warning(\"Max intron: %s\", max_intron)\n for tid in exon_lines:\n if \"features\" not in exon_lines[tid]:\n raise KeyError(\"{0}: {1}\\n{2}\".format(tid, \"features\", exon_lines[tid]))\n if (\"exon\" not in exon_lines[tid][\"features\"] or\n len(exon_lines[tid][\"features\"][\"exon\"]) == 0):\n # Match-like things\n if \"match\" in exon_lines[tid][\"features\"]:\n if len(exon_lines[tid][\"features\"][\"match\"]) > 1:\n logger.warning(\"Invalid features for %s, skipping.\", tid)\n continue\n 
exon_lines[tid][\"features\"][\"exon\"] = [exon_lines[tid][\"features\"][\"match\"][0]]\n logger.warning(\"Inferring that %s is a mono-exonic transcript-match: (%s, %d-%d)\",\n tid, exon_lines[tid][\"chrom\"],\n exon_lines[tid][\"features\"][\"exon\"][0][0],\n exon_lines[tid][\"features\"][\"exon\"][0][1])\n del exon_lines[tid][\"features\"][\"match\"]\n elif (strip_cds is False and \"CDS\" in exon_lines[tid][\"features\"] and\n len(exon_lines[tid][\"features\"][\"CDS\"]) > 0):\n pass\n else:\n logger.warning(\"No valid exon feature for %s, continuing\", tid)\n continue\n elif \"match\" in exon_lines[tid][\"features\"] and \"exon\" in exon_lines[tid][\"features\"]:\n del exon_lines[tid][\"features\"][\"match\"]\n\n for values, tid, start, end in _evaluate_tid(tid, exon_lines[tid], logger,\n max_intron=max_intron,\n min_length=min_length):\n chrom = values[\"chrom\"]\n assert chrom is not None\n strand = values[\"strand\"]\n if strand is None:\n strand = \".\"\n\n logger.debug(\"Inserting %s into shelf %s\", tid, shelf_name)\n values = zlib.compress(msgpack.dumps(values))\n write_start = shelf.tell()\n write_length = shelf.write(values)\n row = (chrom.encode(), start, end, strand.encode(), tid.encode(), write_start, write_length)\n rows.append(row)\n\n logger.warning(\"Finished packing rows for %s\", shelf_name)\n return rows\n\n\ndef load_from_gff(shelf_name,\n gff_handle,\n label,\n found_ids,\n logger,\n min_length=0,\n max_intron=3*10**5,\n is_reference=False,\n exclude_redundant=False,\n strip_cds=False,\n strand_specific=False):\n \"\"\"\n Method to load the exon lines from GFF3 files.\n :param shelf_name: the name of the shelf DB to use.\n :param gff_handle: The handle for the GTF to be parsed.\n :param label: label to be attached to all transcripts.\n :type label: str\n :param found_ids: set of IDs already found in other files.\n :type found_ids: set\n :param logger: a logger to be used to pass messages\n :type logger: logging.Logger\n :param min_length: minimum length for a cDNA to be considered as valid\n :type min_length: int\n :param max_intron: maximum intron length for a cDNA to be considered as valid\n :type max_intron: int\n :param strip_cds: boolean flag. If true, all CDS lines will be ignored.\n :type strip_cds: bool\n :param strand_specific: whether the assembly is strand-specific or not.\n :type strand_specific: bool\n :param is_reference: boolean. If set to True, the transcript will always be retained.\n :type is_reference: bool\n :param exclude_redundant: boolean. 
If set to True, fully redundant transcripts will be removed.\n :type exclude_redundant: bool\n :return:\n \"\"\"\n\n exon_lines = dict()\n\n strip_cds = strip_cds and (not is_reference)\n if strand_specific is not True and is_reference is True:\n strand_specific = True\n\n transcript2genes = dict()\n new_ids = set()\n\n to_ignore = set()\n\n for row in gff_handle:\n if row.feature == \"protein\":\n continue\n elif row.is_transcript is True or row.feature == \"match\":\n if label != '':\n row.id = \"{0}_{1}\".format(label, row.id)\n row.source = label\n if row.id in found_ids:\n __raise_redundant(row.id, gff_handle.name, label)\n elif row.id in exon_lines:\n # This might sometimes happen in GMAP\n logger.warning(\n \"Multiple instance of %s found, skipping any subsequent entry\",\n row.id)\n to_ignore.add(row.id)\n continue\n #\n # if row.id not in exon_lines:\n exon_lines[row.id] = dict()\n exon_lines[row.id][\"source\"] = row.source\n if row.parent:\n transcript2genes[row.id] = row.parent[0]\n else:\n transcript2genes[row.id] = row.id\n assert row.id is not None\n if row.id in found_ids:\n __raise_redundant(row.id, gff_handle.name, label)\n\n exon_lines[row.id][\"attributes\"] = row.attributes.copy()\n exon_lines[row.id][\"chrom\"] = row.chrom\n exon_lines[row.id][\"strand\"] = row.strand\n exon_lines[row.id][\"tid\"] = row.transcript or row.id\n exon_lines[row.id][\"parent\"] = \"{}.gene\".format(row.id)\n exon_lines[row.id][\"features\"] = dict()\n # Here we have to add the match feature as an exon, in case it is the only one present\n if row.feature == \"match\":\n exon_lines[row.id][\"features\"][row.feature] = []\n exon_lines[row.id][\"features\"][row.feature].append((row.start, row.end, row.phase))\n\n exon_lines[row.id][\"strand_specific\"] = strand_specific\n exon_lines[row.id][\"is_reference\"] = is_reference\n exon_lines[row.id][\"exclude_redundant\"] = exclude_redundant\n continue\n elif row.is_exon is True:\n if not row.is_cds or (row.is_cds is True and strip_cds is False):\n if len(row.parent) == 0 and \"cDNA_match\" == row.feature:\n if label == '':\n __tid = row.id\n else:\n __tid = \"{0}_{1}\".format(label, row.id)\n row.parent = __tid\n transcript2genes[__tid] = \"{}_match\".format(__tid)\n row.feature = \"exon\"\n elif row.feature == \"match_part\":\n if label == '':\n __tid = row.parent[0]\n else:\n __tid = \"{0}_{1}\".format(label, row.parent[0])\n row.parent = __tid\n transcript2genes[__tid] = \"{}_match\".format(__tid)\n row.feature = \"exon\"\n\n elif label != '':\n row.transcript = [\"{0}_{1}\".format(label, tid) for tid in row.transcript]\n\n parents = row.transcript[:]\n for tid in parents:\n\n if tid in found_ids:\n __raise_redundant(tid, gff_handle.name, label)\n elif tid in to_ignore:\n continue\n if tid not in exon_lines and tid in transcript2genes:\n exon_lines[tid] = dict()\n exon_lines[tid][\"attributes\"] = row.attributes.copy()\n if label:\n exon_lines[tid][\"source\"] = label\n else:\n exon_lines[tid][\"source\"] = row.source\n exon_lines[tid][\"chrom\"] = row.chrom\n exon_lines[tid][\"strand\"] = row.strand\n exon_lines[tid][\"features\"] = dict()\n exon_lines[tid][\"tid\"] = tid\n exon_lines[tid][\"parent\"] = transcript2genes[tid]\n exon_lines[tid][\"strand_specific\"] = strand_specific\n exon_lines[tid][\"is_reference\"] = is_reference\n exon_lines[tid][\"exclude_redundant\"] = exclude_redundant\n elif tid not in exon_lines and tid not in transcript2genes:\n continue\n else:\n if \"exon_number\" in row.attributes:\n del 
row.attributes[\"exon_number\"]\n if (exon_lines[tid][\"chrom\"] != row.chrom or\n exon_lines[tid][\"strand\"] != row.strand):\n __raise_invalid(tid, gff_handle.name, label)\n exon_lines[tid][\"attributes\"].update(row.attributes)\n\n if row.feature not in exon_lines[tid][\"features\"]:\n exon_lines[tid][\"features\"][row.feature] = []\n exon_lines[tid][\"features\"][row.feature].append((row.start, row.end, row.phase))\n new_ids.add(tid)\n\n else:\n continue\n gff_handle.close()\n\n logger.info(\"Starting to load %s\", shelf_name)\n rows = load_into_storage(shelf_name, exon_lines,\n logger=logger, min_length=min_length, strip_cds=strip_cds, max_intron=max_intron)\n\n logger.info(\"Finished parsing %s\", gff_handle.name)\n return new_ids, rows\n\n\ndef load_from_gtf(shelf_name,\n gff_handle,\n label,\n found_ids,\n logger,\n min_length=0,\n max_intron=3*10**5,\n is_reference=False,\n exclude_redundant=False,\n strip_cds=False,\n strand_specific=False):\n \"\"\"\n Method to load the exon lines from GTF files.\n :param shelf_name: the name of the shelf DB to use.\n :param gff_handle: The handle for the GTF to be parsed.\n :param label: label to be attached to all transcripts.\n :type label: str\n :param found_ids: set of IDs already found in other files.\n :type found_ids: set\n :param logger: a logger to be used to pass messages\n :type logger: logging.Logger\n :param min_length: minimum length for a cDNA to be considered as valid\n :type min_length: int\n :param max_intron: maximum intron length for a cDNA to be considered as valid\n :type max_intron: int\n :param strip_cds: boolean flag. If true, all CDS lines will be ignored.\n :type strip_cds: bool\n :param strand_specific: whether the assembly is strand-specific or not.\n :type strand_specific: bool\n :param is_reference: boolean. If set to True, the transcript will always be retained.\n :type is_reference: bool\n :param exclude_redundant: boolean. 
If set to True, the transcript will be marked for potential redundancy removal.\n :type exclude_redundant: bool\n :return:\n \"\"\"\n\n exon_lines = dict()\n\n strip_cds = strip_cds and (not is_reference)\n strand_specific = strand_specific or is_reference\n\n # Reduce memory footprint\n [intern(_) for _ in [\"chrom\", \"features\", \"strand\", \"attributes\", \"tid\", \"parent\", \"attributes\"]]\n\n new_ids = set()\n to_ignore = set()\n for row in gff_handle:\n if row.is_transcript is True:\n if label != '':\n row.transcript = \"{0}_{1}\".format(label, row.transcript)\n if row.transcript in found_ids:\n __raise_redundant(row.transcript, gff_handle.name, label)\n if row.transcript in exon_lines:\n logger.warning(\n \"Multiple instance of %s found, skipping any subsequent entry\", row.id)\n to_ignore.add(row.id)\n continue\n # __raise_invalid(row.transcript, gff_handle.name, label)\n if row.transcript not in exon_lines:\n exon_lines[row.transcript] = dict()\n if label:\n exon_lines[row.transcript][\"source\"] = label\n else:\n exon_lines[row.transcript][\"source\"] = row.source\n\n exon_lines[row.transcript][\"features\"] = dict()\n exon_lines[row.transcript][\"chrom\"] = row.chrom\n exon_lines[row.transcript][\"strand\"] = row.strand\n exon_lines[row.transcript][\"attributes\"] = row.attributes.copy()\n exon_lines[row.transcript][\"tid\"] = row.id\n exon_lines[row.transcript][\"parent\"] = \"{}.gene\".format(row.id)\n exon_lines[row.transcript][\"strand_specific\"] = strand_specific\n exon_lines[row.transcript][\"is_reference\"] = is_reference\n exon_lines[row.transcript][\"exclude_redundant\"] = exclude_redundant\n if \"exon_number\" in exon_lines[row.transcript][\"attributes\"]:\n del exon_lines[row.transcript][\"attributes\"][\"exon_number\"]\n continue\n\n if row.is_exon is False or (row.is_cds is True and strip_cds is True):\n continue\n if label != '':\n row.transcript = \"{0}_{1}\".format(label, row.transcript)\n if row.transcript in found_ids:\n __raise_redundant(row.transcript, gff_handle.name, label)\n assert row.transcript is not None\n if row.transcript not in exon_lines:\n exon_lines[row.transcript] = dict()\n if label:\n exon_lines[row.transcript][\"source\"] = label\n else:\n exon_lines[row.transcript][\"source\"] = row.source\n exon_lines[row.transcript][\"features\"] = dict()\n exon_lines[row.transcript][\"chrom\"] = row.chrom\n exon_lines[row.transcript][\"strand\"] = row.strand\n exon_lines[row.transcript][\"exon\"] = []\n exon_lines[row.transcript][\"attributes\"] = row.attributes.copy()\n exon_lines[row.transcript][\"tid\"] = row.transcript\n exon_lines[row.transcript][\"parent\"] = \"{}.gene\".format(row.transcript)\n exon_lines[row.transcript][\"strand_specific\"] = strand_specific\n exon_lines[row.transcript][\"is_reference\"] = is_reference\n exon_lines[row.transcript][\"exclude_redundant\"] = exclude_redundant\n else:\n if row.transcript in to_ignore:\n continue\n if \"exon_number\" in row.attributes:\n del row.attributes[\"exon_number\"]\n if (\"chrom\" not in exon_lines[row.transcript] or\n exon_lines[row.transcript][\"chrom\"] != row.chrom or\n exon_lines[row.transcript][\"strand\"] != row.strand):\n __raise_invalid(row.transcript, gff_handle.name, label)\n exon_lines[row.transcript][\"attributes\"].update(row.attributes)\n if row.feature not in exon_lines[row.transcript][\"features\"]:\n exon_lines[row.transcript][\"features\"][row.feature] = []\n exon_lines[row.transcript][\"features\"][row.feature].append((row.start, row.end, row.phase))\n 
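# Feature coordinates are accumulated as (start, end, phase) tuples\n # keyed by feature type; load_into_storage later merges and\n # range-checks these segments before packing each record into the\n # shelf.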
new_ids.add(row.transcript)\n gff_handle.close()\n logger.info(\"Starting to load %s\", shelf_name)\n rows = load_into_storage(shelf_name,\n exon_lines,\n logger=logger, min_length=min_length, strip_cds=strip_cds, max_intron=max_intron)\n\n logger.info(\"Finished parsing %s\", gff_handle.name)\n return new_ids, rows\n\n\ndef load_from_bed12(shelf_name,\n gff_handle,\n label,\n found_ids,\n logger,\n min_length=0,\n max_intron=3*10**5,\n is_reference=False,\n exclude_redundant=False,\n strip_cds=False,\n strand_specific=False):\n \"\"\"\n Method to load the exon lines from GTF files.\n :param shelf_name: the name of the shelf DB to use.\n :param gff_handle: The handle for the GTF to be parsed.\n :param label: label to be attached to all transcripts.\n :type label: str\n :param found_ids: set of IDs already found in other files.\n :type found_ids: set\n :param logger: a logger to be used to pass messages\n :type logger: logging.Logger\n :param min_length: minimum length for a cDNA to be considered as valid\n :type min_length: int\n :param max_intron: maximum intron length for a cDNA to be considered as valid\n :type max_intron: int\n :param strip_cds: boolean flag. If true, all CDS lines will be ignored.\n :type strip_cds: bool\n :param strand_specific: whether the assembly is strand-specific or not.\n :type strand_specific: bool\n :param is_reference: boolean. If set to True, the transcript will always be retained.\n :type is_reference: bool\n :param exclude_redundant: boolean. If set to True, the transcript will be marked for potential redundancy removal.\n :type exclude_redundant: bool\n :return:\n \"\"\"\n\n exon_lines = dict()\n\n strip_cds = strip_cds and (not is_reference)\n strand_specific = strand_specific or is_reference\n\n # Reduce memory footprint\n [intern(_) for _ in [\"chrom\", \"features\", \"strand\", \"attributes\", \"tid\", \"parent\", \"attributes\"]]\n\n new_ids = set()\n to_ignore = set()\n for row in gff_handle:\n # Each row is a transcript\n transcript = Transcript(row)\n if label != '':\n transcript.id = \"{0}_{1}\".format(label, transcript.id)\n if transcript.id in found_ids:\n __raise_redundant(transcript.id, gff_handle.name, label)\n if transcript.id in exon_lines:\n logger.warning(\n \"Multiple instance of %s found, skipping any subsequent entry\", row.id)\n to_ignore.add(row.id)\n continue\n else:\n exon_lines[transcript.id] = dict()\n if label:\n exon_lines[transcript.id][\"source\"] = label\n else:\n exon_lines[transcript.id][\"source\"] = gff_handle.name # BED12 files have no source\n exon_lines[transcript.id][\"features\"] = dict()\n exon_lines[transcript.id][\"chrom\"] = transcript.chrom\n exon_lines[transcript.id][\"strand\"] = transcript.strand\n # Should deal with GFFRead style input and BAM\n exon_lines[transcript.id][\"attributes\"] = transcript.attributes\n exon_lines[transcript.id][\"tid\"] = transcript.id\n exon_lines[transcript.id][\"parent\"] = \"{}.gene\".format(transcript.id)\n exon_lines[transcript.id][\"strand_specific\"] = strand_specific\n exon_lines[transcript.id][\"is_reference\"] = is_reference\n exon_lines[transcript.id][\"exclude_redundant\"] = exclude_redundant\n exon_lines[transcript.id][\"features\"][\"exon\"] = [\n (exon[0], exon[1]) for exon in transcript.exons\n ]\n if transcript.is_coding and not strip_cds:\n exon_lines[transcript.id][\"features\"]['CDS'] = [\n (exon[0], exon[1]) for exon in transcript.combined_cds\n ]\n exon_lines[transcript.id][\"features\"][\"UTR\"] = [\n (exon[0], exon[1]) for exon in transcript.five_utr + 
transcript.three_utr\n ]\n new_ids.add(transcript.id)\n gff_handle.close()\n rows = load_into_storage(shelf_name, exon_lines,\n logger=logger, min_length=min_length, strip_cds=strip_cds, max_intron=max_intron)\n\n logger.info(\"Finished parsing %s\", gff_handle.name)\n return new_ids, rows\n\n\ndef load_from_bam(shelf_name: str,\n gff_handle: BamParser,\n label: str,\n found_ids: set,\n logger: logging.Logger,\n min_length=0,\n max_intron=3*10**5,\n is_reference=False,\n exclude_redundant=False,\n strip_cds=False,\n strand_specific=False):\n \"\"\"\n Method to load the exon lines from BAM files.\n :param shelf_name: the name of the shelf DB to use.\n :param gff_handle: The handle for the BAM to be parsed. This handle is BamParser with a file attached to read from.\n :param label: label to be attached to all transcripts.\n :type label: str\n :param found_ids: set of IDs already found in other files.\n :type found_ids: set\n :param logger: a logger to report any messages\n :type logger: logging.Logger\n :param min_length: minimum length for a cDNA to be considered as valid\n :type min_length: int\n :param max_intron: maximum intron length for a cDNA to be considered as valid\n :type max_intron: int\n :param strip_cds: boolean flag. If true, all CDS lines will be ignored.\n :type strip_cds: bool\n :param strand_specific: whether the input data is strand-specific or not.\n :type strand_specific: bool\n :param is_reference: boolean. If set to True, the transcript will always be retained.\n :type is_reference: bool\n :param exclude_redundant: boolean. If set to True, the transcript will be marked for potential redundancy removal.\n :type exclude_redundant: bool\n :return:\n \"\"\"\n return load_from_bed12(shelf_name, gff_handle, label, found_ids, logger,\n min_length=min_length, max_intron=max_intron,\n is_reference=is_reference,exclude_redundant=exclude_redundant,\n strip_cds=strip_cds, strand_specific=strand_specific)\n\n\nloaders = {\"gtf\": load_from_gtf, \"gff\": load_from_gff, \"gff3\": load_from_gff,\n \"bed12\": load_from_bed12, \"bed\": load_from_bed12, \"bam\": load_from_bam}\n\n# Chrom, start, end, strand, Tid, write start, write length\n# 100 chars, unsigned Long, unsigned Long, one char, 100 chars, unsigned Long, unsigned Long\n_row_struct_str = \">1000sLLc1000sLLH\"\nrow_struct = struct.Struct(_row_struct_str)\nrow_struct_size = struct.calcsize(_row_struct_str)\n\n\nclass AnnotationParser(multiprocessing.Process):\n\n def __init__(self,\n submission_queue: multiprocessing.JoinableQueue,\n return_queue: multiprocessing.JoinableQueue,\n logging_queue: multiprocessing.JoinableQueue,\n identifier: int,\n min_length=0,\n max_intron=3*10**5,\n log_level=\"WARNING\",\n seed=None,\n strip_cds=False):\n\n super().__init__()\n if seed is not None:\n # numpy.random.seed(seed % (2 ** 32 - 1))\n random.seed(seed % (2 ** 32 - 1))\n else:\n # numpy.random.seed(None)\n random.seed(None)\n\n self.submission_queue = submission_queue\n self.return_queue = return_queue\n self.min_length = min_length\n self.max_intron = max_intron\n self.__strip_cds = strip_cds\n self.logging_queue = logging_queue\n self.log_level = log_level\n self.__identifier = identifier\n self.name = \"AnnotationParser-{0}\".format(self.identifier)\n self.logger = None\n self.handler = None\n self.logger = logging.getLogger(self.name)\n create_queue_logger(self, prefix=\"prepare\")\n # self.logger.warning(\"Started process %s\", self.name)\n\n def __getstate__(self):\n\n state = self.__dict__.copy()\n for key in (\"logger\", 
\"handler\", \"_log_handler\"):\n if key in state:\n del state[key]\n\n return state\n\n def __setstate__(self, state):\n\n self.__dict__.update(state)\n create_queue_logger(self)\n\n def run(self):\n\n found_ids = set()\n self.logger.debug(\"Starting to listen to the queue\")\n counter = 0\n while True:\n results = self.submission_queue.get()\n try:\n label, handle, strand_specific, is_reference,\\\n exclude_redundant, file_strip_cds, shelf_name, shelf_index = results\n except ValueError as exc:\n raise ValueError(\"{}.\\tValues: {}\".format(exc, \", \".join([str(_) for _ in results])))\n if handle == \"EXIT\":\n self.submission_queue.put(results)\n break\n counter += 1\n self.logger.debug(\"Received %s (label: %s; SS: %s, shelf_name: %s)\",\n handle,\n label,\n strand_specific,\n shelf_name)\n try:\n gff_handle = parser_factory(handle)\n loader = loaders.get(gff_handle.__annot_type__, None)\n if loader is None:\n raise ValueError(\"Invalid file type: {}\".format(gff_handle.name))\n if file_strip_cds is True:\n file_strip_cds = True\n else:\n file_strip_cds = self.__strip_cds\n\n new_ids, new_rows = loader(shelf_name, gff_handle, label, found_ids, self.logger,\n min_length=self.min_length, max_intron=self.max_intron,\n strip_cds=file_strip_cds and not is_reference,\n is_reference=is_reference, exclude_redundant=exclude_redundant,\n strand_specific=strand_specific)\n\n if len(new_ids) == 0:\n raise exceptions.InvalidAssembly(\n \"No valid transcripts found in {0}{1}!\".format(\n handle, \" (label: {0})\".format(label) if label != \"\" else \"\"\n ))\n # Now convert the rows into structs.\n self.logger.debug(\"Packing %d rows of %s\", len(new_rows), label)\n [self.return_queue.put_nowait((*row, shelf_index)) for row in new_rows]\n self.logger.debug(\"Packed %d rows of %s\", len(new_rows), label)\n\n except (exceptions.InvalidAssembly, exceptions.InvalidParsingFormat) as exc:\n self.logger.exception(\"Invalid file: %s. 
Skipping it\", handle)\n self.logger.exception(exc)\n load_into_storage(shelf_name, [], self.min_length, self.logger, strip_cds=True,\n max_intron=3 * 10 ** 5)\n [self.return_queue.put_nowait((*row, shelf_index)) for row in []]\n continue\n except Exception as exc:\n self.logger.exception(exc)\n raise\n\n self.return_queue.put_nowait(\"FINISHED\")\n\n @property\n def identifier(self):\n \"\"\"\n A numeric value that identifies the process uniquely.\n :return:\n \"\"\"\n return self.__identifier\n"},"license":{"kind":"string","value":"lgpl-3.0"},"hash":{"kind":"number","value":-6226314293436030000,"string":"-6,226,314,293,436,030,000"},"line_mean":{"kind":"number","value":42.8867684478,"string":"42.886768"},"line_max":{"kind":"number","value":119,"string":"119"},"alpha_frac":{"kind":"number","value":0.5528917234,"string":"0.552892"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109525,"cells":{"repo_name":{"kind":"string","value":"oduwsdl/ipwb"},"path":{"kind":"string","value":"ipwb/backends.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2716"},"content":{"kind":"string","value":"import dataclasses\nfrom typing import Optional\nfrom urllib.parse import urlparse\n\nimport ipfshttpclient\nimport requests\n\nfrom ipwb import util\n\n\n@dataclasses.dataclass(frozen=True)\nclass BackendError(Exception):\n backend_name: str\n\n def __str__(self):\n return 'Cannot load index file from {self.backend_name}.'.format(\n self=self,\n )\n\n\ndef format_ipfs_cid(path: str) -> Optional[str]:\n \"\"\"Format IPFS CID properly.\"\"\"\n if path.startswith('Qm'):\n return path\n\n elif path.startswith('ipfs://'):\n return path.replace('ipfs://', '')\n\n\ndef fetch_ipfs_index(path: str) -> Optional[str]:\n \"\"\"Fetch CDXJ file content from IPFS by hash.\"\"\"\n ipfs_hash = format_ipfs_cid(path)\n\n if ipfs_hash is None:\n return None\n\n try:\n with ipfshttpclient.connect(util.IPFSAPI_MUTLIADDRESS) as client:\n return client.cat(path).decode('utf-8')\n\n except ipfshttpclient.exceptions.StatusError as err:\n raise BackendError(backend_name='ipfs') from err\n\n\ndef fetch_web_index(path: str) -> Optional[str]:\n \"\"\"Fetch CDXJ file content from a URL.\"\"\"\n scheme = urlparse(path).scheme\n\n if not scheme:\n return None\n\n try:\n return requests.get(path).text\n\n except (\n requests.ConnectionError,\n requests.HTTPError,\n ) as err:\n raise BackendError(backend_name='web') from err\n\n\ndef fetch_local_index(path: str) -> str:\n \"\"\"Fetch CDXJ index contents from a file on local disk.\"\"\"\n with open(path, 'r') as f:\n return f.read()\n\n\ndef get_web_archive_index(path: str) -> str:\n \"\"\"\n Based on path, choose appropriate backend and fetch the file contents.\n \"\"\"\n\n # TODO right now, every backend is just a function which returns contents\n # of a CDXJ file as string. 
In the future, however, backends will be\n # probably represented as classes with much more sophisticated methods\n # of manipulating the archive index records.\n # TODO also, it will be possible to choose a backend and configure it;\n # whereas right now we choose a backend automatically based on the given\n # path itself.\n\n # Maybe it is an IPFS address?\n response = fetch_ipfs_index(path)\n if response is not None:\n return response\n\n # Or a traditional Web address?\n response = fetch_web_index(path)\n if response is not None:\n return response\n\n # Okay, this is probably a file on local disk\n response = fetch_local_index(path)\n if response is not None:\n return response\n\n raise ValueError((\n f'Unknown format of index file location: {path}. Please provide '\n f'a valid local path, HTTP or FTP URL, or an IPFS QmHash.'\n ))\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":4986274070217820000,"string":"4,986,274,070,217,820,000"},"line_mean":{"kind":"number","value":26.4343434343,"string":"26.434343"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.6561119293,"string":"0.656112"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109526,"cells":{"repo_name":{"kind":"string","value":"mzmttks/miteteyo"},"path":{"kind":"string","value":"server/app.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1209"},"content":{"kind":"string","value":"from flask import jsonify, Flask, request\nfrom pymongo import MongoClient\nimport os\nimport json\nimport pprint\nimport traceback\napp = Flask(__name__)\n\nclient = MongoClient(os.environ[\"MONGOLAB_URI\"])\ndb = client[\"heroku_gw4w78g9\"]\ncol = db[\"locations\"]\nprint col\n\n@app.route('/location', methods=[\"POST\"])\ndef addLocation():\n try:\n data = request.json\n except Exception as e:\n ret = jsonify({\"msg\": \"JSON parsing failed\"})\n ret.status_code = 400\n return ret\n\n keys = [\"latitude\", \"longitude\", \"userid\", \"utcTime\"]\n for key in keys:\n if key not in data.keys():\n ret = jsonify({\"msg\": \"Mandatory key %s is not found\" % key})\n ret.status_code = 400\n return ret\n\n col.insert_one(request.json)\n return \"ok\" \n\n@app.route('/userid')\ndef getUserid():\n userids = col.distinct(\"userid\")\n return jsonify({\"userids\": userids})\n\n@app.route('/userid/')\ndef getLocations(userid):\n locs = []\n for d in col.find({\"userid\": userid}):\n del d[\"_id\"]\n locs.append(d)\n return jsonify({\"locations\": locs})\n\n@app.route('/')\ndef hello_world():\n locs = [d for d in col.find({})]\n return \"
\" + pprint.pformat(locs) + \"
\"\n\nif __name__ == '__main__':\n app.run(debug=True)\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":6914129593433982000,"string":"6,914,129,593,433,982,000"},"line_mean":{"kind":"number","value":22.25,"string":"22.25"},"line_max":{"kind":"number","value":67,"string":"67"},"alpha_frac":{"kind":"number","value":0.6401985112,"string":"0.640199"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109527,"cells":{"repo_name":{"kind":"string","value":"zmlabe/IceVarFigs"},"path":{"kind":"string","value":"Scripts/SeaIce/plot_AMSR2_SIC_region.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"6969"},"content":{"kind":"string","value":"\"\"\"\nPlots JAXA AMSR2 3.125 km (UHH-Processed) Sea Ice Concentration Data\n \nSource : http://osisaf.met.no/p/ice/\nAuthor : Zachary Labe\nDate : 27 February 2017\n\"\"\"\n\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport urllib.request as UL\nimport numpy as np\nimport datetime\nimport calendar as cal\nimport gzip\nimport nclcmaps as ncm\nimport math\nimport cmocean\n\n### Directory and time\ndirectory = './Data/'\ndirectoryfigure = './Figures/'\n\nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\nif now.day == 1:\n currentdy = str(cal.monthrange(now.year,now.month-1)[1])\n currentmn = str(now.month-1)\nelse:\n currentdy = str(now.day-1)\nif int(currentdy) < 10:\n currentdy = '0' + currentdy\n \ncurrentyr = str(now.year)\n\nif int(currentmn) < 10:\n currentmn = '0' + currentmn\n\ncurrenttime = currentmn + '_' + str(currentdy) + '_' + currentyr\ntitletime = currentmn + '/' + str(currentdy) + '/' + currentyr\n\nprint('\\n' 'Current Time = %s' '\\n' % titletime)\n\nfor i in range(24,25):\n currentdy = str(i+1)\n currentmn = '03'\n if int(currentdy) < 10:\n currentdy = '0' + currentdy\n\n currentyr = '2018'\n currenttime = currentmn + '_' + str(currentdy) + '_' + currentyr\n titletime = currentmn + '/' + str(currentdy) + '/' + currentyr\n\n ### Pick data set\n icedataset = 'AMSR2'\n \n if icedataset == 'AMSR2':\n \n url = 'ftp://ftp-projects.cen.uni-hamburg.de/seaice/AMSR2/3.125km/'\n filename = 'Arc_%s%s%s_res3.125_pyres.nc.gz' % (currentyr,currentmn,currentdy)\n filenameout = 'Arc_AMSR2_SIC.nc'\n UL.urlretrieve(url + filename, directory + filename)\n inF = gzip.open(directory + filename, 'rb')\n outF = open(directory + filenameout, 'wb')\n outF.write( inF.read() )\n inF.close()\n outF.close()\n \n data = Dataset(directory + filenameout)\n ice = data.variables['sea_ice_concentration'][:]\n lat = data.variables['latitude'][:] \n lon = data.variables['longitude'][:]\n data.close()\n \n ice = np.asarray(np.squeeze(ice/100.))\n \n print('Completed: Data read!')\n \n ice[np.where(ice <= 0.15)] = np.nan\n ice[np.where((ice >= 0.999) & (ice <= 1))] = 0.999\n ice[np.where(ice > 1)] = np.nan\n ice = ice*100.\n \n print('Completed: Ice masked!')\n \n plt.rc('text',usetex=True)\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n plt.rc('savefig',facecolor='black')\n plt.rc('axes',edgecolor='white')\n plt.rc('xtick',color='white')\n plt.rc('ytick',color='white')\n plt.rc('axes',labelcolor='white')\n plt.rc('axes',facecolor='black')\n \n def setcolor(x, color):\n for m in x:\n for t in x[m][1]:\n t.set_color(color)\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n ### Enter lat/lon\n region = 'bering'\n \n if region == 'kara':\n # Kara Sea\n latmin = 67\n latmax = 87\n lonmin = 20\n lonmax = 90\n 
\n elif region == 'beaufort':\n # Beaufort Sea\n latmin = 64\n latmax = 87\n lonmin = 180\n lonmax = 240\n \n elif region == 'bering':\n # Bering/Chukchi Sea/Okhotsk \n latmin = 56\n latmax = 75\n lonmin = 166\n lonmax = 210\n \n elif region == 'greenland':\n # Greenland\n latmin = 55\n latmax = 89.5\n lonmin = 280\n lonmax = 395\n \n elif region == 'pacific':\n # Central Arctic\n latmin = 69\n latmax = 89.99\n lonmin = 160\n lonmax = 250 \n \n elif region == 'svalbard':\n latmin = 73\n latmax = 86\n lonmin = 340\n lonmax = 420\n \n elif region == 'GreenlandSea':\n latmin = 74\n latmax = 88\n lonmin = 330\n lonmax = 410\n \n else:\n ValueError('Wrong region listed!')\n \n def polar_stere(lon_w, lon_e, lat_s, lat_n, **kwargs):\n '''Returns a Basemap object (NPS/SPS) focused in a region.\n \n lon_w, lon_e, lat_s, lat_n -- Graphic limits in geographical coordinates.\n W and S directions are negative.\n **kwargs -- Aditional arguments for Basemap object.\n \n '''\n lon_0 = lon_w + (lon_e - lon_w) / 2.\n ref = lat_s if abs(lat_s) > abs(lat_n) else lat_n\n lat_0 = math.copysign(90., ref)\n proj = 'npstere' if lat_0 > 0 else 'spstere'\n prj = Basemap(projection=proj, lon_0=lon_0, lat_0=lat_0,\n boundinglat=0, resolution='l')\n #prj = pyproj.Proj(proj='stere', lon_0=lon_0, lat_0=lat_0)\n lons = [lon_w, lon_e, lon_w, lon_e, lon_0, lon_0]\n lats = [lat_s, lat_s, lat_n, lat_n, lat_s, lat_n]\n x, y = prj(lons, lats)\n ll_lon, ll_lat = prj(min(x), min(y), inverse=True)\n ur_lon, ur_lat = prj(max(x), max(y), inverse=True)\n return Basemap(projection='stere', lat_0=lat_0, lon_0=lon_0,\n llcrnrlon=ll_lon, llcrnrlat=ll_lat,\n urcrnrlon=ur_lon, urcrnrlat=ur_lat, round=True,\n resolution='l')\n \n m = polar_stere(lonmin,lonmax,latmin,latmax)\n m.drawcoastlines(color = 'r',linewidth=1.4)\n m.drawmapboundary(color='k')\n m.drawlsmask(land_color='k',ocean_color='k')\n \n cs = m.contourf(lon,lat,ice[:,:],np.arange(20,100.01,1),extend='min',latlon=True)\n \n cmap = ncm.cmap('MPL_YlGnBu') \n cmap = cmocean.cm.ice \n cs.set_cmap(cmap)\n \n m.fillcontinents(color='k')\n \n cbar = m.colorbar(cs,location='right',pad = 0.2)\n cbar.outline.set_edgecolor('k')\n barlim = np.arange(20,101,10)\n cbar.set_ticks(barlim)\n cbar.set_ticklabels(list(map(str,barlim)))\n cbar.set_label(r'\\textbf{Concentration (\\%)}',fontsize=13,\n alpha=0.6)\n cbar.ax.tick_params(axis='y', size=.01)\n \n fig.suptitle(r'\\textbf{ARCTIC SEA ICE -- %s}' % titletime,\n fontsize=22,color='white',alpha=0.6)\n \n plt.annotate(r'\\textbf{DATA:} AMSR2 3.125 km (JAXA/Uni Hamburg-Processing)',xy=(250,100),\n xycoords='figure pixels',color='white',fontsize=6,\n alpha=0.7,rotation=0) \n plt.annotate(r'\\textbf{SOURCE:} http://icdc.cen.uni-hamburg.de/daten/cryosphere.html',xy=(250,80),\n xycoords='figure pixels',color='white',fontsize=6,\n alpha=0.7,rotation=0) \n plt.annotate(r'\\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',xy=(250,60),\n xycoords='figure pixels',color='white',fontsize=6,\n alpha=0.7,rotation=0)\n \n fig.subplots_adjust(top=0.89)\n \n print('Completed: Figure plotted!')\n plt.savefig(directoryfigure + 'seaiceconc_%s_%s.png' % (region,currenttime), dpi=300)\n \nprint('Completed: Script 
done!')"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":9191539575763977000,"string":"9,191,539,575,763,977,000"},"line_mean":{"kind":"number","value":30.3963963964,"string":"30.396396"},"line_max":{"kind":"number","value":102,"string":"102"},"alpha_frac":{"kind":"number","value":0.5543119529,"string":"0.554312"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109528,"cells":{"repo_name":{"kind":"string","value":"e-lin/LeetCode"},"path":{"kind":"string","value":"24-swap-nodes-in-pairs/24-swap-nodes-in-pairs.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2435"},"content":{"kind":"string","value":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def swapPairs(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n odds = self.connOdds(head)\n evens = self.connEvens(head)\n\n dummyHead = ListNode(0)\n dummyPtr = dummyHead\n p1 = odds\n p2 = evens\n\n while p1 is not None and p2 is not None:\n dummyPtr.next = ListNode(p2.val)\n dummyPtr = dummyPtr.next\n p2 = p2.next\n dummyPtr.next = ListNode(p1.val)\n dummyPtr = dummyPtr.next\n p1 = p1.next\n\n while p1 is not None:\n dummyPtr.next = ListNode(p1.val)\n dummyPtr = dummyPtr.next\n p1 = p1.next\n\n while p2 is not None:\n dummyPtr.next = ListNode(p2.val)\n dummyPtr = dummyPtr.next\n p2 = p2.next\n\n # printNode(dummyHead.next)\n return dummyHead.next\n\n def connOdds(self, head):\n if head is None:\n return None\n\n ptr = head\n dummyOdds = ListNode(0)\n dummyPtr = dummyOdds\n while ptr is not None:\n dummyPtr.next = ListNode(ptr.val)\n dummyPtr = dummyPtr.next\n\n if ptr.next is not None and ptr.next.next is not None:\n ptr = ptr.next.next\n else:\n ptr = None\n # printNode(dummyOdds.next)\n return dummyOdds.next\n\n def connEvens(self, head):\n if head is None or head.next is None:\n return None\n\n ptr = head.next\n dummyEvens = ListNode(0)\n dummyPtr = dummyEvens\n while ptr is not None:\n dummyPtr.next = ListNode(ptr.val)\n dummyPtr = dummyPtr.next\n\n if ptr.next is not None and ptr.next.next is not None:\n ptr = ptr.next.next\n else:\n ptr = None\n # printNode(dummyEvens.next)\n return dummyEvens.next\n\n\ndef printNode(node):\n ptr = node\n while ptr is not None:\n print ptr.val\n ptr = ptr.next\n\ndef main():\n node = ListNode(1)\n # node.next = ListNode(2)\n # node.next.next = ListNode(3)\n # node.next.next.next = ListNode(4)\n\n solution = Solution()\n result = solution.swapPairs(node)\n\n printNode(result)\n\n\nif __name__ == '__main__':\n main()"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":2848055936710345700,"string":"2,848,055,936,710,345,700"},"line_mean":{"kind":"number","value":23.8571428571,"string":"23.857143"},"line_max":{"kind":"number","value":66,"string":"66"},"alpha_frac":{"kind":"number","value":0.5338809035,"string":"0.533881"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109529,"cells":{"repo_name":{"kind":"string","value":"InsightSoftwareConsortium/ITKExamples"},"path":{"kind":"string","value":"src/Filtering/AntiAlias/SmoothBinaryImageBeforeSurfaceExtraction/Code.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1638"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# Copyright NumFOCUS\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance 
with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0.txt\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport itk\n\nif len(sys.argv) != 6:\n print(\n \"Usage: \" + sys.argv[0] + \" \"\n \" \"\n )\n sys.exit(1)\n\ninputImage = sys.argv[1]\noutputImage = sys.argv[2]\nmaximumRMSError = float(sys.argv[3])\nnumberOfIterations = int(sys.argv[4])\nnumberOfLayers = int(sys.argv[5])\n\nPixelType = itk.F\nDimension = 2\nImageType = itk.Image[PixelType, Dimension]\n\nReaderType = itk.ImageFileReader[ImageType]\nreader = ReaderType.New()\nreader.SetFileName(inputImage)\n\nAntiAliasFilterType = itk.AntiAliasBinaryImageFilter[ImageType, ImageType]\nantialiasfilter = AntiAliasFilterType.New()\nantialiasfilter.SetInput(reader.GetOutput())\nantialiasfilter.SetMaximumRMSError(maximumRMSError)\nantialiasfilter.SetNumberOfIterations(numberOfIterations)\nantialiasfilter.SetNumberOfLayers(numberOfLayers)\n\nWriterType = itk.ImageFileWriter[ImageType]\nwriter = WriterType.New()\nwriter.SetFileName(outputImage)\nwriter.SetInput(antialiasfilter.GetOutput())\n\nwriter.Update()\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-1858956442300015900,"string":"-1,858,956,442,300,015,900"},"line_mean":{"kind":"number","value":29.9056603774,"string":"29.90566"},"line_max":{"kind":"number","value":74,"string":"74"},"alpha_frac":{"kind":"number","value":0.7686202686,"string":"0.76862"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109530,"cells":{"repo_name":{"kind":"string","value":"tensorflow/federated"},"path":{"kind":"string","value":"tensorflow_federated/python/tensorflow_libs/graph_merge_test.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"19532"},"content":{"kind":"string","value":"# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.tensorflow_libs import graph_merge\nfrom tensorflow_federated.python.tensorflow_libs import graph_spec\n\n\ndef _make_add_one_graph():\n with tf.Graph().as_default() as graph:\n input_val = tf.compat.v1.placeholder(tf.float32, name='input')\n const = tf.constant(1.0)\n out = tf.add(input_val, const)\n return graph, input_val.name, out.name\n\n\ndef _make_add_variable_number_graph(var_name=None):\n with tf.Graph().as_default() as graph:\n input_val = tf.compat.v1.placeholder(tf.float32, name='input')\n var = tf.Variable(initial_value=0.0, name=var_name, import_scope='')\n assign_op = var.assign_add(tf.constant(1.0))\n out = tf.add(input_val, assign_op)\n return graph, input_val.name, out.name\n\n\ndef 
_make_dataset_constructing_graph():\n with tf.Graph().as_default() as graph:\n d1 = tf.data.Dataset.range(5)\n v1 = tf.data.experimental.to_variant(d1)\n return graph, '', v1.name\n\n\ndef _make_manual_reduce_graph(dataset_construction_graph, return_element):\n with tf.Graph().as_default() as graph:\n v1 = tf.import_graph_def(\n dataset_construction_graph.as_graph_def(),\n return_elements=[return_element])[0]\n structure = tf.TensorSpec([], tf.int64)\n ds1 = tf.data.experimental.from_variant(v1, structure=structure)\n out = ds1.reduce(tf.constant(0, dtype=tf.int64), lambda x, y: x + y)\n return graph, '', out.name\n\n\nclass ConcatenateInputsAndOutputsTest(tf.test.TestCase):\n\n def test_raises_on_none(self):\n with self.assertRaises(TypeError):\n graph_merge.concatenate_inputs_and_outputs(None)\n\n def test_raises_on_non_iterable(self):\n with self.assertRaises(TypeError):\n graph_merge.concatenate_inputs_and_outputs(1)\n\n def test_concatenate_inputs_and_outputs_two_add_one_graphs(self):\n graph1, input_name_1, output_name_1 = _make_add_one_graph()\n graph2, input_name_2, output_name_2 = _make_add_one_graph()\n with graph1.as_default():\n init_op_name_1 = tf.compat.v1.global_variables_initializer().name\n with graph2.as_default():\n init_op_name_2 = tf.compat.v1.global_variables_initializer().name\n graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1,\n [input_name_1], [output_name_1])\n graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2,\n [input_name_2], [output_name_2])\n arg_list = [graph_spec_1, graph_spec_2]\n merged_graph, init_op_name, in_name_maps, out_name_maps = graph_merge.concatenate_inputs_and_outputs(\n arg_list)\n\n with merged_graph.as_default():\n with tf.compat.v1.Session() as sess:\n sess.run(init_op_name)\n outputs = sess.run(\n [out_name_maps[0][output_name_1], out_name_maps[1][output_name_2]],\n feed_dict={\n in_name_maps[0][input_name_1]: 1.0,\n in_name_maps[1][input_name_2]: 2.0\n })\n\n self.assertAllClose(outputs, np.array([2., 3.]))\n\n def test_concatenate_inputs_and_outputs_three_add_one_graphs(self):\n graph1, input_name_1, output_name_1 = _make_add_one_graph()\n graph2, input_name_2, output_name_2 = _make_add_one_graph()\n graph3, input_name_3, output_name_3 = _make_add_one_graph()\n with graph1.as_default():\n init_op_name_1 = tf.compat.v1.global_variables_initializer().name\n with graph2.as_default():\n init_op_name_2 = tf.compat.v1.global_variables_initializer().name\n with graph3.as_default():\n init_op_name_3 = tf.compat.v1.global_variables_initializer().name\n graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1,\n [input_name_1], [output_name_1])\n graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2,\n [input_name_2], [output_name_2])\n graph_spec_3 = graph_spec.GraphSpec(graph3.as_graph_def(), init_op_name_3,\n [input_name_3], [output_name_3])\n arg_list = [graph_spec_1, graph_spec_2, graph_spec_3]\n merged_graph, init_op_name, in_name_maps, out_name_maps = graph_merge.concatenate_inputs_and_outputs(\n arg_list)\n\n with merged_graph.as_default():\n with tf.compat.v1.Session() as sess:\n sess.run(init_op_name)\n outputs = sess.run(\n [\n out_name_maps[0][output_name_1],\n out_name_maps[1][output_name_2], out_name_maps[2][output_name_3]\n ],\n feed_dict={\n in_name_maps[0][input_name_1]: 1.0,\n in_name_maps[1][input_name_2]: 2.0,\n in_name_maps[2][input_name_3]: 3.0\n })\n\n self.assertAllClose(outputs, np.array([2., 3., 4.]))\n\n def 

  def test_concatenate_inputs_and_outputs_no_arg_graphs(self):
    graph1 = tf.Graph()
    with graph1.as_default():
      out1 = tf.constant(1.0)
      init_op_name_1 = tf.compat.v1.global_variables_initializer().name
    graph2 = tf.Graph()
    with graph2.as_default():
      out2 = tf.constant(2.0)
      init_op_name_2 = tf.compat.v1.global_variables_initializer().name

    graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1,
                                        [], [out1.name])
    graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2,
                                        [], [out2.name])
    arg_list = [graph_spec_1, graph_spec_2]
    merged_graph, init_op_name, _, out_name_maps = graph_merge.concatenate_inputs_and_outputs(
        arg_list)

    with merged_graph.as_default():
      with tf.compat.v1.Session() as sess:
        sess.run(init_op_name)
        outputs = sess.run(
            [out_name_maps[0][out1.name], out_name_maps[1][out2.name]])

    self.assertAllClose(outputs, np.array([1., 2.]))

  def test_concatenate_inputs_and_outputs_no_init_op_graphs(self):
    graph1, input_name_1, output_name_1 = _make_add_one_graph()
    graph2, input_name_2, output_name_2 = _make_add_one_graph()
    graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), None,
                                        [input_name_1], [output_name_1])
    graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), None,
                                        [input_name_2], [output_name_2])
    arg_list = [graph_spec_1, graph_spec_2]
    merged_graph, init_op_name, in_name_maps, out_name_maps = graph_merge.concatenate_inputs_and_outputs(
        arg_list)

    with merged_graph.as_default():
      with tf.compat.v1.Session() as sess:
        sess.run(init_op_name)
        outputs = sess.run(
            [out_name_maps[0][output_name_1], out_name_maps[1][output_name_2]],
            feed_dict={
                in_name_maps[0][input_name_1]: 1.0,
                in_name_maps[1][input_name_2]: 2.0
            })

    self.assertAllClose(outputs, np.array([2., 3.]))

  def test_concatenate_inputs_and_outputs_two_add_variable_number_graphs(self):
    graph1, input_name_1, output_name_1 = _make_add_variable_number_graph()
    graph2, input_name_2, output_name_2 = _make_add_variable_number_graph()
    with graph1.as_default():
      init_op_name_1 = tf.compat.v1.global_variables_initializer().name
    with graph2.as_default():
      init_op_name_2 = tf.compat.v1.global_variables_initializer().name
    graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1,
                                        [input_name_1], [output_name_1])
    graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2,
                                        [input_name_2], [output_name_2])
    arg_list = [graph_spec_1, graph_spec_2]
    merged_graph, init_op_name, in_name_maps, out_name_maps = graph_merge.concatenate_inputs_and_outputs(
        arg_list)

    with merged_graph.as_default():
      with tf.compat.v1.Session() as sess:
        sess.run(init_op_name)
        outputs_1 = sess.run(
            [out_name_maps[0][output_name_1], out_name_maps[1][output_name_2]],
            feed_dict={
                in_name_maps[0][input_name_1]: 1.0,
                in_name_maps[1][input_name_2]: 2.0
            })
        outputs_2 = sess.run(
            [out_name_maps[0][output_name_1], out_name_maps[1][output_name_2]],
            feed_dict={
                in_name_maps[0][input_name_1]: 1.0,
                in_name_maps[1][input_name_2]: 2.0
            })
        outputs_3 = sess.run(
            [out_name_maps[0][output_name_1], out_name_maps[1][output_name_2]],
            feed_dict={
                in_name_maps[0][input_name_1]: 1.0,
                in_name_maps[1][input_name_2]: 2.0
            })
    self.assertAllClose(outputs_1, [2., 3.])
    self.assertAllClose(outputs_2, [3., 4.])
    self.assertAllClose(outputs_3, [4., 5.])
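
  # Shape of the API as exercised by these tests (a reading of the tests, not
  # a documented contract): concatenate_inputs_and_outputs(graph_spec_list)
  # returns (merged_graph, init_op_name, in_name_maps, out_name_maps), where
  # in_name_maps[i] / out_name_maps[i] map each tensor name from graph i to
  # the corresponding name inside the merged graph.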

  def test_concatenate_inputs_and_outputs_with_dataset_wires_correctly(self):
    dataset_graph, _, dataset_out_name = _make_dataset_constructing_graph()
    graph_1, _, out_name_1 = _make_manual_reduce_graph(dataset_graph,
                                                       dataset_out_name)
    graph_2, _, out_name_2 = _make_manual_reduce_graph(dataset_graph,
                                                       dataset_out_name)
    with graph_1.as_default():
      init_op_name_1 = tf.compat.v1.global_variables_initializer().name
    with graph_2.as_default():
      init_op_name_2 = tf.compat.v1.global_variables_initializer().name
    graph_spec_1 = graph_spec.GraphSpec(graph_1.as_graph_def(), init_op_name_1,
                                        [], [out_name_1])
    graph_spec_2 = graph_spec.GraphSpec(graph_2.as_graph_def(), init_op_name_2,
                                        [], [out_name_2])
    arg_list = [graph_spec_1, graph_spec_2]
    merged_graph, init_op_name, _, out_name_maps = graph_merge.concatenate_inputs_and_outputs(
        arg_list)

    with merged_graph.as_default():
      with tf.compat.v1.Session() as sess:
        sess.run(init_op_name)
        tens = sess.run(
            [out_name_maps[0][out_name_1], out_name_maps[1][out_name_2]])
    self.assertEqual(tens, [10, 10])


class ComposeGraphSpecTest(tf.test.TestCase):

  def test_raises_on_none(self):
    with self.assertRaises(TypeError):
      graph_merge.compose_graph_specs(None)

  def test_raises_on_graph_spec_set(self):
    graph1, input_name_1, output_name_1 = _make_add_one_graph()
    graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), '',
                                        [input_name_1], [output_name_1])
    with self.assertRaises(TypeError):
      graph_merge.compose_graph_specs(set(graph_spec_1))

  def test_raises_on_list_of_ints(self):
    with self.assertRaises(TypeError):
      graph_merge.compose_graph_specs([0, 1])

  def test_compose_no_input_graphs_raises(self):
    graph1 = tf.Graph()
    with graph1.as_default():
      out1 = tf.constant(1.0)
      init_op_name_1 = tf.compat.v1.global_variables_initializer().name
    graph2 = tf.Graph()
    with graph2.as_default():
      out2 = tf.constant(2.0)
      init_op_name_2 = tf.compat.v1.global_variables_initializer().name

    graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1,
                                        [], [out1.name])
    graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2,
                                        [], [out2.name])
    arg_list = [graph_spec_1, graph_spec_2]
    with self.assertRaisesRegex(ValueError, 'mismatch'):
      graph_merge.compose_graph_specs(arg_list)

  def test_compose_two_add_one_graphs_adds_two(self):
    graph1, input_name_1, output_name_1 = _make_add_one_graph()
    graph2, input_name_2, output_name_2 = _make_add_one_graph()
    with graph1.as_default():
      init_op_name_1 = tf.compat.v1.global_variables_initializer().name
    with graph2.as_default():
      init_op_name_2 = tf.compat.v1.global_variables_initializer().name
    graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1,
                                        [input_name_1], [output_name_1])
    graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2,
                                        [input_name_2], [output_name_2])
    arg_list = [graph_spec_1, graph_spec_2]
    composed_graph, init_op_name, in_name_map, out_name_map = graph_merge.compose_graph_specs(
        arg_list)

    with composed_graph.as_default():
      with tf.compat.v1.Session() as sess:
        sess.run(init_op_name)
        outputs = sess.run(
            out_name_map[output_name_2],
            feed_dict={
                in_name_map[input_name_1]: 0.0,
            })

    self.assertAllClose(outputs, np.array(2.))

  def test_composition_happens_in_mathematical_composition_order(self):
    graph1, input_name_1, output_name_1 = _make_add_one_graph()

    def _make_cast_to_int_graph():
      with tf.Graph().as_default() as graph:
        input_val = tf.compat.v1.placeholder(tf.float32, name='input')
        out = tf.cast(input_val, tf.int32)
      return graph, input_val.name, out.name

    graph2, input_name_2, output_name_2 = _make_cast_to_int_graph()

    with graph1.as_default():
      init_op_name_1 = tf.compat.v1.global_variables_initializer().name
    with graph2.as_default():
      init_op_name_2 = tf.compat.v1.global_variables_initializer().name
    graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1,
                                        [input_name_1], [output_name_1])
    graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2,
                                        [input_name_2], [output_name_2])
    arg_list = [graph_spec_2, graph_spec_1]

    composed_graph, _, in_name_map, out_name_map = graph_merge.compose_graph_specs(
        arg_list)

    with composed_graph.as_default():
      with tf.compat.v1.Session() as sess:
        outputs = sess.run(
            out_name_map[output_name_2],
            feed_dict={
                in_name_map[input_name_1]: 0.0,
            })

    self.assertEqual(outputs, 1)

    with self.assertRaises(ValueError):
      graph_merge.compose_graph_specs(list(reversed(arg_list)))

  def test_compose_three_add_one_graphs_adds_three(self):
    graph1, input_name_1, output_name_1 = _make_add_one_graph()
    graph2, input_name_2, output_name_2 = _make_add_one_graph()
    graph3, input_name_3, output_name_3 = _make_add_one_graph()
    with graph1.as_default():
      init_op_name_1 = tf.compat.v1.global_variables_initializer().name
    with graph2.as_default():
      init_op_name_2 = tf.compat.v1.global_variables_initializer().name
    with graph3.as_default():
      init_op_name_3 = tf.compat.v1.global_variables_initializer().name
    graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1,
                                        [input_name_1], [output_name_1])
    graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2,
                                        [input_name_2], [output_name_2])
    graph_spec_3 = graph_spec.GraphSpec(graph3.as_graph_def(), init_op_name_3,
                                        [input_name_3], [output_name_3])
    arg_list = [graph_spec_1, graph_spec_2, graph_spec_3]
    composed_graph, init_op_name, in_name_map, out_name_map = graph_merge.compose_graph_specs(
        arg_list)

    with composed_graph.as_default():
      with tf.compat.v1.Session() as sess:
        sess.run(init_op_name)
        outputs = sess.run(
            out_name_map[output_name_3],
            feed_dict={
                in_name_map[input_name_1]: 0.0,
            })

    self.assertAllClose(outputs, np.array(3.))

  def test_compose_two_add_variable_number_graphs_executes_correctly(self):
    graph1, input_name_1, output_name_1 = _make_add_variable_number_graph()
    graph2, input_name_2, output_name_2 = _make_add_variable_number_graph()
    with graph1.as_default():
      init_op_name_1 = tf.compat.v1.global_variables_initializer().name
    with graph2.as_default():
      init_op_name_2 = tf.compat.v1.global_variables_initializer().name
    graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1,
                                        [input_name_1], [output_name_1])
    graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2,
                                        [input_name_2], [output_name_2])
    arg_list = [graph_spec_1, graph_spec_2]
    composed_graph, init_op_name, in_name_map, out_name_map = graph_merge.compose_graph_specs(
        arg_list)

    with composed_graph.as_default():
      with tf.compat.v1.Session() as sess:
        sess.run(init_op_name)
        output_one = sess.run(
            out_name_map[output_name_2],
            feed_dict={
                in_name_map[input_name_1]: 0.0,
            })
        output_two = sess.run(
            out_name_map[output_name_2],
            feed_dict={
                in_name_map[input_name_1]: 0.0,
            })
        output_three = sess.run(
            out_name_map[output_name_2],
            feed_dict={
                in_name_map[input_name_1]: 0.0,
            })

    self.assertAllClose(output_one, np.array(2.))
    self.assertAllClose(output_two, np.array(4.))
    self.assertAllClose(output_three, np.array(6.))
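
  # As the ordering test above shows, compose_graph_specs composes like
  # mathematical function composition f(g(x)): the last GraphSpec in the list
  # runs first, and an output/input type mismatch between adjacent specs
  # raises ValueError.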

  def test_compose_with_dataset_wires_correctly(self):
    with tf.Graph().as_default() as dataset_graph:
      d1 = tf.data.Dataset.range(5)
      v1 = tf.data.experimental.to_variant(d1)

    ds_out_name = v1.name
    variant_type = v1.dtype

    with tf.Graph().as_default() as reduce_graph:
      variant = tf.compat.v1.placeholder(variant_type)
      structure = tf.TensorSpec([], tf.int64)
      ds1 = tf.data.experimental.from_variant(variant, structure=structure)
      out = ds1.reduce(tf.constant(0, dtype=tf.int64), lambda x, y: x + y)

    ds_in_name = variant.name
    reduce_out_name = out.name

    with dataset_graph.as_default():
      init_op_name_1 = tf.compat.v1.global_variables_initializer().name
    with reduce_graph.as_default():
      init_op_name_2 = tf.compat.v1.global_variables_initializer().name
    dataset_graph_spec = graph_spec.GraphSpec(dataset_graph.as_graph_def(),
                                              init_op_name_1, [], [ds_out_name])
    reduce_graph_spec = graph_spec.GraphSpec(reduce_graph.as_graph_def(),
                                             init_op_name_2, [ds_in_name],
                                             [reduce_out_name])
    arg_list = [reduce_graph_spec, dataset_graph_spec]
    composed_graph, _, _, out_name_map = graph_merge.compose_graph_specs(
        arg_list)

    with composed_graph.as_default():
      with tf.compat.v1.Session() as sess:
        ten = sess.run(out_name_map[reduce_out_name])
    self.assertEqual(ten, 10)


if __name__ == '__main__':
  tf.test.main()

# ----------------------------------------------------------------------------
# repo_name: tomchop/volatility-autoruns
# path: autoruns.py
# license: gpl-2.0
# ----------------------------------------------------------------------------

import os
import sys
import re
import xml.etree.ElementTree as ET
import volatility.debug as debug
import volatility.plugins.registry.registryapi as registryapi
import volatility.plugins.filescan as filescan
import volatility.plugins.dumpfiles as dumpfiles
import volatility.win32 as win32
import volatility.utils as utils
import volatility.plugins.common as common
from volatility.renderers import TreeGrid

# HKLM\Software\
SOFTWARE_RUN_KEYS = [
    "Microsoft\\Windows\\CurrentVersion\\Run",
    "Microsoft\\Windows\\CurrentVersion\\RunOnce",
    "Microsoft\\Windows\\CurrentVersion\\RunServices",
    "Microsoft\\Windows\\CurrentVersion\\Policies\\Explorer\\Run",
    "Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Run",
    "Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\RunOnce",
    "Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Policies\\Explorer\\Run",
    "Microsoft\\Windows NT\\CurrentVersion\\Terminal Server\\Install\\Software\\Microsoft\\Windows\\CurrentVersion\\Run",
    "Microsoft\\Windows NT\\CurrentVersion\\Terminal Server\\Install\\Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce",
]

# HKCU\
NTUSER_RUN_KEYS = [
    "Software\\Microsoft\\Windows\\CurrentVersion\\Run",
    "Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce",
    "Software\\Microsoft\\Windows\\CurrentVersion\\RunServices",
    "Software\\Microsoft\\Windows\\CurrentVersion\\RunServicesOnce",
    "Software\\Microsoft\\Windows\\CurrentVersion\\Policies\\Explorer\\Run",
    "Software\\Microsoft\\Windows NT\\CurrentVersion\\Terminal Server\\Install\\Software\\Microsoft\\Windows\\CurrentVersion\\Run",
    "Software\\Microsoft\\Windows NT\\CurrentVersion\\Terminal Server\\Install\\Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce",
    "Software\\Microsoft\\Windows NT\\CurrentVersion\\Run",
    "Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Policies\\Explorer\\Run",
    "Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Run",
]


# Active Setup only executes commands from the SOFTWARE hive
# See: https://helgeklein.com/blog/2010/04/active-setup-explained/
# http://blogs.msdn.com/b/aruns_blog/archive/2011/06/20/active-setup-registry-key-what-it-is-and-how-to-create-in-the-package-using-admin-studio-install-shield.aspx
# http://blog.spiderlabs.com/2014/07/backoff-technical-analysis.html
ACTIVE_SETUP_KEY = "Microsoft\\Active Setup\\Installed Components"


# Abusing MS Fix-It patches to ensure persistence
# References:
# https://www.blackhat.com/docs/asia-14/materials/Erickson/WP-Asia-14-Erickson-Persist-It-Using-And-Abusing-Microsofts-Fix-It-Patches.pdf
# http://blog.cert.societegenerale.com/2015/04/analyzing-gootkits-persistence-mechanism.html
APPCOMPAT_SDB_KEY = "Microsoft\\Windows NT\\CurrentVersion\\AppCompatFlags\\InstalledSDB"


# Winlogon Notification packages are supported in pre-Vista versions of Windows only
# See: http://technet.microsoft.com/en-us/library/cc721961(v=ws.10).aspx
WINLOGON_NOTIFICATION_EVENTS = [
    "Lock",
    "Logoff",
    "Logon",
    "Shutdown",
    "StartScreenSaver",
    "StartShell",
    "Startup",
    "StopScreenSaver",
    "Unlock",
]

WINLOGON_REGISTRATION_KNOWN_DLLS = [
    'crypt32.dll',
    'cryptnet.dll',
    'cscdll.dll',
    'dimsntfy.dll',
    'sclgntfy.dll',
    'wlnotify.dll',
    'wzcdlg.dll',
]

WINLOGON_COMMON_VALUES = {
    'Userinit': 'userinit.exe',
    'VmApplet': 'rundll32 shell32,Control_RunDLL "sysdm.cpl"',
    'Shell': 'Explorer.exe',
    'TaskMan': "Taskmgr.exe",
    'System': 'lsass.exe',
}

# Service key -> value maps
# Original list from regripper plugins, extra / repeated values from
# http://technet.microsoft.com/en-us/library/cc759275(v=ws.10).aspx
# http://www.atmarkit.co.jp/ait/articles/1705/01/news009_2.html (in Japanese)
# https://github.com/processhacker/processhacker/blob/master/phlib/svcsup.c
# https://docs.microsoft.com/en-us/windows/desktop/api/winsvc/nf-winsvc-createservicea
# https://www.codemachine.com/downloads/win10/winnt.h
SERVICE_TYPES = {
    0x001: "Kernel driver",
    0x002: "File system driver",
    0x004: "Arguments for adapter",
    0x008: "File system driver",
    0x010: "Win32_Own_Process",
    0x020: "Win32_Share_Process",
    0x050: "User_Own_Process TEMPLATE",
    0x060: "User_Share_Process TEMPLATE",
    0x0D0: "User_Own_Process INSTANCE",
    0x0E0: "User_Share_Process INSTANCE",
    0x100: "Interactive",
    0x110: "Interactive",
    0x120: "Share_process Interactive",
    -1: "Unknown",
}

SERVICE_STARTUP = {
    0x00: "Boot Start",
    0x01: "System Start",
    0x02: "Auto Start",
    0x03: "Manual",
    0x04: "Disabled",
    -1: "Unknown",
}


def sanitize_path(path):
    # Clears the path of most equivalent forms
    if path:
        path = path.lower()
        path = path.replace("%systemroot%\\", '')
        path = path.replace("\\systemroot\\", '')
        path = path.replace("%windir%", '')
        path = path.replace("\\??\\", '')
        path = path.replace('\x00', '')
        path = path.replace('"', '').replace("'", '')
        return path

    else:
        return ''


def get_indented_dict(d, depth=0):
    output = ""
    for key in d:
        output += "{}{}: ".format(" " * depth * 2, key)
        if isinstance(d[key], dict):
            output += "\n" + get_indented_dict(d[key], depth + 1)
        elif isinstance(d[key], list):
            output += '\n'
            for e in d[key]:
                output += get_indented_dict(e, depth + 1)
        else:
            output += "{}\n".format(d[key])
    return output
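
# Illustration of sanitize_path() on a hypothetical input:
#   sanitize_path('"%SystemRoot%\\System32\\svchost.exe"')
#   -> 'system32\\svchost.exe'
# i.e. lower-cased, with the %systemroot% / \systemroot\ / %windir% / \??\
# prefixes, NUL bytes and quote characters stripped, so equivalent image
# paths compare equal when matched against command lines and loaded modules.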


class Autoruns(common.AbstractWindowsCommand):
    """Searches the registry and memory space for applications running at system startup and maps them to running processes"""
    def __init__(self, config, *args, **kwargs):
        common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
        config.add_option("ASEP-TYPE", short_option='t', default=None,
                          help='Only collect the ASEP types specified. Select from: autoruns, services, appinit, winlogon, tasks, activesetup, sdb (comma-separated)',
                          action='store', type='str')
        config.remove_option("VERBOSE")
        config.add_option("VERBOSE", short_option='v', default=False,
                          help='Show entries that are normally filtered out (Ex. Services from the System32 folder)',
                          action='store_true')

        self.process_dict = {}
        self.autoruns = []
        self.services = []
        self.appinit_dlls = []
        self.winlogon = []
        self.winlogon_registrations = []
        self.tasks = []
        self.activesetup = []
        self.sdb = []

    def get_dll_list(self):
        addr_space = utils.load_as(self._config)
        task_objects = win32.tasks.pslist(addr_space)
        for task in task_objects:
            if task.Peb:
                self.process_dict[int(task.UniqueProcessId)] = (task, [m for m in task.get_load_modules()])

    # Matches a given module (executable, DLL) to a running process by looking either
    # in the CommandLine parameters or in the loaded modules
    def find_pids_for_imagepath(self, module):
        pids = []
        module = sanitize_path(module)
        if module:
            for pid in self.process_dict:
                # case where the image path matches the process' command-line information
                if self.process_dict[pid][0].Peb:
                    cmdline = self.process_dict[pid][0].Peb.ProcessParameters.CommandLine
                    if module in sanitize_path(str(cmdline or '[no cmdline]')):
                        pids.append(pid)

                # case where the module is one of the process's loaded modules
                # (e.g. DLLs loaded by services)
                for dll in self.process_dict[pid][1]:
                    if module in sanitize_path(str(dll.FullDllName or '[no dllname]')):
                        pids.append(pid)

        return list(set(pids))

    # Returns [] or a list of tuples(dll, key path, key.LastWriteTime, [int(pids)])
    def get_appinit_dlls(self):

        debug.debug('Started get_appinit_dlls()')
        key_path = "Microsoft\\Windows NT\\CurrentVersion\\Windows"
        results = []

        try:
            self.regapi.reset_current()
            key = self.regapi.reg_get_key(hive_name='software', key=key_path)
            appinit_values = self.regapi.reg_get_value(None, None, value='AppInit_DLLs', given_root=key)

        except Exception as e:
            debug.warning('get_appinit_dlls() failed to complete. Exception: {} {}'.format(type(e).__name__, e.args))

        else:
            if appinit_values:
                # Split on space or comma: https://msdn.microsoft.com/en-us/library/windows/desktop/dd744762(v=vs.85).aspx
                appinit_dlls = str(appinit_values).replace('\x00', '').replace(',', ' ').split(' ')
                results = [(dll, key_path, key.LastWriteTime, "AppInit_DLLs", self.find_pids_for_imagepath(dll)) for dll in appinit_dlls if dll]

        debug.debug('Finished get_appinit_dlls()')
        return results

    # Winlogon Notification packages are supported in pre-Vista versions of Windows only
    # See: http://technet.microsoft.com/fr-fr/library/cc721961(v=ws.10).aspx
    # Returns [] or a list of tuples from parse_winlogon_registration_key()
    def get_winlogon_registrations(self):

        debug.debug('Started get_winlogon_registrations()')
        results = []
        notify_key = "Microsoft\\Windows NT\\CurrentVersion\\Winlogon\\Notify"

        try:
            self.regapi.reset_current()
            for subkey in self.regapi.reg_get_all_subkeys(hive_name='software', key=notify_key):
                parsed_entry = self.parse_winlogon_registration_key(subkey)
                if parsed_entry and (self._config.VERBOSE or (parsed_entry[0].split('\\')[-1] not in WINLOGON_REGISTRATION_KNOWN_DLLS)):
                    results.append(parsed_entry)

        except Exception as e:
            debug.warning('get_winlogon_registrations() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))

        debug.debug('Finished get_winlogon_registrations()')
        return results

    # Returns None or (str(dllname), [(str(trigger), str(event))], key.LastWriteTime, key path, [int(pids)])
    def parse_winlogon_registration_key(self, key):

        dllname = ""
        events = []
        pids = []
        key_path = self.regapi.reg_get_key_path(key) or str(key.Name)

        try:
            for v_name, v_data in self.regapi.reg_yield_values(hive_name=None, key=None, given_root=key):
                val_name = str(v_name or '')
                val_data = str(v_data or '').replace('\x00', '')

                if val_name.lower() == 'dllname':
                    dllname = val_data
                    pids = self.find_pids_for_imagepath(dllname)
                elif val_name in WINLOGON_NOTIFICATION_EVENTS:
                    events.append((val_name, val_data))

        except Exception as e:
            debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args))

        if dllname:
            return (dllname, events, key.LastWriteTime, key_path, pids)

    # Returns [] or a list of tuples(val_name, val_data, key.LastWriteTime, expected_val_data, key path, [int(pids)])
    def get_winlogon(self):

        debug.debug('Started get_winlogon()')
        winlogon = []
        winlogon_key_path = "Microsoft\\Windows NT\\CurrentVersion\\Winlogon"

        try:
            self.regapi.reset_current()
            key = self.regapi.reg_get_key(hive_name='software', key=winlogon_key_path)
            if key:
                for v_name, v_data in self.regapi.reg_yield_values(hive_name=None, key=None, given_root=key):
                    val_name = str(v_name or '')
                    val_data = str(v_data or '').replace('\x00', '')

                    if val_data and val_name in WINLOGON_COMMON_VALUES:
                        pids = self.find_pids_for_imagepath(val_data)
                        winlogon.append((val_name, val_data, key.LastWriteTime, WINLOGON_COMMON_VALUES[val_name], winlogon_key_path, pids))

        except Exception as e:
            debug.warning('get_winlogon() failed to complete. Exception: {} {}'.format(type(e).__name__, e.args))

        debug.debug('Finished get_winlogon()')
        return winlogon

    # Returns [] or a list of tuples from parse_service_key()
    def get_services(self):

        debug.debug('Started get_services()')
        results = []
        service_key_path = "{}\\Services".format(self.currentcs)

        try:
            self.regapi.reset_current()
            for service_sk in self.regapi.reg_get_all_subkeys(hive_name='system', key=service_key_path):
                parsed_service = self.parse_service_key(service_sk)
                if parsed_service and (self._config.VERBOSE or 'system32' not in parsed_service[5].lower()):
                    results.append(parsed_service)

        except Exception as e:
            debug.warning('get_services() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))

        debug.debug('Finished get_services()')
        return results

    # Returns None or (key_path, timestamp, display_name, SERVICE_STARTUP[startup], SERVICE_TYPES[service_type], image_path, service_dll, [int(pids)])
    def parse_service_key(self, service_key):

        try:
            values = {str(val_name): str(val_data).replace('\x00', '') for val_name, val_data in self.regapi.reg_yield_values(None, None, given_root=service_key)}

            image_path = values.get("ImagePath", '')
            display_name = values.get("DisplayName", '')
            service_dll = values.get("ServiceDll", '')
            main = values.get("ServiceMain", '')
            startup = int(values.get("Start", -1))
            service_type = int(values.get("Type", -1))
            timestamp = service_key.LastWriteTime
            key_path = self.regapi.reg_get_key_path(service_key) or str(service_key.Name)

            # Check if the service is not set to automatically start or does not have an image path
            # More details here: http://technet.microsoft.com/en-us/library/cc759637(v=ws.10).aspx
            if not image_path or startup not in [0, 1, 2]:
                return None

            if 'svchost.exe -k' in image_path.lower() or SERVICE_TYPES[service_type] == 'Share_Process':
                sk = self.regapi.reg_get_key(hive_name='system', key='Parameters', given_root=service_key)
                if sk and not service_dll:
                    timestamp = sk.LastWriteTime
                    service_dll = self.regapi.reg_get_value(hive_name='system', key='', value="ServiceDll", given_root=sk)
                    main = self.regapi.reg_get_value(hive_name='system', key='', value='ServiceMain', given_root=sk)

                if not service_dll and '@' in display_name:
                    timestamp = service_key.LastWriteTime
                    service_dll = display_name.split('@')[1].split(',')[0]

            if service_dll:
                service_dll = service_dll.replace('\x00', '')
                pids = self.find_pids_for_imagepath(service_dll)
                if main:
                    service_dll = "{} ({})".format(service_dll, main.replace('\x00', ''))
            else:
                pids = self.find_pids_for_imagepath(image_path)

        except Exception as e:
            debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args))

        return (key_path, timestamp, display_name, SERVICE_STARTUP[startup], SERVICE_TYPES[service_type], image_path, service_dll, pids)
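
    # parse_service_key() yields, in order: key path, last-write time, display
    # name, decoded start type, decoded service type, image path, service DLL
    # (with ServiceMain appended in parentheses when present) and matching
    # PIDs. For shared svchost services the DLL is resolved via the service's
    # Parameters subkey, falling back to the '@dll,-id' form of DisplayName.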

    # Returns [] or a list of tuples from parse_activesetup_keys()
    def get_activesetup(self):

        debug.debug('Started get_activesetup()')
        results = []

        try:
            self.regapi.reset_current()
            for subkey in self.regapi.reg_get_all_subkeys(hive_name='software', key=ACTIVE_SETUP_KEY):
                r = self.parse_activesetup_keys(subkey)
                if r:
                    results.append(r)

        except Exception as e:
            debug.warning('get_activesetup() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))

        debug.debug('Finished get_activesetup()')
        return results

    # Returns None or a tuple(exe path, subkey.LastWriteTime, key path, [int(pids)])
    def parse_activesetup_keys(self, subkey):

        key_path = self.regapi.reg_get_key_path(subkey) or str(subkey.Name)

        try:
            stub_path_val = self.regapi.reg_get_value(hive_name='software', key='', value='StubPath', given_root=subkey)
            stub_path_val = str(stub_path_val or '').replace('\x00', '')
        except Exception as e:
            debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args))

        if stub_path_val:
            pids = self.find_pids_for_imagepath(stub_path_val)
            return (stub_path_val, subkey.LastWriteTime, key_path, pids)

    # Returns [] or a list of tuples from parse_sdb_key()
    def get_sdb(self):

        debug.debug('Started get_sdb()')
        results = []

        try:
            self.regapi.reset_current()
            sdb_keys = self.regapi.reg_get_all_subkeys(hive_name='software', key=APPCOMPAT_SDB_KEY)
            for subkey in sdb_keys:
                parsed_sdb_entry = self.parse_sdb_key(subkey)
                if parsed_sdb_entry:
                    results.append(parsed_sdb_entry)

        except Exception as e:
            debug.warning('get_sdb() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))

        debug.debug('Finished get_sdb()')
        return results

    # Returns None or a tuple(exe, db_path, subkey.LastWriteTime, key path, [int(pids)])
    def parse_sdb_key(self, subkey):

        key_path = self.regapi.reg_get_key_path(subkey) or str(subkey.Name)

        try:
            desc = sanitize_path(self.regapi.reg_get_value('software', '', 'DatabaseDescription', subkey) or '')
            db_path = sanitize_path(self.regapi.reg_get_value('software', '', 'DatabasePath', subkey) or '')
            pids = self.find_pids_for_imagepath(desc)
        except Exception as e:
            debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args))

        if desc:
            return (desc, db_path, subkey.LastWriteTime, key_path, pids)

    # Returns [] or a list of tuples from parse_autoruns_key()
    def get_autoruns(self):

        debug.debug('Started get_autoruns()')
        results = []
        hive_key_list = []

        try:
            # Gather all software run keys
            self.regapi.reset_current()
            for run_key in SOFTWARE_RUN_KEYS:
                hive_key_list += [k for k in self.regapi.reg_yield_key(hive_name='software', key=run_key)]

            # Gather all ntuser run keys
            self.regapi.reset_current()
            for run_key in NTUSER_RUN_KEYS:
                hive_key_list += [k for k in self.regapi.reg_yield_key(hive_name='ntuser.dat', key=run_key)]

            # hive_key = (key pointer, hive_name)
            for hive_key in hive_key_list:
                results += self.parse_autoruns_key(hive_key)

        except Exception as e:
            debug.warning('get_autoruns() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))

        debug.debug('Finished get_autoruns()')
        return results

    # Returns [] or a list of tuples(exe path, hive name, key path, key.LastWriteTime, value name, [int(pids)])
    def parse_autoruns_key(self, hive_key):

        results = []
        key = hive_key[0]
        hive_name = hive_key[1]
        key_path = self.regapi.reg_get_key_path(key) or str(key.Name)

        try:
            # val_data is the exe path
            for v_name, v_data in self.regapi.reg_yield_values(None, None, given_root=key):
                val_name = str(v_name or '')
                val_data = str(v_data or '').replace('\x00', '')

                if val_data:
                    pids = self.find_pids_for_imagepath(val_data)
                    results.append((val_data, hive_name, key_path, key.LastWriteTime, val_name, pids))

        except Exception as e:
            debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args))

        return results

    def get_tasks(self):

        debug.debug('Started get_tasks()')
        addr_space = utils.load_as(self._config)
        f = filescan.FileScan(self._config)
        tasks = []
        parsed_tasks = []

        try:
            for file in f.calculate():
                filename = str(file.file_name_with_device() or '')
                if "system32\\tasks\\" in filename.lower() and (('system32\\tasks\\microsoft' not in filename.lower() or self._config.VERBOSE)):
                    tasks.append((file.obj_offset, filename))
                    debug.debug("Found task: 0x{0:x} {1}".format(file.obj_offset, filename))

            for offset, name in tasks:

                self._config.PHYSOFFSET = '0x{:x}'.format(offset)
                df = dumpfiles.DumpFiles(self._config)
                self._config.DUMP_DIR = '.'
                for data in df.calculate():
                    # Doing this with mmap would probably be cleaner
                    # Create a sufficiently large (dynamically resizable?)
                    # memory map so that we can seek and write the file accordingly
                    #
                    # SystemError: mmap: resizing not available--no mremap()

                    chopped_file = {}

                    for mdata in data['present']:
                        rdata = addr_space.base.read(mdata[0], mdata[2])
                        chopped_file[mdata[1]] = rdata

                    task_xml = "".join(part[1] for part in sorted(chopped_file.items(), key=lambda x: x[0]))

                    parsed = self.parse_task_xml(task_xml, name)

                    if parsed:
                        args = parsed['Actions']['Exec'].get("Arguments", None)
                        if args:
                            parsed['Actions']['Exec']['Command'] += " {}".format(args)
                        pids = self.find_pids_for_imagepath(parsed['Actions']['Exec']['Command'])
                        parsed_tasks.append((name.split('\\')[-1], parsed, task_xml, pids))

        except Exception as e:
            debug.warning('get_tasks() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))

        debug.debug('Finished get_tasks()')
        return parsed_tasks
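
    # get_tasks() rebuilds each task file from memory rather than disk:
    # FileScan locates file objects under System32\Tasks, DumpFiles extracts
    # the memory-resident pages, and the pages are stitched back together in
    # file-offset order before the XML is parsed.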

    def parse_task_xml(self, xml, f_name):
        raw = xml
        xml = re.sub('\x00\x00+', '', xml) + '\x00'
        if xml:
            try:
                xml = xml.decode('utf-16')
                xml = re.sub(r"<\?xml version=\"1.0\" encoding=\"UTF-16\"\?>", "", xml)
                xml = xml.encode('utf-16')

                root = ET.fromstring(xml)
                d = {}

                for e in root.findall("./RegistrationInfo/Date"):
                    d['Date'] = e.text or ''
                for e in root.findall("./RegistrationInfo/Description"):
                    d['Description'] = e.text or ''
                for e in root.findall("./Actions"):
                    d['Actions'] = self.visit_all_children(e)
                for e in root.findall("./Settings/Enabled"):
                    d['Enabled'] = e.text or ''
                for e in root.findall("./Settings/Hidden"):
                    d['Hidden'] = e.text or ''
                for t in root.findall("./Triggers/*"):
                    d['Triggers'] = self.visit_all_children(t)

                if not d.get("Actions", {}).get('Exec', {}).get("Command", False):
                    return None

                return d
            except UnicodeDecodeError as e:
                debug.warning('Error while parsing the following task: {}'.format(f_name))
                debug.debug('UnicodeDecodeError for: {}'.format(repr(raw)))

    def visit_all_children(self, node):
        d = {}
        for c in node:
            d[c.tag] = self.visit_all_children(c)

        if node.text:
            if node.text.strip(' \t\n\r'):
                d = node.text.strip(' \t\n\r')
        return d

    def calculate(self):
        self.get_dll_list()
        self.regapi = registryapi.RegistryApi(self._config)
        self.currentcs = self.regapi.reg_get_currentcontrolset() or "ControlSet001"
        asep_list = ['autoruns', 'services', 'appinit', 'winlogon', 'tasks', 'activesetup', 'sdb']
        os_major = utils.load_as(self._config).profile.metadata.get('major', 0)

        # If all_offsets is empty then regapi was unable to find
        # hive offsets and we exit with an error message
        if not self.regapi.all_offsets:
            debug.error('Unable to find registry hives.')

        if self._config.ASEP_TYPE:
            debug.debug('Config: {}'.format(self._config.ASEP_TYPE))
            asep_list = [s for s in self._config.ASEP_TYPE.replace(' ', '').split(',')]

        # Scan for ASEPs and populate the lists
        if 'autoruns' in asep_list:
            self.autoruns = self.get_autoruns()
        if 'services' in asep_list:
            self.services = self.get_services()
        if 'appinit' in asep_list:
            self.appinit_dlls = self.get_appinit_dlls()
        if 'winlogon' in asep_list:
            self.winlogon = self.get_winlogon()
            if os_major == 5:
                self.winlogon_registrations = self.get_winlogon_registrations()
        if 'tasks' in asep_list:
            self.tasks = self.get_tasks()
        if 'activesetup' in asep_list:
            self.activesetup = self.get_activesetup()
        if 'sdb' in asep_list:
            self.sdb = self.get_sdb()

        # Returns a generator to generator() that generates the unified output data
        return self.get_unified_output_data()

    def get_unified_output_data(self):
        for exe_path, hive, key, timestamp, val_name, pids in self.autoruns:
            yield [exe_path,
                   'Autoruns',
                   timestamp,
                   val_name,
                   ", ".join([str(p) for p in pids]),
                   hive,
                   key,
                   val_name,
                   ""]
        for exe_path, key, timestamp, val_name, pids in self.appinit_dlls:
            yield [exe_path,
                   'AppInit Dlls',
                   timestamp,
                   '-',
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SOFTWARE",
                   key,
                   val_name,
                   ""]
        for exe_path, events, timestamp, key, pids in self.winlogon_registrations:
            yield [exe_path,
                   'Winlogon (Notify)',
                   timestamp,
                   'Hooks: {0}'.format(", ".join([e[1] for e in events])),
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SOFTWARE",
                   key,
                   "Dllname",
                   ""]
        for val_name, exe_path, timestamp, common_value, key, pids in self.winlogon:
            yield [exe_path,
                   'Winlogon ({})'.format(val_name),
                   timestamp,
                   "Default value: {}".format(common_value),
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SOFTWARE",
                   key,
                   val_name,
                   ""]
        for key, timestamp, display_name, start, type, exe_path, entry, pids in self.services:
            yield [exe_path,
                   'Services',
                   timestamp,
                   "{0} - {1} ({2} - {3})".format(key.split('\\')[-1], display_name, type, start),
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SYSTEM",
                   key,
                   "",
                   entry]
        for name, task, task_xml, pids in self.tasks:
            yield [task['Actions']['Exec']['Command'],
                   'Scheduled Tasks',
                   task.get('Date', ""),
                   "{} ({})".format(name, task.get('Description', "N/A")),
                   ", ".join([str(p) for p in pids]),
                   "",
                   "",
                   "",
                   ""]
        for exe_path, timestamp, key, pids in self.activesetup:
            yield [exe_path,
                   "Active Setup",
                   timestamp,
                   "-",
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SOFTWARE",
                   key,
                   "StubPath",
                   ""]
        for desc, exe_path, timestamp, key, pids in self.sdb:
            yield [exe_path,
                   "SDB",
                   timestamp,
                   desc,
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SOFTWARE",
                   key,
                   "",
                   ""]

    def unified_output(self, data):
        """This standardizes the output formatting"""
        return TreeGrid([("Executable", str),
                         ("Source", str),
                         ("Last write time", str),
                         ("Details", str),
                         ("PIDs", str),
                         ("Hive", str),
                         ("Key", str),
                         ("Name", str),
                         ("Share Process Dll", str)],
                        self.generator(data))

    def generator(self, data):
        """This yields data according to the unified output format"""
        for executable, source, lastWriteTime, details, pids, hive, key, name, spDllPath in data:
            yield (0, [str(executable), str(source), str(lastWriteTime), str(details), str(pids), str(hive), str(key), str(name), str(spDllPath)])

    def render_table(self, outfd, data):
        self.table_header(outfd,
                          [("Executable", "<65"),
                           ("Source", "30"),
                           ("Last write time", "28"),
                           ("Details", "60"),
                           ("PIDs", "15")
                           ])

        for exe, source, timestamp, details, pids, hive, key, name, share_dll in data:
            if share_dll:
                exe = share_dll
            self.table_row(outfd, exe, source, timestamp, details, pids)

    def render_text(self, outfd, data):
        previous_source = ""
        for exe, source, timestamp, details, pids, hive, key, name, share_dll in data:
            if source != previous_source:
                outfd.write("\n\n")
                outfd.write("{:=<50}\n\n".format(source))

            if source == "Services":
                outfd.write("Service: {}\n".format(details))
                outfd.write("    Image path: {0} (Last modified: {1})\n".format(exe, timestamp))
                outfd.write("    PIDs: {}\n".format(pids))
                if share_dll:
                    outfd.write("    Loads: {}\n".format(share_dll))
            elif source == "Autoruns":
                outfd.write("Hive: {}\n".format(hive))
                outfd.write("    {0} (Last modified: {1})\n".format(key, timestamp))
                outfd.write("        {0:30} : {1} (PIDs: {2})\n".format(exe, details, pids))
            elif source == "Active Setup":
                outfd.write("Command line: {}\nLast-written: {} (PIDs: {})\n".format(exe, timestamp, pids))
            elif source == "SDB":
                previous_source = source
                continue
            elif source == "Winlogon (Notify)":
                outfd.write("{0} (Last write time: {1})\n".format(exe, timestamp))
                outfd.write("    PIDs: {}\n".format(pids))
                outfd.write("    {}\n".format(details))
            elif "Winlogon" in source:
                outfd.write("{0}: {1}\n".format(name, exe))
                outfd.write("    {}\n".format(details))
                outfd.write("    PIDs: {}\n".format(pids))
                outfd.write("    Last write time: {}\n".format(timestamp))
            elif source == "AppInit Dlls":
                outfd.write("Exe path: {}\n".format(exe))
                outfd.write("PIDS: {}\n".format(pids))
            elif source == "Scheduled Tasks":
                previous_source = source
                continue

            outfd.write("\n")
            previous_source = source

        if self.tasks:
            outfd.write("\n\n")
            outfd.write("{:=<50}\n\n".format("Scheduled tasks "))
            for name, task, task_xml, pids in self.tasks:
                outfd.write("==== Task name: {} (PIDs: {})\n".format(name, ", ".join([str(p) for p in pids]) or "-"))
                outfd.write(get_indented_dict(task))
                outfd.write('\n')
                outfd.write("Raw XML:\n\n---------\n{}\n---------\n\n\n".format(task_xml))

        if self.sdb:
            outfd.write("\n\n")
            outfd.write("{:=<50}\n\n".format("SDB Fix-it patches "))
            for desc, path, timestamp, key, pids in self.sdb:
                outfd.write("Description: \"{}\"\nLast-written: {}\nPatch: {}\n\n".format(desc, timestamp, path))

# ----------------------------------------------------------------------------
# repo_name: ActiveState/code
# path: recipes/Python/580811_Uno_TextBased/recipe-580811.py
# ----------------------------------------------------------------------------

import os
import sys
import random
import math
import time

class BadInputError(Exception):
    pass

class Player():

    def __init__(self, name):
        self.id = None
        self.name = name
        self.type = 'Human'
        self.hand = Hand()
        self.legalCards = []
        self.wildCards = []
        self.valueChangeCards = []
        self.zeroCards = []
        self.canSkip = False
        self.canReverse = False
        self.canDrawTwo = False
        self.canDrawFour = False
        self.canValueChange = False
        self.drew = False
        self.scrollMax = 0
        self.points = 0
        self.forceDraw = 0

    def addCard(self, card):
        self.drew = True
        if self.forceDraw > 0:
            self.forceDraw -= 1
            self.drew = False
        self.hand.addCard(card)

    def beginTurn(self):
        self.drew = False

    def didDraw(self):
        return self.drew

    def getLegalCards(self, color, value, zeroChange=False):
        self.canSkip = False
        self.canReverse = False
        self.canDrawTwo = False
        self.canDrawFour = False
        self.canValueChange = False
        self.canZeroChange = False
        self.legalCards = []
        self.wildCards = []
        self.valueChangeCards = []
        self.zeroCards = []
        plusFours = []
        for card in self.hand:
            if card.isWild():
                if card.getValue() == '+4':
                    plusFours.append(card)
                else:
                    self.wildCards.append(card)
            elif zeroChange and card.isZero():
                self.canZeroChange = True
                self.zeroCards.append(card)
            elif card.getColor() == color or card.getValue() == value:
                if card.getColor() != color:
                    self.canValueChange = True
                    self.valueChangeCards.append(card)
                if card.getValue() == "+2":
                    self.canDrawTwo = True
                elif card.getValue() == 'R':
                    self.canReverse = True
                elif card.getValue() == 'X':
                    self.canSkip = True
                self.legalCards.append(card)
        if len(self.legalCards) == 0 and len(plusFours) > 0:
            self.canDrawFour = True
            self.wildCards += plusFours

    def getValidCards(self):
        return self.legalCards

    def getAllValidCards(self):
        return self.legalCards + self.wildCards + self.zeroCards

    def hasLegalCard(self):
        return len(self.legalCards) > 0

    def addPoints(self, amount):
        if (self.points + amount) <= 999999999999999999999:
            self.points += amount

    def removeCard(self, index):
        return self.hand.removeCard(index)

    def assignID(self, identity):
        self.id = identity

    def getName(self):
        return self.name

    def getID(self):
        return self.id

    def getPoints(self):
        return self.points

    def getType(self):
        return self.type

    def getCardNum(self):
        return len(self.hand)

    def getHand(self, scrollNum=0, hide=False):
        return self.hand.show(scrollNum, hide)

    def getForceDraws(self):
        return self.forceDraw

    def addForceDraw(self, num):
        self.forceDraw += num

    def decreaseForceDraw(self):
        self.forceDraw -= 1

    def removeForceDraw(self):
        self.forceDraw = 0

    def checkCard(self, index):
        return self.hand.getCard(int(index))

    def discardHand(self):
        self.hand.discard()

    def __str__(self):
        return self.name

    def __repr__(self):
        return '({},{})'.format(self.name, self.points)

class Hand():
    ''''deck' (Deck) : deck to draw from.
    'numberOfCards' (int) : number of cards to draw.'''

    def __init__(self, deck=None, numberOfCards=0):
        self.hand = []
        if deck != None:
            self.draw(deck, numberOfCards)

    def __iter__(self):
        return iter(self.hand)

    def __len__(self):
        return len(self.hand)

    def __getitem__(self, item):
        try:
            return self.hand[item]
        except:
            return ''

    def addCard(self, card):
        self.hand.append(card)

    def removeCard(self, index):
        index = int(index)
        if (0 <= index < len(self)):
            return self.hand.pop(index)

    def discard(self):
        self.hand = []

    def show(self, scrollNum=0, hide=False):
        if scrollNum == -1:
            scrollNum = 0
        output = ''
        num = 0
        header, footer, upper, lower = '', '', '', ''
        header += ('\033[97m\u2666--\u2666\033[0m ')
        upper += ('\033[97m|<-|\033[0m ')
        lower += ('\033[97m|<-|\033[0m ')
        footer += ('\033[97m\u2666--\u2666\033[0m ')
        for i in range(10):
            indexNum = i+(10*scrollNum)
            if indexNum < len(self):
                header += (self[indexNum].getRow(0, hide)+' ')
                upper += (self[indexNum].getRow(1, hide)+' ')
                lower += (self[indexNum].getRow(2, hide)+' ')
                footer += (self[indexNum].getRow(3, hide)+' ')
                num += 1
        for j in range(10-num):
            j  # unused
            header += ('     ')
            footer += ('     ')
            upper += ('     ')
            lower += ('     ')
        header += ('\033[97m\u2666--\u2666\033[0m ')
        upper += ('\033[97m|->|\033[0m ')
        lower += ('\033[97m|->|\033[0m ')
        footer += ('\033[97m\u2666--\u2666\033[0m ')
        output += (' '+header+'\n '+upper+'\n '+lower+'\n '+footer+'\n\033[97m|-(<)--')
        for k in range(num):
            output += '({})'.format(k)
            output += '--'
        for l in range(10-num):
            l  # unused
            output += '-----'
        output += '(>)--|\033[0m\n'
        return output

    def getCard(self, index):
        return self.hand[index]

    def indexCard(self, card):
        return self.hand.index(card)

class GameSettings():

    playerIdentities = ('play1', 'play2', 'play3', 'play4')
    computerNames = ('Watson', 'SkyNet', 'Hal', 'Metal Gear')

    def __init__(self):
        self.playerStaging = []     # Where Player Objs Are Stored Before Game Starts
        self.players = {}           # ID : Player Obj
        self.numPlayers = 0
        self.useColor = True
        self.displayEffects = True
        self.hideComputerHands = True
        self.zeroChange = False
        self.computerSimulation = False
        self.mainMenuError = ''
        self.computerSpeed = 'normal'

    def canAddPlayer(self):
        return (self.numPlayers < 4)

    def canRemovePlayer(self):
        return (self.numPlayers > 0)

    def canBegin(self):
        return (self.numPlayers > 1)

    def addPlayer(self, player):
        self.playerStaging.append(player)
        self.numPlayers += 1

    def removePlayer(self, number):
        number -= 1
        del self.playerStaging[number]
        self.numPlayers -= 1

    def clearStaging(self):
        self.numPlayers = 0
        self.playerStaging = []

    def finalizePlayers(self):
        self.players.clear()
        identity = 0
        for player in self.playerStaging:
            playerID = self.playerIdentities[identity]
            player.assignID(playerID)
            self.players[playerID] = player
            identity += 1

    def getPlayerNum(self):
        return self.numPlayers

    def getComputerName(self):
        complete = False
        index = self.numPlayers
        while not complete:
            name = self.computerNames[index]
            complete = True
            for player in self.playerStaging:
                if player.getName() == name:
                    index += 1
                    if index >= len(self.computerNames):
                        index = 0
                    complete = False

        return self.computerNames[index]

    def getRandomIdentity(self):
        '''For Getting a Random Player for First Turn.'''
        return random.choice(list(self.players.keys()))

    def compileMainMenuElements(self):
        def getBlankSpace(word, total):
            return " "*(total-len(word))

        def getPlayerBox(playerNum, rowNum):
            if rowNum == 1:
                name = self.playerStaging[playerNum-1].getName()
                return '{}{}'.format(name, getBlankSpace(name, 29))
            elif rowNum == 2:
                points = self.playerStaging[playerNum-1].getPoints()
                return 'Points: {}{}'.format(points, getBlankSpace(str(points), 21))

        self.mainMenuElements = {'play1row1': 'No Player                    ', 'play1row2': '                             ',
                                 'play2row1': 'No Player                    ',
                                 'play2row2': '                             ',
                                 'play3row1': 'No Player                    ', 'play3row2': '                             ',
                                 'play4row1': 'No Player                    ',
                                 'play4row2': '                             ',
                                 'play1box': '\033[90m', 'play2box': '\033[90m', 'play3box': '\033[90m', 'play4box': '\033[90m',
                                 'beginBox': '\033[90m', 'addBox': '\033[97m', 'removeBox': '\033[90m'
                                 }
        playerBoxKey = 'play{}box'
        playerRowKey = 'play{}row{}'
        i = 1
        for j in self.playerStaging:
            j
            colorCode = ['\033[91m', '\033[94m', '\033[92m', '\033[93m']
            key = playerBoxKey.format(i)
            self.mainMenuElements[key] = colorCode[i-1]
            self.mainMenuElements[playerRowKey.format(i, 1)] = getPlayerBox(i, 1)
            self.mainMenuElements[playerRowKey.format(i, 2)] = getPlayerBox(i, 2)
            i += 1
        if self.canBegin():
            self.mainMenuElements['beginBox'] = '\033[95m'
        if not self.canAddPlayer():
            self.mainMenuElements['addBox'] = '\033[90m'
        if self.canRemovePlayer():
            self.mainMenuElements['removeBox'] = '\033[97m'

    def changeComputerSpeed(self):
        if self.computerSpeed == 'slow':
            self.computerSpeed = 'normal'
        elif self.computerSpeed == 'normal':
            self.computerSpeed = 'fast'
        elif self.computerSpeed == 'fast':
            self.computerSpeed = 'slow'

    def getMainMenuElements(self):
        return self.mainMenuElements

class Deck():
    ''''shuffle' (bool) : shuffle deck.'''

    colors = ('red', 'yellow', 'green', 'blue')
    values = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'X', 'R', '+2')

    def __init__(self, populate):
        '''Initializes proper deck of 108 Uno Cards.'''
        self.deck = []
        if populate:
            self.populate(True)

    def __getitem__(self, index):
        return self.deck[index]

    def populate(self, shuffle=True):
        for color in self.colors:
            for value in self.values:
                self.deck.append(Card(color, value))
                if value != '0':
                    self.deck.append(Card(color, value))
        for i in range(4):
            i  # unused
            self.deck.append(Card('wild', '+4'))
            self.deck.append(Card('wild', 'W'))
        if shuffle:
            self.shuffle()

    def __iter__(self):
        return iter(self.deck)

    def __len__(self):
        return len(self.deck)

    def draw(self):
        return self.deck.pop()

    def place(self, card):
        return self.deck.append(card)

    def insert(self, card):
        self.deck.insert(0, card)

    def shuffle(self):
        random.shuffle(self.deck)

class ComputerPlayer(Player):

    def __init__(self, name):
        super().__init__(name)
        self.type = 'Computer'
        self.begun = False
        self.colorsInHand = {'red': 0, 'blue': 0, 'green': 0, 'yellow': 0, 'wild': 0}
        self.colorsOutHand = {}
        self.currentColor = ""

    def addCard(self, card):
        Player.addCard(self, card)
        color = card.getColor()
        self.colorsInHand[color] += 1

    def indexCard(self, cardColor, cardValue):
        for card in self.hand:
            if card.getValue() == cardValue:
                if cardValue in ('+4', 'W'):
                    return self.hand.indexCard(card)
                else:
                    if card.getColor() == cardColor:
                        return self.hand.indexCard(card)
        raise ValueError("Card Cannot Be Found")

    def think(self, match):
        card = None
        self.currentColor = match.currentColor
        currentValue = match.currentValue
        zeroChangeRule = match.zeroChange
        twoPlayers = False
        previousTurnID = match.getNextTurn(True)
        nextTurnID = match.getNextTurn(False)
        previousPlayer = match.getPlayer(previousTurnID)
        #nextPlayer = match.getPlayer(nextTurnID)
        if previousTurnID == nextTurnID:
            twoPlayers = True
            if self.canSkip == False and self.canReverse == True:
                self.canSkip = True
                self.canReverse = False

        self.getLegalCards(self.currentColor, currentValue, zeroChangeRule)

        ### DRAW CASE ###

        if len(self.legalCards) == 0 and len(self.wildCards) == 0:
            return "d"

        else:

            ### NO LEGAL CARD, USE WILD CARD ###

            if len(self.legalCards) == 0:

                if zeroChangeRule and self.canZeroChange:
                    bestZeroColor = self.getBestColor(self.zeroCards)
                    card = self.getCardByColor(self.zeroCards, bestZeroColor)

                else:

                    if self.canDrawFour:
                        card = self.getCardByValue(self.wildCards, "+4")
                        print(card)

                    else:
                        card = random.choice(self.wildCards)

            else:

                ### HAS LEGAL CARD ###

                if twoPlayers and self.canSkip:  # Always play a skip card in a two player game
                    #print("Shed Skip Strategy")
                    card = self.getCardByValue(self.legalCards, "R", "X")

                if self.canReverse and previousPlayer.didDraw():
                    #print("Reverse Strategy")
                    reverseCards = self.getAllCardsByValue(self.legalCards, "R")
                    for reverseCard in reverseCards:
                        if reverseCard.getColor() == self.currentColor:
                            card = reverseCard

                if self.canValueChange:
                    # Computer Can Value Change, However, Should it?
                    # Computer Checks to See if Value Change Color is Better Than Current
                    currentColorNum = self.colorsInHand[self.currentColor]
                    bestValueChangeColor = self.getBestColor(self.valueChangeCards)
                    if self.colorsInHand[bestValueChangeColor] > currentColorNum or len(self.valueChangeCards) == len(self.legalCards):
                        card = self.getCardByColor(self.valueChangeCards, bestValueChangeColor)

                if card == None:
                    #print("Random Strategy")
                    card = random.choice(list(set(self.legalCards) - set(self.valueChangeCards)))

        color = card.getColor()
        self.colorsInHand[color] -= 1
        return str(self.indexCard(card.getColor(), card.getValue()))
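
    # think() strategy, in priority order: draw when nothing is playable; when
    # only wilds fit, prefer a zero-change card (house rule) or a +4 when it is
    # the only legal option, else a random wild; in two-player games shed
    # skip/reverse cards early; reverse back onto an opponent who just drew;
    # switch color when another color dominates the hand; otherwise play a
    # random legal card that keeps the current color.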

    def getWildColor(self):
        maxKey = max(self.colorsInHand, key=self.colorsInHand.get)
        if maxKey == 'wild':
            return random.choice(('r', 'g', 'b', 'y'))
        else:
            return maxKey

    def getCardByValue(self, cardList, *values):
        for card in cardList:
            if card.getValue() in values:
                return card

    def getAllCardsByValue(self, cardList, *values):
        cards = []
        for card in cardList:
            if card.getValue() in values:
                cards.append(card)
        return cards

    def getCardByColor(self, cardList, *colors):
        for card in cardList:
            if card.getColor() in colors:
                return card

    def getBestColor(self, cardList):
        bestColor = None
        bestColorNum = 0
        for card in cardList:
            color = card.getColor()
            if self.colorsInHand[color] > bestColorNum:
                bestColor = color
                bestColorNum = self.colorsInHand[color]
        return bestColor

class Card():
    '''
    'suit' (string) : Card's Color (rgby)
    'rank' (string) : Card's Value (0-9, R, X, W, +2, +4)
    '''

    colors = {
        'red': '\033[91m',
        'green': '\033[92m',
        'yellow': '\033[93m',
        'blue': '\033[94m',
        'purple': '\033[95m',
        'cyan': '\033[96m',
        'white': '\033[97m',
        'wild': '',
        'dwild': '',
        'dred': '\033[31m',
        'dgreen': '\033[32m',
        'dyellow': '\033[33m',
        'dblue': '\033[34m',
        'dpurple': '\033[35m',
        'dcyan': '\033[36m',
        'dwhite': '\033[37m',
    }

    idMap = {
        'red': 'R', 'blue': 'B', 'green': 'G', 'yellow': 'Y', 'wild': 'W',
        '0': '0', '1': '1', '2': '2', '3': '3', '4': '4', '5': '5', '6': '6', '7': '7', '8': '8', '9': '9',
        '+2': '+', 'R': 'R', 'W': 'W', '+4': '$', 'X': 'X'
    }

    bigNums = {
        "0":  [" .d888b. ", "d88P Y88b", "888   888", "888   888", "888   888", "888   888", "d88P Y88b", " \"Y888P\" "],
        "1":  ["  d888   ", " d8888   ", "   888   ", "   888   ", "   888   ", "   888   ", "   888   ", " 8888888 "],
        "2":  [".d8888b. ", "d88P  Y88", "d8    888", "    .d88P", ".od888P\" ", "d88P\"    ", "888\"     ", "888888888"],
        "3":  [" .d8888b.", "d88P  Y88", "     .d88", "   8888\" ", "     \"Y8b", "888    88", "Y88b  d88", " \"Y8888P\""],
        "4":  ["    d88b ", "   d8P88 ", "  d8  88 ", " d8   88 ", "d8    88 ", "888888888", "      88 ", "      88 "],
        "5":  ["888888888", "888      ", "888      ", "8888888b ", "   \"Y88b ", "      888", "Y88b d88P", "\"Y8888P\" "],
        "6":  [" .d888b. ", "d88P Y88b", "888      ", "888d888b ", "888P \"Y8b", "888   888", "Y88b d88b", " \"Y888P\" "],
        "7":  ["888888888", "      d8P", "     d8P ", "    d8P  ", " 8888888 ", "  d8P    ", " d8P     ", "d8P      "],
        "8":  [" .d888b. ", "d8P   Y8b", "Y8b.  d8P", " \"Y8888\" ", " .dP\"Yb. ", "888   888", "Y88b d88P", " \"Y888P\" "],
        "9":  [" .d888b. ", "d8P   Y8b", "88     88", "Y8b.  d88", " \"Y88P888", "      888", "Y88b d88P", " \"Y888P\" "],
        "X":  ["Y8b   d8P", " Y8b d8P ", "  Y8o8P  ", "   Y8P   ", "   d8b   ", "  d888b  ", " d8P Y8b ", "d8P   Y8b"],
        "W":  ["88     88", "88     88", "88  o  88", "88 d8b 88", "88d888b88", "88P   Y88", "8P     Y8", "P       Y"],
        "+2": ["  db     ", "  88     ", "C8888D   ", "  88 8888", "  VP    8", "     8888", "     8   ", "     8888"],
        "+4": ["  db     ", "  88     ", "C8888D   ", "  88    d", "  VP   d8", "      d 8", "    d8888", "        8"],
        "R9": ["    d88P ", "   d88P  ", "  d88P   ", " d88P    ", " Y88b    ", "  Y88b   ", "   Y88b  ", "    Y88b "],
        "R8": ["   d88P  ", "  d88P   ", " d88P    ", "d88P     ", "Y88b     ", " Y88b    ", "  Y88b   ", "   Y88b  "],
        "R7": ["  d88P  Y", " d88P    ", "d88P     ", "88P      ", "88b      ", "Y88b     ", " Y88b    ", "  Y88b  d"],
        "R6": [" d88P  Y8", "d88P    Y", "88P      ", "8P       ", "8b       ", "88b      ", "Y88b    d", " Y88b  d8"],
        "R5": ["d88P  Y88", "88P    Y8", "8P      Y", "P        ", "b        ", "8b      d", "88b    d8", "Y88b  d88"],
        "R4": ["88P  Y88b", "8P    Y88", "P      Y8", "        Y", "        d", "b      d8", "8b    d88", "88b  d88P"],
        "R3": ["8P  Y88b ", "P    Y88b", "      Y88", "       Y8", "       d8", "      d88", "b    d88P", "8b  d88P "],
        "R2": ["P  Y88b  ", "    Y88b ", "     Y88b", "       Y8", "       d8", "     d88P", "b   d88P ", "8b  d88P "],
        "R1": ["  Y88b   ", "   Y88b  ", "    Y88b ", "     Y88b", "     d88P", "    d88P ", "   d88P  ", "  d88P   "],
        "R0": [" Y88b    ", "  Y88b   ", "   Y88b  ", "    Y88b ", "    d88P ", "   d88P  ", "  d88P   ", " d88P    "],
    }

    def __init__(self, color, value):
        '''Initializes Uno Card w/ Color and Value.'''
        self.wild = False  # Is wild card?
        self.zero = False
        self.cardID = '{}{}'.format(self.idMap[color], self.idMap[value])
        self.setColor(color)
        self.setValue(value)
        self.setPoints(value)

    #############################################

    ### -\/- Retrieve Card Information -\/- ###

    def __repr__(self):
        return "{},{}".format(self.color, self.value)

    def getBigNum(self, reverse, reverseSeed=0):
        '''Returns list of strings to draw card's value on the pile.'''
        bigNums = []
        colorCode = self.colorCode
        colorCodeDark = self.colorCodeDark
        value = self.value
        if value == 'R':
            if not reverse:
                value += str(reverseSeed)
            else:
                value += str(9-reverseSeed)
        for mid in self.bigNums[value]:
            bigNums += ['{}| |{}'.format(colorCode, colorCodeDark)+mid+'{}| |\033[0m\t'.format(colorCode)]

        return bigNums

    def getColor(self):
        '''Returns card's color.'''
        return self.color

    def getColorCode(self):
        '''Returns card's color code.'''
        return self.colorCode

    def getValue(self):
        '''Returns card's value.'''
        return self.value

    def getPoints(self):
        '''Returns card's point value.'''
        return self.points

    def getRow(self, rowNum, hide=False):
        value = self.value
        displaySpace = self.displaySpace
        if hide:
            colorCode = '\033[97m'
            value = '?'
            displaySpace = ' '
        else:
            colorCode = self.colorCode
            if self.isWild():
                if rowNum == 0:
                    colorCode = '\033[91m'
                elif rowNum == 1:
                    colorCode = '\033[93m'
                elif rowNum == 2:
                    colorCode = '\033[92m'
                elif rowNum == 3:
                    colorCode = '\033[94m'

        if rowNum == 0:
            return '{}\u2666--\u2666\033[0m'.format(colorCode)
        elif rowNum == 1:
            return '{}|{}{}|\033[0m'.format(colorCode, displaySpace, value)
        elif rowNum == 2:
            if hide:
                return '{}|? |\033[0m'.format(colorCode)
            else:
                return '{}|  |\033[0m'.format(colorCode)
        elif rowNum == 3:
            return '{}\u2666--\u2666\033[0m'.format(colorCode)
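
    # Rendering note: the entries in `colors` are ANSI SGR escapes ('\033[91m'
    # is bright red, the '\033[3Xm' codes are the dark variants, '\033[0m'
    # resets) and '\u2666' is the diamond glyph forming the card corners;
    # hidden cards are drawn in white with a '?' in place of the value.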
|\\033[0m'.format(colorCode)\n else:\n return '{}| |\\033[0m'.format(colorCode)\n elif rowNum == 3:\n return '{}\\u2666--\\u2666\\033[0m'.format(colorCode)\n\n #############################################\n\n ### -\\/- Set Card Information -\\/- ### \n \n def setColor(self, color):\n '''Sets Card's color and escape code.'''\n if color == 'blue':\n self.color = 'blue'\n self.colorCode = self.colors['blue']\n self.colorCodeDark = self.colors['dblue']\n elif color == 'red':\n self.color = 'red'\n self.colorCode = self.colors['red']\n self.colorCodeDark = self.colors['dred']\n elif color == 'yellow':\n self.color = 'yellow'\n self.colorCode = self.colors['yellow']\n self.colorCodeDark = self.colors['dyellow']\n elif color == 'green':\n self.color = 'green'\n self.colorCode = self.colors['green']\n self.colorCodeDark = self.colors['dgreen']\n elif color == 'wild': #No color modification\n self.wild = True\n self.color = 'wild'\n self.colorCodeDark = self.colors['dwild']\n self.colorCode = self.colors['wild']\n\n def setValue(self, value):\n if value in ('0','1','2','3','4','5','6','7','8','9','X','R','+2','+4','W'):\n self.value = value\n self.displaySpace = ' '\n if len(value) == 2:\n self.displaySpace = ''\n if value == '0':\n self.zero = True\n \n def setPoints(self, value):\n if value in ('0','1','2','3','4','5','6','7','8','9'):\n self.points = int(value)\n elif value in (\"W\", \"+4\"):\n self.points = 50\n else:\n self.points = 20\n\n\n #############################################\n\n ### -\\/- Wild Card Methods -\\/- ### \n\n def changeColor(self, color):\n '''Changes Card's Color, Intended for Wild Cards.'''\n self.setColor(color)\n\n def isWild(self):\n '''Returns if card is a wild card.'''\n return self.wild\n \n def isZero(self):\n return self.zero\n \nclass Match():\n\n elementsInit = {\n ### Names (final) ###\n 'P1Name':' ', 'P2Name':' ', 'P3Name':' ', 'P4Name':' ',\n ### Card Values ### \n 'P1Cards':' ', 'P2Cards':' ', 'P3Cards':' ', 'P4Cards':' ',\n ### Turn Colors / Hand###\n 'P1Turn':'', 'P2Turn':'', 'P3Turn':'', 'P4Turn':'',\n 'HName':'\\t\\t', 'HVisual':'' ,'Hand':'',\n ### Deck ###\n 'DNum':'', 'Deck':['','','','','','','','',''],\n 'PostDNum':'',\n ### Pile ###\n 'uHeader':'\\t\\t\\t\\t', 'uMiddle':' ', 'uLower':' ',\n 'oHeader':'\\t\\t\\t', 'oMiddle':['\\t\\t\\t','\\t\\t\\t','\\t\\t\\t','\\t\\t\\t','\\t\\t\\t','\\t\\t\\t','\\t\\t\\t','\\t\\t\\t'],\n ### Messages ###\n 'Console':'', 'Error':''\n }\n \n speeds = {'slow':2,'normal':1,'fast':0}\n \n\n def __init__(self, gs):\n ### Decks ###\n self.deck = Deck(True)\n self.pile = Deck(False)\n \n ### Player Information ###\n self.players = gs.players\n self.turnList = []\n self.handTitles = {'play1':'','play2':'','play3':'','play4':''}\n \n ### Carry Information ###\n self.displayEffects = gs.displayEffects\n self.hideComputerHands = gs.hideComputerHands\n self.zeroChange = gs.zeroChange\n self.computerSpeed = self.speeds[gs.computerSpeed]\n self.simulation = gs.computerSimulation\n\n ### Data ###\n self.handPosition = 0 # For hand displays\n self.drawAmount = 0 # Used for force draws\n self.passes = 0 # Keep track of consecutive passes for emergency color change\n self.passMax = 0 # Max passes before color change\n self.turn = '' # Current turn\n self.event = '' # Wild, Reverse, Skip, etc\n self.wildColorChange = '' # Specifies color to change wild card to\n self.currentColor = '' # Current color\n self.currentValue = '' # Current value\n self.winnerID = '' # ID of Player who Won\n self.reverse = False # Is turn order 
reversed\n self.turnComplete = False # Is turn complete\n self.matchComplete = False # Is the Game over?\n self.matchAbort = False # Did the match conclude without a winner?\n self.forcedWild = False # Force change wild\n\n ### Initialize Names / Cards / Deck (Assuming New Game) ###\n self.elements = dict(self.elementsInit)\n \n keyStringName = 'P{}Name'\n keyStringCards = 'P{}Cards'\n \n for i in self.players:\n self.elements[keyStringName.format(i[-1])] = self.players[i].getName()+(' '*(11-len(self.players[i].getName())))\n self.elements[keyStringCards.format(i[-1])] = ' '+(' '*(3-len(str(self.players[i].getCardNum()))))+str(self.players[i].getCardNum())+' Cards'\n \n self.elements['DNum'] = len(self.deck)\n \n if len(str(len(self.deck))) < 2:\n self.elements['PostDNum'] = '\\t'\n \n j = 8\n for i in range(int(math.ceil(len(self.deck)/12))):\n self.elements['Deck'][j] = '='\n j -= 1\n \n for key in GameSettings.playerIdentities:\n try:\n self.buildHandString(key)\n self.turnList += [key]\n except KeyError:\n pass\n \n self.passMax = len(self.turnList)\n \n def clearShell(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n\n def begin(self):\n self.elements['Console'] = 'Beginning Game, Press Enter.'\n print(self.drawScreen())\n self.enterBreak()\n self.eventDealCards()\n self.turn = random.choice(self.turnList)\n self.elements['Console'] = 'First turn will be {}. Press Enter.'.format(self.players[self.turn].getName())\n print(self.drawScreen(True))\n self.enterBreak()\n self.placeCard()\n self.elements['P{}Turn'.format(self.turn[-1])] = '\\033[93m'\n if self.event == 'wild':\n self.eventWildCard()\n elif self.event == 'reverse':\n self.eventReverse()\n \n def end(self, gs):\n if not self.matchAbort:\n points = 0\n self.elements['P{}Turn'.format(self.turn[-1])] = ''\n self.elements['Console'] = '{} Wins! Press Enter to Begin Point Tally'.format(self.players[self.winnerID].getName())\n print(self.drawScreen())\n self.enterBreak()\n \n for identity in self.turnList:\n if identity != self.winnerID:\n self.turn = identity\n self.elements['HName'] = self.handTitles[self.turn]\n self.elements['P{}Turn'.format(self.turn[-1])] = '\\033[93m'\n while self.players[identity].getCardNum() > 0:\n card = self.players[identity].removeCard(0)\n points += card.getPoints()\n self.elements['Console'] = '{} Won {} Points!'.format(self.players[self.winnerID].getName(),points)\n \n keyStringCards = 'P{}Cards'\n self.elements[keyStringCards.format(identity[-1])] = ' '+(' '*(3-len(str(self.players[identity].getCardNum()))))+str(self.players[identity].getCardNum())+' Cards'\n self.players[identity].maxScroll = math.ceil((self.players[identity].getCardNum() / 10)-1)\n if self.handPosition > self.players[identity].maxScroll:\n self.handPosition -= 1\n self.buildHandVisual(identity)\n \n if self.displayEffects and not self.simulation:\n print(self.drawScreen())\n time.sleep(.1)\n self.elements['P{}Turn'.format(self.turn[-1])] = ''\n \n self.players[self.winnerID].addPoints(points)\n self.elements['Console'] = '{} Won {} Points! 
Press Enter'.format(self.players[self.winnerID].getName(),points)\n print(self.drawScreen())\n self.enterBreak()\n \n gs.clearStaging()\n for identity in self.turnList:\n self.players[identity].discardHand()\n gs.addPlayer(self.players[identity])\n return gs\n \n def adjustCardAmount(self, playerID):\n keyStringCards = 'P{}Cards'\n self.elements[keyStringCards.format(playerID[-1])] = ' '+(' '*(3-len(str(self.players[playerID].getCardNum()))))+str(self.players[playerID].getCardNum())+' Cards'\n self.players[playerID].maxScroll = math.ceil((self.players[playerID].getCardNum() / 10)-1)\n if self.handPosition > self.players[playerID].maxScroll:\n self.handPosition -= 1\n self.buildHandVisual(playerID)\n\n def buildHandString(self, playerID):\n playerName = self.players[playerID].getName()\n if len(playerName) < 9:\n self.handTitles[playerID] = \"{}'s Hand\\t\".format(self.players[playerID].getName())\n else:\n self.handTitles[playerID] = \"{}'s Hand\".format(self.players[playerID].getName())\n\n def buildHandVisual(self, playerID):\n string = '['\n for i in range(self.players[playerID].maxScroll+1):\n if i == self.handPosition:\n string += '|'\n else:\n string += '-'\n string += ']'\n self.elements['HVisual'] = string\n\n def checkInput(self, playerInput):\n if playerInput == '':\n return {'valid':False,'entry':playerInput}\n if playerInput.isnumeric():\n if int(playerInput)+(10*self.handPosition) < self.players[self.turn].getCardNum():\n return {'valid':True,'entry':str(int(playerInput)+(10*self.handPosition)),'type':'card'}\n else:\n self.elements['Error'] = '{} is not a card.'.format(playerInput)\n return {'valid':False,'entry':playerInput}\n else:\n playerInput = playerInput.lower()[0]\n if playerInput in ['<','>','u','d','p','q','s']:\n return {'valid':True,'entry':playerInput}\n else:\n self.elements['Error'] = '{} is not a valid selection.'.format(playerInput)\n return {'valid':False,'entry':playerInput}\n\n def checkColorInput(self, playerInput):\n if playerInput == '':\n return {'valid':False,'entry':playerInput}\n playerInput = str(playerInput).lower()[0]\n if playerInput[0] == 'b':\n return {'valid':True,'entry':'blue'}\n elif playerInput[0] == 'r':\n return {'valid':True,'entry':'red'}\n elif playerInput[0] == 'g':\n return {'valid':True,'entry':'green'}\n elif playerInput[0] == 'y':\n return {'valid':True,'entry':'yellow'}\n return {'valid':False,'entry':playerInput}\n\n def eventDealCards(self):\n if self.displayEffects and not self.simulation:\n self.elements['Console'] = 'Dealing Cards...'\n for i in ('play1','play2','play3','play4'):\n if i in self.players:\n for j in range(7):\n j #unused\n self.dealCard(i)\n if self.displayEffects and not self.simulation:\n print(self.drawScreen(True))\n time.sleep(.1)\n\n def eventReverse(self):\n if self.displayEffects and not self.simulation:\n hide = False\n if self.players[self.turn].getType() == \"Computer\":\n hide = self.hideComputerHands\n self.elements['Console'] = \"Reverse Card Played! 
Reversing Turn Order.\".format(self.players[self.turn].getName())\n print(self.drawScreen(hide))\n time.sleep(1)\n for i in range(10):\n cardBigNums = self.pile[0].getBigNum(self.reverse,i)\n self.elements['oMiddle'] = cardBigNums\n print(self.drawScreen(hide))\n if self.displayEffects and not self.simulation:\n time.sleep(.1)\n cardBigNums = self.pile[0].getBigNum(self.reverse,9)\n self.elements['oMiddle'] = cardBigNums\n self.reverse = not self.reverse\n self.event = ''\n \n def eventSkip(self):\n if self.displayEffects and not self.simulation:\n hide = False\n if self.players[self.turn].getType() == \"Computer\":\n hide = self.hideComputerHands\n self.elements['Console'] = \"Skip Card Placed! Skipping {}'s Turn.\".format(self.players[self.turn].getName())\n print(self.drawScreen(hide))\n time.sleep(1)\n for i in range(2):\n i #unused\n self.elements['P{}Turn'.format(self.turn[-1])] = '\\033[91m'\n print(self.drawScreen(hide))\n time.sleep(.3)\n self.elements['P{}Turn'.format(self.turn[-1])] = ''\n print(self.drawScreen(hide))\n time.sleep(.3)\n self.turnComplete = True\n self.event = ''\n\n def eventWildCard(self):\n hide = False\n if not self.forcedWild:\n if self.players[self.turn].getType() == 'Human':\n self.elements['Console'] = 'Wild Card! Specify a Color: (B)lue, (R)ed, (G)reen, (Y)ellow'\n self.elements['Error'] = 'Specify A Color'\n print(self.drawScreen())\n playerInput = str(input(\"Color Change: \"))\n checked = self.checkColorInput(playerInput)\n while not checked['valid']:\n if checked['entry'] == '<':\n self.handPosition -= 1\n if self.handPosition == -1:\n self.handPosition = self.players[self.turn].maxScroll\n self.buildHandVisual(self.turn)\n elif checked['entry'] == '>':\n self.handPosition += 1\n if self.handPosition > self.players[self.turn].maxScroll:\n self.handPosition = 0\n self.buildHandVisual(self.turn)\n print(self.drawScreen())\n playerInput = str(input(\"Color Change: \"))\n checked = self.checkColorInput(playerInput)\n else:\n hide = self.hideComputerHands\n checked = self.checkColorInput(self.players[self.turn].getWildColor())\n self.wildColorChange = checked['entry']\n else:\n self.wildColorChange = self.checkColorInput(random.choice(('r','b','g','y')))['entry']\n self.forcedWild = False\n self.currentColor = self.wildColorChange\n self.elements['Error'] = \"\"\n if self.displayEffects and not self.simulation:\n self.elements['Console'] = 'Wild Card! 
Changing Color.'\n seed = 1\n for i in range(10):\n i #unused\n if seed > 4:\n seed = 1\n print(self.drawScreen(hide,wildSeed=seed))\n time.sleep(.1)\n seed += 1\n self.pile[0].changeColor(self.wildColorChange)\n self.wildColorChange = ''\n cardBigNums = self.pile[0].getBigNum(self.reverse)\n self.elements['oHeader'] = '{}\\u2666\\u2666\\u2666=========\\u2666\\u2666\\u2666\\033[0m\\t'.format(self.pile[0].getColorCode())\n self.elements['oMiddle'] = cardBigNums\n self.event = ''\n \n def eventDraw(self):\n self.players[self.turn].addForceDraw(self.drawAmount)\n self.drawAmount = 0\n self.event = ''\n\n def dealCard(self, playerID):\n \n card = self.deck.draw()\n self.players[playerID].addCard(card)\n \n ### Adjust Hand Visual ###\n self.players[playerID].maxScroll = math.ceil((self.players[playerID].getCardNum() / 10)-1)\n self.handPosition = self.players[playerID].maxScroll\n self.buildHandVisual(playerID)\n \n ### Adjust Player Tile ###\n keyStringCards = 'P{}Cards'\n self.elements[keyStringCards.format(playerID[-1])] = ' '+(' '*(3-len(str(self.players[playerID].getCardNum()))))+str(self.players[playerID].getCardNum())+' Cards'\n \n ### Adjust Deck ###\n self.elements['DNum'] = len(self.deck)\n if len(str(len(self.deck))) < 2:\n self.elements['PostDNum'] = '\\t'\n j = 8\n self.elements['Deck'] = [' ',' ',' ',' ',' ',' ',' ',' ', ' ']\n for i in range(math.ceil(len(self.deck)/12)):\n i #unused\n self.elements['Deck'][j] = '='\n j -= 1\n\n def placeCard(self, card=None):\n if card == None:\n ### Used At Beginning For First Card ###\n card = self.deck.draw()\n self.elements['DNum'] = len(self.deck)\n \n cardColor = card.getColorCode()\n cardBigNums = card.getBigNum(self.reverse)\n \n self.currentColor = card.getColor()\n self.currentValue = card.getValue()\n \n self.pile.insert(card)\n self.elements['oHeader'] = '{}\\u2666\\u2666\\u2666=========\\u2666\\u2666\\u2666\\033[0m\\t'.format(cardColor)\n self.elements['oMiddle'] = cardBigNums\n \n if len(self.pile) > 1:\n previousCard = self.pile[1]\n previousCardColor = previousCard.getColorCode()\n self.elements['uHeader'] = '{} \\u2666\\u2666\\u2666=========\\u2666\\u2666\\u2666\\033[0m\\t\\t'.format(previousCardColor)\n self.elements['uMiddle'] = '{}| |\\033[0m'.format(previousCardColor)\n self.elements['uLower'] = '{}\\u2666\\u2666\\u2666\\033[0m'.format(previousCardColor)\n \n if self.currentColor == 'wild':\n self.event = 'wild'\n \n if self.currentValue == 'X':\n self.event = 'skip'\n elif self.currentValue == 'R':\n if len(self.players) > 2:\n self.event = 'reverse'\n else:\n self.event = 'skip'\n elif self.currentValue == '+4':\n self.drawAmount = 4\n elif self.currentValue == '+2':\n self.drawAmount = 2\n self.passes = 0\n \n def extractCard(self, playerID, index):\n card = self.players[playerID].removeCard(index)\n if self.players[playerID].getCardNum() == 0:\n self.matchComplete = True\n self.winnerID = self.turn\n self.adjustCardAmount(playerID)\n return card\n \n def enterBreak(self):\n if not self.simulation:\n str(input())\n return\n \n def nextTurn(self):\n self.turnComplete = False\n self.handPosition = 0\n turnType = self.players[self.turn].getType()\n self.players[self.turn].beginTurn()\n ### Prepare Hand Visuals ###\n \n self.elements['HName'] = self.handTitles[self.turn]\n self.buildHandVisual(self.turn)\n \n if self.event == 'skip':\n self.eventSkip()\n elif self.drawAmount > 0:\n self.eventDraw()\n \n while not self.turnComplete:\n if turnType == 'Human':\n self.players[self.turn].getLegalCards(self.currentColor, 
self.currentValue, self.zeroChange)\n if len(self.deck) > 0:\n self.elements['Console'] = 'Select a card, (D)raw, or (P)ause.'\n else:\n self.players[self.turn].removeForceDraw()\n self.elements['Console'] = 'Select a card, (D)raw, (P)ause, or Pas(s).'\n if self.players[self.turn].getForceDraws() > 0:\n self.elements['Error'] = 'Draw Card Played! Draw {} cards.'.format(self.players[self.turn].getForceDraws())\n print(self.drawScreen())\n playerInput = str(input(\"\\033[97mSelection: \\033[92m\"))\n checked = self.checkInput(playerInput)\n while not checked['valid']:\n print(self.drawScreen())\n playerInput = str(input(\"\\033[97mSelection: \\033[92m\"))\n checked = self.checkInput(playerInput)\n \n playerInput = checked['entry']\n \n if playerInput == '<':\n self.handPosition -= 1\n if self.handPosition == -1:\n self.handPosition = self.players[self.turn].maxScroll\n self.buildHandVisual(self.turn)\n elif playerInput == '>':\n self.handPosition += 1\n if self.handPosition > self.players[self.turn].maxScroll:\n self.handPosition = 0\n self.buildHandVisual(self.turn)\n elif playerInput == 'd':\n if len(self.deck) > 0:\n self.elements['Error'] = ''\n self.dealCard(self.turn)\n else:\n self.elements['Error'] = \"Cannot Draw. Deck is Empty\"\n elif playerInput == 'p':\n pauseOutput = self.pauseScreen()\n if pauseOutput == 'quit':\n self.matchComplete = True\n self.turnComplete = True\n self.winnerID = 'play1'\n self.matchAbort = True\n elif playerInput == 's':\n if len(self.deck) > 0:\n self.elements['Error'] = \"Cannot pass until Deck is empty.\"\n elif len(self.players[self.turn].getAllValidCards()) > 0:\n self.elements['Error'] = \"Cannot pass while having playable cards.\"\n else:\n self.turnComplete = True\n self.passes += 1\n if self.passes == self.passMax:\n self.forcedWild = True\n self.event = 'wild'\n self.passes = 0\n elif playerInput.isnumeric():\n if self.players[self.turn].getForceDraws() == 0:\n cardCheck = self.players[self.turn].checkCard(playerInput)\n if cardCheck in self.players[self.turn].getAllValidCards():\n card = self.extractCard(self.turn, playerInput)\n self.placeCard(card)\n self.elements['Error'] = \"\"\n self.turnComplete = True\n else:\n self.elements['Error'] = \"Card Doesn't Match The Color {} or Value {}!\".format(self.currentColor, self.currentValue)\n else:\n pass\n \n elif turnType == 'Computer':\n self.elements['Console'] = '{}\\'s Turn'.format(self.players[self.turn].getName())\n print(self.drawScreen(self.hideComputerHands))\n if not self.simulation:\n time.sleep(self.computerSpeed)\n #str(input())\n while (True):\n if self.displayEffects and not self.simulation:\n time.sleep(.2)\n if self.players[self.turn].getForceDraws() > 0 and len(self.deck) > 0:\n cardIndex = 'd'\n else:\n cardIndex = self.players[self.turn].think(self)\n if cardIndex.isnumeric():\n card = self.extractCard(self.turn, int(cardIndex))\n if card.getColor() != self.currentColor:\n self.resetDrawBool()\n self.placeCard(card)\n self.turnComplete = True\n break\n else:\n if cardIndex == 'd':\n if len(self.deck) > 0:\n self.dealCard(self.turn)\n print(self.drawScreen(self.hideComputerHands))\n else:\n self.turnComplete = True\n self.players[self.turn].removeForceDraw()\n self.passes += 1\n if self.passes == self.passMax:\n self.forcedWild = True\n self.event = 'wild'\n self.passes = 0\n break\n \n ### DECODE INPUT ###\n \n if self.event == 'reverse':\n self.eventReverse()\n elif self.event == 'wild':\n self.eventWildCard()\n \n # Clear Current Turn\n 
self.elements['P{}Turn'.format(self.turn[-1])] = ''\n # Prepare Next Turn\n self.turn = self.getNextTurn()\n self.elements['P{}Turn'.format(self.turn[-1])] = '\\033[93m'\n\n def drawScreen(self, hide=False, wildSeed=0):\n if self.simulation:\n return ''\n colorCombos = {\n 1 : ['\\033[91m','\\033[93m','\\033[92m','\\033[94m'],\n 2 : ['\\033[94m','\\033[91m','\\033[93m','\\033[92m'],\n 3 : ['\\033[92m','\\033[94m','\\033[91m','\\033[93m'],\n 4 : ['\\033[93m','\\033[92m','\\033[94m','\\033[91m'] }\n currentTurn = self.turn\n if currentTurn == '':\n currentTurn = self.turnList[-1]\n hide = True\n if wildSeed != 0:\n colorMod = colorCombos[wildSeed]\n else:\n colorMod = ['','','','']\n\n self.clearShell()\n screenout = ''\n screenout += '\\t\\t\\033[94m || ||\\033[92m ||\\ || \\033[91m// \\\\\\\\\\n\\033[0m'\n screenout += '\\t\\t\\033[94m || ||\\033[92m ||\\\\\\|| \\033[91m(( ))\\n\\033[0m'\n screenout += '\\t\\t\\033[94m \\\\\\ //\\033[92m || \\|| \\033[91m \\\\\\ //\\n\\033[0m'\n screenout += '\\033[97m===============================================================\\n'\n screenout += '\\033[93m{}\\033[0m\\n'.format(self.elements['Console'])\n screenout += '\\033[97m===============================================================\\n'\n screenout += '\\t\\t\\t\\t\\t\\t' + ' \\033[97m{}\\u2666-----------\\u2666\\033[0m\\n'.format(self.elements['P1Turn'])\n screenout += '\\033[97mDeck:\\t\\t' + '{}'.format(self.elements['uHeader']) + ' \\033[97m{}|{}|\\033[0m\\n'.format(self.elements['P1Turn'],self.elements['P1Name'])\n screenout += '\\033[97m{} Cards'.format(self.elements['DNum']) + '{}'.format(self.elements['PostDNum'])+'\\t' + '{}'.format(self.elements['uHeader']) + ' \\033[97m{}|{}|\\033[0m\\n'.format(self.elements['P1Turn'],self.elements['P1Cards'])\n screenout += '\\t\\t ' + '{}'.format(self.elements['uMiddle']) + '\\033[97m{}{}'.format(colorMod[0],self.elements['oHeader']) + ' \\033[97m{}\\u2666-----------\\u2666\\033[0m\\n'.format(self.elements['P1Turn'])\n screenout += '\\033[97m _+_ \\t\\t ' + '{}'.format(self.elements['uMiddle']) + '\\033[97m{}{}'.format(colorMod[1],self.elements['oHeader']) + ' \\033[97m{}\\u2666-----------\\u2666\\033[0m\\n'.format(self.elements['P2Turn']) \n screenout += '\\033[97m | ' + '\\033[92m{}\\033[0m'.format(self.elements['Deck'][0]) + '\\033[97m |\\t\\t ' + '{}'.format(self.elements['uMiddle']) + '\\033[97m{}{}'.format(colorMod[2],self.elements['oMiddle'][0]) + ' \\033[97m{}|{}|\\033[0m\\n'.format(self.elements['P2Turn'],self.elements['P2Name'])\n screenout += '\\033[97m | ' + '\\033[92m{}\\033[0m'.format(self.elements['Deck'][1]) + '\\033[97m |\\t\\t ' + '{}'.format(self.elements['uMiddle']) + '\\033[97m{}{}'.format(colorMod[3],self.elements['oMiddle'][1]) + ' \\033[97m{}|{}|\\033[0m\\n'.format(self.elements['P2Turn'],self.elements['P2Cards'])\n screenout += '\\033[97m | ' + '\\033[92m{}\\033[0m'.format(self.elements['Deck'][2]) + '\\033[97m |\\t\\t ' + '{}'.format(self.elements['uMiddle']) + '\\033[97m{}{}'.format(colorMod[0],self.elements['oMiddle'][2]) + ' \\033[97m{}\\u2666-----------\\u2666\\033[0m\\n'.format(self.elements['P2Turn'])\n screenout += '\\033[97m | ' + '\\033[93m{}\\033[0m'.format(self.elements['Deck'][3]) + '\\033[97m |\\t\\t ' + '{}'.format(self.elements['uMiddle']) + '\\033[97m{}{}'.format(colorMod[1],self.elements['oMiddle'][3]) + ' \\033[97m{}\\u2666-----------\\u2666\\033[0m\\n'.format(self.elements['P3Turn'])\n screenout += '\\033[97m | ' + '\\033[93m{}\\033[0m'.format(self.elements['Deck'][4]) + '\\033[97m |\\t\\t ' + 
'{}'.format(self.elements['uMiddle']) + '\\033[97m{}{}'.format(colorMod[2],self.elements['oMiddle'][4]) + ' \\033[97m{}|{}|\\033[0m\\n'.format(self.elements['P3Turn'],self.elements['P3Name'])\n screenout += '\\033[97m | ' + '\\033[93m{}\\033[0m'.format(self.elements['Deck'][5]) + '\\033[97m |\\t\\t ' + '{}'.format(self.elements['uMiddle']) + '\\033[97m{}{}'.format(colorMod[3],self.elements['oMiddle'][5]) + ' \\033[97m{}|{}|\\033[0m\\n'.format(self.elements['P3Turn'],self.elements['P3Cards'])\n screenout += '\\033[97m | ' + '\\033[91m{}\\033[0m'.format(self.elements['Deck'][6]) + '\\033[97m |\\t\\t ' + '{}'.format(self.elements['uLower']) + '\\033[97m{}{}'.format(colorMod[0],self.elements['oMiddle'][6]) + ' \\033[97m{}\\u2666-----------\\u2666\\033[0m\\n'.format(self.elements['P3Turn'])\n screenout += '\\033[97m | ' + '\\033[91m{}\\033[0m'.format(self.elements['Deck'][7]) + '\\033[97m |\\t\\t ' + '{}'.format(self.elements['uLower']) + '\\033[97m{}{}'.format(colorMod[1],self.elements['oMiddle'][7]) + ' \\033[97m{}\\u2666-----------\\u2666\\033[0m\\n'.format(self.elements['P4Turn'])\n screenout += '\\033[97m |_' + '\\033[91m{}\\033[0m'.format(self.elements['Deck'][8]) + '\\033[97m_|\\t\\t ' + '\\033[97m{}{}'.format(colorMod[2],self.elements['oHeader']) + ' \\033[97m{}|{}|\\033[0m\\n'.format(self.elements['P4Turn'],self.elements['P4Name'])\n screenout += '\\033[97m\\t\\t ' + '\\033[97m{}{}'.format(colorMod[3],self.elements['oHeader']) + ' \\033[97m{}|{}|\\033[0m\\n'.format(self.elements['P4Turn'],self.elements['P4Cards'])\n screenout += '\\t\\t\\t\\t\\t\\t' + ' \\033[97m{}\\u2666-----------\\u2666\\033[0m\\n'.format(self.elements['P4Turn'])\n screenout += \"\\033[97m{}\".format(self.elements['HName']) + \"\\t\\t\\t\\t {}\\n\".format(self.elements['HVisual'])\n screenout += '\\033[97m===============================================================\\n'\n screenout += self.players[currentTurn].getHand(self.handPosition,hide)\n screenout += '\\033[91m{}\\033[0m'.format(self.elements['Error'])\n return screenout\n \n def pauseScreen(self):\n while True:\n self.clearShell()\n print('\\n\\t\\t\\tPause')\n print('\\n\\t\\t1. Resume')\n print('\\t\\t2. 
Quit')\n \n selection = str(input('\\nSelection: ')).upper()\n while selection not in ['1', '2']:\n print('\\nSelection Invalid')\n selection = str(input('\\nSelection: ')).upper()\n \n if selection == '1':\n return \"\"\n \n elif selection == '2':\n return \"quit\"\n \n \n def isComplete(self):\n return self.matchComplete\n \n def next(self):\n self.turn = self.getNextTurn()\n \n def getNextTurn(self, forceReverse=False):\n if forceReverse:\n reverse = not self.reverse\n else:\n reverse = self.reverse\n currentIndex = self.turnList.index(self.turn)\n if not reverse:\n if (currentIndex + 1) == len(self.turnList):\n return self.turnList[0]\n else:\n return self.turnList[currentIndex+1]\n else:\n if currentIndex == 0:\n return self.turnList[len(self.turnList) - 1]\n else:\n return self.turnList[currentIndex-1]\n \n def getPlayer(self, playerID):\n return self.players[playerID]\n \n def resetDrawBool(self):\n for identity in self.players:\n self.players[identity].drew = False\n\ndef Uno(debugging=False):\n\n ###MENUS###\n \n def clearShell():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n def mainMenu():\n sys.stdout.write(\"\\x1b[8;32;63t\")\n sys.stdout.flush()\n gs = GameSettings()\n \n while True:\n \n print(drawMainMenu(gs))\n \n selection = str(input('\\033[97mSelection: \\033[92m'))\n while selection not in ['1', '2', '3', '4', '5']:\n gs.mainMenuError = \"Invalid Selection\"\n print(drawMainMenu(gs))\n selection = str(input('\\033[97mSelection: \\033[92m'))\n \n if selection == '1':\n if gs.canBegin():\n gs.mainMenuError = \"\"\n gs.finalizePlayers()\n gs = playMatch(gs)\n else:\n gs.mainMenuError = \"Two Players Required to Begin\"\n\n elif selection == '2':\n if gs.canAddPlayer():\n gs.mainMenuError = \"\"\n gs = addPlayer(gs)\n else:\n gs.mainMenuError = \"Max Number of Players Reached\"\n \n elif selection == '3':\n if gs.canAddPlayer():\n gs.mainMenuError = \"\"\n gs = addComputer(gs)\n else:\n gs.mainMenuError = \"Max Number of Players Reached\"\n\n elif selection == '4':\n if gs.canRemovePlayer():\n gs.mainMenuError = \"\"\n gs = removePlayer(gs)\n else:\n gs.mainMenuError = \"No Players to Remove\"\n\n elif selection == '5':\n gs.mainMenuError = \"\"\n gs = settingsMenu(gs)\n\n else:\n raise BadInputError('Data Provided Has No Function')\n \n def playMatch(gs):\n for i in range(1):\n i\n m = Match(gs)\n m.begin()\n while (not m.isComplete()):\n m.nextTurn()\n gs = m.end(gs)\n return gs\n \n def addPlayer(gs):\n colors = ['\\033[91m','\\033[94m', '\\033[92m', '\\033[93m']\n nameOkay = False\n playerNum = gs.getPlayerNum() + 1\n colorIndex = playerNum - 1\n message = \"\\033[97mPlease Enter Player {}'s Name: {}\".format(playerNum, colors[colorIndex])\n \n while not nameOkay:\n print(drawMainMenu(gs))\n name = str(input(message)).title()\n if len(name) > 11:\n gs.mainMenuError = \"Name Must Be 11 Characters or Less!\"\n elif len(name) == 0:\n gs.mainMenuError = \"\"\n return gs\n else:\n nameOkay = True\n for player in gs.playerStaging:\n if player.getName() == name:\n nameOkay = False\n if name in GameSettings.computerNames:\n nameOkay = False\n if not nameOkay:\n gs.mainMenuError = \"Name Cannot Match Another Player's Name!\"\n \n p = Player(name)\n gs.addPlayer(p)\n gs.mainMenuError = \"\"\n \n return gs\n \n def addComputer(gs):\n name = gs.getComputerName()\n c = ComputerPlayer(name)\n gs.addPlayer(c)\n \n return gs\n \n def removePlayer(gs):\n sys.stdout.write(\"\\x1b[8;{rows};{cols}t\".format(rows=32, cols=63))\n sys.stdout.flush()\n clearShell()\n \n complete = False\n 
playerNum = gs.getPlayerNum()\n message = \"\\033[97mPlease Enter Player Number to Remove: \\033[91m\".format(playerNum)\n \n while (not complete):\n print(drawMainMenu(gs))\n number = str(input(message)) \n if len(number) == 0:\n gs.mainMenuError = \"\"\n return gs\n try:\n number = int(number)\n if 0 < number <= playerNum:\n complete = True\n else:\n gs.mainMenuError = \"Invalid Player Number!\"\n except:\n gs.mainMenuError = \"Please Enter the Player Number, not Name!\"\n \n gs.mainMenuError = \"\"\n gs.removePlayer(number)\n return gs\n \n def settingsMenu(gs):\n while True:\n sys.stdout.write(\"\\x1b[8;32;63t\")\n sys.stdout.flush()\n clearShell()\n print('\\n\\t\\tSettings')\n print('\\n\\t1. Draw Effects\\t\\t\\t{}'.format(gs.displayEffects))\n print('\\t2. Hide Computer Hands\\t\\t{}'.format(gs.hideComputerHands))\n print('\\t3. Computer Speed\\t\\t{}'.format(gs.computerSpeed.title()))\n #print('\\t4. Zero Card Changes Color\\t{}'.format(gs.zeroChange))\n #print('\\t5. Run Simulations\\t\\t{}'.format(gs.computerSimulation))\n print('\\n\\tA. Exit')\n \n selection = str(input('\\nSelection: ')).upper()\n while selection not in ('1', '2', '3', '4', '5', 'A', ''):\n print('\\nSelection Invalid')\n selection = str(input('\\nSelection: ')).upper()\n \n if selection == '1':\n gs.displayEffects = not gs.displayEffects\n \n elif selection == '2':\n gs.hideComputerHands = not gs.hideComputerHands\n \n elif selection == '3':\n gs.changeComputerSpeed()\n '''\n elif selection == '4':\n gs.zeroChange = not gs.zeroChange\n \n elif selection == '5':\n gs.computerSimulation = not gs.computerSimulation\n '''\n elif selection == 'A' or selection == '' or selection in ('4','5'):\n return gs\n \n def drawMainMenu(gs):\n clearShell()\n gs.compileMainMenuElements()\n menuElements = gs.getMainMenuElements()\n screenout = ''\n screenout += '\\t\\t\\033[94m || ||\\033[92m ||\\ || \\033[91m// \\\\\\\\\\n\\033[0m'\n screenout += '\\t\\t\\033[94m || ||\\033[92m ||\\\\\\|| \\033[91m(( ))\\n\\033[0m'\n screenout += '\\t\\t\\033[94m \\\\\\ //\\033[92m || \\|| \\033[91m \\\\\\ //\\n\\033[0m'\n screenout += '\\033[97m===============================================================\\033[0m\\n'\n screenout += \"{}1-----------------------------1\\033[0m {}2-----------------------------2\\033[0m\\n\".format(menuElements['play1box'],menuElements['play2box'])\n screenout += \"{}|{}|\\033[0m {}|{}|\\033[0m\\n\".format(menuElements['play1box'],menuElements['play1row1'],menuElements['play2box'],menuElements['play2row1'])\n screenout += \"{}|{}|\\033[0m {}|{}|\\033[0m\\n\".format(menuElements['play1box'],menuElements['play1row2'],menuElements['play2box'],menuElements['play2row2'])\n screenout += \"{}1-----------------------------1\\033[0m {}2-----------------------------2\\033[0m\\n\".format(menuElements['play1box'],menuElements['play2box'])\n screenout += \"{}3-----------------------------3\\033[0m {}4-----------------------------4\\033[0m\\n\".format(menuElements['play3box'],menuElements['play4box'])\n screenout += \"{}|{}|\\033[0m {}|{}|\\033[0m\\n\".format(menuElements['play3box'],menuElements['play3row1'],menuElements['play4box'],menuElements['play4row1'])\n screenout += \"{}|{}|\\033[0m {}|{}|\\033[0m\\n\".format(menuElements['play3box'],menuElements['play3row2'],menuElements['play4box'],menuElements['play4row2'])\n screenout += \"{}3-----------------------------3\\033[0m {}4-----------------------------4\\033[0m\\n\".format(menuElements['play3box'],menuElements['play4box'])\n screenout += 
\"\\033[97m===============================================================\\033[0m\\n\"\n screenout += \" {}\\u2666---------------------------\\u2666\\033[0m \\u2666===========================\\u2666\\n\".format(menuElements['beginBox'])\n screenout += \" {}|1. Begin Match |\\033[0m | High Scores |\\n\".format(menuElements['beginBox'])\n screenout += \" {}\\u2666---------------------------\\u2666\\033[0m \\u2666---------------------------\\u2666\\n\".format(menuElements['beginBox'])\n screenout += \" {}\\u2666---------------------------\\u2666\\033[0m | |\\n\".format(menuElements['addBox'])\n screenout += \" {}|2. Add Player |\\033[0m | |\\n\".format(menuElements['addBox'])\n screenout += \" {}\\u2666---------------------------\\u2666\\033[0m | |\\n\".format(menuElements['addBox'])\n screenout += \" {}\\u2666---------------------------\\u2666\\033[0m | |\\n\".format(menuElements['addBox'])\n screenout += \" {}|3. Add Computer |\\033[0m | |\\n\".format(menuElements['addBox'])\n screenout += \" {}\\u2666---------------------------\\u2666\\033[0m | |\\n\".format(menuElements['addBox'])\n screenout += \" {}\\u2666---------------------------\\u2666\\033[0m | |\\n\".format(menuElements['removeBox'])\n screenout += \" {}|4. Remove Player |\\033[0m | |\\n\".format(menuElements['removeBox'])\n screenout += \" {}\\u2666---------------------------\\u2666\\033[0m | |\\n\".format(menuElements['removeBox'])\n screenout += \" \\033[97m\\u2666---------------------------\\u2666\\033[0m | |\\n\"\n screenout += \" \\033[97m|5. Settings |\\033[0m | |\\n\"\n screenout += \" \\033[97m\\u2666---------------------------\\u2666\\033[0m \\u2666===========================\\u2666\\n\"\n screenout += \"\\033[97m===============================================================\\033[0m\\n\"\n screenout += '\\033[91m{}\\033[0m'.format(gs.mainMenuError)\n return screenout\n \n mainMenu()\n \nif __name__ == \"__main__\":\n Uno()\n \n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":3301806578621670400,"string":"3,301,806,578,621,670,400"},"line_mean":{"kind":"number","value":42.7153094463,"string":"42.715309"},"line_max":{"kind":"number","value":439,"string":"439"},"alpha_frac":{"kind":"number","value":0.4748371906,"string":"0.474837"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109533,"cells":{"repo_name":{"kind":"string","value":"SRLKilling/sigma-backend"},"path":{"kind":"string","value":"data-server/django_app/sigma_core/views/group_field.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2127"},"content":{"kind":"string","value":"from rest_framework import status\nfrom rest_framework.decorators import detail_route\nfrom sigma_core.views.sigma_viewset import SigmaViewSet\nfrom sigma_core.importer import load_ressource\n\nGroupField = load_ressource(\"GroupField\")\n\n\nclass GroupFieldViewSet(SigmaViewSet):\n \n serializer_class = GroupField.serializer\n queryset = GroupField.model.objects.all()\n \n \n #*********************************************************************************************#\n #** Read actions **#\n #*********************************************************************************************#\n\n \n def retrieve(self, request, pk):\n \"\"\"\n REST retrieve action. 
Used to retrieve a group_field.\n \"\"\"\n return self.handle_action_pk('retrieve', request, pk)\n \n \n \n \n #*********************************************************************************************#\n #** Write actions **#\n #*********************************************************************************************#\n\n def create(self, request):\n \"\"\"\n REST create action. Used to create a Group Field.\n If succeeded, returns HTTP_201_CREATED with the newly created Group field object.\n \"\"\"\n return self.handle_action('create', request)\n \n \n \n def update(self, request, pk):\n \"\"\"\n REST update action. Used to update a Group Field.\n If succeeded, returns HTTP_200_OK with the updated Group field object.\n \"\"\"\n \n # return self.basic_update(request, pk) # HERE !\n pass\n \n \n def destroy(self, request, pk):\n \"\"\"\n REST destroy action. Used to delete a Group Field.\n If succeeded, returns HTTP_204_NO_CONTENT.\n \"\"\"\n \n return self.handle_action_pk('destroy', request, pk)\n "},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":7523328847063784000,"string":"7,523,328,847,063,784,000"},"line_mean":{"kind":"number","value":35.0677966102,"string":"35.067797"},"line_max":{"kind":"number","value":136,"string":"136"},"alpha_frac":{"kind":"number","value":0.4165491302,"string":"0.416549"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109534,"cells":{"repo_name":{"kind":"string","value":"USGSDenverPychron/pychron"},"path":{"kind":"string","value":"pychron/envisage/browser/analysis_table.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"8406"},"content":{"kind":"string","value":"# ===============================================================================\n# Copyright 2013 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============================================================================\n\n# ============= enthought library imports =======================\nimport json\nimport os\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom hashlib import md5\n\nfrom traits.api import List, Any, Str, Enum, Bool, Event, Property, cached_property, Instance, DelegatesTo, \\\n CStr, Int, Button\n\nfrom pychron.column_sorter_mixin import ColumnSorterMixin\nfrom pychron.core.fuzzyfinder import fuzzyfinder\nfrom pychron.core.ui.table_configurer import AnalysisTableConfigurer\nfrom pychron.dvc.func import get_review_status\nfrom pychron.envisage.browser.adapters import AnalysisAdapter\nfrom pychron.paths import paths\n\n\ndef sort_items(ans):\n return sorted(ans, key=lambda x: x.timestampf)\n\n\nclass AnalysisTable(ColumnSorterMixin):\n analyses = List\n oanalyses = List\n selected = Any\n dclicked = Any\n\n context_menu_event = Event\n\n analysis_filter = CStr\n analysis_filter_values = List\n analysis_filter_comparator = Enum('=', '<', '>', '>=', '<=', 'not =', 'startswith')\n analysis_filter_parameter = Str\n analysis_filter_parameters = 
Property(List, depends_on='tabular_adapter.columns')\n\n # omit_invalid = Bool(True)\n table_configurer = Instance(AnalysisTableConfigurer)\n\n limit = DelegatesTo('table_configurer')\n omit_invalid = DelegatesTo('table_configurer')\n\n no_update = False\n scroll_to_row = Event\n refresh_needed = Event\n tabular_adapter = Instance(AnalysisAdapter)\n append_replace_enabled = Bool(True)\n\n add_analysis_set_button = Button\n analysis_set = Str\n analysis_set_names = List\n _analysis_sets = None\n max_history = Int\n suppress_load_analysis_set = False\n\n def __init__(self, *args, **kw):\n super(AnalysisTable, self).__init__(*args, **kw)\n\n self._analysis_sets = OrderedDict()\n\n def load(self):\n p = paths.hidden_path('analysis_sets')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n jd = json.load(rfile, object_pairs_hook=OrderedDict)\n self._analysis_sets = jd\n self.analysis_set_names = list(reversed([ji[0] for ji in jd.values()]))\n\n def dump(self):\n p = paths.hidden_path('analysis_sets')\n with open(p, 'w') as wfile:\n json.dump(self._analysis_sets, wfile)\n\n def get_selected_analyses(self):\n if self.analyses:\n ans = self.selected\n if not ans:\n ans = self.analyses\n return ans\n\n def add_analysis_set(self):\n ans = self.get_selected_analyses()\n if ans:\n aset = [(a.uuid, a.record_id) for a in ans]\n if aset:\n if len(aset) > 1:\n name = '{} -- {}'.format(aset[0][1], aset[-1][1])\n else:\n name = aset[0][1]\n\n h = md5(''.join(sorted((ai[0] for ai in aset)))).hexdigest()\n if h not in self._analysis_sets:\n name = '{} ({})'.format(name, datetime.now().strftime('%m/%d/%y'))\n self._analysis_sets[h] = (name, aset)\n\n if self.max_history:\n while len(self._analysis_sets) > self.max_history:\n self._analysis_sets.popitem(last=False)\n return name\n\n def get_analysis_set(self, name):\n return next((a[1] for a in self._analysis_sets.itervalues() if a[0] == name))\n\n def set_tags(self, tag, items):\n for i in items:\n ai = next((a for a in self.oanalyses if a.uuid == i.uuid), None)\n if ai:\n ai.tag = tag\n\n self._analysis_filter_changed(self.analysis_filter)\n\n def remove_invalid(self):\n self.oanalyses = [ai for ai in self.oanalyses if ai.tag != 'invalid']\n self._analysis_filter_changed(self.analysis_filter)\n\n def add_analyses(self, ans):\n items = self.analyses\n items.extend(ans)\n self.oanalyses = self.analyses = sort_items(items)\n self.calculate_dts(self.analyses)\n self.scroll_to_row = len(self.analyses) - 1\n\n def set_analyses(self, ans, tc=None, page=None, reset_page=False, selected_identifiers=None):\n if selected_identifiers:\n aa = self.analyses\n aa = [ai for ai in aa if ai.identifier in selected_identifiers]\n aa.extend(ans)\n else:\n aa = ans\n\n new_items = sort_items(aa)\n items = [ai for ai in self.analyses if ai.frozen]\n\n new_items = [ai for ai in new_items if ai not in items]\n items.extend(new_items)\n\n self.oanalyses = self.analyses = items\n\n self.calculate_dts(self.analyses)\n self.scroll_to_row = len(self.analyses) - 1\n\n def calculate_dts(self, ans):\n if ans and len(ans) > 1:\n self._python_dt(ans)\n\n def _python_dt(self, ans):\n ref = ans[0]\n prev = ref.timestampf\n ref.delta_time = 0\n for ai in ans[1:]:\n t = ai.timestampf\n dt = (t - prev) / 60.\n ai.delta_time = dt\n prev = t\n\n def configure_table(self):\n self.table_configurer.edit_traits(kind='livemodal')\n\n def review_status_details(self):\n from pychron.envisage.browser.review_status_details import ReviewStatusDetailsView, ReviewStatusDetailsModel\n m = 
ReviewStatusDetailsModel(self.selected[0])\n rsd = ReviewStatusDetailsView(model=m)\n rsd.edit_traits()\n\n def toggle_freeze(self):\n for ai in self.get_selected_analyses():\n ai.frozen = not ai.frozen\n self.refresh_needed = True\n\n def load_review_status(self):\n records = self.get_analysis_records()\n if records:\n for ri in records:\n get_review_status(ri)\n self.refresh_needed = True\n\n def get_analysis_records(self):\n records = self.selected\n if not records:\n records = self.analyses\n\n return records\n\n # handlers\n def _add_analysis_set_button_fired(self):\n name = self.add_analysis_set()\n if name:\n self.dump()\n self.load()\n\n self.suppress_load_analysis_set = True\n self.analysis_set = name\n self.suppress_load_analysis_set = False\n\n def _analyses_items_changed(self, old, new):\n if self.sort_suppress:\n return\n\n self.calculate_dts(self.analyses)\n\n if new.removed:\n for ai in new.removed:\n self.oanalyses.remove(ai)\n\n def _analysis_filter_changed(self, new):\n if new:\n name = self.analysis_filter_parameter\n self.analyses = fuzzyfinder(new, self.oanalyses, name)\n # self.analyses = filter(filter_func(new, name), self.oanalyses)\n else:\n self.analyses = self.oanalyses\n\n def _analysis_filter_comparator_changed(self):\n self._analysis_filter_changed(self.analysis_filter)\n\n def _get_analysis_filter_parameter(self):\n p = self.analysis_filter_parameter\n return p.lower()\n\n @cached_property\n def _get_analysis_filter_parameters(self):\n return dict([(ci[1], ci[0]) for ci in self.tabular_adapter.columns])\n\n # defaults\n def _table_configurer_default(self):\n return AnalysisTableConfigurer(id='analysis.table',\n title='Configure Analysis Table')\n\n def _analysis_filter_parameter_default(self):\n return 'record_id'\n\n def _tabular_adapter_default(self):\n adapter = AnalysisAdapter()\n self.table_configurer.adapter = adapter\n self.table_configurer.load()\n return adapter\n\n# ============= EOF =============================================\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-1964306526817542100,"string":"-1,964,306,526,817,542,100"},"line_mean":{"kind":"number","value":32.094488189,"string":"32.094488"},"line_max":{"kind":"number","value":116,"string":"116"},"alpha_frac":{"kind":"number","value":0.5923150131,"string":"0.592315"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109535,"cells":{"repo_name":{"kind":"string","value":"chripo/calflate"},"path":{"kind":"string","value":"tests/test_vcard.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1509"},"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\n\n# AUTHOR: http://www.christoph-polcin.com\n# LICENSE: FreeBSD License\n# CREATED: 2014-01-19\n\nimport calflate\n\n\ndef test_new_empty_collection():\n data = calflate.new_collection((\"\", \"VCARD\", ))\n assert data == ''\n\n\ndef test_new_collection():\n data = calflate.new_collection((\"UID:12345\", \"VCARD\", ))\n assert data == 'UID:12345'\n data = calflate.new_collection((\"UID:12345\\nREV:2011-01-02\", \"VCARD\", ))\n assert data == 'UID:12345\\r\\nREV:2011-01-02'\n data = calflate.new_collection((\"UID:12345\\n\\nREV:2011-01-02\", \"VCARD\", ))\n assert data == 'UID:12345\\r\\nREV:2011-01-02'\n\n\ndef test_get_items():\n items = 
calflate.get_items('''BEGIN:VCARD\nVERSION:3.0\nN:Gump;Forrest;Mr.\nUID:01234-01234-01234-01234\nREV:2008-04-24T19:52:43Z\nEND:VCARD\nBEGIN:VCARD\nVERSION:4.0\nN:Gump;Forrest;Mr.\nUID:3333-3444-55\nEND:VCARD\n''')\n item = next(items)\n assert len(item) == 4\n assert item[0] == '''BEGIN:VCARD\\nVERSION:3.0\\nN:Gump;Forrest;Mr.\\n\\\nUID:01234-01234-01234-01234\\nREV:2008-04-24T19:52:43Z\\nEND:VCARD'''\n assert item[1] == 'VCARD'\n assert item[2] == '01234-01234-01234-01234'\n assert item[3] == '2008-04-24T19:52:43Z'\n\n item = next(items)\n assert len(item) == 4\n assert item[0] == 'BEGIN:VCARD\\nVERSION:4.0\\nN:Gump;Forrest;Mr.\\nUID:3333-3444-55\\nEND:VCARD'\n assert item[1] == 'VCARD'\n assert item[2] == '3333-3444-55'\n assert item[3] == '0'\n\n try:\n item = next(items)\n assert 0\n except StopIteration:\n assert 1\n"},"license":{"kind":"string","value":"bsd-2-clause"},"hash":{"kind":"number","value":-2247921644035591000,"string":"-2,247,921,644,035,591,000"},"line_mean":{"kind":"number","value":25.9464285714,"string":"25.946429"},"line_max":{"kind":"number","value":97,"string":"97"},"alpha_frac":{"kind":"number","value":0.6388336647,"string":"0.638834"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109536,"cells":{"repo_name":{"kind":"string","value":"labase/activnce"},"path":{"kind":"string","value":"main/bookmarks/model.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"12237"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\n################################################\nPlataforma ActivUFRJ\n################################################\n\n:Author: *Núcleo de Computação Eletrônica (NCE/UFRJ)*\n:Contact: carlo@nce.ufrj.br\n:Date: $Date: 2009-2010 $\n:Status: This is a \"work in progress\"\n:Revision: $Revision: 0.01 $\n:Home: `LABASE `__\n:Copyright: ©2009, `GPL \n\"\"\"\n\ntry:\n from couchdb.schema import Document, TextField, IntegerField, LongField, DateTimeField, DictField, ListField, Schema, FloatField\nexcept ImportError:\n from couchdb.mapping import Document, TextField, IntegerField, LongField, DateTimeField, DictField, ListField, FloatField\n from couchdb.mapping import Mapping as Schema\n\nimport database\nfrom search.model import addTag, removeTag\nfrom libs.permissions import isAllowedToDeleteObject, isAllowedToWriteObject, isAllowedToDeleteComment\nfrom libs.dateformat import short_datetime\nfrom libs.strformat import remove_diacritics, remove_special_chars\n\nfrom datetime import datetime\nimport operator\nfrom operator import itemgetter\n\ndef _strListSize(number, str, genero='M'):\n plural = lambda x: 's' if x!=1 else ''\n if number>0:\n return u\"%d %s%s\" % (number, str, plural(number))\n elif genero=='M':\n return u\"nenhum %s\" % str\n else:\n return u\"nenhuma %s\" % str\n\ndef _urlCount(url):\n for row in database.BOOKMARKS.view('bookmarks/count_by_url',startkey=url, group=\"true\"):\n return row.value\n return 0\n \nclass Bookmarks(Document):\n # _id = \n registry_id = TextField() # bookmark owner: a user or a community\n owner = TextField() # who created the bookmark.\n # if the bookmark belongs to a community, owner!=registry_id\n url = TextField()\n title = TextField()\n description = TextField()\n tags = ListField(TextField())\n data_cri = TextField()\n data_alt = TextField()\n alterado_por = TextField()\n comentarios = ListField(DictField(Schema.build(\n owner = TextField(),\n comment = TextField(),\n data_cri = TextField()\n )))\n\n @classmethod\n def createBookmarkLink(self, 
user, url):\n for item in database.BOOKMARKS.view('bookmarks/by_registry_id_and_url',startkey=[user,url],endkey=[user, url, {}]):\n return (\"Editar favorito\", \"/static/imagens/icones/bookmark32_true.png\", \"/bookmarks/popup/\"+user+\"?url=\"+url, \"\",\"\",True)\n return (\"Adicionar favorito\", \"/static/imagens/icones/bookmark32_false.png\", \"/bookmarks/popup/\"+user+\"?url=\"+url, \"\",\"\",True)\n\n\n\n @classmethod\n def searchIdByUrl(self, registry_id, url):\n for row in database.BOOKMARKS.view('bookmarks/by_registry_id_and_url' ,startkey=[registry_id, url],endkey=[registry_id, url, {}]):\n return row.key[2]\n return None\n\n @classmethod\n def searchBookmarksByUrl(self, user, page, page_size, url):\n bookmarks = []\n # Fetch one page of results from the DB\n # descending=true so the most recent entries are listed first\n # since the tree is traversed in reverse, endkey is the first document and startkey is the last. \n for row in database.BOOKMARKS.view('bookmarks/by_url' ,startkey=[url, {}, {}], endkey=[url], descending=\"true\", skip=(page-1)*page_size , limit=page_size):\n (url, data_alt, bookmark_id) = row.key\n bookmark_data = dict()\n bookmark_data[\"registry_id\"] = row.value[\"registry_id\"]\n bookmark_data[\"owner\"] = row.value[\"owner\"]\n bookmark_data[\"description\"] = row.value[\"description\"]\n bookmark_data[\"title\"] = row.value[\"title\"]\n bookmark_data[\"url\"] = row.value[\"url\"]\n bookmark_data[\"tags\"] = row.value[\"tags\"]\n bookmark_data[\"id\"] = bookmark_id\n \n bookmark_data[\"alterar\"] = isAllowedToWriteObject(user, \"bookmarks\", row.value[\"registry_id\"])\n bookmark_data[\"apagar\"] = bookmark_data[\"alterar\"] and isAllowedToDeleteObject(user, row.value[\"owner\"], row.value[\"registry_id\"]+\"/\"+bookmark_id)\n\n \n bookmark_data[\"data_nofmt\"] = row.value[\"data_alt\"]\n bookmark_data[\"data_alt\"] = short_datetime(row.value[\"data_alt\"])\n bookmark_data[\"alterado_por\"] = row.value[\"alterado_por\"]\n bookmark_data[\"num_comments\"] = _strListSize (len(row.value[\"comentarios\"]), u\"comentário\")\n #bookmark_data[\"url_count\"] = _strListSize (_urlCount(bookmark_data[\"url\"]), u\"referência\", genero='F')\n #url_count = _urlCount(bookmark_data[\"url\"])\n bookmark_data[\"url_count\"] = \"\"\n #if url_count > 1: bookmark_data[\"url_count\"] = u\"%d usuários marcaram esta página\" % url_count\n\n bookmarks.append(bookmark_data)\n \n bookmarks = sorted(bookmarks, key=itemgetter(\"data_nofmt\"), reverse = True)\n return bookmarks\n \n @classmethod\n def countBookmarksByUrl(self, url):\n for row in database.BOOKMARKS.view('bookmarks/count_by_url', \\\n startkey=url, \\\n group=\"true\"): \n return row.value\n return 0\n \n @classmethod\n def countBookmarksByRegistryId(self, registry_id):\n for row in database.BOOKMARKS.view('bookmarks/count_by_registry_id', \\\n startkey=[registry_id],endkey=[registry_id, {}], \\\n group_level=1, group=\"true\"): \n return row.value\n return 0\n\n @classmethod\n def countBookmarksByRegistryIdAndTag(self, registry_id, tag):\n for row in database.BOOKMARKS.view('bookmarks/count_by_registry_id_and_tag', \\\n startkey=[registry_id, tag],endkey=[registry_id, tag, {}], \\\n group_level=1, group=\"true\"): \n return row.value\n return 0\n \n @classmethod\n def listBookmarks(self, user, registry_id, page, page_size, tag=None):\n bookmarks = []\n if tag:\n view_name = 'bookmarks/by_registry_id_and_tag'\n start_key = [registry_id, tag]\n end_key = [registry_id, tag, {}, {}]\n else:\n view_name = 
'bookmarks/by_registry_id'\n start_key = [registry_id]\n end_key = [registry_id, {}, {}]\n\n # Fetch one page of results from the DB\n # descending=true so the most recent entries are listed first\n # since the tree is traversed in reverse, endkey is the first document and startkey is the last. \n for row in database.BOOKMARKS.view(view_name, startkey=end_key,endkey=start_key, descending=\"true\", skip=(page-1)*page_size , limit=page_size):\n if tag:\n (registry_id, tag_found, data_alt, bookmark_id) = row.key\n else:\n (registry_id, data_alt, bookmark_id) = row.key\n\n bookmark_data = dict()\n bookmark_data[\"registry_id\"] = registry_id\n bookmark_data[\"owner\"] = row.value[\"owner\"]\n bookmark_data[\"description\"] = row.value[\"description\"]\n bookmark_data[\"title\"] = row.value[\"title\"]\n bookmark_data[\"url\"] = row.value[\"url\"]\n bookmark_data[\"tags\"] = row.value[\"tags\"]\n bookmark_data[\"id\"] = bookmark_id\n \n # _file = Files().retrieve(file_id)\n bookmark_data[\"alterar\"] = isAllowedToWriteObject(user, \"bookmarks\", registry_id)\n bookmark_data[\"apagar\"] = bookmark_data[\"alterar\"] and isAllowedToDeleteObject(user, row.value[\"owner\"], registry_id+\"/\"+bookmark_id)\n \n \n bookmark_data[\"data_nofmt\"] = row.value[\"data_alt\"]\n bookmark_data[\"data_alt\"] = short_datetime(row.value[\"data_alt\"])\n bookmark_data[\"alterado_por\"] = row.value[\"alterado_por\"]\n bookmark_data[\"num_comments\"] = _strListSize (len(row.value[\"comentarios\"]), u\"comentário\")\n #bookmark_data[\"url_count\"] = _strListSize (url_count, u\"referência\", genero='F')\n url_count = _urlCount(bookmark_data[\"url\"])\n bookmark_data[\"url_count\"] = \"\"\n if url_count > 1: bookmark_data[\"url_count\"] = u\"%d usuários marcaram esta página\" % url_count\n \n bookmarks.append(bookmark_data)\n return bookmarks\n\n \n @classmethod\n def listAllTags(self, registry_id, tag=None):\n \n tags_list = []\n for row in database.BOOKMARKS.view('bookmarks/by_registry_id_and_tag', startkey = [registry_id], endkey = [registry_id, {}, {}, {}]):\n (registry_id, tag_found, data_alt, bookmark_id) = row.key\n tags_list.append(tag_found)\n\n if tag and tag in tags_list:\n tags_list.remove(tag)\n tags_list = sorted(set(tags_list)) \n return tags_list\n\n def saveBookmark(self, id=None):\n self.save(id=id)\n \n # update the tags table\n # belongs in tags.model\n data_tag = str(datetime.now())\n for tag in self.tags:\n if self.title:\n nome = self.title \n else:\n url = self.url\n url = remove_special_chars(remove_diacritics(url.replace(\" \",\"_\")))\n nome = url\n addTag(tag, self.registry_id, self.owner, \"bookmarks\", self.id, nome, data_tag)\n \n def deleteBookmark(self):\n tags = self.tags\n self.delete()\n \n # update the tags table\n # belongs in tags.model\n for tag in tags:\n removeTag(remove_diacritics(tag.lower()), \"bookmarks\", self.id)\n \n def editBookmark(self, user, newtitle, newdesc, newtags):\n # keep the previous tags\n old_tags = self.tags\n \n self.title = newtitle\n self.description = newdesc\n self.tags = newtags\n self.alterado_por = user\n self.data_alt = str(datetime.now())\n self.save()\n \n # compare the previous tags with the modified ones, updating the tag list in the DB\n data_tag = str(datetime.now())\n if self.title:\n nome = self.title\n else:\n url = self.url\n url = remove_special_chars(remove_diacritics(url.replace(\" \",\"_\")))\n nome = url\n \n for tag in self.tags:\n if tag not in old_tags:\n addTag(tag, self.registry_id, user, \"bookmarks\", self.id, nome, 
data_tag)\n \n for tag in old_tags:\n if tag not in self.tags:\n removeTag(remove_diacritics(tag.lower()), \"bookmarks\", self.id)\n \n def addBookmarkComment(self, owner, comment):\n self.comentarios.append(dict(\n owner = owner,\n comment = comment,\n data_cri = str(datetime.now())\n ))\n self.save()\n \n def prepareCommentsToPrint(self, user):\n for comment in self.comentarios:\n comment[\"apagar\"] = isAllowedToDeleteComment(user, self.registry_id, comment[\"owner\"])\n comment[\"data_fmt\"] = short_datetime(comment[\"data_cri\"])\n comment[\"comment\"] = comment[\"comment\"].replace(\"\\r\\n\", \"<br/>
\")\n self.comentarios = sorted(self.comentarios, key=itemgetter(\"data_cri\"), reverse=True)\n \n def deleteBookmarkComment(self, owner, data_cri):\n for comentario in self.comentarios:\n if comentario[\"owner\"]==owner and comentario[\"data_cri\"]==data_cri:\n self.comentarios.remove(comentario)\n self.save()\n return True\n return False\n \n def save(self, id=None, db=database.BOOKMARKS):\n if not self.id and id: self.id = id\n self.store(db)\n \n def retrieve(self, id, db=database.BOOKMARKS):\n return Bookmarks.load(db, id)\n \n def delete(self, db=database.BOOKMARKS):\n #db.delete(self)\n del db[self.id]\n \n "},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":-8946841370132735000,"string":"-8,946,841,370,132,735,000"},"line_mean":{"kind":"number","value":42.0070422535,"string":"42.007042"},"line_max":{"kind":"number","value":163,"string":"163"},"alpha_frac":{"kind":"number","value":0.5700483092,"string":"0.570048"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109537,"cells":{"repo_name":{"kind":"string","value":"Zincr0/xtweepy"},"path":{"kind":"string","value":"setup.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1135"},"content":{"kind":"string","value":"# -*- coding=utf-8 -*-\nimport sys\nimport os\nfrom setuptools import setup\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\n\nfiles = ['xtweepy/*']\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetup(\n name = 'xtweepy',\n version = '0.0.1',\n author = 'Daniel Mondaca Seguel',\n author_email = 'dnielm@gmail.com',\n description = ('tweepy based library with 1.1 search api support'),\n license = 'MIT',\n keywords = 'twitter library',\n url = 'https://github.com/Nievous',\n packages=['xtweepy'],\n install_requires = ['simplejson', 'tweepy'],\n long_description=read('README.txt'),\n package_data = {'package' : files },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Topic :: Software Development',\n 'License :: MIT',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n ],\n)\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-1463559531065777000,"string":"-1,463,559,531,065,777,000"},"line_mean":{"kind":"number","value":26.6829268293,"string":"26.682927"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.6299559471,"string":"0.629956"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109538,"cells":{"repo_name":{"kind":"string","value":"slackhq/python-slackclient"},"path":{"kind":"string","value":"slack_sdk/scim/v1/group.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2522"},"content":{"kind":"string","value":"from typing import Optional, List, Union, Dict, Any\n\nfrom .default_arg import DefaultArg, NotGiven\nfrom .internal_utils import _to_dict_without_not_given, _is_iterable\n\n\nclass GroupMember:\n display: Union[Optional[str], DefaultArg]\n value: Union[Optional[str], DefaultArg]\n unknown_fields: Dict[str, Any]\n\n def __init__(\n self,\n *,\n display: Union[Optional[str], DefaultArg] = NotGiven,\n value: Union[Optional[str], DefaultArg] = NotGiven,\n **kwargs,\n ) -> None:\n self.display = display\n self.value = 
value\n self.unknown_fields = kwargs\n\n def to_dict(self):\n return _to_dict_without_not_given(self)\n\n\nclass GroupMeta:\n created: Union[Optional[str], DefaultArg]\n location: Union[Optional[str], DefaultArg]\n unknown_fields: Dict[str, Any]\n\n def __init__(\n self,\n *,\n created: Union[Optional[str], DefaultArg] = NotGiven,\n location: Union[Optional[str], DefaultArg] = NotGiven,\n **kwargs,\n ) -> None:\n self.created = created\n self.location = location\n self.unknown_fields = kwargs\n\n def to_dict(self):\n return _to_dict_without_not_given(self)\n\n\nclass Group:\n display_name: Union[Optional[str], DefaultArg]\n id: Union[Optional[str], DefaultArg]\n members: Union[Optional[List[GroupMember]], DefaultArg]\n meta: Union[Optional[GroupMeta], DefaultArg]\n schemas: Union[Optional[List[str]], DefaultArg]\n unknown_fields: Dict[str, Any]\n\n def __init__(\n self,\n *,\n display_name: Union[Optional[str], DefaultArg] = NotGiven,\n id: Union[Optional[str], DefaultArg] = NotGiven,\n members: Union[Optional[List[GroupMember]], DefaultArg] = NotGiven,\n meta: Union[Optional[GroupMeta], DefaultArg] = NotGiven,\n schemas: Union[Optional[List[str]], DefaultArg] = NotGiven,\n **kwargs,\n ) -> None:\n self.display_name = display_name\n self.id = id\n self.members = (\n [a if isinstance(a, GroupMember) else GroupMember(**a) for a in members]\n if _is_iterable(members)\n else members\n )\n self.meta = (\n GroupMeta(**meta) if meta is not None and isinstance(meta, dict) else meta\n )\n self.schemas = schemas\n self.unknown_fields = kwargs\n\n def to_dict(self):\n return _to_dict_without_not_given(self)\n\n def __repr__(self):\n return f\"\"\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":2794789609398700500,"string":"2,794,789,609,398,700,500"},"line_mean":{"kind":"number","value":29.756097561,"string":"29.756098"},"line_max":{"kind":"number","value":86,"string":"86"},"alpha_frac":{"kind":"number","value":0.6106264869,"string":"0.610626"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109539,"cells":{"repo_name":{"kind":"string","value":"StratoSource/StratoSource"},"path":{"kind":"string","value":"stratosource/management/commands/sfdiff.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"28907"},"content":{"kind":"string","value":"# Copyright 2010, 2011 Red Hat Inc.\n#\n# This file is part of StratoSource.\n#\n# StratoSource is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# StratoSource is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied waarranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with StratoSource. 
If not, see .\n#\nimport pytz\nfrom django.core.management.base import BaseCommand\nfrom django.core.exceptions import ObjectDoesNotExist\nimport os\nimport sys\nimport subprocess\nimport logging\nfrom datetime import datetime\nfrom django.db import transaction\nfrom lxml import etree\nimport stratosource.models\n\n__author__ = \"masmith\"\n__date__ = \"$Jul 26, 2010 2:23:44 PM$\"\n\nSF_NAMESPACE = '{http://soap.sforce.com/2006/04/metadata}'\nCODE_BASE = 'unpackaged'\n\ndocumentCache = {}\nmapCache = {}\n\nlogger = logging.getLogger('console')\n\n\nclass NewObjectException(Exception):\n pass\n\n\nclass DeletedObjectException(Exception):\n pass\n\n\n##\n# [ git utilities ]\n##\n\ndef resetLocalRepo(branch_name):\n subprocess.check_call([\"git\", \"checkout\", branch_name])\n\n\n# subprocess.check_call([\"git\",\"reset\",\"--hard\",\"{0}\".format(branch_name)])\n\ndef branchExists(branchname):\n proc = subprocess.Popen(['git', 'branch', '-a'], shell=False, stdout=subprocess.PIPE)\n input, error = proc.communicate()\n for br in input.split('\\n'):\n br = br.rstrip()\n if len(br) > 0 and br[2:] == branchname: return True\n return False\n\n\ndef getCurrentTag():\n proc = subprocess.Popen(['git', 'describe'], shell=False, stdout=subprocess.PIPE)\n input, error = proc.communicate()\n tag = input.rstrip()\n return tag\n\n\ndef getCurrentBranch():\n proc = subprocess.Popen(['git', 'branch'], shell=False, stdout=subprocess.PIPE)\n input, error = proc.communicate()\n for br in input.split('\\n'):\n br = br.rstrip()\n if len(br) > 0 and br[0:2] == \"* \":\n return br[2:]\n return 'unknown'\n\n\ndef verifyGitRepo():\n proc = subprocess.Popen(['git', 'status'], shell=False, stderr=subprocess.PIPE)\n input, error = proc.communicate()\n if error.find('Not a git repository') > 0:\n logger.error('Error: Not a git repository')\n sys.exit(1)\n\n\ndef getDiffNames(left, right):\n logger.info('git diff --name-only %s %s' % (left, right))\n proc = subprocess.Popen(['git', 'diff', '--name-only', left, right], shell=False, stdout=subprocess.PIPE)\n input, error = proc.communicate()\n changedList = []\n all = 0\n map = {}\n for entry in input.split('\\n'):\n all = all + 1\n entry = entry.rstrip()\n if entry == '.gitignore': continue\n if len(entry) > 1 and not entry.endswith('.xml'):\n #logger.debug(' entry={0}'.format(entry))\n parts = entry.split('/')\n type = parts[1]\n name = '/'.join(parts[2:]) # put trailing components back together to support folder-based assets\n# a, b = os.path.split(entry)\n if not map.has_key(type): map[type] = []\n map[type].append(name)\n changedList.append(entry)\n\n changedList.sort()\n return map\n\n\n##\n# [ XML parsing and searching ]\n##\n\ndef getElementMap(key):\n global mapCache\n\n if mapCache.has_key(key):\n return mapCache[key]\n m = {}\n mapCache[key] = m\n return m\n\n\ndef getObjectChanges(lkey, lcache, rkey, rcache, objectName, elementName, resolver):\n global documentCache\n\n ldoc = None\n rdoc = None\n\n if documentCache.has_key(lkey + objectName): ldoc = documentCache[lkey + objectName]\n if documentCache.has_key(rkey + objectName): rdoc = documentCache[rkey + objectName]\n\n rmap = getElementMap(rkey + objectName + elementName)\n lmap = getElementMap(lkey + objectName + elementName)\n\n if ldoc is None:\n lobj = lcache.get(objectName)\n if lobj:\n ldoc = etree.XML(lobj)\n if ldoc is None: return None, None\n documentCache[lkey + objectName] = ldoc\n if rdoc is None:\n robj = rcache.get(objectName)\n if robj:\n rdoc = etree.XML(robj)\n if rdoc is None: return None, 
None\n documentCache[rkey + objectName] = rdoc\n\n if ldoc is None and not rdoc is None:\n raise NewObjectException()\n if not ldoc is None and rdoc is None:\n raise DeletedObjectException()\n\n return resolver(ldoc, rdoc, rmap, lmap, elementName)\n\n\ndef compareObjectMaps(lmap, rmap):\n missing = {}\n updates = {}\n\n for lname, lnodestring in lmap.items():\n # find the field in the other file\n if rmap.has_key(lname):\n rnodestring = rmap[lname]\n # compare for changes\n if lnodestring != rnodestring:\n updates[lname] = rnodestring\n else:\n # field missing on right, must be deleted\n missing[lname] = lnodestring\n return updates, missing\n\n\ndef populateElementMap(doc, nodeName, elementName, amap):\n if doc != None and len(amap) == 0:\n children = doc.findall(nodeName)\n for child in children:\n node = child.find(elementName)\n amap[node.text] = etree.tostring(child)\n\n\ndef objectChangeResolver(ldoc, rdoc, rmap, lmap, elementName):\n nodeName = SF_NAMESPACE + elementName\n nameKey = SF_NAMESPACE + 'fullName'\n #\n # build a map of custom label names and xml fragment for faster lookups\n #\n populateElementMap(rdoc, nodeName, nameKey, rmap)\n populateElementMap(ldoc, nodeName, nameKey, lmap)\n return compareObjectMaps(lmap, rmap)\n\n\ndef objectTranslationChangeResolver(ldoc, rdoc, rmap, lmap, elementName):\n nodeName = SF_NAMESPACE + elementName\n nameKey = SF_NAMESPACE + 'name'\n #\n # build a map of custom label names and xml fragment for faster lookups\n #\n populateElementMap(rdoc, nodeName, nameKey, rmap)\n populateElementMap(ldoc, nodeName, nameKey, lmap)\n return compareObjectMaps(lmap, rmap)\n\n\ndef translationChangeResolver(ldoc, rdoc, rmap, lmap, elementName):\n nodeName = SF_NAMESPACE + elementName\n nameKey = SF_NAMESPACE + 'name'\n #\n # build a map of custom label names and xml fragment for faster lookups\n #\n populateElementMap(rdoc, nodeName, nameKey, rmap)\n populateElementMap(ldoc, nodeName, nameKey, lmap)\n return compareObjectMaps(lmap, rmap)\n\n\ndef getAllFullNames(doc, elementName, tagname='fullName'):\n fqfullname = SF_NAMESPACE + tagname\n nodes = doc.findall(SF_NAMESPACE + elementName)\n if nodes:\n allnames = []\n for node in nodes:\n el = node.find(fqfullname)\n if el is not None: allnames.append(el.text)\n # allnames = [node.find(fqfullname).text for node in nodes]\n return allnames\n else:\n logger.debug('No nodes found for %s' % elementName)\n return []\n\n\ndef getAllObjectChanges(objectName, lFileCache, rFileCache, elementname, resolver):\n updates, deletes = getObjectChanges('l', lFileCache, 'r', rFileCache, objectName, elementname, resolver)\n rupdates, inserts = getObjectChanges('r', rFileCache, 'l', lFileCache, objectName, elementname, resolver)\n return inserts, updates, deletes\n\n\n##\n# [ database and caching ]\n##\n\ndef createFileCache(hash, map, branch_name):\n\n subprocess.check_call([\"git\", \"checkout\", branch_name])\n\n try:\n tmpbranch = branch_name + '_sfdiff'\n if branchExists(tmpbranch):\n subprocess.check_call([\"git\", \"branch\", \"-D\", tmpbranch])\n subprocess.check_call([\"git\", \"checkout\", \"-b\", tmpbranch, hash])\n\n # os.system('git reset --hard {0}'.format(hash))\n cache = {}\n for type, list in map.items():\n if type in ('objects', 'labels', 'translations', 'objectTranslations', 'workflows'):\n for objectName in list:\n try:\n path = os.path.join(CODE_BASE, type, objectName)\n f = open(path)\n cache[objectName] = f.read()\n f.close()\n except IOError:\n # print '** not able to load ' + path\n pass # caused by a 
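# Editorial aside, not part of the original module: getAllObjectChanges above derives inserts, updates and deletes by running the one-directional compareObjectMaps in both directions with the sides swapped. A compact sketch of the same idea for two plain dicts (dict_diff is a hypothetical helper name, not from this codebase):\n#\n#   def dict_diff(old, new):\n#       inserts = {k: v for k, v in new.items() if k not in old}\n#       updates = {k: v for k, v in new.items() if k in old and old[k] != v}\n#       deletes = {k: v for k, v in old.items() if k not in new}\n#       return inserts, updates, deletes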
new file added, not present on current branch\n else:\n for objectName in list:\n if os.path.isfile(os.path.join(CODE_BASE, type, objectName)):\n cache[objectName] = None\n return cache\n finally:\n subprocess.check_call([\"git\", \"checkout\", branch_name])\n\n\ndef getDeployable(branch, objectName, objectType, el_type, el_name, el_subtype=None):\n try:\n if el_type and el_name:\n deployable = stratosource.models.DeployableObject.objects.get(branch=branch, type__exact=objectType, filename__exact=objectName,\n el_type__exact=el_type, el_name__exact=el_name,\n el_subtype__exact=el_subtype,\n status__exact='a')\n else:\n deployable = stratosource.models.DeployableObject.objects.get(branch=branch, type__exact=objectType, filename__exact=objectName,\n status__exact='a')\n except ObjectDoesNotExist:\n deployable = stratosource.models.DeployableObject()\n deployable.type = objectType\n deployable.filename = objectName\n deployable.branch = branch\n deployable.el_type = el_type\n deployable.el_name = el_name\n deployable.el_subtype = el_subtype\n deployable.save()\n return deployable\n\n\ndef insert_deltas(commit, objectName, type, items, delta_type, el_type, el_subtype=None):\n # global mqclient\n\n for item in items:\n deployable = getDeployable(commit.branch, objectName, type, el_type, item, el_subtype)\n delta = stratosource.models.Delta()\n delta.user_change = get_last_change(objectName, el_type, item)\n delta.object = deployable\n delta.commit = commit\n delta.delta_type = delta_type\n delta.save()\n\n\n# if not delta.user_change is None:\n# mqclient.publish({'user': delta.user_change.sfuser.name.encode('ascii', 'ignore'), 'commit': commit.hash,\n# 'dtype': delta_type, 'type': type, 'item': item,\n# 'last_update': delta.user_change.last_update.isoformat()})\n\n\ndef get_last_change(objectName, el_type, el_name):\n fullName = objectName\n if el_type == 'labels': return None # not doing audit tracking for labels\n\n if el_type == 'fields': el_type = 'object'\n\n parts = objectName.split('.')\n if len(parts) > 1 and not el_type is None:\n parts[0] = el_type + ':' + parts[0]\n if el_name: parts[0] += '.' 
+ el_name\n fullName = parts[0] # '.'.join(parts)\n # print ' fullName=%s' % fullName\n\n lastchangelist = list(stratosource.models.UserChange.objects.filter(branch=working_branch, apex_name=fullName).order_by('-last_update'))\n if len(lastchangelist) > 0:\n return lastchangelist[0]\n return None\n\n\ndef getDeployableTranslation(branch, label, locale):\n try:\n deployableT = stratosource.models.DeployableTranslation.objects.get(branch=branch, label=label, locale=locale, status__exact='a')\n except ObjectDoesNotExist:\n deployableT = stratosource.models.DeployableTranslation()\n deployableT.label = label\n deployableT.locale = locale\n deployableT.branch = branch\n deployableT.save()\n return deployableT\n\n\ndef insertTranslationDeltas(commit, items, delta_type, locale):\n for item in items:\n deployableT = getDeployableTranslation(commit.branch, item, locale)\n delta = stratosource.models.TranslationDelta()\n delta.translation = deployableT\n delta.commit = commit\n delta.delta_type = delta_type\n delta.save()\n\n\n##\n# [ objects ]\n##\ndef analyze_object_changes(list, lFileCache, rFileCache, elementname, commit):\n global documentCache\n\n changesFound = False\n for objectName in list:\n logger.debug('analyzing %s > %s' % (objectName, elementname))\n try:\n inserts, updates, deletes = getAllObjectChanges(objectName, lFileCache, rFileCache, elementname,\n objectChangeResolver)\n if (inserts and len(inserts)) or (updates and len(updates)) or (deletes and len(deletes)):\n if inserts: insert_deltas(commit, objectName, 'objects', inserts.keys(), 'a', elementname)\n if updates: insert_deltas(commit, objectName, 'objects', updates.keys(), 'u', elementname)\n if deletes: insert_deltas(commit, objectName, 'objects', deletes.keys(), 'd', elementname)\n changesFound = True\n\n except NewObjectException:\n logger.debug('New object %s' % objectName)\n doc = documentCache['r' + objectName]\n insert_deltas(commit, objectName, 'objects', getAllFullNames(doc, elementname), 'a', elementname)\n\n except DeletedObjectException:\n doc = documentCache['l' + objectName]\n insert_deltas(commit, objectName, 'objects', getAllFullNames(doc, elementname), 'd', elementname)\n\n if not changesFound:\n pass\n\n\n##\n# [ objectTranslation ]\n##\ndef analyze_object_translation_changes(list, lFileCache, rFileCache, elementname, commit):\n global documentCache\n\n changesFound = False\n for objectName in list:\n try:\n inserts, updates, deletes = getAllObjectChanges(objectName, lFileCache, rFileCache, elementname,\n objectTranslationChangeResolver)\n if (inserts and len(inserts)) or (updates and len(updates)) or (deletes and len(deletes)):\n if inserts: insert_deltas(commit, objectName, 'objectTranslations', inserts.keys(), 'a', elementname)\n if updates: insert_deltas(commit, objectName, 'objectTranslations', updates.keys(), 'u', elementname)\n if deletes: insert_deltas(commit, objectName, 'objectTranslations', deletes.keys(), 'd', elementname)\n changesFound = True\n\n except NewObjectException:\n doc = documentCache['r' + objectName]\n insert_deltas(commit, objectName, 'objectTranslations', getAllFullNames(doc, elementname), 'a', elementname)\n return\n\n except DeletedObjectException:\n doc = documentCache['l' + objectName]\n insert_deltas(commit, objectName, 'objectTranslations', getAllFullNames(doc, elementname), 'd', elementname)\n return\n\n if not changesFound:\n pass\n\n\n##\n# [ labels ]\n##\ndef analyze_label_changes(list, lFileCache, rFileCache, elementname, commit):\n global documentCache\n\n for 
objectName in list:\n try:\n inserts, updates, deletes = getAllObjectChanges(objectName, lFileCache, rFileCache, elementname,\n objectChangeResolver)\n if (inserts and len(inserts)) or (updates and len(updates)) or (deletes and len(deletes)):\n if inserts: insert_deltas(commit, objectName, 'labels', inserts.keys(), 'a', elementname)\n if updates: insert_deltas(commit, objectName, 'labels', updates.keys(), 'u', elementname)\n if deletes: insert_deltas(commit, objectName, 'labels', deletes.keys(), 'd', elementname)\n\n except NewObjectException:\n doc = documentCache['r' + objectName]\n insert_deltas(commit, objectName, 'labels', getAllFullNames(doc, elementname), 'a', elementname)\n\n except DeletedObjectException:\n doc = documentCache['l' + objectName]\n insert_deltas(commit, objectName, 'labels', getAllFullNames(doc, elementname), 'd', elementname)\n\n\n##\n# [ translations ]\n##\ndef analyze_translation_changes(list, lFileCache, rFileCache, commit):\n global documentCache\n\n for objectName in list:\n locale = objectName[:-12] # the locale is part of the object name\n try:\n inserts, updates, deletes = getAllObjectChanges(objectName, lFileCache, rFileCache, 'customLabels',\n translationChangeResolver)\n if (inserts and len(inserts)) or (updates and len(updates)) or (deletes and len(deletes)):\n if inserts: insertTranslationDeltas(commit, inserts.keys(), 'a', locale)\n if updates: insertTranslationDeltas(commit, updates.keys(), 'u', locale)\n if deletes: insertTranslationDeltas(commit, deletes.keys(), 'd', locale)\n\n except NewObjectException:\n doc = documentCache['r' + objectName]\n insert_deltas(commit, objectName, 'translations', getAllFullNames(doc, 'customLabels', tagname='name'), 'a',\n 'customLabels')\n\n except DeletedObjectException:\n doc = documentCache['l' + objectName]\n insert_deltas(commit, objectName, 'translations', getAllFullNames(doc, 'customLabels', tagname='name'), 'd',\n 'customLabels')\n\n\n##\n# [ record types/picklists ]\n##\n\ndef recTypePicklistResolver(ldoc, rdoc, rmap, map, elementName):\n missing = {}\n updates = {}\n inserts = {}\n\n if ldoc is None:\n lnodes = []\n else:\n lnodes = ldoc.findall(SF_NAMESPACE + elementName)\n if rdoc is None:\n rnodes = []\n else:\n rnodes = rdoc.findall(SF_NAMESPACE + elementName)\n\n #\n # put the left and right lists in a hash for easier analysis\n #\n fqfullname = SF_NAMESPACE + 'fullName'\n fqpicklistvalues = SF_NAMESPACE + 'picklistValues'\n fqpicklist = SF_NAMESPACE + 'picklist'\n\n llists = {}\n for lnode in lnodes:\n fullName = lnode.find(fqfullname).text\n lpicklists = lnode.findall(fqpicklistvalues)\n for lpicklist in lpicklists:\n lpicklist_name = lpicklist.find(fqpicklist).text\n llists[fullName + ':' + lpicklist_name] = etree.tostring(lpicklist)\n\n rlists = {}\n for rnode in rnodes:\n fullName = rnode.find(fqfullname).text\n rpicklists = rnode.findall(fqpicklistvalues)\n for rpicklist in rpicklists:\n rpicklist_name = rpicklist.find(fqpicklist).text\n rlists[fullName + ':' + rpicklist_name] = etree.tostring(rpicklist)\n\n #\n # go down the left side lookup for updates and deletes\n #\n for lrectype_name in llists.keys():\n if rlists.has_key(lrectype_name):\n if rlists[lrectype_name] != llists[lrectype_name]:\n updates[lrectype_name] = rlists[lrectype_name]\n else:\n missing[lrectype_name] = llists[lrectype_name]\n\n #\n # go down the right side looking for additions\n #\n for rrectype_name in rlists.keys():\n if not llists.has_key(rrectype_name):\n inserts[rrectype_name] = rlists[rrectype_name]\n\n return 
inserts, updates, missing\n\n\ndef analyze_recordtype_picklist_changes(list, lFileCache, rFileCache, commit):\n global documentCache\n\n for objectName in list:\n try:\n inserts, updates, deletes = getObjectChanges('l', lFileCache, 'r', rFileCache, objectName, 'recordTypes',\n recTypePicklistResolver)\n if inserts: insert_deltas(commit, objectName, 'objects', inserts, 'a', 'recordTypes', 'picklists')\n if updates: insert_deltas(commit, objectName, 'objects', updates, 'u', 'recordTypes', 'picklists')\n if deletes: insert_deltas(commit, objectName, 'objects', deletes, 'd', 'recordTypes', 'picklists')\n\n except NewObjectException:\n doc = documentCache['r' + objectName]\n insert_deltas(commit, objectName, 'objects', getAllFullNames(doc, 'recordTypes', tagname='name'), 'a',\n 'recordTypes')\n\n except DeletedObjectException:\n doc = documentCache['l' + objectName]\n insert_deltas(commit, objectName, 'objects', getAllFullNames(doc, 'recordTypes', tagname='name'), 'd',\n 'recordTypes')\n\n\n##\n# [ workflows ]\n##\ndef analyze_workflow_changes(list, lFileCache, rFileCache, elementname, commit):\n global documentCache\n\n changesFound = False\n for objectName in list:\n try:\n # print' object name is', objectName, 'element name is', elementname\n inserts, updates, deletes = getAllObjectChanges(objectName, lFileCache, rFileCache, elementname,\n objectChangeResolver)\n if (inserts and len(inserts)) or (updates and len(updates)) or (deletes and len(deletes)):\n if inserts: insert_deltas(commit, objectName, 'workflows', inserts.keys(), 'a', elementname)\n if updates: insert_deltas(commit, objectName, 'workflows', updates.keys(), 'u', elementname)\n if deletes: insert_deltas(commit, objectName, 'workflows', deletes.keys(), 'd', elementname)\n changesFound = True\n\n except NewObjectException:\n doc = documentCache['r' + objectName]\n insert_deltas(commit, objectName, 'workflows', getAllFullNames(doc, elementname), 'a', elementname)\n\n except DeletedObjectException:\n doc = documentCache['l' + objectName]\n insert_deltas(commit, objectName, 'workflows', getAllFullNames(doc, elementname), 'd', elementname)\n\n if not changesFound:\n pass\n\n\n@transaction.atomic\ndef analyze_commit(branch, commit):\n global documentCache\n global mapCache\n global working_branch\n global change_batch\n\n working_branch = branch\n\n logger.info(\"Analyzing commit %s\" % commit.hash)\n\n documentCache = {} # do not want to accumulate this stuff over multiple iterations\n mapCache = {}\n change_batch = None\n\n # clean up deltas in case we are rerunning\n stratosource.models.Delta.objects.filter(commit=commit).delete()\n stratosource.models.TranslationDelta.objects.filter(commit=commit).delete()\n\n lhash = commit.prev_hash\n rhash = commit.hash\n\n ##\n # call \"git diff\" to get a list of changed files\n ##\n omap = getDiffNames(lhash, rhash)\n\n ##\n # load all changed files from each hash into a map for performance (config only)\n ##\n lFileCache = createFileCache(lhash, omap, branch.name)\n rFileCache = createFileCache(rhash, omap, branch.name)\n\n for otype, olist in omap.items():\n # logger.debug(\"Type: %s\" % otype)\n if otype == 'objects':\n analyze_object_changes(olist, lFileCache, rFileCache, 'fields', commit)\n analyze_object_changes(olist, lFileCache, rFileCache, 'fieldSets', commit)\n analyze_object_changes(olist, lFileCache, rFileCache, 'validationRules', commit)\n analyze_object_changes(olist, lFileCache, rFileCache, 'webLinks', commit)\n analyze_object_changes(olist, lFileCache, rFileCache, 
'recordTypes', commit)\n analyze_recordtype_picklist_changes(olist, lFileCache, rFileCache, commit)\n analyze_object_changes(olist, lFileCache, rFileCache, 'namedFilters', commit)\n analyze_object_changes(olist, lFileCache, rFileCache, 'listViews', commit)\n # misc single-node elements\n # analyzeObjectChanges(list, lFileCache, rFileCache, 'label', commit)\n # analyzeObjectChanges(list, lFileCache, rFileCache, 'nameField', commit, nameKey='label')\n # analyzeObjectChanges(list, lFileCache, rFileCache, 'pluralLabel', commit)\n # analyzeObjectChanges(list, lFileCache, rFileCache, 'searchLayouts', commit)\n # analyzeObjectChanges(list, lFileCache, rFileCache, 'sharingModel', commit)\n\n elif otype == 'translations':\n analyze_translation_changes(olist, lFileCache, rFileCache, commit)\n\n elif otype == 'workflows':\n analyze_workflow_changes(olist, lFileCache, rFileCache, 'alerts', commit)\n analyze_workflow_changes(olist, lFileCache, rFileCache, 'fieldUpdates', commit)\n analyze_workflow_changes(olist, lFileCache, rFileCache, 'rules', commit)\n analyze_workflow_changes(olist, lFileCache, rFileCache, 'tasks', commit)\n\n elif otype == 'objectTranslations':\n analyze_object_translation_changes(olist, lFileCache, rFileCache, 'fields', commit)\n analyze_object_translation_changes(olist, lFileCache, rFileCache, 'validationRules', commit)\n analyze_object_translation_changes(olist, lFileCache, rFileCache, 'webLinks', commit)\n\n elif otype == 'labels':\n analyze_label_changes(olist, lFileCache, rFileCache, 'labels', commit)\n\n else:\n for listitem in olist:\n delta_type = None\n if lFileCache.has_key(listitem) and rFileCache.has_key(listitem) == False:\n delta_type = 'd'\n elif lFileCache.has_key(listitem) == False:\n delta_type = 'a'\n else:\n delta_type = 'u'\n\n delta = stratosource.models.Delta()\n delta.object = getDeployable(branch, listitem, otype, None, None, None)\n delta.commit = commit\n delta.user_change = get_last_change(listitem, None, None)\n if delta.user_change is None:\n #logger.debug('** Audit record not found for %s' % listitem)\n pass\n else:\n # print 'audit record found!'\n pass\n delta.delta_type = delta_type\n delta.save()\n logger.debug(' added delta for {0}'.format(listitem))\n # if not delta.user_change is None:\n # print 'user %s' % (delta.user_change.sfuser.name,)\n # print 'commit %s' % (commit,)\n # print 'dtype %s' % (delta_type,)\n # print 'otype %s' % (otype,)\n # print 'item %s' % (listitem,)\n\n # mqclient.publish(\n # {'user': delta.user_change.sfuser.name.encode('ascii', 'ignore'), 'commit': commit.hash,\n # 'dtype': delta_type, 'type': otype, 'item': listitem,\n # 'last_update': delta.user_change.last_update.isoformat()})\n\n commit.status = 'c'\n commit.save()\n\n\ndef generate_analysis(branch, start_date):\n global documentCache\n\n commits = stratosource.models.Commit.objects.filter(branch=branch, status__exact='p', prev_hash__isnull=False,\n date_added__gte=start_date).order_by('-date_added')\n\n for commit in commits:\n if commit.prev_hash is None or len(commit.prev_hash) == 0: continue\n analyze_commit(branch, commit)\n\n\n# msg = AdminMessage()\n# msg.subject = branch.name + ' commits processed'\n# msg.body = '%d %s commits were processed on %s' % (len(commits), branch.name, str(datetime.now()))\n# msg.sender = 'sfdiff'\n# msg.save()\n\n##\n# [ Entry point ]\n##\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('repo', help='repository name')\n parser.add_argument('branch', help='branch name')\n\n def handle(self, 
*args, **options):\n global documentCache\n\n repo = stratosource.models.Repo.objects.get(name__exact=options['repo'])\n branch = stratosource.models.Branch.objects.get(repo=repo, name__exact=options['branch'])\n\n # mqclient = MQClient(exch='delta')\n\n # if len(args) == 3:\n # start_date = datetime.strptime(args[2], '%m-%d-%Y')\n # else:\n\n start_date = datetime(2000, 1, 1, 0, 0) #, tzinfo=pytz.utc)\n\n os.chdir(repo.location)\n\n ##\n # some basic housekeeping\n ##\n resetLocalRepo(branch.name)\n verifyGitRepo()\n\n generate_analysis(branch, start_date)\n\n documentCache = {} # just in case running stateful by django middleware, clear out between calls\n\n # try to leave repo in a good state\n resetLocalRepo(branch.name)\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":5506570039653036000,"string":"5,506,570,039,653,036,000"},"line_mean":{"kind":"number","value":37.5941255007,"string":"37.594126"},"line_max":{"kind":"number","value":140,"string":"140"},"alpha_frac":{"kind":"number","value":0.6130694987,"string":"0.613069"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109540,"cells":{"repo_name":{"kind":"string","value":"ethertricity/bluesky"},"path":{"kind":"string","value":"bluesky/traffic/adsbmodel.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1990"},"content":{"kind":"string","value":"\"\"\" ADS-B model. Implements real-life limitations of ADS-B communication.\"\"\"\r\nimport numpy as np\r\nimport bluesky as bs\r\nfrom bluesky.tools.aero import ft\r\nfrom bluesky.tools.trafficarrays import TrafficArrays, RegisterElementParameters\r\n\r\n\r\nclass ADSB(TrafficArrays):\r\n \"\"\" ADS-B model. Implements real-life limitations of ADS-B communication.\"\"\"\r\n\r\n def __init__(self):\r\n super(ADSB, self).__init__()\r\n # From here, define object arrays\r\n with RegisterElementParameters(self):\r\n # Most recent broadcast data\r\n self.lastupdate = np.array([])\r\n self.lat = np.array([])\r\n self.lon = np.array([])\r\n self.alt = np.array([])\r\n self.trk = np.array([])\r\n self.tas = np.array([])\r\n self.gs = np.array([])\r\n self.vs = np.array([])\r\n\r\n self.SetNoise(False)\r\n\r\n def SetNoise(self, n):\r\n self.transnoise = n\r\n self.truncated = n\r\n self.transerror = [1, 100, 100 * ft] # [degree,m,m] standard bearing, distance, altitude error\r\n self.trunctime = 0 # [s]\r\n\r\n def create(self, n=1):\r\n super(ADSB, self).create(n)\r\n\r\n self.lastupdate[-n:] = -self.trunctime * np.random.rand(n)\r\n self.lat[-n:] = bs.traf.lat[-n:]\r\n self.lon[-n:] = bs.traf.lon[-n:]\r\n self.alt[-n:] = bs.traf.alt[-n:]\r\n self.trk[-n:] = bs.traf.trk[-n:]\r\n self.tas[-n:] = bs.traf.tas[-n:]\r\n self.gs[-n:] = bs.traf.gs[-n:]\r\n\r\n def update(self, time):\r\n up = np.where(self.lastupdate + self.trunctime < time)\r\n self.lat[up] = bs.traf.lat[up]\r\n self.lon[up] = bs.traf.lon[up]\r\n self.alt[up] = bs.traf.alt[up]\r\n self.trk[up] = bs.traf.trk[up]\r\n self.tas[up] = bs.traf.tas[up]\r\n self.gs[up] = bs.traf.gs[up]\r\n self.vs[up] = bs.traf.vs[up]\r\n self.lastupdate[up] = self.lastupdate[up] + 
self.trunctime\r\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-2682508057104136700,"string":"-2,682,508,057,104,136,700"},"line_mean":{"kind":"number","value":35.5471698113,"string":"35.54717"},"line_max":{"kind":"number","value":103,"string":"103"},"alpha_frac":{"kind":"number","value":0.5326633166,"string":"0.532663"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109541,"cells":{"repo_name":{"kind":"string","value":"AltarBeastiful/rateItSeven"},"path":{"kind":"string","value":"rateItSeven/legacy/legacysenscritique.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"6348"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# === This file is part of RateItSeven ===\n#\n# Copyright 2015, Rémi Benoit \n#\n# RateItSeven is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# RateItSeven is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with RateItSeven. If not, see .\n#\n\nimport logging\n\nfrom rateItSeven.legacy.movie import Movie\nfrom rateItSeven.legacy.senscritiquepages import HomePage, ListCollectionPage, ListPage, \\\n ListModule\nfrom selenium.webdriver import PhantomJS, ActionChains\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\nfrom rateItSeven.legacy import sclist\nfrom rateItSeven.legacy.sclist import SCList\n\nLINUX_USER_AGENT = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36\"\n\n\nclass LegacySensCritique(object):\n\n CHANGEPAGE_TIMEOUT = 20\n\n '''\n Interact with SensCritique website\n '''\n\n def __init__(self, login, password, userAgent=LINUX_USER_AGENT):\n '''\n Constructor\n\n :param login:\n :param password:\n '''\n\n self.login = login\n self.password = password\n\n dcap = dict(DesiredCapabilities.PHANTOMJS)\n dcap[\"phantomjs.page.settings.userAgent\"] = (\n userAgent\n )\n self.driver = PhantomJS(desired_capabilities=dcap)\n self.driver.set_window_size(1366, 768)\n\n def sign_in(self):\n '''\n Sign-in to SensCritique using the given login details\n\n :rtype: bool\n :Return: true if login succeeded, false otherwise\n '''\n\n self.to(HomePage())\n\n self.page.alreadySuscribed().click()\n\n self.page.loginField().send_keys(self.login)\n self.page.passwordField().send_keys(self.password)\n\n self.page.submitLoginButton().click()\n\n #TODO changing page so wait or something\n currentUser = self.page.username(self.CHANGEPAGE_TIMEOUT)\n\n if currentUser is not None:\n self._currentUsername = currentUser.value()\n logging.warn(\"Logged in with user \" + self._currentUsername)\n\n return True\n else:\n if self.page.loginError() is not None:\n logging.error(\"Couldn't login : \" + self.page.loginError().value())\n\n return False\n\n def is_logged_in(self):\n return self.page is not None and self.page.username() is not None\n\n def retrieveListById(self, listId):\n self.to(ListCollectionPage(self._currentUsername))\n\n for l in self.page.lists():\n if listId in l.url():\n return self.createSCListFromListModule(l)\n\n 
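# Editorial sketch, not in the original class: the list lookups here (retrieveListById above, retrieveListByTitle below) share the same scan-and-match pattern over page.lists(); assuming the same ListModule API, a predicate-based helper (hypothetical name _find_list) could factor it out:\n#\n#   def _find_list(self, predicate):\n#       self.to(ListCollectionPage(self._currentUsername))\n#       for module in self.page.lists():\n#           if predicate(module):\n#               return self.createSCListFromListModule(module)\n#       return None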
return None\n\n def retrieveListByTitle(self, title):\n self.to(ListCollectionPage(self._currentUsername))\n\n for l in self.page.lists():\n if l.title() == title:\n return self.createSCListFromListModule(l)\n\n return None\n\n def retrieveMoviesFromList(self, l : SCList):\n self.to(ListPage(l))\n\n for movie in self.page.movies():\n yield movie\n\n def createList(self, l : SCList):\n self.to(ListCollectionPage(self._currentUsername))\n\n self.page.create_list_button().click()\n\n self.page.new_list_title().send_keys(l.title())\n self.page.film_type_radio().click()\n self.page.classic_list_radio().click()\n self.page.public_list_radio().click()\n\n self.page.confirm_create_list_button().click()\n\n # Change the current page as we are now on the list page\n self.page = ListPage(l)\n self.page._driver = self.driver # TODO: fixme, we don't want to use self.to(page) as it would reload the page\n\n self.page.set_description(l.description())\n\n url = self.driver.current_url\n l._id = url[url.rfind(\"/\") + 1:]\n\n return l\n\n def deleteList(self, l : sclist):\n self.to(ListCollectionPage(self._currentUsername))\n\n for module in self.page.lists():\n if l.id() in module.url():\n\n # Alert box will be auto-accepted. Needed as Phantomjs cannot handle them\n self.driver.execute_script(\"window.confirm = function(msg) { return true; };\")\n\n delete_button = module.delete_button()\n\n delete_action = ActionChains(self.driver)\n delete_action.move_to_element(module.title_node())\n delete_action.move_to_element(delete_button)\n delete_action.click(delete_button)\n\n delete_action.perform()\n\n def addMovie(self, movie: Movie, l : SCList):\n self.to(ListPage(l))\n\n self.page.query_input().send_keys(movie.title())\n\n add_button = self.page.add_movie_button(0)\n if add_button is None:\n return False # Movie already in list\n\n if movie.description():\n self.page.movie_description_field(0).send_keys(movie.description())\n\n add_button.click()\n return True\n\n def deleteMovies(self, movies_to_delete, l : SCList):\n self.to(ListPage(l))\n\n for movie in self.page.movies():\n try:\n movies_to_delete.remove(movie.title())\n\n delete = movie.delete_button()\n delete.click()\n\n movie.confirm_delete_button().click()\n self.page.wait_loading_finished()\n except Exception as e:\n logging.error(\"Fail to delete movie \" + movie.title() + \". 
\" + format(e))\n\n return movies_to_delete\n\n def to(self, page):\n page.to(self.driver)\n self.page = page\n\n def createSCListFromListModule(self, module : ListModule):\n list = sclist.SCList(module.id())\n\n list.setTitle(module.title())\n list.setDescription(module.description())\n list.setType(None) # TODO: parse the type\n\n return list\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-2295932688796886500,"string":"-2,295,932,688,796,886,500"},"line_mean":{"kind":"number","value":30.112745098,"string":"30.112745"},"line_max":{"kind":"number","value":126,"string":"126"},"alpha_frac":{"kind":"number","value":0.6272254608,"string":"0.627225"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109542,"cells":{"repo_name":{"kind":"string","value":"JasonFruit/doremi"},"path":{"kind":"string","value":"doremi/doremi_parser.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"12775"},"content":{"kind":"string","value":"\"\"\"A simple music-representation language suitable for hymn tunes,\npart-songs, and other brief, vocal-style works.\n\n\"\"\"\n\n# TODO: figure out how to make fermatas in bass staves upside-down in\n# the template\n\nimport codecs\nimport copy\n\nfrom parsimonious import Grammar, NodeVisitor\n\nfrom doremi.lilypond import *\nfrom doremi.lyric_parser import Lyric, LyricParser\n\nclass RepeatMarker(object):\n def __init__(self, text):\n self.text = text\n def to_lilypond(self, *args, **kwargs):\n if self.text == \"|:\":\n return r\"\\repeat volta 2 {\"\n elif self.text == \":|\":\n return r\"}\"\n elif self.text == \"!\":\n return r\"} \\alternative { {\"\n elif self.text == \"1!\":\n return r\"} {\"\n elif self.text == \"2!\":\n return r\"} }\"\n elif self.text == \"|.\":\n return r'\\bar \"|.\"'\n elif self.text == \"||\":\n return r'\\bar \"||\"'\n\nclass Note(object):\n \"\"\"Represents a note (or rest) in a musical work, including scale\ndegree, duration, octave, and other information\"\"\"\n def __init__(self, # initialize with empty properties\n pitch=None, # because they are built on-the-fly\n duration=None,\n octave=None,\n modifiers=list()):\n self.pitch = pitch\n self.duration = duration\n self.octave = octave\n self.modifiers = modifiers\n def to_lilypond(self, key, octave_offset = 0):\n \"\"\"\n Convert to an equivalent Lilypond representation\n \"\"\"\n\n # short-circuit if this is a rest\n if self.pitch == \"r\":\n return \"%s%s\" % (self.pitch, self.duration)\n \n pitch = syllable_to_note(self.pitch, key)\n octave = self.octave + octave_offset + 1\n\n # convert internal octave representation to Lilypond, which\n # uses c->b\n offset = key_octave_offset[key.lower()]\n\n local_pitch_level = copy.copy(pitch_level)\n\n # adjust the local copy of the pitch-level order to go from\n # la->sol if key is minor\n if \"minor\" in key.lower():\n for k in local_pitch_level.keys():\n local_pitch_level[k] = local_pitch_level[k] + 2\n if local_pitch_level[k] > 6:\n local_pitch_level[k] -= 7\n \n if local_pitch_level[self.pitch] - offset < 0:\n octave -= 1\n elif local_pitch_level[self.pitch] - offset > 6:\n octave += 1\n\n if octave < 0:\n octave = \",\" * abs(octave)\n else:\n octave = \"'\" * octave\n\n # start or end slurs (or beams) as indicated by modifiers\n slur = \"\"\n if \"slur\" in self.modifiers:\n if self.duration in [\"8\", \"8.\", \"16\"]:\n slur = \"[\"\n else:\n slur = \"(\"\n elif \"end slur\" in self.modifiers:\n if self.duration in [\"8\", \"8.\", \"16\"]:\n slur = \"]\"\n 
else:\n slur = \")\"\n\n # ties only ever connect two notes, so need not be explicitly\n # terminated\n tie = \"\"\n if \"tie\" in self.modifiers:\n tie = \"~\"\n\n # add a fermata\n if \"fermata\" in self.modifiers:\n fermata = r\"\\fermata\"\n else:\n fermata = \"\"\n\n # assemble and return the Lilypond string\n return \"%s%s%s%s%s%s\" % (pitch,\n octave,\n self.duration,\n tie,\n slur,\n fermata)\n \nclass Voice(list):\n \"\"\"Represents a named part in a vocal-style composition\"\"\"\n def __init__(self,\n name=\"\",\n octave=\"\"):\n list.__init__(self)\n self.name = name\n self.octave = octave # the starting octave for the part\n def last_note(self):\n index = -1\n try:\n while type(self[index]) != Note:\n index -= 1\n return self[index]\n except IndexError:\n raise IndexError(\"No previous notes\")\n def to_lilypond(self,\n time,\n key,\n octave_offset=0,\n shapes=None,\n template=\"default\"):\n \"\"\"A representation of the voice as a Lilypond string\"\"\"\n\n # association of doremi shape args and Lilypond shape commands\n shape_dic = {\"round\": (\"\", \"\"),\n \"aikin\": (r\"\\aikenHeads\", \"Minor\"),\n \"sacredharp\": (r\"\\sacredHarpHeads\", \"Minor\"),\n \"southernharmony\": (r\"\\southernHarmonyHeads\", \"Minor\"),\n \"funk\": (r\"\\funkHeads\", \"Minor\"),\n \"walker\": (r\"\\walkerHeads\", \"Minor\")}\n\n # build the lilypond shape command\n if shapes == None:\n lshapes = \"\"\n else:\n lparts = shape_dic[shapes.lower()]\n lshapes = lparts[0]\n\n # there's a different command for minor\n if \"minor\" in key:\n lshapes += lparts[1]\n\n tmpl = codecs.open(\"templates/%s-voice.tmpl\" % template,\n \"r\",\n \"utf-8\").read()\n \n return tmpl % {\"name\": self.name,\n \"key\": key.replace(\" \", \" \\\\\"), # a minor -> a \\minor\n \"time\": time,\n \"shapes\": lshapes,\n \"notes\": \" \".join(\n [note.to_lilypond(\n key,\n octave_offset=octave_offset)\n for note in self])}\n\n\nclass Tune(list):\n \"\"\"Represents a vocal-style tune, e.g. 
a hymn-tune or partsong\"\"\"\n def __init__(self,\n title=\"\",\n scripture=\"\",\n composer=\"\",\n key=\"\",\n time=None,\n partial=None):\n self.title = title\n self.scripture = scripture\n self.composer = composer\n self.key = key\n self.time = time\n self.partial = partial\n \n def to_lilypond(self,\n key,\n octave_offset=0,\n shapes=None,\n lyric=None,\n template=\"default\"):\n \"\"\"Return a Lilypond version of the tune\"\"\"\n\n key = key_to_lilypond(key)\n\n # represent the partial beginning measure a la Lilypond if\n # necessary\n if self.partial:\n partial = r\"\\partial %s\" % self.partial\n else:\n partial = \"\"\n\n # TODO: make this allow other templates\n ly = codecs.open(\"templates/%s.tmpl\" % template, \"r\", \"utf-8\").read()\n\n tmpl_data = {\"voices\": \"\\n\".join(\n [voice.to_lilypond(self.time,\n key,\n octave_offset=octave_offset,\n shapes=shapes,\n template=template)\n for voice in self]),\n \"author\": lyric.author,\n \"lyrictitle\": lyric.title,\n \"meter\": lyric.meter,\n \"title\": self.title,\n \"scripture\": self.scripture,\n \"composer\": self.composer,\n \"partial\": partial}\n\n for voice in self:\n tmpl_data[\"%s_lyrics\" % voice.name] = \"\"\n \n for lvoice in lyric.voices:\n tmpl_data[\"%s_lyrics\" % lvoice.name] = lvoice.to_lilypond()\n \n return ly % tmpl_data\n \n\ndef get_node_val(node, val_type):\n \"\"\"Return the value as a string of a child node of the specified type,\nor raise ValueError if none exists\"\"\"\n for child in node.children:\n if child.expr_name == val_type:\n return child.text.strip('\"')\n raise ValueError(\"No value of specified type.\")\n\ndef get_string_val(node):\n \"\"\"Return the value of a string child node, if exists; otherwise,\nraise a ValueError\"\"\"\n try:\n return get_node_val(node, \"string\")\n except:\n raise ValueError(\"No string value.\")\n \nclass DoremiParser(NodeVisitor):\n def __init__(self, tune_fn):\n NodeVisitor.__init__(self)\n # start with an empty tune, voice, note, and list of modifiers\n self.tune = Tune()\n self.voice = Voice()\n self.note = Note()\n self.note_modifiers = []\n\n # at the outset, we are not in a voice's content\n self.in_content = False\n\n # set up the actual parser\n grammar = Grammar(open(\"doremi-grammar\", \"r\").read())\n\n # read and parse the tune\n tune_text = codecs.open(tune_fn, \"r\", \"utf-8\").read()\n self.syntax = grammar.parse(tune_text)\n \n def convert(self):\n \"\"\"Convert the parse tree to the internal music representation\"\"\"\n self.visit(self.syntax)\n return self.tune\n\n # title, composer, key, and partial value can only occur at the\n # tune level, so they always are added to the tune\n def visit_title(self, node, vc):\n self.tune.title = get_string_val(node)\n def visit_scripture(self, node, vc):\n self.tune.scripture = get_string_val(node)\n def visit_composer(self, node, vc):\n self.tune.composer = get_string_val(node)\n def visit_key(self, node, vc):\n text = \" \".join([child.text for child in node.children\n if child.expr_name == \"name\"])\n self.tune.key = text\n def visit_partial(self, node, vc):\n self.tune.partial = int(get_node_val(node, \"number\"))\n\n def visit_time(self, node, vc):\n time = get_node_val(node, \"fraction\")\n\n # if it occurs inside a voice's note array\n if self.in_content:\n self.note_modifiers.append(time)\n else: # otherwise, it's at the tune level\n self.tune.time = time\n\n # octave and voice-name only occur at the voice level\n def visit_octave(self, node, vc):\n self.voice.octave = int(get_node_val(node, 
\"number\"))\n def visit_voice_name(self, node, vc):\n self.voice.name = node.children[-1].text\n\n # modifiers only occur in a collection of notes, and are stored at\n # the note level\n def visit_note_modifier(self, node, vc):\n self.note_modifiers.append(node.text)\n \n def visit_voice(self, node, vc):\n # a voice is only visited when fully parsed, so the voice is\n # already fully constructed; add it to the tune and start a\n # new one\n self.tune.append(self.voice)\n self.voice = Voice()\n \n def visit_note(self, node, vc):\n # a note is only visited after its modifiers have been\n # visited, so we finalize it and add it to the voice here\n\n # if there's no duration explicit, it's the same as the\n # previous note in the same voice\n if not self.note.duration:\n self.note.duration = self.voice.last_note().duration\n \n self.note.modifiers = self.note_modifiers\n self.note.pitch = node.text\n\n # if there's a previous note, start from its octave; if not,\n # start from the voice's octave\n try:\n self.note.octave = self.voice.last_note().octave\n except IndexError:\n self.note.octave = self.voice.octave\n\n # alter the octave according to octave modifiers\n for mod in self.note.modifiers:\n if mod == \"-\":\n self.note.octave -= 1\n elif mod == \"+\":\n self.note.octave += 1\n\n # if a slur started on the previous note and is not continued\n # by this one, explicitly end it\n try:\n if \"slur\" in self.voice.last_note().modifiers:\n if not \"slur\" in self.note.modifiers: \n self.note.modifiers.append(\"end slur\")\n except IndexError:\n pass\n\n # add the note to the voice and start a new one with no\n # modifiers\n self.voice.append(self.note)\n self.note = Note()\n self.note_modifiers = []\n\n def visit_repeat(self, node, vc):\n self.voice.append(RepeatMarker(node.text))\n \n def visit_number(self, node, vc):\n # all numbers except note durations are handled at a higher level\n if self.in_content:\n self.note.duration = node.text\n \n def generic_visit(self, node, vc):\n # set whether we're in the note-content of a voice based on\n # open- and close-brackets\n if node.text == \"[\":\n self.in_content = True\n elif node.text == \"]\":\n self.in_content = False\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-1205490265218397200,"string":"-1,205,490,265,218,397,200"},"line_mean":{"kind":"number","value":33.3413978495,"string":"33.341398"},"line_max":{"kind":"number","value":77,"string":"77"},"alpha_frac":{"kind":"number","value":0.5135029354,"string":"0.513503"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109543,"cells":{"repo_name":{"kind":"string","value":"mmcardle/MServe"},"path":{"kind":"string","value":"django-mserve/rassc/tasks.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4340"},"content":{"kind":"string","value":"########################################################################\n#\n# University of Southampton IT Innovation Centre, 2012\n#\n# Copyright in this library belongs to the University of Southampton\n# University Road, Highfield, Southampton, UK, SO17 1BJ\n#\n# This software may not be used, sold, licensed, transferred, copied\n# or reproduced in whole or in part in any manner or form or in or\n# on any media by any person other than in accordance with the terms\n# of the Licence Agreement supplied with the software, or otherwise\n# without the prior written consent of the copyright owners.\n#\n# This software is distributed WITHOUT ANY WARRANTY, without even the\n# 
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\n# PURPOSE, except where stated in the Licence Agreement supplied with\n# the software.\n#\n#\tCreated By :\t\t\tMark McArdle\n#\tCreated Date :\t\t\t2012-01-06\n#\tCreated for Project :\t\tRASCC\n#\n########################################################################\nimport logging\nimport subprocess\nimport tempfile\nimport os\nimport os.path\nimport shutil\nimport PythonMagick\nimport settings as settings\nfrom celery.task import task\nfrom dataservice.tasks import _get_mfile\nfrom dataservice.tasks import _save_joboutput\n\n@task(default_retry_delay=5,max_retries=1)\ndef dumbtask(inputs,outputs,options={},callbacks=[]):\n    logging.info(\"Processing dumb task\")\n    try:\n        mfileid = inputs[0]\n        filepath = _get_mfile(mfileid)\n\n        toutfile = tempfile.NamedTemporaryFile(delete=False,suffix=\".txt\")\n        joboutput = outputs[0]\n\n        retcode = subprocess.call([\"wc\",filepath,toutfile.name])\n        _save_joboutput(joboutput,toutfile)\n\n        return {\"success\":True,\"message\":\"Dumb task successful\"}\n\n    except Exception as e:\n        logging.info(\"Error with dumb task %s\" % e)\n        raise e\n\n@task(default_retry_delay=15,max_retries=3)\ndef swirl(inputs,outputs,options={},callbacks=[]):\n    try:\n        mfileid = inputs[0]\n\n        path = _get_mfile(mfileid)\n\n        logging.info(\"Swirling image for %s (%s)\" % (input, path))\n        img = PythonMagick.Image()\n        img.read(str(path))\n        img.swirl(90)\n\n        # Create swirled image as job output\n        toutfile = tempfile.NamedTemporaryFile(delete=False,suffix=\".jpg\")\n        img.write(toutfile.name)\n        joboutput = outputs[0]\n        _save_joboutput(joboutput,toutfile)\n\n        return {\"success\":True,\"message\":\"Swirl successful\"}\n    except Exception as e:\n        logging.info(\"Error with swirl %s\" % e)\n\n@task(default_retry_delay=15,max_retries=3)\ndef imodel(inputs,outputs,options={},callbacks=[]):\n    mfileid = inputs[0]\n    path = _get_mfile(mfileid)\n    logging.info(\"CWD: %s\" % os.getcwd())\n    logging.info(\"Running imodel on %s\" % (path))\n\n    # Run iModel in a temporary directory\n    logging.info(os.environ)\n    #imodel_home = \"/opt/iModel-1.0-beta-3-SNAPSHOT\"\n    #imodel_home = os.environ[\"IMODEL_HOME\"]\n    imodel_home = settings.IMODEL_HOME\n    logging.info(\"Running iModel from %s\" % imodel_home)\n    (mfile_dir, mfile_name) = os.path.split(path)\n    # XXX: configuration.txt must be in CWD\n    # XXX: Runtime arguments should not be provided in a file\n    tempdir = tempfile.mkdtemp()\n    logging.info(\"iModel temp dir: %s\" % (tempdir))\n    shutil.copy(\"imodel/configuration.txt\", tempdir)\n    shutil.copy(path, tempdir)\n    p = subprocess.Popen([\"java\", \"-cp\", imodel_home + \":\" + imodel_home+\"/lib/*:\" + imodel_home+\"/bin\", \"uk.ac.soton.itinnovation.prestoprime.imodel.batch.start.StartArchiveSystemModel\", mfile_name], cwd=tempdir, stdout=subprocess.PIPE)\n\n    # save stdout\n    stdoutfile = open(tempdir+\"/stdout\", 'w')\n    logging.info(\"Temp file for stdout: %s\" % (stdoutfile.name))\n    (stdout, stderr) = p.communicate()\n    stdoutfile.write(stdout)\n    stdoutfile.close()\n    _save_joboutput(outputs[1], stdoutfile)\n\n    # Process results\n    import sys\n    sys.path.append(\"imodel\")\n    import parseimodel\n    processedresultsfilename = tempdir+\"/data.csv\"\n    parseimodel.parse(tempdir+\"/outputSystemPerformance.log\", processedresultsfilename)\n    joboutput = outputs[0]\n    processedresultsfile = open(processedresultsfilename, 'r')\n    _save_joboutput(joboutput, processedresultsfile)\n\n    return {\"success\":True,\"message\":\"iModel simulation 
successful\"}\n"},"license":{"kind":"string","value":"lgpl-2.1"},"hash":{"kind":"number","value":1262400644338189000,"string":"1,262,400,644,338,189,000"},"line_mean":{"kind":"number","value":36.094017094,"string":"36.094017"},"line_max":{"kind":"number","value":241,"string":"241"},"alpha_frac":{"kind":"number","value":0.666359447,"string":"0.666359"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109544,"cells":{"repo_name":{"kind":"string","value":"alphagov/digitalmarketplace-api"},"path":{"kind":"string","value":"migrations/versions/320_drop_selection_answers.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1121"},"content":{"kind":"string","value":"\"\"\"Drop selection_answers table\n\nRevision ID: 320_drop_selection_answers\nRevises: 310_rename_selection_answers\nCreate Date: 2015-10-14 10:52:26.557319\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '320_drop_selection_answers'\ndown_revision = '310_rename_selection_answers'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n\ndef upgrade():\n op.execute('DROP TABLE selection_answers')\n\n\ndef downgrade():\n op.create_table(\n 'selection_answers',\n sa.Column('supplier_id', sa.Integer(), nullable=False),\n sa.Column('framework_id', sa.Integer(), nullable=False),\n sa.Column('question_answers', postgresql.JSON(), nullable=True),\n sa.ForeignKeyConstraint(['framework_id'], ['frameworks.id'], ),\n sa.ForeignKeyConstraint(['supplier_id'], ['suppliers.supplier_id'], ),\n sa.PrimaryKeyConstraint('supplier_id', 'framework_id')\n )\n op.execute(\"\"\"\n INSERT INTO selection_answers(supplier_id, framework_id, question_answers)\n SELECT supplier_id, framework_id, declaration FROM supplier_frameworks\n \"\"\")\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-1919669192622042400,"string":"-1,919,669,192,622,042,400"},"line_mean":{"kind":"number","value":31.0285714286,"string":"31.028571"},"line_max":{"kind":"number","value":82,"string":"82"},"alpha_frac":{"kind":"number","value":0.6975914362,"string":"0.697591"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109545,"cells":{"repo_name":{"kind":"string","value":"slightlynybbled/tk_tools"},"path":{"kind":"string","value":"tk_tools/tooltips.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1552"},"content":{"kind":"string","value":"import tkinter as tk\n\n\nclass ToolTip(object):\n \"\"\"\n Add a tooltip to any widget.::\n\n entry = tk.Entry(root)\n entry.grid()\n\n # createst a tooltip\n tk_tools.ToolTip(entry, 'enter a value between 1 and 10')\n\n :param widget: the widget on which to hover\n :param text: the text to display\n :param time: the time to display the text, in milliseconds\n \"\"\"\n\n def __init__(self, widget, text: str = 'widget info', time: int = 4000):\n self._widget = widget\n self._text = text\n self._time = time\n\n self._widget.bind(\"\",\n lambda _: self._widget.after(500, self._enter()))\n self._widget.bind(\"\", self._close)\n\n self._tw = None\n\n def _enter(self, event=None):\n x, y, cx, cy = self._widget.bbox(\"insert\")\n x += self._widget.winfo_rootx() + 25\n y += self._widget.winfo_rooty() + 20\n\n # creates a toplevel window\n self._tw = tk.Toplevel(self._widget)\n\n # Leaves only the label and removes the app window\n self._tw.wm_overrideredirect(True)\n self._tw.wm_geometry(\"+%d+%d\" % (x, y))\n label = tk.Label(self._tw, text=self._text, justify='left',\n 
background='#FFFFDD', relief='solid', borderwidth=1,\n                         font=(\"times\", \"8\", \"normal\"))\n\n        label.pack(ipadx=1)\n\n        if self._time:\n            self._tw.after(self._time, self._tw.destroy)\n\n    def _close(self, event=None):\n        if self._tw:\n            self._tw.destroy()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":6823558609957457000,"string":"6,823,558,609,957,457,000"},"line_mean":{"kind":"number","value":28.8461538462,"string":"28.846154"},"line_max":{"kind":"number","value":77,"string":"77"},"alpha_frac":{"kind":"number","value":0.5444587629,"string":"0.544459"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109546,"cells":{"repo_name":{"kind":"string","value":"SchrodingersGat/kicad-footprint-generator"},"path":{"kind":"string","value":"KicadModTree/nodes/base/Arc.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3826"},"content":{"kind":"string","value":"# KicadModTree is free software: you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# KicadModTree is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.\n#\n# (C) 2016 by Thomas Pointhuber, \n\nfrom KicadModTree.Vector import *\nfrom KicadModTree.nodes.Node import Node\nimport math\n\n\nclass Arc(Node):\n    r\"\"\"Add an Arc to the render tree\n\n    :param \\**kwargs:\n        See below\n\n    :Keyword Arguments:\n        * *center* (``Vector2D``) --\n          center of arc\n        * *start* (``Vector2D``) --\n          start point of arc\n        * *angle* (``float``) --\n          angle of arc\n        * *layer* (``str``) --\n          layer on which the arc is drawn (default: 'F.SilkS')\n        * *width* (``float``) --\n          width of the arc line (default: None, which means auto detection)\n\n    :Example:\n\n    >>> from KicadModTree import *\n    >>> Arc(center=[0, 0], start=[-1, 0], angle=180, layer='F.SilkS')\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        Node.__init__(self)\n        self.center_pos = Vector2D(kwargs['center'])\n        self.start_pos = Vector2D(kwargs['start'])\n        self.angle = kwargs['angle']\n\n        self.layer = kwargs.get('layer', 'F.SilkS')\n        self.width = kwargs.get('width')\n\n    def calculateBoundingBox(self):\n        # TODO: finish implementation\n        min_x = min(self.start_pos.x, self._calculateEndPos().x)\n        min_y = min(self.start_pos.y, self._calculateEndPos().y)\n        max_x = max(self.start_pos.x, self._calculateEndPos().x)\n        max_y = max(self.start_pos.y, self._calculateEndPos().y)\n\n        '''\n        for angle in range(4):\n            float_angle = angle * math.pi/2.\n\n            start_angle = _calculateStartAngle(self)\n            end_angle = start_angle + math.radians(self.angle)\n\n            # TODO: +- pi border\n            if float_angle < start_angle:\n                continue\n            if float_angle > end_angle:\n                continue\n\n            print(\"TODO: add angle side: {1}\".format(float_angle))\n        '''\n\n        return Node.calculateBoundingBox({'min': Vector2D((min_x, min_y)), 'max': Vector2D((max_x, max_y))})\n\n    def _calculateEndPos(self):\n        radius = self._calculateRadius()\n\n        angle = self._calculateStartAngle() + math.radians(self.angle)\n\n        return Vector2D(math.sin(angle)*radius, math.cos(angle)*radius)\n\n    def _calculateRadius(self):\n        x_size = 
{"rowIdx":109547,"cells":{"repo_name":{"kind":"string","value":"dcmease/ChessMoves"},"path":{"kind":"string","value":"chessboard.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"18072"},"content":{"kind":"string","value":"\"\"\"\nchessboard.py\n~~~~~~~~~~~~~\nThis class will hold the state for a chessboard\n\"\"\"\n\n# Imports\nimport sys\n\n# Defines\nEMPTY_SQUARE = \" \"\nWHITE_KING = \"♔\"\nWHITE_QUEEN = \"♕\"\nWHITE_BISHOP = \"♗\"\nWHITE_KNIGHT = \"♘\"\nWHITE_ROOK = \"♖\"\nWHITE_PAWN = \"♙\"\nBLACK_KING = \"♚\"\nBLACK_QUEEN = \"♛\"\nBLACK_BISHOP = \"♝\"\nBLACK_KNIGHT = \"♞\"\nBLACK_ROOK = \"♜\"\nBLACK_PAWN = \"♟\"\nWHITE_PIECES = [WHITE_KING, WHITE_QUEEN, WHITE_BISHOP, WHITE_KNIGHT, WHITE_ROOK, WHITE_PAWN]\nBLACK_PIECES = [BLACK_KING, BLACK_QUEEN, BLACK_BISHOP, BLACK_KNIGHT, BLACK_ROOK, BLACK_PAWN]\nALL_PIECES = [EMPTY_SQUARE] + WHITE_PIECES + BLACK_PIECES\n\nclass Chessboard():\n    # Constructor\n    def __init__(self):\n        # Create an empty board\n        self.__board = [[EMPTY_SQUARE for i in range(8)] for i in range(8)]\n        # Default player turn to white\n        self.__player = \"white\"\n    # Print out current state of the board\n    def debug(self):\n        for row in range(7, -1, -1):\n            print(row+1, end=\" \")\n            for col in range(8):\n                print(self.__board[row][col], end=\" \")\n            print(\"\")\n        print(\"  a b c d e f g h\")\n    # Update current player\n    def set_current_player(self, player):\n        if player != \"white\" and player != \"black\":\n            raise Exception('Unknown player: %s' % player)\n        self.__player = player\n    # Add a new piece to the board\n    def __add_piece(self, type, loc):\n        # If location is out of range\n        if (loc[0] < 0 or loc[0] > 7) or (loc[1] < 0 or loc[1] > 7):\n            raise Exception('Piece out of bounds')\n        # If space is already occupied\n        elif self.__board[loc[0]][loc[1]] != EMPTY_SQUARE:\n            raise Exception('Space already occupied')\n        # Add piece to board\n        else:\n            self.__board[loc[0]][loc[1]] = type\n    # Validate board\n    def __validate_board_state(self):\n        # Maintain a count total of all pieces\n        counts = {\n            WHITE_KING: 0,\n            WHITE_QUEEN: 0,\n            WHITE_BISHOP: 0,\n            WHITE_KNIGHT: 0,\n            WHITE_ROOK: 0,\n            WHITE_PAWN: 0,\n            BLACK_KING: 0,\n            BLACK_QUEEN: 0,\n            BLACK_BISHOP: 0,\n            BLACK_KNIGHT: 0,\n            BLACK_ROOK: 0,\n            BLACK_PAWN: 0\n        }\n\n        # Count all pieces on 
board\n for row in range(8):\n for col in range(8):\n cur_piece = self.__board[row][col]\n # Check for unknown pieces\n if cur_piece not in ALL_PIECES:\n raise Exception('Unknown piece: %s' % str(cur_piece))\n # Update total\n if cur_piece != EMPTY_SQUARE:\n counts[cur_piece] += 1\n\n # Check for illegal piece counts\n if counts[WHITE_KING] < 1:\n raise Exception('Missing white king')\n if counts[WHITE_KING] > 1:\n raise Exception('Too many white kings')\n if counts[BLACK_KING] < 1:\n raise Exception('Missing black king')\n if counts[BLACK_KING] > 1:\n raise Exception('Too many black kings')\n if counts[WHITE_QUEEN] > 9:\n raise Exception('Too many white queens')\n if counts[BLACK_QUEEN] > 9:\n raise Exception('Too many black queens')\n if counts[WHITE_BISHOP] > 10:\n raise Exception('Too many white bishops')\n if counts[BLACK_BISHOP] > 10:\n raise Exception('Too many black bishops')\n if counts[WHITE_KNIGHT] > 10:\n raise Exception('Too many white knights')\n if counts[BLACK_KNIGHT] > 10:\n raise Exception('Too many black knights')\n if counts[WHITE_ROOK] > 10:\n raise Exception('Too many white rooks')\n if counts[BLACK_ROOK] > 10:\n raise Exception('Too many black rooks')\n if counts[WHITE_PAWN] > 8:\n raise Exception('Too many white pawns')\n if counts[BLACK_PAWN] > 8:\n raise Exception('Too many black pawns')\n if counts[WHITE_QUEEN] + counts[WHITE_BISHOP] + counts[WHITE_KNIGHT] + counts[WHITE_ROOK] + counts[WHITE_PAWN] > 15:\n raise Exception('Too many white pieces')\n if counts[BLACK_QUEEN] + counts[BLACK_BISHOP] + counts[BLACK_KNIGHT] + counts[BLACK_ROOK] + counts[BLACK_PAWN] > 15:\n raise Exception('Too many black pieces')\n # Import new board\n def import_board(self, board):\n if len(board) != 8:\n raise Exception('Board size must be 8x8')\n for row in range(8):\n if len(board[row]) != 8:\n raise Exception('Board size must be 8x8')\n for col in range(8):\n self.__add_piece(board[row][col], [7-row, col])\n # Ensure new board is valid\n self.__validate_board_state()\n # Format move for output\n def __new_move(self, from_loc, to_loc):\n col = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\n move = self.__board[from_loc[0]][from_loc[1]] + \" \"\n move += str(from_loc[0]+1) + col[from_loc[1]]\n move += str(to_loc[0]+1) + col[to_loc[1]]\n return move\n # Check is a square is valid (on the board)\n def __check_square_valid(self, loc):\n if loc[0] < 0 or loc[0] > 7:\n return False\n elif loc[1] < 0 or loc[1] > 7:\n return False\n else:\n return True\n # Check if a square in unoccupied\n def __check_square_empty(self, loc):\n if not self.__check_square_valid(loc):\n return False\n elif self.__board[loc[0]][loc[1]] != EMPTY_SQUARE:\n return False\n else:\n return True\n # Check is a square contains an enemy (opposite color piece)\n def __check_square_enemy(self, loc):\n if not self.__check_square_valid(loc):\n return False\n elif self.__player == \"white\" and self.__board[loc[0]][loc[1]] in BLACK_PIECES:\n return True\n elif self.__player == \"black\" and self.__board[loc[0]][loc[1]] in WHITE_PIECES:\n return True\n else:\n return False\n # Check is a square is empty or contains an enemy\n def __check_square_empty_or_enemy(self, loc):\n if self.__check_square_empty(loc):\n return True\n elif self.__check_square_enemy(loc):\n return True\n else:\n return False\n # List all moves for a Pawn\n def __get_pawn_moves(self, loc):\n # Init moves\n moves = []\n\n # If White Player\n if self.__player == \"white\":\n # Move double forward\n if loc[0] == 1 and self.__check_square_empty([loc[0]+1, loc[1]]) 
and self.__check_square_empty([loc[0]+2, loc[1]]):\n move = self.__new_move(loc, [loc[0]+2, loc[1]])\n moves.append(move)\n # Attack diagonally left\n if self.__check_square_enemy([loc[0]+1, loc[1]-1]):\n move = self.__new_move(loc, [loc[0]+1, loc[1]-1])\n moves.append(move)\n # Move forward\n if self.__check_square_empty([loc[0]+1, loc[1]]):\n move = self.__new_move(loc, [loc[0]+1, loc[1]])\n moves.append(move)\n # Attack diagonally right\n if self.__check_square_enemy([loc[0]+1, loc[1]+1]):\n move = self.__new_move(loc, [loc[0]+1, loc[1]+1])\n moves.append(move)\n\n # If Black Player\n else:\n # Move forward twice\n if loc[0] == 6 and self.__check_square_empty([loc[0]-1, loc[1]]) and self.__check_square_empty([loc[0]-2, loc[1]]):\n move = self.__new_move(loc, [loc[0]-2, loc[1]])\n moves.append(move)\n # Attack diagonally left\n if self.__check_square_enemy([loc[0]-1, loc[1]-1]):\n move = self.__new_move(loc, [loc[0]-1, loc[1]-1])\n moves.append(move)\n # Move forward\n if self.__check_square_empty([loc[0]-1, loc[1]]):\n move = self.__new_move(loc, [loc[0]-1, loc[1]])\n moves.append(move)\n # Attack diagonally right\n if self.__check_square_enemy([loc[0]-1, loc[1]+1]):\n move = self.__new_move(loc, [loc[0]-1, loc[1]+1])\n moves.append(move)\n\n return moves\n # List all moves for a Rook\n def __get_rook_moves(self, loc):\n moves = []\n\n # Move/Attack Up\n new_x = loc[0]+1\n new_y = loc[1]\n while self.__check_square_empty_or_enemy([new_x, new_y]):\n move = self.__new_move(loc, [new_x, new_y])\n moves.append(move)\n # Stop moving if enemy is in the way\n if self.__check_square_enemy([new_x, new_y]):\n break\n new_x += 1\n\n # Move/Attack Left\n new_x = loc[0]\n new_y = loc[1]-1\n while self.__check_square_empty_or_enemy([new_x, new_y]):\n move = self.__new_move(loc, [new_x, new_y])\n moves.append(move)\n # Stop moving if enemy is in the way\n if self.__check_square_enemy([new_x, new_y]):\n break\n new_y -= 1\n\n # Move/Attack Right\n new_x = loc[0]\n new_y = loc[1]+1\n while self.__check_square_empty_or_enemy([new_x, new_y]):\n move = self.__new_move(loc, [new_x, new_y])\n moves.append(move)\n # Stop moving if enemy is in the way\n if self.__check_square_enemy([new_x, new_y]):\n break\n new_y += 1\n\n # Move/Attack Down\n new_x = loc[0]-1\n new_y = loc[1]\n while self.__check_square_empty_or_enemy([new_x, new_y]):\n move = self.__new_move(loc, [new_x, new_y])\n moves.append(move)\n # Stop moving if enemy is in the way\n if self.__check_square_enemy([new_x, new_y]):\n break\n new_x -= 1\n\n return moves\n # List all moves for a Knight\n def __get_knight_moves(self, loc):\n moves = []\n\n # Move/Attack Up-Up-Right\n if self.__check_square_empty_or_enemy([loc[0]+2, loc[1]+1]):\n move = self.__new_move(loc, [loc[0]+2, loc[1]+1])\n moves.append(move)\n # Move/Attack Up-Right-Right\n if self.__check_square_empty_or_enemy([loc[0]+1, loc[1]+2]):\n move = self.__new_move(loc, [loc[0]+1, loc[1]+2])\n moves.append(move)\n # Move/Attack Down-Right-Right\n if self.__check_square_empty_or_enemy([loc[0]-1, loc[1]+2]):\n move = self.__new_move(loc, [loc[0]-1, loc[1]+2])\n moves.append(move)\n # Move/Attack Down-Down-Right\n if self.__check_square_empty_or_enemy([loc[0]-2, loc[1]+1]):\n move = self.__new_move(loc, [loc[0]-2, loc[1]+1])\n moves.append(move)\n # Move/Attack Down-Down-Left\n if self.__check_square_empty_or_enemy([loc[0]-2, loc[1]-1]):\n move = self.__new_move(loc, [loc[0]-2, loc[1]-1])\n moves.append(move)\n # Move/Attack Down-Left-Left\n if self.__check_square_empty_or_enemy([loc[0]-1, 
loc[1]-2]):\n move = self.__new_move(loc, [loc[0]-1, loc[1]-2])\n moves.append(move)\n # Move/Attack Up-Left-Left\n if self.__check_square_empty_or_enemy([loc[0]+1, loc[1]-2]):\n move = self.__new_move(loc, [loc[0]+1, loc[1]-2])\n moves.append(move)\n # Move/Attack Up-Up-Left\n if self.__check_square_empty_or_enemy([loc[0]+2, loc[1]-1]):\n move = self.__new_move(loc, [loc[0]+2, loc[1]-1])\n moves.append(move)\n\n return moves\n # List all moves for a Bishop\n def __get_bishop_moves(self, loc):\n moves = []\n\n # Move/Attack Up-Left\n new_x = loc[0]+1\n new_y = loc[1]-1\n while self.__check_square_empty_or_enemy([new_x, new_y]):\n move = self.__new_move(loc, [new_x, new_y])\n moves.append(move)\n # Stop moving if enemy is in the way\n if self.__check_square_enemy([new_x, new_y]):\n break\n new_x += 1\n new_y -= 1\n\n # Move/Attack Up-Right\n new_x = loc[0]+1\n new_y = loc[1]+1\n while self.__check_square_empty_or_enemy([new_x, new_y]):\n move = self.__new_move(loc, [new_x, new_y])\n moves.append(move)\n # Stop moving if enemy is in the way\n if self.__check_square_enemy([new_x, new_y]):\n break\n new_x += 1\n new_y += 1\n\n # Move/Attack Down-Left\n new_x = loc[0]-1\n new_y = loc[1]-1\n while self.__check_square_empty_or_enemy([new_x, new_y]):\n move = self.__new_move(loc, [new_x, new_y])\n moves.append(move)\n # Stop moving if enemy is in the way\n if self.__check_square_enemy([new_x, new_y]):\n break\n new_x -= 1\n new_y -= 1\n\n # Move/ATtack Down-Right\n new_x = loc[0]-1\n new_y = loc[1]+1\n while self.__check_square_empty_or_enemy([new_x, new_y]):\n move = self.__new_move(loc, [new_x, new_y])\n moves.append(move)\n # Stop moving if enemy is in the way\n if self.__check_square_enemy([new_x, new_y]):\n break\n new_x -= 1\n new_y += 1\n\n return moves\n # List all moves for a King\n def __get_king_moves(self, loc):\n moves = []\n\n # Move/Attack Up-Left\n if self.__check_square_empty_or_enemy([loc[0]+1, loc[1]-1]):\n move = self.__new_move(loc, [loc[0]+1, loc[1]-1])\n moves.append(move)\n # Move/Attack Up\n if self.__check_square_empty_or_enemy([loc[0]+1, loc[1]]):\n move = self.__new_move(loc, [loc[0]+1, loc[1]])\n moves.append(move)\n # Move/Attack Up-Right\n if self.__check_square_empty_or_enemy([loc[0]+1, loc[1]+1]):\n move = self.__new_move(loc, [loc[0]+1, loc[1]+1])\n moves.append(move)\n # Move/Attack Left\n if self.__check_square_empty_or_enemy([loc[0], loc[1]-1]):\n move = self.__new_move(loc, [loc[0], loc[1]-1])\n moves.append(move)\n # Move/Attack Right\n if self.__check_square_empty_or_enemy([loc[0], loc[1]+1]):\n move = self.__new_move(loc, [loc[0], loc[1]+1])\n moves.append(move)\n # Move/Attack Down-Left\n if self.__check_square_empty_or_enemy([loc[0]-1, loc[1]-1]):\n move = self.__new_move(loc, [loc[0]-1, loc[1]-1])\n moves.append(move)\n # Move/Attack Down\n if self.__check_square_empty_or_enemy([loc[0]-1, loc[1]]):\n move = self.__new_move(loc, [loc[0]-1, loc[1]])\n moves.append(move)\n # Move/Attack Down-Right\n if self.__check_square_empty_or_enemy([loc[0]-1, loc[1]+1]):\n move = self.__new_move(loc, [loc[0]-1, loc[1]+1])\n moves.append(move)\n\n # Castling\n # White side\n if self.__player == \"white\" and loc[0] == 0 and loc[1] == 4:\n # Check right side\n if self.__check_square_empty([0, 5]) and self.__check_square_empty([0, 6]) and self.__board[0][7] == WHITE_ROOK:\n move = self.__new_move(loc, [0, 6])\n # Move rook too?\n moves.append(move)\n # Check left side\n if self.__check_square_empty([0, 3]) and self.__check_square_empty([0, 2]) and 
self.__check_square_empty([0, 1]) and self.__board[0][0] == WHITE_ROOK:\n move = self.__new_move(loc, [0, 2])\n # Move rook too?\n moves.append(move)\n # Black side\n if self.__player == \"black\" and loc[0] == 7 and loc[1] == 4:\n # Check right side\n if self.__check_square_empty([7, 5]) and self.__check_square_empty([7, 6]) and self.__board[7][7] == BLACK_ROOK:\n move = self.__new_move(loc, [7, 6])\n # Move rook too?\n moves.append(move)\n # Check left side\n if self.__check_square_empty([7, 3]) and self.__check_square_empty([7, 2]) and self.__check_square_empty([7, 1]) and self.__board[7][0] == BLACK_ROOK:\n move = self.__new_move(loc, [7, 2])\n # Move rook too?\n moves.append(move)\n\n return moves\n # List all moves for a Queen\n def __get_queen_moves(self, loc):\n moves = []\n\n # The Queen moves like a Rook and Bishop combined\n moves += self.__get_rook_moves(loc)\n moves += self.__get_bishop_moves(loc)\n\n return moves\n # List all all available moves\n def get_valid_moves(self):\n moves = []\n for row in range(8):\n for col in range(8):\n new_moves = []\n cur_piece = self.__board[row][col]\n\n # Ignore pieces that don't belong to the current player\n if self.__check_square_enemy([row,col]):\n continue;\n\n # Calculate moves based on the current piece\n if cur_piece == WHITE_PAWN or cur_piece == BLACK_PAWN:\n new_moves = self.__get_pawn_moves([row, col])\n elif cur_piece == WHITE_ROOK or cur_piece == BLACK_ROOK:\n new_moves = self.__get_rook_moves([row, col])\n elif cur_piece == WHITE_KNIGHT or cur_piece == BLACK_KNIGHT:\n new_moves = self.__get_knight_moves([row, col])\n elif cur_piece == WHITE_BISHOP or cur_piece == BLACK_BISHOP:\n new_moves = self.__get_bishop_moves([row, col])\n elif cur_piece == WHITE_QUEEN or cur_piece == BLACK_QUEEN:\n new_moves = self.__get_queen_moves([row, col])\n elif cur_piece == WHITE_KING or cur_piece == BLACK_KING:\n new_moves = self.__get_king_moves([row, col])\n moves += new_moves\n\n return moves\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":2038706519729979400,"string":"2,038,706,519,729,979,400"},"line_mean":{"kind":"number","value":38.4923413567,"string":"38.492341"},"line_max":{"kind":"number","value":162,"string":"162"},"alpha_frac":{"kind":"number","value":0.5087544326,"string":"0.508754"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109548,"cells":{"repo_name":{"kind":"string","value":"odoo-arg/odoo_l10n_ar"},"path":{"kind":"string","value":"l10n_ar_invoice_presentation/models/presentation_tools.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4805"},"content":{"kind":"string","value":"# coding: utf-8\n##############################################################################\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n##############################################################################\n\n\nclass PresentationTools:\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def format_date(d):\n        # type: (str) -> str\n        \"\"\"\n        Formats the date for its presentation in the sales/purchases reports.\n        :param d: The date to format.\n        :type d: str\n        :return: The formatted date.\n        :rtype: str\n        \"\"\"\n        if not isinstance(d, str):\n            d = str(d)\n        return d.replace(\"-\", \"\")\n\n    @staticmethod\n    def get_currency_rate_from_move(invoice):\n        \"\"\"\n        Gets the currency rate of the invoice from the journal entry lines.\n        :param invoice: record, invoice\n        :return: float, rate. e.g.: 15.32\n        \"\"\"\n        move = invoice.move_id\n        account = invoice.account_id\n\n        # Fetch all the move lines that use this account\n        move_line = move.line_ids.filtered(lambda x: x.account_id == account)[0]\n\n        # Take the line amount, whether it is a debit or a credit\n        amount = move_line.credit or move_line.debit\n        amount_currency = abs(move_line.amount_currency)\n        # The rate is the amount divided by the currency amount when that is non-zero; otherwise it is divided by itself\n        currency_rate = float(amount) / float(amount_currency or amount)\n\n        return currency_rate\n\n    @staticmethod\n    def get_invoice_type(invoice):\n        \"\"\"\n        Gets the invoice type from the AFIP codes.\n        :param invoice: record, invoice\n        :return: string, AFIP code of the invoice type\n        \"\"\"\n        # Store in invoice_type whether it is a debit note, a credit note or an invoice\n        if invoice.is_debit_note:\n            invoice_type = 'debit_note'\n        else:\n            invoice_type = 'invoice' if invoice.type in ['out_invoice', 'in_invoice'] else 'refund'\n\n        # Look up the document book type according to the invoice type\n        document_type_id = invoice.env['document.book.document.type'].search([\n            ('type', '=', invoice_type),\n            ('category', '=', 'invoice'),\n        ], limit=1)\n        # Look up the voucher type stored in the system according to the book type and denomination\n        # TODO: for import dispatches the voucher type will not exist because of a mapping problem\n        # The following if/else block is temporary and should be removed.\n        type_i = invoice.env.ref('l10n_ar_afip_tables.account_denomination_i')\n        if invoice.denomination_id == invoice.env.ref('l10n_ar_afip_tables.account_denomination_d'):\n            voucher_type = invoice.env['afip.voucher.type'].search([\n                ('document_type_id', '=', document_type_id.id),\n                ('denomination_id', '=', type_i.id)],\n                limit=1\n            )\n        else:\n            # Keep only this portion of the code\n            voucher_type = invoice.env['afip.voucher.type'].search([\n                ('document_type_id', '=', document_type_id.id),\n                ('denomination_id', '=', invoice.denomination_id.id)],\n                limit=1\n            )\n        # Get the AFIP code from the relations table, based on what was computed above\n        document_afip_code = int(invoice.env['codes.models.relation'].get_code('afip.voucher.type', voucher_type.id))\n\n        return document_afip_code\n\n    @staticmethod\n    def format_amount(amount, dp=2):\n        # type: (float, int) -> str\n        \"\"\"\n        Formats the number with the given number of decimals, or two decimals by default.\n        :param amount: The number to format.\n        :type amount: float\n        :param dp: The decimal precision, a.k.a. the number of decimals.\n        :type dp: int\n        :return: The number formatted as a string.\n        :rtype: str\n        \"\"\"\n        amount = str(\"{0:.{1}f}\".format(amount, dp))\n        amount = amount.replace(\".\", \"\").replace(\",\", \"\")\n        return amount\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":-2529918797849831000,"string":"-2,529,918,797,849,831,000"},"line_mean":{"kind":"number","value":42.2882882883,"string":"42.288288"},"line_max":{"kind":"number","value":117,"string":"117"},"alpha_frac":{"kind":"number","value":0.5966701353,"string":"0.59667"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},
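The two pure helpers in the row above are easy to check in isolation; a quick sketch, assuming PresentationTools from that file is importable:

print(PresentationTools.format_date('2017-06-30'))  # '20170630'
print(PresentationTools.format_amount(1234.5))      # '123450' (dot and comma stripped)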
{"rowIdx":109549,"cells":{"repo_name":{"kind":"string","value":"michael-hart/Imperial-Vex-2014"},"path":{"kind":"string","value":"src/Python/serial_methods.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1297"},"content":{"kind":"string","value":"import dispatch\nimport receive\nimport time\nimport struct\n\nclass serial_master:\n    pending_ack = {}\n    disp = dispatch.dispatch_master()\n    rec = receive.receive_master()\n    ack_timeout = 10 #very much a placeholder\n    serial_timeout = 10 #very much a placeholder\n\n    def dispatch(self):\n        cmd = self.disp.dispatch()\n        if ((cmd[0] == '\\x02') or (cmd[0] == '\\x03') or (cmd[0] == '\\x05')): #command requires ack\n            y = struct.pack('f5p', time.time(), cmd)\n            self.pending_ack[cmd[1]] = y\n        return cmd\n    def push_msg(self,cmd,data=0):\n        self.disp.generate_msg(cmd,data)\n    def receive(self,cmd):\n        result = self.rec.rcv_msg(cmd)\n        if cmd[0] == '\\x04': #is acknowledge\n            try:\n                self.pending_ack.pop(chr(result))\n            except KeyError: #dict.pop raises KeyError for an unknown id\n                print \"ID of acknowledge invalid\"\n    def check_timeout(self):\n        t = time.time()\n        if (t - self.rec.time_last) > self.serial_timeout:\n            print 'the cortex is dead. everything is lost'\n        else:\n            for key in self.pending_ack:\n                y = struct.unpack('f5p', self.pending_ack[key])\n                if (t - y[0]) > self.ack_timeout: #y[0] is the send time\n                    self.push_msg(y[1][0], y[1][2]) #y[1] is the packed command\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-7951646154742164000,"string":"-7,951,646,154,742,164,000"},"line_mean":{"kind":"number","value":34.0810810811,"string":"34.081081"},"line_max":{"kind":"number","value":97,"string":"97"},"alpha_frac":{"kind":"number","value":0.5358519661,"string":"0.535852"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},
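The ack table in the row above stores each outbound command as a packed (timestamp, command) pair. A small round-trip sketch of that encoding; note that 'f' is a 32-bit float (coarse for epoch seconds — 'd' would be safer) and '5p' carries at most 4 payload bytes after its length prefix:

import struct
import time

cmd = b'\x02ABC'  # 4 bytes, the most '5p' can hold
record = struct.pack('f5p', time.time(), cmd)
sent_at, sent_cmd = struct.unpack('f5p', record)  # sent_at comes back with float32 rounding
assert sent_cmd == cmd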
{"rowIdx":109550,"cells":{"repo_name":{"kind":"string","value":"jcherqui/searx"},"path":{"kind":"string","value":"tests/unit/engines/test_google_videos.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2859"},"content":{"kind":"string","value":"from collections import defaultdict\nimport mock\nfrom searx.engines import google_videos\nfrom searx.testing import SearxTestCase\n\n\nclass TestGoogleVideosEngine(SearxTestCase):\n\n    def test_request(self):\n        query = 'test_query'\n        dicto = defaultdict(dict)\n        dicto['pageno'] = 1\n        dicto['safesearch'] = 1\n        dicto['time_range'] = ''\n        params = google_videos.request(query, dicto)\n        self.assertIn('url', params)\n        self.assertIn(query, params['url'])\n\n        dicto['safesearch'] = 0\n        params = google_videos.request(query, dicto)\n        self.assertNotIn('safe', params['url'])\n\n    def test_response(self):\n        self.assertRaises(AttributeError, google_videos.response, None)\n        self.assertRaises(AttributeError, google_videos.response, [])\n        self.assertRaises(AttributeError, google_videos.response, '')\n        self.assertRaises(AttributeError, google_videos.response, '[]')\n\n        html = r\"\"\"\n
        <div>\n            <div class=\"g\">\n                <div class=\"r\">\n                    <a href=\"url_1\"><h3>Title 1</h3></a>\n                </div>\n                <div>\n                    <span class=\"st\">Content 1</span>\n                </div>\n            </div>\n            <div class=\"g\">\n                <div class=\"r\">\n                    <a href=\"url_2\"><h3>Title 2</h3></a>\n                </div>\n                <div>\n                    <span class=\"st\">Content 2</span>\n                </div>\n            </div>\n        </div>
\n \n \"\"\"\n response = mock.Mock(text=html)\n results = google_videos.response(response)\n self.assertEqual(type(results), list)\n self.assertEqual(len(results), 2)\n self.assertEqual(results[0]['url'], u'url_1')\n self.assertEqual(results[0]['title'], u'Title 1')\n self.assertEqual(results[0]['content'], u'Content 1')\n self.assertEqual(results[1]['url'], u'url_2')\n self.assertEqual(results[1]['title'], u'Title 2')\n self.assertEqual(results[1]['content'], u'Content 2')\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":7804260171419426000,"string":"7,804,260,171,419,426,000"},"line_mean":{"kind":"number","value":35.1898734177,"string":"35.189873"},"line_max":{"kind":"number","value":71,"string":"71"},"alpha_frac":{"kind":"number","value":0.4323189927,"string":"0.432319"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109551,"cells":{"repo_name":{"kind":"string","value":"stcorp/legato"},"path":{"kind":"string","value":"setup.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1043"},"content":{"kind":"string","value":"from setuptools import setup, find_packages\nimport sys\n\nif sys.hexversion < 0x02070000:\n sys.exit(\"Python 2.7 or newer is required to use this package.\")\n\nsetup(\n name=\"legato\",\n version=\"1.2\",\n author=\"S[&]T\",\n url=\"https://github.com/stcorp/legato\",\n description=\"Task trigger daemon\",\n license=\"BSD\",\n packages=find_packages(),\n entry_points={\n \"console_scripts\": [\n \"legato = legato.main:main\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: System\",\n ],\n install_requires=[\n \"pyyaml\",\n \"schedule\",\n \"watchdog\"\n ]\n)\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":977491382755786200,"string":"977,491,382,755,786,200"},"line_mean":{"kind":"number","value":27.1891891892,"string":"27.189189"},"line_max":{"kind":"number","value":68,"string":"68"},"alpha_frac":{"kind":"number","value":0.5675934803,"string":"0.567593"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109552,"cells":{"repo_name":{"kind":"string","value":"iamaris/ppf"},"path":{"kind":"string","value":"ppf/test/test_hull_white.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"14343"},"content":{"kind":"string","value":"import ppf, math, numpy, unittest\n\ndef _assert_seq_close(a, b, tol=1.0e-8):\n assert (len(a) == len(b)) and \\\n not [l for l in [math.fabs(x - y) <= tol for (x, y) in zip(a, b)] if not l]\n\nclass requestor_tests(unittest.TestCase):\n def test_discount_factor(self):\n env = ppf.market.environment()\n times = numpy.linspace(0, 2, 5)\n env.add_curve(\n \"zc.disc.eur\"\n , ppf.market.curve(\n times\n , numpy.array([math.exp(-0.05*t) for t in times])\n , ppf.math.interpolation.loglinear\n )\n )\n r = ppf.model.hull_white.requestor()\n t = 1.5\n Bt = [r.discount_factor(t, \"eur\", env)]\n\n _assert_seq_close([0.927743486329], Bt)\n\n def test_term_vol(self):\n env = ppf.market.environment()\n env.add_constant(\"cv.mr.eur.hw\", 0.0)\n expiries, tenors = [0.1, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0], 
[0, 90]\n env.add_surface(\n \"ve.term.eur.hw\"\n , ppf.market.surface(expiries, tenors, numpy.array(8*[[0.04, 0.04]]))\n )\n r = ppf.model.hull_white.requestor()\n t = 0.25\n sig = [r.term_vol(t, \"eur\", env)]\n\n _assert_seq_close(sig, [0.1])\n\nclass state_tests(unittest.TestCase):\n def test(self):\n expiries, tenors = [0.1, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0], [0, 90]\n surf = ppf.market.surface(expiries, tenors, numpy.array(8*[[0.04, 0.04]]))\n env = ppf.market.environment()\n env.add_surface(\"ve.term.eur.hw\", surf)\n env.add_constant( \"cv.mr.eur.hw\", 0.01)\n s = ppf.model.hull_white.lattice.state(\"eur\", 11, 3.5)\n x = s.fill(1.25, ppf.model.hull_white.requestor(), env)\n exp = \\\n [-0.78754076\n ,-0.63003261\n ,-0.47252446\n ,-0.31501631\n ,-0.15750815\n , 0.\n , 0.15750815\n , 0.31501631\n , 0.47252446\n , 0.63003261\n , 0.78754076]\n\n _assert_seq_close(exp, x)\n\nclass fill_tests(unittest.TestCase):\n def test_numeraire_rebased_bond(self):\n env = ppf.market.environment()\n times = numpy.linspace(0, 2, 5)\n factors = numpy.array([math.exp(-0.05*t) for t in times])\n env.add_curve(\"zc.disc.eur\"\n , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear))\n expiries, tenors = [0.1, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0], [0, 90]\n env.add_surface(\"ve.term.eur.hw\"\n , ppf.market.surface(expiries, tenors, numpy.zeros((8, 2))))\n env.add_constant(\"cv.mr.eur.hw\", 0.0)\n r = ppf.model.hull_white.requestor()\n s = ppf.model.hull_white.lattice.state(\"eur\", 11, 3.5)\n sx = s.fill(0.25, r, env)\n f = ppf.model.hull_white.fill(2.0)\n PtT = f.numeraire_rebased_bond(0.25, 1.5, \"eur\", env, r, sx)\n exp = \\\n [0.927743486329\n ,0.927743486329\n ,0.927743486329\n ,0.927743486329\n ,0.927743486329\n ,0.927743486329\n ,0.927743486329\n ,0.927743486329\n ,0.927743486329\n ,0.927743486329\n ,0.927743486329]\n\n _assert_seq_close(exp, PtT)\n\n def test_libor(self):\n from ppf.date_time \\\n import date, shift, modified_following, basis_act_360, months\n pd = date(2008, 01, 01)\n env = ppf.market.environment(pd)\n times = numpy.linspace(0, 2, 5)\n factors = numpy.array([math.exp(-0.05*t) for t in times])\n env.add_curve(\"zc.disc.eur\"\n , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear))\n expiries, tenors = [0.1, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0], [0, 90]\n env.add_surface(\"ve.term.eur.hw\"\n , ppf.market.surface(expiries, tenors, numpy.zeros((8, 2))))\n env.add_constant(\"cv.mr.eur.hw\", 0.0)\n rd = date(2008, 07, 01)\n libor_obs = \\\n ppf.core.libor_rate( \\\n None #attributes\n , 0 #flow-id\n , 0 #reset-id\n , rd #reset-date\n , \"eur\"#reset-currency\n , rd #projection-start-date\n , shift(rd + months(6), modified_following)#projection-end-date\n , basis_act_360#projection-basis\n , ppf.core.fixing(False))# fixing (and no spread)\n r = ppf.model.hull_white.requestor()\n s = ppf.model.hull_white.lattice.state(\"eur\", 11, 3.5)\n sx = s.fill(0.25, r, env)\n f = ppf.model.hull_white.fill(2.0)\n libortT = f.libor(0.25, libor_obs, env, r, sx)\n exp = \\\n [0.0499418283138\n ,0.0499418283138\n ,0.0499418283138\n ,0.0499418283138\n ,0.0499418283138\n ,0.0499418283138\n ,0.0499418283138\n ,0.0499418283138\n ,0.0499418283138\n ,0.0499418283138\n ,0.0499418283138\n ]\n\n _assert_seq_close(exp, libortT)\n\nclass rollback_tests(unittest.TestCase):\n def test_discounted_libor_rollback(self):\n from ppf.date_time \\\n import date, shift, modified_following, basis_act_360, months\n pd = date(2008, 01, 01)\n env = ppf.market.environment(pd)\n times = 
numpy.linspace(0, 6, 10)\n factors = numpy.array([math.exp(-0.05*t) for t in times])\n env.add_curve(\"zc.disc.eur\"\n , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear))\n expiries, tenors = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 90]\n values = numpy.zeros((9, 2))\n values.fill(0.001)\n env.add_surface(\"ve.term.eur.hw\"\n , ppf.market.surface(expiries, tenors, values))\n env.add_constant(\"cv.mr.eur.hw\", 0.01)\n r = ppf.model.hull_white.requestor()\n s = ppf.model.hull_white.lattice.state(\"eur\", 41, 4.5)\n f = ppf.model.hull_white.fill(5.0)\n rd = date(2011, 01, 01)\n libor_obs = \\\n ppf.core.libor_rate( \\\n None #attributes\n , 0 #flow-id\n , 0 #reset-id\n , rd #reset-date\n , \"eur\"#reset-currency\n , rd #projection-start-date\n , shift(rd + months(6), modified_following)#projection-end-date\n , basis_act_360#projection-basis\n , ppf.core.fixing(False))# fixing (and no spread)\n t = env.relative_date(libor_obs.proj_start_date())/365.0\n T = env.relative_date(libor_obs.proj_end_date())/365.0\n sx = s.fill(t, r, env)\n libort = f.libor(t, libor_obs, env, r, sx)\n ptT = f.numeraire_rebased_bond(t, T, \"eur\", env, r, sx)\n pv = libort*ptT*libor_obs.year_fraction()\n roll = ppf.model.hull_white.lattice.rollback(\"eur\")\n intermediate_pv = roll.rollback(0.5*t, t, s, r, env, pv)\n actual = roll.rollback(0.0, 0.5*t, s, r, env, intermediate_pv).mean() \n expected = r.discount_factor(t, \"eur\", env)-r.discount_factor(T, \"eur\", env)\n _assert_seq_close([expected],[actual],1.0e-6)\n\n def test_bond_option(self): \n from ppf.date_time \\\n import date, shift, modified_following, basis_act_360, months\n pd = date(2008, 01, 01)\n env = ppf.market.environment(pd)\n times = numpy.linspace(0, 6, 10)\n factors = numpy.array([math.exp(-0.05*t) for t in times])\n env.add_curve(\"zc.disc.eur\"\n , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear))\n expiries, tenors = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 90]\n values = numpy.zeros((9, 2))\n values.fill(0.001)\n env.add_surface(\"ve.term.eur.hw\"\n , ppf.market.surface(expiries, tenors, values))\n env.add_constant(\"cv.mr.eur.hw\", 0.01)\n r = ppf.model.hull_white.requestor()\n s = ppf.model.hull_white.lattice.state(\"eur\", 41, 4.5)\n f = ppf.model.hull_white.fill(5.0)\n t = 3.0\n T = 4.0\n terminal_T = 5.0\n sx = s.fill(t, r, env)\n ptT = f.numeraire_rebased_bond(t, T, \"eur\", env, r, sx)\n k = 0.9\n pv = ptT-k\n roll = ppf.model.hull_white.lattice.rollback(\"eur\") \n actual = roll.rollback_max(0.0, t, s, r, env, pv).mean()\n volt = r.term_vol(t, \"eur\", env)*r.local_vol(T, terminal_T, \"eur\", env)\n F = r.discount_factor(T, \"eur\", env)\n d1 = math.log(F/k)/volt+0.5*volt\n d2 = d1-volt\n expected = F*ppf.math.N(d1)-k*ppf.math.N(d2)\n _assert_seq_close([expected],[actual],1.0e-5)\n\n def test_constant(self):\n from ppf.date_time \\\n import date, shift, modified_following, basis_act_360, months\n pd = date(2008, 01, 01)\n env = ppf.market.environment(pd)\n times = numpy.linspace(0, 6, 10)\n factors = numpy.array([math.exp(-0.05*t) for t in times])\n env.add_curve(\"zc.disc.eur\"\n , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear))\n expiries, tenors = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 90]\n values = numpy.zeros((9, 2))\n values.fill(0.001)\n env.add_surface(\"ve.term.eur.hw\"\n , ppf.market.surface(expiries, tenors, values))\n env.add_constant(\"cv.mr.eur.hw\", 0.01)\n r = ppf.model.hull_white.requestor()\n s = 
ppf.model.hull_white.lattice.state(\"eur\", 41, 5.5)\n f = ppf.model.hull_white.fill(5.0)\n t = 3.0\n T = 4.0\n terminal_T = 5.0\n sx = s.fill(t, r, env)\n yT = numpy.zeros(41)\n yT.fill(1)\n roll = ppf.model.hull_white.lattice.rollback(\"eur\") \n yt = roll.rollback(t, T, s, r, env, yT)\n _assert_seq_close(yt, yT, 1.0e-5)\n\nclass evolve_tests(unittest.TestCase):\n def test_mean_and_variance(self):\n from ppf.date_time \\\n import date, shift, modified_following, basis_act_360, months\n pd = date(2008, 01, 01)\n env = ppf.market.environment(pd)\n times = numpy.linspace(0, 6, 10)\n factors = numpy.array([math.exp(-0.05*t) for t in times])\n env.add_curve(\"zc.disc.eur\"\n , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear))\n expiries, tenors = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 90]\n values = numpy.zeros((9, 2))\n values.fill(0.001)\n env.add_surface(\"ve.term.eur.hw\"\n , ppf.market.surface(expiries, tenors, values))\n env.add_constant(\"cv.mr.eur.hw\", 0.01)\n r = ppf.model.hull_white.requestor()\n s = ppf.model.hull_white.monte_carlo.state(10000)\n e = ppf.model.hull_white.monte_carlo.evolve(\"eur\")\n e.evolve(0.0,0.5,s,r,env)\n e.evolve(0.5,1.0,s,r,env)\n variates = s.get_variates()\n mean = variates.sum()/10000\n assert(math.fabs(mean) < 1.0e-4)\n tmp = variates*variates\n variance = tmp.sum()/10000\n vol = r.term_vol(1.0,\"eur\",env)\n assert(math.fabs(variance-vol*vol) < 1.0e-4)\n\n def test_bond(self):\n from ppf.date_time \\\n import date, shift, modified_following, basis_act_360, months\n pd = date(2008, 01, 01)\n env = ppf.market.environment(pd)\n times = numpy.linspace(0, 6, 10)\n factors = numpy.array([math.exp(-0.05*t) for t in times])\n env.add_curve(\"zc.disc.eur\"\n , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear))\n expiries, tenors = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 90]\n values = numpy.zeros((9, 2))\n values.fill(0.001)\n env.add_surface(\"ve.term.eur.hw\"\n , ppf.market.surface(expiries, tenors, values))\n env.add_constant(\"cv.mr.eur.hw\", 0.01)\n r = ppf.model.hull_white.requestor()\n s = ppf.model.hull_white.monte_carlo.state(10000)\n e = ppf.model.hull_white.monte_carlo.evolve(\"eur\")\n e.evolve(0.0,3.0,s,r,env)\n f = ppf.model.hull_white.fill(5.0)\n t = 3.0\n T = 4.0\n sx = s.fill(t, r, env)\n ptT = f.numeraire_rebased_bond(t, T, \"eur\", env, r, sx)\n actual = ptT.mean()\n expected = r.discount_factor(T, \"eur\", env)\n assert(math.fabs(actual-expected) < 1.0e-3)\n\nclass exercise_tests(unittest.TestCase):\n def test_explanatory_variables(self):\n from ppf.math.interpolation import loglinear\n times = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]\n factors = [math.exp(-0.05*t) for t in times]\n c = ppf.market.curve(times, factors, loglinear)\n expiries = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0]\n tenors = [0, 90]\n values = numpy.zeros((8, 2))\n surf = ppf.market.surface(expiries, tenors, values)\n from ppf.date_time \\\n import date, shift_convention, modified_following, basis_act_360, months\n pd = date(2008, 01, 01)\n env = ppf.market.environment(pd)\n key = \"zc.disc.eur\"\n env.add_curve(key, c)\n key = \"ve.term.eur.hw\"\n env.add_surface(key, surf)\n key = \"cv.mr.eur.hw\"\n env.add_constant(key, 0.0)\n r = ppf.model.hull_white.requestor()\n s = ppf.model.hull_white.monte_carlo.state(10)\n sx = s.fill(0.25, r, env)\n f = ppf.model.hull_white.fill(3.0)\n flows = ppf.core.generate_flows(\n start = date(2008, 01, 01)\n , end = date(2010, 01, 01)\n , duration = months\n , period = 6\n , 
shift_method = shift_convention.modified_following\n , basis = \"ACT/360\"\n , pay_currency = \"EUR\")\n lg = ppf.core.leg(flows, ppf.core.PAY)\n ex = ppf.model.hull_white.monte_carlo.cle_exercise(lg)\n t = env.relative_date(flows[1].accrual_start_date())/365.0\n T = env.relative_date(flows[1].accrual_end_date())/365.0\n ret = ex(t, f, sx, r, env)\n dft = c(t)\n dfT = c(T)\n expected_libor = (dft/dfT-1.0)/flows[1].year_fraction()\n pv01 = 0.0\n for fl in flows[1:]:\n T = env.relative_date(fl.pay_date())/365.0\n dfT = c(T)\n pv01 += fl.year_fraction()*dfT\n T = env.relative_date(flows[-1].accrual_end_date())/365.0\n dfT = c(T)\n expected_swap = (dft-dfT)/pv01\n expected_libors = numpy.zeros(10)\n expected_libors.fill(expected_libor)\n expected_swaps = numpy.zeros(10)\n expected_swaps.fill(expected_swap)\n actual_libors = ret[:, 0]\n actual_swaps = ret[:, 1]\n\n _assert_seq_close(actual_libors, expected_libors)\n _assert_seq_close(actual_swaps, expected_swaps)\n\nclass hull_white_test_suite(unittest.TestSuite):\n def __init__(self):\n tests = map(requestor_tests,('test_discount_factor','test_term_vol')) + \\\n map(state_tests,('test',)) + \\\n map(fill_tests,('test_numeraire_rebased_bond', 'test_libor')) + \\\n map(rollback_tests, ('test_discounted_libor_rollback','test_bond_option', 'test_constant')) + \\\n map(evolve_tests, ('test_mean_and_variance', 'test_bond')) + \\\n map(exercise_tests, ('test_explanatory_variables',))\n\n unittest.TestSuite.__init__(self, tests)\n\n#////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8\n# driver\n#\n\ndef suite():\n all_tests = unittest.TestSuite(\n (\n hull_white_test_suite()\n , \n ) )\n\n return all_tests\n \ndef run_tests():\n runner = unittest.TextTestRunner()\n runner.run(suite())\n\nif __name__ == '__main__': run_tests()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-2732997826539042000,"string":"-2,732,997,826,539,042,000"},"line_mean":{"kind":"number","value":35.9664948454,"string":"35.966495"},"line_max":{"kind":"number","value":107,"string":"107"},"alpha_frac":{"kind":"number","value":0.5857212578,"string":"0.585721"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109553,"cells":{"repo_name":{"kind":"string","value":"hayashizakitakaaki/Introduction_mysite"},"path":{"kind":"string","value":"accounts/models.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3386"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin\nfrom django.db import models\nfrom django.shortcuts import get_object_or_404\n\n\nclass UserManager(BaseUserManager):\n use_in_migrations = True\n\n def _create_user(self, username, password, is_superuser, first_name=None, last_name=None, **extra_fields):\n if not username or len(username.strip()) < 1:\n raise ValueError(u'ユーザー名を入力してください!')\n if User.objects.filter(username=username):\n raise ValueError(u'そのユーザーは登録されています')\n if first_name is not None and len(first_name.strip()) < 1:\n raise ValueError(u'姓を入力してください!')\n if last_name is not None and len(last_name.strip()) < 1:\n raise ValueError(u'名を入力してください!')\n user = self.model(\n username=username, first_name=first_name, last_name=last_name, is_superuser=is_superuser, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def _edit_user(self, id, username, is_superuser, first_name=None, last_name=None, **extra_fields):\n if not 
username or len(username.strip()) < 1:\n raise ValueError(u'ユーザー名を入力してください!')\n if first_name is not None and len(first_name.strip()) < 1:\n raise ValueError(u'姓を入力してください!')\n if last_name is not None and len(last_name.strip()) < 1:\n raise ValueError(u'名を入力してください!')\n try:\n user = get_object_or_404(User, pk=id)\n except:\n raise ValueError(u'ユーザーが存在しません')\n user.username = username\n user.first_name = first_name\n user.last_name = last_name\n user.is_superuser = is_superuser\n user.save(using=self._db)\n return user\n\n def create_superuser(self, username, password, **extra_fields):\n return self._create_user(username, password, True, **extra_fields)\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n username = models.CharField('ユーザーID', max_length=30, unique=True,\n help_text=\"This using user ID and use login or logout\")\n screenname = models.CharField('表示名', max_length=255,\n help_text=\"\")\n first_name = models.CharField('姓', max_length=255, blank=True, null=True,\n help_text=\"\")\n last_name = models.CharField('名', max_length=255, blank=True, null=True,\n help_text=\"\")\n is_active = models.BooleanField('有効フラグ', default=True)\n is_staff = models.BooleanField('スタッフ', default=True)\n created_date = models.DateTimeField('登録日時', auto_now_add=True)\n modified_date = models.DateTimeField('更新日時', auto_now=True)\n\n objects = UserManager()\n USERNAME_FIELD = 'username'\n\n class Meta:\n verbose_name = 'ユーザー'\n verbose_name_plural = verbose_name\n\n def get_full_name(self):\n if self.first_name and self.last_name:\n return self.first_name + self.last_name\n else:\n return self.username\n\n def get_short_name(self):\n return self.first_name\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":5415129295822937000,"string":"5,415,129,295,822,937,000"},"line_mean":{"kind":"number","value":40.68,"string":"40.68"},"line_max":{"kind":"number","value":117,"string":"117"},"alpha_frac":{"kind":"number","value":0.6225207933,"string":"0.622521"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109554,"cells":{"repo_name":{"kind":"string","value":"Micronaet/micronaet-mx8"},"path":{"kind":"string","value":"l10n_it_ddt_sectional/journal.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2205"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n###############################################################################\n#\n# Copyright (C) 2001-2014 Micronaet SRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n###############################################################################\nimport os\nimport sys\nimport logging\nimport openerp\nimport openerp.netsvc as netsvc\nimport openerp.addons.decimal_precision as dp\nfrom openerp.osv import fields, osv, expression, orm\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom openerp import SUPERUSER_ID, api\nfrom openerp import tools\nfrom openerp.tools.translate import _\nfrom openerp.tools.float_utils import float_round as round\nfrom openerp.tools import (DEFAULT_SERVER_DATE_FORMAT, \n DEFAULT_SERVER_DATETIME_FORMAT, \n DATETIME_FORMATS_MAP, \n float_compare)\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass StockDdT(orm.Model):\n '''Override DDT for change set number function:\n '''\n _inherit = 'stock.ddt'\n\n def set_number(self, cr, uid, ids, context=None):\n ''' Override original function:\n ''' \n for ddt in self.browse(cr, uid, ids, context=context):\n if not ddt.name:\n name = self.pool.get('ir.sequence').get_id(\n cr, uid, ddt.sequence.id, code_or_id='id', \n context=context) \n self.write(cr, uid, ddt.id, {\n 'name': name}, context=context)\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":1794800547456019000,"string":"1,794,800,547,456,019,000"},"line_mean":{"kind":"number","value":37.0172413793,"string":"37.017241"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.6367346939,"string":"0.636735"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109555,"cells":{"repo_name":{"kind":"string","value":"ageek/useful-papers-codes"},"path":{"kind":"string","value":"dropconnect-on-CIFAR10/drop-nn/imagenetdata-old.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"7620"},"content":{"kind":"string","value":"# image net data provider\n\nfrom PIL import Image\nfrom util import pickle,unpickle\nimport numpy as n\nimport sys\nfrom numpy.random import random_integers\nfrom time import time, asctime, localtime, strftime\nfrom math import *\n\nMEAN_FILE_EXT = \"_mean\"\n\ndef PIL2array(img):\n #if img.mode == 'L':\n # r = n.array(img.getdata(), n.uint8).reshape(img.size[1], img.size[0] )\n # result = n.zeros( (img.size[1], img.size[0],3 ), n.uint8 )\n # result[:,:,0] = r\n # result[:,:,1] = r\n # result[:,:,2] = r\n # return result\n #else:\n # return n.array(img.getdata(), n.uint8).reshape(img.size[1], img.size[0], 3)\n if img.mode == 'L':\n I = n.asarray( img )\n result = n.zeros( (img.size[1], img.size[0],3 ), n.uint8 )\n result[:,:,0] = I\n result[:,:,1] = I\n result[:,:,2] = I\n return result\n else:\n return n.asarray( img )\n\ndef array2PIL(arr):\n return Image.fromarray( n.uint8(arr) )\n\nclass ImagenetDataProvider:\n def __init__( self, data_file, root_path, data_mode = \"train\", random_transform = False,\n batch_size = 128, crop_width = 224, crop_height = 224 ):\n # read image-name image-index map from file\n self.data = unpickle( data_file )\n self.num_classes = len( self.data['index_map'] )\n self.data_mode = data_mode\n self.random_transform = random_transform\n self.root_path = root_path\n if data_mode == \"all\":\n index_map = self.data['index_map']\n elif data_mode == \"val\":\n index_map = self.data['index_map_val']\n elif data_mode == \"train\":\n index_map = self.data['index_map_train']\n else:\n print \"data_mode: \" + data_mode + \" not valid\"\n import pdb; 
pdb.set_trace()\n sys.exit(1)\n\n # get batch queue\n self.batch_queue = []\n has_add = True\n while has_add:\n has_add = False\n for i in range( self.num_classes ):\n if len(index_map[i]) > 0:\n index = index_map[i].pop()\n self.batch_queue.append( index )\n has_add = True\n\n self.num_images = len( self.batch_queue )\n\n #init current index and batch size\n self.batch_size = batch_size\n self.prev_batch_size = batch_size\n self.crop_width = crop_width\n self.crop_height = crop_height\n self.batch_index = 1\n self.epoch = 1\n \n # read data mean from file\n data_mean_file = unpickle( data_file + MEAN_FILE_EXT )\n self.data_mean = data_mean_file['data']\n\n def get_data_dims( self, idx ):\n if idx == 0:\n return self.crop_width * self.crop_height * 3\n if idx == 1:\n return 1\n\n def get_previous_batch_size( self ):\n return self.prev_batch_size\n\n def get_next_batch( self ):\n # construct next batch online\n # batch_data[0]: epoch\n # batch_data[1]: batchnum\n # batch_data[2]['label']: each column represents an image \n # batch_data[2]['data'] : each column represents an image \n # this function only crop center 256 x 256 in image for classification\n\n\n total_time_start = time()\n\n alloc_time_start = time()\n result_data = n.zeros( ( self.crop_width * self.crop_height * 3, self.batch_size ), \\\n n.float32 )\n result_label = n.zeros( (1,self.batch_size ), n.float32 )\n\n batch_index = self.batch_index - 1\n if batch_index * self.batch_size >= self.num_images:\n self.batch_index = 1\n self.epoch += 1\n batch_index = 0\n alloc_time = time() - alloc_time_start\n\n # loading/tranform image time\n load_time = 0\n transform_time = 0\n\n lt_time_start = time()\n k = 0\n for i in range( self.batch_size ):\n index = (i + batch_index * self.batch_size ) \n if index >= self.num_images:\n break\n k += 1\n index = self.batch_queue[index]\n result_data[:,i], result_label[0,i], lti, tti = self.get_data_label( index )\n load_time += lti\n transform_time += tti\n lt_time = time() - lt_time_start \n\n pack_time_start = time()\n # shrink result_data, result_label to have k columns\n if k < self.batch_size:\n result_data = result_data[:,0:k]\n result_label = result_label[0,0:k].reshape(1,k)\n self.previous_batch_size = k\n\n self.batch_index += 1\n result = {}\n result['data'] = result_data\n result['label'] = result_label\n #result['label'] = result_label % 10\n #import pdb; pdb.set_trace()\n pack_time = time() - pack_time_start\n print \"load data: (%.3f sec) \" % ( time() - total_time_start ),\n print \" = %.2f(%.2f + %.2f) + %.2f\" % (lt_time, load_time , transform_time, alloc_time), \n return self.epoch, batch_index+1, result\n\n def get_data_label( self, index ):\n #import pdb; pdb.set_trace()\n image_path = self.root_path + \"/\" + self.data['image_path'][index]\n label = self.data['image_label'][index]\n\n #load image \n load_time_start= time()\n im = Image.open( image_path )\n image_matrix = PIL2array( im )\n load_time = time() - load_time_start \n\n # generate transformed image\n transform_time_start = time()\n #[x,y,w,h] = im.getbbox()\n x = 0\n y = 0\n (w,h) = im.size\n\n # get image matrix and substract mean\n image_matrix = image_matrix.astype(n.float32)\n image_matrix -= self.data_mean\n\n if self.random_transform:\n # random crop\n x += random_integers( 0, w - self.crop_width - 1)\n y += random_integers( 0, h - self.crop_height - 1)\n else:\n # fixed crop\n x += (w - self.crop_width)/2\n y += (h - self.crop_height)/2\n\n #crop image\n assert( x + self.crop_width < w )\n assert( y + 
self.crop_height < h )\n #im = im.crop( (x,y, x + self.crop_width, y + self.crop_height ) )\n image_matrix = image_matrix[ x:x+self.crop_width, y:y+self.crop_width, : ]\n\n if self.random_transform:\n # flip: roll a dice to whether flip image\n if random_integers( 0,1 ) > 0.5:\n #im = im.transpose( Image.FLIP_LEFT_RIGHT )\n image_matrix = image_matrix[:, -1::-1, :]\n\n image_matrix = image_matrix.reshape( (self.crop_width * self.crop_height * 3, ) )\n image_matrix = n.require( image_matrix, dtype=n.single, \n requirements='C')\n label = n.require( label, dtype=n.single, requirements='C' )\n\n transform_time = time() - transform_time_start \n return image_matrix, label, load_time, transform_time;\n\n def get_num_classes( self ):\n return self.num_classes\n\n def get_num_batches( self ):\n return int(ceil( 1.0 * len(self.batch_queue) / self.batch_size ))\n\n def print_data_summary( self ):\n class_labels = [ self.data['image_label'][x] for x in self.batch_queue ]\n label_hist = [0] * self.get_num_classes() \n for i in range( len(class_labels ) ):\n label_hist[ class_labels[i] ] += 1\n print \"Class Label Hist: \", label_hist, len(label_hist)\n print \"Num Batches : \", self.get_num_batches()\n\nif __name__ == \"__main__\":\n data_file = '/home/snwiz/data/imagenet12/code/data/imagenet_data_tiny10'\n provider = ImagenetDataProvider( data_file, 'val', batch_size = 128, random_transform = True )\n for i in range(2000):\n epoch, batch_index, data = provider.get_next_batch()\n print 'epoch: ' + str(epoch) + ' batch_index: ' + str(batch_index) + \\\n '/' + str(provider.get_num_batches()) + \\\n ' data: ' + str(data['data'][0:5,0:5]) +\\\n ' label: ' + str(data['label'][0:5,0:5] )\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":716190105202208900,"string":"716,190,105,202,208,900"},"line_mean":{"kind":"number","value":33.479638009,"string":"33.479638"},"line_max":{"kind":"number","value":97,"string":"97"},"alpha_frac":{"kind":"number","value":0.5700787402,"string":"0.570079"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109556,"cells":{"repo_name":{"kind":"string","value":"pintomollo/zztip"},"path":{"kind":"string","value":"organize_shh.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1240"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport re\nimport glob\nimport shutil\nimport shlex, subprocess\n\nif __name__=='__main__':\n helpmess=\"\"\"Usage:\norganize_shh dir\n\nConverts the output of the Leica SB configuration into single tiffs.\n\"\"\"\n # Inputs\n if len(sys.argv)<2:\n print(helpmess)\n sys.exit(0)\n else:\n indir=sys.argv[1]\n\n indir=os.path.realpath(indir)\n\n count = 1;\n dpa = '-100';\n isgfp = False;\n\n if os.path.exists(indir) and os.path.isdir(indir):\n files = os.listdir(indir)\n for fname in files:\n res = re.match('SB25_(\\d+)_(-?\\d+)dpa_(Z\\d+)_(\\d+)(.*)\\.tif', fname)\n if res != None:\n ids = res.group(2)\n index = ''\n\n if ids != dpa:\n if (res.group(5) == '_GFP'):\n dpa = ids\n count = 1\n else:\n index = chr(96+count)\n if (res.group(5) == '_GFP'):\n count += 1\n\n print('{name} -> SB25_{z}{i}{g}_{d}dpa_{D}'.format(name=fname, z=res.group(3), i=index, g=res.group(5), d=ids, D=res.group(1)))\n os.rename(os.path.join(indir, fname), os.path.join(indir, 'SB25_{z}{i}{g}_{d}dpa_{D}.tif'.format(z=res.group(3), i=index, g=res.group(5), d=ids, 
D=res.group(1))))\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-584310953087189600,"string":"-584,310,953,087,189,600"},"line_mean":{"kind":"number","value":24.8333333333,"string":"24.833333"},"line_max":{"kind":"number","value":170,"string":"170"},"alpha_frac":{"kind":"number","value":0.5564516129,"string":"0.556452"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109557,"cells":{"repo_name":{"kind":"string","value":"meine-stadt-transparent/meine-stadt-transparent"},"path":{"kind":"string","value":"importer/management/commands/_import_base_command.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1662"},"content":{"kind":"string","value":"import logging\nfrom abc import ABC\nfrom typing import Tuple, Dict, Any\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom importer.importer import Importer\nfrom importer.loader import get_loader_from_body, BaseLoader\nfrom mainapp.models import Body\n\nlogger = logging.getLogger(__name__)\n\n\nclass ImportBaseCommand(BaseCommand, ABC):\n def add_arguments(self, parser):\n parser.add_argument(\"--body\", help=\"The oparl id of the body\")\n parser.add_argument(\n \"--ignore-modified\", dest=\"ignore_modified\", action=\"store_true\"\n )\n parser.add_argument(\"--force-singlethread\", action=\"store_true\")\n parser.add_argument(\n \"--skip-download\",\n action=\"store_true\",\n dest=\"skip_download\",\n default=False,\n help=\"Do not download and parse the files\",\n )\n\n def get_importer(self, options: Dict[str, Any]) -> Tuple[Importer, Body]:\n if options.get(\"body\"):\n body = Body.objects.get(oparl_id=options[\"body\"])\n else:\n body = Body.objects.get(id=settings.SITE_DEFAULT_BODY)\n\n if body.oparl_id is not None:\n loader = get_loader_from_body(body.oparl_id)\n importer = Importer(\n loader, body, ignore_modified=options[\"ignore_modified\"]\n )\n else:\n importer = Importer(\n BaseLoader(dict()), ignore_modified=options[\"ignore_modified\"]\n )\n importer.force_singlethread = options[\"force_singlethread\"]\n importer.download_files = not options[\"skip_download\"]\n\n return importer, body\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":8626599972573587000,"string":"8,626,599,972,573,587,000"},"line_mean":{"kind":"number","value":33.625,"string":"33.625"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.6245487365,"string":"0.624549"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109558,"cells":{"repo_name":{"kind":"string","value":"kratsg/ironman"},"path":{"kind":"string","value":"tests/test_history.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1083"},"content":{"kind":"string","value":"from zope.interface.verify import verifyClass, verifyObject\nfrom ironman.history import History\nfrom ironman.interfaces import IHistory\n\nfrom ironman.globals import IPBUS_VERSION, TESTPACKETS\nfrom ironman.packet import IPBusPacket\n\n# fixtures for passing in the objects\nimport pytest\n\n\ndef test_history_create():\n obj = History()\n assert obj is not None\n\n\ndef test_history_class_iface():\n # Assure the class implements the declared interface\n assert verifyClass(IHistory, History)\n\n\ndef test_history_instance_iface():\n # Assure instances of the class provide the declared interface\n assert verifyObject(IHistory, History())\n\n\ndef test_history_empty():\n h = History()\n 
assert len(h) == 0\n    assert not any(h.packets)\n    assert len(h.packets) == h.maxlen\n\n\ndef test_history_record():\n    h = History()\n    for i in range(101):\n        p = IPBusPacket(TESTPACKETS['big-endian'])\n        p.request.header.id = i\n        h.record(p)\n    assert len(h) == h.maxlen\n    assert all(h.packets)\n    assert 0 not in h\n    assert 1 in h\n    assert 100 in h\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-3390162100391632400,"string":"-3,390,162,100,391,632,400"},"line_mean":{"kind":"number","value":23.6136363636,"string":"23.613636"},"line_max":{"kind":"number","value":66,"string":"66"},"alpha_frac":{"kind":"number","value":0.6989843029,"string":"0.698984"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109559,"cells":{"repo_name":{"kind":"string","value":"paulpc/nyx"},"path":{"kind":"string","value":"plugin_template.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2211"},"content":{"kind":"string","value":"import syslog\n\ndef add_ip(ip,settings,intel_list,tags):\n    \"\"\" adds an IP to the pre-established list. The tags might or might not be supported by the control\"\"\"\n    \n    # your code here\n    \n    if \"[condition for confirming a successful addition]\":\n        syslog.syslog(syslog.LOG_INFO,'nyx->[this_plugin]: successfully added %s to %s'% (ip,intel_list))\n        return True\n    else:\n        syslog.syslog(syslog.LOG_ERR,'nyx->[this_plugin]: problems adding %s to %s'% (ip,intel_list))\n        return False\n\ndef add_domain(domain,settings,intel_list,tags):\n    \"\"\" adds a domain to the pre-established list. The tags might or might not be supported by the control\"\"\"\n    \n    # your code here\n    \n    if \"[condition for confirming a successful addition]\":\n        syslog.syslog(syslog.LOG_INFO,'nyx->[this_plugin]: successfully added %s to %s'% (domain,intel_list))\n        return True\n    else:\n        syslog.syslog(syslog.LOG_ERR,'nyx->[this_plugin]: problems adding %s to %s'% (domain,intel_list))\n        return False\n\ndef list_ips(settings):\n    \"\"\" retrieves the IP addresses from the control's specific lists for comparison\"\"\"\n    ip_index={}\n    # your code here\n    return ip_index\n\ndef list_domains(settings):\n    \"\"\" retrieves the domains from the control's lists for comparison.\n    The index should be structured as a dictionary of {domain:intel_list}\"\"\"\n    domain_index={}\n    # your code here\n    return domain_index\n\ndef remove_ip(ip,settings):\n    \"\"\" removes an IP from the control\"\"\"\n    \n    # your code here\n    \n    \n    if \"[conditions for successful removal]\":\n        syslog.syslog(syslog.LOG_INFO,'nyx->[this_plugin]: successfully removed %s'% (ip))\n        return True\n    else:\n        syslog.syslog(syslog.LOG_ERR,'nyx->[this_plugin]: problems removing %s'% (ip))\n        return False\n    \ndef remove_domain(domain,settings):\n    \"\"\" removes a domain from the control\"\"\"\n    \n    # your code here\n    \n    \n    if \"[conditions for successful removal]\":\n        syslog.syslog(syslog.LOG_INFO,'nyx->[this_plugin]: successfully removed %s'% (domain))\n        return True\n    else:\n        syslog.syslog(syslog.LOG_ERR,'nyx->[this_plugin]: problems removing %s'% (domain))\n        return
False"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-30865448108538424,"string":"-30,865,448,108,538,424"},"line_mean":{"kind":"number","value":33.5625,"string":"33.5625"},"line_max":{"kind":"number","value":110,"string":"110"},"alpha_frac":{"kind":"number","value":0.6454093171,"string":"0.645409"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109560,"cells":{"repo_name":{"kind":"string","value":"yugangzhang/GitTest"},"path":{"kind":"string","value":"CMS_Profile/81-beam.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"92270"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# vi: ts=4 sw=4\n\n\n\n\n################################################################################\n# Code for querying and controlling beamline components that 'affect' the\n# beam. (Beam energy, beam flux, etc.)\n################################################################################\n# Known Bugs:\n# N/A\n################################################################################\n# TODO:\n# Search for \"TODO\" below.\n################################################################################\n\n\n# Notes\n################################################################################\n# verbosity=0 : Output nothing\n# verbosity=1 : Output only final (minimal) result\n# verbosity=2 : Output 'regular' amounts of information/data\n# verbosity=3 : Output all useful information\n# verbosity=4 : Output marginally useful things (e.g. essentially redundant/obvious things)\n# verbosity=5 : Output everything (e.g. for testing)\n\n\n\n# These imports are not necessary if part of the startup sequence.\n# If this file is called separately, some of these may be needed.\n#import numpy as np\n#from epics import caget, caput\n#from time import sleep\n\n#from ophyd import EpicsMotor, Device, Component as Cpt\n#from ophyd.commands import * # For mov, movr\n\n#define pilatus_name and _Epicsname, instead of pilatus300 or pilatus2M\n#moved to 20-area-detectors.py\n#pilatus_name = pilatus2M\n#pilatus_Epicsname = '{Det:PIL2M}'\n\n\nclass BeamlineDetector(object):\n \n def __init__(self, detector, **md):\n \n self.detector = detector\n \n self.md = md\n \n \n def get_md(self, prefix='detector_', **md):\n '''Returns a dictionary of the current metadata.\n The 'prefix' argument is prepended to all the md keys, which allows the\n metadata to be grouped with other metadata in a clear way. 
(Especially,\n to make it explicit that this metadata came from the beamline.)'''\n \n md_return = self.md.copy()\n \n # Include the user-specified metadata\n md_return.update(md)\n\n # Add an optional prefix\n if prefix is not None:\n md_return = { '{:s}{:s}'.format(prefix, key) : value for key, value in md_return.items() }\n \n return md_return\n \n \nclass CMS_SAXS_Detector(BeamlineDetector):\n\n def setCalibration(self, direct_beam, distance, detector_position=None, pixel_size=0.172):\n \n self.direct_beam = direct_beam\n self.distance = distance\n if detector_position is None:\n self.detector_position = [SAXSx.user_readback.value, SAXSy.user_readback.value]\n else:\n self.detector_position = detector_position\n self.pixel_size = pixel_size\n \n \n def get_md(self, prefix='detector_SAXS_', **md):\n \n md_return = self.md.copy()\n \n x0, y0 = self.direct_beam\n position_defined_x, position_defined_y = self.detector_position\n position_current_x, position_current_y = SAXSx.user_readback.value, SAXSy.user_readback.value\n \n \n md_return['name'] = self.detector.name\n md_return['x0_pix'] = round( x0 + (position_current_x-position_defined_x)/self.pixel_size , 2 )\n md_return['y0_pix'] = round( y0 + (position_current_y-position_defined_y)/self.pixel_size , 2 )\n md_return['distance_m'] = self.distance\n \n # Include the user-specified metadata\n md_return.update(md)\n\n # Add an optional prefix\n if prefix is not None:\n md_return = { '{:s}{:s}'.format(prefix, key) : value for key, value in md_return.items() }\n \n return md_return\n\n\nclass BeamlineElement(object):\n '''Defines a component of the beamline that (may) intersect the x-ray beam.'''\n \n def __init__(self, name, zposition, description=\"\", pv=None, **args):\n \n self.name = name\n self.zposition = zposition\n self.description = description\n \n self.conversion_factor = 1\n \n self._pv_main = pv\n \n self.has_flux = True\n \n \n def state(self):\n \"\"\"\n Returns the current state of the beamline element. Common states:\n out - Element is out of the way of the beam (and should not be blocking).\n in - Element is in the beam (but should not be blocking).\n block - Element is in the beam, and should be blocking the beam.\n undefined - Element is in an unexpected state.\n \"\"\"\n \n return \"out\"\n\n \n def transmission(self, t=None, verbosity=0):\n \"\"\"\n Returns the predicted transmission of this beamline element, based on \n its current state.\n \"\"\"\n \n if t is not None:\n print(\"WARNING: To change transmission, use 'setTransmission'.\")\n print(\"WARNING: Beam transmission was not changed.\")\n return\n \n tr_tot = 1.0\n \n if verbosity>=2:\n print('{:s} transmission = {:.6g}'.format(self.name, tr_tot))\n \n \n # Assume a generic beamline element doesn't block/perturb the beam\n return tr_tot\n \n \n def flux(self, verbosity=3):\n \n reading = self.reading(verbosity=0)\n flux = self.conversion_factor*reading # ph/s\n \n if verbosity>=2:\n print('flux = {:.4g} ph/s'.format(flux))\n \n return flux\n \n \n \n \nclass Shutter(BeamlineElement):\n \n # Example\n # XF:11BMA-PPS{PSh}Enbl-Sts\n # Status: XF:11BMA-PPS{PSh}Pos-Sts 0 for open, 1 for close\n # Open: XF:11BMA-PPS{PSh}Cmd:Opn-Cmd\n # Close: XF:11BMA-PPS{PSh}Cmd:Cls-Cmd\n \n def __init__(self, name, zposition, description=\"\", pv=None, **args):\n \n super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args)\n self.has_flux = False\n \n \n def state(self):\n \"\"\"\n Returns the current state of the beamline element. 
Common states:\n out - Element is out of the way of the beam (and should not be blocking).\n in - Element is in the beam (but should not be blocking).\n block - Element is in the beam, and should be blocking the beam.\n undefined - Element is in an unexpected state.\n \"\"\"\n \n state_n = caget(self._pv_main+'Pos-Sts')\n \n if state_n is 0:\n return \"out\"\n elif state_n is 1:\n return \"block\"\n else:\n return \"undefined\" \n \n \n def open(self, verbosity=3):\n \n if verbosity>=3:\n print('Opening {:s}...'.format(self.name))\n \n # E.g. #XF:11BMB-VA{Slt:4-GV:1}Cmd:Opn-Cmd\n pv = self._pv_main + 'Cmd:Opn-Cmd'\n #caput(pv, 1) # TODO: Test this.\n \n def close(self, verbosity=3):\n \n if verbosity>=3:\n print('Closing {:s}...'.format(self.name))\n \n pv = self._pv_main + 'Cmd:Cls-Cmd'\n #caput(pv, 1) # TODO: Test this.\n\n \n\n\n\nclass GateValve(Shutter):\n \n # Example\n # Status: XF:11BMB-VA{Slt:4-GV:1}Pos-Sts 1 for open, 0 for close\n # Open: XF:11BMB-VA{Slt:4-GV:1}Cmd:Opn-Cmd\n # Close: XF:11BMB-VA{Slt:4-GV:1}Cmd:Cls-Cmd\n \n \n def state(self):\n \"\"\"\n Returns the current state of the beamline element. Common states:\n out - Element is out of the way of the beam (and should not be blocking).\n in - Element is in the beam (but should not be blocking).\n block - Element is in the beam, and should be blocking the beam.\n undefined - Element is in an unexpected state.\n \"\"\"\n \n state_n = caget(self._pv_main+'Pos-Sts')\n \n if state_n is 1:\n return \"out\"\n elif state_n is 0:\n return \"block\"\n else:\n return \"undefined\" \n \n\n\nclass ThreePoleWiggler(BeamlineElement):\n \n def __init__(self, name='3PW', zposition=0.0, description='Three-pole wiggler source of x-rays', **args):\n \n \n super().__init__(name=name, zposition=zposition, description=description, **args)\n \n # TODO: Find out the right conversion factor\n self.conversion_factor = 3e18/500.0 #(ph/s)/mA\n \n\n def state(self):\n \"\"\"\n Returns the current state of the beamline element. 
Common states:\n out - Element is out of the way of the beam (and should not be blocking).\n in - Element is in the beam (but should not be blocking).\n block - Element is in the beam, and should be blocking the beam.\n undefined - Element is in an unexpected state.\n \"\"\"\n \n position = caget('SR:C11-ID:G5{3PW:1}Mtr.RBV')\n \n # TODO: Instead use the 'inserted' flag?\n # caget('SR:C11-ID:G5{3PW:1}InsertedFlag')\n \n if abs(position-0)<3:\n return \"in\"\n \n elif abs(position - -189.0)<10:\n return \"out\"\n \n else:\n return \"undefined\"\n \n \n def reading(self, verbosity=3):\n \n if self.state() is 'in':\n \n ring_current = caget('SR:OPS-BI{DCCT:1}I:Real-I')\n if verbosity>=2:\n print('{:s} is inserted; ring current = {:.1f} mA'.format(self.name, ring_current))\n \n return ring_current\n \n else:\n if verbosity>=2:\n print('{:s} is not inserted.'.format(self.name))\n \n return 0\n \n\nclass Monitor(BeamlineElement):\n \n def quickReading(self, verbosity=3, delay=1.0):\n \"\"\"\n Puts the diagnostic into the beam, takes a reading, and removes the\n diagnostic.\n \"\"\"\n \n self.insert()\n time.sleep(delay)\n value = self.reading(verbosity=verbosity)\n \n self.retract()\n time.sleep(delay)\n \n return value\n \n \n \nclass DiagnosticScreen(Monitor):\n \n #XF:11BMB-BI{FS:4}Pos-Sts\n \n def __init__(self, name, zposition, description=\"\", pv=None, epics_signal=None, **args):\n \n super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args)\n self.epics_signal = epics_signal\n self.has_flux = False\n \n \n def state(self):\n \"\"\"\n Returns the current state of the beamline element. Common states:\n out - Element is out of the way of the beam (and should not be blocking).\n in - Element is in the beam (but should not be blocking).\n block - Element is in the beam, and should be blocking the beam.\n undefined - Element is in an unexpected state.\n \"\"\"\n \n state_n = caget(self._pv_main+'Pos-Sts')\n \n if state_n is 0:\n return \"out\"\n elif state_n is 1:\n return \"block\"\n else:\n return \"undefined\" \n \n \n def insert(self, verbosity=3):\n \n if verbosity>=3:\n print('Inserting {:s}...'.format(self.name))\n \n # E.g. 
#XF:11BMB-VA{Slt:4-GV:1}Cmd:Opn-Cmd\n pv = self._pv_main + 'Cmd:In-Cmd'\n caput(pv, 1)\n \n def retract(self, verbosity=3):\n \n if verbosity>=3:\n print('Retracting {:s}...'.format(self.name))\n \n pv = self._pv_main + 'Cmd:Out-Cmd'\n caput(pv, 1)\n \n \n def reading(self, verbosity=3):\n \n value = self.epics_signal.stats1.total.value\n \n if self.state() is 'block':\n \n ring_current = caget('SR:OPS-BI{DCCT:1}I:Real-I')\n if verbosity>=2:\n print('{:s} is inserted; reading = {:.4g}'.format(self.name, value))\n \n return value\n \n else:\n if verbosity>=2:\n print('{:s} is not inserted.'.format(self.name))\n \n return 0\n \n \n \n \nclass PointDiode_CMS(Monitor):\n \n def __init__(self, name='bim6 point diode', zposition=58.3, description=\"Bar holding a point-diode, downstream of sample.\", pv='XF:11BMB-BI{IM:2}EM180:Current1:MeanValue_RBV', epics_signal=None, **args):\n \n super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args)\n self.has_flux = True\n \n if epics_signal is None:\n \n #bim6 = EpicsSignalROWait(\"XF:11BMB-BI{IM:2}EM180:Current1:MeanValue_RBV\", wait_time=1, name='bim6')\n #bim6_integrating = EpicsSignalROIntegrate(\"XF:11BMB-BI{IM:2}EM180:Current1:MeanValue_RBV\", wait_time=0.5, integrate_num=8, integrate_delay=0.1, name='bim6')\n \n self.epics_signal = bim6_integrating\n \n else:\n self.epics_signal = epics_signal\n \n \n # The beam (at the ion chamber) is roughly 0.50x0.50 mm.\n # If we slit down to 0.20x0.05 mm, we are capturing 0.4*0.25 = 0.1 of the beam.\n # bim6 reads 70000 cts (of course this depends on settings) when ion chamber reads 1.3e11 ph/s.\n # (settings: trans = 5e-4)\n # So conversion_factor is roughly:\n self.conversion_factor = 1.3e11*0.1/70000. # (ph/s)/cts\n \n self.in_position_x = 0.0\n self.in_position_y = 0.0\n\n self.out_position_x = 0.0\n self.out_position_y = -16.0\n \n self.position_tolerance = 0.1\n \n \n def state(self):\n \"\"\"\n Returns the current state of the beamline element. 
Common states:\n        out - Element is out of the way of the beam (and should not be blocking).\n        in - Element is in the beam (but should not be blocking).\n        block - Element is in the beam, and should be blocking the beam.\n        undefined - Element is in an unexpected state.\n        \"\"\"\n        \n        position_x = DETx.user_readback.value\n        position_y = DETy.user_readback.value\n        \n        if abs(position_x-self.out_position_x)<self.position_tolerance and abs(position_y-self.out_position_y)<self.position_tolerance:\n            return \"out\"\n        elif abs(position_x-self.in_position_x)<self.position_tolerance and abs(position_y-self.in_position_y)<self.position_tolerance:\n            return \"block\"\n        else:\n            return \"undefined\"\n        \n        \n    def insert(self, verbosity=3):\n        \n        if verbosity>=3:\n            print('Inserting {:s}...'.format(self.name))\n        \n        #mov( [DETx, DETy], [self.in_position_x, self.in_position_y] )\n        DETx.move = self.in_position_x\n        DETy.move = self.in_position_y\n        \n    def retract(self, verbosity=3):\n        \n        if verbosity>=3:\n            print('Retracting {:s}...'.format(self.name))\n        \n        #mov( [DETx, DETy], [self.out_position_x, self.out_position_y] )\n        DETx.move = self.out_position_x\n        DETy.move = self.out_position_y        \n        \n    def reading(self, verbosity=3):\n        \n        value = self.epics_signal.read()[self.epics_signal.name]['value']\n        \n        if self.state() == 'block':\n            \n            if verbosity>=2:\n                print('{:s} is inserted; reading = {:.4g}'.format(self.name, value))\n            \n            return value\n            \n        else:\n            if verbosity>=2:\n                print('{:s} is not inserted.'.format(self.name))\n            \n            return value\n        \n        \n\nclass IonChamber_CMS(Monitor):\n    \n    def __init__(self, name='bim3 ionchamber', zposition=49, description=\"Ion chamber (FMB Oxford I404) at start of endstation hutch\", pv=None, beam=None, **args):\n        \n        super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args)\n        self.has_flux = True\n        \n        self.beam = beam\n        \n        # PVs\n        import epics\n        self.v1 = epics.PV('XF:11BMB-BI{IM:3}:IC1_MON')\n        self.v2 = epics.PV('XF:11BMB-BI{IM:3}:IC2_MON')\n        self.h1 = epics.PV('XF:11BMB-BI{IM:3}:IC3_MON')\n        self.h2 = epics.PV('XF:11BMB-BI{IM:3}:IC4_MON')\n\n    \n    def state(self):\n        \n        return \"in\"\n    \n    \n    def v_position(self):\n        \n        total = self.v1.value+self.v2.value\n        if total>0:\n            return (self.v1.value-self.v2.value)/(total)\n        else:\n            return 0\n\n    def h_position(self):\n        \n        total = self.h1.value+self.h2.value\n        if total>0:\n            return (self.h1.value-self.h2.value)/(total)\n        else:\n            return 0\n    \n    def reading(self, verbosity=3):\n        \n        total = self.h1.value + self.h2.value + self.v1.value + self.v2.value\n        \n        if verbosity>=3:\n            print('Reading for {:s} ({:s})'.format(self.name, self.description))\n            print('  Horizontal: {:9.4g} + {:9.4g} = {:9.4g}'.format(self.h1.value, self.h2.value, self.h1.value+self.h2.value))\n            print('    position: {:.3f}'.format(self.h_position()))\n            print('  Vertical:   {:9.4g} + {:9.4g} = {:9.4g}'.format(self.v1.value, self.v2.value, self.v1.value+self.v2.value))\n            print('    position: {:.3f}'.format(self.v_position()))\n\n        if verbosity>=2:\n            \n            print('  Total:      {:9.4g}'.format(total))\n        \n        return total\n    \n    \n    def current_to_flux(self, current):\n        \n        energy_keV = self.beam.energy(verbosity=0)\n        \n        V_ion = 0.036 ## ionization energy of N2 gas in [keV]\n        IC_len = 6.0 ## active length of Ion Chamber in [cm]\n        qe = 1.602e-19 ## electron charge in [C]\n\n        ## Absorption length [cm] of gas N2 (1 atm, 1.131 g/L) vs E [keV]\n        # based on polynomial fit to the calculated abs length data from: henke.lbl.gov/optical_constants/atten2.html \n        # see /home/xf11bm/masa/atten_len_N2* \n        abs_len = 355.21 - 112.26*energy_keV + 11.200*np.square(energy_keV) - 0.10611*np.power(energy_keV,3.0)\n\n        N_abs = current*V_ion/(qe*energy_keV)\n        flux = N_abs / (1.0 - np.exp(-IC_len/abs_len))\n\n        return flux\n    \n    \n    def flux(self, verbosity=3):\n        \n        if self.reading(verbosity=0) < 5e-10:\n            return 0.0\n        \n        h1 = self.current_to_flux(self.h1.value)\n        
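        # (editor's note) current_to_flux() above converts each quadrant current\n        # independently: N_abs = I*V_ion/(qe*E) is the photon absorption rate\n        # implied by the measured ionization current, and dividing by\n        # (1 - exp(-IC_len/abs_len)) corrects for the beam fraction actually\n        # absorbed over the 6 cm active length. Rough worked example (editor's\n        # numbers, not from the source): at E = 13.5 keV the abs_len polynomial\n        # gives ~6.2 m, so only ~1% of the beam is absorbed, and 1 nA of\n        # quadrant current corresponds to roughly 1.7e9 ph/s.\n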
h2 = self.current_to_flux(self.h2.value)\n h_total = h1 + h2\n v1 = self.current_to_flux(self.v1.value)\n v2 = self.current_to_flux(self.v2.value)\n v_total = v1 + v2\n \n total = h_total + v_total\n avg = total*0.5\n \n if verbosity>=3:\n print('Flux for {:s} ({:s})'.format(self.name, self.description))\n print(' Horizontal: {:9.4g} + {:9.4g} = {:9.4g} ph/s'.format(h1, h2, h1+h2))\n print(' position: {:.3f}'.format(self.h_position()))\n print(' Vertical: {:9.4g} + {:9.4g} = {:9.4g} ph/s'.format(v1, v2, v1+v2))\n print(' position: {:.3f}'.format(self.v_position()))\n\n if verbosity>=2:\n \n print(' Average: {:9.4g} ph/s'.format(avg))\n \n return avg \n \n \n \n#ionchamber = IonChamber_CMS(beam=beam)\n\n\nclass Scintillator_CMS(Monitor):\n \n def __init__(self, name='bim4 scintillator', zposition=57, description=\"Scintillation detector (FMB Oxford C400) between S3 and KB tank in endstation hutch. Captures scattering off of a Kapton film at 45 degrees.\", pv=None, beam=None, **args):\n \n super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args)\n self.has_flux = True\n \n self.beam = beam\n \n # PVs\n import epics\n self.sec = epics.PV('XF:11BMB-BI{IM:4}:GET_PERIOD') # integration time in [sec]\n self.cts = epics.PV('XF:11BMB-BI{IM:4}:C1_1') # raw counts\n\n\n def state(self):\n \n return \"in\"\n\n \n def reading(self, verbosity=3):\n \n if self.sec.value == 0.0:\n print('Counting time set to zero. Check CSS settings for FMB Oxford C400.')\n return 0 \n else:\n sec = self.sec.value\n cts = self.cts.value\n cps = cts/sec\n \n if verbosity>=3:\n print('Reading for {:s} ({:s})'.format(self.name, self.description))\n print(' Count time: {:9.4g} sec'.format(sec))\n print(' Raw counts: {:9.4g} counts'.format(cts))\n\n if verbosity>=2:\n print(' Count rate: {:9.4g} counts/sec'.format(cps))\n \n return cps\n \n \n def cps_to_flux(self, cps):\n\n ### Ratio between estimated beam flux to raw scintillator counts \n # (see Olog entry on July 7, 2017)\n # For unslitted, unattenuated beam at 13.5 keV, \n # BIM4 yields 2.86E5 cts/sec for 1.85E11 ph/s at BIM3:\n # 1.85E11 / 2.86E5 = 647000 (ph/s)/(cts/sec).\n cps_to_flux_factor = 647000.\n\n flux = cps_to_flux_factor * cps\n\n return flux\n \n \n def flux(self, verbosity=3):\n \n if self.reading(verbosity=0) < 5e-10:\n return 0.0\n\n flux = self.cps_to_flux(self.reading(verbosity=0)) \n \n \n if verbosity>=3:\n print('Flux for {:s} ({:s})'.format(self.name, self.description))\n\n if verbosity>=2:\n print(' Beam flux: {:9.4g} ph/s'.format(flux))\n \n return flux \n \n \nclass DiamondDiode_CMS(Monitor):\n \n def __init__(self, name='bim5 diamonddiode', zposition=58.2, description=\"Diamond diode BPM (Dectris RIGI via FMB Oxford F460) between KB tank and sample chamber in endstation hutch. 
Needs to be insered into beam via IM:5.\", pv=None, beam=None, **args):\n \n super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args)\n self.has_flux = True\n \n self.beam = beam\n \n # PVs\n import epics\n self.i0 = epics.PV('XF:11BMB-BI{BPM:1}Cur:I0-I') # upper left\n self.i1 = epics.PV('XF:11BMB-BI{BPM:1}Cur:I1-I') # upper right\n self.i2 = epics.PV('XF:11BMB-BI{BPM:1}Cur:I2-I') # lower left\n self.i3 = epics.PV('XF:11BMB-BI{BPM:1}Cur:I3-I') # lower right\n \n def state(self):\n\n # TODO: fix this so it queries state of IM:5 \n return \"in\"\n \n \n def v_position(self):\n \n total = self.i0.value + self.i1.value + self.i2.value + self.i3.value\n if total>0:\n return (self.i0.value + self.i1.value - self.i2.value - self.i3.value)/(total)\n else:\n return 0\n\n def h_position(self):\n \n total = self.i0.value + self.i1.value + self.i2.value + self.i3.value\n if total>0:\n return (self.i1.value + self.i3.value - self.i0.value - self.i2.value)/(total)\n else:\n return 0\n \n def reading(self, verbosity=3):\n \n #total = self.i0.value + self.i1.value + self.i2.value + self.i3.value\n ## 07/12/2017 Total dark current with beam off is ~9.3e-10 A.\n dark_current = 9.3e-10\n total = self.i0.value + self.i1.value + self.i2.value + self.i3.value - dark_current\n \n if verbosity>=3:\n print('Reading for {:s} ({:s})'.format(self.name, self.description))\n print(' Horizontal:')\n print(' Right: {:9.4g} + {:9.4g} = {:9.4g} A'.format(self.i1.value, self.i3.value, self.i1.value+self.i3.value))\n print(' Left: {:9.4g} + {:9.4g} = {:9.4g} A'.format(self.i0.value, self.i2.value, self.i0.value+self.i2.value))\n print(' Position [-1(L) to 1(R), 0 at center]: {:.3f}'.format(self.h_position()))\n print(' Vertical:')\n print(' Top: {:9.4g} + {:9.4g} = {:9.4g} A'.format(self.i0.value, self.i1.value, self.i0.value+self.i1.value))\n print(' Bottom: {:9.4g} + {:9.4g} = {:9.4g} A'.format(self.i2.value, self.i3.value, self.i2.value+self.i3.value))\n print(' Position [-1(B) to 1(T), 0 at center]: {:.3f}'.format(self.v_position()))\n\n if verbosity>=2:\n \n print(' Total current: {:9.4g} A'.format(total))\n \n return total\n \n \n def current_to_flux(self, current):\n\n ### Ratio between estimated beam flux to raw TOTAL current for the 4 quadrants \n # (see Olog entry on July 7, 2017).\n # For unslitted, unattenuated beam at 13.5 keV, \n # BIM5 yields a TOTAL current of 4.8E-8 A at ~230 mA ring current, \n # corresponding to 1.38E11 ph/s at BIM3:\n # 1.38E11 / 4.8E-8 = 0.29E19 (ph/s)/A.\n # With dark current (total = 9.3e-10 A = 0.093e-8 A) taken into account, \n # 1.38E11 / 4.7E-8 = 0.294E19 (ph/s)/A.\n\n current_to_flux_factor = 2.94E18\n\n flux = current_to_flux_factor * current\n\n return flux\n\n \n def flux(self, verbosity=3):\n \n if self.reading(verbosity=0) < 1e-11:\n return 0.0\n \n right = self.current_to_flux(self.i1.value+self.i3.value)\n left = self.current_to_flux(self.i0.value+self.i2.value)\n top = self.current_to_flux(self.i0.value+self.i1.value)\n bottom = self.current_to_flux(self.i2.value+self.i3.value)\n total = self.current_to_flux(self.reading(verbosity=0))\n \n if verbosity>=3:\n print('Flux for {:s} ({:s})'.format(self.name, self.description))\n print(' Horizontal:')\n print(' Right: {:9.4g} ph/s'.format(right))\n print(' Left: {:9.4g} ph/s'.format(left))\n print(' Position [-1(L) to 1(R), 0 at center]: {:.3f}'.format(self.h_position()))\n print(' Vertical:')\n print(' Top: {:9.4g} ph/s'.format(top))\n print(' Bottom: {:9.4g} ph/s'.format(bottom))\n print(' 
Position [-1(B) to 1(T), 0 at center]: {:.3f}'.format(self.v_position()))\n\n if verbosity>=2:\n \n print(' Total flux: {:9.4g} ph/s'.format(total))\n \n return total \n\n\n# CMSBeam\n################################################################################\nclass CMSBeam(object):\n \"\"\"\n This class represents the 'beam' at the beamline. This collects together aspects\n of querying or changing beam properties, including the energy (or wavelength), \n the beam intensity (or measuring flux), and so forth.\n \"\"\"\n \n def __init__(self):\n \n self.mono_bragg_pv = 'XF:11BMA-OP{Mono:DMM-Ax:Bragg}Mtr.RBV'\n \n # (planck constant * speed of light)/(electronic charge)\n self.hc_over_e = 1.23984197e-6 # m^3 kg s^-3 Amp^-1 = eV*m\n self.hc_over_e_keVpA = self.hc_over_e*1e7 # = 12.4 keV*Angstrom\n \n # DMM bilayer pitch in Angstroms, according to Rigaku metrology report\n self.dmm_dsp = 20.1 # Angstroms\n \n \n \n self.mono = BeamlineElement('monochromator', 26.5)\n def transmission(verbosity=0):\n return 1e-7\n self.mono.transmission = transmission\n\n \n self.attenuator = BeamlineElement('attenuator', 53.8, description=\"Attenuator/filter box\")\n self.attenuator.has_flux = False\n def reading(verbosity=0):\n return self.transmission(verbosity=verbosity)\n self.attenuator.reading = reading\n self.attenuator.transmission = self.transmission\n\n if False:\n self.fs1 = DiagnosticScreen( 'fs1', 27.2, pv='XF:11BMA-BI{FS:1}', epics_signal=StandardProsilica('XF:11BMA-BI{FS:1-Cam:1}', name='fs1') )\n #self.fs2 = DiagnosticScreen( 'fs2', 29.1, pv='XF:11BMA-BI{FS:2}', epics_signal=StandardProsilica('XF:11BMA-BI{FS:2-Cam:1}', name='fs2') )\n self.fs3 = DiagnosticScreen( 'fs3', 55.8, pv='XF:11BMB-BI{FS:3}', epics_signal=StandardProsilica('XF:11BMB-BI{FS:3-Cam:1}', name='fs3') )\n self.fs4 = DiagnosticScreen( 'fs4', 58.2, pv='XF:11BMB-BI{FS:4}', epics_signal=StandardProsilica('XF:11BMB-BI{FS:4-Cam:1}', name='fs4') )\n self.fs5 = DiagnosticScreen( 'fs5', 70.0, pv='XF:11BMB-BI{FS:Test-Cam:1}', epics_signal=StandardProsilica('XF:11BMB-BI{FS:4-Cam:1}', name='fs5') )\n else:\n # Rely on the fact that these are defined in 20-area-detectors.py\n self.fs1 = DiagnosticScreen( 'fs1', 27.2, pv='XF:11BMA-BI{FS:1}', epics_signal=fs1 )\n #self.fs2 = DiagnosticScreen( 'fs2', 29.1, pv='XF:11BMA-BI{FS:2}', epics_signal=fs2 )\n self.fs3 = DiagnosticScreen( 'fs3', 55.8, pv='XF:11BMB-BI{FS:3}', epics_signal=fs3 )\n self.fs4 = DiagnosticScreen( 'fs4', 58.2, pv='XF:11BMB-BI{FS:4}', epics_signal=fs4 )\n self.fs5 = DiagnosticScreen( 'fs5', 70.0, pv='XF:11BMB-BI{FS:Test-Cam:1}', epics_signal=fs5 )\n \n \n self.bim3 = IonChamber_CMS(beam=self)\n self.bim4 = Scintillator_CMS()\n self.beam_defining_slit = s4\n self.bim5 = DiamondDiode_CMS()\n self.bim6 = PointDiode_CMS()\n \n self.GVdsbig = GateValve('GV ds big', 60.0, pv='XF:11BMB-VA{Chm:Det-GV:1}')\n \n \n \n self.elements = []\n \n # Front End\n self.elements.append(ThreePoleWiggler())\n #SR:C03-EPS{PLC:1}Sts:BM_BMPS_Opn-Sts BMPS\n self.elements.append(GateValve('GV1', 20.0, pv='FE:C03A-VA{GV:1}DB:'))\n self.elements.append(GateValve('GV2', 21.0, pv='FE:C03A-VA{GV:2}DB:'))\n \n \n # FOE\n self.elements.append(Shutter('FE shutter', 25.0, pv='XF:11BM-PPS{Sh:FE}'))\n self.elements.append(GateValve('GV', 26.0, pv='FE:C11B-VA{GV:2}'))\n self.elements.append(self.mono)\n self.elements.append(self.fs1)\n # bim1\n # slit0\n # bim2\n self.elements.append(GateValve('GV', 28.0, pv='XF:11BMA-VA{Slt:0-GV:1}'))\n self.elements.append(BeamlineElement('mirror', 29.1))\n 
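        # (editor's note) append order is cosmetic here: the list is sorted by\n        # zposition just below, i.e.\n        #     self.elements.sort(key=lambda o: o.zposition)\n        # so fluxes() can walk the elements from upstream to downstream.\n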
self.elements.append(GateValve('GV', 30.5, pv='XF:11BMA-VA{Mir:Tor-GV:1}'))\n self.elements.append(BeamlineElement('fs2 (manual)', 30.9)) # self.elements.append(self.fs2)\n self.elements.append(Shutter('photon shutter', 33.7, pv='XF:11BMA-PPS{PSh}'))\n self.elements.append(GateValve('GV', 34.0, pv='XF:11BMA-VA{PSh:1-GV:1}'))\n \n # Endstation\n self.elements.append(self.bim3)\n # Experimental shutter 49.5\n self.elements.append(self.attenuator)\n self.elements.append(self.fs3)\n self.elements.append(self.bim4) # scintillation detector\n self.elements.append(BeamlineElement('KB mirrors', 57.8))\n self.elements.append(self.fs4)\n self.elements.append(self.bim5) # diamond diode BPM\n # im4\n #self.elements.append(GateValve('GV us small', 58.5, pv='XF:11BMB-VA{Slt:4-GV:1}'))\n \n \n self.elements.append(BeamlineElement('sample', 58.8))\n self.elements.append(self.bim6) # dsmon\n self.elements.append(BeamlineElement('WAXS detector', 59.0))\n self.elements.append(self.GVdsbig)\n self.elements.append(BeamlineElement('SAXS detector', 58.8+5))\n \n \n \n # Sort by position along the beam\n self.elements.sort(key=lambda o: o.zposition, reverse=False)\n \n \n # Monochromator\n ########################################\n \n def energy(self, verbosity=3):\n \"\"\"\n Returns the current x-ray photon energy (in keV).\n \"\"\"\n \n # Current angle of monochromator multilayer crystal\n Bragg_deg = caget(self.mono_bragg_pv)\n Bragg_rad = np.radians(Bragg_deg)\n \n wavelength_A = 2.*self.dmm_dsp*np.sin(Bragg_rad)\n wavelength_m = wavelength_A*1e-10\n\n energy_eV = self.hc_over_e/wavelength_m\n energy_keV = energy_eV/1000.\n \n if verbosity>=3:\n print('E = {:.2f} keV, wavelength = {:.4f} Å, Bragg = {:.6f} rad = {:.4f} deg'.format(energy_keV, wavelength_A, Bragg_rad, Bragg_deg))\n \n elif verbosity>=1:\n print('E = {:.3f} keV'.format(energy_keV))\n \n return energy_keV\n \n \n def wavelength(self, verbosity=3):\n \"\"\"\n Returns the current x-ray photon wavelength (in Angstroms).\n \"\"\"\n \n # Current angle of monochromator multilayer crystal\n Bragg_deg = caget(self.mono_bragg_pv)\n Bragg_rad = np.radians(Bragg_deg)\n \n wavelength_A = 2.*self.dmm_dsp*np.sin(Bragg_rad)\n wavelength_m = wavelength_A*1e-10\n\n # (planck constant * speed of light)/(electronic charge)\n \n energy_eV = self.hc_over_e/wavelength_m\n energy_keV = energy_eV/1000.\n \n if verbosity>=3:\n print('wavelength = {:.4f} Å, E = {:.2f} keV, Bragg = {:.6f} rad = {:.4f} deg'.format(wavelength_A, energy_keV, Bragg_rad, Bragg_deg))\n \n elif verbosity>=1:\n print('wavelength = {:.5f} Å'.format(wavelength_A))\n \n return wavelength_A\n \n \n def setEnergy(self, energy_keV, verbosity=3):\n \"\"\"\n Set the x-ray beam to the specified energy (by changing the\n monochromator angle.\n \"\"\"\n \n energy_eV = energy_keV*1000.\n wavelength_m = self.hc_over_e/energy_eV\n wavelength_A = wavelength_m*1.e10\n \n self.setWavelength(wavelength_A, verbosity=verbosity)\n \n return self.energy(verbosity=0)\n \n \n def setWavelength(self, wavelength_A, verbosity=3):\n \"\"\"\n Set the x-ray beam to the specified wavelength (by changing the\n monochromator angle.\n \"\"\"\n \n Bragg_deg_initial = caget(self.mono_bragg_pv)\n wavelength_m = wavelength_A*1.e-10\n Bragg_rad = np.arcsin(wavelength_A/(2.*self.dmm_dsp))\n Bragg_deg = np.degrees(Bragg_rad)\n \n print('mono_bragg will move to {:.4f}g deg'.format(Bragg_deg))\n response = input(' Are you sure? 
(y/[n]) ')\n if response is 'y' or response is 'Y':\n \n #mov(mono_bragg, Bragg_deg)\n mono_bragg.move = Bragg_deg\n \n if verbosity>=1:\n print('mono_bragg moved from {:.4f} deg to {:.4f} deg'.format(Bragg_deg_initial, Bragg_deg))\n \n elif verbosity>=1:\n print('No move was made.')\n \n return self.wavelength(verbosity=verbosity)\n\n \n # Slits\n ########################################\n \n def size(self, verbosity=3):\n \"\"\"\n Returns the current beam size (rough estimate).\n The return is (size_horizontal, size_vertical) (in mm).\n \"\"\"\n size_h = self.beam_defining_slit.xg.user_readback.value\n size_v = self.beam_defining_slit.yg.user_readback.value\n \n if verbosity>=3:\n print('Beam size:')\n print(' horizontal = {:.3f} mm'.format(size_h))\n print(' vertical = {:.3f} mm'.format(size_v))\n \n return size_h, size_v\n\n \n def setSize(self, horizontal, vertical, verbosity=3):\n \"\"\"\n Sets the beam size.\n \"\"\"\n \n h, v = self.size(verbosity=0)\n \n if verbosity>=3:\n print('Changing horizontal beam size from {:.3f} mm to {:.3f} mm'.format(h, horizontal))\n self.beam_defining_slit.xg.user_setpoint.value = horizontal\n \n if verbosity>=3:\n print('Changing vertical beam size from {:.3f} mm to {:.3f} mm'.format(v, vertical))\n \n self.beam_defining_slit.yg.user_setpoint.value = vertical\n \n \n def divergence(self, verbosity=3):\n \"\"\"\n Returns the beamline divergence.\n This is based on the Front End (FE) slits. The return is\n (horizontal, vertical) (in mrad).\n \"\"\"\n \n distance_m = 10.0 # distance from source to slits\n \n horizontal_mm = caget('FE:C11B-OP{Slt:12-Ax:X}t2.C')\n vertical_mm = caget('FE:C11B-OP{Slt:12-Ax:Y}t2.C')\n \n horizontal_mrad = horizontal_mm/distance_m\n vertical_mrad = vertical_mm/distance_m\n \n if verbosity>=3:\n print('Beam divergence:')\n print(' horizontal = {:.3f} mrad'.format(horizontal_mrad))\n print(' vertical = {:.3f} mrad'.format(vertical_mrad))\n \n return horizontal_mrad, vertical_mrad\n \n \n def setDivergence(self, horizontal, vertical, verbosity=3):\n \"\"\"\n Set beamline divergence (in mrad).\n This is controlled using the Front End (FE) slits.\n \"\"\"\n \n h, v = self.divergence(verbosity=0)\n\n distance_m = 10.0 # distance from source to slits\n \n horizontal_mm = horizontal*distance_m\n vertical_mm = vertical*distance_m\n \n if horizontal<0:\n if verbosity>=1:\n print(\"Horizontal divergence less than zero ({}) doesn't make sense.\".format(horizontal))\n \n elif horizontal>1.5:\n if verbosity>=1:\n print(\"Horizontal divergence should be less than 1.5 mrad.\")\n \n else:\n if verbosity>=3:\n print('Changing horizontal divergence from {:.3f} mrad to {:.3f} mrad.'.format(h, horizontal))\n caput('FE:C11B-OP{Slt:12-Ax:X}size', horizontal_mm)\n \n \n if vertical<0:\n if verbosity>=1:\n print(\"Vertical divergence less than zero ({}) doesn't make sense.\".format(vertical))\n \n elif vertical>0.15:\n if verbosity>=1:\n print(\"Vertical divergence should be less than 0.15 mrad.\")\n \n else:\n if verbosity>=3:\n print('Changing vertical divergence from {:.3f} mrad to {:.3f} mrad.'.format(v, vertical))\n caput('FE:C11B-OP{Slt:12-Ax:Y}size', vertical_mm)\n \n\n \n # Experimental Shutter\n ########################################\n \n def is_on(self, verbosity=3):\n '''Returns true if the beam is on (experimental shutter open).'''\n \n blade1 = caget('XF:11BMB-OP{PSh:2}Pos:1-Sts')\n blade2 = caget('XF:11BMB-OP{PSh:2}Pos:2-Sts')\n \n if blade1==1 and blade2==1:\n if verbosity>=4:\n print('Beam on (shutter open).')\n \n return True\n \n 
else:\n            if verbosity>=4:\n                print('Beam off (shutter closed).')\n            \n            return False\n    \n    \n    def on(self, verbosity=3, wait_time=0.1, poling_period=0.10, retry_time=2.0, max_retries=5):\n        '''Turn on the beam (open experimental shutter).\n        update: 090517, RL: change the wait_time from 0.005 to 0.1, change sleep to time.sleep'''\n        \n        if self.is_on(verbosity=0):\n            if verbosity>=4:\n                print('Beam on (shutter already open).')\n            \n        else:\n            \n            itry = 0\n            while (not self.is_on(verbosity=0)) and itry<max_retries:\n                \n                caput('XF:11BMB-OP{PSh:2}Cmd:Opn-Cmd', 1)\n                time.sleep(wait_time)\n                \n                start_time = time.time()\n                while (not self.is_on(verbosity=0)) and (time.time()-start_time)<retry_time:\n                    if verbosity>=5:\n                        print('  try {:d}, t = {:02.2f} s, state = {:s}'.format(itry+1, (time.time()-start_time), 'OPEN_____' if self.is_on(verbosity=0) else 'CLOSE===='))\n                    time.sleep(poling_period)\n                \n                itry += 1\n            \n\n            if verbosity>=4:\n                if self.is_on(verbosity=0):\n                    print('Beam on (shutter opened).')\n                else:\n                    print(\"Beam off (shutter didn't open).\")\n\n        \n    def off(self, verbosity=3, wait_time=0.1, poling_period=0.10, retry_time=2.0, max_retries=5):\n        '''Turn off the beam (close experimental shutter).\n        update: 090517, RL: change the wait_time from 0.005 to 0.1, change sleep to time.sleep'''\n        \n        if self.is_on(verbosity=0):\n            \n            itry = 0\n            while self.is_on(verbosity=0) and itry<max_retries:\n                \n                caput('XF:11BMB-OP{PSh:2}Cmd:Cls-Cmd', 1)\n                time.sleep(wait_time)\n                \n                start_time = time.time()\n                while self.is_on(verbosity=0) and (time.time()-start_time)<retry_time:\n                    if verbosity>=5:\n                        print('  try {:d}, t = {:02.2f} s, state = {:s}'.format(itry+1, (time.time()-start_time), 'OPEN_____' if self.is_on(verbosity=0) else 'CLOSE===='))\n                    time.sleep(poling_period)\n                \n                itry += 1\n\n\n\n            if verbosity>=4:\n                if self.is_on(verbosity=0):\n                    print(\"Beam on (shutter didn't close).\")\n                else:\n                    print('Beam off (shutter closed).')\n            \n        else:\n            if verbosity>=4:\n                print('Beam off (shutter already closed).')\n        \n    def blade1_is_on(self, verbosity=3):\n        '''Returns true if the beam is on (experimental shutter open).'''\n        \n        blade1 = caget('XF:11BMB-OP{PSh:2}Pos:1-Sts')\n        \n        if blade1==1:\n            if verbosity>=4:\n                print('Beam on (shutter open).')\n            \n            return True\n        \n        else:\n            if verbosity>=4:\n                print('Beam off (shutter closed).')\n            \n            return False\n\n    def blade2_is_on(self, verbosity=3):\n        '''Returns true if the beam is on (experimental shutter open).'''\n        \n        blade2 = caget('XF:11BMB-OP{PSh:2}Pos:2-Sts')\n        \n        if blade2==1:\n            if verbosity>=4:\n                print('Beam on (shutter open).')\n            \n            return True\n        \n        else:\n            if verbosity>=4:\n                print('Beam off (shutter closed).')\n            \n            return False\n    \n    def _test_on(self, verbosity=3, wait_time=0.1, poling_period=0.10, retry_time=2.0, max_retries=5):\n        '''Turn on the beam (open experimental shutter).'''\n        \n        #print('1')\n        #print(sam.clock())\n        if self.is_on(verbosity=0):\n            if verbosity>=4:\n                print('Beam on (shutter already open).')\n            \n        else:\n            \n            itry = 0\n            while (not self.blade1_is_on(verbosity=0)) and itry<max_retries:\n                \n                caput('XF:11BMB-OP{PSh:2}Cmd:Opn-Cmd', 1)\n                sleep(wait_time)\n                \n                start_time = time.time()\n                while (not self.blade1_is_on(verbosity=0)) and (time.time()-start_time)<retry_time:\n                    if verbosity>=5:\n                        print('  try {:d}, t = {:02.2f} s, state = {:s}'.format(itry+1, (time.time()-start_time), 'OPEN_____' if self.is_on(verbosity=0) else 'CLOSE===='))\n                    sleep(poling_period)\n                    #print('3')\n                    #print(sam.clock())\n                \n                itry += 1\n            \n\n            if verbosity>=4:\n                if self.blade1_is_on(verbosity=0):\n                    print('Beam on (shutter opened).')\n                else:\n                    print(\"Beam off (shutter didn't open).\")\n            #print('4')\n            #print(sam.clock())\n        \n        \n    def _test_off(self, verbosity=3, wait_time=0.1, poling_period=0.10, retry_time=2.0, max_retries=5):\n        '''Turn off the beam (close experimental shutter).'''\n        \n        #print('1')\n        #print(sam.clock())\n        \n        if self.is_on(verbosity=0):\n            \n            itry = 0\n            while self.is_on(verbosity=0) and itry<max_retries:\n                \n                caput('XF:11BMB-OP{PSh:2}Cmd:Cls-Cmd', 1)\n                time.sleep(wait_time)\n                \n                start_time = time.time()\n                while self.is_on(verbosity=0) and (time.time()-start_time)<retry_time:\n                    if verbosity>=5:\n                        print('  try {:d}, t = {:02.2f} s, state = {:s}'.format(itry+1, (time.time()-start_time), 'OPEN_____' if self.is_on(verbosity=0) else 'CLOSE===='))\n                    time.sleep(poling_period)\n                \n                #print('3')\n                #print(sam.clock())\n                \n                
itry += 1\n\n\n\n if verbosity>=4:\n if self.blade1_is_on(verbosity=0):\n print(\"Beam on (shutter didn't close).\")\n else:\n print('Beam off (shutter closed).')\n #print('4')\n #print(sam.clock())\n \n else:\n if verbosity>=4:\n print('Beam off (shutter already closed).') \n\n #print('5')\n #print(sam.clock())\n\n\n # Attenuator/Filter Box\n ########################################\n\n def transmission(self, verbosity=3):\n \"\"\"\n Returns the current beam transmission through the attenuator/filter box.\n To change the transmission, use 'setTransmission'.\n \"\"\"\n \n energy_keV = self.energy(verbosity=0)\n \n if energy_keV < 6.0 or energy_keV > 18.0:\n print('Transmission data not available at the current X-ray energy ({.2f} keV).'.format(energy_keV))\n \n else:\n \n # The states of the foils in the filter box\n N = [ caget('XF:11BMB-OP{{Fltr:{:d}}}Pos-Sts'.format(ifoil)) for ifoil in range(1, 8+1) ]\n \n tr_tot = self.calc_transmission_filters(N, verbosity=verbosity)\n \n return tr_tot\n\n\n def calc_transmission_filters(self, filter_settings, energy_keV=None, verbosity=3):\n \"\"\"\n Returns the computed transmission value for the given configuration of\n foils. Note that the foils are not actually moved. This is just a\n calculation.\n \n Parameters\n ----------\n filter_settings : array of length 8\n Each element must be either a zero (foil removed) or a 1 (foil blocking \n beam)\n energy_keV : float\n If 'None', the current energy is used. If specified, the calculation \n is performed for the requested energy.\n \n Returns \n \n\n -------\n transmission : float\n The computed transmission value of the x-ray beam through the filter box.\n \"\"\"\n \n if energy_keV is None:\n energy_keV = self.energy(verbosity=0)\n \n if len(filter_settings) != 8:\n print('States for all eight foils must be specified.')\n\n else:\n N = filter_settings\n \n E = energy_keV\n E2 = np.square(E)\n E3 = np.power(E, 3)\n \n\n # foil thickness blocking the beam\n N_Al = N[0] + 2*N[1] + 4*N[2] + 8*N[3]\n N_Nb = N[4] + 2*N[5] + 4*N[6] + 8*N[7]\n\n d_Nb = 0.1 # Thickness [mm] of one Nb foil \n d_Al = 0.25 # Thickness [mm] of one Al foil \n\n # Absorption length [mm] based on fits to LBL CXRO data for 6 < E < 19 keV\n l_Nb = 1.4476e-3 - 5.6011e-4 * E + 1.0401e-4 * E2 + 8.7961e-6 * E3\n l_Al = 5.2293e-3 - 1.3491e-3 * E + 1.7833e-4 * E2 + 1.4001e-4 * E3\n\n # transmission factors\n tr_Nb = np.exp(-N_Nb*d_Nb/l_Nb)\n tr_Al = np.exp(-N_Al*d_Al/l_Al)\n tr_tot = tr_Nb*tr_Al\n \n if verbosity>=5:\n print(' state: {} T = {:.6g}'.format(filter_settings, tr_tot))\n if verbosity>=4:\n print('{:d} × 0.25 mm Al ({:.4g}) and {:d} × 0.10 mm Nb ({:.4g})'.format(N_Al, tr_Al, N_Nb, tr_Nb) )\n if verbosity>=1:\n print('transmission = {:.6g}'.format(tr_tot))\n \n return tr_tot\n \n \n\n def set_attenuation_filters(self, filter_settings, verbosity=3):\n \"\"\"\n Sets the positions (in/out) for each of the foils in the attenuator/\n filter box. 
The input 'filter_settings' should be an array of length\n 8, where each element is either a zero (foil removed) or a 1 (foil\n blocking beam).\n \"\"\"\n \n if verbosity>=4:\n print('Filters:')\n # The states of the foils in the filter box\n filters_initial = [ caget('XF:11BMB-OP{{Fltr:{:d}}}Pos-Sts'.format(ifoil)) for ifoil in range(1, 8+1) ]\n print(' initial: {} T = {:.6g}'.format(filters_initial, self.calc_transmission_filters(filters_initial, verbosity=0)))\n print(' requested: {} T = {:.6g}'.format(filter_settings, self.calc_transmission_filters(filter_settings, verbosity=0)))\n \n if len(filter_settings) != 8:\n print('States for all eight foils must be specified.')\n \n else:\n \n for i, state in enumerate(filter_settings):\n \n ifoil = i+1\n \n if state==1:\n # Put foil #ifoil into the beam\n caput( 'XF:11BMB-OP{{Fltr:{:d}}}Cmd:In-Cmd'.format(ifoil) , 1 )\n \n elif state==0:\n # Remove foil #ifoil\n caput( 'XF:11BMB-OP{{Fltr:{:d}}}Cmd:Out-Cmd'.format(ifoil) , 1 )\n \n else:\n if verbosity>=3:\n state_actual = caget( 'XF:11BMB-OP{{Fltr:{:d}}}Pos-Sts'.format(ifoil) )\n state_actual_str = 'IN' if state_actual is 1 else 'OUT'\n print('WARNING: Filter state {} not recognized. Filter {:d} is {:s}.'.format(state, ifoil, state_actual_str))\n \n\n \n time.sleep(1.) # Wait for filter box to settle\n \n if verbosity>=4:\n filters_final = [ caget('XF:11BMB-OP{{Fltr:{:d}}}Pos-Sts'.format(ifoil)) for ifoil in range(1, 8+1) ]\n print(' final: {} T = {:.6g}'.format(filters_final, self.calc_transmission_filters(filters_final, verbosity=0)))\n\n \n def setTransmission(self, transmission, retries=3, tolerance=0.5, verbosity=3):\n \"\"\"\n Sets the transmission through the attenuator/filter box.\n Because the filter box has a discrete set of foils, it is impossible to\n exactly match a given transmission value. 
A nearby value will be\n selected.\n \"\"\"\n \n energy_keV = self.energy(verbosity=0)\n \n if energy_keV < 6.0 or energy_keV > 18.0:\n print('Transmission data not available at the current X-ray energy ({.2f} keV).'.format(energy_keV))\n \n elif transmission > 1.0:\n print('A transmission above 1.0 is not possible.')\n \n elif transmission < 1e-10:\n print('A transmission this low ({:g}) cannot be reliably achieved.'.format(transmission))\n \n else:\n \n E = energy_keV\n E2 = np.square(E)\n E3 = np.power(E, 3)\n \n d_Nb = 0.1 # Thickness [mm] of one Nb foil \n d_Al = 0.25 # Thickness [mm] of one Al foil \n\n # Absorption length [mm] based on fits to LBL CXRO data for 6 < E < 19 keV\n l_Nb = 1.4476e-3 - 5.6011e-4 * E + 1.0401e-4 * E2 + 8.7961e-6 * E3\n l_Al = 5.2293e-3 - 1.3491e-3 * E + 1.7833e-4 * E2 + 1.4001e-4 * E3\n\n d_l_Nb = d_Nb/l_Nb\n d_l_Al = d_Al/l_Al\n\n # Number of foils to be inserted (equivalent to \"XIA_attn.mac\" from X9) \n #N_Nb = int(-log(transmission)/d_l_Nb)\n ##N_Al = int((-log(transmission) - N_Nb*d_l_Nb)/(d_l_Al-0.5))\n #N_Al = int((-log(transmission) - N_Nb*d_l_Nb)/d_l_Al)\n\n # Number of foils to be inserted (picks a set that gives smallest deviation from requested transmission)\n dev = []\n for i in np.arange(16):\n for j in np.arange(16):\n dev_ij = abs(transmission - exp(-i*d_l_Nb)*exp(-j*d_l_Al))\n dev.append(dev_ij)\n if (dev_ij == min(dev)):\n N_Nb = i # number of Nb foils selected\n N_Al = j # number of Al foils selected\n \n \n\n\n N = []\n state = N_Al\n for i in np.arange(4):\n N.append(state % 2)\n state = int(state/2)\n\n state = N_Nb\n for i in np.arange(4):\n N.append(state % 2)\n state = int(state/2)\n\n self.set_attenuation_filters(N, verbosity=verbosity)\n \n \n # Check that transmission was actually correctly changed\n if abs(self.transmission(verbosity=0)-transmission)/transmission > tolerance:\n if retries>0:\n #time.sleep(0.5)\n # Try again\n return self.setTransmission(transmission, retries=retries-1, tolerance=tolerance, verbosity=verbosity)\n \n else:\n print(\"WARNING: transmission didn't update correctly (request: {}; actual: {})\".format(transmission, self.transmission(verbosity=0)))\n\n \n return self.transmission(verbosity=verbosity)\n\n\n\n\n\n # Flux estimates at various points along the beam\n ########################################\n \n # TBD\n \n \n # Flux diagnostics\n ########################################\n \n def fluxes(self, verbosity=3):\n \"\"\"\n Outputs a list of fluxes at various points along the beam. 
Also checks \n the state (in or out of the beam) of various components, to help identify\n if anything could be blocking the beam.\n \"\"\"\n \n if verbosity>=1:\n print('+--------+------------------+-----+-------------+-------------+-------------+')\n print('| pos | name |path | reading | flux (ph/s) | expected |')\n print('|--------|------------------|-----|-------------|-------------|-------------|')\n \n \n last_z = -100\n beam = True\n \n flux_expected = None\n \n for element in self.elements:\n \n state = element.state()\n if state is 'block':\n beam = False\n \n if verbosity>=4:\n if element.zposition >= 0 and last_z < 0:\n print('| Front End | | | | |')\n if element.zposition > 25 and last_z < 25:\n print('| FOE | | | | |')\n if element.zposition > 50 and last_z < 50:\n print('| Endstation | | | | |')\n last_z = element.zposition\n flux_expected\n if verbosity>=1:\n \n \n if state is 'in':\n if beam:\n path = '(|)'\n else:\n path = '(-)'\n elif state is 'out': \n \n\n if beam:\n path = ' | '\n else:\n path = '---'\n elif state is 'block':\n path = '[X]'\n beam = False\n \n elif state is 'undefined':\n if beam:\n path = '?|?'\n else:\n path = '?-?'\n \n else:\n path = '???'\n \n\n\n \n \n if flux_expected is None or not beam:\n flux_expected_str = ''\n else:\n flux_expected_str = '{:11.3g}'.format(flux_expected)\n flux_expected *= element.transmission(verbosity=0)\n\n\n \n \n if callable(getattr(element, 'reading', None)):\n reading_str = '{:11.3g}'.format(element.reading(verbosity=0))\n state = element.state()\n if element.has_flux and (state=='in' or state=='block'):\n flux_cur = element.flux(verbosity=0)\n flux_expected = flux_cur\n flux_str = '{:11.3g}'.format(flux_cur)\n else:\n flux_str = ''\n \n else:\n reading_str = ''\n flux_str = ''\n \n \n \n print('|{:5.1f} m | {:16.16} | {:s} | {:11.11} | {:11.11} | {:11.11} |'.format(element.zposition, element.name, path, reading_str, flux_str, flux_expected_str))\n \n \n #beam = True # For testing\n \n \n if verbosity>=1:\n print('+--------+------------------+-----+-------------+-------------+-------------+')\n\n \n \n\n\n # End class CMSBeam(object)\n ########################################\n \n\n\nbeam = CMSBeam()\n\n\nclass Beamline(object):\n '''Generic class that encapsulates different aspects of the beamline.\n The intention for this object is to have methods that activate various 'standard'\n protocols or sequences of actions.'''\n\n def __init__(self, **kwargs):\n \n self.md = {}\n self.current_mode = 'undefined'\n \n \n def mode(self, new_mode):\n '''Tells the instrument to switch into the requested mode. This may involve\n moving detectors, moving the sample, enabling/disabling detectors, and so\n on.'''\n \n getattr(self, 'mode'+new_mode)()\n \n \n def get_md(self, prefix=None, **md):\n '''Returns a dictionary of the current metadata.\n The 'prefix' argument is prepended to all the md keys, which allows the\n metadata to be grouped with other metadata in a clear way. 
(Especially,\n to make it explicit that this metadata came from the beamline.)'''\n \n # Update internal md\n #self.md['key'] = value\n\n md_return = self.md.copy()\n \n # Add md that may change\n md_return['mode'] = self.current_mode\n \n # Include the user-specified metadata\n md_return.update(md)\n\n # Add an optional prefix\n if prefix is not None:\n md_return = { '{:s}{:s}'.format(prefix, key) : value for key, value in md_return.items() }\n \n return md_return\n \n \n def comment(self, text, logbooks=None, tags=None, append_md=True, **md):\n \n text += '\\n\\n[comment for beamline: {}]'.format(self.__class__.__name__)\n \n if append_md:\n \n # Global md\n md_current = { k : v for k, v in RE.md.items() }\n \n # Beamline md\n md_current.update(self.get_md())\n \n # Specified md\n md_current.update(md)\n \n text += '\\n\\n\\nMetadata\\n----------------------------------------'\n for key, value in sorted(md_current.items()):\n text += '\\n{}: {}'.format(key, value)\n \n logbook.log(text, logbooks=logbooks, tags=tags)\n \n \n def log_motors(self, motors, verbosity=3, **md):\n \n log_text = 'Motors\\n----------------------------------------\\nname | position | offset | direction |\\n'\n \n for motor in motors:\n offset = float(caget(motor.prefix+'.OFF'))\n direction = int(caget(motor.prefix+'.DIR'))\n log_text += '{} | {} | {} | {} |\\n'.format(motor.name, motor.user_readback.value, offset, direction)\n \n \n md_current = { k : v for k, v in RE.md.items() }\n md_current.update(md)\n log_text += '\\nMetadata\\n----------------------------------------\\n'\n for k, v in sorted(md_current.items()):\n log_text += '{}: {}\\n'.format(k, v)\n \n if verbosity>=3:\n print(log_text)\n \n self.comment(log_text)\n \n\n \n\n\n\nclass CMS_Beamline(Beamline):\n '''This object collects together various standard protocols and sequences\n of action used on the CMS (11-BM) beamline at NSLS-II.'''\n \n \n def __init__(self, **kwargs):\n \n super().__init__(**kwargs)\n \n self.beam = beam\n #self.SAXS = CMS_SAXS_Detector(pilatus300)\n #self.WAXS = CMS_WAXS_Detector()\n self.SAXS = CMS_SAXS_Detector(pilatus_name)\n \n from epics import PV\n \n self._chamber_pressure_pv = PV('XF:11BMB-VA{Chm:Det-TCG:1}P-I')\n \n self.detector = [] \n self.PLOT_Y = []\n self.TABLE_COLS = []\n self.bsx_pos = -16.74\n \n \n def modeAlignment_bim6(self, verbosity=3):\n \n self.current_mode = 'undefined'\n \n # TODO: Check what mode (TSAXS, GISAXS) and respond accordingly\n # TODO: Check if gate valves are open and flux is okay (warn user)\n \n \n self.beam.off()\n #self.beam.setTransmission(1e-4)\n self.beam.setTransmission(5e-4)\n \n #mov( [DETx, DETy], [0, 0] )\n self.beam.bim6.insert()\n \n caput('XF:11BMB-BI{IM:2}EM180:Acquire', 1) # Turn on bim6\n detselect(bim6, suffix='')\n \n self.current_mode = 'alignment'\n \n self.beam.bim6.reading()\n\n \n \n def modeMeasurement_bim6(self, verbosity=3):\n \n self.current_mode = 'undefined'\n \n self.beam.off()\n self.beam.setTransmission(1)\n \n #mov(DETy, -16)\n self.beam.bim6.retract()\n \n caput('XF:11BMB-BI{IM:2}EM180:Acquire', 0) # Turn off bim6\n #detselect(pilatus300)\n detselect(pilatus_name)\n \n #if RE.state is not 'idle':\n # RE.abort()\n \n self.current_mode = 'measurement'\n \n # Check if gate valves are open\n if self.beam.GVdsbig.state() is not 'out' and verbosity>=1:\n print('Warning: Sample chamber gate valve (large, downstream) is not open.')\n \n\n def modeAlignment(self, verbosity=3):\n \n self.current_mode = 'undefined'\n \n # TODO: Check what mode (TSAXS, GISAXS) and 
respond accordingly\n # TODO: Check if gate valves are open and flux is okay (warn user)\n # TODO: Check list: change attenuator for different energy, change the bsx position with beamcenter accordingly\n \n \n self.beam.off()\n self.beam.setTransmission(1e-8) #1e-6 for 13.5kev, 1e-8 for 17kev\n while beam.transmission() > 3e-8:\n time.sleep(0.5)\n self.beam.setTransmission(1e-8)\n \n #mov(bsx, -10.95)\n bsx.move(self.bsx_pos+3)\n \n #detselect(pilatus300, suffix='_stats4_total')\n #caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquireTime', 0.5)\n #caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquirePeriod', 0.6)\n\n detselect(pilatus_name, suffix='_stats4_total')\n caput('XF:11BMB-ES{}:cam1:AcquireTime'.format(pilatus_Epicsname), 0.5)\n caput('XF:11BMB-ES{}:cam1:AcquirePeriod'.format(pilatus_Epicsname), 0.6)\n #caput('XF:11BMB-ES{Det:PIL2M}:cam1:AcquirePeriod', 0.6)\n \n #TODO: Update ROI based on current SAXSx, SAXSy and the md in cms object\n \n self.current_mode = 'alignment'\n \n #self.beam.bim6.reading()\n\n\n def modeMeasurement(self, verbosity=3): \n \n self.current_mode = 'undefined'\n \n self.beam.off()\n \n #mov(bsx, -15.95)\n bsx.move(self.bsx_pos)\n\n if abs(bsx.user_readback.value - self.bsx_pos)>0.1:\n print('WARNING: Beamstop did not return to correct position!')\n return\n \n self.beam.setTransmission(1)\n \n #detselect(pilatus300)\n #detselect([pilatus300, psccd])\n detselect(pilatus_name)\n \n #if RE.state is not 'idle':\n # RE.abort()\n\n \n self.current_mode = 'measurement'\n \n # Check if gate valves are open\n if self.beam.GVdsbig.state() is not 'out' and verbosity>=1:\n print('Warning: Sample chamber gate valve (large, downstream) is not open.')\n \n\n\n \n \n def modeBeamstopAlignment(self, verbosity=3):\n '''Places bim6 (dsmon) as a temporary beamstop.'''\n \n DETy.move(-6.1)\n \n \n \n def beamstopCircular(self, verbosity=3):\n \n self.beam.setTransmission(1e-6)\n \n bsx.move(0)\n bsphi.move(-12.0)\n bsx.move(self.bsx_pos)\n bsy.move(-15.47)\n \n # TODO: Capture image and confirm that it's okay?\n if verbosity>=1:\n print(\"WARNING: This routine merely puts the beamstop in the ~approximately~ correct position. You must confirm that the beam is being blocked correctly.\")\n \n self.beam.transmission(verbosity=verbosity)\n\n\n def beamstopLinear(self, verbosity=3):\n \n self.beam.setTransmission(1e-6)\n \n bsx.move(0)\n bsphi.move(-223.4)\n bsx.move(self.bsx_pos)\n bsy.move(17)\n \n # TODO: Capture image and confirm that it's okay?\n if verbosity>=1:\n print(\"WARNING: This routine merely puts the beamstop in the ~approximately~ correct position. 
You must confirm that the beam is being blocked correctly.\")\n        \n        self.beam.transmission(verbosity=verbosity)\n        \n        \n        \n    def _actuate_open(self, pv, max_tries=5, wait_time=1.0, verbosity=2):\n        \n        tries = 1\n        if verbosity>=4:\n            print('    Opening {} (try # {:d})'.format(pv, tries))\n        caput(pv+'Cmd:Opn-Cmd', 1)\n        time.sleep(wait_time)\n        \n        \n        while caget(pv+'Pos-Sts')!= 1 and tries<=max_tries:\n            tries += 1\n            if verbosity>=4:\n                print('    Opening {} (try # {:d})'.format(pv, tries))\n            caput(pv+'Cmd:Opn-Cmd', 1)\n            time.sleep(wait_time)\n            \n        if verbosity>=1 and caget(pv+'Pos-Sts')!= 1:\n            print('ERROR, valve did not open ({})'.format(pv))\n\n    def _actuate_close(self, pv, max_tries=5, wait_time=1.0, verbosity=2):\n        \n        tries = 1\n        if verbosity>=4:\n            print('    Closing {} (try # {:d})'.format(pv, tries))\n        caput(pv+'Cmd:Cls-Cmd', 1)\n        time.sleep(wait_time)\n        \n        \n        while caget(pv+'Pos-Sts')!= 0 and tries<=max_tries:\n            tries += 1\n            if verbosity>=4:\n                print('    Closing {} (try # {:d})'.format(pv, tries))\n            caput(pv+'Cmd:Cls-Cmd', 1)\n            time.sleep(wait_time)\n            \n        if verbosity>=1 and caget(pv+'Pos-Sts')!= 0:\n            print('ERROR, valve did not close ({})'.format(pv))\n    \n    \n    def ventChamber(self, verbosity=3):\n        \n        #TODO: Remove the old (commented-out) caput lines\n        \n        # Close large gate valve (downstream side of sample chamber)\n        #caput('XF:11BMB-VA{Chm:Det-GV:1}Cmd:Cls-Cmd',1)\n        self._actuate_close('XF:11BMB-VA{Chm:Det-GV:1}', verbosity=verbosity)\n\n        # Close small gate valve (upstream side of sample chamber)\n        #caput('XF:11BMB-VA{Slt:4-GV:1}Cmd:Cls-Cmd',1)\n        #self._actuate_close('XF:11BMB-VA{Slt:4-GV:1}', verbosity=verbosity)\n\n        # Close valve connecting sample chamber to vacuum pump\n        #caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Cls-Cmd',1)\n        self._actuate_close('XF:11BMB-VA{Chm:Det-IV:1}', verbosity=verbosity)\n        \n        # Soft-open the upstream vent-valve\n        #caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Cls-Cmd', 1)\n        self._actuate_close('XF:11BMB-VA{Chm:Smpl-VV:1}', verbosity=verbosity)\n        time.sleep(1.0)\n        #caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Opn-Cmd', 1)\n        self._actuate_open('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}', verbosity=verbosity)\n        \n        \n        self.chamberPressure(range_high=100)\n        \n        # Fully open the upstream vent-valve\n        #caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Cls-Cmd', 1)\n        self._actuate_close('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}', verbosity=verbosity)\n        time.sleep(1.0)\n        #caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Opn-Cmd', 1)\n        self._actuate_open('XF:11BMB-VA{Chm:Smpl-VV:1}', verbosity=verbosity)\n\n        # Fully open the downstream vent-valve\n        #caput('XF:11BMB-VA{Chm:Det-VV:1_Soft}Cmd:Cls-Cmd', 1)\n        self._actuate_close('XF:11BMB-VA{Chm:Det-VV:1_Soft}', verbosity=verbosity)\n        time.sleep(1.0)\n        #caput('XF:11BMB-VA{Chm:Det-VV:1}Cmd:Opn-Cmd', 1)\n        self._actuate_open('XF:11BMB-VA{Chm:Det-VV:1}', verbosity=verbosity)\n        \n        self.chamberPressure(range_high=1000)\n        \n        if verbosity>=1:\n            print('Sample chamber is ready to be opened.')\n        \n        \n    \n    def _old_ventChamber(self, verbosity=3):\n        # TODO: deprecate and delete\n        \n        \n        # Close large gate valve (downstream side of sample chamber)\n        caput('XF:11BMB-VA{Chm:Det-GV:1}Cmd:Cls-Cmd',1)\n\n        # Close small gate valve (upstream side of sample chamber)\n        #caput('XF:11BMB-VA{Slt:4-GV:1}Cmd:Cls-Cmd',1)\n\n        # Close valve connecting sample chamber to vacuum pump\n        caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Cls-Cmd',1)\n        \n        time.sleep(0.5)\n        \n        # Soft-open the upstream vent-valve\n        caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Cls-Cmd', 1)\n        time.sleep(1.0)\n        caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Opn-Cmd', 1)\n        \n        \n        \n        self.chamberPressure(range_high=100)\n        \n        # Fully open the upstream vent-valve\n        
caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Cls-Cmd', 1)\n time.sleep(1.0)\n caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Opn-Cmd', 1)\n\n # Fully open the downstream vent-valve\n caput('XF:11BMB-VA{Chm:Det-VV:1_Soft}Cmd:Cls-Cmd', 1)\n time.sleep(1.0)\n caput('XF:11BMB-VA{Chm:Det-VV:1}Cmd:Opn-Cmd', 1)\n \n self.chamberPressure(range_high=1000)\n \n if verbosity>=1:\n print('Sample chamber is ready to be opened.')\n \n \n \n def chamberPressure(self, range_low=None, range_high=None, readout_period=1.0, verbosity=3):\n '''Monitors the pressure in the sample chamber, printing the current value.\n If range arguments are provided, the monitoring will end once the pressure\n is outside the range.\n '''\n \n monitor = True\n while monitor:\n \n try:\n \n if range_low is not None and self._chamber_pressure_pv.get()<range_low:\n monitor = False\n if range_high is not None and self._chamber_pressure_pv.get()>range_high:\n monitor = False\n\n P_mbar = self._chamber_pressure_pv.get()\n P_atm = P_mbar*0.000986923\n P_torr = P_mbar*0.750062\n P_kPa = P_mbar*0.1\n P_psi = P_mbar*0.0145038\n \n if verbosity>=4:\n print('Sample chamber pressure: {:8.2f} mbar = {:5.3f} atm = {:7.3f} torr = {:4.1g} kPa \\r'.format(P_mbar, P_atm, P_torr, P_kPa), end='', flush=True)\n elif verbosity>=2:\n print('Sample chamber pressure: {:8.2f} mbar ({:5.3f} atm) \\r'.format(P_mbar, P_atm), end='', flush=True)\n \n time.sleep(readout_period)\n \n \n except KeyboardInterrupt:\n monitor = False\n \n \n def pumpChamber(self, max_tries=8, verbosity=3):\n \n \n # Close vent-valves\n #caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Cls-Cmd', 1)\n #caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Cls-Cmd', 1)\n #caput('XF:11BMB-VA{Chm:Det-VV:1_Soft}Cmd:Cls-Cmd', 1)\n #caput('XF:11BMB-VA{Chm:Det-VV:1}Cmd:Cls-Cmd', 1)\n self._actuate_close('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}', verbosity=verbosity)\n self._actuate_close('XF:11BMB-VA{Chm:Smpl-VV:1}', verbosity=verbosity)\n self._actuate_close('XF:11BMB-VA{Chm:Det-VV:1_Soft}', verbosity=verbosity)\n self._actuate_close('XF:11BMB-VA{Chm:Det-VV:1}', verbosity=verbosity)\n \n # Turn on pump (if necessary)\n tries = 1\n while caget('XF:11BMB-VA{Chm:Det-Pmp:1}Sts:Enbl-Sts')==0 and tries<=max_tries:\n caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 0)\n time.sleep(0.2)\n caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 1)\n time.sleep(2.0)\n tries += 1\n \n # Soft-open valve to pump\n #caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Cls-Cmd', 1)\n self._actuate_close('XF:11BMB-VA{Chm:Det-IV:1}', verbosity=verbosity)\n time.sleep(0.5)\n #caput('XF:11BMB-VA{Chm:Det-IV:1_Soft}Cmd:Opn-Cmd', 1)\n self._actuate_open('XF:11BMB-VA{Chm:Det-IV:1_Soft}', verbosity=verbosity)\n \n time.sleep(5.0)\n # Check pump again\n tries = 1\n while caget('XF:11BMB-VA{Chm:Det-Pmp:1}Sts:Enbl-Sts')==0 and tries<=max_tries:\n caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 0)\n time.sleep(0.2)\n caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 1)\n time.sleep(2.0)\n tries += 1\n \n \n self.chamberPressure(range_low=500)\n\n # Fully open valve to pump\n #caput('XF:11BMB-VA{Chm:Det-IV:1_Soft}Cmd:Cls-Cmd', 1)\n self._actuate_close('XF:11BMB-VA{Chm:Det-IV:1_Soft}', verbosity=verbosity)\n time.sleep(0.5)\n #caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Opn-Cmd', 1)\n self._actuate_open('XF:11BMB-VA{Chm:Det-IV:1}', verbosity=verbosity)\n \n self.chamberPressure(range_low=200)\n \n \n def _old_pumpChamber(self, readout_delay=0.2):\n # TODO: deprecate and delete\n \n \n # Close vent-valves\n caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Cls-Cmd', 1)\n time.sleep(0.5)\n caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Cls-Cmd', 1)\n time.sleep(0.5)\n caput('XF:11BMB-VA{Chm:Det-VV:1_Soft}Cmd:Cls-Cmd', 
1)\n time.sleep(0.5)\n caput('XF:11BMB-VA{Chm:Det-VV:1}Cmd:Cls-Cmd', 1)\n time.sleep(0.2)\n \n # Turn on pump (if necessary)\n if caget('XF:11BMB-VA{Chm:Det-Pmp:1}Sts:Enbl-Sts')==0:\n caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 0)\n time.sleep(0.2)\n caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 1)\n \n # Soft-open valve to pump\n caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Cls-Cmd', 1)\n time.sleep(1.0)\n caput('XF:11BMB-VA{Chm:Det-IV:1_Soft}Cmd:Opn-Cmd', 1)\n time.sleep(0.2)\n \n time.sleep(5.0)\n # Check pump again\n if caget('XF:11BMB-VA{Chm:Det-Pmp:1}Sts:Enbl-Sts')==0:\n caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 0)\n time.sleep(0.2)\n caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 1)\n \n \n self.chamberPressure(range_low=500)\n\n # Fully open valve to pump\n caput('XF:11BMB-VA{Chm:Det-IV:1_Soft}Cmd:Cls-Cmd', 1)\n time.sleep(1.0)\n caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Opn-Cmd', 1)\n time.sleep(0.2)\n \n self.chamberPressure(range_low=200)\n \n \n def openChamberGateValve(self):\n \n caput('XF:11BMB-VA{Chm:Det-GV:1}Cmd:Opn-Cmd', 1) # Large (downstream)\n #caput('XF:11BMB-VA{Slt:4-GV:1}Cmd:Opn-Cmd',1) # Small (upstream)\n\n\n def closeChamberGateValve(self):\n \n caput('XF:11BMB-VA{Chm:Det-GV:1}Cmd:Cls-Cmd', 1) # Large (downstream)\n #caput('XF:11BMB-VA{Slt:4-GV:1}Cmd:Cls-Cmd',1) # Small (upstream)\n \n \n # Metadata methods\n ########################################\n \n def get_md(self, prefix=None, **md):\n \n md_current = self.md.copy()\n md_current['calibration_energy_keV'] = round(self.beam.energy(verbosity=0), 3)\n md_current['calibration_wavelength_A'] = round(self.beam.wavelength(verbosity=0), 5)\n \n h, v = self.beam.size(verbosity=0)\n md_current['beam_size_x_mm'] = h\n md_current['beam_size_y_mm'] = v\n \n #temporarily block it for bad communication. 17:30, 071617\n h, v = self.beam.divergence(verbosity=0)\n md_current['beam_divergence_x_mrad'] = h\n md_current['beam_divergence_y_mrad'] = v\n \n md_current['beamline_mode'] = self.current_mode\n \n #md_current['detector'] = self.detector\n \n md_current['motor_SAXSx'] = SAXSx.user_readback.value\n md_current['motor_SAXSy'] = SAXSy.user_readback.value\n md_current['motor_DETx'] = DETx.user_readback.value\n md_current['motor_DETy'] = DETy.user_readback.value\n md_current['motor_WAXSx'] = WAXSx.user_readback.value\n md_current['motor_smx'] = smx.user_readback.value\n md_current['motor_smy'] = smy.user_readback.value\n md_current['motor_sth'] = sth.user_readback.value\n\n md_current['motor_bsx'] = bsx.user_readback.value\n md_current['motor_bsy'] = bsy.user_readback.value\n md_current['motor_bsphi'] = bsphi.user_readback.value\n \n md_current.update(self.SAXS.get_md(prefix='detector_SAXS_'))\n \n md_current.update(md)\n \n # Add an optional prefix\n if prefix is not None:\n md_current = { '{:s}{:s}'.format(prefix, key) : value for key, value in md_current.items() }\n \n return md_current\n \n\n def setMetadata(self, verbosity=3):\n '''Guides the user through setting some of the required and recommended\n meta-data fields.'''\n \n if verbosity>=3:\n print('This will guide you through adding some meta-data for the upcoming experiment.')\n if verbosity>=4:\n print('You can accept default values (shown in square [] brackets) by pressing enter. You can leave a value blank (or put a space) to skip that entry.')\n\n
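 # (answers are stored in RE.md, where they can also be added or edited directly)\n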
\n # Set some values automatically\n month = int(time.strftime('%m'))\n if month<=4:\n cycle = 1\n elif month<=8:\n cycle = 2\n else:\n cycle = 3 \n RE.md['experiment_cycle'] = '{:s}_{:d}'.format( time.strftime('%Y'), cycle )\n \n RE.md['calibration_energy_keV'] = round(self.beam.energy(verbosity=0), 3)\n RE.md['calibration_wavelength_A'] = round(self.beam.wavelength(verbosity=0), 5)\n \n # TODO:\n # RE.md['calibration_detector_distance_m'] =\n # RE.md['calibration_detector_x0'] =\n # RE.md['calibration_detector_y0'] = \n \n \n \n # Ask the user some questions\n \n questions = [\n ['experiment_proposal_number', 'Proposal number'] ,\n ['experiment_SAF_number', 'SAF number'] ,\n ['experiment_group', 'User group (e.g. PI)'] ,\n ['experiment_user', 'The specific user/person running the experiment'] ,\n ['experiment_project', 'Project name/code'] ,\n ['experiment_alias_directory', 'Alias directory'] ,\n ['experiment_type', 'Type of experiments/measurements (SAXS, GIWAXS, etc.)'] ,\n ]\n \n \n # TBD:\n # Path where data will be stored?\n\n self._dialog_total_questions = len(questions)\n self._dialog_question_number = 1\n \n for key, text in questions:\n try:\n self._ask_question(key, text)\n except KeyboardInterrupt:\n return\n \n if verbosity>=4:\n print('You can also add/edit metadata directly using the RE.md object.')\n \n \n\n def _ask_question(self, key, text, default=None):\n\n if default is None and key in RE.md:\n default = RE.md[key]\n \n if default is None:\n ret = input(' Q{:d}/{:d}. {:s}: '.format(self._dialog_question_number, self._dialog_total_questions, text) )\n \n else:\n ret = input(' Q{:d}/{:d}. {:s} [{}]: '.format(self._dialog_question_number, self._dialog_total_questions, text, default) )\n if ret=='':\n ret = default\n \n \n if ret!='' and ret!=' ':\n RE.md[key] = ret\n \n self._dialog_question_number += 1\n \n \n # Logging methods\n ########################################\n \n def logAllMotors(self, verbosity=3, **md):\n log_pos()\n \n motor_list = [\n mono_bragg ,\n mono_pitch2 ,\n mono_roll2 ,\n mono_perp2 ,\n mir_usx ,\n mir_dsx ,\n mir_usy ,\n mir_dsyi ,\n mir_dsyo ,\n mir_bend ,\n s0.tp ,\n s0.bt ,\n s0.ob ,\n s0.ib ,\n s1.xc ,\n s1.xg ,\n s1.yc ,\n s1.yg ,\n s2.xc ,\n s2.xg ,\n s2.yc ,\n s2.yg ,\n s3.xc ,\n s3.xg ,\n s3.yc ,\n s3.yg ,\n s4.xc ,\n s4.xg ,\n s4.yc ,\n s4.yg ,\n s5.xc ,\n s5.xg ,\n s5.yc ,\n s5.yg ,\n bim3y ,\n fs3y ,\n bim4y ,\n bim5y ,\n smx ,\n smy ,\n sth ,\n schi ,\n sphi ,\n srot ,\n strans ,\n camx ,\n camy ,\n cam2x ,\n cam2z ,\n DETx ,\n DETy ,\n WAXSx ,\n SAXSx ,\n SAXSy ,\n bsx , \n bsy ,\n bsphi ,\n armz ,\n armx ,\n armphi ,\n army ,\n armr ,\n ]\n \n self.log_motors(motor_list, verbosity=verbosity, **md)\n \n \n # End class CMS_Beamline(Beamline)\n ########################################\n \n\nclass CMS_Beamline_GISAXS(CMS_Beamline):\n \n \n def modeAlignment(self, verbosity=3):\n \n if RE.state!='idle':\n RE.abort()\n \n self.current_mode = 'undefined'\n \n # TODO: Check what mode (TSAXS, GISAXS) and respond accordingly\n # TODO: Check if gate valves are open and flux is okay (warn user)\n \n \n self.beam.off()\n self.beam.setTransmission(1e-6)\n while self.beam.transmission() > 2e-6:\n time.sleep(0.5)\n self.beam.setTransmission(1e-6)\n \n #mov(bsx, -11.55)\n #mov(bsx, -11.55+2) # changed at 06/02/17, Osuji beam time\n #mov(bsx, -14.73+2) # changed at 06/04/17, SAXS, 3m, Osuji beam time\n #mov(bsx, -15.23+2) # changed at 06/04/17, GISAXS, 3m, Osuji beam time\n #mov(bsx, 
-17.03+3) # changed at 06/04/17, GISAXS, 3m, Osuji beam time\n #mov(bsx, -16.0+3) #change it at 07/10/17, GISAXS, 2m, LSita Beam time\n #mov(bsx, -16.53+3) # 07/20/17, GISAXS, 5m, CRoss\n #mov(bsx, self.bsx_pos+3)\n \n bsx.move(self.bsx_pos+3)\n \n self.setReflectedBeamROI()\n self.setDirectBeamROI()\n\n #detselect(pilatus300, suffix='_stats4_total')\n #caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquireTime', 0.5)\n #caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquirePeriod', 0.6)\n \n detselect(pilatus_name, suffix='_stats4_total')\n caput('XF:11BMB-ES{}:cam1:AcquireTime'.format(pilatus_Epicsname), 0.5)\n caput('XF:11BMB-ES{}:cam1:AcquirePeriod'.format(pilatus_Epicsname), 0.6)\n\n #TODO: Update ROI based on current SAXSx, SAXSy and the md in cms object\n \n self.current_mode = 'alignment'\n \n #self.beam.bim6.reading()\n\n\n def modeMeasurement(self, verbosity=3): \n\n if RE.state!='idle':\n RE.abort()\n \n self.current_mode = 'undefined'\n \n self.beam.off()\n \n #bsx_pos=-16.74\n #mov(bsx, -16.55)\n #mov(bsx, -13.83) #change it at 06/02/17, Osuji Beam time\n #mov(bsx, -14.73) #change it at 06/04/17, SAXS, 3m, Osuji Beam time\n #mov(bsx, -15.03) #change it at 06/04/17, GISAXS, 3m, Osuji Beam time\n #mov(bsx, -16.43) #change it at 06/12/17, GISAXS, 3m, LZhu Beam time\n #mov(bsx, -16.53) #change it at 06/19/17, GISAXS, 5m, AHexemer Beam time\n #mov(bsx, -16.2) #change it at 07/07/17, GISAXS, 3m, TKoga Beam time\n #mov(bsx, -16.43) #change it at 07/10/17, GISAXS, 2m, LSita Beam time\n #mov(bsx, -16.53) # 07/20/17, GISAXS, 5m, CRoss Beam time\n #mov(bsx, -15.84) # 07/26/17, SAXS/WAXS, 2m, BVogt Beam time\n #mov(bsx, -16.34) # 08/02/17, TOMO GISAXS, 5m, LRichter Beam time\n #mov(bsx, -16.74) # 08/02/17, TOMO GISAXS, 5m, LRichter Beam time\n #mov(bsx, self.bsx_pos)\n \n bsx.move(self.bsx_pos)\n\n #if abs(bsx.user_readback.value - -16.74)>0.1:\n if abs(bsx.user_readback.value - self.bsx_pos)>0.1:\n print('WARNING: Beamstop did not return to correct position!')\n return\n \n self.beam.setTransmission(1)\n \n \n #mov(DETy, -16)\n #self.beam.bim6.retract()\n\n \n #caput('XF:11BMB-BI{IM:2}EM180:Acquire', 0) # Turn off bim6\n #detselect(pilatus300)\n #detselect([pilatus300, psccd]) \n detselect(pilatus_name)\n \n \n self.current_mode = 'measurement'\n \n # Check if gate valves are open\n if self.beam.GVdsbig.state() != 'out' and verbosity>=1:\n print('Warning: Sample chamber gate valve (large, downstream) is not open.')\n \n \n def setDirectBeamROI(self, size=[10,4], verbosity=3):\n '''Update the ROI (stats4) for the direct beam on the Pilatus\n detector. This (should) update correctly based on the current SAXSx, SAXSy.\n \n The size argument controls the size (in pixels) of the ROI itself\n (in the format [width, height]). 
A size=[6,4] is reasonable.\n The size is changed to [10, 4] for possible beam drift during a user run (changed at 08/16/17)'''\n \n detector = self.SAXS\n\n # These positions are updated based on current detector position\n det_md = detector.get_md()\n x0 = det_md['detector_SAXS_x0_pix']\n y0 = det_md['detector_SAXS_y0_pix']\n \n #caput('XF:11BMB-ES{Det:SAXS}:ROI4:MinX', int(x0-size[0]/2))\n #caput('XF:11BMB-ES{Det:SAXS}:ROI4:SizeX', int(size[0]))\n #caput('XF:11BMB-ES{Det:SAXS}:ROI4:MinY', int(y0-size[1]/2))\n #caput('XF:11BMB-ES{Det:SAXS}:ROI4:SizeY', int(size[1]))\n \n #detselect(pilatus300, suffix='_stats4_total')\n\n caput('XF:11BMB-ES{}:ROI4:MinX'.format(pilatus_Epicsname), int(x0-size[0]/2))\n caput('XF:11BMB-ES{}:ROI4:SizeX'.format(pilatus_Epicsname), int(size[0]))\n caput('XF:11BMB-ES{}:ROI4:MinY'.format(pilatus_Epicsname), int(y0-size[1]/2))\n caput('XF:11BMB-ES{}:ROI4:SizeY'.format(pilatus_Epicsname), int(size[1]))\n \n detselect(pilatus_name, suffix='_stats4_total') \n \n def setReflectedBeamROI(self, total_angle=0.16, size=[10,2], verbosity=3):\n '''Update the ROI (stats3) for the reflected beam on the Pilatus300k\n detector. This (should) update correctly based on the current SAXSx, SAXSy.\n \n The size argument controls the size (in pixels) of the ROI itself\n (in the format [width, height]). A size=[6,2] is reasonable.'''\n \n detector = self.SAXS\n\n # These positions are updated based on current detector position\n det_md = detector.get_md()\n x0 = det_md['detector_SAXS_x0_pix']\n y0 = det_md['detector_SAXS_y0_pix']\n \n d = detector.distance*1000.0 # mm\n pixel_size = detector.pixel_size # mm\n \n y_offset_mm = np.tan(np.radians(total_angle))*d\n y_offset_pix = y_offset_mm/pixel_size\n \n #for pilatus300k\n #y_pos = int( y0 - size[1]/2 - y_offset_pix )\n \n #for pilatus2M, placed up-side down\n y_pos = int( y0 - size[1]/2 + y_offset_pix )\n \n #caput('XF:11BMB-ES{Det:SAXS}:ROI3:MinX', int(x0-size[0]/2))\n #caput('XF:11BMB-ES{Det:SAXS}:ROI3:SizeX', int(size[0]))\n #caput('XF:11BMB-ES{Det:SAXS}:ROI3:MinY', y_pos)\n #caput('XF:11BMB-ES{Det:SAXS}:ROI3:SizeY', int(size[1]))\n \n #detselect(pilatus300, suffix='_stats3_total')\n \n caput('XF:11BMB-ES{}:ROI3:MinX'.format(pilatus_Epicsname), int(x0-size[0]/2))\n caput('XF:11BMB-ES{}:ROI3:SizeX'.format(pilatus_Epicsname), int(size[0]))\n caput('XF:11BMB-ES{}:ROI3:MinY'.format(pilatus_Epicsname), y_pos)\n caput('XF:11BMB-ES{}:ROI3:SizeY'.format(pilatus_Epicsname), int(size[1]))\n \n detselect(pilatus_name, suffix='_stats3_total')\n\n\n\n\n\n#cms = CMS_Beamline()\ncms = CMS_Beamline_GISAXS()\n\ndef get_beamline():\n return cms\n\n\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":9100477858381139000,"string":"9,100,477,858,381,139,000"},"line_mean":{"kind":"number","value":34.7477721813,"string":"34.747772"},"line_max":{"kind":"number","value":257,"string":"257"},"alpha_frac":{"kind":"number","value":0.5128055059,"string":"0.512806"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109561,"cells":{"repo_name":{"kind":"string","value":"pybel/pybel-tools"},"path":{"kind":"string","value":"src/pybel_tools/selection/paths.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5832"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\"Path selection tools.\"\"\"\n\nimport itertools as itt\nfrom operator import itemgetter\nfrom typing import Any, List, Mapping, Optional, Tuple\n\nimport networkx as nx\nfrom more_itertools import 
pairwise\n\nfrom pybel import BELGraph, BaseEntity\nfrom pybel.constants import (\n ANALOGOUS_TO, ASSOCIATION, BIOMARKER_FOR, CAUSES_NO_CHANGE, DECREASES, DIRECTLY_DECREASES, DIRECTLY_INCREASES,\n EQUIVALENT_TO, HAS_PRODUCT, HAS_REACTANT, HAS_VARIANT, INCREASES, IS_A, NEGATIVE_CORRELATION, PART_OF,\n POSITIVE_CORRELATION, PROGONSTIC_BIOMARKER_FOR, RATE_LIMITING_STEP_OF, REGULATES, RELATION, SUBPROCESS_OF,\n TRANSCRIBED_TO, TRANSLATED_TO,\n)\nfrom pybel.struct.mutation import get_nodes_in_all_shortest_paths\n\n__all__ = [\n 'get_nodes_in_all_shortest_paths',\n 'get_shortest_directed_path_between_subgraphs',\n 'get_shortest_undirected_path_between_subgraphs',\n]\n\ndefault_edge_ranking = {\n INCREASES: 2,\n DIRECTLY_INCREASES: 3,\n DECREASES: 2,\n DIRECTLY_DECREASES: 3,\n RATE_LIMITING_STEP_OF: 0,\n CAUSES_NO_CHANGE: 0,\n REGULATES: 0,\n NEGATIVE_CORRELATION: 2,\n POSITIVE_CORRELATION: 2,\n ASSOCIATION: 1,\n HAS_PRODUCT: 0,\n HAS_VARIANT: 0,\n HAS_REACTANT: 0,\n TRANSLATED_TO: 0,\n TRANSCRIBED_TO: 0,\n IS_A: 0,\n PART_OF: 0,\n SUBPROCESS_OF: 0,\n ANALOGOUS_TO: 0,\n BIOMARKER_FOR: 0,\n PROGONSTIC_BIOMARKER_FOR: 0,\n EQUIVALENT_TO: 0,\n}\n\n\ndef rank_path(graph: BELGraph, path: List[BaseEntity], edge_ranking: Optional[Mapping[str, int]] = None) -> int:\n \"\"\"Score the given path.\n\n :param graph: A BEL graph\n :param path: A list of nodes in the path (includes terminal nodes)\n :param edge_ranking: A dictionary of {relationship: score}\n :return: The score for the edge\n \"\"\"\n if edge_ranking is None:\n edge_ranking = default_edge_ranking\n\n return sum(\n max(\n edge_ranking[data[RELATION]]\n for data in graph.edges[source][target].values()\n )\n for source, target in pairwise(path)\n )\n\n\n# TODO consider all shortest paths?\ndef _get_shortest_path_between_subgraphs_helper(\n graph: nx.Graph,\n a: nx.Graph,\n b: nx.Graph,\n) -> List[List[Any]]:\n \"\"\"Calculate the shortest path(s) between disconnected sub-graphs ``a`` and ``b`` through ``graph``.\n\n :param graph: A graph\n :param a: A sub-graph of :code:`graph`, disjoint from :code:`b`\n :param b: A sub-graph of :code:`graph`, disjoint from :code:`a`\n :return: A list of the shortest paths between the two sub-graphs\n \"\"\"\n if graph.is_directed():\n shortest_paths = [\n shortest_path\n for na, nb in itt.product(a, b)\n for shortest_path in ( # do it going both ways because it's directed\n nx.shortest_path(graph, na, nb),\n nx.shortest_path(graph, nb, na),\n )\n ]\n else:\n shortest_paths = [\n nx.shortest_path(graph, na, nb)\n for na, nb in itt.product(a, b)\n ]\n\n min_len = min(map(len, shortest_paths))\n return [\n shortest_path\n for shortest_path in shortest_paths\n if len(shortest_path) == min_len\n ]\n\n\ndef get_shortest_directed_path_between_subgraphs(graph: BELGraph, a: BELGraph, b: BELGraph) -> List[List[Any]]:\n \"\"\"Calculate the shortest path(s) between disconnected sub-graphs ``a`` and ``b`` through ``graph``.\n\n :param graph: A BEL graph\n :param a: A sub-graph of :code:`graph`, disjoint from :code:`b`\n :param b: A sub-graph of :code:`graph`, disjoint from :code:`a`\n :return: A list of the shortest paths between the two sub-graphs\n \"\"\"\n return _get_shortest_path_between_subgraphs_helper(graph, a, b)\n\n\ndef get_shortest_undirected_path_between_subgraphs(graph: BELGraph, a: BELGraph, b: BELGraph) -> List[List[Any]]:\n \"\"\"Calculate the undirected shortest path(s) between disconnected sub-graphs ``a`` and ``b`` through ``graph``.\n\n :param graph: A BEL graph\n :param a: A sub-graph of :code:`graph`, 
disjoint from :code:`b`\n :param b: A sub-graph of :code:`graph`, disjoint from :code:`a`\n :return: A list of the shortest paths between the two sub-graphs\n \"\"\"\n ug = graph.to_undirected()\n return _get_shortest_path_between_subgraphs_helper(ug, a, b)\n\n\ndef find_root_in_path(graph: BELGraph, path_nodes: List[BaseEntity]) -> Tuple[BELGraph, BaseEntity]:\n \"\"\"Find the root of the path.\n\n This is defined as the node with the lowest in-degree; if multiple nodes tie,\n the root is the one with the highest out-degree among those with the lowest in-degree.\n\n :param graph: A BEL Graph\n :param path_nodes: A list of nodes in their order in a path\n :return: A pair of the path graph and the root node\n \"\"\"\n path_graph = graph.subgraph(path_nodes)\n\n # node_in_degree_tuple: list of tuples with (node,in_degree_of_node) in ascending order\n in_degrees = sorted(path_graph.in_degree().items(), key=itemgetter(1))\n\n # tied_root_index must be initialised beforehand, in case all nodes have the same in-degree\n tied_root_index = 0\n\n # Get index where the min in_degree stops (in case they are duplicates)\n for i in range(0, (len(in_degrees) - 1)):\n if in_degrees[i][1] < in_degrees[i + 1][1]:\n tied_root_index = i\n break\n\n # If there are multiple nodes with minimum in_degree take the one with max out degree\n # (in case multiple have the same out degree pick one random)\n if tied_root_index != 0:\n # node_out_degree_tuple: ordered list of tuples with (node,out_degree_of_node) in descending order\n out_degrees = sorted(path_graph.out_degree().items(), key=itemgetter(1), reverse=True)\n root_tuple = max(out_degrees[:tied_root_index], key=itemgetter(1))\n else:\n root_tuple = in_degrees[0]\n\n return path_graph, root_tuple[0]\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-1052130577255750700,"string":"-1,052,130,577,255,750,700"},"line_mean":{"kind":"number","value":34.3454545455,"string":"34.345455"},"line_max":{"kind":"number","value":115,"string":"115"},"alpha_frac":{"kind":"number","value":0.6539780521,"string":"0.653978"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109562,"cells":{"repo_name":{"kind":"string","value":"tommo/gii"},"path":{"kind":"string","value":"packages/Mock/PhysicsTools/PhysicsTools.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1092"},"content":{"kind":"string","value":"import random\n##----------------------------------------------------------------##\nfrom gii.core import app, signals\nfrom gii.SceneEditor import SceneEditorModule, getSceneSelectionManager, SceneTool, SceneToolButton\n\nfrom mock import SceneViewTool\n\ndef _getModulePath( path ):\n\timport os.path\n\treturn os.path.dirname( __file__ ) + '/' + path\n\n##----------------------------------------------------------------##\nclass PhyscsShapeTool( SceneViewTool ):\n\tname = 'physics_shape_editor'\n\ttool = 'physics_shape_editor'\n\n##----------------------------------------------------------------##\nclass PhysicTools( SceneEditorModule ):\n\tname = 'physics_tools'\n\tdependency = [ 'scene_view' ]\n\n\tdef onLoad( self ):\n\t\tself.mainToolBar = self.addToolBar( 'physics_tools', \n\t\t\tself.getMainWindow().requestToolBar( 'physics_tools' )\n\t\t\t)\n\t\t\n\t\ttoolManager = self.getModule( 'scene_tool_manager' )\n\t\t\n\t\tself.addTool( 'physics_tools/shape_editor',\n\t\t\twidget = SceneToolButton(\n\t\t\t\t'physics_shape_editor',\n\t\t\t\tlabel = 'Physics Shape Editor',\n\t\t\t\ticon = 'tools/box2d'\n\t\t\t)\n\t\t)\n\n\tdef 
onStart( self ):\n\t\tpass\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":1252288696254564600,"string":"1,252,288,696,254,564,600"},"line_mean":{"kind":"number","value":27.7368421053,"string":"27.736842"},"line_max":{"kind":"number","value":99,"string":"99"},"alpha_frac":{"kind":"number","value":0.5714285714,"string":"0.571429"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109563,"cells":{"repo_name":{"kind":"string","value":"synapse-wireless/bulk-reprogramming"},"path":{"kind":"string","value":"snappyImages/synapse/DK200base.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"7614"},"content":{"kind":"string","value":"# Copyright (C) 2015 Synapse Wireless, Inc.\n# Subject to your agreement of the disclaimer set forth below, permission is given by Synapse Wireless, Inc. (\"Synapse\") to you to freely modify, redistribute or include this SNAPpy code in any program. The purpose of this code is to help you understand and learn about SNAPpy by code examples.\n# BY USING ALL OR ANY PORTION OF THIS SNAPPY CODE, YOU ACCEPT AND AGREE TO THE BELOW DISCLAIMER. If you do not accept or agree to the below disclaimer, then you may not use, modify, or distribute this SNAPpy code.\n# THE CODE IS PROVIDED UNDER THIS LICENSE ON AN \"AS IS\" BASIS, WITHOUT\n# WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT\n# LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF DEFECTS,\n# MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE\n# RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE IS WITH YOU.\n# SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE\n# INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY\n# NECESSARY SERVICING, REPAIR OR CORRECTION. UNDER NO CIRCUMSTANCES WILL\n# SYNAPSE BE LIABLE TO YOU, OR ANY OTHER PERSON OR ENTITY, FOR ANY LOSS OF\n# USE, REVENUE OR PROFIT, LOST OR DAMAGED DATA, OR OTHER COMMERCIAL OR\n# ECONOMIC LOSS OR FOR ANY DAMAGES WHATSOEVER RELATED TO YOUR USE OR\n# RELIANCE UPON THE SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n# DAMAGES OR IF SUCH DAMAGES ARE FORESEEABLE. THIS DISCLAIMER OF WARRANTY\n# AND LIABILITY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. 
NO USE OF\n# ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.\n\n\"\"\"Synapse Evaluation Board base definitions\n This module provides easy initialization of the evaluation boards:\n ProtoBoards (SN171 & SN172) and RF266 USB Dongle \n\nCall the function 'detectEvalBoards()' to detect and intialize one of these\nboards in the default configuration.\n\"\"\"\n\nfrom switchboard import *\nfrom nvparams import *\nfrom platforms import *\n\n\ndef isSM220():\n \"\"\"Detect SM220 based on platform.\"\"\"\n return (platform == 'SM220')\n\nif isSM220():\n # SN172 ProtoBoard pin definitions\n PROTO_172_LED_PIN_1 = GPIO_B2\n PROTO_172_LED_PIN_2 = GPIO_F4\n PROTO_172_BUTTON_PIN = GPIO_F2\n DEMO_LED_PIN = None\n DEMO_BUTTON_PIN = None\n BATT_SENSE = None\n RELAY_SET_PIN = None\n RELAY_RESET_PIN = None\n\n # Proto board pin definitions\n PROTO_LED_GRN_PIN = None\n PROTO_LED_YLW_PIN = None\n PROTO_BUTTON_PIN = None\n PROTO_BUZZER_PIN = None\nelse:\n # Demo board pin definitions\n DEMO_LED_PIN = GPIO_0\n DEMO_BUTTON_PIN = GPIO_1\n BATT_SENSE = GPIO_3\n RELAY_SET_PIN = GPIO_16\n RELAY_RESET_PIN = GPIO_17\n\n # Proto board pin definitions\n PROTO_LED_GRN_PIN = GPIO_1\n PROTO_LED_YLW_PIN = GPIO_2\n PROTO_BUTTON_PIN = GPIO_5\n PROTO_BUZZER_PIN = GPIO_12\n\n PROTO_172_LED_PIN_1 = None\n PROTO_172_LED_PIN_2 = None\n PROTO_172_BUTTON_PIN = None\n\n# RF 266 USB Dongle pin definitions\nDONGLE_LED_PIN = 20 # SNAPpy IO 20 (IO_20)\n\n# Generic pin definitions (set during board-type detection)\nLED_PIN = None\nBUTTON_PIN = None\ndeviceType = None\n\n\ndef detectEvalBoards():\n \"\"\"Detect and initialize Evaluation Boards to default configuration.\"\"\"\n global deviceType\n\n initDeviceType()\n\n # Perform base eval-board initialization\n if deviceType == 'Buzz':\n initProtoHw()\n elif deviceType == 'Proto':\n init172ProtoHw()\n elif deviceType == 'Dongle':\n initRF266DongleHw()\n\n # Make the default UART port accessible to Portal\n defaultUart = 1\n crossConnect(DS_UART0 + defaultUart, DS_PACKET_SERIAL)\n saveNvParam(NV_DEFAULT_UART_ID, defaultUart)\n\n\ndef initProtoHw():\n \"\"\"Initialize ProtoBoard (SN171) hardware peripherals.\"\"\"\n global BUTTON_PIN, LED_PIN\n LED_PIN = PROTO_LED_YLW_PIN\n setPinDir(LED_PIN, True)\n setPinDir(PROTO_LED_GRN_PIN, True)\n BUTTON_PIN = PROTO_BUTTON_PIN\n setPinDir(BUTTON_PIN, False)\n setPinPullup(BUTTON_PIN, True)\n setPinDir(PROTO_BUZZER_PIN, True)\n writePin(PROTO_BUZZER_PIN, False)\n\n\ndef init172ProtoHw():\n \"\"\"Initialize ProtoBoard (SN172) hardware peripherals.\"\"\"\n global BUTTON_PIN, LED_PIN\n BUTTON_PIN = PROTO_172_BUTTON_PIN\n setPinDir(BUTTON_PIN, False)\n setPinPullup(BUTTON_PIN, True)\n LED_PIN = PROTO_172_LED_PIN_1\n setPinDir(LED_PIN, True)\n setPinDir(PROTO_172_LED_PIN_2, True)\n\n\ndef initRF266DongleHw():\n \"\"\"Initialize RF266 USB Dongle hardware peripherals.\"\"\"\n global LED_PIN\n LED_PIN = DONGLE_LED_PIN # There's only one LED on RF266 USB Dongle\n setPinDir(LED_PIN, True)\n writePin(LED_PIN, False) # Initial state of the LED is OFF\n\n\ndef blinkLed(msDuration):\n \"\"\"Blink LED for specified duration in milliseconds.\"\"\"\n global deviceType\n pulsePin(LED_PIN, msDuration, True)\n\n\ndef blinkLed2(msDuration):\n \"\"\"Blink LED2 (if it exists) for specified duration in milliseconds.\"\"\"\n global deviceType\n\n if deviceType == \"Buzz\":\n pulsePin(PROTO_LED_GRN_PIN, msDuration, True)\n elif deviceType == \"Proto\":\n pulsePin(PROTO_172_LED_PIN_2, msDuration, True)\n elif deviceType == \"Dongle\":\n 
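# the RF266 dongle has only one LED, so fall back to blinkLed()\n 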
blinkLed(msDuration)\n\n\ndef blinkLed3(msDuration):\n \"\"\"Blink LED3 (if it exists) for specified duration in milliseconds.\"\"\"\n global deviceType\n\n if deviceType == \"Buzz\":\n pulsePin(PROTO_LED_GRN_PIN, msDuration, True)\n pulsePin(PROTO_LED_YLW_PIN, msDuration, True)\n elif deviceType == \"Proto\":\n pulsePin(PROTO_172_LED_PIN_1, msDuration, True)\n pulsePin(PROTO_172_LED_PIN_2, msDuration, True)\n elif deviceType == \"Dongle\":\n blinkLed(msDuration)\n\n\ndef lightLed():\n \"\"\"Light the first LED.\"\"\"\n global deviceType\n\n if deviceType == \"Proto\":\n writePin(PROTO_172_LED_PIN_1, True)\n writePin(PROTO_172_LED_PIN_2, False)\n elif deviceType == \"Buzz\":\n writePin(PROTO_LED_GRN_PIN, False)\n writePin(PROTO_LED_YLW_PIN, True)\n elif deviceType == \"Dongle\":\n writePin(LED_PIN, True)\n\n\ndef lightLed2():\n \"\"\"Light LED2 (if it exists)\"\"\"\n global deviceType\n\n if deviceType == \"Buzz\":\n writePin(PROTO_LED_GRN_PIN, True)\n writePin(PROTO_LED_YLW_PIN, False)\n elif deviceType == \"Proto\":\n writePin(PROTO_172_LED_PIN_1, True)\n writePin(PROTO_172_LED_PIN_2, True)\n elif deviceType == \"Dongle\":\n writePin(LED_PIN, False)\n\n\ndef lightLed3():\n \"\"\"Light LED3 (if it exists)\"\"\"\n global deviceType\n\n if deviceType == \"Buzz\":\n writePin(PROTO_LED_GRN_PIN, True)\n writePin(PROTO_LED_YLW_PIN, True)\n elif deviceType == \"Proto\":\n writePin(PROTO_172_LED_PIN_1, False)\n writePin(PROTO_172_LED_PIN_2, True)\n elif deviceType == \"Dongle\":\n writePin(LED_PIN, True)\n\n\ndef ledsOff():\n \"\"\"Turn off all LEDs.\"\"\"\n global deviceType\n\n if deviceType == \"Buzz\":\n writePin(PROTO_LED_GRN_PIN, False)\n writePin(PROTO_LED_YLW_PIN, False)\n elif deviceType == \"Proto\":\n writePin(PROTO_172_LED_PIN_1, False)\n writePin(PROTO_172_LED_PIN_2, False)\n elif deviceType == \"Dongle\":\n writePin(LED_PIN, False)\n\n\ndef isRF266():\n \"\"\"Detect the RF266 USB Dongle based on platform.\"\"\"\n return (platform == 'RF266')\n\n\ndef initDeviceType():\n \"\"\"Set default device types for Evaluation boards.\"\"\"\n global deviceType\n if isSM220():\n deviceType = \"Proto\"\n elif isRF266():\n deviceType = \"Dongle\"\n else:\n deviceType = \"Buzz\"\n\n # Store detected device type string to reserved system NV param\n saveNvParam(NV_DEVICE_TYPE_ID, deviceType)\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":6309866372889226000,"string":"6,309,866,372,889,226,000"},"line_mean":{"kind":"number","value":31.2627118644,"string":"31.262712"},"line_max":{"kind":"number","value":294,"string":"294"},"alpha_frac":{"kind":"number","value":0.6843971631,"string":"0.684397"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109564,"cells":{"repo_name":{"kind":"string","value":"italomaia/turtle-linux"},"path":{"kind":"string","value":"games/WhichWayIsUp/lib/projectile.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1297"},"content":{"kind":"string","value":"'''A projectile fired by an enemy (e.g. the spider, which climbs along walls and shoots at the player).'''\n\nimport pygame\nimport os\n\nfrom pygame.locals import *\n\nfrom locals import *\n\nimport data\n\nfrom object import Gameobject\nfrom sound import play_sound\nfrom animation import Animation\n\nclass Projectile(Gameobject):\n\n def __init__(self, screen, x = None, y = None, dx = None, dy = None, damage = 5, set = \"energy\"):\n Gameobject.__init__(self, screen, False, False, x, y, -1)\n self.animations[\"default\"] = Animation(set, \"flying\")\n 
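# the chosen animation set (default \"energy\") must provide both \"flying\" and \"dying\" animations\n 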
self.animations[\"dying\"] = Animation(set, \"dying\")\n self.image = self.animations[self.current_animation].update_and_get_image()\n self.rect = self.image.get_rect()\n self.dx = dx\n self.dy = dy\n self.saveddx = None\n self.damage = damage\n self.itemclass = \"projectile\"\n return\n\n def update(self, level = None):\n Gameobject.update(self, level)\n if self.y < 0 and self.current_animation != \"dying\": #This kills projectiles that wander off the screen from the top\n self.current_animation = \"dying\"\n if self.dx == 0 and self.dy == 0 and self.saveddx != None:\n self.dx = self.saveddx\n self.dy = self.saveddy\n return\n\n def flip(self):\n self.saveddx = -self.dy\n self.saveddy = self.dx\n Gameobject.flip(self)\n return"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":8307536100845769000,"string":"8,307,536,100,845,769,000"},"line_mean":{"kind":"number","value":28.5,"string":"28.5"},"line_max":{"kind":"number","value":121,"string":"121"},"alpha_frac":{"kind":"number","value":0.6700077101,"string":"0.670008"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109565,"cells":{"repo_name":{"kind":"string","value":"simon-weber/Instant-SQLite-Audit-Trail"},"path":{"kind":"string","value":"test.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4363"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport sqlite3\nimport unittest\n\nimport audit\n\n\nclass TestAudit(unittest.TestCase):\n def setUp(self):\n self.conn = sqlite3.connect(':memory:')\n\n self.conn.execute('CREATE TABLE tab(c1, c2)')\n audit.attach_log(self.conn)\n\n def tearDown(self):\n audit.detach_log(self.conn)\n self.conn.close()\n\n def test_string_to_python(self):\n self.conn.execute(\"INSERT INTO tab VALUES('a', 'b')\")\n\n r = self.conn.execute(\"SELECT * FROM _audit\").fetchone()\n py_val = audit.to_python(r[4])\n\n self.assertEqual(py_val,\n [['c1', 'a'],\n ['c2', 'b']])\n\n def test_nums_to_python(self):\n self.conn.execute(\"INSERT INTO tab VALUES(5, 3.14)\")\n\n r = self.conn.execute(\"SELECT * FROM _audit\").fetchone()\n py_val = audit.to_python(r[4])\n\n self.assertEqual(py_val,\n [['c1', 5],\n ['c2', 3.14]])\n\n def test_null_to_python(self):\n self.conn.execute(\"INSERT INTO tab VALUES(NULL, NULL)\")\n\n r = self.conn.execute(\"SELECT * FROM _audit\").fetchone()\n py_val = audit.to_python(r[4])\n\n self.assertEqual(py_val,\n [['c1', None],\n ['c2', None]])\n\n def test_insert(self):\n self.conn.execute(\"INSERT INTO tab VALUES('audit', 'this')\")\n\n audit_rows = self.conn.execute(\"SELECT * FROM _audit\").fetchall()\n self.assertEqual(len(audit_rows), 1)\n\n r = audit_rows[0]\n\n #table and op\n self.assertEqual(r[1:3], (u'tab', u'INSERT'))\n\n #no previous, new is what we inserted\n self.assertEqual(r[3:],\n (None,\n unicode(repr([['c1', 'audit'],\n ['c2', 'this']])),\n ))\n\n def test_update(self):\n self.conn.execute(\"INSERT INTO tab VALUES('audit', 'this')\")\n self.assertEqual(\n self.conn.execute(\"UPDATE tab SET c2='everything' WHERE\"\n \" c2='this'\").rowcount,\n 1)\n\n audit_rows = self.conn.execute(\"SELECT * FROM _audit\").fetchall()\n self.assertEqual(len(audit_rows), 2)\n\n r = audit_rows[1]\n\n self.assertEqual(r[1:3], (u'tab', u'UPDATE'))\n self.assertEqual(r[3:],\n (unicode(repr([['c1', 'audit'],\n ['c2', 'this']])),\n unicode(repr([['c1', 'audit'],\n ['c2', 'everything']])),\n ))\n\n def test_delete(self):\n self.conn.execute(\"INSERT INTO tab VALUES('audit', 'this')\")\n self.assertEqual(\n 
self.conn.execute(\"DELETE FROM tab WHERE c1='audit'\").rowcount,\n 1)\n\n audit_rows = self.conn.execute(\"SELECT * FROM _audit\").fetchall()\n self.assertEqual(len(audit_rows), 2)\n\n r = audit_rows[1]\n\n self.assertEqual(r[1:3], (u'tab', u'DELETE'))\n self.assertEqual(r[3:],\n (unicode(repr([['c1', 'audit'],\n ['c2', 'this']])),\n None,\n ))\n\n def test_update_null(self):\n self.conn.execute(\"INSERT INTO tab VALUES('audit', NULL)\")\n self.assertEqual(\n self.conn.execute(\"UPDATE tab SET c2='everything' WHERE\"\n \" c2 is NULL\").rowcount,\n 1)\n\n audit_rows = self.conn.execute(\"SELECT * FROM _audit\").fetchall()\n self.assertEqual(len(audit_rows), 2)\n\n r = audit_rows[1]\n\n self.assertEqual(r[1:3], (u'tab', u'UPDATE'))\n self.assertEqual(r[3:],\n (unicode(repr([['c1', 'audit'],\n ['c2', None]])),\n unicode(repr([['c1', 'audit'],\n ['c2', 'everything']])),\n ))\n\n def test_detach(self):\n audit.detach_log(self.conn)\n\n self.conn.execute(\"INSERT INTO tab VALUES('no', 'audit')\")\n\n with self.assertRaises(sqlite3.OperationalError):\n #no _audit table\n self.conn.execute(\"SELECT * FROM _audit\").fetchall()\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-5486037840757958000,"string":"-5,486,037,840,757,958,000"},"line_mean":{"kind":"number","value":30.8467153285,"string":"30.846715"},"line_max":{"kind":"number","value":75,"string":"75"},"alpha_frac":{"kind":"number","value":0.472610589,"string":"0.472611"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109566,"cells":{"repo_name":{"kind":"string","value":"eliostvs/django-budget"},"path":{"kind":"string","value":"django-budget/dashboard/views.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1405"},"content":{"kind":"string","value":"from __future__ import unicode_literals\n\nfrom datetime import date, timedelta\nfrom decimal import InvalidOperation\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect, render\nfrom django.utils import timezone\n\nfrom budget.models import Budget\nfrom transaction.models import Transaction\n\n\n@login_required\ndef dashboard(request):\n try:\n budget = Budget.active.most_current_for_date(timezone.now())\n\n except Budget.DoesNotExist:\n return redirect('setup')\n\n latest_expenses = Transaction.expenses.get_latest()\n latest_incomes = Transaction.incomes.get_latest()\n\n now = timezone.now()\n start_date = date(now.year, now.month, 1)\n end_year, end_month = now.year, now.month + 1\n\n end_date = date(end_year, end_month + 1, 1) - timedelta(days=1)\n\n estimated_amount = budget.monthly_estimated_total()\n amount_used = budget.actual_total(start_date, end_date)\n\n try:\n progress_bar_percent = min(100, amount_used / estimated_amount * 100)\n\n except InvalidOperation:\n progress_bar_percent = 0\n\n ctx = {'budget': budget,\n 'estimated_amount': estimated_amount,\n 'amount_used': amount_used,\n 'latest_incomes': latest_incomes,\n 'latest_expenses': latest_expenses,\n 'progress_bar_percent': progress_bar_percent}\n\n return render(request, 'dashboard.html', 
ctx)\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":777468964307954300,"string":"777,468,964,307,954,300"},"line_mean":{"kind":"number","value":28.8936170213,"string":"28.893617"},"line_max":{"kind":"number","value":77,"string":"77"},"alpha_frac":{"kind":"number","value":0.6925266904,"string":"0.692527"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109567,"cells":{"repo_name":{"kind":"string","value":"ryanjoneil/docker-image-construction"},"path":{"kind":"string","value":"dicp/solvers/colgen_model_gurobi.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"9388"},"content":{"kind":"string","value":"from collections import defaultdict\nfrom dicp.clique import Clique\nfrom gurobipy import GRB, Model, quicksum\nfrom itertools import combinations, product\nimport time\n\n\nclass ColgenModelGurobi(object):\n '''Column Generation model'''\n _slug = 'colgen-model-gurobi'\n\n def __init__(self, time=None):\n self.time = time # in minutes\n\n def slug(self):\n return ColgenModelGurobi._slug\n\n def solve(self, problem, saver):\n self.problem = problem\n\n # Starting cliques\n self.cliques = set()\n\n self.img_cmd_to_cliques = defaultdict(list)\n self.img_to_cliques = defaultdict(list) # cliques with >= 2 cmds\n for img, cmds in problem.images.items():\n for cmd in cmds:\n clique = Clique(problem, [img], [cmd])\n self.cliques.add(clique)\n self.img_cmd_to_cliques[img, cmd].append(clique)\n self.img_to_cliques[img].append(clique)\n\n for cmd, imgs in problem.images_by_command.items():\n if len(imgs) < 2:\n continue\n clique = Clique(problem, imgs, [cmd])\n self.cliques.add(clique)\n for img in imgs:\n self.img_cmd_to_cliques[img, cmd].append(clique)\n self.img_to_cliques[img].append(clique)\n\n # Initial set of intersections\n self.intersections = set()\n for c1, c2 in combinations(self.cliques, 2):\n self._test_intersection(c1, c2)\n\n for iteration in range(1000):\n print '[iteration %02d / %s]' % (iteration + 1, time.asctime())\n\n done = True\n self._master()\n for clique in self._subproblem():\n if clique is not None and clique not in self.cliques:\n print '[new clique] %s' % clique\n\n for c in self.cliques:\n self._test_intersection(clique, c)\n\n done = False\n self.cliques.add(clique)\n for img, cmd in product(clique.images, clique.commands):\n self.img_cmd_to_cliques[img, cmd].append(clique)\n for img in clique.images:\n self.img_to_cliques[img].append(clique)\n\n if done:\n solution = self._master(final=True)\n\n print '\\n[solution]'\n for clique in sorted(solution):\n if len(clique.images) > 1:\n print clique\n\n print '\\n[cliques]'\n for clique in sorted(self.cliques):\n if len(clique.images) > 1:\n print clique\n break\n print\n\n def _test_intersection(self, c1, c2):\n c1, c2 = tuple(sorted([c1, c2]))\n if (c1, c2) in self.intersections:\n return\n\n overlapping_images = bool(c1.images_set.intersection(c2.images_set))\n if not overlapping_images:\n return\n\n disjoint_images = bool(c1.images_set - c2.images_set and c2.images_set - c1.images_set)\n overlapping_commands = bool(c1.commands_set.intersection(c2.commands_set))\n\n if disjoint_images or (overlapping_images and overlapping_commands):\n self.intersections.add((c1, c2))\n\n def _master(self, final=False):\n self.img_cmd_duals = defaultdict(float)\n self.clique_inter_duals = defaultdict(float)\n\n model = Model()\n model.params.OutputFlag = False\n obj = []\n\n # x[i,c] = 1 if clique c is used\n x = {}\n for clique in self.cliques:\n if 
final:\n x[clique] = v = model.addVar(vtype=GRB.BINARY)\n else:\n x[clique] = v = model.addVar()\n obj.append(clique.cost * v)\n\n model.update()\n\n # Each image has to run each of its commands.\n img_cmd_constraints = {}\n for img, cmds in self.problem.images.items():\n for cmd in cmds:\n vlist = [x[c] for c in self.img_cmd_to_cliques[img, cmd]]\n if final:\n model.addConstr(quicksum(vlist) == 1)\n else:\n img_cmd_constraints[img, cmd] = model.addConstr(quicksum(vlist) >= 1)\n\n # Clique intersections\n clique_inter_constraints = {}\n for c1, c2 in self.intersections:\n clique_inter_constraints[c1, c2] = model.addConstr(x[c1] + x[c2] <= 1)\n\n model.setObjective(quicksum(obj), GRB.MINIMIZE)\n model.optimize()\n\n if final:\n print '\\n[final master obj: %.02f]' % model.objVal\n return [c for c in self.cliques if x[c].x > 0.5]\n\n else:\n header = ' | %s' % (' '.join('% 6s' % c for c in self.problem.commands))\n print '-' * len(header)\n print header\n print '-' * len(header)\n for img in self.problem.images:\n duals = []\n for cmd in self.problem.commands:\n try:\n extra = img_cmd_constraints[img, cmd].pi\n self.img_cmd_duals[img, cmd] = extra\n duals.append(round(extra, 1) or '')\n except KeyError:\n duals.append('')\n print '% 4s | %s' % (img, ' '.join('% 6s' % d for d in duals))\n print '-' * len(header)\n\n for (c1, c2), c in sorted(clique_inter_constraints.items()):\n if c.pi:\n print '[clique/inter dual] %s | %s = %.02f' % (c1, c2, c.pi)\n self.clique_inter_duals[c1, c2] = c.pi\n\n def _subproblem(self):\n cliques = []\n\n for (c1, c2), pi in self.clique_inter_duals.items():\n int_images = c1.images_set.intersection(c2.images_set)\n\n # Remove images in c1 not in c2\n z = pi + c1.cost\n for i in int_images:\n dual = sum(self.img_cmd_duals[i, cmd] for cmd in c1.commands)\n z -= dual\n\n if z < 0:\n cliques.append(Clique(self.problem, int_images, c1.commands))\n\n # Remove images in c2 not in c1\n z = pi + c2.cost\n for i in int_images:\n dual = sum(self.img_cmd_duals[i, cmd] for cmd in c2.commands)\n z -= dual\n\n if z < 0:\n cliques.append(Clique(self.problem, int_images, c2.commands))\n\n # Pare images off until they don't intersect anymore\n if len(c1.images) <= 2 or len(c2.images) <= 2:\n continue\n\n model = Model()\n model.params.OutputFlag = False\n\n # model.params.OutputFlag = False\n obj = [pi]\n\n p1 = model.addVar(vtype=GRB.BINARY)\n p2 = model.addVar(vtype=GRB.BINARY)\n q1 = {i: model.addVar(vtype=GRB.BINARY) for i in c1.images}\n q2 = {i: model.addVar(vtype=GRB.BINARY) for i in c2.images}\n r1 = {i: model.addVar(vtype=GRB.BINARY) for i in int_images}\n r2 = {i: model.addVar(vtype=GRB.BINARY) for i in int_images}\n\n model.update()\n\n obj.append(c1.cost * p1)\n obj.append(c2.cost * p2)\n\n for i in c1.images:\n dual = sum(self.img_cmd_duals[i, cmd] for cmd in c1.commands)\n obj.append(-dual * q1[i])\n\n for i in c2.images:\n dual = sum(self.img_cmd_duals[i, cmd] for cmd in c2.commands)\n obj.append(-dual * q2[i])\n\n for i in int_images:\n model.addConstr(p1 <= r1[i] + r2[i])\n model.addConstr(p2 <= r1[i] + r2[i])\n\n for i in c1.images:\n model.addConstr(q1[i] <= p1)\n if i in int_images:\n model.addConstr(q1[i] <= 1 - r1[i])\n\n for i in c2.images:\n model.addConstr(q2[i] <= p2)\n if i in int_images:\n model.addConstr(q2[i] <= 1 - r2[i])\n\n model.setObjective(sum(obj), GRB.MINIMIZE)\n model.optimize()\n\n if model.objVal >= 0:\n continue\n\n for c, v, r in [(c1, p1, r1), (c2, p2, r2)]:\n if v.x < 0.5:\n continue\n\n # Figure out what images are left in the clique\n 
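# (an image i with r[i] set to 1 was pared off this clique by the subproblem)\n 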
rem_imgs = set(c.images)\n for i, riv in r.items():\n if riv.x > 0.5:\n rem_imgs.remove(i)\n\n if len(rem_imgs) > 1:\n cliques.append(Clique(self.problem, rem_imgs, c.commands))\n rem_imgs_1 = set(c1.images)\n rem_imgs_2 = set(c2.images)\n\n z = -c1.cost - c2.cost\n for i in int_images:\n dual1 = sum(self.img_cmd_duals[i, cmd] for cmd in c1.commands)\n dual2 = sum(self.img_cmd_duals[i, cmd] for cmd in c2.commands)\n if dual1 > dual2:\n rem_imgs_1.remove(i)\n z += dual1\n else:\n rem_imgs_2.remove(i)\n z += dual2\n\n if z > 0:\n cliques.append(Clique(self.problem, rem_imgs_1, c1.commands))\n cliques.append(Clique(self.problem, rem_imgs_2, c2.commands))\n\n return cliques\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":5374028787520214000,"string":"5,374,028,787,520,214,000"},"line_mean":{"kind":"number","value":34.6958174905,"string":"34.695817"},"line_max":{"kind":"number","value":95,"string":"95"},"alpha_frac":{"kind":"number","value":0.4968044312,"string":"0.496804"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109568,"cells":{"repo_name":{"kind":"string","value":"akvo/akvo-rsr"},"path":{"kind":"string","value":"akvo/rsr/models/result/indicator_period_data_comment.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1260"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom .indicator_period_data import IndicatorPeriodData\n\nfrom akvo.rsr.fields import ValidXMLTextField\nfrom akvo.rsr.mixins import TimestampsMixin\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass IndicatorPeriodDataComment(TimestampsMixin, models.Model):\n \"\"\"\n Model for adding comments to data of an indicator period.\n \"\"\"\n project_relation = 'results__indicators__periods__data__comments__in'\n\n data = models.ForeignKey(IndicatorPeriodData, verbose_name=_('indicator period data'),\n related_name='comments')\n user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), db_index=True)\n comment = ValidXMLTextField(_('comment'), blank=True)\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _('indicator period data comment')\n verbose_name_plural = _('indicator period data comments')\n ordering = ('-id', )\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":1878097736667727400,"string":"1,878,097,736,667,727,400"},"line_mean":{"kind":"number","value":38.375,"string":"38.375"},"line_max":{"kind":"number","value":97,"string":"97"},"alpha_frac":{"kind":"number","value":0.7087301587,"string":"0.70873"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109569,"cells":{"repo_name":{"kind":"string","value":"reimandlab/Visualistion-Framework-for-Genome-Mutations"},"path":{"kind":"string","value":"website/imports/sites/site_mapper.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"8076"},"content":{"kind":"string","value":"import logging\nimport re\nfrom typing import List\nfrom warnings import warn\n\nfrom pandas import DataFrame\nfrom tqdm import tqdm\n\nfrom database import create_key_model_dict\nfrom models import Protein, Gene\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef 
find_all(longer_string: str, sub_string: str):\n \"\"\"Returns positions of all overlapping matches.\n\n Allowed alphabet excludes '^' and '$' characters.\n\n If sub_string starts with '^' or ends with '$'\n an exact match (at front or at the end) will\n be performed.\n \"\"\"\n if sub_string.startswith('^'):\n # there can be only one match\n # (or otherwise we would be matching with\n # less precision than declared earlier)\n return [0] if longer_string.startswith(sub_string[1:]) else []\n if sub_string.endswith('$'):\n return [len(longer_string) - len(sub_string) + 1] if longer_string.endswith(sub_string[:-1]) else []\n\n position = -1\n matches = []\n\n while True:\n position = longer_string.find(sub_string, position + 1)\n\n if position == -1:\n return matches\n\n matches.append(position)\n\n\ndef find_all_regex(longer_string: str, sub_string: str):\n \"\"\"Should have the same effect as `find_all`,\n\n but as it adds an overhead of creating match objects\n and has to supports a lot of additional features, is\n probably match slower than `find_all`\n \"\"\"\n\n if not (sub_string.startswith('^') or sub_string.endswith('$')):\n # positive lookahead to enable detection of overlapping matches\n sub_string = '(?=' + sub_string + ')'\n\n return [\n match.start()\n for match in re.finditer(sub_string, longer_string)\n ]\n\n\nclass OneBasedPosition(int):\n pass\n\n\nclass SiteMapper:\n\n def __init__(self, proteins, repr_site):\n self.proteins = proteins\n self.repr_site = repr_site\n self.genes = create_key_model_dict(Gene, 'name')\n self.has_gene_names = None\n self.already_warned = None\n\n def map_sites_by_sequence(self, sites: DataFrame) -> DataFrame:\n \"\"\"Given a site with an isoform it should occur in,\n verify if the site really appears on the given position\n in this isoform and find where in all other isoforms\n this site appears (by exact match of a sequence span,\n typically one 15 amino acids long: site position +/-7 aa).\n\n If a site does not appear on declared position in the\n original isoform, emit a warning and try to find the\n correct position (to overcome a potential sequence shift\n which might be a result of different sequence versions).\n\n If there is no isoform with given refseq and there is\n a gene column in provided sites DataFrame, all isoforms\n of this gene will be used for mapping.\n\n Args:\n sites: data frame with sites, having (at least) following columns:\n 'sequence', 'position', 'refseq', 'residue', 'left_sequence_offset'\n\n Returns:\n Data frame of sites mapped to isoforms in database,\n including the sites in isoforms provided on input,\n if those has been confirmed or adjusted.\n \"\"\"\n print('Mapping sites to isoforms')\n\n mapped_cnt = 0\n mapped_sites = []\n self.already_warned = set()\n self.has_gene_names = 'gene' in sites.columns\n\n for site in tqdm(sites.itertuples(index=False), total=len(sites)):\n\n was_mapped = False\n protein = None\n positions = {}\n\n isoforms_to_map = self.choose_isoforms_to_map(site)\n\n # find matches\n for isoform in isoforms_to_map:\n positions[isoform] = self.map_site_to_isoform(site, isoform)\n\n if protein:\n matches = positions[protein]\n self.collate_matches_with_expectations(matches, site)\n\n # create rows with sites\n for isoform, matched_positions in positions.items():\n\n for position in matched_positions:\n\n # _replace() returns new namedtuple with replaced values;\n # it is not protected but hidden (to allow 'replace' field)\n new_site = site._replace(\n refseq=isoform.refseq,\n 
position=position\n )\n mapped_sites.append(new_site)\n was_mapped = True\n\n if was_mapped:\n mapped_cnt += 1\n\n print(\n f'Successfully mapped {mapped_cnt} '\n f'({mapped_cnt / len(sites) * 100}%) sites'\n )\n\n return DataFrame(mapped_sites)\n\n def map_site_to_isoform(self, site, isoform: Protein) -> List[OneBasedPosition]:\n \"\"\"Finds all occurrences of a site (by exact sequence match)\n in provided sequence of an alternative isoform.\n\n Original position of the site is used to highlight \"suspicious\" cases,\n in which the matched site is far away (>50/90% of isoform length) from\n the one of the original site. This is based on premise that most of\n alternative isoform should not differ so much.\n\n Returned positions are 1-based\n \"\"\"\n matches = [\n m + 1 + site.left_sequence_offset\n # asterisks (*) representing stop codon are removed for the time of mapping\n # so expression like 'SOMECTERMINALSEQUENCE$' can be easily matched\n for m in find_all(isoform.sequence.rstrip('*'), site.sequence)\n ]\n\n if len(matches) > 1:\n warn(f'More than one match for: {self.repr_site(site)}')\n\n if matches:\n biggest_distance = max(abs(position - site.position) for position in matches)\n\n if biggest_distance > len(isoform.sequence) / 2:\n positions = \", \".join([str(m) for m in matches])\n\n if biggest_distance > len(isoform.sequence) * 9 / 10:\n inform = warn\n else:\n inform = logger.info\n\n inform(\n f'This site {self.repr_site(site)} was found on position(s): '\n f'{positions}; some are quite far away from the '\n f'position in original isoform: {site.position}.'\n )\n\n return matches\n\n def choose_isoforms_to_map(self, site):\n protein = None\n\n if site.refseq not in self.proteins:\n if self.has_gene_names and site.gene in self.genes:\n gene = self.genes[site.gene]\n logger.info(\n f'Using {gene} to map {self.repr_site(site)} (not using '\n f'{site.refseq}, as this sequence is not available).'\n )\n else:\n if site.refseq not in self.already_warned:\n warn(\n f'No protein with {site.refseq} '\n + (f'and no gene named {site.gene} ' if self.has_gene_names else '') +\n f'(first encountered for {self.repr_site(site)}).'\n )\n self.already_warned.add(site.refseq)\n return []\n else:\n protein = self.proteins[site.refseq]\n gene = protein.gene\n\n if gene and gene.isoforms:\n return {self.proteins[isoform.refseq] for isoform in gene.isoforms}\n elif protein:\n return {protein}\n return []\n\n def collate_matches_with_expectations(self, original_isoform_matches, site):\n\n if not original_isoform_matches:\n warn(f'The site: {self.repr_site(site)} was not found in {site.refseq}, '\n f'though it should appear in this isoform according to provided sites data.')\n elif all(match_pos != site.position for match_pos in original_isoform_matches):\n warn(f'The site: {self.repr_site(site)} does not appear on the exact given position in '\n f'{site.refseq} isoform, though it was re-mapped to: 
{original_isoform_matches}.')\n"},"license":{"kind":"string","value":"lgpl-2.1"},"hash":{"kind":"number","value":-1987020756523349200,"string":"-1,987,020,756,523,349,200"},"line_mean":{"kind":"number","value":35.0535714286,"string":"35.053571"},"line_max":{"kind":"number","value":108,"string":"108"},"alpha_frac":{"kind":"number","value":0.5865527489,"string":"0.586553"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109570,"cells":{"repo_name":{"kind":"string","value":"lconceicao/son-security-pilot"},"path":{"kind":"string","value":"fsm/tor-config/tor/tor.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"6736"},"content":{"kind":"string","value":"\"\"\"\nCopyright (c) 2015 SONATA-NFV\nALL RIGHTS RESERVED.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nNeither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]\nnor the names of its contributors may be used to endorse or promote\nproducts derived from this software without specific prior written\npermission.\n\nThis work has been performed in the framework of the SONATA project,\nfunded by the European Commission under Grant number 671517 through\nthe Horizon 2020 and 5G-PPP programmes. The authors would like to\nacknowledge the contributions of their colleagues of the SONATA\npartner consortium (www.sonata-nfv.eu).\n\"\"\"\n\nimport logging\nimport yaml\nimport os\nimport sys\nfrom collections import namedtuple\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.vars import VariableManager\nfrom ansible.inventory import Inventory\nfrom ansible.executor.playbook_executor import PlaybookExecutor\nfrom sonsmbase.smbase import sonSMbase\n\nlogging.basicConfig(level=logging.INFO)\nLOG = logging.getLogger(\"fsm-tor-config-1\")\nLOG.setLevel(logging.DEBUG)\nlogging.getLogger(\"son-mano-base:messaging\").setLevel(logging.INFO)\n\n\nclass TORConfigurationFSM(sonSMbase):\n\n def __init__(self):\n\n self.specific_manager_type = 'fsm'\n self.service_name = 'psa-service' # TODO: match nsd name\n self.function_name = 'tor-vnf'\n self.specific_manager_name = 'tor-config'\n self.id_number = '1'\n self.version = 'v0.1'\n self.description = \"FSM for configuring the TOR VNF\"\n\n self.amqp_topic = ('son.' 
+ self.specific_manager_name +\n self.id_number + self.version)\n\n super(self.__class__, self).__init__(\n specific_manager_type=self.specific_manager_type,\n service_name=self.service_name,\n function_name=self.function_name,\n specific_manager_name=self.specific_manager_name,\n id_number=self.id_number,\n version=self.version,\n description=self.description)\n\n def on_registration_ok(self):\n\n LOG.debug(\"Received registration ok event.\")\n\n # send the status to the SMR (not necessary)\n self.manoconn.publish(topic='specific.manager.registry.ssm.status',\n message=yaml.dump(\n {'name': self.specific_manager_id,\n 'status': 'Registration is done, '\n 'initialising the configuration...'}))\n\n # subscribes to the related topic (could be any other topic)\n self.manoconn.subscribe(self.on_configuration,\n topic=self.amqp_topic)\n\n def on_configuration(self, ch, method, props, response):\n\n if props.app_id != self.specific_manager_id:\n LOG.info('Start retrieving the IP address ...')\n # safe_load avoids executing arbitrary tags from the message payload\n response = yaml.safe_load(str(response))\n vnfr_list = response['VNFR']\n mgmt_ip = None\n vm_image = 'http://files.sonata-nfv.eu/son-psa-pilot/tor-vnf/' \\\n 'sonata-tor.qcow2'\n\n # the VDU deployed from the TOR image carries the management address\n for vnfr in vnfr_list:\n vdu = vnfr['virtual_deployment_units'][0]\n if vdu['vm_image'] == vm_image:\n mgmt_ip = (vdu['vnfc_instance'][0]['connection_points'][0]\n ['type']['address'])\n\n if not mgmt_ip:\n LOG.error(\"Couldn't obtain IP address from VNFR\")\n return\n\n # send the status to the SMR (not necessary)\n self.manoconn.publish(\n topic='specific.manager.registry.ssm.status',\n message=yaml.dump(\n {'name': self.specific_manager_id,\n 'status': \"IP address:'{0}'\".format(mgmt_ip)}))\n\n LOG.info(\"IP address:'{0}'\".format(mgmt_ip))\n\n self.manoconn.notify(topic=self.amqp_topic,\n msg=yaml.dump(\n {'name': self.specific_manager_id,\n 'IP': mgmt_ip}))\n\n # configure the VM using the ansible playbook\n variable_manager = VariableManager()\n loader = DataLoader()\n\n inventory = Inventory(loader=loader,\n variable_manager=variable_manager)\n\n playbook_path = 'fsm/tor-config/ansible/site.yml'\n\n if not os.path.exists(playbook_path):\n LOG.error('The playbook does not exist')\n return\n\n Options = namedtuple('Options',\n ['listtags', 'listtasks', 'listhosts',\n 'syntax', 'connection', 'module_path',\n 'forks', 'remote_user', 'private_key_file',\n 'ssh_common_args', 'ssh_extra_args',\n 'sftp_extra_args', 'scp_extra_args',\n 'become', 'become_method', 'become_user',\n 'verbosity', 'check'])\n options = Options(listtags=False, listtasks=False, listhosts=False,\n syntax=False, connection='ssh', module_path=None,\n forks=100, remote_user='slotlocker',\n private_key_file=None, ssh_common_args=None,\n ssh_extra_args=None, sftp_extra_args=None,\n scp_extra_args=None, become=True,\n become_method=None, become_user='root',\n verbosity=None, check=False)\n\n variable_manager.extra_vars = {'hosts': mgmt_ip}\n\n passwords = {}\n\n pbex = PlaybookExecutor(playbooks=[playbook_path],\n inventory=inventory,\n variable_manager=variable_manager,\n loader=loader, options=options,\n passwords=passwords)\n\n results = pbex.run()\n\n return\n\ndef main():\n TORConfigurationFSM()\n\nif __name__ == '__main__':\n 
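# instantiating the FSM triggers registration with the SMR (see on_registration_ok)\n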
main()\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":1664009224081678800,"string":"1,664,009,224,081,678,800"},"line_mean":{"kind":"number","value":39.0952380952,"string":"39.095238"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.5645783848,"string":"0.564578"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109571,"cells":{"repo_name":{"kind":"string","value":"JuBra/GEMEditor"},"path":{"kind":"string","value":"GEMEditor/base/ui/EmptyDialogHorzButtons.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1289"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file '.\\EmptyDialogHorzButtons.ui'\n#\n# Created by: PyQt5 UI code generator 5.8.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_EmptyDialogHorzButtons(object):\n def setupUi(self, EmptyDialogHorzButtons):\n EmptyDialogHorzButtons.setObjectName(\"EmptyDialogHorzButtons\")\n EmptyDialogHorzButtons.resize(212, 70)\n self.buttonBox = QtWidgets.QDialogButtonBox(EmptyDialogHorzButtons)\n self.buttonBox.setGeometry(QtCore.QRect(20, 20, 171, 32))\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(\"buttonBox\")\n\n self.retranslateUi(EmptyDialogHorzButtons)\n self.buttonBox.accepted.connect(EmptyDialogHorzButtons.accept)\n self.buttonBox.rejected.connect(EmptyDialogHorzButtons.reject)\n QtCore.QMetaObject.connectSlotsByName(EmptyDialogHorzButtons)\n\n def retranslateUi(self, EmptyDialogHorzButtons):\n _translate = QtCore.QCoreApplication.translate\n EmptyDialogHorzButtons.setWindowTitle(_translate(\"EmptyDialogHorzButtons\", \"Dialog\"))\n\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-3072252802214121000,"string":"-3,072,252,802,214,121,000"},"line_mean":{"kind":"number","value":43.4482758621,"string":"43.448276"},"line_max":{"kind":"number","value":106,"string":"106"},"alpha_frac":{"kind":"number","value":0.7626066718,"string":"0.762607"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109572,"cells":{"repo_name":{"kind":"string","value":"zkbt/exopop"},"path":{"kind":"string","value":"exoatlas/populations/Population.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"50046"},"content":{"kind":"string","value":"# general class for exoplanet populations\nfrom ..imports import *\nfrom ..telescopes import *\nfrom ..models import *\n\nimport string\n\nbasic_columns = [\n'name',\n'hostname',\n'ra', 'dec',\n'distance',\n'discoverer']\n\ntransit_columns = [\n'period',\n'semimajoraxis',\n'e', 'omega', 'inclination',\n'transit_epoch',\n'transit_duration',\n'transit_depth',\n'stellar_teff',\n'stellar_mass',\n'stellar_radius',\n'radius',\n'mass',\n'transit_ar',\n'transit_b']\n\ncalculated_columns = [\n'a_over_rs',\n'b',\n'insolation',\n'relative_insolation',\n'log_relative_insolation',\n'teq',\n'planet_luminosity',\n'density',\n'surface_gravity',\n'distance_modulus',\n'escape_velocity',\n'escape_parameter',\n'angular_separation',\n'imaging_contrast',\n'stellar_luminosity',\n]\n\n\ntable_columns = basic_columns + transit_columns\nattribute_columns = table_columns + calculated_columns\n\n\nmethod_columns = ['scale_height',\n 'transmission_signal', 'transmission_snr',\n 
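# (these are methods computed on demand; most accept keyword arguments)\n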
'emission_signal', 'emission_snr',\n 'reflection_signal', 'reflection_snr',\n 'stellar_brightness',\n 'stellar_brightness_in_telescope_units',\n 'depth_uncertainty']\n\ndesired_columns = [\n'mass_uncertainty_upper',\n'mass_uncertainty_lower',\n'radius_uncertainty_upper',\n'radius_uncertainty_lower',\n'distance_uncertainty_upper',\n'distance_uncertainty_lower']\n\n# these are keywords that can be set for a population\ndefault_plotkw = dict(color='black',\n alpha=1,\n zorder=0,\n marker='o',\n linewidth=1,\n respond_to_color=True,\n respond_to_size=True,\n exact=False,\n label_planets=False,\n filled=True,\n outlined=False)\n\n# what keywords can we set for the population plotkw?\nallowed_plotkw = list(default_plotkw.keys())\nallowed_plotkw += ['s',\n 'c',\n 'cmap',\n 'norm',\n 'vmin',\n 'vmax',\n 'outlined',\n 'filled']\n\n\n\nclass Population(Talker):\n '''\n Create a population from a standardized table.\n '''\n\n #kludge?\n _pithy = True\n def __init__(self, standard, label='unknown', verbose=False, **plotkw):\n '''\n Initialize a Population of exoplanets from a standardized table.\n\n Parameters\n ----------\n standard : astropy.table.Table\n A table that contains all the necessary columns.\n **plotkw : dict\n All other keyword arguments will be stored\n as plotting suggestions.\n '''\n\n # a standardized table with a minimum set of columns we can expect\n self.standard = Table(standard)\n\n # store a label for this population\n self.label = label\n\n # keywords to use for plotting\n self.plotkw = plotkw\n\n self._pithy = not verbose\n # define some cleaned names and hostnames, for indexing\n try:\n self.standard['tidyname']\n except KeyError:\n self.standard['tidyname'] = [clean(x).lower() for x in self.standard['name']]\n\n try:\n self.standard['tidyhostname']\n except KeyError:\n self.standard['tidyhostname'] = [clean(x).lower() for x in self.standard['hostname']]\n\n # make sure the table is searchable via names\n self.standard.add_index('tidyname')\n self.standard.add_index('tidyhostname')\n\n def sort(self, x, reverse=False):\n '''\n Sort this population by some key or attribute.\n '''\n\n to_sort = getattr(self, x)\n i = np.argsort(to_sort)\n if reverse:\n i = i[::-1]\n\n self.standard = self.standard[i]\n return self\n\n def __add__(self, other):\n '''\n Create a new population by adding two together:\n\n `bigger = this + other`\n\n Parameters\n ----------\n other : Population\n The population to be tacked onto this one.\n\n Returns\n -------\n bigger : Population\n A new population, consisting of all the planets\n in `this` population and some extra ones added\n from `other`.\n\n '''\n\n # skip any warnings that pop up\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n # create a new table, joining both together\n table = join(self.standard,\n other.standard,\n join_type='outer')\n\n # create an informative label\n label = f'{self.label} + {other.label}'\n\n # create and return the new population\n return Population(table, label=label)\n\n def remove_by_key(self, other, key='tidyname'):\n '''\n Create a new population by removing some rows from here:\n\n `smaller = this - other`\n\n Parameters\n ----------\n other : Population\n The population of planets to be removed from\n `this` population to create a new `smaller` one.\n\n Returns\n -------\n smaller : Population\n A subset of `this` population, where some rows\n have been removed.\n '''\n\n # skip any warnings that pop up\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n # create a new table, 
removing rows that also appear in the other population\n table = setdiff(self.standard,\n other.standard,\n keys=key)\n\n # create an informative label\n label = f'{self.label} - {other.label}'\n\n # create and return the new population\n return Population(table, label=label)\n\n\n\n def __sub__(self, other):\n '''\n Create a new population by removing some rows from here:\n\n `smaller = this - other`\n\n Parameters\n ----------\n other : Population\n The population of planets to be removed from\n `this` population to create a new `smaller` one.\n\n Returns\n -------\n smaller : Population\n A subset of `this` population, where some rows\n have been removed.\n '''\n return self.remove_by_key(other)\n\n\n def __getitem__(self, key):\n '''\n Create a subpopulation of planets by indexing, slicing, or masking.\n '''\n # FIXME -- maybe make it easier to pull out intermediate masks?\n\n try:\n # if the key is an index/slice/mask, return it\n if self.label is None:\n label = None\n else:\n label = f'Subset of {self.label}'\n subset = Population(standard=self.standard[key],\n label=label,\n **self.plotkw)\n\n # if the key is a column, raise an error\n if isinstance(key, str) and key in self.standard.colnames:\n raise IndexError(f'''\n You seem to be trying to access a column from this\n population via `pop[{key}]`. For clarity, all `[]`\n indexing is reserved for selecting subsets of the\n population.\n\n To access your particular column, please try either\n `pop.{key}` or `pop.standard[{key}]` to return a\n 1D array of the entries in that column.\n ''')\n except KeyError:\n\n # use a string or a list of strings to make a subset by planet name\n # FIXME - maybe we should make this say more when it's making a sneaky choice for us?\n try:\n subset = self.create_subset_by_name(key)\n except KeyError:\n subset = self.create_subset_by_hostname(key)\n\n return subset\n\n\n def create_subset_by_name(self, key):\n '''\n Extract a subset of this population,\n based on one or more planet names.\n\n Parameters\n ----------\n key : strings, list of strings\n The name of a planet (\"GJ1132b\")\n or a list of planet names.\n\n (All names will be stripped of\n special characters and converted\n to lower case before indexing.)\n\n Returns\n -------\n subset : Population\n A new population containing\n some subset of the original.\n '''\n\n # use a (list of) string(s) to index population by name\n if isinstance(key, str):\n # is it just one name?\n key = clean(key).lower()\n elif isinstance(key[0], str):\n # is it a list of names?\n key = [clean(k).lower() for k in key]\n\n # pull out rows by planet name\n subset = self.standard.loc['tidyname', key]\n\n # create a useful label for the population\n if isinstance(key, str):\n label = key\n elif isinstance(key[0], str):\n label = '+'.join(key)\n\n # create that new sub-population\n return Population(standard=subset,\n label=label,\n **self.plotkw)\n\n\n def create_subset_by_hostname(self, key):\n '''\n Extract a subset of this population,\n based on one or more planet hostnames.\n\n Parameters\n ----------\n key : strings, list of strings\n The hostname of a planet (\"GJ1132\")\n or a list of planet hostnames.\n\n (All names will be stripped of\n special characters and converted\n to lower case before indexing.)\n\n Returns\n -------\n subset : Population\n A new population containing\n some subset of the original.\n '''\n\n\n # use a string or a list of strings to index the population by name\n if isinstance(key, str):\n # is it just one name?\n key = clean(key).lower()\n elif isinstance(key[0], str):\n # is it a list of names?\n 
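# tidy each entry the same way the 'tidyhostname' index was built\n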
key = [clean(k).lower() for k in key]\n\n # pull out rows by planet name\n subset = self.standard.loc['tidyhostname', key]\n\n # create a useful label for the population\n if isinstance(key, str):\n label = key\n elif isinstance(key[0], str):\n label = '+'.join(key)\n\n # create that new sub-population\n return Population(standard=subset,\n label=label,\n **self.plotkw)\n\n def create_subset_by_position(self,\n coordinates,\n radius=1*u.arcmin,\n use_proper_motions=False,\n return_indices=False):\n '''\n Extract a subset of this population,\n by performing a spatial cross-match by\n RA and Dec. This will return all planets\n from this population that fall within\n the specified radius of at least one of\n the specified coordinates.\n\n Parameters\n ----------\n coordinates : astropy.coordinates.SkyCoord\n The sky coordinate (RA, Dec) or list\n of coordinates we want to search for\n nearby objects.\n\n radius : astropy.units.Quantity\n The angular radius around each position\n that we should include in each search.\n\n use_proper_motions : bool\n Should we use available proper motions,\n embedded in the skycoords, to propagate\n positions to a shared epoch before\n cross-matching? Alas, this ability\n is *not yet implemented*. FIXME!\n\n return_indices : bool\n Should we also return the indices\n of the original coordinates that\n were matched to existing positions?\n\n Returns\n -------\n subset : Population\n A new population containing a subset\n of the original, including *all* planets\n that fall within the 2D sky search space.\n\n '''\n\n if use_proper_motions:\n raise NotImplementedError('No cross-matching with proper motions yet :-(')\n\n # create astropy coordinates for this population\n population_coords = SkyCoord(ra=self.ra, dec=self.dec)\n\n # do a spatial cross match on the sky\n # (idx gives the index into coordinates,\n # each corresponding to an entry in population_coords)\n idx, d2d, d3d = population_coords.match_to_catalog_sky(coordinates)\n\n # identify which systems are actually close on the sky\n match = d2d < radius\n\n # create new populations that are linked by spatial position\n i_match = match.nonzero()[0]\n #matched_coordinates = coordinates[idx[i_match]]\n subset = self.standard[i_match]\n\n # define a meaningful label\n label = f'Spatial Cross-Match ({len(coordinates)} positions, {radius} radius)'\n\n # create that new sub-population\n new_population = Population(standard=subset,\n label=label,\n **self.plotkw)\n\n # choose what to return\n if return_indices:\n i_from_original_coordinates = idx[i_match]\n return new_population, i_from_original_coordinates\n else:\n return new_population\n\n def __getattr__(self, key):\n '''\n If an attribute/method isn't defined for a population,\n look for it as a column of the standardized table.\n\n For example, `population.stellar_radius` will try to\n access `population.standard['stellar_radius']`.\n\n Parameters\n ----------\n key : str\n The attribute we're trying to get.\n '''\n if key == 'label':\n raise RuntimeError('Yikes!')\n try:\n # extract the column from the standardized table\n try:\n # try to return the array of quantities (with units)\n return self.standard[key].quantity\n except TypeError:\n # columns without units don't have quantities\n return self.standard[key].data\n except KeyError:\n # try to get a plotkw from this pop, from the plotting defaults, from None\n try:\n assert(key in allowed_plotkw)\n return self.plotkw.get(key, default_plotkw[key])\n except (AssertionError, KeyError):\n raise 
AttributeError(f\"\"\"\n Alas, there seems to be no way to find `.{key}`\n as an attribute or property of {self}.\n \"\"\") #AtlasError\n\n def __setattr__(self, key, value):\n '''\n Define what happens when we try to set an attribute via `pop.attr = x`.\n If the keyword is a pre-defined \"plotting\" keyword in `allowed_plotkw`,\n then we should save it in a special `plotkw` dictionary. Otherwise,\n the attribute should be set as normal.\n\n Parameters\n ----------\n key : str\n The attribute we're trying to set.\n value : anything\n The value we're trying to give that attribute.\n '''\n\n if key in allowed_plotkw:\n # store plotting keywords in a separate plotting dictionary\n self.plotkw[key] = value\n else:\n # otherwise, store attributes as normal for objects\n self.__dict__[key] = value\n\n def __repr__(self):\n '''\n How should this object appear as a repr/str?\n '''\n return f'<{self.label} | population of {self.n} planets>'\n\n def uncertainty(self, key):\n '''\n Return an array of symmetric uncertainties on a column.\n\n Parameters\n ----------\n key : str\n The column for which we want errors.\n '''\n\n # first try for an `uncertainty_{key}` column\n try:\n return self.__getattr__(f'{key}_uncertainty')\n except (KeyError, AssertionError, AtlasError, AttributeError): # is including AttributeError a kludge?\n # this can be removed after debugging\n self.speak(f'no symmetric uncertainties found for \"{key}\"')\n\n # then try for crudely averaging asymmetric uncertainties\n try:\n lower = self.__getattr__(f'{key}_uncertainty_lower')\n upper = self.__getattr__(f'{key}_uncertainty_upper')\n avg = 0.5*(np.abs(lower) + np.abs(upper))\n return avg\n except (KeyError, AssertionError, AtlasError, AttributeError):\n # this can be removed after debugging\n self.speak(f'no asymmetric uncertainties found for \"{key}\"')\n\n # then give up and return nans\n return np.nan*self.standard[key]\n\n def uncertainty_lowerupper(self, key):\n '''\n Return two arrays of lower and upper uncertainties on a column.\n\n Parameters\n ----------\n key : str\n The column for which we want errors.\n\n Returns\n -------\n lower : np.array\n The magnitude of the lower uncertainties (x_{-lower}^{+upper})\n upper : np.array\n The magnitude of the upper uncertainties (x_{-lower}^{+upper})\n '''\n\n # first try for actual asymmetric uncertainties\n try:\n lower = self.__getattr__(f'{key}_uncertainty_lower')\n upper = self.__getattr__(f'{key}_uncertainty_upper')\n return np.abs(lower), np.abs(upper)\n except (KeyError, AssertionError, AttributeError):\n # this can be removed after debugging\n self.speak(f'no asymmetric uncertainties found for \"{key}\"')\n\n # then try for a symmetric `{key}_uncertainty` column\n try:\n sym = self.__getattr__(f'{key}_uncertainty')\n return np.abs(sym), np.abs(sym)\n except (KeyError, AssertionError, AttributeError):\n # this can be removed after debugging\n self.speak(f'no symmetric uncertainties found for \"{key}\"')\n\n # then give up and return nans\n unc = np.nan*self.__getattr__(key)\n return unc, unc\n\n def single(self, name):\n '''\n Create a subpopulation of a single planet.\n '''\n\n # create a subset of the standardized table\n subset = self.standard.loc[name]\n\n # create a new object, from this subset\n return Population(standard=subset, label=name, **self.plotkw)\n\n def validate_columns(self):\n '''\n Make sure this standardized table has all the necessary columns.\n Summarize the amount of good data in each.\n '''\n\n N = len(self.standard)\n for k in table_columns:\n try:\n n = 
sum(self.standard[k].mask == False)\n except AttributeError:\n try:\n n = sum(np.isfinite(self.standard[k]))\n except TypeError:\n n = sum(self.standard[k] != '')\n self.speak(f'{k:>25} | {n:4}/{N} rows = {n/N:4.0%} are not empty')\n\n def find(self, name):\n '''\n Return index of a particular planet in the population.\n\n ??? = maybe this could/should be replaced with some table cleverness?\n '''\n\n return np.array([clean(name) in clean(x) for x in self.name]).nonzero()[0]\n\n def update_planet(self, planet_name, **kwargs):\n '''\n Correct the properties of a particular planet,\n modifying its values in the standardized table.\n\n Parameters\n ----------\n planet_name : str\n The name of the planet to fix.\n **kwargs : dict\n Keyword arguments will go into modifying\n the properties of that planet.\n '''\n\n # find the entry to replace\n match = self.find(planet_name)\n if len(match) != 1:\n self.speak(f'failed when trying to modify parameters for {planet_name}')\n return\n else:\n match = match[0]\n \n # loop over the keys, modifying each\n self.speak(f'for planet \"{planet_name}\"')\n for k, new in kwargs.items():\n old = self.standard[k][match]\n self.speak(f' {k} changed from {old} to {new}')\n self.standard[k][match] = new\n if k == 'name':\n self.standard['tidyname'][match] = clean(new).lower()\n if k == 'hostname':\n self.standard['tidyhostname'][match] = clean(new).lower()\n\n def removeRows(self, indices):\n\n raise NotImplementedError('''\n The `removeRows` method has been removed. Please use something like\n `population[0:42]` or `population[ok]` to use slices, indices, or masks\n to create new sub-populations that extract subsets from this one.\n ''')\n\n @property\n def n(self):\n '''\n How many planets are in this population?\n '''\n return len(self.standard)\n\n def __len__(self):\n '''\n How many planets are in this population?\n '''\n return len(self.standard)\n\n\n\n\n @property\n def semimajor_axis(self):\n '''\n Have a safe way to calculate the semimajor axis of planets,\n that fills in gaps as necessary. Basic strategy:\n\n First from table.\n Then from NVK3L.\n Then from a/R*.\n\n '''\n\n # pull out the actual values from the table\n a = self.standard['semimajoraxis'].copy().quantity\n\n # try to replace bad ones with NVK3L\n bad = np.isfinite(a) == False\n self.speak(f'{sum(bad)}/{self.n} semimajoraxes are missing')\n\n # calculate from the period and the stellar mass\n P = self.period[bad]\n M = self.stellar_mass[bad]\n G = con.G\n a[bad] = ((G*M*P**2/4/np.pi**2)**(1/3)).to('AU')\n\n # replace those that are still bad with the a/R*\n stillbad = np.isfinite(a) == False\n self.speak(f'{sum(stillbad)}/{self.n} are still missing after NVK3L')\n # (pull from table to avoid potential for recursion)\n a_over_rs = self.standard['transit_ar'][stillbad].quantity\n rs = self.standard['stellar_radius'][stillbad].quantity\n a[stillbad] = a_over_rs*rs\n\n return a\n\n @property\n def angular_separation(self):\n '''\n Calculate the angular separation,\n simply as theta = a/D\n '''\n\n a = self.semimajor_axis\n D = self.distance\n\n theta = np.arctan(a/D).to(u.arcsec)\n\n return theta\n\n\n @property\n def imaging_contrast(self):\n '''\n What is the reflected light eclipse depth,\n for an albedo of 100%?\n\n But use a kludged radius\n '''\n return 0.25*(self.kludge_radius/self.semimajor_axis).decompose()**2\n\n\n\n @property\n def a_over_rs(self):\n '''\n Have a safe way to calculate the scaled semimajor axis of planets,\n that fills in gaps as necessary. 
Basic strategy:\n\n First from table, mostly derived from transit.\n Then from the semimajor axis.\n '''\n\n # pull out the values from the table\n a_over_rs = self.standard['transit_ar'].copy()\n\n # try to fill in the bad ones from a and R*\n bad = np.isfinite(a_over_rs) == False\n self.speak(f'{sum(bad)}/{self.n} values for a/R* are missing')\n\n a = self.semimajor_axis[bad]\n R = self.stellar_radius[bad]\n a_over_rs[bad] = a/R\n\n stillbad = np.isfinite(a_over_rs) == False\n self.speak(f'{sum(stillbad)}/{self.n} are still missing after a and R*')\n\n return a_over_rs\n\n @property\n def stellar_luminosity(self):\n T = self.stellar_teff\n R = self.stellar_radius\n sigma = con.sigma_sb\n return (4*np.pi*R**2*sigma*T**4).to(u.Lsun)\n\n @property\n def e(self):\n '''\n FIXME -- assumes all missing eccentricities are 0!\n '''\n\n # pull out the actual values from the table\n e = self.standard['e'].copy().quantity\n\n # replace the bad ones with zero\n bad = np.isfinite(e) == False\n self.speak(f'{sum(bad)}/{self.n} eccentricities are missing')\n self.speak(f'assuming they are all zero')\n e[bad] = 0\n\n return e\n\n @property\n def omega(self):\n '''\n (FIXME! we need better longitudes of periastron)\n '''\n\n # pull out the actual values from the table\n omega = self.standard['omega'].copy().quantity\n\n # find the missing longitudes of periastron\n bad = np.isfinite(omega) == False\n self.speak(f'{sum(bad)}/{self.n} longitudes of periastron are missing')\n e_zero = self.e == 0\n self.speak(f'{sum(e_zero)} have eccentricities assumed to be 0')\n omega[e_zero] = 0*u.deg\n\n return omega\n\n @property\n def b(self):\n '''\n Transit impact parameter.\n (FIXME! split this into transit and occultation)\n '''\n\n # pull out the actual values from the table\n b = self.standard['transit_b'].copy().quantity\n\n # find the missing impact parameters\n bad = np.isfinite(b) == False\n self.speak(f'{sum(bad)}/{self.n} impact parameters are missing')\n\n # calculate from a/R*, inclination, and eccentricity\n a_over_rs = self.a_over_rs[bad]\n i = self.standard['inclination'][bad].quantity\n e = self.e[bad]\n omega = self.omega[bad]\n b[bad] = a_over_rs*np.cos(i)*((1-e**2)/(1+e*np.sin(omega)))\n\n # report those that are still bad\n stillbad = np.isfinite(b) == False\n self.speak(f'{sum(stillbad)}/{self.n} are still missing after using i')\n\n return b\n\n\n\n # the 1360 W/m^2 that Earth receives from the Sun\n earth_insolation = (1*u.Lsun/4/np.pi/u.AU**2).to(u.W/u.m**2)\n\n @property\n def insolation(self):\n '''\n The insolation the planet receives, in W/m^2.\n '''\n\n # calculate the average insolation the planet receives\n insolation = self.stellar_luminosity/4/np.pi/self.semimajor_axis**2\n return insolation.to(u.W/u.m**2)\n\n @property\n def relative_insolation(self):\n '''\n The insolation the planet receives, relative to Earth.\n '''\n return self.insolation/self.earth_insolation\n\n @property\n def log_relative_insolation(self):\n return np.log10(self.relative_insolation)\n\n @property\n def relative_cumulative_xuv(self):\n xuv_proxy = (self.stellar_luminosity/u.Lsun)**-0.6\n return self.relative_insolation*xuv_proxy\n\n @property\n def teq(self):\n '''\n The equilibrium temperature of the planet.\n '''\n f = self.insolation\n sigma = con.sigma_sb\n A = 1\n return ((f*A/4/sigma)**(1/4)).to(u.K)\n\n @property\n def planet_luminosity(self):\n '''\n The bolometric luminosity of the planet (assuming zero albedo).\n '''\n return (self.teq**4*con.sigma_sb*4*np.pi*self.radius**2).to(u.W)\n\n @property\n def 
transit_depth(self):\n '''\n The depth of the transit\n (FIXME, clarify if this is 1.5-3.5 or what)\n '''\n\n # pull out the actual values from the table\n d = self.standard['transit_depth'].copy().quantity\n\n # try to fill them in from Rp/Rs\n bad = np.isfinite(d) == False\n self.speak(f'{sum(bad)}/{self.n} transit depths are missing')\n\n Rp = self.radius[bad]\n Rs = self.stellar_radius[bad]\n\n d[bad] = (Rp/Rs).decompose()**2\n\n # report those that are still bad\n stillbad = np.isfinite(d) == False\n self.speak(f'{sum(stillbad)}/{self.n} are still missing after Rp/Rs')\n\n return d\n\n\n @property\n def transit_duration(self):\n '''\n The duration of the transit\n (FIXME, clarify if this is 1.5-3.5 or what)\n '''\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n # pull out the actual values from the table\n d = self.standard['transit_duration'].copy().quantity\n\n # try to fill them in from P, a/R*, and b\n bad = np.isfinite(d) == False\n self.speak(f'{sum(bad)}/{self.n} transit durations are missing')\n\n\n\n P = self.period[bad]\n a_over_rs = self.a_over_rs[bad]\n b = self.b[bad]\n\n T0 = P/np.pi/a_over_rs\n T = T0*np.sqrt(1-b**2)\n\n e = self.e[bad]\n omega = self.omega[bad]\n factor = np.sqrt(1 - e**2)/(1 + e*np.sin(omega))\n\n d[bad] = (T*factor).to(u.day)\n\n # report those that are still bad\n stillbad = np.isfinite(d) == False\n self.speak(f'{sum(stillbad)}/{self.n} are still missing after P, a/R*, b')\n\n return d\n\n\n\n @property\n def kludge_mass(self):\n '''\n Have a safe way to calculate the mass of planets,\n that fills in gaps as necessary. Basic strategy:\n\n First from table.\n Then from msini.\n '''\n\n # pull out the actual values from the table\n M = self.standard['mass'].copy().quantity\n\n # find the missing masses\n bad = np.isfinite(M) == False\n self.speak(f'{sum(bad)}/{self.n} masses are missing')\n\n # estimate from the msini\n try:\n M[bad] = self.msini[bad]\n except (KeyError, AssertionError, AtlasError, AttributeError):\n pass\n\n # report those that are still missing\n stillbad = np.isfinite(M) == False\n self.speak(f'{sum(stillbad)}/{self.n} are still missing after msini')\n\n return M\n\n @property\n def kludge_radius(self):\n '''\n Have a safe way to calculate the radii of planets,\n that fills in gaps as necessary. Basic strategy:\n\n First from table.\n Then from mass, via Chen & Kipping (2017).\n '''\n\n # pull out the actual values from the table\n R = self.standard['radius'].copy().quantity\n\n # find the missing radii\n bad = np.isfinite(R) == False\n self.speak(f'{sum(bad)}/{self.n} radii are missing')\n\n # estimate from Chen and Kipping\n try:\n M = self.kludge_mass\n R[bad] = estimate_radius(M[bad])\n except (KeyError, AssertionError, AtlasError, AttributeError):\n pass\n\n # report those that are still missing\n stillbad = np.isfinite(R) == False\n self.speak(f'{sum(stillbad)}/{self.n} are still missing after Chen & Kipping (2017)')\n\n return R\n\n @property\n def kludge_age(self):\n '''\n Have a safe way to calculate the age of planets,\n that fills in gaps as necessary. 
Basic strategy:\n\n First from table.\n Then assume 5 Gyr.\n '''\n\n # pull out the actual values from the table\n age = self.standard['stellar_age'].copy().quantity\n\n # find the missing ages\n bad = np.isfinite(age) == False\n self.speak(f'{sum(bad)}/{self.n} ages are missing')\n\n # assume a default age of 5 Gyr\n try:\n age[bad] = 5*u.Gyr\n except (KeyError, AssertionError, AtlasError, AttributeError):\n pass\n\n # report those that are still missing\n stillbad = np.isfinite(age) == False\n self.speak(f'{sum(stillbad)}/{self.n} are still missing after blindly assuming 5Gyr for missing ages')\n\n return age\n\n\n @property\n def surface_gravity(self):\n '''\n (FIXME) -- make an assumption for planets without masses\n '''\n\n G = con.G\n M = self.mass\n R = self.radius\n\n g = (G*M/R**2).to('m/s**2')\n return g\n\n @property\n def density(self):\n '''\n The density of the planet.\n '''\n mass = self.mass\n volume = 4/3*np.pi*(self.radius)**3\n return (mass/volume).to('g/cm**3')\n\n @property\n def escape_velocity(self):\n '''\n The escape velocity of the planet.\n '''\n G = con.G\n M = self.mass\n R = self.radius\n return np.sqrt(2*G*M/R).to('km/s')\n\n\n @property\n def escape_parameter(self):\n '''\n The Jeans atmospheric escape parameter for atomic hydrogen,\n at the equilibrium temperature of the planet.\n '''\n k = con.k_B\n T = self.teq\n mu = 1\n m_p = con.m_p\n G = con.G\n M = self.mass\n R = self.radius\n\n e_thermal = k*T\n e_grav = G*M*m_p/R\n return (e_grav/e_thermal).decompose()\n\n @property\n def distance_modulus(self):\n '''\n The distance modulus to the system, in magnitudes.\n '''\n mu = 5*np.log10(self.distance/(10*u.pc))\n return mu\n\n def scale_height(self, mu=2.32):\n '''\n The scale height of the atmosphere, at equilibrium temperature.\n '''\n k = con.k_B\n T = self.teq\n m_p = con.m_p\n g = self.surface_gravity\n return (k*T/mu/m_p/g).to('km')\n\n\n def transmission_signal(self, mu=2.32, threshold=2):\n '''\n What is the transit depth of 1 scale height of an\n atmosphere transiting in front of the star?\n\n Parameters\n ----------\n mu : float\n Mean molecular weight (default 2.32 for H/He)\n threshold : float\n By how many sigma must the planet mass be detected?\n\n '''\n with np.errstate(invalid='ignore'):\n\n H = self.scale_height(mu)\n Rp = self.radius\n Rs = self.stellar_radius\n depth = (2*H*Rp/Rs**2).decompose()\n\n dlnm = self.uncertainty('mass')/self.mass\n bad = dlnm > 1/threshold\n depth[bad] = np.nan\n return depth\n\n\n def reflection_signal(self, albedo=1.0):\n '''\n What is the reflected light eclipse depth,\n for an albedo of 100%?\n '''\n return albedo*0.25*(self.radius/self.semimajor_axis).decompose()**2\n\n\n def emission_signal(self, wavelength=5*u.micron):\n '''\n What is the thermal emission eclipse depth,\n assuming Planck spectra for both star and planet?\n\n This calculation assumes a Bond albedo of 0\n and that heat is uniformly distributed over the planet.\n\n Parameters\n ----------\n wavelength : astropy.unit.Quantity\n The wavelength at which it should be calculated.\n '''\n\n # create thermal emission sources for both star and planet\n import rainbowconnection as rc\n star = rc.Thermal(teff=self.stellar_teff, radius=self.stellar_radius)\n planet = rc.Thermal(teff=self.teq, radius=self.radius)\n\n # calculate the depth as the luminosity ratio\n depths = planet.spectrum(wavelength)/star.spectrum(wavelength)\n\n\n return depths\n\n def stellar_brightness(self, wavelength=5*u.micron):\n '''\n How many 
photons/s/m^2/micron do we receive from the star?\n\n This is calculated from the distance, radius, and\n stellar effective temperature of the stars.\n\n (It could potentially be improved with PHOENIX\n model grids and/or cleverness with photometry.)\n\n Parameters\n ----------\n wavelength : astropy.unit.Quantity\n The wavelength at which it should be calculated.\n '''\n\n # import some tools for easy cartoon spectra\n import rainbowconnection as rc\n\n # create source with right temperature, size, distance\n teff, radius = self.stellar_teff, self.stellar_radius\n star = rc.Thermal(teff=teff,\n radius=radius).at(self.distance)\n\n # calculate the energy flux\n flux_in_energy = star.spectrum(wavelength)\n\n # convert to photon flux\n photon_energy = con.h*con.c/wavelength/u.ph\n flux_in_photons = flux_in_energy/photon_energy\n\n # return the photon flux in sensible units\n return flux_in_photons.to('ph s^-1 m^-2 micron^-1')\n\n def stellar_brightness_in_telescope_units(self, telescope_name='JWST', **kw):\n '''\n The stellar brightness, converted to telescope units.\n\n Parameters\n ----------\n telescope_name : str\n The name of the telescope.\n\n wavelength : astropy.unit.Quantity\n The wavelength at which it should be calculated.\n\n R : float\n The spectral resolution at which the\n telescope will bin wavelengths.\n\n dt : astropy.units.quantity.Quantity\n The time over which the telescope exposes.\n '''\n\n # what counts as 1 \"telescope unit\" (e.g. JWST at R=20 at 5 microns for 1 hour)\n telescope_unit = define_telescope_unit_by_name(telescope_name, **kw)\n\n # what's the photon flux (photons/m**2/s)\n flux_in_photons = self.stellar_brightness(telescope_unit.wavelength)\n\n # quote the brightness as (for example) gigaphotons/JWST at R=20 at 5 microns in 1 hour\n unit = lotsofphotons_unit/telescope_unit\n return flux_in_photons.to(unit)\n\n def depth_uncertainty(self, telescope_name='JWST',\n per_transit=False,\n dt=1*u.hour,\n **kw):\n '''\n What is the transit/eclipse depth uncertainty\n with a particular telescope\n at a particular wavelength\n at a particular resolution?\n\n By default, this will be calculated for one transit.\n Optionally, it can be calculated for a given amount of time instead.\n\n Parameters\n ----------\n telescope_name : str\n The name of the telescope.\n\n per_transit : bool\n If True, calculate the depth uncertainty for one transit.\n If False, calculate the depth uncertainty for a certain amount\n of in-transit time. You likely want to specify `dt` as a\n keyword argument to set that amount of in-transit time.\n In either case, an out-of-transit baseline equal to the\n total in-transit time will be assumed. This means the actual\n time cost will be twice the transit duration or `dt` chosen,\n and the depth uncertainty will be a factor sqrt(2) larger\n than the pure photon noise binned to the relevant timescale.\n\n wavelength : astropy.unit.Quantity\n The wavelength at which it should be calculated.\n\n R : float\n The spectral resolution at which the\n telescope will bin wavelengths.\n\n dt : astropy.units.quantity.Quantity\n The time over which the telescope exposes. If `per_transit=True`,\n this will be ignored. Otherwise, it will set the total amount\n of in-transit time observed, assuming that an equal amount of\n time will *also* be observed out of transit.\n '''\n\n # what counts as 1 \"telescope unit\" (e.g. 
JWST at R=20 at 5 microns for 1 hour)\n telescope_unit = define_telescope_unit_by_name(telescope_name,\n dt=dt,\n **kw)\n\n # what's the photon flux (photons/m**2/s)\n flux_in_photons = self.stellar_brightness(telescope_unit.wavelength)\n\n # what's the total collecting power?\n if per_transit:\n ratio_of_collecting_time = self.transit_duration/dt\n else:\n ratio_of_collecting_time = 1\n collecting_power = 1*telescope_unit*ratio_of_collecting_time\n\n # what's the total number of photons collected during transit\n N = (flux_in_photons*collecting_power).to(u.ph).value\n\n # what's the flux uncertainty on the time scale of one transit?\n sigma = 1/np.sqrt(N)\n\n # inflate by a factor of sqrt(2) for equal out-of-transit\n oot = np.sqrt(2)\n sigma_depth = sigma*oot\n\n return sigma_depth\n\n def _get_noise_and_unit(self, telescope_name='JWST',\n per_transit=False,\n **kw):\n '''\n Tiny helper to get the noise and the telescope_unit\n for a telescope observation of a planet.\n '''\n\n # figure out the noise\n noise = self.depth_uncertainty(telescope_name=telescope_name, per_transit=per_transit, **kw)\n\n # create a telescope unit (mostly to get a default wavelength)\n telescope_unit = define_telescope_unit_by_name(telescope_name, **kw)\n\n return noise, telescope_unit\n\n def emission_snr(self, telescope_name='JWST', **kw):\n '''\n What's the approximate S/N for the detection of the\n thermal emission eclipse of a planet?\n '''\n\n noise, telescope_unit = self._get_noise_and_unit(telescope_name=telescope_name, **kw)\n signal = self.emission_signal(wavelength=telescope_unit.wavelength)\n return signal/noise\n\n def reflection_snr(self, telescope_name='JWST', albedo=1, **kw):\n '''\n What's the approximate S/N for the detection of the\n reflected light eclipse of a planet?\n '''\n\n noise, telescope_unit = self._get_noise_and_unit(telescope_name=telescope_name, **kw)\n signal = self.reflection_signal(albedo=albedo)\n return signal/noise\n\n def transmission_snr(self, telescope_name='JWST', mu=2.32, threshold=2, **kw):\n '''\n What's the approximate S/N for the detection of the\n transmission signal of a planet?\n '''\n\n noise, telescope_unit = self._get_noise_and_unit(telescope_name=telescope_name, **kw)\n signal = self.transmission_signal(mu=mu, threshold=threshold)\n return signal/noise\n\n def scatter(self, xname, yname, c=None, s=None, names=True, xlog=True, ylog=True, **kw):\n '''\n Quick tool to plot one parameter against another.\n '''\n plt.ion()\n x, y = self.__getattr__(xname), self.__getattr__(yname)\n try:\n self.ax.cla()\n except:\n self.figure = plt.figure('Exoplanet Population')\n self.ax = plt.subplot()\n\n self.ax.set_xlabel(xname)\n self.ax.set_ylabel(yname)\n self.ax.scatter(x, y, c=c, s=s, **kw)\n # label the points with planet names, if requested\n if names:\n for i in range(len(x)):\n self.ax.text(x[i], y[i], self.name[i])\n if xlog:\n plt.xscale('log')\n if ylog:\n plt.yscale('log')\n\n plt.draw()\n\n def thumbtack(self, maxr=1000, dr=100, labels=False):\n '''Plot the planets as thumbtacks.'''\n def scale(d):\n return np.array(d)**1.5\n r = scale(self.distance)\n x, y = r*np.cos(self.ra*np.pi/180), r*np.sin(self.ra*np.pi/180)\n plt.ion()\n plt.figure('thumbtacks')\n\n ax = plt.subplot()\n ax.cla()\n ax.set_aspect('equal')\n theta = np.linspace(0,2*np.pi,1000)\n angle = -90*np.pi/180\n\n gridkw = dict(alpha=0.25, color='green')\n for originalradius in np.arange(dr,maxr*2,dr):\n radii = scale(originalradius)\n\n ax.plot(radii*np.cos(theta), radii*np.sin(theta), linewidth=3, **gridkw)\n ax.text(radii*np.cos(angle), 
radii*np.sin(angle), '{0:.0f} pc'.format(originalradius), rotation=90+ angle*180/np.pi, va='bottom', ha='center', size=13, weight='extra bold', **gridkw)\n\n ax.plot(x, y, marker='o', alpha=0.5, color='gray', linewidth=0, markeredgewidth=0)\n close = (self.name == 'WASP-94A b').nonzero()[0]#(self.distance < maxr).nonzero()[0]\n if labels:\n for c in close:\n plt.text(x[c], y[c], self.name[c])\n ax.set_xlim(-scale(maxr), scale(maxr))\n ax.set_ylim(-scale(maxr), scale(maxr))\n\n\n\n\n def compare(self, x='teq', y='radius', area='depth', color='stellar_radius'):\n\n xplot = getattr(self, x)\n yplot = getattr(self, y)\n colorplot = getattr(self, color)\n\n # scale the marker sizes so the largest corresponds to maxarea\n maxarea = 1000\n areas = getattr(self, area)\n sizeplot = np.sqrt(areas/np.nanmax(areas)*maxarea)\n\n plt.scatter(xplot, yplot, c=colorplot, s=sizeplot, linewidth=0, marker='o')\n\n\nclass PredefinedPopulation(Population):\n '''\n Population object that keeps track of an exoplanet population\n which can be ingested from raw files and cached in a standardized table.\n '''\n\n expiration = 0.00001\n def __init__(self, label='exoplanets', remake=False, skip_update=False, **plotkw):\n '''\n Initialize a population, by trying the following steps:\n 1) Load a standardized ascii table.\n 2) Ingest a raw table, and standardize it.\n\n Parameters\n ----------\n label : str\n The name of this population, for use both in filenames\n and labeling points on plots.\n remake : bool\n Should we re-ingest this table from its raw ingredients?\n skip_update : bool\n Should we skip checking for updates in the existing data?\n **plotkw : dict\n All other keywords are stored as plotting suggestions.\n '''\n\n # set the name for this population\n self.label = label\n\n try:\n # try to load the standardized table\n assert(remake == False)\n standard = self.load_standard(skip_update=skip_update)\n except (IOError,FileNotFoundError,AssertionError):\n # or create a new standardized table and save it\n standard = self.ingest_table(remake=remake)\n\n # initialize with a standard table\n Population.__init__(self,\n standard=standard,\n label=label,\n **plotkw)\n\n\n @property\n def fileprefix(self):\n '''\n Define a fileprefix for this population, to be used\n for setting the filename of the standardized population.\n '''\n return clean(self.label)\n\n def ingest_table(self, **kwargs):\n '''\n Ingest a new population table of arbitrary format,\n and then standardize it, using the tools defined in\n inherited population classes.'''\n\n\n # load the raw table\n raw = self.load_raw()\n\n # trim elements from raw table as necessary\n trimmed = self.trim_raw(raw)\n\n # create a standardized table from the array\n standard = self.create_standard(trimmed)\n\n # save the standardized table\n self.save_standard(standard)\n\n return standard\n\n @property\n def standard_path(self):\n '''\n Define the filepath for the standardized table.\n '''\n return os.path.join(directories['data'],\n f'standardized-{self.fileprefix}.txt')\n\n def load_raw(self):\n raise NotImplementedError('''\n Yikes! The `.load_raw` method has not been defined\n for whatever object is trying to call it!\n ''')\n\n def trim_raw(self, raw):\n '''\n Trim bad/unnecessary rows out of a raw table of planet properties.\n '''\n\n # no trimming necessary\n trimmed = raw\n\n # for debugging, hang onto the trimmed table as a hidden attribute\n self._trimmed = trimmed\n\n # a trimmed table\n return self._trimmed\n\n\n def load_standard(self, skip_update=False):\n '''\n Load a standardized population table. 
Generally this\n will be from a file like ~/.exoatlas/standardized-*.txt\n\n Parameters\n ----------\n skip_update : bool\n Should we skip checks to see if the data are too stale?\n\n Returns\n -------\n standard : astropy.table.Table\n A table of planet properties,\n with a minimal set of columns.\n '''\n\n # make sure this file is recent enough (unless we're skipping updates)\n if not skip_update:\n old = check_if_needs_updating(self.standard_path, self.expiration)\n assert(old == False)\n\n\n # keywords for reading a standardized table\n readkw = dict(format='ecsv', fill_values=[('',np.nan), ('--', np.nan)])\n\n standard = ascii.read(self.standard_path, **readkw)\n self.speak(f'Loaded standardized table from {self.standard_path}')\n\n # ??? change this to do something more clever with tables\n # masked = np.ma.filled(standard, fill_value = np.nan)\n\n return standard\n\n def save_standard(self, standard):\n '''\n Save the standardized table out to a text file\n like ~/.exoatlas/standardized-*.txt\n '''\n\n # save it as an ascii table for humans to read\n standard.write(self.standard_path,\n format='ascii.ecsv',\n overwrite=True)\n self.speak(f'Saved a standardized text table to {self.standard_path}')\n\n def create_table(self, desired_columns=['name',\n 'radius', 'relative_insolation',\n 'stellar_radius', 'stellar_teff',\n 'ra', 'dec', 'distance']):\n '''\n Create an astropy table based on this population,\n using a subset of columns, which may include ones\n that have been calculated as Population properties.\n\n Parameters\n ----------\n desired_columns : list\n The columns you want to include. Anything that\n can be accessed via Population.??? can be provided\n here as a string.\n\n Returns\n -------\n table : astropy.table.Table\n A table, with those columns, in the same order\n as the Population itself.\n '''\n # FIXME! 
need to add method support for arguments\n\n # create a dictionary with the desired columns\n d = {c:getattr(self, c) for c in desired_columns}\n\n # turn that into an astropy Table\n t = Table(d)\n\n return t\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-3495989945911375000,"string":"-3,495,989,945,911,375,000"},"line_mean":{"kind":"number","value":30.4161958569,"string":"30.416196"},"line_max":{"kind":"number","value":194,"string":"194"},"alpha_frac":{"kind":"number","value":0.5621428286,"string":"0.562143"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109573,"cells":{"repo_name":{"kind":"string","value":"ajaybhatia/ypg-odm-project"},"path":{"kind":"string","value":"src/profiles/migrations/0001_initial.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1466"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Generated by Django 1.10.1 on 2016-09-30 09:57\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('authtools', '0003_auto_20160128_0912'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),\n ('slug', models.UUIDField(blank=True, default=uuid.uuid4, editable=False)),\n ('odm', models.CharField(choices=[('Tinno', 'Tinno'), ('Huaqin', 'Huaqin'), ('IMG', 'IMG'), ('Ragentek', 'Ragentek'), ('Wingtech', 'Wingtech'), ('Coolpad', 'Coolpad'), ('Amer', 'Amer'), ('Sprocomm', 'Sprocomm'), ('Topwisez', 'Topwisez')], default='Tinno', max_length=100)),\n ('picture', models.ImageField(blank=True, null=True, upload_to='profile_pics/%Y-%m-%d/', verbose_name='Profile picture')),\n ('bio', models.CharField(blank=True, max_length=200, null=True, verbose_name='Short Bio')),\n ('email_verified', models.BooleanField(default=False, verbose_name='Email verified')),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-2959312260762456600,"string":"-2,959,312,260,762,456,600"},"line_mean":{"kind":"number","value":42.1176470588,"string":"42.117647"},"line_max":{"kind":"number","value":289,"string":"289"},"alpha_frac":{"kind":"number","value":0.6002728513,"string":"0.600273"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109574,"cells":{"repo_name":{"kind":"string","value":"bobbydurrett/PythonDBAGraphs"},"path":{"kind":"string","value":"ashcount.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4982"},"content":{"kind":"string","value":"\"\"\"\nPythonDBAGraphs: Graphs to help with Oracle Database Tuning\nCopyright (C) 2016 Robert Taft Durrett (Bobby Durrett)\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see .\n\nContact:\n\nbobby@bobbydurrettdba.com\n\nashcount.py\n\nShows ASH active session counts in time period. \n\n\"\"\"\n\nimport myplot\nimport util\n\ndef dbaashcount(start_time,end_time,instance_number):\n \"\"\"\n Group by minute.\n 10 second samples.\n dba table\n \"\"\"\n q_string = \"\"\"\ncreate table dbaashcount as\nselect\nto_char(all_time.sample_time,'YYYY/MM/DD HH24:MI') date_minute,\nsum(all_time.cnt)/6 all_count,\nsum(nvl(cpu_time.cnt,0))/6 cpu_count\nfrom\n(select \nsample_time,\ncount(*) cnt\nfrom DBA_HIST_ACTIVE_SESS_HISTORY a\nwhere \nsample_time \nbetween \nto_date('\"\"\" \n q_string += start_time\n q_string += \"\"\"','DD-MON-YYYY HH24:MI:SS')\nand \nto_date('\"\"\"\n q_string += end_time\n q_string += \"\"\"','DD-MON-YYYY HH24:MI:SS')\nand a.INSTANCE_NUMBER = \"\"\"\n q_string += instance_number\n q_string += \"\"\"\ngroup by sample_time) all_time,\n(select \nsample_time,\ncount(*) cnt\nfrom DBA_HIST_ACTIVE_SESS_HISTORY a\nwhere \nsample_time \nbetween \nto_date('\"\"\"\n q_string += start_time\n q_string += \"\"\"','DD-MON-YYYY HH24:MI:SS')\nand \nto_date('\"\"\"\n q_string += end_time\n q_string += \"\"\"','DD-MON-YYYY HH24:MI:SS') and\nsession_state = 'ON CPU'\nand a.INSTANCE_NUMBER = \"\"\"\n q_string += instance_number\n q_string += \"\"\"\ngroup by sample_time) cpu_time\nwhere\nall_time.sample_time=cpu_time.sample_time(+)\ngroup by to_char(all_time.sample_time,'YYYY/MM/DD HH24:MI')\n\"\"\"\n return q_string\n \ndef vdollarashcount(start_time,end_time,instance_number):\n \"\"\"\n Group by minute.\n 1 second samples.\n v$ table\n \"\"\"\n q_string = \"\"\"\ncreate table combinedashcount as\nselect\nto_char(all_time.sample_time,'YYYY/MM/DD HH24:MI') date_minute,\nsum(all_time.cnt)/60 all_count,\nsum(nvl(cpu_time.cnt,0))/60 cpu_count\nfrom\n(select \nsample_time,\ncount(*) cnt\nfrom GV$ACTIVE_SESSION_HISTORY a\nwhere \nsample_time \nbetween \nto_date('\"\"\" \n q_string += start_time\n q_string += \"\"\"','DD-MON-YYYY HH24:MI:SS')\nand \nto_date('\"\"\"\n q_string += end_time\n q_string += \"\"\"','DD-MON-YYYY HH24:MI:SS')\nand a.INST_ID = \"\"\"\n q_string += instance_number\n q_string += \"\"\"\ngroup by sample_time) all_time,\n(select \nsample_time,\ncount(*) cnt\nfrom GV$ACTIVE_SESSION_HISTORY a\nwhere \nsample_time \nbetween \nto_date('\"\"\"\n q_string += start_time\n q_string += \"\"\"','DD-MON-YYYY HH24:MI:SS')\nand \nto_date('\"\"\"\n q_string += end_time\n q_string += \"\"\"','DD-MON-YYYY HH24:MI:SS') and\nsession_state = 'ON CPU'\nand a.INST_ID = \"\"\"\n q_string += instance_number\n q_string += \"\"\"\ngroup by sample_time) cpu_time\nwhere\nall_time.sample_time=cpu_time.sample_time(+)\ngroup by to_char(all_time.sample_time,'YYYY/MM/DD HH24:MI')\n\"\"\"\n return q_string\n\ndatabase,dbconnection = util.script_startup('ASH active session counts')\n\nstart_time=util.input_with_default('Start date and time (DD-MON-YYYY HH24:MI:SS)','01-JAN-1900 12:00:00')\n\nend_time=util.input_with_default('End date and time (DD-MON-YYYY HH24:MI:SS)','01-JAN-2200 12:00:00')\n\ninstance_number=util.input_with_default('Database Instance (1 if not RAC)','1')\n\n# first get ash counts by minutes from dba view\n\ndbconnection.run_return_no_results_catch_error(\"drop table dbaashcount\")\n \ndbacrtable = dbaashcount(start_time,end_time,instance_number)\n\ndbconnection.run_return_no_results(dbacrtable);\n\n# now get from 
the GV$ ASH view; put the results in the combined table first\n\ndbconnection.run_return_no_results_catch_error(\"drop table combinedashcount\")\n\nvdcrtable = vdollarashcount(start_time,end_time,instance_number)\n\ndbconnection.run_return_no_results(vdcrtable)\n\n# insert dba rows for date and minute not in v$\n\ninsert_sql = \"\"\"\ninsert into combinedashcount\nselect * from dbaashcount d\nwhere d.date_minute not in\n(select date_minute from combinedashcount)\"\"\"\n\ndbconnection.run_return_no_results(insert_sql)\n\ndbconnection.commit()\n\nquerytext = \"\"\"\nselect\nto_date(DATE_MINUTE,'YYYY/MM/DD HH24:MI'),\nALL_COUNT,\nCPU_COUNT\nfrom combinedashcount\norder by date_minute\"\"\"\n \nresults = dbconnection.run_return_flipped_results(querytext)\n\nutil.exit_no_results(results)\n\n# plot query\n\nmyplot.xdatetimes = results[0]\nmyplot.ylists = results[1:]\n \nmyplot.title = \"ASH active session count for \"+database+\" database, instance \"+instance_number\nmyplot.ylabel1 = \"Sessions\"\n \nmyplot.ylistlabels=[\"Total\",\"CPU\"]\n\nmyplot.line()\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":5110317334580013000,"string":"5,110,317,334,580,013,000"},"line_mean":{"kind":"number","value":23.7860696517,"string":"23.78607"},"line_max":{"kind":"number","value":105,"string":"105"},"alpha_frac":{"kind":"number","value":0.7027298274,"string":"0.70273"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109575,"cells":{"repo_name":{"kind":"string","value":"Zerack/zoll.me"},"path":{"kind":"string","value":"leapday/templatetags/leapday_extras.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1051"},"content":{"kind":"string","value":"'''\nJames D. Zoll\n\n4/15/2013\n\nPurpose: Defines template tags for the Leapday Recipedia application.\n\nLicense: This is a public work.\n'''\n\nfrom django import template\nregister = template.Library()\n\n@register.filter()\ndef css_name(value):\n ''' \n Returns the lower-case hyphen-replaced display name,\n which is used as the css class for the good.\n \n Keyword Arguments:\n value -> Good. 
# ===== repo: Zerack/zoll.me · path: leapday/templatetags/leapday_extras.py · license: mit =====

'''
James D. Zoll

4/15/2013

Purpose: Defines template tags for the Leapday Recipedia application.

License: This is a public work.
'''

from django import template
register = template.Library()

@register.filter()
def css_name(value):
    '''
    Returns the lower-case hyphen-replaced display name,
    which is used as the css class for the good.

    Keyword Arguments:
    value -> Good. The good to get the css class for.
    '''

    return value.lower().replace(' ', '-')

@register.filter()
def desc_value_sort(value):
    '''
    Designed to sort the results of .iteritems() on a dict of goods
    for the index.

    value -> List of tuples.
    '''

    return sorted(value, key=lambda x: x[1]['active']['value'], reverse=True)

@register.filter()
def base_good_display_name(value):
    # maps internal good keys to their display names
    BASE_GOODS = {'good_water': 'Water',
                  'good_food': 'Food',
                  'good_wood': 'Wood',
                  'good_stone': 'Stone',
                  'goodtype_crystal': 'Crystal'}
    return BASE_GOODS[value]
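# A minimal usage sketch (assumed, not from the repo): loaded in a Django
# template, the filters above would be applied roughly like this, where
# `good` is a hypothetical display name and `goods` a dict of goods:
#
#   {% load leapday_extras %}
#   <div class="{{ good|css_name }}">{{ good }}</div>
#   {% for name, info in goods.items|desc_value_sort %} ... {% endfor %}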
# ===== repo: karlnapf/ozone-roulette · path: russian_roulette/RussianRoulette.py · license: bsd-2-clause =====

from abc import abstractmethod
from numpy.lib.function_base import delete
from numpy.ma.core import mean, zeros, log, arange, std
from numpy.random import permutation, rand
import logging

class RussianRoulette(object):
    def __init__(self, threshold, block_size=1):
        self.threshold = threshold
        self.block_size = block_size

    @abstractmethod
    def get_estimate(self, estimates, index):
        # base implementation: average one block of estimates
        start_idx = index * self.block_size
        stop_idx = index * self.block_size + self.block_size

        # if there are enough samples, use them, sub-sample if not
        if stop_idx <= len(estimates):
            logging.debug("Averaging over %d samples from index %d to %d" %
                          (self.block_size, start_idx, stop_idx))
            indices = arange(start_idx, stop_idx)
        else:
            logging.debug("Averaging over a random subset of %d samples" %
                          self.block_size)
            indices = permutation(len(estimates))[:self.block_size]

        return mean(estimates[indices])

    def exponential(self, estimates):
        logging.debug("Entering")

        # find a strict lower bound on the estimates and remove it from the list
        bound = estimates.min()
        bound_idx = estimates.argmin()
        estimates = delete(estimates, bound_idx)
        estimates = estimates - bound

        # find an integer close to the mean of the transformed estimates and divide
        E = max(int(round(abs(mean(estimates)))), 1)
        estimates = estimates / E

        logging.info("Using %f as lower bound on estimates" % bound)
        logging.info("Computing product of E=%d RR estimates" % E)
        logging.info("Std-deviation after scaling is %f" % std(estimates))

        # index for iterating through the used estimates
        # (might be averaged, so might be lower than the number of available
        # estimates if the block size is greater than one)
        estimate_idx = 0

        samples = zeros(E)
        for iteration in range(E):
            weight = 1

            # start with x^0 which is 1
            samples[iteration] = 1
            term = 1

            # index for computed samples
            series_term_idx = 1

            while weight > 0:
                # update current term of infinite series
                # average over block
                x_inner = self.get_estimate(estimates, estimate_idx)
                term *= (x_inner / series_term_idx)

                # if summation has reached threshold, update weights
                if abs(term) < self.threshold:
                    q = term / self.threshold
                    if rand() < q:
                        # continue and update weight
                        weight = weight / q
                    else:
                        # stop summation
                        weight = 0

                samples[iteration] += weight * term
                estimate_idx += 1
                series_term_idx += 1

            logging.info("RR estimate %d/%d with threshold %.2f is %.4f and took %d series terms" %
                         (iteration + 1, E, self.threshold, samples[iteration], series_term_idx))

        # now put things together. Note that samples contains an unbiased estimate
        # which might be quite small. However, due to the removal of the bound,
        # this will not cause an underflow and we can just take the log.
        logging.debug("Leaving")
        return bound + sum(log(samples))
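# A minimal usage sketch (the data below is synthetic, not part of the
# module): exponential() consumes an array of unbiased estimates and returns
# an estimate of the log of their exponentiated mean, built from a randomly
# truncated exponential series.
import numpy
from russian_roulette.RussianRoulette import RussianRoulette  # assumed import path

estimates = numpy.random.randn(1000) + 5.0      # synthetic stand-in data
rr = RussianRoulette(threshold=0.1, block_size=10)
log_estimate = rr.exponential(estimates)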
# ===== repo: qedsoftware/commcare-hq · path: custom/ilsgateway/zipline/reports/zipline_warehouse_order_report.py · license: bsd-3-clause =====

from collections import namedtuple

from django.utils.translation import ugettext_lazy as _

from corehq.apps.reports.filters.dates import DatespanFilter
from corehq.apps.reports.filters.fixtures import AsyncLocationFilter
from custom.ilsgateway.zipline.data_sources.zipline_warehouse_order_data_source import \
    ZiplineWarehouseOrderDataSource
from custom.ilsgateway.zipline.filters import EmergencyOrderStatusChoiceFilter
from custom.ilsgateway.zipline.reports.zipline_report import ZiplineReport

ReportConfig = namedtuple(
    'ReportConfig', ['domain', 'start_date', 'end_date', 'location_id', 'statuses']
)


class ZiplineWarehouseOrderReport(ZiplineReport):

    report_title = _('Zipline Warehouse - Order')
    name = _('Zipline Warehouse - Order')
    slug = 'zipline_warehouse_order'

    fields = [
        DatespanFilter,
        AsyncLocationFilter,
        EmergencyOrderStatusChoiceFilter
    ]

    @property
    def report_config(self):
        return ReportConfig(
            domain=self.domain,
            start_date=self.datespan.startdate,
            end_date=self.datespan.end_of_end_day,
            location_id=self.location_id,
            statuses=self.statuses
        )

    @property
    def data_source(self):
        return ZiplineWarehouseOrderDataSource(self.report_config)

    @property
    def shared_pagination_GET_params(self):
        return [
            dict(name='startdate', value=self.datespan.startdate_display),
            dict(name='enddate', value=self.datespan.enddate_display),
            dict(name='location_id', value=self.location_id),
            dict(name='statuses', value=self.statuses)
        ]

# ===== repo: T-002/pycast · path: pycast/common/json_encoder.py · license: mit =====

# !/usr/bin/env python
# -*- coding: UTF-8 -*-

# Copyright (c) 2012-2015 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import json
from pycastobject import PyCastObject

class PycastEncoder(json.JSONEncoder, PyCastObject):

    """Encodes a PyCastObject to json."""

    def default(self, obj):
        # Cannot use the to_json method, because it returns a string rather
        # than a serializable list.
        return obj.to_twodim_list()
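# A minimal usage sketch (assumed): any object exposing to_twodim_list()
# serializes by passing the encoder class to json.dumps; the `timeseries`
# name here is hypothetical.
#
#   import json
#   from pycast.common.json_encoder import PycastEncoder
#   payload = json.dumps(timeseries, cls=PycastEncoder)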
# ===== repo: sanja7s/SR_Twitter · path: src_MONGO/monthly_user_tweets_clean_2_json.py · license: mit =====

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Go through user tweets, collect them per user, clean them, and save them in
JSON format for each month. We end up with 7 months, since May and Nov are
only half included. The output is 7 JSON-formatted files ready to be loaded
into MongoDB.
"""
from nltk.tokenize import wordpunct_tokenize
from nltk.corpus import stopwords
from nltk.book import FreqDist
from collections import defaultdict
import codecs
import matplotlib.pyplot as plt
import pylab as P
import numpy as np
import glob, os
import datetime
import json

IN_DIR = "../../../DATA/General/"
os.chdir(IN_DIR)

F_IN = "usrs_with_more_than_20_tweets.dat"

F_OUT_5 = codecs.open("5_tweets.dat", "w")
F_OUT_6 = codecs.open("6_tweets.dat", "w")
F_OUT_7 = codecs.open("7_tweets.dat", "w")
F_OUT_8 = codecs.open("8_tweets.dat", "w")
F_OUT_9 = codecs.open("9_tweets.dat", "w")
F_OUT_10 = codecs.open("10_tweets.dat", "w")
F_OUT_11 = codecs.open("11_tweets.dat", "w")
F_OUT_12 = codecs.open("12_tweets.dat", "w")  # opened but absent from f_out_list below

f_out_list = {5: F_OUT_5, 6: F_OUT_6, 7: F_OUT_7, 8: F_OUT_8, 9: F_OUT_9,
              10: F_OUT_10, 11: F_OUT_11}

ENGLISH_STOPWORDS = set(stopwords.words('english'))
def clean(tweet):
    return [i.lower() for i in tweet if i.isalpha() and i not in ENGLISH_STOPWORDS and i != 'RT']

def extract_monthly_user_CV_and_num_tweets():

    user_monthly_tweets = defaultdict(int)
    user_monthly_count = defaultdict(int)
    cnt_all_tweets = 0

    with codecs.open(F_IN, 'r', encoding='utf8') as input_file:
        # the code loops through the input, collects tweets text for each user into a dict
        for line in input_file:
            cnt_all_tweets += 1
            line = line.split()
            user = line[0]
            if user not in user_monthly_tweets:
                user_monthly_tweets[user] = defaultdict(list)
                user_monthly_count[user] = defaultdict(int)
            UTS = long(line[4])
            month = datetime.datetime.utcfromtimestamp(UTS).month
            tweet = line[5:]
            user_monthly_tweets[user][month] += clean(tweet)
            user_monthly_count[user][month] += 1
            if cnt_all_tweets % 100000 == 0:
                print tweet, clean(tweet)
    print "Processed %d tweets" % cnt_all_tweets

    for user in user_monthly_tweets:
        for MO in user_monthly_tweets[user]:
            output_file = f_out_list[MO]
            usr_tweets_json = {}
            usr_tweets_json['_id'] = str(user)
            usr_tweets_json['count'] = str(user_monthly_count[user][MO])
            usr_tweets_json['txt'] = [{el[0]: el[1]} for el in FreqDist(user_monthly_tweets[user][MO]).iteritems()]
            output_file.write(unicode(json.dumps(usr_tweets_json, ensure_ascii=False)) + '\n')

extract_monthly_user_CV_and_num_tweets()
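# For reference, each line written above is one MongoDB-ready document with
# this shape (values below are illustrative, not real data):
#
#   {"_id": "12345", "count": "42", "txt": [{"weather": 3}, {"rain": 1}]}
#
# so a later mongoimport of, e.g., 5_tweets.dat yields one document per
# user for May.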
# ===== repo: giffordw/OSMOSreduce · path: proc4k.py · license: bsd-3-clause =====

#! /usr/bin/env python
#
# Paul Martini (OSU)
#
# proc4k.py files
#
# Perform the overscan subtraction and remove the relative gain
# differences for a single R4K image or a list of R4K images.
# Also works for MDM4K.
#
# Steps:
#  1. determine if input is a file or list of files
#  2. identify binning, size of overscan
#  3. remove overscan and trim
#  4. remove relative gain variations (TBD)
#
#  8 Sep 2011: initial version for just bias subtraction
# 12 Sep 2011: tested, adapted to run on MDM computers, work on MDM4K data
# 16 Sep 2011: added glob module
#  8 Feb 2012: fixed error in even/odd column definitions, added more
#              tracking and debugging information
#
#-----------------------------------------------------------------------------
#from __future__ import division
import string as str
import os
from sys import argv, exit
import numpy as np
from astropy.io import fits as pyfits
import glob
from matplotlib import pyplot

# Version and Date

versNum = "1.1.0"
versDate = "2012-02-08"

############################
#### Define various routines
############################

scriptname = argv[0][str.rfind(argv[0], "/") + 1::]


def usage():
    print "\nUsage for %s v%s (%s):" % (scriptname, versNum, versDate)
    print "\t%s file.fits [or file*.fits or file1.fits file2.fits]" % (scriptname)
    print "\nWhere: file.fits, file*.fits, etc. are fits files\n"


def parseinput():
    flags = []
    files = []
    # check for any command-line arguments and input files
    for i in range(1, len(argv)):
        if str.find(argv[i], "-") == 0:
            flags.append(argv[i].strip("-"))
        else:
            files.append(argv[i])
    # check that the input files exist (start at 0 so the first file is checked too)
    for i in range(len(files)):
        if os.path.isfile(files[i]) == 0:
            print "\n** ERROR: " + files[i] + " does not exist."
            exit(1)
    return files, flags


def filt(x, l):
    y = [0] * len(x)
    c = 0.6745

    for a in range(0, len(x)):
        y[a] = l[x[a]]
    m = np.median(y)
    print m
    dv = [elm - m for elm in y]
    mad = np.median(np.fabs(dv) / c)  # Median-Absolute-Deviation
    # print m + mad / 2
    for b in range(0, len(y)):
        if y[b] > m + 20 * mad / 2 or y[b] < m - 20 * mad / 2:
            print "reject: %d " % b
            y[b] = m
    return y


def ftlgd(x, l, i, d):
    coe = np.polynomial.legendre.legfit(x, l, d)
    return np.polynomial.legendre.legval(i, coe)

############################
#### Script starts here ####
############################

Debug = False
BiasSingle = 0
BiasRow = 1
BiasFit = 2
#BiasType = BiasRow
BiasType = BiasSingle
#BiasType = BiasFit
Gain = False    # keep as False until gain values are known
R4K = True

# Gain values for each amplifier [to be computed]
r4k_gain_q1e = 1.0
r4k_gain_q1o = 1.0
r4k_gain_q2e = 1.0
r4k_gain_q2o = 1.0
r4k_gain_q3e = 1.0
r4k_gain_q3o = 1.0
r4k_gain_q4e = 1.0
r4k_gain_q4o = 1.0
mdm4k_gain_q1 = 1.0
mdm4k_gain_q2 = 1.0
mdm4k_gain_q3 = 1.0
mdm4k_gain_q4 = 1.0

# switch to more primitive (slower) code at MDM
AT_MDM = False
user = os.getlogin()
if str.find(user, 'obs24m') >= 0 or str.find(user, 'obs13m') >= 0:
    AT_MDM = True

files = []
for input in argv[1:]:
    files = files + glob.glob(input)

if len(files) == 0:
    usage()
    exit(1)

for file in files:
    if os.path.isfile(file):
        fitsfile = pyfits.open(file)
        naxis1 = fitsfile[0].header['NAXIS1']
        naxis2 = fitsfile[0].header['NAXIS2']
        overscanx = fitsfile[0].header['OVERSCNX']
        overscany = fitsfile[0].header['OVERSCNY']  # should be 0
        ccdxbin = fitsfile[0].header['CCDXBIN']
        ccdybin = fitsfile[0].header['CCDYBIN']
        detector = fitsfile[0].header['DETECTOR']
        telescope = fitsfile[0].header['TELESCOP']
        overscanx /= ccdxbin
        overscany /= ccdybin
        # OSMOS or direct? [useful for knowing if MIS keywords have values]
        OSMOS = True
        if str.find(telescope, 'McGraw') >= 0:
            OSMOS = False  # direct image with the 1.3m

        #print file, naxis1, naxis2, overscanx, overscany, detector
        print "Processing %s[%d:%d] OVERSCANX=%d OVERSCANY=%d from %s obtained at the %s" \
            % (file, naxis1, naxis2, overscanx, overscany, detector, telescope)

        if overscanx * ccdxbin < 32:
            print "Error: OVERSCNX=%d less than 32 in %s" % (overscanx, file)
            exit(1)
        if overscany > 0:
            print "Error: code not tested with OVERSCNY > 0!"
            exit(1)
        if str.find(detector, 'R4K') < 0:
            # if not R4K, assume MDM4K
            R4K = False
        # IRAF units: 1:32, 33:556, 557:1080, 1081:1112
        # Python units: 0:31, 32:555, 556:1079, 1080:1111
        c1 = overscanx                  # 32   first image column counting from *zero*
        c2 = int(0.5 * naxis1) - 1      # 555  last image column on first half
        c3 = c2 + 1                     # 556  first image column on second half
        c4 = naxis1 - overscanx - 1     # 1079 last image column
        r1 = overscany                  # 0    first image row
        r2 = int(0.5 * naxis2) - 1      # 523  last image row on first half
        r3 = r2 + 1                     # 524  first image row on second half
        r4 = naxis2 - overscany - 1     # 1047 last image row
        outnaxis1 = c4 - c1 + 1         # 1048 columns in output, trimmed image
        outnaxis2 = r4 - r1 + 1         # 1048 rows in output, trimmed image
        collen = int(0.5 * outnaxis1)   # number of rows in an image quadrant
        rowlen = int(0.5 * outnaxis2)   # number of rows in an image quadrant

        # Assumed layout (ds9 perspective):
        #   q2  q4
        #   q1  q3
        # each R4K quadrant has an even 'e' and an odd 'o' amplifier

        if Debug:
            print "Quadrants in IRAF pixels: "
            print " q1: [%d : %d, %d : %d] " % (c1 + 1, c2 + 1, r1 + 1, r2 + 1)
            print " q2: [%d : %d, %d : %d] " % (c1 + 1, c2 + 1, r3 + 1, r4 + 1)
            print " q3: [%d : %d, %d : %d] " % (c3 + 1, c4 + 1, r1 + 1, r2 + 1)
            print " q4: [%d : %d, %d : %d] " % (c3 + 1, c4 + 1, r3 + 1, r4 + 1)

        ## Calculate the bias level for each amplifier
        data = fitsfile[0].data
        # identify the columns to use to calculate the bias level
        # skip the first and last columns of the overscan
        # changed to 'list' for hiltner due to primitive python version
        starti = 4 / ccdxbin
        if AT_MDM:
            if R4K:
                cols_over_q1e = list(np.arange(starti, overscanx - 2, 2))
                cols_over_q1o = list(np.arange(starti + 1, overscanx - 2, 2))
                cols_over_q2e = cols_over_q1e
                cols_over_q2o = cols_over_q1o
                cols_over_q3e = list(np.arange(naxis1 - overscanx + starti, naxis1 - 2, 2))
                cols_over_q3o = list(np.arange(naxis1 - overscanx + starti + 1, naxis1 - 2, 2))
                cols_over_q4e = cols_over_q3e
                cols_over_q4o = cols_over_q3o
                cols_q1e = list(np.arange(c1, c2, 2))
                cols_q1o = list(np.arange(c1 + 1, c2 + 2, 2))
                cols_q2e = cols_q1e
                cols_q2o = cols_q1o
                cols_q3e = list(np.arange(c3, c4, 2))
                cols_q3o = list(np.arange(c3 + 1, c4 + 2, 2))
                cols_q4e = cols_q3e
                cols_q4o = cols_q3o
            else:
                cols_over_q1 = list(np.arange(starti, overscanx - 2, 1))
                cols_over_q2 = cols_over_q1
                cols_over_q3 = list(np.arange(naxis1 - overscanx + starti, naxis1 - 2, 1))
                cols_over_q4 = cols_over_q3
                cols_q1 = list(np.arange(c1, c2 + 1, 1))
                cols_q2 = cols_q1
                cols_q3 = list(np.arange(c3, c4 + 1, 1))
                cols_q4 = cols_q3
        else:
            if R4K:
                # identify the even and odd columns in the overscan
                cols_over_q1e = np.arange(starti, overscanx - starti, 2)
                cols_over_q1o = np.arange(starti + 1, overscanx - starti, 2)
                cols_over_q2e = cols_over_q1e
                cols_over_q2o = cols_over_q1o
                cols_over_q3e = np.arange(naxis1 - overscanx + starti, naxis1 - starti, 2)
                cols_over_q3o = np.arange(naxis1 - overscanx + starti + 1, naxis1 - starti, 2)
                cols_over_q4e = cols_over_q3e
                cols_over_q4o = cols_over_q3o
                # identify the even and odd columns in each quadrant
                cols_q1e = np.arange(c1, c2, 2)
                cols_q2e = cols_q1e
                cols_q1o = np.arange(c1 + 1, c2 + 2, 2)
                cols_q2o = cols_q1o
                cols_q3e = np.arange(c3, c4, 2)
                cols_q4e = cols_q3e
                cols_q3o = np.arange(c3 + 1, c4 + 2, 2)
                cols_q4o = cols_q3o
            else:
                cols_over_q1 = np.arange(starti, overscanx - 2, 1)
                cols_over_q2 = cols_over_q1
                cols_over_q3 = np.arange(naxis1 - overscanx + starti, naxis1 - 2, 1)
                cols_over_q4 = cols_over_q3
                cols_q1 = np.arange(c1, c2 + 1, 1)
                cols_q2 = cols_q1
                cols_q3 = np.arange(c3, c4 + 1, 1)
                cols_q4 = cols_q3
        if Debug:
            print "Overscan columns: "
            print "Q1/Q2 overscan even first and last columns:", cols_over_q1e[0], cols_over_q1e[-1], len(cols_over_q1e)
            print "Q1/Q2 overscan odd first and last columns:", cols_over_q1o[0], cols_over_q1o[-1], len(cols_over_q1o)
            print "Q3/Q4 overscan even first and last columns:", cols_over_q3e[0], cols_over_q3e[-1], len(cols_over_q3e)
            print "Q3/Q4 overscan odd first and last columns:", cols_over_q3o[0], cols_over_q3o[-1], len(cols_over_q3o)
        if Debug:
            print "Image columns: "
            print "Q1/Q2 even first and last columns:", cols_q1e[0], cols_q1e[-1], len(cols_q1e), r1, r2, len(cols_q1e)
            print "Q1/Q2 odd first and last columns:", cols_q1o[0], cols_q1o[-1], len(cols_q1o), r1 + rowlen, r2 + rowlen, len(cols_q1o)
            print "Q3/Q4 even first and last columns:", cols_q3e[0], cols_q3e[-1], len(cols_q3e), r1, r2, len(cols_q3e)
            print "Q3/Q4 odd first and last columns:", cols_q3o[0], cols_q3o[-1], len(cols_q3o), r1 + rowlen, r2 + rowlen, len(cols_q3o)

        # create arrays with the median overscan vs. row for each amplifier
        if R4K:
            bias_q1e = np.zeros(rowlen, dtype=float)
            bias_q1o = np.zeros(rowlen, dtype=float)
            bias_q2e = np.zeros(rowlen, dtype=float)
            bias_q2o = np.zeros(rowlen, dtype=float)
            bias_q3e = np.zeros(rowlen, dtype=float)
            bias_q3o = np.zeros(rowlen, dtype=float)
            bias_q4e = np.zeros(rowlen, dtype=float)
            bias_q4o = np.zeros(rowlen, dtype=float)
        else:
            bias_q1 = np.zeros(rowlen, dtype=float)
            bias_q2 = np.zeros(rowlen, dtype=float)
            bias_q3 = np.zeros(rowlen, dtype=float)
            bias_q4 = np.zeros(rowlen, dtype=float)
        # calculate 1-D bias arrays for each amplifier
        for i in range(r1, r2 + 1, 1):
            if R4K:
                bias_q1e[i] = np.median(data[i, cols_over_q1e])   # data[rows, columns]
                bias_q1o[i] = np.median(data[i, cols_over_q1o])
                bias_q2e[i] = np.median(data[i + rowlen, cols_over_q2e])
                bias_q2o[i] = np.median(data[i + rowlen, cols_over_q2o])
                bias_q3e[i] = np.median(data[i, cols_over_q3e])
                bias_q3o[i] = np.median(data[i, cols_over_q3o])
                bias_q4e[i] = np.median(data[i + rowlen, cols_over_q4e])
                bias_q4o[i] = np.median(data[i + rowlen, cols_over_q4o])
            else:  # MDM4K
                bias_q1[i] = np.median(data[i, cols_over_q1])     # data[rows, columns]
                bias_q2[i] = np.median(data[i + rowlen, cols_over_q2])
                bias_q3[i] = np.median(data[i, cols_over_q3])
                bias_q4[i] = np.median(data[i + rowlen, cols_over_q4])

        ######################################################################
        # Subtract the bias from the output
        ######################################################################

        if BiasType == BiasSingle:
            OverscanKeyValue = 'BiasSingle'
            suffix = 'b'
            # subtract a single bias value for each amplifier
            if R4K:
                bq1e = np.median(bias_q1e)
                bq1o = np.median(bias_q1o)
                bq2e = np.median(bias_q2e)
                bq2o = np.median(bias_q2o)
                bq3e = np.median(bias_q3e)
                bq3o = np.median(bias_q3o)
                bq4e = np.median(bias_q4e)
                bq4o = np.median(bias_q4o)
                if AT_MDM:
                    for r in range(r1, r2 + 1):
                        for c in cols_q1e:
                            data[r, c] -= bq1e
                        for c in cols_q1o:
                            data[r, c] -= bq1o
                        for c in cols_q2e:
                            data[r + rowlen, c] -= bq2e
                        for c in cols_q2o:
                            data[r + rowlen, c] -= bq2o
                        for c in cols_q3e:
                            data[r, c] -= bq3e
                        for c in cols_q3o:
                            data[r, c] -= bq3o
                        for c in cols_q4e:
                            data[r + rowlen, c] -= bq4e
                        for c in cols_q4o:
                            data[r + rowlen, c] -= bq4o
                else:
                    data[r1:r2 + 1, cols_q1e] -= bq1e
                    data[r1:r2 + 1, cols_q1o] -= bq1o
                    data[r3:r4 + 1, cols_q2e] -= bq2e
                    data[r3:r4 + 1, cols_q2o] -= bq2o
                    data[r1:r2 + 1, cols_q3e] -= bq3e
                    data[r1:r2 + 1, cols_q3o] -= bq3o
                    data[r3:r4 + 1, cols_q4e] -= bq4e
                    data[r3:r4 + 1, cols_q4o] -= bq4o
            else:
                bq1 = np.median(bias_q1)
                bq2 = np.median(bias_q2)
                bq3 = np.median(bias_q3)
                bq4 = np.median(bias_q4)
                if AT_MDM:
                    for r in range(r1, r2 + 1):
                        for c in cols_q1:
                            data[r, c] -= bq1
                        for c in cols_q2:
                            data[r + rowlen, c] -= bq2
                        for c in cols_q3:
                            data[r, c] -= bq3
                        for c in cols_q4:
                            data[r + rowlen, c] -= bq4
                else:
                    data[r1:r2 + 1, cols_q1] -= bq1
                    data[r3:r4 + 1, cols_q2] -= bq2
                    data[r1:r2 + 1, cols_q3] -= bq3
                    data[r3:r4 + 1, cols_q4] -= bq4
        elif BiasType == BiasRow:
            # not implemented on Hiltner, for MDM4K, etc.
            print "Warning: This mode has not been fully tested"
            OverscanKeyValue = 'BiasRow'
            # subtract a bias value for each row of each amplifier
            #print r1, r2, len(bias_q1e)
            suffix = 'br'
            for i in range(r1, r2 + 1, 1):
                data[i, cols_q1e] -= bias_q1e[i]
                data[i, cols_q1o] -= bias_q1o[i]
                data[i + rowlen, cols_q2e] -= bias_q2e[i]
                data[i + rowlen, cols_q2o] -= bias_q2o[i]
                data[i, cols_q3e] -= bias_q3e[i]
                data[i, cols_q3o] -= bias_q3o[i]
                data[i + rowlen, cols_q4e] -= bias_q4e[i]
                data[i + rowlen, cols_q4o] -= bias_q4o[i]
        elif BiasType == BiasFit:
            OverscanKeyValue = 'BiasFit'
            # print "Error: Have not implemented a fit to the bias yet. Please use BiasSingle"
            suffix = 'bf'

            xl = range(r1, r2 + 1, 1)
            d = 4

            f_q1e = filt(xl, bias_q1e)
            f_q1o = filt(xl, bias_q1o)
            f_q2e = filt(xl, bias_q2e)
            f_q2o = filt(xl, bias_q2o)
            f_q3e = filt(xl, bias_q3e)
            f_q3o = filt(xl, bias_q3o)
            f_q4e = filt(xl, bias_q4e)
            f_q4o = filt(xl, bias_q4o)
            for i in xl:
                data[i, cols_q1e] -= ftlgd(xl, f_q1e, i, d)
                data[i, cols_q1o] -= ftlgd(xl, f_q1o, i, d)
                data[i + rowlen, cols_q2e] -= ftlgd(xl, f_q2e, i, d)
                data[i + rowlen, cols_q2o] -= ftlgd(xl, f_q2o, i, d)
                data[i, cols_q3e] -= ftlgd(xl, f_q3e, i, d)
                data[i, cols_q3o] -= ftlgd(xl, f_q3o, i, d)
                data[i + rowlen, cols_q4e] -= ftlgd(xl, f_q4e, i, d)
                data[i + rowlen, cols_q4o] -= ftlgd(xl, f_q4o, i, d)
            # exit(1)
            # pyplot.plot(xl, [a for a in xl], color='blue')
            # pyplot.plot(xl, [ftlgd(xl, xl, a, d) for a in xl], color='red')
            # print bias_q1e
            # print xl

            pyplot.plot(xl, f_q1e, color='blue')
            pyplot.plot(xl, [ftlgd(xl, f_q1e, a, d) for a in xl], color='red')
            #pyplot.step(bedge[:-1], [a + 1e-20 for a in histbg], color='black')
            #pyplot.step(bedge[:-1], [a + 1e-20 for a in histreal], color='red')
            ## pyplot.bar(bedge[:-1], fakehistn_l[0], edgecolor='green', width=0.4, log=True, fill=False)
            #pyplot.yscale('log')
            #pyplot.ylim(ymin=1e-1)
            pyplot.show()
        else:
            print "Error: Bias subtraction type not parsed correctly"
            exit(1)

        ######################################################################
        # Apply the gain correction [not yet implemented]
        ######################################################################

        if Gain:
            if R4K:
                if AT_MDM:
                    # divide out the relative gain of each amplifier
                    for r in range(r1, r2 + 1):
                        for c in cols_q1e:
                            data[r, c] /= r4k_gain_q1e
                        for c in cols_q1o:
                            data[r, c] /= r4k_gain_q1o
                        for c in cols_q2e:
                            data[r + rowlen, c] /= r4k_gain_q2e
                        for c in cols_q2o:
                            data[r + rowlen, c] /= r4k_gain_q2o
                        for c in cols_q3e:
                            data[r, c] /= r4k_gain_q3e
                        for c in cols_q3o:
                            data[r, c] /= r4k_gain_q3o
                        for c in cols_q4e:
                            data[r + rowlen, c] /= r4k_gain_q4e
                        for c in cols_q4o:
                            data[r + rowlen, c] /= r4k_gain_q4o
                else:
                    data[r1:r2, cols_q1e] /= r4k_gain_q1e
                    data[r1:r2, cols_q1o] /= r4k_gain_q1o
                    data[r3:r4, cols_q2e] /= r4k_gain_q2e
                    data[r3:r4, cols_q2o] /= r4k_gain_q2o
                    data[r1:r2, cols_q3e] /= r4k_gain_q3e
                    data[r1:r2, cols_q3o] /= r4k_gain_q3o
                    data[r3:r4, cols_q4e] /= r4k_gain_q4e
                    data[r3:r4, cols_q4o] /= r4k_gain_q4o
            else:
                if AT_MDM:
                    for r in range(r1, r2 + 1):
                        for c in cols_q1:
                            data[r, c] /= mdm4k_gain_q1
                        for c in cols_q2:
                            data[r + rowlen, c] /= mdm4k_gain_q2
                        for c in cols_q3:
                            data[r, c] /= mdm4k_gain_q3
                        for c in cols_q4:
                            data[r + rowlen, c] /= mdm4k_gain_q4
                else:
                    data[r1:r2, cols_q1] /= mdm4k_gain_q1
                    data[r3:r4, cols_q2] /= mdm4k_gain_q2
                    data[r1:r2, cols_q3] /= mdm4k_gain_q3
                    data[r3:r4, cols_q4] /= mdm4k_gain_q4

        ######################################################################
        # Write the output file
        ######################################################################

        fitsfile[0].data = data[r1:r4 + 1, c1:c4 + 1]
        OverscanKeyComment = 'Overscan by proc4k.py v%s (%s)' % (versNum, versDate)
        GainKeyValue = 'Relative'
        GainKeyComment = 'Gain removed by proc4k.py'
        #BiasKeyValue = '%s' % (versNum)
        #BiasKeyComment = 'Gain removed by proc4k.py'

        if OSMOS:   # prevent a pyfits error if these are not assigned values
            try:
                fitsfile[0].header['MISFILT'] = -1
                fitsfile[0].header['MISFLTID'] = -1
            except:
                if Debug:
                    print "Note: MISFILT and MISFLTID keywords not found"

        fitsfile[0].header.update('BIASPROC', OverscanKeyValue, OverscanKeyComment)
        #fitsfile[0].header.update('BIASVER', BiasKeyValue, BiasKeyComment)
        #if R4K:
        #fitsfile[0].header.update('BIASQ1E', bq1e, 'Bias subtracted from Q1E')
        #fitsfile[0].header.update('BIASQ1O', bq1o, 'Bias subtracted from Q1O')
        #fitsfile[0].header.update('BIASQ2E', bq2e, 'Bias subtracted from Q2E')
        #fitsfile[0].header.update('BIASQ2O', bq2o, 'Bias subtracted from Q2O')
        #fitsfile[0].header.update('BIASQ3E', bq3e, 'Bias subtracted from Q3E')
        #fitsfile[0].header.update('BIASQ3O', bq3o, 'Bias subtracted from Q3O')
        #fitsfile[0].header.update('BIASQ4E', bq4e, 'Bias subtracted from Q4E')
        #fitsfile[0].header.update('BIASQ4O', bq4o, 'Bias subtracted from Q4O')
        #else:
        #fitsfile[0].header.update('BIASQ1', bq1, 'Bias subtracted from Q1')
        #fitsfile[0].header.update('BIASQ2', bq2, 'Bias subtracted from Q2')
        #fitsfile[0].header.update('BIASQ3', bq3, 'Bias subtracted from Q3')
        #fitsfile[0].header.update('BIASQ4', bq4, 'Bias subtracted from Q4')

        if Gain:
            if R4K:
                fitsfile[0].header.update('GAINPROC', GainKeyValue, GainKeyComment)
                # one keyword per amplifier (even/odd) of each quadrant
                fitsfile[0].header.update('GainQ1E', r4k_gain_q1e, 'Gain for Q1 even')
                fitsfile[0].header.update('GainQ1O', r4k_gain_q1o, 'Gain for Q1 odd')
                fitsfile[0].header.update('GainQ2E', r4k_gain_q2e, 'Gain for Q2 even')
                fitsfile[0].header.update('GainQ2O', r4k_gain_q2o, 'Gain for Q2 odd')
                fitsfile[0].header.update('GainQ3E', r4k_gain_q3e, 'Gain for Q3 even')
                fitsfile[0].header.update('GainQ3O', r4k_gain_q3o, 'Gain for Q3 odd')
                fitsfile[0].header.update('GainQ4E', r4k_gain_q4e, 'Gain for Q4 even')
                fitsfile[0].header.update('GainQ4O', r4k_gain_q4o, 'Gain for Q4 odd')

        fitsfile[0].header['SECPIX'] = 0.273 * ccdxbin
        outfile = file[:str.find(file, '.fits')] + suffix + '.fits'
        if os.path.isfile(outfile):
            print " Warning: Overwriting pre-existing file %s" % (outfile)
            os.remove(outfile)
        fitsfile.writeto(outfile)
        fitsfile.close()

# print "%s Done" % (argv[0])
print "%s Done" % (scriptname)
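# A minimal sketch (not part of proc4k.py) of the per-amplifier overscan
# subtraction the script performs, written against plain numpy arrays.  The
# shapes are illustrative; proc4k.py derives rows/columns from the FITS
# header (e.g. rows = np.arange(r1, r2 + 1), img_cols = cols_q1e,
# over_cols = cols_over_q1e for the Q1 even amplifier).
import numpy as np

def subtract_overscan(data, img_cols, over_cols, rows):
    """Subtract the median overscan level of one amplifier in place."""
    # one bias value per row, taken as the median of the overscan columns
    bias = np.median(data[rows][:, over_cols], axis=1)
    # BiasSingle mode collapses that to a single number per amplifier
    data[np.ix_(rows, img_cols)] -= np.median(bias)
    return data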
# ===== repo: msullivan/advent-of-code · path: 2020/16b.py · license: mit =====

#!/usr/bin/env python3

import sys
import re

def extract(s):
    return [int(x) for x in re.findall(r'\d+', s)]


def valid(n, r):
    (a, b), (c, d) = r
    return (a <= n <= b) or (c <= n <= d)


def main(args):
    data = [s.strip() for s in sys.stdin]

    fields = {}
    for i, x in enumerate(data):
        if x == "": break
        thing = x.split(":")[0]
        a, b, c, d = extract(x)
        fields[thing] = ((a, b), (c, d))

    tickets = []
    for x in data[i:]:
        if ',' not in x: continue
        z = [int(y) for y in x.split(',')]
        tickets.append(z)

    my_ticket = tickets[0]
    tickets = tickets[1:]

    fucked = []
    good = []
    for t in tickets:
        bad = [
            v
            for v in t
            if all(not valid(v, r) for r in fields.values())
        ]
        fucked += bad
        if not bad:
            good.append(t)

    part1 = sum(fucked)

    # intersect the set of candidate fields for each column across all
    # valid tickets
    options = [set(fields.keys()) for _ in range(len(good[0]))]
    for t in good:
        for i, f in enumerate(t):
            opts = set(name for name, range in fields.items()
                       if valid(f, range))
            options[i] &= opts

    # repeatedly pin the column that has exactly one candidate left
    picks = {}
    while True:
        for i, thing in enumerate(options):
            if len(thing) == 1:
                break
        else:
            assert False
        pick = list(thing)[0]
        picks[pick] = i
        for x in options:
            x.discard(pick)

        if not any(x for x in options):
            break

    print(picks)

    departuresf = [x for x in fields if x.startswith('departure')]
    depts = [my_ticket[picks[f]] for f in departuresf]
    print(depts)
    prod = 1
    for x in depts: prod *= x

    print(part1)
    print(prod)

    # print(data)

if __name__ == '__main__':
    sys.exit(main(sys.argv))
# ===== repo: SeanEstey/Bravo · path: app/lib/dt.py · license: gpl-2.0 =====

'''app.dt'''
import pytz
from datetime import datetime, date, time, timedelta
local_tz = pytz.timezone('America/Edmonton')

#-------------------------------------------------------------------------------
def json_serial(obj):
    if isinstance(obj, datetime):
        serial = obj.isoformat()
        return serial
    raise TypeError("Type not serializable")

#-------------------------------------------------------------------------------
def to_utc(obj=None, dt=None, d=None, t=None, to_str=False):
    if obj:
        return convert_obj(obj, to_tz=pytz.utc, to_str=to_str)
    else:
        return to_timezone(pytz.utc, dt=dt, d=d, t=t, to_str=to_str)

#-------------------------------------------------------------------------------
def to_local(obj=None, dt=None, d=None, t=None, to_str=False):
    if obj:
        return convert_obj(obj, to_tz=local_tz, to_str=to_str)
    else:
        return to_timezone(local_tz, dt=dt, d=d, t=t, to_str=to_str)

#-------------------------------------------------------------------------------
def to_timezone(tz, dt=None, d=None, t=None, to_str=False):
    # note: replace(tzinfo=...) with a pytz zone skips DST normalization;
    # pytz's localize() is the stricter way to attach the zone
    if dt:
        dt = dt.replace(tzinfo=local_tz) if not dt.tzinfo else dt
        dt = dt.astimezone(tz)
        return dt.strftime(to_str) if to_str else dt
    elif d and t:
        dt_ = datetime.combine(d, t)
        dt_ = local_tz.localize(dt_).astimezone(tz)
        return dt_.strftime(to_str) if to_str else dt_
    elif d and not t:
        dt_ = datetime.combine(d, time(0, 0)).replace(tzinfo=local_tz).astimezone(tz)
        return dt_.strftime(to_str) if to_str else dt_

#-------------------------------------------------------------------------------
def d_to_dt(date_):
    return datetime.combine(date_, time())

#-------------------------------------------------------------------------------
def convert_obj(obj, to_tz=None, to_str=False):
    '''Returns a datetime with the given timezone. Will convert timezones
    for non-naive datetimes.
    @obj: any data structure (dict, list, etc)
    '''

    if isinstance(obj, dict):
        for k, v in obj.iteritems():
            obj[k] = convert_obj(v, to_str=to_str, to_tz=to_tz)
        return obj
    elif isinstance(obj, list):
        for idx, item in enumerate(obj):
            obj[idx] = convert_obj(item, to_str=to_str, to_tz=to_tz)
        return obj
    elif isinstance(obj, datetime):
        tz = to_tz if to_tz else local_tz
        obj = obj.replace(tzinfo=tz) if not obj.tzinfo else obj.astimezone(tz)
        return obj.strftime(to_str) if to_str else obj
    else:
        return obj

#-------------------------------------------------------------------------------
def ddmmyyyy_to_dt(ddmmyyyy):
    '''@date_str: etapestry native dd/mm/yyyy'''
    parts = ddmmyyyy.split('/')
    return datetime(int(parts[2]), int(parts[1]), int(parts[0]))

#-------------------------------------------------------------------------------
def ddmmyyyy_to_date(ddmmyyyy):
    '''@date_str: etapestry native dd/mm/yyyy'''
    parts = ddmmyyyy.split('/')
    return date(int(parts[2]), int(parts[1]), int(parts[0]))

#-------------------------------------------------------------------------------
def ddmmyyyy_to_local_dt(ddmmyyyy):
    '''@date_str: etapestry native dd/mm/yyyy'''
    parts = ddmmyyyy.split('/')
    return to_local(dt=datetime(int(parts[2]), int(parts[1]), int(parts[0])))

#-------------------------------------------------------------------------------
def dt_to_ddmmyyyy(dt):
    return dt.strftime('%d/%m/%Y')

#-------------------------------------------------------------------------------
def ddmmyyyy_to_mmddyyyy(ddmmyyyy):
    p = ddmmyyyy.split('/')
    return '%s/%s/%s' % (p[1], p[0], p[2])
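# A minimal usage sketch (the import path for app/lib/dt.py is assumed):
#
#   from datetime import datetime, date, time
#   from app.lib import dt
#
#   dt.to_utc(dt=datetime(2016, 7, 1, 12, 0))                    # naive local -> UTC
#   dt.to_local(d=date(2016, 7, 1), t=time(9, 30))               # date + time -> local dt
#   dt.to_local(dt=datetime(2016, 7, 1, 12, 0), to_str='%H:%M')  # formatted string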
# ===== repo: Zahajamaan/Fudulbank · path: exams/migrations/0002_add_categories_and_exams.py · license: agpl-3.0 =====

# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 17:17
from __future__ import unicode_literals

from django.db import migrations

def add_categories(apps, schema_editor):
    Category = apps.get_model('exams', 'Category')
    Exam = apps.get_model('exams', 'Exam')
    Subject = apps.get_model('exams', 'Subject')

    ksau_hs = Category.objects.create(name="KSAU-HS", slug='ksau-hs')
    com = Category.objects.create(name="College of Medicine",
                                  slug='com', parent_category=ksau_hs)
    basic = Category.objects.create(name="Basic years", slug='basic',
                                    parent_category=com)
    clinical = Category.objects.create(name="Clinical years",
                                       slug='clinical',
                                       parent_category=com)
    basic_subjects = ['Anatomy', 'Biochemistry and Immunology',
                      'Clinical', 'EBM', 'Histology', 'Pathology',
                      'Physiology']

    exam_map = {'Foundation': basic_subjects,
                'MSK': basic_subjects,
                'Respiratory': basic_subjects,
                'Hematology': basic_subjects,
                'Cardiology': basic_subjects,
                'Neurology': basic_subjects,
                'Endocrinology': basic_subjects,
                'GIT': basic_subjects,
                'Urology': basic_subjects,
                'Oncology': basic_subjects,
                'Medicine I': ['Emergency medicine', 'Endocrinology',
                               'Gastroenterology & hepatology',
                               'Infectious diseases',
                               'Nephrology & urinary disorders',
                               'Respiratory medicine',
                               'Rheumatology',
                               'General medicine/Others'],
                'Pediatric': ['Developmental & behavior clinical pediatrics',
                              'Endocrinology', 'Genetics and Dysmorphology',
                              'Hematology/oncology',
                              'Neonatology', 'Neurology',
                              'Pediatric cardiology',
                              'Pediatric infectious disease',
                              'General Pediatrics/Others'],
                'Surgery I': ['Appendix', 'Breast', 'Gallbladder',
                              'Genitourinary tract', 'Hernias', 'Lower GI',
                              'Pancreas', 'Pediatrics', 'Thyroid',
                              'Trauma & emergency', 'Upper GI',
                              'Vascular', 'Others'],
                'Family Medicine': ['Community medicine & communication',
                                    'General medicine',
                                    'Pediatrics', 'Psychiatry',
                                    "Women's health"],
                'Medicine II': ['Cardiovascular Disease', 'Hematology',
                                'Medical Oncology', 'General Medicine/Others'],
                'Surgery II': ['Anesthesia', 'Orthopedics',
                               'Plastic Surgery', 'Others'],
                'Obstetrics & Gynecology': ['Obstetrics', 'Gynecology'],
                'Special Senses and Mental Health': ['Neurology', 'Ophthalmology',
                                                     'Otolaryngology', 'Psychiatry']
                }

    for exam in exam_map:
        subjects = exam_map[exam]
        if subjects == basic_subjects:
            category = basic
        else:
            category = clinical

        exam = Exam.objects.create(name=exam,
                                   category=category)

        for subject_name in subjects:
            Subject.objects.create(name=subject_name, exam=exam)

class Migration(migrations.Migration):

    dependencies = [
        ('exams', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(add_categories)
    ]
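# One design note: as written the migration has no reverse function, so it
# cannot be unapplied.  A common pattern (a sketch, assuming no teardown is
# needed) is to pass Django's no-op reverse callable:
#
#   operations = [
#       migrations.RunPython(add_categories, migrations.RunPython.noop)
#   ]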
# ===== repo: openstack/networking-odl · path: networking_odl/common/config.py · license: apache-2.0 =====

# Copyright (c) 2014 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from networking_odl._i18n import _


odl_opts = [
    cfg.StrOpt('url',
               help=_("HTTP URL of OpenDaylight REST interface.")),
    cfg.StrOpt('username',
               help=_("HTTP username for authentication.")),
    cfg.StrOpt('password', secret=True,
               help=_("HTTP password for authentication.")),
    cfg.IntOpt('timeout', default=10,
               help=_("HTTP timeout in seconds.")),
    cfg.IntOpt('session_timeout', default=30,
               help=_("Tomcat session timeout in minutes.")),
    cfg.FloatOpt('sync_timeout', default=10,
                 help=_("Sync thread timeout in seconds or fraction.")),
    cfg.IntOpt('retry_count', default=5,
               help=_("Number of times to retry a row before failing.")),
    cfg.IntOpt('maintenance_interval', default=300,
               help=_("Journal maintenance operations interval in seconds.")),
    cfg.IntOpt('completed_rows_retention', default=0,
               help=_("Time to keep completed rows (in seconds). "
                      "For performance reasons it's not recommended to "
                      "change this from the default value (0) which "
                      "indicates completed rows aren't kept. "
                      "This value will be checked every maintenance_interval "
                      "by the cleanup thread. To keep completed rows "
                      "indefinitely, set the value to -1")),
    cfg.BoolOpt('enable_lightweight_testing',
                default=False,
                help=_('Test without real ODL.')),
    cfg.StrOpt('port_binding_controller',
               default='pseudo-agentdb-binding',
               help=_('Name of the controller to be used for port binding.')),
    cfg.IntOpt('processing_timeout', default=100,
               help=_("Time in seconds to wait before a "
                      "processing row is marked back to pending.")),
    cfg.StrOpt('odl_hostconf_uri',
               help=_("Path for ODL host configuration REST interface"),
               default="/restconf/operational/neutron:neutron/hostconfigs"),
    cfg.IntOpt('restconf_poll_interval', default=30,
               help=_("Poll interval in seconds for getting ODL hostconfig")),
    cfg.BoolOpt('enable_websocket_pseudo_agentdb', default=False,
                help=_('Enable websocket for pseudo-agent-port-binding.')),
    cfg.IntOpt('odl_features_retry_interval', default=5,
               help=_("Wait this many seconds before retrying the odl features"
                      " fetch")),
    cfg.ListOpt('odl_features',
                help='A list of features supported by ODL.'),
    cfg.StrOpt('odl_features_json',
               help='Features supported by ODL, in the json format returned '
                    'by ODL. Note: This config option takes precedence over '
                    'odl_features.'),
    cfg.BoolOpt('enable_dhcp_service', default=False,
                help=_('Enables the networking-odl driver to supply special'
                       ' neutron ports of "dhcp" type to OpenDaylight'
                       ' Controller for its use in providing DHCP Service.')),
]

cfg.CONF.register_opts(odl_opts, "ml2_odl")


def list_opts():
    return [('ml2_odl', odl_opts)]

# ===== repo: tbttfox/TwistyTools · path: ttLib/Draw/DrawSide.py · license: gpl-3.0 =====

#!/usr/bin/python
from DrawBase import DrawBase
from OpenGL.GL import *

class DrawSide(DrawBase):
    def __init__(self, *args, **kwargs):
        super(DrawSide, self).__init__(*args, **kwargs)
        self.inrad = None
        self.outrad = None

    @property
    def glID(self):
        if self._glID == None:
            pn = (self.thing.posneg * 2) - 1
            arcPoints = self.thing.arc.subdivide(angleHint=10)
            arcNormals = [(self.thing.arc.c - ap) * pn for ap in arcPoints]
            # compile the triangle strip into a display list once, then reuse it
            self._glID = glGenLists(1)
            glNewList(self._glID, GL_COMPILE)
            glBegin(GL_TRIANGLE_STRIP)
            for i in range(len(arcPoints)):
                glNormal3f(*arcNormals[i])
                glVertex3f(*(arcPoints[i] * self.inrad))
                glVertex3f(*(arcPoints[i] * self.outrad))
            glEnd()
            glEndList()
        return self._glID

    def draw(self, ds):
        if ds.inrad != self.inrad or ds.outrad != self.outrad:
            # update the object any time the radii change
            self.inrad = ds.inrad
            self.outrad = ds.outrad
            if self._glID:
                glDeleteLists(self._glID, 1)
            self._glID = None
        scale = ds.scale
        material = ds.material
        glPushMatrix()
        glScalef(scale, scale, scale)
        glCallList(self.glID)
        glPopMatrix()
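# A minimal caller sketch (assumed: `thing` and the draw-state `ds` are
# hypothetical objects shaped like what glID/draw expect):
#
#   side = DrawSide(thing)     # thing supplies .arc and .posneg
#   side.draw(ds)              # ds carries inrad/outrad/scale/material; the
#                              # display list is rebuilt only when the radii change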
createApp(request):\n profile = Profile.objects.get(user=request.user)\n if request.user.is_staff or profile.moderated:\n if request.method == 'GET':\n form = CreateAppForm()\n elif request.method == 'POST':\n form = CreateAppForm(request.POST)\n if form.is_valid():\n new_app = form.save()\n new_app.save()\n context = {\n 'message': \"New App Page created Successfully!\",\n 'go_back_to_url': \"/app/\" + new_app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n else:\n context = {\n 'message': \"You are not authorized to view this page!\",\n 'go_back_to_url': \"/\",\n 'go_back_to_title': \"Home Page\",\n }\n return render(request, 'message.html', context)\n return render(request, 'create.html', {'form': form})\n\n\n@login_required\ndef editApp(request, num):\n App = apps.get_model('apps', 'App')\n try:\n edit_app = App.objects.get(id=num)\n except BaseException:\n context = {\n 'message': \"Requested App does not Exist!\",\n 'go_back_to_url': \"/app/\" + edi_app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n editors = edit_app.editors.all()\n if request.user in editors or request.user.is_staff:\n if request.method == 'GET':\n form = EditAppForm(instance=edit_app)\n elif request.method == 'POST':\n form = EditAppForm(request.POST, request.FILES, instance=edit_app)\n if form.is_valid():\n edited_app = form.save(commit=False)\n cleaned_data = form.clean()\n tags = cleaned_data['tags']\n for tag in tags:\n edited_app.tags.add(tag)\n if 'icon' in request.FILES:\n icon_file = request.FILES['icon']\n edited_app.icon = scale_img(\n icon_file, icon_file.name, 128, 'both')\n edited_app.save()\n context = {\n 'message': \"App Page edited Successfully!\",\n 'go_back_to_url': \"/app/\" + edit_app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n else:\n context = {\n 'message': \"You are not authorized to view this page!\",\n 'go_back_to_url': \"/app/\" + edit_app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n return render(request, 'edit.html', {'form': form})\n\n\n@login_required\ndef createRelease(request, num):\n App = apps.get_model('apps', 'App')\n try:\n release_app = App.objects.get(id=num)\n except BaseException:\n context = {\n 'message': \"Requested App does not Exist!\",\n 'go_back_to_url': \"/app/\" + release_app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n editors = release_app.editors.all()\n if request.user in editors or request.user.is_staff:\n if request.method == 'GET':\n form = ReleaseForm()\n elif request.method == 'POST':\n form = ReleaseForm(request.POST, request.FILES)\n if form.is_valid():\n new_release = form.save(commit=False)\n new_release.app = release_app\n new_release.save()\n context = {\n 'message': \"Release added Successfully!\",\n 'go_back_to_url': \"/app/\" + release_app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n else:\n context = {\n 'message': \"You are not authorized to view this page!\",\n 'go_back_to_url': \"/app/\" + release_app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n return render(request, 'create_release.html', {'form': form})\n\n\n@login_required\ndef editRelease(request, num):\n Release = apps.get_model('apps', 'Release')\n App = apps.get_model('apps', 'App')\n try:\n edit_release = 
Release.objects.get(id=num)\n except BaseException:\n context = {\n 'message': \"Requested App does not Exist!\",\n 'go_back_to_url': \"/app/\" + edit_release.app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n editors = edit_release.app.editors.all()\n if request.user in editors or request.user.is_staff:\n if request.method == 'GET':\n form = ReleaseForm(instance=edit_release)\n elif request.method == 'POST':\n form = ReleaseForm(\n request.POST,\n request.FILES,\n instance=edit_release)\n if form.is_valid():\n edited_release = form.save()\n edited_release.save()\n context = {\n 'message': \"Release edited Successfully!\",\n 'go_back_to_url': \"/app/\" + edit_release.app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n else:\n context = {\n 'message': \"You are not authorized to view this page!\",\n 'go_back_to_url': \"/app/\" + edit_release.app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n return render(request, 'edit_release.html', {'form': form})\n\n\n@login_required\ndef modifyInstallation(request, num):\n existing = False\n App = apps.get_model('apps', 'App')\n Installation = apps.get_model('apps', 'Installation')\n try:\n app = App.objects.get(id=num)\n except BaseException:\n context = {\n 'message': \"Requested App does not Exist!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n if Installation.objects.filter(app=app).exists():\n existing = True\n edit_installation = Installation.objects.get(app=app)\n if request.user.is_staff or request.user in app.editors.all():\n if request.method == 'GET':\n if existing:\n form = InstallationForm(instance=edit_installation)\n else:\n form = InstallationForm()\n elif request.method == 'POST':\n if existing:\n form = InstallationForm(\n request.POST, instance=edit_installation)\n else:\n form = InstallationForm(request.POST)\n if form.is_valid():\n if existing:\n edited_installation = form.save()\n edited_installation.save()\n else:\n installation = form.save(commit=False)\n installation.app = app\n installation.save()\n context = {\n 'message': \"Installation modified Successfully!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n else:\n context = {\n 'message': \"You are not authorized to view this page!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n return render(request, 'installation.html', {'form': form})\n\n\n@login_required\ndef modifyMaintenance(request, num):\n existing = False\n App = apps.get_model('apps', 'App')\n Maintenance = apps.get_model('apps', 'Maintenance')\n try:\n app = App.objects.get(id=num)\n except BaseException:\n context = {\n 'message': \"Requested App does not Exist!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n if Maintenance.objects.filter(app=app).exists():\n existing = True\n edit_maintenance = Maintenance.objects.get(app=app)\n if request.user.is_staff or request.user in app.editors.all():\n if request.method == 'GET':\n if existing:\n form = MaintenanceForm(instance=edit_maintenance)\n else:\n form = MaintenanceForm()\n elif request.method == 'POST':\n if existing:\n form = MaintenanceForm(request.POST, 
instance=edit_maintenance)\n else:\n form = MaintenanceForm(request.POST)\n if form.is_valid():\n if existing:\n edited_maintenance = form.save()\n edited_maintenance.save()\n else:\n maintenance = form.save(commit=False)\n maintenance.app = app\n maintenance.save()\n context = {\n 'message': \"Maintenance notes modified Successfully!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n else:\n context = {\n 'message': \"You are not authorized to view this page!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n return render(request, 'maintenance.html', {'form': form})\n\n\n@login_required\ndef editDetails(request, num):\n App = apps.get_model('apps', 'App')\n try:\n edit_app = App.objects.get(id=num)\n except BaseException:\n context = {\n 'message': \"Requested App does not Exist!\",\n 'go_back_to_url': \"/app/\" + edit_app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n editors = edit_app.editors.all()\n if request.user in editors or request.user.is_staff:\n if request.method == 'GET':\n form = EditDetailsForm(instance=edit_app)\n elif request.method == 'POST':\n form = EditDetailsForm(request.POST, instance=edit_app)\n if form.is_valid():\n edited_app = form.save()\n edited_app.save()\n context = {\n 'message': \"Edited Details Successfully!\",\n 'go_back_to_url': \"/app/\" + edit_app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n else:\n context = {\n 'message': \"You are not authorized to view this page!\",\n 'go_back_to_url': \"/app/\" + edit_app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n return render(request, 'edit_details.html', {'form': form})\n\n\n@login_required\ndef modifyDownload(request, num):\n existing = False\n App = apps.get_model('apps', 'App')\n Download = apps.get_model('apps', 'Download')\n Release = apps.get_model('apps', 'Release')\n try:\n app = App.objects.get(id=num)\n except BaseException:\n context = {\n 'message': \"Requested App does not Exist!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n if Download.objects.filter(app=app).exists():\n existing = True\n edit_download = Download.objects.get(app=app)\n if request.user.is_staff or request.user in app.editors.all():\n if request.method == 'GET':\n if existing:\n form = DownloadForm(instance=edit_download, current_app=app)\n else:\n form = DownloadForm(current_app=app)\n elif request.method == 'POST':\n if existing:\n form = DownloadForm(\n request.POST,\n instance=edit_download,\n current_app=app)\n else:\n form = DownloadForm(request.POST, current_app=app)\n if form.is_valid():\n if existing:\n instance = form.save()\n release = None\n releases = Release.objects.filter(app=instance.app)\n if releases:\n release = releases.latest('date')\n choice = instance.download_option\n link = \"https://ns-apps.washington.edu/\" + \\\n instance.app.name + \"/#cy-app-instructions-tab\"\n if choice == 'I':\n instance.download_link = link\n elif choice == 'D':\n instance.download_link = release.url\n if not release:\n instance.download_link = link\n elif choice == 'U':\n instance.download_link = instance.external_url\n if not instance.external_url:\n instance.download_link = link\n if not 
instance.default_release:\n instance.default_release = release\n instance.save()\n else:\n download = form.save(commit=False)\n download.app = app\n download.save()\n context = {\n 'message': \"Download Details modified Successfully!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n else:\n context = {\n 'message': \"You are not authorized to view this page!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n return render(request, 'download.html', {'form': form})\n\n\n@login_required\ndef modifyDevelopment(request, num):\n existing = False\n App = apps.get_model('apps', 'App')\n Development = apps.get_model('apps', 'Development')\n try:\n app = App.objects.get(id=num)\n except BaseException:\n context = {\n 'message': \"Requested App does not Exist!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n if Development.objects.filter(app=app).exists():\n existing = True\n edit_development = Development.objects.get(app=app)\n if request.user.is_staff or request.user in app.editors.all():\n if request.method == 'GET':\n if existing:\n form = DevelopmentForm(instance=edit_development)\n else:\n form = DevelopmentForm()\n elif request.method == 'POST':\n if existing:\n form = DevelopmentForm(\n request.POST, request.FILES, instance=edit_development)\n else:\n form = DevelopmentForm(request.POST, request.FILES)\n if form.is_valid():\n if existing:\n edited_development = form.save()\n edited_development.save()\n else:\n development = form.save(commit=False)\n development.app = app\n development.save()\n context = {\n 'message': \"Development Version modified Successfully!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n else:\n context = {\n 'message': \"You are not authorized to view this page!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n return render(request, 'development.html', {'form': form})\n\n\ndef deleteReleasePrompt(request, num):\n Release = apps.get_model('apps', 'Release')\n release = Release.objects.get(id=num)\n app = release.app\n go_back_to_url = \"/app/\" + app.name\n url = \"/backend/releasedelconf/\" + str(release.id)\n if request.user.is_staff or request.user in app.editors.all():\n context = {\n 'url': url,\n 'name': app.name,\n 'go_back_to_url': go_back_to_url,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'prompt.html', context)\n else:\n message = \"You are not authorized to view this page!\"\n context = {\n 'message': message,\n 'go_back_to_url': go_back_to_url,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n\n\ndef deleteRelease(request, num):\n Release = apps.get_model('apps', 'Release')\n release = Release.objects.get(id=num)\n app = release.app\n if request.user.is_staff or request.user in app.editors.all():\n release.delete()\n message = \"Release Deleted Successfully!\"\n else:\n message = \"You are not authorized to view this page!\"\n go_back_to_url = \"/app/\" + app.name\n context = {\n 'message': message,\n 'go_back_to_url': go_back_to_url,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n\n\n\"\"\"\nclass _ScreenshotEditConfig:\n 
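# ---- Editor's note: hedged sketch, not part of the original views.py ----
# Every view above repeats the same lookup-then-permission boilerplate, and
# each `except BaseException:` handler builds its context from `app.name`
# (or `edit_release.app.name`) even though that name is only bound when the
# lookup succeeds -- so the "does not exist" branch itself raises NameError.
# One possible refactor under the same model layout; `authorized_app_view`
# is a hypothetical helper name, not something from this codebase.
from functools import wraps

from django.apps import apps
from django.shortcuts import render


def authorized_app_view(view):
    """Resolve the App by id and enforce editor/staff access before `view` runs."""
    @wraps(view)
    def wrapper(request, num, *args, **kwargs):
        App = apps.get_model('apps', 'App')
        try:
            app = App.objects.get(id=num)
        except App.DoesNotExist:
            # `app` was never bound on this path, so the message must not
            # mention app.name.
            context = {
                'message': "Requested App does not Exist!",
                'go_back_to_url': "/",
                'go_back_to_title': "Home",
            }
            return render(request, 'message.html', context)
        if not (request.user.is_staff or request.user in app.editors.all()):
            context = {
                'message': "You are not authorized to view this page!",
                'go_back_to_url': "/app/" + app.name,
                'go_back_to_title': "App Page",
            }
            return render(request, 'message.html', context)
        return view(request, app, *args, **kwargs)
    return wrapper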
max_img_size_b = 2 * 1024 * 1024\n thumbnail_height_px = 150\n\ndef _upload_screenshot(app, request):\n screenshot_f = request.FILES.get('file')\n if not screenshot_f:\n raise ValueError('no file submitted')\n if screenshot_f.size > _ScreenshotEditConfig.max_img_size_b:\n raise ValueError('image file is %d bytes but can be at most %d bytes' % (screenshot_f.size, _ScreenshotEditConfig.max_img_size_b))\n thumbnail_f = scale_img(screenshot_f, screenshot_f.name, _ScreenshotEditConfig.thumbnail_height_px, 'h')\n screenshot = Screenshot.objects.create(app = app)\n screenshot.screenshot.save(screenshot_f.name, screenshot_f)\n screenshot.thumbnail.save(thumbnail_f.name, thumbnail_f)\n screenshot.save()\n\ndef _delete_screenshot(app, request):\n screenshot_id = request.POST.get('screenshot_id')\n if not screenshot_id:\n raise ValueError('no screenshot_id specified')\n\n try:\n screenshot_id = int(screenshot_id)\n screenshot = Screenshot.objects.get(id = screenshot_id)\n except ValueError, Screenshot.DoesNotExist:\n raise ValueError('invalid screenshot_id')\n screenshot.delete()\n\n_ScreenshotActions = {\n 'upload_screenshot': _upload_screenshot,\n 'delete_screenshot': _delete_screenshot,\n}\n\"\"\"\n\n\n@login_required\ndef screenshots(request, num):\n App = apps.get_model('apps', 'App')\n try:\n app = App.objects.get(id=num)\n except BaseException:\n context = {\n 'message': \"Requested App does not Exist!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n editors = app.editors.all()\n if request.user in editors or request.user.is_staff:\n if request.method == 'GET':\n form = ScreenshotForm()\n elif request.method == 'POST':\n form = ScreenshotForm(request.POST, request.FILES)\n if form.is_valid():\n new_screenshot = form.save(commit=False)\n new_screenshot.app = app\n new_screenshot.save()\n context = {\n 'message': \"Screenshot added Successfully!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n else:\n context = {\n 'message': \"You are not authorized to view this page!\",\n 'go_back_to_url': \"/app/\" + app.name,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n return render(request, 'create_screenshot.html', {'form': form})\n\n\ndef deleteScreenshotPrompt(request, num):\n Screenshot = apps.get_model('apps', 'Screenshot')\n screenshot = Screenshot.objects.get(id=num)\n app = screenshot.app\n go_back_to_url = \"/app/\" + app.name\n url = \"/backend/screenshotdelconf/\" + str(screenshot.id)\n if request.user.is_staff or request.user in app.editors.all():\n context = {\n 'url': url,\n 'name': app.name,\n 'go_back_to_url': go_back_to_url,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'prompt.html', context)\n else:\n message = \"You are not authorized to view this page!\"\n context = {\n 'message': message,\n 'go_back_to_url': go_back_to_url,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n\n\ndef deleteScreenshot(request, num):\n Screenshot = apps.get_model('apps', 'Screenshot')\n screenshot = Screenshot.objects.get(id=num)\n app = screenshot.app\n if request.user.is_staff or request.user in app.editors.all():\n screenshot.delete()\n message = \"Screenshot Deleted Successfully!\"\n else:\n message = \"You are not authorized to view this page!\"\n go_back_to_url = \"/app/\" + app.name\n context = {\n 'message': message,\n 
'go_back_to_url': go_back_to_url,\n 'go_back_to_title': \"App Page\",\n }\n return render(request, 'message.html', context)\n\n\n\"\"\"\n@login_required\ndef screenshots(request, num):\n App = apps.get_model('apps', 'App')\n Screenshot = apps.get_model('apps', 'Screenshot')\n app = get_object_or_404(App, id=num)\n if not request.user.is_staff or request.user not in app.editors.all():\n return HttpResponseForbidden()\n if request.method == 'POST':\n print \"hey\"\n action = request.POST.get('action')\n if not action:\n return HttpResponseBadRequest('no action specified')\n if not action in _ScreenshotActions:\n return HttpResponseBadRequest('action \"%s\" invalid--must be: %s' % (action, ', '.join(_ScreenshotActions)))\n try:\n result = _ScreenshotActions[action](app, request)\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n if request.is_ajax():\n return json_response(result)\n screenshots = Screenshot.objects.filter(app=app)\n print \"entered\"\n context = {\n 'screenshots': screenshots,\n 'max_file_img_size_b': _ScreenshotEditConfig.max_img_size_b,\n 'thumbnail_height_px': _ScreenshotEditConfig.thumbnail_height_px,\n }\n return render(request, 'screenshots.html', context)\n\"\"\"\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":7390528707503364000,"string":"7,390,528,707,503,364,000"},"line_mean":{"kind":"number","value":37.4361022364,"string":"37.436102"},"line_max":{"kind":"number","value":138,"string":"138"},"alpha_frac":{"kind":"number","value":0.5469847471,"string":"0.546985"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109587,"cells":{"repo_name":{"kind":"string","value":"otov4its/django-walletone"},"path":{"kind":"string","value":"tests/test_views.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3008"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\n\nfrom walletone.models import WalletOneSuccessPayment\nfrom walletone.signals import payment_received\n\n\nclass PaymentConfirmViewTestCase(TestCase):\n def setUp(self):\n self.data = {\n 'WMI_MERCHANT_ID': '165531803223',\n 'WMI_PAYMENT_AMOUNT': '1.00',\n 'WMI_COMMISSION_AMOUNT': '0.00',\n 'WMI_CURRENCY_ID': '643',\n 'WMI_PAYMENT_NO': '1',\n 'WMI_ORDER_ID': '336077917075',\n 'WMI_DESCRIPTION': 'Мой тестовый заказ',\n 'WMI_EXPIRED_DATE': '2016-05-21 11:34:34',\n 'WMI_CREATE_DATE': '2016-04-21 11:34:34',\n 'WMI_UPDATE_DATE': '2016-04-21 11:34:34',\n 'WMI_ORDER_STATE': 'Created',\n 'WMI_SIGNATURE': 'Q0vBjbeAaoFKTVcjUfkKLw==',\n 'EXTRA_FIELD': 'value',\n # Not documented fields\n # howerer its may present in form data\n 'WMI_AUTO_ACCEPT': '1',\n 'WMI_NOTIFY_COUNT': '0',\n }\n\n self.confirm_url = reverse('w1-payment-confirm')\n\n def test_view_returns_400_if_get_request(self):\n response = self.client.get(self.confirm_url)\n self.assertEqual(response.status_code, 400)\n\n def test_view_returns_200_if_post_request(self):\n response = self.client.post(self.confirm_url, self.data)\n self.assertContains(response, 'WMI_RESULT=OK')\n\n def test_view_saves_payment_to_db(self):\n self.client.post(self.confirm_url, self.data)\n try:\n WalletOneSuccessPayment.objects.get(WMI_ORDER_ID='336077917075')\n except WalletOneSuccessPayment.DoesNotExist:\n self.fail(\"payment DoesNotExist\")\n except WalletOneSuccessPayment.MultipleObjectsReturned:\n self.fail(\"payment MultipleObjectsReturned\")\n\n def 
test_view_sends_a_signal(self):\n def receiver(**kwargs):\n receiver.signal_was_sent = True\n payment = kwargs['payment']\n self.assertEqual(\n payment,\n WalletOneSuccessPayment.objects.get(\n WMI_ORDER_ID=payment.WMI_ORDER_ID\n )\n )\n receiver.signal_was_sent = False\n payment_received.connect(receiver, sender=WalletOneSuccessPayment)\n self.client.post(self.confirm_url, self.data)\n self.assertTrue(receiver.signal_was_sent)\n\n def test_view_was_called_twice_with_same_wmi_order_id(self):\n response1 = self.client.post(self.confirm_url, self.data)\n self.assertEqual(response1.content, b'WMI_RESULT=OK')\n response2 = self.client.post(self.confirm_url, self.data)\n self.assertContains(response2, 'not valid')\n\n def test_view_with_bad_signature(self):\n self.data['WMI_SIGNATURE'] = 'bad'\n response = self.client.post(self.confirm_url, self.data)\n self.assertContains(response, 'not valid')\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":5704211759134297000,"string":"5,704,211,759,134,297,000"},"line_mean":{"kind":"number","value":38.3684210526,"string":"38.368421"},"line_max":{"kind":"number","value":76,"string":"76"},"alpha_frac":{"kind":"number","value":0.6193181818,"string":"0.619318"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109588,"cells":{"repo_name":{"kind":"string","value":"studio1247/gertrude"},"path":{"kind":"string","value":"generation/preparation_repas.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5142"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n# This file is part of Gertrude.\n#\n# Gertrude is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# Gertrude is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Gertrude; if not, see .\n\nfrom ooffice import *\n\n\nclass PreparationRepasModifications(object):\n title = \"Préparation des repas\"\n template = \"Preparation repas.ods\"\n\n def __init__(self, debut):\n self.multi = False\n self.default_output = \"Preparation repas %s.ods\" % str(debut)\n self.debut = debut\n self.email = None\n self.site = None\n\n def execute(self, filename, dom):\n if filename != 'content.xml':\n return None\n \n date_fin = self.debut + datetime.timedelta(4)\n spreadsheet = dom.getElementsByTagName('office:spreadsheet').item(0)\n table = spreadsheet.getElementsByTagName(\"table:table\")[0]\n lignes = table.getElementsByTagName(\"table:table-row\")\n\n # Les titres des pages\n ReplaceFields(lignes, GetCrecheFields(database.creche) + [\n ('date-debut', self.debut),\n ('date-fin', date_fin)])\n\n if 1:\n # Le format utilisé par Les petits potes (séparation adaptation / halte-garderie / mi-temps / plein-temps\n # Changé en format utilisé par les petits lutins (sans la séparation)\n table.setAttribute(\"table:name\", '%d %s %d - %d %s %d' % (self.debut.day, months[self.debut.month - 1], date_fin.year, date_fin.day, months[date_fin.month - 1], date_fin.year))\n\n # Les jours\n ligne = lignes.item(1)\n cellules = ligne.getElementsByTagName(\"table:table-cell\")\n for jour in range(5):\n date = self.debut + datetime.timedelta(jour)\n cellule = cellules.item(2 + jour)\n ReplaceFields([cellule], [('date', date)])\n\n # Les lignes\n inscrits = list(database.creche.select_inscrits(self.debut, date_fin))\n inscrits.sort(key=lambda x: GetPrenomNom(x))\n self.printPresences(table, inscrits, 3)\n\n # La ligne des totaux\n ligne_total = lignes.item(5)\n cellules = ligne_total.getElementsByTagName(\"table:table-cell\")\n for i in range(cellules.length):\n cellule = cellules.item(i)\n if cellule.hasAttribute('table:formula'):\n formule = cellule.getAttribute('table:formula')\n formule = formule.replace('5', str(3 + len(inscrits)))\n cellule.setAttribute('table:formula', formule)\n\n #print dom.toprettyxml()\n return None\n\n def printPresences(self, dom, inscrits, ligne_depart):\n template = dom.getElementsByTagName(\"table:table-row\")[ligne_depart]\n for inscrit in inscrits:\n line = template.cloneNode(1)\n cells = line.getElementsByTagName(\"table:table-cell\")\n ReplaceFields(cells, GetInscritFields(inscrit))\n for i, cell in enumerate(cells):\n day = (i - 3) // 5\n date = self.debut + datetime.timedelta(day)\n age = GetAge(inscrit.naissance, date)\n fields = [\n \"tranche_4_6\",\n \"tranche_6_12\",\n \"tranche_12_18\",\n \"tranche_18_24\",\n \"tranche_24_\"]\n field = fields[min(age // 6, len(fields) - 1)]\n journee = inscrit.GetJournee(date)\n present = journee and IsPresentDuringTranche(journee, database.creche.ouverture * 12, 12.5 * 12)\n food_needs = {}\n for food_need in database.creche.food_needs:\n quantity = getattr(food_need, field) if present else \"\"\n food_needs[food_need.label[0:2].lower()] = quantity\n food_needs[food_need.label[0:2].lower() + \"p\"] = quantity if inscrit.type_repas == REPAS_PUREE else \"\"\n food_needs[food_need.label[0:2].lower() + \"m\"] = quantity if inscrit.type_repas == REPAS_MORCEAUX else \"\"\n ReplaceFields(cell, list(food_needs.items()))\n dom.insertBefore(line, template)\n dom.removeChild(template)\n\n\nif __name__ == '__main__':\n import random\n from document_dialog import StartLibreOffice\n 
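# ---- Editor's note: hedged illustration, not part of the original module ----
# printPresences() above derives the food-quantity column from the child's age
# in months: one field per 6-month bracket, clamped to the final "24 months and
# over" bracket. The same lookup, restated as a standalone function:
def tranche_field(age_months):
    # fields[i] covers ages [6*i, 6*i + 6) months, except the last (open-ended)
    fields = ["tranche_4_6", "tranche_6_12", "tranche_12_18",
              "tranche_18_24", "tranche_24_"]
    return fields[min(age_months // 6, len(fields) - 1)]

assert tranche_field(5) == "tranche_4_6"     # first bracket
assert tranche_field(13) == "tranche_12_18"  # 12-17 months
assert tranche_field(40) == "tranche_24_"    # clamped to the last bracket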
database.init(\"../databases/lutins-miniac.db\")\n database.load()\n modifications = PreparationRepasModifications(datetime.date(2017, 11, 6))\n filename = \"./test-%f.odt\" % random.random()\n errors = GenerateOODocument(modifications, filename=filename, gauge=None)\n StartLibreOffice(filename)\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":4578275887724565500,"string":"4,578,275,887,724,565,500"},"line_mean":{"kind":"number","value":43.6608695652,"string":"43.66087"},"line_max":{"kind":"number","value":188,"string":"188"},"alpha_frac":{"kind":"number","value":0.5983255452,"string":"0.598326"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109589,"cells":{"repo_name":{"kind":"string","value":"cryvate/project-euler"},"path":{"kind":"string","value":"project_euler/solutions/problem_11.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3374"},"content":{"kind":"string","value":"from typing import List\n\nGRID = [[8, 2, 22, 97, 38, 15, 0, 40, 0, 75, 4, 5, 7, 78, 52, 12, 50, 77, 91,\n 8],\n [49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 4, 56,\n 62, 0],\n [81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 3, 49, 13,\n 36, 65],\n [52, 70, 95, 23, 4, 60, 11, 42, 69, 24, 68, 56, 1, 32, 56, 71, 37, 2,\n 36, 91],\n [22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66,\n 33, 13, 80],\n [24, 47, 32, 60, 99, 3, 45, 2, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17,\n 12, 50],\n [32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18,\n 38, 64, 70],\n [67, 26, 20, 68, 2, 62, 12, 20, 95, 63, 94, 39, 63, 8, 40, 91, 66, 49,\n 94, 21],\n [24, 55, 58, 5, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89,\n 63, 72],\n [21, 36, 23, 9, 75, 0, 76, 44, 20, 45, 35, 14, 0, 61, 33, 97, 34, 31,\n 33, 95],\n [78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 3, 80, 4, 62, 16, 14, 9, 53,\n 56, 92],\n [16, 39, 5, 42, 96, 35, 31, 47, 55, 58, 88, 24, 0, 17, 54, 24, 36, 29,\n 85, 57],\n [86, 56, 0, 48, 35, 71, 89, 7, 5, 44, 44, 37, 44, 60, 21, 58, 51, 54,\n 17, 58],\n [19, 80, 81, 68, 5, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 4, 89,\n 55, 40],\n [4, 52, 8, 83, 97, 35, 99, 16, 7, 97, 57, 32, 16, 26, 26, 79, 33, 27,\n 98, 66],\n [88, 36, 68, 87, 57, 62, 20, 72, 3, 46, 33, 67, 46, 55, 12, 32, 63, 93,\n 53, 69],\n [4, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 8, 46, 29, 32, 40, 62,\n 76, 36],\n [20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 4,\n 36, 16],\n [20, 73, 35, 29, 78, 31, 90, 1, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57,\n 5, 54],\n [1, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 1, 89, 19,\n 67, 48]]\n\n\ndef solve(grid: List[List[float]] = GRID, length: int = 4) -> int:\n greatest_product = 1\n\n rows = len(GRID[0])\n columns = len(GRID)\n\n for i in range(rows):\n for j in range(columns):\n if i <= rows - 4: # vertical\n product = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * \\\n grid[i + 3][j]\n greatest_product = max(greatest_product, product)\n\n if j <= columns - 4: # horizontal\n product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * \\\n grid[i][j + 3]\n greatest_product = max(greatest_product, product)\n\n if i <= rows - 4 and j <= columns - 4: # diagonal\n product_up_down = grid[i][j] * grid[i + 1][j + 1] * \\\n grid[i + 2][j + 2] * grid[i + 3][j + 3]\n product_down_up = grid[i + 3][j] * grid[i + 2][j + 1] * \\\n grid[i + 1][j + 2] * grid[i][j + 3]\n greatest_product = max(greatest_product, product_up_down,\n product_down_up)\n\n if i 
<= rows - 4 and j <= columns - 4: # diagonal\n product = grid[i][j] * grid[i + 1][j + 1] * \\\n grid[i + 2][j + 2] * grid[i + 3][j + 3]\n greatest_product = max(greatest_product, product)\n\n return greatest_product\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":6193119133360649000,"string":"6,193,119,133,360,649,000"},"line_mean":{"kind":"number","value":43.3947368421,"string":"43.394737"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.4096028453,"string":"0.409603"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109590,"cells":{"repo_name":{"kind":"string","value":"DayGitH/Python-Challenges"},"path":{"kind":"string","value":"DailyProgrammer/DP20161017A.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2725"},"content":{"kind":"string","value":"\"\"\"\n[2016-10-17] Challenge #288 [Easy] Detecting Alliteration\n\nhttps://www.reddit.com/r/dailyprogrammer/comments/57zcbm/20161017_challenge_288_easy_detecting_alliteration/\n\n# Description\nAlliteration is defined as \"the occurrence of the same letter or sound at the beginning of adjacent or closely\nconnected words.\" It's a stylistic literary device identified by the repeated sound of the first consonant in a series\nof multiple words, or the repetition of the same sounds or of the same kinds of sounds at the beginning of words or in\nstressed syllables of a phrase. The first known use of the word to refer to a literary device occurred around 1624. A\nsimple example is \"Peter Piper Picked a Peck of Pickled Peppers\".\n## Note on Stop Words\nThe following are some of the simplest English \"stop words\", words too common and uninformative to be of much use. In\nthe case of Alliteration, they can come in between the words of interest (as in the Peter Piper example):\n I \n a \n about \n an \n and\n are \n as \n at \n be \n by \n com \n for \n from\n how\n in \n is \n it \n of \n on \n or \n that\n the \n this\n to \n was \n what \n when\n where\n who \n will \n with\n the\n# Sample Input\nYou'll be given an integer on a line, telling you how many lines follow. Then on the subsequent ines, you'll be given a\nsentence, one per line. Example:\n 3\n Peter Piper Picked a Peck of Pickled Peppers\n Bugs Bunny likes to dance the slow and simple shuffle\n You'll never put a better bit of butter on your knife\n# Sample Output\nYour program should emit the words from each sentence that form the group of alliteration. Example:\n Peter Piper Picked Peck Pickled Peppers\n Bugs Bunny slow simple shuffle\n better bit butter\n# Challenge Input\n 8\n The daily diary of the American dream\n For the sky and the sea, and the sea and the sky\n Three grey geese in a green field grazing, Grey were the geese and green was the grazing.\n But a better butter makes a batter better.\n \"His soul swooned slowly as he heard the snow falling faintly through the universe and faintly falling, like the\ndescent of their last end, upon all the living and the dead.\"\n Whisper words of wisdom, let it be.\n They paved paradise and put up a parking lot.\n So what we gonna have, dessert or disaster?\n# Challenge Output\n daily diary\n sky sea\n grey geese green grazing\n better butter batter better\n soul swooned slowly\n whisper words wisdom\n paved paradise\n dessert disaster\n**EDITED** to add the word \"and\" to the stop word list. 
My bad, a mistake to omit.\n\"\"\"\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":6072102307829940000,"string":"6,072,102,307,829,940,000"},"line_mean":{"kind":"number","value":29.9659090909,"string":"29.965909"},"line_max":{"kind":"number","value":119,"string":"119"},"alpha_frac":{"kind":"number","value":0.7056880734,"string":"0.705688"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109591,"cells":{"repo_name":{"kind":"string","value":"ringsd/projecteuler"},"path":{"kind":"string","value":"python/019.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1276"},"content":{"kind":"string","value":"\n#easy method\nmonth_days = {\n 1:31,\n 2:28,\n 3:31,\n 4:30,\n 5:31,\n 6:30,\n 7:31,\n 8:31,\n 9:30,\n 10:31,\n 11:30,\n 12:31,\n}\n\nweek = [0]*8\n\ndef counter_week( ):\n t = 1\n for i in range (1901, 2001):\n print \"Year-%d,\"%(i),\n for j in range(1, 13):\n days = month_days[j]\n if j == 2 and ((i % 4 == 0 and i % 100 != 0) or i % 400 == 0):\n days = days + 1\n# print \"%d-%d,\"%(j, days),\n for k in range (1, days):\n if k == 1 and i != 1900:\n week[t] = week[t] + 1\n t = t + 1\n if t == 8:\n t = 1\n print ''\n\ncounter_week() \nfor i in range( 1, 8 ):\n print \"%d-%d, \"%(i, week[i]),\nprint '' \n \n \n#Zeller’s Formula\n# \ndef day_of_week( year, month, day ):\n if month == 1 or month == 2:\n year = year - 1\n month = month + 12\n y = year % 100\n c = year / 100\n week = (y + y/4 + c/4 - 2*c + 26*(month+1)/10 + day - 1) % 7\n if week < 0:\n week = (week + 7)%7\n return week\n\nsunday = 0 \nfor i in range(1901, 2001):\n for j in range (1, 13):\n if day_of_week( i, j, 1 ) == 0:\n sunday = sunday + 1\n\nprint sunday"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":12650953943867154,"string":"12,650,953,943,867,154"},"line_mean":{"kind":"number","value":19.564516129,"string":"19.564516"},"line_max":{"kind":"number","value":74,"string":"74"},"alpha_frac":{"kind":"number","value":0.3940345369,"string":"0.394035"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109592,"cells":{"repo_name":{"kind":"string","value":"willmurnane/store"},"path":{"kind":"string","value":"cba/models.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"13816"},"content":{"kind":"string","value":"import base64\nimport datetime\nimport decimal\nimport logging\nimport re\n\nfrom core import make_request\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler())\n\nclass Item(object):\n def __init__(self, **kwargs):\n required_properties = 'id title price'.split()\n for prop in required_properties:\n if prop not in kwargs:\n raise ValueError('{0} property must be provided'.format(prop))\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def as_dict(self):\n logging.debug('as_dict')\n properties = []\n d = {}\n for attr in dir(self.__class__):\n ca = getattr(self.__class__, attr)\n if hasattr(ca, 'fget') and hasattr(ca, '__doc__'):\n properties.append(attr)\n for prop in properties:\n ca = getattr(self.__class__, prop)\n key = ca.__doc__\n value = ca.fget(self)\n if key and value:\n d[key] = value\n\n return d\n\n @property\n def id(self):\n '''MerchantItemId'''\n return self._id\n @id.setter\n def id(self, value):\n value = unicode(value)\n if not re.match(r'[a-zA-Z0-9]+', value):\n raise ValueError('id must be alphanumeric only')\n 
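# ---- Editor's note: hedged aside, not part of the original module ----
# re.match() above only anchors at the *start* of the string, so a value like
# u'abc$def' still passes (the match succeeds on 'abc'). Enforcing
# "alphanumeric only" needs an end anchor as well, e.g.:
#
#     if not re.match(r'[a-zA-Z0-9]+$', value):
#         raise ValueError('id must be alphanumeric only')
#
# (str.isalnum() is close, but it also accepts non-ASCII alphanumerics.)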
self._id = value\n\n _sku = None\n @property\n def sku(self):\n '''SKU'''\n return self._sku\n @sku.setter\n def sku(self, value):\n self._sku = unicode(value)\n\n @property\n def title(self):\n '''Title'''\n return self._title\n @title.setter\n def title(self, value):\n self._title = unicode(value)\n\n _desc = None\n @property\n def description(self):\n '''Description'''\n return self._desc\n @description.setter\n def description(self, value):\n self._desc = unicode(value)\n\n @property\n def price(self):\n '''UnitPrice.Amount'''\n return self._price\n @price.setter\n def price(self, value):\n value = decimal.Decimal(unicode(value)).quantize(\n decimal.Decimal('0.01'), rounding=decimal.ROUND_DOWN\n )\n if value < 0:\n raise ValueError('price must be non-negative')\n self._price = value\n\n _currency = u'USD'\n @property \n def currency(self):\n '''UnitPrice.CurrencyCode'''\n return self._currency\n @currency.setter\n def currency(self, value):\n value = unicode(value).upper()\n if len(value) != 3:\n raise ValueError('Invalid currency code')\n self._currency = value\n\n _quantity = 1\n @property\n def quantity(self):\n '''Quantity'''\n return self._quantity\n @quantity.setter\n def quantity(self, value):\n value = int(value)\n if value < 1:\n raise ValueError('quantity must be at least 1')\n self._quantity = value\n\n _url = None\n @property\n def url(self):\n '''URL'''\n return self._url\n @url.setter\n def url(self, value):\n self._url = unicode(value)\n\n _category = None\n @property\n def category(self):\n '''Category'''\n return self._category\n @category.setter\n def category(self, value):\n self._category = unicode(value)\n\n _fulfillment = None\n @property\n def fulfillment(self):\n '''FulfillmentNetwork'''\n return self._fulfillment\n @fulfillment.setter\n def fulfillment(self, value):\n value = unicode(value)\n valid = u'MERCHANT AMAZON_NA'.split\n if value not in valid:\n raise ValueError('fulfillment must be one of {0}'.format(valid))\n self._fulfillment = value\n\n _custom_data = ''\n @property\n def custom_data(self):\n return base64.b64decode(self._custom_data)\n @property\n def custom_data_base64(self):\n '''ItemCustomData'''\n return self._custom_data\n @custom_data.setter\n def custom_data(self, value):\n value = base64.b64encode(unicode(value))\n if len(value) > 1024:\n raise ValueError('custom_data too long')\n self._custom_data = value\n\n _product_type = None\n @property\n def product_type(self):\n '''ProductType'''\n return self._product_type\n @product_type.setter\n def product_type(self, value):\n self._product_type = unicode(value).upper()\n\n _weight = None\n @property\n def weight(self):\n '''PhysicalProductAttributes.Weight.Value'''\n return self._weight\n @weight.setter\n def weight(self, value):\n value = decimal.Decimal(unicode(value)).quantize(\n decimal.Decimal('0.0001'), rounding=decimal.ROUND_UP\n )\n if value < 0:\n raise ValueError('weight must be non-negative')\n\n self._weight = weight\n\n _weight_unit = None\n @property\n def weight_unit(self):\n '''PhysicalProductAttributes.Weight.Unit'''\n if self.weight is not None and self._weight_unit is None:\n raise ValueError('weight and weight_unit are mutually inclusive')\n return self._weight_unit\n @weight_unit.setter\n def weight_unit(self, value):\n self._weight_unit = unicode(value)\n\n _condition = None\n @property\n def condition(self):\n '''PhysicalProductAttributes.Condition'''\n return self._condition\n @condition.setter\n def condition(self, value):\n valid = u'Any Club Collectible New 
Refurbished New'.split()\n value = unicode(value)\n if value not in valid:\n raise ValueError('condition must be one of {0}'.format(valid))\n self._condition = value\n\n _shipping_level = u'Standard'\n @property\n def shipping_level(self):\n '''PhysicalProductAttributes.DeliveryMethod.ServiceLevel'''\n return self._shipping_level\n @shipping_level.setter\n def shipping_level(self, value):\n valid = u'Standard OneDay TwoDay Expedited'.split()\n value = unicode(value)\n if value not in valid:\n raise ValueError('shipping_level must be one of {0}'.format(valid))\n self._shipping_level = value\n\n _shipping_level_label = None\n @property\n def shipping_level_label(self):\n '''PhysicalProductAttributes.DeliveryMethod.DisplayableShippingLabel'''\n return self._shipping_level_label\n @shipping_level_label.setter\n def shipping_level_label(self, value):\n self._shipping_level_label = unicode(value)\n\n _shipping_dest = u'#default'\n @property\n def shipping_dest(self):\n '''PhysicalProductAttributes.DeliveryMethod.DestinationName'''\n return self._shipping_dest\n @shipping_dest.setter\n def shipping_dest(self, value):\n self._shipping_dest = unicode(value)\n\n _shipping_custom_data = ''\n @property\n def shipping_custom_data(self):\n return base64.b64decode(self._shipping_custom_data)\n @property\n def shipping_custom_data_base64(self):\n '''PhysicalProductAttributes.DeliveryMethod.ShippingCustomData'''\n return self._shipping_custom_data\n @shipping_custom_data.setter\n def shipping_custom_data(self, value):\n value = base64.b64encode(unicode(value))\n if len(value) > 1024:\n raise ValueError('shipping_custom_data too long')\n self._shipping_custom_data = value\n\n _shipping_amount = decimal.Decimal('0')\n @property\n def shipping(self):\n '''PhysicalProductAttributes.ItemCharges.Shipping.Amount'''\n return self._shipping_amount\n @shipping.setter\n def shipping(self, value):\n value = decimal.Decimal(unicode(value)).quantize(\n decimal.Decimal('0.01'), rounding.ROUND_DOWN\n )\n if value < 0:\n raise ValueError('shipping amount must be non-negative')\n self._shipping_amount = value\n\n @property\n def shipping_currency(self):\n '''PhyiscalProductAttributes.ItemCharges.Shipping.CurrencyCode'''\n return self.currency\n\nclass Order(object):\n def __init__(self, items=[]):\n self.items = list(items)\n\n def __len__(self):\n return sum(item.quantity for item in self.items)\n\n def add_item(self, item):\n self.items.append(item)\n\n @property\n def price(self):\n total = 0\n for item in self.items:\n total += item.price * item.quantity\n return total\n\n _shipping = None\n @property\n def shipping(self):\n if self._shipping is not None:\n return self._shipping\n total = 0\n for item in self.items:\n total += item.shipping * item.quantity\n return total\n @shipping.setter\n def shipping(self, value):\n self._shipping = decimal.Decimal(str(value)).quantize(\n decimal.Decimal('0.01'), decimal.ROUND_UP,\n )\n\n _tax = decimal.Decimal('0.00')\n @property\n def tax(self):\n return self._tax\n @tax.setter\n def tax(self, value):\n self._tax = decimal.Decimal(str(value)).quantize(\n decimal.Decimal('0.01'), decimal.ROUND_UP,\n )\n\nclass PurchaseContract(object):\n def __init__(self, id=None, settings={}):\n self.settings = dict(settings)\n if id is None:\n id_list, request_id = make_request(\n 'POST',\n 'CreatePurchaseContract',\n {\n 'DirectedId': '',\n 'AuthorizationToken': '',\n },\n settings,\n )\n self.id = id_list[0]\n else:\n self.id = id\n\n self.destinations = {}\n self.order = Order()\n 
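# ---- Editor's note: hedged aside, not part of the original module ----
# Three setters above have latent bugs that only surface when called:
#   * fulfillment: `valid = u'MERCHANT AMAZON_NA'.split` is missing the call
#     parentheses, so the `value not in valid` membership test raises TypeError.
#   * weight: the final line assigns `self._weight = weight`, but the local
#     variable is `value`, so every assignment raises NameError.
#   * shipping: `rounding.ROUND_DOWN` should be the keyword argument
#     `rounding=decimal.ROUND_DOWN`, as the other setters spell it.
# The quantization idiom those setters share, in working form:
import decimal

def to_money(value, rounding=decimal.ROUND_DOWN):
    """Normalise an arbitrary numeric input to a two-decimal-place Decimal."""
    amount = decimal.Decimal(str(value)).quantize(
        decimal.Decimal('0.01'), rounding=rounding)
    if amount < 0:
        raise ValueError('amount must be non-negative')
    return amount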
self.completed = False\n self.update()\n\n def __len__(self):\n return len(self.order)\n\n def update(self):\n params = {\n 'PurchaseContractId': self.id,\n }\n contract_list, request_id = make_request('GET', 'GetPurchaseContract', params, self.settings)\n contract = contract_list[0]\n assert contract.Id.text == self.id\n self.state = contract.State.text\n self.merchant_id = contract.MerchantId.text\n self.marketplace_id = contract.MarketplaceId.text\n self.expires = datetime.datetime.strptime(\n contract.ExpirationTimeStamp.text,\n '%Y-%m-%dT%H:%M:%S.%fZ',\n )\n try:\n for dest in contract.Destinations.Destination[:]:\n address = dest.PhysicalDestinationAttributes.ShippingAddress\n self.add_destination(\n dest_name = dest.DestinationName.text,\n name = address.Name.text,\n address = [], # ??? Didn't get an address from Amazon while testing this.\n city = address.City.text,\n state = address.StateOrProvinceCode.text,\n zip = address.PostalCode.text,\n country_code = address.CountryCode.text,\n phone = address.PhoneNumber.text,\n )\n\n except AttributeError:\n pass # No destinations chosen yet\n\n def add_destination(self, dest_name, name, address, city, state, zip, country_code='US', phone=''):\n if self.completed:\n logging.warn('This contract has already been completed.')\n self.destinations[dest_name] = {\n 'dest-type': 'PHYSICAL',\n 'address': {\n 'name': name,\n 'address': address,\n 'city': city,\n 'state': state,\n 'zip': zip,\n 'country-code': country_code,\n 'phone-number': phone,\n },\n }\n return len(self.destinations)\n\n def _add_items(self):\n params = {\n 'PurchaseContractId': self.id,\n }\n for i, item in enumerate(self.order.items):\n index = i+1 # Amazon wants the first index non-zero\n key_base = 'PurchaseItems.PurchaseItem.{0}.'.format(index)\n dict = item.as_dict()\n logger.debug('Item: {0}, .as_dict() = {1}'.format(item, dict))\n for k, v in dict.iteritems():\n params[key_base+k] = unicode(v)\n params[key_base+'MerchantId'] = self.settings['merchant-id']\n make_request('POST', 'SetPurchaseItems', params, self.settings)\n\n def complete(self):\n self.completed = True\n if not len(self):\n logger.warn('Completing contract on empty order!')\n try:\n self._add_items()\n except:\n self.completed = False\n raise\n try:\n params = {\n 'PurchaseContractId': self.id,\n 'Charges.Tax.Amount': str(self.order.tax),\n 'Charges.Tax.CurrencyCode': 'USD',\n 'Charges.Shipping.Amount': str(self.order.shipping),\n 'Charges.Shipping.CurrencyCode': 'USD',\n }\n make_request('POST', 'SetContractCharges', params, self.settings)\n order_ids, request_id = make_request(\n 'POST', 'CompletePurchaseContract',\n {'PurchaseContractId': self.id,},\n self.settings,\n )\n\n return order_ids\n except:\n self.completed = False\n raise\n\nclass Settings(object):\n secret_access_key = ''\n public_access_key = ''\n\n merchant_id = ''\n marketplace_id = ''\n\n sandbox = True\n\n def __init__(self, **kwargs):\n for k,v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __iter__(self):\n return self.as_dict().iteritems()\n\n def as_dict(self):\n keys = 'secret_access_key public_access_key merchant_id marketplace_id sandbox'.split()\n transform_key = lambda key: key.replace('_', '-')\n return dict((transform_key(key), getattr(self, key)) for key in 
keys)\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":1299372049393676500,"string":"1,299,372,049,393,676,500"},"line_mean":{"kind":"number","value":29.7706013363,"string":"29.770601"},"line_max":{"kind":"number","value":103,"string":"103"},"alpha_frac":{"kind":"number","value":0.5709322525,"string":"0.570932"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109593,"cells":{"repo_name":{"kind":"string","value":"joberreiter/pyload"},"path":{"kind":"string","value":"module/plugins/accounts/CzshareCom.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1585"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport re\nimport time\n\nfrom module.plugins.internal.Account import Account\n\n\nclass CzshareCom(Account):\n __name__ = \"CzshareCom\"\n __type__ = \"account\"\n __version__ = \"0.24\"\n __status__ = \"testing\"\n\n __description__ = \"\"\"Czshare.com account plugin, now Sdilej.cz\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"zoidberg\", \"zoidberg@mujmail.cz\"),\n (\"stickell\", \"l.stickell@yahoo.it\")]\n\n\n CREDIT_LEFT_PATTERN = r'\\s*([\\d ,]+) (KiB|MiB|GiB)\\s*([^<]*)\\s*'\n\n\n def grab_info(self, user, password, data):\n premium = False\n validuntil = None\n trafficleft = None\n\n html = self.load(\"http://sdilej.cz/prehled_kreditu/\")\n\n try:\n m = re.search(self.CREDIT_LEFT_PATTERN, html)\n trafficleft = self.parse_traffic(m.group(1), m.group(2))\n validuntil = time.mktime(time.strptime(m.group(3), '%d.%m.%y %H:%M'))\n\n except Exception, e:\n self.log_error(e, trace=True)\n\n else:\n premium = True\n\n return {'premium' : premium,\n 'validuntil' : validuntil,\n 'trafficleft': trafficleft}\n\n\n def signin(self, user, password, data):\n html = self.load('https://sdilej.cz/index.php',\n post={'Prihlasit' : \"Prihlasit\",\n \"login-password\": password,\n \"login-name\" : user})\n\n if '
>>> phil:\n newColNum = self.colNum = colNum\n newRowNum = self.rowNum = rowNum\n for char in s[:m.end()]:\n if char == '\\012':\n newRowNum = newRowNum + 1\n newColNum = 1\n else:\n newColNum = newColNum + 1\n #<<<< \n if debug: print \"$ r:c %s:%s (%s)'%s' %s:%s \" % (\n rowNum, colNum, m.end(), s[:m.end()], \n newRowNum, newColNum\n )\n \n for i in range(len(groups)):\n #print \"$$ i=%s groups[%s]={%s} $$\" % (i, i, groups[i])\n if groups[i] and self.index2func.has_key(i):\n self.index2func[i](groups[i])\n #//for i\n s = s[m.end():]\n \n rowNum = newRowNum\n colNum = newColNum\n #//while s\n\n def addToken(self, tokenType, tokenValue):\n self.rv.append(PosToken(tokenType, tokenValue, \n self.rowNum, self.colNum))\n \n#===========================================================\n###### PosToken ######\n#===========================================================\n\nclass PosToken:\n def __init__(self, type, attr=None, line=0, col=0):\n self.type = type\n self.attr = attr\n self.line = line\n self.col = col\n\n def __cmp__(self, o):\n return cmp(self.type, o)\n\n def __repr__(self):\n x = self.type\n if self.attr != None: \n x = '(' + x +' ' + repr(self.attr) + ')'\n else:\n x = self.type\n if self.line > 0:\n x = x + (':%s:%s' %(self.line, self.col))\n return x\n \n\n# end tokpos.py\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":-4943181126879202000,"string":"-4,943,181,126,879,202,000"},"line_mean":{"kind":"number","value":26.7415730337,"string":"26.741573"},"line_max":{"kind":"number","value":67,"string":"67"},"alpha_frac":{"kind":"number","value":0.4467395707,"string":"0.44674"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109596,"cells":{"repo_name":{"kind":"string","value":"peeringdb/peeringdb-py"},"path":{"kind":"string","value":"peeringdb/whois.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"7186"},"content":{"kind":"string","value":"import collections\nimport sys\nfrom peeringdb.util import pretty_speed\n\n\nclass WhoisFormat:\n def __init__(self, fobj=sys.stdout):\n self.fobj = fobj\n\n self.display_names = {\n \"fac_set\": \"Facilities\",\n }\n\n def mk_fmt(self, *widths):\n return \"%-\" + \"s %-\".join(map(str, widths)) + \"s\"\n\n def mk_set_headers(self, data, columns):\n \"\"\" figure out sizes and create header fmt \"\"\"\n columns = tuple(columns)\n lens = []\n\n for key in columns:\n value_len = max(len(str(each.get(key, \"\"))) for each in data)\n # account for header lengths\n lens.append(max(value_len, len(self._get_name(key))))\n\n fmt = self.mk_fmt(*lens)\n return fmt\n\n def _get_name(self, key):\n \"\"\" get display name for a key, or mangle for display \"\"\"\n if key in self.display_names:\n return self.display_names[key]\n\n return key.capitalize()\n\n def _get_val(self, data, key):\n \"\"\" get value from a dict, format if necessary \"\"\"\n return data.get(key, \"\")\n\n def _get_columns(self, data):\n \"\"\" get columns from a dict \"\"\"\n return data.keys()\n\n def display_section(self, name):\n self._print(name)\n self._print(\"=\" * len(name))\n self._print(\"\")\n\n def display_headers(self, fmt, headers):\n self._print(fmt % headers)\n self._print(fmt % tuple(\"-\" * len(x) for x in headers))\n\n def display_set(self, typ, data, columns):\n \"\"\" display a list of dicts \"\"\"\n self.display_section(\"%s (%d)\" % (self._get_name(typ), len(data)))\n headers = tuple(map(self._get_name, columns))\n fmt = self.mk_set_headers(data, columns)\n 
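# ---- Editor's note: hedged aside, not part of the original module ----
# mk_set_headers() sizes each column to max(widest value, header width) and
# mk_fmt() turns those widths into a left-aligned "%-Ns %-Ms ..." format
# string. A self-contained restatement of the idea:
def column_format(rows, columns):
    """Build a %-style format string sized to `rows` over the given columns."""
    widths = [max([len(str(r.get(c, ''))) for r in rows] + [len(c)])
              for c in columns]
    return ' '.join('%%-%ds' % w for w in widths)

rows = [{'name': 'DE-CIX', 'asn': 64512}, {'name': 'AMS-IX', 'asn': 65001}]
fmt = column_format(rows, ['name', 'asn'])
print(fmt % ('name', 'asn'))
for r in rows:
    print(fmt % tuple(r.get(c, '') for c in ['name', 'asn']))
# Note: the row loop just below builds cells with `for k, v in each.items()`,
# which follows dict order rather than `columns`; iterating `columns` (as
# here) keeps each cell aligned with its header.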
self.display_headers(fmt, headers)\n\n for each in data:\n row = tuple(self._get_val(each, k) for k, v in each.items())\n self._print(fmt % row)\n\n self._print(\"\\n\")\n\n def display_field(self, fmt, obj, field, display=None):\n if not display:\n display = self._get_name(field)\n self._print(fmt % (display, obj[field]))\n\n def check_set(self, data, name):\n if data.get(name, None):\n if hasattr(self, \"print_\" + name):\n getattr(self, \"print_\" + name)(data[name])\n\n def print_net(self, data):\n self.display_section(\"Network Information\")\n fmt = \"%-21s: %s\"\n self.display_field(fmt, data, \"name\", \"Name\")\n self.display_field(fmt, data, \"asn\", \"Primary ASN\")\n self.display_field(fmt, data, \"aka\", \"Also Known As\")\n self.display_field(fmt, data, \"website\", \"Website\")\n self.display_field(fmt, data, \"irr_as_set\", \"IRR AS-SET\")\n self.display_field(fmt, data, \"info_type\", \"Network Type\")\n self.display_field(fmt, data, \"info_prefixes6\", \"Approx IPv6 Prefixes\")\n self.display_field(fmt, data, \"info_prefixes4\", \"Approx IPv4 Prefixes\")\n self.display_field(fmt, data, \"looking_glass\", \"Looking Glass\")\n self.display_field(fmt, data, \"route_server\", \"Route Server\")\n self.display_field(fmt, data, \"created\", \"Created at\")\n self.display_field(fmt, data, \"updated\", \"Updated at\")\n self._print(\"\\n\")\n\n self.display_section(\"Peering Policy Information\")\n self.display_field(fmt, data, \"policy_url\", \"URL\")\n self.display_field(fmt, data, \"policy_general\", \"General Policy\")\n self.display_field(fmt, data, \"policy_locations\", \"Location Requirement\")\n self.display_field(fmt, data, \"policy_ratio\", \"Ratio Requirement\")\n self.display_field(fmt, data, \"policy_contracts\", \"Contract Requirement\")\n self._print(\"\\n\")\n\n self.check_set(data, \"poc_set\")\n self.check_set(data, \"netixlan_set\")\n self.check_set(data, \"netfac_set\")\n\n def print_poc_set(self, data):\n self.display_section(\"Contact Information\")\n fmt = self.mk_fmt(6, 20, 15, 20, 14)\n hdr = (\"Role\", \"Name\", \"Email\", \"URL\", \"Phone\")\n self.display_headers(fmt, hdr)\n\n for poc in data:\n self._print(\n fmt\n % (\n poc.get(\"role\", \"\"),\n poc.get(\"name\", \"\"),\n poc.get(\"email\", \"\"),\n poc.get(\"url\", \"\"),\n poc.get(\"phone\", \"\"),\n )\n )\n\n self._print(\"\\n\")\n\n def print_netfac_set(self, data):\n self.display_section(\"Private Peering Facilities (%d)\" % len(data))\n fmt = self.mk_fmt(51, 8, 15, 2)\n hdr = (\"Facility Name\", \"ASN\", \"City\", \"CO\")\n self.display_headers(fmt, hdr)\n for each in data:\n self._print(\n fmt\n % (\n each.get(\"name\", each.get(\"id\")),\n each.get(\"local_asn\", \"\"),\n each.get(\"city\", \"\"),\n each.get(\"country\", \"\"),\n )\n )\n self._print(\"\\n\")\n\n def print_netixlan_set(self, data):\n self.display_section(\"Public Peering Points (%d)\" % len(data))\n fmt = self.mk_fmt(36, 8, 27, 5)\n hdr = (\"Exchange Point\", \"ASN\", \"IP Address\", \"Speed\")\n self.display_headers(fmt, hdr)\n for ix in data:\n if ix.get(\"ipaddr4\", None):\n self._print(\n fmt\n % (\n ix.get(\"name\", ix.get(\"ixlan_id\")),\n ix[\"asn\"],\n ix[\"ipaddr4\"],\n pretty_speed(ix[\"speed\"]),\n )\n )\n if ix.get(\"ipaddr6\", None):\n if ix.get(\"ipaddr4\", None):\n self._print(fmt % (\"\", \"\", ix[\"ipaddr6\"], \"\"))\n else:\n self._print(\n fmt\n % (\n ix[\"name\"],\n ix[\"asn\"],\n ix[\"ipaddr6\"],\n pretty_speed(ix[\"speed\"]),\n )\n )\n self._print(\"\\n\")\n\n def _print(self, *args):\n \"\"\" internal 
print to self.fobj \"\"\"\n string = \" \".join(args) + \"\\n\"\n self.fobj.write(string)\n\n def print(self, typ, data):\n \"\"\" *deprecated* - use display() \"\"\"\n return self.display(typ, data)\n\n def display(self, typ, data):\n \"\"\" display section of typ with data \"\"\"\n if hasattr(self, \"print_\" + typ):\n getattr(self, \"print_\" + typ)(data)\n\n elif not data:\n self._print(\"{}: {}\".format(typ, data))\n\n elif isinstance(data, collections.Mapping):\n self._print(\"\\n\", typ)\n for k, v in data.items():\n self.print(k, v)\n\n elif isinstance(data, (list, tuple)):\n # tabular data layout for lists of dicts\n if isinstance(data[0], collections.Mapping):\n self.display_set(typ, data, self._get_columns(data[0]))\n else:\n for each in data:\n self.print(typ, each)\n else:\n self._print(\"{}: {}\".format(typ, data))\n\n self.fobj.flush()\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-1643596645495830000,"string":"-1,643,596,645,495,830,000"},"line_mean":{"kind":"number","value":33.8834951456,"string":"33.883495"},"line_max":{"kind":"number","value":81,"string":"81"},"alpha_frac":{"kind":"number","value":0.4949902588,"string":"0.49499"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109597,"cells":{"repo_name":{"kind":"string","value":"intel-hpdd/intel-manager-for-lustre"},"path":{"kind":"string","value":"tests/unit/chroma_core/lib/storage_plugin/test_plugin.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"11991"},"content":{"kind":"string","value":"import types\nimport sys\n\nimport mock\n\nfrom chroma_core.services.plugin_runner.resource_manager import PluginSession\nfrom tests.unit.lib.iml_unit_test_case import IMLUnitTestCase\nfrom chroma_core.lib.storage_plugin.api import attributes\nfrom chroma_core.lib.storage_plugin.api import identifiers\nfrom chroma_core.lib.storage_plugin.api import resources\nfrom chroma_core.lib.storage_plugin.api.plugin import Plugin\n\n\nclass TestLocalResource(resources.ScannableResource):\n class Meta:\n identifier = identifiers.ScopedId(\"name\")\n\n name = attributes.String()\n\n\nclass TestGlobalResource(resources.ScannableResource):\n class Meta:\n identifier = identifiers.GlobalId(\"name\")\n\n name = attributes.String()\n\n\nclass TestResourceExtraInfo(resources.ScannableResource):\n class Meta:\n identifier = identifiers.GlobalId(\"name\")\n\n name = attributes.String()\n extra_info = attributes.String()\n\n\nclass TestResourceStatistic(resources.ScannableResource):\n class Meta:\n identifier = identifiers.GlobalId(\"name\")\n\n name = attributes.String()\n extra_info = attributes.String()\n\n\nclass TestPlugin(Plugin):\n _resource_classes = [TestGlobalResource, TestLocalResource, TestResourceExtraInfo, TestResourceStatistic]\n\n def __init__(self, *args, **kwargs):\n self.initial_scan_called = False\n self.update_scan_called = False\n self.teardown_called = False\n\n Plugin.__init__(self, *args, **kwargs)\n\n def initial_scan(self, root_resource):\n self.initial_scan_called = True\n\n def update_scan(self, root_resource):\n self.update_scan_called = True\n\n def teardown(self):\n self.teardown_called = True\n\n\nclass TestCallbacks(IMLUnitTestCase):\n def setUp(self):\n super(TestCallbacks, self).setUp()\n\n import chroma_core.lib.storage_plugin.manager\n\n self.orig_manager = chroma_core.lib.storage_plugin.manager.storage_plugin_manager\n chroma_core.lib.storage_plugin.manager.storage_plugin_manager = (\n 
chroma_core.lib.storage_plugin.manager.StoragePluginManager()\n )\n\n from chroma_core.lib.storage_plugin.manager import storage_plugin_manager\n\n storage_plugin_manager._load_plugin(sys.modules[__name__], \"test_mod\", TestPlugin)\n\n from chroma_core.models import StorageResourceRecord\n\n resource_class, resource_class_id = storage_plugin_manager.get_plugin_resource_class(\n \"test_mod\", \"TestGlobalResource\"\n )\n record, created = StorageResourceRecord.get_or_create_root(resource_class, resource_class_id, {\"name\": \"test1\"})\n\n from chroma_core.lib.storage_plugin.query import ResourceQuery\n\n scannable_record = StorageResourceRecord.objects.get()\n self.scannable_resource = ResourceQuery().get_resource(scannable_record)\n self.scannable_global_id = scannable_record.pk\n\n self.resource_manager = mock.Mock(_sessions={})\n self.plugin = TestPlugin(self.resource_manager, self.scannable_global_id)\n self.resource_manager._sessions[self.scannable_global_id] = PluginSession(\n self.plugin, self.scannable_global_id, 0\n )\n\n def tearDown(self):\n import chroma_core.lib.storage_plugin.manager\n\n chroma_core.lib.storage_plugin.manager.storage_plugin_manager = self.orig_manager\n\n def test_initial(self):\n self.plugin.initial_scan = mock.Mock()\n self.plugin.do_initial_scan()\n self.plugin.initial_scan.assert_called_once()\n\n def test_update(self):\n self.plugin.do_initial_scan()\n\n self.plugin.update_scan = mock.Mock()\n self.plugin.do_periodic_update()\n self.plugin.update_scan.assert_called_once()\n\n def test_teardown(self):\n self.plugin.do_initial_scan()\n\n self.plugin.teardown = mock.Mock()\n self.plugin.do_teardown()\n self.plugin.teardown.assert_called_once()\n\n\nclass TestAddRemove(IMLUnitTestCase):\n def setUp(self):\n super(TestAddRemove, self).setUp()\n\n import chroma_core.lib.storage_plugin.manager\n\n self.orig_manager = chroma_core.lib.storage_plugin.manager.storage_plugin_manager\n chroma_core.lib.storage_plugin.manager.storage_plugin_manager = (\n chroma_core.lib.storage_plugin.manager.StoragePluginManager()\n )\n\n from chroma_core.lib.storage_plugin.manager import storage_plugin_manager\n\n storage_plugin_manager._load_plugin(sys.modules[__name__], \"test_mod\", TestPlugin)\n\n from chroma_core.models import StorageResourceRecord\n\n resource_class, resource_class_id = storage_plugin_manager.get_plugin_resource_class(\n \"test_mod\", \"TestGlobalResource\"\n )\n record, created = StorageResourceRecord.get_or_create_root(resource_class, resource_class_id, {\"name\": \"test1\"})\n\n from chroma_core.lib.storage_plugin.query import ResourceQuery\n\n scannable_record = StorageResourceRecord.objects.get()\n self.scannable_resource = ResourceQuery().get_resource(scannable_record)\n self.scannable_global_id = scannable_record.pk\n\n def tearDown(self):\n import chroma_core.lib.storage_plugin.manager\n\n chroma_core.lib.storage_plugin.manager.storage_plugin_manager = self.orig_manager\n\n def _report_resource(self, resource_to_report):\n def _report_a_resource(self, root_resource):\n if resource_to_report is not None:\n self.resource1, _ = self.update_or_create(resource_to_report, name=\"resource\")\n\n return _report_a_resource\n\n def _remove_resource(self, resource_to_remove):\n def _remove_a_resource(self, root_resource):\n if resource_to_remove is not None:\n self.remove(resource_to_remove)\n\n return _remove_a_resource\n\n def _create_mocked_resource_and_plugin(self):\n self.resource_manager = mock.Mock(_sessions={})\n self.plugin = 
TestPlugin(self.resource_manager, self.scannable_global_id)\n self.resource_manager._sessions[self.scannable_global_id] = PluginSession(\n self.plugin, self.scannable_global_id, 0\n )\n\n def test_initial_resources(self):\n\n # First session for the scannable, 1 resource present\n self._create_mocked_resource_and_plugin()\n\n self.plugin.initial_scan = types.MethodType(self._report_resource(TestGlobalResource), self.plugin)\n\n # Should pass the scannable resource and the one we created to session_open\n self.plugin.do_initial_scan()\n self.resource_manager.session_open.assert_called_once_with(\n self.plugin,\n self.plugin._scannable_id,\n [self.plugin._root_resource, self.plugin.resource1],\n self.plugin._update_period,\n )\n\n # Session reporting 0 resource in initial_scan\n self._create_mocked_resource_and_plugin()\n\n self.plugin.initial_scan = types.MethodType(self._report_resource(None), self.plugin)\n self.plugin.do_initial_scan()\n\n # Should just report back the scannable resource to session_open\n self.resource_manager.session_open.assert_called_once_with(\n self.plugin, self.plugin._scannable_id, [self.plugin._root_resource], self.plugin._update_period\n )\n\n def test_update_add(self):\n self._create_mocked_resource_and_plugin()\n\n self.plugin.do_initial_scan()\n\n # Patch in an update_scan which reports one resource\n self.plugin.update_scan = types.MethodType(self._report_resource(TestGlobalResource), self.plugin)\n\n # Check that doing an update_or_create calls session_add_resources\n self.plugin.do_periodic_update()\n self.resource_manager.session_add_resources.assert_called_once_with(\n self.plugin._scannable_id, [self.plugin.resource1]\n )\n\n self.resource_manager.session_add_resources.reset_mock()\n\n # Check that doing a second update_or_create silently does nothing\n self.plugin.do_periodic_update()\n self.assertFalse(self.resource_manager.session_add_resources.called)\n\n def test_update_remove_global(self):\n self._create_mocked_resource_and_plugin()\n self.plugin.do_initial_scan()\n\n self.plugin.update_scan = types.MethodType(self._report_resource(TestGlobalResource), self.plugin)\n\n self.plugin.do_periodic_update()\n self.resource_manager.session_add_resources.assert_called_once_with(\n self.plugin._scannable_id, [self.plugin.resource1]\n )\n\n self.plugin.update_scan = types.MethodType(self._remove_resource(self.plugin.resource1), self.plugin)\n\n self.plugin.do_periodic_update()\n self.resource_manager.session_remove_global_resources.assert_called_once_with(\n self.plugin._scannable_id, [self.plugin.resource1]\n )\n\n def test_update_remove_local(self):\n self._create_mocked_resource_and_plugin()\n self.plugin.do_initial_scan()\n\n self.plugin.update_scan = types.MethodType(self._report_resource(TestLocalResource), self.plugin)\n\n self.plugin.do_periodic_update()\n self.resource_manager.session_add_resources.assert_called_once_with(\n self.plugin._scannable_id, [self.plugin.resource1]\n )\n\n self.plugin.update_scan = types.MethodType(self._remove_resource(self.plugin.resource1), self.plugin)\n\n self.plugin.do_periodic_update()\n self.resource_manager.session_remove_local_resources.assert_called_once_with(\n self.plugin._scannable_id, [self.plugin.resource1]\n )\n\n def test_update_modify_parents(self):\n self._create_mocked_resource_and_plugin()\n self.plugin.do_initial_scan()\n\n # Insert two resources, both having no parents\n def report_unrelated(self, root_resource):\n self.resource1, created = self.update_or_create(TestLocalResource, name=\"test1\")\n 
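# ---- Editor's note: hedged aside, not part of the original test module ----
# These tests swap in scan behaviour by binding a plain function to the
# *instance* with types.MethodType, so `self` inside the function resolves to
# that particular plugin object rather than the class. The mechanism in
# isolation (Probe/fake_scan are illustrative names only):
import types

class Probe(object):
    pass

def fake_scan(self, root_resource):
    return (self, root_resource)

p = Probe()
p.scan = types.MethodType(fake_scan, p)   # bound to the instance, not the class
assert p.scan('root') == (p, 'root')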
self.resource2, created = self.update_or_create(TestLocalResource, name=\"test2\")\n self.resource3, created = self.update_or_create(TestLocalResource, name=\"test3\")\n\n self.plugin.update_scan = types.MethodType(report_unrelated, self.plugin)\n self.plugin.do_periodic_update()\n\n # Create a parent relationship between them\n def add_parents(self, root_resource):\n self.resource1.add_parent(self.resource2)\n\n self.plugin.update_scan = types.MethodType(add_parents, self.plugin)\n self.plugin.do_periodic_update()\n self.resource_manager.session_resource_add_parent.assert_called_once_with(\n self.plugin._scannable_id, self.plugin.resource1._handle, self.plugin.resource2._handle\n )\n\n # Remove the relationship\n def remove_parents(self, root_resource):\n self.resource1.remove_parent(self.resource2)\n\n self.plugin.update_scan = types.MethodType(remove_parents, self.plugin)\n self.plugin.do_periodic_update()\n self.resource_manager.session_resource_remove_parent.assert_called_once_with(\n self.plugin._scannable_id, self.plugin.resource1._handle, self.plugin.resource2._handle\n )\n\n def test_update_modify_attributes(self):\n self._create_mocked_resource_and_plugin()\n self.plugin.do_initial_scan()\n\n # Insert two resources, both having no parents\n def report1(self, root_resource):\n self.resource, created = self.update_or_create(TestResourceExtraInfo, name=\"test1\", extra_info=\"foo\")\n\n self.plugin.update_scan = types.MethodType(report1, self.plugin)\n self.plugin.do_periodic_update()\n\n # Modify the extra_info attribute\n def modify(self, root_resource):\n self.resource.extra_info = \"bar\"\n\n self.plugin.update_scan = types.MethodType(modify, self.plugin)\n self.plugin.do_periodic_update()\n self.resource_manager.session_update_resource.assert_called_once_with(\n self.plugin._scannable_id, self.plugin.resource._handle, {\"extra_info\": \"bar\"}\n )\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-1912038607933590800,"string":"-1,912,038,607,933,590,800"},"line_mean":{"kind":"number","value":37.4326923077,"string":"37.432692"},"line_max":{"kind":"number","value":120,"string":"120"},"alpha_frac":{"kind":"number","value":0.6835126345,"string":"0.683513"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109598,"cells":{"repo_name":{"kind":"string","value":"yaukwankiu/armor"},"path":{"kind":"string","value":"pattern2.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"14030"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# continued from pattern.py\n# defining the basic object we will be working with\n\n# Note: makeVideo - it doesn't work yet - i haven't yet solved the issues about opencv\n# so i am outputing the slides only for the moment\n# 2013-09-23\n##############################################################################################\n#\n#==== imports ================================================================================\n# some of the stuff were moved to defaultParameters.py\nimport copy\nimport time\nimport os\nimport re\nimport numpy\nimport numpy as np\nimport numpy.ma as ma\n#import matplotlib\nimport matplotlib.pyplot as plt\n#import scipy.misc.pilutil as smp\n#import numpy.fft as fft\n#import shutil\n#import sys\nimport pickle\nfrom copy import deepcopy\ntry:\n from scipy import signal\n from scipy import interpolate\nexcept ImportError:\n #print \"Scipy not installed\"\n pass\n\n\n#==== setting up the global 
parameters========================================================\n\nimport defaultParameters as dp\nfrom defaultParameters import * #bad habits but all these variables are prefixed with \"default\"\n # or at least i try to make them to\nimport colourbarQPESUMS # the colourbars for the Central Weather Bureau\nimport colourbarQPESUMSwhiteBackground # the same as above, with white backgrounds\n\n#==== importing pattern.py====================================================================\nfrom . import pattern\ntry:\n from dataStreamTools import makeVideo as mv\nexcept ImportError:\n print \"import error! opencv not installed(?!)\"\nfrom dataStreamTools import kongrey as kr\ndbz = pattern.DBZ\nds = pattern.DBZstream\n#==== defining the classes ===================================================================\n\n#class DataStreamSets:\nclass DataStreamSet: # correcting a long-standing typo 2014-03-09\n\n \"\"\"\n class dataStreamSet: DSS = dataStreamSet(ds0, ds1, ds2,...dsN) \n where ds0 = observations, ds1, ds2,.. = models\n with the bare basic methods of analysis and output to panel of 20+ images\n \"\"\"\n ############################################################################\n # initialisation and basic function calls\n def __init__(self, ds0, *args):\n\n self.name = ds0.name + '_' + '_'.join([v.name for v in args])\n self.obs = ds0\n self.wrfs = list(args)\n\n ############################################################################\n # simple building block functions\n def getAllDataTimes(self):\n \"\"\"\n get the union of the sets of dataTimes for all streams\n \"\"\"\n dataTimes = set([v.dataTime for v in self.obs])\n for wrf in self.wrfs:\n dataTimes = dataTimes.union([v.dataTime for v in wrf])\n dataTimes = sorted(list(dataTimes))\n return dataTimes\n\n def getCommonDataTimes(self):\n \"\"\"\n get the intersection of the sets of dataTimes for all streams\n \"\"\"\n dataTimes = set([v.dataTime for v in self.obs])\n for wrf in self.wrfs:\n dataTimes = dataTimes.intersection([v.dataTime for v in wrf])\n dataTimes = sorted(list(dataTimes))\n return dataTimes\n\n def backupMatrices(self):\n self.obs.backupMatrices()\n for wrf in self.wrfs:\n wrf.backupMatrices()\n def restoreMatrices(self):\n self.obs.restoreMatrices()\n for wrf in self.wrfs:\n wrf.restoreMatrices()\n \n ############################################################################\n # I/O's\n\n def load(self, stream_key=\"all\", verbose=False, **kwargs):\n if stream_key == \"all\" or stream_key ==\"obs\":\n print \"loading obs\"\n obs.load(**kwargs)\n if stream_key == \"all\" or stream_key ==\"wrf\" or stream_key==\"wrfs\":\n print \"loading wrfs\"\n for wrf in wrfs:\n wrf.load(**kwargs)\n\n def unload(self, stream_key=\"all\", verbose=False, **kwargs):\n if stream_key == \"all\" or stream_key ==\"obs\":\n print \"unloading obs\"\n obs.unload(**kwargs)\n if stream_key == \"all\" or stream_key ==\"wrf\" or stream_key==\"wrfs\":\n print \"unloading wrfs\"\n for wrf in wrfs:\n wrf.unload(**kwargs)\n\n def makeVideo2(self, ordering, outputFolder=''):\n \"\"\"\n make video, with an ordering at each dataTime\n ordering = [[1,2,3,5], [3,4,6,1], ...] 
- first for the first dataTime, second for the second dataTime, etc\n \"\"\"\n return mv.makeVideo( [self.obs] + self.wrfs, # [ds0, ds1, ds2, ds3, ds4, ...], a list of armor.pattern.DBZstream objects\n panel_cols = 5, # number of colums in the panel\n panel_rows = 5, # no need to be filled\n fourcc = cv.CV_FOURCC('F', 'L', 'V', '1'),\n fps = defaultFps,\n extension= '.avi',\n #fourcc = cv.CV_FOURCC('P', 'I', 'M', '1'),\n outputFileName =\"\",\n outputFolder=outputFolder,\n saveFrames = True, # saving the frames as images\n useCV2 = True,\n ordering = ordering, # ordering of the models\n )\n\n def makeVideo1(self, ordering, outputFolder=''):\n \"\"\"\n make video, with a single ordering for each dataStream in its entirety\n ordering = list, e.g. [2,3,4,5,1] <-- WRF2 goes first, then WRF3, WRF4, etc\n \"\"\"\n ordering = [ordering] * len(self.getAllDataTimes())\n return self.makeVideo2(ordering, outputPath)\n\n ############################################################################\n # analyses\n\n def analyse(self, algorithm):\n \"\"\"\n input: algorithm\n output: ordering at each dataTime\n ordering = [[1,2,3,5], [3,4,6,1], ...] means WRF1, WRF2,WRF3, WRF5 for dataTime1; WRFs3,4,6,1, for the second dataTime, etc\n \n \"\"\"\n pass\n\n def matching(self, algorithm, obsTime=\"\", maxHourDiff=7, **kwargs):\n \"\"\"\n input:\n algorithm - the function defining the algorithm of matching\n algorithm(parameters): (obs, wrf) -> score (real number)\n format of algorithm function: def alg1(a=pattern.a, ...., **kwargs):\n obsTime - time at which obs is compared with the wrfs, e.g. \"20140612.0200'\n maxHourDiff - the maximal time difference (in hours) between obs and wrfs, e.g. 7 (hours)\n kwargs - parameters for the algorithm\n output:\n ranking with scores and optimal timeshifts \n 2014-03-07\n \"\"\"\n if obsTime == \"\": # if the point for matching is not given, pick the first one\n obsTime = self.obs[0].dataTime\n \n ranking = []\n obs = self.obs\n wrfs = self.wrfs\n for wrf in wrfs:\n x = algorithm(obs, wrf, obsTime=obsTime, maxHourDiff=maxHourDiff, **kwargs)\n score = x['score']\n timeShift = x['timeShift']\n ranking.append( {'wrf': wrf.name, 'timeShift': timeShift, #timeShift: in hours\n 'score': score, \n 'dataFolder': wrf.dataFolder,\n 'obsTime': obsTime,\n 'maxHourDiff': maxHourDiff # tag them along just in case\n } ) #dataFolder = for potential disambiguation \n ranking.sort(key=lambda v:v['score'], reverse=True)\n return ranking\n\n\n def filtering(self, algorithm, stream_key=\"all\", name_key=\"\", verbose=False, **kwargs):\n \"\"\"\n input:\n algorithm - the function defining the algorithm of filtering\n algorithm(parameters): changes a.matrix, a.name, no output given\n format of algorithm function: def alg1(a=pattern.a, **kwargs):\n stream_key - keyword for choosing the DBZstreams to be filtered\n if it's \"obs\" we filter just all of the self.obs\n if it's \"wrf\" or \"wrfs\" we filter just all of the self.wrfs\n name_key - keyword for choosing the DBZ patterns to be filtered\n\n kwargs - parameters for the algorithm\n output:\n ranking with scores and optimal timeshifts \n 2014-03-07\n \"\"\"\n obs = self.obs\n wrfs = self.wrfs\n # first filter the obs\n if stream_key == \"all\" or stream_key == \"obs\" or stream_key == \"OBS\":\n for a in obs:\n if name_key in a.name:\n algorithm(a, **kwargs) # key line\n if verbose:\n print a.name\n if stream_key == \"all\" or stream_key == \"wrf\" or stream_key == \"wrfs\" \\\n or stream_key == \"WRF\" or stream_key == \"WRFS\" :\n for wrf 
in wrfs:\n for a in wrf:\n if name_key in a.name:\n algorithm(a, **kwargs) # key line\n if verbose:\n print a.name\n \n\n \n############################################\n\n# constants\nDataStreamSets = DataStreamSet #alias; # correcting a long-standing typo 2014-03-09\nDSS = DataStreamSet # alias\n\n\"\"\"\nkey example: kongrey\n\"\"\"\nfrom dataStreamTools import kongrey as kr\n\n#compref = pattern.DBZstream(dataFolder= kr.obs_folder,\n# #name=\"COMPREF.DBZ\", \n# name=\"\",\n# lowerLeftCornerLatitudeLongitude = kr.obs_lowerLeft , \n# upperRightCornerLatitudeLongitude = kr.obs_upperRight ,\n# outputFolder= kr.summary_folder,\n# imageFolder=kr.summary_folder,\n# key1=\"\", # keywords to pick out specific files\n# key2=\"\", # used only once in the __init__\n# key3=\"\",\n# preload=False,\n# imageExtension = '.png', \n# dataExtension = '.txt',\n# )\n\"\"\"\nprint 'loading observations'\nobs = kr.constructOBSstream(dumping=False)\nprint 'loading models',\nwrfsFolder = kr.defaultWRFdumpsFolder # '/home/k/ARMOR/data/KONG-REY/summary/WRF[regridded]'\nwrfs = []\nfor i in range(1,21):\n print i,\n wrf = pickle.load(open(wrfsFolder+'dbzstream' + ('0'+str(i))[-2:] + '.pydump'))\n #wrf.setDataFolder(asdfasdf) # haven't defined this function in pattern.DBZstream yet\n wrfs.append(wrf)\n\nkongreyDSS = DSS(obs, *wrfs)\n\"\"\"\n\nprint 'constructing kongreyDSS'\nobs = ds(name=\"COMPREF.DBZ\", dataFolder=defaultRootFolder + 'data/KONG-REY/OBS/')\nwrfs = []\nfor i in range(1,21):\n print i, \n wrfName = name='WRF'+ ('0'+str(i))[-2:]\n wrf = ds(name=wrfName, key1=wrfName, \n dataFolder=defaultRootFolder + 'data/KONG-REY/summary/WRF[regridded]/')\n wrfs.append(wrf)\nkongreyDSS = DSS(obs, *wrfs)\n\n\ndef constructDSS(obsFolder, wrfsFolder):\n obsName = obsFolder.split(\"/\")[-1]\n wrfsName = wrfsFolder.split(\"/\")[-1]\n print 'Constructing DSS from:', obsName, \",\", wrfsName\n print obsFolder\n print wrfsFolder\n obs = ds(name=obsName, dataFolder=obsFolder)\n wrfs = []\n for i in range(1,21):\n print i, \n wrfName = name='WRF'+ ('0'+str(i))[-2:]\n wrf = ds(name=wrfName, key1=wrfName, \n dataFolder=wrfsFolder)\n wrfs.append(wrf)\n dss = DSS(obs, *wrfs)\n return dss\n\nprint \"constructing march11 - march13 DSS objects\"\nmarch11 = constructDSS(dp.defaultRootFolder+\"data/march2014/QPESUMS/\", \n dp.defaultRootFolder+\"data/march2014/WRFEPS[regridded]/20140311/\")\nmarch11.name = \"Rainband_11_March_2014\"\nmarch11.obs.list= [v for v in march11.obs.list if '20140311' in v.dataTime]\n\nmarch12 = constructDSS(dp.defaultRootFolder+\"data/march2014/QPESUMS/\", \n dp.defaultRootFolder+\"data/march2014/WRFEPS[regridded]/20140312/\")\nmarch12.name = \"Rainband_12_March_2014\"\nmarch12.obs.list= [v for v in march12.obs.list if '20140312' in v.dataTime]\n\nmarch13 = constructDSS(dp.defaultRootFolder+\"data/march2014/QPESUMS/\", \n dp.defaultRootFolder+\"data/march2014/WRFEPS[regridded]/20140313/\")\nmarch13.name = \"Rainband_13_March_2014\"\nmarch13.obs.list= [v for v in march13.obs.list if '20140313' in v.dataTime]\n\nprint \"constructing may2014 DSS objects\"\nmay19 = constructDSS(dp.defaultRootFolder+\"data/may14/QPESUMS/\",\n dp.defaultRootFolder+\"data/may14/WRFEPS19[regridded]/\")\nmay19.name = \"Rainband_19_May_2014\"\nmay19.obs.list= [v for v in may19.obs.list if '20140519' in v.dataTime]\n\nmay20 = constructDSS(dp.defaultRootFolder+\"data/may14/QPESUMS/\",\n dp.defaultRootFolder+\"data/may14/WRFEPS20[regridded]/\")\nmay20.name = \"Rainband_20_May_2014\"\nmay20.obs.list= [v for v in may20.obs.list if 
'20140520' in v.dataTime]\n\n\nmay21 = constructDSS(dp.defaultRootFolder+\"data/may14/QPESUMS/\",\n dp.defaultRootFolder+\"data/may14/WRFEPS21[regridded]/\")\nmay21.name = \"Rainband_21_May_2014\"\nmay21.obs.list= [v for v in may21.obs.list if '20140521' in v.dataTime]\n\nmay22 = constructDSS(dp.defaultRootFolder+\"data/may14/QPESUMS/\",\n dp.defaultRootFolder+\"data/may14/WRFEPS22[regridded]/\")\nmay22.name = \"Rainband_22_May_2014\"\nmay22.obs.list= [v for v in may22.obs.list if '20140522' in v.dataTime]\n\nmay23 = constructDSS(dp.defaultRootFolder+\"data/may14/QPESUMS/\",\n dp.defaultRootFolder+\"data/may14/WRFEPS23[regridded]/\")\nmay23.name = \"Rainband_23_May_2014\"\nmay23.obs.list= [v for v in may23.obs.list if '20140523' in v.dataTime]\n\n\n\n\n\n\n\n\n\n\n"},"license":{"kind":"string","value":"cc0-1.0"},"hash":{"kind":"number","value":-5918039145204858000,"string":"-5,918,039,145,204,858,000"},"line_mean":{"kind":"number","value":39.0857142857,"string":"39.085714"},"line_max":{"kind":"number","value":135,"string":"135"},"alpha_frac":{"kind":"number","value":0.549109052,"string":"0.549109"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109599,"cells":{"repo_name":{"kind":"string","value":"genome/flow-core"},"path":{"kind":"string","value":"flow/pmon/process_monitor.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2419"},"content":{"kind":"string","value":"from flow.pmon.process_info import ParentProcessInfo\nfrom twisted.internet import reactor\nfrom twisted.web.resource import Resource, NoResource\nfrom twisted.web.server import Site\nfrom twisted.web.static import File\n\nimport json\nimport logging\nimport os\nimport psutil\nimport socket\n\nLOG = logging.getLogger(__name__)\n\nclass RootResource(Resource):\n def __init__(self, process_info):\n Resource.__init__(self)\n\n html_root = os.path.join(os.path.dirname(__file__), \"web\")\n\n self.putChild(\"basic\", BasicResource(process_info))\n self.putChild(\"status\", StatusResource(process_info))\n self.putChild(\"view\", File(html_root))\n\nclass JSONResource(Resource):\n\n def __init__(self, process_info):\n Resource.__init__(self)\n self.process_info = process_info\n\n def get_data(self):\n raise NotImplementedError\n\n def render_GET(self, request):\n request.setHeader('Access-Control-Allow-Origin', '*')\n request.setHeader('Access-Control-Allow-Methods', 'GET')\n request.setHeader('Content-type', 'application/json')\n\n data = self.get_data()\n return json.dumps(data)\n\nclass BasicResource(JSONResource):\n def get_data(self):\n return self.process_info.get_basic_info()\n\n def getChild(self, name, request):\n try:\n pid = int(name)\n except ValueError:\n return NoResource()\n\n if pid == self.process_info.pid:\n return BasicLeafResource(self.process_info)\n elif pid in self.process_info.children.keys():\n return BasicLeafResource(self.process_info.children[pid])\n else:\n return NoResource()\n\nclass BasicLeafResource(BasicResource):\n isLeaf = True\n\nclass StatusResource(JSONResource):\n isLeaf = True\n def get_data(self):\n return self.process_info.get_process_status()\n\nclass ProcessMonitor(object):\n def __init__(self, pid):\n self.port = None\n self.pid = pid\n\n def start(self, port=0):\n process = psutil.Process(os.getpid())\n process_info = ParentProcessInfo(process=process)\n\n factory = Site(RootResource(process_info))\n iport = reactor.listenTCP(port, factory)\n listen_port = iport.getHost().port\n self.port = listen_port\n listen_host = 
socket.gethostname()\n\n LOG.info(\"Process Monitor at http://%s:%s/view\",\n listen_host, listen_port)\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":-6455856908681783000,"string":"-6,455,856,908,681,783,000"},"line_mean":{"kind":"number","value":28.1445783133,"string":"28.144578"},"line_max":{"kind":"number","value":69,"string":"69"},"alpha_frac":{"kind":"number","value":0.6506821,"string":"0.650682"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":1095,"numItemsPerPage":100,"numTotalItems":110960,"offset":109500,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NzkzMzk2MCwic3ViIjoiL2RhdGFzZXRzL2NvZGVwYXJyb3QvY29kZXBhcnJvdC12YWxpZC1uZWFyLWRlZHVwbGljYXRpb24iLCJleHAiOjE3NTc5Mzc1NjAsImlzcyI6Imh0dHBzOi8vaHVnZ2luZ2ZhY2UuY28ifQ.iLCG_gSTbSDji9YLWQvUD_xKDL60zj1jTt4U8CfBJXtItpfNM9Hs-1TVBf2aFOGPvJpVOu8FvE9QIQjG_Jt3Dw","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
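ProcessMonitor.start only builds and registers the Twisted site; a caller still has to run the reactor. A minimal sketch of hypothetical calling code (the port number is an example; port=0 would pick a free one):

import os
from twisted.internet import reactor

monitor = ProcessMonitor(os.getpid())
monitor.start(port=8080)   # serves /basic, /status and /view
reactor.run()              # blocks until the reactor is stopped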
Dataset schema: repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 22 classes) | size (string, 4-7 chars) | content (string, 626-1.05M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 5.21-99.9) | line_max (int64, 12-999) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class)
repo_name: ronanlopes/twitter-datamining | path: search.py | copies: 1 | size: 1695

# -*- coding: utf-8 -*-

import sys
import tweepy
import twitter
import json

# Authentication
CONSUMER_KEY = 'ILHhQAC4QB0WNUdoRqmEA'
CONSUMER_SECRET = 'rmmkGo4YHniiJRwkwxGu9S7l5ZfhG7CZDXHw9eUo'
ACCESS_TOKEN = '339362662-rU6CizVcSZCr6CqWIhFh40yE0gmQdgusPRiwcpOj'
ACCESS_TOKEN_SECRET = 'CW9UCTOqYpzA1dS9doKcow6Bnz95UwZGBskcOsF2M6yIp'

auth = twitter.oauth.OAuth(ACCESS_TOKEN, ACCESS_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)

# XXX: Set this variable to a trending topic,
# or anything else for that matter. The example query below
# was a trending topic when this content was being developed
# and is used throughout the remainder of this chapter.
q = 'UFSJ'
count = 100

# See https://dev.twitter.com/docs/api/1.1/get/search/tweets
search_results = twitter_api.search.tweets(q=q, count=count)
statuses = search_results['statuses']

# Iterate through 5 more batches of results by following the cursor
for _ in range(5):
    try:
        next_results = search_results['search_metadata']['next_results']
    except KeyError, e:  # No more results when next_results doesn't exist
        break

    # Create a dictionary from next_results, which has the following form:
    # ?max_id=313519052523986943&q=NCAA&include_entities=1
    kwargs = dict([kv.split('=') for kv in next_results[1:].split("&")])
    search_results = twitter_api.search.tweets(**kwargs)
    statuses += search_results['statuses']

# Print the user name and text of every collected result
resultados = json.loads(json.dumps(statuses, indent=1))
for resultado in resultados:
    print resultado["user"]["name"], ": ", resultado["text"], "\n"

license: gpl-2.0 | hash: 8,937,783,562,914,071,000 | line_mean: 29.781818 | line_max: 74 | alpha_frac: 0.720614 | autogenerated: false
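The next_results parsing above is the standard cursoring idiom for Twitter's v1.1 search API. Isolated, with the example cursor string from the script's own comment:

# Cursor string as returned in search_metadata['next_results']
next_results = '?max_id=313519052523986943&q=NCAA&include_entities=1'

# Drop the leading '?', split on '&', then split each pair on '='
kwargs = dict(kv.split('=') for kv in next_results[1:].split('&'))

print(kwargs)
# {'max_id': '313519052523986943', 'q': 'NCAA', 'include_entities': '1'}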
repo_name: v-legoff/accertin | path: lyntin/ui/wxpui_voice.py | copies: 1 | size: 4526

import time

import pyTTS

MINIMAL_PLAYING_TIME = 1  # in seconds
TTS_TIMEOUT = .1  # in seconds

_tts = pyTTS.Create()
_tts_previous_start_time = 0

def _tts_is_speaking():
    # The TTS doesn't always start speaking at once, but we don't want to wait.
    # So we consider that the TTS is speaking during the first milliseconds,
    # even if _tts.IsSpeaking() returns False.
    return _tts.IsSpeaking() or time.time() < _tts_previous_start_time + TTS_TIMEOUT

def _tts_speak(text):
    global _tts_previous_start_time
    _tts.Speak(text, pyTTS.tts_async, pyTTS.tts_purge_before_speak)
    _tts.Resume()  # in case Pause() has been used
    _tts_previous_start_time = time.time()

def _tts_stop():
    global _tts_previous_start_time
    _tts.Pause()  # Stop() might take too much time
    _tts_previous_start_time = 0

class _Message:

    said = False

    def __init__(self, text):
        self.text = text

    def play(self):
        """Start saying the message."""
        _tts_speak(self.text)
        self._start_time = time.time()

    def stop(self):
        """Stop saying the message."""
        _tts_stop()
        self._stop_time = time.time()
        if time.time() >= self._start_time + MINIMAL_PLAYING_TIME:
            self.said = True

class _MessageQueue:

    _current = None
    _playing_item = False
    _history = False

    def __init__(self):
        self._msgs = []

    def alert(self, text):
        """Say now and entirely. Remember it as read."""
        m = _Message(text)
        m.play()
        while _tts_is_speaking():
            time.sleep(.1)
        m.said = True
        self._msgs.append(m)

    def urgent(self, text):
        """Start saying text now, remember it as read anyway,
        and leave the rest of the queue unchanged."""
        self._stop_current_if_needed()
        m = _Message(text)
        m.said = True
        self._msgs.append(m)
        self._current = m
        m.play()

    def info(self, text):
        """Append a message to the queue. Say it when the time has come."""
        self._msgs.append(_Message(text))
        self.update()

    def _stop_current_if_needed(self):
        if not self._playing_item and self._current is not None:
            self._current.stop()

    def item(self, text):
##        self.urgent(text)
##        return
        """Say the message immediately and don't remember it."""
        self._stop_current_if_needed()
        _tts_speak(text)
        self._playing_item = True

    def _next_unsaid_or_said(self):
        if self._current in self._msgs:
            try:
                return self._msgs[self._msgs.index(self._current) + 1]
            except IndexError:
                return

    def _next_msg(self):
        if self._history:
            return self._next_unsaid_or_said()
        for m in self._msgs:
            if not m.said:
                return m

    def update(self):
        """Must be called often to make sure that everything is said."""
        if not _tts_is_speaking():
            if self._playing_item:
                self._playing_item = False
            elif self._current is not None:
                self._current.said = True
            self._current = self._next_msg()
            if self._current is not None:
                self._current.play()
            else:
                self._history = False

    def must_talk(self):
        """Return True if at least a message or an item is to be said."""
        return self._playing_item or self._current is not None

    def previous(self):
        """Go to the previous message."""
        self._stop_current_if_needed()
        self._history = True
        if self._current in self._msgs:
            i = self._msgs.index(self._current)
            if i > 0:
                self._current = self._msgs[i - 1]
        elif self._msgs:
            self._current = self._msgs[-1]
        if self._current is not None:
            self._current.play()

    def next(self):
        """Go to the next message (if the current message has been said)."""
        if self._current is not None and not self._current.said:
            return
        self._stop_current_if_needed()
        self._current = self._next_unsaid_or_said()
        if self._current is not None:
            self._current.play()

    def flush(self):
        """Stop current queued message and mark all as said."""
        for m in self._msgs:
            m.said = True
        self._history = False
        self._stop_current_if_needed()

voice = _MessageQueue()

license: gpl-3.0 | hash: -1,661,653,340,896,824,800 | line_mean: 28.2 | line_max: 107 | alpha_frac: 0.561644 | autogenerated: false
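The module exposes a single voice queue that the UI is expected to feed and pump. A minimal driving loop might look like this (the 0.1 s poll interval is an assumption, not taken from the source; pyTTS is a Windows SAPI binding):

import time

voice.info("Connected to the MUD.")     # queued; spoken when the queue is idle
voice.urgent("You are under attack!")   # interrupts and is marked as read

# Pump the queue, e.g. from a UI idle/timer callback
while voice.must_talk():
    voice.update()
    time.sleep(0.1)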
repo_name: grampajoe/drippies | path: tests/unit/test_drippies.py | copies: 1 | size: 1772

import mock
import os

from drippies import get_forecast, drippify


class TestForecast(object):
    @mock.patch('drippies.forecastio')
    def test_get_forecast(self, forecastio):
        """Should use the forecastio library to get a forecast."""
        forecast = forecastio.load_forecast.return_value
        forecast.minutely.return_value.summary = 'Minute.'
        forecast.hourly.return_value.summary = 'Hour.'
        forecast.daily.return_value.summary = 'Day.'

        lat = 123
        lng = 321
        api_key = 'hello friend'

        with mock.patch.dict(os.environ, {'FORECASTIO_API_KEY': api_key}):
            result = get_forecast(lat, lng)

        forecastio.load_forecast.assert_called_with(api_key, lat, lng)
        assert result == 'Minute. Hour. Day.'

    def test_get_forecast_no_api_key(self):
        """Should raise an exception with no API key."""
        try:
            with mock.patch.dict(os.environ, {'FORECASTIO_API_KEY': ''}):
                get_forecast(123, 321)
        except Exception as ex:
            assert 'FORECASTIO_API_KEY' in str(ex)
        else:
            raise AssertionError('No exception raised.')


class TestDrippify(object):
    def test_replacements(self):
        """Should replace words with silly ones."""
        assert drippify('rain') == 'drippies'
        assert drippify('cloudy') == 'fluff fluffs'
        assert drippify('drizzle') == 'pitter pats'
        assert drippify('temperatures') == 'hot\'n\'colds'

    def test_makes_good_sentences(self):
        """Should preserve capitalization and punctuation."""
        assert drippify('Rain!!!!!!!!') == 'Drippies!!!!!!!!'
        assert drippify('Rain all day, cloudy all night.') == \
            'Drippies all day, fluff fluffs all night.'

license: bsd-3-clause | hash: 6,687,756,430,928,139,000 | line_mean: 35.163265 | line_max: 74 | alpha_frac: 0.613995 | autogenerated: false
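The tested drippies module is not part of this row. A minimal drippify implementation consistent with the assertions above could be (the word map is recovered from the tests; the regex approach is an assumption):

import re

# Replacement map recovered from the test assertions
_WORDS = {
    'rain': 'drippies',
    'cloudy': 'fluff fluffs',
    'drizzle': 'pitter pats',
    'temperatures': "hot'n'colds",
}

def drippify(text):
    """Replace weather words, preserving a leading capital letter."""
    def _swap(match):
        word = match.group(0)
        repl = _WORDS[word.lower()]
        # 'Rain' -> 'Drippies'
        return repl.capitalize() if word[0].isupper() else repl
    pattern = r'\b(?:' + '|'.join(_WORDS) + r')\b'
    return re.sub(pattern, _swap, text, flags=re.IGNORECASE)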
repo_name: lukasklein/pruefungsplan | path: pruefungsplan/notifier/migrations/0003_auto__add_notification.py | copies: 1 | size: 3086

# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'Notification'
        db.create_table(u'notifier_notification', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('pruefungsplan', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['notifier.Pruefungsplan'])),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
            ('email_verified', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('email_token', self.gf('django.db.models.fields.CharField')(default='2be86a2506c94506bd6758fd62a82920', max_length=32)),
            ('sms', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('sms_verified', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('sms_code', self.gf('django.db.models.fields.CharField')(default='55e3', max_length=4)),
            ('password', self.gf('django.db.models.fields.CharField')(default='0a609e7db7', max_length=10)),
        ))
        db.send_create_signal(u'notifier', ['Notification'])

    def backwards(self, orm):
        # Deleting model 'Notification'
        db.delete_table(u'notifier_notification')

    models = {
        u'notifier.notification': {
            'Meta': {'object_name': 'Notification'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'email_token': ('django.db.models.fields.CharField', [], {'default': "'6056669d197848288619a45aab2125f6'", 'max_length': '32'}),
            'email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'default': "'d6536f0e5c'", 'max_length': '10'}),
            'pruefungsplan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['notifier.Pruefungsplan']"}),
            'sms': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'sms_code': ('django.db.models.fields.CharField', [], {'default': "'958a'", 'max_length': '4'}),
            'sms_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'notifier.pruefungsplan': {
            'Meta': {'object_name': 'Pruefungsplan'},
            'available_since': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['notifier']

license: bsd-3-clause | hash: 2,044,085,636,269,861,000 | line_mean: 56.166667 | line_max: 140 | alpha_frac: 0.599157 | autogenerated: false
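Note that the token defaults differ between create_table and the frozen models dict: South evaluates callable defaults once at generation time and freezes the results. The model itself is not shown in this row; it most likely uses callables, roughly along these hypothetical lines:

import random
import uuid
from django.db import models

def make_email_token():
    return uuid.uuid4().hex               # e.g. '2be86a2506c94506bd6758fd62a82920'

def make_sms_code():
    return '%04x' % random.getrandbits(16)  # e.g. '55e3'

class Notification(models.Model):
    email_token = models.CharField(max_length=32, default=make_email_token)
    sms_code = models.CharField(max_length=4, default=make_sms_code)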
repo_name: praekelt/hellomama-registration | path: uniqueids/views.py | copies: 1 | size: 1949

from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.pagination import CursorPagination
from rest_framework import viewsets

from .models import Record, State
from .serializers import StateSerializer


class IdCursorPagination(CursorPagination):
    ordering = 'id'


class RecordPost(APIView):

    """
    Webhook listener for identities needing a unique ID
    """
    permission_classes = (IsAuthenticated,)

    def post(self, request, *args, **kwargs):
        """
        Accepts and creates a new unique ID record
        """
        if "id" in request.data["data"]:
            rec = {
                "identity": request.data["data"]["id"]
            }
            if "details" in request.data["data"] and \
                    "uniqueid_field_name" in request.data["data"]["details"]:
                rec["write_to"] = \
                    request.data["data"]["details"]["uniqueid_field_name"]
            else:
                rec["write_to"] = "health_id"
            if "details" in request.data["data"] and \
                    "uniqueid_field_length" in request.data["data"]["details"]:
                rec["length"] = \
                    request.data["data"]["details"]["uniqueid_field_length"]
            Record.objects.create(**rec)
            # Return status = 201
            status = 201
            accepted = {"accepted": True}
            return Response(accepted, status=status)
        else:
            # Return status = 400
            status = 400
            accepted = {"id": ['This field is required.']}
            return Response(accepted, status=status)


class StateGetViewSet(viewsets.ReadOnlyModelViewSet):

    """
    API endpoint that allows States to be viewed.
    """
    permission_classes = (IsAuthenticated,)
    queryset = State.objects.all()
    pagination_class = IdCursorPagination
    serializer_class = StateSerializer

license: bsd-3-clause | hash: 233,679,597,637,088,200 | line_mean: 33.803571 | line_max: 79 | alpha_frac: 0.597229 | autogenerated: false
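From the handler above, the webhook body carries the identity under data.id plus an optional details block. A hypothetical payload that would create a record writing a 10-digit ID to health_id (the URL in the comment depends on the project's routing and is an assumption):

payload = {
    "data": {
        "id": "identity-uuid-goes-here",             # required
        "details": {
            "uniqueid_field_name": "health_id",      # optional; defaults to health_id
            "uniqueid_field_length": 10,             # optional
        },
    }
}
# e.g. with the DRF test client (hypothetical route):
#   client.post('/uniqueids/records/', payload, format='json')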
repo_name: rupendrab/py_unstr_parse | path: check_309.py | copies: 1 | size: 1292

import nltk
import re

part_309_pattern_strings = [
    r"(^|^.*\s+)PART 309 OF (THE )?(FDIC|FEDERAL DEPOSIT INSURANCE CORPORATION)"
]
part_309_patterns = [re.compile(p) for p in part_309_pattern_strings]

def array_upper(arr):
    return [x.upper() for x in arr]

def look_for_in_wordarray(w_arr, sent):
    file_words = array_upper(w_arr)
    words = nltk.word_tokenize(sent)
    no_words = len(words)
    # Sliding windows of len(words) tokens over the file's token stream
    file_words_n = [file_words[i:i + no_words] for i in range(len(file_words) + 1 - no_words)]
    try:
        ind = file_words_n.index(array_upper(words))
        # Clamp the context window to the start of the token stream
        return ' '.join(file_words[max(ind - 3, 0):ind + 10])
    except ValueError:
        return ''

def look_for_in_sentences(sents, sent):
    sents_words = [val for line in sents for val in nltk.word_tokenize(line)]
    return look_for_in_wordarray(sents_words, sent)

def look_for_in_file(file, sent):
    f = open(file, 'r', encoding='latin1')
    data = f.read()
    f.close()
    return look_for_in_wordarray(nltk.word_tokenize(data), sent)

def has_part_309_file(file):
    sent = look_for_in_file(file, 'part 309')
    for p in part_309_patterns:
        if (p.match(sent)):
            return True
    return False

def has_part_309_sents(sents):
    sent = look_for_in_sentences(sents, 'part 309')
    for p in part_309_patterns:
        if (p.match(sent)):
            return True
    return False

license: mit | hash: -5,236,462,440,671,994,000 | line_mean: 27.086957 | line_max: 91 | alpha_frac: 0.668731 | autogenerated: false
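A quick demonstration of the windowed search (assuming nltk's punkt tokenizer data is installed; the input sentence is made up):

words = nltk.word_tokenize(
    "This disclosure is made under Part 309 of the FDIC rules and regulations."
)
print(look_for_in_wordarray(words, 'part 309'))
# 'IS MADE UNDER PART 309 OF THE FDIC RULES AND REGULATIONS .'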
repo_name: rhhayward/django_webshell | path: webshell/views.py | copies: 1 | size: 9386

from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.staticfiles.views import serve

from webshell.models import History, CmdType, Cwd

import subprocess
import os
from json import dumps, loads, JSONEncoder, JSONDecoder
import magic
import urllib
import time
import pickle

class PythonFileEncoder(JSONEncoder):
    def default(self, obj):
        if isinstance(obj, (list, dict, str, unicode, int, float, bool, type(None))):
            return JSONEncoder.default(self, obj)
        return obj.to_json()

def _get_cwd(request):
    if len(Cwd.objects.all()) <= 0:
        c = Cwd(user=request.user, cwd=os.getcwd())
        c.save()
    return Cwd.objects.all()[0]

def _set_cwd(request, cwd):
    c = _get_cwd(request)
    if cwd == "++":
        cwd = ".."
    cwd = cwd.replace('+', ' ')
    ## Turns out this breaks if the dir you're in is deleted out
    ## from under you.
    #####if os.path.isdir(os.path.join(c.cwd, cwd)):
    c.cwd = os.path.abspath(os.path.join(c.cwd, cwd))
    c.save()
    return 1

def _save(request, fileName, fileContents):
    c = _get_cwd(request)
    f = open(os.path.join(c.cwd, fileName), 'w')
    f.write(fileContents.replace('\r\n', '\n'))
    f.close()

def isTextFile(fileName):
    if fileName.endswith(".py"):
        return True
    elif fileName.endswith(".pl"):
        return True
    elif fileName.endswith(".txt"):
        return True
    elif fileName.endswith(".html"):
        return True
    return False

@staff_member_required
def index(request):
    username = request.user.username
    return render_to_response('webshell/index.html', locals(), context_instance=RequestContext(request))

@staff_member_required
def execute(request):
    return render_to_response('webshell/execute.html', locals(), context_instance=RequestContext(request))

@staff_member_required
def _execute(request, command, cwdin):
    if cwdin == "":
        cwdin = os.getcwd()
    cmdtype = CmdType.objects.all()[0]
    if History.objects.filter(user=request.user, cmd_type=cmdtype, cmd_text=command, cmd_pwd=cwdin).exists():
        v = History.objects.get(user=request.user, cmd_type=cmdtype, cmd_text=command, cmd_pwd=cwdin)
        v.time_stamp = timezone.now()
        v.save()
    else:
        v = History(user=request.user, cmd_type=cmdtype, cmd_text=command, time_stamp=timezone.now(), cmd_pwd=cwdin)
        v.save()
    username = request.user.username
    out = None
    err = None
    if command.endswith("&"):
        subprocess.Popen(command, shell=True, cwd=cwdin)
    else:
        (out, err) = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=True, cwd=cwdin).communicate(None)
    history_list = History.objects.all().order_by('time_stamp')
    er = ExecResult(out, err, history_list)
    return er

@staff_member_required
def history_delete(request, hid):
    try:
        v = History.objects.get(user=request.user, id=hid)
        v.delete()
    except:
        pass
    return history(request)

@staff_member_required
def history(request):
    username = request.user.username
    history_list = History.objects.all().order_by('time_stamp')
    return render_to_response('webshell/history.html', locals(), context_instance=RequestContext(request))

@staff_member_required
def set_cwd(request, cwd):
    _set_cwd(request, cwd)
    return file_manager(request)

@staff_member_required
def file_manager(request):
    c = _get_cwd(request)
    return _file_manager(request, c.cwd)

@staff_member_required
def _get_files(request, cwdin):
    username = request.user.username
    if cwdin == "":
        cwdin = os.getcwd()
    dirList = []
    try:
        dirList = os.listdir(cwdin)
    except:
        pass
    files = []

    ### Add ".." by default
    f = File(os.path.abspath(os.path.join(cwdin, "..")), "..", "dir")
    files.append(f)

    ### Here's what finally worked
    try:
        dirList = [f.decode('utf-8', 'ignore') for f in dirList]
    except:
        try:
            dirList = [unicode(f) for f in dirList]
        except:
            try:
                dirList = [urllib.quote(f.encode('utf8')) for f in dirList]
            except:
                dirList = []

    ### Add everything else we find
    for fname in sorted(dirList):
        filePath = os.path.join(cwdin, urllib.unquote(fname))
        fileType = "file"
        if os.path.isdir(filePath):
            fileType = "dir"
        elif fname.lower().endswith(".png") or fname.lower().endswith(".jpg") or fname.lower().endswith(".gif"):
            fileType = "image"
        elif isTextFile(fname):
            fileType = "text"
        size = None
        try:
            size = os.path.getsize(filePath)
        except:
            pass
        dateModified = None
        try:
            dateModified = os.path.getmtime(filePath)
        except:
            pass
        #f = File(cwdin, urllib.quote_plus(fname), fileType, size, dateModified)
        f = File(cwdin, fname, fileType, size, dateModified)
        files.append(f)
    return files

@staff_member_required
def _get_history_list(request, cwdin):
    history_list = History.objects.all().order_by('time_stamp')
    return history_list

@staff_member_required
def _file_manager(request, cwdin):
    files = _get_files(request, cwdin)
    history_list = _get_history_list(request, cwdin)
    return render_to_response('webshell/file_manager.html', locals(), context_instance=RequestContext(request))

def _file_rm(request, fileName):
    try:
        fileName = fileName.replace('+', ' ')
        fileName = os.path.abspath(os.path.join(_get_cwd(request).cwd, fileName))
        if os.path.isdir(fileName):
            os.removedirs(fileName)
        else:
            os.remove(fileName)
    except:
        print "there was an error " + fileName
        return 0
    return 1

@staff_member_required
def file_rm(request):
    if request.method == 'POST':
        fileName = request.POST['fileName']
        print "has a filename:" + str(fileName)
        _file_rm(request, fileName)
        return file_manager(request)
    else:
        return file_manager(request)

@staff_member_required
def execute_ajax(request):
    cwd = _get_cwd(request).cwd
    if request.method == 'POST':
        cmd = request.POST['command']
        er = _execute(request, cmd, cwd)
        json_data = dumps({"HTTPRESPONSE": er.out})
        return HttpResponse(json_data, mimetype="application/json")
    else:
        json_data = dumps({"HTTPRESPONSE": False})
        return HttpResponse(json_data, mimetype="application/json")

@staff_member_required
def get_files_ajax(request):
    cwd = _get_cwd(request).cwd
    files = _get_files(request, cwd)
    json_data = dumps(files, cls=PythonFileEncoder)
    return HttpResponse(json_data, mimetype="application/json")

@staff_member_required
def set_cwd_ajax(request, dirName):
    response = _set_cwd(request, dirName)
    cwd = _get_cwd(request).cwd
    json_data = dumps({"HTTPRESPONSE": response, "cwd": cwd})
    return HttpResponse(json_data, mimetype="application/json")

def fix_fileName(fileName):
    fileName = fileName.replace('+', ' ')
    return fileName

@staff_member_required
def file_rm_ajax(request, fileName):
    fileName = fix_fileName(fileName)
    response = _file_rm(request, fileName)
    json_data = dumps({"HTTPRESPONSE": response})
    return HttpResponse(json_data, mimetype="application/json")

@staff_member_required
def editor_view(request, fileName):
    fileName = fix_fileName(fileName)
    cwd = _get_cwd(request).cwd
    fileContents = open(os.path.join(cwd, fileName)).read()
    return render_to_response('webshell/editor.html', locals(), context_instance=RequestContext(request))

@staff_member_required
def editor_save(request, fileName):
    if request.method == 'POST':
        fileContents = request.POST['fileContents']
        _save(request, fileName, fileContents)
    return editor_view(request, fileName)

@staff_member_required
def get_file(request, fileName):
    cwd = _get_cwd(request).cwd
    fileName = fileName.replace('+', ' ')
    fileContentType = 'application/octet-stream'
    if fileName.endswith(".png"):
        fileContentType = 'image/png'
    elif fileName.lower().endswith(".jpg"):
        fileContentType = 'image/jpg'
    elif fileName.endswith(".gif"):
        fileContentType = 'image/gif'
    elif isTextFile(fileName):
        fileContentType = 'text/plain'
    else:
        fileContentType = magic.from_file(os.path.join(cwd, fileName), mime=True)
    my_data = open(os.path.join(cwd, fileName)).read()
    response = HttpResponse(my_data, content_type=fileContentType)
    response['Content-Length'] = len(my_data)
    return response

class File:
    def __init__(self, path=None, name=None, fileType=None, size=None, dateModified=None):
        self.setPath(path)
        self.setName(name)
        self.setFileType(fileType)
        self.setSize(size)
        self.setDateModified(dateModified)

    def to_json(self):
        return {
            'name': self.name,
            'fileType': self.fileType,
            'size': self.size,
            'dateModified': self.dateModified
        }

    def setPath(self, path):
        self.path = path

    def setName(self, name):
        self.name = name

    def setFileType(self, fileType):
        self.fileType = fileType

    def setDateModified(self, dateModified):
        self.dateModified = time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime(dateModified))

    def setSize(self, size):
        if size == None:
            self.size = ""
        elif size < 1024:
            self.size = str(size) + "b"
        elif size < 1024 * 1024:
            self.size = str(size / 1024) + "k"
        else:
            self.size = str(size / (1024 * 1024)) + "m"

class ExecResult:
    def __init__(self, out=None, err=None, history_list=None):
        self.setOut(out)
        self.setErr(err)
        self.setHistoryList(history_list)

    def setOut(self, out):
        self.out = out

    def setErr(self, err):
        self.err = err

    def setHistoryList(self, history_list):
        self.history_list = history_list

license: gpl-2.0 | hash: 7,406,631,666,602,353,000 | line_mean: 25.740741 | line_max: 168 | alpha_frac: 0.707863 | autogenerated: false
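The File helper humanizes byte counts with integer division ('b'/'k'/'m' suffixes). For example (the values are illustrative):

import time

f = File(path='/tmp', name='demo.bin', fileType='file',
         size=5 * 1024 * 1024, dateModified=time.time())
print(f.size)       # '5m'
print(f.to_json())  # {'name': 'demo.bin', 'fileType': 'file', 'size': '5m', 'dateModified': '...'}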
repo_name: r-owen/TUI | path: TUI/Misc/TelMech/TelMechModel.py | copies: 1 | size: 8177

#!/usr/bin/env python
"""An object that models the current state of the enclosure controller.

Information about various devices in the telescope enclosure.
Devices are broken up into categories, e.g. Fans, Lights.
Information about each category of devices is stored as follows:

model.catDict is a dictionary whose keys are device categories
and values are CatInfo objects (described below)
which contain information about that category of device.

2004-12-27 ROwen
2005-08-15 ROwen    Bug fix: had stairs and outside lights swapped.
2005-09-23 ROwen    Made polling for status a refresh command so it is more easily hidden in the log window.
2005-09-30 ROwen    Fixed order of fans.
2006-04-05 ROwen    Can disable polling by setting _StatusIntervalMS = 0 or None.
                    This is preparatory to switching to telmech, which will not need it at all.
                    Meanwhile it just helps me run from work.
2006-05-04 ROwen    Renamed from EnclosureModel to TelMechModel.
                    Modified to use the telmech actor. As a result, there is now one keyword per device.
                    Removed polling for status.
                    Added catNameSingular to CatInfo.
2007-06-22 ROwen    Added the Eyelids category.
2007-06-26 ROwen    Added devState and devIsCurrent attributes to the CatInfo class.
2007-06-27 ROwen    Added covers and tertrot entries.
                    Modified stateToBoolOrNone to use ? as "unknown value".
2007-09-17 ROwen    Put the tertiary position names in order of increasing tertiary rotation angle.
2009-04-21 ROwen    Updated for tuiModel root->tkRoot.
2009-10-09 ROwen    Implement CR 1060: Change order of tertiary ports.
2012-10-25 ROwen    Remove BC2 eyelid; it no longer exists.
"""
__all__ = ["getModel"]

import numpy
import RO.AddCallback
import RO.Alg
import RO.CnvUtil
import RO.Wdg
import RO.KeyVariable
import TUI.TUIModel

# reasonable time for device toggle commands
_TimeLim = 120

_theModel = None

def getModel():
    global _theModel
    if _theModel is None:
        _theModel = _Model()
    return _theModel

def stateToBoolOrNone(strVal):
    lowStrVal = strVal.lower()
    if lowStrVal in ("close", "off"):
        return False
    elif lowStrVal in ("open", "on"):
        return True
    elif lowStrVal == "?":
        return None
    else:
        raise ValueError("unknown state %r" % strVal)

def tertRotConverter(strVal):
    if strVal == "?":
        return None
    return str(strVal)

class CatInfo(RO.AddCallback.BaseMixin):
    """Information about a category of devices, e.g. Fans, Lights.

    Attributes:
    - catName: the name of the category
    - catNameSingular: the name of the category with the final "s" or "S" missing, if present.
    - devDict: an ordered dictionary of device name: boolean keyword variable
    - stateNames: a word describing the False and True state
    - verbNames: the verb used to command the False and True state; indeterminate if readOnly
    - readOnly: if True, this category of devices can be read but cannot be controlled
    - devState: numpy array representing the state of each device; values are 0, 1 or numpy.nan
    - devIsCurrent: numpy bool array representing the isCurrent of each device
    """
    def __init__(self, keyVarFact, catName, devNames, isReadOnly=False, isOpenShut=False, callFunc=None):
        RO.AddCallback.BaseMixin.__init__(self)

        self.catName = catName
        if catName.lower().endswith("s"):
            self.catNameSingular = catName[:-1]
        else:
            self.catNameSingular = catName

        self.devDict = RO.Alg.OrderedDict()  # dict of device name: keyword variable
        self.devIndDict = {}                 # dict of device name: index
        self.devState = numpy.zeros([len(devNames)], numpy.float)
        self.devIsCurrent = numpy.zeros([len(devNames)], numpy.bool)

        self.readOnly = isReadOnly

        if isOpenShut:
            self.stateNames = ("Closed", "Open")
            self.verbNames = ("Close", "Open")
        else:
            self.stateNames = ("Off", "On")
            self.verbNames = self.stateNames

        for ind, devName in enumerate(devNames):
            keyVar = keyVarFact(devName)
            self.devDict[devName] = keyVar
            self.devIndDict[devName] = ind
            keyVar.addIndexedCallback(self._updateDevState, callNow=False)

        if callFunc:
            self.addCallback(callFunc, callNow=False)

    def getStateStr(self, boolVal):
        """Returns a string representation of the state; one of:
        - "Off" or "Closed" if false
        - "On" or "Open" if true
        """
        return self.stateNames[bool(boolVal)]

    def getVerbStr(self, boolVal):
        """Returns the appropriate command verb for the specified state; one of:
        - "Off" or "Close" if false
        - "On" or "Open" if true
        """
        return self.verbNames[bool(boolVal)]

    def _updateDevState(self, value, isCurrent, keyVar):
        """Update devState"""
        ind = self.devIndDict[keyVar.keyword]
        if value is None:
            value = numpy.nan
        self.devState[ind] = value
        self.devIsCurrent[ind] = isCurrent
        self._doCallbacks()

class _Model(object):
    def __init__(self, **kargs):
        tuiModel = TUI.TUIModel.getModel()
        self.actor = "telmech"
        self.dispatcher = tuiModel.dispatcher
        self._connection = tuiModel.getConnection()
        self.timeLim = _TimeLim
        self._pollID = None
        self._tkRoot = tuiModel.tkRoot

        self.__keyVarFact = RO.KeyVariable.KeyVarFactory(
            actor = self.actor,
            converters = stateToBoolOrNone,
            nval = 1,
            dispatcher = self.dispatcher,
        )

        self.catDict = {}

        self._addCat(
            catName = "Enable",
            devNames = (
                "Telescope",
            ),
        )

        self._addCat(
            catName = "Fans",
            devNames = (
                "IntExhaust",
                "TelExhaust",
                "Press",
            ),
        )

        self._addCat(
            catName = "Heaters",
            devNames = (
                "H4",
                "H8",
                "H12",
                "H16",
                "H20",
                "H24",
            ),
        )

        self._addCat(
            catName = "Lights",
            devNames = (
                "FHalides",
                "RHalides",
                "Incand",
                "Platform",
                "Catwalk",  # was outside
                "Stairs",
                "Int_Incand",
                "Int_Fluor",
            ),
        )

        self._addCat(
            catName = "Louvers",
            devNames = (
                "LUp",
                "LMid",
                "LLow",
                "RUp",
                "RMid",
                "RLow",
                "Stairw",
                "LPit",
                "RPit",
            ),
            isOpenShut = True,
        )

        self._addCat(
            catName = "Shutters",
            devNames = (
                "Left",
                "Right",
            ),
            isReadOnly = True,
            isOpenShut = True,
        )

        self._addCat(
            catName = "Eyelids",
            devNames = (
                "NA1",
                "TR1",
                "BC1",
                "TR2",
                "NA2",
                "TR3",
                "TR4",
            ),
            isOpenShut = True,
        )

        self.covers = self.__keyVarFact("covers")

        self.tertRot = self.__keyVarFact("tertRot", converters=tertRotConverter)

        self.__keyVarFact.setKeysRefreshCmd()

    def _addCat(self, catName, devNames, isReadOnly=False, isOpenShut=False):
        catInfo = CatInfo(
            keyVarFact = self.__keyVarFact,
            catName = catName,
            devNames = devNames,
            isReadOnly = isReadOnly,
            isOpenShut = isOpenShut,
        )
        self.catDict[catName] = catInfo


if __name__ == "__main__":
    getModel()

license: bsd-3-clause | hash: -2,897,757,714,474,932,000 | line_mean: 31.708 | line_max: 105 | alpha_frac: 0.558518 | autogenerated: false
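Client code reads device state through the singleton model. A hypothetical consumer, assuming a live TUI dispatcher and connection:

model = getModel()
fans = model.catDict["Fans"]
for devName, ind in fans.devIndDict.items():
    print(devName,
          fans.devState[ind],       # 0.0, 1.0, or numpy.nan while unknown
          fans.devIsCurrent[ind])   # whether the value is current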
repo_name: moodpulse/l2 | path: hospitals/models.py | copies: 1 | size: 3227

from typing import Optional

from django.db import models

from appconf.manager import SettingManager


class Hospitals(models.Model):
    title = models.CharField(max_length=255, help_text="Наименование")
    short_title = models.CharField(max_length=128, blank=True, default='', help_text="Краткое наименование", db_index=True)
    code_tfoms = models.CharField(max_length=128, blank=True, default='', help_text="Код больницы", db_index=True)
    oid = models.CharField(max_length=128, blank=True, default='', help_text="Код больницы", db_index=True)
    hide = models.BooleanField(default=False, blank=True, help_text='Скрытие больницы', db_index=True)
    is_default = models.BooleanField(default=False, blank=True, help_text='Больница по умолчанию для пустых полей', db_index=True)
    address = models.CharField(max_length=128, blank=True, default='', help_text="Адрес больницы")
    phones = models.CharField(max_length=128, blank=True, default='', help_text="Телефон больницы")
    ogrn = models.CharField(max_length=16, blank=True, default='', help_text="ОГРН больницы")
    www = models.CharField(max_length=128, blank=True, default='', help_text="Сайт больницы")
    rmis_org_id = models.CharField(max_length=12, blank=True, default='', help_text="ID организации в РМИС")
    email = models.CharField(max_length=12, blank=True, default='', help_text="email")
    remote_url = models.CharField(max_length=128, blank=True, default='', help_text="Адрес L2")
    remote_token = models.CharField(max_length=128, blank=True, default='', help_text="Токен L2")
    license_data = models.CharField(max_length=128, blank=True, default='', help_text="Лицензия")

    @staticmethod
    def get_default_hospital() -> Optional['Hospitals']:
        hosp = Hospitals.objects.filter(hide=False, is_default=True).first()

        if not hosp:
            hosp = Hospitals.objects.filter(hide=False, code_tfoms=SettingManager.get("org_id", default='', default_type='s')).first()
            if hosp:
                hosp.is_default = True
                hosp.save()

        return hosp

    @property
    def safe_full_title(self):
        return self.title or self.short_title

    @property
    def safe_short_title(self):
        return self.short_title or self.title

    @property
    def safe_address(self):
        return self.address or SettingManager.get("org_address")

    @property
    def safe_phones(self):
        return self.phones or SettingManager.get("org_phones")

    @property
    def safe_ogrn(self):
        return self.ogrn or SettingManager.get("org_ogrn")

    @property
    def safe_www(self):
        return self.www or SettingManager.get("org_www")

    @property
    def safe_email(self):
        # if no email is set, fall back to the website address
        return self.email or SettingManager.get("org_www")

    def __str__(self):
        return f"{self.short_title} – {self.code_tfoms}"

    class Meta:
        verbose_name = 'Больница'
        verbose_name_plural = 'Больницы'

license: mit | hash: -5,769,458,441,016,983,000 | line_mean: 41.183099 | line_max: 134 | alpha_frac: 0.676461 | autogenerated: false
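The fallback chain makes empty fields safe to render. A quick sketch of typical use, assuming at least one visible hospital row exists:

hosp = Hospitals.get_default_hospital()
if hosp is not None:
    print(hosp.safe_short_title)  # short_title, else title
    print(hosp.safe_email)        # email, else the org_www setting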
repo_name: lkarsten/GND10read | path: libfdx/interfaces.py | copies: 2 | size: 6430

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright (C) 2016-2017 Lasse Karstensen
#
from __future__ import print_function

import doctest
import logging
import unittest
from binascii import hexlify
from datetime import datetime
from pprint import pprint
from time import time, sleep

import serial

from .decode import FDXDecode, DataError, FailedAssumptionError
from .dumpreader import dumpreader, nxbdump


class GND10interface(object):
    stream = None
    n_msg = 0
    n_errors = 0

    last_yield = None

    # Seconds
    read_timeout = 0.3
    reset_sleep = 2

    def __init__(self, serialport, send_modechange=False):
        self.serialport = serialport
        self.send_modechange = send_modechange

    def __del__(self):
        if self.stream is not None:
            self.stream.close()

    def open(self):
        logging.debug("Opening serial port %s (read_timeout=%s)" %
                      (self.serialport, self.read_timeout))
        self.stream = serial.Serial(port=self.serialport, timeout=self.read_timeout)
        assert self.stream is not None

    def close(self):
        if self.stream is not None:
            try:
                self.stream.close()
                del self.stream
            except serial.serialutil.SerialException as e:
                pass
        self.stream = None

    def recvmsg(self):
        buf = bytes()
        empty_reads = 0

        while True:
            while self.stream is None:
                try:
                    self.open()
                except serial.serialutil.SerialException as e:
                    self.close()

                    now = time()
                    if (self.last_yield or now) < (now + self.read_timeout):
                        # Pace the iterator if nothing is working.
                        if self.last_yield > (now - 0.05):
                            sleep(0.05)
                        self.last_yield = now
                        yield None

                    # Retry opening the port in a while
                    sleep(self.reset_sleep)
                    continue

                # After successful open, send the mode change if asked to.
                if self.send_modechange:
                    try:
                        self.stream.write("$PSILFDX,,R\r\n".encode("ascii"))
                    except serial.serialutil.SerialException as e:
                        logging.error(str(e))
                        self.close()

            try:
                # Inefficient but easily understood.
                chunk = self.stream.read(1)
            except serial.serialutil.SerialException as e:
                self.close()
                continue

            assert chunk is None or isinstance(chunk, bytes)

            if len(chunk) == 0:
                empty_reads += 1
                logging.info("serial read timeout after %.3f seconds" %
                             self.stream.timeout)
                if empty_reads > 4:  # Non-magic
                    logging.info("Excessive empty reads, resetting port")
                    self.close()
                continue
            empty_reads = 0

            assert len(chunk) > 0
            buf += chunk

            if b'\x81' in buf:
                # print("trying to decode %i bytes: %s" % (len(buf), buf.hex()))
                try:
                    fdxmsg = FDXDecode(buf)
                except (DataError, FailedAssumptionError, NotImplementedError) as e:
                    if "short message" in str(e):
                        pass
                    else:
                        # This class concerns itself with the readable only.
                        logging.warning("Ignoring exception: %s" % str(e))
                    self.n_errors += 1
                else:
                    if fdxmsg is not None:
                        self.n_msg += 1
                        self.last_yield = time()
                        assert isinstance(fdxmsg, dict)
                        yield fdxmsg

                buf = bytes()


class HEXinterface(object):
    """
    Used for running with test data when the GND10 is not connected.

    Interface should be close to GND10interface().
    """
    last_yield = None
    n_msg = 0
    n_errors = 0

    def __init__(self, inputfile, frequency=None, seek=0):
        self.inputfile = inputfile
        self.seek = seek
        self.frequency = frequency

        with open(self.inputfile):
            pass  # Catch permission problems early.

    def recvmsg(self):
        if self.inputfile.endswith(".nxb"):
            reader = nxbdump(self.inputfile, seek=self.seek)
        else:
            reader = dumpreader(self.inputfile, seek=self.seek)

        for msg in reader:
            assert isinstance(msg, tuple)
            assert len(msg) == 2
            ts, frame = msg
            assert isinstance(frame, bytes)
            assert len(frame) > 0

            try:
                fdxmsg = FDXDecode(frame)
            except (DataError, FailedAssumptionError, NotImplementedError) as e:
                if "short message" in str(e):
                    pass
                else:
                    logging.warning("%s" % str(e))
                self.n_errors += 1
            else:
                if fdxmsg is not None:
                    self.n_msg += 1
                    self.last_yield = time()
                    assert isinstance(fdxmsg, dict)
                    yield fdxmsg

            # Pace the output.
            if self.frequency is not None:
                sleep(1.0 / self.frequency)


if __name__ == "__main__":
    unittest.main()

license: gpl-2.0 | hash: -8,601,768,215,098,905,000 | line_mean: 31.311558 | line_max: 84 | alpha_frac: 0.52084 | autogenerated: false
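Both interfaces expose the same generator protocol, so consumers can swap them freely. A minimal consumer sketch (the device path is an example):

gnd10 = GND10interface("/dev/ttyACM0")
for msg in gnd10.recvmsg():
    if msg is None:
        continue  # port not available yet; the generator paces itself
    print(msg)    # decoded FDX message as a dict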
repo_name: beefoo/hollywood-diversity | path: scripts/add_imdb_id.py | copies: 1 | size: 1442

# -*- coding: utf-8 -*-

# Example usage:
#   python add_imdb_id.py ../data/box_office_top_10_movies_2011-2015.csv ../data/people_box_office_top_10_movies_2011-2015_imdb_subset.csv

import csv
import sys

if len(sys.argv) < 3:
    print "Usage: %s <inputfile movie csv> <inputfile people csv>" % sys.argv[0]
    sys.exit(1)

MOVIE_FILE = sys.argv[1]
PEOPLE_FILE = sys.argv[2]

overwrite_existing = False

movies = []
people = []
people_headers = []

with open(MOVIE_FILE, 'rb') as f:
    rows = csv.reader(f, delimiter=',')
    headers = next(rows, None)  # remove header
    # populate movie list
    for row in rows:
        movie = {}
        for i, h in enumerate(headers):
            movie[h] = row[i]
        movies.append(movie)

with open(PEOPLE_FILE, 'rb') as f:
    rows = csv.reader(f, delimiter=',')
    people_headers = next(rows, None)  # remove header
    # populate people list
    for row in rows:
        person = {}
        for i, h in enumerate(people_headers):
            person[h] = row[i]
        person['movie_imdb_id'] = next(iter([m['imdb_id'] for m in movies if m['movie_id'] == person['movie_id']]), False)
        people.append(person)

with open(PEOPLE_FILE, 'wb') as f:
    w = csv.writer(f)
    w.writerow(people_headers)
    for p in people:
        row = []
        for h in people_headers:
            row.append(p[h])
        w.writerow(row)

print "Updated %s people in file %s" % (len(people), PEOPLE_FILE)

license: mit | hash: 3,624,430,212,936,498,700 | line_mean: 27.27451 | line_max: 138 | alpha_frac: 0.601248 | autogenerated: false
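The per-person scan over all movies makes the join O(N*M). A dictionary keyed by movie_id keeps it linear; a sketch (not from the source):

# Build the index once...
imdb_by_movie_id = {m['movie_id']: m['imdb_id'] for m in movies}
# ...then each per-person lookup is O(1):
person['movie_imdb_id'] = imdb_by_movie_id.get(person['movie_id'], False)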
repo_name: imjonsnooow/synapse | path: synapse/lib/atomic.py | copies: 1 | size: 1032

# FIXME: work in progress
import os
import threading


class File:
    '''
    A File facilitates atomic seek/read operations.
    '''
    def __init__(self, fd):
        self.fd = fd
        self.off = 0
        self.lock = threading.Lock()

        fd.seek(0, os.SEEK_END)
        self.size = fd.tell()  # current end-of-file offset; seekAndWrite keeps this up to date

    def seekAndRead(self, off, size):
        '''
        Seek and read as an atomic operation.
        '''
        with self.lock:
            if self.off != off:
                self.fd.seek(off)

            byts = self.fd.read(size)
            self.off = off + len(byts)
            return byts

    def seekAndWrite(self, byts):
        with self.lock:
            size = len(byts)
            if self.off != self.size:
                self.fd.seek(0, os.SEEK_END)

            self.fd.write(byts)

            self.size += size
            self.off = self.size

    def seekAndTell(self):
        '''
        Seek to the end and return tell() as an atomic operation.
        '''
        with self.lock:
            self.fd.seek(0, os.SEEK_END)
            return self.fd.tell()

license: apache-2.0 | hash: 9,018,604,730,164,425,000 | line_mean: 21.933333 | line_max: 65 | alpha_frac: 0.49031 | autogenerated: false
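A quick single-process sketch of the intended append-then-read usage (the file name is arbitrary):

fd = open('/tmp/atomic-demo.bin', 'w+b')
af = File(fd)

af.seekAndWrite(b'hello')
af.seekAndWrite(b'world')   # appends; the lock serializes concurrent writers

print(af.seekAndRead(0, 10))  # b'helloworld'
print(af.seekAndTell())       # 10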
repo_name: magenta/ddsp | path: ddsp/training/docker/task_test.py | copies: 1 | size: 3460

# Copyright 2021 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Tests for task.py."""

import os
from unittest import mock

from ddsp.training.docker import task
import tensorflow.compat.v2 as tf


class GetWorkerBehaviorInfoTest(tf.test.TestCase):

  def test_no_tf_config(self):
    """Tests behavior when there is no TF_CONFIG set."""
    cluster_config, save_dir = task.get_worker_behavior_info('some/dir/')
    self.assertEqual(save_dir, 'some/dir/')
    self.assertEqual(cluster_config, '')

  def test_incomplete_tf_config(self):
    """Tests behavior when the set TF_CONFIG is incomplete."""
    with mock.patch.dict(os.environ, {'TF_CONFIG': '{"cluster": {}}'}):
      cluster_config, save_dir = task.get_worker_behavior_info('some/dir/')
      self.assertEqual(save_dir, 'some/dir/')
      self.assertEqual(cluster_config, '')
    with mock.patch.dict(os.environ, {'TF_CONFIG': '{"task": {}}'}):
      cluster_config, save_dir = task.get_worker_behavior_info('some/dir/')
      self.assertEqual(save_dir, 'some/dir/')
      self.assertEqual(cluster_config, '')

  @mock.patch.dict(
      os.environ,
      {'TF_CONFIG': ('{"cluster": {"worker": ["worker0.example.com:2221"]},'
                     '"task": {"type": "worker", "index": 0}}')})
  def test_single_worker(self):
    """Tests behavior when cluster has only one worker."""
    cluster_config, save_dir = task.get_worker_behavior_info('some/dir/')
    self.assertEqual(save_dir, 'some/dir/')
    self.assertEqual(cluster_config, '')

  @mock.patch.dict(
      os.environ,
      {'TF_CONFIG': ('{"cluster": {"worker": ["worker0.example.com:2221"],'
                     '"chief": ["chief.example.com:2222"]},'
                     '"task": {"type": "chief", "index": 0}}')})
  def test_multi_worker_as_chief(self):
    """Tests multi-worker behavior when task type chief is set in TF_CONFIG."""
    cluster_config, save_dir = task.get_worker_behavior_info('some/dir/')
    self.assertEqual(save_dir, 'some/dir/')
    self.assertEqual(
        cluster_config,
        ('{"cluster": {"worker": ["worker0.example.com:2221"],'
         '"chief": ["chief.example.com:2222"]},'
         '"task": {"type": "chief", "index": 0}}'))

  @mock.patch.dict(
      os.environ,
      {'TF_CONFIG': ('{"cluster": {"worker": ["worker0.example.com:2221"],'
                     '"chief": ["chief.example.com:2222"]},'
                     '"task": {"type": "worker", "index": 0}}')})
  def test_multi_worker_as_worker(self):
    """Tests multi-worker behavior when task type worker is set in TF_CONFIG."""
    cluster_config, save_dir = task.get_worker_behavior_info('some/dir/')
    self.assertEqual(save_dir, '')
    self.assertEqual(
        cluster_config,
        ('{"cluster": {"worker": ["worker0.example.com:2221"],'
         '"chief": ["chief.example.com:2222"]},'
         '"task": {"type": "worker", "index": 0}}'))


if __name__ == '__main__':
  tf.test.main()

license: apache-2.0 | hash: -1,943,282,461,006,319,900 | line_mean: 38.770115 | line_max: 80 | alpha_frac: 0.625723 | autogenerated: false
repo_name: eSmelser/SnookR | path: SnookR/accounts/tests.py | copies: 1 | size: 1330

from django.test import TestCase, RequestFactory, Client
from django.contrib.auth import SESSION_KEY
from django.urls import reverse
from django.core import mail

from accounts.views import signup
from accounts.models import CustomUser


class SignupTestCase(TestCase):
    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.data = {
            'username': 'joe',
            'password1': 'joepassword',
            'password2': 'joepassword',
            'email': '[email protected]',
            'first_name': 'joe',
            'last_name': 'pass'
        }

    def test_user_created(self):
        request = self.client.post(reverse('signup'), self.data)
        self.assertTrue(CustomUser.objects.all())

    def test_email_sent(self):
        request = self.client.post(reverse('signup'), self.data)
        self.assertEqual(len(mail.outbox), 1)

    def test_email_content(self):
        request = self.client.post(reverse('signup'), self.data)
        user = CustomUser.objects.get(username=self.data['username'])
        email = mail.outbox.pop()
        self.assertIn(user.username, email.body)
        self.assertIn(user.first_name, email.body)
        self.assertIn(user.profile.activation_key, email.body)
        self.assertIn(str(user.profile.key_expires), email.body)

license: mit | hash: -3,605,594,687,872,716,000 | line_mean: 34.945946 | line_max: 69 | alpha_frac: 0.642105 | autogenerated: false
thinkry/quickx-ee
ee_module.py
1
6239
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, os, re

# Get the current path of the script file
def getCurrDir():
    # Get the script path
    path = sys.argv[0]
    # Distinguish a plain script from a py2exe-compiled file: for a script,
    # return its directory; for a py2exe-compiled file, return the compiled
    # file's directory.
    if os.path.isdir(path):
        return path
    elif os.path.isfile(path):
        return os.path.dirname(path)

class const:
    EOL = '\n'
    lineDelim = '-------------------------------------------------------------------------------' + EOL
    lineBegin = '-- '

class field:
    def __init__(self, name):
        self.name = name
        self.comment = ''
        self.parent = None

    def str(self):
        parent = 'global'
        if self.parent:
            parent = self.parent.name
        if len(self.comment) > 0:
            return self.comment
        else:
            return const.lineDelim + const.lineBegin + '@field [parent=#%s] %s' % (parent, self.name) + const.EOL

class function:
    def __init__(self, name):
        self.name = name
        self.comment = ''
        self.parent = None

    def str(self):
        if len(self.comment) > 0:
            return self.comment
        parent = 'global'
        if self.parent:
            parent = self.parent.name
        return const.lineDelim + '%s@function [parent=#%s] %s' % (const.lineBegin, parent, self.name) + const.EOL

class module:
    __root = None

    @classmethod
    def root(cls):
        if not cls.__root:
            cls.__root = module('global')
        return cls.__root

    def __init__(self, name):
        self.name = name      # the module's short name
        self.comment = ''
        self.parent = None
        self.extends = ''
        self.fields = []
        self.functions = []
        self.children = {}    # submodules

    # Return the module's full name
    def fullName(self):
        ret = self.name
        p = self.parent
        while p and p.parent:
            ret = p.name + '.' + ret
            p = p.parent
        return ret

    def addFunction(self, function):
        for f in self.functions:
            if f.name == function.name:
                return
        function.parent = self
        self.functions.append(function)

    def addField(self, field):
        for f in self.fields:
            if f.name == field.name:
                return
        field.parent = self
        self.fields.append(field)

    def output(self, dir):
        # Output the module information first
        if len(self.comment) > 0:
            s = self.comment + const.EOL + const.EOL
        else:
            s = const.lineDelim
        s += const.lineBegin + '@module %s' % self.name + const.EOL
        if self.extends != '':
            tmp = self.extends.split('.')[-1]
            s += const.lineBegin + '@extends %s#%s' % (self.extends, tmp) + const.EOL
        s += const.EOL + const.EOL
        # Output preloaded-module information
        for child in self.children:
            mod = self.children[child]
            modname = mod.name
            s += const.lineDelim
            s += const.lineBegin + '@field [parent = #%s] %s#%s %s preloaded module' % \
                 (self.name, mod.fullName(), modname, modname) + const.EOL + const.EOL
        for function in self.functions:
            s += function.str() + const.EOL
        for field in self.fields:
            s += field.str() + const.EOL
        s += const.EOL + 'return nil' + const.EOL
        f = open(os.path.join(dir, self.fullName() + '.doclua'), 'wb')
        f.write(s)
        f.close()

    # Return the module for a given name string.
    # @param name
    # @param skipLast whether the last segment of name is a function or
    #                 variable, e.g. cc.net.Socket:test vs. cc.sdk.pay
    # @param parents  parent-module mapping, e.g. pay>cc; the module name is a
    #                 short name, the parent module name is a full name
    # @param supers   base-class mapping, e.g. pay>cc.Ref; the module name is a
    #                 short name, the base class is a full name
    # @param renames  rename mapping, e.g. Store>pay; the module name is a
    #                 short name and the new name is also a short name
    @classmethod
    def getModuleByName(cls, name, skipLast, parents=None, supers=None, renames=None):
        ms = name.strip().replace(':', '.').split('.')
        if len(ms) == 1 and skipLast:
            return cls.root()  # top-level functions such as printf/class
        if skipLast:
            ms = ms[:-1]
        if len(ms) == 1:
            m = ms[0]
            if m == 'global':
                return cls.root()
            # Handle module renames
            if renames and m in renames:
                if m != renames[m]:  # a bad mapping such as pay>pay would cause an infinite loop
                    return cls.getModuleByName(renames[m], False, parents, supers)
            # Check the parents mapping
            if parents and m in parents:
                # Build the full name and call getModuleByName again
                return cls.getModuleByName(parents[m] + '.' + m, False, None, supers)
            # Otherwise this is a top-level module
            root = cls.root()
            if m in root.children:
                return root.children[m]
            else:
                mod = module(m)
                mod.parent = root
                if supers and m in supers:  # inheritance
                    mod.extends = supers[m]
                root.children[m] = mod
                return mod
        else:
            ret = cls.root()
            for m in ms:
                if m in ret.children:
                    ret = ret.children[m]
                else:
                    tmp = module(m)
                    tmp.parent = ret
                    if supers and m in supers:  # inheritance
                        tmp.extends = supers[m]
                    ret.children[m] = tmp
                    ret = tmp
            return ret

    @staticmethod
    def __output(m, dir):
        if m:
            m.output(dir)
            for child in m.children:
                module.__output(m.children[child], dir)

    # Write all module information into the dir directory
    @classmethod
    def outputAll(cls, dir):
        module.__output(cls.root(), dir)
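A hypothetical driver for the generator above; the 'cc.net.Socket' module path, the 'connect' function, and the output directory are illustrative only, while getModuleByName, addFunction, and outputAll come from the script itself.

# Build a small module tree and emit .doclua stubs for it.
import os

if not os.path.isdir('doclua_out'):
    os.mkdir('doclua_out')
sock = module.getModuleByName('cc.net.Socket:connect', True)  # skipLast strips ':connect'
sock.addFunction(function('connect'))
# Writes global.doclua, cc.doclua, cc.net.doclua, cc.net.Socket.doclua
module.outputAll('doclua_out')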
mit
-2,995,624,967,718,006,300
29.409574
113
0.505685
false
john-wang-metro/metro-openerp
metro/ir_cron.py
2
6621
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import logging
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.addons.base.ir.ir_cron import _intervalTypes
import traceback
from openerp import netsvc
import sys


def str2tuple(s):
    return eval('tuple(%s)' % (s or ''))

_logger = logging.getLogger(__name__)


class ir_cron(osv.osv):
    _name = "ir.cron"
    _inherit = ['ir.cron', 'mail.thread']

    def manual_run(self, cr, uid, ids, context):
#        cron_id = ids[0]
#        cron_data = self.browse(cr, uid, cron_id, context=context)
#        args = str2tuple(cron_data.args)
#        model = self.pool.get(cron_data.model)
#        if model and hasattr(model, cron_data.function):
#            method = getattr(model, cron_data.function)
#            method(cr, uid, *args)
        cron = self.read(cr, uid, ids[0], context=context)
        cron['user_id'] = cron['user_id'][0]
        self._process_job(cr, cron, cr, force_run=True)
        return True

    '''
    1. Use datetime.utcnow().
    2. Log the cron run message and any exception message.
    3. Add a 'force_run' parameter for manual runs.
    '''
    def _process_job(self, job_cr, job, cron_cr, force_run=False):
        """ Run a given job taking care of the repetition.

        :param job_cr: cursor to use to execute the job, safe to
            commit/rollback
        :param job: job to be run (as a dictionary).
        :param cron_cr: cursor holding lock on the cron job row, to use to
            update the next exec date, must not be committed/rolled back!
        """
        try:
            # changed to utcnow, johnw, 11/15/2014
            # now = datetime.now()
            now = datetime.utcnow()
            nextcall = datetime.strptime(job['nextcall'], DEFAULT_SERVER_DATETIME_FORMAT)
            numbercall = job['numbercall']
            ok = False
            while force_run or (nextcall < now and numbercall):
                if numbercall > 0:
                    numbercall -= 1
                if not ok or job['doall']:
                    try:
                        call_log = self._callback(job_cr, job['user_id'], job['model'],
                                                  job['function'], job['args'], job['id'])
                        self.message_post(cron_cr, job['user_id'], job['id'],
                                          type='comment', subtype='mail.mt_comment',
                                          subject='Ran at %s' % (datetime.now()),
                                          content_subtype="plaintext",
                                          body=call_log)
                    except Exception as e:
                        formatted_info = "".join(traceback.format_exception(*(sys.exc_info())))
                        self.message_post(cron_cr, job['user_id'], job['id'],
                                          type='comment', subtype='mail.mt_comment',
                                          subject='Ran with exception at %s' % (datetime.now()),
                                          content_subtype="plaintext",
                                          body=formatted_info)
                if numbercall:
                    nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
                ok = True
                if force_run:
                    # force_run can only run once
                    force_run = False
            addsql = ''
            if not numbercall:
                addsql = ', active=False'
            cron_cr.execute("UPDATE ir_cron SET nextcall=%s, numbercall=%s" + addsql + " WHERE id=%s",
                            (nextcall.strftime(DEFAULT_SERVER_DATETIME_FORMAT), numbercall, job['id']))
        finally:
            job_cr.commit()
            cron_cr.commit()

    '''
    1. Return the cron run log.
    2. Re-raise the original exception.
    '''
    def _callback(self, cr, uid, model_name, method_name, args, job_id):
        """ Run the method associated to a given job.

        It takes care of logging and exception handling.

        :param model_name: model name on which the job method is located.
        :param method_name: name of the method to call when this job is processed.
        :param args: arguments of the method (without the usual self, cr, uid).
        :param job_id: job id.
        """
        args = str2tuple(args)
        model = self.pool.get(model_name)
        call_log = ''
        if model and hasattr(model, method_name):
            method = getattr(model, method_name)
            try:
                log_depth = (None if _logger.isEnabledFor(logging.DEBUG) else 1)
                netsvc.log(_logger, logging.DEBUG, 'cron.object.execute',
                           (cr.dbname, uid, '*', model_name, method_name) + tuple(args),
                           depth=log_depth)
                start_time = time.time()
                call_resu = method(cr, uid, *args)
                if call_resu:
                    call_log += "return result:\n" + str(call_resu) + "\n"
                end_time = time.time()
                msg = '%.3fs (%s, %s)' % (end_time - start_time, model_name, method_name)
                call_log += msg + "\n"
                if _logger.isEnabledFor(logging.DEBUG):
                    _logger.debug(msg)
                return call_log
            except Exception as e:
                self._handle_callback_exception(cr, uid, model_name, method_name, args, job_id, e)
                # re-raise the original exception, 11/15/2015, johnw
                raise

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-177,968,886,835,227,460
44.979167
146
0.540553
false
Khilo84/Stack-Exchange-User-Linker
ui_User_Data_Dialog.py
1
34967
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'ui_User_Data_Dialog.ui' # # Created: Fri Oct 2 17:54:26 2015 # by: PyQt4 UI code generator 4.10.4 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_dialog_User_Data(object): def setupUi(self, dialog_User_Data): dialog_User_Data.setObjectName(_fromUtf8("dialog_User_Data")) dialog_User_Data.resize(640, 680) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(dialog_User_Data.sizePolicy().hasHeightForWidth()) dialog_User_Data.setSizePolicy(sizePolicy) dialog_User_Data.setMinimumSize(QtCore.QSize(640, 680)) dialog_User_Data.setMaximumSize(QtCore.QSize(640, 680)) dialog_User_Data.setWindowOpacity(1.0) dialog_User_Data.setStyleSheet(_fromUtf8("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(98, 98, 98, 255), stop:1 rgba(199, 199, 199, 255));")) dialog_User_Data.setSizeGripEnabled(False) dialog_User_Data.setModal(False) self.horizontalLayout_5 = QtGui.QHBoxLayout(dialog_User_Data) self.horizontalLayout_5.setMargin(2) self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5")) self.verticalLayout_2 = QtGui.QVBoxLayout() self.verticalLayout_2.setMargin(4) self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2")) self.horizontalLayout_4 = QtGui.QHBoxLayout() self.horizontalLayout_4.setSpacing(0) self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4")) self.verticalLayout = QtGui.QVBoxLayout() self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(_fromUtf8("verticalLayout")) self.label_4 = QtGui.QLabel(dialog_User_Data) self.label_4.setStyleSheet(_fromUtf8("background-color: rgba(255, 255, 255, 0);\n" "color: rgb(255, 255, 255);")) self.label_4.setObjectName(_fromUtf8("label_4")) self.verticalLayout.addWidget(self.label_4) spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem) self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setSpacing(5) self.horizontalLayout.setContentsMargins(24, -1, -1, -1) self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.label = QtGui.QLabel(dialog_User_Data) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, 
QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) self.label.setPalette(palette) self.label.setStyleSheet(_fromUtf8("background-color: rgba(255, 255, 255, 0);\n" "color: rgb(255, 255, 255);")) self.label.setObjectName(_fromUtf8("label")) self.horizontalLayout.addWidget(self.label) self.lineEdit__Dialog_User_data_User_ID = QtGui.QLineEdit(dialog_User_Data) self.lineEdit__Dialog_User_data_User_ID.setInputMethodHints(QtCore.Qt.ImhDigitsOnly|QtCore.Qt.ImhPreferNumbers) self.lineEdit__Dialog_User_data_User_ID.setText(_fromUtf8("")) self.lineEdit__Dialog_User_data_User_ID.setCursorMoveStyle(QtCore.Qt.LogicalMoveStyle) self.lineEdit__Dialog_User_data_User_ID.setObjectName(_fromUtf8("lineEdit__Dialog_User_data_User_ID")) self.horizontalLayout.addWidget(self.lineEdit__Dialog_User_data_User_ID) spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.horizontalLayout.setStretch(1, 10) self.horizontalLayout.setStretch(2, 4) self.verticalLayout.addLayout(self.horizontalLayout) spacerItem2 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem2) self.horizontalLayout_2 = QtGui.QHBoxLayout() self.horizontalLayout_2.setSpacing(5) self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2")) self.label_2 = QtGui.QLabel(dialog_User_Data) palette = QtGui.QPalette() brush = 
QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) self.label_2.setPalette(palette) self.label_2.setStyleSheet(_fromUtf8("background-color: rgba(255, 255, 255, 0);\n" "color: rgb(255, 255, 255);")) self.label_2.setObjectName(_fromUtf8("label_2")) self.horizontalLayout_2.addWidget(self.label_2) self.lineEdit_Dialog_User_data_User_Name = QtGui.QLineEdit(dialog_User_Data) self.lineEdit_Dialog_User_data_User_Name.setObjectName(_fromUtf8("lineEdit_Dialog_User_data_User_Name")) self.horizontalLayout_2.addWidget(self.lineEdit_Dialog_User_data_User_Name) spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) 
self.horizontalLayout_2.addItem(spacerItem3) self.pushButton_Dialog_User_Data_Lookup = QtGui.QPushButton(dialog_User_Data) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.pushButton_Dialog_User_Data_Lookup.sizePolicy().hasHeightForWidth()) self.pushButton_Dialog_User_Data_Lookup.setSizePolicy(sizePolicy) self.pushButton_Dialog_User_Data_Lookup.setMinimumSize(QtCore.QSize(25, 25)) self.pushButton_Dialog_User_Data_Lookup.setMaximumSize(QtCore.QSize(25, 25)) self.pushButton_Dialog_User_Data_Lookup.setStyleSheet(_fromUtf8("\n" "background-color: rgba(255, 255, 255, 0);")) self.pushButton_Dialog_User_Data_Lookup.setText(_fromUtf8("")) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/View/img/view_count.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.pushButton_Dialog_User_Data_Lookup.setIcon(icon) self.pushButton_Dialog_User_Data_Lookup.setIconSize(QtCore.QSize(25, 25)) self.pushButton_Dialog_User_Data_Lookup.setAutoDefault(False) self.pushButton_Dialog_User_Data_Lookup.setDefault(True) self.pushButton_Dialog_User_Data_Lookup.setFlat(False) self.pushButton_Dialog_User_Data_Lookup.setObjectName(_fromUtf8("pushButton_Dialog_User_Data_Lookup")) self.horizontalLayout_2.addWidget(self.pushButton_Dialog_User_Data_Lookup) spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem4) self.horizontalLayout_2.setStretch(1, 10) self.horizontalLayout_2.setStretch(2, 1) self.horizontalLayout_2.setStretch(4, 1) self.verticalLayout.addLayout(self.horizontalLayout_2) spacerItem5 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem5) self.horizontalLayout_3 = QtGui.QHBoxLayout() self.horizontalLayout_3.setSpacing(5) self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3")) self.label_3 = QtGui.QLabel(dialog_User_Data) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = 
QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) self.label_3.setPalette(palette) self.label_3.setStyleSheet(_fromUtf8("background-color: rgba(255, 255, 255, 0);\n" "color: rgb(255, 255, 255);")) self.label_3.setObjectName(_fromUtf8("label_3")) self.horizontalLayout_3.addWidget(self.label_3) self.lineEdit_Dialog_User_data_Password = QtGui.QLineEdit(dialog_User_Data) self.lineEdit_Dialog_User_data_Password.setInputMask(_fromUtf8("")) self.lineEdit_Dialog_User_data_Password.setObjectName(_fromUtf8("lineEdit_Dialog_User_data_Password")) self.horizontalLayout_3.addWidget(self.lineEdit_Dialog_User_data_Password) self.checkBox_Widget_User_Data_Show_Password = QtGui.QCheckBox(dialog_User_Data) self.checkBox_Widget_User_Data_Show_Password.setEnabled(True) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) 
brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) self.checkBox_Widget_User_Data_Show_Password.setPalette(palette) self.checkBox_Widget_User_Data_Show_Password.setStyleSheet(_fromUtf8("background-color: rgba(255, 255, 255, 0);\n" "color: rgb(255, 255, 255);")) self.checkBox_Widget_User_Data_Show_Password.setTristate(False) self.checkBox_Widget_User_Data_Show_Password.setObjectName(_fromUtf8("checkBox_Widget_User_Data_Show_Password")) self.horizontalLayout_3.addWidget(self.checkBox_Widget_User_Data_Show_Password) self.horizontalLayout_3.setStretch(1, 10) self.verticalLayout.addLayout(self.horizontalLayout_3) spacerItem6 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem6) self.horizontalLayout_4.addLayout(self.verticalLayout) self.gridLayout = QtGui.QGridLayout() self.gridLayout.setContentsMargins(-1, 52, -1, -1) self.gridLayout.setHorizontalSpacing(12) self.gridLayout.setVerticalSpacing(0) self.gridLayout.setObjectName(_fromUtf8("gridLayout")) spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.gridLayout.addItem(spacerItem7, 0, 2, 1, 1) self.pushButton__Dialog_User_data_Log_In = QtGui.QPushButton(dialog_User_Data) self.pushButton__Dialog_User_data_Log_In.setStyleSheet(_fromUtf8("background-color: rgba(255, 255, 255, 255);")) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/Log in/img/00c3ff-login-16.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.pushButton__Dialog_User_data_Log_In.setIcon(icon1) self.pushButton__Dialog_User_data_Log_In.setCheckable(False) self.pushButton__Dialog_User_data_Log_In.setChecked(False) self.pushButton__Dialog_User_data_Log_In.setAutoDefault(True) self.pushButton__Dialog_User_data_Log_In.setFlat(False) self.pushButton__Dialog_User_data_Log_In.setObjectName(_fromUtf8("pushButton__Dialog_User_data_Log_In")) self.gridLayout.addWidget(self.pushButton__Dialog_User_data_Log_In, 0, 0, 1, 1) self.label_Dialog_User_data_Status_Icon = 
QtGui.QLabel(dialog_User_Data) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.label_Dialog_User_data_Status_Icon.sizePolicy().hasHeightForWidth()) self.label_Dialog_User_data_Status_Icon.setSizePolicy(sizePolicy) self.label_Dialog_User_data_Status_Icon.setMinimumSize(QtCore.QSize(32, 32)) self.label_Dialog_User_data_Status_Icon.setMaximumSize(QtCore.QSize(32, 32)) self.label_Dialog_User_data_Status_Icon.setStyleSheet(_fromUtf8("background-color: rgba(255, 255, 255, 0);")) self.label_Dialog_User_data_Status_Icon.setText(_fromUtf8("")) self.label_Dialog_User_data_Status_Icon.setPixmap(QtGui.QPixmap(_fromUtf8(":/Offline/img/969696-disconnected-32.png"))) self.label_Dialog_User_data_Status_Icon.setObjectName(_fromUtf8("label_Dialog_User_data_Status_Icon")) self.gridLayout.addWidget(self.label_Dialog_User_data_Status_Icon, 0, 1, 1, 1) self.textEdit_Dialog_User_data_Listing = QtGui.QTextEdit(dialog_User_Data) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.textEdit_Dialog_User_data_Listing.sizePolicy().hasHeightForWidth()) self.textEdit_Dialog_User_data_Listing.setSizePolicy(sizePolicy) self.textEdit_Dialog_User_data_Listing.setMinimumSize(QtCore.QSize(300, 150)) self.textEdit_Dialog_User_data_Listing.setMaximumSize(QtCore.QSize(300, 150)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(50, 50, 50, 150)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(0, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(50, 50, 50, 150)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(50, 50, 50, 150)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(0, 255, 0)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(50, 50, 50, 150)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(50, 50, 50, 150)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(159, 158, 158)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(50, 50, 50, 150)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) self.textEdit_Dialog_User_data_Listing.setPalette(palette) 
self.textEdit_Dialog_User_data_Listing.setAutoFillBackground(False) self.textEdit_Dialog_User_data_Listing.setStyleSheet(_fromUtf8("background-color: rgba(50, 50, 50, 150);\n" "font: 8pt \"Monospace\";")) self.textEdit_Dialog_User_data_Listing.setReadOnly(True) self.textEdit_Dialog_User_data_Listing.setObjectName(_fromUtf8("textEdit_Dialog_User_data_Listing")) self.gridLayout.addWidget(self.textEdit_Dialog_User_data_Listing, 1, 0, 1, 4) self.horizontalLayout_4.addLayout(self.gridLayout) self.horizontalLayout_4.setStretch(0, 1) self.horizontalLayout_4.setStretch(1, 1) self.verticalLayout_2.addLayout(self.horizontalLayout_4) self.listWidget__Dialog_User_data_Users = QtGui.QListWidget(dialog_User_Data) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.listWidget__Dialog_User_data_Users.sizePolicy().hasHeightForWidth()) self.listWidget__Dialog_User_data_Users.setSizePolicy(sizePolicy) self.listWidget__Dialog_User_data_Users.setMinimumSize(QtCore.QSize(628, 380)) self.listWidget__Dialog_User_data_Users.setMaximumSize(QtCore.QSize(628, 380)) self.listWidget__Dialog_User_data_Users.setStyleSheet(_fromUtf8("background-color: rgba(255, 255, 255,120);")) self.listWidget__Dialog_User_data_Users.setFrameShape(QtGui.QFrame.WinPanel) self.listWidget__Dialog_User_data_Users.setFrameShadow(QtGui.QFrame.Sunken) self.listWidget__Dialog_User_data_Users.setAutoScroll(False) self.listWidget__Dialog_User_data_Users.setAutoScrollMargin(0) self.listWidget__Dialog_User_data_Users.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers) self.listWidget__Dialog_User_data_Users.setTabKeyNavigation(True) self.listWidget__Dialog_User_data_Users.setProperty("showDropIndicator", False) self.listWidget__Dialog_User_data_Users.setDragDropMode(QtGui.QAbstractItemView.NoDragDrop) self.listWidget__Dialog_User_data_Users.setSelectionMode(QtGui.QAbstractItemView.SingleSelection) self.listWidget__Dialog_User_data_Users.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems) self.listWidget__Dialog_User_data_Users.setIconSize(QtCore.QSize(0, 0)) self.listWidget__Dialog_User_data_Users.setTextElideMode(QtCore.Qt.ElideMiddle) self.listWidget__Dialog_User_data_Users.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel) self.listWidget__Dialog_User_data_Users.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel) self.listWidget__Dialog_User_data_Users.setMovement(QtGui.QListView.Static) self.listWidget__Dialog_User_data_Users.setFlow(QtGui.QListView.LeftToRight) self.listWidget__Dialog_User_data_Users.setProperty("isWrapping", True) self.listWidget__Dialog_User_data_Users.setResizeMode(QtGui.QListView.Fixed) self.listWidget__Dialog_User_data_Users.setLayoutMode(QtGui.QListView.SinglePass) self.listWidget__Dialog_User_data_Users.setSpacing(1) self.listWidget__Dialog_User_data_Users.setGridSize(QtCore.QSize(152, 120)) self.listWidget__Dialog_User_data_Users.setViewMode(QtGui.QListView.IconMode) self.listWidget__Dialog_User_data_Users.setModelColumn(0) self.listWidget__Dialog_User_data_Users.setUniformItemSizes(False) self.listWidget__Dialog_User_data_Users.setBatchSize(100) self.listWidget__Dialog_User_data_Users.setWordWrap(True) self.listWidget__Dialog_User_data_Users.setSelectionRectVisible(True) self.listWidget__Dialog_User_data_Users.setObjectName(_fromUtf8("listWidget__Dialog_User_data_Users")) self.verticalLayout_2.addWidget(self.listWidget__Dialog_User_data_Users) 
self.buttonBox_Dialog_User_data_Save_Cancel = QtGui.QDialogButtonBox(dialog_User_Data) self.buttonBox_Dialog_User_data_Save_Cancel.setAutoFillBackground(False) self.buttonBox_Dialog_User_data_Save_Cancel.setStyleSheet(_fromUtf8("background-color: rgba(255, 255, 255, 255);")) self.buttonBox_Dialog_User_data_Save_Cancel.setOrientation(QtCore.Qt.Horizontal) self.buttonBox_Dialog_User_data_Save_Cancel.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Save) self.buttonBox_Dialog_User_data_Save_Cancel.setCenterButtons(False) self.buttonBox_Dialog_User_data_Save_Cancel.setObjectName(_fromUtf8("buttonBox_Dialog_User_data_Save_Cancel")) self.verticalLayout_2.addWidget(self.buttonBox_Dialog_User_data_Save_Cancel) self.horizontalLayout_5.addLayout(self.verticalLayout_2) self.retranslateUi(dialog_User_Data) QtCore.QObject.connect(self.buttonBox_Dialog_User_data_Save_Cancel, QtCore.SIGNAL(_fromUtf8("rejected()")), dialog_User_Data.reject) QtCore.QObject.connect(self.buttonBox_Dialog_User_data_Save_Cancel, QtCore.SIGNAL(_fromUtf8("accepted()")), dialog_User_Data.hide) QtCore.QMetaObject.connectSlotsByName(dialog_User_Data) def retranslateUi(self, dialog_User_Data): dialog_User_Data.setWindowTitle(_translate("dialog_User_Data", "Dialog", None)) self.label_4.setText(_translate("dialog_User_Data", "Enter User ID or Name and Password:", None)) self.label.setText(_translate("dialog_User_Data", "User ID", None)) self.lineEdit__Dialog_User_data_User_ID.setPlaceholderText(_translate("dialog_User_Data", "Enter User ID here...", None)) self.label_2.setText(_translate("dialog_User_Data", "User Name", None)) self.lineEdit_Dialog_User_data_User_Name.setPlaceholderText(_translate("dialog_User_Data", "Search by name..", None)) self.label_3.setText(_translate("dialog_User_Data", " Password", None)) self.lineEdit_Dialog_User_data_Password.setPlaceholderText(_translate("dialog_User_Data", "Enter Password..", None)) self.checkBox_Widget_User_Data_Show_Password.setText(_translate("dialog_User_Data", "Show", None)) self.pushButton__Dialog_User_data_Log_In.setText(_translate("dialog_User_Data", "Log in", None)) self.textEdit_Dialog_User_data_Listing.setHtml(_translate("dialog_User_Data", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n" "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n" "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:\'Monospace\'; font-size:8pt; font-weight:400; font-style:normal;\">\n" "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>", None)) import background_icons_rc
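A pyuic4-generated module such as this is meant to be imported, never edited by hand (the header warns that changes are lost). A minimal, hypothetical host script is sketched below; it assumes the generated file is importable as ui_User_Data_Dialog and that the compiled resource module background_icons_rc it imports is on sys.path.

# Standard PyQt4 pattern: build the generated Ui class onto a plain QDialog.
import sys
from PyQt4 import QtGui
from ui_User_Data_Dialog import Ui_dialog_User_Data

app = QtGui.QApplication(sys.argv)
dialog = QtGui.QDialog()
ui = Ui_dialog_User_Data()
ui.setupUi(dialog)       # creates the widgets, layouts, and signal hookups
dialog.show()
sys.exit(app.exec_())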
gpl-2.0
8,153,043,657,374,581,000
65.225379
188
0.71067
false
ama-jharrison/agdc
agdc/deprecated/landsat_tiler.py
1
57143
#!/usr/bin/env python #=============================================================================== # Copyright 2015 Geoscience Australia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #=============================================================================== ''' Original prototype script to reproject and tile ORTHO, NBAR & PQ datasets and create tile files and records in Datacube DB. Requires previous run of dbupdater.py to catalogue datasets. N.B: This functionality is now provided by landsat_ingester.py Created on 05/10/2012 @author: Alex Ip ''' import os import sys import argparse import logging import re import psycopg2 import numpy import shutil from osgeo import gdal,osr from math import floor,ceil from datetime import datetime from copy import copy import time import string from EOtools.utils import log_multiline from EOtools.execute import execute from agdc import DataCube TILE_OWNER = 'axi547:rs0' # Owner of file files # Set top level standard output console_handler = logging.StreamHandler(sys.stdout) console_handler.setLevel(logging.INFO) console_formatter = logging.Formatter('%(message)s') console_handler.setFormatter(console_formatter) logger = logging.getLogger(__name__) if not logger.level: logger.setLevel(logging.DEBUG) # Default logging level for all modules logger.addHandler(console_handler) class LandsatTiler(DataCube): CONTIGUITY_BIT_INDEX = 8 def getFileSizeMB(self, path): """Gets the size of a file (megabytes). Arguments: path: file path Returns: File size (MB) Raises: OSError [Errno=2] if file does not exist """ return os.path.getsize(path) / (1024*1024) def parse_args(self): """Overrides Datacube function to parse the command line arguments. 
Returns: argparse namespace object """ logger.debug(' Calling parse_args()') _arg_parser = argparse.ArgumentParser('datacube') _arg_parser.add_argument('-C', '--config', dest='config_file', default=os.path.join(self.agdc_root, 'agdc_default.conf'), help='DataCube configuration file') _arg_parser.add_argument('-d', '--debug', dest='debug', default=False, action='store_const', const=True, help='Debug mode flag') _arg_parser.add_argument('--refresh', dest='refresh', default=True, action='store_const', const=True, help='Refresh mode flag to force updating of existing records') _arg_parser.add_argument('-t', '--tile_type', dest='default_tile_type_id', required=False, default=None, help='Tile type ID of tile to be stacked') return _arg_parser.parse_args() def __init__(self, source_datacube=None, default_tile_type_id=1): """Constructor Arguments: source_datacube: Optional DataCube object whose connection and data will be shared tile_type_id: Optional tile_type_id value (defaults to config file value = 1) """ if source_datacube: # Copy values from source_datacube and then override command line args self.__dict__ = copy(source_datacube.__dict__) args = self.parse_args() # Set instance attributes for every value in command line arguments file for attribute_name in args.__dict__.keys(): attribute_value = args.__dict__[attribute_name] self.__setattr__(attribute_name, attribute_value) else: DataCube.__init__(self); # Call inherited constructor if self.debug: console_handler.setLevel(logging.DEBUG) # Turn autocommit OFF so that transaction can cover all queries for each dataset self.db_connection.autocommit = False self.db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED) # Attempt to parse dates from command line arguments or config file try: self.default_tile_type_id = int(self.default_tile_type_id) except: self.default_tile_type_id = default_tile_type_id try: self.start_date = datetime.strptime(self.start_date, '%d/%m/%Y').date() except: self.start_date = None try: self.end_date = datetime.strptime(self.end_date, '%d/%m/%Y').date() except: self.end_date = None try: self.min_path = int(self.min_path) except: self.min_path = None try: self.max_path = int(self.max_path) except: self.max_path = None try: self.min_row = int(self.min_row) except: self.min_row = None try: self.max_row = int(self.max_row) except: self.max_row = None def create_tiles(self, start_date=None, end_date=None, min_path=None, max_path=None, min_row=None, max_row=None, tile_type_id=None): # Set default values to instance values start_date = start_date or self.start_date end_date = end_date or self.end_date min_path = min_path or self.min_path max_path = max_path or self.max_path min_row = min_row or self.min_row max_row = max_row or self.max_row tile_type_id = tile_type_id or self.default_tile_type_id tile_type_info = self.tile_type_dict[tile_type_id] def process_dataset(dataset_info): log_multiline(logger.debug, dataset_info, 'Dataset values', '\t') def find_file(dataset_dir, file_pattern): # logger.debug('find_file(%s, %s) called', dataset_dir, file_pattern) assert os.path.isdir(dataset_dir), '%s is not a valid directory' % dataset_dir filelist = [filename for filename in os.listdir(dataset_dir) if re.match(file_pattern, filename)] # logger.debug('filelist = %s', filelist) assert len(filelist) == 1, 'Unable to find unique match for file pattern %s' % file_pattern return os.path.join(dataset_dir, filelist[0]) def get_tile_index_range(dataset_filename): """Returns integer (xmin, ymin, xmax, ymax) 
tuple for input GDAL dataset filename""" dataset = gdal.Open(dataset_filename) assert dataset, 'Unable to open dataset %s' % dataset_filename spatial_reference = osr.SpatialReference() spatial_reference.ImportFromWkt(dataset.GetProjection()) geotrans = dataset.GetGeoTransform() logger.debug('geotransform = %s', geotrans) # latlong_spatial_reference = spatial_reference.CloneGeogCS() tile_spatial_reference = osr.SpatialReference() s = re.match('EPSG:(\d+)', tile_type_info['crs']) if s: epsg_code = int(s.group(1)) logger.debug('epsg_code = %d', epsg_code) assert tile_spatial_reference.ImportFromEPSG(epsg_code) == 0, 'Invalid EPSG code for tile projection' else: assert tile_spatial_reference.ImportFromWkt(tile_type_info['crs']), 'Invalid WKT for tile projection' logger.debug('Tile WKT = %s', tile_spatial_reference.ExportToWkt()) coord_transform_to_tile = osr.CoordinateTransformation(spatial_reference, tile_spatial_reference) #Have looked at following with particular scene: #/g/data1/v10/NBAR/2009-03/LS5_TM_NBAR_P54_GANBAR01-002_110_078_20090319/scene01/LS5_TM_NBAR_P54_GANBAR01-002_110_078_20090319_B10.tif #MPH 04/04/2014 calculate four corners of the dataset, include cross terms of the geotransform logger.debug('Dataset vertex coordinates: UL = (%f, %f); LL = (%f, %f); UR = (%f, %f); LR = (%f, %f)', xul, yul, xll, yll, xur, yur, xlr, ylr) logger.debug('Dataset bounding box: UL = (%f, %f); LL = (%f, %f); UR = (%f, %f); LR = (%f, %f)', xmin, ymax, xmin, ymin, xmax, ymax, xmax, ymin) return (int(floor((xmin - tile_type_info['x_origin']) / tile_type_info['x_size'])), int(floor((ymin - tile_type_info['y_origin']) / tile_type_info['y_size'])), int(ceil((xmax - tile_type_info['x_origin']) / tile_type_info['x_size'])), int(ceil((ymax - tile_type_info['y_origin']) / tile_type_info['y_size']))) #Would return (-120, -27, 123, -25) on scene above def get_tiles_touched_by_acquisition(dataset_filename): """For the quadrilateral defined by the acquisitiion footprint, return a list of overlapping tiles as [(xtile, ytile), ..., ]""" def find_intersection(X, Y): """given a list of four x-coordinates, X, and a list of four y-coordinates, Y, determine if there is a point of intersection""" pvec = (X[0], Y[0]) qvec = (X[2], Y[2]) rvec = (X[1] - X[0], Y[1] - Y[0]) svec = (X[3] - X[2], Y[3] - Y[2]) rvec_cross_svec = rvec[0] * svec[1] - rvec[1] * svec[0] if rvec_cross_svec == 0: return False qminusp_cross_svec = (qvec[0] - pvec[0]) * svec[1] - (qvec[1] - pvec[1]) * svec[0] qminusp_cross_rvec = (qvec[0] - pvec[0]) * rvec[1] - (qvec[1] - pvec[1]) * rvec[0] tparameter = qminusp_cross_svec / rvec_cross_svec uparameter = qminusp_cross_rvec / rvec_cross_svec if tparameter > 0 and tparameter < 1 and uparameter > 0 and uparameter < 1: return True #get_tiles_touched_by_acquisition method starts here dataset = gdal.Open(dataset_filename) assert dataset, 'Unable to open dataset %s' % dataset_filename spatial_reference = osr.SpatialReference() spatial_reference.ImportFromWkt(dataset.GetProjection()) geotrans = dataset.GetGeoTransform() logger.debug('geotransform = %s', geotrans) # latlong_spatial_reference = spatial_reference.CloneGeogCS() tile_spatial_reference = osr.SpatialReference() s = re.match('EPSG:(\d+)', tile_type_info['crs']) if s: epsg_code = int(s.group(1)) logger.debug('epsg_code = %d', epsg_code) assert tile_spatial_reference.ImportFromEPSG(epsg_code) == 0, 'Invalid EPSG code for tile projection' else: assert tile_spatial_reference.ImportFromWkt(tile_type_info['crs']), 'Invalid WKT for tile projection' 
logger.debug('Tile WKT = %s', tile_spatial_reference.ExportToWkt()) coord_transform_to_tile = osr.CoordinateTransformation(spatial_reference, tile_spatial_reference) #Determine the bounding quadrilateral of the acquisition xul, yul, _z = coord_transform_to_tile.TransformPoint(geotrans[0], geotrans[3], 0) xll, yll, _z = coord_transform_to_tile.TransformPoint(geotrans[0] + geotrans[2]*dataset.RasterYSize, geotrans[3] + geotrans[5]*dataset.RasterYSize, 0) xur, yur, _z = coord_transform_to_tile.TransformPoint(geotrans[0] + geotrans[1]*dataset.RasterXSize, geotrans[3] + geotrans[4]*dataset.RasterXSize, 0) xlr, ylr, _z = coord_transform_to_tile.TransformPoint(geotrans[0] + geotrans[1]*dataset.RasterXSize + geotrans[2]*dataset.RasterYSize, geotrans[3] + geotrans[4]*dataset.RasterXSize + geotrans[5]*dataset.RasterYSize,0) acquisition_bbox = [(xul, yul), (xur, yur), (xlr, ylr), (xll, yll)] acquisition_vertex_number = len(acquisition_bbox) #Within this acqusition quadrilateral, we need to find all tiles with at least one vertex contained within the acquisition #There is an outer rectangle, which is the minimum containing rectangle for the acquisition footprint, #and an inner rectangle, which is the maximum rectagle contained by the acquisitiion footprint outer_xmin = min(xll, xul) outer_xmax = max(xlr, xur) outer_ymin = min(yll, ylr) outer_ymax = max(yul, yur) inner_xmin = max(xll, xul) inner_xmax = min(xlr, xur) inner_ymin = max(yll, ylr) inner_ymax = min(yul, yur) outer_xmin_index = int(floor((outer_xmin - tile_type_info['x_origin']) / tile_type_info['x_size'])) outer_xmax_index = int(floor((outer_xmax - tile_type_info['x_origin']) / tile_type_info['x_size'])) outer_ymin_index = int(floor((outer_ymin - tile_type_info['y_origin']) / tile_type_info['y_size'])) outer_ymax_index = int(floor((outer_ymax - tile_type_info['y_origin']) / tile_type_info['y_size'])) inner_xmin_index = int(floor((inner_xmin - tile_type_info['x_origin']) / tile_type_info['x_size'])) inner_xmax_index = int(floor((inner_xmax - tile_type_info['x_origin']) / tile_type_info['x_size'])) inner_ymin_index = int(floor((inner_ymin - tile_type_info['y_origin']) / tile_type_info['y_size'])) inner_ymax_index = int(floor((inner_ymax - tile_type_info['y_origin']) / tile_type_info['y_size'])) touched_tiles = [] #inspect tiles from the outer rectangle for itile in range(outer_xmin_index, outer_xmax_index + 1): for jtile in range(outer_ymin_index, outer_ymax_index + 1): if itile >= inner_xmin_index and itile <= inner_xmax_index and jtile >= inner_ymin_index and jtile <= inner_ymax_index: touched_tiles.append([itile, jtile]) continue #For each tile in the outer rectangle but not in the inner rectangle #define the upper-left vertexx (x0, y0) = (tile_type_info['x_origin'] + itile * tile_type_info['x_size'], tile_type_info['y_origin'] + (jtile + 1) * tile_type_info['y_size']) tile_bbox = [(x0, y0), (x0 + tile_type_info['x_size'], y0), (x0 + tile_type_info['x_size'], y0 - tile_type_info['y_size']), (x0, y0 - tile_type_info['y_size'])] tile_vertex_number = len(tile_bbox) intersection_exists = False for tile_vertex in range(tile_vertex_number): x1, y1 = tile_bbox[tile_vertex] x2, y2 = tile_bbox[(tile_vertex + 1) % tile_vertex_number] for acquisition_vertex in range(acquisition_vertex_number): x3, y3 = acquisition_bbox[acquisition_vertex] x4, y4 = acquisition_bbox[(acquisition_vertex + 1) % acquisition_vertex_number] #get intersection of the two lines (x1, y1)-to-(x2, y2) and (x3, y3)-to-(x4, y4) xcoords = [x1, x2, x3, x4] ycoords = [y1, y2, y3, y4] 
intersection_exists = find_intersection(xcoords,ycoords) if intersection_exists: touched_tiles.append([itile, jtile]) break if intersection_exists: break return touched_tiles def find_tiles(x_index = None, y_index = None): """Find any tile records for current dataset returns dict of tile information keyed by tile_id """ db_cursor2 = self.db_connection.cursor() sql = """-- Check for any existing tiles select tile_id, x_index, y_index, tile_type_id, tile_pathname, dataset_id, tile_class_id, tile_size from tile_footprint inner join tile using(x_index, y_index, tile_type_id) where (%(x_index)s is null or x_index = %(x_index)s) and (%(y_index)s is null or y_index = %(y_index)s) and tile_type_id = %(tile_type_id)s and (dataset_id = %(l1t_dataset_id)s or dataset_id = %(nbar_dataset_id)s or dataset_id = %(pqa_dataset_id)s) and ctime is not null -- TODO: Remove this after reload ; """ params = {'x_index': x_index, 'y_index': y_index, 'tile_type_id': tile_type_info['tile_type_id'], 'l1t_dataset_id': dataset_info['l1t_dataset_id'], 'nbar_dataset_id': dataset_info['nbar_dataset_id'], 'pqa_dataset_id': dataset_info['pqa_dataset_id']} log_multiline(logger.debug, db_cursor2.mogrify(sql, params), 'SQL', '\t') db_cursor2.execute(sql, params) tile_info = {} for record in db_cursor2: tile_info_dict = { 'x_index': record[1], 'y_index': record[2], 'tile_type_id': record[3], 'tile_pathname': record[4], 'dataset_id': record[5], 'tile_class_id': record[6], 'tile_size': record[7] } tile_info[record[0]] = tile_info_dict # Keyed by tile_id log_multiline(logger.debug, tile_info, 'tile_info', '\t') return tile_info def get_vrt_band_list(): """Returns list of band information to create tiles """ logger.debug('get_vrt_band_list() called') vrt_band_list = [] sensor_dict = self.bands[tile_type_id][(dataset_info['satellite_tag'], dataset_info['sensor_name'])] # log_multiline(logger.debug, sensor, 'Sensor', '\t') for file_number in sorted(sensor_dict.keys()): band_info = sensor_dict[file_number] if band_info['level_name'] == 'NBAR': dataset_dir = dataset_info['nbar_dataset_path'] dataset_id = dataset_info['nbar_dataset_id'] processing_level = dataset_info['nbar_level_name'] nodata_value = dataset_info['nbar_nodata_value'] resampling_method = dataset_info['nbar_resampling_method'] elif band_info['level_name'] == 'ORTHO': dataset_dir = dataset_info['l1t_dataset_path'] dataset_id = dataset_info['l1t_dataset_id'] processing_level = dataset_info['l1t_level_name'] nodata_value = dataset_info['l1t_nodata_value'] resampling_method = dataset_info['l1t_resampling_method'] else: continue # Ignore any pan-chromatic and derived bands dataset_dir = os.path.join(dataset_dir, 'scene01') filename = find_file(dataset_dir, band_info['file_pattern']) vrt_band_list.append({'file_number': band_info['file_number'], 'filename': filename, 'name': band_info['band_name'], 'dataset_id': dataset_id, 'band_id': band_info['band_id'], 'processing_level': processing_level, 'nodata_value': nodata_value, 'resampling_method': resampling_method, 'tile_layer': band_info['tile_layer']}) # Add Derived bands (only PQA at this stage) for band_level in ['PQA']: derived_bands = self.bands[tile_type_id][('DERIVED', band_level)] # log_multiline(logger.debug, derived_bands, 'derived_bands', '\t') #TODO: Make this able to handle multiple layers band_info = [band_info for band_info in derived_bands.values() if band_info['level_name'] == band_level][0] file_pattern = band_info['file_pattern'] dataset_dir = os.path.join(dataset_info['pqa_dataset_path'], 'scene01') 
dataset_id = dataset_info['pqa_dataset_id'] filename = find_file(dataset_dir, file_pattern) processing_level = dataset_info['pqa_level_name'] nodata_value = dataset_info['pqa_nodata_value'] # Should be None for PQA resampling_method = dataset_info['pqa_resampling_method'] vrt_band_list.append({'file_number': None, 'filename': filename, 'name': band_info['band_name'], 'dataset_id': dataset_id, 'band_id': band_info['band_id'], 'processing_level': processing_level, 'nodata_value': nodata_value, 'resampling_method': resampling_method, 'tile_layer': 1}) log_multiline(logger.debug, vrt_band_list, 'vrt_band_list = %s', '\t') return vrt_band_list # process_dataset function starts here result = False db_cursor1 = self.db_connection.cursor() logger.info('Processing dataset %s', dataset_info['nbar_dataset_path']) vrt_band_stack_basename = '_'.join([dataset_info['satellite_tag'], re.sub('\W', '', dataset_info['sensor_name']), dataset_info['start_datetime'].date().strftime('%Y%m%d'), '%03d' % dataset_info['x_ref'], '%03d' % dataset_info['y_ref']] ) + '.vrt' logger.debug('vrt_band_stack_basename = %s', vrt_band_stack_basename) tile_output_root = os.path.join(self.tile_root, tile_type_info['tile_directory'], dataset_info['satellite_tag'] + '_' + re.sub('\W', '', dataset_info['sensor_name'])) logger.debug('tile_output_root = %s', tile_output_root) vrt_band_list = get_vrt_band_list() #Upper right obtainable as (dataset_info['ul_lon'], dataset_info['ul_lat']), but these coordinates only relate to tiles in the case of (1deg, 1deg) tiles #Otherwise, we must use the generic scene-to-tile coordinate transformation in get_tile_index_range #tile_index_range = get_tile_index_range(vrt_band_list[0]['filename']) # Find extents of first band dataset tiles_in_acquisition = get_tiles_touched_by_acquisition(vrt_band_list[0]['filename']) #The number of tile footprints touched by this acquisition tile_count = len(tiles_in_acquisition) # Check whether tiles exist for every band tile_record_count = len(find_tiles()) logger.info('Found %d tile records in database for %d tiles', tile_record_count, tile_count * 3) # Count ORTHO, NBAR & PQA if tile_record_count == tile_count * 3: logger.info('All tiles already exist in database - skipping tile creation for %s', dataset_info['nbar_dataset_path']) return result try: #TODO: Create all new acquisition records and commit the transaction here # Use NBAR dataset name for dataset lock (could have been any other level) work_directory = os.path.join(self.temp_dir, os.path.basename(dataset_info['nbar_dataset_path']) ) #TODO: Apply lock on path/row instead of on dataset to try to force the same node to process the full depth if not self.lock_object(work_directory): logger.info('Already processing %s - skipping', dataset_info['nbar_dataset_path']) return result if self.refresh and os.path.exists(work_directory): shutil.rmtree(work_directory) self.create_directory(work_directory) tile_has_data = {} for processing_level in ['PQA', 'ORTHO', 'NBAR']: # N.B: PQA must be first vrt_band_info_list = [vrt_band_info for vrt_band_info in vrt_band_list if vrt_band_info['processing_level'] == processing_level] nodata_value = vrt_band_info_list[0]['nodata_value'] # All the same for a given processing_level resampling_method = vrt_band_info_list[0]['resampling_method'] # All the same for a given processing_level vrt_band_stack_filename = os.path.join(work_directory, processing_level + '_' + vrt_band_stack_basename) if not os.path.exists(vrt_band_stack_filename) or 
self.check_object_locked(vrt_band_stack_filename): # Check whether this dataset is already been processed if not self.lock_object(vrt_band_stack_filename): logger.warning('Band stack %s already being processed - skipping.', vrt_band_stack_filename) continue logger.info('Creating %s band stack file %s', processing_level, vrt_band_stack_filename) command_string = 'gdalbuildvrt -separate' if not self.debug: command_string += ' -q' if nodata_value is not None: command_string += ' -srcnodata %d -vrtnodata %d' % ( nodata_value, nodata_value) command_string += ' -overwrite %s %s' % ( vrt_band_stack_filename, ' '.join([vrt_band_info['filename'] for vrt_band_info in vrt_band_info_list]) ) logger.debug('command_string = %s', command_string) result = execute(command_string=command_string) if result['stdout']: log_multiline(logger.info, result['stdout'], 'stdout from ' + command_string, '\t') if result['returncode']: log_multiline(logger.error, result['stderr'], 'stderr from ' + command_string, '\t') raise Exception('%s failed', command_string) band_stack_dataset = gdal.Open(vrt_band_stack_filename) assert band_stack_dataset, 'Unable to open VRT %s' % vrt_band_stack_filename band_stack_dataset.SetMetadata( {'satellite': dataset_info['satellite_tag'], 'sensor': dataset_info['sensor_name'], 'start_datetime': dataset_info['start_datetime'].isoformat(), 'end_datetime': dataset_info['end_datetime'].isoformat(), 'path': '%03d' % dataset_info['x_ref'], 'row': '%03d' % dataset_info['y_ref']} ) for band_index in range(len(vrt_band_info_list)): band = band_stack_dataset.GetRasterBand(band_index + 1) band.SetMetadata({'name': vrt_band_info_list[band_index]['name'], 'filename': vrt_band_info_list[band_index]['filename']}) # Need to set nodata values for each band - can't seem to do it in gdalbuildvrt nodata_value = vrt_band_info_list[band_index]['nodata_value'] if nodata_value is not None: band.SetNoDataValue(nodata_value) band_stack_dataset.FlushCache() self.unlock_object(vrt_band_stack_filename) else: logger.info('Band stack %s already exists', vrt_band_stack_filename) band_stack_dataset = gdal.Open(vrt_band_stack_filename) logger.info('Processing %d %s Tiles', tile_count, processing_level) #MPH replace double-loop with single loop over tiles touched by acquisition for x_index, y_index in tiles_in_acquisition: #MPH #for x_index in range(tile_index_range[0], tile_index_range[2]): # for y_index in range(tile_index_range[1], tile_index_range[3]): #tile_extents to be used by gdalwarp -te flag. Works for our current crs EPSG 4326. In general, will need to get the tile's geotransform and #consider the max, min values in projected space (Xp, Yp). 
That is, need to calculate tile extents over the four vertices #Upper-left #xul = geotransform[0] #yul = geotransform[3] #Upper-right #xur = geotransform[0] + geotransform[1]*tile_type_info['x_pixels'] #yur = geotransform[3] + geotransform[4]*tile_type_info['x_pixels'] #Lower-left #xll = geotransform[0] + geotransform[2]*tile_type_info['y_pixels'] #yll = geotransform[3] + geotransform[5]*tile_type_info['y_pixels'] #Lower-right #xlr = geotransform[0] + geotransform[1]*tile_type_info['x_pixels'] + geotransform[2]*tile_type_info['y_pixels'] #ylr = geotransform[3] + geotransform[4]*tile_type_info['x_pixels'] + geotransform[5]*tile_type_info['y_pixels'] #tile_extents[0] = min(xll, xul) #tile_extents[1] = min(yll, ylr) #tile_extents[2] = max(xur, xlr) #tile_extents[3] = max(yul, yur) tile_extents = (tile_type_info['x_origin'] + x_index * tile_type_info['x_size'], tile_type_info['y_origin'] + y_index * tile_type_info['y_size'], tile_type_info['x_origin'] + (x_index + 1) * tile_type_info['x_size'], tile_type_info['y_origin'] + (y_index + 1) * tile_type_info['y_size']) logger.debug('tile_extents = %s', tile_extents) tile_output_dir = os.path.join(tile_output_root, re.sub('\+', '', '%+04d_%+04d' % (x_index, y_index)), '%04d' % dataset_info['start_datetime'].year ) self.create_directory(os.path.join(tile_output_dir, 'mosaic_cache')) tile_output_path = os.path.join(tile_output_dir, '_'.join([dataset_info['satellite_tag'], re.sub('\W', '', dataset_info['sensor_name']), processing_level, re.sub('\+', '', '%+04d_%+04d' % (x_index, y_index)), re.sub(':', '-', dataset_info['start_datetime'].isoformat()) ]) + tile_type_info['file_extension'] ) # Check whether this tile has already been processed if not self.lock_object(tile_output_path): logger.warning('Tile %s already being processed - skipping.', tile_output_path) continue # Only generate tile file if PQA tile or tile contains data if tile_has_data.get((x_index, y_index)) is None or tile_has_data[(x_index, y_index)]: #Assuming tile has data, use gdalwarp to reproject from scene projection to datacube projection command_string = 'gdalwarp' if not self.debug: command_string += ' -q' command_string += ' -t_srs %s -te %f %f %f %f -tr %f %f -tap -tap -r %s' % ( tile_type_info['crs'], tile_extents[0], tile_extents[1], tile_extents[2], tile_extents[3], tile_type_info['x_pixel_size'], tile_type_info['y_pixel_size'], resampling_method ) if nodata_value is not None: command_string += ' -srcnodata %d -dstnodata %d' % (nodata_value, nodata_value) command_string += ' -of %s' % tile_type_info['file_format'] if tile_type_info['format_options']: for format_option in tile_type_info['format_options'].split(','): command_string += ' -co %s' % format_option command_string += ' -overwrite %s %s' % ( vrt_band_stack_filename, tile_output_path ) logger.debug('command_string = %s', command_string) retry=True while retry: result = execute(command_string=command_string) if result['stdout']: log_multiline(logger.info, result['stdout'], 'stdout from ' + command_string, '\t') if result['returncode']: # Return code is non-zero log_multiline(logger.error, result['stderr'], 'stderr from ' + command_string, '\t') # Work-around for gdalwarp error writing LZW-compressed GeoTIFFs if (string.find(result['stderr'], 'LZW') > -1 # LZW-related error and tile_type_info['file_format'] == 'GTiff' # Output format is GeoTIFF and string.find(tile_type_info['format_options'], 'COMPRESS=LZW') > -1): # LZW compression requested temp_tile_path = os.path.join(os.path.dirname(vrt_band_stack_filename), 
os.path.basename(tile_output_path)) # Write uncompressed tile to a temporary path command_string = string.replace(command_string, 'COMPRESS=LZW', 'COMPRESS=NONE') command_string = string.replace(command_string, tile_output_path, temp_tile_path) # Translate temporary uncompressed tile to final compressed tile command_string += '; gdal_translate -of GTiff' if tile_type_info['format_options']: for format_option in tile_type_info['format_options'].split(','): command_string += ' -co %s' % format_option command_string += ' %s %s' % ( temp_tile_path, tile_output_path ) else: raise Exception('%s failed', command_string) else: retry = False # No retry on success # Set tile metadata tile_dataset = gdal.Open(tile_output_path) assert tile_dataset, 'Unable to open tile dataset %s' % tile_output_path # Check whether PQA tile contains any contiguous data #MPHtemp if tile_has_data.get((x_index, y_index)) is None and processing_level == 'PQA': tile_has_data[(x_index, y_index)] = ((numpy.bitwise_and(tile_dataset.GetRasterBand(1).ReadAsArray(), 1 << LandsatTiler.CONTIGUITY_BIT_INDEX)) > 0).any() logger.debug('%s tile (%d, %d) has data = %s', processing_level, x_index, y_index, tile_has_data[(x_index, y_index)]) #MPH check whether this processing_level has any data #if tile_has_data.get((x_index, y_index)) is None: # if processing_level == 'PQA': # tile_has_data[(x_index, y_index)] = ((numpy.bitwise_and(tile_dataset.GetRasterBand(1).ReadAsArray(), # 1 << LandsatTiler.CONTIGUITY_BIT_INDEX)) > 0).any() # else: # #pull in the multiple bands #Would need to look at NBAR and ORTHO to know if PQA contiguity bit is 1 # Only bother setting metadata if tile has valid data if tile_has_data[(x_index, y_index)]: metadata = band_stack_dataset.GetMetadata() metadata['x_index'] = str(x_index) metadata['y_index'] = str(y_index) tile_dataset.SetMetadata(metadata) # Set tile band metadata for band_index in range(len(vrt_band_info_list)): scene_band = band_stack_dataset.GetRasterBand(band_index + 1) tile_band = tile_dataset.GetRasterBand(band_index + 1) tile_band.SetMetadata(scene_band.GetMetadata()) # Need to set nodata values for each band - gdalwarp doesn't copy it across nodata_value = vrt_band_info_list[band_index]['nodata_value'] if nodata_value is not None: tile_band.SetNoDataValue(nodata_value) logger.info('Processed %s Tile (%d, %d)', processing_level, x_index, y_index) else: logger.info('AAA Skipped empty %s Tile (%d, %d)', processing_level, x_index, y_index) else: logger.info('BBB Skipped empty %s Tile (%d, %d)', processing_level, x_index, y_index) # Change permissions on any recently created files command_string = 'chmod -R 775 %s; chmod -R 777 %s' % (tile_output_dir, os.path.join(tile_output_dir, 'mosaic_cache') ) result = execute(command_string=command_string) if result['stdout']: log_multiline(logger.info, result['stdout'], 'stdout from ' + command_string, '\t') # N.B: command may return errors for files not owned by user if result['returncode']: log_multiline(logger.warning, result['stderr'], 'stderr from ' + command_string, '\t') # raise Exception('%s failed', command_string) self.unlock_object(tile_output_path) # Check whether tile contains any data if tile_has_data[(x_index, y_index)]: tile_class_id = 1 # Valid tile tile_size = self.getFileSizeMB(tile_output_path) else: # PQA tile contains no data # Remove empty PQA tile file tile_class_id = 2 # Dummy tile record with no file self.remove(tile_output_path) tile_size = 0 sql = """-- Insert new tile_footprint record if necessary insert into tile_footprint ( 
x_index, y_index, tile_type_id, x_min, y_min, x_max, y_max ) select %(x_index)s, %(y_index)s, %(tile_type_id)s, %(x_min)s, %(y_min)s, %(x_max)s, %(y_max)s where not exists (select x_index, y_index, tile_type_id from tile_footprint where x_index = %(x_index)s and y_index = %(y_index)s and tile_type_id = %(tile_type_id)s); -- Update any existing tile record update tile set tile_pathname = %(tile_pathname)s, tile_class_id = %(tile_class_id)s, tile_size = %(tile_size)s, ctime = now() where x_index = %(x_index)s and y_index = %(y_index)s and tile_type_id = %(tile_type_id)s and dataset_id = %(dataset_id)s; -- Insert new tile record if necessary insert into tile ( tile_id, x_index, y_index, tile_type_id, dataset_id, tile_pathname, tile_class_id, tile_size, ctime ) select nextval('tile_id_seq'::regclass), %(x_index)s, %(y_index)s, %(tile_type_id)s, %(dataset_id)s, %(tile_pathname)s, %(tile_class_id)s, %(tile_size)s, now() where not exists (select tile_id from tile where x_index = %(x_index)s and y_index = %(y_index)s and tile_type_id = %(tile_type_id)s and dataset_id = %(dataset_id)s ); """ params = {'x_index': x_index, 'y_index': y_index, 'tile_type_id': tile_type_info['tile_type_id'], 'x_min': tile_extents[0], 'y_min': tile_extents[1], 'x_max': tile_extents[2], 'y_max': tile_extents[3], 'dataset_id': vrt_band_info_list[0]['dataset_id'], # All the same 'tile_pathname': tile_output_path, 'tile_class_id': tile_class_id, 'tile_size': tile_size } log_multiline(logger.debug, db_cursor1.mogrify(sql, params), 'SQL', '\t') db_cursor1.execute(sql, params) #end loop over all tiles touched by acquisiton self.unlock_object(work_directory) #if not self.debug: # shutil.rmtree(work_directory) result = True self.db_connection.commit() logger.info('Dataset tiling completed - Transaction committed') return result except Exception, e: logger.error('Tiling operation failed: %s', e.message) # Keep on processing self.db_connection.rollback() if not self.debug: raise def process_scenes(): db_cursor = self.db_connection.cursor() sql = """-- Find all scenes with L1T, NBAR and PQA level datasets with missing tiles select * from ( select distinct acquisition_id, l1t.dataset_id as l1t_dataset_id, l1t.dataset_path as l1t_dataset_path, l1t.level_name as l1t_level_name, l1t.nodata_value as l1t_nodata_value, l1t.resampling_method as l1t_resampling_method, l1t.tile_count as l1t_tile_count, nbar.dataset_id as nbar_dataset_id, nbar.dataset_path as nbar_dataset_path, nbar.level_name as nbar_level_name, nbar.nodata_value as nbar_nodata_value, nbar.resampling_method as nbar_resampling_method, nbar.tile_count as nbar_tile_count, pqa.dataset_id as pqa_dataset_id, pqa.dataset_path as pqa_dataset_path, pqa.level_name as pqa_level_name, pqa.nodata_value as pqa_nodata_value, pqa.resampling_method as pqa_resampling_method, pqa.tile_count as pqa_tile_count, satellite_tag, sensor_name, x_ref, y_ref, start_datetime, end_datetime, ll_lon, ll_lat, lr_lon, lr_lat, ul_lon, ul_lat, ur_lon, ur_lat, nbar.crs, nbar.ll_x, nbar.ll_y, nbar.lr_x, nbar.lr_y, nbar.ul_x, nbar.ul_y, nbar.ur_x, nbar.ur_y, nbar.x_pixels, nbar.y_pixels, -- TODO: Use dataset_footprint table so that this will not break for projected tile types ( ceil(greatest((lr_lon + 360.0)::numeric %% 360.0::numeric, (ur_lon + 360.0)::numeric %% 360.0::numeric) / %(tile_x_size)s) - floor(least((ll_lon + 360.0)::numeric %% 360.0::numeric, (ul_lon + 360.0)::numeric %% 360.0::numeric) / %(tile_x_size)s) ) * ( ceil(greatest(ul_lat, ur_lat) / %(tile_y_size)s) - floor(least(ll_lat, lr_lat) / 
%(tile_y_size)s) ) as tiles_required from acquisition inner join ( select acquisition_id, d.dataset_id, level_name, dataset_path, nodata_value, resampling_method, count(tile_id) as tile_count from dataset d inner join processing_level using(level_id) left join tile t on t.dataset_id = d.dataset_id and tile_type_id = 1 and ctime is not null -- *** TODO: Remove this line after reload *** where level_name = 'ORTHO' group by 1,2,3,4,5,6 ) l1t using(acquisition_id) inner join ( select acquisition_id, d.dataset_id, level_name, dataset_path, nodata_value, resampling_method, -- Grab extra info from NBAR dataset - should be the same as in L1T & PQA datasets crs, ll_x, ll_y, lr_x, lr_y, ul_x, ul_y, ur_x, ur_y, x_pixels, y_pixels, count(tile_id) as tile_count from dataset d inner join processing_level using(level_id) left join tile t on t.dataset_id = d.dataset_id and tile_type_id = 1 and ctime is not null -- *** TODO: Remove this line after reload *** where level_name = 'NBAR' group by 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17 ) nbar using(acquisition_id) inner join ( select acquisition_id, d.dataset_id, level_name, dataset_path, nodata_value, resampling_method, count(tile_id) as tile_count from dataset d inner join processing_level using(level_id) left join tile t on t.dataset_id = d.dataset_id and tile_type_id = 1 and ctime is not null -- *** TODO: Remove this line after reload *** where level_name = 'PQA' group by 1,2,3,4,5,6 ) pqa using(acquisition_id) inner join satellite sa using(satellite_id) inner join sensor se using(satellite_id, sensor_id) where (%(start_date)s is null or start_datetime >= %(start_date)s) and (%(end_date)s is null or end_datetime < cast(%(end_date)s as date) + 1) and (%(min_path)s is null or x_ref >= %(min_path)s) and (%(max_path)s is null or x_ref <= %(max_path)s) and (%(min_row)s is null or y_ref >= %(min_row)s) and (%(max_row)s is null or y_ref <= %(max_row)s) and (cloud_cover is null or cloud_cover < 98) -- Arbitrary threshold above which scene should be ignored ) datasets where l1t_tile_count < tiles_required or nbar_tile_count < tiles_required or pqa_tile_count < tiles_required order by -- Order by path, row then descending date-times l1t_tile_count + nbar_tile_count + pqa_tile_count, x_ref, y_ref, start_datetime desc, end_datetime desc, satellite_tag, sensor_name; """ params = {'tile_type_id': tile_type_id, 'start_date': start_date, 'end_date': end_date, 'min_path': min_path, 'max_path': max_path, 'min_row': min_row, 'max_row': max_row, 'tile_x_size': tile_type_info['x_size'], 'tile_y_size': tile_type_info['y_size'] } log_multiline(logger.debug, db_cursor.mogrify(sql, params), 'SQL', '\t') # This mother of all queries creates a logjam at the DB server, so we only allow one instance a query at a time to submit it #TODO: Find a nicer way of dealing with this while not self.lock_object(os.path.basename(__file__) + ' dataset query'): print 'About to sleep because %s not locked' %(os.path.basename(__file__) + ' dataset query') time.sleep(10) try: db_cursor.execute(sql, params) finally: self.unlock_object(os.path.basename(__file__) +' dataset query') column_list = ['acquisition_id', 'l1t_dataset_id', 'l1t_dataset_path', 'l1t_level_name', 'l1t_nodata_value', 'l1t_resampling_method', 'l1t_tile_count', 'nbar_dataset_id', 'nbar_dataset_path', 'nbar_level_name', 'nbar_nodata_value', 'nbar_resampling_method', 'nbar_tile_count', 'pqa_dataset_id', 'pqa_dataset_path', 'pqa_level_name', 'pqa_nodata_value', 'pqa_resampling_method', 'pqa_tile_count', 'satellite_tag', 'sensor_name', 
'x_ref', 'y_ref', 'start_datetime', 'end_datetime', 'll_lon', 'll_lat', 'lr_lon', 'lr_lat', 'ul_lon', 'ul_lat', 'ur_lon', 'ur_lat', 'crs', 'll_x', 'll_y', 'lr_x', 'lr_y', 'ul_x', 'ul_y', 'ur_x', 'ur_y', 'x_pixels', 'y_pixels'] for record in db_cursor: dataset_info = {} for column_index in range(len(column_list)): dataset_info[column_list[column_index]] = record[column_index] # Ignore bad dataset and proceed to next one if not debugging if self.debug: process_dataset(dataset_info) else: try: process_dataset(dataset_info) except Exception, e: logger.warning(e.message) # Start of create_tiles function process_scenes() # create_composites() if __name__ == '__main__': landsat_tiler = LandsatTiler() #=========================================================================== # # Sleep for a random number of seconds to avoid potential database lock-up with many instances starting up at the same time # # TODO: Find something better than this nasty work-around # if not landsat_tiler.debug: # time.sleep(random.randint(0, 30)) #=========================================================================== landsat_tiler.create_tiles()
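The tile-touch test earlier in this file ends by calling find_intersection(xcoords, ycoords), a helper that is not included in this excerpt. A minimal sketch consistent with the call site follows; the packed argument layout ([x1, x2, x3, x4] / [y1, y2, y3, y4], two endpoints per segment) is inferred from the caller, and the body is a standard parametric segment-intersection test, not necessarily the original implementation.

def find_intersection(xcoords, ycoords):
    """Return True if segment (x1,y1)-(x2,y2) intersects segment (x3,y3)-(x4,y4).

    Solves P1 + t*(P2-P1) = P3 + u*(P4-P3) and checks that both
    parameters fall inside [0, 1].
    """
    x1, x2, x3, x4 = xcoords
    y1, y2, y3, y4 = ycoords
    denom = (x2 - x1) * (y4 - y3) - (y2 - y1) * (x4 - x3)
    if denom == 0:
        return False  # Parallel or degenerate segments: no single crossing point
    t = ((x3 - x1) * (y4 - y3) - (y3 - y1) * (x4 - x3)) / float(denom)
    u = ((x3 - x1) * (y2 - y1) - (y3 - y1) * (x2 - x1)) / float(denom)
    return 0.0 <= t <= 1.0 and 0.0 <= u <= 1.0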
apache-2.0
best-coloc-ever/globibot
bot/plugins/repost/plugin.py
from globibot.lib.plugin import Plugin

from collections import defaultdict
from time import time

import re

URL_PATTERN = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')

class Repost(Plugin):

    def load(self):
        self.shames = defaultdict(lambda: defaultdict(list))
        self.links = self.load_links()

    async def on_new(self, message):
        await self.process_message(message)

    async def on_edit(self, before, after):
        await self.process_message(after)

    async def process_message(self, message):
        for url in URL_PATTERN.findall(message.content):
            try:
                author_id, stamp = self.links[message.server.id][url]
                for emoji in ['🔔', '🇷', '🇪', '🇵', '🇴', '🇸', '🇹']:
                    await self.bot.add_reaction(message, emoji)
                self.shames[message.server.id][message.author.id].append((url, time()))
            except KeyError:
                self.links[message.server.id][url] = (message.author.id, time())

    def load_links(self):
        links = defaultdict(dict)

        with self.transaction() as trans:
            trans.execute('''
                select author_id, stamp, server_id, content
                from log
                order by stamp asc
            ''')

            for author_id, stamp, server_id, content in trans.fetchall():
                for url in URL_PATTERN.findall(content):
                    if url in links[str(server_id)]:
                        self.shames[str(server_id)][str(author_id)].append((url, stamp.timestamp()))
                    else:
                        links[str(server_id)][url] = (str(author_id), stamp.timestamp())

        return links
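A quick standalone illustration of the repost detection above, exercising URL_PATTERN and the first-poster bookkeeping outside of Discord. The message objects and per-server keying are omitted; this only mirrors the plugin's try/except KeyError logic:

import re

URL_PATTERN = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')

links = {}  # url -> first poster, as in the plugin
for author, content in [('alice', 'look at https://example.com/a'),
                        ('bob',   'old news: https://example.com/a')]:
    for url in URL_PATTERN.findall(content):
        if url in links:
            print('REPOST by %s (first posted by %s)' % (author, links[url]))
        else:
            links[url] = author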
mit
sensusaps/RoboBraille.Web.API
WorkingDirectory/DaisyPipeline/transformers/ca_cnib_rtf2dtbook/rtf2xml-py/rtf2xml/check_brackets.py
#########################################################################
#                                                                       #
#   copyright 2002 Paul Henry Tremblay                                  #
#                                                                       #
#   This program is distributed in the hope that it will be useful,     #
#   but WITHOUT ANY WARRANTY; without even the implied warranty of      #
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU    #
#   General Public License for more details.                            #
#                                                                       #
#   You should have received a copy of the GNU General Public License   #
#   along with this program; if not, write to the Free Software         #
#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA            #
#   02111-1307 USA                                                      #
#                                                                       #
#########################################################################
import sys, os

class CheckBrackets:
    """Check that brackets match up"""

    def __init__(self, bug_handler=None, file=None):
        self.__file = file
        self.__bug_handler = bug_handler
        self.__bracket_count = 0
        self.__ob_count = 0
        self.__cb_count = 0
        self.__open_bracket_num = []

    def open_brack(self, line):
        num = line[-5:-1]
        self.__open_bracket_num.append(num)
        self.__bracket_count += 1

    def close_brack(self, line):
        num = line[-5:-1]
        ##self.__open_bracket_num.append(num)
        try:
            last_num = self.__open_bracket_num.pop()
        except:
            return 0
        if num != last_num:
            return 0
        self.__bracket_count -= 1
        return 1

    def check_brackets(self):
        read_obj = open(self.__file, 'r')
        line = 'dummy'
        line_count = 0
        while line:
            line_count += 1
            line = read_obj.readline()
            self.__token_info = line[:16]
            if self.__token_info == 'ob<nu<open-brack':
                self.open_brack(line)
            if self.__token_info == 'cb<nu<clos-brack':
                right_count = self.close_brack(line)
                if not right_count:
                    return (0, "closed bracket doesn't match, line %s" % line_count)
        read_obj.close()
        if self.__bracket_count != 0:
            msg = 'At end of file open and closed brackets don\'t match\n'
            msg = msg + 'total number of brackets is %s' % self.__bracket_count
            return (0, msg)
        return (1, "brackets match!")
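The token format the checker expects can be read off the parser itself: the first 16 characters of each line carry the token info and line[-5:-1] holds a four-digit bracket number. A minimal self-contained run under that inferred format (assuming CheckBrackets from this module is in scope):

# Write two matching bracket tokens in the intermediate format.
with open('tokens.txt', 'w') as handle:
    handle.write('ob<nu<open-brack<0001\n')
    handle.write('cb<nu<clos-brack<0001\n')

checker = CheckBrackets(file='tokens.txt')
print(checker.check_brackets())   # -> (1, 'brackets match!')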
apache-2.0
overridelogic/bruces
src/bruces/webapp/api.py
import os
import re
import imp
import inspect
import mimetypes

from bruces import webapp
from bruces.webapp import route
from bruces.webapp import get_controller_cls
from bruces.exceptions import ProgramError

import bruces.api

@bruces.api.package("webapp")
@bruces.api.action
def dispatch(wsgi, controller, action, *args, **kwargs):
    """
    Dispatch a web application request.
    """
    if webapp.base is None:
        raise ProgramError("no web application base module defined")

    cls = get_controller_cls(controller)

    # Routing
    route_path = wsgi.path.rstrip("/")
    if route_path.startswith(webapp.path):
        route_path = route_path[len(webapp.path):]

    current_route = None
    for r in route.routes.values():
        if wsgi.environ["REQUEST_METHOD"] == r.http_method and controller == r.controller:
            m = re.search(r.expr, route_path)
            if m is not None:
                action = r.action
                args = []
                kwargs = dict(kwargs.items() + m.groupdict().items() + r.kwargs.items())
                current_route = r
                break

    # Execute
    with cls(wsgi, kwargs, current_route) as obj:
        obj(action, *args, **kwargs)
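The dispatch loop above matches the request path against each registered route's expr and merges the regex's named groups into the action's keyword arguments. A tiny standalone sketch of that named-group extraction (the route expression and path here are made-up examples, not part of bruces):

import re

expr = r'^/users/(?P<user_id>\d+)$'   # hypothetical route expression
m = re.search(expr, '/users/42')
if m is not None:
    kwargs = dict(m.groupdict())       # {'user_id': '42'} -> passed to the action
    print(kwargs)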
mit
Epoptes/epoptes
epoptes-client/lock_screen.py
#!/usr/bin/python3
# This file is part of Epoptes, http://epoptes.org
# Copyright 2010-2018 the Epoptes team, see AUTHORS.
# SPDX-License-Identifier: GPL-3.0-or-later
"""
Lock the screen.
"""
import sys

from _common import gettext as _
from gi.repository import Gdk, GdkPixbuf, GLib, Gtk


class LockScreen:
    """Lock the screen."""
    def __init__(self, from_main=False):
        self.backlock = None
        self.from_main = from_main
        self.frontview = None
        self.label = None

    def lock(self, msg, unlock_secs=None):
        """Lock the screen. Unlock after unlock_secs if it's not None."""
        screen = Gdk.Screen.get_default()
        swidth = screen.get_width()
        sheight = screen.get_height()
        smin = min(swidth, sheight)
        gtk_provider = Gtk.CssProvider()
        gtk_context = Gtk.StyleContext()
        gtk_context.add_provider_for_screen(
            screen, gtk_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
        gtk_provider.load_from_data(bytes("""
            * {{
                transition-property: color;
                transition-duration: 4s;
            }}
            window, GtkWindow {{
                background-color: black;
            }}
            label, GtkLabel {{
                font-size: {0:.0f}px;
            }}
            label#black, GtkLabel#black {{
                color: black;
            }}
            label#white, GtkLabel#white {{
                color: #e0e0e0;
            }}
        """.format(swidth / 70).encode()))
        backlock = Gtk.Window(type=Gtk.WindowType.POPUP)
        self.backlock = backlock
        backlock.resize(1, 1)
        frontview = Gtk.Window()
        self.frontview = frontview
        frontview.resize(swidth, sheight)
        box = Gtk.Box(
            orientation=Gtk.Orientation.VERTICAL, spacing=smin/12,
            halign=Gtk.Align.CENTER, valign=Gtk.Align.CENTER)
        image = Gtk.Image(pixbuf=GdkPixbuf.Pixbuf.new_from_file_at_size(
            'lock.svg', smin/3, smin/3))
        box.pack_start(image, False, False, 0)
        self.label = Gtk.Label(label=msg, name="black")
        box.pack_start(self.label, False, False, 0)
        frontview.add(box)
        backlock.show_all()
        frontview.show_all()
        frontview.set_keep_above(True)
        frontview.fullscreen()
        Gdk.beep()
        Gdk.keyboard_grab(backlock.get_window(), False, 0)
        # Transitions need an event to start
        GLib.timeout_add(100, self.do_transition)
        # While developing, to only lock the screen for e.g. 5 seconds, run:
        # ./lock-screen "" 5
        if unlock_secs is not None:
            GLib.timeout_add(unlock_secs*1000, self.unlock)

    def do_transition(self):
        """Change the label id, so that the fade in effect is started."""
        self.label.set_name("white")

    def unlock(self):
        """Unlock the screen. Also exit Gtk if called from main."""
        Gdk.keyboard_ungrab(0)
        self.backlock.destroy()
        self.frontview.destroy()
        if self.from_main:
            Gtk.main_quit()


def main():
    """Run the module from the command line."""
    if len(sys.argv) > 1 and sys.argv[1]:
        msg = sys.argv[1]
    else:
        msg = _("The screen is locked by a system administrator.")
    if len(sys.argv) > 2:
        unlock_secs = int(sys.argv[2])
    else:
        unlock_secs = None
    LockScreen(True).lock(msg, unlock_secs)
    Gtk.main()


if __name__ == '__main__':
    main()
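The fade-in relies on a CSS color transition that only starts once the widget's name changes after an initial timeout. A stripped-down sketch of just that trick, assuming a working GTK 3 session (window contents and durations are arbitrary, not Epoptes code):

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import GLib, Gtk

win = Gtk.Window()
label = Gtk.Label(label='fading in...', name='black')
win.add(label)

provider = Gtk.CssProvider()
provider.load_from_data(b"""
    * { transition-property: color; transition-duration: 4s; }
    window { background-color: black; }
    label#black { color: black; }
    label#white { color: #e0e0e0; }
""")
Gtk.StyleContext.add_provider_for_screen(
    win.get_screen(), provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)

win.show_all()
# Swapping the widget name after a short timeout starts the transition.
GLib.timeout_add(100, lambda: label.set_name('white'))
win.connect('destroy', Gtk.main_quit)
Gtk.main()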
gpl-3.0
rst2pdf/rst2pdf
rst2pdf/tests/input/sphinx-issue529/conf.py
# -*- coding: utf-8 -*-

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['rst2pdf.pdfbuilder']

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Foobar'
copyright = u'2009, Jason S'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'

# -- Options for sphinx.ext.todo extension -----------------------------------
todo_include_todos = True

# -- Options for PDF output --------------------------------------------------

# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author).
pdf_documents = [
    ('index', u'index', u'index', u'lorenzo'),
]

# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['sphinx']

# If false, no index is generated.
pdf_use_index = False

# If false, no coverpage is generated.
pdf_use_coverpage = False

pdf_invariant = True
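With this conf.py in place, the PDF builder registered by rst2pdf.pdfbuilder is normally driven through Sphinx. An equivalent programmatic invocation might look like this (source and output paths are illustrative):

# Roughly equivalent to running: sphinx-build -b pdf . _build/pdf
from sphinx.cmd.build import main as sphinx_main

sphinx_main(['-b', 'pdf', '.', '_build/pdf'])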
mit
FabianN/autopkg_recipies
MSOfficeUpdates/MSOffice2016URLandUpdateInfoProvider.py
#!/usr/bin/env python # # Copyright 2015 Allister Banks and Tim Sutton, # based on MSOffice2011UpdateInfoProvider by Greg Neagle # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Disabling 'no-env-member' for recipe processors #pylint:disable=e1101 """See docstring for MSOffice2016URLandUpdateInfoProvider class""" import plistlib import re import urllib2 from autopkglib import Processor, ProcessorError __all__ = ["MSOffice2016URLandUpdateInfoProvider"] # CULTURE_CODE defaulting to 'en-US' as the installers and updates seem to be # multilingual. CULTURE_CODE = "0409" BASE_URL = "https://officecdn.microsoft.com/pr/%s/OfficeMac/%s.xml" # These can be easily be found as "Application ID" in ~/Library/Preferences/com.microsoft.autoupdate2.plist on a # machine that has Microsoft AutoUpdate.app installed on it. # # Note that Skype, 'MSFB' has a '16' after it, AutoUpdate has a '03' after it while all the other products have '15' PROD_DICT = { 'Excel': {'id': 'XCEL15', 'path': '/Applications/Microsoft Excel.app'}, 'OneNote': {'id': 'ONMC15', 'path': '/Applications/Microsoft OneNote.app'}, 'Outlook': {'id': 'OPIM15', 'path': '/Applications/Microsoft Outlook.app'}, 'PowerPoint': {'id': 'PPT315', 'path': '/Applications/Microsoft PowerPoint.app'}, 'Word': {'id': 'MSWD15', 'path': '/Applications/Microsoft Word.app'}, 'SkypeForBusiness': {'id': 'MSFB16', 'path': '/Applications/Skype for Business.app'}, 'AutoUpdate': { 'id': 'MSau03', 'path': '/Library/Application Support/Microsoft/MAU2.0/Microsoft AutoUpdate.app' } } LOCALE_ID_INFO_URL = "https://msdn.microsoft.com/en-us/goglobal/bb964664.aspx" SUPPORTED_VERSIONS = ["latest", "latest-delta"] DEFAULT_VERSION = "latest" CHANNELS = { 'Production': 'C1297A47-86C4-4C1F-97FA-950631F94777', 'InsiderSlow': '1ac37578-5a24-40fb-892e-b89d85b6dfaa', 'InsiderFast': '4B2D7701-0A4F-49C8-B4CB-0C2D4043F51F', } DEFAULT_CHANNEL = "Production" class MSOffice2016URLandUpdateInfoProvider(Processor): """Provides a download URL for the most recent version of MS Office 2016.""" input_variables = { "locale_id": { "required": False, "default": "1033", "description": ( "Locale ID that determines the language " "that is retrieved from the metadata, currently only " "used by the update description. See %s " "for a list of locale codes. The default is en-US." % LOCALE_ID_INFO_URL) }, "product": { "required": True, "description": "Name of product to fetch, e.g. Excel.", }, "version": { "required": False, "default": DEFAULT_VERSION, "description": ("Update type to fetch. Supported values are: " "'%s'. Defaults to %s." % ("', '".join(SUPPORTED_VERSIONS), DEFAULT_VERSION)), }, "munki_required_update_name": { "required": False, "default": "", "description": ("If the update is a delta, a 'requires' key will be set " "according to the minimum version defined in the MS " "metadata. If this key is set, this name will be used " "for the required item. 
If unset, NAME will be used.") }, "channel": { "required": False, "default": DEFAULT_CHANNEL, "description": ("Update feed channel that will be checked for updates. " "Defaults to %s, acceptable values are either a custom " "UUID or one of: %s" % ( DEFAULT_CHANNEL, ", ".join(CHANNELS.keys()))) } } output_variables = { "additional_pkginfo": { "description": "Some pkginfo fields extracted from the Microsoft metadata.", }, "description": { "description": "Description of the update from the manifest, in the language " "given by the locale_id input variable.", }, "version": { "description": ("The version of the update as extracted from the Microsoft " "metadata.") }, "minimum_os_version": { "description": ("The minimum os version required by the update as extracted " "from the Microsoft metadata.") }, "minimum_version_for_delta": { "description": ("If this update is a delta, this value will be set to the " "minimum required application version to which this delta " "can be applied. Otherwise it will be an empty string.") }, "url": { "description": "URL to the latest installer.", }, } description = __doc__ min_delta_version = "" def sanity_check_expected_triggers(self, item): """Raises an exeception if the Trigger Condition or Triggers for an update don't match what we expect. Protects us if these change in the future.""" # MS currently uses "Registered File" placeholders, which get replaced # with the bundle of a given application ID. In other words, this is # the bundle version of the app itself. if not item.get("Trigger Condition") == ["and", "Registered File"]: raise ProcessorError( "Unexpected Trigger Condition in item %s: %s" % (item["Title"], item["Trigger Condition"])) def get_installs_items(self, item): """Attempts to parse the Triggers to create an installs item using only manifest data, making the assumption that CFBundleVersion and CFBundleShortVersionString are equal. 
Skip SkypeForBusiness as its xml does not contain a 'Trigger Condition'""" if self.env["product"] != 'SkypeForBusiness': self.sanity_check_expected_triggers(item) version = self.get_version(item) # Skipping CFBundleShortVersionString because it doesn't contain # anything more specific than major.minor (no build versions # distinguishing Insider builds for example) installs_item = { "CFBundleVersion": version, "path": PROD_DICT[self.env["product"]]['path'], "type": "application", } return [installs_item] def get_version(self, item): """Extracts the version of the update item.""" # If the 'Update Version' key exists we pull the "full" version string # easily from this if item.get("Update Version"): self.output( "Extracting version %s from metadata 'Update Version' key" % item["Update Version"]) return item["Update Version"] def get_installer_info(self): """Gets info about an installer from MS metadata.""" # Get the channel UUID, matching against a custom UUID if one is given channel_input = self.env.get("channel", DEFAULT_CHANNEL) rex = r"^([0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12})$" match_uuid = re.match(rex, channel_input) if not match_uuid and channel_input not in CHANNELS.keys(): raise ProcessorError( "'channel' input variable must be one of: %s or a custom " "uuid" % (", ".join(CHANNELS.keys()))) if match_uuid: channel = match_uuid.groups()[0] else: channel = CHANNELS[channel_input] base_url = BASE_URL % (channel, CULTURE_CODE + PROD_DICT[self.env["product"]]['id']) # Get metadata URL self.output("Requesting xml: %s" % base_url) req = urllib2.Request(base_url) # Add the MAU User-Agent, since MAU feed server seems to explicitly # block a User-Agent of 'Python-urllib/2.7' - even a blank User-Agent # string passes. req.add_header( "User-Agent", "Microsoft%20AutoUpdate/3.6.16080300 CFNetwork/760.6.3 Darwin/15.6.0 (x86_64)") try: fdesc = urllib2.urlopen(req) data = fdesc.read() fdesc.close() except BaseException as err: raise ProcessorError("Can't download %s: %s" % (base_url, err)) metadata = plistlib.readPlistFromString(data) item = {} # According to MS, update feeds for a given 'channel' will only ever # have two items: a full and a delta. Delta updates will have a # 'FullUpdaterLocation' key, so filter by the array according to # which item has that key. if self.env["version"] == "latest": item = [u for u in metadata if not u.get("FullUpdaterLocation")] elif self.env["version"] == "latest-delta": item = [u for u in metadata if u.get("FullUpdaterLocation")] if not item: raise ProcessorError("Could not find an applicable update in " "update metadata.") item = item[0] self.env["url"] = item["Location"] self.output("Found URL %s" % self.env["url"]) self.output("Got update: '%s'" % item["Title"]) # now extract useful info from the rest of the metadata that could # be used in a pkginfo pkginfo = {} # Get a copy of the description in our locale_id all_localizations = item.get("Localized") lcid = self.env["locale_id"] if lcid not in all_localizations: raise ProcessorError( "Locale ID %s not found in manifest metadata. Available IDs: " "%s. See %s for more details." % ( lcid, ", ".join(all_localizations.keys()), LOCALE_ID_INFO_URL)) manifest_description = all_localizations[lcid]['Short Description'] # Store the description in a separate output variable and in our pkginfo # directly. 
pkginfo["description"] = "<html>%s</html>" % manifest_description self.env["description"] = manifest_description # Minimum OS version key should exist always, but default to the current # minimum as of 16/11/03 pkginfo["minimum_os_version"] = item.get('Minimum OS', '10.10.5') installs_items = self.get_installs_items(item) if installs_items: pkginfo["installs"] = installs_items # Extra work to do if this is a delta updater if self.env["version"] == "latest-delta": try: rel_versions = item["Triggers"]["Registered File"]["VersionsRelative"] except KeyError: raise ProcessorError("Can't find expected VersionsRelative" "keys for determining minimum update " "required for delta update.") for expression in rel_versions: operator, ver_eval = expression.split() if operator == ">=": self.min_delta_version = ver_eval break if not self.min_delta_version: raise ProcessorError("Not able to determine minimum required " "version for delta update.") # Put minimum_update_version into installs item self.output("Adding minimum required version: %s" % self.min_delta_version) pkginfo["installs"][0]["minimum_update_version"] = \ self.min_delta_version required_update_name = self.env["NAME"] if self.env["munki_required_update_name"]: required_update_name = self.env["munki_required_update_name"] # Add 'requires' array pkginfo["requires"] = ["%s-%s" % (required_update_name, self.min_delta_version)] self.env["version"] = self.get_version(item) self.env["minimum_os_version"] = pkginfo["minimum_os_version"] self.env["minimum_version_for_delta"] = self.min_delta_version self.env["additional_pkginfo"] = pkginfo self.env["url"] = item["Location"] self.output("Additional pkginfo: %s" % self.env["additional_pkginfo"]) def main(self): """Get information about an update""" if self.env["version"] not in SUPPORTED_VERSIONS: raise ProcessorError("Invalid 'version': supported values are '%s'" % "', '".join(SUPPORTED_VERSIONS)) self.get_installer_info() if __name__ == "__main__": PROCESSOR = MSOffice2016URLandUpdateInfoProvider() PROCESSOR.execute_shell()
mit
lucventurini/mikado
Mikado/preparation/annotation_parser.py
import multiprocessing from ..parsers import parser_factory from ..parsers.bam_parser import BamParser from ..utilities.log_utils import create_queue_logger from ..utilities import overlap import logging import logging.handlers from .. import exceptions from sys import intern import rapidjson as json import msgpack import os from ..transcripts import Transcript from operator import itemgetter import random import struct import zlib __author__ = 'Luca Venturini' # TODO: found_ids can be refactored out, in preference of a unique ID per file which is used as part of the label # in doing this, we prevent requiring the user to rename their inputs as there would be no repeated naming. def __raise_redundant(row_id, name, label): if label == '': raise exceptions.RedundantNames( """{0} has already been found in another file but is present in {1}; this will cause unsolvable collisions. Please rerun preparation using labels to tag each file.""".format( row_id, name )) else: raise exceptions.RedundantNames( """"{0} has already been found in another file but is present in {1}; this will cause unsolvable collisions. This happened even if you specified label {2}; please change them in order to ensure that no collisions happen.""".format(row_id, name, label)) def __raise_invalid(row_id, name, label): raise exceptions.InvalidAssembly( """{0} is present multiple times in {1}{2}. This breaks the input parsing. Please ensure that the file is properly formatted, with unique IDs.""".format( row_id, name, "(label: {0})".format(label) if label != '' else "")) def _create_split_tobject(tobject: dict, start, stop, num: int): """ Function to create a subset of the transcript, by keeping only the relevant exons :param tobject: dictionary of the features :param segments: the segments :param tid: original name :param num: progressive numbering of the transcript :return: """ newobj = tobject.copy() foundany = False for key in tobject: if key == "features": newobj["features"] = dict.fromkeys(tobject["features"]) for feature in newobj["features"]: newobj["features"][feature] = [] for ff in tobject["features"][feature]: if overlap((start, stop), (ff[0], ff[1]), positive=True, flank=0) > 0: foundany = True newobj["features"][feature].append(ff[:]) else: newobj[key] = tobject[key] newobj["tid"] = newobj["tid"] + f"_isplit.{num}" newobj["parent"] = "{}.gene".format(newobj["tid"]) return newobj, newobj["tid"], start, stop def _evaluate_tid(tid, tobject, logger, min_length, max_intron): if "exon" in tobject["features"]: segments = tobject["features"]["exon"][:] elif "CDS" in tobject["features"]: segments = tobject["features"]["CDS"][:] for feature in tobject["features"]: if "utr" in feature.lower(): segments.extend(tobject["features"][feature]) else: continue segments = sorted(segments, key=itemgetter(0)) # Now check the exons exons = [] if len(segments) == 0: logger.warning("No valid exon feature for %s, continuing", tid) return [] elif len(segments) == 1: exons = segments[0] else: current = segments[0] for pos in range(1, len(segments)): segment = segments[pos] if segment[0] > current[1] + 1: exons.append(current) current = segment elif segment[0] == current[1] + 1: current = (current[0], segment[1], None) else: logger.warning("Overlapping segments found in %s. 
Discarding it", tid) return [] exons.append(current) tobject["features"]["exon"] = exons[:] else: raise KeyError(tobject["features"]) segments = sorted(segments, key=itemgetter(0)) tlength = 0 start, end = segments[0][0], segments[-1][1] introns = [] num_segments = len(segments) for pos, segment in enumerate(segments): if pos < num_segments - 1: later = segments[pos + 1] intron = later[0] - (segment[1] + 1) introns.append((pos, intron)) tlength += segment[1] + 1 - segment[0] # Discard transcript under a certain size if tlength < min_length: if tobject["is_reference"] is True: logger.info("%s retained even if it is too short (%d) as it is a reference transcript.", tid, tlength) else: logger.info("Discarding %s because its size (%d) is under the minimum of %d", tid, tlength, min_length) return [] # Discard transcripts with introns over the limit over = [intron for intron in introns if intron[1] > max_intron] if len(over) > 0: if tobject["is_reference"] is True: logger.info( "%s retained even if has %s introns the limit (%d, max: %d) as it is a reference transcript.", tid, len(over), max([_[1] for _ in over]), max_intron) return [(tobject, tid, start, end)] else: logger.info( "Splitting %s into %d transcripts because it has %d introns over the maximum of %d (longest: %d)", tid, len(over) + 1, len(over), max_intron, max([_[1] for _ in over])) splitted = [] current = 0 for num, ointron in enumerate(over): final_pos = ointron[0] segs = segments[current:final_pos+1][:] current = final_pos + 1 start, stop = segs[0][0], segs[-1][1] tlength = sum([_[1] + 1 - _[0] for _ in segs]) if tlength < min_length: logger.info("Discarding fragment %s of %s because its length is beneath the minimum of %s (%s)", num, tid, min_length, tlength) continue else: splitted.append(_create_split_tobject(tobject, start, stop, num)) segs = segments[current:] start, stop = segs[0][0], segs[-1][1] tlength = sum([_[1] + 1 - _[0] for _ in segs]) if tlength < min_length: logger.info("Discarding fragment %s of %s because its length is beneath the minimum of %s (%s)", len(over), tid, min_length, tlength) else: splitted.append(_create_split_tobject(tobject, start, stop, len(over))) return splitted else: return [(tobject, tid, start, end)] def load_into_storage(shelf_name, exon_lines, min_length, logger, strip_cds=True, max_intron=3*10**5): """Function to load the exon_lines dictionary into the temporary storage.""" if os.path.exists(shelf_name) or any(_.startswith(os.path.basename(shelf_name)) for _ in os.listdir(os.path.dirname(shelf_name))): logger.error("Shelf %s already exists (maybe from a previous aborted run?), dropping its contents", shelf_name) for _ in (_ for _ in os.listdir(os.path.dirname(shelf_name)) if _.startswith(os.path.basename(shelf_name))): if os.path.exists(_): os.remove(_) shelf = open(shelf_name, "wb") rows = [] logger.warning("Max intron: %s", max_intron) for tid in exon_lines: if "features" not in exon_lines[tid]: raise KeyError("{0}: {1}\n{2}".format(tid, "features", exon_lines[tid])) if ("exon" not in exon_lines[tid]["features"] or len(exon_lines[tid]["features"]["exon"]) == 0): # Match-like things if "match" in exon_lines[tid]["features"]: if len(exon_lines[tid]["features"]["match"]) > 1: logger.warning("Invalid features for %s, skipping.", tid) continue exon_lines[tid]["features"]["exon"] = [exon_lines[tid]["features"]["match"][0]] logger.warning("Inferring that %s is a mono-exonic transcript-match: (%s, %d-%d)", tid, exon_lines[tid]["chrom"], exon_lines[tid]["features"]["exon"][0][0], 
exon_lines[tid]["features"]["exon"][0][1]) del exon_lines[tid]["features"]["match"] elif (strip_cds is False and "CDS" in exon_lines[tid]["features"] and len(exon_lines[tid]["features"]["CDS"]) > 0): pass else: logger.warning("No valid exon feature for %s, continuing", tid) continue elif "match" in exon_lines[tid]["features"] and "exon" in exon_lines[tid]["features"]: del exon_lines[tid]["features"]["match"] for values, tid, start, end in _evaluate_tid(tid, exon_lines[tid], logger, max_intron=max_intron, min_length=min_length): chrom = values["chrom"] assert chrom is not None strand = values["strand"] if strand is None: strand = "." logger.debug("Inserting %s into shelf %s", tid, shelf_name) values = zlib.compress(msgpack.dumps(values)) write_start = shelf.tell() write_length = shelf.write(values) row = (chrom.encode(), start, end, strand.encode(), tid.encode(), write_start, write_length) rows.append(row) logger.warning("Finished packing rows for %s", shelf_name) return rows def load_from_gff(shelf_name, gff_handle, label, found_ids, logger, min_length=0, max_intron=3*10**5, is_reference=False, exclude_redundant=False, strip_cds=False, strand_specific=False): """ Method to load the exon lines from GFF3 files. :param shelf_name: the name of the shelf DB to use. :param gff_handle: The handle for the GTF to be parsed. :param label: label to be attached to all transcripts. :type label: str :param found_ids: set of IDs already found in other files. :type found_ids: set :param logger: a logger to be used to pass messages :type logger: logging.Logger :param min_length: minimum length for a cDNA to be considered as valid :type min_length: int :param max_intron: maximum intron length for a cDNA to be considered as valid :type max_intron: int :param strip_cds: boolean flag. If true, all CDS lines will be ignored. :type strip_cds: bool :param strand_specific: whether the assembly is strand-specific or not. :type strand_specific: bool :param is_reference: boolean. If set to True, the transcript will always be retained. :type is_reference: bool :param exclude_redundant: boolean. If set to True, fully redundant transcripts will be removed. 
:type exclude_redundant: bool :return: """ exon_lines = dict() strip_cds = strip_cds and (not is_reference) if strand_specific is not True and is_reference is True: strand_specific = True transcript2genes = dict() new_ids = set() to_ignore = set() for row in gff_handle: if row.feature == "protein": continue elif row.is_transcript is True or row.feature == "match": if label != '': row.id = "{0}_{1}".format(label, row.id) row.source = label if row.id in found_ids: __raise_redundant(row.id, gff_handle.name, label) elif row.id in exon_lines: # This might sometimes happen in GMAP logger.warning( "Multiple instance of %s found, skipping any subsequent entry", row.id) to_ignore.add(row.id) continue # # if row.id not in exon_lines: exon_lines[row.id] = dict() exon_lines[row.id]["source"] = row.source if row.parent: transcript2genes[row.id] = row.parent[0] else: transcript2genes[row.id] = row.id assert row.id is not None if row.id in found_ids: __raise_redundant(row.id, gff_handle.name, label) exon_lines[row.id]["attributes"] = row.attributes.copy() exon_lines[row.id]["chrom"] = row.chrom exon_lines[row.id]["strand"] = row.strand exon_lines[row.id]["tid"] = row.transcript or row.id exon_lines[row.id]["parent"] = "{}.gene".format(row.id) exon_lines[row.id]["features"] = dict() # Here we have to add the match feature as an exon, in case it is the only one present if row.feature == "match": exon_lines[row.id]["features"][row.feature] = [] exon_lines[row.id]["features"][row.feature].append((row.start, row.end, row.phase)) exon_lines[row.id]["strand_specific"] = strand_specific exon_lines[row.id]["is_reference"] = is_reference exon_lines[row.id]["exclude_redundant"] = exclude_redundant continue elif row.is_exon is True: if not row.is_cds or (row.is_cds is True and strip_cds is False): if len(row.parent) == 0 and "cDNA_match" == row.feature: if label == '': __tid = row.id else: __tid = "{0}_{1}".format(label, row.id) row.parent = __tid transcript2genes[__tid] = "{}_match".format(__tid) row.feature = "exon" elif row.feature == "match_part": if label == '': __tid = row.parent[0] else: __tid = "{0}_{1}".format(label, row.parent[0]) row.parent = __tid transcript2genes[__tid] = "{}_match".format(__tid) row.feature = "exon" elif label != '': row.transcript = ["{0}_{1}".format(label, tid) for tid in row.transcript] parents = row.transcript[:] for tid in parents: if tid in found_ids: __raise_redundant(tid, gff_handle.name, label) elif tid in to_ignore: continue if tid not in exon_lines and tid in transcript2genes: exon_lines[tid] = dict() exon_lines[tid]["attributes"] = row.attributes.copy() if label: exon_lines[tid]["source"] = label else: exon_lines[tid]["source"] = row.source exon_lines[tid]["chrom"] = row.chrom exon_lines[tid]["strand"] = row.strand exon_lines[tid]["features"] = dict() exon_lines[tid]["tid"] = tid exon_lines[tid]["parent"] = transcript2genes[tid] exon_lines[tid]["strand_specific"] = strand_specific exon_lines[tid]["is_reference"] = is_reference exon_lines[tid]["exclude_redundant"] = exclude_redundant elif tid not in exon_lines and tid not in transcript2genes: continue else: if "exon_number" in row.attributes: del row.attributes["exon_number"] if (exon_lines[tid]["chrom"] != row.chrom or exon_lines[tid]["strand"] != row.strand): __raise_invalid(tid, gff_handle.name, label) exon_lines[tid]["attributes"].update(row.attributes) if row.feature not in exon_lines[tid]["features"]: exon_lines[tid]["features"][row.feature] = [] exon_lines[tid]["features"][row.feature].append((row.start, row.end, 
row.phase)) new_ids.add(tid) else: continue gff_handle.close() logger.info("Starting to load %s", shelf_name) rows = load_into_storage(shelf_name, exon_lines, logger=logger, min_length=min_length, strip_cds=strip_cds, max_intron=max_intron) logger.info("Finished parsing %s", gff_handle.name) return new_ids, rows def load_from_gtf(shelf_name, gff_handle, label, found_ids, logger, min_length=0, max_intron=3*10**5, is_reference=False, exclude_redundant=False, strip_cds=False, strand_specific=False): """ Method to load the exon lines from GTF files. :param shelf_name: the name of the shelf DB to use. :param gff_handle: The handle for the GTF to be parsed. :param label: label to be attached to all transcripts. :type label: str :param found_ids: set of IDs already found in other files. :type found_ids: set :param logger: a logger to be used to pass messages :type logger: logging.Logger :param min_length: minimum length for a cDNA to be considered as valid :type min_length: int :param max_intron: maximum intron length for a cDNA to be considered as valid :type max_intron: int :param strip_cds: boolean flag. If true, all CDS lines will be ignored. :type strip_cds: bool :param strand_specific: whether the assembly is strand-specific or not. :type strand_specific: bool :param is_reference: boolean. If set to True, the transcript will always be retained. :type is_reference: bool :param exclude_redundant: boolean. If set to True, the transcript will be marked for potential redundancy removal. :type exclude_redundant: bool :return: """ exon_lines = dict() strip_cds = strip_cds and (not is_reference) strand_specific = strand_specific or is_reference # Reduce memory footprint [intern(_) for _ in ["chrom", "features", "strand", "attributes", "tid", "parent", "attributes"]] new_ids = set() to_ignore = set() for row in gff_handle: if row.is_transcript is True: if label != '': row.transcript = "{0}_{1}".format(label, row.transcript) if row.transcript in found_ids: __raise_redundant(row.transcript, gff_handle.name, label) if row.transcript in exon_lines: logger.warning( "Multiple instance of %s found, skipping any subsequent entry", row.id) to_ignore.add(row.id) continue # __raise_invalid(row.transcript, gff_handle.name, label) if row.transcript not in exon_lines: exon_lines[row.transcript] = dict() if label: exon_lines[row.transcript]["source"] = label else: exon_lines[row.transcript]["source"] = row.source exon_lines[row.transcript]["features"] = dict() exon_lines[row.transcript]["chrom"] = row.chrom exon_lines[row.transcript]["strand"] = row.strand exon_lines[row.transcript]["attributes"] = row.attributes.copy() exon_lines[row.transcript]["tid"] = row.id exon_lines[row.transcript]["parent"] = "{}.gene".format(row.id) exon_lines[row.transcript]["strand_specific"] = strand_specific exon_lines[row.transcript]["is_reference"] = is_reference exon_lines[row.transcript]["exclude_redundant"] = exclude_redundant if "exon_number" in exon_lines[row.transcript]["attributes"]: del exon_lines[row.transcript]["attributes"]["exon_number"] continue if row.is_exon is False or (row.is_cds is True and strip_cds is True): continue if label != '': row.transcript = "{0}_{1}".format(label, row.transcript) if row.transcript in found_ids: __raise_redundant(row.transcript, gff_handle.name, label) assert row.transcript is not None if row.transcript not in exon_lines: exon_lines[row.transcript] = dict() if label: exon_lines[row.transcript]["source"] = label else: exon_lines[row.transcript]["source"] = row.source 
exon_lines[row.transcript]["features"] = dict() exon_lines[row.transcript]["chrom"] = row.chrom exon_lines[row.transcript]["strand"] = row.strand exon_lines[row.transcript]["exon"] = [] exon_lines[row.transcript]["attributes"] = row.attributes.copy() exon_lines[row.transcript]["tid"] = row.transcript exon_lines[row.transcript]["parent"] = "{}.gene".format(row.transcript) exon_lines[row.transcript]["strand_specific"] = strand_specific exon_lines[row.transcript]["is_reference"] = is_reference exon_lines[row.transcript]["exclude_redundant"] = exclude_redundant else: if row.transcript in to_ignore: continue if "exon_number" in row.attributes: del row.attributes["exon_number"] if ("chrom" not in exon_lines[row.transcript] or exon_lines[row.transcript]["chrom"] != row.chrom or exon_lines[row.transcript]["strand"] != row.strand): __raise_invalid(row.transcript, gff_handle.name, label) exon_lines[row.transcript]["attributes"].update(row.attributes) if row.feature not in exon_lines[row.transcript]["features"]: exon_lines[row.transcript]["features"][row.feature] = [] exon_lines[row.transcript]["features"][row.feature].append((row.start, row.end, row.phase)) new_ids.add(row.transcript) gff_handle.close() logger.info("Starting to load %s", shelf_name) rows = load_into_storage(shelf_name, exon_lines, logger=logger, min_length=min_length, strip_cds=strip_cds, max_intron=max_intron) logger.info("Finished parsing %s", gff_handle.name) return new_ids, rows def load_from_bed12(shelf_name, gff_handle, label, found_ids, logger, min_length=0, max_intron=3*10**5, is_reference=False, exclude_redundant=False, strip_cds=False, strand_specific=False): """ Method to load the exon lines from GTF files. :param shelf_name: the name of the shelf DB to use. :param gff_handle: The handle for the GTF to be parsed. :param label: label to be attached to all transcripts. :type label: str :param found_ids: set of IDs already found in other files. :type found_ids: set :param logger: a logger to be used to pass messages :type logger: logging.Logger :param min_length: minimum length for a cDNA to be considered as valid :type min_length: int :param max_intron: maximum intron length for a cDNA to be considered as valid :type max_intron: int :param strip_cds: boolean flag. If true, all CDS lines will be ignored. :type strip_cds: bool :param strand_specific: whether the assembly is strand-specific or not. :type strand_specific: bool :param is_reference: boolean. If set to True, the transcript will always be retained. :type is_reference: bool :param exclude_redundant: boolean. If set to True, the transcript will be marked for potential redundancy removal. 
:type exclude_redundant: bool :return: """ exon_lines = dict() strip_cds = strip_cds and (not is_reference) strand_specific = strand_specific or is_reference # Reduce memory footprint [intern(_) for _ in ["chrom", "features", "strand", "attributes", "tid", "parent", "attributes"]] new_ids = set() to_ignore = set() for row in gff_handle: # Each row is a transcript transcript = Transcript(row) if label != '': transcript.id = "{0}_{1}".format(label, transcript.id) if transcript.id in found_ids: __raise_redundant(transcript.id, gff_handle.name, label) if transcript.id in exon_lines: logger.warning( "Multiple instance of %s found, skipping any subsequent entry", row.id) to_ignore.add(row.id) continue else: exon_lines[transcript.id] = dict() if label: exon_lines[transcript.id]["source"] = label else: exon_lines[transcript.id]["source"] = gff_handle.name # BED12 files have no source exon_lines[transcript.id]["features"] = dict() exon_lines[transcript.id]["chrom"] = transcript.chrom exon_lines[transcript.id]["strand"] = transcript.strand # Should deal with GFFRead style input and BAM exon_lines[transcript.id]["attributes"] = transcript.attributes exon_lines[transcript.id]["tid"] = transcript.id exon_lines[transcript.id]["parent"] = "{}.gene".format(transcript.id) exon_lines[transcript.id]["strand_specific"] = strand_specific exon_lines[transcript.id]["is_reference"] = is_reference exon_lines[transcript.id]["exclude_redundant"] = exclude_redundant exon_lines[transcript.id]["features"]["exon"] = [ (exon[0], exon[1]) for exon in transcript.exons ] if transcript.is_coding and not strip_cds: exon_lines[transcript.id]["features"]['CDS'] = [ (exon[0], exon[1]) for exon in transcript.combined_cds ] exon_lines[transcript.id]["features"]["UTR"] = [ (exon[0], exon[1]) for exon in transcript.five_utr + transcript.three_utr ] new_ids.add(transcript.id) gff_handle.close() rows = load_into_storage(shelf_name, exon_lines, logger=logger, min_length=min_length, strip_cds=strip_cds, max_intron=max_intron) logger.info("Finished parsing %s", gff_handle.name) return new_ids, rows def load_from_bam(shelf_name: str, gff_handle: BamParser, label: str, found_ids: set, logger: logging.Logger, min_length=0, max_intron=3*10**5, is_reference=False, exclude_redundant=False, strip_cds=False, strand_specific=False): """ Method to load the exon lines from BAM files. :param shelf_name: the name of the shelf DB to use. :param gff_handle: The handle for the BAM to be parsed. This handle is BamParser with a file attached to read from. :param label: label to be attached to all transcripts. :type label: str :param found_ids: set of IDs already found in other files. :type found_ids: set :param logger: a logger to report any messages :type logger: logging.Logger :param min_length: minimum length for a cDNA to be considered as valid :type min_length: int :param max_intron: maximum intron length for a cDNA to be considered as valid :type max_intron: int :param strip_cds: boolean flag. If true, all CDS lines will be ignored. :type strip_cds: bool :param strand_specific: whether the input data is strand-specific or not. :type strand_specific: bool :param is_reference: boolean. If set to True, the transcript will always be retained. :type is_reference: bool :param exclude_redundant: boolean. If set to True, the transcript will be marked for potential redundancy removal. 
:type exclude_redundant: bool :return: """ return load_from_bed12(shelf_name, gff_handle, label, found_ids, logger, min_length=min_length, max_intron=max_intron, is_reference=is_reference,exclude_redundant=exclude_redundant, strip_cds=strip_cds, strand_specific=strand_specific) loaders = {"gtf": load_from_gtf, "gff": load_from_gff, "gff3": load_from_gff, "bed12": load_from_bed12, "bed": load_from_bed12, "bam": load_from_bam} # Chrom, start, end, strand, Tid, write start, write length # 100 chars, unsigned Long, unsigned Long, one char, 100 chars, unsigned Long, unsigned Long _row_struct_str = ">1000sLLc1000sLLH" row_struct = struct.Struct(_row_struct_str) row_struct_size = struct.calcsize(_row_struct_str) class AnnotationParser(multiprocessing.Process): def __init__(self, submission_queue: multiprocessing.JoinableQueue, return_queue: multiprocessing.JoinableQueue, logging_queue: multiprocessing.JoinableQueue, identifier: int, min_length=0, max_intron=3*10**5, log_level="WARNING", seed=None, strip_cds=False): super().__init__() if seed is not None: # numpy.random.seed(seed % (2 ** 32 - 1)) random.seed(seed % (2 ** 32 - 1)) else: # numpy.random.seed(None) random.seed(None) self.submission_queue = submission_queue self.return_queue = return_queue self.min_length = min_length self.max_intron = max_intron self.__strip_cds = strip_cds self.logging_queue = logging_queue self.log_level = log_level self.__identifier = identifier self.name = "AnnotationParser-{0}".format(self.identifier) self.logger = None self.handler = None self.logger = logging.getLogger(self.name) create_queue_logger(self, prefix="prepare") # self.logger.warning("Started process %s", self.name) def __getstate__(self): state = self.__dict__.copy() for key in ("logger", "handler", "_log_handler"): if key in state: del state[key] return state def __setstate__(self, state): self.__dict__.update(state) create_queue_logger(self) def run(self): found_ids = set() self.logger.debug("Starting to listen to the queue") counter = 0 while True: results = self.submission_queue.get() try: label, handle, strand_specific, is_reference,\ exclude_redundant, file_strip_cds, shelf_name, shelf_index = results except ValueError as exc: raise ValueError("{}.\tValues: {}".format(exc, ", ".join([str(_) for _ in results]))) if handle == "EXIT": self.submission_queue.put(results) break counter += 1 self.logger.debug("Received %s (label: %s; SS: %s, shelf_name: %s)", handle, label, strand_specific, shelf_name) try: gff_handle = parser_factory(handle) loader = loaders.get(gff_handle.__annot_type__, None) if loader is None: raise ValueError("Invalid file type: {}".format(gff_handle.name)) if file_strip_cds is True: file_strip_cds = True else: file_strip_cds = self.__strip_cds new_ids, new_rows = loader(shelf_name, gff_handle, label, found_ids, self.logger, min_length=self.min_length, max_intron=self.max_intron, strip_cds=file_strip_cds and not is_reference, is_reference=is_reference, exclude_redundant=exclude_redundant, strand_specific=strand_specific) if len(new_ids) == 0: raise exceptions.InvalidAssembly( "No valid transcripts found in {0}{1}!".format( handle, " (label: {0})".format(label) if label != "" else "" )) # Now convert the rows into structs. 
self.logger.debug("Packing %d rows of %s", len(new_rows), label) [self.return_queue.put_nowait((*row, shelf_index)) for row in new_rows] self.logger.debug("Packed %d rows of %s", len(new_rows), label) except (exceptions.InvalidAssembly, exceptions.InvalidParsingFormat) as exc: self.logger.exception("Invalid file: %s. Skipping it", handle) self.logger.exception(exc) load_into_storage(shelf_name, [], self.min_length, self.logger, strip_cds=True, max_intron=3 * 10 ** 5) [self.return_queue.put_nowait((*row, shelf_index)) for row in []] continue except Exception as exc: self.logger.exception(exc) raise self.return_queue.put_nowait("FINISHED") @property def identifier(self): """ A numeric value that identifies the process uniquely. :return: """ return self.__identifier
(license of the file above: lgpl-3.0)

===== oduwsdl/ipwb :: ipwb/backends.py =====
import dataclasses
from typing import Optional
from urllib.parse import urlparse

import ipfshttpclient
import requests

from ipwb import util


@dataclasses.dataclass(frozen=True)
class BackendError(Exception):
    backend_name: str

    def __str__(self):
        return 'Cannot load index file from {self.backend_name}.'.format(
            self=self,
        )


def format_ipfs_cid(path: str) -> Optional[str]:
    """Format IPFS CID properly."""
    if path.startswith('Qm'):
        return path
    elif path.startswith('ipfs://'):
        return path.replace('ipfs://', '')


def fetch_ipfs_index(path: str) -> Optional[str]:
    """Fetch CDXJ file content from IPFS by hash."""
    ipfs_hash = format_ipfs_cid(path)

    if ipfs_hash is None:
        return None

    try:
        with ipfshttpclient.connect(util.IPFSAPI_MUTLIADDRESS) as client:
            # Bug fix: cat the normalized hash rather than the raw path, so
            # that ipfs://Qm... locations resolve correctly.
            return client.cat(ipfs_hash).decode('utf-8')
    except ipfshttpclient.exceptions.StatusError as err:
        raise BackendError(backend_name='ipfs') from err


def fetch_web_index(path: str) -> Optional[str]:
    """Fetch CDXJ file content from a URL."""
    scheme = urlparse(path).scheme
    if not scheme:
        return None

    try:
        return requests.get(path).text
    except (
        requests.ConnectionError,
        requests.HTTPError,
    ) as err:
        raise BackendError(backend_name='web') from err


def fetch_local_index(path: str) -> str:
    """Fetch CDXJ index contents from a file on local disk."""
    with open(path, 'r') as f:
        return f.read()


def get_web_archive_index(path: str) -> str:
    """
    Based on path, choose appropriate backend and fetch the file contents.
    """
    # TODO right now, every backend is just a function which returns contents
    # of a CDXJ file as string. In the future, however, backends will be
    # probably represented as classes with much more sophisticated methods
    # of manipulating the archive index records.

    # TODO also, it will be possible to choose a backend and configure it;
    # whereas right now we choose a backend automatically based on the given
    # path itself.

    # Maybe it is an IPFS address?
    response = fetch_ipfs_index(path)
    if response is not None:
        return response

    # Or a traditional Web address?
    response = fetch_web_index(path)
    if response is not None:
        return response

    # Okay, this is probably a file on local disk
    response = fetch_local_index(path)
    if response is not None:
        return response

    raise ValueError((
        f'Unknown format of index file location: {path}. Please provide '
        f'a valid local path, HTTP or FTP URL, or an IPFS QmHash.'
    ))
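# A short usage sketch for the backend chain above. The index location is
# hypothetical; BackendError and FileNotFoundError are the failure modes a
# caller would reasonably guard against.
from ipwb.backends import BackendError, get_web_archive_index

try:
    cdxj = get_web_archive_index('/tmp/index.cdxj')  # CID, URL, or local path
    print(len(cdxj.splitlines()), 'records')
except (BackendError, FileNotFoundError) as err:
    print('could not load index:', err)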
(license of the file above: mit)

===== mzmttks/miteteyo :: server/app.py =====
from flask import jsonify, Flask, request
from pymongo import MongoClient
import os
import pprint

app = Flask(__name__)
client = MongoClient(os.environ["MONGOLAB_URI"])
db = client["heroku_gw4w78g9"]
col = db["locations"]
print(col)


@app.route('/location', methods=["POST"])
def addLocation():
    try:
        data = request.json
    except Exception:
        data = None
    # request.json may also return None instead of raising; treat both the
    # same so that malformed bodies yield a 400 rather than a server error.
    if data is None:
        ret = jsonify({"msg": "JSON parsing failed"})
        ret.status_code = 400
        return ret

    keys = ["latitude", "longitude", "userid", "utcTime"]
    for key in keys:
        if key not in data:
            ret = jsonify({"msg": "Mandatory key %s is not found" % key})
            ret.status_code = 400
            return ret

    col.insert_one(data)
    return "ok"


@app.route('/userid')
def getUserid():
    userids = col.distinct("userid")
    return jsonify({"userids": userids})


@app.route('/userid/<userid>')
def getLocations(userid):
    locs = []
    for d in col.find({"userid": userid}):
        del d["_id"]
        locs.append(d)
    return jsonify({"locations": locs})


@app.route('/')
def hello_world():
    locs = [d for d in col.find({})]
    return "<pre>" + pprint.pformat(locs) + "</pre>"


if __name__ == '__main__':
    app.run(debug=True)
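# A client-side sketch for the API above, assuming the server runs locally
# on Flask's default port (host, port, and the sample payload are
# assumptions, not part of the original app).
import requests

BASE = 'http://127.0.0.1:5000'

resp = requests.post(BASE + '/location', json={
    'latitude': 35.68,
    'longitude': 139.76,
    'userid': 'alice',
    'utcTime': '2017-01-01T00:00:00Z',
})
print(resp.status_code, resp.text)                  # 200 "ok" on success

print(requests.get(BASE + '/userid').json())        # {"userids": [...]}
print(requests.get(BASE + '/userid/alice').json())  # {"locations": [...]}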
(license of the file above: mit)

===== zmlabe/IceVarFigs :: Scripts/SeaIce/plot_AMSR2_SIC_region.py =====
""" Plots JAXA AMSR2 3.125 km (UHH-Processed) Sea Ice Concentration Data Source : http://osisaf.met.no/p/ice/ Author : Zachary Labe Date : 27 February 2017 """ from netCDF4 import Dataset import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap import urllib.request as UL import numpy as np import datetime import calendar as cal import gzip import nclcmaps as ncm import math import cmocean ### Directory and time directory = './Data/' directoryfigure = './Figures/' now = datetime.datetime.now() currentmn = str(now.month) if now.day == 1: currentdy = str(cal.monthrange(now.year,now.month-1)[1]) currentmn = str(now.month-1) else: currentdy = str(now.day-1) if int(currentdy) < 10: currentdy = '0' + currentdy currentyr = str(now.year) if int(currentmn) < 10: currentmn = '0' + currentmn currenttime = currentmn + '_' + str(currentdy) + '_' + currentyr titletime = currentmn + '/' + str(currentdy) + '/' + currentyr print('\n' 'Current Time = %s' '\n' % titletime) for i in range(24,25): currentdy = str(i+1) currentmn = '03' if int(currentdy) < 10: currentdy = '0' + currentdy currentyr = '2018' currenttime = currentmn + '_' + str(currentdy) + '_' + currentyr titletime = currentmn + '/' + str(currentdy) + '/' + currentyr ### Pick data set icedataset = 'AMSR2' if icedataset == 'AMSR2': url = 'ftp://ftp-projects.cen.uni-hamburg.de/seaice/AMSR2/3.125km/' filename = 'Arc_%s%s%s_res3.125_pyres.nc.gz' % (currentyr,currentmn,currentdy) filenameout = 'Arc_AMSR2_SIC.nc' UL.urlretrieve(url + filename, directory + filename) inF = gzip.open(directory + filename, 'rb') outF = open(directory + filenameout, 'wb') outF.write( inF.read() ) inF.close() outF.close() data = Dataset(directory + filenameout) ice = data.variables['sea_ice_concentration'][:] lat = data.variables['latitude'][:] lon = data.variables['longitude'][:] data.close() ice = np.asarray(np.squeeze(ice/100.)) print('Completed: Data read!') ice[np.where(ice <= 0.15)] = np.nan ice[np.where((ice >= 0.999) & (ice <= 1))] = 0.999 ice[np.where(ice > 1)] = np.nan ice = ice*100. print('Completed: Ice masked!') plt.rc('text',usetex=True) plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) plt.rc('savefig',facecolor='black') plt.rc('axes',edgecolor='white') plt.rc('xtick',color='white') plt.rc('ytick',color='white') plt.rc('axes',labelcolor='white') plt.rc('axes',facecolor='black') def setcolor(x, color): for m in x: for t in x[m][1]: t.set_color(color) fig = plt.figure() ax = fig.add_subplot(111) ### Enter lat/lon region = 'bering' if region == 'kara': # Kara Sea latmin = 67 latmax = 87 lonmin = 20 lonmax = 90 elif region == 'beaufort': # Beaufort Sea latmin = 64 latmax = 87 lonmin = 180 lonmax = 240 elif region == 'bering': # Bering/Chukchi Sea/Okhotsk latmin = 56 latmax = 75 lonmin = 166 lonmax = 210 elif region == 'greenland': # Greenland latmin = 55 latmax = 89.5 lonmin = 280 lonmax = 395 elif region == 'pacific': # Central Arctic latmin = 69 latmax = 89.99 lonmin = 160 lonmax = 250 elif region == 'svalbard': latmin = 73 latmax = 86 lonmin = 340 lonmax = 420 elif region == 'GreenlandSea': latmin = 74 latmax = 88 lonmin = 330 lonmax = 410 else: ValueError('Wrong region listed!') def polar_stere(lon_w, lon_e, lat_s, lat_n, **kwargs): '''Returns a Basemap object (NPS/SPS) focused in a region. lon_w, lon_e, lat_s, lat_n -- Graphic limits in geographical coordinates. W and S directions are negative. **kwargs -- Aditional arguments for Basemap object. ''' lon_0 = lon_w + (lon_e - lon_w) / 2. 
ref = lat_s if abs(lat_s) > abs(lat_n) else lat_n lat_0 = math.copysign(90., ref) proj = 'npstere' if lat_0 > 0 else 'spstere' prj = Basemap(projection=proj, lon_0=lon_0, lat_0=lat_0, boundinglat=0, resolution='l') #prj = pyproj.Proj(proj='stere', lon_0=lon_0, lat_0=lat_0) lons = [lon_w, lon_e, lon_w, lon_e, lon_0, lon_0] lats = [lat_s, lat_s, lat_n, lat_n, lat_s, lat_n] x, y = prj(lons, lats) ll_lon, ll_lat = prj(min(x), min(y), inverse=True) ur_lon, ur_lat = prj(max(x), max(y), inverse=True) return Basemap(projection='stere', lat_0=lat_0, lon_0=lon_0, llcrnrlon=ll_lon, llcrnrlat=ll_lat, urcrnrlon=ur_lon, urcrnrlat=ur_lat, round=True, resolution='l') m = polar_stere(lonmin,lonmax,latmin,latmax) m.drawcoastlines(color = 'r',linewidth=1.4) m.drawmapboundary(color='k') m.drawlsmask(land_color='k',ocean_color='k') cs = m.contourf(lon,lat,ice[:,:],np.arange(20,100.01,1),extend='min',latlon=True) cmap = ncm.cmap('MPL_YlGnBu') cmap = cmocean.cm.ice cs.set_cmap(cmap) m.fillcontinents(color='k') cbar = m.colorbar(cs,location='right',pad = 0.2) cbar.outline.set_edgecolor('k') barlim = np.arange(20,101,10) cbar.set_ticks(barlim) cbar.set_ticklabels(list(map(str,barlim))) cbar.set_label(r'\textbf{Concentration (\%)}',fontsize=13, alpha=0.6) cbar.ax.tick_params(axis='y', size=.01) fig.suptitle(r'\textbf{ARCTIC SEA ICE -- %s}' % titletime, fontsize=22,color='white',alpha=0.6) plt.annotate(r'\textbf{DATA:} AMSR2 3.125 km (JAXA/Uni Hamburg-Processing)',xy=(250,100), xycoords='figure pixels',color='white',fontsize=6, alpha=0.7,rotation=0) plt.annotate(r'\textbf{SOURCE:} http://icdc.cen.uni-hamburg.de/daten/cryosphere.html',xy=(250,80), xycoords='figure pixels',color='white',fontsize=6, alpha=0.7,rotation=0) plt.annotate(r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',xy=(250,60), xycoords='figure pixels',color='white',fontsize=6, alpha=0.7,rotation=0) fig.subplots_adjust(top=0.89) print('Completed: Figure plotted!') plt.savefig(directoryfigure + 'seaiceconc_%s_%s.png' % (region,currenttime), dpi=300) print('Completed: Script done!')
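# The download-and-decompress step of the script above, in isolation. A
# sketch assuming the Uni Hamburg FTP layout used in the script; the date in
# the file name is a placeholder.
import gzip
import shutil
import urllib.request

url = ('ftp://ftp-projects.cen.uni-hamburg.de/seaice/AMSR2/3.125km/'
       'Arc_20180325_res3.125_pyres.nc.gz')
local_gz = 'Arc_20180325_res3.125_pyres.nc.gz'
urllib.request.urlretrieve(url, local_gz)

# Stream-decompress rather than reading the whole archive into memory.
with gzip.open(local_gz, 'rb') as src, open('Arc_AMSR2_SIC.nc', 'wb') as dst:
    shutil.copyfileobj(src, dst)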
(license of the file above: mit)

===== e-lin/LeetCode :: 24-swap-nodes-in-pairs/24-swap-nodes-in-pairs.py =====
# Definition for singly-linked list.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def swapPairs(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        odds = self.connOdds(head)
        evens = self.connEvens(head)

        dummyHead = ListNode(0)
        dummyPtr = dummyHead
        p1 = odds
        p2 = evens
        # Interleave: even-positioned value first, then odd-positioned value.
        while p1 is not None and p2 is not None:
            dummyPtr.next = ListNode(p2.val)
            dummyPtr = dummyPtr.next
            p2 = p2.next
            dummyPtr.next = ListNode(p1.val)
            dummyPtr = dummyPtr.next
            p1 = p1.next
        while p1 is not None:
            dummyPtr.next = ListNode(p1.val)
            dummyPtr = dummyPtr.next
            p1 = p1.next
        while p2 is not None:
            dummyPtr.next = ListNode(p2.val)
            dummyPtr = dummyPtr.next
            p2 = p2.next
        # printNode(dummyHead.next)
        return dummyHead.next

    def connOdds(self, head):
        # Collect values at positions 0, 2, 4, ... into a new list.
        if head is None:
            return None
        ptr = head
        dummyOdds = ListNode(0)
        dummyPtr = dummyOdds
        while ptr is not None:
            dummyPtr.next = ListNode(ptr.val)
            dummyPtr = dummyPtr.next
            if ptr.next is not None and ptr.next.next is not None:
                ptr = ptr.next.next
            else:
                ptr = None
        # printNode(dummyOdds.next)
        return dummyOdds.next

    def connEvens(self, head):
        # Collect values at positions 1, 3, 5, ... into a new list.
        if head is None or head.next is None:
            return None
        ptr = head.next
        dummyEvens = ListNode(0)
        dummyPtr = dummyEvens
        while ptr is not None:
            dummyPtr.next = ListNode(ptr.val)
            dummyPtr = dummyPtr.next
            if ptr.next is not None and ptr.next.next is not None:
                ptr = ptr.next.next
            else:
                ptr = None
        # printNode(dummyEvens.next)
        return dummyEvens.next


def printNode(node):
    ptr = node
    while ptr is not None:
        print(ptr.val)
        ptr = ptr.next


def main():
    node = ListNode(1)
    # node.next = ListNode(2)
    # node.next.next = ListNode(3)
    # node.next.next.next = ListNode(4)
    solution = Solution()
    result = solution.swapPairs(node)
    printNode(result)


if __name__ == '__main__':
    main()
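# The solution above rebuilds the list out of new nodes by splitting it into
# odd- and even-positioned values and re-interleaving them. For contrast, a
# standard in-place variant (not the author's approach) rewires the existing
# nodes around a dummy head in O(1) extra space:
def swap_pairs_inplace(head):
    dummy = ListNode(0)
    dummy.next = head
    prev = dummy
    while prev.next is not None and prev.next.next is not None:
        first, second = prev.next, prev.next.next
        first.next = second.next  # detach the pair from the rest
        second.next = first       # reverse the pair
        prev.next = second        # splice the swapped pair back in
        prev = first              # step past the pair just handled
    return dummy.next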
(license of the file above: apache-2.0)

===== InsightSoftwareConsortium/ITKExamples :: src/Filtering/AntiAlias/SmoothBinaryImageBeforeSurfaceExtraction/Code.py =====
#!/usr/bin/env python

# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import itk

if len(sys.argv) != 6:
    print(
        "Usage: " + sys.argv[0] + " <inputImage> <outputImage> "
        "<maximumRMSError> <numberOfIterations> <numberOfLayers>"
    )
    sys.exit(1)

inputImage = sys.argv[1]
outputImage = sys.argv[2]
maximumRMSError = float(sys.argv[3])
numberOfIterations = int(sys.argv[4])
numberOfLayers = int(sys.argv[5])

PixelType = itk.F
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]

ReaderType = itk.ImageFileReader[ImageType]
reader = ReaderType.New()
reader.SetFileName(inputImage)

AntiAliasFilterType = itk.AntiAliasBinaryImageFilter[ImageType, ImageType]
antialiasfilter = AntiAliasFilterType.New()
antialiasfilter.SetInput(reader.GetOutput())
antialiasfilter.SetMaximumRMSError(maximumRMSError)
antialiasfilter.SetNumberOfIterations(numberOfIterations)
antialiasfilter.SetNumberOfLayers(numberOfLayers)

WriterType = itk.ImageFileWriter[ImageType]
writer = WriterType.New()
writer.SetFileName(outputImage)
writer.SetInput(antialiasfilter.GetOutput())
writer.Update()
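# For comparison, the same pipeline can be written with ITK's snake_case
# functional API; a sketch, assuming the wrapper for
# AntiAliasBinaryImageFilter is generated in your ITK build (file names and
# parameter values below are placeholders, not from the example).
import itk

image = itk.imread('binary_input.png', itk.F)
smoothed = itk.anti_alias_binary_image_filter(
    image,
    maximum_rms_error=0.07,
    number_of_iterations=1000,
    number_of_layers=2,
)
itk.imwrite(smoothed, 'smoothed_output.mha')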
(license of the file above: apache-2.0)

===== tensorflow/federated :: tensorflow_federated/python/tensorflow_libs/graph_merge_test.py =====
# Copyright 2019, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from tensorflow_federated.python.tensorflow_libs import graph_merge from tensorflow_federated.python.tensorflow_libs import graph_spec def _make_add_one_graph(): with tf.Graph().as_default() as graph: input_val = tf.compat.v1.placeholder(tf.float32, name='input') const = tf.constant(1.0) out = tf.add(input_val, const) return graph, input_val.name, out.name def _make_add_variable_number_graph(var_name=None): with tf.Graph().as_default() as graph: input_val = tf.compat.v1.placeholder(tf.float32, name='input') var = tf.Variable(initial_value=0.0, name=var_name, import_scope='') assign_op = var.assign_add(tf.constant(1.0)) out = tf.add(input_val, assign_op) return graph, input_val.name, out.name def _make_dataset_constructing_graph(): with tf.Graph().as_default() as graph: d1 = tf.data.Dataset.range(5) v1 = tf.data.experimental.to_variant(d1) return graph, '', v1.name def _make_manual_reduce_graph(dataset_construction_graph, return_element): with tf.Graph().as_default() as graph: v1 = tf.import_graph_def( dataset_construction_graph.as_graph_def(), return_elements=[return_element])[0] structure = tf.TensorSpec([], tf.int64) ds1 = tf.data.experimental.from_variant(v1, structure=structure) out = ds1.reduce(tf.constant(0, dtype=tf.int64), lambda x, y: x + y) return graph, '', out.name class ConcatenateInputsAndOutputsTest(tf.test.TestCase): def test_raises_on_none(self): with self.assertRaises(TypeError): graph_merge.concatenate_inputs_and_outputs(None) def test_raises_on_non_iterable(self): with self.assertRaises(TypeError): graph_merge.concatenate_inputs_and_outputs(1) def test_concatenate_inputs_and_outputs_two_add_one_graphs(self): graph1, input_name_1, output_name_1 = _make_add_one_graph() graph2, input_name_2, output_name_2 = _make_add_one_graph() with graph1.as_default(): init_op_name_1 = tf.compat.v1.global_variables_initializer().name with graph2.as_default(): init_op_name_2 = tf.compat.v1.global_variables_initializer().name graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1, [input_name_1], [output_name_1]) graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2, [input_name_2], [output_name_2]) arg_list = [graph_spec_1, graph_spec_2] merged_graph, init_op_name, in_name_maps, out_name_maps = graph_merge.concatenate_inputs_and_outputs( arg_list) with merged_graph.as_default(): with tf.compat.v1.Session() as sess: sess.run(init_op_name) outputs = sess.run( [out_name_maps[0][output_name_1], out_name_maps[1][output_name_2]], feed_dict={ in_name_maps[0][input_name_1]: 1.0, in_name_maps[1][input_name_2]: 2.0 }) self.assertAllClose(outputs, np.array([2., 3.])) def test_concatenate_inputs_and_outputs_three_add_one_graphs(self): graph1, input_name_1, output_name_1 = _make_add_one_graph() graph2, input_name_2, output_name_2 = _make_add_one_graph() graph3, input_name_3, output_name_3 = _make_add_one_graph() with graph1.as_default(): 
init_op_name_1 = tf.compat.v1.global_variables_initializer().name with graph2.as_default(): init_op_name_2 = tf.compat.v1.global_variables_initializer().name with graph3.as_default(): init_op_name_3 = tf.compat.v1.global_variables_initializer().name graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1, [input_name_1], [output_name_1]) graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2, [input_name_2], [output_name_2]) graph_spec_3 = graph_spec.GraphSpec(graph3.as_graph_def(), init_op_name_3, [input_name_3], [output_name_3]) arg_list = [graph_spec_1, graph_spec_2, graph_spec_3] merged_graph, init_op_name, in_name_maps, out_name_maps = graph_merge.concatenate_inputs_and_outputs( arg_list) with merged_graph.as_default(): with tf.compat.v1.Session() as sess: sess.run(init_op_name) outputs = sess.run( [ out_name_maps[0][output_name_1], out_name_maps[1][output_name_2], out_name_maps[2][output_name_3] ], feed_dict={ in_name_maps[0][input_name_1]: 1.0, in_name_maps[1][input_name_2]: 2.0, in_name_maps[2][input_name_3]: 3.0 }) self.assertAllClose(outputs, np.array([2., 3., 4.])) def test_concatenate_inputs_and_outputs_no_arg_graphs(self): graph1 = tf.Graph() with graph1.as_default(): out1 = tf.constant(1.0) init_op_name_1 = tf.compat.v1.global_variables_initializer().name graph2 = tf.Graph() with graph2.as_default(): out2 = tf.constant(2.0) init_op_name_2 = tf.compat.v1.global_variables_initializer().name graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1, [], [out1.name]) graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2, [], [out2.name]) arg_list = [graph_spec_1, graph_spec_2] merged_graph, init_op_name, _, out_name_maps = graph_merge.concatenate_inputs_and_outputs( arg_list) with merged_graph.as_default(): with tf.compat.v1.Session() as sess: sess.run(init_op_name) outputs = sess.run( [out_name_maps[0][out1.name], out_name_maps[1][out2.name]]) self.assertAllClose(outputs, np.array([1., 2.])) def test_concatenate_inputs_and_outputs_no_init_op_graphs(self): graph1, input_name_1, output_name_1 = _make_add_one_graph() graph2, input_name_2, output_name_2 = _make_add_one_graph() graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), None, [input_name_1], [output_name_1]) graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), None, [input_name_2], [output_name_2]) arg_list = [graph_spec_1, graph_spec_2] merged_graph, init_op_name, in_name_maps, out_name_maps = graph_merge.concatenate_inputs_and_outputs( arg_list) with merged_graph.as_default(): with tf.compat.v1.Session() as sess: sess.run(init_op_name) outputs = sess.run( [out_name_maps[0][output_name_1], out_name_maps[1][output_name_2]], feed_dict={ in_name_maps[0][input_name_1]: 1.0, in_name_maps[1][input_name_2]: 2.0 }) self.assertAllClose(outputs, np.array([2., 3.])) def test_concatenate_inputs_and_outputs_two_add_variable_number_graphs(self): graph1, input_name_1, output_name_1 = _make_add_variable_number_graph() graph2, input_name_2, output_name_2 = _make_add_variable_number_graph() with graph1.as_default(): init_op_name_1 = tf.compat.v1.global_variables_initializer().name with graph2.as_default(): init_op_name_2 = tf.compat.v1.global_variables_initializer().name graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1, [input_name_1], [output_name_1]) graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2, [input_name_2], [output_name_2]) arg_list = [graph_spec_1, graph_spec_2] merged_graph, init_op_name, 
in_name_maps, out_name_maps = graph_merge.concatenate_inputs_and_outputs( arg_list) with merged_graph.as_default(): with tf.compat.v1.Session() as sess: sess.run(init_op_name) outputs_1 = sess.run( [out_name_maps[0][output_name_1], out_name_maps[1][output_name_2]], feed_dict={ in_name_maps[0][input_name_1]: 1.0, in_name_maps[1][input_name_2]: 2.0 }) outputs_2 = sess.run( [out_name_maps[0][output_name_1], out_name_maps[1][output_name_2]], feed_dict={ in_name_maps[0][input_name_1]: 1.0, in_name_maps[1][input_name_2]: 2.0 }) outputs_3 = sess.run( [out_name_maps[0][output_name_1], out_name_maps[1][output_name_2]], feed_dict={ in_name_maps[0][input_name_1]: 1.0, in_name_maps[1][input_name_2]: 2.0 }) self.assertAllClose(outputs_1, [2., 3.]) self.assertAllClose(outputs_2, [3., 4.]) self.assertAllClose(outputs_3, [4., 5.]) def test_concatenate_inputs_and_outputs_with_dataset_wires_correctly(self): dataset_graph, _, dataset_out_name = _make_dataset_constructing_graph() graph_1, _, out_name_1 = _make_manual_reduce_graph(dataset_graph, dataset_out_name) graph_2, _, out_name_2 = _make_manual_reduce_graph(dataset_graph, dataset_out_name) with graph_1.as_default(): init_op_name_1 = tf.compat.v1.global_variables_initializer().name with graph_2.as_default(): init_op_name_2 = tf.compat.v1.global_variables_initializer().name graph_spec_1 = graph_spec.GraphSpec(graph_1.as_graph_def(), init_op_name_1, [], [out_name_1]) graph_spec_2 = graph_spec.GraphSpec(graph_2.as_graph_def(), init_op_name_2, [], [out_name_2]) arg_list = [graph_spec_1, graph_spec_2] merged_graph, init_op_name, _, out_name_maps = graph_merge.concatenate_inputs_and_outputs( arg_list) with merged_graph.as_default(): with tf.compat.v1.Session() as sess: sess.run(init_op_name) tens = sess.run( [out_name_maps[0][out_name_1], out_name_maps[1][out_name_2]]) self.assertEqual(tens, [10, 10]) class ComposeGraphSpecTest(tf.test.TestCase): def test_raises_on_none(self): with self.assertRaises(TypeError): graph_merge.compose_graph_specs(None) def test_raises_on_graph_spec_set(self): graph1, input_name_1, output_name_1 = _make_add_one_graph() graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), '', [input_name_1], [output_name_1]) with self.assertRaises(TypeError): graph_merge.compose_graph_specs(set(graph_spec_1)) def test_raises_on_list_of_ints(self): with self.assertRaises(TypeError): graph_merge.compose_graph_specs([0, 1]) def test_compose_no_input_graphs_raises(self): graph1 = tf.Graph() with graph1.as_default(): out1 = tf.constant(1.0) init_op_name_1 = tf.compat.v1.global_variables_initializer().name graph2 = tf.Graph() with graph2.as_default(): out2 = tf.constant(2.0) init_op_name_2 = tf.compat.v1.global_variables_initializer().name graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1, [], [out1.name]) graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2, [], [out2.name]) arg_list = [graph_spec_1, graph_spec_2] with self.assertRaisesRegex(ValueError, 'mismatch'): graph_merge.compose_graph_specs(arg_list) def test_compose_two_add_one_graphs_adds_two(self): graph1, input_name_1, output_name_1 = _make_add_one_graph() graph2, input_name_2, output_name_2 = _make_add_one_graph() with graph1.as_default(): init_op_name_1 = tf.compat.v1.global_variables_initializer().name with graph2.as_default(): init_op_name_2 = tf.compat.v1.global_variables_initializer().name graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1, [input_name_1], [output_name_1]) graph_spec_2 = 
graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2, [input_name_2], [output_name_2]) arg_list = [graph_spec_1, graph_spec_2] composed_graph, init_op_name, in_name_map, out_name_map = graph_merge.compose_graph_specs( arg_list) with composed_graph.as_default(): with tf.compat.v1.Session() as sess: sess.run(init_op_name) outputs = sess.run( out_name_map[output_name_2], feed_dict={ in_name_map[input_name_1]: 0.0, }) self.assertAllClose(outputs, np.array(2.)) def test_composition_happens_in_mathematical_composition_order(self): graph1, input_name_1, output_name_1 = _make_add_one_graph() def _make_cast_to_int_graph(): with tf.Graph().as_default() as graph: input_val = tf.compat.v1.placeholder(tf.float32, name='input') out = tf.cast(input_val, tf.int32) return graph, input_val.name, out.name graph2, input_name_2, output_name_2 = _make_cast_to_int_graph() with graph1.as_default(): init_op_name_1 = tf.compat.v1.global_variables_initializer().name with graph2.as_default(): init_op_name_2 = tf.compat.v1.global_variables_initializer().name graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1, [input_name_1], [output_name_1]) graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2, [input_name_2], [output_name_2]) arg_list = [graph_spec_2, graph_spec_1] composed_graph, _, in_name_map, out_name_map = graph_merge.compose_graph_specs( arg_list) with composed_graph.as_default(): with tf.compat.v1.Session() as sess: outputs = sess.run( out_name_map[output_name_2], feed_dict={ in_name_map[input_name_1]: 0.0, }) self.assertEqual(outputs, 1) with self.assertRaises(ValueError): graph_merge.compose_graph_specs(list(reversed(arg_list))) def test_compose_three_add_one_graphs_adds_three(self): graph1, input_name_1, output_name_1 = _make_add_one_graph() graph2, input_name_2, output_name_2 = _make_add_one_graph() graph3, input_name_3, output_name_3 = _make_add_one_graph() with graph1.as_default(): init_op_name_1 = tf.compat.v1.global_variables_initializer().name with graph2.as_default(): init_op_name_2 = tf.compat.v1.global_variables_initializer().name with graph3.as_default(): init_op_name_3 = tf.compat.v1.global_variables_initializer().name graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1, [input_name_1], [output_name_1]) graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2, [input_name_2], [output_name_2]) graph_spec_3 = graph_spec.GraphSpec(graph3.as_graph_def(), init_op_name_3, [input_name_3], [output_name_3]) arg_list = [graph_spec_1, graph_spec_2, graph_spec_3] composed_graph, init_op_name, in_name_map, out_name_map = graph_merge.compose_graph_specs( arg_list) with composed_graph.as_default(): with tf.compat.v1.Session() as sess: sess.run(init_op_name) outputs = sess.run( out_name_map[output_name_3], feed_dict={ in_name_map[input_name_1]: 0.0, }) self.assertAllClose(outputs, np.array(3.)) def test_compose_two_add_variable_number_graphs_executes_correctly(self): graph1, input_name_1, output_name_1 = _make_add_variable_number_graph() graph2, input_name_2, output_name_2 = _make_add_variable_number_graph() with graph1.as_default(): init_op_name_1 = tf.compat.v1.global_variables_initializer().name with graph2.as_default(): init_op_name_2 = tf.compat.v1.global_variables_initializer().name graph_spec_1 = graph_spec.GraphSpec(graph1.as_graph_def(), init_op_name_1, [input_name_1], [output_name_1]) graph_spec_2 = graph_spec.GraphSpec(graph2.as_graph_def(), init_op_name_2, [input_name_2], [output_name_2]) arg_list = 
[graph_spec_1, graph_spec_2] composed_graph, init_op_name, in_name_map, out_name_map = graph_merge.compose_graph_specs( arg_list) with composed_graph.as_default(): with tf.compat.v1.Session() as sess: sess.run(init_op_name) output_one = sess.run( out_name_map[output_name_2], feed_dict={ in_name_map[input_name_1]: 0.0, }) output_two = sess.run( out_name_map[output_name_2], feed_dict={ in_name_map[input_name_1]: 0.0, }) output_three = sess.run( out_name_map[output_name_2], feed_dict={ in_name_map[input_name_1]: 0.0, }) self.assertAllClose(output_one, np.array(2.)) self.assertAllClose(output_two, np.array(4.)) self.assertAllClose(output_three, np.array(6.)) def test_compose_with_dataset_wires_correctly(self): with tf.Graph().as_default() as dataset_graph: d1 = tf.data.Dataset.range(5) v1 = tf.data.experimental.to_variant(d1) ds_out_name = v1.name variant_type = v1.dtype with tf.Graph().as_default() as reduce_graph: variant = tf.compat.v1.placeholder(variant_type) structure = tf.TensorSpec([], tf.int64) ds1 = tf.data.experimental.from_variant(variant, structure=structure) out = ds1.reduce(tf.constant(0, dtype=tf.int64), lambda x, y: x + y) ds_in_name = variant.name reduce_out_name = out.name with dataset_graph.as_default(): init_op_name_1 = tf.compat.v1.global_variables_initializer().name with reduce_graph.as_default(): init_op_name_2 = tf.compat.v1.global_variables_initializer().name dataset_graph_spec = graph_spec.GraphSpec(dataset_graph.as_graph_def(), init_op_name_1, [], [ds_out_name]) reduce_graph_spec = graph_spec.GraphSpec(reduce_graph.as_graph_def(), init_op_name_2, [ds_in_name], [reduce_out_name]) arg_list = [reduce_graph_spec, dataset_graph_spec] composed_graph, _, _, out_name_map = graph_merge.compose_graph_specs( arg_list) with composed_graph.as_default(): with tf.compat.v1.Session() as sess: ten = sess.run(out_name_map[reduce_out_name]) self.assertEqual(ten, 10) if __name__ == '__main__': tf.test.main()
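# A standalone sketch of the pattern these tests rely on: build a TF1-style
# graph, keep only tensor *names*, then feed and fetch by name in a fresh
# session. This mirrors _make_add_one_graph above (tf.compat.v1 throughout).
import tensorflow as tf

with tf.Graph().as_default() as graph:
    input_val = tf.compat.v1.placeholder(tf.float32, name='input')
    out = tf.add(input_val, tf.constant(1.0))
    in_name, out_name = input_val.name, out.name

with graph.as_default():
    with tf.compat.v1.Session() as sess:
        result = sess.run(out_name, feed_dict={in_name: 41.0})
print(result)  # 42.0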
(license of the file above: apache-2.0)

===== tomchop/volatility-autoruns :: autoruns.py =====
import sys import re import xml.etree.ElementTree as ET import volatility.debug as debug import volatility.plugins.registry.registryapi as registryapi import volatility.plugins.filescan as filescan import volatility.plugins.dumpfiles as dumpfiles import volatility.win32 as win32 import volatility.utils as utils import volatility.plugins.common as common from volatility.renderers import TreeGrid # HKLM\Software\ SOFTWARE_RUN_KEYS = [ "Microsoft\\Windows\\CurrentVersion\\Run", "Microsoft\\Windows\\CurrentVersion\\RunOnce", "Microsoft\\Windows\\CurrentVersion\\RunServices", "Microsoft\\Windows\\CurrentVersion\\Policies\\Explorer\\Run", "Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Run", "Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\RunOnce", "Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Policies\\Explorer\\Run", "Microsoft\\Windows NT\\CurrentVersion\\Terminal Server\\Install\\Software\\Microsoft\\Windows\\CurrentVersion\\Run", "Microsoft\\Windows NT\\CurrentVersion\\Terminal Server\\Install\\Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce", ] # HKCU\ NTUSER_RUN_KEYS = [ "Software\\Microsoft\\Windows\\CurrentVersion\\Run", "Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce", "Software\\Microsoft\\Windows\\CurrentVersion\\RunServices", "Software\\Microsoft\\Windows\\CurrentVersion\\RunServicesOnce", "Software\\Microsoft\\Windows\\CurrentVersion\\Policies\\Explorer\\Run", "Software\\Microsoft\\Windows NT\\CurrentVersion\\Terminal Server\\Install\\Software\\Microsoft\\Windows\\CurrentVersion\\Run", "Software\\Microsoft\\Windows NT\\CurrentVersion\\Terminal Server\\Install\\Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce", "Software\\Microsoft\\Windows NT\\CurrentVersion\\Run", "Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Policies\\Explorer\\Run", "Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Run", ] # Active Setup only executes commands from the SOFTWARE hive # See: https://helgeklein.com/blog/2010/04/active-setup-explained/ # http://blogs.msdn.com/b/aruns_blog/archive/2011/06/20/active-setup-registry-key-what-it-is-and-how-to-create-in-the-package-using-admin-studio-install-shield.aspx # http://blog.spiderlabs.com/2014/07/backoff-technical-analysis.html ACTIVE_SETUP_KEY = "Microsoft\\Active Setup\\Installed Components" # Abusing MS Fix-It patches to ensure persistence # References: # https://www.blackhat.com/docs/asia-14/materials/Erickson/WP-Asia-14-Erickson-Persist-It-Using-And-Abusing-Microsofts-Fix-It-Patches.pdf # http://blog.cert.societegenerale.com/2015/04/analyzing-gootkits-persistence-mechanism.html APPCOMPAT_SDB_KEY = "Microsoft\\Windows NT\\CurrentVersion\\AppCompatFlags\\InstalledSDB" # Winlogon Notification packages are supported in pre-Vista versions of Windows only # See: http://technet.microsoft.com/en-us/library/cc721961(v=ws.10).aspx WINLOGON_NOTIFICATION_EVENTS = [ "Lock", "Logoff", "Logon", "Shutdown", "StartScreenSaver", "StartShell", "Startup", "StopScreenSaver", "Unlock", ] WINLOGON_REGISTRATION_KNOWN_DLLS = [ 'crypt32.dll', 'cryptnet.dll', 'cscdll.dll', 'dimsntfy.dll', 'sclgntfy.dll', 'wlnotify.dll', 'wzcdlg.dll', ] WINLOGON_COMMON_VALUES = { 'Userinit': 'userinit.exe', 'VmApplet': 'rundll32 shell32,Control_RunDLL "sysdm.cpl"', 'Shell': 'Explorer.exe', 'TaskMan': "Taskmgr.exe", 'System': 'lsass.exe', } # Service key -> value maps # Original list from regripper plugins, extra / repeated values from # http://technet.microsoft.com/en-us/library/cc759275(v=ws.10).aspx # 
http://www.atmarkit.co.jp/ait/articles/1705/01/news009_2.html (in Japanese) # https://github.com/processhacker/processhacker/blob/master/phlib/svcsup.c # https://docs.microsoft.com/en-us/windows/desktop/api/winsvc/nf-winsvc-createservicea # https://www.codemachine.com/downloads/win10/winnt.h SERVICE_TYPES = { 0x001: "Kernel driver", 0x002: "File system driver", 0x004: "Arguments for adapter", 0x008: "File system driver", 0x010: "Win32_Own_Process", 0x020: "Win32_Share_Process", 0x050: "User_Own_Process TEMPLATE", 0x060: "User_Share_Process TEMPLATE", 0x0D0: "User_Own_Process INSTANCE", 0x0E0: "User_Share_Process INSTANCE", 0x100: "Interactive", 0x110: "Interactive", 0x120: "Share_process Interactive", -1: "Unknown", } SERVICE_STARTUP = { 0x00: "Boot Start", 0x01: "System Start", 0x02: "Auto Start", 0x03: "Manual", 0x04: "Disabled", -1: "Unknown", } def sanitize_path(path): # Clears the path of most equivalent forms if path: path = path.lower() path = path.replace("%systemroot%\\", '') path = path.replace("\\systemroot\\", '') path = path.replace("%windir%", '') path = path.replace("\\??\\", '') path = path.replace('\x00', '') path = path.replace('"', '').replace("'", '') return path else: return '' def get_indented_dict(d, depth=0): output = "" for key in d: output += "{}{}: ".format(" " * depth * 2, key) if isinstance(d[key], dict): output += "\n" + get_indented_dict(d[key], depth + 1) elif isinstance(d[key], list): output += '\n' for e in d[key]: output += get_indented_dict(e, depth + 1) else: output += "{}\n".format(d[key]) return output class Autoruns(common.AbstractWindowsCommand): """Searches the registry and memory space for applications running at system startup and maps them to running processes""" def __init__(self, config, *args, **kwargs): common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs) config.add_option("ASEP-TYPE", short_option='t', default=None, help='Only collect the ASEP types specified. Select from: autoruns, services, appinit, winlogon, tasks, activesetup, sdb (comma-separated)', action='store', type='str') config.remove_option("VERBOSE") config.add_option("VERBOSE", short_option='v', default=False, help='Show entries that are normally filtered out (Ex. 
Services from the System32 folder)', action='store_true') self.process_dict = {} self.autoruns = [] self.services = [] self.appinit_dlls = [] self.winlogon = [] self.winlogon_registrations = [] self.tasks = [] self.activesetup = [] self.sdb = [] def get_dll_list(self): addr_space = utils.load_as(self._config) task_objects = win32.tasks.pslist(addr_space) for task in task_objects: if task.Peb: self.process_dict[int(task.UniqueProcessId)] = (task, [m for m in task.get_load_modules()]) # Matches a given module (executable, DLL) to a running process by looking either # in the CommandLine parameters or in the loaded modules def find_pids_for_imagepath(self, module): pids = [] module = sanitize_path(module) if module: for pid in self.process_dict: # case where the image path matches the process' command-line information if self.process_dict[pid][0].Peb: cmdline = self.process_dict[pid][0].Peb.ProcessParameters.CommandLine if module in sanitize_path(str(cmdline or '[no cmdline]')): pids.append(pid) # case where the module is actually loaded process (case for DLLs loaded by services) for dll in self.process_dict[pid][1]: if module in sanitize_path(str(dll.FullDllName or '[no dllname]')): pids.append(pid) return list(set(pids)) # Returns [] or a list of tuples(dll, key path, key.LastWriteTime, [int(pids)]) def get_appinit_dlls(self): debug.debug('Started get_appinit_dlls()') key_path="Microsoft\\Windows NT\\CurrentVersion\\Windows" results = [] try: self.regapi.reset_current() key = self.regapi.reg_get_key(hive_name='software', key=key_path) appinit_values = self.regapi.reg_get_value(None, None, value='AppInit_DLLs', given_root=key) except Exception as e: debug.warning('get_appinit_dlls() failed to complete. Exception: {} {}'.format(type(e).__name__, e.args)) else: if appinit_values: # Split on space or comma: https://msdn.microsoft.com/en-us/library/windows/desktop/dd744762(v=vs.85).aspx appinit_dlls = str(appinit_values).replace('\x00', '').replace(',', ' ').split(' ') results = [(dll, key_path, key.LastWriteTime, "AppInit_DLLs", self.find_pids_for_imagepath(dll)) for dll in appinit_dlls if dll] debug.debug('Finished get_appinit_dlls()') return results # Winlogon Notification packages are supported in pre-Vista versions of Windows only # See: http://technet.microsoft.com/fr-fr/library/cc721961(v=ws.10).aspx # returns [] or a list of tuples from parse_winlogon_registration_key() def get_winlogon_registrations(self): debug.debug('Started get_winlogon_registrations()') results = [] notify_key = "Microsoft\\Windows NT\\CurrentVersion\\Winlogon\\Notify" try: self.regapi.reset_current() for subkey in self.regapi.reg_get_all_subkeys(hive_name='software', key=notify_key): parsed_entry = self.parse_winlogon_registration_key(subkey) if parsed_entry and (self._config.VERBOSE or (parsed_entry[0].split('\\')[-1] not in WINLOGON_REGISTRATION_KNOWN_DLLS)): results.append(parsed_entry) except Exception as e: debug.warning('get_winlogon_registrations() failed to complete. 
Exception: {0} {1}'.format(type(e).__name__, e.args)) debug.debug('Finished get_winlogon_registrations()') return results # Returns None or (str(dllname), [(str(trigger)),str(event))], key.LastWriteTime, key path, [int(pids)]) def parse_winlogon_registration_key(self, key): dllname = "" events = [] pids = [] key_path = self.regapi.reg_get_key_path(key) or str(key.Name) try: for v_name, v_data in self.regapi.reg_yield_values(hive_name=None, key=None, given_root=key): val_name = str(v_name or '') val_data = str(v_data or '').replace('\x00', '') if val_name.lower() == 'dllname': dllname = val_data pids = self.find_pids_for_imagepath(dllname) elif val_name in WINLOGON_NOTIFICATION_EVENTS: events.append((val_name, val_data)) except Exception as e: debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args)) if dllname: return (dllname, events, key.LastWriteTime, key_path, pids) # Returns [] or a list of tuples(val_name, val_data, key.LastWriteTime, expected_val_data, [int(pids)]) def get_winlogon(self): debug.debug('Started get_winlogon()') winlogon = [] winlogon_key_path="Microsoft\\Windows NT\\CurrentVersion\\Winlogon" try: self.regapi.reset_current() key = self.regapi.reg_get_key(hive_name='software', key=winlogon_key_path) if key: for v_name, v_data in self.regapi.reg_yield_values(hive_name=None, key=None, given_root=key): val_name = str(v_name or '') val_data = str(v_data or '').replace('\x00', '') if val_data and val_name in WINLOGON_COMMON_VALUES: pids = self.find_pids_for_imagepath(val_data) winlogon.append((val_name, val_data, key.LastWriteTime, WINLOGON_COMMON_VALUES[val_name], winlogon_key_path, pids)) except Exception as e: debug.warning('get_winlogon() failed to complete. Exception: {} {}'.format(type(e).__name__, e.args)) debug.debug('Finished get_winlogon()') return winlogon # Returns [] or a list of tuples from parse_service_key() def get_services(self): debug.debug('Started get_services()') results = [] service_key_path = "{}\\Services".format(self.currentcs) try: self.regapi.reset_current() for service_sk in self.regapi.reg_get_all_subkeys(hive_name='system', key=service_key_path): parsed_service = self.parse_service_key(service_sk) if parsed_service and (self._config.VERBOSE or 'system32' not in parsed_service[5].lower()): results.append(parsed_service) except Exception as e: debug.warning('get_services() failed to complete. 
Exception: {0} {1}'.format(type(e).__name__, e.args))
        debug.debug('Finished get_services()')
        return results

    # Returns None or (key_path, timestamp, display_name, SERVICE_STARTUP[startup],
    #                   SERVICE_TYPES[svc_type], image_path, service_dll, [int(pids)])
    def parse_service_key(self, service_key):
        key_path = self.regapi.reg_get_key_path(service_key) or str(service_key.Name)
        pids = []
        try:
            values = {str(val_name): str(val_data).replace('\x00', '')
                      for val_name, val_data in self.regapi.reg_yield_values(None, None, given_root=service_key)}
            image_path = values.get("ImagePath", '')
            display_name = values.get("DisplayName", '')
            service_dll = values.get("ServiceDll", '')
            main = values.get("ServiceMain", '')
            startup = int(values.get("Start", -1))
            # Named svc_type rather than type so the builtin stays usable in the except clause below
            svc_type = int(values.get("Type", -1))
            timestamp = service_key.LastWriteTime
            # Skip services that are not set to start automatically or lack an image path
            # More details here: http://technet.microsoft.com/en-us/library/cc759637(v=ws.10).aspx
            if not image_path or startup not in [0, 1, 2]:
                return None
            if 'svchost.exe -k' in image_path.lower() or SERVICE_TYPES[svc_type] == 'Share_Process':
                sk = self.regapi.reg_get_key(hive_name='system', key='Parameters', given_root=service_key)
                if sk and not service_dll:
                    timestamp = sk.LastWriteTime
                    service_dll = self.regapi.reg_get_value(hive_name='system', key='', value="ServiceDll", given_root=sk)
                    main = self.regapi.reg_get_value(hive_name='system', key='', value='ServiceMain', given_root=sk)
                if not service_dll and '@' in display_name:
                    timestamp = service_key.LastWriteTime
                    service_dll = display_name.split('@')[1].split(',')[0]
                if service_dll:
                    service_dll = service_dll.replace('\x00', '')
                    pids = self.find_pids_for_imagepath(service_dll)
                    if main:
                        service_dll = "{} ({})".format(service_dll, main.replace('\x00', ''))
            else:
                pids = self.find_pids_for_imagepath(image_path)
        except Exception as e:
            debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args))
            # Bail out instead of returning a tuple built from unbound locals
            return None
        return (key_path, timestamp, display_name, SERVICE_STARTUP[startup], SERVICE_TYPES[svc_type],
                image_path, service_dll, pids)

    # Returns [] or a list of tuples from parse_activesetup_keys()
    def get_activesetup(self):
        debug.debug('Started get_activesetup()')
        results = []
        try:
            self.regapi.reset_current()
            for subkey in self.regapi.reg_get_all_subkeys(hive_name='software', key=ACTIVE_SETUP_KEY):
                r = self.parse_activesetup_keys(subkey)
                if r:
                    results.append(r)
        except Exception as e:
            debug.warning('get_activesetup() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))
        debug.debug('Finished get_activesetup()')
        return results

    # Returns None or a tuple(exe path, subkey.LastWriteTime, key path, [int(pids)])
    def parse_activesetup_keys(self, subkey):
        key_path = self.regapi.reg_get_key_path(subkey) or str(subkey.Name)
        try:
            stub_path_val = self.regapi.reg_get_value(hive_name='software', key='', value='StubPath', given_root=subkey)
            stub_path_val = str(stub_path_val or '').replace('\x00', '')
        except Exception as e:
            debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args))
            # stub_path_val is unbound at this point, so give up on this key
            return None
        if stub_path_val:
            pids = self.find_pids_for_imagepath(stub_path_val)
            return (stub_path_val, subkey.LastWriteTime, key_path, pids)

    # Returns [] or a list of tuples from parse_sdb_key()
    def get_sdb(self):
        debug.debug('Started get_sdb()')
        results = []
        try:
            self.regapi.reset_current()
            sdb_keys = self.regapi.reg_get_all_subkeys(hive_name='software', key=APPCOMPAT_SDB_KEY)
            for subkey in sdb_keys:
                parsed_sdb_entry = self.parse_sdb_key(subkey)
                if parsed_sdb_entry:
                    results.append(parsed_sdb_entry)
        except Exception as e:
            debug.warning('get_sdb() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))
        debug.debug('Finished get_sdb()')
        return results

    # Returns None or a tuple(exe, db_path, subkey.LastWriteTime, key path, [int(pids)])
    def parse_sdb_key(self, subkey):
        key_path = self.regapi.reg_get_key_path(subkey) or str(subkey.Name)
        try:
            desc = sanitize_path(self.regapi.reg_get_value('software', '', 'DatabaseDescription', subkey) or '')
            db_path = sanitize_path(self.regapi.reg_get_value('software', '', 'DatabasePath', subkey) or '')
            pids = self.find_pids_for_imagepath(desc)
        except Exception as e:
            debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args))
            # desc/db_path/pids are unbound after a failure, so give up on this key
            return None
        if desc:
            return (desc, db_path, subkey.LastWriteTime, key_path, pids)

    # Returns [] or a list of tuples from parse_autoruns_key()
    def get_autoruns(self):
        debug.debug('Started get_autoruns()')
        results = []
        hive_key_list = []
        try:
            # Gather all software run keys
            self.regapi.reset_current()
            for run_key in SOFTWARE_RUN_KEYS:
                hive_key_list += [k for k in self.regapi.reg_yield_key(hive_name='software', key=run_key)]
            # Gather all ntuser run keys
            self.regapi.reset_current()
            for run_key in NTUSER_RUN_KEYS:
                hive_key_list += [k for k in self.regapi.reg_yield_key(hive_name='ntuser.dat', key=run_key)]
            # hive_key = (key pointer, hive_name)
            for hive_key in hive_key_list:
                results += self.parse_autoruns_key(hive_key)
        except Exception as e:
            debug.warning('get_autoruns() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))
        debug.debug('Finished get_autoruns()')
        return results

    # Returns [] or a list of tuples(exe path, hive name, key path, key.LastWriteTime, value name, [int(pids)])
    def parse_autoruns_key(self, hive_key):
        results = []
        key = hive_key[0]
        hive_name = hive_key[1]
        key_path = self.regapi.reg_get_key_path(key) or str(key.Name)
        try:
            # val_data is the exe path
            for v_name, v_data in self.regapi.reg_yield_values(None, None, given_root=key):
                val_name = str(v_name or '')
                val_data = str(v_data or '').replace('\x00', '')
                if val_data:
                    pids = self.find_pids_for_imagepath(val_data)
                    results.append((val_data, hive_name, key_path, key.LastWriteTime, val_name, pids))
        except Exception as e:
            debug.warning('Failed while parsing {}. Exception: {} {}'.format(key_path, type(e).__name__, e.args))
        return results

    def get_tasks(self):
        debug.debug('Started get_tasks()')
        addr_space = utils.load_as(self._config)
        f = filescan.FileScan(self._config)
        tasks = []
        parsed_tasks = []
        try:
            for file in f.calculate():
                filename = str(file.file_name_with_device() or '')
                if "system32\\tasks\\" in filename.lower() and \
                        ('system32\\tasks\\microsoft' not in filename.lower() or self._config.VERBOSE):
                    tasks.append((file.obj_offset, filename))
                    debug.debug("Found task: 0x{0:x} {1}".format(file.obj_offset, filename))
            for offset, name in tasks:
                self._config.PHYSOFFSET = '0x{:x}'.format(offset)
                df = dumpfiles.DumpFiles(self._config)
                self._config.DUMP_DIR = '.'
                for data in df.calculate():
                    # Doing this with mmap would probably be cleaner
                    # Create a sufficiently large (dynamically resizable?)
                    # memory map so that we can seek and write the file accordingly
                    #
                    # SystemError: mmap: resizing not available--no mremap()
                    chopped_file = {}
                    for mdata in data['present']:
                        rdata = addr_space.base.read(mdata[0], mdata[2])
                        chopped_file[mdata[1]] = rdata
                    # Reassemble the file from its in-memory pages, ordered by offset
                    task_xml = "".join(part[1] for part in sorted(chopped_file.items(), key=lambda x: x[0]))
                    parsed = self.parse_task_xml(task_xml, name)
                    if parsed:
                        args = parsed['Actions']['Exec'].get("Arguments", None)
                        if args:
                            parsed['Actions']['Exec']['Command'] += " {}".format(args)
                        pids = self.find_pids_for_imagepath(parsed['Actions']['Exec']['Command'])
                        parsed_tasks.append((name.split('\\')[-1], parsed, task_xml, pids))
        except Exception as e:
            debug.warning('get_tasks() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))
        debug.debug('Finished get_tasks()')
        return parsed_tasks

    def parse_task_xml(self, xml, f_name):
        raw = xml
        xml = re.sub('\x00\x00+', '', xml) + '\x00'
        if xml:
            try:
                xml = xml.decode('utf-16')
                xml = re.sub(r"<Task(.*?)>", "<Task>", xml)
                xml = xml.encode('utf-16')
                root = ET.fromstring(xml)
                d = {}
                for e in root.findall("./RegistrationInfo/Date"):
                    d['Date'] = e.text or ''
                for e in root.findall("./RegistrationInfo/Description"):
                    d['Description'] = e.text or ''
                for e in root.findall("./Actions"):
                    d['Actions'] = self.visit_all_children(e)
                for e in root.findall("./Settings/Enabled"):
                    d['Enabled'] = e.text or ''
                for e in root.findall("./Settings/Hidden"):
                    d['Hidden'] = e.text or ''
                for t in root.findall("./Triggers/*"):
                    d['Triggers'] = self.visit_all_children(t)
                if not d.get("Actions", {}).get('Exec', {}).get("Command", False):
                    return None
                return d
            except UnicodeDecodeError:
                debug.warning('Error while parsing the following task: {}'.format(f_name))
                debug.debug('UnicodeDecodeError for: {}'.format(repr(raw)))

    def visit_all_children(self, node):
        d = {}
        for c in node:
            d[c.tag] = self.visit_all_children(c)
        if node.text:
            if node.text.strip(' \t\n\r'):
                d = node.text.strip(' \t\n\r')
        return d

    def calculate(self):
        self.get_dll_list()
        self.regapi = registryapi.RegistryApi(self._config)
        self.currentcs = self.regapi.reg_get_currentcontrolset() or "ControlSet001"
        asep_list = ['autoruns', 'services', 'appinit', 'winlogon', 'tasks', 'activesetup', 'sdb']
        os_major = utils.load_as(self._config).profile.metadata.get('major', 0)
        # If all_offsets is empty then regapi was unable to find
        # hive offsets and we exit with an error message
        if not self.regapi.all_offsets:
            debug.error('Unable to find registry hives.')
        if self._config.ASEP_TYPE:
            debug.debug('Config: {}'.format(self._config.ASEP_TYPE))
            asep_list = [s for s in self._config.ASEP_TYPE.replace(' ', '').split(',')]
        # Scan for ASEPs and populate the lists
        if 'autoruns' in asep_list:
            self.autoruns = self.get_autoruns()
        if 'services' in asep_list:
            self.services = self.get_services()
        if 'appinit' in asep_list:
            self.appinit_dlls = self.get_appinit_dlls()
        if 'winlogon' in asep_list:
            self.winlogon = self.get_winlogon()
            if os_major == 5:
                self.winlogon_registrations = self.get_winlogon_registrations()
        if 'tasks' in asep_list:
            self.tasks = self.get_tasks()
        if 'activesetup' in asep_list:
            self.activesetup = self.get_activesetup()
        if 'sdb' in asep_list:
            self.sdb = self.get_sdb()
        # Returns a generator, consumed by generator(), that produces the unified output data
        return self.get_unified_output_data()

    def get_unified_output_data(self):
        for exe_path, hive, key, timestamp, val_name, pids in self.autoruns:
            yield [exe_path, 'Autoruns', timestamp, val_name,
                   ", ".join([str(p) for p in pids]), hive, key, val_name, ""]
        for exe_path, key, timestamp, val_name, pids in self.appinit_dlls:
            yield [exe_path, 'AppInit Dlls', timestamp, '-',
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SOFTWARE", key, val_name, ""]
        for exe_path, events, timestamp, key, pids in self.winlogon_registrations:
            yield [exe_path, 'Winlogon (Notify)', timestamp,
                   'Hooks: {0}'.format(", ".join([e[1] for e in events])),
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SOFTWARE", key, "Dllname", ""]
        for val_name, exe_path, timestamp, common_value, key, pids in self.winlogon:
            yield [exe_path, 'Winlogon ({})'.format(val_name), timestamp,
                   "Default value: {}".format(common_value),
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SOFTWARE", key, val_name, ""]
        for key, timestamp, display_name, start, type, exe_path, entry, pids in self.services:
            yield [exe_path, 'Services', timestamp,
                   "{0} - {1} ({2} - {3})".format(key.split('\\')[-1], display_name, type, start),
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SYSTEM", key, "", entry]
        for name, task, task_xml, pids in self.tasks:
            yield [task['Actions']['Exec']['Command'], 'Scheduled Tasks', task.get('Date', ""),
                   "{} ({})".format(name, task.get('Description', "N/A")),
                   ", ".join([str(p) for p in pids]), "", "", "", ""]
        for exe_path, timestamp, key, pids in self.activesetup:
            yield [exe_path, "Active Setup", timestamp, "-",
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SOFTWARE", key, "StubPath", ""]
        for desc, exe_path, timestamp, key, pids in self.sdb:
            yield [exe_path, "SDB", timestamp, desc,
                   ", ".join([str(p) for p in pids]),
                   "Windows/System32/config/SOFTWARE", key, "", ""]

    def unified_output(self, data):
        """This standardizes the output formatting"""
        return TreeGrid([("Executable", str),
                         ("Source", str),
                         ("Last write time", str),
                         ("Details", str),
                         ("PIDs", str),
                         ("Hive", str),
                         ("Key", str),
                         ("Name", str),
                         ("Share Process Dll", str)],
                        self.generator(data))

    def generator(self, data):
        """This yields data according to the unified output format"""
        for executable, source, lastWriteTime, details, pids, hive, key, name, spDllPath in data:
            yield (0, [str(executable), str(source), str(lastWriteTime), str(details), str(pids),
                       str(hive), str(key), str(name), str(spDllPath)])

    def render_table(self, outfd, data):
        self.table_header(outfd, [("Executable", "<65"),
                                  ("Source", "30"),
                                  ("Last write time", "28"),
                                  ("Details", "60"),
                                  ("PIDs", "15")])
        for exe, source, timestamp, details, pids, hive, key, name, share_dll in data:
            if share_dll:
                exe = share_dll
            self.table_row(outfd, exe, source, timestamp, details, pids)

    def render_text(self, outfd, data):
        previous_source = ""
        for exe, source, timestamp, details, pids, hive, key, name, share_dll in data:
            if source != previous_source:
                outfd.write("\n\n")
                outfd.write("{:=<50}\n\n".format(source))
            if source == "Services":
                outfd.write("Service: {}\n".format(details))
                outfd.write(" Image path: {0} (Last modified: {1})\n".format(exe, timestamp))
                outfd.write(" PIDs: {}\n".format(pids))
                if share_dll:
                    outfd.write(" Loads: {}\n".format(share_dll))
            elif source == "Autoruns":
                outfd.write("Hive: {}\n".format(hive))
                outfd.write(" {0} (Last modified: {1})\n".format(key, timestamp))
                outfd.write(" {0:30} : {1} (PIDs: {2})\n".format(exe, details, pids))
            elif source == "Active Setup":
                outfd.write("Command line: {}\nLast-written: {} (PIDs: {})\n".format(exe, timestamp, pids))
            elif source == "SDB":
                # SDB entries are rendered in their own block below
                previous_source = source
                continue
            elif source == "Winlogon (Notify)":
                outfd.write("{0} (Last write time: {1})\n".format(exe, timestamp))
                outfd.write(" PIDs: {}\n".format(pids))
                outfd.write(" {}\n".format(details))
            elif "Winlogon" in source:
                outfd.write("{0}: {1}\n".format(name, exe))
                outfd.write(" {}\n".format(details))
                outfd.write(" PIDs: {}\n".format(pids))
                outfd.write(" Last write time: {}\n".format(timestamp))
            elif source == "AppInit Dlls":
                outfd.write("Exe path: {}\n".format(exe))
                outfd.write("PIDs: {}\n".format(pids))
            elif source == "Scheduled Tasks":
                # Scheduled tasks are rendered in their own block below
                previous_source = source
                continue
            outfd.write("\n")
            previous_source = source
        if self.tasks:
            outfd.write("\n\n")
            outfd.write("{:=<50}\n\n".format("Scheduled tasks "))
            for name, task, task_xml, pids in self.tasks:
                outfd.write("==== Task name: {} (PIDs: {})\n".format(name, ", ".join([str(p) for p in pids]) or "-"))
                outfd.write(get_indented_dict(task))
                outfd.write('\n')
                outfd.write("Raw XML:\n\n---------\n{}\n---------\n\n\n".format(task_xml))
        if self.sdb:
            outfd.write("\n\n")
            outfd.write("{:=<50}\n\n".format("SDB Fix-it patches "))
            # sdb entries are 5-tuples (see parse_sdb_key); the key path is unused here
            for desc, path, timestamp, key, pids in self.sdb:
                outfd.write("Description: \"{}\"\nLast-written: {}\nPatch: {}\n\n".format(desc, timestamp, path))
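# ------------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the plugin): assuming this file is
# loaded as a community plugin for Volatility 2.x under the name "autoruns",
# and that the ASEP_TYPE config option referenced in calculate() is registered
# as an --asep-type command-line switch, a typical invocation would look like:
#
#   python vol.py --plugins=/path/to/plugins -f memory.img \
#       --profile=Win7SP1x64 autoruns --asep-type=services,tasks
#
# calculate() then scans only the requested ASEP sources, and the renderers
# above group the results by source.
# ------------------------------------------------------------------------------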
gpl-2.0
-5,039,099,454,219,604,000
42.03466
169
0.555708
false
ActiveState/code
recipes/Python/580811_Uno_TextBased/recipe-580811.py
1
67103
import os import sys import random import math import time class BadInputError(Exception): pass class Player(): def __init__(self, name): self.id = None self.name = name self.type = 'Human' self.hand = Hand() self.legalCards = [] self.wildCards = [] self.valueChangeCards = [] self.zeroCards = [] self.canSkip = False self.canReverse = False self.canDrawTwo = False self.canDrawFour = False self.canValueChange = False self.drew = False self.scrollMax = 0 self.points = 0 self.forceDraw = 0 def addCard(self, card): self.drew = True if self.forceDraw > 0: self.forceDraw -= 1 self.drew = False self.hand.addCard(card) def beginTurn(self): self.drew = False def didDraw(self): return self.drew def getLegalCards(self, color, value, zeroChange=False): self.canSkip = False self.canReverse = False self.canDrawTwo = False self.canDrawFour = False self.canValueChange = False self.canZeroChange = False self.legalCards = [] self.wildCards = [] self.valueChangeCards = [] self.zeroCards = [] plusFours = [] for card in self.hand: if card.isWild(): if card.getValue() == '+4': plusFours.append(card) else: self.wildCards.append(card) elif zeroChange and card.isZero(): self.canZero = True self.zeroCards.append(card) elif card.getColor() == color or card.getValue() == value: if card.getColor() != color: self.canValueChange = True self.valueChangeCards.append(card) if card.getValue() == "+2": self.canDrawTwo = True elif card.getValue() == 'R': self.canReverse = True elif card.getValue() == 'X': self.canSkip = True self.legalCards.append(card) if len(self.legalCards) == 0 and len(plusFours) > 0: self.canDrawFour = True self.wildCards += plusFours def getValidCards(self): return self.legalCards def getAllValidCards(self): return self.legalCards + self.wildCards + self.zeroCards def hasLegalCard(self): return len(self.legalCards) > 0 def addPoints(self, amount): if (self.points + amount) <= 999999999999999999999: self.points += amount def removeCard(self, index): return self.hand.removeCard(index) def assignID(self, identity): self.id = identity def getName(self): return self.name def getID(self): return self.id def getPoints(self): return self.points def getType(self): return self.type def getCardNum(self): return len(self.hand) def getHand(self, scrollNum=0, hide=False): return self.hand.show(scrollNum, hide) def getForceDraws(self): return self.forceDraw def addForceDraw(self, num): self.forceDraw += num def decreaseForceDraw(self): self.forceDraw -= 1 def removeForceDraw(self): self.forceDraw = 0 def checkCard(self, index): return self.hand.getCard(int(index)) def discardHand(self): self.hand.discard() def __str__(self): return self.name def __repr__(self): return '({},{})'.format(self.name, self.points) class Hand(): ''''deck' (Deck) : Card's Color (rgby) 'numberOfCards' (int) : Card's Value (0-9, R, X, W, +2, +4)''' def __init__(self, deck=None,numberOfCards=0): self.hand = [] if deck != None: self.draw(deck,numberOfCards) def __iter__(self): return iter(self.hand) def __len__(self): return len(self.hand) def __getitem__(self, item): try: return self.hand[item] except: return '' def addCard(self, card): self.hand.append(card) def removeCard(self, index): index = int(index) if (0 <= index < len(self)): return self.hand.pop(index) def discard(self): self.hand = [] def show(self, scrollNum=0, hide=False): if scrollNum == -1: scrollNum = 0 output = '' num = 0 header, footer, upper, lower = '', '', '', '' header += ('\033[97m\u2666--\u2666\033[0m ') upper += ('\033[97m|<-|\033[0m ') lower += ('\033[97m|<-|\033[0m ') 
footer += ('\033[97m\u2666--\u2666\033[0m ') for i in range(10): indexNum = i+(10*scrollNum) if indexNum < len(self): header += (self[indexNum].getRow(0,hide)+' ') upper += (self[indexNum].getRow(1,hide)+' ') lower += (self[indexNum].getRow(2,hide)+' ') footer += (self[indexNum].getRow(3,hide)+' ') num += 1 for j in range(10-num): j #unused header += (' ') footer += (' ') upper += (' ') lower += (' ') header += ('\033[97m\u2666--\u2666\033[0m ') upper += ('\033[97m|->|\033[0m ') lower += ('\033[97m|->|\033[0m ') footer += ('\033[97m\u2666--\u2666\033[0m ') output += (' '+header+'\n '+upper+'\n '+lower+'\n '+footer+'\n\033[97m|-(<)--') for k in range(num): output += '({})'.format(k) output += '--' for l in range(10-num): l #unused output += '-----' output += '(>)--|\033[0m\n' return output def getCard(self, index): return self.hand[index] def indexCard(self, card): return self.hand.index(card) class GameSettings(): playerIdentities = ('play1','play2','play3','play4') computerNames = ('Watson','SkyNet','Hal','Metal Gear') def __init__(self): self.playerStaging = [] # Where Player Objs Are Stored Before Game Starts self.players = {} # ID : Player Obj self.numPlayers = 0 self.useColor = True self.displayEffects = True self.hideComputerHands = True self.zeroChange = False self.computerSimulation = False self.mainMenuError = '' self.computerSpeed = 'normal' def canAddPlayer(self): return (self.numPlayers < 4) def canRemovePlayer(self): return (self.numPlayers > 0) def canBegin(self): return (self.numPlayers > 1) def addPlayer(self, player): self.playerStaging.append(player) self.numPlayers += 1 def removePlayer(self, number): number -= 1 del self.playerStaging[number] self.numPlayers -= 1 def clearStaging(self): self.numPlayers = 0 self.playerStaging = [] def finalizePlayers(self): self.players.clear() identity = 0 for player in self.playerStaging: playerID = self.playerIdentities[identity] player.assignID(playerID) self.players[playerID] = player identity += 1 def getPlayerNum(self): return self.numPlayers def getComputerName(self): complete = False index = self.numPlayers while not complete: name = self.computerNames[index] complete = True for player in self.playerStaging: if player.getName() == name: index += 1 if index >= len(self.computerNames): index = 0 complete = False return self.computerNames[index] def getRandomIdentity(self): '''For Getting a Random Player for First Turn.''' return random.choice(self.players.keys()) def compileMainMenuElements(self): def getBlankSpace(word, total): return " "*(total-len(word)) def getPlayerBox(playerNum, rowNum): if rowNum == 1: name = self.playerStaging[playerNum-1].getName() return '{}{}'.format(name, getBlankSpace(name, 29)) elif rowNum == 2: points = self.playerStaging[playerNum-1].getPoints() return 'Points: {}{}'.format(points, getBlankSpace(str(points), 21)) self.mainMenuElements= {'play1row1':'No Player ','play1row2':' ', 'play2row1':'No Player ', 'play2row2':' ', 'play3row1':'No Player ','play3row2':' ', 'play4row1':'No Player ', 'play4row2':' ', 'play1box':'\033[90m','play2box':'\033[90m','play3box':'\033[90m','play4box':'\033[90m', 'beginBox':'\033[90m','addBox':'\033[97m','removeBox':'\033[90m' } playerBoxKey = 'play{}box' playerRowKey = 'play{}row{}' i = 1 for j in self.playerStaging: j colorCode = ['\033[91m','\033[94m','\033[92m','\033[93m'] key = playerBoxKey.format(i) self.mainMenuElements[key] = colorCode[i-1] self.mainMenuElements[playerRowKey.format(i,1)] = getPlayerBox(i, 1) self.mainMenuElements[playerRowKey.format(i,2)] = 
getPlayerBox(i, 2) i+=1 if self.canBegin(): self.mainMenuElements['beginBox'] = '\033[95m' if not self.canAddPlayer(): self.mainMenuElements['addBox'] = '\033[90m' if self.canRemovePlayer(): self.mainMenuElements['removeBox'] = '\033[97m' def changeComputerSpeed(self): if self.computerSpeed == 'slow': self.computerSpeed = 'normal' elif self.computerSpeed == 'normal': self.computerSpeed = 'fast' elif self.computerSpeed == 'fast': self.computerSpeed = 'slow' def getMainMenuElements(self): return self.mainMenuElements class Deck(): ''''shuffle' (bool) : shuffle deck.''' colors = ('red','yellow','green','blue') values = ('0','1','2','3','4','5','6','7','8','9','X','R','+2') def __init__(self, populate): '''Initializes proper deck of 108 Uno Cards.''' self.deck = [] if populate: self.populate(True) def __getitem__(self, index): return self.deck[index] def populate(self, shuffle=True): for color in self.colors: for value in self.values: self.deck.append(Card(color, value)) if value != '0': self.deck.append(Card(color, value)) for i in range(4): i #unused self.deck.append(Card('wild', '+4')) self.deck.append(Card('wild', 'W')) if shuffle: self.shuffle() def __iter__(self): return iter(self.deck) def __len__(self): return len(self.deck) def draw(self): return self.deck.pop() def place(self, card): return self.deck.append(card) def insert(self, card): self.deck.insert(0, card) def shuffle(self): random.shuffle(self.deck) class ComputerPlayer(Player): def __init__(self, name): super().__init__(name) self.type = 'Computer' self.begun = False self.colorsInHand = {'red':0, 'blue':0, 'green':0, 'yellow':0, 'wild':0} self.colorsOutHand = {} self.currentColor = "" def addCard(self, card): Player.addCard(self, card) color = card.getColor() self.colorsInHand[color] += 1 def indexCard(self, cardColor, cardValue): for card in self.hand: if card.getValue() == cardValue: if cardValue in ('+4', 'W'): return self.hand.indexCard(card) else: if card.getColor() == cardColor: return self.hand.indexCard(card) raise ValueError("Card Cannot Be Found") def think(self, match): card = None self.currentColor = match.currentColor currentValue = match.currentValue zeroChangeRule = match.zeroChange twoPlayers = False previousTurnID = match.getNextTurn(True) nextTurnID = match.getNextTurn(False) previousPlayer = match.getPlayer(previousTurnID) #nextPlayer = match.getPlayer(nextTurnID) if previousTurnID == nextTurnID: twoPlayers = True if self.canSkip == False and self.canReverse == True: self.canSkip = True self.canReverse = False self.getLegalCards(self.currentColor, currentValue, zeroChangeRule) ### DRAW CASE ### if len(self.legalCards) == 0 and len(self.wildCards) == 0: return "d" else: ### NO LEGAL CARD, USE WILD CARD ### if len(self.legalCards) == 0: if zeroChangeRule and self.canZeroChange: bestZeroColor = self.getBestColor(self.zeroCards) card = self.getCardByColor(self.zeroCards, bestZeroColor) else: if self.canDrawFour: card = self.getCardByValue(self.wildCards, "+4") print(card) else: card = random.choice(self.wildCards) else: ### HAS LEGAL CARD ### if twoPlayers and self.canSkip: #Always play a skip card in a two player game #print("Shed Skip Strategy") card = self.getCardByValue(self.legalCards,"R", "X") if self.canReverse and previousPlayer.didDraw(): #print("Reverse Strategy") reverseCards = self.getAllCardsByValue(self.legalCards, "R") for reverseCard in reverseCards: if reverseCard.getColor() == self.currentColor: card = reverseCard if self.canValueChange: # Computer Can Value Change, However, Should it? 
# Computer Checks to See if Value Change Color is Better Than Current currentColorNum = self.colorsInHand[self.currentColor] bestValueChangeColor = self.getBestColor(self.valueChangeCards) if self.colorsInHand[bestValueChangeColor] > currentColorNum or len(self.valueChangeCards) == len(self.legalCards): card = self.getCardByColor(self.valueChangeCards, bestValueChangeColor) if card == None: #print("Random Strategy") card = random.choice(list(set(self.legalCards) - set(self.valueChangeCards))) color = card.getColor() self.colorsInHand[color] -= 1 return str(self.indexCard(card.getColor(), card.getValue())) def getWildColor(self): maxKey = max(self.colorsInHand, key=self.colorsInHand.get) if maxKey == 'wild': return random.choice(('r','g','b','y')) else: return maxKey def getCardByValue(self, cardList, *values): for card in cardList: if card.getValue() in values: return card def getAllCardsByValue(self, cardList, *values): cards = [] for card in cardList: if card.getValue() in values: cards.append(card) return cards def getCardByColor(self, cardList, *colors): for card in cardList: if card.getColor() in colors: return card def getBestColor(self, cardList): bestColor = None bestColorNum = 0 for card in cardList: color = card.getColor() if self.colorsInHand[color] > bestColorNum: bestColor = color bestColorNum = self.colorsInHand[color] return bestColor class Card(): ''' 'suit' (string) : Card's Color (rgby) 'rank' (string) : Card's Value (0-9, R, X, W, +2, +4) ''' colors = { 'red' : '\033[91m', 'green' : '\033[92m', 'yellow' : '\033[93m', 'blue' : '\033[94m', 'purple' : '\033[95m', 'cyan' : '\033[96m', 'white' : '\033[97m', 'wild' : '', 'dwild' : '', 'dred' : '\033[31m', 'dgreen' : '\033[32m', 'dyellow' : '\033[33m', 'dblue' : '\033[34m', 'dpurple' : '\033[35m', 'dcyan' : '\033[36m', 'dwhite' : '\033[37m', } idMap = { 'red':'R','blue':'B','green':'G','yellow':'Y','wild':'W', '0':'0','1':'1','2':'2','3':'3','4':'4','5':'5','6':'6','7':'7','8':'8','9':'9', '+2':'+','R':'R','W':'W','+4':'$','X':'X' } bigNums = { "0" : [" .d888b. ","d88P Y88b","888 888","888 888","888 888","888 888","d88P Y88b"," \"Y888P\" "], "1" : [" d888 "," d8888 "," 888 "," 888 "," 888 "," 888 "," 888 "," 8888888 "], "2" : [".d8888b. ","d88P Y88","d8 888"," .d88P",".od888P\" ","d88P\" ","888\" ","888888888"], "3" : [" .d8888b.","d88P Y88"," .d88"," 8888\" "," \"Y8b","888 88","Y88b d88"," \"Y8888P\""], "4" : [" d88b "," d8P88 "," d8 88 "," d8 88 ","d8 88 ","888888888"," 88 "," 88 "], "5" : ["888888888","888 ","888 ","8888888b "," \"Y88b "," 888","Y88b d88P","\"Y8888P\" "], "6" : [" .d888b. ","d88P Y88b","888 ","888d888b ","888P \"Y8b","888 888","Y88b d88b"," \"Y888P\" "], "7" : ["888888888"," d8P"," d8P "," d8P "," 8888888 "," d8P "," d8P ","d8P "], "8" : [" .d888b. ","d8P Y8b","Y8b. d8P"," \"Y8888\" "," .dP\"Yb. ","888 888","Y88b d88P"," \"Y888P\" "], "9" : [" .d888b. ","d8P Y8b","88 88","Y8b. 
d88"," \"Y88P888"," 888","Y88b d88P"," \"Y888P\" "], "X" : ["Y8b d8P"," Y8b d8P "," Y8o8P "," Y8P "," d8b "," d888b "," d8P Y8b ","d8P Y8b"], "W" : ["88 88","88 88","88 o 88","88 d8b 88","88d888b88","88P Y88","8P Y8","P Y"], "+2" : [" db "," 88 ","C8888D "," 88 8888"," VP 8"," 8888"," 8 "," 8888"], "+4" : [" db "," 88 ","C8888D "," 88 d "," VP d8 "," d 8 "," d8888"," 8 "], "R9" : [" d88P "," d88P "," d88P "," d88P "," Y88b "," Y88b "," Y88b "," Y88b "], "R8" : [" d88P "," d88P "," d88P ","d88P ","Y88b "," Y88b "," Y88b "," Y88b "], "R7" : [" d88P Y"," d88P ","d88P ","88P ","88b ","Y88b "," Y88b "," Y88b d"], "R6" : [" d88P Y8","d88P Y","88P ","8P ","8b ","88b ","Y88b d"," Y88b d8"], "R5" : ["d88P Y88","88P Y8","8P Y","P ","b ","8b d","88b d8","Y88b d88"], "R4" : ["88P Y88b","8P Y88","P Y8"," Y"," d","b d8","8b d88","88b d88P"], "R3" : ["8P Y88b ","P Y88b"," Y88"," Y8"," d8"," d88","b d88P","8b d88P "], "R2" : ["P Y88b "," Y88b "," Y88b"," Y88"," d88"," d88P"," d88P ","b d88P "], "R1" : [" Y88b "," Y88b "," Y88b "," Y88b"," d88P"," d88P "," d88P "," d88P "], "R0" : [" Y88b "," Y88b "," Y88b "," Y88b "," d88P "," d88P "," d88P "," d88P "], } def __init__(self, color, value): '''Initializes Uno Card w/ Color and Value.''' self.wild = False #Is wild card? self.zero = False self.cardID = '{}{}'.format(self.idMap[color],self.idMap[value]) self.setColor(color) self.setValue(value) self.setPoints(value) ############################################# ### -\/- Retrieve Card Information -\/- ### def __repr__(self): return "{},{}".format(self.color, self.value) def getBigNum(self, reverse, reverseSeed=0): '''Returns list of strings to draw card's value on the pile.''' bigNums = [] colorCode = self.colorCode colorCodeDark = self.colorCodeDark value = self.value if value == 'R': if not reverse: value += str(reverseSeed) else: value += str(9-reverseSeed) for mid in self.bigNums[value]: bigNums += ['{}| |{}'.format(colorCode,colorCodeDark)+mid+'{}| |\033[0m\t'.format(colorCode)] return bigNums def getColor(self): '''Returns card's color.''' return self.color def getColorCode(self): '''Returns card's color code.''' return self.colorCode def getValue(self): '''Returns card's value.''' return self.value def getPoints(self): '''Returns card's point value.''' return self.points def getRow(self,rowNum,hide=False): value = self.value displaySpace = self.displaySpace if hide: colorCode = '\033[97m' value = '?' displaySpace = ' ' else: colorCode = self.colorCode if self.isWild(): if rowNum == 0: colorCode = '\033[91m' elif rowNum == 1: colorCode = '\033[93m' elif rowNum == 2: colorCode = '\033[92m' elif rowNum == 3: colorCode = '\033[94m' if rowNum == 0: return '{}\u2666--\u2666\033[0m'.format(colorCode) elif rowNum == 1: return '{}|{}{}|\033[0m'.format(colorCode, displaySpace, value) elif rowNum == 2: if hide: return '{}|? 
|\033[0m'.format(colorCode) else: return '{}| |\033[0m'.format(colorCode) elif rowNum == 3: return '{}\u2666--\u2666\033[0m'.format(colorCode) ############################################# ### -\/- Set Card Information -\/- ### def setColor(self, color): '''Sets Card's color and escape code.''' if color == 'blue': self.color = 'blue' self.colorCode = self.colors['blue'] self.colorCodeDark = self.colors['dblue'] elif color == 'red': self.color = 'red' self.colorCode = self.colors['red'] self.colorCodeDark = self.colors['dred'] elif color == 'yellow': self.color = 'yellow' self.colorCode = self.colors['yellow'] self.colorCodeDark = self.colors['dyellow'] elif color == 'green': self.color = 'green' self.colorCode = self.colors['green'] self.colorCodeDark = self.colors['dgreen'] elif color == 'wild': #No color modification self.wild = True self.color = 'wild' self.colorCodeDark = self.colors['dwild'] self.colorCode = self.colors['wild'] def setValue(self, value): if value in ('0','1','2','3','4','5','6','7','8','9','X','R','+2','+4','W'): self.value = value self.displaySpace = ' ' if len(value) == 2: self.displaySpace = '' if value == '0': self.zero = True def setPoints(self, value): if value in ('0','1','2','3','4','5','6','7','8','9'): self.points = int(value) elif value in ("W", "+4"): self.points = 50 else: self.points = 20 ############################################# ### -\/- Wild Card Methods -\/- ### def changeColor(self, color): '''Changes Card's Color, Intended for Wild Cards.''' self.setColor(color) def isWild(self): '''Returns if card is a wild card.''' return self.wild def isZero(self): return self.zero class Match(): elementsInit = { ### Names (final) ### 'P1Name':' ', 'P2Name':' ', 'P3Name':' ', 'P4Name':' ', ### Card Values ### 'P1Cards':' ', 'P2Cards':' ', 'P3Cards':' ', 'P4Cards':' ', ### Turn Colors / Hand### 'P1Turn':'', 'P2Turn':'', 'P3Turn':'', 'P4Turn':'', 'HName':'\t\t', 'HVisual':'' ,'Hand':'', ### Deck ### 'DNum':'', 'Deck':['','','','','','','','',''], 'PostDNum':'', ### Pile ### 'uHeader':'\t\t\t\t', 'uMiddle':' ', 'uLower':' ', 'oHeader':'\t\t\t', 'oMiddle':['\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t'], ### Messages ### 'Console':'', 'Error':'' } speeds = {'slow':2,'normal':1,'fast':0} def __init__(self, gs): ### Decks ### self.deck = Deck(True) self.pile = Deck(False) ### Player Information ### self.players = gs.players self.turnList = [] self.handTitles = {'play1':'','play2':'','play3':'','play4':''} ### Carry Information ### self.displayEffects = gs.displayEffects self.hideComputerHands = gs.hideComputerHands self.zeroChange = gs.zeroChange self.computerSpeed = self.speeds[gs.computerSpeed] self.simulation = gs.computerSimulation ### Data ### self.handPosition = 0 # For hand displays self.drawAmount = 0 # Used for force draws self.passes = 0 # Keep track of consecutive passes for emergency color change self.passMax = 0 # Max passes before color change self.turn = '' # Current turn self.event = '' # Wild, Reverse, Skip, etc self.wildColorChange = '' # Specifies color to change wild card to self.currentColor = '' # Current color self.currentValue = '' # Current value self.winnerID = '' # ID of Player who Won self.reverse = False # Is turn order reversed self.turnComplete = False # Is turn complete self.matchComplete = False # Is the Game over? self.matchAbort = False # Did the match conclude without a winner? 
self.forcedWild = False # Force change wild ### Initialize Names / Cards / Deck (Assuming New Game) ### self.elements = dict(self.elementsInit) keyStringName = 'P{}Name' keyStringCards = 'P{}Cards' for i in self.players: self.elements[keyStringName.format(i[-1])] = self.players[i].getName()+(' '*(11-len(self.players[i].getName()))) self.elements[keyStringCards.format(i[-1])] = ' '+(' '*(3-len(str(self.players[i].getCardNum()))))+str(self.players[i].getCardNum())+' Cards' self.elements['DNum'] = len(self.deck) if len(str(len(self.deck))) < 2: self.elements['PostDNum'] = '\t' j = 8 for i in range(int(math.ceil(len(self.deck)/12))): self.elements['Deck'][j] = '=' j -= 1 for key in GameSettings.playerIdentities: try: self.buildHandString(key) self.turnList += [key] except KeyError: pass self.passMax = len(self.turnList) def clearShell(self): os.system('cls' if os.name == 'nt' else 'clear') def begin(self): self.elements['Console'] = 'Beginning Game, Press Enter.' print(self.drawScreen()) self.enterBreak() self.eventDealCards() self.turn = random.choice(self.turnList) self.elements['Console'] = 'First turn will be {}. Press Enter.'.format(self.players[self.turn].getName()) print(self.drawScreen(True)) self.enterBreak() self.placeCard() self.elements['P{}Turn'.format(self.turn[-1])] = '\033[93m' if self.event == 'wild': self.eventWildCard() elif self.event == 'reverse': self.eventReverse() def end(self, gs): if not self.matchAbort: points = 0 self.elements['P{}Turn'.format(self.turn[-1])] = '' self.elements['Console'] = '{} Wins! Press Enter to Begin Point Tally'.format(self.players[self.winnerID].getName()) print(self.drawScreen()) self.enterBreak() for identity in self.turnList: if identity != self.winnerID: self.turn = identity self.elements['HName'] = self.handTitles[self.turn] self.elements['P{}Turn'.format(self.turn[-1])] = '\033[93m' while self.players[identity].getCardNum() > 0: card = self.players[identity].removeCard(0) points += card.getPoints() self.elements['Console'] = '{} Won {} Points!'.format(self.players[self.winnerID].getName(),points) keyStringCards = 'P{}Cards' self.elements[keyStringCards.format(identity[-1])] = ' '+(' '*(3-len(str(self.players[identity].getCardNum()))))+str(self.players[identity].getCardNum())+' Cards' self.players[identity].maxScroll = math.ceil((self.players[identity].getCardNum() / 10)-1) if self.handPosition > self.players[identity].maxScroll: self.handPosition -= 1 self.buildHandVisual(identity) if self.displayEffects and not self.simulation: print(self.drawScreen()) time.sleep(.1) self.elements['P{}Turn'.format(self.turn[-1])] = '' self.players[self.winnerID].addPoints(points) self.elements['Console'] = '{} Won {} Points! 
Press Enter'.format(self.players[self.winnerID].getName(),points) print(self.drawScreen()) self.enterBreak() gs.clearStaging() for identity in self.turnList: self.players[identity].discardHand() gs.addPlayer(self.players[identity]) return gs def adjustCardAmount(self, playerID): keyStringCards = 'P{}Cards' self.elements[keyStringCards.format(playerID[-1])] = ' '+(' '*(3-len(str(self.players[playerID].getCardNum()))))+str(self.players[playerID].getCardNum())+' Cards' self.players[playerID].maxScroll = math.ceil((self.players[playerID].getCardNum() / 10)-1) if self.handPosition > self.players[playerID].maxScroll: self.handPosition -= 1 self.buildHandVisual(playerID) def buildHandString(self, playerID): playerName = self.players[playerID].getName() if len(playerName) < 9: self.handTitles[playerID] = "{}'s Hand\t".format(self.players[playerID].getName()) else: self.handTitles[playerID] = "{}'s Hand".format(self.players[playerID].getName()) def buildHandVisual(self, playerID): string = '[' for i in range(self.players[playerID].maxScroll+1): if i == self.handPosition: string += '|' else: string += '-' string += ']' self.elements['HVisual'] = string def checkInput(self, playerInput): if playerInput == '': return {'valid':False,'entry':playerInput} if playerInput.isnumeric(): if int(playerInput)+(10*self.handPosition) < self.players[self.turn].getCardNum(): return {'valid':True,'entry':str(int(playerInput)+(10*self.handPosition)),'type':'card'} else: self.elements['Error'] = '{} is not a card.'.format(playerInput) return {'valid':False,'entry':playerInput} else: playerInput = playerInput.lower()[0] if playerInput in ['<','>','u','d','p','q','s']: return {'valid':True,'entry':playerInput} else: self.elements['Error'] = '{} is not a valid selection.'.format(playerInput) return {'valid':False,'entry':playerInput} def checkColorInput(self, playerInput): if playerInput == '': return {'valid':False,'entry':playerInput} playerInput = str(playerInput).lower()[0] if playerInput[0] == 'b': return {'valid':True,'entry':'blue'} elif playerInput[0] == 'r': return {'valid':True,'entry':'red'} elif playerInput[0] == 'g': return {'valid':True,'entry':'green'} elif playerInput[0] == 'y': return {'valid':True,'entry':'yellow'} return {'valid':False,'entry':playerInput} def eventDealCards(self): if self.displayEffects and not self.simulation: self.elements['Console'] = 'Dealing Cards...' for i in ('play1','play2','play3','play4'): if i in self.players: for j in range(7): j #unused self.dealCard(i) if self.displayEffects and not self.simulation: print(self.drawScreen(True)) time.sleep(.1) def eventReverse(self): if self.displayEffects and not self.simulation: hide = False if self.players[self.turn].getType() == "Computer": hide = self.hideComputerHands self.elements['Console'] = "Reverse Card Played! Reversing Turn Order.".format(self.players[self.turn].getName()) print(self.drawScreen(hide)) time.sleep(1) for i in range(10): cardBigNums = self.pile[0].getBigNum(self.reverse,i) self.elements['oMiddle'] = cardBigNums print(self.drawScreen(hide)) if self.displayEffects and not self.simulation: time.sleep(.1) cardBigNums = self.pile[0].getBigNum(self.reverse,9) self.elements['oMiddle'] = cardBigNums self.reverse = not self.reverse self.event = '' def eventSkip(self): if self.displayEffects and not self.simulation: hide = False if self.players[self.turn].getType() == "Computer": hide = self.hideComputerHands self.elements['Console'] = "Skip Card Placed! 
Skipping {}'s Turn.".format(self.players[self.turn].getName()) print(self.drawScreen(hide)) time.sleep(1) for i in range(2): i #unused self.elements['P{}Turn'.format(self.turn[-1])] = '\033[91m' print(self.drawScreen(hide)) time.sleep(.3) self.elements['P{}Turn'.format(self.turn[-1])] = '' print(self.drawScreen(hide)) time.sleep(.3) self.turnComplete = True self.event = '' def eventWildCard(self): hide = False if not self.forcedWild: if self.players[self.turn].getType() == 'Human': self.elements['Console'] = 'Wild Card! Specifiy a Color: (B)lue, (R)ed, (G)reen, (Y)ellow' self.elements['Error'] = 'Specifiy A Color' print(self.drawScreen()) playerInput = str(input("Color Change: ")) checked = self.checkColorInput(playerInput) while not checked['valid']: if checked['entry'] == '<': self.handPosition -= 1 if self.handPosition == -1: self.handPosition = self.players[self.turn].maxScroll self.buildHandVisual(self.turn) elif checked['entry'] == '>': self.handPosition += 1 if self.handPosition > self.players[self.turn].maxScroll: self.handPosition = 0 self.buildHandVisual(self.turn) print(self.drawScreen()) playerInput = str(input("Color Change: ")) checked = self.checkColorInput(playerInput) else: hide = self.hideComputerHands checked = self.checkColorInput(self.players[self.turn].getWildColor()) self.wildColorChange = checked['entry'] else: self.wildColorChange = self.checkColorInput(random.choice(('r','b','g','y')))['entry'] self.forcedWild = False self.currentColor = self.wildColorChange self.elements['Error'] = "" if self.displayEffects and not self.simulation: self.elements['Console'] = 'Wild Card! Changing Color.' seed = 1 for i in range(10): i #unused if seed > 4: seed = 1 print(self.drawScreen(hide,wildSeed=seed)) time.sleep(.1) seed += 1 self.pile[0].changeColor(self.wildColorChange) self.wildColorChange = '' cardBigNums = self.pile[0].getBigNum(self.reverse) self.elements['oHeader'] = '{}\u2666\u2666\u2666=========\u2666\u2666\u2666\033[0m\t'.format(self.pile[0].getColorCode()) self.elements['oMiddle'] = cardBigNums self.event = '' def eventDraw(self): self.players[self.turn].addForceDraw(self.drawAmount) self.drawAmount = 0 self.event = '' def dealCard(self, playerID): card = self.deck.draw() self.players[playerID].addCard(card) ### Adjust Hand Visual ### self.players[playerID].maxScroll = math.ceil((self.players[playerID].getCardNum() / 10)-1) self.handPosition = self.players[playerID].maxScroll self.buildHandVisual(playerID) ### Adjust Player Tile ### keyStringCards = 'P{}Cards' self.elements[keyStringCards.format(playerID[-1])] = ' '+(' '*(3-len(str(self.players[playerID].getCardNum()))))+str(self.players[playerID].getCardNum())+' Cards' ### Adjust Deck ### self.elements['DNum'] = len(self.deck) if len(str(len(self.deck))) < 2: self.elements['PostDNum'] = '\t' j = 8 self.elements['Deck'] = [' ',' ',' ',' ',' ',' ',' ',' ', ' '] for i in range(math.ceil(len(self.deck)/12)): i #unused self.elements['Deck'][j] = '=' j -= 1 def placeCard(self, card=None): if card == None: ### Used At Beginning For First Card ### card = self.deck.draw() self.elements['DNum'] = len(self.deck) cardColor = card.getColorCode() cardBigNums = card.getBigNum(self.reverse) self.currentColor = card.getColor() self.currentValue = card.getValue() self.pile.insert(card) self.elements['oHeader'] = '{}\u2666\u2666\u2666=========\u2666\u2666\u2666\033[0m\t'.format(cardColor) self.elements['oMiddle'] = cardBigNums if len(self.pile) > 1: previousCard = self.pile[1] previousCardColor = previousCard.getColorCode() 
self.elements['uHeader'] = '{} \u2666\u2666\u2666=========\u2666\u2666\u2666\033[0m\t\t'.format(previousCardColor) self.elements['uMiddle'] = '{}| |\033[0m'.format(previousCardColor) self.elements['uLower'] = '{}\u2666\u2666\u2666\033[0m'.format(previousCardColor) if self.currentColor == 'wild': self.event = 'wild' if self.currentValue == 'X': self.event = 'skip' elif self.currentValue == 'R': if len(self.players) > 2: self.event = 'reverse' else: self.event = 'skip' elif self.currentValue == '+4': self.drawAmount = 4 elif self.currentValue == '+2': self.drawAmount = 2 self.passes = 0 def extractCard(self, playerID, index): card = self.players[playerID].removeCard(index) if self.players[playerID].getCardNum() == 0: self.matchComplete = True self.winnerID = self.turn self.adjustCardAmount(playerID) return card def enterBreak(self): if not self.simulation: str(input()) return def nextTurn(self): self.turnComplete = False self.handPosition = 0 turnType = self.players[self.turn].getType() self.players[self.turn].beginTurn() ### Prepare Hand Visuals ### self.elements['HName'] = self.handTitles[self.turn] self.buildHandVisual(self.turn) if self.event == 'skip': self.eventSkip() elif self.drawAmount > 0: self.eventDraw() while not self.turnComplete: if turnType == 'Human': self.players[self.turn].getLegalCards(self.currentColor, self.currentValue, self.zeroChange) if len(self.deck) > 0: self.elements['Console'] = 'Select a card, (D)raw, or (P)ause.' else: self.players[self.turn].removeForceDraw() self.elements['Console'] = 'Select a card, (D)raw, (P)ause, or Pas(s).' if self.players[self.turn].getForceDraws() > 0: self.elements['Error'] = 'Draw Card Played! Draw {} cards.'.format(self.players[self.turn].getForceDraws()) print(self.drawScreen()) playerInput = str(input("\033[97mSelection: \033[92m")) checked = self.checkInput(playerInput) while not checked['valid']: print(self.drawScreen()) playerInput = str(input("\033[97mSelection: \033[92m")) checked = self.checkInput(playerInput) playerInput = checked['entry'] if playerInput == '<': self.handPosition -= 1 if self.handPosition == -1: self.handPosition = self.players[self.turn].maxScroll self.buildHandVisual(self.turn) elif playerInput == '>': self.handPosition += 1 if self.handPosition > self.players[self.turn].maxScroll: self.handPosition = 0 self.buildHandVisual(self.turn) elif playerInput == 'd': if len(self.deck) > 0: self.elements['Error'] = '' self.dealCard(self.turn) else: self.elements['Error'] = "Cannot Draw. Deck is Empty" elif playerInput == 'p': pauseOutput = self.pauseScreen() if pauseOutput == 'quit': self.matchComplete = True self.turnComplete = True self.winnerID = 'play1' self.matchAbort = True elif playerInput == 's': if len(self.deck) > 0: self.elements['Error'] = "Cannot pass until Deck is empty." elif len(self.players[self.turn].getAllValidCards()) > 0: self.elements['Error'] = "Cannot pass while having playable cards." 
else: self.turnComplete = True self.passes += 1 if self.passes == self.passMax: self.forcedWild = True self.event = 'wild' self.passes = 0 elif playerInput.isnumeric(): if self.players[self.turn].getForceDraws() == 0: cardCheck = self.players[self.turn].checkCard(playerInput) if cardCheck in self.players[self.turn].getAllValidCards(): card = self.extractCard(self.turn, playerInput) self.placeCard(card) self.elements['Error'] = "" self.turnComplete = True else: self.elements['Error'] = "Card Doesn't Match The Color {} or Value {}!".format(self.currentColor, self.currentValue) else: pass elif turnType == 'Computer': self.elements['Console'] = '{}\'s Turn'.format(self.players[self.turn].getName()) print(self.drawScreen(self.hideComputerHands)) if not self.simulation: time.sleep(self.computerSpeed) #str(input()) while (True): if self.displayEffects and not self.simulation: time.sleep(.2) if self.players[self.turn].getForceDraws() > 0 and len(self.deck) > 0: cardIndex = 'd' else: cardIndex = self.players[self.turn].think(self) if cardIndex.isnumeric(): card = self.extractCard(self.turn, int(cardIndex)) if card.getColor() != self.currentColor: self.resetDrawBool() self.placeCard(card) self.turnComplete = True break else: if cardIndex == 'd': if len(self.deck) > 0: self.dealCard(self.turn) print(self.drawScreen(self.hideComputerHands)) else: self.turnComplete = True self.players[self.turn].removeForceDraw() self.passes += 1 if self.passes == self.passMax: self.forcedWild = True self.event = 'wild' self.passes = 0 break ### DECODE INPUT ### if self.event == 'reverse': self.eventReverse() elif self.event == 'wild': self.eventWildCard() # Clear Current Turn self.elements['P{}Turn'.format(self.turn[-1])] = '' # Prepare Next Turn self.turn = self.getNextTurn() self.elements['P{}Turn'.format(self.turn[-1])] = '\033[93m' def drawScreen(self, hide=False, wildSeed=0): if self.simulation: return '' colorCombos = { 1 : ['\033[91m','\033[93m','\033[92m','\033[94m'], 2 : ['\033[94m','\033[91m','\033[93m','\033[92m'], 3 : ['\033[92m','\033[94m','\033[91m','\033[93m'], 4 : ['\033[93m','\033[92m','\033[94m','\033[91m'] } currentTurn = self.turn if currentTurn == '': currentTurn = self.turnList[-1] hide = True if wildSeed != 0: colorMod = colorCombos[wildSeed] else: colorMod = ['','','',''] self.clearShell() screenout = '' screenout += '\t\t\033[94m || ||\033[92m ||\ || \033[91m// \\\\\n\033[0m' screenout += '\t\t\033[94m || ||\033[92m ||\\\|| \033[91m(( ))\n\033[0m' screenout += '\t\t\033[94m \\\ //\033[92m || \|| \033[91m \\\ //\n\033[0m' screenout += '\033[97m===============================================================\n' screenout += '\033[93m{}\033[0m\n'.format(self.elements['Console']) screenout += '\033[97m===============================================================\n' screenout += '\t\t\t\t\t\t' + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P1Turn']) screenout += '\033[97mDeck:\t\t' + '{}'.format(self.elements['uHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P1Turn'],self.elements['P1Name']) screenout += '\033[97m{} Cards'.format(self.elements['DNum']) + '{}'.format(self.elements['PostDNum'])+'\t' + '{}'.format(self.elements['uHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P1Turn'],self.elements['P1Cards']) screenout += '\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[0],self.elements['oHeader']) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P1Turn']) screenout += '\033[97m _+_ \t\t ' + 
'{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[1],self.elements['oHeader']) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P2Turn']) screenout += '\033[97m | ' + '\033[92m{}\033[0m'.format(self.elements['Deck'][0]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[2],self.elements['oMiddle'][0]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P2Turn'],self.elements['P2Name']) screenout += '\033[97m | ' + '\033[92m{}\033[0m'.format(self.elements['Deck'][1]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[3],self.elements['oMiddle'][1]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P2Turn'],self.elements['P2Cards']) screenout += '\033[97m | ' + '\033[92m{}\033[0m'.format(self.elements['Deck'][2]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[0],self.elements['oMiddle'][2]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P2Turn']) screenout += '\033[97m | ' + '\033[93m{}\033[0m'.format(self.elements['Deck'][3]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[1],self.elements['oMiddle'][3]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P3Turn']) screenout += '\033[97m | ' + '\033[93m{}\033[0m'.format(self.elements['Deck'][4]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[2],self.elements['oMiddle'][4]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P3Turn'],self.elements['P3Name']) screenout += '\033[97m | ' + '\033[93m{}\033[0m'.format(self.elements['Deck'][5]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[3],self.elements['oMiddle'][5]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P3Turn'],self.elements['P3Cards']) screenout += '\033[97m | ' + '\033[91m{}\033[0m'.format(self.elements['Deck'][6]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uLower']) + '\033[97m{}{}'.format(colorMod[0],self.elements['oMiddle'][6]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P3Turn']) screenout += '\033[97m | ' + '\033[91m{}\033[0m'.format(self.elements['Deck'][7]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uLower']) + '\033[97m{}{}'.format(colorMod[1],self.elements['oMiddle'][7]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P4Turn']) screenout += '\033[97m |_' + '\033[91m{}\033[0m'.format(self.elements['Deck'][8]) + '\033[97m_|\t\t ' + '\033[97m{}{}'.format(colorMod[2],self.elements['oHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P4Turn'],self.elements['P4Name']) screenout += '\033[97m\t\t ' + '\033[97m{}{}'.format(colorMod[3],self.elements['oHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P4Turn'],self.elements['P4Cards']) screenout += '\t\t\t\t\t\t' + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P4Turn']) screenout += "\033[97m{}".format(self.elements['HName']) + "\t\t\t\t {}\n".format(self.elements['HVisual']) screenout += '\033[97m===============================================================\n' screenout += self.players[currentTurn].getHand(self.handPosition,hide) screenout += '\033[91m{}\033[0m'.format(self.elements['Error']) return screenout def pauseScreen(self): while True: self.clearShell() print('\n\t\t\tPause') print('\n\t\t1. Resume') print('\t\t2. 
Quit') selection = str(input('\nSelection: ')).upper() while selection not in ['1', '2']: print('\nSelection Invalid') selection = str(input('\nSelection: ')).upper() if selection == '1' or "": return "" elif selection == '2': return "quit" def isComplete(self): return self.matchComplete def next(self): self.turn = self.getNextTurn() def getNextTurn(self, forceReverse=False): if forceReverse: reverse = not self.reverse else: reverse = self.reverse currentIndex = self.turnList.index(self.turn) if not reverse: if (currentIndex + 1) == len(self.turnList): return self.turnList[0] else: return self.turnList[currentIndex+1] else: if currentIndex == 0: return self.turnList[len(self.turnList) - 1] else: return self.turnList[currentIndex-1] def getPlayer(self, playerID): return self.players[playerID] def resetDrawBool(self): for identity in self.players: self.players[identity].drew = False def Uno(debugging=False): ###MENUS### def clearShell(): os.system('cls' if os.name == 'nt' else 'clear') def mainMenu(): sys.stdout.write("\x1b[8;32;63t") sys.stdout.flush() gs = GameSettings() while True: print(drawMainMenu(gs)) selection = str(input('\033[97mSelection: \033[92m')) while selection not in ['1', '2', '3', '4', '5']: gs.mainMenuError = "Invalid Selection" print(drawMainMenu(gs)) selection = str(input('\033[97mSelection: \033[92m')) if selection == '1': if gs.canBegin(): gs.mainMenuError = "" gs.finalizePlayers() gs = playMatch(gs) else: gs.mainMenuError = "Two Players Required to Begin" elif selection == '2': if gs.canAddPlayer(): gs.mainMenuError = "" gs = addPlayer(gs) else: gs.mainMenuError = "Max Number of Players Reached" elif selection == '3': if gs.canAddPlayer(): gs.mainMenuError = "" gs = addComputer(gs) else: gs.mainMenuError = "Max Number of Players Reached" elif selection == '4': if gs.canRemovePlayer(): gs.mainMenuError = "" gs = removePlayer(gs) else: gs.mainMenuError = "No Players to Remove" elif selection == '5': gs.mainMenuError = "" gs = settingsMenu(gs) else: raise BadInputError('Data Provided Has No Function') def playMatch(gs): for i in range(1): i m = Match(gs) m.begin() while (not m.isComplete()): m.nextTurn() gs = m.end(gs) return gs def addPlayer(gs): colors = ['\033[91m','\033[94m', '\033[92m', '\033[93m'] nameOkay = False playerNum = gs.getPlayerNum() + 1 colorIndex = playerNum - 1 message = "\033[97mPlease Enter Player {}'s Name: {}".format(playerNum, colors[colorIndex]) while not nameOkay: print(drawMainMenu(gs)) name = str(input(message)).title() if len(name) > 11: gs.mainMenuError = "Name Must Be 11 Characters or Less!" elif len(name) == 0: gs.mainMenuError = "" return gs else: nameOkay = True for player in gs.playerStaging: if player.getName() == name: nameOkay = False if nameOkay == False or name in GameSettings.computerNames: gs.mainMenuError = "Name Cannot Match Another Player's Name!" p = Player(name) gs.addPlayer(p) gs.mainMenuError = "" return gs def addComputer(gs): name = gs.getComputerName() c = ComputerPlayer(name) gs.addPlayer(c) return gs def removePlayer(gs): sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=32, cols=63)) sys.stdout.flush() clearShell() complete = False playerNum = gs.getPlayerNum() message = "\033[97mPlease Enter Player Number to Remove: \033[91m".format(playerNum) while (not complete): print(drawMainMenu(gs)) number = str(input(message)) if len(number) == 0: gs.mainMenuError = "" return gs try: number = int(number) if 0 < number <= playerNum: complete = True else: gs.mainMenuError = "Invalid Player Number!" 
except: gs.mainMenuError = "Please Enter the Player Number, not Name!" gs.mainMenuError = "" gs.removePlayer(number) return gs def settingsMenu(gs): while True: sys.stdout.write("\x1b[8;32;63t") sys.stdout.flush() clearShell() print('\n\t\tSettings') print('\n\t1. Draw Effects\t\t\t{}'.format(gs.displayEffects)) print('\t2. Hide Computer Hands\t\t{}'.format(gs.hideComputerHands)) print('\t3. Computer Speed\t\t{}'.format(gs.computerSpeed.title())) #print('\t4. Zero Card Changes Color\t{}'.format(gs.zeroChange)) #print('\t5. Run Simulations\t\t{}'.format(gs.computerSimulation)) print('\n\tA. Exit') selection = str(input('\nSelection: ')).upper() while selection not in ('1', '2', '3', '4', '5', 'A', ''): print('\nSelection Invalid') selection = str(input('\nSelection: ')).upper() if selection == '1': gs.displayEffects = not gs.displayEffects elif selection == '2': gs.hideComputerHands = not gs.hideComputerHands elif selection == '3': gs.changeComputerSpeed() ''' elif selection == '4': gs.zeroChange = not gs.zeroChange elif selection == '5': gs.computerSimulation = not gs.computerSimulation ''' elif selection == 'A' or selection == '' or selection in ('4','5'): return gs def drawMainMenu(gs): clearShell() gs.compileMainMenuElements() menuElements = gs.getMainMenuElements() screenout = '' screenout += '\t\t\033[94m || ||\033[92m ||\ || \033[91m// \\\\\n\033[0m' screenout += '\t\t\033[94m || ||\033[92m ||\\\|| \033[91m(( ))\n\033[0m' screenout += '\t\t\033[94m \\\ //\033[92m || \|| \033[91m \\\ //\n\033[0m' screenout += '\033[97m===============================================================\033[0m\n' screenout += "{}1-----------------------------1\033[0m {}2-----------------------------2\033[0m\n".format(menuElements['play1box'],menuElements['play2box']) screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play1box'],menuElements['play1row1'],menuElements['play2box'],menuElements['play2row1']) screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play1box'],menuElements['play1row2'],menuElements['play2box'],menuElements['play2row2']) screenout += "{}1-----------------------------1\033[0m {}2-----------------------------2\033[0m\n".format(menuElements['play1box'],menuElements['play2box']) screenout += "{}3-----------------------------3\033[0m {}4-----------------------------4\033[0m\n".format(menuElements['play3box'],menuElements['play4box']) screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play3box'],menuElements['play3row1'],menuElements['play4box'],menuElements['play4row1']) screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play3box'],menuElements['play3row2'],menuElements['play4box'],menuElements['play4row2']) screenout += "{}3-----------------------------3\033[0m {}4-----------------------------4\033[0m\n".format(menuElements['play3box'],menuElements['play4box']) screenout += "\033[97m===============================================================\033[0m\n" screenout += " {}\u2666---------------------------\u2666\033[0m \u2666===========================\u2666\n".format(menuElements['beginBox']) screenout += " {}|1. Begin Match |\033[0m | High Scores |\n".format(menuElements['beginBox']) screenout += " {}\u2666---------------------------\u2666\033[0m \u2666---------------------------\u2666\n".format(menuElements['beginBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox']) screenout += " {}|2. 
Add Player |\033[0m | |\n".format(menuElements['addBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox']) screenout += " {}|3. Add Computer |\033[0m | |\n".format(menuElements['addBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['removeBox']) screenout += " {}|4. Remove Player |\033[0m | |\n".format(menuElements['removeBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['removeBox']) screenout += " \033[97m\u2666---------------------------\u2666\033[0m | |\n" screenout += " \033[97m|5. Settings |\033[0m | |\n" screenout += " \033[97m\u2666---------------------------\u2666\033[0m \u2666===========================\u2666\n" screenout += "\033[97m===============================================================\033[0m\n" screenout += '\033[91m{}\033[0m'.format(gs.mainMenuError) return screenout mainMenu() if __name__ == "__main__": Uno()
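# ------------------------------------------------------------------------------
# Sanity-check sketch (illustrative, not part of the recipe): Deck.populate()
# above builds the standard 108-card Uno deck -- one '0' plus two of each other
# rank per color (25 * 4 = 100), plus four 'W' and four '+4' wild cards.
# Under that reading of populate():
#
#   d = Deck(True)
#   assert len(d) == 108
#   assert sum(1 for c in d if c.isWild()) == 8
# ------------------------------------------------------------------------------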
mit
3,301,806,578,621,670,400
42.715309
439
0.474837
false
SRLKilling/sigma-backend
data-server/django_app/sigma_core/views/group_field.py
1
2127
from rest_framework import status
from rest_framework.decorators import detail_route

from sigma_core.views.sigma_viewset import SigmaViewSet
from sigma_core.importer import load_ressource

GroupField = load_ressource("GroupField")


class GroupFieldViewSet(SigmaViewSet):

    serializer_class = GroupField.serializer
    queryset = GroupField.model.objects.all()

    #*********************************************************************************************#
    #**                                     Read actions                                        **#
    #*********************************************************************************************#

    def retrieve(self, request, pk):
        """
        REST retrieve action. Used to retrieve a group field.
        """
        return self.handle_action_pk('retrieve', request, pk)

    #*********************************************************************************************#
    #**                                     Write actions                                       **#
    #*********************************************************************************************#

    def create(self, request):
        """
        REST create action. Used to create a group field.
        If it succeeds, returns HTTP_201_CREATED with the newly created group field object.
        """
        return self.handle_action('create', request)

    def update(self, request, pk):
        """
        REST update action. Used to update a group field.
        If it succeeds, returns HTTP_200_OK with the updated group field object.
        """
        # return self.basic_update(request, pk)  # HERE !
        pass

    def destroy(self, request, pk):
        """
        REST destroy action. Used to delete a group field.
        If it succeeds, returns HTTP_204_NO_CONTENT.
        """
        return self.handle_action_pk('destroy', request, pk)
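# A minimal routing sketch (an assumption, not part of this module): provided
# SigmaViewSet ultimately derives from a DRF ViewSet, the class above would
# typically be exposed with the standard SimpleRouter; the "group-field" URL
# prefix is illustrative:
#
#   from rest_framework import routers
#   router = routers.SimpleRouter()
#   router.register(r'group-field', GroupFieldViewSet)
#   urlpatterns = router.urls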
license: agpl-3.0 | hash: 7,523,328,847,063,784,000 | line_mean: 35.067797 | line_max: 136 | alpha_frac: 0.416549 | autogenerated: false

repo: USGSDenverPychron/pychron | path: pychron/envisage/browser/analysis_table.py | copies: 1 | size: 8406
# =============================================================================== # Copyright 2013 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= import json import os from collections import OrderedDict from datetime import datetime from hashlib import md5 from traits.api import List, Any, Str, Enum, Bool, Event, Property, cached_property, Instance, DelegatesTo, \ CStr, Int, Button from pychron.column_sorter_mixin import ColumnSorterMixin from pychron.core.fuzzyfinder import fuzzyfinder from pychron.core.ui.table_configurer import AnalysisTableConfigurer from pychron.dvc.func import get_review_status from pychron.envisage.browser.adapters import AnalysisAdapter from pychron.paths import paths def sort_items(ans): return sorted(ans, key=lambda x: x.timestampf) class AnalysisTable(ColumnSorterMixin): analyses = List oanalyses = List selected = Any dclicked = Any context_menu_event = Event analysis_filter = CStr analysis_filter_values = List analysis_filter_comparator = Enum('=', '<', '>', '>=', '<=', 'not =', 'startswith') analysis_filter_parameter = Str analysis_filter_parameters = Property(List, depends_on='tabular_adapter.columns') # omit_invalid = Bool(True) table_configurer = Instance(AnalysisTableConfigurer) limit = DelegatesTo('table_configurer') omit_invalid = DelegatesTo('table_configurer') no_update = False scroll_to_row = Event refresh_needed = Event tabular_adapter = Instance(AnalysisAdapter) append_replace_enabled = Bool(True) add_analysis_set_button = Button analysis_set = Str analysis_set_names = List _analysis_sets = None max_history = Int suppress_load_analysis_set = False def __init__(self, *args, **kw): super(AnalysisTable, self).__init__(*args, **kw) self._analysis_sets = OrderedDict() def load(self): p = paths.hidden_path('analysis_sets') if os.path.isfile(p): with open(p, 'r') as rfile: jd = json.load(rfile, object_pairs_hook=OrderedDict) self._analysis_sets = jd self.analysis_set_names = list(reversed([ji[0] for ji in jd.values()])) def dump(self): p = paths.hidden_path('analysis_sets') with open(p, 'w') as wfile: json.dump(self._analysis_sets, wfile) def get_selected_analyses(self): if self.analyses: ans = self.selected if not ans: ans = self.analyses return ans def add_analysis_set(self): ans = self.get_selected_analyses() if ans: aset = [(a.uuid, a.record_id) for a in ans] if aset: if len(aset) > 1: name = '{} -- {}'.format(aset[0][1], aset[-1][1]) else: name = aset[0][1] h = md5(''.join(sorted((ai[0] for ai in aset)))).hexdigest() if h not in self._analysis_sets: name = '{} ({})'.format(name, datetime.now().strftime('%m/%d/%y')) self._analysis_sets[h] = (name, aset) if self.max_history: while len(self._analysis_sets) > self.max_history: self._analysis_sets.popitem(last=False) return name def get_analysis_set(self, name): return next((a[1] for a in self._analysis_sets.itervalues() if a[0] == name)) def set_tags(self, tag, 
items): for i in items: ai = next((a for a in self.oanalyses if a.uuid == i.uuid), None) if ai: ai.tag = tag self._analysis_filter_changed(self.analysis_filter) def remove_invalid(self): self.oanalyses = [ai for ai in self.oanalyses if ai.tag != 'invalid'] self._analysis_filter_changed(self.analysis_filter) def add_analyses(self, ans): items = self.analyses items.extend(ans) self.oanalyses = self.analyses = sort_items(items) self.calculate_dts(self.analyses) self.scroll_to_row = len(self.analyses) - 1 def set_analyses(self, ans, tc=None, page=None, reset_page=False, selected_identifiers=None): if selected_identifiers: aa = self.analyses aa = [ai for ai in aa if ai.identifier in selected_identifiers] aa.extend(ans) else: aa = ans new_items = sort_items(aa) items = [ai for ai in self.analyses if ai.frozen] new_items = [ai for ai in new_items if ai not in items] items.extend(new_items) self.oanalyses = self.analyses = items self.calculate_dts(self.analyses) self.scroll_to_row = len(self.analyses) - 1 def calculate_dts(self, ans): if ans and len(ans) > 1: self._python_dt(ans) def _python_dt(self, ans): ref = ans[0] prev = ref.timestampf ref.delta_time = 0 for ai in ans[1:]: t = ai.timestampf dt = (t - prev) / 60. ai.delta_time = dt prev = t def configure_table(self): self.table_configurer.edit_traits(kind='livemodal') def review_status_details(self): from pychron.envisage.browser.review_status_details import ReviewStatusDetailsView, ReviewStatusDetailsModel m = ReviewStatusDetailsModel(self.selected[0]) rsd = ReviewStatusDetailsView(model=m) rsd.edit_traits() def toggle_freeze(self): for ai in self.get_selected_analyses(): ai.frozen = not ai.frozen self.refresh_needed = True def load_review_status(self): records = self.get_analysis_records() if records: for ri in records: get_review_status(ri) self.refresh_needed = True def get_analysis_records(self): records = self.selected if not records: records = self.analyses return records # handlers def _add_analysis_set_button_fired(self): name = self.add_analysis_set() if name: self.dump() self.load() self.suppress_load_analysis_set = True self.analysis_set = name self.suppress_load_analysis_set = False def _analyses_items_changed(self, old, new): if self.sort_suppress: return self.calculate_dts(self.analyses) if new.removed: for ai in new.removed: self.oanalyses.remove(ai) def _analysis_filter_changed(self, new): if new: name = self.analysis_filter_parameter self.analyses = fuzzyfinder(new, self.oanalyses, name) # self.analyses = filter(filter_func(new, name), self.oanalyses) else: self.analyses = self.oanalyses def _analysis_filter_comparator_changed(self): self._analysis_filter_changed(self.analysis_filter) def _get_analysis_filter_parameter(self): p = self.analysis_filter_parameter return p.lower() @cached_property def _get_analysis_filter_parameters(self): return dict([(ci[1], ci[0]) for ci in self.tabular_adapter.columns]) # defaults def _table_configurer_default(self): return AnalysisTableConfigurer(id='analysis.table', title='Configure Analysis Table') def _analysis_filter_parameter_default(self): return 'record_id' def _tabular_adapter_default(self): adapter = AnalysisAdapter() self.table_configurer.adapter = adapter self.table_configurer.load() return adapter # ============= EOF =============================================
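Two spots in AnalysisTable above are Python 2 only: get_analysis_set iterates with dict.itervalues(), and add_analysis_set passes a str to md5(). Both fail on Python 3. A sketch of equivalent Python 3 expressions for just those two pieces:

# Editorial sketch: Python 3 equivalents for the two Python 2 idioms above.
from hashlib import md5

def get_analysis_set_py3(analysis_sets, name):
    # dict.values() replaces the Python 2-only dict.itervalues();
    # returns None instead of raising StopIteration when no set matches
    return next((aset for label, aset in analysis_sets.values() if label == name), None)

def analysis_set_hash_py3(aset):
    # hashlib requires bytes on Python 3, so encode the joined UUIDs first
    return md5(''.join(sorted(ai[0] for ai in aset)).encode('utf-8')).hexdigest()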
license: apache-2.0 | hash: -1,964,306,526,817,542,100 | line_mean: 32.094488 | line_max: 116 | alpha_frac: 0.592315 | autogenerated: false

repo: chripo/calflate | path: tests/test_vcard.py | copies: 1 | size: 1509
# -*- encoding: utf-8 -*-
# AUTHOR: http://www.christoph-polcin.com
# LICENSE: FreeBSD License
# CREATED: 2014-01-19

import calflate


def test_new_empty_collection():
    data = calflate.new_collection(("", "VCARD", ))
    assert data == ''


def test_new_collection():
    data = calflate.new_collection(("UID:12345", "VCARD", ))
    assert data == 'UID:12345'

    data = calflate.new_collection(("UID:12345\nREV:2011-01-02", "VCARD", ))
    assert data == 'UID:12345\r\nREV:2011-01-02'

    data = calflate.new_collection(("UID:12345\n\nREV:2011-01-02", "VCARD", ))
    assert data == 'UID:12345\r\nREV:2011-01-02'


def test_get_items():
    items = calflate.get_items('''BEGIN:VCARD
VERSION:3.0
N:Gump;Forrest;Mr.
UID:01234-01234-01234-01234
REV:2008-04-24T19:52:43Z
END:VCARD
BEGIN:VCARD
VERSION:4.0
N:Gump;Forrest;Mr.
UID:3333-3444-55
END:VCARD
''')

    item = next(items)
    assert len(item) == 4
    assert item[0] == '''BEGIN:VCARD\nVERSION:3.0\nN:Gump;Forrest;Mr.\n\
UID:01234-01234-01234-01234\nREV:2008-04-24T19:52:43Z\nEND:VCARD'''
    assert item[1] == 'VCARD'
    assert item[2] == '01234-01234-01234-01234'
    assert item[3] == '2008-04-24T19:52:43Z'

    item = next(items)
    assert len(item) == 4
    assert item[0] == 'BEGIN:VCARD\nVERSION:4.0\nN:Gump;Forrest;Mr.\nUID:3333-3444-55\nEND:VCARD'
    assert item[1] == 'VCARD'
    assert item[2] == '3333-3444-55'
    assert item[3] == '0'

    try:
        item = next(items)
        assert 0
    except StopIteration:
        assert 1
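The trailing try/except asserts that the iterator is exhausted. Assuming the suite runs under pytest (the test_* naming suggests it) and that get_items yields nothing for empty input, the same check reads more directly as:

# Same exhaustion check via pytest.raises; assumes pytest is the runner and
# that get_items('') yields no items at all.
import pytest
import calflate

def test_get_items_exhausted():
    items = calflate.get_items('')
    with pytest.raises(StopIteration):
        next(items)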
license: bsd-2-clause | hash: -2,247,921,644,035,591,000 | line_mean: 25.946429 | line_max: 97 | alpha_frac: 0.638834 | autogenerated: false

repo: labase/activnce | path: main/bookmarks/model.py | copies: 1 | size: 12237
# -*- coding: utf-8 -*- """ ################################################ Plataforma ActivUFRJ ################################################ :Author: *Núcleo de Computação Eletrônica (NCE/UFRJ)* :Contact: [email protected] :Date: $Date: 2009-2010 $ :Status: This is a "work in progress" :Revision: $Revision: 0.01 $ :Home: `LABASE `__ :Copyright: ©2009, `GPL """ try: from couchdb.schema import Document, TextField, IntegerField, LongField, DateTimeField, DictField, ListField, Schema, FloatField except ImportError: from couchdb.mapping import Document, TextField, IntegerField, LongField, DateTimeField, DictField, ListField, FloatField from couchdb.mapping import Mapping as Schema import database from search.model import addTag, removeTag from libs.permissions import isAllowedToDeleteObject, isAllowedToWriteObject, isAllowedToDeleteComment from libs.dateformat import short_datetime from libs.strformat import remove_diacritics, remove_special_chars from datetime import datetime import operator from operator import itemgetter def _strListSize(number, str, genero='M'): plural = lambda x: 's' if x!=1 else '' if number>0: return u"%d %s%s" % (number, str, plural(number)) elif genero=='M': return u"nenhum %s" % str else: return u"nenhuma %s" % str def _urlCount(url): for row in database.BOOKMARKS.view('bookmarks/count_by_url',startkey=url, group="true"): return row.value return 0 class Bookmarks(Document): # _id = <couchdb_id> registry_id = TextField() # dono do bookmark: usuário ou comunidade owner = TextField() # quem criou o bookmark. # caso bookmark seja de uma comunidade, owner!=registry_id url = TextField() title = TextField() description = TextField() tags = ListField(TextField()) data_cri = TextField() data_alt = TextField() alterado_por = TextField() comentarios = ListField(DictField(Schema.build( owner = TextField(), comment = TextField(), data_cri = TextField() ))) @classmethod def createBookmarkLink(self, user, url): for item in database.BOOKMARKS.view('bookmarks/by_registry_id_and_url',startkey=[user,url],endkey=[user, url, {}]): return ("Editar favorito", "/static/imagens/icones/bookmark32_true.png", "/bookmarks/popup/"+user+"?url="+url, "","",True) return ("Adicionar favorito", "/static/imagens/icones/bookmark32_false.png", "/bookmarks/popup/"+user+"?url="+url, "","",True) @classmethod def searchIdByUrl(self, registry_id, url): for row in database.BOOKMARKS.view('bookmarks/by_registry_id_and_url' ,startkey=[registry_id, url],endkey=[registry_id, url, {}]): return row.key[2] return None @classmethod def searchBookmarksByUrl(self, user, page, page_size, url): bookmarks = [] # Obtem uma página de resultados no BD # descending=true para listar os mais recentes primeiro # como a árvore é percorrida em sentido inverso, endkey é o documento inicial e startkey é o final. 
for row in database.BOOKMARKS.view('bookmarks/by_url' ,startkey=[url, {}, {}], endkey=[url], descending="true", skip=(page-1)*page_size , limit=page_size): (url, data_alt, bookmark_id) = row.key bookmark_data = dict() bookmark_data["registry_id"] = row.value["registry_id"] bookmark_data["owner"] = row.value["owner"] bookmark_data["description"] = row.value["description"] bookmark_data["title"] = row.value["title"] bookmark_data["url"] = row.value["url"] bookmark_data["tags"] = row.value["tags"] bookmark_data["id"] = bookmark_id bookmark_data["alterar"] = isAllowedToWriteObject(user, "bookmarks", row.value["registry_id"]) bookmark_data["apagar"] = bookmark_data["alterar"] and isAllowedToDeleteObject(user, row.value["owner"], row.value["registry_id"]+"/"+bookmark_id) bookmark_data["data_nofmt"] = row.value["data_alt"] bookmark_data["data_alt"] = short_datetime(row.value["data_alt"]) bookmark_data["alterado_por"] = row.value["alterado_por"] bookmark_data["num_comments"] = _strListSize (len(row.value["comentarios"]), u"comentário") #bookmark_data["url_count"] = _strListSize (_urlCount(bookmark_data["url"]), u"referência", genero='F') #url_count = _urlCount(bookmark_data["url"]) bookmark_data["url_count"] = "" #if url_count > 1: bookmark_data["url_count"] = u"%d usuários marcaram esta página" % url_count bookmarks.append(bookmark_data) bookmarks = sorted(bookmarks, key=itemgetter("data_nofmt"), reverse = True) return bookmarks @classmethod def countBookmarksByUrl(self, url): for row in database.BOOKMARKS.view('bookmarks/count_by_url', \ startkey=url, \ group="true"): return row.value return 0 @classmethod def countBookmarksByRegistryId(self, registry_id): for row in database.BOOKMARKS.view('bookmarks/count_by_registry_id', \ startkey=[registry_id],endkey=[registry_id, {}], \ group_level=1, group="true"): return row.value return 0 @classmethod def countBookmarksByRegistryIdAndTag(self, registry_id, tag): for row in database.BOOKMARKS.view('bookmarks/count_by_registry_id_and_tag', \ startkey=[registry_id, tag],endkey=[registry_id, tag, {}], \ group_level=1, group="true"): return row.value return 0 @classmethod def listBookmarks(self, user, registry_id, page, page_size, tag=None): bookmarks = [] if tag: view_name = 'bookmarks/by_registry_id_and_tag' start_key = [registry_id, tag] end_key = [registry_id, tag, {}, {}] else: view_name = 'bookmarks/by_registry_id' start_key = [registry_id] end_key = [registry_id, {}, {}] # Obtem uma página de resultados no BD # descending=true para listar os mais recentes primeiro # como a árvore é percorrida em sentido inverso, endkey é o documento inicial e startkey é o final. 
for row in database.BOOKMARKS.view(view_name, startkey=end_key,endkey=start_key, descending="true", skip=(page-1)*page_size , limit=page_size): if tag: (registry_id, tag_found, data_alt, bookmark_id) = row.key else: (registry_id, data_alt, bookmark_id) = row.key bookmark_data = dict() bookmark_data["registry_id"] = registry_id bookmark_data["owner"] = row.value["owner"] bookmark_data["description"] = row.value["description"] bookmark_data["title"] = row.value["title"] bookmark_data["url"] = row.value["url"] bookmark_data["tags"] = row.value["tags"] bookmark_data["id"] = bookmark_id # _file = Files().retrieve(file_id) bookmark_data["alterar"] = isAllowedToWriteObject(user, "bookmarks", registry_id) bookmark_data["apagar"] = bookmark_data["alterar"] and isAllowedToDeleteObject(user, row.value["owner"], registry_id+"/"+bookmark_id) bookmark_data["data_nofmt"] = row.value["data_alt"] bookmark_data["data_alt"] = short_datetime(row.value["data_alt"]) bookmark_data["alterado_por"] = row.value["alterado_por"] bookmark_data["num_comments"] = _strListSize (len(row.value["comentarios"]), u"comentário") #bookmark_data["url_count"] = _strListSize (url_count, u"referência", genero='F') url_count = _urlCount(bookmark_data["url"]) bookmark_data["url_count"] = "" if url_count > 1: bookmark_data["url_count"] = u"%d usuários marcaram esta página" % url_count bookmarks.append(bookmark_data) return bookmarks @classmethod def listAllTags(self, registry_id, tag=None): tags_list = [] for row in database.BOOKMARKS.view('bookmarks/by_registry_id_and_tag', startkey = [registry_id], endkey = [registry_id, {}, {}, {}]): (registry_id, tag_found, data_alt, bookmark_id) = row.key tags_list.append(tag_found) if tag and tag in tags_list: tags_list.remove(tag) tags_list = sorted(set(tags_list)) return tags_list def saveBookmark(self, id=None): self.save(id=id) # atualiza tabela de tags # vai para o tags.model data_tag = str(datetime.now()) for tag in self.tags: if self.title: nome = self.title else: url = self.url url = remove_special_chars(remove_diacritics(url.replace(" ","_"))) nome = url addTag(tag, self.registry_id, self.owner, "bookmarks", self.id, nome, data_tag) def deleteBookmark(self): tags = self.tags self.delete() # atualiza tabela de tags # vai para o tags.model for tag in tags: removeTag(remove_diacritics(tag.lower()), "bookmarks", self.id) def editBookmark(self, user, newtitle, newdesc, newtags): # preserva tags anteriores old_tags = self.tags self.title = newtitle self.description = newdesc self.tags = newtags self.alterado_por = user self.data_alt = str(datetime.now()) self.save() # compara as tags anteriores com as modificadas, atualizando a lista de tags no BD data_tag = str(datetime.now()) if self.title: nome = self.title else: url = self.url url = remove_special_chars(remove_diacritics(url.replace(" ","_"))) nome = url for tag in self.tags: if tag not in old_tags: addTag(tag, self.registry_id, user, "bookmarks", self.id, nome, data_tag) for tag in old_tags: if tag not in self.tags: removeTag(remove_diacritics(tag.lower()), "bookmarks", self.id) def addBookmarkComment(self, owner, comment): self.comentarios.append(dict( owner = owner, comment = comment, data_cri = str(datetime.now()) )) self.save() def prepareCommentsToPrint(self, user): for comment in self.comentarios: comment["apagar"] = isAllowedToDeleteComment(user, self.registry_id, comment["owner"]) comment["data_fmt"] = short_datetime(comment["data_cri"]) comment["comment"] = comment["comment"].replace("\r\n", "<br/>") self.comentarios = 
sorted(self.comentarios, key=itemgetter("data_cri"), reverse=True) def deleteBookmarkComment(self, owner, data_cri): for comentario in self.comentarios: if comentario["owner"]==owner and comentario["data_cri"]==data_cri: self.comentarios.remove(comentario) self.save() return True return False def save(self, id=None, db=database.BOOKMARKS): if not self.id and id: self.id = id self.store(db) def retrieve(self, id, db=database.BOOKMARKS): return Bookmarks.load(db, id) def delete(self, db=database.BOOKMARKS): #db.delete(self) del db[self.id]
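The model above depends on CouchDB design-document views (bookmarks/by_registry_id_and_url, bookmarks/count_by_url, and so on) that are defined outside this file. Judging only from how the keys are consumed above, the map/reduce pairs plausibly look like the sketch below; the real design documents are not in the source, so treat this as an inference:

# Hypothetical CouchDB design-document fragments, inferred from the view
# usage in the Bookmarks model above (not the project's actual definitions).
by_registry_id_and_url = {
    "map": """function(doc) {
        emit([doc.registry_id, doc.url, doc._id], doc);
    }"""
}
count_by_url = {
    "map": """function(doc) {
        emit(doc.url, 1);
    }""",
    "reduce": "_sum",
}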
license: gpl-2.0 | hash: -8,946,841,370,132,735,000 | line_mean: 42.007042 | line_max: 163 | alpha_frac: 0.570048 | autogenerated: false

repo: Zincr0/xtweepy | path: setup.py | copies: 1 | size: 1135
# -*- coding=utf-8 -*-
import sys
import os
from setuptools import setup

# Python 2 only: force the default string encoding to UTF-8 so non-ASCII
# metadata survives. reload() and sys.setdefaultencoding() do not exist
# on Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')

# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
files = ['xtweepy/*']


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(
    name='xtweepy',
    version='0.0.1',
    author='Daniel Mondaca Seguel',
    author_email='[email protected]',
    description=('tweepy based library with 1.1 search api support'),
    license='MIT',
    keywords='twitter library',
    url='https://github.com/Nievous',
    packages=['xtweepy'],
    install_requires=['simplejson', 'tweepy'],
    long_description=read('README.txt'),
    package_data={'package': files},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Topic :: Software Development',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
    ],
)
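The reload(sys)/setdefaultencoding dance above is Python 2 only. On Python 3 the same goal, reading a UTF-8 README for long_description, needs nothing more than an explicit encoding; a sketch:

# Python 3 equivalent of the read() helper above, with no global
# default-encoding hack; the file name stays a parameter.
import io
import os

def read_py3(fname):
    path = os.path.join(os.path.dirname(__file__), fname)
    with io.open(path, encoding='utf-8') as f:
        return f.read()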
license: mit | hash: -1,463,559,531,065,777,000 | line_mean: 26.682927 | line_max: 79 | alpha_frac: 0.629956 | autogenerated: false

repo: slackhq/python-slackclient | path: slack_sdk/scim/v1/group.py | copies: 1 | size: 2522
from typing import Optional, List, Union, Dict, Any

from .default_arg import DefaultArg, NotGiven
from .internal_utils import _to_dict_without_not_given, _is_iterable


class GroupMember:
    display: Union[Optional[str], DefaultArg]
    value: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]

    def __init__(
        self,
        *,
        display: Union[Optional[str], DefaultArg] = NotGiven,
        value: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.display = display
        self.value = value
        self.unknown_fields = kwargs

    def to_dict(self):
        return _to_dict_without_not_given(self)


class GroupMeta:
    created: Union[Optional[str], DefaultArg]
    location: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]

    def __init__(
        self,
        *,
        created: Union[Optional[str], DefaultArg] = NotGiven,
        location: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.created = created
        self.location = location
        self.unknown_fields = kwargs

    def to_dict(self):
        return _to_dict_without_not_given(self)


class Group:
    display_name: Union[Optional[str], DefaultArg]
    id: Union[Optional[str], DefaultArg]
    members: Union[Optional[List[GroupMember]], DefaultArg]
    meta: Union[Optional[GroupMeta], DefaultArg]
    schemas: Union[Optional[List[str]], DefaultArg]
    unknown_fields: Dict[str, Any]

    def __init__(
        self,
        *,
        display_name: Union[Optional[str], DefaultArg] = NotGiven,
        id: Union[Optional[str], DefaultArg] = NotGiven,
        members: Union[Optional[List[GroupMember]], DefaultArg] = NotGiven,
        meta: Union[Optional[GroupMeta], DefaultArg] = NotGiven,
        schemas: Union[Optional[List[str]], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.display_name = display_name
        self.id = id
        self.members = (
            [a if isinstance(a, GroupMember) else GroupMember(**a) for a in members]
            if _is_iterable(members)
            else members
        )
        self.meta = (
            GroupMeta(**meta) if meta is not None and isinstance(meta, dict) else meta
        )
        self.schemas = schemas
        self.unknown_fields = kwargs

    def to_dict(self):
        return _to_dict_without_not_given(self)

    def __repr__(self):
        return f"<slack_sdk.scim.{self.__class__.__name__}: {self.to_dict()}>"
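A short usage sketch of the classes above; the field values are illustrative:

# Usage sketch; values are illustrative. Dict members are coerced to
# GroupMember by Group.__init__, and NotGiven fields are dropped by to_dict().
from slack_sdk.scim.v1.group import Group

group = Group(
    display_name="engineering",
    members=[{"display": "Alice", "value": "U123"}],
    schemas=["urn:scim:schemas:core:1.0"],
)
print(group.to_dict())   # id/meta were NotGiven, so they are omitted
print(repr(group))       # <slack_sdk.scim.Group: {...}>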
license: mit | hash: 2,794,789,609,398,700,500 | line_mean: 29.756098 | line_max: 86 | alpha_frac: 0.610626 | autogenerated: false

repo: StratoSource/StratoSource | path: stratosource/management/commands/sfdiff.py | copies: 1 | size: 28907
# Copyright 2010, 2011 Red Hat Inc. # # This file is part of StratoSource. # # StratoSource is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # StratoSource is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied waarranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with StratoSource. If not, see <http://www.gnu.org/licenses/>. # import pytz from django.core.management.base import BaseCommand from django.core.exceptions import ObjectDoesNotExist import os import sys import subprocess import logging from datetime import datetime from django.db import transaction from lxml import etree import stratosource.models __author__ = "masmith" __date__ = "$Jul 26, 2010 2:23:44 PM$" SF_NAMESPACE = '{http://soap.sforce.com/2006/04/metadata}' CODE_BASE = 'unpackaged' documentCache = {} mapCache = {} logger = logging.getLogger('console') class NewObjectException(Exception): pass class DeletedObjectException(Exception): pass ## # [ git utilities ] ## def resetLocalRepo(branch_name): subprocess.check_call(["git", "checkout", branch_name]) # subprocess.check_call(["git","reset","--hard","{0}".format(branch_name)]) def branchExists(branchname): proc = subprocess.Popen(['git', 'branch', '-a'], shell=False, stdout=subprocess.PIPE) input, error = proc.communicate() for br in input.split('\n'): br = br.rstrip() if len(br) > 0 and br[2:] == branchname: return True return False def getCurrentTag(): proc = subprocess.Popen(['git', 'describe'], shell=False, stdout=subprocess.PIPE) input, error = proc.communicate() tag = input.rstrip() return tag def getCurrentBranch(): proc = subprocess.Popen(['git', 'branch'], shell=False, stdout=subprocess.PIPE) input, error = proc.communicate() for br in input.split('\n'): br = br.rstrip() if len(br) > 0 and br[0:2] == "* ": return br[2:] return 'unknown' def verifyGitRepo(): proc = subprocess.Popen(['git', 'status'], shell=False, stderr=subprocess.PIPE) input, error = proc.communicate() if error.find('Not a git repository') > 0: logger.error('Error: Not a git repository') sys.exit(1) def getDiffNames(left, right): logger.info('git diff --name-only %s %s' % (left, right)) proc = subprocess.Popen(['git', 'diff', '--name-only', left, right], shell=False, stdout=subprocess.PIPE) input, error = proc.communicate() changedList = [] all = 0 map = {} for entry in input.split('\n'): all = all + 1 entry = entry.rstrip() if entry == '.gitignore': continue if len(entry) > 1 and not entry.endswith('.xml'): #logger.debug(' entry={0}'.format(entry)) parts = entry.split('/') type = parts[1] name = '/'.join(parts[2:]) # put trailing components back together to support folder-based assets # a, b = os.path.split(entry) if not map.has_key(type): map[type] = [] map[type].append(name) changedList.append(entry) changedList.sort() return map ## # [ XML parsing and searching ] ## def getElementMap(key): global mapCache if mapCache.has_key(key): return mapCache[key] m = {} mapCache[key] = m return m def getObjectChanges(lkey, lcache, rkey, rcache, objectName, elementName, resolver): global documentCache ldoc = None rdoc = None if documentCache.has_key(lkey + objectName): ldoc = documentCache[lkey + objectName] if 
documentCache.has_key(rkey + objectName): rdoc = documentCache[rkey + objectName] rmap = getElementMap(rkey + objectName + elementName) lmap = getElementMap(lkey + objectName + elementName) if ldoc is None: lobj = lcache.get(objectName) if lobj: ldoc = etree.XML(lobj) if ldoc is None: return None, None documentCache[lkey + objectName] = ldoc if rdoc is None: robj = rcache.get(objectName) if robj: rdoc = etree.XML(robj) if rdoc is None: return None, None documentCache[rkey + objectName] = rdoc if ldoc is None and not rdoc is None: raise NewObjectException() if not ldoc is None and rdoc is None: raise DeletedObjectException() return resolver(ldoc, rdoc, rmap, lmap, elementName) def compareObjectMaps(lmap, rmap): missing = {} updates = {} for lname, lnodestring in lmap.items(): # find the field in the other file if rmap.has_key(lname): rnodestring = rmap[lname] # compare for changes if lnodestring != rnodestring: updates[lname] = rnodestring else: # field missing on right, must be deleted missing[lname] = lnodestring return updates, missing def populateElementMap(doc, nodeName, elementName, amap): if doc != None and len(amap) == 0: children = doc.findall(nodeName) for child in children: node = child.find(elementName) amap[node.text] = etree.tostring(child) def objectChangeResolver(ldoc, rdoc, rmap, lmap, elementName): nodeName = SF_NAMESPACE + elementName nameKey = SF_NAMESPACE + 'fullName' # # build a map of custom label names and xml fragment for faster lookups # populateElementMap(rdoc, nodeName, nameKey, rmap) populateElementMap(ldoc, nodeName, nameKey, lmap) return compareObjectMaps(lmap, rmap) def objectTranslationChangeResolver(ldoc, rdoc, rmap, lmap, elementName): nodeName = SF_NAMESPACE + elementName nameKey = SF_NAMESPACE + 'name' # # build a map of custom label names and xml fragment for faster lookups # populateElementMap(rdoc, nodeName, nameKey, rmap) populateElementMap(ldoc, nodeName, nameKey, lmap) return compareObjectMaps(lmap, rmap) def translationChangeResolver(ldoc, rdoc, rmap, lmap, elementName): nodeName = SF_NAMESPACE + elementName nameKey = SF_NAMESPACE + 'name' # # build a map of custom label names and xml fragment for faster lookups # populateElementMap(rdoc, nodeName, nameKey, rmap) populateElementMap(ldoc, nodeName, nameKey, lmap) return compareObjectMaps(lmap, rmap) def getAllFullNames(doc, elementName, tagname='fullName'): fqfullname = SF_NAMESPACE + tagname nodes = doc.findall(SF_NAMESPACE + elementName) if nodes: allnames = [] for node in nodes: el = node.find(fqfullname) if el is not None: allnames.append(el.text) # allnames = [node.find(fqfullname).text for node in nodes] return allnames else: logger.debug('No nodes found for %s' % elementName) return [] def getAllObjectChanges(objectName, lFileCache, rFileCache, elementname, resolver): updates, deletes = getObjectChanges('l', lFileCache, 'r', rFileCache, objectName, elementname, resolver) rupdates, inserts = getObjectChanges('r', rFileCache, 'l', lFileCache, objectName, elementname, resolver) return inserts, updates, deletes ## # [ database and caching ] ## def createFileCache(hash, map, branch_name): subprocess.check_call(["git", "checkout", branch_name]) try: tmpbranch = branch_name + '_sfdiff' if branchExists(tmpbranch): subprocess.check_call(["git", "branch", "-D", tmpbranch]) subprocess.check_call(["git", "checkout", "-b", tmpbranch, hash]) # os.system('git reset --hard {0}'.format(hash)) cache = {} for type, list in map.items(): if type in ('objects', 'labels', 'translations', 'objectTranslations', 
'workflows'): for objectName in list: try: path = os.path.join(CODE_BASE, type, objectName) f = open(path) cache[objectName] = f.read() f.close() except IOError: # print '** not able to load ' + path pass # caused by a new file added, not present on current branch else: for objectName in list: if os.path.isfile(os.path.join(CODE_BASE, type, objectName)): cache[objectName] = None return cache finally: subprocess.check_call(["git", "checkout", branch_name]) def getDeployable(branch, objectName, objectType, el_type, el_name, el_subtype=None): try: if el_type and el_name: deployable = stratosource.models.DeployableObject.objects.get(branch=branch, type__exact=objectType, filename__exact=objectName, el_type__exact=el_type, el_name__exact=el_name, el_subtype__exact=el_subtype, status__exact='a') else: deployable = stratosource.models.DeployableObject.objects.get(branch=branch, type__exact=objectType, filename__exact=objectName, status__exact='a') except ObjectDoesNotExist: deployable = stratosource.models.DeployableObject() deployable.type = objectType deployable.filename = objectName deployable.branch = branch deployable.el_type = el_type deployable.el_name = el_name deployable.el_subtype = el_subtype deployable.save() return deployable def insert_deltas(commit, objectName, type, items, delta_type, el_type, el_subtype=None): # global mqclient for item in items: deployable = getDeployable(commit.branch, objectName, type, el_type, item, el_subtype) delta = stratosource.models.Delta() delta.user_change = get_last_change(objectName, el_type, item) delta.object = deployable delta.commit = commit delta.delta_type = delta_type delta.save() # if not delta.user_change is None: # mqclient.publish({'user': delta.user_change.sfuser.name.encode('ascii', 'ignore'), 'commit': commit.hash, # 'dtype': delta_type, 'type': type, 'item': item, # 'last_update': delta.user_change.last_update.isoformat()}) def get_last_change(objectName, el_type, el_name): fullName = objectName if el_type == 'labels': return None # not doing audit tracking for labels if el_type == 'fields': el_type = 'object' parts = objectName.split('.') if len(parts) > 1 and not el_type is None: parts[0] = el_type + ':' + parts[0] if el_name: parts[0] += '.' 
+ el_name fullName = parts[0] # '.'.join(parts) # print ' fullName=%s' % fullName lastchangelist = list(stratosource.models.UserChange.objects.filter(branch=working_branch, apex_name=fullName).order_by('-last_update')) if len(lastchangelist) > 0: return lastchangelist[0] return None def getDeployableTranslation(branch, label, locale): try: deployableT = stratosource.models.DeployableTranslation.objects.get(branch=branch, label=label, locale=locale, status__exact='a') except ObjectDoesNotExist: deployableT = stratosource.models.DeployableTranslation() deployableT.label = label deployableT.locale = locale deployableT.branch = branch deployableT.save() return deployableT def insertTranslationDeltas(commit, items, delta_type, locale): for item in items: deployableT = getDeployableTranslation(commit.branch, item, locale) delta = stratosource.models.TranslationDelta() delta.translation = deployableT delta.commit = commit delta.delta_type = delta_type delta.save() ## # [ objects ] ## def analyze_object_changes(list, lFileCache, rFileCache, elementname, commit): global documentCache changesFound = False for objectName in list: logger.debug('analyzing %s > %s' % (objectName, elementname)) try: inserts, updates, deletes = getAllObjectChanges(objectName, lFileCache, rFileCache, elementname, objectChangeResolver) if (inserts and len(inserts)) or (updates and len(updates)) or (deletes and len(deletes)): if inserts: insert_deltas(commit, objectName, 'objects', inserts.keys(), 'a', elementname) if updates: insert_deltas(commit, objectName, 'objects', updates.keys(), 'u', elementname) if deletes: insert_deltas(commit, objectName, 'objects', deletes.keys(), 'd', elementname) changesFound = True except NewObjectException: logger.debug('New object %s' % objectName) doc = documentCache['r' + objectName] insert_deltas(commit, objectName, 'objects', getAllFullNames(doc, elementname), 'a', elementname) except DeletedObjectException: doc = documentCache['l' + objectName] insert_deltas(commit, objectName, 'objects', getAllFullNames(doc, elementname), 'd', elementname) if not changesFound: pass ## # [ objectTranslation ] ## def analyze_object_translation_changes(list, lFileCache, rFileCache, elementname, commit): global documentCache changesFound = False for objectName in list: try: inserts, updates, deletes = getAllObjectChanges(objectName, lFileCache, rFileCache, elementname, objectTranslationChangeResolver) if (inserts and len(inserts)) or (updates and len(updates)) or (deletes and len(deletes)): if inserts: insert_deltas(commit, objectName, 'objectTranslations', inserts.keys(), 'a', elementname) if updates: insert_deltas(commit, objectName, 'objectTranslations', updates.keys(), 'u', elementname) if deletes: insert_deltas(commit, objectName, 'objectTranslations', deletes.keys(), 'd', elementname) changesFound = True except NewObjectException: doc = documentCache['r' + objectName] insert_deltas(commit, objectName, 'objectTranslations', getAllFullNames(doc, elementname), 'a', elementname) return except DeletedObjectException: doc = documentCache['l' + objectName] insert_deltas(commit, objectName, 'objectTranslations', getAllFullNames(doc, elementname), 'd', elementname) return if not changesFound: pass ## # [ labels ] ## def analyze_label_changes(list, lFileCache, rFileCache, elementname, commit): global documentCache for objectName in list: try: inserts, updates, deletes = getAllObjectChanges(objectName, lFileCache, rFileCache, elementname, objectChangeResolver) if (inserts and len(inserts)) or (updates and 
len(updates)) or (deletes and len(deletes)): if inserts: insert_deltas(commit, objectName, 'labels', inserts.keys(), 'a', elementname) if updates: insert_deltas(commit, objectName, 'labels', updates.keys(), 'u', elementname) if deletes: insert_deltas(commit, objectName, 'labels', deletes.keys(), 'd', elementname) except NewObjectException: doc = documentCache['r' + objectName] insert_deltas(commit, objectName, 'labels', getAllFullNames(doc, elementname), 'a', elementname) except DeletedObjectException: doc = documentCache['l' + objectName] insert_deltas(commit, objectName, 'labels', getAllFullNames(doc, elementname), 'd', elementname) ## # [ translations ] ## def analyze_translation_changes(list, lFileCache, rFileCache, commit): global documentCache for objectName in list: locale = objectName[:-12] # the locale is part of the object name try: inserts, updates, deletes = getAllObjectChanges(objectName, lFileCache, rFileCache, 'customLabels', translationChangeResolver) if (inserts and len(inserts)) or (updates and len(updates)) or (deletes and len(deletes)): if inserts: insertTranslationDeltas(commit, inserts.keys(), 'a', locale) if updates: insertTranslationDeltas(commit, updates.keys(), 'u', locale) if deletes: insertTranslationDeltas(commit, deletes.keys(), 'd', locale) except NewObjectException: doc = documentCache['r' + objectName] insert_deltas(commit, objectName, 'translations', getAllFullNames(doc, 'customLabels', tagname='name'), 'a', 'customLabels') except DeletedObjectException: doc = documentCache['l' + objectName] insert_deltas(commit, objectName, 'translations', getAllFullNames(doc, 'customLabels', tagname='name'), 'd', 'customLabels') ## # [ record types/picklists ] ## def recTypePicklistResolver(ldoc, rdoc, rmap, map, elementName): missing = {} updates = {} inserts = {} if ldoc is None: lnodes = [] else: lnodes = ldoc.findall(SF_NAMESPACE + elementName) if rdoc is None: rnodes = [] else: rnodes = rdoc.findall(SF_NAMESPACE + elementName) # # put the left and right lists in a hash for easier analysis # fqfullname = SF_NAMESPACE + 'fullName' fqpicklistvalues = SF_NAMESPACE + 'picklistValues' fqpicklist = SF_NAMESPACE + 'picklist' llists = {} for lnode in lnodes: fullName = lnode.find(fqfullname).text lpicklists = lnode.findall(fqpicklistvalues) for lpicklist in lpicklists: lpicklist_name = lpicklist.find(fqpicklist).text llists[fullName + ':' + lpicklist_name] = etree.tostring(lpicklist) rlists = {} for rnode in rnodes: fullName = rnode.find(fqfullname).text rpicklists = rnode.findall(fqpicklistvalues) for rpicklist in rpicklists: rpicklist_name = rpicklist.find(fqpicklist).text rlists[fullName + ':' + rpicklist_name] = etree.tostring(rpicklist) # # go down the left side lookup for updates and deletes # for lrectype_name in llists.keys(): if rlists.has_key(lrectype_name): if rlists[lrectype_name] != llists[lrectype_name]: updates[lrectype_name] = rlists[lrectype_name] else: missing[lrectype_name] = llists[lrectype_name] # # go down the right side looking for additions # for rrectype_name in rlists.keys(): if not llists.has_key(rrectype_name): inserts[rrectype_name] = rlists[rrectype_name] return inserts, updates, missing def analyze_recordtype_picklist_changes(list, lFileCache, rFileCache, commit): global documentCache for objectName in list: try: inserts, updates, deletes = getObjectChanges('l', lFileCache, 'r', rFileCache, objectName, 'recordTypes', recTypePicklistResolver) if inserts: insert_deltas(commit, objectName, 'objects', inserts, 'a', 'recordTypes', 'picklists') if 
updates: insert_deltas(commit, objectName, 'objects', updates, 'u', 'recordTypes', 'picklists') if deletes: insert_deltas(commit, objectName, 'objects', deletes, 'd', 'recordTypes', 'picklists') except NewObjectException: doc = documentCache['r' + objectName] insert_deltas(commit, objectName, 'objects', getAllFullNames(doc, 'recordTypes', tagname='name'), 'a', 'recordTypes') except DeletedObjectException: doc = documentCache['l' + objectName] insert_deltas(commit, objectName, 'objects', getAllFullNames(doc, 'recordTypes', tagname='name'), 'd', 'recordTypes') ## # [ workflows ] ## def analyze_workflow_changes(list, lFileCache, rFileCache, elementname, commit): global documentCache changesFound = False for objectName in list: try: # print' object name is', objectName, 'element name is', elementname inserts, updates, deletes = getAllObjectChanges(objectName, lFileCache, rFileCache, elementname, objectChangeResolver) if (inserts and len(inserts)) or (updates and len(updates)) or (deletes and len(deletes)): if inserts: insert_deltas(commit, objectName, 'workflows', inserts.keys(), 'a', elementname) if updates: insert_deltas(commit, objectName, 'workflows', updates.keys(), 'u', elementname) if deletes: insert_deltas(commit, objectName, 'workflows', deletes.keys(), 'd', elementname) changesFound = True except NewObjectException: doc = documentCache['r' + objectName] insert_deltas(commit, objectName, 'workflows', getAllFullNames(doc, elementname), 'a', elementname) except DeletedObjectException: doc = documentCache['l' + objectName] insert_deltas(commit, objectName, 'workflows', getAllFullNames(doc, elementname), 'd', elementname) if not changesFound: pass @transaction.atomic def analyze_commit(branch, commit): global documentCache global mapCache global working_branch global change_batch working_branch = branch logger.info("Analyzing commit %s" % commit.hash) documentCache = {} # do not want to accumulate this stuff over multiple iterations mapCache = {} change_batch = None # clean up deltas in case we are rerunning stratosource.models.Delta.objects.filter(commit=commit).delete() stratosource.models.TranslationDelta.objects.filter(commit=commit).delete() lhash = commit.prev_hash rhash = commit.hash ## # call "git diff" to get a list of changed files ## omap = getDiffNames(lhash, rhash) ## # load all changed files from each hash into a map for performance (config only) ## lFileCache = createFileCache(lhash, omap, branch.name) rFileCache = createFileCache(rhash, omap, branch.name) for otype, olist in omap.items(): # logger.debug("Type: %s" % otype) if otype == 'objects': analyze_object_changes(olist, lFileCache, rFileCache, 'fields', commit) analyze_object_changes(olist, lFileCache, rFileCache, 'fieldSets', commit) analyze_object_changes(olist, lFileCache, rFileCache, 'validationRules', commit) analyze_object_changes(olist, lFileCache, rFileCache, 'webLinks', commit) analyze_object_changes(olist, lFileCache, rFileCache, 'recordTypes', commit) analyze_recordtype_picklist_changes(olist, lFileCache, rFileCache, commit) analyze_object_changes(olist, lFileCache, rFileCache, 'namedFilters', commit) analyze_object_changes(olist, lFileCache, rFileCache, 'listViews', commit) # misc single-node elements # analyzeObjectChanges(list, lFileCache, rFileCache, 'label', commit) # analyzeObjectChanges(list, lFileCache, rFileCache, 'nameField', commit, nameKey='label') # analyzeObjectChanges(list, lFileCache, rFileCache, 'pluralLabel', commit) # analyzeObjectChanges(list, lFileCache, rFileCache, 'searchLayouts', 
commit) # analyzeObjectChanges(list, lFileCache, rFileCache, 'sharingModel', commit) elif otype == 'translations': analyze_translation_changes(olist, lFileCache, rFileCache, commit) elif otype == 'workflows': analyze_workflow_changes(olist, lFileCache, rFileCache, 'alerts', commit) analyze_workflow_changes(olist, lFileCache, rFileCache, 'fieldUpdates', commit) analyze_workflow_changes(olist, lFileCache, rFileCache, 'rules', commit) analyze_workflow_changes(olist, lFileCache, rFileCache, 'tasks', commit) elif otype == 'objectTranslations': analyze_object_translation_changes(olist, lFileCache, rFileCache, 'fields', commit) analyze_object_translation_changes(olist, lFileCache, rFileCache, 'validationRules', commit) analyze_object_translation_changes(olist, lFileCache, rFileCache, 'webLinks', commit) elif otype == 'labels': analyze_label_changes(olist, lFileCache, rFileCache, 'labels', commit) else: for listitem in olist: delta_type = None if lFileCache.has_key(listitem) and rFileCache.has_key(listitem) == False: delta_type = 'd' elif lFileCache.has_key(listitem) == False: delta_type = 'a' else: delta_type = 'u' delta = stratosource.models.Delta() delta.object = getDeployable(branch, listitem, otype, None, None, None) delta.commit = commit delta.user_change = get_last_change(listitem, None, None) if delta.user_change is None: #logger.debug('** Audit record not found for %s' % listitem) pass else: # print 'audit record found!' pass delta.delta_type = delta_type delta.save() logger.debug(' added delta for {0}'.format(listitem)) # if not delta.user_change is None: # print 'user %s' % (delta.user_change.sfuser.name,) # print 'commit %s' % (commit,) # print 'dtype %s' % (delta_type,) # print 'otype %s' % (otype,) # print 'item %s' % (listitem,) # mqclient.publish( # {'user': delta.user_change.sfuser.name.encode('ascii', 'ignore'), 'commit': commit.hash, # 'dtype': delta_type, 'type': otype, 'item': listitem, # 'last_update': delta.user_change.last_update.isoformat()}) commit.status = 'c' commit.save() def generate_analysis(branch, start_date): global documentCache commits = stratosource.models.Commit.objects.filter(branch=branch, status__exact='p', prev_hash__isnull=False, date_added__gte=start_date).order_by('-date_added') for commit in commits: if commit.prev_hash is None or len(commit.prev_hash) == 0: continue analyze_commit(branch, commit) # msg = AdminMessage() # msg.subject = branch.name + ' commits processed' # msg.body = '%d %s commits were processed on %s' % (len(commits), branch.name, str(datetime.now())) # msg.sender = 'sfdiff' # msg.save() ## # [ Entry point ] ## class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('repo', help='repository name') parser.add_argument('branch', help='branch name') def handle(self, *args, **options): global documentCache repo = stratosource.models.Repo.objects.get(name__exact=options['repo']) branch = stratosource.models.Branch.objects.get(repo=repo, name__exact=options['branch']) # mqclient = MQClient(exch='delta') # if len(args) == 3: # start_date = datetime.strptime(args[2], '%m-%d-%Y') # else: start_date = datetime(2000, 1, 1, 0, 0) #, tzinfo=pytz.utc) os.chdir(repo.location) ## # some basic housekeeping ## resetLocalRepo(branch.name) verifyGitRepo() generate_analysis(branch, start_date) documentCache = {} # just in case running stateful by django middleware, clear out between calls # try to leave repo in a good state resetLocalRepo(branch.name)
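sfdiff.py above is Python 2 throughout (dict.has_key, str pipes from Popen). As one hedged illustration, getDiffNames' subprocess handling ports to Python 3 as follows, keeping the same filtering logic:

# Hedged Python 3 port of getDiffNames' subprocess handling (same filtering
# logic as the original; 'left'/'right' are commit hashes, as above).
import subprocess

def get_diff_names_py3(left, right):
    out = subprocess.run(['git', 'diff', '--name-only', left, right],
                         capture_output=True, text=True, check=True).stdout
    changed = {}
    for entry in (line.rstrip() for line in out.split('\n')):
        if not entry or entry == '.gitignore' or entry.endswith('.xml'):
            continue
        parts = entry.split('/')
        if len(parts) > 2:
            # parts[1] is the metadata type; the rest is a folder-aware name
            changed.setdefault(parts[1], []).append('/'.join(parts[2:]))
    return changed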
license: gpl-3.0 | hash: 5,506,570,039,653,036,000 | line_mean: 37.594126 | line_max: 140 | alpha_frac: 0.613069 | autogenerated: false

repo: ethertricity/bluesky | path: bluesky/traffic/adsbmodel.py | copies: 1 | size: 1990
""" ADS-B model. Implements real-life limitations of ADS-B communication.""" import numpy as np import bluesky as bs from bluesky.tools.aero import ft from bluesky.tools.trafficarrays import TrafficArrays, RegisterElementParameters class ADSB(TrafficArrays): """ ADS-B model. Implements real-life limitations of ADS-B communication.""" def __init__(self): super(ADSB, self).__init__() # From here, define object arrays with RegisterElementParameters(self): # Most recent broadcast data self.lastupdate = np.array([]) self.lat = np.array([]) self.lon = np.array([]) self.alt = np.array([]) self.trk = np.array([]) self.tas = np.array([]) self.gs = np.array([]) self.vs = np.array([]) self.SetNoise(False) def SetNoise(self, n): self.transnoise = n self.truncated = n self.transerror = [1, 100, 100 * ft] # [degree,m,m] standard bearing, distance, altitude error self.trunctime = 0 # [s] def create(self, n=1): super(ADSB, self).create(n) self.lastupdate[-n:] = -self.trunctime * np.random.rand(n) self.lat[-n:] = bs.traf.lat[-n:] self.lon[-n:] = bs.traf.lon[-n:] self.alt[-n:] = bs.traf.alt[-n:] self.trk[-n:] = bs.traf.trk[-n:] self.tas[-n:] = bs.traf.tas[-n:] self.gs[-n:] = bs.traf.gs[-n:] def update(self, time): up = np.where(self.lastupdate + self.trunctime < time) self.lat[up] = bs.traf.lat[up] self.lon[up] = bs.traf.lon[up] self.alt[up] = bs.traf.alt[up] self.trk[up] = bs.traf.trk[up] self.tas[up] = bs.traf.tas[up] self.gs[up] = bs.traf.gs[up] self.vs[up] = bs.traf.vs[up] self.lastupdate[up] = self.lastupdate[up] + self.trunctime
license: gpl-3.0 | hash: -2,682,508,057,104,136,700 | line_mean: 35.54717 | line_max: 103 | alpha_frac: 0.532663 | autogenerated: false

repo: AltarBeastiful/rateItSeven | path: rateItSeven/legacy/legacysenscritique.py | copies: 1 | size: 6348
#!/usr/bin/env python # -*- coding: utf-8 -*- # === This file is part of RateItSeven === # # Copyright 2015, Rémi Benoit <[email protected]> # # RateItSeven is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RateItSeven is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with RateItSeven. If not, see <http://www.gnu.org/licenses/>. # import logging from rateItSeven.legacy.movie import Movie from rateItSeven.legacy.senscritiquepages import HomePage, ListCollectionPage, ListPage, \ ListModule from selenium.webdriver import PhantomJS, ActionChains from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from rateItSeven.legacy import sclist from rateItSeven.legacy.sclist import SCList LINUX_USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36" class LegacySensCritique(object): CHANGEPAGE_TIMEOUT = 20 ''' Interact with SensCritique website ''' def __init__(self, login, password, userAgent=LINUX_USER_AGENT): ''' Constructor :param login: :param password: ''' self.login = login self.password = password dcap = dict(DesiredCapabilities.PHANTOMJS) dcap["phantomjs.page.settings.userAgent"] = ( userAgent ) self.driver = PhantomJS(desired_capabilities=dcap) self.driver.set_window_size(1366, 768) def sign_in(self): ''' Sign-in to SensCritique using the given login details :rtype: bool :Return: true if login succeeded, false otherwise ''' self.to(HomePage()) self.page.alreadySuscribed().click() self.page.loginField().send_keys(self.login) self.page.passwordField().send_keys(self.password) self.page.submitLoginButton().click() #TODO changing page so wait or something currentUser = self.page.username(self.CHANGEPAGE_TIMEOUT) if currentUser is not None: self._currentUsername = currentUser.value() logging.warn("Logged in with user " + self._currentUsername) return True else: if self.page.loginError() is not None: logging.error("Couldn't login : " + self.page.loginError().value()) return False def is_logged_in(self): return self.page is not None and self.page.username() is not None def retrieveListById(self, listId): self.to(ListCollectionPage(self._currentUsername)) for l in self.page.lists(): if listId in l.url(): return self.createSCListFromListModule(l) return None def retrieveListByTitle(self, title): self.to(ListCollectionPage(self._currentUsername)) for l in self.page.lists(): if l.title() == title: return self.createSCListFromListModule(l) return None def retrieveMoviesFromList(self, l : SCList): self.to(ListPage(l)) for movie in self.page.movies(): yield movie def createList(self, l : SCList): self.to(ListCollectionPage(self._currentUsername)) self.page.create_list_button().click() self.page.new_list_title().send_keys(l.title()) self.page.film_type_radio().click() self.page.classic_list_radio().click() self.page.public_list_radio().click() self.page.confirm_create_list_button().click() # Change the current page as we are now on the list page self.page = ListPage(l) self.page._driver = self.driver # TODO: fixme, we don't want to use self.to(page) as it would reload the page 
self.page.set_description(l.description()) url = self.driver.current_url l._id = url[url.rfind("/") + 1:] return l def deleteList(self, l : sclist): self.to(ListCollectionPage(self._currentUsername)) for module in self.page.lists(): if l.id() in module.url(): # Alert box will be auto-accepted. Needed as Phantomjs cannot handle them self.driver.execute_script("window.confirm = function(msg) { return true; };") delete_button = module.delete_button() delete_action = ActionChains(self.driver) delete_action.move_to_element(module.title_node()) delete_action.move_to_element(delete_button) delete_action.click(delete_button) delete_action.perform() def addMovie(self, movie: Movie, l : SCList): self.to(ListPage(l)) self.page.query_input().send_keys(movie.title()) add_button = self.page.add_movie_button(0) if add_button is None: return False # Movie already in list if movie.description(): self.page.movie_description_field(0).send_keys(movie.description()) add_button.click() return True def deleteMovies(self, movies_to_delete, l : SCList): self.to(ListPage(l)) for movie in self.page.movies(): try: movies_to_delete.remove(movie.title()) delete = movie.delete_button() delete.click() movie.confirm_delete_button().click() self.page.wait_loading_finished() except Exception as e: logging.error("Fail to delete movie " + movie.title() + ". " + format(e)) return movies_to_delete def to(self, page): page.to(self.driver) self.page = page def createSCListFromListModule(self, module : ListModule): list = sclist.SCList(module.id()) list.setTitle(module.title()) list.setDescription(module.description()) list.setType(None) # TODO: parse the type return list
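A usage sketch of the scraper above; credentials and the list title are placeholders:

# Hedged usage sketch; login details and the list title are placeholders.
from rateItSeven.legacy.legacysenscritique import LegacySensCritique

sc = LegacySensCritique("[email protected]", "secret")
if sc.sign_in():
    watched = sc.retrieveListByTitle("Watched in 2015")   # -> SCList or None
    if watched is not None:
        for movie in sc.retrieveMoviesFromList(watched):
            print(movie.title())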
license: gpl-3.0 | hash: -2,295,932,688,796,886,500 | line_mean: 30.112745 | line_max: 126 | alpha_frac: 0.627225 | autogenerated: false

repo: JasonFruit/doremi | path: doremi/doremi_parser.py | copies: 1 | size: 12775
"""A simple music-representation language suitable for hymn tunes, part-songs, and other brief, vocal-style works. """ # TODO: figure out how to make fermatas in bass staves upside-down in # the template import codecs import copy from parsimonious import Grammar, NodeVisitor from doremi.lilypond import * from doremi.lyric_parser import Lyric, LyricParser class RepeatMarker(object): def __init__(self, text): self.text = text def to_lilypond(self, *args, **kwargs): if self.text == "|:": return r"\repeat volta 2 {" elif self.text == ":|": return r"}" elif self.text == "!": return r"} \alternative { {" elif self.text == "1!": return r"} {" elif self.text == "2!": return r"} }" elif self.text == "|.": return r'\bar "|."' elif self.text == "||": return r'\bar "||"' class Note(object): """Represents a note (or rest) in a musical work, including scale degree, duration, octave, and other information""" def __init__(self, # initialize with empty properties pitch=None, # because they are built on-the-fly duration=None, octave=None, modifiers=list()): self.pitch = pitch self.duration = duration self.octave = octave self.modifiers = modifiers def to_lilypond(self, key, octave_offset = 0): """ Convert to an equivalent Lilypond representation """ # short-circuit if this is a rest if self.pitch == "r": return "%s%s" % (self.pitch, self.duration) pitch = syllable_to_note(self.pitch, key) octave = self.octave + octave_offset + 1 # convert internal octave representation to Lilypond, which # uses c->b offset = key_octave_offset[key.lower()] local_pitch_level = copy.copy(pitch_level) # adjust the local copy of the pitch-level order to go from # la->sol if key is minor if "minor" in key.lower(): for k in local_pitch_level.keys(): local_pitch_level[k] = local_pitch_level[k] + 2 if local_pitch_level[k] > 6: local_pitch_level[k] -= 7 if local_pitch_level[self.pitch] - offset < 0: octave -= 1 elif local_pitch_level[self.pitch] - offset > 6: octave += 1 if octave < 0: octave = "," * abs(octave) else: octave = "'" * octave # start or end slurs (or beams) as indicated by modifiers slur = "" if "slur" in self.modifiers: if self.duration in ["8", "8.", "16"]: slur = "[" else: slur = "(" elif "end slur" in self.modifiers: if self.duration in ["8", "8.", "16"]: slur = "]" else: slur = ")" # ties only ever connect two notes, so need not be explicitly # terminated tie = "" if "tie" in self.modifiers: tie = "~" # add a fermata if "fermata" in self.modifiers: fermata = r"\fermata" else: fermata = "" # assemble and return the Lilypond string return "%s%s%s%s%s%s" % (pitch, octave, self.duration, tie, slur, fermata) class Voice(list): """Represents a named part in a vocal-style composition""" def __init__(self, name="", octave=""): list.__init__(self) self.name = name self.octave = octave # the starting octave for the part def last_note(self): index = -1 try: while type(self[index]) != Note: index -= 1 return self[index] except IndexError: raise IndexError("No previous notes") def to_lilypond(self, time, key, octave_offset=0, shapes=None, template="default"): """A representation of the voice as a Lilypond string""" # association of doremi shape args and Lilypond shape commands shape_dic = {"round": ("", ""), "aikin": (r"\aikenHeads", "Minor"), "sacredharp": (r"\sacredHarpHeads", "Minor"), "southernharmony": (r"\southernHarmonyHeads", "Minor"), "funk": (r"\funkHeads", "Minor"), "walker": (r"\walkerHeads", "Minor")} # build the lilypond shape command if shapes == None: lshapes = "" else: lparts = shape_dic[shapes.lower()] lshapes 
= lparts[0] # there's a different command for minor if "minor" in key: lshapes += lparts[1] tmpl = codecs.open("templates/%s-voice.tmpl" % template, "r", "utf-8").read() return tmpl % {"name": self.name, "key": key.replace(" ", " \\"), # a minor -> a \minor "time": time, "shapes": lshapes, "notes": " ".join( [note.to_lilypond( key, octave_offset=octave_offset) for note in self])} class Tune(list): """Represents a vocal-style tune, e.g. a hymn-tune or partsong""" def __init__(self, title="", scripture="", composer="", key="", time=None, partial=None): self.title = title self.scripture = scripture self.composer = composer self.key = key self.time = time self.partial = partial def to_lilypond(self, key, octave_offset=0, shapes=None, lyric=None, template="default"): """Return a Lilypond version of the tune""" key = key_to_lilypond(key) # represent the partial beginning measure a la Lilypond if # necessary if self.partial: partial = r"\partial %s" % self.partial else: partial = "" # TODO: make this allow other templates ly = codecs.open("templates/%s.tmpl" % template, "r", "utf-8").read() tmpl_data = {"voices": "\n".join( [voice.to_lilypond(self.time, key, octave_offset=octave_offset, shapes=shapes, template=template) for voice in self]), "author": lyric.author, "lyrictitle": lyric.title, "meter": lyric.meter, "title": self.title, "scripture": self.scripture, "composer": self.composer, "partial": partial} for voice in self: tmpl_data["%s_lyrics" % voice.name] = "" for lvoice in lyric.voices: tmpl_data["%s_lyrics" % lvoice.name] = lvoice.to_lilypond() return ly % tmpl_data def get_node_val(node, val_type): """Return the value as a string of a child node of the specified type, or raise ValueError if none exists""" for child in node.children: if child.expr_name == val_type: return child.text.strip('"') raise ValueError("No value of specified type.") def get_string_val(node): """Return the value of a string child node, if exists; otherwise, raise a ValueError""" try: return get_node_val(node, "string") except: raise ValueError("No string value.") class DoremiParser(NodeVisitor): def __init__(self, tune_fn): NodeVisitor.__init__(self) # start with an empty tune, voice, note, and list of modifiers self.tune = Tune() self.voice = Voice() self.note = Note() self.note_modifiers = [] # at the outset, we are not in a voice's content self.in_content = False # set up the actual parser grammar = Grammar(open("doremi-grammar", "r").read()) # read and parse the tune tune_text = codecs.open(tune_fn, "r", "utf-8").read() self.syntax = grammar.parse(tune_text) def convert(self): """Convert the parse tree to the internal music representation""" self.visit(self.syntax) return self.tune # title, composer, key, and partial value can only occur at the # tune level, so they always are added to the tune def visit_title(self, node, vc): self.tune.title = get_string_val(node) def visit_scripture(self, node, vc): self.tune.scripture = get_string_val(node) def visit_composer(self, node, vc): self.tune.composer = get_string_val(node) def visit_key(self, node, vc): text = " ".join([child.text for child in node.children if child.expr_name == "name"]) self.tune.key = text def visit_partial(self, node, vc): self.tune.partial = int(get_node_val(node, "number")) def visit_time(self, node, vc): time = get_node_val(node, "fraction") # if it occurs inside a voice's note array if self.in_content: self.note_modifiers.append(time) else: # otherwise, it's at the tune level self.tune.time = time # octave and voice-name only occur at the 
voice level def visit_octave(self, node, vc): self.voice.octave = int(get_node_val(node, "number")) def visit_voice_name(self, node, vc): self.voice.name = node.children[-1].text # modifiers only occur in a collection of notes, and are stored at # the note level def visit_note_modifier(self, node, vc): self.note_modifiers.append(node.text) def visit_voice(self, node, vc): # a voice is only visited when fully parsed, so the voice is # already fully constructed; add it to the tune and start a # new one self.tune.append(self.voice) self.voice = Voice() def visit_note(self, node, vc): # a note is only visited after its modifiers have been # visited, so we finalize it and add it to the voice here # if there's no duration explicit, it's the same as the # previous note in the same voice if not self.note.duration: self.note.duration = self.voice.last_note().duration self.note.modifiers = self.note_modifiers self.note.pitch = node.text # if there's a previous note, start from its octave; if not, # start from the voice's octave try: self.note.octave = self.voice.last_note().octave except IndexError: self.note.octave = self.voice.octave # alter the octave according to octave modifiers for mod in self.note.modifiers: if mod == "-": self.note.octave -= 1 elif mod == "+": self.note.octave += 1 # if a slur started on the previous note and is not continued # by this one, explicitly end it try: if "slur" in self.voice.last_note().modifiers: if not "slur" in self.note.modifiers: self.note.modifiers.append("end slur") except IndexError: pass # add the note to the voice and start a new one with no # modifiers self.voice.append(self.note) self.note = Note() self.note_modifiers = [] def visit_repeat(self, node, vc): self.voice.append(RepeatMarker(node.text)) def visit_number(self, node, vc): # all numbers except note durations are handled at a higher level if self.in_content: self.note.duration = node.text def generic_visit(self, node, vc): # set whether we're in the note-content of a voice based on # open- and close-brackets if node.text == "[": self.in_content = True elif node.text == "]": self.in_content = False
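# Hedged usage sketch (not part of the original module): how the classes above
# are meant to fit together, assuming a "doremi-grammar" file and the
# "templates/default.tmpl"/"templates/default-voice.tmpl" templates exist in
# the working directory. "hymn.drm"/"hymn.lyr" are hypothetical inputs, and
# LyricParser's convert() is assumed to mirror DoremiParser's.
#
# parser = DoremiParser("hymn.drm")            # parse the doremi source
# tune = parser.convert()                      # walk the tree into a Tune
# lyric = LyricParser("hymn.lyr").convert()    # assumed API, mirroring DoremiParser
# print(tune.to_lilypond(tune.key, shapes="aikin", lyric=lyric))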
mit
-1,205,490,265,218,397,200
33.341398
77
0.513503
false
mmcardle/MServe
django-mserve/rassc/tasks.py
1
4340
########################################################################
#
# University of Southampton IT Innovation Centre, 2012
#
# Copyright in this library belongs to the University of Southampton
# University Road, Highfield, Southampton, UK, SO17 1BJ
#
# This software may not be used, sold, licensed, transferred, copied
# or reproduced in whole or in part in any manner or form or in or
# on any media by any person other than in accordance with the terms
# of the Licence Agreement supplied with the software, or otherwise
# without the prior written consent of the copyright owners.
#
# This software is distributed WITHOUT ANY WARRANTY, without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE, except where stated in the Licence Agreement supplied with
# the software.
#
# Created By : Mark McArdle
# Created Date : 2012-01-06
# Created for Project : RASCC
#
########################################################################
import logging
import subprocess
import tempfile
import os
import os.path
import shutil
import PythonMagick
import settings as settings

from celery.task import task
from dataservice.tasks import _get_mfile
from dataservice.tasks import _save_joboutput


@task(default_retry_delay=5, max_retries=1)
def dumbtask(inputs, outputs, options={}, callbacks=[]):
    logging.info("Processing dumb task")
    try:
        mfileid = inputs[0]
        filepath = _get_mfile(mfileid)
        toutfile = tempfile.NamedTemporaryFile(delete=False, suffix=".txt")
        joboutput = outputs[0]
        # write the word count into the temp file; the original passed the
        # temp file to wc as a second input, leaving the job output empty
        retcode = subprocess.call(["wc", filepath], stdout=toutfile)
        _save_joboutput(joboutput, toutfile)
        return {"success": True, "message": "Dumb task successful"}
    except Exception as e:
        logging.info("Error with dumb task %s" % e)
        raise e


@task(default_retry_delay=15, max_retries=3)
def swirl(inputs, outputs, options={}, callbacks=[]):
    try:
        mfileid = inputs[0]
        path = _get_mfile(mfileid)
        # the original logged the builtin `input` here instead of the mfile id
        logging.info("Swirling image for %s (%s)" % (mfileid, path))
        img = PythonMagick.Image()
        img.read(str(path))
        img.swirl(90)
        # Create swirled image as job output
        toutfile = tempfile.NamedTemporaryFile(delete=False, suffix=".jpg")
        img.write(toutfile.name)
        joboutput = outputs[0]
        _save_joboutput(joboutput, toutfile)
        return {"success": True, "message": "Swirl successful"}
    except Exception as e:
        logging.info("Error with swirl %s" % e)


@task(default_retry_delay=15, max_retries=3)
def imodel(inputs, outputs, options={}, callbacks=[]):
    mfileid = inputs[0]
    path = _get_mfile(mfileid)
    logging.info("CWD: %s" % os.getcwd())
    logging.info("Running imodel on %s" % (path))

    # Run iModel in a temporary directory
    logging.info(os.environ)
    #imodel_home = "/opt/iModel-1.0-beta-3-SNAPSHOT"
    #imodel_home = os.environ["IMODEL_HOME"]
    imodel_home = settings.IMODEL_HOME
    logging.info("Running iModel from %s" % imodel_home)
    (mfile_dir, mfile_name) = os.path.split(path)

    # XXX: configuration.txt must be in CWD
    # XXX: Runtime arguments should not be provided in a file
    tempdir = tempfile.mkdtemp()
    logging.info("iModel temp dir: %s" % (tempdir))
    shutil.copy("imodel/configuration.txt", tempdir)
    shutil.copy(path, tempdir)
    p = subprocess.Popen(["java", "-cp",
                          imodel_home + ":" + imodel_home + "/lib/*:" + imodel_home + "/bin",
                          "uk.ac.soton.itinnovation.prestoprime.imodel.batch.start.StartArchiveSystemModel",
                          mfile_name],
                         cwd=tempdir, stdout=subprocess.PIPE)

    # save stdout
    stdoutfile = open(tempdir + "/stdout", 'w')
    logging.info("Temp file for stdout: %s" % (stdoutfile.name))
    (stdout, stderr) = p.communicate()
    stdoutfile.write(stdout)
    stdoutfile.close()
    _save_joboutput(outputs[1], stdoutfile)

    # Process results
    import sys
    sys.path.append("imodel")
    import parseimodel
    processedresultsfilename = tempdir + "/data.csv"
    parseimodel.parse(tempdir + "/outputSystemPerformance.log", processedresultsfilename)
    joboutput = outputs[0]
    processedresultsfile = open(processedresultsfilename, 'r')
    _save_joboutput(joboutput, processedresultsfile)
    return {"success": True, "message": "iModel simulation successful"}
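# Hedged usage sketch (not part of the original file): these are Celery tasks,
# so they are normally queued rather than called directly. "mfileid" and
# "joboutput" stand in for valid MServe identifiers here.
#
# from rassc.tasks import swirl
# result = swirl.delay([mfileid], [joboutput])   # queue the task
# print result.get()   # {'success': True, 'message': 'Swirl successful'}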
lgpl-2.1
1,262,400,644,338,189,000
36.094017
241
0.666359
false
alphagov/digitalmarketplace-api
migrations/versions/320_drop_selection_answers.py
1
1121
"""Drop selection_answers table Revision ID: 320_drop_selection_answers Revises: 310_rename_selection_answers Create Date: 2015-10-14 10:52:26.557319 """ # revision identifiers, used by Alembic. revision = '320_drop_selection_answers' down_revision = '310_rename_selection_answers' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(): op.execute('DROP TABLE selection_answers') def downgrade(): op.create_table( 'selection_answers', sa.Column('supplier_id', sa.Integer(), nullable=False), sa.Column('framework_id', sa.Integer(), nullable=False), sa.Column('question_answers', postgresql.JSON(), nullable=True), sa.ForeignKeyConstraint(['framework_id'], ['frameworks.id'], ), sa.ForeignKeyConstraint(['supplier_id'], ['suppliers.supplier_id'], ), sa.PrimaryKeyConstraint('supplier_id', 'framework_id') ) op.execute(""" INSERT INTO selection_answers(supplier_id, framework_id, question_answers) SELECT supplier_id, framework_id, declaration FROM supplier_frameworks """)
mit
-1,919,669,192,622,042,400
31.028571
82
0.697591
false
slightlynybbled/tk_tools
tk_tools/tooltips.py
1
1552
import tkinter as tk


class ToolTip(object):
    """
    Add a tooltip to any widget.::

        entry = tk.Entry(root)
        entry.grid()

        # creates a tooltip
        tk_tools.ToolTip(entry, 'enter a value between 1 and 10')

    :param widget: the widget on which to hover
    :param text: the text to display
    :param time: the time to display the text, in milliseconds
    """
    def __init__(self, widget, text: str = 'widget info', time: int = 4000):
        self._widget = widget
        self._text = text
        self._time = time

        # bind the callable itself; the original bound the *result* of
        # calling self._enter(), which showed the tooltip immediately
        self._widget.bind("<Enter>",
                          lambda _: self._widget.after(500, self._enter))
        self._widget.bind("<Leave>", self._close)

        self._tw = None

    def _enter(self, event=None):
        x, y, cx, cy = self._widget.bbox("insert")
        x += self._widget.winfo_rootx() + 25
        y += self._widget.winfo_rooty() + 20

        # creates a toplevel window
        self._tw = tk.Toplevel(self._widget)

        # Leaves only the label and removes the app window
        self._tw.wm_overrideredirect(True)
        self._tw.wm_geometry("+%d+%d" % (x, y))

        label = tk.Label(self._tw, text=self._text, justify='left',
                         background='#FFFFDD', relief='solid', borderwidth=1,
                         font=("times", "8", "normal"))
        label.pack(ipadx=1)

        if self._time:
            self._tw.after(self._time, self._tw.destroy)

    def _close(self, event=None):
        if self._tw:
            self._tw.destroy()
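# Minimal usage sketch (not part of the original module), following the
# docstring above; assumes a display is available for tkinter.
root = tk.Tk()
entry = tk.Entry(root)
entry.grid()
ToolTip(entry, 'enter a value between 1 and 10', time=2000)
root.mainloop()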
mit
6,823,558,609,957,457,000
28.846154
77
0.544459
false
SchrodingersGat/kicad-footprint-generator
KicadModTree/nodes/base/Arc.py
1
3826
# KicadModTree is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KicadModTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
#
# (C) 2016 by Thomas Pointhuber, <[email protected]>

from KicadModTree.Vector import *
from KicadModTree.nodes.Node import Node
import math


class Arc(Node):
    r"""Add an Arc to the render tree

    :param \**kwargs:
        See below

    :Keyword Arguments:
        * *center* (``Vector2D``) -- center of arc
        * *start* (``Vector2D``) -- start point of arc
        * *angle* (``float``) -- angle of arc
        * *layer* (``str``) -- layer on which the arc is drawn (default: 'F.SilkS')
        * *width* (``float``) -- width of the arc line (default: None, which means auto detection)

    :Example:

    >>> from KicadModTree import *
    >>> Arc(center=[0, 0], start=[-1, 0], angle=180, layer='F.SilkS')
    """

    def __init__(self, **kwargs):
        Node.__init__(self)
        self.center_pos = Vector2D(kwargs['center'])
        self.start_pos = Vector2D(kwargs['start'])
        self.angle = kwargs['angle']

        self.layer = kwargs.get('layer', 'F.SilkS')
        self.width = kwargs.get('width')

    def calculateBoundingBox(self):
        # TODO: finish implementation
        # (the original compared start_pos.x against the end position's y
        # coordinate when computing min_y/max_y; use the y coordinates)
        min_x = min(self.start_pos.x, self._calculateEndPos().x)
        min_y = min(self.start_pos.y, self._calculateEndPos().y)
        max_x = max(self.start_pos.x, self._calculateEndPos().x)
        max_y = max(self.start_pos.y, self._calculateEndPos().y)

        '''
        for angle in range(4):
            float_angle = angle * math.pi/2.
            start_angle = _calculateStartAngle(self)
            end_angle = start_angle + math.radians(self.angle)

            # TODO: +- pi border
            if float_angle < start_angle:
                continue
            if float_angle > end_angle:
                continue

            print("TODO: add angle side: {1}".format(float_angle))
        '''

        return Node.calculateBoundingBox({'min': Vector2D((min_x, min_y)),
                                          'max': Vector2D((max_x, max_y))})

    def _calculateEndPos(self):
        # (renamed from the original's misspelled _calulateEndPos)
        radius = self._calculateRadius()
        angle = self._calculateStartAngle() + math.radians(self.angle)

        return Vector2D(math.sin(angle)*radius, math.cos(angle)*radius)

    def _calculateRadius(self):
        x_size = self.start_pos.x - self.center_pos.x
        y_size = self.start_pos.y - self.center_pos.y

        return math.sqrt(math.pow(x_size, 2) + math.pow(y_size, 2))

    def _calculateStartAngle(self):
        x_size = self.start_pos.x - self.center_pos.x
        y_size = self.start_pos.y - self.center_pos.y

        return math.atan2(y_size, x_size)

    def _getRenderTreeText(self):
        render_strings = ['fp_arc']
        render_strings.append(self.center_pos.render('(center {x} {y})'))
        render_strings.append(self.start_pos.render('(start {x} {y})'))
        render_strings.append('(angle {angle})'.format(angle=self.angle))
        render_strings.append('(layer {layer})'.format(layer=self.layer))
        render_strings.append('(width {width})'.format(width=self.width))

        render_text = Node._getRenderTreeText(self)
        render_text += ' ({})'.format(' '.join(render_strings))

        return render_text
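# Hedged usage sketch (not part of the original module): a semicircle on the
# front silkscreen, as in the docstring's example; requires KicadModTree and a
# render tree for _getRenderTreeText() to produce the full fp_arc node.
#
# arc = Arc(center=[0, 0], start=[-1, 0], angle=180, layer='F.SilkS', width=0.12)
# end = arc._calculateEndPos()   # end point implied by start + angle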
gpl-3.0
-2,313,590,398,890,318,300
34.100917
108
0.611343
false
dcmease/ChessMoves
chessboard.py
1
18072
""" chessboard.py ~~~~~~~~~~~~~ This class will hold the state for a chessboard """ # Imports import sys # Defines EMPTY_SQUARE = " " WHITE_KING = "♔" WHITE_QUEEN = "♕" WHITE_BISHOP = "♗" WHITE_KNIGHT = "♘" WHITE_ROOK = "♖" WHITE_PAWN = "♙" BLACK_KING = "♚" BLACK_QUEEN = "♛" BLACK_BISHOP = "♝" BLACK_KNIGHT = "♞" BLACK_ROOK = "♜" BLACK_PAWN = "♟" WHITE_PIECES = [WHITE_KING, WHITE_QUEEN, WHITE_BISHOP, WHITE_KNIGHT, WHITE_ROOK, WHITE_PAWN] BLACK_PIECES = [BLACK_KING, BLACK_QUEEN, BLACK_BISHOP, BLACK_KNIGHT, BLACK_ROOK, BLACK_PAWN] ALL_PIECES = [EMPTY_SQUARE] + WHITE_PIECES + BLACK_PIECES class Chessboard(): # Constructor def __init__(self): # Create an empty board self.__board = [[EMPTY_SQUARE for i in range(8)] for i in range(8)] # Default player turn to white self.__player = "white" # Print out current state of the board def debug(self): for row in range(7, -1, -1): print(row+1, end=" ") for col in range(8): print(self.__board[row][col], end=" ") print("") print(" a b c d e f g h") # Update current player def set_current_player(self, player): if player != "white" and player != "black": raise Exception('Unknown player: %s' % player) self.__player = player # Add a new piece to the board def __add_piece(self, type, loc): # If location is out of range if (loc[0] < 0 or loc[0] > 7) or (loc[1] < 0 or loc[1] > 7): raise Exception('Piece out of bounds') # If space is already occupied elif self.__board[loc[0]][loc[1]] != EMPTY_SQUARE: raise Exception('Space already occupied') # Add piece to board else: self.__board[loc[0]][loc[1]] = type; # Validate board def __validate_board_state(self): # Maintain a count total of all pieces counts = { WHITE_KING: 0, WHITE_QUEEN: 0, WHITE_BISHOP: 0, WHITE_KNIGHT: 0, WHITE_ROOK: 0, WHITE_PAWN: 0, BLACK_KING: 0, BLACK_QUEEN: 0, BLACK_BISHOP: 0, BLACK_KNIGHT: 0, BLACK_ROOK: 0, BLACK_PAWN: 0 } # Count all pieces on board for row in range(8): for col in range(8): cur_piece = self.__board[row][col] # Check for unknown pieces if cur_piece not in ALL_PIECES: raise Exception('Unknown piece: %s' % str(cur_piece)) # Update total if cur_piece != EMPTY_SQUARE: counts[cur_piece] += 1 # Check for illegal piece counts if counts[WHITE_KING] < 1: raise Exception('Missing white king') if counts[WHITE_KING] > 1: raise Exception('Too many white kings') if counts[BLACK_KING] < 1: raise Exception('Missing black king') if counts[BLACK_KING] > 1: raise Exception('Too many black kings') if counts[WHITE_QUEEN] > 9: raise Exception('Too many white queens') if counts[BLACK_QUEEN] > 9: raise Exception('Too many black queens') if counts[WHITE_BISHOP] > 10: raise Exception('Too many white bishops') if counts[BLACK_BISHOP] > 10: raise Exception('Too many black bishops') if counts[WHITE_KNIGHT] > 10: raise Exception('Too many white knights') if counts[BLACK_KNIGHT] > 10: raise Exception('Too many black knights') if counts[WHITE_ROOK] > 10: raise Exception('Too many white rooks') if counts[BLACK_ROOK] > 10: raise Exception('Too many black rooks') if counts[WHITE_PAWN] > 8: raise Exception('Too many white pawns') if counts[BLACK_PAWN] > 8: raise Exception('Too many black pawns') if counts[WHITE_QUEEN] + counts[WHITE_BISHOP] + counts[WHITE_KNIGHT] + counts[WHITE_ROOK] + counts[WHITE_PAWN] > 15: raise Exception('Too many white pieces') if counts[BLACK_QUEEN] + counts[BLACK_BISHOP] + counts[BLACK_KNIGHT] + counts[BLACK_ROOK] + counts[BLACK_PAWN] > 15: raise Exception('Too many black pieces') # Import new board def import_board(self, board): if len(board) != 8: raise Exception('Board size must be 8x8') 
for row in range(8): if len(board[row]) != 8: raise Exception('Board size must be 8x8') for col in range(8): self.__add_piece(board[row][col], [7-row, col]) # Ensure new board is valid self.__validate_board_state() # Format move for output def __new_move(self, from_loc, to_loc): col = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] move = self.__board[from_loc[0]][from_loc[1]] + " " move += str(from_loc[0]+1) + col[from_loc[1]] move += str(to_loc[0]+1) + col[to_loc[1]] return move # Check is a square is valid (on the board) def __check_square_valid(self, loc): if loc[0] < 0 or loc[0] > 7: return False elif loc[1] < 0 or loc[1] > 7: return False else: return True # Check if a square in unoccupied def __check_square_empty(self, loc): if not self.__check_square_valid(loc): return False elif self.__board[loc[0]][loc[1]] != EMPTY_SQUARE: return False else: return True # Check is a square contains an enemy (opposite color piece) def __check_square_enemy(self, loc): if not self.__check_square_valid(loc): return False elif self.__player == "white" and self.__board[loc[0]][loc[1]] in BLACK_PIECES: return True elif self.__player == "black" and self.__board[loc[0]][loc[1]] in WHITE_PIECES: return True else: return False # Check is a square is empty or contains an enemy def __check_square_empty_or_enemy(self, loc): if self.__check_square_empty(loc): return True elif self.__check_square_enemy(loc): return True else: return False # List all moves for a Pawn def __get_pawn_moves(self, loc): # Init moves moves = [] # If White Player if self.__player == "white": # Move double forward if loc[0] == 1 and self.__check_square_empty([loc[0]+1, loc[1]]) and self.__check_square_empty([loc[0]+2, loc[1]]): move = self.__new_move(loc, [loc[0]+2, loc[1]]) moves.append(move) # Attack diagonally left if self.__check_square_enemy([loc[0]+1, loc[1]-1]): move = self.__new_move(loc, [loc[0]+1, loc[1]-1]) moves.append(move) # Move forward if self.__check_square_empty([loc[0]+1, loc[1]]): move = self.__new_move(loc, [loc[0]+1, loc[1]]) moves.append(move) # Attack diagonally right if self.__check_square_enemy([loc[0]+1, loc[1]+1]): move = self.__new_move(loc, [loc[0]+1, loc[1]+1]) moves.append(move) # If Black Player else: # Move forward twice if loc[0] == 6 and self.__check_square_empty([loc[0]-1, loc[1]]) and self.__check_square_empty([loc[0]-2, loc[1]]): move = self.__new_move(loc, [loc[0]-2, loc[1]]) moves.append(move) # Attack diagonally left if self.__check_square_enemy([loc[0]-1, loc[1]-1]): move = self.__new_move(loc, [loc[0]-1, loc[1]-1]) moves.append(move) # Move forward if self.__check_square_empty([loc[0]-1, loc[1]]): move = self.__new_move(loc, [loc[0]-1, loc[1]]) moves.append(move) # Attack diagonally right if self.__check_square_enemy([loc[0]-1, loc[1]+1]): move = self.__new_move(loc, [loc[0]-1, loc[1]+1]) moves.append(move) return moves # List all moves for a Rook def __get_rook_moves(self, loc): moves = [] # Move/Attack Up new_x = loc[0]+1 new_y = loc[1] while self.__check_square_empty_or_enemy([new_x, new_y]): move = self.__new_move(loc, [new_x, new_y]) moves.append(move) # Stop moving if enemy is in the way if self.__check_square_enemy([new_x, new_y]): break new_x += 1 # Move/Attack Left new_x = loc[0] new_y = loc[1]-1 while self.__check_square_empty_or_enemy([new_x, new_y]): move = self.__new_move(loc, [new_x, new_y]) moves.append(move) # Stop moving if enemy is in the way if self.__check_square_enemy([new_x, new_y]): break new_y -= 1 # Move/Attack Right new_x = loc[0] new_y = loc[1]+1 while 
self.__check_square_empty_or_enemy([new_x, new_y]): move = self.__new_move(loc, [new_x, new_y]) moves.append(move) # Stop moving if enemy is in the way if self.__check_square_enemy([new_x, new_y]): break new_y += 1 # Move/Attack Down new_x = loc[0]-1 new_y = loc[1] while self.__check_square_empty_or_enemy([new_x, new_y]): move = self.__new_move(loc, [new_x, new_y]) moves.append(move) # Stop moving if enemy is in the way if self.__check_square_enemy([new_x, new_y]): break new_x -= 1 return moves # List all moves for a Knight def __get_knight_moves(self, loc): moves = [] # Move/Attack Up-Up-Right if self.__check_square_empty_or_enemy([loc[0]+2, loc[1]+1]): move = self.__new_move(loc, [loc[0]+2, loc[1]+1]) moves.append(move) # Move/Attack Up-Right-Right if self.__check_square_empty_or_enemy([loc[0]+1, loc[1]+2]): move = self.__new_move(loc, [loc[0]+1, loc[1]+2]) moves.append(move) # Move/Attack Down-Right-Right if self.__check_square_empty_or_enemy([loc[0]-1, loc[1]+2]): move = self.__new_move(loc, [loc[0]-1, loc[1]+2]) moves.append(move) # Move/Attack Down-Down-Right if self.__check_square_empty_or_enemy([loc[0]-2, loc[1]+1]): move = self.__new_move(loc, [loc[0]-2, loc[1]+1]) moves.append(move) # Move/Attack Down-Down-Left if self.__check_square_empty_or_enemy([loc[0]-2, loc[1]-1]): move = self.__new_move(loc, [loc[0]-2, loc[1]-1]) moves.append(move) # Move/Attack Down-Left-Left if self.__check_square_empty_or_enemy([loc[0]-1, loc[1]-2]): move = self.__new_move(loc, [loc[0]-1, loc[1]-2]) moves.append(move) # Move/Attack Up-Left-Left if self.__check_square_empty_or_enemy([loc[0]+1, loc[1]-2]): move = self.__new_move(loc, [loc[0]+1, loc[1]-2]) moves.append(move) # Move/Attack Up-Up-Left if self.__check_square_empty_or_enemy([loc[0]+2, loc[1]-1]): move = self.__new_move(loc, [loc[0]+2, loc[1]-1]) moves.append(move) return moves # List all moves for a Bishop def __get_bishop_moves(self, loc): moves = [] # Move/Attack Up-Left new_x = loc[0]+1 new_y = loc[1]-1 while self.__check_square_empty_or_enemy([new_x, new_y]): move = self.__new_move(loc, [new_x, new_y]) moves.append(move) # Stop moving if enemy is in the way if self.__check_square_enemy([new_x, new_y]): break new_x += 1 new_y -= 1 # Move/Attack Up-Right new_x = loc[0]+1 new_y = loc[1]+1 while self.__check_square_empty_or_enemy([new_x, new_y]): move = self.__new_move(loc, [new_x, new_y]) moves.append(move) # Stop moving if enemy is in the way if self.__check_square_enemy([new_x, new_y]): break new_x += 1 new_y += 1 # Move/Attack Down-Left new_x = loc[0]-1 new_y = loc[1]-1 while self.__check_square_empty_or_enemy([new_x, new_y]): move = self.__new_move(loc, [new_x, new_y]) moves.append(move) # Stop moving if enemy is in the way if self.__check_square_enemy([new_x, new_y]): break new_x -= 1 new_y -= 1 # Move/ATtack Down-Right new_x = loc[0]-1 new_y = loc[1]+1 while self.__check_square_empty_or_enemy([new_x, new_y]): move = self.__new_move(loc, [new_x, new_y]) moves.append(move) # Stop moving if enemy is in the way if self.__check_square_enemy([new_x, new_y]): break new_x -= 1 new_y += 1 return moves # List all moves for a King def __get_king_moves(self, loc): moves = [] # Move/Attack Up-Left if self.__check_square_empty_or_enemy([loc[0]+1, loc[1]-1]): move = self.__new_move(loc, [loc[0]+1, loc[1]-1]) moves.append(move) # Move/Attack Up if self.__check_square_empty_or_enemy([loc[0]+1, loc[1]]): move = self.__new_move(loc, [loc[0]+1, loc[1]]) moves.append(move) # Move/Attack Up-Right if self.__check_square_empty_or_enemy([loc[0]+1, loc[1]+1]): 
move = self.__new_move(loc, [loc[0]+1, loc[1]+1]) moves.append(move) # Move/Attack Left if self.__check_square_empty_or_enemy([loc[0], loc[1]-1]): move = self.__new_move(loc, [loc[0], loc[1]-1]) moves.append(move) # Move/Attack Right if self.__check_square_empty_or_enemy([loc[0], loc[1]+1]): move = self.__new_move(loc, [loc[0], loc[1]+1]) moves.append(move) # Move/Attack Down-Left if self.__check_square_empty_or_enemy([loc[0]-1, loc[1]-1]): move = self.__new_move(loc, [loc[0]-1, loc[1]-1]) moves.append(move) # Move/Attack Down if self.__check_square_empty_or_enemy([loc[0]-1, loc[1]]): move = self.__new_move(loc, [loc[0]-1, loc[1]]) moves.append(move) # Move/Attack Down-Right if self.__check_square_empty_or_enemy([loc[0]-1, loc[1]+1]): move = self.__new_move(loc, [loc[0]-1, loc[1]+1]) moves.append(move) # Castling # White side if self.__player == "white" and loc[0] == 0 and loc[1] == 4: # Check right side if self.__check_square_empty([0, 5]) and self.__check_square_empty([0, 6]) and self.__board[0][7] == WHITE_ROOK: move = self.__new_move(loc, [0, 6]) # Move rook too? moves.append(move) # Check left side if self.__check_square_empty([0, 3]) and self.__check_square_empty([0, 2]) and self.__check_square_empty([0, 1]) and self.__board[0][0] == WHITE_ROOK: move = self.__new_move(loc, [0, 2]) # Move rook too? moves.append(move) # Black side if self.__player == "black" and loc[0] == 7 and loc[1] == 4: # Check right side if self.__check_square_empty([7, 5]) and self.__check_square_empty([7, 6]) and self.__board[7][7] == BLACK_ROOK: move = self.__new_move(loc, [7, 6]) # Move rook too? moves.append(move) # Check left side if self.__check_square_empty([7, 3]) and self.__check_square_empty([7, 2]) and self.__check_square_empty([7, 1]) and self.__board[7][0] == BLACK_ROOK: move = self.__new_move(loc, [7, 2]) # Move rook too? moves.append(move) return moves # List all moves for a Queen def __get_queen_moves(self, loc): moves = [] # The Queen moves like a Rook and Bishop combined moves += self.__get_rook_moves(loc) moves += self.__get_bishop_moves(loc) return moves # List all all available moves def get_valid_moves(self): moves = [] for row in range(8): for col in range(8): new_moves = [] cur_piece = self.__board[row][col] # Ignore pieces that don't belong to the current player if self.__check_square_enemy([row,col]): continue; # Calculate moves based on the current piece if cur_piece == WHITE_PAWN or cur_piece == BLACK_PAWN: new_moves = self.__get_pawn_moves([row, col]) elif cur_piece == WHITE_ROOK or cur_piece == BLACK_ROOK: new_moves = self.__get_rook_moves([row, col]) elif cur_piece == WHITE_KNIGHT or cur_piece == BLACK_KNIGHT: new_moves = self.__get_knight_moves([row, col]) elif cur_piece == WHITE_BISHOP or cur_piece == BLACK_BISHOP: new_moves = self.__get_bishop_moves([row, col]) elif cur_piece == WHITE_QUEEN or cur_piece == BLACK_QUEEN: new_moves = self.__get_queen_moves([row, col]) elif cur_piece == WHITE_KING or cur_piece == BLACK_KING: new_moves = self.__get_king_moves([row, col]) moves += new_moves return moves
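# Minimal usage sketch (not part of the original module): set up a small legal
# position and list white's moves. import_board() takes rank 8 first (the row
# index is flipped internally), so the layout below puts the black king on e8,
# the white king on e1, and a white pawn on a2.
board = Chessboard()
layout = [[EMPTY_SQUARE] * 8 for _ in range(8)]
layout[0][4] = BLACK_KING   # e8
layout[7][4] = WHITE_KING   # e1
layout[6][0] = WHITE_PAWN   # a2
board.import_board(layout)
board.set_current_player("white")
for move in board.get_valid_moves():
    print(move)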
mit
2,038,706,519,729,979,400
38.492341
162
0.508754
false
odoo-arg/odoo_l10n_ar
l10n_ar_invoice_presentation/models/presentation_tools.py
1
4805
# coding: utf-8
##############################################################################
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################


class PresentationTools:
    def __init__(self):
        pass

    @staticmethod
    def format_date(d):
        # type: (str) -> str
        """
        Formats a date for the sales/purchases presentation.

        :param d: The date to format.
        :type d: str
        :return: The formatted date.
        :rtype: str
        """
        if not isinstance(d, str):
            d = str(d)
        return d.replace("-", "")

    @staticmethod
    def get_currency_rate_from_move(invoice):
        """
        Gets the currency rate of the invoice from its journal entry lines.

        :param invoice: record, the invoice
        :return: float, currency rate, e.g. 15.32
        """
        move = invoice.move_id
        account = invoice.account_id
        # Take the first move line that has that account
        move_line = move.line_ids.filtered(lambda x: x.account_id == account)[0]
        # Take the line amount, whether it is debit or credit
        amount = move_line.credit or move_line.debit
        amount_currency = abs(move_line.amount_currency)
        # The rate is the amount divided by the currency amount when that is
        # non-zero; otherwise the amount is divided by itself
        currency_rate = float(amount) / float(amount_currency or amount)
        return currency_rate

    @staticmethod
    def get_invoice_type(invoice):
        """
        Gets the invoice type, based on the AFIP codes.

        :param invoice: record, the invoice
        :return: string, AFIP code of the invoice type
        """
        # Record in invoice_type whether this is a debit note, credit note or
        # invoice
        if invoice.is_debit_note:
            invoice_type = 'debit_note'
        else:
            invoice_type = 'invoice' if invoice.type in ['out_invoice', 'in_invoice'] else 'refund'
        # Look up the document book type for this invoice type
        document_type_id = invoice.env['document.book.document.type'].search([
            ('type', '=', invoice_type),
            ('category', '=', 'invoice'),
        ], limit=1)
        # Look up the voucher type stored in the system for this document book
        # type and denomination
        # TODO: for import dispatches the voucher type will not exist because
        # of a mapping problem.
        # The following if/else block is temporary and should be removed.
        type_i = invoice.env.ref('l10n_ar_afip_tables.account_denomination_i')
        if invoice.denomination_id == invoice.env.ref('l10n_ar_afip_tables.account_denomination_d'):
            voucher_type = invoice.env['afip.voucher.type'].search([
                ('document_type_id', '=', document_type_id.id),
                ('denomination_id', '=', type_i.id)], limit=1
            )
        else:
            # Keep only this portion of the code
            voucher_type = invoice.env['afip.voucher.type'].search([
                ('document_type_id', '=', document_type_id.id),
                ('denomination_id', '=', invoice.denomination_id.id)], limit=1
            )
        # Get the AFIP code from the relations table, based on what was
        # computed above
        document_afip_code = int(invoice.env['codes.models.relation'].get_code(
            'afip.voucher.type', voucher_type.id))
        return document_afip_code

    @staticmethod
    def format_amount(amount, dp=2):
        # type: (float, int) -> str
        """
        Formats the number with the given number of decimal places, or two
        decimal places by default.

        :param amount: The number to format.
        :type amount: float
        :param dp: The decimal precision, a.k.a. the number of decimals.
        :type dp: int
        :return: The number formatted as a string.
        :rtype: str
        """
        amount = str("{0:.{1}f}".format(amount, dp))
        amount = amount.replace(".", "").replace(",", "")
        return amount

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
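# Minimal usage sketch (not part of the original module): the two pure
# formatting helpers can be exercised without an Odoo environment.
tools = PresentationTools()
print(tools.format_date("2015-10-14"))   # -> "20151014"
print(tools.format_amount(1234.5))       # -> "123450" (two implied decimals)
print(tools.format_amount(0.5, dp=4))    # -> "05000"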
agpl-3.0
-2,529,918,797,849,831,000
42.288288
117
0.59667
false
michael-hart/Imperial-Vex-2014
src/Python/serial_methods.py
1
1297
import dispatch
import receive
import time
import struct


class serial_master:
    pending_ack = {}
    disp = dispatch.dispatch_master()
    rec = receive.receive_master()
    ack_timeout = 10  # very much a placeholder
    serial_timeout = 10  # very much a placeholder

    def dispatch(self):
        cmd = self.disp.dispatch()
        if (cmd[0] == '\x02') or (cmd[0] == '\x03') or (cmd[0] == '\x05'):
            # command requires ack
            y = struct.pack('f5p', time.time(), cmd)
            self.pending_ack[cmd[1]] = y
        return cmd

    def push_msg(self, cmd, data=0):
        self.disp.generate_msg(cmd, data)

    def receive(self, cmd):
        result = self.rec.rcv_msg(cmd)
        if cmd[0] == '\x04':
            # is acknowledge; dict.pop raises KeyError, not the ValueError the
            # original caught
            try:
                self.pending_ack.pop(chr(result))
            except KeyError:
                print "ID of acknowledge invalid"

    def check_timeout(self):
        t = time.time()
        if (t - self.rec.time_last) > self.serial_timeout:
            print 'the cortex is dead. everything is lost'
        else:
            for key in self.pending_ack:
                # y unpacks to (timestamp, command); the original compared the
                # command string against the clock and indexed the timestamp
                y = struct.unpack('f5p', self.pending_ack[key])
                if (t - y[0]) > self.ack_timeout:
                    self.push_msg(y[1][0], y[1][2])
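# Hedged usage sketch (not part of the original module): one plausible polling
# loop around the class above, assuming the dispatch/receive helper modules
# are importable and that incoming bytes arrive from some serial-port reader.
#
# master = serial_master()
# while True:
#     master.dispatch()            # send the next queued command, tracking acks
#     incoming = read_serial()     # hypothetical serial-port read
#     master.receive(incoming)     # '\x04' replies clear the pending ack
#     master.check_timeout()       # resend commands whose ack has timed out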
gpl-2.0
-7,951,646,154,742,164,000
34.081081
97
0.535852
false
jcherqui/searx
tests/unit/engines/test_google_videos.py
1
2859
from collections import defaultdict
import mock
from searx.engines import google_videos
from searx.testing import SearxTestCase


class TestGoogleVideosEngine(SearxTestCase):

    def test_request(self):
        query = 'test_query'
        dicto = defaultdict(dict)
        dicto['pageno'] = 1
        dicto['safesearch'] = 1
        dicto['time_range'] = ''
        params = google_videos.request(query, dicto)
        self.assertIn('url', params)
        self.assertIn(query, params['url'])

        dicto['safesearch'] = 0
        params = google_videos.request(query, dicto)
        self.assertNotIn('safe', params['url'])

    def test_response(self):
        self.assertRaises(AttributeError, google_videos.response, None)
        self.assertRaises(AttributeError, google_videos.response, [])
        self.assertRaises(AttributeError, google_videos.response, '')
        self.assertRaises(AttributeError, google_videos.response, '[]')

        html = r"""
        <div>
          <div>
            <div class="g">
              <div class="r">
                <a href="url_1"><h3>Title 1</h3></a>
              </div>
              <div class="s">
                <div>
                  <a>
                    <g-img>
                      <img id="vidthumb1">
                    </g-img>
                  </a>
                </div>
              </div>
              <div>
                <span class="st">Content 1</span>
              </div>
            </div>
            <div class="g">
              <div class="r">
                <a href="url_2"><h3>Title 2</h3></a>
              </div>
              <div class="s">
                <div>
                  <a>
                    <g-img>
                      <img id="vidthumb2">
                    </g-img>
                  </a>
                </div>
              </div>
              <div>
                <span class="st">Content 2</span>
              </div>
            </div>
          </div>
        </div>
        <script>function _setImagesSrc(c,d,e){}</script>
        """
        response = mock.Mock(text=html)
        results = google_videos.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0]['url'], u'url_1')
        self.assertEqual(results[0]['title'], u'Title 1')
        self.assertEqual(results[0]['content'], u'Content 1')
        self.assertEqual(results[1]['url'], u'url_2')
        self.assertEqual(results[1]['title'], u'Title 2')
        self.assertEqual(results[1]['content'], u'Content 2')
agpl-3.0
7,804,260,171,419,426,000
35.189873
71
0.432319
false
stcorp/legato
setup.py
1
1043
from setuptools import setup, find_packages
import sys

if sys.hexversion < 0x02070000:
    sys.exit("Python 2.7 or newer is required to use this package.")

setup(
    name="legato",
    version="1.2",
    author="S[&]T",
    url="https://github.com/stcorp/legato",
    description="Task trigger daemon",
    license="BSD",
    packages=find_packages(),
    entry_points={
        "console_scripts": [
            "legato = legato.main:main",
        ],
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Topic :: System",
    ],
    install_requires=[
        "pyyaml",
        "schedule",
        "watchdog"
    ]
)
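# Hedged usage sketch (not part of setup.py): the console_scripts entry above
# means "pip install ." exposes a `legato` command that calls legato.main:main
# with no arguments, so the daemon can also be started programmatically:
#
# from legato.main import main
# main()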
bsd-3-clause
977,491,382,755,786,200
27.189189
68
0.567593
false
iamaris/ppf
ppf/test/test_hull_white.py
1
14343
import ppf, math, numpy, unittest def _assert_seq_close(a, b, tol=1.0e-8): assert (len(a) == len(b)) and \ not [l for l in [math.fabs(x - y) <= tol for (x, y) in zip(a, b)] if not l] class requestor_tests(unittest.TestCase): def test_discount_factor(self): env = ppf.market.environment() times = numpy.linspace(0, 2, 5) env.add_curve( "zc.disc.eur" , ppf.market.curve( times , numpy.array([math.exp(-0.05*t) for t in times]) , ppf.math.interpolation.loglinear ) ) r = ppf.model.hull_white.requestor() t = 1.5 Bt = [r.discount_factor(t, "eur", env)] _assert_seq_close([0.927743486329], Bt) def test_term_vol(self): env = ppf.market.environment() env.add_constant("cv.mr.eur.hw", 0.0) expiries, tenors = [0.1, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0], [0, 90] env.add_surface( "ve.term.eur.hw" , ppf.market.surface(expiries, tenors, numpy.array(8*[[0.04, 0.04]])) ) r = ppf.model.hull_white.requestor() t = 0.25 sig = [r.term_vol(t, "eur", env)] _assert_seq_close(sig, [0.1]) class state_tests(unittest.TestCase): def test(self): expiries, tenors = [0.1, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0], [0, 90] surf = ppf.market.surface(expiries, tenors, numpy.array(8*[[0.04, 0.04]])) env = ppf.market.environment() env.add_surface("ve.term.eur.hw", surf) env.add_constant( "cv.mr.eur.hw", 0.01) s = ppf.model.hull_white.lattice.state("eur", 11, 3.5) x = s.fill(1.25, ppf.model.hull_white.requestor(), env) exp = \ [-0.78754076 ,-0.63003261 ,-0.47252446 ,-0.31501631 ,-0.15750815 , 0. , 0.15750815 , 0.31501631 , 0.47252446 , 0.63003261 , 0.78754076] _assert_seq_close(exp, x) class fill_tests(unittest.TestCase): def test_numeraire_rebased_bond(self): env = ppf.market.environment() times = numpy.linspace(0, 2, 5) factors = numpy.array([math.exp(-0.05*t) for t in times]) env.add_curve("zc.disc.eur" , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear)) expiries, tenors = [0.1, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0], [0, 90] env.add_surface("ve.term.eur.hw" , ppf.market.surface(expiries, tenors, numpy.zeros((8, 2)))) env.add_constant("cv.mr.eur.hw", 0.0) r = ppf.model.hull_white.requestor() s = ppf.model.hull_white.lattice.state("eur", 11, 3.5) sx = s.fill(0.25, r, env) f = ppf.model.hull_white.fill(2.0) PtT = f.numeraire_rebased_bond(0.25, 1.5, "eur", env, r, sx) exp = \ [0.927743486329 ,0.927743486329 ,0.927743486329 ,0.927743486329 ,0.927743486329 ,0.927743486329 ,0.927743486329 ,0.927743486329 ,0.927743486329 ,0.927743486329 ,0.927743486329] _assert_seq_close(exp, PtT) def test_libor(self): from ppf.date_time \ import date, shift, modified_following, basis_act_360, months pd = date(2008, 01, 01) env = ppf.market.environment(pd) times = numpy.linspace(0, 2, 5) factors = numpy.array([math.exp(-0.05*t) for t in times]) env.add_curve("zc.disc.eur" , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear)) expiries, tenors = [0.1, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0], [0, 90] env.add_surface("ve.term.eur.hw" , ppf.market.surface(expiries, tenors, numpy.zeros((8, 2)))) env.add_constant("cv.mr.eur.hw", 0.0) rd = date(2008, 07, 01) libor_obs = \ ppf.core.libor_rate( \ None #attributes , 0 #flow-id , 0 #reset-id , rd #reset-date , "eur"#reset-currency , rd #projection-start-date , shift(rd + months(6), modified_following)#projection-end-date , basis_act_360#projection-basis , ppf.core.fixing(False))# fixing (and no spread) r = ppf.model.hull_white.requestor() s = ppf.model.hull_white.lattice.state("eur", 11, 3.5) sx = s.fill(0.25, r, env) f = ppf.model.hull_white.fill(2.0) libortT = f.libor(0.25, libor_obs, env, r, sx) 
exp = \ [0.0499418283138 ,0.0499418283138 ,0.0499418283138 ,0.0499418283138 ,0.0499418283138 ,0.0499418283138 ,0.0499418283138 ,0.0499418283138 ,0.0499418283138 ,0.0499418283138 ,0.0499418283138 ] _assert_seq_close(exp, libortT) class rollback_tests(unittest.TestCase): def test_discounted_libor_rollback(self): from ppf.date_time \ import date, shift, modified_following, basis_act_360, months pd = date(2008, 01, 01) env = ppf.market.environment(pd) times = numpy.linspace(0, 6, 10) factors = numpy.array([math.exp(-0.05*t) for t in times]) env.add_curve("zc.disc.eur" , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear)) expiries, tenors = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 90] values = numpy.zeros((9, 2)) values.fill(0.001) env.add_surface("ve.term.eur.hw" , ppf.market.surface(expiries, tenors, values)) env.add_constant("cv.mr.eur.hw", 0.01) r = ppf.model.hull_white.requestor() s = ppf.model.hull_white.lattice.state("eur", 41, 4.5) f = ppf.model.hull_white.fill(5.0) rd = date(2011, 01, 01) libor_obs = \ ppf.core.libor_rate( \ None #attributes , 0 #flow-id , 0 #reset-id , rd #reset-date , "eur"#reset-currency , rd #projection-start-date , shift(rd + months(6), modified_following)#projection-end-date , basis_act_360#projection-basis , ppf.core.fixing(False))# fixing (and no spread) t = env.relative_date(libor_obs.proj_start_date())/365.0 T = env.relative_date(libor_obs.proj_end_date())/365.0 sx = s.fill(t, r, env) libort = f.libor(t, libor_obs, env, r, sx) ptT = f.numeraire_rebased_bond(t, T, "eur", env, r, sx) pv = libort*ptT*libor_obs.year_fraction() roll = ppf.model.hull_white.lattice.rollback("eur") intermediate_pv = roll.rollback(0.5*t, t, s, r, env, pv) actual = roll.rollback(0.0, 0.5*t, s, r, env, intermediate_pv).mean() expected = r.discount_factor(t, "eur", env)-r.discount_factor(T, "eur", env) _assert_seq_close([expected],[actual],1.0e-6) def test_bond_option(self): from ppf.date_time \ import date, shift, modified_following, basis_act_360, months pd = date(2008, 01, 01) env = ppf.market.environment(pd) times = numpy.linspace(0, 6, 10) factors = numpy.array([math.exp(-0.05*t) for t in times]) env.add_curve("zc.disc.eur" , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear)) expiries, tenors = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 90] values = numpy.zeros((9, 2)) values.fill(0.001) env.add_surface("ve.term.eur.hw" , ppf.market.surface(expiries, tenors, values)) env.add_constant("cv.mr.eur.hw", 0.01) r = ppf.model.hull_white.requestor() s = ppf.model.hull_white.lattice.state("eur", 41, 4.5) f = ppf.model.hull_white.fill(5.0) t = 3.0 T = 4.0 terminal_T = 5.0 sx = s.fill(t, r, env) ptT = f.numeraire_rebased_bond(t, T, "eur", env, r, sx) k = 0.9 pv = ptT-k roll = ppf.model.hull_white.lattice.rollback("eur") actual = roll.rollback_max(0.0, t, s, r, env, pv).mean() volt = r.term_vol(t, "eur", env)*r.local_vol(T, terminal_T, "eur", env) F = r.discount_factor(T, "eur", env) d1 = math.log(F/k)/volt+0.5*volt d2 = d1-volt expected = F*ppf.math.N(d1)-k*ppf.math.N(d2) _assert_seq_close([expected],[actual],1.0e-5) def test_constant(self): from ppf.date_time \ import date, shift, modified_following, basis_act_360, months pd = date(2008, 01, 01) env = ppf.market.environment(pd) times = numpy.linspace(0, 6, 10) factors = numpy.array([math.exp(-0.05*t) for t in times]) env.add_curve("zc.disc.eur" , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear)) expiries, tenors = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 90] values 
= numpy.zeros((9, 2)) values.fill(0.001) env.add_surface("ve.term.eur.hw" , ppf.market.surface(expiries, tenors, values)) env.add_constant("cv.mr.eur.hw", 0.01) r = ppf.model.hull_white.requestor() s = ppf.model.hull_white.lattice.state("eur", 41, 5.5) f = ppf.model.hull_white.fill(5.0) t = 3.0 T = 4.0 terminal_T = 5.0 sx = s.fill(t, r, env) yT = numpy.zeros(41) yT.fill(1) roll = ppf.model.hull_white.lattice.rollback("eur") yt = roll.rollback(t, T, s, r, env, yT) _assert_seq_close(yt, yT, 1.0e-5) class evolve_tests(unittest.TestCase): def test_mean_and_variance(self): from ppf.date_time \ import date, shift, modified_following, basis_act_360, months pd = date(2008, 01, 01) env = ppf.market.environment(pd) times = numpy.linspace(0, 6, 10) factors = numpy.array([math.exp(-0.05*t) for t in times]) env.add_curve("zc.disc.eur" , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear)) expiries, tenors = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 90] values = numpy.zeros((9, 2)) values.fill(0.001) env.add_surface("ve.term.eur.hw" , ppf.market.surface(expiries, tenors, values)) env.add_constant("cv.mr.eur.hw", 0.01) r = ppf.model.hull_white.requestor() s = ppf.model.hull_white.monte_carlo.state(10000) e = ppf.model.hull_white.monte_carlo.evolve("eur") e.evolve(0.0,0.5,s,r,env) e.evolve(0.5,1.0,s,r,env) variates = s.get_variates() mean = variates.sum()/10000 assert(math.fabs(mean) < 1.0e-4) tmp = variates*variates variance = tmp.sum()/10000 vol = r.term_vol(1.0,"eur",env) assert(math.fabs(variance-vol*vol) < 1.0e-4) def test_bond(self): from ppf.date_time \ import date, shift, modified_following, basis_act_360, months pd = date(2008, 01, 01) env = ppf.market.environment(pd) times = numpy.linspace(0, 6, 10) factors = numpy.array([math.exp(-0.05*t) for t in times]) env.add_curve("zc.disc.eur" , ppf.market.curve(times, factors, ppf.math.interpolation.loglinear)) expiries, tenors = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0], [0, 90] values = numpy.zeros((9, 2)) values.fill(0.001) env.add_surface("ve.term.eur.hw" , ppf.market.surface(expiries, tenors, values)) env.add_constant("cv.mr.eur.hw", 0.01) r = ppf.model.hull_white.requestor() s = ppf.model.hull_white.monte_carlo.state(10000) e = ppf.model.hull_white.monte_carlo.evolve("eur") e.evolve(0.0,3.0,s,r,env) f = ppf.model.hull_white.fill(5.0) t = 3.0 T = 4.0 sx = s.fill(t, r, env) ptT = f.numeraire_rebased_bond(t, T, "eur", env, r, sx) actual = ptT.mean() expected = r.discount_factor(T, "eur", env) assert(math.fabs(actual-expected) < 1.0e-3) class exercise_tests(unittest.TestCase): def test_explanatory_variables(self): from ppf.math.interpolation import loglinear times = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0] factors = [math.exp(-0.05*t) for t in times] c = ppf.market.curve(times, factors, loglinear) expiries = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0] tenors = [0, 90] values = numpy.zeros((8, 2)) surf = ppf.market.surface(expiries, tenors, values) from ppf.date_time \ import date, shift_convention, modified_following, basis_act_360, months pd = date(2008, 01, 01) env = ppf.market.environment(pd) key = "zc.disc.eur" env.add_curve(key, c) key = "ve.term.eur.hw" env.add_surface(key, surf) key = "cv.mr.eur.hw" env.add_constant(key, 0.0) r = ppf.model.hull_white.requestor() s = ppf.model.hull_white.monte_carlo.state(10) sx = s.fill(0.25, r, env) f = ppf.model.hull_white.fill(3.0) flows = ppf.core.generate_flows( start = date(2008, 01, 01) , end = date(2010, 01, 01) , duration = months , period = 6 , shift_method = 
shift_convention.modified_following , basis = "ACT/360" , pay_currency = "EUR") lg = ppf.core.leg(flows, ppf.core.PAY) ex = ppf.model.hull_white.monte_carlo.cle_exercise(lg) t = env.relative_date(flows[1].accrual_start_date())/365.0 T = env.relative_date(flows[1].accrual_end_date())/365.0 ret = ex(t, f, sx, r, env) dft = c(t) dfT = c(T) expected_libor = (dft/dfT-1.0)/flows[1].year_fraction() pv01 = 0.0 for fl in flows[1:]: T = env.relative_date(fl.pay_date())/365.0 dfT = c(T) pv01 += fl.year_fraction()*dfT T = env.relative_date(flows[-1].accrual_end_date())/365.0 dfT = c(T) expected_swap = (dft-dfT)/pv01 expected_libors = numpy.zeros(10) expected_libors.fill(expected_libor) expected_swaps = numpy.zeros(10) expected_swaps.fill(expected_swap) actual_libors = ret[:, 0] actual_swaps = ret[:, 1] _assert_seq_close(actual_libors, expected_libors) _assert_seq_close(actual_swaps, expected_swaps) class hull_white_test_suite(unittest.TestSuite): def __init__(self): tests = map(requestor_tests,('test_discount_factor','test_term_vol')) + \ map(state_tests,('test',)) + \ map(fill_tests,('test_numeraire_rebased_bond', 'test_libor')) + \ map(rollback_tests, ('test_discounted_libor_rollback','test_bond_option', 'test_constant')) + \ map(evolve_tests, ('test_mean_and_variance', 'test_bond')) + \ map(exercise_tests, ('test_explanatory_variables',)) unittest.TestSuite.__init__(self, tests) #////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8 # driver # def suite(): all_tests = unittest.TestSuite( ( hull_white_test_suite() , ) ) return all_tests def run_tests(): runner = unittest.TextTestRunner() runner.run(suite()) if __name__ == '__main__': run_tests()
mit
-2,732,997,826,539,042,000
35.966495
107
0.585721
false
hayashizakitakaaki/Introduction_mysite
accounts/models.py
1
3386
# -*- coding: utf-8 -*-
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
from django.shortcuts import get_object_or_404


class UserManager(BaseUserManager):
    use_in_migrations = True

    def _create_user(self, username, password, is_superuser,
                     first_name=None, last_name=None, **extra_fields):
        if not username or len(username.strip()) < 1:
            raise ValueError(u'ユーザー名を入力してください!')
        if User.objects.filter(username=username):
            raise ValueError(u'そのユーザーは登録されています')
        if first_name is not None and len(first_name.strip()) < 1:
            raise ValueError(u'姓を入力してください!')
        if last_name is not None and len(last_name.strip()) < 1:
            raise ValueError(u'名を入力してください!')
        user = self.model(
            username=username,
            first_name=first_name,
            last_name=last_name,
            is_superuser=is_superuser,
            **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def _edit_user(self, id, username, is_superuser,
                   first_name=None, last_name=None, **extra_fields):
        if not username or len(username.strip()) < 1:
            raise ValueError(u'ユーザー名を入力してください!')
        if first_name is not None and len(first_name.strip()) < 1:
            raise ValueError(u'姓を入力してください!')
        if last_name is not None and len(last_name.strip()) < 1:
            raise ValueError(u'名を入力してください!')
        try:
            user = get_object_or_404(User, pk=id)
        except:
            raise ValueError(u'ユーザーが存在しません')
        user.username = username
        user.first_name = first_name
        user.last_name = last_name
        user.is_superuser = is_superuser
        user.save(using=self._db)
        return user

    def create_superuser(self, username, password, **extra_fields):
        return self._create_user(username, password, True, **extra_fields)


class User(AbstractBaseUser, PermissionsMixin):
    username = models.CharField('ユーザーID', max_length=30, unique=True,
                                help_text="The user ID, used for login and logout")
    screenname = models.CharField('表示名', max_length=255, help_text="")
    first_name = models.CharField('姓', max_length=255, blank=True, null=True, help_text="")
    last_name = models.CharField('名', max_length=255, blank=True, null=True, help_text="")
    is_active = models.BooleanField('有効フラグ', default=True)
    is_staff = models.BooleanField('スタッフ', default=True)
    created_date = models.DateTimeField('登録日時', auto_now_add=True)
    modified_date = models.DateTimeField('更新日時', auto_now=True)

    objects = UserManager()

    USERNAME_FIELD = 'username'

    class Meta:
        verbose_name = 'ユーザー'
        verbose_name_plural = verbose_name

    def get_full_name(self):
        if self.first_name and self.last_name:
            return self.first_name + self.last_name
        else:
            return self.username

    def get_short_name(self):
        return self.first_name
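# Hedged usage sketch (not part of the original module): creating and editing
# users through the manager; requires a configured Django project with this
# app installed, and the names below are illustrative only.
#
# user = User.objects.create_superuser("yamada", "s3cret",
#                                      first_name=u"山田", last_name=u"太郎")
# user.get_full_name()   # -> u"山田太郎"
# User.objects._edit_user(user.id, "yamada", True,
#                         first_name=u"山田", last_name=u"花子")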
mit
5,415,129,295,822,937,000
40.68
117
0.622521
false
Micronaet/micronaet-mx8
l10n_it_ddt_sectional/journal.py
1
2205
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
    DEFAULT_SERVER_DATETIME_FORMAT,
    DATETIME_FORMATS_MAP,
    float_compare)


_logger = logging.getLogger(__name__)


class StockDdT(orm.Model):
    '''Override DDT to change the set_number function.
    '''
    _inherit = 'stock.ddt'

    def set_number(self, cr, uid, ids, context=None):
        '''Override of the original function.
        '''
        for ddt in self.browse(cr, uid, ids, context=context):
            if not ddt.name:
                name = self.pool.get('ir.sequence').get_id(
                    cr, uid, ddt.sequence.id, code_or_id='id',
                    context=context)
                self.write(cr, uid, ddt.id, {
                    'name': name}, context=context)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
1,794,800,547,456,019,000
37.017241
79
0.636735
false
ageek/useful-papers-codes
dropconnect-on-CIFAR10/drop-nn/imagenetdata-old.py
2
7620
# image net data provider
from PIL import Image
from util import pickle, unpickle
import numpy as n
import sys
from numpy.random import random_integers
from time import time, asctime, localtime, strftime
from math import *

MEAN_FILE_EXT = "_mean"

def PIL2array(img):
    #if img.mode == 'L':
    #    r = n.array(img.getdata(), n.uint8).reshape(img.size[1], img.size[0] )
    #    result = n.zeros( (img.size[1], img.size[0],3 ), n.uint8 )
    #    result[:,:,0] = r
    #    result[:,:,1] = r
    #    result[:,:,2] = r
    #    return result
    #else:
    #    return n.array(img.getdata(), n.uint8).reshape(img.size[1], img.size[0], 3)
    if img.mode == 'L':
        I = n.asarray( img )
        result = n.zeros( (img.size[1], img.size[0],3 ), n.uint8 )
        result[:,:,0] = I
        result[:,:,1] = I
        result[:,:,2] = I
        return result
    else:
        return n.asarray( img )

def array2PIL(arr):
    return Image.fromarray( n.uint8(arr) )

class ImagenetDataProvider:
    def __init__( self, data_file, root_path, data_mode = "train",
                  random_transform = False, batch_size = 128,
                  crop_width = 224, crop_height = 224 ):
        # read image-name image-index map from file
        self.data = unpickle( data_file )
        self.num_classes = len( self.data['index_map'] )
        self.data_mode = data_mode
        self.random_transform = random_transform
        self.root_path = root_path
        if data_mode == "all":
            index_map = self.data['index_map']
        elif data_mode == "val":
            index_map = self.data['index_map_val']
        elif data_mode == "train":
            index_map = self.data['index_map_train']
        else:
            print "data_mode: " + data_mode + " not valid"
            import pdb; pdb.set_trace()
            sys.exit(1)

        # get batch queue
        self.batch_queue = []
        has_add = True
        while has_add:
            has_add = False
            for i in range( self.num_classes ):
                if len(index_map[i]) > 0:
                    index = index_map[i].pop()
                    self.batch_queue.append( index )
                    has_add = True
        self.num_images = len( self.batch_queue )

        # init current index and batch size
        self.batch_size = batch_size
        self.prev_batch_size = batch_size
        self.crop_width = crop_width
        self.crop_height = crop_height
        self.batch_index = 1
        self.epoch = 1

        # read data mean from file
        data_mean_file = unpickle( data_file + MEAN_FILE_EXT )
        self.data_mean = data_mean_file['data']

    def get_data_dims( self, idx ):
        if idx == 0:
            return self.crop_width * self.crop_height * 3
        if idx == 1:
            return 1

    def get_previous_batch_size( self ):
        return self.prev_batch_size

    def get_next_batch( self ):
        # construct next batch online
        #   batch_data[0]: epoch
        #   batch_data[1]: batchnum
        #   batch_data[2]['label']: each column represents an image
        #   batch_data[2]['data'] : each column represents an image
        # this function only crop center 256 x 256 in image for classification
        total_time_start = time()
        alloc_time_start = time()
        result_data = n.zeros( ( self.crop_width * self.crop_height * 3, self.batch_size ),
                               n.float32 )
        result_label = n.zeros( (1,self.batch_size ), n.float32 )
        batch_index = self.batch_index - 1
        if batch_index * self.batch_size >= self.num_images:
            self.batch_index = 1
            self.epoch += 1
            batch_index = 0
        alloc_time = time() - alloc_time_start

        # loading/transform image time
        load_time = 0
        transform_time = 0
        lt_time_start = time()
        k = 0
        for i in range( self.batch_size ):
            index = (i + batch_index * self.batch_size )
            if index >= self.num_images:
                break
            k += 1
            index = self.batch_queue[index]
            result_data[:,i], result_label[0,i], lti, tti = self.get_data_label( index )
            load_time += lti
            transform_time += tti
        lt_time = time() - lt_time_start

        pack_time_start = time()
        # shrink result_data, result_label to have k columns
        if k < self.batch_size:
            result_data = result_data[:,0:k]
            result_label = result_label[0,0:k].reshape(1,k)
        # record the size of the batch just produced; the original assigned to
        # self.previous_batch_size, which get_previous_batch_size never reads
        self.prev_batch_size = k
        self.batch_index += 1
        result = {}
        result['data'] = result_data
        result['label'] = result_label
        #result['label'] = result_label % 10
        #import pdb; pdb.set_trace()
        pack_time = time() - pack_time_start
        print "load data: (%.3f sec) " % ( time() - total_time_start ),
        print " = %.2f(%.2f + %.2f) + %.2f" % (lt_time, load_time, transform_time, alloc_time),
        return self.epoch, batch_index+1, result

    def get_data_label( self, index ):
        #import pdb; pdb.set_trace()
        image_path = self.root_path + "/" + self.data['image_path'][index]
        label = self.data['image_label'][index]

        # load image
        load_time_start = time()
        im = Image.open( image_path )
        image_matrix = PIL2array( im )
        load_time = time() - load_time_start

        # generate transformed image
        transform_time_start = time()
        #[x,y,w,h] = im.getbbox()
        x = 0
        y = 0
        (w,h) = im.size

        # get image matrix and subtract mean
        image_matrix = image_matrix.astype(n.float32)
        image_matrix -= self.data_mean

        if self.random_transform:
            # random crop
            x += random_integers( 0, w - self.crop_width - 1)
            y += random_integers( 0, h - self.crop_height - 1)
        else:
            # fixed crop
            x += (w - self.crop_width)/2
            y += (h - self.crop_height)/2

        # crop image
        assert( x + self.crop_width < w )
        assert( y + self.crop_height < h )
        #im = im.crop( (x,y, x + self.crop_width, y + self.crop_height ) )
        image_matrix = image_matrix[ x:x+self.crop_width, y:y+self.crop_width, : ]

        if self.random_transform:
            # flip: roll a dice to whether flip image
            if random_integers( 0,1 ) > 0.5:
                #im = im.transpose( Image.FLIP_LEFT_RIGHT )
                image_matrix = image_matrix[:, -1::-1, :]

        image_matrix = image_matrix.reshape( (self.crop_width * self.crop_height * 3, ) )
        image_matrix = n.require( image_matrix, dtype=n.single, requirements='C')
        label = n.require( label, dtype=n.single, requirements='C' )
        transform_time = time() - transform_time_start
        return image_matrix, label, load_time, transform_time

    def get_num_classes( self ):
        return self.num_classes

    def get_num_batches( self ):
        return int(ceil( 1.0 * len(self.batch_queue) / self.batch_size ))

    def print_data_summary( self ):
        class_labels = [ self.data['image_label'][x] for x in self.batch_queue ]
        label_hist = [0] * self.get_num_classes()
        for i in range( len(class_labels ) ):
            label_hist[ class_labels[i] ] += 1
        print "Class Label Hist: ", label_hist, len(label_hist)
        print "Num Batches : ", self.get_num_batches()

if __name__ == "__main__":
    data_file = '/home/snwiz/data/imagenet12/code/data/imagenet_data_tiny10'
    provider = ImagenetDataProvider( data_file, 'val', batch_size = 128,
                                     random_transform = True )
    for i in range(2000):
        epoch, batch_index, data = provider.get_next_batch()
        print 'epoch: ' + str(epoch) + ' batch_index: ' + str(batch_index) + \
              '/' + str(provider.get_num_batches()) + \
              ' data: ' + str(data['data'][0:5,0:5]) +\
              ' label: ' + str(data['label'][0:5,0:5] )
gpl-2.0
716,190,105,202,208,900
33.479638
97
0.570079
false
pintomollo/zztip
organize_shh.py
1
1240
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import re
import glob
import shutil
import shlex, subprocess

if __name__ == '__main__':
    helpmess = """Usage: organize_shh dir

Converts the output of the Leica SB configuration into single tiffs.
"""
    # Inputs
    if len(sys.argv) < 2:
        print(helpmess)
        sys.exit(0)
    else:
        indir = sys.argv[1]

    indir = os.path.realpath(indir)
    count = 1
    dpa = '-100'
    isgfp = False

    if os.path.exists(indir) and os.path.isdir(indir):
        files = os.listdir(indir)
        for fname in files:
            res = re.match(r'SB25_(\d+)_(-?\d+)dpa_(Z\d+)_(\d+)(.*)\.tif', fname)
            if res is not None:
                ids = res.group(2)
                index = ''
                if ids != dpa:
                    if res.group(5) == '_GFP':
                        dpa = ids
                        count = 1
                else:
                    index = chr(96 + count)
                    if res.group(5) == '_GFP':
                        count += 1
                print('{name} -> SB25_{z}{i}{g}_{d}dpa_{D}'.format(name=fname, z=res.group(3),
                      i=index, g=res.group(5), d=ids, D=res.group(1)))
                os.rename(os.path.join(indir, fname),
                          os.path.join(indir, 'SB25_{z}{i}{g}_{d}dpa_{D}.tif'.format(
                              z=res.group(3), i=index, g=res.group(5), d=ids, D=res.group(1))))
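To see what the rename does, here is a quick standalone check of the pattern on a hypothetical Leica export name (illustration only; the filename is made up, not from the original data):

import re

fname = 'SB25_0012_3dpa_Z04_0007_GFP.tif'  # hypothetical input name
res = re.match(r'SB25_(\d+)_(-?\d+)dpa_(Z\d+)_(\d+)(.*)\.tif', fname)
# group(1)='0012' (series), group(2)='3' (dpa), group(3)='Z04' (z-slice),
# group(4)='0007' (frame), group(5)='_GFP' (channel suffix)
print('SB25_{z}{i}{g}_{d}dpa_{D}.tif'.format(
    z=res.group(3), i='', g=res.group(5), d=res.group(2), D=res.group(1)))
# -> SB25_Z04_GFP_3dpa_0012.tif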
gpl-3.0
-584,310,953,087,189,600
24.833333
170
0.556452
false
meine-stadt-transparent/meine-stadt-transparent
importer/management/commands/_import_base_command.py
1
1662
import logging
from abc import ABC
from typing import Tuple, Dict, Any

from django.conf import settings
from django.core.management.base import BaseCommand

from importer.importer import Importer
from importer.loader import get_loader_from_body, BaseLoader
from mainapp.models import Body

logger = logging.getLogger(__name__)


class ImportBaseCommand(BaseCommand, ABC):
    def add_arguments(self, parser):
        parser.add_argument("--body", help="The oparl id of the body")
        parser.add_argument(
            "--ignore-modified", dest="ignore_modified", action="store_true"
        )
        parser.add_argument("--force-singlethread", action="store_true")
        parser.add_argument(
            "--skip-download",
            action="store_true",
            dest="skip_download",
            default=False,
            help="Do not download and parse the files",
        )

    def get_importer(self, options: Dict[str, Any]) -> Tuple[Importer, Body]:
        if options.get("body"):
            body = Body.objects.get(oparl_id=options["body"])
        else:
            body = Body.objects.get(id=settings.SITE_DEFAULT_BODY)

        if body.oparl_id is not None:
            loader = get_loader_from_body(body.oparl_id)
            importer = Importer(
                loader, body, ignore_modified=options["ignore_modified"]
            )
        else:
            importer = Importer(
                BaseLoader(dict()), ignore_modified=options["ignore_modified"]
            )
        importer.force_singlethread = options["force_singlethread"]
        importer.download_files = not options["skip_download"]

        return importer, body
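A minimal sketch of how a concrete management command might build on this base class. This is not from the repository; in particular, `importer.run(body)` is a stand-in for whatever entry point Importer actually exposes:

from importer.management.commands._import_base_command import ImportBaseCommand


class Command(ImportBaseCommand):
    help = "Run an import for the configured body"

    def handle(self, *args, **options):
        # get_importer resolves the body (from --body or SITE_DEFAULT_BODY)
        # and configures threading/download behaviour from the CLI flags.
        importer, body = self.get_importer(options)
        importer.run(body)  # hypothetical entry point, for illustration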
mit
8,626,599,972,573,587,000
33.625
78
0.624549
false
kratsg/ironman
tests/test_history.py
1
1083
from zope.interface.verify import verifyClass, verifyObject

from ironman.history import History
from ironman.interfaces import IHistory
from ironman.globals import IPBUS_VERSION, TESTPACKETS
from ironman.packet import IPBusPacket

# fixtures for passing in the objects
import pytest


def test_history_create():
    obj = History()
    assert obj is not None


def test_history_class_iface():
    # Assure the class implements the declared interface
    assert verifyClass(IHistory, History)


def test_history_instance_iface():
    # Assure instances of the class provide the declared interface
    assert verifyObject(IHistory, History())


def test_history_empty():
    h = History()
    assert len(h) == 0
    assert any(h.packets) == False
    assert len(h.packets) == h.maxlen


def test_history_record():
    h = History()
    for i in range(101):
        p = IPBusPacket(TESTPACKETS['big-endian'])
        p.request.header.id = i
        h.record(p)
    assert len(h) == h.maxlen
    assert all(h.packets) == True
    assert 0 not in h
    assert 1 in h
    assert 100 in h
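The assertions pin down the History contract fairly precisely: a preallocated, fixed-size buffer (len(h.packets) == h.maxlen even when empty), a length that counts recorded packets up to maxlen, eviction of the oldest entry (id 0 drops out after 101 records with maxlen 100), and membership tested by request-header id. The following is a sketch consistent with those assertions, not the actual ironman implementation:

from collections import deque


class SketchHistory(object):
    maxlen = 100  # implied by len(h) == h.maxlen after 101 records

    def __init__(self):
        # preload with None so len(self.packets) == maxlen from the start
        self._buf = deque([None] * self.maxlen, maxlen=self.maxlen)
        self._count = 0

    @property
    def packets(self):
        return list(self._buf)

    def record(self, packet):
        self._buf.append(packet)  # evicts the oldest entry once full
        self._count = min(self._count + 1, self.maxlen)

    def __len__(self):
        return self._count

    def __contains__(self, packet_id):
        return any(p is not None and p.request.header.id == packet_id
                   for p in self._buf)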
mit
-3,390,162,100,391,632,400
23.613636
66
0.698984
false
paulpc/nyx
plugin_template.py
1
2211
import syslog

def add_ip(ip, settings, intel_list, tags):
    """ adds an IP to the pre-established list. The tags might or might not be supported by the control"""
    # your code here
    success = True  # TODO: replace with the control-specific check for a successful addition
    if success:
        syslog.syslog(syslog.LOG_INFO, 'nyx->[this_plugin]: successfully added %s to %s' % (ip, intel_list))
        return True
    else:
        syslog.syslog(syslog.LOG_ERR, 'nyx->[this_plugin]: problems adding %s to %s' % (ip, intel_list))
        return False

def add_domain(domain, settings, intel_list, tags):
    """ adds a domain to the pre-established list. The tags might or might not be supported by the control"""
    # your code here
    success = True  # TODO: replace with the control-specific check for a successful addition
    if success:
        syslog.syslog(syslog.LOG_INFO, 'nyx->[this_plugin]: successfully added %s to %s' % (domain, intel_list))
        return True
    else:
        syslog.syslog(syslog.LOG_ERR, 'nyx->[this_plugin]: problems adding %s to %s' % (domain, intel_list))
        return False

def list_ips(settings):
    """ retrieves the IP addresses from the control's specific lists for comparison"""
    ip_index = {}
    # your code here
    return ip_index

def list_domains(settings):
    """ retrieves the domains from the control's lists for comparison. The index should be structured as a dictionary of {domain:intel_list}"""
    domain_index = {}
    # your code here
    return domain_index

def remove_ip(ip, settings):
    """ removes an IP from the control"""
    # your code here
    success = True  # TODO: replace with the control-specific check for a successful removal
    if success:
        syslog.syslog(syslog.LOG_INFO, 'nyx->[this_plugin]: successfully removed %s' % (ip))
        return True
    else:
        syslog.syslog(syslog.LOG_ERR, 'nyx->[this_plugin]: problems removing %s' % (ip))
        return False

def remove_domain(domain, settings):
    """ removes a domain from the control"""
    # your code here
    success = True  # TODO: replace with the control-specific check for a successful removal
    if success:
        syslog.syslog(syslog.LOG_INFO, 'nyx->[this_plugin]: successfully removed %s' % (domain))
        return True
    else:
        syslog.syslog(syslog.LOG_ERR, 'nyx->[this_plugin]: problems removing %s' % (domain))
        return False
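To make the template concrete, here is a minimal example plugin where the "control" is just a JSON file on disk (a hypothetical stand-in; a real plugin would call the security control's API, and the 'blocklist_path' settings key is invented for this sketch):

import json
import syslog


def add_ip(ip, settings, intel_list, tags):
    """adds an IP to a JSON-file 'control'; returns True on success"""
    try:
        path = settings['blocklist_path']  # hypothetical settings key
        try:
            index = json.load(open(path))
        except (IOError, ValueError):
            index = {}
        index[ip] = intel_list
        json.dump(index, open(path, 'w'))
        syslog.syslog(syslog.LOG_INFO, 'nyx->json_file: successfully added %s to %s' % (ip, intel_list))
        return True
    except Exception:
        syslog.syslog(syslog.LOG_ERR, 'nyx->json_file: problems adding %s to %s' % (ip, intel_list))
        return False


def list_ips(settings):
    """returns {ip: intel_list}, the index shape the comparison logic expects"""
    try:
        return json.load(open(settings['blocklist_path']))
    except (IOError, ValueError):
        return {}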
mit
-30,865,448,108,538,424
33.5625
110
0.645409
false
yugangzhang/GitTest
CMS_Profile/81-beam.py
1
92270
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vi: ts=4 sw=4

################################################################################
# Code for querying and controlling beamline components that 'affect' the
# beam. (Beam energy, beam flux, etc.)
################################################################################
# Known Bugs:
#  N/A
################################################################################
# TODO:
#  Search for "TODO" below.
################################################################################

# Notes
################################################################################
# verbosity=0 : Output nothing
# verbosity=1 : Output only final (minimal) result
# verbosity=2 : Output 'regular' amounts of information/data
# verbosity=3 : Output all useful information
# verbosity=4 : Output marginally useful things (e.g. essentially redundant/obvious things)
# verbosity=5 : Output everything (e.g. for testing)

# These imports are not necessary if part of the startup sequence.
# If this file is called separately, some of these may be needed.
#import numpy as np
#from epics import caget, caput
#from time import sleep
#from ophyd import EpicsMotor, Device, Component as Cpt
#from ophyd.commands import *  # For mov, movr

# define pilatus_name and _Epicsname, instead of pilatus300 or pilatus2M
# moved to 20-area-detectors.py
#pilatus_name = pilatus2M
#pilatus_Epicsname = '{Det:PIL2M}'


class BeamlineDetector(object):

    def __init__(self, detector, **md):
        self.detector = detector
        self.md = md

    def get_md(self, prefix='detector_', **md):
        '''Returns a dictionary of the current metadata.
        The 'prefix' argument is prepended to all the md keys, which allows
        the metadata to be grouped with other metadata in a clear way.
        (Especially, to make it explicit that this metadata came from the
        beamline.)'''
        md_return = self.md.copy()

        # Include the user-specified metadata
        md_return.update(md)

        # Add an optional prefix
        if prefix is not None:
            md_return = { '{:s}{:s}'.format(prefix, key) : value for key, value in md_return.items() }

        return md_return


class CMS_SAXS_Detector(BeamlineDetector):

    def setCalibration(self, direct_beam, distance, detector_position=None, pixel_size=0.172):
        self.direct_beam = direct_beam
        self.distance = distance
        if detector_position is None:
            self.detector_position = [SAXSx.user_readback.value, SAXSy.user_readback.value]
        else:
            self.detector_position = detector_position
        self.pixel_size = pixel_size

    def get_md(self, prefix='detector_SAXS_', **md):
        md_return = self.md.copy()

        x0, y0 = self.direct_beam
        position_defined_x, position_defined_y = self.detector_position
        position_current_x, position_current_y = SAXSx.user_readback.value, SAXSy.user_readback.value

        md_return['name'] = self.detector.name
        md_return['x0_pix'] = round( x0 + (position_current_x-position_defined_x)/self.pixel_size , 2 )
        md_return['y0_pix'] = round( y0 + (position_current_y-position_defined_y)/self.pixel_size , 2 )
        md_return['distance_m'] = self.distance

        # Include the user-specified metadata
        md_return.update(md)

        # Add an optional prefix
        if prefix is not None:
            md_return = { '{:s}{:s}'.format(prefix, key) : value for key, value in md_return.items() }

        return md_return


class BeamlineElement(object):
    '''Defines a component of the beamline that (may) intersect the x-ray beam.'''

    def __init__(self, name, zposition, description="", pv=None, **args):
        self.name = name
        self.zposition = zposition
        self.description = description
        self.conversion_factor = 1
        self._pv_main = pv
        self.has_flux = True

    def state(self):
        """
        Returns the current state of the beamline element. Common states:
        out - Element is out of the way of the beam (and should not be blocking).
        in - Element is in the beam (but should not be blocking).
        block - Element is in the beam, and should be blocking the beam.
        undefined - Element is in an unexpected state.
        """
        return "out"

    def transmission(self, t=None, verbosity=0):
        """
        Returns the predicted transmission of this beamline element, based on
        its current state.
        """
        if t is not None:
            print("WARNING: To change transmission, use 'setTransmission'.")
            print("WARNING: Beam transmission was not changed.")
            return

        tr_tot = 1.0
        if verbosity>=2:
            print('{:s} transmission = {:.6g}'.format(self.name, tr_tot))
        # Assume a generic beamline element doesn't block/perturb the beam
        return tr_tot

    def flux(self, verbosity=3):
        reading = self.reading(verbosity=0)
        flux = self.conversion_factor*reading  # ph/s
        if verbosity>=2:
            print('flux = {:.4g} ph/s'.format(flux))
        return flux


class Shutter(BeamlineElement):
    # Example
    #  XF:11BMA-PPS{PSh}Enbl-Sts
    # Status: XF:11BMA-PPS{PSh}Pos-Sts    0 for open, 1 for close
    # Open:   XF:11BMA-PPS{PSh}Cmd:Opn-Cmd
    # Close:  XF:11BMA-PPS{PSh}Cmd:Cls-Cmd

    def __init__(self, name, zposition, description="", pv=None, **args):
        super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args)
        self.has_flux = False

    def state(self):
        """
        Returns the current state of the beamline element. Common states:
        out - Element is out of the way of the beam (and should not be blocking).
        in - Element is in the beam (but should not be blocking).
        block - Element is in the beam, and should be blocking the beam.
        undefined - Element is in an unexpected state.
        """
        state_n = caget(self._pv_main+'Pos-Sts')
        if state_n == 0:
            return "out"
        elif state_n == 1:
            return "block"
        else:
            return "undefined"

    def open(self, verbosity=3):
        if verbosity>=3:
            print('Opening {:s}...'.format(self.name))
        # E.g. XF:11BMB-VA{Slt:4-GV:1}Cmd:Opn-Cmd
        pv = self._pv_main + 'Cmd:Opn-Cmd'
        #caput(pv, 1)  # TODO: Test this.

    def close(self, verbosity=3):
        if verbosity>=3:
            print('Closing {:s}...'.format(self.name))
        pv = self._pv_main + 'Cmd:Cls-Cmd'
        #caput(pv, 1)  # TODO: Test this.


class GateValve(Shutter):
    # Example
    # Status: XF:11BMB-VA{Slt:4-GV:1}Pos-Sts    1 for open, 0 for close
    # Open:   XF:11BMB-VA{Slt:4-GV:1}Cmd:Opn-Cmd
    # Close:  XF:11BMB-VA{Slt:4-GV:1}Cmd:Cls-Cmd

    def state(self):
        """
        Returns the current state of the beamline element. Common states:
        out - Element is out of the way of the beam (and should not be blocking).
        in - Element is in the beam (but should not be blocking).
        block - Element is in the beam, and should be blocking the beam.
        undefined - Element is in an unexpected state.
        """
        state_n = caget(self._pv_main+'Pos-Sts')
        if state_n == 1:
            return "out"
        elif state_n == 0:
            return "block"
        else:
            return "undefined"


class ThreePoleWiggler(BeamlineElement):

    def __init__(self, name='3PW', zposition=0.0, description='Three-pole wiggler source of x-rays', **args):
        super().__init__(name=name, zposition=zposition, description=description, **args)
        # TODO: Find out the right conversion factor
        self.conversion_factor = 3e18/500.0  # (ph/s)/mA

    def state(self):
        """
        Returns the current state of the beamline element. Common states:
        out - Element is out of the way of the beam (and should not be blocking).
        in - Element is in the beam (but should not be blocking).
        block - Element is in the beam, and should be blocking the beam.
        undefined - Element is in an unexpected state.
""" position = caget('SR:C11-ID:G5{3PW:1}Mtr.RBV') # TODO: Instead use the 'inserted' flag? # caget('SR:C11-ID:G5{3PW:1}InsertedFlag') if abs(position-0)<3: return "in" elif abs(position - -189.0)<10: return "out" else: return "undefined" def reading(self, verbosity=3): if self.state() is 'in': ring_current = caget('SR:OPS-BI{DCCT:1}I:Real-I') if verbosity>=2: print('{:s} is inserted; ring current = {:.1f} mA'.format(self.name, ring_current)) return ring_current else: if verbosity>=2: print('{:s} is not inserted.'.format(self.name)) return 0 class Monitor(BeamlineElement): def quickReading(self, verbosity=3, delay=1.0): """ Puts the diagnostic into the beam, takes a reading, and removes the diagnostic. """ self.insert() time.sleep(delay) value = self.reading(verbosity=verbosity) self.retract() time.sleep(delay) return value class DiagnosticScreen(Monitor): #XF:11BMB-BI{FS:4}Pos-Sts def __init__(self, name, zposition, description="", pv=None, epics_signal=None, **args): super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args) self.epics_signal = epics_signal self.has_flux = False def state(self): """ Returns the current state of the beamline element. Common states: out - Element is out of the way of the beam (and should not be blocking). in - Element is in the beam (but should not be blocking). block - Element is in the beam, and should be blocking the beam. undefined - Element is in an unexpected state. """ state_n = caget(self._pv_main+'Pos-Sts') if state_n is 0: return "out" elif state_n is 1: return "block" else: return "undefined" def insert(self, verbosity=3): if verbosity>=3: print('Inserting {:s}...'.format(self.name)) # E.g. #XF:11BMB-VA{Slt:4-GV:1}Cmd:Opn-Cmd pv = self._pv_main + 'Cmd:In-Cmd' caput(pv, 1) def retract(self, verbosity=3): if verbosity>=3: print('Retracting {:s}...'.format(self.name)) pv = self._pv_main + 'Cmd:Out-Cmd' caput(pv, 1) def reading(self, verbosity=3): value = self.epics_signal.stats1.total.value if self.state() is 'block': ring_current = caget('SR:OPS-BI{DCCT:1}I:Real-I') if verbosity>=2: print('{:s} is inserted; reading = {:.4g}'.format(self.name, value)) return value else: if verbosity>=2: print('{:s} is not inserted.'.format(self.name)) return 0 class PointDiode_CMS(Monitor): def __init__(self, name='bim6 point diode', zposition=58.3, description="Bar holding a point-diode, downstream of sample.", pv='XF:11BMB-BI{IM:2}EM180:Current1:MeanValue_RBV', epics_signal=None, **args): super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args) self.has_flux = True if epics_signal is None: #bim6 = EpicsSignalROWait("XF:11BMB-BI{IM:2}EM180:Current1:MeanValue_RBV", wait_time=1, name='bim6') #bim6_integrating = EpicsSignalROIntegrate("XF:11BMB-BI{IM:2}EM180:Current1:MeanValue_RBV", wait_time=0.5, integrate_num=8, integrate_delay=0.1, name='bim6') self.epics_signal = bim6_integrating else: self.epics_signal = epics_signal # The beam (at the ion chamber) is roughly 0.50x0.50 mm. # If we slit down to 0.20x0.05 mm, we are capturing 0.4*0.25 = 0.1 of the beam. # bim6 reads 70000 cts (of course this depends on settings) when ion chamber reads 1.3e11 ph/s. # (settings: trans = 5e-4) # So conversion_factor is roughly: self.conversion_factor = 1.3e11*0.1/70000. # (ph/s)/cts self.in_position_x = 0.0 self.in_position_y = 0.0 self.out_position_x = 0.0 self.out_position_y = -16.0 self.position_tolerance = 0.1 def state(self): """ Returns the current state of the beamline element. 
Common states: out - Element is out of the way of the beam (and should not be blocking). in - Element is in the beam (but should not be blocking). block - Element is in the beam, and should be blocking the beam. undefined - Element is in an unexpected state. """ position_x = DETx.user_readback.value position_y = DETy.user_readback.value if abs(position_x-self.out_position_x)<self.position_tolerance and abs(position_y-self.out_position_y)<self.position_tolerance: return "out" if abs(position_x-self.in_position_x)<self.position_tolerance and abs(position_y-self.in_position_y)<self.position_tolerance: return "block" else: return "undefined" def insert(self, verbosity=3): if verbosity>=3: print('Inserting {:s}...'.format(self.name)) #mov( [DETx, DETy], [self.in_position_x, self.in_position_y] ) DETx.move = self.in_position_x DETy.move = self.in_position_y def retract(self, verbosity=3): if verbosity>=3: print('Retracting {:s}...'.format(self.name)) #mov( [DETx, DETy], [self.out_position_x, self.out_position_y] ) DETx.move = self.out_position_x DETy.move = self.out_position_y def reading(self, verbosity=3): value = self.epics_signal.read()[self.epics_signal.name]['value'] if self.state() is 'block': if verbosity>=2: print('{:s} is inserted; reading = {:.4g}'.format(self.name, value)) return value else: if verbosity>=2: print('{:s} is not inserted.'.format(self.name)) return value class IonChamber_CMS(Monitor): def __init__(self, name='bim3 ionchamber', zposition=49, description="Ion chamber (FMB Oxford I404) at start of endstation hutch", pv=None, beam=None, **args): super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args) self.has_flux = True self.beam = beam # PVs import epics self.v1 = epics.PV('XF:11BMB-BI{IM:3}:IC1_MON') self.v2 = epics.PV('XF:11BMB-BI{IM:3}:IC2_MON') self.h1 = epics.PV('XF:11BMB-BI{IM:3}:IC3_MON') self.h2 = epics.PV('XF:11BMB-BI{IM:3}:IC4_MON') def state(self): return "in" def v_position(self): total = self.v1.value+self.v2.value if total>0: return (self.v1.value-self.v2.value)/(total) else: return 0 def h_position(self): total = self.h1.value+self.h2.value if total>0: return (self.h1.value-self.h2.value)/(total) else: return 0 def reading(self, verbosity=3): total = self.h1.value + self.h2.value + self.v1.value + self.v2.value if verbosity>=3: print('Reading for {:s} ({:s})'.format(self.name, self.description)) print(' Horizontal: {:9.4g} + {:9.4g} = {:9.4g}'.format(self.h1.value, self.h2.value, self.h1.value+self.h2.value)) print(' position: {:.3f}'.format(self.h_position())) print(' Vertical: {:9.4g} + {:9.4g} = {:9.4g}'.format(self.v1.value, self.v2.value, self.v1.value+self.v2.value)) print(' position: {:.3f}'.format(self.v_position())) if verbosity>=2: print(' Total: {:9.4g}'.format(total)) return total def current_to_flux(self, current): energy_keV = self.beam.energy(verbosity=0) V_ion = 0.036 ## ionization energy of N2 gas in [keV] IC_len = 6.0 ## active length of Ion Chamber in [cm] qe = 1.602e-19 ## electron charge in [C] ## Absorption length [cm] of gas N2 (1 atm, 1.131 g/L) vs E [keV] # based on polynomial fit to the calculated abs length data from: henke.lbl.gov/optical_constants/atten2.html # see /home/xf11bm/masa/atten_len_N2* abs_len = 355.21 - 112.26*energy_keV + 11.200*np.square(energy_keV) - 0.10611*np.power(energy_keV,3.0) N_abs = current*V_ion/(qe*energy_keV) flux = N_abs / (1.0 - np.exp(-IC_len/abs_len)) return flux def flux(self, verbosity=3): if self.reading(verbosity=0) < 5e-10: return 0.0 h1 = 
self.current_to_flux(self.h1.value) h2 = self.current_to_flux(self.h2.value) h_total = h1 + h2 v1 = self.current_to_flux(self.v1.value) v2 = self.current_to_flux(self.v2.value) v_total = v1 + v2 total = h_total + v_total avg = total*0.5 if verbosity>=3: print('Flux for {:s} ({:s})'.format(self.name, self.description)) print(' Horizontal: {:9.4g} + {:9.4g} = {:9.4g} ph/s'.format(h1, h2, h1+h2)) print(' position: {:.3f}'.format(self.h_position())) print(' Vertical: {:9.4g} + {:9.4g} = {:9.4g} ph/s'.format(v1, v2, v1+v2)) print(' position: {:.3f}'.format(self.v_position())) if verbosity>=2: print(' Average: {:9.4g} ph/s'.format(avg)) return avg #ionchamber = IonChamber_CMS(beam=beam) class Scintillator_CMS(Monitor): def __init__(self, name='bim4 scintillator', zposition=57, description="Scintillation detector (FMB Oxford C400) between S3 and KB tank in endstation hutch. Captures scattering off of a Kapton film at 45 degrees.", pv=None, beam=None, **args): super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args) self.has_flux = True self.beam = beam # PVs import epics self.sec = epics.PV('XF:11BMB-BI{IM:4}:GET_PERIOD') # integration time in [sec] self.cts = epics.PV('XF:11BMB-BI{IM:4}:C1_1') # raw counts def state(self): return "in" def reading(self, verbosity=3): if self.sec.value == 0.0: print('Counting time set to zero. Check CSS settings for FMB Oxford C400.') return 0 else: sec = self.sec.value cts = self.cts.value cps = cts/sec if verbosity>=3: print('Reading for {:s} ({:s})'.format(self.name, self.description)) print(' Count time: {:9.4g} sec'.format(sec)) print(' Raw counts: {:9.4g} counts'.format(cts)) if verbosity>=2: print(' Count rate: {:9.4g} counts/sec'.format(cps)) return cps def cps_to_flux(self, cps): ### Ratio between estimated beam flux to raw scintillator counts # (see Olog entry on July 7, 2017) # For unslitted, unattenuated beam at 13.5 keV, # BIM4 yields 2.86E5 cts/sec for 1.85E11 ph/s at BIM3: # 1.85E11 / 2.86E5 = 647000 (ph/s)/(cts/sec). cps_to_flux_factor = 647000. flux = cps_to_flux_factor * cps return flux def flux(self, verbosity=3): if self.reading(verbosity=0) < 5e-10: return 0.0 flux = self.cps_to_flux(self.reading(verbosity=0)) if verbosity>=3: print('Flux for {:s} ({:s})'.format(self.name, self.description)) if verbosity>=2: print(' Beam flux: {:9.4g} ph/s'.format(flux)) return flux class DiamondDiode_CMS(Monitor): def __init__(self, name='bim5 diamonddiode', zposition=58.2, description="Diamond diode BPM (Dectris RIGI via FMB Oxford F460) between KB tank and sample chamber in endstation hutch. 
Needs to be insered into beam via IM:5.", pv=None, beam=None, **args): super().__init__(name=name, zposition=zposition, description=description, pv=pv, **args) self.has_flux = True self.beam = beam # PVs import epics self.i0 = epics.PV('XF:11BMB-BI{BPM:1}Cur:I0-I') # upper left self.i1 = epics.PV('XF:11BMB-BI{BPM:1}Cur:I1-I') # upper right self.i2 = epics.PV('XF:11BMB-BI{BPM:1}Cur:I2-I') # lower left self.i3 = epics.PV('XF:11BMB-BI{BPM:1}Cur:I3-I') # lower right def state(self): # TODO: fix this so it queries state of IM:5 return "in" def v_position(self): total = self.i0.value + self.i1.value + self.i2.value + self.i3.value if total>0: return (self.i0.value + self.i1.value - self.i2.value - self.i3.value)/(total) else: return 0 def h_position(self): total = self.i0.value + self.i1.value + self.i2.value + self.i3.value if total>0: return (self.i1.value + self.i3.value - self.i0.value - self.i2.value)/(total) else: return 0 def reading(self, verbosity=3): #total = self.i0.value + self.i1.value + self.i2.value + self.i3.value ## 07/12/2017 Total dark current with beam off is ~9.3e-10 A. dark_current = 9.3e-10 total = self.i0.value + self.i1.value + self.i2.value + self.i3.value - dark_current if verbosity>=3: print('Reading for {:s} ({:s})'.format(self.name, self.description)) print(' Horizontal:') print(' Right: {:9.4g} + {:9.4g} = {:9.4g} A'.format(self.i1.value, self.i3.value, self.i1.value+self.i3.value)) print(' Left: {:9.4g} + {:9.4g} = {:9.4g} A'.format(self.i0.value, self.i2.value, self.i0.value+self.i2.value)) print(' Position [-1(L) to 1(R), 0 at center]: {:.3f}'.format(self.h_position())) print(' Vertical:') print(' Top: {:9.4g} + {:9.4g} = {:9.4g} A'.format(self.i0.value, self.i1.value, self.i0.value+self.i1.value)) print(' Bottom: {:9.4g} + {:9.4g} = {:9.4g} A'.format(self.i2.value, self.i3.value, self.i2.value+self.i3.value)) print(' Position [-1(B) to 1(T), 0 at center]: {:.3f}'.format(self.v_position())) if verbosity>=2: print(' Total current: {:9.4g} A'.format(total)) return total def current_to_flux(self, current): ### Ratio between estimated beam flux to raw TOTAL current for the 4 quadrants # (see Olog entry on July 7, 2017). # For unslitted, unattenuated beam at 13.5 keV, # BIM5 yields a TOTAL current of 4.8E-8 A at ~230 mA ring current, # corresponding to 1.38E11 ph/s at BIM3: # 1.38E11 / 4.8E-8 = 0.29E19 (ph/s)/A. # With dark current (total = 9.3e-10 A = 0.093e-8 A) taken into account, # 1.38E11 / 4.7E-8 = 0.294E19 (ph/s)/A. 
current_to_flux_factor = 2.94E18 flux = current_to_flux_factor * current return flux def flux(self, verbosity=3): if self.reading(verbosity=0) < 1e-11: return 0.0 right = self.current_to_flux(self.i1.value+self.i3.value) left = self.current_to_flux(self.i0.value+self.i2.value) top = self.current_to_flux(self.i0.value+self.i1.value) bottom = self.current_to_flux(self.i2.value+self.i3.value) total = self.current_to_flux(self.reading(verbosity=0)) if verbosity>=3: print('Flux for {:s} ({:s})'.format(self.name, self.description)) print(' Horizontal:') print(' Right: {:9.4g} ph/s'.format(right)) print(' Left: {:9.4g} ph/s'.format(left)) print(' Position [-1(L) to 1(R), 0 at center]: {:.3f}'.format(self.h_position())) print(' Vertical:') print(' Top: {:9.4g} ph/s'.format(top)) print(' Bottom: {:9.4g} ph/s'.format(bottom)) print(' Position [-1(B) to 1(T), 0 at center]: {:.3f}'.format(self.v_position())) if verbosity>=2: print(' Total flux: {:9.4g} ph/s'.format(total)) return total # CMSBeam ################################################################################ class CMSBeam(object): """ This class represents the 'beam' at the beamline. This collects together aspects of querying or changing beam properties, including the energy (or wavelength), the beam intensity (or measuring flux), and so forth. """ def __init__(self): self.mono_bragg_pv = 'XF:11BMA-OP{Mono:DMM-Ax:Bragg}Mtr.RBV' # (planck constant * speed of light)/(electronic charge) self.hc_over_e = 1.23984197e-6 # m^3 kg s^-3 Amp^-1 = eV*m self.hc_over_e_keVpA = self.hc_over_e*1e7 # = 12.4 keV*Angstrom # DMM bilayer pitch in Angstroms, according to Rigaku metrology report self.dmm_dsp = 20.1 # Angstroms self.mono = BeamlineElement('monochromator', 26.5) def transmission(verbosity=0): return 1e-7 self.mono.transmission = transmission self.attenuator = BeamlineElement('attenuator', 53.8, description="Attenuator/filter box") self.attenuator.has_flux = False def reading(verbosity=0): return self.transmission(verbosity=verbosity) self.attenuator.reading = reading self.attenuator.transmission = self.transmission if False: self.fs1 = DiagnosticScreen( 'fs1', 27.2, pv='XF:11BMA-BI{FS:1}', epics_signal=StandardProsilica('XF:11BMA-BI{FS:1-Cam:1}', name='fs1') ) #self.fs2 = DiagnosticScreen( 'fs2', 29.1, pv='XF:11BMA-BI{FS:2}', epics_signal=StandardProsilica('XF:11BMA-BI{FS:2-Cam:1}', name='fs2') ) self.fs3 = DiagnosticScreen( 'fs3', 55.8, pv='XF:11BMB-BI{FS:3}', epics_signal=StandardProsilica('XF:11BMB-BI{FS:3-Cam:1}', name='fs3') ) self.fs4 = DiagnosticScreen( 'fs4', 58.2, pv='XF:11BMB-BI{FS:4}', epics_signal=StandardProsilica('XF:11BMB-BI{FS:4-Cam:1}', name='fs4') ) self.fs5 = DiagnosticScreen( 'fs5', 70.0, pv='XF:11BMB-BI{FS:Test-Cam:1}', epics_signal=StandardProsilica('XF:11BMB-BI{FS:4-Cam:1}', name='fs5') ) else: # Rely on the fact that these are defined in 20-area-detectors.py self.fs1 = DiagnosticScreen( 'fs1', 27.2, pv='XF:11BMA-BI{FS:1}', epics_signal=fs1 ) #self.fs2 = DiagnosticScreen( 'fs2', 29.1, pv='XF:11BMA-BI{FS:2}', epics_signal=fs2 ) self.fs3 = DiagnosticScreen( 'fs3', 55.8, pv='XF:11BMB-BI{FS:3}', epics_signal=fs3 ) self.fs4 = DiagnosticScreen( 'fs4', 58.2, pv='XF:11BMB-BI{FS:4}', epics_signal=fs4 ) self.fs5 = DiagnosticScreen( 'fs5', 70.0, pv='XF:11BMB-BI{FS:Test-Cam:1}', epics_signal=fs5 ) self.bim3 = IonChamber_CMS(beam=self) self.bim4 = Scintillator_CMS() self.beam_defining_slit = s4 self.bim5 = DiamondDiode_CMS() self.bim6 = PointDiode_CMS() self.GVdsbig = GateValve('GV ds big', 60.0, pv='XF:11BMB-VA{Chm:Det-GV:1}') 
self.elements = [] # Front End self.elements.append(ThreePoleWiggler()) #SR:C03-EPS{PLC:1}Sts:BM_BMPS_Opn-Sts BMPS self.elements.append(GateValve('GV1', 20.0, pv='FE:C03A-VA{GV:1}DB:')) self.elements.append(GateValve('GV2', 21.0, pv='FE:C03A-VA{GV:2}DB:')) # FOE self.elements.append(Shutter('FE shutter', 25.0, pv='XF:11BM-PPS{Sh:FE}')) self.elements.append(GateValve('GV', 26.0, pv='FE:C11B-VA{GV:2}')) self.elements.append(self.mono) self.elements.append(self.fs1) # bim1 # slit0 # bim2 self.elements.append(GateValve('GV', 28.0, pv='XF:11BMA-VA{Slt:0-GV:1}')) self.elements.append(BeamlineElement('mirror', 29.1)) self.elements.append(GateValve('GV', 30.5, pv='XF:11BMA-VA{Mir:Tor-GV:1}')) self.elements.append(BeamlineElement('fs2 (manual)', 30.9)) # self.elements.append(self.fs2) self.elements.append(Shutter('photon shutter', 33.7, pv='XF:11BMA-PPS{PSh}')) self.elements.append(GateValve('GV', 34.0, pv='XF:11BMA-VA{PSh:1-GV:1}')) # Endstation self.elements.append(self.bim3) # Experimental shutter 49.5 self.elements.append(self.attenuator) self.elements.append(self.fs3) self.elements.append(self.bim4) # scintillation detector self.elements.append(BeamlineElement('KB mirrors', 57.8)) self.elements.append(self.fs4) self.elements.append(self.bim5) # diamond diode BPM # im4 #self.elements.append(GateValve('GV us small', 58.5, pv='XF:11BMB-VA{Slt:4-GV:1}')) self.elements.append(BeamlineElement('sample', 58.8)) self.elements.append(self.bim6) # dsmon self.elements.append(BeamlineElement('WAXS detector', 59.0)) self.elements.append(self.GVdsbig) self.elements.append(BeamlineElement('SAXS detector', 58.8+5)) # Sort by position along the beam self.elements.sort(key=lambda o: o.zposition, reverse=False) # Monochromator ######################################## def energy(self, verbosity=3): """ Returns the current x-ray photon energy (in keV). """ # Current angle of monochromator multilayer crystal Bragg_deg = caget(self.mono_bragg_pv) Bragg_rad = np.radians(Bragg_deg) wavelength_A = 2.*self.dmm_dsp*np.sin(Bragg_rad) wavelength_m = wavelength_A*1e-10 energy_eV = self.hc_over_e/wavelength_m energy_keV = energy_eV/1000. if verbosity>=3: print('E = {:.2f} keV, wavelength = {:.4f} Å, Bragg = {:.6f} rad = {:.4f} deg'.format(energy_keV, wavelength_A, Bragg_rad, Bragg_deg)) elif verbosity>=1: print('E = {:.3f} keV'.format(energy_keV)) return energy_keV def wavelength(self, verbosity=3): """ Returns the current x-ray photon wavelength (in Angstroms). """ # Current angle of monochromator multilayer crystal Bragg_deg = caget(self.mono_bragg_pv) Bragg_rad = np.radians(Bragg_deg) wavelength_A = 2.*self.dmm_dsp*np.sin(Bragg_rad) wavelength_m = wavelength_A*1e-10 # (planck constant * speed of light)/(electronic charge) energy_eV = self.hc_over_e/wavelength_m energy_keV = energy_eV/1000. if verbosity>=3: print('wavelength = {:.4f} Å, E = {:.2f} keV, Bragg = {:.6f} rad = {:.4f} deg'.format(wavelength_A, energy_keV, Bragg_rad, Bragg_deg)) elif verbosity>=1: print('wavelength = {:.5f} Å'.format(wavelength_A)) return wavelength_A def setEnergy(self, energy_keV, verbosity=3): """ Set the x-ray beam to the specified energy (by changing the monochromator angle. """ energy_eV = energy_keV*1000. wavelength_m = self.hc_over_e/energy_eV wavelength_A = wavelength_m*1.e10 self.setWavelength(wavelength_A, verbosity=verbosity) return self.energy(verbosity=0) def setWavelength(self, wavelength_A, verbosity=3): """ Set the x-ray beam to the specified wavelength (by changing the monochromator angle. 
""" Bragg_deg_initial = caget(self.mono_bragg_pv) wavelength_m = wavelength_A*1.e-10 Bragg_rad = np.arcsin(wavelength_A/(2.*self.dmm_dsp)) Bragg_deg = np.degrees(Bragg_rad) print('mono_bragg will move to {:.4f}g deg'.format(Bragg_deg)) response = input(' Are you sure? (y/[n]) ') if response is 'y' or response is 'Y': #mov(mono_bragg, Bragg_deg) mono_bragg.move = Bragg_deg if verbosity>=1: print('mono_bragg moved from {:.4f} deg to {:.4f} deg'.format(Bragg_deg_initial, Bragg_deg)) elif verbosity>=1: print('No move was made.') return self.wavelength(verbosity=verbosity) # Slits ######################################## def size(self, verbosity=3): """ Returns the current beam size (rough estimate). The return is (size_horizontal, size_vertical) (in mm). """ size_h = self.beam_defining_slit.xg.user_readback.value size_v = self.beam_defining_slit.yg.user_readback.value if verbosity>=3: print('Beam size:') print(' horizontal = {:.3f} mm'.format(size_h)) print(' vertical = {:.3f} mm'.format(size_v)) return size_h, size_v def setSize(self, horizontal, vertical, verbosity=3): """ Sets the beam size. """ h, v = self.size(verbosity=0) if verbosity>=3: print('Changing horizontal beam size from {:.3f} mm to {:.3f} mm'.format(h, horizontal)) self.beam_defining_slit.xg.user_setpoint.value = horizontal if verbosity>=3: print('Changing vertical beam size from {:.3f} mm to {:.3f} mm'.format(v, vertical)) self.beam_defining_slit.yg.user_setpoint.value = vertical def divergence(self, verbosity=3): """ Returns the beamline divergence. This is based on the Front End (FE) slits. The return is (horizontal, vertical) (in mrad). """ distance_m = 10.0 # distance from source to slits horizontal_mm = caget('FE:C11B-OP{Slt:12-Ax:X}t2.C') vertical_mm = caget('FE:C11B-OP{Slt:12-Ax:Y}t2.C') horizontal_mrad = horizontal_mm/distance_m vertical_mrad = vertical_mm/distance_m if verbosity>=3: print('Beam divergence:') print(' horizontal = {:.3f} mrad'.format(horizontal_mrad)) print(' vertical = {:.3f} mrad'.format(vertical_mrad)) return horizontal_mrad, vertical_mrad def setDivergence(self, horizontal, vertical, verbosity=3): """ Set beamline divergence (in mrad). This is controlled using the Front End (FE) slits. 
""" h, v = self.divergence(verbosity=0) distance_m = 10.0 # distance from source to slits horizontal_mm = horizontal*distance_m vertical_mm = vertical*distance_m if horizontal<0: if verbosity>=1: print("Horizontal divergence less than zero ({}) doesn't make sense.".format(horizontal)) elif horizontal>1.5: if verbosity>=1: print("Horizontal divergence should be less than 1.5 mrad.") else: if verbosity>=3: print('Changing horizontal divergence from {:.3f} mrad to {:.3f} mrad.'.format(h, horizontal)) caput('FE:C11B-OP{Slt:12-Ax:X}size', horizontal_mm) if vertical<0: if verbosity>=1: print("Vertical divergence less than zero ({}) doesn't make sense.".format(vertical)) elif vertical>0.15: if verbosity>=1: print("Vertical divergence should be less than 0.15 mrad.") else: if verbosity>=3: print('Changing vertical divergence from {:.3f} mrad to {:.3f} mrad.'.format(v, vertical)) caput('FE:C11B-OP{Slt:12-Ax:Y}size', vertical_mm) # Experimental Shutter ######################################## def is_on(self, verbosity=3): '''Returns true if the beam is on (experimental shutter open).''' blade1 = caget('XF:11BMB-OP{PSh:2}Pos:1-Sts') blade2 = caget('XF:11BMB-OP{PSh:2}Pos:2-Sts') if blade1==1 and blade2==1: if verbosity>=4: print('Beam on (shutter open).') return True else: if verbosity>=4: print('Beam off (shutter closed).') return False def on(self, verbosity=3, wait_time=0.1, poling_period=0.10, retry_time=2.0, max_retries=5): '''Turn on the beam (open experimental shutter). update: 090517, RL: change the wait_time from 0.005 to 0.1, change sleep to time.sleep''' if self.is_on(verbosity=0): if verbosity>=4: print('Beam on (shutter already open.)') else: itry = 0 while (not self.is_on(verbosity=0)) and itry<max_retries: # Trigger the shutter to toggle state caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M112=1') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M111=1') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M112=0') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M111=1') time.sleep(wait_time) # Give the system a chance to update start_time = time.time() while (not self.is_on(verbosity=0)) and (time.time()-start_time)<retry_time: if verbosity>=5: print(' try {:d}, t = {:02.2f} s, state = {:s}'.format(itry+1, (time.time()-start_time), 'OPEN_____' if self.is_on(verbosity=0) else 'CLOSE====')) time.sleep(poling_period) itry += 1 if verbosity>=4: if self.is_on(verbosity=0): print('Beam on (shutter opened).') else: print("Beam off (shutter didn't open).") def off(self, verbosity=3, wait_time=0.1, poling_period=0.10, retry_time=2.0, max_retries=5): '''Turn off the beam (close experimental shutter). 
update: 090517, RL: change the wait_time from 0.005 to 0.1, change sleep to time.sleep''' if self.is_on(verbosity=0): itry = 0 while self.is_on(verbosity=0) and itry<max_retries: # Trigger the shutter to toggle state caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M112=1') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M111=1') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M112=0') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M111=1') time.sleep(wait_time) # Give the system a chance to update start_time = time.time() while self.is_on(verbosity=0) and (time.time()-start_time)<retry_time: if verbosity>=5: print(' try {:d}, t = {:02.2f} s, state = {:s}'.format(itry+1, (time.time()-start_time), 'OPEN_____' if self.is_on(verbosity=0) else 'CLOSE====')) time.sleep(poling_period) itry += 1 if verbosity>=4: if self.is_on(verbosity=0): print("Beam on (shutter didn't close).") else: print('Beam off (shutter closed).') else: if verbosity>=4: print('Beam off (shutter already closed).') def blade1_is_on(self, verbosity=3): '''Returns true if the beam is on (experimental shutter open).''' blade1 = caget('XF:11BMB-OP{PSh:2}Pos:1-Sts') if blade1==1: if verbosity>=4: print('Beam on (shutter open).') return True else: if verbosity>=4: print('Beam off (shutter closed).') return False def blade2_is_on(self, verbosity=3): '''Returns true if the beam is on (experimental shutter open).''' blade2 = caget('XF:11BMB-OP{PSh:2}Pos:2-Sts') if blade2==1: if verbosity>=4: print('Beam on (shutter open).') return True else: if verbosity>=4: print('Beam off (shutter closed).') return False def _test_on(self, verbosity=3, wait_time=0.1, poling_period=0.10, retry_time=2.0, max_retries=5): '''Turn on the beam (open experimental shutter).''' #print('1') #print(sam.clock()) if self.is_on(verbosity=0): if verbosity>=4: print('Beam on (shutter already open.)') else: itry = 0 while (not self.blade1_is_on(verbosity=0)) and itry<max_retries: # Trigger the shutter to toggle state caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M112=1') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M111=1') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M112=0') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M111=1') time.sleep(wait_time) # Give the system a chance to update start_time = time.time() #print('2') #print(sam.clock()) while (not self.blade1_is_on(verbosity=0)) and (time.time()-start_time)<retry_time: if verbosity>=5: print(' try {:d}, t = {:02.2f} s, state = {:s}'.format(itry+1, (time.time()-start_time), 'OPEN_____' if self.is_on(verbosity=0) else 'CLOSE====')) sleep(poling_period) #print('3') #print(sam.clock()) itry += 1 if verbosity>=4: if self.blade1_is_on(verbosity=0): print('Beam on (shutter opened).') else: print("Beam off (shutter didn't open).") #print('4') #print(sam.clock()) def _test_off(self, verbosity=3, wait_time=0.1, poling_period=0.10, retry_time=2.0, max_retries=5): '''Turn off the beam (close experimental shutter).''' #print('1') #print(sam.clock()) if self.is_on(verbosity=0): itry = 0 while self.is_on(verbosity=0) and itry<max_retries: # Trigger the shutter to toggle state caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M112=1') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M111=1') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M112=0') time.sleep(wait_time) caput('XF:11BMB-CT{MC:06}Asyn.AOUT','M111=1') time.sleep(wait_time) # Give the system a chance to update start_time = time.time() #print('2') #print(sam.clock()) while self.is_on(verbosity=0) and 
(time.time()-start_time)<retry_time: if verbosity>=5: print(' try {:d}, t = {:02.2f} s, state = {:s}'.format(itry+1, (time.time()-start_time), 'OPEN_____' if self.is_on(verbosity=0) else 'CLOSE====')) time.sleep(poling_period) #print('3') #print(sam.clock()) itry += 1 if verbosity>=4: if self.blade1_is_on(verbosity=0): print("Beam on (shutter didn't close).") else: print('Beam off (shutter closed).') #print('4') #print(sam.clock()) else: if verbosity>=4: print('Beam off (shutter already closed).') #print('5') #print(sam.clock()) # Attenuator/Filter Box ######################################## def transmission(self, verbosity=3): """ Returns the current beam transmission through the attenuator/filter box. To change the transmission, use 'setTransmission'. """ energy_keV = self.energy(verbosity=0) if energy_keV < 6.0 or energy_keV > 18.0: print('Transmission data not available at the current X-ray energy ({.2f} keV).'.format(energy_keV)) else: # The states of the foils in the filter box N = [ caget('XF:11BMB-OP{{Fltr:{:d}}}Pos-Sts'.format(ifoil)) for ifoil in range(1, 8+1) ] tr_tot = self.calc_transmission_filters(N, verbosity=verbosity) return tr_tot def calc_transmission_filters(self, filter_settings, energy_keV=None, verbosity=3): """ Returns the computed transmission value for the given configuration of foils. Note that the foils are not actually moved. This is just a calculation. Parameters ---------- filter_settings : array of length 8 Each element must be either a zero (foil removed) or a 1 (foil blocking beam) energy_keV : float If 'None', the current energy is used. If specified, the calculation is performed for the requested energy. Returns ------- transmission : float The computed transmission value of the x-ray beam through the filter box. """ if energy_keV is None: energy_keV = self.energy(verbosity=0) if len(filter_settings) != 8: print('States for all eight foils must be specified.') else: N = filter_settings E = energy_keV E2 = np.square(E) E3 = np.power(E, 3) # foil thickness blocking the beam N_Al = N[0] + 2*N[1] + 4*N[2] + 8*N[3] N_Nb = N[4] + 2*N[5] + 4*N[6] + 8*N[7] d_Nb = 0.1 # Thickness [mm] of one Nb foil d_Al = 0.25 # Thickness [mm] of one Al foil # Absorption length [mm] based on fits to LBL CXRO data for 6 < E < 19 keV l_Nb = 1.4476e-3 - 5.6011e-4 * E + 1.0401e-4 * E2 + 8.7961e-6 * E3 l_Al = 5.2293e-3 - 1.3491e-3 * E + 1.7833e-4 * E2 + 1.4001e-4 * E3 # transmission factors tr_Nb = np.exp(-N_Nb*d_Nb/l_Nb) tr_Al = np.exp(-N_Al*d_Al/l_Al) tr_tot = tr_Nb*tr_Al if verbosity>=5: print(' state: {} T = {:.6g}'.format(filter_settings, tr_tot)) if verbosity>=4: print('{:d} × 0.25 mm Al ({:.4g}) and {:d} × 0.10 mm Nb ({:.4g})'.format(N_Al, tr_Al, N_Nb, tr_Nb) ) if verbosity>=1: print('transmission = {:.6g}'.format(tr_tot)) return tr_tot def set_attenuation_filters(self, filter_settings, verbosity=3): """ Sets the positions (in/out) for each of the foils in the attenuator/ filter box. The input 'filter_settings' should be an array of length 8, where each element is either a zero (foil removed) or a 1 (foil blocking beam). 
""" if verbosity>=4: print('Filters:') # The states of the foils in the filter box filters_initial = [ caget('XF:11BMB-OP{{Fltr:{:d}}}Pos-Sts'.format(ifoil)) for ifoil in range(1, 8+1) ] print(' initial: {} T = {:.6g}'.format(filters_initial, self.calc_transmission_filters(filters_initial, verbosity=0))) print(' requested: {} T = {:.6g}'.format(filter_settings, self.calc_transmission_filters(filter_settings, verbosity=0))) if len(filter_settings) != 8: print('States for all eight foils must be specified.') else: for i, state in enumerate(filter_settings): ifoil = i+1 if state==1: # Put foil #ifoil into the beam caput( 'XF:11BMB-OP{{Fltr:{:d}}}Cmd:In-Cmd'.format(ifoil) , 1 ) elif state==0: # Remove foil #ifoil caput( 'XF:11BMB-OP{{Fltr:{:d}}}Cmd:Out-Cmd'.format(ifoil) , 1 ) else: if verbosity>=3: state_actual = caget( 'XF:11BMB-OP{{Fltr:{:d}}}Pos-Sts'.format(ifoil) ) state_actual_str = 'IN' if state_actual is 1 else 'OUT' print('WARNING: Filter state {} not recognized. Filter {:d} is {:s}.'.format(state, ifoil, state_actual_str)) time.sleep(1.) # Wait for filter box to settle if verbosity>=4: filters_final = [ caget('XF:11BMB-OP{{Fltr:{:d}}}Pos-Sts'.format(ifoil)) for ifoil in range(1, 8+1) ] print(' final: {} T = {:.6g}'.format(filters_final, self.calc_transmission_filters(filters_final, verbosity=0))) def setTransmission(self, transmission, retries=3, tolerance=0.5, verbosity=3): """ Sets the transmission through the attenuator/filter box. Because the filter box has a discrete set of foils, it is impossible to exactly match a given transmission value. A nearby value will be selected. """ energy_keV = self.energy(verbosity=0) if energy_keV < 6.0 or energy_keV > 18.0: print('Transmission data not available at the current X-ray energy ({.2f} keV).'.format(energy_keV)) elif transmission > 1.0: print('A transmission above 1.0 is not possible.') elif transmission < 1e-10: print('A transmission this low ({:g}) cannot be reliably achieved.'.format(transmission)) else: E = energy_keV E2 = np.square(E) E3 = np.power(E, 3) d_Nb = 0.1 # Thickness [mm] of one Nb foil d_Al = 0.25 # Thickness [mm] of one Al foil # Absorption length [mm] based on fits to LBL CXRO data for 6 < E < 19 keV l_Nb = 1.4476e-3 - 5.6011e-4 * E + 1.0401e-4 * E2 + 8.7961e-6 * E3 l_Al = 5.2293e-3 - 1.3491e-3 * E + 1.7833e-4 * E2 + 1.4001e-4 * E3 d_l_Nb = d_Nb/l_Nb d_l_Al = d_Al/l_Al # Number of foils to be inserted (equivalent to "XIA_attn.mac" from X9) #N_Nb = int(-log(transmission)/d_l_Nb) ##N_Al = int((-log(transmission) - N_Nb*d_l_Nb)/(d_l_Al-0.5)) #N_Al = int((-log(transmission) - N_Nb*d_l_Nb)/d_l_Al) # Number of foils to be inserted (picks a set that gives smallest deviation from requested transmission) dev = [] for i in np.arange(16): for j in np.arange(16): dev_ij = abs(transmission - exp(-i*d_l_Nb)*exp(-j*d_l_Al)) dev.append(dev_ij) if (dev_ij == min(dev)): N_Nb = i # number of Nb foils selected N_Al = j # number of Al foils selected N = [] state = N_Al for i in np.arange(4): N.append(state % 2) state = int(state/2) state = N_Nb for i in np.arange(4): N.append(state % 2) state = int(state/2) self.set_attenuation_filters(N, verbosity=verbosity) # Check that transmission was actually correctly changed if abs(self.transmission(verbosity=0)-transmission)/transmission > tolerance: if retries>0: #time.sleep(0.5) # Try again return self.setTransmission(transmission, retries=retries-1, tolerance=tolerance, verbosity=verbosity) else: print("WARNING: transmission didn't update correctly (request: {}; actual: 
{})".format(transmission, self.transmission(verbosity=0))) return self.transmission(verbosity=verbosity) # Flux estimates at various points along the beam ######################################## # TBD # Flux diagnostics ######################################## def fluxes(self, verbosity=3): """ Outputs a list of fluxes at various points along the beam. Also checks the state (in or out of the beam) of various components, to help identify if anything could be blocking the beam. """ if verbosity>=1: print('+--------+------------------+-----+-------------+-------------+-------------+') print('| pos | name |path | reading | flux (ph/s) | expected |') print('|--------|------------------|-----|-------------|-------------|-------------|') last_z = -100 beam = True flux_expected = None for element in self.elements: state = element.state() if state is 'block': beam = False if verbosity>=4: if element.zposition >= 0 and last_z < 0: print('| Front End | | | | |') if element.zposition > 25 and last_z < 25: print('| FOE | | | | |') if element.zposition > 50 and last_z < 50: print('| Endstation | | | | |') last_z = element.zposition flux_expected if verbosity>=1: if state is 'in': if beam: path = '(|)' else: path = '(-)' elif state is 'out': if beam: path = ' | ' else: path = '---' elif state is 'block': path = '[X]' beam = False elif state is 'undefined': if beam: path = '?|?' else: path = '?-?' else: path = '???' if flux_expected is None or not beam: flux_expected_str = '' else: flux_expected_str = '{:11.3g}'.format(flux_expected) flux_expected *= element.transmission(verbosity=0) if callable(getattr(element, 'reading', None)): reading_str = '{:11.3g}'.format(element.reading(verbosity=0)) state = element.state() if element.has_flux and (state=='in' or state=='block'): flux_cur = element.flux(verbosity=0) flux_expected = flux_cur flux_str = '{:11.3g}'.format(flux_cur) else: flux_str = '' else: reading_str = '' flux_str = '' print('|{:5.1f} m | {:16.16} | {:s} | {:11.11} | {:11.11} | {:11.11} |'.format(element.zposition, element.name, path, reading_str, flux_str, flux_expected_str)) #beam = True # For testing if verbosity>=1: print('+--------+------------------+-----+-------------+-------------+-------------+') # End class CMSBeam(object) ######################################## beam = CMSBeam() class Beamline(object): '''Generic class that encapsulates different aspects of the beamline. The intention for this object is to have methods that activate various 'standard' protocols or sequences of actions.''' def __init__(self, **kwargs): self.md = {} self.current_mode = 'undefined' def mode(self, new_mode): '''Tells the instrument to switch into the requested mode. This may involve moving detectors, moving the sample, enabling/disabling detectors, and so on.''' getattr(self, 'mode'+new_mode)() def get_md(self, prefix=None, **md): '''Returns a dictionary of the current metadata. The 'prefix' argument is prepended to all the md keys, which allows the metadata to be grouped with other metadata in a clear way. 
(Especially, to make it explicit that this metadata came from the beamline.)''' # Update internal md #self.md['key'] = value md_return = self.md.copy() # Add md that may change md_return['mode'] = self.current_mode # Include the user-specified metadata md_return.update(md) # Add an optional prefix if prefix is not None: md_return = { '{:s}{:s}'.format(prefix, key) : value for key, value in md_return.items() } return md_return def comment(self, text, logbooks=None, tags=None, append_md=True, **md): text += '\n\n[comment for beamline: {}]'.format(self.__class__.__name__) if append_md: # Global md md_current = { k : v for k, v in RE.md.items() } # Beamline md md_current.update(self.get_md()) # Specified md md_current.update(md) text += '\n\n\nMetadata\n----------------------------------------' for key, value in sorted(md_current.items()): text += '\n{}: {}'.format(key, value) logbook.log(text, logbooks=logbooks, tags=tags) def log_motors(self, motors, verbosity=3, **md): log_text = 'Motors\n----------------------------------------\nname | position | offset | direction |\n' for motor in motors: offset = float(caget(motor.prefix+'.OFF')) direction = int(caget(motor.prefix+'.DIR')) log_text += '{} | {} | {} | {} |\n'.format(motor.name, motor.user_readback.value, offset, direction) md_current = { k : v for k, v in RE.md.items() } md_current.update(md) log_text += '\nMetadata\n----------------------------------------\n' for k, v in sorted(md_current.items()): log_text += '{}: {}\n'.format(k, v) if verbosity>=3: print(log_text) self.comment(log_text) class CMS_Beamline(Beamline): '''This object collects together various standard protocols and sequences of action used on the CMS (11-BM) beamline at NSLS-II.''' def __init__(self, **kwargs): super().__init__(**kwargs) self.beam = beam #self.SAXS = CMS_SAXS_Detector(pilatus300) #self.WAXS = CMS_WAXS_Detector() self.SAXS = CMS_SAXS_Detector(pilatus_name) from epics import PV self._chamber_pressure_pv = PV('XF:11BMB-VA{Chm:Det-TCG:1}P-I') self.detector = [] self.PLOT_Y = [] self.TABLE_COLS = [] self.bsx_pos = -16.74 def modeAlignment_bim6(self, verbosity=3): self.current_mode = 'undefined' # TODO: Check what mode (TSAXS, GISAXS) and respond accordingly # TODO: Check if gate valves are open and flux is okay (warn user) self.beam.off() #self.beam.setTransmission(1e-4) self.beam.setTransmission(5e-4) #mov( [DETx, DETy], [0, 0] ) self.beam.bim6.insert() caput('XF:11BMB-BI{IM:2}EM180:Acquire', 1) # Turn on bim6 detselect(bim6, suffix='') self.current_mode = 'alignment' self.beam.bim6.reading() def modeMeasurement_bim6(self, verbosity=3): self.current_mode = 'undefined' self.beam.off() self.beam.setTransmission(1) #mov(DETy, -16) self.beam.bim6.retract() caput('XF:11BMB-BI{IM:2}EM180:Acquire', 0) # Turn off bim6 #detselect(pilatus300) detselect(pilatus_name) #if RE.state is not 'idle': # RE.abort() self.current_mode = 'measurement' # Check if gate valves are open if self.beam.GVdsbig.state() is not 'out' and verbosity>=1: print('Warning: Sample chamber gate valve (large, downstream) is not open.') def modeAlignment(self, verbosity=3): self.current_mode = 'undefined' # TODO: Check what mode (TSAXS, GISAXS) and respond accordingly # TODO: Check if gate valves are open and flux is okay (warn user) # TODO: Check list: change attenuator for different energy, change the bsx position with beamcenter accordingly self.beam.off() self.beam.setTransmission(1e-8) #1e-6 for 13.5kev, 1e-8 for 17kev while beam.transmission() > 3e-8: time.sleep(0.5) 
        self.beam.setTransmission(1e-8)

        #mov(bsx, -10.95)
        bsx.move(self.bsx_pos+3)

        #detselect(pilatus300, suffix='_stats4_total')
        #caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquireTime', 0.5)
        #caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquirePeriod', 0.6)
        detselect(pilatus_name, suffix='_stats4_total')
        caput('XF:11BMB-ES{}:cam1:AcquireTime'.format(pilatus_Epicsname), 0.5)
        caput('XF:11BMB-ES{}:cam1:AcquirePeriod'.format(pilatus_Epicsname), 0.6)
        #caput('XF:11BMB-ES{Det:PIL2M}:cam1:AcquirePeriod', 0.6)

        #TODO: Update ROI based on current SAXSx, SAXSy and the md in cms object

        self.current_mode = 'alignment'
        #self.beam.bim6.reading()

    def modeMeasurement(self, verbosity=3):

        self.current_mode = 'undefined'

        self.beam.off()

        #mov(bsx, -15.95)
        bsx.move(self.bsx_pos)
        if abs(bsx.user_readback.value - self.bsx_pos) > 0.1:
            print('WARNING: Beamstop did not return to correct position!')
            return

        self.beam.setTransmission(1)

        #detselect(pilatus300)
        #detselect([pilatus300, psccd])
        detselect(pilatus_name)

        #if RE.state != 'idle':
        #    RE.abort()

        self.current_mode = 'measurement'

        # Check if gate valves are open
        if self.beam.GVdsbig.state() != 'out' and verbosity >= 1:
            print('Warning: Sample chamber gate valve (large, downstream) is not open.')

    def modeBeamstopAlignment(self, verbosity=3):
        '''Places bim6 (dsmon) as a temporary beamstop.'''
        DETy.move(-6.1)

    def beamstopCircular(self, verbosity=3):

        self.beam.setTransmission(1e-6)

        bsx.move(0)
        bsphi.move(-12.0)
        bsx.move(self.bsx_pos)
        bsy.move(-15.47)

        # TODO: Capture image and confirm that it's okay?

        if verbosity >= 1:
            print("WARNING: This routine merely puts the beamstop in the ~approximately~ correct position. You must confirm that the beam is being blocked correctly.")

        self.beam.transmission(verbosity=verbosity)

    def beamstopLinear(self, verbosity=3):

        self.beam.setTransmission(1e-6)

        bsx.move(0)
        bsphi.move(-223.4)
        bsx.move(self.bsx_pos)
        bsy.move(17)

        # TODO: Capture image and confirm that it's okay?

        if verbosity >= 1:
            print("WARNING: This routine merely puts the beamstop in the ~approximately~ correct position. You must confirm that the beam is being blocked correctly.")

        self.beam.transmission(verbosity=verbosity)

    def _actuate_open(self, pv, max_tries=5, wait_time=1.0, verbosity=2):

        tries = 1
        if verbosity >= 4:
            print('    Opening {} (try # {:d})'.format(pv, tries))
        caput(pv+'Cmd:Opn-Cmd', 1)
        time.sleep(wait_time)

        while caget(pv+'Pos-Sts') != 1 and tries < max_tries:
            tries += 1
            if verbosity >= 4:
                print('    Opening {} (try # {:d})'.format(pv, tries))
            caput(pv+'Cmd:Opn-Cmd', 1)
            time.sleep(wait_time)

        if verbosity >= 1 and caget(pv+'Pos-Sts') != 1:
            print('ERROR, valve did not open ({})'.format(pv))

    def _actuate_close(self, pv, max_tries=5, wait_time=1.0, verbosity=2):

        tries = 1
        if verbosity >= 4:
            print('    Closing {} (try # {:d})'.format(pv, tries))
        caput(pv+'Cmd:Cls-Cmd', 1)
        time.sleep(wait_time)

        while caget(pv+'Pos-Sts') != 0 and tries < max_tries:
            tries += 1
            if verbosity >= 4:
                print('    Closing {} (try # {:d})'.format(pv, tries))
            caput(pv+'Cmd:Cls-Cmd', 1)
            time.sleep(wait_time)

        if verbosity >= 1 and caget(pv+'Pos-Sts') != 0:
            print('ERROR, valve did not close ({})'.format(pv))

    def ventChamber(self, verbosity=3):
        #TODO: Remove the old (commented-out) caput lines

        # Close large gate valve (downstream side of sample chamber)
        #caput('XF:11BMB-VA{Chm:Det-GV:1}Cmd:Cls-Cmd',1)
        self._actuate_close('XF:11BMB-VA{Chm:Det-GV:1}', verbosity=verbosity)

        # Close small gate valve (upstream side of sample chamber)
        #caput('XF:11BMB-VA{Slt:4-GV:1}Cmd:Cls-Cmd',1)
        #self._actuate_close('XF:11BMB-VA{Slt:4-GV:1}', verbosity=verbosity)

        # Close valve connecting sample chamber to vacuum pump
        #caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Cls-Cmd',1)
        self._actuate_close('XF:11BMB-VA{Chm:Det-IV:1}', verbosity=verbosity)

        # Soft-open the upstream vent-valve
        #caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Cls-Cmd', 1)
        self._actuate_close('XF:11BMB-VA{Chm:Smpl-VV:1}', verbosity=verbosity)
        time.sleep(1.0)
        #caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Opn-Cmd', 1)
        self._actuate_open('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}', verbosity=verbosity)

        self.chamberPressure(range_high=100)

        # Fully open the upstream vent-valve
        #caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Cls-Cmd', 1)
        self._actuate_close('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}', verbosity=verbosity)
        time.sleep(1.0)
        #caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Opn-Cmd', 1)
        self._actuate_open('XF:11BMB-VA{Chm:Smpl-VV:1}', verbosity=verbosity)

        # Fully open the downstream vent-valve
        #caput('XF:11BMB-VA{Chm:Det-VV:1_Soft}Cmd:Cls-Cmd', 1)
        self._actuate_close('XF:11BMB-VA{Chm:Det-VV:1_Soft}', verbosity=verbosity)
        time.sleep(1.0)
        #caput('XF:11BMB-VA{Chm:Det-VV:1}Cmd:Opn-Cmd', 1)
        self._actuate_open('XF:11BMB-VA{Chm:Det-VV:1}', verbosity=verbosity)

        self.chamberPressure(range_high=1000)

        if verbosity >= 1:
            print('Sample chamber is ready to be opened.')

    def _old_ventChamber(self, verbosity=3):
        # TODO: deprecate and delete

        # Close large gate valve (downstream side of sample chamber)
        caput('XF:11BMB-VA{Chm:Det-GV:1}Cmd:Cls-Cmd',1)

        # Close small gate valve (upstream side of sample chamber)
        #caput('XF:11BMB-VA{Slt:4-GV:1}Cmd:Cls-Cmd',1)

        # Close valve connecting sample chamber to vacuum pump
        caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Cls-Cmd',1)

        time.sleep(0.5)

        # Soft-open the upstream vent-valve
        caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Cls-Cmd', 1)
        time.sleep(1.0)
        caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Opn-Cmd', 1)

        self.chamberPressure(range_high=100)

        # Fully open the upstream vent-valve
        caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Cls-Cmd', 1)
        time.sleep(1.0)
        caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Opn-Cmd', 1)

        # Fully open the downstream vent-valve
        caput('XF:11BMB-VA{Chm:Det-VV:1_Soft}Cmd:Cls-Cmd', 1)
        time.sleep(1.0)
        caput('XF:11BMB-VA{Chm:Det-VV:1}Cmd:Opn-Cmd', 1)

        self.chamberPressure(range_high=1000)

        if verbosity >= 1:
            print('Sample chamber is ready to be opened.')

    def chamberPressure(self, range_low=None, range_high=None, readout_period=1.0, verbosity=3):
        '''Monitors the pressure in the sample chamber, printing the current
        value. If range arguments are provided, the monitoring will end once
        the pressure is outside the range.'''

        monitor = True
        while monitor:
            try:
                if range_low is not None and self._chamber_pressure_pv.get() < range_low:
                    monitor = False
                if range_high is not None and self._chamber_pressure_pv.get() > range_high:
                    monitor = False

                P_mbar = self._chamber_pressure_pv.get()
                P_atm = P_mbar*0.000986923
                P_torr = P_mbar*0.750062
                P_kPa = P_mbar*0.1
                P_psi = P_mbar*0.0145038

                if verbosity >= 4:
                    print('Sample chamber pressure: {:8.2f} mbar = {:5.3f} atm = {:7.3f} torr = {:4.1g} kPa \r'.format(P_mbar, P_atm, P_torr, P_kPa), end='', flush=True)
                elif verbosity >= 2:
                    print('Sample chamber pressure: {:8.2f} mbar ({:5.3f} atm) \r'.format(P_mbar, P_atm), end='', flush=True)

                time.sleep(readout_period)

            except KeyboardInterrupt:
                monitor = False

    def pumpChamber(self, max_tries=8, verbosity=3):

        # Close vent-valves
        #caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Cls-Cmd', 1)
        #caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Cls-Cmd', 1)
        #caput('XF:11BMB-VA{Chm:Det-VV:1_Soft}Cmd:Cls-Cmd', 1)
        #caput('XF:11BMB-VA{Chm:Det-VV:1}Cmd:Cls-Cmd', 1)
        self._actuate_close('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}', verbosity=verbosity)
        self._actuate_close('XF:11BMB-VA{Chm:Smpl-VV:1}', verbosity=verbosity)
        self._actuate_close('XF:11BMB-VA{Chm:Det-VV:1_Soft}', verbosity=verbosity)
        self._actuate_close('XF:11BMB-VA{Chm:Det-VV:1}', verbosity=verbosity)

        # Turn on pump (if necessary)
        tries = 1
        while caget('XF:11BMB-VA{Chm:Det-Pmp:1}Sts:Enbl-Sts') == 0 and tries <= max_tries:
            caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 0)
            time.sleep(0.2)
            caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 1)
            time.sleep(2.0)
            tries += 1

        # Soft-open valve to pump
        #caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Cls-Cmd', 1)
        self._actuate_close('XF:11BMB-VA{Chm:Det-IV:1}', verbosity=verbosity)
        time.sleep(0.5)
        #caput('XF:11BMB-VA{Chm:Det-IV:1_Soft}Cmd:Opn-Cmd', 1)
        self._actuate_open('XF:11BMB-VA{Chm:Det-IV:1_Soft}', verbosity=verbosity)
        time.sleep(5.0)

        # Check pump again
        tries = 1
        while caget('XF:11BMB-VA{Chm:Det-Pmp:1}Sts:Enbl-Sts') == 0 and tries <= max_tries:
            caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 0)
            time.sleep(0.2)
            caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 1)
            time.sleep(2.0)
            tries += 1

        self.chamberPressure(range_low=500)

        # Fully open valve to pump
        #caput('XF:11BMB-VA{Chm:Det-IV:1_Soft}Cmd:Cls-Cmd', 1)
        self._actuate_close('XF:11BMB-VA{Chm:Det-IV:1_Soft}', verbosity=verbosity)
        time.sleep(0.5)
        #caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Opn-Cmd', 1)
        self._actuate_open('XF:11BMB-VA{Chm:Det-IV:1}', verbosity=verbosity)

        self.chamberPressure(range_low=200)

    def _old_pumpChamber(self, readout_delay=0.2):
        # TODO: deprecate and delete

        # Close vent-valves
        caput('XF:11BMB-VA{Chm:Smpl-VV:1_Soft}Cmd:Cls-Cmd', 1)
        time.sleep(0.5)
        caput('XF:11BMB-VA{Chm:Smpl-VV:1}Cmd:Cls-Cmd', 1)
        time.sleep(0.5)
        caput('XF:11BMB-VA{Chm:Det-VV:1_Soft}Cmd:Cls-Cmd', 1)
        time.sleep(0.5)
        caput('XF:11BMB-VA{Chm:Det-VV:1}Cmd:Cls-Cmd', 1)
        time.sleep(0.2)

        # Turn on pump (if necessary)
        if caget('XF:11BMB-VA{Chm:Det-Pmp:1}Sts:Enbl-Sts') == 0:
            caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 0)
            time.sleep(0.2)
            caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 1)

        # Soft-open valve to pump
        caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Cls-Cmd', 1)
        time.sleep(1.0)
        caput('XF:11BMB-VA{Chm:Det-IV:1_Soft}Cmd:Opn-Cmd', 1)
        time.sleep(0.2)
        time.sleep(5.0)

        # Check pump again
        if caget('XF:11BMB-VA{Chm:Det-Pmp:1}Sts:Enbl-Sts') == 0:
            caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 0)
            time.sleep(0.2)
            caput('XF:11BMB-VA{Chm:Det-Pmp:1}Cmd:Enbl-Cmd', 1)

        self.chamberPressure(range_low=500)

        # Fully open valve to pump
        caput('XF:11BMB-VA{Chm:Det-IV:1_Soft}Cmd:Cls-Cmd', 1)
        time.sleep(1.0)
        caput('XF:11BMB-VA{Chm:Det-IV:1}Cmd:Opn-Cmd', 1)
        time.sleep(0.2)

        self.chamberPressure(range_low=200)

    def openChamberGateValve(self):
        caput('XF:11BMB-VA{Chm:Det-GV:1}Cmd:Opn-Cmd', 1)  # Large (downstream)
        #caput('XF:11BMB-VA{Slt:4-GV:1}Cmd:Opn-Cmd',1)  # Small (upstream)

    def closeChamberGateValve(self):
        caput('XF:11BMB-VA{Chm:Det-GV:1}Cmd:Cls-Cmd', 1)  # Large (downstream)
        #caput('XF:11BMB-VA{Slt:4-GV:1}Cmd:Cls-Cmd',1)  # Small (upstream)

    # Metadata methods
    ########################################

    def get_md(self, prefix=None, **md):

        md_current = self.md.copy()

        md_current['calibration_energy_keV'] = round(self.beam.energy(verbosity=0), 3)
        md_current['calibration_wavelength_A'] = round(self.beam.wavelength(verbosity=0), 5)

        h, v = self.beam.size(verbosity=0)
        md_current['beam_size_x_mm'] = h
        md_current['beam_size_y_mm'] = v

        # temporarily block it for bad communication. 17:30, 071617
        h, v = self.beam.divergence(verbosity=0)
        md_current['beam_divergence_x_mrad'] = h
        md_current['beam_divergence_y_mrad'] = v

        md_current['beamline_mode'] = self.current_mode
        #md_current['detector'] = self.detector

        md_current['motor_SAXSx'] = SAXSx.user_readback.value
        md_current['motor_SAXSy'] = SAXSy.user_readback.value
        md_current['motor_DETx'] = DETx.user_readback.value
        md_current['motor_DETy'] = DETy.user_readback.value
        md_current['motor_WAXSx'] = WAXSx.user_readback.value

        md_current['motor_smx'] = smx.user_readback.value
        md_current['motor_smy'] = smy.user_readback.value
        md_current['motor_sth'] = sth.user_readback.value

        md_current['motor_bsx'] = bsx.user_readback.value
        md_current['motor_bsy'] = bsy.user_readback.value
        md_current['motor_bsphi'] = bsphi.user_readback.value

        md_current.update(self.SAXS.get_md(prefix='detector_SAXS_'))

        md_current.update(md)

        # Add an optional prefix
        if prefix is not None:
            md_current = { '{:s}{:s}'.format(prefix, key) : value for key, value in md_current.items() }

        return md_current

    def setMetadata(self, verbosity=3):
        '''Guides the user through setting some of the required and
        recommended meta-data fields.'''

        if verbosity >= 3:
            print('This will guide you through adding some meta-data for the upcoming experiment.')
        if verbosity >= 4:
            print('You can accept default values (shown in square [] brackets) by pressing enter. You can leave a value blank (or put a space) to skip that entry.')

        # Set some values automatically
        month = int(time.strftime('%m'))
        if month <= 4:
            cycle = 1
        elif month <= 8:
            cycle = 2
        else:
            cycle = 3
        RE.md['experiment_cycle'] = '{:s}_{:d}'.format( time.strftime('%Y'), cycle )

        RE.md['calibration_energy_keV'] = round(self.beam.energy(verbosity=0), 3)
        RE.md['calibration_wavelength_A'] = round(self.beam.wavelength(verbosity=0), 5)
        # TODO:
        # RE.md['calibration_detector_distance_m'] =
        # RE.md['calibration_detector_x0'] =
        # RE.md['calibration_detector_y0'] =

        # Ask the user some questions
        questions = [
            ['experiment_proposal_number', 'Proposal number'] ,
            ['experiment_SAF_number', 'SAF number'] ,
            ['experiment_group', 'User group (e.g. PI)'] ,
            ['experiment_user', 'The specific user/person running the experiment'] ,
            ['experiment_project', 'Project name/code'] ,
            ['experiment_alias_directory', 'Alias directory'] ,
            ['experiment_type', 'Type of experiments/measurements (SAXS, GIWAXS, etc.)'] ,
            ]
        # TBD:
        # Path where data will be stored?

        self._dialog_total_questions = len(questions)
        self._dialog_question_number = 1
        for key, text in questions:
            try:
                self._ask_question(key, text)
            except KeyboardInterrupt:
                return

        if verbosity >= 4:
            print('You can also add/edit metadata directly using the RE.md object.')

    def _ask_question(self, key, text, default=None):

        if default is None and key in RE.md:
            default = RE.md[key]

        if default is None:
            ret = input('  Q{:d}/{:d}. {:s}: '.format(self._dialog_question_number, self._dialog_total_questions, text) )
        else:
            ret = input('  Q{:d}/{:d}. {:s} [{}]: '.format(self._dialog_question_number, self._dialog_total_questions, text, default) )

        if ret == '':
            ret = default

        if ret != '' and ret != ' ':
            RE.md[key] = ret

        self._dialog_question_number += 1

    # Logging methods
    ########################################

    def logAllMotors(self, verbosity=3, **md):

        log_pos()

        motor_list = [
            mono_bragg ,
            mono_pitch2 , mono_roll2 , mono_perp2 ,
            mir_usx , mir_dsx , mir_usy , mir_dsyi , mir_dsyo , mir_bend ,
            s0.tp , s0.bt , s0.ob , s0.ib ,
            s1.xc , s1.xg , s1.yc , s1.yg ,
            s2.xc , s2.xg , s2.yc , s2.yg ,
            s3.xc , s3.xg , s3.yc , s3.yg ,
            s4.xc , s4.xg , s4.yc , s4.yg ,
            s5.xc , s5.xg , s5.yc , s5.yg ,
            bim3y ,
            fs3y ,
            bim4y ,
            bim5y ,
            smx , smy , sth , schi , sphi , srot , strans ,
            camx , camy ,
            cam2x , cam2z ,
            DETx , DETy ,
            WAXSx ,
            SAXSx , SAXSy ,
            bsx , bsy , bsphi ,
            armz , armx , armphi , army , armr ,
            ]

        self.log_motors(motor_list, verbosity=verbosity, **md)

# End class CMS_Beamline(Beamline)
########################################


class CMS_Beamline_GISAXS(CMS_Beamline):

    def modeAlignment(self, verbosity=3):

        if RE.state != 'idle':
            RE.abort()

        self.current_mode = 'undefined'

        # TODO: Check what mode (TSAXS, GISAXS) and respond accordingly
        # TODO: Check if gate valves are open and flux is okay (warn user)

        self.beam.off()
        self.beam.setTransmission(1e-6)
        while beam.transmission() > 2e-6:
            time.sleep(0.5)
            self.beam.setTransmission(1e-6)

        #mov(bsx, -11.55)
        #mov(bsx, -11.55+2)  # changed at 06/02/17, Osuji beam time
        #mov(bsx, -14.73+2)  # changed at 06/04/17, SAXS, 3m, Osuji beam time
        #mov(bsx, -15.23+2)  # changed at 06/04/17, GISAXS, 3m, Osuji beam time
        #mov(bsx, -17.03+3)  # changed at 06/04/17, GISAXS, 3m, Osuji beam time
        #mov(bsx, -16.0+3)   # changed at 07/10/17, GISAXS, 2m, LSita beam time
        #mov(bsx, -16.53+3)  # 07/20/17, GISAXS, 5m, CRoss
        #mov(bsx, self.bsx_pos+3)
        bsx.move(self.bsx_pos+3)

        self.setReflectedBeamROI()
        self.setDirectBeamROI()

        #detselect(pilatus300, suffix='_stats4_total')
        #caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquireTime', 0.5)
        #caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquirePeriod', 0.6)
        detselect(pilatus_name, suffix='_stats4_total')
        caput('XF:11BMB-ES{}:cam1:AcquireTime'.format(pilatus_Epicsname), 0.5)
        caput('XF:11BMB-ES{}:cam1:AcquirePeriod'.format(pilatus_Epicsname), 0.6)

        #TODO: Update ROI based on current SAXSx, SAXSy and the md in cms object

        self.current_mode = 'alignment'
        #self.beam.bim6.reading()

    def modeMeasurement(self, verbosity=3):

        if RE.state != 'idle':
            RE.abort()

        self.current_mode = 'undefined'

        self.beam.off()

        #bsx_pos=-16.74
        #mov(bsx, -16.55)
        #mov(bsx, -13.83)  # changed at 06/02/17, Osuji beam time
        #mov(bsx, -14.73)  # changed at 06/04/17, SAXS, 3m, Osuji beam time
        #mov(bsx, -15.03)  # changed at 06/04/17, GISAXS, 3m, Osuji beam time
        #mov(bsx, -16.43)  # changed at 06/12/17, GISAXS, 3m, LZhu beam time
        #mov(bsx, -16.53)  # changed at 06/19/17, GISAXS, 5m, AHexemer beam time
        #mov(bsx, -16.2)   # changed at 07/07/17, GISAXS, 3m, TKoga beam time
        #mov(bsx, -16.43)  # changed at 07/10/17, GISAXS, 2m, LSita beam time
        #mov(bsx, -16.53)  # 07/20/17, GISAXS, 5m, CRoss beam time
        #mov(bsx, -15.84)  # 07/26/17, SAXS/WAXS, 2m, BVogt beam time
        #mov(bsx, -16.34)  # 08/02/17, TOMO GISAXS, 5m, LRichter beam time
        #mov(bsx, -16.74)  # 08/02/17, TOMO GISAXS, 5m, LRichter beam time
        #mov(bsx, self.bsx_pos)
        bsx.move(self.bsx_pos)

        #if abs(bsx.user_readback.value - -16.74)>0.1:
        if abs(bsx.user_readback.value - self.bsx_pos) > 0.1:
            print('WARNING: Beamstop did not return to correct position!')
            return

        self.beam.setTransmission(1)

        #mov(DETy, -16)
        #self.beam.bim6.retract()
        #caput('XF:11BMB-BI{IM:2}EM180:Acquire', 0)  # Turn off bim6

        #detselect(pilatus300)
        #detselect([pilatus300, psccd])
        detselect(pilatus_name)

        self.current_mode = 'measurement'

        # Check if gate valves are open
        if self.beam.GVdsbig.state() != 'out' and verbosity >= 1:
            print('Warning: Sample chamber gate valve (large, downstream) is not open.')

    def setDirectBeamROI(self, size=[10,4], verbosity=3):
        '''Update the ROI (stats4) for the direct beam on the Pilatus detector.
        This (should) update correctly based on the current SAXSx, SAXSy.

        The size argument controls the size (in pixels) of the ROI itself
        (in the format [width, height]). A size=[6,4] is reasonable. The size
        is changed to [10, 4] to accommodate possible beam drift during a user
        run (changed at 08/16/17).'''

        detector = self.SAXS

        # These positions are updated based on current detector position
        det_md = detector.get_md()
        x0 = det_md['detector_SAXS_x0_pix']
        y0 = det_md['detector_SAXS_y0_pix']

        #caput('XF:11BMB-ES{Det:SAXS}:ROI4:MinX', int(x0-size[0]/2))
        #caput('XF:11BMB-ES{Det:SAXS}:ROI4:SizeX', int(size[0]))
        #caput('XF:11BMB-ES{Det:SAXS}:ROI4:MinY', int(y0-size[1]/2))
        #caput('XF:11BMB-ES{Det:SAXS}:ROI4:SizeY', int(size[1]))
        #detselect(pilatus300, suffix='_stats4_total')
        caput('XF:11BMB-ES{}:ROI4:MinX'.format(pilatus_Epicsname), int(x0-size[0]/2))
        caput('XF:11BMB-ES{}:ROI4:SizeX'.format(pilatus_Epicsname), int(size[0]))
        caput('XF:11BMB-ES{}:ROI4:MinY'.format(pilatus_Epicsname), int(y0-size[1]/2))
        caput('XF:11BMB-ES{}:ROI4:SizeY'.format(pilatus_Epicsname), int(size[1]))
        detselect(pilatus_name, suffix='_stats4_total')

    def setReflectedBeamROI(self, total_angle=0.16, size=[10,2], verbosity=3):
        '''Update the ROI (stats3) for the reflected beam on the Pilatus300k
        detector. This (should) update correctly based on the current SAXSx,
        SAXSy.

        The size argument controls the size (in pixels) of the ROI itself
        (in the format [width, height]). A size=[6,2] is reasonable.'''

        detector = self.SAXS

        # These positions are updated based on current detector position
        det_md = detector.get_md()
        x0 = det_md['detector_SAXS_x0_pix']
        y0 = det_md['detector_SAXS_y0_pix']

        d = detector.distance*1000.0  # mm
        pixel_size = detector.pixel_size  # mm

        y_offset_mm = np.tan(np.radians(total_angle))*d
        y_offset_pix = y_offset_mm/pixel_size

        # for pilatus300k
        #y_pos = int( y0 - size[1]/2 - y_offset_pix )
        # for pilatus2M, placed up-side down
        y_pos = int( y0 - size[1]/2 + y_offset_pix )

        #caput('XF:11BMB-ES{Det:SAXS}:ROI3:MinX', int(x0-size[0]/2))
        #caput('XF:11BMB-ES{Det:SAXS}:ROI3:SizeX', int(size[0]))
        #caput('XF:11BMB-ES{Det:SAXS}:ROI3:MinY', y_pos)
        #caput('XF:11BMB-ES{Det:SAXS}:ROI3:SizeY', int(size[1]))
        #detselect(pilatus300, suffix='_stats3_total')
        caput('XF:11BMB-ES{}:ROI3:MinX'.format(pilatus_Epicsname), int(x0-size[0]/2))
        caput('XF:11BMB-ES{}:ROI3:SizeX'.format(pilatus_Epicsname), int(size[0]))
        caput('XF:11BMB-ES{}:ROI3:MinY'.format(pilatus_Epicsname), y_pos)
        caput('XF:11BMB-ES{}:ROI3:SizeY'.format(pilatus_Epicsname), int(size[1]))
        detselect(pilatus_name, suffix='_stats3_total')


#cms = CMS_Beamline()
cms = CMS_Beamline_GISAXS()

def get_beamline():
    return cms
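# A self-contained sketch of the reflected-beam ROI geometry used in
# setReflectedBeamROI() above: the specular spot lands tan(total_angle)*distance
# away from the direct beam, converted to pixels. The numbers below are
# illustrative assumptions (5 m camera length, 0.172 mm Pilatus pixels), not
# values read from the beamline configuration:
import numpy as np

total_angle = 0.16            # degrees (incident + reflected angle)
d = 5.0 * 1000.0              # detector distance in mm
pixel_size = 0.172            # mm per pixel
y_offset_pix = np.tan(np.radians(total_angle)) * d / pixel_size
print(round(y_offset_pix, 1))  # ~81.2 pixels between direct and reflected beam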
# license (file above): bsd-3-clause

# ---- next file: pybel/pybel-tools :: src/pybel_tools/selection/paths.py ----
# -*- coding: utf-8 -*-

"""Path selection tools."""

import itertools as itt
from operator import itemgetter
from typing import Any, List, Mapping, Optional, Tuple

import networkx as nx
from more_itertools import pairwise

from pybel import BELGraph, BaseEntity
from pybel.constants import (
    ANALOGOUS_TO, ASSOCIATION, BIOMARKER_FOR, CAUSES_NO_CHANGE, DECREASES, DIRECTLY_DECREASES,
    DIRECTLY_INCREASES, EQUIVALENT_TO, HAS_PRODUCT, HAS_REACTANT, HAS_VARIANT, INCREASES, IS_A,
    NEGATIVE_CORRELATION, PART_OF, POSITIVE_CORRELATION, PROGONSTIC_BIOMARKER_FOR,
    RATE_LIMITING_STEP_OF, REGULATES, RELATION, SUBPROCESS_OF, TRANSCRIBED_TO, TRANSLATED_TO,
)
from pybel.struct.mutation import get_nodes_in_all_shortest_paths

__all__ = [
    'get_nodes_in_all_shortest_paths',
    'get_shortest_directed_path_between_subgraphs',
    'get_shortest_undirected_path_between_subgraphs',
]

default_edge_ranking = {
    INCREASES: 2,
    DIRECTLY_INCREASES: 3,
    DECREASES: 2,
    DIRECTLY_DECREASES: 3,
    RATE_LIMITING_STEP_OF: 0,
    CAUSES_NO_CHANGE: 0,
    REGULATES: 0,
    NEGATIVE_CORRELATION: 2,
    POSITIVE_CORRELATION: 2,
    ASSOCIATION: 1,
    HAS_PRODUCT: 0,
    HAS_VARIANT: 0,
    HAS_REACTANT: 0,
    TRANSLATED_TO: 0,
    TRANSCRIBED_TO: 0,
    IS_A: 0,
    PART_OF: 0,
    SUBPROCESS_OF: 0,
    ANALOGOUS_TO: 0,
    BIOMARKER_FOR: 0,
    PROGONSTIC_BIOMARKER_FOR: 0,
    EQUIVALENT_TO: 0,
}


def rank_path(graph: BELGraph, path: List[BaseEntity], edge_ranking: Optional[Mapping[str, int]] = None) -> int:
    """Score the given path.

    :param graph: A BEL graph
    :param path: A list of nodes in the path (includes terminal nodes)
    :param edge_ranking: A dictionary of {relationship: score}
    :return: The score for the path
    """
    if edge_ranking is None:
        edge_ranking = default_edge_ranking

    return sum(
        max(
            edge_ranking[data[RELATION]]
            for data in graph.edges[source][target].values()
        )
        for source, target in pairwise(path)
    )


# TODO consider all shortest paths?
def _get_shortest_path_between_subgraphs_helper(
        graph: nx.Graph,
        a: nx.Graph,
        b: nx.Graph,
) -> List[List[Any]]:
    """Calculate the shortest path(s) between disconnected sub-graphs ``a`` and ``b`` through ``graph``.

    :param graph: A graph
    :param a: A sub-graph of :code:`graph`, disjoint from :code:`b`
    :param b: A sub-graph of :code:`graph`, disjoint from :code:`a`
    :return: A list of the shortest paths between the two sub-graphs
    """
    if graph.is_directed():
        shortest_paths = [
            shortest_path
            for na, nb in itt.product(a, b)
            for shortest_path in (
                # do it going both ways because it's directed
                nx.shortest_path(graph, na, nb),
                nx.shortest_path(graph, nb, na),
            )
        ]
    else:
        shortest_paths = [
            nx.shortest_path(graph, na, nb)
            for na, nb in itt.product(a, b)
        ]

    min_len = min(map(len, shortest_paths))
    return [
        shortest_path
        for shortest_path in shortest_paths
        if len(shortest_path) == min_len
    ]


def get_shortest_directed_path_between_subgraphs(graph: BELGraph, a: BELGraph, b: BELGraph) -> List[List[Any]]:
    """Calculate the shortest path(s) between disconnected sub-graphs ``a`` and ``b`` through ``graph``.

    :param graph: A BEL graph
    :param a: A sub-graph of :code:`graph`, disjoint from :code:`b`
    :param b: A sub-graph of :code:`graph`, disjoint from :code:`a`
    :return: A list of the shortest paths between the two sub-graphs
    """
    return _get_shortest_path_between_subgraphs_helper(graph, a, b)


def get_shortest_undirected_path_between_subgraphs(graph: BELGraph, a: BELGraph, b: BELGraph) -> List[List[Any]]:
    """Calculate the undirected shortest path(s) between disconnected sub-graphs ``a`` and ``b`` through ``graph``.

    :param graph: A BEL graph
    :param a: A sub-graph of :code:`graph`, disjoint from :code:`b`
    :param b: A sub-graph of :code:`graph`, disjoint from :code:`a`
    :return: A list of the shortest paths between the two sub-graphs
    """
    ug = graph.to_undirected()
    return _get_shortest_path_between_subgraphs_helper(ug, a, b)


def find_root_in_path(graph: BELGraph, path_nodes: List[BaseEntity]) -> Tuple[BELGraph, BaseEntity]:
    """Find the root of the path.

    The root is defined as the node with the lowest in-degree; if several
    nodes are tied for the lowest in-degree, the root is the one with the
    highest out-degree among them.

    :param graph: A BEL Graph
    :param path_nodes: A list of nodes in their order in a path
    :return: A pair of the graph of the path and the root node
    """
    path_graph = graph.subgraph(path_nodes)

    # in_degrees: list of (node, in_degree_of_node) tuples in ascending order
    in_degrees = sorted(path_graph.in_degree().items(), key=itemgetter(1))

    # In case all have the same in-degree it needs to be referenced beforehand
    tied_root_index = 0

    # Get index where the min in_degree stops (in case there are duplicates)
    for i in range(0, (len(in_degrees) - 1)):
        if in_degrees[i][1] < in_degrees[i + 1][1]:
            tied_root_index = i
            break

    # If there are multiple nodes with minimum in_degree take the one with max out-degree
    # (in case multiple have the same out-degree, pick one at random)
    if tied_root_index != 0:
        # out_degrees: list of (node, out_degree_of_node) tuples in descending order
        out_degrees = sorted(path_graph.out_degree().items(), key=itemgetter(1), reverse=True)
        root_tuple = max(out_degrees[:tied_root_index], key=itemgetter(1))
    else:
        root_tuple = in_degrees[0]

    return path_graph, root_tuple[0]
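# A minimal standalone illustration of the scoring idea in rank_path above:
# each hop in a path contributes the best score among the parallel edges
# connecting its endpoints. Plain dicts stand in for the BELGraph here, so
# this sketch does not depend on pybel itself; the relation names are
# illustrative.
from more_itertools import pairwise

toy_ranking = {'increases': 2, 'directlyIncreases': 3, 'association': 1}
# {(source, target): [relations on the parallel edges]}
toy_edges = {
    ('A', 'B'): ['increases', 'directlyIncreases'],
    ('B', 'C'): ['association'],
}

def toy_rank_path(path):
    return sum(
        max(toy_ranking[rel] for rel in toy_edges[u, v])
        for u, v in pairwise(path)
    )

assert toy_rank_path(['A', 'B', 'C']) == 3 + 1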
# license (file above): mit

# ---- next file: tommo/gii :: packages/Mock/PhysicsTools/PhysicsTools.py ----
import random

##----------------------------------------------------------------##
from gii.core import app, signals
from gii.SceneEditor import SceneEditorModule, getSceneSelectionManager, SceneTool, SceneToolButton
from mock import SceneViewTool


def _getModulePath( path ):
    import os.path
    return os.path.dirname( __file__ ) + '/' + path

##----------------------------------------------------------------##
class PhyscsShapeTool( SceneViewTool ):
    name = 'physics_shape_editor'
    tool = 'physics_shape_editor'

##----------------------------------------------------------------##
class PhysicTools( SceneEditorModule ):
    name = 'physics_tools'
    dependency = [ 'scene_view' ]

    def onLoad( self ):
        self.mainToolBar = self.addToolBar( 'physics_tools',
            self.getMainWindow().requestToolBar( 'physics_tools' )
        )

        toolManager = self.getModule( 'scene_tool_manager' )

        self.addTool( 'physics_tools/shape_editor',
            widget = SceneToolButton( 'physics_shape_editor',
                label = 'Physics Shape Editor',
                icon = 'tools/box2d'
            )
        )

    def onStart( self ):
        pass
# license (file above): mit

# ---- next file: synapse-wireless/bulk-reprogramming :: snappyImages/synapse/DK200base.py ----
# Copyright (C) 2015 Synapse Wireless, Inc.
# Subject to your agreement of the disclaimer set forth below, permission is given by Synapse Wireless, Inc. ("Synapse") to you to freely modify, redistribute or include this SNAPpy code in any program. The purpose of this code is to help you understand and learn about SNAPpy by code examples.
# BY USING ALL OR ANY PORTION OF THIS SNAPPY CODE, YOU ACCEPT AND AGREE TO THE BELOW DISCLAIMER. If you do not accept or agree to the below disclaimer, then you may not use, modify, or distribute this SNAPpy code.
# THE CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT
# WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT
# LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF DEFECTS,
# MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE
# RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE IS WITH YOU.
# SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE
# INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY
# NECESSARY SERVICING, REPAIR OR CORRECTION. UNDER NO CIRCUMSTANCES WILL
# SYNAPSE BE LIABLE TO YOU, OR ANY OTHER PERSON OR ENTITY, FOR ANY LOSS OF
# USE, REVENUE OR PROFIT, LOST OR DAMAGED DATA, OR OTHER COMMERCIAL OR
# ECONOMIC LOSS OR FOR ANY DAMAGES WHATSOEVER RELATED TO YOUR USE OR
# RELIANCE UPON THE SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGES OR IF SUCH DAMAGES ARE FORESEEABLE. THIS DISCLAIMER OF WARRANTY
# AND LIABILITY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
# ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.

"""Synapse Evaluation Board base definitions

This module provides easy initialization of the evaluation boards:
ProtoBoards (SN171 & SN172) and RF266 USB Dongle

Call the function 'detectEvalBoards()' to detect and initialize one of
these boards in the default configuration.
"""

from switchboard import *
from nvparams import *
from platforms import *


def isSM220():
    """Detect SM220 based on platform."""
    return (platform == 'SM220')

if isSM220():
    # SN172 ProtoBoard pin definitions
    PROTO_172_LED_PIN_1 = GPIO_B2
    PROTO_172_LED_PIN_2 = GPIO_F4
    PROTO_172_BUTTON_PIN = GPIO_F2

    DEMO_LED_PIN = None
    DEMO_BUTTON_PIN = None
    BATT_SENSE = None
    RELAY_SET_PIN = None
    RELAY_RESET_PIN = None

    # Proto board pin definitions
    PROTO_LED_GRN_PIN = None
    PROTO_LED_YLW_PIN = None
    PROTO_BUTTON_PIN = None
    PROTO_BUZZER_PIN = None
else:
    # Demo board pin definitions
    DEMO_LED_PIN = GPIO_0
    DEMO_BUTTON_PIN = GPIO_1
    BATT_SENSE = GPIO_3
    RELAY_SET_PIN = GPIO_16
    RELAY_RESET_PIN = GPIO_17

    # Proto board pin definitions
    PROTO_LED_GRN_PIN = GPIO_1
    PROTO_LED_YLW_PIN = GPIO_2
    PROTO_BUTTON_PIN = GPIO_5
    PROTO_BUZZER_PIN = GPIO_12

    PROTO_172_LED_PIN_1 = None
    PROTO_172_LED_PIN_2 = None
    PROTO_172_BUTTON_PIN = None

# RF 266 USB Dongle pin definitions
DONGLE_LED_PIN = 20  # SNAPpy IO 20 (IO_20)

# Generic pin definitions (set during board-type detection)
LED_PIN = None
BUTTON_PIN = None

deviceType = None


def detectEvalBoards():
    """Detect and initialize Evaluation Boards to default configuration."""
    global deviceType
    initDeviceType()

    # Perform base eval-board initialization
    if deviceType == 'Buzz':
        initProtoHw()
    elif deviceType == 'Proto':
        init172ProtoHw()
    elif deviceType == 'Dongle':
        initRF266DongleHw()

    # Make the default UART port accessible to Portal
    defaultUart = 1
    crossConnect(DS_UART0 + defaultUart, DS_PACKET_SERIAL)
    saveNvParam(NV_DEFAULT_UART_ID, defaultUart)


def initProtoHw():
    """Initialize ProtoBoard (SN171) hardware peripherals."""
    global BUTTON_PIN, LED_PIN

    LED_PIN = PROTO_LED_YLW_PIN
    setPinDir(LED_PIN, True)
    setPinDir(PROTO_LED_GRN_PIN, True)

    BUTTON_PIN = PROTO_BUTTON_PIN
    setPinDir(BUTTON_PIN, False)
    setPinPullup(BUTTON_PIN, True)

    setPinDir(PROTO_BUZZER_PIN, True)
    writePin(PROTO_BUZZER_PIN, False)


def init172ProtoHw():
    """Initialize ProtoBoard (SN172) hardware peripherals."""
    global BUTTON_PIN, LED_PIN

    BUTTON_PIN = PROTO_172_BUTTON_PIN
    setPinDir(BUTTON_PIN, False)
    setPinPullup(BUTTON_PIN, True)

    LED_PIN = PROTO_172_LED_PIN_1
    setPinDir(LED_PIN, True)
    setPinDir(PROTO_172_LED_PIN_2, True)


def initRF266DongleHw():
    """Initialize RF266 USB Dongle hardware peripherals."""
    global LED_PIN

    LED_PIN = DONGLE_LED_PIN  # There's only one LED on RF266 USB Dongle
    setPinDir(LED_PIN, True)
    writePin(LED_PIN, False)  # Initial state of the LED is OFF


def blinkLed(msDuration):
    """Blink LED for specified duration in milliseconds."""
    global deviceType
    pulsePin(LED_PIN, msDuration, True)


def blinkLed2(msDuration):
    """Blink LED2 (if it exists) for specified duration in milliseconds."""
    global deviceType
    if deviceType == "Buzz":
        pulsePin(PROTO_LED_GRN_PIN, msDuration, True)
    elif deviceType == "Proto":
        pulsePin(PROTO_172_LED_PIN_2, msDuration, True)
    elif deviceType == "Dongle":
        blinkLed(msDuration)


def blinkLed3(msDuration):
    """Blink LED3 (if it exists) for specified duration in milliseconds."""
    global deviceType
    if deviceType == "Buzz":
        pulsePin(PROTO_LED_GRN_PIN, msDuration, True)
        pulsePin(PROTO_LED_YLW_PIN, msDuration, True)
    elif deviceType == "Proto":
        pulsePin(PROTO_172_LED_PIN_1, msDuration, True)
        pulsePin(PROTO_172_LED_PIN_2, msDuration, True)
    elif deviceType == "Dongle":
        blinkLed(msDuration)


def lightLed():
    """Light the first LED."""
    global deviceType
    if deviceType == "Proto":
        writePin(PROTO_172_LED_PIN_1, True)
        writePin(PROTO_172_LED_PIN_2, False)
    elif deviceType == "Buzz":
        writePin(PROTO_LED_GRN_PIN, False)
        writePin(PROTO_LED_YLW_PIN, True)
    elif deviceType == "Dongle":
        writePin(LED_PIN, True)


def lightLed2():
    """Light LED2 (if it exists)."""
    global deviceType
    if deviceType == "Buzz":
        writePin(PROTO_LED_GRN_PIN, True)
        writePin(PROTO_LED_YLW_PIN, False)
    elif deviceType == "Proto":
        writePin(PROTO_172_LED_PIN_1, True)
        writePin(PROTO_172_LED_PIN_2, True)
    elif deviceType == "Dongle":
        writePin(LED_PIN, False)


def lightLed3():
    """Light LED3 (if it exists)."""
    global deviceType
    if deviceType == "Buzz":
        writePin(PROTO_LED_GRN_PIN, True)
        writePin(PROTO_LED_YLW_PIN, True)
    elif deviceType == "Proto":
        writePin(PROTO_172_LED_PIN_1, False)
        writePin(PROTO_172_LED_PIN_2, True)
    elif deviceType == "Dongle":
        writePin(LED_PIN, True)


def ledsOff():
    """Turn off all LEDs."""
    global deviceType
    if deviceType == "Buzz":
        writePin(PROTO_LED_GRN_PIN, False)
        writePin(PROTO_LED_YLW_PIN, False)
    elif deviceType == "Proto":
        writePin(PROTO_172_LED_PIN_1, False)
        writePin(PROTO_172_LED_PIN_2, False)
    elif deviceType == "Dongle":
        writePin(LED_PIN, False)


def isRF266():
    """Detect the RF266 USB Dongle based on platform."""
    return (platform == 'RF266')


def initDeviceType():
    """Set default device types for Evaluation boards."""
    global deviceType
    if isSM220():
        deviceType = "Proto"
    elif isRF266():
        deviceType = "Dongle"
    else:
        deviceType = "Buzz"

    # Store detected device type string to reserved system NV param
    saveNvParam(NV_DEVICE_TYPE_ID, deviceType)
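# A minimal sketch of how a SNAPpy script might consume this module. The
# import path and use of the standard HOOK_STARTUP hook are assumptions about
# the deployment, not part of the file above:
from synapse.DK200base import *

@setHook(HOOK_STARTUP)
def startup():
    detectEvalBoards()  # picks Buzz/Proto/Dongle and assigns LED_PIN/BUTTON_PIN
    blinkLed(500)       # 500 ms blink on the board's primary LED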
# license (file above): apache-2.0

# ---- next file: italomaia/turtle-linux :: games/WhichWayIsUp/lib/projectile.py ----
'''A projectile fired by an enemy (such as the wall-climbing spider that
shoots at the player), which flies until it hits something.'''

import pygame
import os

from pygame.locals import *

from locals import *

import data

from object import Gameobject
from sound import play_sound
from animation import Animation


class Projectile(Gameobject):

    def __init__(self, screen, x = None, y = None, dx = None, dy = None, damage = 5, set = "energy"):
        Gameobject.__init__(self, screen, False, False, x, y, -1)
        self.animations["default"] = Animation(set, "flying")
        self.animations["dying"] = Animation(set, "dying")
        self.image = self.animations[self.current_animation].update_and_get_image()
        self.rect = self.image.get_rect()
        self.dx = dx
        self.dy = dy
        self.saveddx = None
        self.damage = damage
        self.itemclass = "projectile"
        return

    def update(self, level = None):
        Gameobject.update(self, level)
        if self.y < 0 and self.current_animation != "dying":  # This kills projectiles that wander off the screen from the top
            self.current_animation = "dying"
        if self.dx == 0 and self.dy == 0 and self.saveddx != None:
            self.dx = self.saveddx
            self.dy = self.saveddy
        return

    def flip(self):
        self.saveddx = -self.dy
        self.saveddy = self.dx
        Gameobject.flip(self)
        return
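# Note on Projectile.flip() above: it stores a quarter-turn of the current
# velocity, (dx, dy) -> (-dy, dx), which update() restores once the object's
# own velocity has been zeroed. A quick standalone check of that rotation:
dx, dy = 3, 0        # flying right
dx, dy = -dy, dx     # one flip: rotated 90 degrees
assert (dx, dy) == (0, 3)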
# license (file above): gpl-3.0

# ---- next file: simon-weber/Instant-SQLite-Audit-Trail :: test.py ----
#!/usr/bin/env python

import sqlite3
import unittest

import audit


class TestAudit(unittest.TestCase):
    def setUp(self):
        self.conn = sqlite3.connect(':memory:')
        self.conn.execute('CREATE TABLE tab(c1, c2)')
        audit.attach_log(self.conn)

    def tearDown(self):
        audit.detach_log(self.conn)
        self.conn.close()

    def test_string_to_python(self):
        self.conn.execute("INSERT INTO tab VALUES('a', 'b')")
        r = self.conn.execute("SELECT * FROM _audit").fetchone()
        py_val = audit.to_python(r[4])
        self.assertEqual(py_val, [['c1', 'a'], ['c2', 'b']])

    def test_nums_to_python(self):
        self.conn.execute("INSERT INTO tab VALUES(5, 3.14)")
        r = self.conn.execute("SELECT * FROM _audit").fetchone()
        py_val = audit.to_python(r[4])
        self.assertEqual(py_val, [['c1', 5], ['c2', 3.14]])

    def test_null_to_python(self):
        self.conn.execute("INSERT INTO tab VALUES(NULL, NULL)")
        r = self.conn.execute("SELECT * FROM _audit").fetchone()
        py_val = audit.to_python(r[4])
        self.assertEqual(py_val, [['c1', None], ['c2', None]])

    def test_insert(self):
        self.conn.execute("INSERT INTO tab VALUES('audit', 'this')")

        audit_rows = self.conn.execute("SELECT * FROM _audit").fetchall()
        self.assertEqual(len(audit_rows), 1)

        r = audit_rows[0]
        # table and op
        self.assertEqual(r[1:3], (u'tab', u'INSERT'))
        # no previous, new is what we inserted
        self.assertEqual(r[3:],
                         (None,
                          unicode(repr([['c1', 'audit'], ['c2', 'this']])),
                          ))

    def test_update(self):
        self.conn.execute("INSERT INTO tab VALUES('audit', 'this')")
        self.assertEqual(
            self.conn.execute("UPDATE tab SET c2='everything' WHERE"
                              " c2='this'").rowcount,
            1)

        audit_rows = self.conn.execute("SELECT * FROM _audit").fetchall()
        self.assertEqual(len(audit_rows), 2)

        r = audit_rows[1]
        self.assertEqual(r[1:3], (u'tab', u'UPDATE'))
        self.assertEqual(r[3:],
                         (unicode(repr([['c1', 'audit'], ['c2', 'this']])),
                          unicode(repr([['c1', 'audit'], ['c2', 'everything']])),
                          ))

    def test_delete(self):
        self.conn.execute("INSERT INTO tab VALUES('audit', 'this')")
        self.assertEqual(
            self.conn.execute("DELETE FROM tab WHERE c1='audit'").rowcount,
            1)

        audit_rows = self.conn.execute("SELECT * FROM _audit").fetchall()
        self.assertEqual(len(audit_rows), 2)

        r = audit_rows[1]
        self.assertEqual(r[1:3], (u'tab', u'DELETE'))
        self.assertEqual(r[3:],
                         (unicode(repr([['c1', 'audit'], ['c2', 'this']])),
                          None,
                          ))

    def test_update_null(self):
        self.conn.execute("INSERT INTO tab VALUES('audit', NULL)")
        self.assertEqual(
            self.conn.execute("UPDATE tab SET c2='everything' WHERE"
                              " c2 is NULL").rowcount,
            1)

        audit_rows = self.conn.execute("SELECT * FROM _audit").fetchall()
        self.assertEqual(len(audit_rows), 2)

        r = audit_rows[1]
        self.assertEqual(r[1:3], (u'tab', u'UPDATE'))
        self.assertEqual(r[3:],
                         (unicode(repr([['c1', 'audit'], ['c2', None]])),
                          unicode(repr([['c1', 'audit'], ['c2', 'everything']])),
                          ))

    def test_detach(self):
        audit.detach_log(self.conn)
        self.conn.execute("INSERT INTO tab VALUES('no', 'audit')")

        with self.assertRaises(sqlite3.OperationalError):
            # no _audit table
            self.conn.execute("SELECT * FROM _audit").fetchall()


if __name__ == '__main__':
    unittest.main()
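# A minimal usage sketch of the audit module these tests exercise (Python 2,
# matching the tests). The column layout is inferred from the assertions
# above: r[1]=table, r[2]=operation, r[3]=previous row, r[4]=new row.
import sqlite3
import audit

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE tab(c1, c2)')
audit.attach_log(conn)
conn.execute("INSERT INTO tab VALUES('x', 1)")
for row in conn.execute("SELECT * FROM _audit"):
    print row[1], row[2], audit.to_python(row[4])  # tab INSERT [['c1', 'x'], ['c2', 1]]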
# license (file above): mit

# ---- next file: eliostvs/django-budget :: django-budget/dashboard/views.py ----
from __future__ import unicode_literals

from datetime import date, timedelta
from decimal import InvalidOperation

from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.utils import timezone

from budget.models import Budget
from transaction.models import Transaction


@login_required
def dashboard(request):
    try:
        budget = Budget.active.most_current_for_date(timezone.now())
    except Budget.DoesNotExist:
        return redirect('setup')

    latest_expenses = Transaction.expenses.get_latest()
    latest_incomes = Transaction.incomes.get_latest()

    now = timezone.now()
    # First day of the current month
    start_date = date(now.year, now.month, 1)
    # Last day of the current month: the first day of the next month minus one
    # day, rolling the year over when the current month is December
    if now.month == 12:
        end_year, end_month = now.year + 1, 1
    else:
        end_year, end_month = now.year, now.month + 1
    end_date = date(end_year, end_month, 1) - timedelta(days=1)

    estimated_amount = budget.monthly_estimated_total()
    amount_used = budget.actual_total(start_date, end_date)

    try:
        progress_bar_percent = min(100, amount_used / estimated_amount * 100)
    except InvalidOperation:
        progress_bar_percent = 0

    ctx = {'budget': budget,
           'estimated_amount': estimated_amount,
           'amount_used': amount_used,
           'latest_incomes': latest_incomes,
           'latest_expenses': latest_expenses,
           'progress_bar_percent': progress_bar_percent}

    return render(request, 'dashboard.html', ctx)
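# The same month window via the standard library, as a self-contained sketch
# (calendar.monthrange returns (weekday_of_first_day, days_in_month)):
import calendar
from datetime import date

def month_bounds(today):
    days_in_month = calendar.monthrange(today.year, today.month)[1]
    return date(today.year, today.month, 1), date(today.year, today.month, days_in_month)

assert month_bounds(date(2023, 12, 15)) == (date(2023, 12, 1), date(2023, 12, 31))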
# license (file above): mit

# ---- next file: ryanjoneil/docker-image-construction :: dicp/solvers/colgen_model_gurobi.py ----
from collections import defaultdict
from dicp.clique import Clique
from gurobipy import GRB, Model, quicksum
from itertools import combinations, product

import time


class ColgenModelGurobi(object):
    '''Column Generation model'''
    _slug = 'colgen-model-gurobi'

    def __init__(self, time=None):
        self.time = time  # in minutes

    def slug(self):
        return ColgenModelGurobi._slug

    def solve(self, problem, saver):
        self.problem = problem

        # Starting cliques
        self.cliques = set()
        self.img_cmd_to_cliques = defaultdict(list)
        self.img_to_cliques = defaultdict(list)

        # cliques with >= 2 cmds
        for img, cmds in problem.images.items():
            for cmd in cmds:
                clique = Clique(problem, [img], [cmd])
                self.cliques.add(clique)
                self.img_cmd_to_cliques[img, cmd].append(clique)
                self.img_to_cliques[img].append(clique)

        for cmd, imgs in problem.images_by_command.items():
            if len(imgs) < 2:
                continue
            clique = Clique(problem, imgs, [cmd])
            self.cliques.add(clique)
            for img in imgs:
                self.img_cmd_to_cliques[img, cmd].append(clique)
                self.img_to_cliques[img].append(clique)

        # Initial set of intersections
        self.intersections = set()
        for c1, c2 in combinations(self.cliques, 2):
            self._test_intersection(c1, c2)

        for iteration in range(1000):
            print '[iteration %02d / %s]' % (iteration + 1, time.asctime())
            done = True

            self._master()
            for clique in self._subproblem():
                if clique is not None and clique not in self.cliques:
                    print '[new clique] %s' % clique
                    for c in self.cliques:
                        self._test_intersection(clique, c)
                    done = False
                    self.cliques.add(clique)
                    for img, cmd in product(clique.images, clique.commands):
                        self.img_cmd_to_cliques[img, cmd].append(clique)
                    for img in clique.images:
                        self.img_to_cliques[img].append(clique)

            if done:
                solution = self._master(final=True)
                print '\n[solution]'
                for clique in sorted(solution):
                    if len(clique.images) > 1:
                        print clique
                print '\n[cliques]'
                for clique in sorted(self.cliques):
                    if len(clique.images) > 1:
                        print clique
                break

        print

    def _test_intersection(self, c1, c2):
        c1, c2 = tuple(sorted([c1, c2]))
        if (c1, c2) in self.intersections:
            return

        overlapping_images = bool(c1.images_set.intersection(c2.images_set))
        if not overlapping_images:
            return

        disjoint_images = bool(c1.images_set - c2.images_set and c2.images_set - c1.images_set)
        overlapping_commands = bool(c1.commands_set.intersection(c2.commands_set))

        if disjoint_images or (overlapping_images and overlapping_commands):
            self.intersections.add((c1, c2))

    def _master(self, final=False):
        self.img_cmd_duals = defaultdict(float)
        self.clique_inter_duals = defaultdict(float)

        model = Model()
        model.params.OutputFlag = False
        obj = []

        # x[i,c] = 1 if clique c is used
        x = {}
        for clique in self.cliques:
            if final:
                x[clique] = v = model.addVar(vtype=GRB.BINARY)
            else:
                x[clique] = v = model.addVar()
            obj.append(clique.cost * v)

        model.update()

        # Each image has to run each of its commands.
        img_cmd_constraints = {}
        for img, cmds in self.problem.images.items():
            for cmd in cmds:
                vlist = [x[c] for c in self.img_cmd_to_cliques[img, cmd]]
                if final:
                    model.addConstr(quicksum(vlist) == 1)
                else:
                    img_cmd_constraints[img, cmd] = model.addConstr(quicksum(vlist) >= 1)

        # Clique intersections
        clique_inter_constraints = {}
        for c1, c2 in self.intersections:
            clique_inter_constraints[c1, c2] = model.addConstr(x[c1] + x[c2] <= 1)

        model.setObjective(quicksum(obj), GRB.MINIMIZE)
        model.optimize()

        if final:
            print '\n[final master obj: %.02f]' % model.objVal
            return [c for c in self.cliques if x[c].x > 0.5]
        else:
            header = '     | %s' % (' '.join('% 6s' % c for c in self.problem.commands))
            print '-' * len(header)
            print header
            print '-' * len(header)
            for img in self.problem.images:
                duals = []
                for cmd in self.problem.commands:
                    try:
                        extra = img_cmd_constraints[img, cmd].pi
                        self.img_cmd_duals[img, cmd] = extra
                        duals.append(round(extra, 1) or '')
                    except KeyError:
                        duals.append('')
                print '% 4s | %s' % (img, ' '.join('% 6s' % d for d in duals))
            print '-' * len(header)

            for (c1, c2), c in sorted(clique_inter_constraints.items()):
                if c.pi:
                    print '[clique/inter dual] %s | %s = %.02f' % (c1, c2, c.pi)
                    self.clique_inter_duals[c1, c2] = c.pi

    def _subproblem(self):
        cliques = []

        for (c1, c2), pi in self.clique_inter_duals.items():
            int_images = c1.images_set.intersection(c2.images_set)

            # Remove images in c1 not in c2
            z = pi + c1.cost
            for i in int_images:
                dual = sum(self.img_cmd_duals[i, cmd] for cmd in c1.commands)
                z -= dual
            if z < 0:
                cliques.append(Clique(self.problem, int_images, c1.commands))

            # Remove images in c2 not in c1
            z = pi + c2.cost
            for i in int_images:
                dual = sum(self.img_cmd_duals[i, cmd] for cmd in c2.commands)
                z -= dual
            if z < 0:
                cliques.append(Clique(self.problem, int_images, c2.commands))

            # Pare images off until they don't intersect anymore
            if len(c1.images) <= 2 or len(c2.images) <= 2:
                continue

            model = Model()
            model.params.OutputFlag = False
            # model.params.OutputFlag = False

            obj = [pi]

            p1 = model.addVar(vtype=GRB.BINARY)
            p2 = model.addVar(vtype=GRB.BINARY)
            q1 = {i: model.addVar(vtype=GRB.BINARY) for i in c1.images}
            q2 = {i: model.addVar(vtype=GRB.BINARY) for i in c2.images}
            r1 = {i: model.addVar(vtype=GRB.BINARY) for i in int_images}
            r2 = {i: model.addVar(vtype=GRB.BINARY) for i in int_images}

            model.update()

            obj.append(c1.cost * p1)
            obj.append(c2.cost * p2)

            for i in c1.images:
                dual = sum(self.img_cmd_duals[i, cmd] for cmd in c1.commands)
                obj.append(-dual * q1[i])

            for i in c2.images:
                dual = sum(self.img_cmd_duals[i, cmd] for cmd in c2.commands)
                obj.append(-dual * q2[i])

            for i in int_images:
                model.addConstr(p1 <= r1[i] + r2[i])
                model.addConstr(p2 <= r1[i] + r2[i])

            for i in c1.images:
                model.addConstr(q1[i] <= p1)
                if i in int_images:
                    model.addConstr(q1[i] <= 1 - r1[i])

            for i in c2.images:
                model.addConstr(q2[i] <= p2)
                if i in int_images:
                    model.addConstr(q2[i] <= 1 - r2[i])

            model.setObjective(sum(obj), GRB.MINIMIZE)
            model.optimize()

            if model.objVal >= 0:
                continue

            for c, v, r in [(c1, p1, r1), (c2, p2, r2)]:
                if v.x < 0.5:
                    continue

                # Figure out what images are left in the clique
                rem_imgs = set(c.images)
                for i, riv in r.items():
                    if riv.x > 0.5:
                        rem_imgs.remove(i)

                if len(rem_imgs) > 1:
                    cliques.append(Clique(self.problem, rem_imgs, c.commands))

            rem_imgs_1 = set(c1.images)
            rem_imgs_2 = set(c2.images)
            z = -c1.cost - c2.cost
            for i in int_images:
                dual1 = sum(self.img_cmd_duals[i, cmd] for cmd in c1.commands)
                dual2 = sum(self.img_cmd_duals[i, cmd] for cmd in c2.commands)
                if dual1 > dual2:
                    rem_imgs_1.remove(i)
                    z += dual1
                else:
                    rem_imgs_2.remove(i)
                    z += dual2

            if z > 0:
                cliques.append(Clique(self.problem, rem_imgs_1, c1.commands))
                cliques.append(Clique(self.problem, rem_imgs_2, c2.commands))

        return cliques
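# A sketch of how this solver is presumably driven (the `problem` object must
# expose .images, .commands and .images_by_command as used above; `saver` is
# accepted by solve() but not used inside it):
#
#   from dicp.solvers.colgen_model_gurobi import ColgenModelGurobi
#
#   solver = ColgenModelGurobi(time=10)   # optional time budget, in minutes
#   solver.solve(problem, saver=None)
#
# Each iteration solves the relaxed master LP, reads the dual prices off the
# covering and intersection constraints, and lets _subproblem() hunt for
# negative-reduced-cost cliques; once none are found, the master is re-solved
# with binary variables to produce the final clique selection.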
# license (file above): mit

# ---- next file: akvo/akvo-rsr :: akvo/rsr/models/result/indicator_period_data_comment.py ----
# -*- coding: utf-8 -*-

# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.

from .indicator_period_data import IndicatorPeriodData

from akvo.rsr.fields import ValidXMLTextField
from akvo.rsr.mixins import TimestampsMixin

from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _


class IndicatorPeriodDataComment(TimestampsMixin, models.Model):
    """
    Model for adding comments to data of an indicator period.
    """
    project_relation = 'results__indicators__periods__data__comments__in'

    data = models.ForeignKey(IndicatorPeriodData, verbose_name=_('indicator period data'),
                             related_name='comments')
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), db_index=True)
    comment = ValidXMLTextField(_('comment'), blank=True)

    class Meta:
        app_label = 'rsr'
        verbose_name = _('indicator period data comment')
        verbose_name_plural = _('indicator period data comments')
        ordering = ('-id', )
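# A minimal usage sketch for this model (Django ORM; `period_data` and `user`
# are assumed to be existing IndicatorPeriodData and user instances):
#
#   comment = IndicatorPeriodDataComment.objects.create(
#       data=period_data,
#       user=user,
#       comment='Looks consistent with the field report.',
#   )
#   period_data.comments.all()  # newest first, per Meta.ordering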
# license (file above): agpl-3.0

# ---- next file: reimandlab/Visualistion-Framework-for-Genome-Mutations :: website/imports/sites/site_mapper.py ----
import logging
import re
from typing import List
from warnings import warn

from pandas import DataFrame
from tqdm import tqdm

from database import create_key_model_dict
from models import Protein, Gene

logger = logging.getLogger(__name__)


def find_all(longer_string: str, sub_string: str):
    """Returns positions of all overlapping matches.

    Allowed alphabet excludes '^' and '$' characters.
    If sub_string starts with '^' or ends with '$',
    an exact match (at the front or at the end) will be performed.
    """
    if sub_string.startswith('^'):
        # there can be only one match
        # (or otherwise we would be matching with
        # less precision than declared earlier)
        return [0] if longer_string.startswith(sub_string[1:]) else []

    if sub_string.endswith('$'):
        return [len(longer_string) - len(sub_string) + 1] if longer_string.endswith(sub_string[:-1]) else []

    position = -1
    matches = []

    while True:
        position = longer_string.find(sub_string, position + 1)
        if position == -1:
            return matches
        matches.append(position)


def find_all_regex(longer_string: str, sub_string: str):
    """Should have the same effect as `find_all`, but as it adds
    the overhead of creating match objects and has to support
    many additional features, it is probably much slower than `find_all`.
    """
    if not (sub_string.startswith('^') or sub_string.endswith('$')):
        # positive lookahead to enable detection of overlapping matches
        sub_string = '(?=' + sub_string + ')'
    return [
        match.start()
        for match in re.finditer(sub_string, longer_string)
    ]


class OneBasedPosition(int):
    pass


class SiteMapper:

    def __init__(self, proteins, repr_site):
        self.proteins = proteins
        self.repr_site = repr_site
        self.genes = create_key_model_dict(Gene, 'name')
        self.has_gene_names = None
        self.already_warned = None

    def map_sites_by_sequence(self, sites: DataFrame) -> DataFrame:
        """Given a site with an isoform it should occur in,
        verify if the site really appears at the given position
        in this isoform and find where in all other isoforms
        this site appears (by exact match of a sequence span,
        typically 15 amino acids long: site position +/- 7 aa).

        If a site does not appear at the declared position in the
        original isoform, emit a warning and try to find the correct
        position (to overcome a potential sequence shift which might
        be a result of different sequence versions).

        If there is no isoform with the given refseq and there is
        a gene column in the provided sites DataFrame, all isoforms
        of this gene will be used for mapping.

        Args:
            sites: data frame with sites, having (at least) following columns:
                   'sequence', 'position', 'refseq', 'residue', 'left_sequence_offset'

        Returns:
            Data frame of sites mapped to isoforms in the database,
            including the sites in isoforms provided on input,
            if those have been confirmed or adjusted.
        """
        print('Mapping sites to isoforms')

        mapped_cnt = 0
        mapped_sites = []
        self.already_warned = set()
        self.has_gene_names = 'gene' in sites.columns

        for site in tqdm(sites.itertuples(index=False), total=len(sites)):

            was_mapped = False
            protein = None
            positions = {}

            isoforms_to_map = self.choose_isoforms_to_map(site)

            # find matches
            for isoform in isoforms_to_map:
                positions[isoform] = self.map_site_to_isoform(site, isoform)

            if protein:
                matches = positions[protein]
                self.collate_matches_with_expectations(matches, site)

            # create rows with sites
            for isoform, matched_positions in positions.items():

                for position in matched_positions:

                    # _replace() returns a new namedtuple with replaced values;
                    # it is not protected but hidden (to allow a 'replace' field)
                    new_site = site._replace(
                        refseq=isoform.refseq,
                        position=position
                    )
                    mapped_sites.append(new_site)
                    was_mapped = True

            if was_mapped:
                mapped_cnt += 1

        print(
            f'Successfully mapped {mapped_cnt} '
            f'({mapped_cnt / len(sites) * 100}%) sites'
        )

        return DataFrame(mapped_sites)

    def map_site_to_isoform(self, site, isoform: Protein) -> List[OneBasedPosition]:
        """Finds all occurrences of a site (by exact sequence match)
        in the provided sequence of an alternative isoform.

        The original position of the site is used to highlight "suspicious"
        cases, in which the matched site is far away (>50/90% of the isoform
        length) from the position in the original site. This is based on the
        premise that most alternative isoforms should not differ that much.

        Returned positions are 1-based.
        """
        matches = [
            m + 1 + site.left_sequence_offset
            # asterisks (*) representing stop codon are removed for the time of mapping
            # so expression like 'SOMECTERMINALSEQUENCE$' can be easily matched
            for m in find_all(isoform.sequence.rstrip('*'), site.sequence)
        ]

        if len(matches) > 1:
            warn(f'More than one match for: {self.repr_site(site)}')

        if matches:
            biggest_distance = max(abs(position - site.position) for position in matches)

            if biggest_distance > len(isoform.sequence) / 2:
                positions = ", ".join([str(m) for m in matches])

                if biggest_distance > len(isoform.sequence) * 9 / 10:
                    inform = warn
                else:
                    inform = logger.info

                inform(
                    f'This site {self.repr_site(site)} was found on position(s): '
                    f'{positions}; some are quite far away from the '
                    f'position in original isoform: {site.position}.'
                )

        return matches

    def choose_isoforms_to_map(self, site):
        protein = None

        if site.refseq not in self.proteins:
            if self.has_gene_names and site.gene in self.genes:
                gene = self.genes[site.gene]
                logger.info(
                    f'Using {gene} to map {self.repr_site(site)} (not using '
                    f'{site.refseq}, as this sequence is not available).'
                )
            else:
                if site.refseq not in self.already_warned:
                    warn(
                        f'No protein with {site.refseq} '
                        + (f'and no gene named {site.gene} ' if self.has_gene_names else '')
                        + f'(first encountered for {self.repr_site(site)}).'
                    )
                    self.already_warned.add(site.refseq)
                return []
        else:
            protein = self.proteins[site.refseq]
            gene = protein.gene

        if gene and gene.isoforms:
            return {self.proteins[isoform.refseq] for isoform in gene.isoforms}
        elif protein:
            return {protein}

        return []

    def collate_matches_with_expectations(self, original_isoform_matches, site):
        if not original_isoform_matches:
            warn(f'The site: {self.repr_site(site)} was not found in {site.refseq}, '
                 f'though it should appear in this isoform according to provided sites data.')
        elif all(match_pos != site.position for match_pos in original_isoform_matches):
            warn(f'The site: {self.repr_site(site)} does not appear on the exact given position in '
                 f'{site.refseq} isoform, though it was re-mapped to: {original_isoform_matches}.')
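# Quick illustration of find_all's contract (overlapping matches are returned,
# and '^' / '$' force anchored, exact matches at the ends):
assert find_all('AAAA', 'AA') == [0, 1, 2]   # overlapping hits
assert find_all('MKVL', '^MK') == [0]        # anchored at the front
assert find_all('SEQ', 'EQ$') == [1]         # anchored at the end
assert find_all('SEQ', 'EQX') == []          # no match -> empty list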
# license (file above): lgpl-2.1

# ---- next file: lconceicao/son-security-pilot :: fsm/tor-config/tor/tor.py ----
""" Copyright (c) 2015 SONATA-NFV ALL RIGHTS RESERVED. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION] nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. This work has been performed in the framework of the SONATA project, funded by the European Commission under Grant number 671517 through the Horizon 2020 and 5G-PPP programmes. The authors would like to acknowledge the contributions of their colleagues of the SONATA partner consortium (www.sonata-nfv.eu). """ import logging import yaml import os import sys from collections import namedtuple from ansible.parsing.dataloader import DataLoader from ansible.vars import VariableManager from ansible.inventory import Inventory from ansible.executor.playbook_executor import PlaybookExecutor from sonsmbase.smbase import sonSMbase logging.basicConfig(level=logging.INFO) LOG = logging.getLogger("fsm-tor-config-1") LOG.setLevel(logging.DEBUG) logging.getLogger("son-mano-base:messaging").setLevel(logging.INFO) class TORConfigurationFSM(sonSMbase): def __init__(self): self.specific_manager_type = 'fsm' self.service_name = 'psa-service' # TODO: match nsd name self.function_name = 'tor-vnf' self.specific_manager_name = 'tor-config' self.id_number = '1' self.version = 'v0.1' self.description = "FSM for configuring the TOR VNF" self.amqp_topic = ('son.' 
+ self.specific_manager_name + self.id_number + self.version) super(self.__class__, self).__init__( specific_manager_type=self.specific_manager_type, service_name=self.service_name, function_name=self.function_name, specific_manager_name=self.specific_manager_name, id_number=self.id_number, version=self.version, description=self.description) def on_registration_ok(self): LOG.debug("Received registration ok event.") # send the status to the SMR (not necessary) self.manoconn.publish(topic='specific.manager.registry.ssm.status', message=yaml.dump( {'name': self.specific_manager_id, 'status': 'Registration is done, ' 'initialising the configuration...'})) # subscribes to related topic (could be any other topic) self.manoconn.subscribe(self.on_configuration, topic=self.amqp_topic) def on_configuration(self, ch, method, props, response): if props.app_id != self.specific_manager_id: LOG.info('Start retrieving the IP address ...') response = yaml.load(str(response)) list = response['VNFR'] mgmt_ip = None vm_image = 'http://files.sonata-nfv.eu/son-psa-pilot/tor-vnf/' \ 'sonata-tor.qcow2' for x in range(len(list)): if (response['VNFR'][x]['virtual_deployment_units'] [0]['vm_image']) == vm_image: mgmt_ip = (response['VNFR'][x]['virtual_deployment_units'] [0]['vnfc_instance'][0]['connection_points'][0] ['type']['address']) if not mgmt_ip: LOG.error("Couldn't obtain IP address from VNFR") return # send the status to the SMR (not necessary) self.manoconn.publish( topic='specific.manager.registry.ssm.status', message=yaml.dump( {'name': self.specific_manager_id, 'status': "IP address:'{0}'".format(mgmt_ip)})) LOG.info("IP address:'{0}'".format(mgmt_ip)) self.manoconn.notify(topic=self.amqp_topic, msg=yaml.dump( {'name': self.specific_manager_id, 'IP': mgmt_ip})) # configure vm using ansible playbook variable_manager = VariableManager() loader = DataLoader() inventory = Inventory(loader=loader, variable_manager=variable_manager) playbook_path = 'fsm/tor-config/ansible/site.yml' if not os.path.exists(playbook_path): LOG.error('The playbook does not exist') return Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax', 'connection', 'module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check']) options = Options(listtags=False, listtasks=False, listhosts=False, syntax=False, connection='ssh', module_path=None, forks=100, remote_user='slotlocker', private_key_file=None, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=True, become_method=None, become_user='root', verbosity=None, check=False) variable_manager.extra_vars = {'hosts': mgmt_ip} passwords = {} pbex = PlaybookExecutor(playbooks=[playbook_path], inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=passwords) results = pbex.run() return def main(): TORConfigurationFSM() if __name__ == '__main__': main()
# license (file above): apache-2.0

# ---- next file: JuBra/GEMEditor :: GEMEditor/base/ui/EmptyDialogHorzButtons.py ----
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file '.\EmptyDialogHorzButtons.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_EmptyDialogHorzButtons(object):
    def setupUi(self, EmptyDialogHorzButtons):
        EmptyDialogHorzButtons.setObjectName("EmptyDialogHorzButtons")
        EmptyDialogHorzButtons.resize(212, 70)
        self.buttonBox = QtWidgets.QDialogButtonBox(EmptyDialogHorzButtons)
        self.buttonBox.setGeometry(QtCore.QRect(20, 20, 171, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")

        self.retranslateUi(EmptyDialogHorzButtons)
        self.buttonBox.accepted.connect(EmptyDialogHorzButtons.accept)
        self.buttonBox.rejected.connect(EmptyDialogHorzButtons.reject)
        QtCore.QMetaObject.connectSlotsByName(EmptyDialogHorzButtons)

    def retranslateUi(self, EmptyDialogHorzButtons):
        _translate = QtCore.QCoreApplication.translate
        EmptyDialogHorzButtons.setWindowTitle(_translate("EmptyDialogHorzButtons", "Dialog"))
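# Typical consumption pattern for a generated Ui_* class, shown as a sketch
# (the subclass name is illustrative, not part of the generated file):
#
#   class EmptyDialog(QtWidgets.QDialog, Ui_EmptyDialogHorzButtons):
#       def __init__(self, parent=None):
#           super().__init__(parent)
#           self.setupUi(self)  # builds the button box onto this dialog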
# license (file above): gpl-3.0

# ---- next file: zkbt/exopop :: exoatlas/populations/Population.py ----
# general class for exoplanet populations
from ..imports import *
from ..telescopes import *
from ..models import *
import string

basic_columns = [
    'name', 'hostname', 'ra', 'dec', 'distance', 'discoverer']

transit_columns = [
    'period', 'semimajoraxis', 'e', 'omega', 'inclination',
    'transit_epoch', 'transit_duration', 'transit_depth',
    'stellar_teff', 'stellar_mass', 'stellar_radius',
    'radius', 'mass', 'transit_ar', 'transit_b']

calculated_columns = [
    'a_over_rs', 'b', 'insolation', 'relative_insolation',
    'log_relative_insolation', 'teq', 'planet_luminosity',
    'density', 'surface_gravity', 'distance_modulus',
    'escape_velocity', 'escape_parameter', 'angular_separation',
    'imaging_contrast', 'stellar_luminosity']

table_columns = basic_columns + transit_columns
attribute_columns = table_columns + calculated_columns

method_columns = [
    'scale_height', 'transmission_signal', 'transmission_snr',
    'emission_signal', 'emission_snr', 'reflection_signal',
    'reflection_snr', 'stellar_brightness',
    'stellar_brightness_in_telescope_units', 'depth_uncertainty']

desired_columns = [
    'mass_uncertainty_upper', 'mass_uncertainty_lower',
    'radius_uncertainty_upper', 'radius_uncertainty_lower',
    'distance_uncertainty_upper', 'distance_uncertainty_lower']

# these are keywords that can be set for a population
default_plotkw = dict(color='black',
                      alpha=1,
                      zorder=0,
                      marker='o',
                      linewidth=1,
                      respond_to_color=True,
                      respond_to_size=True,
                      exact=False,
                      label_planets=False,
                      filled=True,
                      outlined=False)

# what keywords can we set for the population plotkw?
allowed_plotkw = list(default_plotkw.keys())
allowed_plotkw += ['s', 'c', 'cmap', 'norm', 'vmin', 'vmax',
                   'outlined', 'filled']


class Population(Talker):
    '''
    Create a population from a standardized table.
    '''

    # kludge?
    _pithy = True

    def __init__(self, standard, label='unknown', verbose=False, **plotkw):
        '''
        Initialize a Population of exoplanets from a standardized table.

        Parameters
        ----------
        standard : astropy.table.Table
            A table that contains all the necessary columns.
        **plotkw : dict
            All other keyword arguments will be stored as plotting keywords.
        '''

        # a standardized table with a minimum set of columns we can expect
        self.standard = Table(standard)

        # store a label for this population
        self.label = label

        # keywords to use for plotting
        self.plotkw = plotkw

        self._pithy = verbose == False

        # define some cleaned names and hostnames, for indexing
        try:
            self.standard['tidyname']
        except KeyError:
            self.standard['tidyname'] = [clean(x).lower()
                                         for x in self.standard['name']]
        try:
            self.standard['tidyhostname']
        except KeyError:
            self.standard['tidyhostname'] = [clean(x).lower()
                                             for x in self.standard['hostname']]

        # make sure the table is searchable via names
        self.standard.add_index('tidyname')
        self.standard.add_index('tidyhostname')

    def sort(self, x, reverse=False):
        '''
        Sort this population by some key or attribute.
        '''
        to_sort = getattr(self, x)
        i = np.argsort(to_sort)
        if reverse:
            i = i[::-1]
        self.standard = self.standard[i]
        return self

    def __add__(self, other):
        '''
        Create a new population by adding two together:
            `bigger = this + other`

        Parameters
        ----------
        other : Population
            The population to be tacked onto this one.

        Returns
        -------
        bigger : Population
            A new population, consisting of all the planets in `this`
            population and some extra ones added from `other`.
        '''

        # skip any warnings that pop up
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')

            # create a new table, joining both together
            table = join(self.standard, other.standard, join_type='outer')

            # create an informative label
            label = f'{self.label} + {other.label}'

        # create and return the new population
        return Population(table, label=label)

    def remove_by_key(self, other, key='tidyname'):
        '''
        Create a new population by removing some rows from here:
            `smaller = this - other`

        Parameters
        ----------
        other : Population
            The population of planets to be removed from `this`
            population to create a new `smaller` one.

        Returns
        -------
        smaller : Population
            A subset of `this` population, where some rows have been removed.
        '''

        # skip any warnings that pop up
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')

            # create a new table, containing only rows not shared with `other`
            table = setdiff(self.standard, other.standard, keys=key)

            # create an informative label
            label = f'{self.label} - {other.label}'

        # create and return the new population
        return Population(table, label=label)

    def __sub__(self, other):
        '''
        Create a new population by removing some rows from here:
            `smaller = this - other`

        Parameters
        ----------
        other : Population
            The population of planets to be removed from `this`
            population to create a new `smaller` one.

        Returns
        -------
        smaller : Population
            A subset of `this` population, where some rows have been removed.
        '''
        return self.remove_by_key(other)

    def __getitem__(self, key):
        '''
        Create a subpopulation of planets by indexing, slicing, or masking.
        '''

        # FIXME -- maybe make it easier to pull out intermediate masks?
        try:
            # if the key is an index/slice/mask, return it
            if self.label is None:
                label = None
            else:
                label = f'Subset of {self.label}'
            subset = Population(standard=self.standard[key],
                                label=label,
                                **self.plotkw)

            # if the key is a column, raise an error
            if key in self.standard.colnames:
                raise IndexError(f'''
                You seem to be trying to access a column from this
                population via `pop[{key}]`. For clarity, all `[]`
                indexing is reserved for selecting subsets of the
                population.

                To access your particular column, please try either
                `pop.{key}` or `pop.standard[{key}]` to return a
                1D array of the entries in that column.
                ''')
        except KeyError:
            # use a string or a list of strings to make a subset by planet name
            # FIXME - maybe we should make this say more when it's making a sneaky choice for us?
            try:
                subset = self.create_subset_by_name(key)
            except KeyError:
                subset = self.create_subset_by_hostname(key)

        return subset

    def create_subset_by_name(self, key):
        '''
        Extract a subset of this population,
        based on one or more planet names.

        Parameters
        ----------
        key : strings, list of strings
            The name of a planet ("GJ1132b") or a list of planet names.
            (All names will be stripped of special characters and
            converted to lower case before indexing.)

        Returns
        -------
        subset : Population
            A new population containing some subset of the original.
        '''

        # use a (list of) string(s) to index population by name
        if isinstance(key, str):
            # is it just one name?
            key = clean(key).lower()
        elif isinstance(key[0], str):
            # is it a list of names?
            key = [clean(k).lower() for k in key]

        # pull out rows by planet name
        subset = self.standard.loc['tidyname', key]

        # create a useful label for the population
        if isinstance(key, str):
            label = key
        elif isinstance(key[0], str):
            label = '+'.join(key)

        # create that new sub-population
        return Population(standard=subset,
                          label=label,
                          **self.plotkw)

    def create_subset_by_hostname(self, key):
        '''
        Extract a subset of this population,
        based on one or more planet hostnames.

        Parameters
        ----------
        key : strings, list of strings
            The hostname of a planet ("GJ1132") or a list of planet
            hostnames. (All names will be stripped of special characters
            and converted to lower case before indexing.)

        Returns
        -------
        subset : Population
            A new population containing some subset of the original.
        '''

        # use a string or a list of strings to index the population by name
        if isinstance(key, str):
            # is it just one name?
            key = clean(key).lower()
        elif isinstance(key[0], str):
            # is it a list of names?
            key = [clean(k).lower() for k in key]

        # pull out rows by planet hostname
        subset = self.standard.loc['tidyhostname', key]

        # create a useful label for the population
        if isinstance(key, str):
            label = key
        elif isinstance(key[0], str):
            label = '+'.join(key)

        # create that new sub-population
        return Population(standard=subset,
                          label=label,
                          **self.plotkw)

    def create_subset_by_position(self, coordinates,
                                  radius=1*u.arcmin,
                                  use_proper_motions=False,
                                  return_indices=False):
        '''
        Extract a subset of this population, by performing a spatial
        cross-match by RA and Dec. This will return all planets from
        this population that fall within the specified radius of at
        least one of the specified coordinates.

        Parameters
        ----------
        coordinates : astropy.coordinates.SkyCoord
            The sky coordinate (RA, Dec) or list of coordinates
            we want to search for nearby objects.
        radius : astropy.units.Quantity
            The angular radius around each position that we should
            include in each search.
        use_proper_motions : bool
            Should we use available proper motions, embedded in the
            skycoords, to propagate positions to a shared epoch before
            cross-matching? Alas, this ability is *not yet implemented*.
            FIXME!
        return_indices : bool
            Should we also return the indices of the original
            coordinates that were matched to existing positions?

        Returns
        -------
        subset : Population
            A new population containing a subset of the original,
            including *all* planets that fall within the 2D sky
            search space.
        '''

        if use_proper_motions:
            raise NotImplementedError('No cross-matching with proper motions yet :-(')

        # create astropy coordinates for this population
        population_coords = SkyCoord(ra=self.ra, dec=self.dec)

        # do a spatial cross match on the sky
        # (idx gives the index into coordinates,
        #  each corresponding to an entry in population_coords)
        idx, d2d, d3d = population_coords.match_to_catalog_sky(coordinates)

        # identify which systems are actually close on the sky
        match = d2d < radius

        # create new populations that are linked by spatial position
        i_match = match.nonzero()[0]
        # matched_coordinates = coordinates[idx[i_match]]
        subset = self.standard[i_match]

        # define a meaningful label
        label = f'Spatial Cross-Match ({len(coordinates)} positions, {radius} radius)'

        # create that new sub-population
        new_population = Population(standard=subset,
                                    label=label,
                                    **self.plotkw)

        # choose what to return
        if return_indices:
            i_from_original_coordinates = idx[i_match]
            return new_population, i_from_original_coordinates
        else:
            return new_population

    def __getattr__(self, key):
        '''
        If an attribute/method isn't defined for a population,
        look for it as a column of the standardized table.

        For example, `population.stellar_radius` will try to access
        `population.standard['stellar_radius']`.

        Parameters
        ----------
        key : str
            The attribute we're trying to get.
        '''
        if key == 'label':
            raise RuntimeError('Yikes!')
        try:
            # extract the column from the standardized table
            try:
                # try to return the array of quantities (with units)
                return self.standard[key].quantity
            except TypeError:
                # columns without units don't have quantities
                return self.standard[key].data
        except KeyError:
            # try to get a plotkw from this pop, from the plotting defaults, from None
            try:
                assert(key in allowed_plotkw)
                return self.plotkw.get(key, default_plotkw[key])
            except (AssertionError, KeyError):
                raise AttributeError(f"""
                Alas, there seems to be no way to find `.{key}`
                as an attribute or property of {self}.
                """)  # AtlasError

    def __setattr__(self, key, value):
        '''
        Define what happens when we try to set an attribute via `pop.attr = x`.

        If the keyword is a pre-defined "plotting" keyword in
        `allowed_plotkw`, then we should save it in a special `plotkw`
        dictionary. Otherwise, the attribute should be set as normal.

        Parameters
        ----------
        key : str
            The attribute we're trying to set.
        value : anything
            The value we're trying to give that attribute.
        '''
        if key in allowed_plotkw:
            # store plotting keywords in a separate plotting dictionary
            self.plotkw[key] = value
        else:
            # otherwise, store attributes as normal for objects
            self.__dict__[key] = value

    def __repr__(self):
        '''
        How should this object appear as a repr/str?
        '''
        return f'<{self.label} | population of {self.n} planets>'

    def uncertainty(self, key):
        '''
        Return an array of symmetric uncertainties on a column.

        Parameters
        ----------
        key : str
            The column for which we want errors.
        '''

        # first try for an `uncertainty_{key}` column
        try:
            return self.__getattr__(f'{key}_uncertainty')
        except (KeyError, AssertionError, AtlasError, AttributeError):
            # is including AttributeError a kludge?
            # this can be removed after debugging
            self.speak(f'no symmetric uncertainties found for "{key}"')

        # then try for crudely averaging asymmetric uncertainties
        try:
            lower = self.__getattr__(f'{key}_uncertainty_lower')
            upper = self.__getattr__(f'{key}_uncertainty_upper')
            avg = 0.5*(np.abs(lower) + np.abs(upper))
            return avg
        except (KeyError, AssertionError, AtlasError, AttributeError):
            # this can be removed after debugging
            self.speak(f'no asymmetric uncertainties found for "{key}"')

        # then give up and return nans
        return np.nan*self.standard[key]

    def uncertainty_lowerupper(self, key):
        '''
        Return two arrays of lower and upper uncertainties on a column.

        Parameters
        ----------
        key : str
            The column for which we want errors.

        Returns
        -------
        lower : np.array
            The magnitude of the lower uncertainties (x_{-lower}^{+upper})
        upper : np.array
            The magnitude of the upper uncertainties (x_{-lower}^{+upper})
        '''

        # first try for actual asymmetric uncertainties
        try:
            lower = self.__getattr__(f'{key}_uncertainty_lower')
            upper = self.__getattr__(f'{key}_uncertainty_upper')
            return np.abs(lower), np.abs(upper)
        except (KeyError, AssertionError, AttributeError):
            # this can be removed after debugging
            self.speak(f'no asymmetric uncertainties found for "{key}"')

        # then try for an `uncertainty_{key}` column
        try:
            sym = self.__getattr__(f'{key}_uncertainty')
            return np.abs(sym), np.abs(sym)
        except (KeyError, AssertionError, AttributeError):
            # this can be removed after debugging
            self.speak(f'no symmetric uncertainties found for "{key}"')

        # then give up and return nans
        unc = np.nan*self.__getattr__(key)
        return unc, unc

    def single(self, name):
        '''
        Create a subpopulation of a single planet.
        '''

        # create a subset of the standardized table
        subset = self.standard.loc[name]

        # create a new object, from this subset
        return Population(standard=subset,
                          label=name,
                          **self.plotkw)

    def validate_columns(self):
        '''
        Make sure this standardized table has all the necessary columns.
        Summarize the amount of good data in each.
        '''

        N = len(self.standard)
        for k in table_columns:
            try:
                n = sum(self.standard[k].mask == False)
            except AttributeError:
                try:
                    n = sum(np.isfinite(self.standard[k]))
                except TypeError:
                    n = sum(self.standard[k] != '')
            self.speak(f'{k:>25} | {n:4}/{N} rows = {n/N:4.0%} are not empty')

    def find(self, name):
        '''
        Return index of a particular planet in the population.

        ??? = maybe this could/should be replaced with some table cleverness?
        '''
        return np.array([clean(name) in clean(x) for x in self.name]).nonzero()[0]

    def update_planet(self, planet_name, **kwargs):
        '''
        Correct the properties of a particular planet,
        modifying its values in the standardized table.

        Parameters
        ----------
        planet_name : str
            The name of the planet to fix.
        **kwargs : dict
            Keyword arguments will go into modifying
            the properties of that planet.
        '''

        # find the entry to replace
        match = self.find(planet_name)
        if len(match) != 1:
            self.speak(f'failed when trying to modify parameters for {planet_name}')
            return
        else:
            match = match[0]

        # loop over the keys, modifying each
        self.speak(f'for planet "{planet_name}"')
        for k, new in kwargs.items():
            old = self.standard[k][match]
            self.speak(f' {k} changed from {old} to {new}')
            self.standard[k][match] = new
            if k == 'name':
                self.standard['tidyname'][match] = clean(new).lower()
            if k == 'hostname':
                self.standard['tidyhostname'][match] = clean(new).lower()

    def removeRows(self, indices):
        raise NotImplementedError('''
        The `removeRows` method has been removed. Please use something like
        `population[0:42]` or `population[ok]` to use slices, indices, or
        masks to create new sub-populations that extract subsets from this one.
        ''')

    @property
    def n(self):
        '''
        How many planets are in this population?
        '''
        return len(self.standard)

    def __len__(self):
        '''
        How many planets are in this population?
        '''
        return len(self.standard)

    @property
    def semimajor_axis(self):
        '''
        Have a safe way to calculate the semimajor axis of planets,
        that fills in gaps as necessary. Basic strategy:

            First from table.
            Then from NVK3L.
            Then from a/R*.
        '''

        # pull out the actual values from the table
        a = self.standard['semimajoraxis'].copy().quantity

        # try to replace bad ones with NVK3L
        bad = np.isfinite(a) == False
        self.speak(f'{sum(bad)}/{self.n} semimajoraxes are missing')

        # calculate from the period and the stellar mass
        P = self.period[bad]
        M = self.stellar_mass[bad]
        G = con.G
        a[bad] = ((G*M*P**2/4/np.pi**2)**(1/3)).to('AU')

        # replace those that are still bad with the a/R*
        stillbad = np.isfinite(a) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after NVK3L')
        # (pull from table to avoid potential for recursion)
        a_over_rs = self.standard['transit_ar'][stillbad].quantity
        rs = self.standard['stellar_radius'][stillbad].quantity
        a[stillbad] = a_over_rs*rs

        return a

    @property
    def angular_separation(self):
        '''
        Calculate the angular separation, simply as theta = a/D.
        '''
        a = self.semimajor_axis
        D = self.distance
        theta = np.arctan(a/D).to(u.arcsec)
        return theta

    @property
    def imaging_contrast(self):
        '''
        What is the reflected light eclipse depth,
        for an albedo of 100%? But use a kludged radius.
        '''
        return 0.25*(self.kludge_radius/self.semimajor_axis).decompose()**2

    @property
    def a_over_rs(self):
        '''
        Have a safe way to calculate the scaled semimajor axis of planets,
        that fills in gaps as necessary. Basic strategy:

            First from table, mostly derived from transit.
            Then from the semimajor axis.
        '''

        # pull out the values from the table
        a_over_rs = self.standard['transit_ar'].copy()

        # try to replace bad ones with a and R*
        bad = np.isfinite(a_over_rs) == False
        self.speak(f'{sum(bad)}/{self.n} values for a/R* are missing')

        a = self.semimajor_axis[bad]
        R = self.stellar_radius[bad]
        a_over_rs[bad] = a/R

        stillbad = np.isfinite(a_over_rs) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after a and R*')

        return a_over_rs

    @property
    def stellar_luminosity(self):
        T = self.stellar_teff
        R = self.stellar_radius
        sigma = con.sigma_sb
        return (4*np.pi*R**2*sigma*T**4).to(u.Lsun)

    @property
    def e(self):
        '''
        FIXME -- this assumes all missing eccentricities are 0!
        '''

        # pull out the actual values from the table
        e = self.standard['e'].copy().quantity

        # replace the missing ones with zero
        bad = np.isfinite(e) == False
        self.speak(f'{sum(bad)}/{self.n} eccentricities are missing')
        self.speak(f'assuming they are all zero')
        e[bad] = 0

        return e

    @property
    def omega(self):
        '''
        (FIXME! we need better longitudes of periastron)
        '''

        # pull out the actual values from the table
        omega = self.standard['omega'].copy().quantity

        # check how many are missing
        bad = np.isfinite(omega) == False
        self.speak(f'{sum(bad)}/{self.n} longitudes of periastron are missing')

        e_zero = self.e == 0
        self.speak(f'{sum(e_zero)} have eccentricities assumed to be 0')
        omega[e_zero] = 0*u.deg

        return omega

    @property
    def b(self):
        '''
        Transit impact parameter.
        (FIXME! split this into transit and occultation.)
        '''

        # pull out the actual values from the table
        b = self.standard['transit_b'].copy().quantity

        # try to replace bad ones with an estimate from the geometry
        bad = np.isfinite(b) == False
        self.speak(f'{sum(bad)}/{self.n} impact parameters are missing')

        # calculate from the scaled semimajor axis and the inclination
        a_over_rs = self.a_over_rs[bad]
        i = self.standard['inclination'][bad].quantity
        e = self.e[bad]
        omega = self.omega[bad]
        b[bad] = a_over_rs*np.cos(i)*((1-e**2)/(1+e*np.sin(omega)))

        # report those that are still bad
        stillbad = np.isfinite(b) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after using i')

        return b

    # the 1360 W/m^2 that Earth receives from the Sun
    earth_insolation = (1*u.Lsun/4/np.pi/u.AU**2).to(u.W/u.m**2)

    @property
    def insolation(self):
        '''
        The insolation the planet receives, in W/m^2.
        '''

        # calculate the average insolation the planet receives
        insolation = self.stellar_luminosity/4/np.pi/self.semimajor_axis**2
        return insolation.to(u.W/u.m**2)

    @property
    def relative_insolation(self):
        '''
        The insolation the planet receives, relative to Earth.
        '''
        return self.insolation/self.earth_insolation

    @property
    def log_relative_insolation(self):
        return np.log10(self.relative_insolation)

    @property
    def relative_cumulative_xuv(self):
        xuv_proxy = (self.stellar_luminosity/u.Lsun)**-0.6
        return self.relative_insolation*xuv_proxy

    @property
    def teq(self):
        '''
        The equilibrium temperature of the planet.
        '''
        f = self.insolation
        sigma = con.sigma_sb
        A = 1
        return ((f*A/4/sigma)**(1/4)).to(u.K)

    @property
    def planet_luminosity(self):
        '''
        The bolometric luminosity of the planet (assuming zero albedo).
        '''
        return (self.teq**4*con.sigma_sb*4*np.pi*self.radius**2).to(u.W)

    @property
    def transit_depth(self):
        '''
        The depth of the transit.
        (FIXME, clarify if this is 1.5-3.5 or what.)
        '''

        # pull out the actual values from the table
        d = self.standard['transit_depth'].copy().quantity

        # try to replace bad ones with (Rp/Rs)**2
        bad = np.isfinite(d) == False
        self.speak(f'{sum(bad)}/{self.n} transit depths are missing')

        Rp = self.radius[bad]
        Rs = self.stellar_radius[bad]
        d[bad] = (Rp/Rs).decompose()**2

        # report those that are still bad
        stillbad = np.isfinite(d) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after Rp/Rs')

        return d

    @property
    def transit_duration(self):
        '''
        The duration of the transit.
        (FIXME, clarify if this is 1.5-3.5 or what.)
        '''

        with warnings.catch_warnings():
            warnings.simplefilter('ignore')

            # pull out the actual values from the table
            d = self.standard['transit_duration'].copy().quantity

            # try to replace bad ones with an estimate from the geometry
            bad = np.isfinite(d) == False
            self.speak(f'{sum(bad)}/{self.n} transit durations are missing')

            P = self.period[bad]
            a_over_rs = self.a_over_rs[bad]
            b = self.b[bad]

            T0 = P/np.pi/a_over_rs
            T = T0*np.sqrt(1-b**2)

            e = self.e[bad]
            omega = self.omega[bad]
            factor = np.sqrt(1 - e**2)/(1 + e*np.sin(omega))

            d[bad] = (T*factor).to(u.day)

            # report those that are still bad
            stillbad = np.isfinite(d) == False
            self.speak(f'{sum(stillbad)}/{self.n} are still missing after P, a/R*, b')

            return d

    @property
    def kludge_mass(self):
        '''
        Have a safe way to calculate the mass of planets,
        that fills in gaps as necessary. Basic strategy:

            First from table.
            Then from msini.
        '''

        # pull out the actual values from the table
        M = self.standard['mass'].copy().quantity

        # check how many are missing
        bad = np.isfinite(M) == False
        self.speak(f'{sum(bad)}/{self.n} masses are missing')

        # estimate from the msini
        try:
            M[bad] = self.msini[bad]
        except (KeyError, AssertionError, AtlasError, AttributeError):
            pass

        # report those that are still bad
        stillbad = np.isfinite(M) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after msini')

        return M

    @property
    def kludge_radius(self):
        '''
        Have a safe way to calculate the radii of planets,
        that fills in gaps as necessary. Basic strategy:

            First from table.
            Then from mass, via Chen & Kipping (2017).
        '''

        # pull out the actual values from the table
        R = self.standard['radius'].copy().quantity

        # check how many are missing
        bad = np.isfinite(R) == False
        self.speak(f'{sum(bad)}/{self.n} radii are missing')

        # estimate from Chen and Kipping
        try:
            M = self.kludge_mass
            R[bad] = estimate_radius(M[bad])
        except (KeyError, AssertionError, AtlasError, AttributeError):
            pass

        # report those that are still bad
        stillbad = np.isfinite(R) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after Chen & Kipping (2017)')

        return R

    @property
    def kludge_age(self):
        '''
        Have a safe way to calculate the age of planets,
        that fills in gaps as necessary. Basic strategy:

            First from table.
            Then assume 5 Gyr.
        '''

        # pull out the actual values from the table
        age = self.standard['stellar_age'].copy().quantity

        # check how many are missing
        bad = np.isfinite(age) == False
        self.speak(f'{sum(bad)}/{self.n} ages are missing')

        # assume a default age for the rest
        try:
            age[bad] = 5*u.Gyr
        except (KeyError, AssertionError, AtlasError, AttributeError):
            pass

        # report those that are still bad
        stillbad = np.isfinite(age) == False
        self.speak(f'{sum(stillbad)}/{self.n} are still missing after blindly assuming 5Gyr for missing ages')

        return age

    @property
    def surface_gravity(self):
        '''
        (FIXME) -- make an assumption for planets without masses.
        '''
        G = con.G
        M = self.mass
        R = self.radius
        g = (G*M/R**2).to('m/s**2')
        return g

    @property
    def density(self):
        '''
        The density of the planet.
        '''
        mass = self.mass
        volume = 4/3*np.pi*(self.radius)**3
        return (mass/volume).to('g/cm**3')

    @property
    def escape_velocity(self):
        '''
        The escape velocity of the planet.
        '''
        G = con.G
        M = self.mass
        R = self.radius
        return np.sqrt(2*G*M/R).to('km/s')

    @property
    def escape_parameter(self):
        '''
        The Jeans atmospheric escape parameter for atomic hydrogen,
        at the equilibrium temperature of the planet.
        '''
        k = con.k_B
        T = self.teq
        mu = 1
        m_p = con.m_p
        G = con.G
        M = self.mass
        R = self.radius

        e_thermal = k*T
        e_grav = G*M*m_p/R
        return (e_grav/e_thermal).decompose()

    @property
    def distance_modulus(self):
        '''
        The distance modulus to the system, in magnitudes.
        '''
        mu = 5*np.log10(self.distance/(10*u.pc))
        return mu

    def scale_height(self, mu=2.32):
        '''
        The scale height of the atmosphere, at equilibrium temperature.
        '''
        k = con.k_B
        T = self.teq
        m_p = con.m_p
        g = self.surface_gravity
        return (k*T/mu/m_p/g).to('km')

    def transmission_signal(self, mu=2.32, threshold=2):
        '''
        What is the transit depth of 1 scale height of an atmosphere
        transiting in front of the star?

        Parameters
        ----------
        mu : float
            Mean molecular weight (default 2.32 for H/He)
        threshold : float
            By how many sigma must the planet mass be detected?
        '''
        with np.errstate(invalid='ignore'):
            H = self.scale_height(mu)
            Rp = self.radius
            Rs = self.stellar_radius
            depth = (2*H*Rp/Rs**2).decompose()

            # mask out planets whose masses are too uncertain
            dlnm = self.uncertainty('mass')/self.mass
            bad = dlnm > 1/threshold
            depth[bad] = np.nan
            return depth

    def reflection_signal(self, albedo=1.0):
        '''
        What is the reflected light eclipse depth, for an albedo of 100%?
        '''
        return albedo*0.25*(self.radius/self.semimajor_axis).decompose()**2

    def emission_signal(self, wavelength=5*u.micron):
        '''
        What is the thermal emission eclipse depth,
        assuming Planck spectra for both star and planet?

        This calculation assumes a Bond albedo of 0
        and that heat is uniformly distributed over the planet.

        Parameters
        ----------
        wavelength : astropy.unit.Quantity
            The wavelength at which it should be calculated.
        '''

        # create thermal emission sources for both star and planet
        import rainbowconnection as rc
        star = rc.Thermal(teff=self.stellar_teff, radius=self.stellar_radius)
        planet = rc.Thermal(teff=self.teq, radius=self.radius)

        # calculate the depth as the luminosity ratio
        depths = planet.spectrum(wavelength)/star.spectrum(wavelength)

        return depths

    def stellar_brightness(self, wavelength=5*u.micron):
        '''
        How many photons/s/m^2/micron do we receive from the star?

        This is calculated from the distance, radius, and stellar
        effective temperature of the stars. (It could potentially be
        improved with PHOENIX model grids and/or cleverness with
        photometry.)

        Parameters
        ----------
        wavelength : astropy.unit.Quantity
            The wavelength at which it should be calculated.
        '''

        # import some tools for easy cartoon spectra
        import rainbowconnection as rc

        # create source with right temperature, size, distance
        teff, radius = self.stellar_teff, self.stellar_radius
        star = rc.Thermal(teff=teff, radius=radius).at(self.distance)

        # calculate the energy flux
        flux_in_energy = star.spectrum(wavelength)

        # convert to photon flux
        photon_energy = con.h*con.c/wavelength/u.ph
        flux_in_photons = flux_in_energy/photon_energy

        # return the photon flux
        return flux_in_photons.to('ph s^-1 m^-2 micron^-1')

    def stellar_brightness_in_telescope_units(self, telescope_name='JWST', **kw):
        '''
        The stellar brightness, converted to telescope units.

        Parameters
        ----------
        telescope_name : str
            The name of the telescope.
        wavelength : astropy.unit.Quantity
            The wavelength at which it should be calculated.
        R : float
            The spectral resolution at which the telescope will bin wavelengths.
        dt : astropy.units.quantity.Quantity
            The time over which the telescope exposes.
        '''

        # what counts as 1 "telescope unit" (e.g. JWST at R=20 at 5 microns for 1 hour)
        telescope_unit = define_telescope_unit_by_name(telescope_name, **kw)

        # what's the photon flux (photons/m**2/s)
        flux_in_photons = self.stellar_brightness(telescope_unit.wavelength)

        # quote the brightness as (for example) gigaphotons/JWST at R=20 at 5 microns in 1 hour
        unit = lotsofphotons_unit/telescope_unit
        return flux_in_photons.to(unit)

    def depth_uncertainty(self, telescope_name='JWST',
                          per_transit=False,
                          dt=1*u.hour, **kw):
        '''
        What is the transit/eclipse depth uncertainty with a particular
        telescope at a particular wavelength at a particular resolution?

        By default, this will be calculated for one transit. Optionally,
        it can be calculated for a given amount of time instead.

        Parameters
        ----------
        telescope_name : str
            The name of the telescope.
        per_transit : bool
            If True, calculate the depth uncertainty for one transit.
            If False, calculate the depth uncertainty for a certain
            amount of in-transit time. You likely want to specify `dt`
            as a keyword argument to set that amount of in-transit time.
            In either case, an out-of-transit baseline equal to the
            total in-transit time will be assumed. This means the actual
            time cost will be twice the transit duration or `dt` chosen,
            and the depth uncertainty will be a factor sqrt(2) larger
            than the pure photon noise binned to the relevant timescale.
        wavelength : astropy.unit.Quantity
            The wavelength at which it should be calculated.
        R : float
            The spectral resolution at which the telescope will bin wavelengths.
        dt : astropy.units.quantity.Quantity
            The time over which the telescope exposes. If `per_transit=True`,
            this will be ignored. Otherwise, it will set the total amount
            of in-transit time observed, assuming that an equal amount of
            time will *also* be observed out of transit.
        '''

        # what counts as 1 "telescope unit" (e.g. JWST at R=20 at 5 microns for 1 hour)
        telescope_unit = define_telescope_unit_by_name(telescope_name, dt=dt, **kw)

        # what's the photon flux (photons/m**2/s)
        flux_in_photons = self.stellar_brightness(telescope_unit.wavelength)

        # what's the total collecting power?
        if per_transit:
            ratio_of_collecting_time = self.transit_duration/dt
        else:
            ratio_of_collecting_time = 1
        collecting_power = 1*telescope_unit*ratio_of_collecting_time

        # what's the total number of photons collected during transit
        N = (flux_in_photons*collecting_power).to(u.ph).value

        # what's the flux uncertainty on the time scale of one transit?
        sigma = 1/np.sqrt(N)

        # inflate by a factor of sqrt(2) for equal out-of-transit
        oot = np.sqrt(2)
        sigma_depth = sigma*oot

        return sigma_depth

    def _get_noise_and_unit(self, telescope_name='JWST',
                            per_transit=False, **kw):
        '''
        Tiny helper to get the noise and the telescope_unit
        for a telescope observation of a planet.
        '''

        # figure out the noise
        noise = self.depth_uncertainty(telescope_name=telescope_name,
                                       per_transit=per_transit, **kw)

        # create a telescope unit (mostly to get a default wavelength)
        telescope_unit = define_telescope_unit_by_name(telescope_name, **kw)

        return noise, telescope_unit

    def emission_snr(self, telescope_name='JWST', **kw):
        '''
        What's the approximate S/N for the detection of the
        thermal emission eclipse of a planet?
        '''
        noise, telescope_unit = self._get_noise_and_unit(telescope_name=telescope_name, **kw)
        signal = self.emission_signal(wavelength=telescope_unit.wavelength)
        return signal/noise

    def reflection_snr(self, telescope_name='JWST', albedo=1, **kw):
        '''
        What's the approximate S/N for the detection of the
        reflected light eclipse of a planet?
        '''
        noise, telescope_unit = self._get_noise_and_unit(telescope_name=telescope_name, **kw)
        signal = self.reflection_signal(albedo=albedo)
        return signal/noise

    def transmission_snr(self, telescope_name='JWST', mu=2.32, threshold=2, **kw):
        '''
        What's the approximate S/N for the detection of the
        transmission signal of a planet?
        '''
        noise, telescope_unit = self._get_noise_and_unit(telescope_name=telescope_name, **kw)
        signal = self.transmission_signal(mu=mu, threshold=threshold)
        return signal/noise

    def scatter(self, xname, yname, c=None, s=None,
                names=True, xlog=True, ylog=True, **kw):
        '''
        Quick tool to plot one parameter against another.
        '''
        plt.ion()
        x, y = self.__getattr__(xname), self.__getattr__(yname)
        try:
            self.ax.cla()
        except AttributeError:
            self.figure = plt.figure('Exoplanet Population')
            self.ax = plt.subplot()

        self.ax.set_xlabel(xname)
        self.ax.set_ylabel(yname)
        self.ax.scatter(x, y, c=c, s=s, **kw)

        if False:
            for i in range(len(x)):
                self.ax.text(x[i], y[i], self.table['NAME'][i])

        if xlog:
            plt.xscale('log')
        if ylog:
            plt.yscale('log')

        plt.draw()

    def thumbtack(self, maxr=1000, dr=100, labels=False):
        '''
        Plot the planets as thumbtacks.
        '''

        def scale(d):
            return np.array(d)**1.5

        r = scale(self.distance)
        x, y = r*np.cos(self.ra*np.pi/180), r*np.sin(self.ra*np.pi/180)

        plt.ion()
        plt.figure('thumbtacks')

        ax = plt.subplot()
        ax.cla()
        ax.set_aspect('equal')

        theta = np.linspace(0, 2*np.pi, 1000)
        angle = -90*np.pi/180

        gridkw = dict(alpha=0.25, color='green')
        for originalradius in np.arange(dr, maxr*2, dr):
            radii = scale(originalradius)
            ax.plot(radii*np.cos(theta), radii*np.sin(theta),
                    linewidth=3, **gridkw)
            ax.text(radii*np.cos(angle), radii*np.sin(angle),
                    '{0:.0f} pc'.format(originalradius),
                    rotation=90 + angle*180/np.pi,
                    va='bottom', ha='center',
                    size=13, weight='extra bold', **gridkw)

        ax.plot(x, y, marker='o', alpha=0.5, color='gray',
                linewidth=0, markeredgewidth=0)
        close = (self.name == 'WASP-94A b').nonzero()[0]  # (self.distance < maxr).nonzero()[0]
        if labels:
            for c in close:
                plt.text(x[c], y[c], self.name[c])
        ax.set_xlim(-scale(maxr), scale(maxr))
        ax.set_ylim(-scale(maxr), scale(maxr))

    def compare(self, x='teq', y='radius', area='depth', color='stellar_radius'):
        xplot = self.__dict__[x]
        yplot = self.__dict__[y]
        colorplot = self.__dict__[color]

        # scale marker areas by the chosen column
        maxarea = 1000
        area = self.__dict__[area]
        sizeplot = np.sqrt(area/np.nanmax(area)*maxarea)

        plt.scatter(xplot, yplot, linewidth=0, marker='o', s=sizeplot)


class PredefinedPopulation(Population):
    '''
    Population object keeps track of an exoplanet population.
    '''

    expiration = 0.00001

    def __init__(self, label='exoplanets', remake=False, skip_update=False, **plotkw):
        '''
        Initialize a population, by trying the following steps:
            1) Load a standardized ascii table.
            2) Ingest a raw table, and standardize it.

        Parameters
        ----------
        label : str
            The name of this population, for use both in filenames
            and labeling points on plots.
        remake : bool
            Should we re-ingest this table from its raw ingredients?
        skip_update : bool
            Should we skip checking for updates in the existing data?
        **plotkw : dict
            All other keywords are stored as plotting suggestions.
        '''

        # set the name for this population
        self.label = label
        try:
            # try to load the standardized table
            assert(remake == False)
            standard = self.load_standard(skip_update=skip_update)
        except (IOError, FileNotFoundError, AssertionError):
            # or create a new standardized table and save it
            standard = self.ingest_table(remake=remake)

        # initialize with a standard table
        Population.__init__(self, standard=standard, label=label, **plotkw)

    @property
    def fileprefix(self):
        '''
        Define a fileprefix for this population, to be used for
        setting the filename of the standardized population.
        '''
        return clean(self.label)

    def ingest_table(self, **kwargs):
        '''
        Ingest a new population table of arbitrary format,
        and then standardize it, using the tools defined in
        inherited population classes.
        '''

        # load the raw table
        raw = self.load_raw()

        # trim elements from raw table as necessary
        trimmed = self.trim_raw(raw)

        # create a standardized table from the array
        standard = self.create_standard(trimmed)

        # save the standardized table
        self.save_standard(standard)

        return standard

    @property
    def standard_path(self):
        '''
        Define the filepath for the standardized table.
        '''
        return os.path.join(directories['data'],
                            f'standardized-{self.fileprefix}.txt')

    def load_raw(self):
        raise NotImplementedError('''
        Yikes! The `.load_raw` method has not been defined
        for whatever object is trying to call it!
        ''')

    def trim_raw(self, raw):
        '''
        Trim bad/unnecessary rows out of a raw table of planet properties.
        '''

        # no trimming necessary
        trimmed = raw

        # for debugging, hang onto the trimmed table as a hidden attribute
        self._trimmed = trimmed

        # return a trimmed table
        return self._trimmed

    def load_standard(self, skip_update=False):
        '''
        Load a standardized population table. Generally this will be
        from a file like ~/.exoatlas/standardized-*.txt

        Parameters
        ----------
        skip_update : bool
            Should we skip checks to see if the data are too stale?

        Returns
        -------
        standard : astropy.table.Table
            A table of planet properties, with a minimal set of columns.
        '''

        # make sure this file is recent enough (unless we're skipping updates)
        if not skip_update:
            old = check_if_needs_updating(self.standard_path, self.expiration)
            assert(old == False)

        # keywords for reading a standardized table
        readkw = dict(format='ecsv', fill_values=[('', np.nan), ('--', np.nan)])

        standard = ascii.read(self.standard_path, **readkw)
        self.speak(f'Loaded standardized table from {self.standard_path}')

        # ??? change this to do something more clever with tables
        # masked = np.ma.filled(standard, fill_value = np.nan)

        return standard

    def save_standard(self, standard):
        '''
        Save the standardized table out to a text file
        like ~/exoatlas/standardized-*.txt
        '''

        # save it as an ascii table for humans to read
        standard.write(self.standard_path,
                       format='ascii.ecsv',
                       overwrite=True)
        self.speak(f'Saved a standardized text table to {self.standard_path}')

    def create_table(self, desired_columns=['name',
                                            'radius',
                                            'relative_insolation',
                                            'stellar_radius',
                                            'stellar_teff',
                                            'ra', 'dec', 'distance']):
        '''
        Create an astropy table based on this population,
        using a subset of columns, which may include ones
        that have been calculated as Population properties.

        Parameters
        ----------
        desired_columns : list
            The columns you want to include. Anything that can be
            accessed via Population.??? can be provided here as a string.

        Returns
        -------
        table : astropy.table.Table
            A table, with those columns, in the same order
            as the Population itself.
        '''
        # FIXME! need to add method support for arguments

        # create a dictionary with the desired columns
        d = {c: getattr(self, c) for c in desired_columns}

        # turn that into an astropy Table
        t = Table(d)

        return t
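A minimal usage sketch of the indexing and arithmetic described in the docstrings above (illustrative only; `standard_table` stands in for any standardized astropy table with the required columns, and `u` is astropy.units as imported via `..imports`):

pop = Population(standard_table, label='demo')

one = pop['GJ1132b']                   # subset by (cleaned) planet name
nearby = pop[pop.distance < 20*u.pc]   # subsets also work with boolean masks
merged = pop + nearby                  # outer join of two populations
trimmed = pop - nearby                 # remove rows that match by tidyname
print(trimmed)                         # <demo - Subset of demo | population of ... planets>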
mit
ajaybhatia/ypg-odm-project
src/profiles/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-30 09:57
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('authtools', '0003_auto_20160128_0912'),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
                ('slug', models.UUIDField(blank=True, default=uuid.uuid4, editable=False)),
                ('odm', models.CharField(choices=[('Tinno', 'Tinno'), ('Huaqin', 'Huaqin'), ('IMG', 'IMG'), ('Ragentek', 'Ragentek'), ('Wingtech', 'Wingtech'), ('Coolpad', 'Coolpad'), ('Amer', 'Amer'), ('Sprocomm', 'Sprocomm'), ('Topwisez', 'Topwisez')], default='Tinno', max_length=100)),
                ('picture', models.ImageField(blank=True, null=True, upload_to='profile_pics/%Y-%m-%d/', verbose_name='Profile picture')),
                ('bio', models.CharField(blank=True, max_length=200, null=True, verbose_name='Short Bio')),
                ('email_verified', models.BooleanField(default=False, verbose_name='Email verified')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
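For reference, the migration above corresponds to roughly this model definition (a reconstruction from the migration, not the project's actual models.py; the choices list is abbreviated here):

import uuid

from django.conf import settings
from django.db import models


class Profile(models.Model):
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                on_delete=models.CASCADE, primary_key=True)
    slug = models.UUIDField(blank=True, default=uuid.uuid4, editable=False)
    odm = models.CharField(max_length=100, default='Tinno',
                           choices=[('Tinno', 'Tinno'), ('Huaqin', 'Huaqin')])  # ...abbreviated
    picture = models.ImageField(blank=True, null=True,
                                upload_to='profile_pics/%Y-%m-%d/',
                                verbose_name='Profile picture')
    bio = models.CharField(blank=True, max_length=200, null=True,
                           verbose_name='Short Bio')
    email_verified = models.BooleanField(default=False, verbose_name='Email verified')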
mit
bobbydurrett/PythonDBAGraphs
ashcount.py
""" PythonDBAGraphs: Graphs to help with Oracle Database Tuning Copyright (C) 2016 Robert Taft Durrett (Bobby Durrett) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Contact: [email protected] ashcount.py Shows ASH active session counts in time period. """ import myplot import util def dbaashcount(start_time,end_time,instance_number): """ Group by minute. 10 second samples. dba table """ q_string = """ create table dbaashcount as select to_char(all_time.sample_time,'YYYY/MM/DD HH24:MI') date_minute, sum(all_time.cnt)/6 all_count, sum(nvl(cpu_time.cnt,0))/6 cpu_count from (select sample_time, count(*) cnt from DBA_HIST_ACTIVE_SESS_HISTORY a where sample_time between to_date('""" q_string += start_time q_string += """','DD-MON-YYYY HH24:MI:SS') and to_date('""" q_string += end_time q_string += """','DD-MON-YYYY HH24:MI:SS') and a.INSTANCE_NUMBER = """ q_string += instance_number q_string += """ group by sample_time) all_time, (select sample_time, count(*) cnt from DBA_HIST_ACTIVE_SESS_HISTORY a where sample_time between to_date('""" q_string += start_time q_string += """','DD-MON-YYYY HH24:MI:SS') and to_date('""" q_string += end_time q_string += """','DD-MON-YYYY HH24:MI:SS') and session_state = 'ON CPU' and a.INSTANCE_NUMBER = """ q_string += instance_number q_string += """ group by sample_time) cpu_time where all_time.sample_time=cpu_time.sample_time(+) group by to_char(all_time.sample_time,'YYYY/MM/DD HH24:MI') """ return q_string def vdollarashcount(start_time,end_time,instance_number): """ Group by minute. 1 second samples. 
v$ table """ q_string = """ create table combinedashcount as select to_char(all_time.sample_time,'YYYY/MM/DD HH24:MI') date_minute, sum(all_time.cnt)/60 all_count, sum(nvl(cpu_time.cnt,0))/60 cpu_count from (select sample_time, count(*) cnt from GV$ACTIVE_SESSION_HISTORY a where sample_time between to_date('""" q_string += start_time q_string += """','DD-MON-YYYY HH24:MI:SS') and to_date('""" q_string += end_time q_string += """','DD-MON-YYYY HH24:MI:SS') and a.INST_ID = """ q_string += instance_number q_string += """ group by sample_time) all_time, (select sample_time, count(*) cnt from GV$ACTIVE_SESSION_HISTORY a where sample_time between to_date('""" q_string += start_time q_string += """','DD-MON-YYYY HH24:MI:SS') and to_date('""" q_string += end_time q_string += """','DD-MON-YYYY HH24:MI:SS') and session_state = 'ON CPU' and a.INST_ID = """ q_string += instance_number q_string += """ group by sample_time) cpu_time where all_time.sample_time=cpu_time.sample_time(+) group by to_char(all_time.sample_time,'YYYY/MM/DD HH24:MI') """ return q_string database,dbconnection = util.script_startup('ASH active session counts') start_time=util.input_with_default('Start date and time (DD-MON-YYYY HH24:MI:SS)','01-JAN-1900 12:00:00') end_time=util.input_with_default('End date and time (DD-MON-YYYY HH24:MI:SS)','01-JAN-2200 12:00:00') instance_number=util.input_with_default('Database Instance (1 if not RAC)','1') # first get ash counts by minutes from dba view dbconnection.run_return_no_results_catch_error("drop table dbaashcount") dbacrtable = dbaashcount(start_time,end_time,instance_number) dbconnection.run_return_no_results(dbacrtable); # now get from ash view put in combined table first dbconnection.run_return_no_results_catch_error("drop table combinedashcount") vdcrtable = vdollarashcount(start_time,end_time,instance_number) dbconnection.run_return_no_results(vdcrtable) # insert dba rows for date and minute not in v$ insert_sql = """ insert into combinedashcount select * from dbaashcount d where d.date_minute not in (select date_minute from combinedashcount)""" dbconnection.run_return_no_results(insert_sql) dbconnection.commit() querytext = """ select to_date(DATE_MINUTE,'YYYY/MM/DD HH24:MI'), ALL_COUNT, CPU_COUNT from combinedashcount order by date_minute""" results = dbconnection.run_return_flipped_results(querytext) util.exit_no_results(results) # plot query myplot.xdatetimes = results[0] myplot.ylists = results[1:] myplot.title = "ASH active session count for "+database+" database, instance "+instance_number myplot.ylabel1 = "Sessions" myplot.ylistlabels=["Total","CPU"] myplot.line()
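A quick sketch of how the query builders are used on their own (the date strings are only examples; note that the values are spliced directly into the SQL text, so the inputs are assumed to be trusted):

sql = dbaashcount('01-MAY-2016 00:00:00', '02-MAY-2016 00:00:00', '1')
print(sql)   # emits the full CREATE TABLE ... AS SELECT statement with the literals filled in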
gpl-3.0
Zerack/zoll.me
leapday/templatetags/leapday_extras.py
'''
James D. Zoll

4/15/2013

Purpose: Defines template tags for the Leapday Recipedia application.

License: This is a public work.
'''

from django import template

register = template.Library()


@register.filter()
def css_name(value):
    '''
    Returns the lower-case, hyphen-separated display name,
    which is used as the css class for the good.

    Keyword Arguments:
    value -> Good. The good to get the css class for.
    '''
    return value.lower().replace(' ', '-')


@register.filter()
def desc_value_sort(value):
    '''
    Designed to sort the results of .iteritems() on a dict
    of goods for the index.

    Keyword Arguments:
    value -> List of tuples.
    '''
    return sorted(value, key=lambda x: x[1]['active']['value'], reverse=True)


@register.filter()
def base_good_display_name(value):
    BASE_GOODS = {'good_water': 'Water',
                  'good_food': 'Food',
                  'good_wood': 'Wood',
                  'good_stone': 'Stone',
                  'goodtype_crystal': 'Crystal'}
    return BASE_GOODS[value]
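The filters can also be exercised directly as plain functions, which is how a quick unit test would call them (a sketch; in templates they would appear as {{ value|css_name }} after {% load leapday_extras %}):

from leapday.templatetags.leapday_extras import css_name, base_good_display_name

print(css_name('Crystal Shard'))             # -> 'crystal-shard'
print(base_good_display_name('good_water'))  # -> 'Water'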
mit
karlnapf/ozone-roulette
russian_roulette/RussianRoulette.py
from abc import abstractmethod

from numpy.lib.function_base import delete
from numpy.ma.core import mean, zeros, log, arange, std
from numpy.random import permutation, rand

import logging


class RussianRoulette(object):
    def __init__(self, threshold, block_size=1):
        self.threshold = threshold
        self.block_size = block_size

    @abstractmethod
    def get_estimate(self, estimates, index):
        start_idx = index * self.block_size
        stop_idx = index * self.block_size + self.block_size

        # if there are enough samples, use them, sub-sample if not
        if stop_idx <= len(estimates):
            logging.debug("Averaging over %d samples from index %d to %d"
                          % (self.block_size, start_idx, stop_idx))
            indices = arange(start_idx, stop_idx)
        else:
            logging.debug("Averaging over a random subset of %d samples"
                          % self.block_size)
            indices = permutation(len(estimates))[:self.block_size]

        return mean(estimates[indices])

    def exponential(self, estimates):
        logging.debug("Entering")

        # find a strict lower bound on the estimates and remove it from list
        bound = estimates.min()
        bound_idx = estimates.argmin()
        estimates = delete(estimates, bound_idx)
        estimates = estimates - bound

        # find an integer close to the mean of the transformed estimates and divide
        E = max(int(round(abs(mean(estimates)))), 1)
        estimates = estimates / E

        logging.info("Using %f as lower bound on estimates" % bound)
        logging.info("Computing product of E=%d RR estimates" % E)
        logging.info("Std-deviation after scaling is %f" % std(estimates))

        # index for iterating through the used estimates
        # (might be averaged, so might be lower than the number of available
        # estimates if the block size is greater than one)
        estimate_idx = 0

        samples = zeros(E)
        for iteration in range(E):
            weight = 1

            # start with x^0 which is 1
            samples[iteration] = 1
            term = 1

            # index for computed samples
            series_term_idx = 1

            while weight > 0:
                # update current term of infinite series,
                # averaging over a block of estimates
                x_inner = self.get_estimate(estimates, estimate_idx)
                term *= (x_inner / series_term_idx)

                # if summation has reached threshold, update weights
                if abs(term) < self.threshold:
                    q = term / self.threshold
                    if rand() < q:
                        # continue and update weight
                        weight = weight / q
                    else:
                        # stop summation
                        weight = 0

                samples[iteration] += weight * term
                estimate_idx += 1
                series_term_idx += 1

            logging.info("RR estimate %d/%d with threshold %.2f is %.4f and took %d series terms"
                         % (iteration + 1, E, self.threshold, samples[iteration], series_term_idx))

        # now put things together. Note that samples contains an unbiased estimate
        # which might be quite small. However, due to the removal of the bound,
        # this will not cause an underflow and we can just take the log.
        logging.debug("Leaving")
        return bound + sum(log(samples))
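A minimal usage sketch, assuming `estimates` is a 1-D numpy array of unbiased estimates of some log-quantity (the numbers below are synthetic, purely for illustration):

import numpy as np

rr = RussianRoulette(threshold=0.1, block_size=2)
estimates = np.random.normal(loc=5.0, scale=0.5, size=1000)  # synthetic unbiased estimates
print(rr.exponential(estimates))  # estimate assembled from a randomly truncated exponential series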
bsd-2-clause
qedsoftware/commcare-hq
custom/ilsgateway/zipline/reports/zipline_warehouse_order_report.py
from collections import namedtuple

from django.utils.translation import ugettext_lazy as _

from corehq.apps.reports.filters.dates import DatespanFilter
from corehq.apps.reports.filters.fixtures import AsyncLocationFilter
from custom.ilsgateway.zipline.data_sources.zipline_warehouse_order_data_source import \
    ZiplineWarehouseOrderDataSource
from custom.ilsgateway.zipline.filters import EmergencyOrderStatusChoiceFilter
from custom.ilsgateway.zipline.reports.zipline_report import ZiplineReport

ReportConfig = namedtuple(
    'ReportConfig',
    ['domain', 'start_date', 'end_date', 'location_id', 'statuses']
)


class ZiplineWarehouseOrderReport(ZiplineReport):

    report_title = _('Zipline Warehouse - Order')
    name = _('Zipline Warehouse - Order')
    slug = 'zipline_warehouse_order'

    fields = [
        DatespanFilter,
        AsyncLocationFilter,
        EmergencyOrderStatusChoiceFilter
    ]

    @property
    def report_config(self):
        return ReportConfig(
            domain=self.domain,
            start_date=self.datespan.startdate,
            end_date=self.datespan.end_of_end_day,
            location_id=self.location_id,
            statuses=self.statuses
        )

    @property
    def data_source(self):
        return ZiplineWarehouseOrderDataSource(self.report_config)

    @property
    def shared_pagination_GET_params(self):
        return [
            dict(name='startdate', value=self.datespan.startdate_display),
            dict(name='enddate', value=self.datespan.enddate_display),
            dict(name='location_id', value=self.location_id),
            dict(name='statuses', value=self.statuses)
        ]
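A small sketch of how the frozen config travels from report to data source (field values are illustrative; in production they come from the report's filters):

config = ReportConfig(
    domain='example-domain',
    start_date='2016-09-01',
    end_date='2016-09-30',
    location_id='some-location-id',
    statuses=['delivered'],
)
data_source = ZiplineWarehouseOrderDataSource(config)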
bsd-3-clause
T-002/pycast
pycast/common/json_encoder.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

# Copyright (c) 2012-2015 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import json

from pycastobject import PyCastObject


class PycastEncoder(json.JSONEncoder, PyCastObject):
    """Encodes a PyCastObject to json."""

    def default(self, obj):
        # Cannot use the to_json method, because it returns a string rather
        # than a serializable list.
        return obj.to_twodim_list()
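Usage follows the standard json.JSONEncoder pattern: pass the class via `cls`, and `default()` is invoked for any object json cannot serialize natively (a sketch; `some_pycast_object` stands for any object exposing `to_twodim_list()`):

import json

serialized = json.dumps(some_pycast_object, cls=PycastEncoder)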
mit
sanja7s/SR_Twitter
src_MONGO/monthly_user_tweets_clean_2_json.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

"""
go through user tweets, collect them per user, clean
and save in a json format for each month.
we end up with 7 months since May and Nov are half included.
The output is 7 files, json formatted and ready to be loaded into MongoDB
"""

from nltk.tokenize import wordpunct_tokenize
from nltk.corpus import stopwords
from nltk.book import FreqDist
from collections import defaultdict
import codecs
import matplotlib.pyplot as plt
import pylab as P
import numpy as np
import glob, os
import datetime
import json

IN_DIR = "../../../DATA/General/"
os.chdir(IN_DIR)
F_IN = "usrs_with_more_than_20_tweets.dat"

F_OUT_5 = codecs.open("5_tweets.dat", "w")
F_OUT_6 = codecs.open("6_tweets.dat", "w")
F_OUT_7 = codecs.open("7_tweets.dat", "w")
F_OUT_8 = codecs.open("8_tweets.dat", "w")
F_OUT_9 = codecs.open("9_tweets.dat", "w")
F_OUT_10 = codecs.open("10_tweets.dat", "w")
F_OUT_11 = codecs.open("11_tweets.dat", "w")
F_OUT_12 = codecs.open("12_tweets.dat", "w")

f_out_list = {5: F_OUT_5, 6: F_OUT_6, 7: F_OUT_7, 8: F_OUT_8, 9: F_OUT_9,
              10: F_OUT_10, 11: F_OUT_11}

ENGLISH_STOPWORDS = set(stopwords.words('english'))


def clean(tweet):
    return [i.lower() for i in tweet
            if i.isalpha() and i not in ENGLISH_STOPWORDS and i != 'RT']


def extract_monthly_user_CV_and_num_tweets():
    user_monthly_tweets = defaultdict(int)
    user_monthly_count = defaultdict(int)
    cnt_all_tweets = 0
    with codecs.open(F_IN, 'r', encoding='utf8') as input_file:
        # the code loops through the input, collecting tweet text
        # for each user into a dict
        for line in input_file:
            cnt_all_tweets += 1
            line = line.split()
            user = line[0]
            if user not in user_monthly_tweets:
                user_monthly_tweets[user] = defaultdict(list)
                user_monthly_count[user] = defaultdict(int)
            UTS = long(line[4])
            month = datetime.datetime.utcfromtimestamp(UTS).month
            tweet = line[5:]
            user_monthly_tweets[user][month] += clean(tweet)
            user_monthly_count[user][month] += 1
            if cnt_all_tweets % 100000 == 0:
                print tweet, clean(tweet)
                print "Processed %d tweets" % cnt_all_tweets

    for user in user_monthly_tweets:
        for MO in user_monthly_tweets[user]:
            output_file = f_out_list[MO]
            usr_tweets_json = {}
            usr_tweets_json['_id'] = str(user)
            usr_tweets_json['count'] = str(user_monthly_count[user][MO])
            usr_tweets_json['txt'] = [{el[0]: el[1]} for el in
                                      FreqDist(user_monthly_tweets[user][MO]).iteritems()]
            output_file.write(unicode(json.dumps(usr_tweets_json, ensure_ascii=False)) + '\n')


extract_monthly_user_CV_and_num_tweets()
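Each line written to a monthly output file is one MongoDB-ready document mapping a user to that month's word frequencies; for a hypothetical user it would look like (illustrative values only):

{"_id": "someuser", "count": "12", "txt": [{"coffee": 5}, {"morning": 3}]}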
mit
giffordw/OSMOSreduce
proc4k.py
#! /usr/bin/env python
#
# Paul Martini (OSU)
#
#   proc4k.py files
#
# Perform the overscan subtraction and remove the relative gain
# differences for a single R4K image or a list of R4K images.
# Also works for MDM4K.
#
# Steps:
#  1. determine if input is a file or list of files
#  2. identify binning, size of overscan
#  3. remove overscan and trim
#  4. remove relative gain variations (TBD)
#
#  8 Sep 2011: initial version for just bias subtraction
# 12 Sep 2011: tested, adapted to run on MDM computers, work on MDM4K data
# 16 Sep 2011: added glob module
#  8 Feb 2012: fixed error in even/odd column definitions, added more
#              tracking and debugging information
#
# -----------------------------------------------------------------------------

# from __future__ import division
import string as str
import os
from sys import argv, exit
import numpy as np
from astropy.io import fits as pyfits
import glob
from matplotlib import pyplot

# Version and Date
versNum = "1.1.0"
versDate = "2012-02-08"

############################
#### Define various routines
############################

scriptname = argv[0][str.rfind(argv[0], "/") + 1::]


def usage():
    print "\nUsage for %s v%s (%s):" % (scriptname, versNum, versDate)
    print "  %s file.fits [or file*.fits or file1.fits file2.fits]" % (scriptname)
    print "\nWhere: file.fits, file*.fits, etc. are fits files\n"


def parseinput():
    flags = []
    files = []
    # check for any command-line arguments and input files
    for i in range(1, len(argv)):
        if str.find(argv[i], "-") == 0:
            flags.append(argv[i].strip("-"))
        else:
            files.append(argv[i])
    # check that the input files exist
    for i in range(1, len(files)):
        if os.path.isfile(files[i]) == 0:
            print "\n** ERROR: " + files[i] + " does not exist."
            exit(1)
    return files, flags


def filt(x, l):
    y = [0] * len(x)
    c = 0.6745
    for a in range(0, len(x)):
        y[a] = l[x[a]]
    m = np.median(y)
    print m
    dv = [elm - m for elm in y]
    mad = np.median(np.fabs(dv) / c)   # Median-Absolute-Deviation
    # print m + mad / 2
    for b in range(0, len(y)):
        if y[b] > m + 20 * mad / 2 or y[b] < m - 20 * mad / 2:
            print "reject: %d " % b
            y[b] = m
    return y


def ftlgd(x, l, i, d):
    coe = np.polynomial.legendre.legfit(x, l, d)
    return np.polynomial.legendre.legval(i, coe)


############################
####  Script starts here ###
############################

Debug = False
BiasSingle = 0
BiasRow = 1
BiasFit = 2
# BiasType = BiasRow
BiasType = BiasSingle
# BiasType = BiasFit
Gain = False     # keep as False until gain values are known
R4K = True

# Gain values for each amplifier [to be computed]
r4k_gain_q1e = 1.0
r4k_gain_q1o = 1.0
r4k_gain_q2e = 1.0
r4k_gain_q2o = 1.0
r4k_gain_q3e = 1.0
r4k_gain_q3o = 1.0
r4k_gain_q4e = 1.0
r4k_gain_q4o = 1.0
mdm4k_gain_q1 = 1.0
mdm4k_gain_q2 = 1.0
mdm4k_gain_q3 = 1.0
mdm4k_gain_q4 = 1.0

# switch to more primitive (slower) code at MDM
AT_MDM = False
user = os.getlogin()
if str.find(user, 'obs24m') >= 0 or str.find(user, 'obs13m') >= 0:
    AT_MDM = True

files = []
for input in argv[1:]:
    files = files + glob.glob(input)

if len(files) == 0:
    usage()
    exit(1)

for file in files:
    if os.path.isfile(file):
        fitsfile = pyfits.open(file)
        naxis1 = fitsfile[0].header['NAXIS1']
        naxis2 = fitsfile[0].header['NAXIS2']
        overscanx = fitsfile[0].header['OVERSCNX']
        overscany = fitsfile[0].header['OVERSCNY']   # should be 0
        ccdxbin = fitsfile[0].header['CCDXBIN']
        ccdybin = fitsfile[0].header['CCDYBIN']
        detector = fitsfile[0].header['DETECTOR']
        telescope = fitsfile[0].header['TELESCOP']
        overscanx /= ccdxbin
        overscany /= ccdybin
        # OSMOS or direct? [useful for knowing if MIS keywords have values]
        OSMOS = True
        if str.find(telescope, 'McGraw') >= 0:
            OSMOS = False   # direct image with the 1.3m
        # print file, naxis1, naxis2, overscanx, overscany, detector
        print "Processing %s[%d:%d] OVERSCANX=%d OVERSCANY=%d from %s obtained at the %s" \
            % (file, naxis1, naxis2, overscanx, overscany, detector, telescope)
        if overscanx * ccdxbin < 32:
            print "Error: OVERSCNX=%d less than 32 in %s" % (overscanx, file)
            exit(1)
        if overscany > 0:
            print "Error: code not tested with OVERSCNY > 0!"
            exit(1)
        if str.find(detector, 'R4K') < 0:
            # if not R4K, assume MDM4K
            R4K = False

        # IRAF units: 1:32, 33:556, 557:1080, 1081:1112
        # Python units: 0:31, 32:555, 556:1079, 1080:1111
        c1 = overscanx                  # 32   first image column counting from *zero*
        c2 = int(0.5 * naxis1) - 1      # 555  last image column on first half
        c3 = c2 + 1                     # 556  first image column on second half
        c4 = naxis1 - overscanx - 1     # 1079 last image column
        r1 = overscany                  # 0    first image row
        r2 = int(0.5 * naxis2) - 1      # 523  last image row on first half
        r3 = r2 + 1                     # 524  first image row on second half
        r4 = naxis2 - overscany - 1     # 1047 last image row
        outnaxis1 = c4 - c1 + 1         # 1048 columns in output, trimmed image
        outnaxis2 = r4 - r1 + 1         # 1048 rows in output, trimmed image
        collen = int(0.5 * outnaxis1)   # number of columns in an image quadrant
        rowlen = int(0.5 * outnaxis2)   # number of rows in an image quadrant

        #
        # Assumed layout: (ds9 perspective)
        #
        #   q2    q4
        #
        #   q1    q3
        #
        # each R4K quadrant has an even 'e' and an odd 'o' amplifier
        #

        if Debug:
            print "Quadrants in IRAF pixels: "
            print " q1: [%d : %d, %d : %d] " % (c1 + 1, c2 + 1, r1 + 1, r2 + 1)
            print " q2: [%d : %d, %d : %d] " % (c1 + 1, c2 + 1, r3 + 1, r4 + 1)
            print " q3: [%d : %d, %d : %d] " % (c3 + 1, c4 + 1, r1 + 1, r2 + 1)
            print " q4: [%d : %d, %d : %d] " % (c3 + 1, c4 + 1, r3 + 1, r4 + 1)

        ## Calculate the bias level for each amplifier
        data = fitsfile[0].data
        # identify the columns to use to calculate the bias level
        # skip the first and last columns of the overscan
        # changed to 'list' for hiltner due to primitive python version
        starti = 4 / ccdxbin
        if AT_MDM:
            if R4K:
                cols_over_q1e = list(np.arange(starti, overscanx - 2, 2))
                cols_over_q1o = list(np.arange(starti + 1, overscanx - 2, 2))
                cols_over_q2e = cols_over_q1e
                cols_over_q2o = cols_over_q1o
                cols_over_q3e = list(np.arange(naxis1 - overscanx + starti, naxis1 - 2, 2))
                cols_over_q3o = list(np.arange(naxis1 - overscanx + starti + 1, naxis1 - 2, 2))
                cols_over_q4e = cols_over_q3e
                cols_over_q4o = cols_over_q3o
                cols_q1e = list(np.arange(c1, c2, 2))
                cols_q1o = list(np.arange(c1 + 1, c2 + 2, 2))
                cols_q2e = cols_q1e
                cols_q2o = cols_q1o
                cols_q3e = list(np.arange(c3, c4, 2))
                cols_q3o = list(np.arange(c3 + 1, c4 + 2, 2))
                cols_q4e = cols_q3e
                cols_q4o = cols_q3o
            else:
                cols_over_q1 = list(np.arange(starti, overscanx - 2, 1))
                cols_over_q2 = cols_over_q1
                cols_over_q3 = list(np.arange(naxis1 - overscanx + starti, naxis1 - 2, 1))
                cols_over_q4 = cols_over_q3
                cols_q1 = list(np.arange(c1, c2 + 1, 1))
                cols_q2 = cols_q1
                cols_q3 = list(np.arange(c3, c4 + 1, 1))
                cols_q4 = cols_q3
        else:
            if R4K:
                # identify the even and odd columns in the overscan
                cols_over_q1e = np.arange(starti, overscanx - starti, 2)
                cols_over_q1o = np.arange(starti + 1, overscanx - starti, 2)
                cols_over_q2e = cols_over_q1e
                cols_over_q2o = cols_over_q1o
                cols_over_q3e = np.arange(naxis1 - overscanx + starti, naxis1 - starti, 2)
                cols_over_q3o = np.arange(naxis1 - overscanx + starti + 1, naxis1 - starti, 2)
                cols_over_q4e = cols_over_q3e
                cols_over_q4o = cols_over_q3o
                # identify the even and odd columns in each quadrant
                cols_q1e = np.arange(c1, c2, 2)
                cols_q2e = cols_q1e
                cols_q1o = np.arange(c1 + 1, c2 + 2, 2)
                cols_q2o = cols_q1o
                cols_q3e = np.arange(c3, c4, 2)
                cols_q4e = cols_q3e
                cols_q3o = np.arange(c3 + 1, c4 + 2, 2)
                cols_q4o = cols_q3o
            else:
                cols_over_q1 = np.arange(starti, overscanx - 2, 1)
                cols_over_q2 = cols_over_q1
                cols_over_q3 = np.arange(naxis1 - overscanx + starti, naxis1 - 2, 1)
                cols_over_q4 = cols_over_q3
                cols_q1 = np.arange(c1, c2 + 1, 1)
                cols_q2 = cols_q1
                cols_q3 = np.arange(c3, c4 + 1, 1)
                cols_q4 = cols_q3

        if Debug:
            print "Overscan columns: "
            print "Q1/Q2 overscan even first and last columns:", cols_over_q1e[0], cols_over_q1e[-1], len(cols_over_q1e)
            print "Q1/Q2 overscan odd first and last columns:", cols_over_q1o[0], cols_over_q1o[-1], len(cols_over_q1o)
            print "Q3/Q4 overscan even first and last columns:", cols_over_q3e[0], cols_over_q3e[-1], len(cols_over_q3e)
            print "Q3/Q4 overscan odd first and last columns:", cols_over_q3o[0], cols_over_q3o[-1], len(cols_over_q3o)
        if Debug:
            print "Image columns: "
            print "Q1/Q2 even first and last columns:", cols_q1e[0], cols_q1e[-1], len(cols_q1e), r1, r2, len(cols_q1e)
            print "Q1/Q2 odd first and last columns:", cols_q1o[0], cols_q1o[-1], len(cols_q1o), r1 + rowlen, r2 + rowlen, len(cols_q1o)
            print "Q3/Q4 even first and last columns:", cols_q3e[0], cols_q3e[-1], len(cols_q3e), r1, r2, len(cols_q3e)
            print "Q3/Q4 odd first and last columns:", cols_q3o[0], cols_q3o[-1], len(cols_q3o), r1 + rowlen, r2 + rowlen, len(cols_q3o)

        # create arrays with the median overscan vs. row for each amplifier
        if R4K:
            bias_q1e = np.zeros(rowlen, dtype=float)
            bias_q1o = np.zeros(rowlen, dtype=float)
            bias_q2e = np.zeros(rowlen, dtype=float)
            bias_q2o = np.zeros(rowlen, dtype=float)
            bias_q3e = np.zeros(rowlen, dtype=float)
            bias_q3o = np.zeros(rowlen, dtype=float)
            bias_q4e = np.zeros(rowlen, dtype=float)
            bias_q4o = np.zeros(rowlen, dtype=float)
        else:
            bias_q1 = np.zeros(rowlen, dtype=float)
            bias_q2 = np.zeros(rowlen, dtype=float)
            bias_q3 = np.zeros(rowlen, dtype=float)
            bias_q4 = np.zeros(rowlen, dtype=float)

        # calculate 1-D bias arrays for each amplifier
        for i in range(r1, r2 + 1, 1):
            if R4K:
                bias_q1e[i] = np.median(data[i, cols_over_q1e])   # data[rows, columns]
                bias_q1o[i] = np.median(data[i, cols_over_q1o])
                bias_q2e[i] = np.median(data[i + rowlen, cols_over_q2e])
                bias_q2o[i] = np.median(data[i + rowlen, cols_over_q2o])
                bias_q3e[i] = np.median(data[i, cols_over_q3e])
                bias_q3o[i] = np.median(data[i, cols_over_q3o])
                bias_q4e[i] = np.median(data[i + rowlen, cols_over_q4e])
                bias_q4o[i] = np.median(data[i + rowlen, cols_over_q4o])
            else:   # MDM4K
                bias_q1[i] = np.median(data[i, cols_over_q1])   # data[rows, columns]
                bias_q2[i] = np.median(data[i + rowlen, cols_over_q2])
                bias_q3[i] = np.median(data[i, cols_over_q3])
                bias_q4[i] = np.median(data[i + rowlen, cols_over_q4])

        ##########################################################################
        # Subtract the bias from the output
        ##########################################################################

        if BiasType == BiasSingle:
            OverscanKeyValue = 'BiasSingle'
            suffix = 'b'
            # subtract a single bias value for each amplifier
            if R4K:
                bq1e = np.median(bias_q1e)
                bq1o = np.median(bias_q1o)
                bq2e = np.median(bias_q2e)
                bq2o = np.median(bias_q2o)
                bq3e = np.median(bias_q3e)
                bq3o = np.median(bias_q3o)
                bq4e = np.median(bias_q4e)
                bq4o = np.median(bias_q4o)
                if AT_MDM:
                    for r in range(r1, r2 + 1):
                        for c in cols_q1e:
                            data[r, c] -= bq1e
                        for c in cols_q1o:
                            data[r, c] -= bq1o
                        for c in cols_q2e:
                            data[r + rowlen, c] -= bq2e
                        for c in cols_q2o:
                            data[r + rowlen, c] -= bq2o
                        for c in cols_q3e:
                            data[r, c] -= bq3e
                        for c in cols_q3o:
                            data[r, c] -= bq3o
                        for c in cols_q4e:
                            data[r + rowlen, c] -= bq4e
                        for c in cols_q4o:
                            data[r + rowlen, c] -= bq4o
                else:
                    data[r1:r2 + 1, cols_q1e] -= bq1e
                    data[r1:r2 + 1, cols_q1o] -= bq1o
                    data[r3:r4 + 1, cols_q2e] -= bq2e
                    data[r3:r4 + 1, cols_q2o] -= bq2o
                    data[r1:r2 + 1, cols_q3e] -= bq3e
                    data[r1:r2 + 1, cols_q3o] -= bq3o
                    data[r3:r4 + 1, cols_q4e] -= bq4e
                    data[r3:r4 + 1, cols_q4o] -= bq4o
            else:
                bq1 = np.median(bias_q1)
                bq2 = np.median(bias_q2)
                bq3 = np.median(bias_q3)
                bq4 = np.median(bias_q4)
                if AT_MDM:
                    for r in range(r1, r2 + 1):
                        for c in cols_q1:
                            data[r, c] -= bq1
                        for c in cols_q2:
                            data[r + rowlen, c] -= bq2
                        for c in cols_q3:
                            data[r, c] -= bq3
                        for c in cols_q4:
                            data[r + rowlen, c] -= bq4
                else:
                    data[r1:r2 + 1, cols_q1] -= bq1
                    data[r3:r4 + 1, cols_q2] -= bq2
                    data[r1:r2 + 1, cols_q3] -= bq3
                    data[r3:r4 + 1, cols_q4] -= bq4
        elif BiasType == BiasRow:
            # not implemented on Hiltner, for MDM4K, etc.
            print "Warning: This mode has not been fully tested"
            OverscanKeyValue = 'BiasRow'
            # subtract a bias value for each row of each amplifier
            # print r1, r2, len(bias_q1e)
            suffix = 'br'
            for i in range(r1, r2 + 1, 1):
                data[i, cols_q1e] -= bias_q1e[i]
                data[i, cols_q1o] -= bias_q1o[i]
                data[i + rowlen, cols_q2e] -= bias_q2e[i]
                data[i + rowlen, cols_q2o] -= bias_q2o[i]
                data[i, cols_q3e] -= bias_q3e[i]
                data[i, cols_q3o] -= bias_q3o[i]
                data[i + rowlen, cols_q4e] -= bias_q4e[i]
                data[i + rowlen, cols_q4o] -= bias_q4o[i]
        elif BiasType == BiasFit:
            OverscanKeyValue = 'BiasFit'
            # print "Error: Have not implemented a fit to the bias yet. Please use BiasSingle"
            suffix = 'bf'
            xl = range(r1, r2 + 1, 1)
            d = 4
            f_q1e = filt(xl, bias_q1e)
            f_q1o = filt(xl, bias_q1o)
            f_q2e = filt(xl, bias_q2e)
            f_q2o = filt(xl, bias_q2o)
            f_q3e = filt(xl, bias_q3e)
            f_q3o = filt(xl, bias_q3o)
            f_q4e = filt(xl, bias_q4e)
            f_q4o = filt(xl, bias_q4o)
            for i in xl:
                data[i, cols_q1e] -= ftlgd(xl, f_q1e, i, d)
                data[i, cols_q1o] -= ftlgd(xl, f_q1o, i, d)
                data[i + rowlen, cols_q2e] -= ftlgd(xl, f_q2e, i, d)
                data[i + rowlen, cols_q2o] -= ftlgd(xl, f_q2o, i, d)
                data[i, cols_q3e] -= ftlgd(xl, f_q3e, i, d)
                data[i, cols_q3o] -= ftlgd(xl, f_q3o, i, d)
                data[i + rowlen, cols_q4e] -= ftlgd(xl, f_q4e, i, d)
                data[i + rowlen, cols_q4o] -= ftlgd(xl, f_q4o, i, d)
            # exit(1)
            # pyplot.plot(xl, [a for a in xl], color='blue')
            # pyplot.plot(xl, [ftlgd(xl, xl, a, d) for a in xl], color='red')
            # print bias_q1e
            # print xl
            pyplot.plot(xl, f_q1e, color='blue')
            pyplot.plot(xl, [ftlgd(xl, f_q1e, a, d) for a in xl], color='red')
            # pyplot.step(bedge[:-1], [a + 1e-20 for a in histbg], color='black')
            # pyplot.step(bedge[:-1], [a + 1e-20 for a in histreal], color='red')
            ## pyplot.bar(bedge[:-1], fakehistn_l[0], edgecolor='green', width=0.4, log=True, fill=False)
            # pyplot.yscale('log')
            # pyplot.ylim(ymin=1e-1)
            pyplot.show()
        else:
            print "Error: Bias subtraction type not parsed correctly"
            exit(1)

        ##########################################################################
        # Apply the gain correction  [not yet implemented]
        ##########################################################################

        if Gain:
            if R4K:
                if AT_MDM:
                    for r in range(r1, r2 + 1):
                        for c in cols_q1e:
                            data[r, c] /= r4k_gain_q1e
                        for c in cols_q1o:
                            data[r, c] /= r4k_gain_q1o
                        for c in cols_q2e:
                            data[r + rowlen, c] /= r4k_gain_q2e
                        for c in cols_q2o:
                            data[r + rowlen, c] /= r4k_gain_q2o
                        for c in cols_q3e:
                            data[r, c] /= r4k_gain_q3e
                        for c in cols_q3o:
                            data[r, c] /= r4k_gain_q3o
                        for c in cols_q4e:
                            data[r + rowlen, c] /= r4k_gain_q4e
                        for c in cols_q4o:
                            data[r + rowlen, c] /= r4k_gain_q4o
                else:
                    data[r1:r2, cols_q1e] /= r4k_gain_q1e
                    data[r1:r2, cols_q1o] /= r4k_gain_q1o
                    data[r3:r4, cols_q2e] /=
r4k_gain_q2e data[r3:r4,cols_q2o] /= r4k_gain_q2o data[r1:r2,cols_q3e] /= r4k_gain_q3e data[r1:r2,cols_q3o] /= r4k_gain_q3o data[r3:r4,cols_q4e] /= r4k_gain_q4e data[r3:r4,cols_q4o] /= r4k_gain_q4o else: if AT_MDM: for r in range(r1,r2+1): for c in cols_q1: data[r,c] /= mdm4k_gain_q1 for c in cols_q2: data[r+rowlen,c] /= mdm4k_gain_q2 for c in cols_q2: data[r,c] /= mdm4k_gain_q3 for c in cols_q2: data[r+rowlen,c] /= mdm4k_gain_q4 else: data[r1:r2,cols_q1] /= mdm4k_gain_q1 data[r3:r4,cols_q2] /= mdm4k_gain_q2 data[r1:r2,cols_q3] /= mdm4k_gain_q3 data[r3:r4,cols_q4] /= mdm4k_gain_q4 ########################################################################## # Write the output file ########################################################################## fitsfile[0].data = data[r1:r4+1,c1:c4+1] OverscanKeyComment = 'Overscan by proc4k.py v%s (%s)' % (versNum, versDate) GainKeyValue = 'Relative' GainKeyComment = 'Gain removed by proc4k.py' #BiasKeyValue = '%s' % (versNum) #BiasKeyComment = 'Gain removed by proc4k.py' if OSMOS: # prevent a pyfits error if these are not assigned values try: fitsfile[0].header['MISFILT'] = -1 fitsfile[0].header['MISFLTID'] = -1 except: if Debug: print "Note: MISFILT and MISFLTID keywords not found" fitsfile[0].header.update('BIASPROC', OverscanKeyValue, OverscanKeyComment) #fitsfile[0].header.update('BIASVER', BiasKeyValue, BiasKeyComment) #if R4K: #fitsfile[0].header.update('BIASQ1E', bq1e, 'Bias subtracted from Q1E') #fitsfile[0].header.update('BIASQ1O', bq1o, 'Bias subtracted from Q1O') #fitsfile[0].header.update('BIASQ2E', bq2e, 'Bias subtracted from Q2E') #fitsfile[0].header.update('BIASQ2O', bq2o, 'Bias subtracted from Q2O') #fitsfile[0].header.update('BIASQ3E', bq3e, 'Bias subtracted from Q3E') #fitsfile[0].header.update('BIASQ3O', bq3o, 'Bias subtracted from Q3O') #fitsfile[0].header.update('BIASQ4E', bq4e, 'Bias subtracted from Q4E') #fitsfile[0].header.update('BIASQ4O', bq4o, 'Bias subtracted from Q4O') #else: #fitsfile[0].header.update('BIASQ1', bq1, 'Bias subtracted from Q1') #fitsfile[0].header.update('BIASQ2', bq2, 'Bias subtracted from Q2') #fitsfile[0].header.update('BIASQ3', bq3, 'Bias subtracted from Q3') #fitsfile[0].header.update('BIASQ4', bq4, 'Bias subtracted from Q4') if Gain: if R4K: fitsfile[0].header.update('GAINPROC', GainKeyValue, GainKeyComment) fitsfile[0].header.update('GainQ1', r4k_gain_q1, 'Gain for Q1') fitsfile[0].header.update('GainQ2', r4k_gain_q2, 'Gain for Q2') fitsfile[0].header.update('GainQ3', r4k_gain_q3, 'Gain for Q3') fitsfile[0].header.update('GainQ4', r4k_gain_q4, 'Gain for Q4') fitsfile[0].header['SECPIX'] = 0.273*ccdxbin outfile = file[:str.find(file, '.fits')]+suffix+'.fits' if os.path.isfile(outfile): print " Warning: Overwriting pre-existing file %s" % (outfile) os.remove(outfile) fitsfile.writeto(outfile) fitsfile.close() # print "%s Done" % (argv[0]) print "%s Done" % (scriptname)
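The comment blocks in the script above spell out the reduction it performs: for each amplifier, median-combine its overscan columns into a per-row bias vector, then subtract either that vector's overall median (BiasSingle) or the per-row values themselves (BiasRow) from the amplifier's image columns. A minimal sketch of that idea, reduced to a single amplifier; the array shape and column ranges below are illustrative placeholders, not the script's actual OSMOS/R4K geometry:

# Minimal sketch of the per-amplifier overscan subtraction described above,
# reduced to a single amplifier. Shapes and column ranges are illustrative.
import numpy as np

def subtract_overscan(data, img_cols, over_cols, per_row=False):
    """Median-combine the overscan columns and subtract from the image columns.

    data:      2-D array (rows x columns) for one amplifier's readout region.
    img_cols,
    over_cols: column index lists, analogous to cols_q1 / cols_over_q1 above.
    per_row:   False mimics BiasSingle (one scalar), True mimics BiasRow.
    """
    bias = np.median(data[:, over_cols], axis=1)    # one bias value per row
    if per_row:
        data[:, img_cols] -= bias[:, np.newaxis]    # BiasRow: row-by-row
    else:
        data[:, img_cols] -= np.median(bias)        # BiasSingle: single scalar
    return data

# e.g. a fake 1048x1112 frame with a 32-column overscan region on the left:
frame = np.random.normal(1000.0, 5.0, (1048, 1112))
subtract_overscan(frame, img_cols=list(range(32, 556)), over_cols=list(range(4, 30)))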
bsd-3-clause
-724,741,215,411,220,500
42.064272
132
0.491682
false
msullivan/advent-of-code
2020/16b.py
1
1812
#!/usr/bin/env python3
import sys
import re


def extract(s):
    return [int(x) for x in re.findall(r'\d+', s)]


def valid(n, r):
    (a, b), (c, d) = r
    return (a <= n <= b) or (c <= n <= d)


def main(args):
    data = [s.strip() for s in sys.stdin]

    fields = {}
    for i, x in enumerate(data):
        if x == "":
            break
        thing = x.split(":")[0]
        a, b, c, d = extract(x)
        fields[thing] = ((a, b), (c, d))

    tickets = []
    for x in data[i:]:
        if ',' not in x:
            continue
        z = [int(y) for y in x.split(',')]
        tickets.append(z)
    my_ticket = tickets[0]
    tickets = tickets[1:]

    cnt = 0
    fucked = []
    good = []
    for t in tickets:
        bad = [v for v in t
               if all(not valid(v, r) for r in fields.values())]
        fucked += bad
        if not bad:
            good.append(t)
    part1 = sum(fucked)

    options = [set(fields.keys()) for _ in range(len(good[0]))]
    for t in good:
        for i, f in enumerate(t):
            opts = set(name for name, rng in fields.items() if valid(f, rng))
            options[i] &= opts

    picks = {}
    while True:
        for i, thing in enumerate(options):
            if len(thing) == 1:
                break
        else:
            assert False
        pick = list(thing)[0]
        picks[pick] = i
        for x in options:
            x.discard(pick)
        if not any(x for x in options):
            break

    print(picks)
    departuresf = [x for x in fields if x.startswith('departure')]
    depts = [my_ticket[picks[f]] for f in departuresf]
    print(depts)
    prod = 1
    for x in depts:
        prod *= x
    print(part1)
    print(prod)
    # print(data)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
mit
-2,483,929,906,540,208,000
20.069767
66
0.480684
false
SeanEstey/Bravo
app/lib/dt.py
1
3699
'''app.dt'''
import pytz
from datetime import datetime, date, time, timedelta

local_tz = pytz.timezone('America/Edmonton')

#-------------------------------------------------------------------------------
def json_serial(obj):
    if isinstance(obj, datetime):
        serial = obj.isoformat()
        return serial
    raise TypeError("Type not serializable")

#-------------------------------------------------------------------------------
def to_utc(obj=None, dt=None, d=None, t=None, to_str=False):
    if obj:
        return convert_obj(obj, to_tz=pytz.utc, to_str=to_str)
    else:
        return to_timezone(pytz.utc, dt=dt, d=d, t=t, to_str=to_str)

#-------------------------------------------------------------------------------
def to_local(obj=None, dt=None, d=None, t=None, to_str=False):
    if obj:
        return convert_obj(obj, to_tz=local_tz, to_str=to_str)
    else:
        return to_timezone(local_tz, dt=dt, d=d, t=t, to_str=to_str)

#-------------------------------------------------------------------------------
def to_timezone(tz, dt=None, d=None, t=None, to_str=False):
    if dt:
        dt = dt.replace(tzinfo=local_tz) if not dt.tzinfo else dt
        dt = dt.astimezone(tz)
        return dt.strftime(to_str) if to_str else dt
    elif d and t:
        dt_ = datetime.combine(d, t)
        dt_ = local_tz.localize(dt_).astimezone(tz)
        return dt_.strftime(to_str) if to_str else dt_
    elif d and not t:
        dt_ = datetime.combine(d, time(0, 0)).replace(tzinfo=local_tz).astimezone(tz)
        return dt_.strftime(to_str) if to_str else dt_

#-------------------------------------------------------------------------------
def d_to_dt(date_):
    return datetime.combine(date_, time())

#-------------------------------------------------------------------------------
def convert_obj(obj, to_tz=None, to_str=False):
    '''Returns a datetime with given timezone.
    Will convert timezones for non-naive datetimes
    @obj: any data structure (dict, list, etc)
    '''
    if isinstance(obj, dict):
        for k, v in obj.iteritems():
            obj[k] = convert_obj(v, to_str=to_str, to_tz=to_tz)
        return obj
    elif isinstance(obj, list):
        for idx, item in enumerate(obj):
            obj[idx] = convert_obj(item, to_str=to_str, to_tz=to_tz)
        return obj
    elif isinstance(obj, datetime):
        tz = to_tz if to_tz else local_tz
        obj = obj.replace(tzinfo=tz) if not obj.tzinfo else obj.astimezone(tz)
        return obj.strftime(to_str) if to_str else obj
    else:
        return obj

#-------------------------------------------------------------------------------
def ddmmyyyy_to_dt(ddmmyyyy):
    '''@date_str: etapestry native dd/mm/yyyy
    '''
    parts = ddmmyyyy.split('/')
    return datetime(int(parts[2]), int(parts[1]), int(parts[0]))

#-------------------------------------------------------------------------------
def ddmmyyyy_to_date(ddmmyyyy):
    '''@date_str: etapestry native dd/mm/yyyy
    '''
    parts = ddmmyyyy.split('/')
    return date(int(parts[2]), int(parts[1]), int(parts[0]))

#-------------------------------------------------------------------------------
def ddmmyyyy_to_local_dt(ddmmyyyy):
    '''@date_str: etapestry native dd/mm/yyyy
    '''
    parts = ddmmyyyy.split('/')
    return to_local(dt=datetime(int(parts[2]), int(parts[1]), int(parts[0])))

#-------------------------------------------------------------------------------
def dt_to_ddmmyyyy(dt):
    return dt.strftime('%d/%m/%Y')

#-------------------------------------------------------------------------------
def ddmmyyyy_to_mmddyyyy(ddmmyyyy):
    p = ddmmyyyy.split('/')
    return '%s/%s/%s' % (p[1], p[0], p[2])
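One caveat worth knowing about the module above: the dt= path goes through datetime.replace(tzinfo=local_tz), which pytz documents as unreliable, since replace() attaches the zone's first (LMT) offset, several minutes off for America/Edmonton, whereas the d=/t= path already uses local_tz.localize(). A hedged usage sketch follows, in Python 2 to match the module's dict.iteritems; the sample values are invented:

# Hypothetical usage of the helpers above (Python 2, matching the module);
# the datetimes are made-up sample values.
from datetime import datetime, date, time

naive = datetime(2016, 4, 21, 11, 34)   # interpreted as America/Edmonton
print to_utc(dt=naive)                  # note the replace() caveat above
print to_utc(d=date(2016, 4, 21), t=time(11, 34), to_str='%Y-%m-%d %H:%M')

# convert_obj walks nested dicts/lists and converts every datetime it finds
payload = {'created': naive, 'events': [{'at': naive}]}
print to_local(obj=payload, to_str='%d/%m/%Y %H:%M')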
gpl-2.0
-5,052,450,258,988,737,000
37.936842
84
0.468775
false
Zahajamaan/Fudulbank
exams/migrations/0002_add_categories_and_exams.py
1
3996
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 17:17
from __future__ import unicode_literals

from django.db import migrations


def add_categories(apps, schema_editor):
    Category = apps.get_model('exams', 'Category')
    Exam = apps.get_model('exams', 'Exam')
    Subject = apps.get_model('exams', 'Subject')

    ksau_hs = Category.objects.create(name="KSAU-HS", slug='ksau-hs')
    com = Category.objects.create(name="College of Medicine", slug='com',
                                  parent_category=ksau_hs)
    basic = Category.objects.create(name="Basic years", slug='basic',
                                    parent_category=com)
    clinical = Category.objects.create(name="Clinical years", slug='clinical',
                                       parent_category=com)

    basic_subjects = ['Anatomy', 'Biochemistry and Immunology', 'Clinical',
                      'EBM', 'Histology', 'Pathology', 'Physiology']
    exam_map = {
        'Foundation': basic_subjects,
        'MSK': basic_subjects,
        'Respiratory': basic_subjects,
        'Hematology': basic_subjects,
        'Cardiology': basic_subjects,
        'Neurology': basic_subjects,
        'Endocrinology': basic_subjects,
        'GIT': basic_subjects,
        'Urology': basic_subjects,
        'Oncology': basic_subjects,
        'Medicine I': ['Emergency medicine', 'Endocrinology',
                       'Gastroenterology & hepatology', 'Infectious diseases',
                       'Nephrology & urinary disorders', 'Respiratory medicine',
                       'Rheumatology', 'General medicine/Others'],
        'Pediatric': ['Developmental & behavior clinical pediatrics',
                      'Endocrinology', 'Genetics and Dysmorphology',
                      'Hematology/oncology', 'Neonatology', 'Neurology',
                      'Pediatric cardiology', 'Pediatric infectious disease',
                      'General Pediatrics/Others'],
        'Surgery I': ['Appendix', 'Breast', 'Gallbladder',
                      'Genitururinary track', 'Hernias', 'Lower GI', 'Pancreas',
                      'Pediatrics', 'Thyroid', 'Trauma & emergency ',
                      'Upper GI', 'Vascualr', 'Others'],
        'Family Medicine': ['Community medicine & communication',
                            'General medicine ', 'Pediatrics', 'Psychiatry',
                            'Women\'s health'],
        'Medicine II': ['Cardiovascular Disease', 'Hematology',
                        'Medical Oncology', 'General Medicine/Others'],
        'Surgery II': ['Anesthesia', 'Orthopedics', 'Plastic Surgery', 'Others'],
        'Obstetrics & Gynecology': ['Obstetrics', 'Gynecology'],
        'Special Senses and Mental Health': ['Neurology', 'Ophthalmology',
                                             'Otolaryngology', 'Psychiatry'],
    }

    for exam in exam_map:
        subjects = exam_map[exam]
        if subjects == basic_subjects:
            category = basic
        else:
            category = clinical
        exam = Exam.objects.create(name=exam, category=category)
        for subject_name in subjects:
            Subject.objects.create(name=subject_name, exam=exam)


class Migration(migrations.Migration):

    dependencies = [
        ('exams', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(add_categories)
    ]
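Because RunPython above is given only a forward function, Django treats this migration as irreversible and `migrate` cannot unapply it. A hedged sketch of a reverse function (the cascade behaviour is an assumption about the FK definitions, which live in 0001_initial and are not shown here):

# Hedged sketch: making the data migration reversible. Deleting the root
# category is assumed to cascade to child categories, exams and subjects
# (Django 1.11 ForeignKeys cascade by default).
def remove_categories(apps, schema_editor):
    Category = apps.get_model('exams', 'Category')
    Category.objects.filter(slug='ksau-hs').delete()

# and then register both directions:
#     migrations.RunPython(add_categories, remove_categories)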
agpl-3.0
-5,443,299,401,549,826,000
44.931034
114
0.477227
false
openstack/networking-odl
networking_odl/common/config.py
1
3889
# Copyright (c) 2014 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from networking_odl._i18n import _

odl_opts = [
    cfg.StrOpt('url',
               help=_("HTTP URL of OpenDaylight REST interface.")),
    cfg.StrOpt('username',
               help=_("HTTP username for authentication.")),
    cfg.StrOpt('password', secret=True,
               help=_("HTTP password for authentication.")),
    cfg.IntOpt('timeout', default=10,
               help=_("HTTP timeout in seconds.")),
    cfg.IntOpt('session_timeout', default=30,
               help=_("Tomcat session timeout in minutes.")),
    cfg.FloatOpt('sync_timeout', default=10,
                 help=_("Sync thread timeout in seconds or fraction.")),
    cfg.IntOpt('retry_count', default=5,
               help=_("Number of times to retry a row before failing.")),
    cfg.IntOpt('maintenance_interval', default=300,
               help=_("Journal maintenance operations interval in seconds.")),
    cfg.IntOpt('completed_rows_retention', default=0,
               help=_("Time to keep completed rows (in seconds). "
                      "For performance reasons it's not recommended to "
                      "change this from the default value (0) which "
                      "indicates completed rows aren't kept. "
                      "This value will be checked every maintenance_interval "
                      "by the cleanup thread. To keep completed rows "
                      "indefinitely, set the value to -1")),
    cfg.BoolOpt('enable_lightweight_testing', default=False,
                help=_('Test without real ODL.')),
    cfg.StrOpt('port_binding_controller', default='pseudo-agentdb-binding',
               help=_('Name of the controller to be used for port binding.')),
    cfg.IntOpt('processing_timeout', default=100,
               help=_("Time in seconds to wait before a "
                      "processing row is marked back to pending.")),
    cfg.StrOpt('odl_hostconf_uri',
               help=_("Path for ODL host configuration REST interface"),
               default="/restconf/operational/neutron:neutron/hostconfigs"),
    cfg.IntOpt('restconf_poll_interval', default=30,
               help=_("Poll interval in seconds for getting ODL hostconfig")),
    cfg.BoolOpt('enable_websocket_pseudo_agentdb', default=False,
                help=_('Enable websocket for pseudo-agent-port-binding.')),
    cfg.IntOpt('odl_features_retry_interval', default=5,
               help=_("Wait this many seconds before retrying the odl features"
                      " fetch")),
    cfg.ListOpt('odl_features',
                help='A list of features supported by ODL.'),
    cfg.StrOpt('odl_features_json',
               help='Features supported by ODL, in the json format returned '
                    'by ODL. Note: This config option takes precedence over '
                    'odl_features.'),
    cfg.BoolOpt('enable_dhcp_service', default=False,
                help=_('Enables the networking-odl driver to supply special'
                       ' neutron ports of "dhcp" type to OpenDaylight'
                       ' Controller for its use in providing DHCP Service.')),
]

cfg.CONF.register_opts(odl_opts, "ml2_odl")


def list_opts():
    return [('ml2_odl', odl_opts)]
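Since register_opts is called at import time, importing this module makes the options available under the [ml2_odl] group of the global cfg.CONF. A minimal hedged usage sketch; the config-file path and sample values are illustrative only:

# Minimal sketch of reading these options once registered.
from oslo_config import cfg

from networking_odl.common import config  # noqa: registers odl_opts on import

# e.g. with an oslo.config file containing:
#   [ml2_odl]
#   url = http://127.0.0.1:8080/controller/nb/v2/neutron
#   username = admin
cfg.CONF(['--config-file', '/etc/neutron/plugins/ml2/ml2_conf.ini'])
print(cfg.CONF.ml2_odl.url)
print(cfg.CONF.ml2_odl.timeout)   # -> 10 unless overridden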
apache-2.0
-5,828,423,179,853,321,000
47.012346
79
0.610954
false
tbttfox/TwistyTools
ttLib/Draw/DrawSide.py
1
1401
#!/usr/bin/python
from DrawBase import DrawBase
from OpenGL.GL import *


class DrawSide(DrawBase):
    def __init__(self, *args, **kwargs):
        super(DrawSide, self).__init__(*args, **kwargs)
        self.inrad = None
        self.outrad = None

    @property
    def glID(self):
        if self._glID is None:
            pn = (self.thing.posneg * 2) - 1
            arcPoints = self.thing.arc.subdivide(angleHint=10)
            arcNormals = [(self.thing.arc.c - ap) * pn for ap in arcPoints]
            self._glID = glGenLists(1)
            glNewList(self._glID, GL_COMPILE)
            glBegin(GL_TRIANGLE_STRIP)
            for i in range(len(arcPoints)):
                glNormal3f(*arcNormals[i])
                glVertex3f(*(arcPoints[i] * self.inrad))
                glVertex3f(*(arcPoints[i] * self.outrad))
            glEnd()
            glEndList()
        return self._glID

    def draw(self, ds):
        if ds.inrad != self.inrad or ds.outrad != self.outrad:
            # update the object any time the radii change
            self.inrad = ds.inrad
            self.outrad = ds.outrad
            if self._glID:
                glDeleteLists(self._glID, 1)
                self._glID = None
        scale = ds.scale
        material = ds.material
        glPushMatrix()
        glScalef(scale, scale, scale)
        glCallList(self.glID)
        glPopMatrix()
gpl-3.0
-6,870,017,515,603,258,000
30.840909
73
0.540328
false
abhijithanilkumar/ns-3-AppStore
src/backend/views.py
1
24061
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.shortcuts import get_object_or_404, render from django.contrib.auth.decorators import login_required from .forms import CreateAppForm, EditAppForm, ReleaseForm, \ InstallationForm, MaintenanceForm, EditDetailsForm, \ DownloadForm, DevelopmentForm, ScreenshotForm from profiles.models import Profile from django.apps import apps from util.img_util import scale_img from django.views.generic.edit import DeleteView from django.urls import reverse from django.contrib.auth.decorators import permission_required @login_required @permission_required('apps.add_app', login_url='/error/denied/') def createApp(request): profile = Profile.objects.get(user=request.user) if request.user.is_staff or profile.moderated: if request.method == 'GET': form = CreateAppForm() elif request.method == 'POST': form = CreateAppForm(request.POST) if form.is_valid(): new_app = form.save() new_app.save() context = { 'message': "New App Page created Successfully!", 'go_back_to_url': "/app/" + new_app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) else: context = { 'message': "You are not authorized to view this page!", 'go_back_to_url': "/", 'go_back_to_title': "Home Page", } return render(request, 'message.html', context) return render(request, 'create.html', {'form': form}) @login_required def editApp(request, num): App = apps.get_model('apps', 'App') try: edit_app = App.objects.get(id=num) except BaseException: context = { 'message': "Requested App does not Exist!", 'go_back_to_url': "/app/" + edit_app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) editors = edit_app.editors.all() if request.user in editors or request.user.is_staff: if request.method == 'GET': form = EditAppForm(instance=edit_app) elif request.method == 'POST': form = EditAppForm(request.POST, request.FILES, instance=edit_app) if form.is_valid(): edited_app = form.save(commit=False) cleaned_data = form.clean() tags = cleaned_data['tags'] for tag in tags: edited_app.tags.add(tag) if 'icon' in request.FILES: icon_file = request.FILES['icon'] edited_app.icon = scale_img( icon_file, icon_file.name, 128, 'both') edited_app.save() context = { 'message': "App Page edited Successfully!", 'go_back_to_url': "/app/" + edit_app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) else: context = { 'message': "You are not authorized to view this page!", 'go_back_to_url': "/app/" + edit_app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) return render(request, 'edit.html', {'form': form}) @login_required def createRelease(request, num): App = apps.get_model('apps', 'App') try: release_app = App.objects.get(id=num) except BaseException: context = { 'message': "Requested App does not Exist!", 'go_back_to_url': "/app/" + release_app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) editors = release_app.editors.all() if request.user in editors or request.user.is_staff: if request.method == 'GET': form = ReleaseForm() elif request.method == 'POST': form = ReleaseForm(request.POST, request.FILES) if form.is_valid(): new_release = form.save(commit=False) new_release.app = release_app new_release.save() context = { 'message': "Release added Successfully!", 'go_back_to_url': "/app/" + release_app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) else: context = { 'message':
"You are not authorized to view this page!", 'go_back_to_url': "/app/" + release_app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) return render(request, 'create_release.html', {'form': form}) @login_required def editRelease(request, num): Release = apps.get_model('apps', 'Release') App = apps.get_model('apps', 'App') try: edit_release = Release.objects.get(id=num) except BaseException: context = { 'message': "Requested App does not Exist!", 'go_back_to_url': "/app/" + edit_release.app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) editors = edit_release.app.editors.all() if request.user in editors or request.user.is_staff: if request.method == 'GET': form = ReleaseForm(instance=edit_release) elif request.method == 'POST': form = ReleaseForm( request.POST, request.FILES, instance=edit_release) if form.is_valid(): edited_release = form.save() edited_release.save() context = { 'message': "Release edited Successfully!", 'go_back_to_url': "/app/" + edit_release.app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) else: context = { 'message': "You are not authorized to view this page!", 'go_back_to_url': "/app/" + edit_release.app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) return render(request, 'edit_release.html', {'form': form}) @login_required def modifyInstallation(request, num): existing = False App = apps.get_model('apps', 'App') Installation = apps.get_model('apps', 'Installation') try: app = App.objects.get(id=num) except BaseException: context = { 'message': "Requested App does not Exist!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) if Installation.objects.filter(app=app).exists(): existing = True edit_installation = Installation.objects.get(app=app) if request.user.is_staff or request.user in app.editors.all(): if request.method == 'GET': if existing: form = InstallationForm(instance=edit_installation) else: form = InstallationForm() elif request.method == 'POST': if existing: form = InstallationForm( request.POST, instance=edit_installation) else: form = InstallationForm(request.POST) if form.is_valid(): if existing: edited_installation = form.save() edited_installation.save() else: installation = form.save(commit=False) installation.app = app installation.save() context = { 'message': "Installation modified Successfully!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) else: context = { 'message': "You are not authorized to view this page!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) return render(request, 'installation.html', {'form': form}) @login_required def modifyMaintenance(request, num): existing = False App = apps.get_model('apps', 'App') Maintenance = apps.get_model('apps', 'Maintenance') try: app = App.objects.get(id=num) except BaseException: context = { 'message': "Requested App does not Exist!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) if Maintenance.objects.filter(app=app).exists(): existing = True edit_maintenance = Maintenance.objects.get(app=app) if request.user.is_staff or request.user in app.editors.all(): if request.method == 'GET': if existing: form = MaintenanceForm(instance=edit_maintenance) 
else: form = MaintenanceForm() elif request.method == 'POST': if existing: form = MaintenanceForm(request.POST, instance=edit_maintenance) else: form = MaintenanceForm(request.POST) if form.is_valid(): if existing: edited_maintenance = form.save() edited_maintenance.save() else: maintenance = form.save(commit=False) maintenance.app = app maintenance.save() context = { 'message': "Maintenance notes modified Successfully!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) else: context = { 'message': "You are not authorized to view this page!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) return render(request, 'maintenance.html', {'form': form}) @login_required def editDetails(request, num): App = apps.get_model('apps', 'App') try: edit_app = App.objects.get(id=num) except BaseException: context = { 'message': "Requested App does not Exist!", 'go_back_to_url': "/app/" + edit_app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) editors = edit_app.editors.all() if request.user in editors or request.user.is_staff: if request.method == 'GET': form = EditDetailsForm(instance=edit_app) elif request.method == 'POST': form = EditDetailsForm(request.POST, instance=edit_app) if form.is_valid(): edited_app = form.save() edited_app.save() context = { 'message': "Edited Details Successfully!", 'go_back_to_url': "/app/" + edit_app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) else: context = { 'message': "You are not authorized to view this page!", 'go_back_to_url': "/app/" + edit_app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) return render(request, 'edit_details.html', {'form': form}) @login_required def modifyDownload(request, num): existing = False App = apps.get_model('apps', 'App') Download = apps.get_model('apps', 'Download') Release = apps.get_model('apps', 'Release') try: app = App.objects.get(id=num) except BaseException: context = { 'message': "Requested App does not Exist!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) if Download.objects.filter(app=app).exists(): existing = True edit_download = Download.objects.get(app=app) if request.user.is_staff or request.user in app.editors.all(): if request.method == 'GET': if existing: form = DownloadForm(instance=edit_download, current_app=app) else: form = DownloadForm(current_app=app) elif request.method == 'POST': if existing: form = DownloadForm( request.POST, instance=edit_download, current_app=app) else: form = DownloadForm(request.POST, current_app=app) if form.is_valid(): if existing: instance = form.save() release = None releases = Release.objects.filter(app=instance.app) if releases: release = releases.latest('date') choice = instance.download_option link = "https://ns-apps.washington.edu/" + \ instance.app.name + "/#cy-app-instructions-tab" if choice == 'I': instance.download_link = link elif choice == 'D': instance.download_link = release.url if not release: instance.download_link = link elif choice == 'U': instance.download_link = instance.external_url if not instance.external_url: instance.download_link = link if not instance.default_release: instance.default_release = release instance.save() else: download = form.save(commit=False) download.app = app download.save() context = { 'message': 
"Download Details modified Successfully!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) else: context = { 'message': "You are not authorized to view this page!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) return render(request, 'download.html', {'form': form}) @login_required def modifyDevelopment(request, num): existing = False App = apps.get_model('apps', 'App') Development = apps.get_model('apps', 'Development') try: app = App.objects.get(id=num) except BaseException: context = { 'message': "Requested App does not Exist!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) if Development.objects.filter(app=app).exists(): existing = True edit_development = Development.objects.get(app=app) if request.user.is_staff or request.user in app.editors.all(): if request.method == 'GET': if existing: form = DevelopmentForm(instance=edit_development) else: form = DevelopmentForm() elif request.method == 'POST': if existing: form = DevelopmentForm( request.POST, request.FILES, instance=edit_development) else: form = DevelopmentForm(request.POST, request.FILES) if form.is_valid(): if existing: edited_development = form.save() edited_development.save() else: development = form.save(commit=False) development.app = app development.save() context = { 'message': "Development Version modified Successfully!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) else: context = { 'message': "You are not authorized to view this page!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) return render(request, 'development.html', {'form': form}) def deleteReleasePrompt(request, num): Release = apps.get_model('apps', 'Release') release = Release.objects.get(id=num) app = release.app go_back_to_url = "/app/" + app.name url = "/backend/releasedelconf/" + str(release.id) if request.user.is_staff or request.user in app.editors.all(): context = { 'url': url, 'name': app.name, 'go_back_to_url': go_back_to_url, 'go_back_to_title': "App Page", } return render(request, 'prompt.html', context) else: message = "You are not authorized to view this page!" context = { 'message': message, 'go_back_to_url': go_back_to_url, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) def deleteRelease(request, num): Release = apps.get_model('apps', 'Release') release = Release.objects.get(id=num) app = release.app if request.user.is_staff or request.user in app.editors.all(): release.delete() message = "Release Deleted Successfully!" else: message = "You are not authorized to view this page!" 
go_back_to_url = "/app/" + app.name context = { 'message': message, 'go_back_to_url': go_back_to_url, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) """ class _ScreenshotEditConfig: max_img_size_b = 2 * 1024 * 1024 thumbnail_height_px = 150 def _upload_screenshot(app, request): screenshot_f = request.FILES.get('file') if not screenshot_f: raise ValueError('no file submitted') if screenshot_f.size > _ScreenshotEditConfig.max_img_size_b: raise ValueError('image file is %d bytes but can be at most %d bytes' % (screenshot_f.size, _ScreenshotEditConfig.max_img_size_b)) thumbnail_f = scale_img(screenshot_f, screenshot_f.name, _ScreenshotEditConfig.thumbnail_height_px, 'h') screenshot = Screenshot.objects.create(app = app) screenshot.screenshot.save(screenshot_f.name, screenshot_f) screenshot.thumbnail.save(thumbnail_f.name, thumbnail_f) screenshot.save() def _delete_screenshot(app, request): screenshot_id = request.POST.get('screenshot_id') if not screenshot_id: raise ValueError('no screenshot_id specified') try: screenshot_id = int(screenshot_id) screenshot = Screenshot.objects.get(id = screenshot_id) except ValueError, Screenshot.DoesNotExist: raise ValueError('invalid screenshot_id') screenshot.delete() _ScreenshotActions = { 'upload_screenshot': _upload_screenshot, 'delete_screenshot': _delete_screenshot, } """ @login_required def screenshots(request, num): App = apps.get_model('apps', 'App') try: app = App.objects.get(id=num) except BaseException: context = { 'message': "Requested App does not Exist!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) editors = app.editors.all() if request.user in editors or request.user.is_staff: if request.method == 'GET': form = ScreenshotForm() elif request.method == 'POST': form = ScreenshotForm(request.POST, request.FILES) if form.is_valid(): new_screenshot = form.save(commit=False) new_screenshot.app = app new_screenshot.save() context = { 'message': "Screenshot added Successfully!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) else: context = { 'message': "You are not authorized to view this page!", 'go_back_to_url': "/app/" + app.name, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) return render(request, 'create_screenshot.html', {'form': form}) def deleteScreenshotPrompt(request, num): Screenshot = apps.get_model('apps', 'Screenshot') screenshot = Screenshot.objects.get(id=num) app = screenshot.app go_back_to_url = "/app/" + app.name url = "/backend/screenshotdelconf/" + str(screenshot.id) if request.user.is_staff or request.user in app.editors.all(): context = { 'url': url, 'name': app.name, 'go_back_to_url': go_back_to_url, 'go_back_to_title': "App Page", } return render(request, 'prompt.html', context) else: message = "You are not authorized to view this page!" context = { 'message': message, 'go_back_to_url': go_back_to_url, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) def deleteScreenshot(request, num): Screenshot = apps.get_model('apps', 'Screenshot') screenshot = Screenshot.objects.get(id=num) app = screenshot.app if request.user.is_staff or request.user in app.editors.all(): screenshot.delete() message = "Screenshot Deleted Successfully!" else: message = "You are not authorized to view this page!" 
go_back_to_url = "/app/" + app.name context = { 'message': message, 'go_back_to_url': go_back_to_url, 'go_back_to_title': "App Page", } return render(request, 'message.html', context) """ @login_required def screenshots(request, num): App = apps.get_model('apps', 'App') Screenshot = apps.get_model('apps', 'Screenshot') app = get_object_or_404(App, id=num) if not request.user.is_staff or request.user not in app.editors.all(): return HttpResponseForbidden() if request.method == 'POST': print "hey" action = request.POST.get('action') if not action: return HttpResponseBadRequest('no action specified') if not action in _ScreenshotActions: return HttpResponseBadRequest('action "%s" invalid--must be: %s' % (action, ', '.join(_ScreenshotActions))) try: result = _ScreenshotActions[action](app, request) except ValueError as e: return HttpResponseBadRequest(str(e)) if request.is_ajax(): return json_response(result) screenshots = Screenshot.objects.filter(app=app) print "entered" context = { 'screenshots': screenshots, 'max_file_img_size_b': _ScreenshotEditConfig.max_img_size_b, 'thumbnail_height_px': _ScreenshotEditConfig.thumbnail_height_px, } return render(request, 'screenshots.html', context) """
mit
7,390,528,707,503,364,000
37.436102
138
0.546985
false
otov4its/django-walletone
tests/test_views.py
1
3008
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.core.urlresolvers import reverse
from django.test import TestCase

from walletone.models import WalletOneSuccessPayment
from walletone.signals import payment_received


class PaymentConfirmViewTestCase(TestCase):
    def setUp(self):
        self.data = {
            'WMI_MERCHANT_ID': '165531803223',
            'WMI_PAYMENT_AMOUNT': '1.00',
            'WMI_COMMISSION_AMOUNT': '0.00',
            'WMI_CURRENCY_ID': '643',
            'WMI_PAYMENT_NO': '1',
            'WMI_ORDER_ID': '336077917075',
            'WMI_DESCRIPTION': 'Мой тестовый заказ',
            'WMI_EXPIRED_DATE': '2016-05-21 11:34:34',
            'WMI_CREATE_DATE': '2016-04-21 11:34:34',
            'WMI_UPDATE_DATE': '2016-04-21 11:34:34',
            'WMI_ORDER_STATE': 'Created',
            'WMI_SIGNATURE': 'Q0vBjbeAaoFKTVcjUfkKLw==',
            'EXTRA_FIELD': 'value',
            # Not documented fields,
            # however they may be present in form data
            'WMI_AUTO_ACCEPT': '1',
            'WMI_NOTIFY_COUNT': '0',
        }
        self.confirm_url = reverse('w1-payment-confirm')

    def test_view_returns_400_if_get_request(self):
        response = self.client.get(self.confirm_url)
        self.assertEqual(response.status_code, 400)

    def test_view_returns_200_if_post_request(self):
        response = self.client.post(self.confirm_url, self.data)
        self.assertContains(response, 'WMI_RESULT=OK')

    def test_view_saves_payment_to_db(self):
        self.client.post(self.confirm_url, self.data)
        try:
            WalletOneSuccessPayment.objects.get(WMI_ORDER_ID='336077917075')
        except WalletOneSuccessPayment.DoesNotExist:
            self.fail("payment DoesNotExist")
        except WalletOneSuccessPayment.MultipleObjectsReturned:
            self.fail("payment MultipleObjectsReturned")

    def test_view_sends_a_signal(self):
        def receiver(**kwargs):
            receiver.signal_was_sent = True
            payment = kwargs['payment']
            self.assertEqual(
                payment,
                WalletOneSuccessPayment.objects.get(
                    WMI_ORDER_ID=payment.WMI_ORDER_ID
                )
            )

        receiver.signal_was_sent = False
        payment_received.connect(receiver, sender=WalletOneSuccessPayment)
        self.client.post(self.confirm_url, self.data)
        self.assertTrue(receiver.signal_was_sent)

    def test_view_was_called_twice_with_same_wmi_order_id(self):
        response1 = self.client.post(self.confirm_url, self.data)
        self.assertEqual(response1.content, b'WMI_RESULT=OK')
        response2 = self.client.post(self.confirm_url, self.data)
        self.assertContains(response2, 'not valid')

    def test_view_with_bad_signature(self):
        self.data['WMI_SIGNATURE'] = 'bad'
        response = self.client.post(self.confirm_url, self.data)
        self.assertContains(response, 'not valid')
mit
5,704,211,759,134,297,000
38.368421
76
0.619318
false
studio1247/gertrude
generation/preparation_repas.py
1
5142
# -*- coding: utf-8 -*- # This file is part of Gertrude. # # Gertrude is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # Gertrude is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Gertrude; if not, see <http://www.gnu.org/licenses/>. from ooffice import * class PreparationRepasModifications(object): title = "Préparation des repas" template = "Preparation repas.ods" def __init__(self, debut): self.multi = False self.default_output = "Preparation repas %s.ods" % str(debut) self.debut = debut self.email = None self.site = None def execute(self, filename, dom): if filename != 'content.xml': return None date_fin = self.debut + datetime.timedelta(4) spreadsheet = dom.getElementsByTagName('office:spreadsheet').item(0) table = spreadsheet.getElementsByTagName("table:table")[0] lignes = table.getElementsByTagName("table:table-row") # Les titres des pages ReplaceFields(lignes, GetCrecheFields(database.creche) + [ ('date-debut', self.debut), ('date-fin', date_fin)]) if 1: # Le format utilisé par Les petits potes (séparation adaptation / halte-garderie / mi-temps / plein-temps # Changé en format utilisé par les petits lutins (sans la séparation) table.setAttribute("table:name", '%d %s %d - %d %s %d' % (self.debut.day, months[self.debut.month - 1], date_fin.year, date_fin.day, months[date_fin.month - 1], date_fin.year)) # Les jours ligne = lignes.item(1) cellules = ligne.getElementsByTagName("table:table-cell") for jour in range(5): date = self.debut + datetime.timedelta(jour) cellule = cellules.item(2 + jour) ReplaceFields([cellule], [('date', date)]) # Les lignes inscrits = list(database.creche.select_inscrits(self.debut, date_fin)) inscrits.sort(key=lambda x: GetPrenomNom(x)) self.printPresences(table, inscrits, 3) # La ligne des totaux ligne_total = lignes.item(5) cellules = ligne_total.getElementsByTagName("table:table-cell") for i in range(cellules.length): cellule = cellules.item(i) if cellule.hasAttribute('table:formula'): formule = cellule.getAttribute('table:formula') formule = formule.replace('5', str(3 + len(inscrits))) cellule.setAttribute('table:formula', formule) #print dom.toprettyxml() return None def printPresences(self, dom, inscrits, ligne_depart): template = dom.getElementsByTagName("table:table-row")[ligne_depart] for inscrit in inscrits: line = template.cloneNode(1) cells = line.getElementsByTagName("table:table-cell") ReplaceFields(cells, GetInscritFields(inscrit)) for i, cell in enumerate(cells): day = (i - 3) // 5 date = self.debut + datetime.timedelta(day) age = GetAge(inscrit.naissance, date) fields = [ "tranche_4_6", "tranche_6_12", "tranche_12_18", "tranche_18_24", "tranche_24_"] field = fields[min(age // 6, len(fields) - 1)] journee = inscrit.GetJournee(date) present = journee and IsPresentDuringTranche(journee, database.creche.ouverture * 12, 12.5 * 12) food_needs = {} for food_need in database.creche.food_needs: quantity = getattr(food_need, field) if present else "" food_needs[food_need.label[0:2].lower()] = quantity food_needs[food_need.label[0:2].lower() + "p"] = quantity if inscrit.type_repas == REPAS_PUREE else "" 
food_needs[food_need.label[0:2].lower() + "m"] = quantity if inscrit.type_repas == REPAS_MORCEAUX else "" ReplaceFields(cell, list(food_needs.items())) dom.insertBefore(line, template) dom.removeChild(template) if __name__ == '__main__': import random from document_dialog import StartLibreOffice database.init("../databases/lutins-miniac.db") database.load() modifications = PreparationRepasModifications(datetime.date(2017, 11, 6)) filename = "./test-%f.odt" % random.random() errors = GenerateOODocument(modifications, filename=filename, gauge=None) StartLibreOffice(filename)
gpl-3.0
4,578,275,887,724,565,500
43.66087
188
0.598326
false
cryvate/project-euler
project_euler/solutions/problem_11.py
1
3374
from typing import List

GRID = [[8, 2, 22, 97, 38, 15, 0, 40, 0, 75, 4, 5, 7, 78, 52, 12, 50, 77, 91, 8],
        [49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 4, 56, 62, 0],
        [81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 3, 49, 13, 36, 65],
        [52, 70, 95, 23, 4, 60, 11, 42, 69, 24, 68, 56, 1, 32, 56, 71, 37, 2, 36, 91],
        [22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80],
        [24, 47, 32, 60, 99, 3, 45, 2, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50],
        [32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70],
        [67, 26, 20, 68, 2, 62, 12, 20, 95, 63, 94, 39, 63, 8, 40, 91, 66, 49, 94, 21],
        [24, 55, 58, 5, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72],
        [21, 36, 23, 9, 75, 0, 76, 44, 20, 45, 35, 14, 0, 61, 33, 97, 34, 31, 33, 95],
        [78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 3, 80, 4, 62, 16, 14, 9, 53, 56, 92],
        [16, 39, 5, 42, 96, 35, 31, 47, 55, 58, 88, 24, 0, 17, 54, 24, 36, 29, 85, 57],
        [86, 56, 0, 48, 35, 71, 89, 7, 5, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58],
        [19, 80, 81, 68, 5, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 4, 89, 55, 40],
        [4, 52, 8, 83, 97, 35, 99, 16, 7, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66],
        [88, 36, 68, 87, 57, 62, 20, 72, 3, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69],
        [4, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 8, 46, 29, 32, 40, 62, 76, 36],
        [20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 4, 36, 16],
        [20, 73, 35, 29, 78, 31, 90, 1, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 5, 54],
        [1, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 1, 89, 19, 67, 48]]


def solve(grid: List[List[float]] = GRID, length: int = 4) -> int:
    greatest_product = 1
    rows = len(grid)
    columns = len(grid[0])

    for i in range(rows):
        for j in range(columns):
            if i <= rows - 4:  # vertical
                product = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * \
                    grid[i + 3][j]
                greatest_product = max(greatest_product, product)
            if j <= columns - 4:  # horizontal
                product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * \
                    grid[i][j + 3]
                greatest_product = max(greatest_product, product)
            if i <= rows - 4 and j <= columns - 4:  # both diagonals
                product_up_down = grid[i][j] * grid[i + 1][j + 1] * \
                    grid[i + 2][j + 2] * grid[i + 3][j + 3]
                product_down_up = grid[i + 3][j] * grid[i + 2][j + 1] * \
                    grid[i + 1][j + 2] * grid[i][j + 3]
                greatest_product = max(greatest_product, product_up_down,
                                       product_down_up)

    return greatest_product
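The module defines solve() but never calls it. A small hedged driver; 70600674 is the widely quoted Project Euler 11 answer for this 20x20 grid:

# Quick check of solve() on the puzzle grid (the expected value is the
# commonly cited Project Euler 11 answer, stated here as a cross-check).
if __name__ == '__main__':
    result = solve()
    print(result)
    assert result == 70600674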
mit
6,193,119,133,360,649,000
43.394737
79
0.409603
false
DayGitH/Python-Challenges
DailyProgrammer/DP20161017A.py
1
2725
""" [2016-10-17] Challenge #288 [Easy] Detecting Alliteration https://www.reddit.com/r/dailyprogrammer/comments/57zcbm/20161017_challenge_288_easy_detecting_alliteration/ # Description Alliteration is defined as "the occurrence of the same letter or sound at the beginning of adjacent or closely connected words." It's a stylistic literary device identified by the repeated sound of the first consonant in a series of multiple words, or the repetition of the same sounds or of the same kinds of sounds at the beginning of words or in stressed syllables of a phrase. The first known use of the word to refer to a literary device occurred around 1624. A simple example is "Peter Piper Picked a Peck of Pickled Peppers". ## Note on Stop Words The following are some of the simplest English "stop words", words too common and uninformative to be of much use. In the case of Alliteration, they can come in between the words of interest (as in the Peter Piper example): I a about an and are as at be by com for from how in is it of on or that the this to was what when where who will with the # Sample Input You'll be given an integer on a line, telling you how many lines follow. Then on the subsequent ines, you'll be given a sentence, one per line. Example: 3 Peter Piper Picked a Peck of Pickled Peppers Bugs Bunny likes to dance the slow and simple shuffle You'll never put a better bit of butter on your knife # Sample Output Your program should emit the words from each sentence that form the group of alliteration. Example: Peter Piper Picked Peck Pickled Peppers Bugs Bunny slow simple shuffle better bit butter # Challenge Input 8 The daily diary of the American dream For the sky and the sea, and the sea and the sky Three grey geese in a green field grazing, Grey were the geese and green was the grazing. But a better butter makes a batter better. "His soul swooned slowly as he heard the snow falling faintly through the universe and faintly falling, like the descent of their last end, upon all the living and the dead." Whisper words of wisdom, let it be. They paved paradise and put up a parking lot. So what we gonna have, dessert or disaster? # Challenge Output daily diary sky sea grey geese green grazing better butter batter better soul swooned slowly whisper words wisdom paved paradise dessert disaster **EDITED** to add the word "and" to the stop word list. My bad, a mistake to omit. """ def main(): pass if __name__ == "__main__": main()
mit
6,072,102,307,829,940,000
29.965909
119
0.705688
false
ringsd/projecteuler
python/019.py
1
1276
#easy method
month_days = {
    1:31,
    2:28,
    3:31,
    4:30,
    5:31,
    6:30,
    7:31,
    8:31,
    9:30,
    10:31,
    11:30,
    12:31,
}

week = [0]*8

def counter_week():
    t = 1
    for i in range(1901, 2001):
        print "Year-%d,"%(i),
        for j in range(1, 13):
            days = month_days[j]
            if j == 2 and ((i % 4 == 0 and i % 100 != 0) or i % 400 == 0):
                days = days + 1
            # print "%d-%d,"%(j, days),
            for k in range(1, days + 1):  # advance the weekday counter once per day
                if k == 1 and i != 1900:
                    week[t] = week[t] + 1
                t = t + 1
                if t == 8:
                    t = 1
        print ''

counter_week()
for i in range(1, 8):
    print "%d-%d, "%(i, week[i]),
print ''

#Zeller’s Formula
#
def day_of_week(year, month, day):
    if month == 1 or month == 2:
        year = year - 1
        month = month + 12
    y = year % 100
    c = year / 100
    week = (y + y/4 + c/4 - 2*c + 26*(month+1)/10 + day - 1) % 7
    if week < 0:
        week = (week + 7)%7
    return week

sunday = 0
for i in range(1901, 2001):
    for j in range(1, 13):
        if day_of_week(i, j, 1) == 0:
            sunday = sunday + 1
print sunday
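Both methods above can be cross-checked against the standard library. A hedged sketch in Python 3 syntax (unlike the Python 2 script above); 171 is the well-established Project Euler 19 result for first-of-month Sundays in 1901-2000:

# Cross-check of the Sunday count using datetime; weekday() maps
# Monday=0 .. Sunday=6.
from datetime import date

sundays = sum(1 for year in range(1901, 2001)
                for month in range(1, 13)
                if date(year, month, 1).weekday() == 6)
print(sundays)  # expected: 171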
mit
12,650,953,943,867,154
19.564516
74
0.394035
false
willmurnane/store
cba/models.py
1
13816
import base64
import datetime
import decimal
import logging
import re

from core import make_request

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())


class Item(object):
    def __init__(self, **kwargs):
        required_properties = 'id title price'.split()
        for prop in required_properties:
            if prop not in kwargs:
                raise ValueError('{0} property must be provided'.format(prop))
        for k, v in kwargs.iteritems():
            setattr(self, k, v)

    def as_dict(self):
        logger.debug('as_dict')  # was logging.debug: use the module logger
        properties = []
        d = {}
        for attr in dir(self.__class__):
            ca = getattr(self.__class__, attr)
            if hasattr(ca, 'fget') and hasattr(ca, '__doc__'):
                properties.append(attr)
        for prop in properties:
            ca = getattr(self.__class__, prop)
            key = ca.__doc__
            value = ca.fget(self)
            if key and value:
                d[key] = value
        return d

    @property
    def id(self):
        '''MerchantItemId'''
        return self._id

    @id.setter
    def id(self, value):
        value = unicode(value)
        # anchored with $ so the whole id is checked, not just a prefix
        if not re.match(r'[a-zA-Z0-9]+$', value):
            raise ValueError('id must be alphanumeric only')
        self._id = value

    _sku = None

    @property
    def sku(self):
        '''SKU'''
        return self._sku

    @sku.setter
    def sku(self, value):
        self._sku = unicode(value)

    @property
    def title(self):
        '''Title'''
        return self._title

    @title.setter
    def title(self, value):
        self._title = unicode(value)

    _desc = None

    @property
    def description(self):
        '''Description'''
        return self._desc

    @description.setter
    def description(self, value):
        self._desc = unicode(value)

    @property
    def price(self):
        '''UnitPrice.Amount'''
        return self._price

    @price.setter
    def price(self, value):
        value = decimal.Decimal(unicode(value)).quantize(
            decimal.Decimal('0.01'), rounding=decimal.ROUND_DOWN
        )
        if value < 0:
            raise ValueError('price must be non-negative')
        self._price = value

    _currency = u'USD'

    @property
    def currency(self):
        '''UnitPrice.CurrencyCode'''
        return self._currency

    @currency.setter
    def currency(self, value):
        value = unicode(value).upper()
        if len(value) != 3:
            raise ValueError('Invalid currency code')
        self._currency = value

    _quantity = 1

    @property
    def quantity(self):
        '''Quantity'''
        return self._quantity

    @quantity.setter
    def quantity(self, value):
        value = int(value)
        if value < 1:
            raise ValueError('quantity must be at least 1')
        self._quantity = value

    _url = None

    @property
    def url(self):
        '''URL'''
        return self._url

    @url.setter
    def url(self, value):
        self._url = unicode(value)

    _category = None

    @property
    def category(self):
        '''Category'''
        return self._category

    @category.setter
    def category(self, value):
        self._category = unicode(value)

    _fulfillment = None

    @property
    def fulfillment(self):
        '''FulfillmentNetwork'''
        return self._fulfillment

    @fulfillment.setter
    def fulfillment(self, value):
        value = unicode(value)
        valid = u'MERCHANT AMAZON_NA'.split()  # was missing the call parentheses
        if value not in valid:
            raise ValueError('fulfillment must be one of {0}'.format(valid))
        self._fulfillment = value

    _custom_data = ''

    @property
    def custom_data(self):
        return base64.b64decode(self._custom_data)

    @property
    def custom_data_base64(self):
        '''ItemCustomData'''
        return self._custom_data

    @custom_data.setter
    def custom_data(self, value):
        value = base64.b64encode(unicode(value))
        if len(value) > 1024:
            raise ValueError('custom_data too long')
        self._custom_data = value

    _product_type = None

    @property
    def product_type(self):
        '''ProductType'''
        return self._product_type

    @product_type.setter
    def product_type(self, value):
        self._product_type = unicode(value).upper()

    _weight = None

    @property
    def weight(self):
        '''PhysicalProductAttributes.Weight.Value'''
        return self._weight

    @weight.setter
    def weight(self, value):
        value = decimal.Decimal(unicode(value)).quantize(
            decimal.Decimal('0.0001'), rounding=decimal.ROUND_UP
        )
        if value < 0:
            raise ValueError('weight must be non-negative')
        self._weight = value  # was "self._weight = weight", a NameError

    _weight_unit = None

    @property
    def weight_unit(self):
        '''PhysicalProductAttributes.Weight.Unit'''
        if self.weight is not None and self._weight_unit is None:
            raise ValueError('weight and weight_unit are mutually inclusive')
        return self._weight_unit

    @weight_unit.setter
    def weight_unit(self, value):
        self._weight_unit = unicode(value)

    _condition = None

    @property
    def condition(self):
        '''PhysicalProductAttributes.Condition'''
        return self._condition

    @condition.setter
    def condition(self, value):
        # "New" was listed twice in the original; "Used" was presumably intended
        valid = u'Any Club Collectible New Refurbished Used'.split()
        value = unicode(value)
        if value not in valid:
            raise ValueError('condition must be one of {0}'.format(valid))
        self._condition = value

    _shipping_level = u'Standard'

    @property
    def shipping_level(self):
        '''PhysicalProductAttributes.DeliveryMethod.ServiceLevel'''
        return self._shipping_level

    @shipping_level.setter
    def shipping_level(self, value):
        valid = u'Standard OneDay TwoDay Expedited'.split()
        value = unicode(value)
        if value not in valid:
            raise ValueError('shipping_level must be one of {0}'.format(valid))
        self._shipping_level = value

    _shipping_level_label = None

    @property
    def shipping_level_label(self):
        '''PhysicalProductAttributes.DeliveryMethod.DisplayableShippingLabel'''
        return self._shipping_level_label

    @shipping_level_label.setter
    def shipping_level_label(self, value):
        self._shipping_level_label = unicode(value)

    _shipping_dest = u'#default'

    @property
    def shipping_dest(self):
        '''PhysicalProductAttributes.DeliveryMethod.DestinationName'''
        return self._shipping_dest

    @shipping_dest.setter
    def shipping_dest(self, value):
        self._shipping_dest = unicode(value)

    _shipping_custom_data = ''

    @property
    def shipping_custom_data(self):
        return base64.b64decode(self._shipping_custom_data)

    @property
    def shipping_custom_data_base64(self):
        '''PhysicalProductAttributes.DeliveryMethod.ShippingCustomData'''
        return self._shipping_custom_data

    @shipping_custom_data.setter
    def shipping_custom_data(self, value):
        value = base64.b64encode(unicode(value))
        if len(value) > 1024:
            raise ValueError('shipping_custom_data too long')
        self._shipping_custom_data = value

    _shipping_amount = decimal.Decimal('0')

    @property
    def shipping(self):
        '''PhysicalProductAttributes.ItemCharges.Shipping.Amount'''
        return self._shipping_amount

    @shipping.setter
    def shipping(self, value):
        value = decimal.Decimal(unicode(value)).quantize(
            decimal.Decimal('0.01'),
            rounding=decimal.ROUND_DOWN,  # was "rounding.ROUND_DOWN", a NameError
        )
        if value < 0:
            raise ValueError('shipping amount must be non-negative')
        self._shipping_amount = value

    @property
    def shipping_currency(self):
        '''PhysicalProductAttributes.ItemCharges.Shipping.CurrencyCode'''  # was misspelled "Phyiscal"
        return self.currency


class Order(object):
    def __init__(self, items=None):
        # avoid the shared-mutable-default bug of "items=[]"
        self.items = list(items) if items is not None else []

    def __len__(self):
        return sum(item.quantity for item in self.items)

    def add_item(self, item):
        self.items.append(item)

    @property
    def price(self):
        total = 0
        for item in self.items:
            total += item.price * item.quantity
        return total

    _shipping = None

    @property
    def shipping(self):
        if self._shipping is not None:
            return self._shipping
        total = 0
        for item in self.items:
            total += item.shipping * item.quantity
        return total

    @shipping.setter
    def shipping(self, value):
        self._shipping = decimal.Decimal(str(value)).quantize(
            decimal.Decimal('0.01'), decimal.ROUND_UP,
        )

    _tax = decimal.Decimal('0.00')

    @property
    def tax(self):
        return self._tax

    @tax.setter
    def tax(self, value):
        self._tax = decimal.Decimal(str(value)).quantize(
            decimal.Decimal('0.01'), decimal.ROUND_UP,
        )


class PurchaseContract(object):
    def __init__(self, id=None, settings=None):
        # avoid the shared-mutable-default bug of "settings={}"
        self.settings = dict(settings) if settings is not None else {}
        if id is None:
            id_list, request_id = make_request(
                'POST', 'CreatePurchaseContract',
                {
                    'DirectedId': '',
                    'AuthorizationToken': '',
                },
                self.settings,
            )
            self.id = id_list[0]
        else:
            self.id = id
        self.destinations = {}
        self.order = Order()
        self.completed = False
        self.update()

    def __len__(self):
        return len(self.order)

    def update(self):
        params = {
            'PurchaseContractId': self.id,
        }
        contract_list, request_id = make_request('GET', 'GetPurchaseContract', params, self.settings)
        contract = contract_list[0]
        assert contract.Id.text == self.id
        self.state = contract.State.text
        self.merchant_id = contract.MerchantId.text
        self.marketplace_id = contract.MarketplaceId.text
        self.expires = datetime.datetime.strptime(
            contract.ExpirationTimeStamp.text,
            '%Y-%m-%dT%H:%M:%S.%fZ',
        )
        try:
            for dest in contract.Destinations.Destination[:]:
                address = dest.PhysicalDestinationAttributes.ShippingAddress
                self.add_destination(
                    dest_name=dest.DestinationName.text,
                    name=address.Name.text,
                    address=[],  # ??? Didn't get an address from Amazon while testing this.
                    city=address.City.text,
                    state=address.StateOrProvinceCode.text,
                    zip=address.PostalCode.text,
                    country_code=address.CountryCode.text,
                    phone=address.PhoneNumber.text,
                )
        except AttributeError:
            pass  # No destinations chosen yet

    def add_destination(self, dest_name, name, address, city, state, zip,
                        country_code='US', phone=''):
        if self.completed:
            logger.warn('This contract has already been completed.')  # was logging.warn
        self.destinations[dest_name] = {
            'dest-type': 'PHYSICAL',
            'address': {
                'name': name,
                'address': address,
                'city': city,
                'state': state,
                'zip': zip,
                'country-code': country_code,
                'phone-number': phone,
            },
        }
        return len(self.destinations)

    def _add_items(self):
        params = {
            'PurchaseContractId': self.id,
        }
        for i, item in enumerate(self.order.items):
            index = i + 1  # Amazon wants the first index non-zero
            key_base = 'PurchaseItems.PurchaseItem.{0}.'.format(index)
            item_dict = item.as_dict()  # was named "dict", shadowing the built-in
            logger.debug('Item: {0}, .as_dict() = {1}'.format(item, item_dict))
            for k, v in item_dict.iteritems():
                params[key_base + k] = unicode(v)
            params[key_base + 'MerchantId'] = self.settings['merchant-id']
        make_request('POST', 'SetPurchaseItems', params, self.settings)

    def complete(self):
        self.completed = True
        if not len(self):
            logger.warn('Completing contract on empty order!')
        try:
            self._add_items()
        except:
            self.completed = False
            raise
        try:
            params = {
                'PurchaseContractId': self.id,
                'Charges.Tax.Amount': str(self.order.tax),
                'Charges.Tax.CurrencyCode': 'USD',
                'Charges.Shipping.Amount': str(self.order.shipping),
                'Charges.Shipping.CurrencyCode': 'USD',
            }
            make_request('POST', 'SetContractCharges', params, self.settings)
            order_ids, request_id = make_request(
                'POST', 'CompletePurchaseContract',
                {'PurchaseContractId': self.id},
                self.settings,
            )
            return order_ids
        except:
            self.completed = False
            raise


class Settings(object):
    secret_access_key = ''
    public_access_key = ''
    merchant_id = ''
    marketplace_id = ''
    sandbox = True

    def __init__(self, **kwargs):
        for k, v in kwargs.iteritems():
            setattr(self, k, v)

    def __iter__(self):
        return self.as_dict().iteritems()

    def as_dict(self):
        keys = 'secret_access_key public_access_key merchant_id marketplace_id sandbox'.split()
        transform_key = lambda key: key.replace('_', '-')
        return dict((transform_key(key), getattr(self, key)) for key in keys)
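A quick usage sketch for the module above. Every value here is invented (merchant id, SKU, prices), it runs in the same Python 2 environment as the module, and it deliberately stays on the local validation layer so core.make_request is never called and no Amazon credentials are needed:

    # hypothetical values throughout; exercises only the Item/Order validation layer
    settings = Settings(merchant_id='MYMERCHANTID', sandbox=True)
    item = Item(id='SKU42', title=u'Widget', price='19.999')  # price is truncated down to 19.99
    item.quantity = 2
    order = Order([item])
    assert order.price == decimal.Decimal('39.98')
    assert len(order) == 2        # __len__ counts units, not line items
    assert order.shipping == 0    # falls back to summing per-item shipping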
bsd-3-clause
1,299,372,049,393,676,500
29.770601
103
0.570932
false
joberreiter/pyload
module/plugins/accounts/CzshareCom.py
1
1585
# -*- coding: utf-8 -*-

import re
import time

from module.plugins.internal.Account import Account


class CzshareCom(Account):
    __name__    = "CzshareCom"
    __type__    = "account"
    __version__ = "0.24"
    __status__  = "testing"

    __description__ = """Czshare.com account plugin, now Sdilej.cz"""
    __license__     = "GPLv3"
    __authors__     = [("zoidberg", "[email protected]"),
                       ("stickell", "[email protected]")]

    CREDIT_LEFT_PATTERN = r'<tr class="active">\s*<td>([\d ,]+) (KiB|MiB|GiB)</td>\s*<td>([^<]*)</td>\s*</tr>'

    def grab_info(self, user, password, data):
        premium     = False
        validuntil  = None
        trafficleft = None

        html = self.load("http://sdilej.cz/prehled_kreditu/")

        try:
            m = re.search(self.CREDIT_LEFT_PATTERN, html)
            trafficleft = self.parse_traffic(m.group(1), m.group(2))
            validuntil  = time.mktime(time.strptime(m.group(3), '%d.%m.%y %H:%M'))

        except Exception, e:
            self.log_error(e, trace=True)

        else:
            premium = True

        return {'premium'    : premium,
                'validuntil' : validuntil,
                'trafficleft': trafficleft}

    def signin(self, user, password, data):
        html = self.load('https://sdilej.cz/index.php',
                         post={'Prihlasit'     : "Prihlasit",
                               "login-password": password,
                               "login-name"    : user})

        if '<div class="login' in html:
            self.fail_login()
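As a self-contained sanity check of CREDIT_LEFT_PATTERN, here is a sketch against an invented account-page row (not captured from sdilej.cz, so the real markup may differ):

    import re
    import time

    CREDIT_LEFT_PATTERN = r'<tr class="active">\s*<td>([\d ,]+) (KiB|MiB|GiB)</td>\s*<td>([^<]*)</td>\s*</tr>'
    sample = '<tr class="active"><td>1 024 MiB</td><td>31.12.15 23:59</td></tr>'

    m = re.search(CREDIT_LEFT_PATTERN, sample)
    assert m.group(1) == '1 024' and m.group(2) == 'MiB'   # traffic amount and unit
    expiry = time.mktime(time.strptime(m.group(3), '%d.%m.%y %H:%M'))  # validuntil timestamp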
gpl-3.0
-3,539,204,736,722,824,000
28.351852
110
0.51041
false
rhn/fuseblocks
fuseblocks/cache.py
1
2224
"""Deprecated. Keeping as a reference of the locking implementation.""" import threading import os from .base import OpenFile from .passthrough import Passthrough class CacheFile(OpenFile): def __init__(self, store, mode): self.store = store self.mode = mode def read(self, size, offset): return self.store.data[offset:offset+size] class DataStore: def __init__(self): self.complete_lock = threading.Lock() self.data = None def wait_more(self, length, size): lock = threading.Lock() self.queue.append(length, size, lock) self.lock.acquire() self.lock.acquire() class DataCacheBlock(Passthrough): """Caches file data.""" def __init__(self, backend): self.backend = backend self.data_mapping = {} self.mapping_lock = threading.Lock() # guards data_mapping def get_cache(self, path): self.mapping_lock.acquire() try: if path not in self.data_mapping: open_file = Passthrough.open(self, path, os.O_RDONLY) store = DataStore() self.data_mapping[path] = store with store.complete_lock: self.mapping_lock.release() data = b'' offset = 0 readsize = 2 ** 16 while True: new_data = open_file.read(readsize, offset) data += new_data offset = len(data) if len(new_data) < readsize: break store.data = data open_file.release() else: store = self.data_mapping[path] self.mapping_lock.release() with store.complete_lock: pass return store except: try: # no nice way to only catch errors up until some point in alternative ifs self.mapping_lock.release() except RuntimeError: pass raise def open(self, path, mode): return CacheFile(self.get_cache(path), mode)
gpl-3.0
-8,694,671,872,855,858,000
29.888889
90
0.517986
false
cabalamat/parrot
tokpos.py
1
2469
# tokpos.py
#
# (C) 2000 Philip Hunt
# Released under the GNU General Public Licence
#
# Contains Philip Hunt's extensions to version 0.5 of
# John Aycock's SPARK parsing framework. The purpose of the
# extensions is to cause tokens (instances of PosToken) to
# remember the line and column that they started at in the
# input source file.

# Last altered: 12-Feb-2000
# History:
# 12-Feb-2000 PhilHunt: created

from generic import *

debug = 0

#===========================================================
######  PosScanner  ######
#===========================================================

class PosScanner(GenericScanner):

    def tokenize(self, s):
        colNum = 1
        rowNum = 1
        while s:
            m = self.re.match(s)
            assert m
            groups = m.groups()
            #>>>> phil:
            newColNum = self.colNum = colNum
            newRowNum = self.rowNum = rowNum
            for char in s[:m.end()]:
                if char == '\012':   # '\012' is the octal escape for '\n'
                    newRowNum = newRowNum + 1
                    newColNum = 1
                else:
                    newColNum = newColNum + 1
            #<<<<
            if debug:
                print "$ r:c %s:%s (%s)'%s' %s:%s " % (
                    rowNum, colNum, m.end(), s[:m.end()],
                    newRowNum, newColNum)
            for i in range(len(groups)):
                #print "$$ i=%s groups[%s]={%s} $$" % (i, i, groups[i])
                if groups[i] and self.index2func.has_key(i):
                    self.index2func[i](groups[i])
            #//for i
            s = s[m.end():]
            rowNum = newRowNum
            colNum = newColNum
        #//while s

    def addToken(self, tokenType, tokenValue):
        self.rv.append(PosToken(tokenType, tokenValue,
                                self.rowNum, self.colNum))


#===========================================================
######  PosToken  ######
#===========================================================

class PosToken:

    def __init__(self, type, attr=None, line=0, col=0):
        self.type = type
        self.attr = attr
        self.line = line
        self.col = col

    def __cmp__(self, o):
        return cmp(self.type, o)

    def __repr__(self):
        if self.attr != None:
            x = '(' + self.type + ' ' + repr(self.attr) + ')'
        else:
            x = self.type
        if self.line > 0:
            x = x + (':%s:%s' % (self.line, self.col))
        return x

# end tokpos.py
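The heart of PosScanner.tokenize() is the row/column bookkeeping. The same arithmetic, reduced to a standalone helper so it can be checked without SPARK installed (advance is an invented name, not part of tokpos):

    def advance(text, row=1, col=1):
        """Return the (row, col) position after consuming `text`,
        using the same rule as PosScanner.tokenize."""
        for ch in text:
            if ch == '\n':
                row, col = row + 1, 1
            else:
                col = col + 1
        return row, col

    assert advance("abc") == (1, 4)
    assert advance("ab\ncd") == (2, 3)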
gpl-2.0
-4,943,181,126,879,202,000
26.741573
67
0.44674
false
peeringdb/peeringdb-py
peeringdb/whois.py
1
7186
import collections.abc  # collections.Mapping moved to collections.abc and was removed in Python 3.10
import sys

from peeringdb.util import pretty_speed


class WhoisFormat:
    def __init__(self, fobj=sys.stdout):
        self.fobj = fobj
        self.display_names = {
            "fac_set": "Facilities",
        }

    def mk_fmt(self, *widths):
        return "%-" + "s %-".join(map(str, widths)) + "s"

    def mk_set_headers(self, data, columns):
        """figure out sizes and create header fmt"""
        columns = tuple(columns)
        lens = []
        for key in columns:
            value_len = max(len(str(each.get(key, ""))) for each in data)
            # account for header lengths
            lens.append(max(value_len, len(self._get_name(key))))
        fmt = self.mk_fmt(*lens)
        return fmt

    def _get_name(self, key):
        """get display name for a key, or mangle for display"""
        if key in self.display_names:
            return self.display_names[key]
        return key.capitalize()

    def _get_val(self, data, key):
        """get value from a dict, format if necessary"""
        return data.get(key, "")

    def _get_columns(self, data):
        """get columns from a dict"""
        return data.keys()

    def display_section(self, name):
        self._print(name)
        self._print("=" * len(name))
        self._print("")

    def display_headers(self, fmt, headers):
        self._print(fmt % headers)
        self._print(fmt % tuple("-" * len(x) for x in headers))

    def display_set(self, typ, data, columns):
        """display a list of dicts"""
        columns = tuple(columns)
        self.display_section("%s (%d)" % (self._get_name(typ), len(data)))
        headers = tuple(map(self._get_name, columns))
        fmt = self.mk_set_headers(data, columns)
        self.display_headers(fmt, headers)
        for each in data:
            # iterate over the declared columns, not each.items(), so row
            # values always line up with the headers even for sparse dicts
            row = tuple(self._get_val(each, k) for k in columns)
            self._print(fmt % row)
        self._print("\n")

    def display_field(self, fmt, obj, field, display=None):
        if not display:
            display = self._get_name(field)
        self._print(fmt % (display, obj[field]))

    def check_set(self, data, name):
        if data.get(name, None):
            if hasattr(self, "print_" + name):
                getattr(self, "print_" + name)(data[name])

    def print_net(self, data):
        self.display_section("Network Information")
        fmt = "%-21s: %s"
        self.display_field(fmt, data, "name", "Name")
        self.display_field(fmt, data, "asn", "Primary ASN")
        self.display_field(fmt, data, "aka", "Also Known As")
        self.display_field(fmt, data, "website", "Website")
        self.display_field(fmt, data, "irr_as_set", "IRR AS-SET")
        self.display_field(fmt, data, "info_type", "Network Type")
        self.display_field(fmt, data, "info_prefixes6", "Approx IPv6 Prefixes")
        self.display_field(fmt, data, "info_prefixes4", "Approx IPv4 Prefixes")
        self.display_field(fmt, data, "looking_glass", "Looking Glass")
        self.display_field(fmt, data, "route_server", "Route Server")
        self.display_field(fmt, data, "created", "Created at")
        self.display_field(fmt, data, "updated", "Updated at")
        self._print("\n")

        self.display_section("Peering Policy Information")
        self.display_field(fmt, data, "policy_url", "URL")
        self.display_field(fmt, data, "policy_general", "General Policy")
        self.display_field(fmt, data, "policy_locations", "Location Requirement")
        self.display_field(fmt, data, "policy_ratio", "Ratio Requirement")
        self.display_field(fmt, data, "policy_contracts", "Contract Requirement")
        self._print("\n")

        self.check_set(data, "poc_set")
        self.check_set(data, "netixlan_set")
        self.check_set(data, "netfac_set")

    def print_poc_set(self, data):
        self.display_section("Contact Information")
        fmt = self.mk_fmt(6, 20, 15, 20, 14)
        hdr = ("Role", "Name", "Email", "URL", "Phone")
        self.display_headers(fmt, hdr)
        for poc in data:
            self._print(
                fmt % (
                    poc.get("role", ""),
                    poc.get("name", ""),
                    poc.get("email", ""),
                    poc.get("url", ""),
                    poc.get("phone", ""),
                )
            )
        self._print("\n")

    def print_netfac_set(self, data):
        self.display_section("Private Peering Facilities (%d)" % len(data))
        fmt = self.mk_fmt(51, 8, 15, 2)
        hdr = ("Facility Name", "ASN", "City", "CO")
        self.display_headers(fmt, hdr)
        for each in data:
            self._print(
                fmt % (
                    each.get("name", each.get("id")),
                    each.get("local_asn", ""),
                    each.get("city", ""),
                    each.get("country", ""),
                )
            )
        self._print("\n")

    def print_netixlan_set(self, data):
        self.display_section("Public Peering Points (%d)" % len(data))
        fmt = self.mk_fmt(36, 8, 27, 5)
        hdr = ("Exchange Point", "ASN", "IP Address", "Speed")
        self.display_headers(fmt, hdr)
        for ix in data:
            if ix.get("ipaddr4", None):
                self._print(
                    fmt % (
                        ix.get("name", ix.get("ixlan_id")),
                        ix["asn"],
                        ix["ipaddr4"],
                        pretty_speed(ix["speed"]),
                    )
                )
            if ix.get("ipaddr6", None):
                if ix.get("ipaddr4", None):
                    self._print(fmt % ("", "", ix["ipaddr6"], ""))
                else:
                    self._print(
                        fmt % (
                            ix["name"],
                            ix["asn"],
                            ix["ipaddr6"],
                            pretty_speed(ix["speed"]),
                        )
                    )
        self._print("\n")

    def _print(self, *args):
        """internal print to self.fobj"""
        string = " ".join(args) + "\n"
        self.fobj.write(string)

    def print(self, typ, data):
        """*deprecated* - use display()"""
        return self.display(typ, data)

    def display(self, typ, data):
        """display section of typ with data"""
        if hasattr(self, "print_" + typ):
            getattr(self, "print_" + typ)(data)
        elif not data:
            self._print("{}: {}".format(typ, data))
        elif isinstance(data, collections.abc.Mapping):
            self._print("\n", typ)
            for k, v in data.items():
                self.print(k, v)
        elif isinstance(data, (list, tuple)):
            # tabular data layout for lists of dicts
            if isinstance(data[0], collections.abc.Mapping):
                self.display_set(typ, data, self._get_columns(data[0]))
            else:
                for each in data:
                    self.print(typ, each)
        else:
            self._print("{}: {}".format(typ, data))
        self.fobj.flush()
apache-2.0
-1,643,596,645,495,830,000
33.883495
81
0.49499
false
intel-hpdd/intel-manager-for-lustre
tests/unit/chroma_core/lib/storage_plugin/test_plugin.py
1
11991
import types
import sys

import mock

from chroma_core.services.plugin_runner.resource_manager import PluginSession
from tests.unit.lib.iml_unit_test_case import IMLUnitTestCase
from chroma_core.lib.storage_plugin.api import attributes
from chroma_core.lib.storage_plugin.api import identifiers
from chroma_core.lib.storage_plugin.api import resources
from chroma_core.lib.storage_plugin.api.plugin import Plugin


class TestLocalResource(resources.ScannableResource):
    class Meta:
        identifier = identifiers.ScopedId("name")

    name = attributes.String()


class TestGlobalResource(resources.ScannableResource):
    class Meta:
        identifier = identifiers.GlobalId("name")

    name = attributes.String()


class TestResourceExtraInfo(resources.ScannableResource):
    class Meta:
        identifier = identifiers.GlobalId("name")

    name = attributes.String()
    extra_info = attributes.String()


class TestResourceStatistic(resources.ScannableResource):
    class Meta:
        identifier = identifiers.GlobalId("name")

    name = attributes.String()
    extra_info = attributes.String()


class TestPlugin(Plugin):
    _resource_classes = [TestGlobalResource, TestLocalResource, TestResourceExtraInfo, TestResourceStatistic]

    def __init__(self, *args, **kwargs):
        self.initial_scan_called = False
        self.update_scan_called = False
        self.teardown_called = False
        Plugin.__init__(self, *args, **kwargs)

    def initial_scan(self, root_resource):
        self.initial_scan_called = True

    def update_scan(self, root_resource):
        self.update_scan_called = True

    def teardown(self):
        self.teardown_called = True


class TestCallbacks(IMLUnitTestCase):
    def setUp(self):
        super(TestCallbacks, self).setUp()

        import chroma_core.lib.storage_plugin.manager

        self.orig_manager = chroma_core.lib.storage_plugin.manager.storage_plugin_manager
        chroma_core.lib.storage_plugin.manager.storage_plugin_manager = (
            chroma_core.lib.storage_plugin.manager.StoragePluginManager()
        )

        from chroma_core.lib.storage_plugin.manager import storage_plugin_manager

        storage_plugin_manager._load_plugin(sys.modules[__name__], "test_mod", TestPlugin)

        from chroma_core.models import StorageResourceRecord

        resource_class, resource_class_id = storage_plugin_manager.get_plugin_resource_class(
            "test_mod", "TestGlobalResource"
        )
        record, created = StorageResourceRecord.get_or_create_root(resource_class, resource_class_id, {"name": "test1"})

        from chroma_core.lib.storage_plugin.query import ResourceQuery

        scannable_record = StorageResourceRecord.objects.get()
        self.scannable_resource = ResourceQuery().get_resource(scannable_record)
        self.scannable_global_id = scannable_record.pk

        self.resource_manager = mock.Mock(_sessions={})
        self.plugin = TestPlugin(self.resource_manager, self.scannable_global_id)
        self.resource_manager._sessions[self.scannable_global_id] = PluginSession(
            self.plugin, self.scannable_global_id, 0
        )

    def tearDown(self):
        import chroma_core.lib.storage_plugin.manager

        chroma_core.lib.storage_plugin.manager.storage_plugin_manager = self.orig_manager

    def test_initial(self):
        self.plugin.initial_scan = mock.Mock()
        self.plugin.do_initial_scan()
        self.plugin.initial_scan.assert_called_once()

    def test_update(self):
        self.plugin.do_initial_scan()
        self.plugin.update_scan = mock.Mock()
        self.plugin.do_periodic_update()
        self.plugin.update_scan.assert_called_once()

    def test_teardown(self):
        self.plugin.do_initial_scan()
        self.plugin.teardown = mock.Mock()
        self.plugin.do_teardown()
        self.plugin.teardown.assert_called_once()


class TestAddRemove(IMLUnitTestCase):
    def setUp(self):
        super(TestAddRemove, self).setUp()

        import chroma_core.lib.storage_plugin.manager

        self.orig_manager = chroma_core.lib.storage_plugin.manager.storage_plugin_manager
        chroma_core.lib.storage_plugin.manager.storage_plugin_manager = (
            chroma_core.lib.storage_plugin.manager.StoragePluginManager()
        )

        from chroma_core.lib.storage_plugin.manager import storage_plugin_manager

        storage_plugin_manager._load_plugin(sys.modules[__name__], "test_mod", TestPlugin)

        from chroma_core.models import StorageResourceRecord

        resource_class, resource_class_id = storage_plugin_manager.get_plugin_resource_class(
            "test_mod", "TestGlobalResource"
        )
        record, created = StorageResourceRecord.get_or_create_root(resource_class, resource_class_id, {"name": "test1"})

        from chroma_core.lib.storage_plugin.query import ResourceQuery

        scannable_record = StorageResourceRecord.objects.get()
        self.scannable_resource = ResourceQuery().get_resource(scannable_record)
        self.scannable_global_id = scannable_record.pk

    def tearDown(self):
        import chroma_core.lib.storage_plugin.manager

        chroma_core.lib.storage_plugin.manager.storage_plugin_manager = self.orig_manager

    def _report_resource(self, resource_to_report):
        def _report_a_resource(self, root_resource):
            if resource_to_report is not None:
                self.resource1, _ = self.update_or_create(resource_to_report, name="resource")

        return _report_a_resource

    def _remove_resource(self, resource_to_remove):
        def _remove_a_resource(self, root_resource):
            if resource_to_remove is not None:
                self.remove(resource_to_remove)

        return _remove_a_resource

    def _create_mocked_resource_and_plugin(self):
        self.resource_manager = mock.Mock(_sessions={})
        self.plugin = TestPlugin(self.resource_manager, self.scannable_global_id)
        self.resource_manager._sessions[self.scannable_global_id] = PluginSession(
            self.plugin, self.scannable_global_id, 0
        )

    def test_initial_resources(self):
        # First session for the scannable, 1 resource present
        self._create_mocked_resource_and_plugin()
        self.plugin.initial_scan = types.MethodType(self._report_resource(TestGlobalResource), self.plugin)

        # Should pass the scannable resource and the one we created to session_open
        self.plugin.do_initial_scan()
        self.resource_manager.session_open.assert_called_once_with(
            self.plugin,
            self.plugin._scannable_id,
            [self.plugin._root_resource, self.plugin.resource1],
            self.plugin._update_period,
        )

        # Session reporting 0 resources in initial_scan
        self._create_mocked_resource_and_plugin()
        self.plugin.initial_scan = types.MethodType(self._report_resource(None), self.plugin)
        self.plugin.do_initial_scan()

        # Should just report back the scannable resource to session_open
        self.resource_manager.session_open.assert_called_once_with(
            self.plugin, self.plugin._scannable_id, [self.plugin._root_resource], self.plugin._update_period
        )

    def test_update_add(self):
        self._create_mocked_resource_and_plugin()
        self.plugin.do_initial_scan()

        # Patch in an update_scan which reports one resource
        self.plugin.update_scan = types.MethodType(self._report_resource(TestGlobalResource), self.plugin)

        # Check that doing an update_or_create calls session_add_resources
        self.plugin.do_periodic_update()
        self.resource_manager.session_add_resources.assert_called_once_with(
            self.plugin._scannable_id, [self.plugin.resource1]
        )
        self.resource_manager.session_add_resources.reset_mock()

        # Check that doing a second update_or_create silently does nothing
        self.plugin.do_periodic_update()
        self.assertFalse(self.resource_manager.session_add_resources.called)

    def test_update_remove_global(self):
        self._create_mocked_resource_and_plugin()
        self.plugin.do_initial_scan()

        self.plugin.update_scan = types.MethodType(self._report_resource(TestGlobalResource), self.plugin)
        self.plugin.do_periodic_update()
        self.resource_manager.session_add_resources.assert_called_once_with(
            self.plugin._scannable_id, [self.plugin.resource1]
        )

        self.plugin.update_scan = types.MethodType(self._remove_resource(self.plugin.resource1), self.plugin)
        self.plugin.do_periodic_update()
        self.resource_manager.session_remove_global_resources.assert_called_once_with(
            self.plugin._scannable_id, [self.plugin.resource1]
        )

    def test_update_remove_local(self):
        self._create_mocked_resource_and_plugin()
        self.plugin.do_initial_scan()

        self.plugin.update_scan = types.MethodType(self._report_resource(TestLocalResource), self.plugin)
        self.plugin.do_periodic_update()
        self.resource_manager.session_add_resources.assert_called_once_with(
            self.plugin._scannable_id, [self.plugin.resource1]
        )

        self.plugin.update_scan = types.MethodType(self._remove_resource(self.plugin.resource1), self.plugin)
        self.plugin.do_periodic_update()
        self.resource_manager.session_remove_local_resources.assert_called_once_with(
            self.plugin._scannable_id, [self.plugin.resource1]
        )

    def test_update_modify_parents(self):
        self._create_mocked_resource_and_plugin()
        self.plugin.do_initial_scan()

        # Insert three resources, all having no parents
        def report_unrelated(self, root_resource):
            self.resource1, created = self.update_or_create(TestLocalResource, name="test1")
            self.resource2, created = self.update_or_create(TestLocalResource, name="test2")
            self.resource3, created = self.update_or_create(TestLocalResource, name="test3")

        self.plugin.update_scan = types.MethodType(report_unrelated, self.plugin)
        self.plugin.do_periodic_update()

        # Create a parent relationship between two of them
        def add_parents(self, root_resource):
            self.resource1.add_parent(self.resource2)

        self.plugin.update_scan = types.MethodType(add_parents, self.plugin)
        self.plugin.do_periodic_update()
        self.resource_manager.session_resource_add_parent.assert_called_once_with(
            self.plugin._scannable_id, self.plugin.resource1._handle, self.plugin.resource2._handle
        )

        # Remove the relationship
        def remove_parents(self, root_resource):
            self.resource1.remove_parent(self.resource2)

        self.plugin.update_scan = types.MethodType(remove_parents, self.plugin)
        self.plugin.do_periodic_update()
        self.resource_manager.session_resource_remove_parent.assert_called_once_with(
            self.plugin._scannable_id, self.plugin.resource1._handle, self.plugin.resource2._handle
        )

    def test_update_modify_attributes(self):
        self._create_mocked_resource_and_plugin()
        self.plugin.do_initial_scan()

        # Insert a resource carrying an extra_info attribute
        def report1(self, root_resource):
            self.resource, created = self.update_or_create(TestResourceExtraInfo, name="test1", extra_info="foo")

        self.plugin.update_scan = types.MethodType(report1, self.plugin)
        self.plugin.do_periodic_update()

        # Modify the extra_info attribute
        def modify(self, root_resource):
            self.resource.extra_info = "bar"

        self.plugin.update_scan = types.MethodType(modify, self.plugin)
        self.plugin.do_periodic_update()
        self.resource_manager.session_update_resource.assert_called_once_with(
            self.plugin._scannable_id, self.plugin.resource._handle, {"extra_info": "bar"}
        )
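These tests lean on one idiom throughout: binding a plain function to a live instance with types.MethodType, so only that object is patched and the class stays untouched. A minimal standalone illustration (the class and function names here are invented, not chroma_core API):

    import types

    class Plugin(object):
        pass

    def fake_scan(self, root):
        return "scanned %r" % (root,)

    p = Plugin()
    p.update_scan = types.MethodType(fake_scan, p)  # bound to this instance only
    print(p.update_scan("root"))                    # -> scanned 'root'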
mit
-1,912,038,607,933,590,800
37.432692
120
0.683513
false
yaukwankiu/armor
pattern2.py
1
14030
# -*- coding: utf-8 -*-
# continued from pattern.py
# defining the basic object we will be working with
# Note: makeVideo - it doesn't work yet - i haven't yet solved the issues about opencv
# so i am outputing the slides only for the moment
# 2013-09-23
##############################################################################################
#
#==== imports ================================================================================
# some of the stuff were moved to defaultParameters.py
import copy
import time
import os
import re
import numpy
import numpy as np
import numpy.ma as ma
#import matplotlib
import matplotlib.pyplot as plt
#import scipy.misc.pilutil as smp
#import numpy.fft as fft
#import shutil
#import sys
import pickle
from copy import deepcopy
try:
    from scipy import signal
    from scipy import interpolate
except ImportError:
    #print "Scipy not installed"
    pass

#==== setting up the global parameters========================================================
import defaultParameters as dp
from defaultParameters import *         # bad habits but all these variables are prefixed with
                                        # "default" - or at least i try to make them to
import colourbarQPESUMS                 # the colourbars for the Central Weather Bureau
import colourbarQPESUMSwhiteBackground  # the same as above, with white backgrounds

#==== importing pattern.py====================================================================
from . import pattern
try:
    from dataStreamTools import makeVideo as mv
    import cv   # legacy OpenCV bindings; makeVideo2 below needs cv.CV_FOURCC
except ImportError:
    print "import error! opencv not installed(?!)"
from dataStreamTools import kongrey as kr

dbz = pattern.DBZ
ds = pattern.DBZstream

#==== defining the classes ===================================================================

#class DataStreamSets:
class DataStreamSet:    # correcting a long-standing typo 2014-03-09
    """
    class dataStreamSet:
        DSS = dataStreamSet(ds0, ds1, ds2,...dsN)
        where   ds0         = observations,
                ds1, ds2,.. = models
    with the bare basic methods of analysis and output to panel of 20+ images
    """
    ############################################################################
    # initialisation and basic function calls

    def __init__(self, ds0, *args):
        self.name = ds0.name + '_' + '_'.join([v.name for v in args])
        self.obs = ds0
        self.wrfs = list(args)

    ############################################################################
    # simple building block functions

    def getAllDataTimes(self):
        """get the union of the sets of dataTimes for all streams"""
        dataTimes = set([v.dataTime for v in self.obs])
        for wrf in self.wrfs:
            dataTimes = dataTimes.union([v.dataTime for v in wrf])
        dataTimes = sorted(list(dataTimes))
        return dataTimes

    def getCommonDataTimes(self):
        """get the intersection of the sets of dataTimes for all streams"""
        dataTimes = set([v.dataTime for v in self.obs])
        for wrf in self.wrfs:
            dataTimes = dataTimes.intersection([v.dataTime for v in wrf])
        dataTimes = sorted(list(dataTimes))
        return dataTimes

    def backupMatrices(self):
        self.obs.backupMatrices()
        for wrf in self.wrfs:
            wrf.backupMatrices()

    def restoreMatrices(self):
        self.obs.restoreMatrices()
        for wrf in self.wrfs:
            wrf.restoreMatrices()

    ############################################################################
    # I/O's

    def load(self, stream_key="all", verbose=False, **kwargs):
        if stream_key == "all" or stream_key == "obs":
            print "loading obs"
            self.obs.load(**kwargs)       # was bare "obs", a NameError inside the method
        if stream_key == "all" or stream_key == "wrf" or stream_key == "wrfs":
            print "loading wrfs"
            for wrf in self.wrfs:         # was bare "wrfs"
                wrf.load(**kwargs)

    def unload(self, stream_key="all", verbose=False, **kwargs):
        if stream_key == "all" or stream_key == "obs":
            print "unloading obs"
            self.obs.unload(**kwargs)     # was bare "obs"
        if stream_key == "all" or stream_key == "wrf" or stream_key == "wrfs":
            print "unloading wrfs"
            for wrf in self.wrfs:         # was bare "wrfs"
                wrf.unload(**kwargs)

    def makeVideo2(self, ordering, outputFolder=''):
        """
        make video, with an ordering at each dataTime
        ordering = [[1,2,3,5], [3,4,6,1], ...]
            - first for the first dataTime, second for the second dataTime, etc
        """
        return mv.makeVideo(
            [self.obs] + self.wrfs,   # a list of armor.pattern.DBZstream objects
            panel_cols=5,             # number of columns in the panel
            panel_rows=5,             # no need to be filled
            fourcc=cv.CV_FOURCC('F', 'L', 'V', '1'),
            fps=defaultFps,
            extension='.avi',
            #fourcc = cv.CV_FOURCC('P', 'I', 'M', '1'),
            outputFileName="",
            outputFolder=outputFolder,
            saveFrames=True,          # saving the frames as images
            useCV2=True,
            ordering=ordering,        # ordering of the models
        )

    def makeVideo1(self, ordering, outputFolder=''):
        """
        make video, with a single ordering for each dataStream in its entirety
        ordering = list, e.g. [2,3,4,5,1] <-- WRF2 goes first, then WRF3, WRF4, etc
        """
        ordering = [ordering] * len(self.getAllDataTimes())
        return self.makeVideo2(ordering, outputFolder)   # was the undefined name "outputPath"

    ############################################################################
    # analyses

    def analyse(self, algorithm):
        """
        input:  algorithm
        output: ordering at each dataTime
                ordering = [[1,2,3,5], [3,4,6,1], ...]
                means WRF1, WRF2, WRF3, WRF5 for dataTime1;
                WRFs 3,4,6,1 for the second dataTime, etc
        """
        pass

    def matching(self, algorithm, obsTime="", maxHourDiff=7, **kwargs):
        """
        input:  algorithm   - the function defining the algorithm of matching
                              algorithm(parameters): (obs, wrf) -> score (real number)
                              format of algorithm function:
                                  def alg1(a=pattern.a, ...., **kwargs):
                obsTime     - time at which obs is compared with the wrfs,
                              e.g. '20140612.0200'
                maxHourDiff - the maximal time difference (in hours) between obs
                              and wrfs, e.g. 7 (hours)
                kwargs      - parameters for the algorithm
        output: ranking with scores and optimal timeshifts
        2014-03-07
        """
        if obsTime == "":
            # if the point for matching is not given, pick the first one
            obsTime = self.obs[0].dataTime
        ranking = []
        obs = self.obs
        wrfs = self.wrfs
        for wrf in wrfs:
            x = algorithm(obs, wrf, obsTime=obsTime, maxHourDiff=maxHourDiff, **kwargs)
            score = x['score']
            timeShift = x['timeShift']
            ranking.append(
                {'wrf': wrf.name,
                 'timeShift': timeShift,        # timeShift: in hours
                 'score': score,
                 'dataFolder': wrf.dataFolder,  # for potential disambiguation
                 'obsTime': obsTime,
                 'maxHourDiff': maxHourDiff,    # tag them along just in case
                 }
            )
        ranking.sort(key=lambda v: v['score'], reverse=True)
        return ranking

    def filtering(self, algorithm, stream_key="all", name_key="", verbose=False, **kwargs):
        """
        input:  algorithm   - the function defining the algorithm of filtering
                              algorithm(parameters): changes a.matrix, a.name, no output given
                              format of algorithm function:
                                  def alg1(a=pattern.a, **kwargs):
                stream_key  - keyword for choosing the DBZstreams to be filtered
                              if it's "obs" we filter just all of the self.obs
                              if it's "wrf" or "wrfs" we filter just all of the self.wrfs
                name_key    - keyword for choosing the DBZ patterns to be filtered
                kwargs      - parameters for the algorithm
        output: ranking with scores and optimal timeshifts
        2014-03-07
        """
        obs = self.obs
        wrfs = self.wrfs
        # first filter the obs
        if stream_key == "all" or stream_key == "obs" or stream_key == "OBS":
            for a in obs:
                if name_key in a.name:
                    algorithm(a, **kwargs)   # key line
                    if verbose:
                        print a.name
        if stream_key == "all" or stream_key == "wrf" or stream_key == "wrfs" \
                or stream_key == "WRF" or stream_key == "WRFS":
            for wrf in wrfs:
                for a in wrf:
                    if name_key in a.name:
                        algorithm(a, **kwargs)   # key line
                        if verbose:
                            print a.name


############################################
# constants

DataStreamSets = DataStreamSet   # alias; correcting a long-standing typo 2014-03-09
DSS = DataStreamSet              # alias

""" key example: kongrey """
from dataStreamTools import kongrey as kr
#compref = pattern.DBZstream(dataFolder= kr.obs_folder,
#                    #name="COMPREF.DBZ",
#                    name="",
#                    lowerLeftCornerLatitudeLongitude = kr.obs_lowerLeft,
#                    upperRightCornerLatitudeLongitude = kr.obs_upperRight,
#                    outputFolder= kr.summary_folder,
#                    imageFolder=kr.summary_folder,
#                    key1="",      # keywords to pick out specific files
#                    key2="",      # used only once in the __init__
#                    key3="",
#                    preload=False,
#                    imageExtension = '.png',
#                    dataExtension = '.txt',
#                    )
"""
print 'loading observations'
obs = kr.constructOBSstream(dumping=False)
print 'loading models',
wrfsFolder = kr.defaultWRFdumpsFolder   # '/home/k/ARMOR/data/KONG-REY/summary/WRF[regridded]'
wrfs = []
for i in range(1, 21):
    print i,
    wrf = pickle.load(open(wrfsFolder + 'dbzstream' + ('0'+str(i))[-2:] + '.pydump'))
    #wrf.setDataFolder(asdfasdf)   # haven't defined this function in pattern.DBZstream yet
    wrfs.append(wrf)
kongreyDSS = DSS(obs, *wrfs)
"""

print 'constructing kongreyDSS'
obs = ds(name="COMPREF.DBZ", dataFolder=defaultRootFolder + 'data/KONG-REY/OBS/')
wrfs = []
for i in range(1, 21):
    print i,
    wrfName = 'WRF' + ('0' + str(i))[-2:]   # was "wrfName = name='WRF'+...", leaving a stray "name" binding
    wrf = ds(name=wrfName, key1=wrfName,
             dataFolder=defaultRootFolder + 'data/KONG-REY/summary/WRF[regridded]/')
    wrfs.append(wrf)
kongreyDSS = DSS(obs, *wrfs)


def constructDSS(obsFolder, wrfsFolder):
    obsName = obsFolder.split("/")[-1]
    wrfsName = wrfsFolder.split("/")[-1]
    print 'Constructing DSS from:', obsName, ",", wrfsName
    print obsFolder
    print wrfsFolder
    obs = ds(name=obsName, dataFolder=obsFolder)
    wrfs = []
    for i in range(1, 21):
        print i,
        wrfName = 'WRF' + ('0' + str(i))[-2:]
        wrf = ds(name=wrfName, key1=wrfName, dataFolder=wrfsFolder)
        wrfs.append(wrf)
    dss = DSS(obs, *wrfs)
    return dss


print "constructing march11 - march13 DSS objects"
march11 = constructDSS(dp.defaultRootFolder + "data/march2014/QPESUMS/",
                       dp.defaultRootFolder + "data/march2014/WRFEPS[regridded]/20140311/")
march11.name = "Rainband_11_March_2014"
march11.obs.list = [v for v in march11.obs.list if '20140311' in v.dataTime]

march12 = constructDSS(dp.defaultRootFolder + "data/march2014/QPESUMS/",
                       dp.defaultRootFolder + "data/march2014/WRFEPS[regridded]/20140312/")
march12.name = "Rainband_12_March_2014"
march12.obs.list = [v for v in march12.obs.list if '20140312' in v.dataTime]

march13 = constructDSS(dp.defaultRootFolder + "data/march2014/QPESUMS/",
                       dp.defaultRootFolder + "data/march2014/WRFEPS[regridded]/20140313/")
march13.name = "Rainband_13_March_2014"
march13.obs.list = [v for v in march13.obs.list if '20140313' in v.dataTime]

print "constructing may2014 DSS objects"
may19 = constructDSS(dp.defaultRootFolder + "data/may14/QPESUMS/",
                     dp.defaultRootFolder + "data/may14/WRFEPS19[regridded]/")
may19.name = "Rainband_19_May_2014"
may19.obs.list = [v for v in may19.obs.list if '20140519' in v.dataTime]

may20 = constructDSS(dp.defaultRootFolder + "data/may14/QPESUMS/",
                     dp.defaultRootFolder + "data/may14/WRFEPS20[regridded]/")
may20.name = "Rainband_20_May_2014"
may20.obs.list = [v for v in may20.obs.list if '20140520' in v.dataTime]

may21 = constructDSS(dp.defaultRootFolder + "data/may14/QPESUMS/",
                     dp.defaultRootFolder + "data/may14/WRFEPS21[regridded]/")
may21.name = "Rainband_21_May_2014"
may21.obs.list = [v for v in may21.obs.list if '20140521' in v.dataTime]

may22 = constructDSS(dp.defaultRootFolder + "data/may14/QPESUMS/",
                     dp.defaultRootFolder + "data/may14/WRFEPS22[regridded]/")
may22.name = "Rainband_22_May_2014"
may22.obs.list = [v for v in may22.obs.list if '20140522' in v.dataTime]

may23 = constructDSS(dp.defaultRootFolder + "data/may14/QPESUMS/",
                     dp.defaultRootFolder + "data/may14/WRFEPS23[regridded]/")
may23.name = "Rainband_23_May_2014"
may23.obs.list = [v for v in may23.obs.list if '20140523' in v.dataTime]
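The matching() method above treats its algorithm as a black box returning a dict with 'score' and 'timeShift' keys. A toy algorithm showing just that contract - the scoring rule is meaningless by design, and the call is left commented out because it needs the DSS data on disk:

    def dummy_alg(obs, wrf, obsTime="", maxHourDiff=7, **kwargs):
        # a real algorithm would compare obs and wrf fields around obsTime;
        # this stub only demonstrates the expected return shape
        return {'score': -len(wrf.name), 'timeShift': 0}

    # ranking = kongreyDSS.matching(dummy_alg, obsTime='20130828.0000')
    # ranking[0] -> {'wrf': ..., 'timeShift': 0, 'score': ..., 'obsTime': ..., ...}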
cc0-1.0
-5,918,039,145,204,858,000
39.085714
135
0.549109
false
genome/flow-core
flow/pmon/process_monitor.py
1
2419
from flow.pmon.process_info import ParentProcessInfo
from twisted.internet import reactor
from twisted.web.resource import Resource, NoResource
from twisted.web.server import Site
from twisted.web.static import File

import json
import logging
import os
import psutil
import socket

LOG = logging.getLogger(__name__)


class RootResource(Resource):
    def __init__(self, process_info):
        Resource.__init__(self)
        html_root = os.path.join(os.path.dirname(__file__), "web")
        self.putChild("basic", BasicResource(process_info))
        self.putChild("status", StatusResource(process_info))
        self.putChild("view", File(html_root))


class JSONResource(Resource):
    def __init__(self, process_info):
        Resource.__init__(self)
        self.process_info = process_info

    def get_data(self):
        raise NotImplementedError

    def render_GET(self, request):
        request.setHeader('Access-Control-Allow-Origin', '*')
        request.setHeader('Access-Control-Allow-Methods', 'GET')
        request.setHeader('Content-type', 'application/json')
        data = self.get_data()
        return json.dumps(data)


class BasicResource(JSONResource):
    def get_data(self):
        return self.process_info.get_basic_info()

    def getChild(self, name, request):
        try:
            pid = int(name)
        except ValueError:
            return NoResource()

        if pid == self.process_info.pid:
            return BasicLeafResource(self.process_info)
        elif pid in self.process_info.children.keys():
            return BasicLeafResource(self.process_info.children[pid])
        else:
            return NoResource()


class BasicLeafResource(BasicResource):
    isLeaf = True


class StatusResource(JSONResource):
    isLeaf = True

    def get_data(self):
        return self.process_info.get_process_status()


class ProcessMonitor(object):
    def __init__(self, pid):
        self.port = None
        self.pid = pid

    def start(self, port=0):
        # NB: this monitors the current process (os.getpid()); self.pid is
        # stored in __init__ but never used here.
        process = psutil.Process(os.getpid())
        process_info = ParentProcessInfo(process=process)

        factory = Site(RootResource(process_info))
        iport = reactor.listenTCP(port, factory)

        listen_port = iport.getHost().port
        self.port = listen_port
        listen_host = socket.gethostname()
        LOG.info("Process Monitor at http://%s:%s/view",
                 listen_host, listen_port)
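A sketch of wiring ProcessMonitor into a Twisted process, assuming flow.pmon is importable; the port choice and the 60-second run time are arbitrary demo values, not part of the flow API:

    import logging
    import os

    from twisted.internet import reactor
    from flow.pmon.process_monitor import ProcessMonitor

    logging.basicConfig(level=logging.INFO)

    monitor = ProcessMonitor(os.getpid())
    monitor.start(port=0)                 # port=0 lets the OS pick a free port
    reactor.callLater(60, reactor.stop)   # serve /view, /basic, /status for a minute
    reactor.run()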
agpl-3.0
-6,455,856,908,681,783,000
28.144578
69
0.650682
false