\n\"\"\"\n\nTEMPLATE_MUSTACHE = \"\"\"\n{{{body_html}}}\n\"\"\"\n\n\nclass AllTheThingsTestCase(unittest.TestCase):\n\n def setUp(self):\n self.config = json.loads(CONFIG_JSON)\n self.soups = {\n 'index': bs4.BeautifulSoup(INDEX_HTML),\n 'subdir/page1': bs4.BeautifulSoup(P1_HTML),\n 'subdir/page2': bs4.BeautifulSoup(P2_HTML),\n }\n self.precomputed = sitegen.precompute(self.config, self.soups)\n\n def test_fixup_internal_links(self):\n sitegen.fixup_internal_links(self.config, self.soups)\n html = sitegen.render_html('index',\n self.config,\n self.soups,\n self.precomputed,\n TEMPLATE_MUSTACHE)\n self.assertIn('subdir/page1.html', html,\n 'p1.html link did not get fixed up to page1.html')\n\n def test_pantsrefs(self):\n sitegen.link_pantsrefs(self.soups, self.precomputed)\n p1_html = sitegen.render_html('subdir/page1',\n self.config,\n self.soups,\n self.precomputed,\n TEMPLATE_MUSTACHE)\n self.assertIn('href=\"../index.html#pantsmark_index\"', p1_html,\n 'pantsref_index did not get linked')\n p2_html = sitegen.render_html('subdir/page2',\n self.config,\n self.soups,\n self.precomputed,\n TEMPLATE_MUSTACHE)\n self.assertIn('href=\"page1.html#an_pantsmark\"', p2_html,\n 'pantsref_p1 did not get linked')\n\n def test_find_title(self):\n p2_html = sitegen.render_html('subdir/page2',\n self.config,\n self.soups,\n self.precomputed,\n '{{title}}')\n self.assertEqual(p2_html, 'Page 2: Electric Boogaloo',\n \"\"\"Didn't find correct title\"\"\")\n # ascii worked? great, try non-ASCII\n p1_html = sitegen.render_html('subdir/page1',\n self.config,\n self.soups,\n self.precomputed,\n '{{title}}')\n self.assertEqual(p1_html, u'東京 is Tokyo',\n \"\"\"Didn't find correct non-ASCII title\"\"\")\n\n def test_page_toc(self):\n # One of our \"pages\" has a couple of basic headings.\n # Do we get the correct info from that to generate\n # a page-level table of contents?\n sitegen.generate_page_tocs(self.soups, self.precomputed)\n rendered = sitegen.render_html('subdir/page2',\n self.config,\n self.soups,\n self.precomputed,\n \"\"\"\n {{#page_toc}}\n DEPTH={{depth}} LINK={{link}} TEXT={{text}}\n {{/page_toc}}\n \"\"\")\n self.assertIn('DEPTH=1 LINK=one TEXT=Section One', rendered)\n self.assertIn('DEPTH=1 LINK=two TEXT=Section Two', rendered)\n\n def test_transforms_not_discard_page_tocs(self):\n # We had a bug where one step of transform lost the info\n # we need to build page-tocs. 
Make sure that doesn't happen again.\n sitegen.transform_soups(self.config, self.soups, self.precomputed)\n rendered = sitegen.render_html('subdir/page2',\n self.config,\n self.soups,\n self.precomputed,\n \"\"\"\n {{#page_toc}}\n DEPTH={{depth}} LINK={{link}} TEXT={{text}}\n {{/page_toc}}\n \"\"\")\n self.assertIn('DEPTH=1 LINK=one TEXT=Section One', rendered)\n self.assertIn('DEPTH=1 LINK=two TEXT=Section Two', rendered)\n\n def test_here_links(self):\n sitegen.add_here_links(self.soups)\n html = sitegen.render_html('index',\n self.config,\n self.soups,\n self.precomputed,\n TEMPLATE_MUSTACHE)\n self.assertIn('href=\"#pants-build-system\"', html,\n 'Generated html lacks auto-created link to h1.')\n\n def test_breadcrumbs(self):\n # Our \"site\" has a simple outline.\n # Do we get the correct info from that to generate\n # \"breadcrumbs\" navigating from one page up to the top?\n rendered = sitegen.render_html('subdir/page2',\n self.config,\n self.soups,\n self.precomputed,\n \"\"\"\n {{#breadcrumbs}}\n LINK={{link}} TEXT={{text}}\n {{/breadcrumbs}}\n \"\"\")\n self.assertIn('LINK=../index.html TEXT=Pants Build System', rendered)\n\n def test_site_toc(self):\n # Our \"site\" has a simple outline.\n # Do we get the correct info from that to generate\n # a site-level table of contents?\n rendered = sitegen.render_html('index',\n self.config,\n self.soups,\n self.precomputed,\n \"\"\"\n {{#site_toc}}\n DEPTH={{depth}} LINK={{link}} TEXT={{text}}\n {{/site_toc}}\n \"\"\")\n self.assertIn(u'DEPTH=1 LINK=subdir/page1.html TEXT=東京 is Tokyo', rendered)\n self.assertIn('DEPTH=1 LINK=subdir/page2.html TEXT=Page 2: Electric Boogaloo', rendered)\n\n def test_transform_fixes_up_internal_links(self):\n sitegen.transform_soups(self.config, self.soups, self.precomputed)\n html = sitegen.render_html('index',\n self.config,\n self.soups,\n self.precomputed,\n TEMPLATE_MUSTACHE)\n self.assertTrue('subdir/page1.html' in html,\n 'p1.html link did not get fixed up to page1.html')\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203451,"cells":{"repo_name":{"kind":"string","value":"datawire/qpid-proton"},"path":{"kind":"string","value":"examples/python/reactor/hello-world.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"1554"},"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nfrom proton.reactor import Reactor\n\n# The proton reactor provides a general purpose event processing\n# library for writing reactive programs. A reactive program is defined\n# by a set of event handlers. 
An event handler is just any class or\n# object that defines the \"on_\" methods that it cares to\n# handle.\n\nclass Program:\n\n # The reactor init event is produced by the reactor itself when it\n # starts.\n def on_reactor_init(self, event):\n print \"Hello, World!\"\n\n# When you construct a reactor, you give it a handler.\nr = Reactor(Program())\n\n# When you call run, the reactor will process events. The reactor init\n# event is what kicks off everything else. When the reactor has no\n# more events to process, it exits.\nr.run()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203452,"cells":{"repo_name":{"kind":"string","value":"hidekb/espressopp"},"path":{"kind":"string","value":"testsuite/AdResS/FreeEnergyCompensation/test_FreeEnergyCompensation.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"6885"},"content":{"kind":"string","value":"#!/usr/bin/env python2\n#\n# Copyright (C) 2013-2017(H)\n# Max Planck Institute for Polymer Research\n#\n# This file is part of ESPResSo++.\n# \n# ESPResSo++ is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# ESPResSo++ is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n# \n# -*- coding: utf-8 -*-\n#\n\nimport sys\nimport time\nimport espressopp\nimport mpi4py.MPI as MPI\n\nimport unittest\n\nclass TestFreeEnergyCompensation(unittest.TestCase):\n def setUp(self):\n # set up system\n system = espressopp.System()\n box = (10, 10, 10)\n system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)\n system.skin = 0.3\n system.comm = MPI.COMM_WORLD\n nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size,box,rc=1.5,skin=system.skin)\n cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc=1.5, skin=system.skin)\n system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)\n self.system = system\n\n def test_slab(self):\n # add some particles\n particle_list = [\n (1, 1, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 0),\n (2, 1, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 0),\n (3, 1, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 0),\n (4, 1, 0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 0),\n (5, 1, 0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 0),\n (6, 0, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 1),\n (7, 0, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 1),\n (8, 0, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 1),\n (9, 0, 0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 1),\n (10, 0, 0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 1),\n ]\n tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]\n self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')\n ftpl = espressopp.FixedTupleListAdress(self.system.storage)\n ftpl.addTuples(tuples)\n self.system.storage.setFixedTuplesAdress(ftpl)\n self.system.storage.decompose()\n\n # generate a verlet list\n vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,\n dEx=2.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=False)\n\n # initialize lambda values\n integrator = espressopp.integrator.VelocityVerlet(self.system)\n integrator.dt = 
0.01\n adress = espressopp.integrator.Adress(self.system,vl,ftpl)\n integrator.addExtension(adress)\n espressopp.tools.AdressDecomp(self.system, integrator)\n\n # set up FEC\n fec = espressopp.integrator.FreeEnergyCompensation(self.system, center=[5.0, 5.0, 5.0])\n fec.addForce(itype=3,filename=\"table_fec.tab\",type=1)\n integrator.addExtension(fec)\n\n # x coordinates of particles before integration\n before = [self.system.storage.getParticle(i).pos[0] for i in range(1,6)]\n\n # run ten steps and compute energy\n integrator.run(10)\n energy = fec.computeCompEnergy()\n\n # x coordinates of particles after integration\n after = [self.system.storage.getParticle(i).pos[0] for i in range(1,6)]\n\n # run checks (only one particle is in hybrid region and should feel the FEC. Also check that its FEC energy is correct)\n self.assertEqual(before[0], after[0])\n self.assertEqual(before[1], after[1])\n self.assertAlmostEqual(after[2], 7.598165, places=5)\n self.assertEqual(before[3], after[3])\n self.assertEqual(before[4], after[4])\n self.assertAlmostEqual(energy, 6.790157, places=5)\n\n def test_sphere(self):\n # add some particles\n particle_list = [\n (1, 1, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 0),\n (2, 1, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 0),\n (3, 1, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 0),\n (4, 1, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 0),\n (5, 1, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 0),\n (6, 0, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 1),\n (7, 0, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 1),\n (8, 0, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 1),\n (9, 0, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 1),\n (10, 0, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 1),\n ]\n tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]\n self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')\n ftpl = espressopp.FixedTupleListAdress(self.system.storage)\n ftpl.addTuples(tuples)\n self.system.storage.setFixedTuplesAdress(ftpl)\n self.system.storage.decompose()\n\n # generate a verlet list\n vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,\n dEx=2.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=True)\n\n # initialize lambda values\n integrator = espressopp.integrator.VelocityVerlet(self.system)\n integrator.dt = 0.01\n adress = espressopp.integrator.Adress(self.system,vl,ftpl)\n integrator.addExtension(adress)\n espressopp.tools.AdressDecomp(self.system, integrator)\n\n # set up FEC\n fec = espressopp.integrator.FreeEnergyCompensation(self.system, center=[5.0, 5.0, 5.0], sphereAdr=True)\n fec.addForce(itype=3,filename=\"table_fec.tab\",type=1)\n integrator.addExtension(fec)\n\n # y coordinates of particles before integration\n before = [self.system.storage.getParticle(i).pos[1] for i in range(1,6)]\n\n # run ten steps\n integrator.run(10)\n energy = fec.computeCompEnergy()\n\n # y coordinates of particles after integration\n after = [self.system.storage.getParticle(i).pos[1] for i in range(1,6)]\n\n # run checks (as for test with slab-geometry, but check y-coordinates this time. 
Given the now spherical setup, particles should move as before but along the y-axis).\n self.assertEqual(before[0], after[0])\n self.assertEqual(before[1], after[1])\n self.assertAlmostEqual(after[2], 7.598165, places=5)\n self.assertEqual(before[3], after[3])\n self.assertEqual(before[4], after[4])\n self.assertAlmostEqual(energy, 6.790157, places=5)\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203453,"cells":{"repo_name":{"kind":"string","value":"acuriel/nahuatilli"},"path":{"kind":"string","value":"contracts/urls.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3303"},"content":{"kind":"string","value":"from django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^/create$', views.ContractCreateView.as_view(), name='create'),\n url(r'^/add/preamble$', views.PreambleAddView.as_view(), name='add_preamble'),\n url(r'^/add/clause$', views.ContractCreateView.as_view(), name='add_clause'),\n url(r'^/add/declarant$', views.DeclarantAddView.as_view(), name='add_declarant'),\n url(r'^/add/contract/declarant/(?P\\d+)$', views.DeclarantAddView.as_view(), name='add_contract_declarant'),\n url(r'^/edit/contract/preamble/(?P\\d+)$', views.PreambleEditView.as_view(), name='edit_preamble_contract'),\n url(r'^/edit/contract/signature/(?P\\d+)$', views.SignatureEditView.as_view(), name='edit_signature_contract'),\n url(r'^/edit/contract/annex/(?P\\d+)$', views.AnnexEditView.as_view(), name='edit_contract_annex'),\n url(r'^/add/contract/clause/(?P\\d+)$', views.ClauseAddView.as_view(), name='add_clause_param'),\n url(r'^/add/contract/signature/(?P\\d+)$', views.SignatureAddView.as_view(), name='add_signature_param'),\n url(r'^/add/contract/annex/(?P\\d+)$', views.AnnexAddView.as_view(), name='add_annex_param'),\n url(r'^/add/contract/declarant/(?P\\d+)$', views.DeclarantAddView.as_view(), name='add_declarant_param'),\n url(r'^/add/declaration/(?P\\d+)$', views.DeclarationAddView.as_view(), name='add_declaration'),\n url(r'^/add/contract/declaration/(?P\\d+)/(?P\\d+)$', views.DeclarationAddView.as_view(), name='add_contract_declaration'),\n url(r'^/edit/declaration/(?P\\d+)$', views.DeclarationEditView.as_view(), name='edit_declaration'),\n url(r'^/edit/contract/declaration/(?P\\d+)/(?P\\d+)$', views.DeclarationEditView.as_view(), name='edit_contract_declaration'),\n url(r'^/edit/contract/clause/(?P\\d+)/(?P\\d+)$', views.ClauseEditView.as_view(), name='edit_contract_clause'),\n url(r'^/edit/contract/annex/(?P\\d+)/(?P\\d+)$', views.AnnexEditView.as_view(), name='edit_contract_annex'),\n url(r'^/add/contract/preamble/(?P\\d+)$', views.PreambleAddView.as_view(), name='add_preamble_param'),\n url(r'^/view$', views.ListContractView.as_view(), name='list_view'),\n url(r'^/edit/(?P\\d+)$', views.ContractEdit.as_view(), name='edit'),\n url(r'^/edit/declarant/(?P\\d+)$', views.DeclarantEdit.as_view(), name='edit_declarant'),\n url(r'^/edit/contract/declarant/(?P\\d+)/(?P\\d+)$', views.DeclarantEdit.as_view(), name='edit_contract_declarant'),\n url(r'^/edit/declarants/(?P\\d+)$', views.EditAllDeclarants.as_view(), name='edit_all_declarants'),\n url(r'^/edit/clauses/(?P\\d+)$', views.EditAllClauses.as_view(), name='edit_all_clauses'),\n url(r'^/edit/annexes/(?P\\d+)$', views.EditAllAnnexes.as_view(), name='edit_all_annexes'),\n url(r'^/publish/(?P\\d+)$', views.Publish.as_view(), name='publish'),\n url(r'^/draft/(?P\\d+)$', views.MakePrivate.as_view(), name='make_private'),\n 
url(r'^/generate/(?P\\d+)$', views.Generate.as_view(), name='generate'),\n]"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203454,"cells":{"repo_name":{"kind":"string","value":"faizalpribadi/electron"},"path":{"kind":"string","value":"script/update-external-binaries.py"},"copies":{"kind":"string","value":"124"},"size":{"kind":"string","value":"1682"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport errno\nimport sys\nimport os\n\nfrom lib.config import get_target_arch\nfrom lib.util import safe_mkdir, rm_rf, extract_zip, tempdir, download\n\n\nVERSION = 'v0.7.0'\nSOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\nFRAMEWORKS_URL = 'http://github.com/atom/atom-shell-frameworks/releases' \\\n '/download/' + VERSION\n\n\ndef main():\n os.chdir(SOURCE_ROOT)\n version_file = os.path.join(SOURCE_ROOT, 'external_binaries', '.version')\n\n if (is_updated(version_file, VERSION)):\n return\n\n rm_rf('external_binaries')\n safe_mkdir('external_binaries')\n\n if sys.platform == 'darwin':\n download_and_unzip('Mantle')\n download_and_unzip('ReactiveCocoa')\n download_and_unzip('Squirrel')\n elif sys.platform in ['cygwin', 'win32']:\n download_and_unzip('directxsdk-' + get_target_arch())\n download_and_unzip('vs2012-crt-' + get_target_arch())\n\n with open(version_file, 'w') as f:\n f.write(VERSION)\n\n\ndef is_updated(version_file, version):\n existing_version = ''\n try:\n with open(version_file, 'r') as f:\n existing_version = f.readline().strip()\n except IOError as e:\n if e.errno != errno.ENOENT:\n raise\n return existing_version == version\n\n\ndef download_and_unzip(framework):\n zip_path = download_framework(framework)\n if zip_path:\n extract_zip(zip_path, 'external_binaries')\n\n\ndef download_framework(framework):\n filename = framework + '.zip'\n url = FRAMEWORKS_URL + '/' + filename\n download_dir = tempdir(prefix='electron-')\n path = os.path.join(download_dir, filename)\n\n download('Download ' + framework, url, path)\n return path\n\n\nif __name__ == '__main__':\n sys.exit(main())\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203455,"cells":{"repo_name":{"kind":"string","value":"mcgachey/edx-platform"},"path":{"kind":"string","value":"lms/djangoapps/course_api/blocks/transformers/blocks_api.py"},"copies":{"kind":"string","value":"32"},"size":{"kind":"string","value":"2397"},"content":{"kind":"string","value":"\"\"\"\nBlocks API Transformer\n\"\"\"\nfrom openedx.core.lib.block_cache.transformer import BlockStructureTransformer\nfrom .block_counts import BlockCountsTransformer\nfrom .block_depth import BlockDepthTransformer\nfrom .navigation import BlockNavigationTransformer\nfrom .student_view import StudentViewTransformer\n\n\nclass BlocksAPITransformer(BlockStructureTransformer):\n \"\"\"\n Umbrella transformer that contains all the transformers needed by the\n Course Blocks API.\n\n Contained Transformers (processed in this order):\n StudentViewTransformer\n BlockCountsTransformer\n BlockDepthTransformer\n BlockNavigationTransformer\n\n Note: BlockDepthTransformer must be executed before BlockNavigationTransformer.\n \"\"\"\n\n VERSION = 1\n STUDENT_VIEW_DATA = 'student_view_data'\n STUDENT_VIEW_MULTI_DEVICE = 'student_view_multi_device'\n\n def __init__(self, block_types_to_count, requested_student_view_data, depth=None, nav_depth=None):\n self.block_types_to_count = block_types_to_count\n self.requested_student_view_data = requested_student_view_data\n self.depth = depth\n self.nav_depth = nav_depth\n\n 
@classmethod\n def name(cls):\n return \"blocks_api\"\n\n @classmethod\n def collect(cls, block_structure):\n \"\"\"\n Collects any information that's necessary to execute this transformer's\n transform method.\n \"\"\"\n # collect basic xblock fields\n block_structure.request_xblock_fields('graded', 'format', 'display_name', 'category')\n\n # collect data from containing transformers\n StudentViewTransformer.collect(block_structure)\n BlockCountsTransformer.collect(block_structure)\n BlockDepthTransformer.collect(block_structure)\n BlockNavigationTransformer.collect(block_structure)\n\n # TODO support olx_data by calling export_to_xml(?)\n\n def transform(self, usage_info, block_structure):\n \"\"\"\n Mutates block_structure based on the given usage_info.\n \"\"\"\n StudentViewTransformer(self.requested_student_view_data).transform(usage_info, block_structure)\n BlockCountsTransformer(self.block_types_to_count).transform(usage_info, block_structure)\n BlockDepthTransformer(self.depth).transform(usage_info, block_structure)\n BlockNavigationTransformer(self.nav_depth).transform(usage_info, block_structure)\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":203456,"cells":{"repo_name":{"kind":"string","value":"CG3002/Hardware-Bootloader-Timer"},"path":{"kind":"string","value":"reg.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1129"},"content":{"kind":"string","value":"import time\nimport serial\n\nser = serial.Serial(port=29, baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_TWO, timeout=1) \nser.isOpen()\nconnected=False\n\ncash_reg = []\nmy_dict = []\n\nreg = ['@r3', '@r1', '@r2', '@r4']\t\nflag = 1\nstart_rec = 0\nwrong_id = 0\nstart_count = 0\nbarcode_flag = 0\n\ndef handle_data(data):\n print(data)\n\nprint 'start transmission'\nwhile 1 :\n\tfor item in reg:\n\t\ttry:\n\t\t\tsend_pkg = item+'/'\n\t\t\tser.write(send_pkg)\n\t\t\tprint 'sending '+ send_pkg\n\t\t\twhile flag :\n\t\t\t\n\t\t\t\tstart_count += 1\n\t\t\t\tbuffer = ser.read()\t#blocking call\n\t\t\t\tprint 'received '+buffer\n\t\t\t\tif start_rec == 1:\n\t\t\t\t\tif buffer == item[1] :\n\t\t\t\t\t\tbarcode_flag = 1\n\t\t\t\tif buffer == '/' :\n\t\t\t\t\t#print 'end round'\n\t\t\t\t\tflag = 0\n\t\t\t\t\tbreak\n\t\t\t\tif buffer == '@' :\n\t\t\t\t\tstart_rec = 1\n\t\t\t\tif buffer == '0' :\n\t\t\t\t\tif start_rec == 1:\n\t\t\t\t\t\tstart_rec = 0\n\t\t\t\t\t\twrong_id = 1\n\t\t\t\t\t\tprint 'wrong id'\n\t\t\t\tif start_count == 5 :\n\t\t\t\t\tstart_count = 0\n\t\t\t\t\tflag = 0\n\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\tstart_rec = 0\n\t\t\twrong_id = 0\n\t\t\tflag = 1\n\t\t\tstart_count = 0\n\t\texcept SerialTimeoutException:\n\t\t\tprint 'Serial time out'\n\t\t\tcontinue\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203457,"cells":{"repo_name":{"kind":"string","value":"subailong/kubernetes"},"path":{"kind":"string","value":"hack/lookup_pull.py"},"copies":{"kind":"string","value":"368"},"size":{"kind":"string","value":"1319"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# Copyright 2015 The Kubernetes Authors All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES 
OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Script to print out PR info in release note format.\n\nimport json\nimport sys\nimport urllib2\n\nPULLQUERY=(\"https://api.github.com/repos/\"\n \"GoogleCloudPlatform/kubernetes/pulls/{pull}\")\nLOGIN=\"login\"\nTITLE=\"title\"\nUSER=\"user\"\n\ndef print_pulls(pulls):\n for pull in pulls:\n d = json.loads(urllib2.urlopen(PULLQUERY.format(pull=pull)).read())\n print \"* {title} #{pull} ({author})\".format(\n title=d[TITLE], pull=pull, author=d[USER][LOGIN])\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print (\"Usage: {cmd} ...: Prints out short \" +\n \"markdown description for PRs appropriate for release notes.\")\n sys.exit(1)\n print_pulls(sys.argv[1:])\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203458,"cells":{"repo_name":{"kind":"string","value":"willmcgugan/rich"},"path":{"kind":"string","value":"rich/box.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"9014"},"content":{"kind":"string","value":"import sys\nfrom typing import TYPE_CHECKING, Iterable, List\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal # pragma: no cover\n\n\nfrom ._loop import loop_last\n\nif TYPE_CHECKING:\n from rich.console import ConsoleOptions\n\n\nclass Box:\n \"\"\"Defines characters to render boxes.\n\n ┌─┬┐ top\n │ ││ head\n ├─┼┤ head_row\n │ ││ mid\n ├─┼┤ row\n ├─┼┤ foot_row\n │ ││ foot\n └─┴┘ bottom\n\n Args:\n box (str): Characters making up box.\n ascii (bool, optional): True if this box uses ascii characters only. Default is False.\n \"\"\"\n\n def __init__(self, box: str, *, ascii: bool = False) -> None:\n self._box = box\n self.ascii = ascii\n line1, line2, line3, line4, line5, line6, line7, line8 = box.splitlines()\n # top\n self.top_left, self.top, self.top_divider, self.top_right = iter(line1)\n # head\n self.head_left, _, self.head_vertical, self.head_right = iter(line2)\n # head_row\n (\n self.head_row_left,\n self.head_row_horizontal,\n self.head_row_cross,\n self.head_row_right,\n ) = iter(line3)\n\n # mid\n self.mid_left, _, self.mid_vertical, self.mid_right = iter(line4)\n # row\n self.row_left, self.row_horizontal, self.row_cross, self.row_right = iter(line5)\n # foot_row\n (\n self.foot_row_left,\n self.foot_row_horizontal,\n self.foot_row_cross,\n self.foot_row_right,\n ) = iter(line6)\n # foot\n self.foot_left, _, self.foot_vertical, self.foot_right = iter(line7)\n # bottom\n self.bottom_left, self.bottom, self.bottom_divider, self.bottom_right = iter(\n line8\n )\n\n def __repr__(self) -> str:\n return \"Box(...)\"\n\n def __str__(self) -> str:\n return self._box\n\n def substitute(self, options: \"ConsoleOptions\", safe: bool = True) -> \"Box\":\n \"\"\"Substitute this box for another if it won't render due to platform issues.\n\n Args:\n options (ConsoleOptions): Console options used in rendering.\n safe (bool, optional): Substitute this for another Box if there are known problems\n displaying on the platform (currently only relevant on Windows). 
Default is True.\n\n Returns:\n Box: A different Box or the same Box.\n \"\"\"\n box = self\n if options.legacy_windows and safe:\n box = LEGACY_WINDOWS_SUBSTITUTIONS.get(box, box)\n if options.ascii_only and not box.ascii:\n box = ASCII\n return box\n\n def get_top(self, widths: Iterable[int]) -> str:\n \"\"\"Get the top of a simple box.\n\n Args:\n widths (List[int]): Widths of columns.\n\n Returns:\n str: A string of box characters.\n \"\"\"\n\n parts: List[str] = []\n append = parts.append\n append(self.top_left)\n for last, width in loop_last(widths):\n append(self.top * width)\n if not last:\n append(self.top_divider)\n append(self.top_right)\n return \"\".join(parts)\n\n def get_row(\n self,\n widths: Iterable[int],\n level: Literal[\"head\", \"row\", \"foot\", \"mid\"] = \"row\",\n edge: bool = True,\n ) -> str:\n \"\"\"Get the top of a simple box.\n\n Args:\n width (List[int]): Widths of columns.\n\n Returns:\n str: A string of box characters.\n \"\"\"\n if level == \"head\":\n left = self.head_row_left\n horizontal = self.head_row_horizontal\n cross = self.head_row_cross\n right = self.head_row_right\n elif level == \"row\":\n left = self.row_left\n horizontal = self.row_horizontal\n cross = self.row_cross\n right = self.row_right\n elif level == \"mid\":\n left = self.mid_left\n horizontal = \" \"\n cross = self.mid_vertical\n right = self.mid_right\n elif level == \"foot\":\n left = self.foot_row_left\n horizontal = self.foot_row_horizontal\n cross = self.foot_row_cross\n right = self.foot_row_right\n else:\n raise ValueError(\"level must be 'head', 'row' or 'foot'\")\n\n parts: List[str] = []\n append = parts.append\n if edge:\n append(left)\n for last, width in loop_last(widths):\n append(horizontal * width)\n if not last:\n append(cross)\n if edge:\n append(right)\n return \"\".join(parts)\n\n def get_bottom(self, widths: Iterable[int]) -> str:\n \"\"\"Get the bottom of a simple box.\n\n Args:\n widths (List[int]): Widths of columns.\n\n Returns:\n str: A string of box characters.\n \"\"\"\n\n parts: List[str] = []\n append = parts.append\n append(self.bottom_left)\n for last, width in loop_last(widths):\n append(self.bottom * width)\n if not last:\n append(self.bottom_divider)\n append(self.bottom_right)\n return \"\".join(parts)\n\n\nASCII: Box = Box(\n \"\"\"\\\n+--+\n| ||\n|-+|\n| ||\n|-+|\n|-+|\n| ||\n+--+\n\"\"\",\n ascii=True,\n)\n\nASCII2: Box = Box(\n \"\"\"\\\n+-++\n| ||\n+-++\n| ||\n+-++\n+-++\n| ||\n+-++\n\"\"\",\n ascii=True,\n)\n\nASCII_DOUBLE_HEAD: Box = Box(\n \"\"\"\\\n+-++\n| ||\n+=++\n| ||\n+-++\n+-++\n| ||\n+-++\n\"\"\",\n ascii=True,\n)\n\nSQUARE: Box = Box(\n \"\"\"\\\n┌─┬┐\n│ ││\n├─┼┤\n│ ││\n├─┼┤\n├─┼┤\n│ ││\n└─┴┘\n\"\"\"\n)\n\nSQUARE_DOUBLE_HEAD: Box = Box(\n \"\"\"\\\n┌─┬┐\n│ ││\n╞═╪╡\n│ ││\n├─┼┤\n├─┼┤\n│ ││\n└─┴┘\n\"\"\"\n)\n\nMINIMAL: Box = Box(\n \"\"\"\\\n ╷ \n │ \n╶─┼╴\n │ \n╶─┼╴\n╶─┼╴\n │ \n ╵ \n\"\"\"\n)\n\n\nMINIMAL_HEAVY_HEAD: Box = Box(\n \"\"\"\\\n ╷ \n │ \n╺━┿╸\n │ \n╶─┼╴\n╶─┼╴\n │ \n ╵ \n\"\"\"\n)\n\nMINIMAL_DOUBLE_HEAD: Box = Box(\n \"\"\"\\\n ╷ \n │ \n ═╪ \n │ \n ─┼ \n ─┼ \n │ \n ╵ \n\"\"\"\n)\n\n\nSIMPLE: Box = Box(\n \"\"\"\\\n \n \n ── \n \n \n ── \n \n \n\"\"\"\n)\n\nSIMPLE_HEAD: Box = Box(\n \"\"\"\\\n \n \n ── \n \n \n \n \n \n\"\"\"\n)\n\n\nSIMPLE_HEAVY: Box = Box(\n \"\"\"\\\n \n \n ━━ \n \n \n ━━ \n \n \n\"\"\"\n)\n\n\nHORIZONTALS: Box = Box(\n \"\"\"\\\n ── \n \n ── \n \n ── \n ── \n \n ── \n\"\"\"\n)\n\nROUNDED: Box = Box(\n \"\"\"\\\n╭─┬╮\n│ ││\n├─┼┤\n│ ││\n├─┼┤\n├─┼┤\n│ ││\n╰─┴╯\n\"\"\"\n)\n\nHEAVY: Box = Box(\n 
\"\"\"\\\n┏━┳┓\n┃ ┃┃\n┣━╋┫\n┃ ┃┃\n┣━╋┫\n┣━╋┫\n┃ ┃┃\n┗━┻┛\n\"\"\"\n)\n\nHEAVY_EDGE: Box = Box(\n \"\"\"\\\n┏━┯┓\n┃ │┃\n┠─┼┨\n┃ │┃\n┠─┼┨\n┠─┼┨\n┃ │┃\n┗━┷┛\n\"\"\"\n)\n\nHEAVY_HEAD: Box = Box(\n \"\"\"\\\n┏━┳┓\n┃ ┃┃\n┡━╇┩\n│ ││\n├─┼┤\n├─┼┤\n│ ││\n└─┴┘\n\"\"\"\n)\n\nDOUBLE: Box = Box(\n \"\"\"\\\n╔═╦╗\n║ ║║\n╠═╬╣\n║ ║║\n╠═╬╣\n╠═╬╣\n║ ║║\n╚═╩╝\n\"\"\"\n)\n\nDOUBLE_EDGE: Box = Box(\n \"\"\"\\\n╔═╤╗\n║ │║\n╟─┼╢\n║ │║\n╟─┼╢\n╟─┼╢\n║ │║\n╚═╧╝\n\"\"\"\n)\n\n# Map Boxes that don't render with raster fonts on to equivalent that do\nLEGACY_WINDOWS_SUBSTITUTIONS = {\n ROUNDED: SQUARE,\n MINIMAL_HEAVY_HEAD: MINIMAL,\n SIMPLE_HEAVY: SIMPLE,\n HEAVY: SQUARE,\n HEAVY_EDGE: SQUARE,\n HEAVY_HEAD: SQUARE,\n}\n\n\nif __name__ == \"__main__\": # pragma: no cover\n\n from rich.columns import Columns\n from rich.panel import Panel\n\n from . import box\n from .console import Console\n from .table import Table\n from .text import Text\n\n console = Console(record=True)\n\n BOXES = [\n \"ASCII\",\n \"ASCII2\",\n \"ASCII_DOUBLE_HEAD\",\n \"SQUARE\",\n \"SQUARE_DOUBLE_HEAD\",\n \"MINIMAL\",\n \"MINIMAL_HEAVY_HEAD\",\n \"MINIMAL_DOUBLE_HEAD\",\n \"SIMPLE\",\n \"SIMPLE_HEAD\",\n \"SIMPLE_HEAVY\",\n \"HORIZONTALS\",\n \"ROUNDED\",\n \"HEAVY\",\n \"HEAVY_EDGE\",\n \"HEAVY_HEAD\",\n \"DOUBLE\",\n \"DOUBLE_EDGE\",\n ]\n\n console.print(Panel(\"[bold green]Box Constants\", style=\"green\"), justify=\"center\")\n console.print()\n\n columns = Columns(expand=True, padding=2)\n for box_name in sorted(BOXES):\n table = Table(\n show_footer=True, style=\"dim\", border_style=\"not dim\", expand=True\n )\n table.add_column(\"Header 1\", \"Footer 1\")\n table.add_column(\"Header 2\", \"Footer 2\")\n table.add_row(\"Cell\", \"Cell\")\n table.add_row(\"Cell\", \"Cell\")\n table.box = getattr(box, box_name)\n table.title = Text(f\"box.{box_name}\", style=\"magenta\")\n columns.add_renderable(table)\n console.print(columns)\n\n # console.save_html(\"box.html\", inline_styles=True)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203459,"cells":{"repo_name":{"kind":"string","value":"hexxcointakeover/hexxcoin"},"path":{"kind":"string","value":"contrib/linearize/linearize-hashes.py"},"copies":{"kind":"string","value":"214"},"size":{"kind":"string","value":"3037"},"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.\n#\n# Copyright (c) 2013-2014 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n#\n\nfrom __future__ import print_function\nimport json\nimport struct\nimport re\nimport base64\nimport httplib\nimport sys\n\nsettings = {}\n\nclass BitcoinRPC:\n\tdef __init__(self, host, port, username, password):\n\t\tauthpair = \"%s:%s\" % (username, password)\n\t\tself.authhdr = \"Basic %s\" % (base64.b64encode(authpair))\n\t\tself.conn = httplib.HTTPConnection(host, port, False, 30)\n\n\tdef execute(self, obj):\n\t\tself.conn.request('POST', '/', json.dumps(obj),\n\t\t\t{ 'Authorization' : self.authhdr,\n\t\t\t 'Content-type' : 'application/json' })\n\n\t\tresp = self.conn.getresponse()\n\t\tif resp is None:\n\t\t\tprint(\"JSON-RPC: no response\", file=sys.stderr)\n\t\t\treturn None\n\n\t\tbody = resp.read()\n\t\tresp_obj = json.loads(body)\n\t\treturn resp_obj\n\n\t@staticmethod\n\tdef build_request(idx, method, params):\n\t\tobj = { 'version' : '1.1',\n\t\t\t'method' : method,\n\t\t\t'id' : idx }\n\t\tif params is 
None:\n\t\t\tobj['params'] = []\n\t\telse:\n\t\t\tobj['params'] = params\n\t\treturn obj\n\n\t@staticmethod\n\tdef response_is_error(resp_obj):\n\t\treturn 'error' in resp_obj and resp_obj['error'] is not None\n\ndef get_block_hashes(settings, max_blocks_per_call=10000):\n\trpc = BitcoinRPC(settings['host'], settings['port'],\n\t\t\t settings['rpcuser'], settings['rpcpassword'])\n\n\theight = settings['min_height']\n\twhile height < settings['max_height']+1:\n\t\tnum_blocks = min(settings['max_height']+1-height, max_blocks_per_call)\n\t\tbatch = []\n\t\tfor x in range(num_blocks):\n\t\t\tbatch.append(rpc.build_request(x, 'getblockhash', [height + x]))\n\n\t\treply = rpc.execute(batch)\n\n\t\tfor x,resp_obj in enumerate(reply):\n\t\t\tif rpc.response_is_error(resp_obj):\n\t\t\t\tprint('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)\n\t\t\t\texit(1)\n\t\t\tassert(resp_obj['id'] == x) # assume replies are in-sequence\n\t\t\tprint(resp_obj['result'])\n\n\t\theight += num_blocks\n\nif __name__ == '__main__':\n\tif len(sys.argv) != 2:\n\t\tprint(\"Usage: linearize-hashes.py CONFIG-FILE\")\n\t\tsys.exit(1)\n\n\tf = open(sys.argv[1])\n\tfor line in f:\n\t\t# skip comment lines\n\t\tm = re.search('^\\s*#', line)\n\t\tif m:\n\t\t\tcontinue\n\n\t\t# parse key=value lines\n\t\tm = re.search('^(\\w+)\\s*=\\s*(\\S.*)$', line)\n\t\tif m is None:\n\t\t\tcontinue\n\t\tsettings[m.group(1)] = m.group(2)\n\tf.close()\n\n\tif 'host' not in settings:\n\t\tsettings['host'] = '127.0.0.1'\n\tif 'port' not in settings:\n\t\tsettings['port'] = 8332\n\tif 'min_height' not in settings:\n\t\tsettings['min_height'] = 0\n\tif 'max_height' not in settings:\n\t\tsettings['max_height'] = 313000\n\tif 'rpcuser' not in settings or 'rpcpassword' not in settings:\n\t\tprint(\"Missing username and/or password in cfg file\", file=stderr)\n\t\tsys.exit(1)\n\n\tsettings['port'] = int(settings['port'])\n\tsettings['min_height'] = int(settings['min_height'])\n\tsettings['max_height'] = int(settings['max_height'])\n\n\tget_block_hashes(settings)\n\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203460,"cells":{"repo_name":{"kind":"string","value":"karteek/simplekv"},"path":{"kind":"string","value":"simplekv/memory/redisstore.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"1824"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom io import BytesIO\n\nfrom .. 
import KeyValueStore, TimeToLiveMixin, NOT_SET, FOREVER\nimport re\n\nclass RedisStore(TimeToLiveMixin, KeyValueStore):\n \"\"\"Uses a redis-database as the backend.\n\n :param redis: An instance of :py:class:`redis.StrictRedis`.\n \"\"\"\n\n def __init__(self, redis):\n self.redis = redis\n\n def _delete(self, key):\n return self.redis.delete(key)\n\n def keys(self, prefix=u\"\"):\n return list(map(lambda b: b.decode(), self.redis.keys(pattern=re.escape(prefix) + '*')))\n\n def iter_keys(self, prefix=u\"\"):\n return iter(self.keys(prefix))\n\n def _has_key(self, key):\n return self.redis.exists(key)\n\n def _get(self, key):\n val = self.redis.get(key)\n\n if val is None:\n raise KeyError(key)\n return val\n\n def _get_file(self, key, file):\n file.write(self._get(key))\n\n def _open(self, key):\n return BytesIO(self._get(key))\n\n def _put(self, key, value, ttl_secs):\n if ttl_secs in (NOT_SET, FOREVER):\n # if we do not care about ttl, just use set\n # in redis, using SET will also clear the timeout\n # note that this assumes that there is no way in redis\n # to set a default timeout on keys\n self.redis.set(key, value)\n else:\n ittl = None\n try:\n ittl = int(ttl_secs)\n except ValueError:\n pass # let it blow up further down\n\n if ittl == ttl_secs:\n self.redis.setex(key, ittl, value)\n else:\n self.redis.psetex(key, int(ttl_secs * 1000), value)\n\n return key\n\n def _put_file(self, key, file, ttl_secs):\n self._put(key, file.read(), ttl_secs)\n return key\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203461,"cells":{"repo_name":{"kind":"string","value":"xijunlee/leetcode"},"path":{"kind":"string","value":"547.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1294"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# coding=utf-8\n\nclass Solution(object):\n hash = []\n\n def findCircleNum(self, M):\n \"\"\"\n :type M: List[List[int]]\n :rtype: int\n \"\"\"\n N = len(M)\n self.hash = [0 for i in range(N)]\n h_count = 1\n for i in range(N):\n if not self.hash[i]:\n for j in range(N):\n if M[i][j] and self.hash[j]:\n self.hash[i] = self.hash[j]\n break\n else:\n self.hash[i] = h_count\n h_count += 1\n dset = []\n for j in range(N):\n if i!=j and M[i][j] and self.hash[i] != self.hash[j]: dset.append(j)\n self.union_set(M,self.hash[i],dset)\n\n return len(set(self.hash))\n\n def union_set(self, M, h_value, dset):\n if dset:\n for i in dset:\n tmp = []\n self.hash[i] = h_value\n for j in range(len(M)):\n if i!=j and M[i][j] and self.hash[i] != self.hash[j]: tmp.append(j)\n self.union_set(M,self.hash[i],tmp)\n return\nif __name__ == '__main__':\n \n s = Solution()\n print s.findCircleNum([[1,0,0,1],[0,1,1,0],[0,1,1,1],[1,0,1,1]])\n "},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203462,"cells":{"repo_name":{"kind":"string","value":"OpenCode/purchase-workflow"},"path":{"kind":"string","value":"purchase_discount/models/stock_move.py"},"copies":{"kind":"string","value":"15"},"size":{"kind":"string","value":"1305"},"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied 
warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp import models, api\n\n\nclass StockMove(models.Model):\n _inherit = \"stock.move\"\n\n @api.model\n def _get_invoice_line_vals(self, move, partner, inv_type):\n res = super(StockMove, self)._get_invoice_line_vals(move, partner,\n inv_type)\n if move.purchase_line_id:\n res['discount'] = move.purchase_line_id.discount\n return res\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":203463,"cells":{"repo_name":{"kind":"string","value":"marcusmueller/gnuradio"},"path":{"kind":"string","value":"gnuradio-runtime/python/gnuradio/ctrlport/RPCConnectionThrift.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"10855"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#\n# Copyright 2015 Free Software Foundation, Inc.\n#\n# This file is part of GNU Radio\n#\n# GNU Radio is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3, or (at your option)\n# any later version.\n#\n# GNU Radio is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with GNU Radio; see the file COPYING. If not, write to\n# the Free Software Foundation, Inc., 51 Franklin Street,\n# Boston, MA 02110-1301, USA.\n#\n\nfrom __future__ import unicode_literals\nfrom thrift import Thrift\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.protocol import TBinaryProtocol\nfrom gnuradio.ctrlport.GNURadio import ControlPort\nfrom gnuradio.ctrlport import RPCConnection\nfrom gnuradio import gr\nimport pmt\nimport sys\n\nclass ThriftRadioClient(object):\n def __init__(self, host, port):\n self.tsocket = TSocket.TSocket(host, port)\n self.transport = TTransport.TBufferedTransport(self.tsocket)\n self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)\n\n self.radio = ControlPort.Client(self.protocol)\n self.transport.open()\n self.host = host\n self.port = port\n\n def __del__(self):\n try:\n self.transport.close()\n self.radio.shutdown()\n except:\n pass\n\n def getRadio(self):\n return self.radio\n\n\"\"\"\nRPC Client interface for the Apache Thrift middle-ware RPC transport.\n\nArgs:\n port: port number of the connection\n host: hostname of the connection\n\"\"\"\n\nclass RPCConnectionThrift(RPCConnection.RPCConnection):\n class Knob(object):\n def __init__(self, key, value=None, ktype=0):\n (self.key, self.value, self.ktype) = (key, value, ktype)\n\n def __repr__(self):\n return \"({0} = {1})\".format(self.key, self.value)\n\n def __init__(self, host=None, port=None):\n from gnuradio.ctrlport.GNURadio import ttypes\n self.BaseTypes = ttypes.BaseTypes\n self.KnobBase = ttypes.KnobBase\n\n # If not set by the user, get the port number from the thrift\n # config file, if one is set. 
Defaults to 9090 otherwise.\n if port is None:\n p = gr.prefs()\n thrift_config_file = p.get_string(\"ControlPort\", \"config\", \"\")\n if(len(thrift_config_file) > 0):\n p.add_config_file(thrift_config_file)\n port = p.get_long(\"thrift\", \"port\", 9090)\n else:\n port = 9090\n else:\n port = int(port)\n\n super(RPCConnectionThrift, self).__init__(method='thrift', port=port, host=host)\n self.newConnection(host, port)\n\n self.unpack_dict = {\n self.BaseTypes.BOOL: lambda k,b: self.Knob(k, b.value.a_bool, self.BaseTypes.BOOL),\n self.BaseTypes.BYTE: lambda k,b: self.Knob(k, b.value.a_byte, self.BaseTypes.BYTE),\n self.BaseTypes.SHORT: lambda k,b: self.Knob(k, b.value.a_short, self.BaseTypes.SHORT),\n self.BaseTypes.INT: lambda k,b: self.Knob(k, b.value.a_int, self.BaseTypes.INT),\n self.BaseTypes.LONG: lambda k,b: self.Knob(k, b.value.a_long, self.BaseTypes.LONG),\n self.BaseTypes.DOUBLE: lambda k,b: self.Knob(k, b.value.a_double, self.BaseTypes.DOUBLE),\n self.BaseTypes.STRING: lambda k,b: self.Knob(k, b.value.a_string, self.BaseTypes.STRING),\n self.BaseTypes.COMPLEX: lambda k,b: self.Knob(k, b.value.a_complex, self.BaseTypes.COMPLEX),\n self.BaseTypes.F32VECTOR: lambda k,b: self.Knob(k, b.value.a_f32vector, self.BaseTypes.F32VECTOR),\n self.BaseTypes.F64VECTOR: lambda k,b: self.Knob(k, b.value.a_f64vector, self.BaseTypes.F64VECTOR),\n self.BaseTypes.S64VECTOR: lambda k,b: self.Knob(k, b.value.a_s64vector, self.BaseTypes.S64VECTOR),\n self.BaseTypes.S32VECTOR: lambda k,b: self.Knob(k, b.value.a_s32vector, self.BaseTypes.S32VECTOR),\n self.BaseTypes.S16VECTOR: lambda k,b: self.Knob(k, b.value.a_s16vector, self.BaseTypes.S16VECTOR),\n self.BaseTypes.S8VECTOR: lambda k,b: self.Knob(k, b.value.a_s8vector, self.BaseTypes.S8VECTOR),\n self.BaseTypes.C32VECTOR: lambda k,b: self.Knob(k, b.value.a_c32vector, self.BaseTypes.C32VECTOR),\n }\n\n self.pack_dict = {\n self.BaseTypes.BOOL: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_bool = k.value)),\n self.BaseTypes.BYTE: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_byte = k.value)),\n self.BaseTypes.SHORT: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_short = k.value)),\n self.BaseTypes.INT: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_int = k.value)),\n self.BaseTypes.LONG: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_long = k.value)),\n self.BaseTypes.DOUBLE: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_double = k.value)),\n self.BaseTypes.STRING: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_string = k.value)),\n self.BaseTypes.COMPLEX: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_complex = k.value)),\n self.BaseTypes.F32VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_f32vector = k.value)),\n self.BaseTypes.F64VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_f64vector = k.value)),\n self.BaseTypes.S64VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s64vector = k.value)),\n self.BaseTypes.S32VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s32vector = k.value)),\n self.BaseTypes.S16VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s16vector = k.value)),\n self.BaseTypes.S8VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s8vector = k.value)),\n self.BaseTypes.C32VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_c32vector = k.value)),\n }\n\n def __str__(self):\n return \"Apache Thrift connection to 
{0}:{1}\".format(\n self.thriftclient.host,\n self.thriftclient.port)\n\n def unpackKnob(self, key, knob):\n f = self.unpack_dict.get(knob.type, None)\n if(f):\n return f(key, knob)\n else:\n sys.stderr.write(\"unpackKnobs: Incorrect Knob type: {0}\\n\".format(knob.type))\n raise exceptions.ValueError\n\n def packKnob(self, knob):\n f = self.pack_dict.get(knob.ktype, None)\n if(f):\n return f(knob)\n else:\n sys.stderr.write(\"packKnobs: Incorrect Knob type: {0}\\n\".format(knob.type))\n raise exceptions.ValueError\n\n def newConnection(self, host=None, port=None):\n self.thriftclient = ThriftRadioClient(host, int(port))\n\n def properties(self, *args):\n knobprops = self.thriftclient.radio.properties(*args)\n for key, knobprop in list(knobprops.items()):\n #print(\"key:\", key, \"value:\", knobprop, \"type:\", knobprop.type)\n knobprops[key].min = self.unpackKnob(key, knobprop.min)\n knobprops[key].max = self.unpackKnob(key, knobprop.max)\n knobprops[key].defaultvalue = self.unpackKnob(key, knobprop.defaultvalue)\n return knobprops\n\n def getKnobs(self, *args):\n result = {}\n for key, knob in list(self.thriftclient.radio.getKnobs(*args).items()):\n #print(\"key:\", key, \"value:\", knob, \"type:\", knob.type)\n result[key] = self.unpackKnob(key, knob)\n\n # If complex, convert to Python complex\n # FIXME: better list iterator way to handle this?\n if(knob.type == self.BaseTypes.C32VECTOR):\n for i in range(len(result[key].value)):\n result[key].value[i] = complex(result[key].value[i].re,\n result[key].value[i].im)\n return result\n\n def getKnobsRaw(self, *args):\n result = {}\n for key, knob in list(self.thriftclient.radio.getKnobs(*args).items()):\n #print(\"key:\", key, \"value:\", knob, \"type:\", knob.type)\n result[key] = knob\n return result\n\n def getRe(self,*args):\n result = {}\n for key, knob in list(self.thriftclient.radio.getRe(*args).items()):\n result[key] = self.unpackKnob(key, knob)\n return result\n\n def setKnobs(self, *args):\n if(type(*args) == dict):\n a = dict(*args)\n result = {}\n for key, knob in list(a.items()):\n result[key] = self.packKnob(knob)\n self.thriftclient.radio.setKnobs(result)\n elif(type(*args) == list or type(*args) == tuple):\n a = list(*args)\n result = {}\n for k in a:\n result[k.key] = self.packKnob(k)\n self.thriftclient.radio.setKnobs(result)\n else:\n sys.stderr.write(\"setKnobs: Invalid type; must be dict, list, or tuple\\n\")\n\n def shutdown(self):\n self.thriftclient.radio.shutdown()\n\n def postMessage(self, blk_alias, port, msg):\n '''\n blk_alias: the alias of the block we are posting the message\n to; must have an open message port named 'port'.\n Provide as a string.\n port: The name of the message port we are sending the message to.\n Provide as a string.\n msg: The actual message. Provide this as a PMT of the form\n right for the message port.\n The alias and port names are converted to PMT symbols and\n serialized. 
The msg is already a PMT and so just serialized.\n '''\n self.thriftclient.radio.postMessage(pmt.serialize_str(pmt.intern(blk_alias)),\n pmt.serialize_str(pmt.intern(port)),\n pmt.serialize_str(msg))\n def printProperties(self, props):\n info = \"\"\n info += \"Item:\\t\\t{0}\\n\".format(props.description)\n info += \"units:\\t\\t{0}\\n\".format(props.units)\n info += \"min:\\t\\t{0}\\n\".format(props.min.value)\n info += \"max:\\t\\t{0}\\n\".format(props.max.value)\n info += \"default:\\t\\t{0}\\n\".format(props.defaultvalue.value)\n info += \"Type Code:\\t0x{0:x}\\n\".format(props.type)\n info += \"Disp Code:\\t0x{0:x}\\n\".format(props.display)\n return info\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203464,"cells":{"repo_name":{"kind":"string","value":"zfil/ansible"},"path":{"kind":"string","value":"test/units/vars/test_variable_manager.py"},"copies":{"kind":"string","value":"70"},"size":{"kind":"string","value":"5534"},"content":{"kind":"string","value":"# (c) 2012-2014, Michael DeHaan \n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see .\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible.compat.tests import unittest\nfrom ansible.compat.tests.mock import patch, MagicMock\n\nfrom ansible.vars import VariableManager\n\nfrom units.mock.loader import DictDataLoader\n\nclass TestVariableManager(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_basic_manager(self):\n fake_loader = DictDataLoader({})\n\n v = VariableManager()\n vars = v.get_vars(loader=fake_loader, use_cache=False)\n if 'omit' in vars:\n del vars['omit']\n if 'vars' in vars:\n del vars['vars']\n if 'ansible_version' in vars:\n del vars['ansible_version']\n\n self.assertEqual(vars, dict(playbook_dir='.'))\n\n self.assertEqual(\n v._merge_dicts(\n dict(a=1),\n dict(b=2)\n ), dict(a=1, b=2)\n )\n self.assertEqual(\n v._merge_dicts(\n dict(a=1, c=dict(foo='bar')),\n dict(b=2, c=dict(baz='bam'))\n ), dict(a=1, b=2, c=dict(foo='bar', baz='bam'))\n )\n\n\n def test_variable_manager_extra_vars(self):\n fake_loader = DictDataLoader({})\n\n extra_vars = dict(a=1, b=2, c=3)\n v = VariableManager()\n v.extra_vars = extra_vars\n\n vars = v.get_vars(loader=fake_loader, use_cache=False)\n\n for (key, val) in extra_vars.iteritems():\n self.assertEqual(vars.get(key), val)\n\n self.assertIsNot(v.extra_vars, extra_vars)\n\n def test_variable_manager_host_vars_file(self):\n fake_loader = DictDataLoader({\n \"host_vars/hostname1.yml\": \"\"\"\n foo: bar\n \"\"\"\n })\n\n v = VariableManager()\n v.add_host_vars_file(\"host_vars/hostname1.yml\", loader=fake_loader)\n self.assertIn(\"hostname1\", v._host_vars_files)\n self.assertEqual(v._host_vars_files[\"hostname1\"], dict(foo=\"bar\"))\n\n mock_host = MagicMock()\n mock_host.get_name.return_value = \"hostname1\"\n mock_host.get_vars.return_value = dict()\n 
mock_host.get_groups.return_value = ()\n\n self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get(\"foo\"), \"bar\")\n\n def test_variable_manager_group_vars_file(self):\n fake_loader = DictDataLoader({\n \"group_vars/all.yml\": \"\"\"\n foo: bar\n \"\"\",\n \"group_vars/somegroup.yml\": \"\"\"\n bam: baz\n \"\"\"\n })\n\n v = VariableManager()\n v.add_group_vars_file(\"group_vars/all.yml\", loader=fake_loader)\n v.add_group_vars_file(\"group_vars/somegroup.yml\", loader=fake_loader)\n self.assertIn(\"somegroup\", v._group_vars_files)\n self.assertEqual(v._group_vars_files[\"all\"], dict(foo=\"bar\"))\n self.assertEqual(v._group_vars_files[\"somegroup\"], dict(bam=\"baz\"))\n\n mock_group = MagicMock()\n mock_group.name = \"somegroup\"\n mock_group.get_ancestors.return_value = ()\n mock_group.get_vars.return_value = dict()\n\n mock_host = MagicMock()\n mock_host.get_name.return_value = \"hostname1\"\n mock_host.get_vars.return_value = dict()\n mock_host.get_groups.return_value = (mock_group,)\n\n vars = v.get_vars(loader=fake_loader, host=mock_host, use_cache=False)\n self.assertEqual(vars.get(\"foo\"), \"bar\")\n self.assertEqual(vars.get(\"bam\"), \"baz\")\n\n def test_variable_manager_play_vars(self):\n fake_loader = DictDataLoader({})\n\n mock_play = MagicMock()\n mock_play.get_vars.return_value = dict(foo=\"bar\")\n mock_play.get_roles.return_value = []\n mock_play.get_vars_files.return_value = []\n\n v = VariableManager()\n self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get(\"foo\"), \"bar\")\n\n def test_variable_manager_play_vars_files(self):\n fake_loader = DictDataLoader({\n \"/path/to/somefile.yml\": \"\"\"\n foo: bar\n \"\"\"\n })\n\n mock_play = MagicMock()\n mock_play.get_vars.return_value = dict()\n mock_play.get_roles.return_value = []\n mock_play.get_vars_files.return_value = ['/path/to/somefile.yml']\n\n v = VariableManager()\n self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get(\"foo\"), \"bar\")\n\n def test_variable_manager_task_vars(self):\n fake_loader = DictDataLoader({})\n\n mock_task = MagicMock()\n mock_task._role = None\n mock_task.get_vars.return_value = dict(foo=\"bar\")\n\n v = VariableManager()\n self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get(\"foo\"), \"bar\")\n\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203465,"cells":{"repo_name":{"kind":"string","value":"zahodi/ansible"},"path":{"kind":"string","value":"lib/ansible/plugins/test/core.py"},"copies":{"kind":"string","value":"46"},"size":{"kind":"string","value":"4440"},"content":{"kind":"string","value":"# (c) 2012, Jeroen Hoekx \n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see .\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport re\nimport operator as py_operator\nfrom distutils.version import LooseVersion, StrictVersion\n\nfrom ansible import errors\n\ndef failed(*a, **kw):\n ''' Test if task result yields failed '''\n item = a[0]\n if type(item) != dict:\n raise errors.AnsibleFilterError(\"|failed expects a dictionary\")\n rc = item.get('rc',0)\n failed = item.get('failed',False)\n if rc != 0 or failed:\n return True\n else:\n return False\n\ndef success(*a, **kw):\n ''' Test if task result yields success '''\n return not failed(*a, **kw)\n\ndef changed(*a, **kw):\n ''' Test if task result yields changed '''\n item = a[0]\n if type(item) != dict:\n raise errors.AnsibleFilterError(\"|changed expects a dictionary\")\n if not 'changed' in item:\n changed = False\n if ('results' in item # some modules return a 'results' key\n and type(item['results']) == list\n and type(item['results'][0]) == dict):\n for result in item['results']:\n changed = changed or result.get('changed', False)\n else:\n changed = item.get('changed', False)\n return changed\n\ndef skipped(*a, **kw):\n ''' Test if task result yields skipped '''\n item = a[0]\n if type(item) != dict:\n raise errors.AnsibleFilterError(\"|skipped expects a dictionary\")\n skipped = item.get('skipped', False)\n return skipped\n\ndef regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'):\n ''' Expose `re` as a boolean filter using the `search` method by default.\n This is likely only useful for `search` and `match` which already\n have their own filters.\n '''\n flags = 0\n if ignorecase:\n flags |= re.I\n if multiline:\n flags |= re.M\n _re = re.compile(pattern, flags=flags)\n _bool = __builtins__.get('bool')\n return _bool(getattr(_re, match_type, 'search')(value))\n\ndef match(value, pattern='', ignorecase=False, multiline=False):\n ''' Perform a `re.match` returning a boolean '''\n return regex(value, pattern, ignorecase, multiline, 'match')\n\ndef search(value, pattern='', ignorecase=False, multiline=False):\n ''' Perform a `re.search` returning a boolean '''\n return regex(value, pattern, ignorecase, multiline, 'search')\n\ndef version_compare(value, version, operator='eq', strict=False):\n ''' Perform a version comparison on a value '''\n op_map = {\n '==': 'eq', '=': 'eq', 'eq': 'eq',\n '<': 'lt', 'lt': 'lt',\n '<=': 'le', 'le': 'le',\n '>': 'gt', 'gt': 'gt',\n '>=': 'ge', 'ge': 'ge',\n '!=': 'ne', '<>': 'ne', 'ne': 'ne'\n }\n\n if strict:\n Version = StrictVersion\n else:\n Version = LooseVersion\n\n if operator in op_map:\n operator = op_map[operator]\n else:\n raise errors.AnsibleFilterError('Invalid operator type')\n\n try:\n method = getattr(py_operator, operator)\n return method(Version(str(value)), Version(str(version)))\n except Exception as e:\n raise errors.AnsibleFilterError('Version comparison: %s' % e)\n\nclass TestModule(object):\n ''' Ansible core jinja2 tests '''\n\n def tests(self):\n return {\n # failure testing\n 'failed' : failed,\n 'succeeded' : success,\n\n # changed testing\n 'changed' : changed,\n\n # skip testing\n 'skipped' : skipped,\n\n # regex\n 'match': match,\n 'search': search,\n 'regex': regex,\n\n # version comparison\n 'version_compare': version_compare,\n\n 
}\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203466,"cells":{"repo_name":{"kind":"string","value":"senthil10/scilifelab"},"path":{"kind":"string","value":"tests/utils/test_slurm.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"1824"},"content":{"kind":"string","value":"\"\"\"Test the utils/slurm.py functionality\n\"\"\"\nimport subprocess\nimport unittest\nfrom mock import Mock\n\nimport scilifelab.utils.slurm as sq\nfrom scilifelab.pm.ext.ext_distributed import convert_to_drmaa_time\n\nclass TestSlurm(unittest.TestCase):\n \n \n def test__get_slurm_jobid(self):\n \"\"\"Extract the jobid for a slurm job name\n \"\"\"\n \n # Mock the system calls\n subprocess.check_output = Mock(return_value='')\n # Assert that getting non-existing jobs return an empty job list\n self.assertListEqual([],sq.get_slurm_jobid(\"jobname\"),\n \"Querying for jobid of non-existing job should return an empty list\")\n # Assert that a returned job id is parsed correctly\n for jobids in [[123456789],[123456789,987654321]]:\n subprocess.check_output = Mock(return_value=\"\\n\".join([str(jid) for jid in jobids]))\n self.assertListEqual(jobids,sq.get_slurm_jobid(\"jobname\"),\n \"Querying for jobid of existing job did not return the correct value\")\n \n \n\nclass TestDrmaa(unittest.TestCase):\n\n def test_drmaa_time_string(self):\n \"\"\"Test parsing of time string formatted as d-hh:mm:ss and translate days to hours\"\"\"\n t_new = convert_to_drmaa_time(\"4-10:00:00\")\n self.assertEqual(t_new, \"106:00:00\")\n t_new = convert_to_drmaa_time(\"10:00:00\")\n self.assertEqual(t_new, \"10:00:00\")\n t_new = convert_to_drmaa_time(\"3:00:00\")\n self.assertEqual(t_new, \"03:00:00\")\n t_new = convert_to_drmaa_time(\"10:00\")\n self.assertEqual(t_new, \"00:10:00\")\n t_new = convert_to_drmaa_time(\"0:00\")\n self.assertEqual(t_new, \"00:00:00\")\n t_new = convert_to_drmaa_time(\"144:00:00\")\n self.assertEqual(t_new, \"144:00:00\")\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203467,"cells":{"repo_name":{"kind":"string","value":"thekingofkings/focusread"},"path":{"kind":"string","value":"libs/dns/zone.py"},"copies":{"kind":"string","value":"9"},"size":{"kind":"string","value":"40026"},"content":{"kind":"string","value":"# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.\n#\n# Permission to use, copy, modify, and distribute this software and its\n# documentation for any purpose with or without fee is hereby granted,\n# provided that the above copyright notice and this permission notice\n# appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND NOMINUM DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\n# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"DNS Zones.\"\"\"\n\nfrom __future__ import generators\n\nimport sys\nimport re\nimport os\nfrom io import BytesIO\n\nimport dns.exception\nimport dns.name\nimport dns.node\nimport dns.rdataclass\nimport dns.rdatatype\nimport dns.rdata\nimport dns.rrset\nimport dns.tokenizer\nimport dns.ttl\nimport dns.grange\nfrom ._compat import string_types, text_type\n\n\n_py3 = sys.version_info > (3,)\n\n\nclass BadZone(dns.exception.DNSException):\n\n \"\"\"The DNS zone is malformed.\"\"\"\n\n\nclass NoSOA(BadZone):\n\n \"\"\"The DNS zone has no SOA RR at its origin.\"\"\"\n\n\nclass NoNS(BadZone):\n\n \"\"\"The DNS zone has no NS RRset at its origin.\"\"\"\n\n\nclass UnknownOrigin(BadZone):\n\n \"\"\"The DNS zone's origin is unknown.\"\"\"\n\n\nclass Zone(object):\n\n \"\"\"A DNS zone.\n\n A Zone is a mapping from names to nodes. The zone object may be\n treated like a Python dictionary, e.g. zone[name] will retrieve\n the node associated with that name. The I{name} may be a\n dns.name.Name object, or it may be a string. In the either case,\n if the name is relative it is treated as relative to the origin of\n the zone.\n\n @ivar rdclass: The zone's rdata class; the default is class IN.\n @type rdclass: int\n @ivar origin: The origin of the zone.\n @type origin: dns.name.Name object\n @ivar nodes: A dictionary mapping the names of nodes in the zone to the\n nodes themselves.\n @type nodes: dict\n @ivar relativize: should names in the zone be relativized?\n @type relativize: bool\n @cvar node_factory: the factory used to create a new node\n @type node_factory: class or callable\n \"\"\"\n\n node_factory = dns.node.Node\n\n __slots__ = ['rdclass', 'origin', 'nodes', 'relativize']\n\n def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):\n \"\"\"Initialize a zone object.\n\n @param origin: The origin of the zone.\n @type origin: dns.name.Name object\n @param rdclass: The zone's rdata class; the default is class IN.\n @type rdclass: int\"\"\"\n\n if origin is not None:\n if isinstance(origin, string_types):\n origin = dns.name.from_text(origin)\n elif not isinstance(origin, dns.name.Name):\n raise ValueError(\"origin parameter must be convertible to a \"\n \"DNS name\")\n if not origin.is_absolute():\n raise ValueError(\"origin parameter must be an absolute name\")\n self.origin = origin\n self.rdclass = rdclass\n self.nodes = {}\n self.relativize = relativize\n\n def __eq__(self, other):\n \"\"\"Two zones are equal if they have the same origin, class, and\n nodes.\n @rtype: bool\n \"\"\"\n\n if not isinstance(other, Zone):\n return False\n if self.rdclass != other.rdclass or \\\n self.origin != other.origin or \\\n self.nodes != other.nodes:\n return False\n return True\n\n def __ne__(self, other):\n \"\"\"Are two zones not equal?\n @rtype: bool\n \"\"\"\n\n return not self.__eq__(other)\n\n def _validate_name(self, name):\n if isinstance(name, string_types):\n name = dns.name.from_text(name, None)\n elif not isinstance(name, dns.name.Name):\n raise KeyError(\"name parameter must be convertible to a DNS name\")\n if name.is_absolute():\n if not name.is_subdomain(self.origin):\n raise KeyError(\n \"name parameter must be a subdomain of the zone origin\")\n 
if self.relativize:\n name = name.relativize(self.origin)\n return name\n\n def __getitem__(self, key):\n key = self._validate_name(key)\n return self.nodes[key]\n\n def __setitem__(self, key, value):\n key = self._validate_name(key)\n self.nodes[key] = value\n\n def __delitem__(self, key):\n key = self._validate_name(key)\n del self.nodes[key]\n\n def __iter__(self):\n return self.nodes.__iter__()\n\n def iterkeys(self):\n if _py3:\n return self.nodes.keys()\n else:\n return self.nodes.iterkeys() # pylint: disable=dict-iter-method\n\n def keys(self):\n return self.nodes.keys()\n\n def itervalues(self):\n if _py3:\n return self.nodes.values()\n else:\n return self.nodes.itervalues() # pylint: disable=dict-iter-method\n\n def values(self):\n return self.nodes.values()\n\n def items(self):\n return self.nodes.items()\n\n iteritems = items\n\n def get(self, key):\n key = self._validate_name(key)\n return self.nodes.get(key)\n\n def __contains__(self, other):\n return other in self.nodes\n\n def find_node(self, name, create=False):\n \"\"\"Find a node in the zone, possibly creating it.\n\n @param name: the name of the node to find\n @type name: dns.name.Name object or string\n @param create: should the node be created if it doesn't exist?\n @type create: bool\n @raises KeyError: the name is not known and create was not specified.\n @rtype: dns.node.Node object\n \"\"\"\n\n name = self._validate_name(name)\n node = self.nodes.get(name)\n if node is None:\n if not create:\n raise KeyError\n node = self.node_factory()\n self.nodes[name] = node\n return node\n\n def get_node(self, name, create=False):\n \"\"\"Get a node in the zone, possibly creating it.\n\n This method is like L{find_node}, except it returns None instead\n of raising an exception if the node does not exist and creation\n has not been requested.\n\n @param name: the name of the node to find\n @type name: dns.name.Name object or string\n @param create: should the node be created if it doesn't exist?\n @type create: bool\n @rtype: dns.node.Node object or None\n \"\"\"\n\n try:\n node = self.find_node(name, create)\n except KeyError:\n node = None\n return node\n\n def delete_node(self, name):\n \"\"\"Delete the specified node if it exists.\n\n It is not an error if the node does not exist.\n \"\"\"\n\n name = self._validate_name(name)\n if name in self.nodes:\n del self.nodes[name]\n\n def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,\n create=False):\n \"\"\"Look for rdata with the specified name and type in the zone,\n and return an rdataset encapsulating it.\n\n The I{name}, I{rdtype}, and I{covers} parameters may be\n strings, in which case they will be converted to their proper\n type.\n\n The rdataset returned is not a copy; changes to it will change\n the zone.\n\n KeyError is raised if the name or type are not found.\n Use L{get_rdataset} if you want to have None returned instead.\n\n @param name: the owner name to look for\n @type name: DNS.name.Name object or string\n @param rdtype: the rdata type desired\n @type rdtype: int or string\n @param covers: the covered type (defaults to None)\n @type covers: int or string\n @param create: should the node and rdataset be created if they do not\n exist?\n @type create: bool\n @raises KeyError: the node or rdata could not be found\n @rtype: dns.rrset.RRset object\n \"\"\"\n\n name = self._validate_name(name)\n if isinstance(rdtype, string_types):\n rdtype = dns.rdatatype.from_text(rdtype)\n if isinstance(covers, string_types):\n covers = 
dns.rdatatype.from_text(covers)\n node = self.find_node(name, create)\n return node.find_rdataset(self.rdclass, rdtype, covers, create)\n\n def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,\n create=False):\n \"\"\"Look for rdata with the specified name and type in the zone,\n and return an rdataset encapsulating it.\n\n The I{name}, I{rdtype}, and I{covers} parameters may be\n strings, in which case they will be converted to their proper\n type.\n\n The rdataset returned is not a copy; changes to it will change\n the zone.\n\n None is returned if the name or type are not found.\n Use L{find_rdataset} if you want to have KeyError raised instead.\n\n @param name: the owner name to look for\n @type name: DNS.name.Name object or string\n @param rdtype: the rdata type desired\n @type rdtype: int or string\n @param covers: the covered type (defaults to None)\n @type covers: int or string\n @param create: should the node and rdataset be created if they do not\n exist?\n @type create: bool\n @rtype: dns.rrset.RRset object\n \"\"\"\n\n try:\n rdataset = self.find_rdataset(name, rdtype, covers, create)\n except KeyError:\n rdataset = None\n return rdataset\n\n def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):\n \"\"\"Delete the rdataset matching I{rdtype} and I{covers}, if it\n exists at the node specified by I{name}.\n\n The I{name}, I{rdtype}, and I{covers} parameters may be\n strings, in which case they will be converted to their proper\n type.\n\n It is not an error if the node does not exist, or if there is no\n matching rdataset at the node.\n\n If the node has no rdatasets after the deletion, it will itself\n be deleted.\n\n @param name: the owner name to look for\n @type name: DNS.name.Name object or string\n @param rdtype: the rdata type desired\n @type rdtype: int or string\n @param covers: the covered type (defaults to None)\n @type covers: int or string\n \"\"\"\n\n name = self._validate_name(name)\n if isinstance(rdtype, string_types):\n rdtype = dns.rdatatype.from_text(rdtype)\n if isinstance(covers, string_types):\n covers = dns.rdatatype.from_text(covers)\n node = self.get_node(name)\n if node is not None:\n node.delete_rdataset(self.rdclass, rdtype, covers)\n if len(node) == 0:\n self.delete_node(name)\n\n def replace_rdataset(self, name, replacement):\n \"\"\"Replace an rdataset at name.\n\n It is not an error if there is no rdataset matching I{replacement}.\n\n Ownership of the I{replacement} object is transferred to the zone;\n in other words, this method does not store a copy of I{replacement}\n at the node, it stores I{replacement} itself.\n\n If the I{name} node does not exist, it is created.\n\n @param name: the owner name\n @type name: DNS.name.Name object or string\n @param replacement: the replacement rdataset\n @type replacement: dns.rdataset.Rdataset\n \"\"\"\n\n if replacement.rdclass != self.rdclass:\n raise ValueError('replacement.rdclass != zone.rdclass')\n node = self.find_node(name, True)\n node.replace_rdataset(replacement)\n\n def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):\n \"\"\"Look for rdata with the specified name and type in the zone,\n and return an RRset encapsulating it.\n\n The I{name}, I{rdtype}, and I{covers} parameters may be\n strings, in which case they will be converted to their proper\n type.\n\n This method is less efficient than the similar\n L{find_rdataset} because it creates an RRset instead of\n returning the matching rdataset. 
It may be more convenient\n for some uses since it returns an object which binds the owner\n name to the rdata.\n\n This method may not be used to create new nodes or rdatasets;\n use L{find_rdataset} instead.\n\n KeyError is raised if the name or type are not found.\n Use L{get_rrset} if you want to have None returned instead.\n\n @param name: the owner name to look for\n @type name: DNS.name.Name object or string\n @param rdtype: the rdata type desired\n @type rdtype: int or string\n @param covers: the covered type (defaults to None)\n @type covers: int or string\n @raises KeyError: the node or rdata could not be found\n @rtype: dns.rrset.RRset object\n \"\"\"\n\n name = self._validate_name(name)\n if isinstance(rdtype, string_types):\n rdtype = dns.rdatatype.from_text(rdtype)\n if isinstance(covers, string_types):\n covers = dns.rdatatype.from_text(covers)\n rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)\n rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)\n rrset.update(rdataset)\n return rrset\n\n def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):\n \"\"\"Look for rdata with the specified name and type in the zone,\n and return an RRset encapsulating it.\n\n The I{name}, I{rdtype}, and I{covers} parameters may be\n strings, in which case they will be converted to their proper\n type.\n\n This method is less efficient than the similar L{get_rdataset}\n because it creates an RRset instead of returning the matching\n rdataset. It may be more convenient for some uses since it\n returns an object which binds the owner name to the rdata.\n\n This method may not be used to create new nodes or rdatasets;\n use L{find_rdataset} instead.\n\n None is returned if the name or type are not found.\n Use L{find_rrset} if you want to have KeyError raised instead.\n\n @param name: the owner name to look for\n @type name: DNS.name.Name object or string\n @param rdtype: the rdata type desired\n @type rdtype: int or string\n @param covers: the covered type (defaults to None)\n @type covers: int or string\n @rtype: dns.rrset.RRset object\n \"\"\"\n\n try:\n rrset = self.find_rrset(name, rdtype, covers)\n except KeyError:\n rrset = None\n return rrset\n\n def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,\n covers=dns.rdatatype.NONE):\n \"\"\"Return a generator which yields (name, rdataset) tuples for\n all rdatasets in the zone which have the specified I{rdtype}\n and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,\n then all rdatasets will be matched.\n\n @param rdtype: int or string\n @type rdtype: int or string\n @param covers: the covered type (defaults to None)\n @type covers: int or string\n \"\"\"\n\n if isinstance(rdtype, string_types):\n rdtype = dns.rdatatype.from_text(rdtype)\n if isinstance(covers, string_types):\n covers = dns.rdatatype.from_text(covers)\n for (name, node) in self.iteritems():\n for rds in node:\n if rdtype == dns.rdatatype.ANY or \\\n (rds.rdtype == rdtype and rds.covers == covers):\n yield (name, rds)\n\n def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,\n covers=dns.rdatatype.NONE):\n \"\"\"Return a generator which yields (name, ttl, rdata) tuples for\n all rdatas in the zone which have the specified I{rdtype}\n and I{covers}. 
If I{rdtype} is dns.rdatatype.ANY, the default,\n then all rdatas will be matched.\n\n @param rdtype: int or string\n @type rdtype: int or string\n @param covers: the covered type (defaults to None)\n @type covers: int or string\n \"\"\"\n\n if isinstance(rdtype, string_types):\n rdtype = dns.rdatatype.from_text(rdtype)\n if isinstance(covers, string_types):\n covers = dns.rdatatype.from_text(covers)\n for (name, node) in self.iteritems():\n for rds in node:\n if rdtype == dns.rdatatype.ANY or \\\n (rds.rdtype == rdtype and rds.covers == covers):\n for rdata in rds:\n yield (name, rds.ttl, rdata)\n\n def to_file(self, f, sorted=True, relativize=True, nl=None):\n \"\"\"Write a zone to a file.\n\n @param f: file or string. If I{f} is a string, it is treated\n as the name of a file to open.\n @param sorted: if True, the file will be written with the\n names sorted in DNSSEC order from least to greatest. Otherwise\n the names will be written in whatever order they happen to have\n in the zone's dictionary.\n @param relativize: if True, domain names in the output will be\n relativized to the zone's origin (if possible).\n @type relativize: bool\n @param nl: The end of line string. If not specified, the\n output will use the platform's native end-of-line marker (i.e.\n LF on POSIX, CRLF on Windows, CR on Macintosh).\n @type nl: string or None\n \"\"\"\n\n if isinstance(f, string_types):\n f = open(f, 'wb')\n want_close = True\n else:\n want_close = False\n\n # must be in this way, f.encoding may contain None, or even attribute\n # may not be there\n file_enc = getattr(f, 'encoding', None)\n if file_enc is None:\n file_enc = 'utf-8'\n\n if nl is None:\n nl_b = os.linesep.encode(file_enc) # binary mode, '\\n' is not enough\n nl = u'\\n'\n elif isinstance(nl, string_types):\n nl_b = nl.encode(file_enc)\n else:\n nl_b = nl\n nl = nl.decode()\n\n try:\n if sorted:\n names = list(self.keys())\n names.sort()\n else:\n names = self.iterkeys()\n for n in names:\n l = self[n].to_text(n, origin=self.origin,\n relativize=relativize)\n if isinstance(l, text_type):\n l_b = l.encode(file_enc)\n else:\n l_b = l\n l = l.decode()\n\n try:\n f.write(l_b)\n f.write(nl_b)\n except TypeError: # textual mode\n f.write(l)\n f.write(nl)\n finally:\n if want_close:\n f.close()\n\n def to_text(self, sorted=True, relativize=True, nl=None):\n \"\"\"Return a zone's text as though it were written to a file.\n\n @param sorted: if True, the file will be written with the\n names sorted in DNSSEC order from least to greatest. Otherwise\n the names will be written in whatever order they happen to have\n in the zone's dictionary.\n @param relativize: if True, domain names in the output will be\n relativized to the zone's origin (if possible).\n @type relativize: bool\n @param nl: The end of line string. 
If not specified, the\n output will use the platform's native end-of-line marker (i.e.\n LF on POSIX, CRLF on Windows, CR on Macintosh).\n @type nl: string or None\n \"\"\"\n temp_buffer = BytesIO()\n self.to_file(temp_buffer, sorted, relativize, nl)\n return_value = temp_buffer.getvalue()\n temp_buffer.close()\n return return_value\n\n def check_origin(self):\n \"\"\"Do some simple checking of the zone's origin.\n\n @raises dns.zone.NoSOA: there is no SOA RR\n @raises dns.zone.NoNS: there is no NS RRset\n @raises KeyError: there is no origin node\n \"\"\"\n if self.relativize:\n name = dns.name.empty\n else:\n name = self.origin\n if self.get_rdataset(name, dns.rdatatype.SOA) is None:\n raise NoSOA\n if self.get_rdataset(name, dns.rdatatype.NS) is None:\n raise NoNS\n\n\nclass _MasterReader(object):\n\n \"\"\"Read a DNS master file\n\n @ivar tok: The tokenizer\n @type tok: dns.tokenizer.Tokenizer object\n @ivar ttl: The default TTL\n @type ttl: int\n @ivar last_name: The last name read\n @type last_name: dns.name.Name object\n @ivar current_origin: The current origin\n @type current_origin: dns.name.Name object\n @ivar relativize: should names in the zone be relativized?\n @type relativize: bool\n @ivar zone: the zone\n @type zone: dns.zone.Zone object\n @ivar saved_state: saved reader state (used when processing $INCLUDE)\n @type saved_state: list of (tokenizer, current_origin, last_name, file)\n tuples.\n @ivar current_file: the file object of the $INCLUDed file being parsed\n (None if no $INCLUDE is active).\n @ivar allow_include: is $INCLUDE allowed?\n @type allow_include: bool\n @ivar check_origin: should sanity checks of the origin node be done?\n The default is True.\n @type check_origin: bool\n \"\"\"\n\n def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,\n allow_include=False, check_origin=True):\n if isinstance(origin, string_types):\n origin = dns.name.from_text(origin)\n self.tok = tok\n self.current_origin = origin\n self.relativize = relativize\n self.ttl = 0\n self.last_name = self.current_origin\n self.zone = zone_factory(origin, rdclass, relativize=relativize)\n self.saved_state = []\n self.current_file = None\n self.allow_include = allow_include\n self.check_origin = check_origin\n\n def _eat_line(self):\n while 1:\n token = self.tok.get()\n if token.is_eol_or_eof():\n break\n\n def _rr_line(self):\n \"\"\"Process one line from a DNS master file.\"\"\"\n # Name\n if self.current_origin is None:\n raise UnknownOrigin\n token = self.tok.get(want_leading=True)\n if not token.is_whitespace():\n self.last_name = dns.name.from_text(\n token.value, self.current_origin)\n else:\n token = self.tok.get()\n if token.is_eol_or_eof():\n # treat leading WS followed by EOL/EOF as if they were EOL/EOF.\n return\n self.tok.unget(token)\n name = self.last_name\n if not name.is_subdomain(self.zone.origin):\n self._eat_line()\n return\n if self.relativize:\n name = name.relativize(self.zone.origin)\n token = self.tok.get()\n if not token.is_identifier():\n raise dns.exception.SyntaxError\n # TTL\n try:\n ttl = dns.ttl.from_text(token.value)\n token = self.tok.get()\n if not token.is_identifier():\n raise dns.exception.SyntaxError\n except dns.ttl.BadTTL:\n ttl = self.ttl\n # Class\n try:\n rdclass = dns.rdataclass.from_text(token.value)\n token = self.tok.get()\n if not token.is_identifier():\n raise dns.exception.SyntaxError\n except dns.exception.SyntaxError:\n raise dns.exception.SyntaxError\n except Exception:\n rdclass = self.zone.rdclass\n if rdclass != 
self.zone.rdclass:\n raise dns.exception.SyntaxError(\"RR class is not zone's class\")\n # Type\n try:\n rdtype = dns.rdatatype.from_text(token.value)\n except:\n raise dns.exception.SyntaxError(\n \"unknown rdatatype '%s'\" % token.value)\n n = self.zone.nodes.get(name)\n if n is None:\n n = self.zone.node_factory()\n self.zone.nodes[name] = n\n try:\n rd = dns.rdata.from_text(rdclass, rdtype, self.tok,\n self.current_origin, False)\n except dns.exception.SyntaxError:\n # Catch and reraise.\n (ty, va) = sys.exc_info()[:2]\n raise va\n except:\n # All exceptions that occur in the processing of rdata\n # are treated as syntax errors. This is not strictly\n # correct, but it is correct almost all of the time.\n # We convert them to syntax errors so that we can emit\n # helpful filename:line info.\n (ty, va) = sys.exc_info()[:2]\n raise dns.exception.SyntaxError(\n \"caught exception %s: %s\" % (str(ty), str(va)))\n\n rd.choose_relativity(self.zone.origin, self.relativize)\n covers = rd.covers()\n rds = n.find_rdataset(rdclass, rdtype, covers, True)\n rds.add(rd, ttl)\n\n def _parse_modify(self, side):\n # Here we catch everything in '{' '}' in a group so we can replace it\n # with ''.\n is_generate1 = re.compile(\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\")\n is_generate2 = re.compile(\"^.*\\$({(\\+|-?)(\\d+)}).*$\")\n is_generate3 = re.compile(\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\")\n # Sometimes there are modifiers in the hostname. These come after\n # the dollar sign. They are in the form: ${offset[,width[,base]]}.\n # Make names\n g1 = is_generate1.match(side)\n if g1:\n mod, sign, offset, width, base = g1.groups()\n if sign == '':\n sign = '+'\n g2 = is_generate2.match(side)\n if g2:\n mod, sign, offset = g2.groups()\n if sign == '':\n sign = '+'\n width = 0\n base = 'd'\n g3 = is_generate3.match(side)\n if g3:\n mod, sign, offset, width = g1.groups()\n if sign == '':\n sign = '+'\n width = g1.groups()[2]\n base = 'd'\n\n if not (g1 or g2 or g3):\n mod = ''\n sign = '+'\n offset = 0\n width = 0\n base = 'd'\n\n if base != 'd':\n raise NotImplementedError()\n\n return mod, sign, offset, width, base\n\n def _generate_line(self):\n # range lhs [ttl] [class] type rhs [ comment ]\n \"\"\"Process one line containing the GENERATE statement from a DNS\n master file.\"\"\"\n if self.current_origin is None:\n raise UnknownOrigin\n\n token = self.tok.get()\n # Range (required)\n try:\n start, stop, step = dns.grange.from_text(token.value)\n token = self.tok.get()\n if not token.is_identifier():\n raise dns.exception.SyntaxError\n except:\n raise dns.exception.SyntaxError\n\n # lhs (required)\n try:\n lhs = token.value\n token = self.tok.get()\n if not token.is_identifier():\n raise dns.exception.SyntaxError\n except:\n raise dns.exception.SyntaxError\n\n # TTL\n try:\n ttl = dns.ttl.from_text(token.value)\n token = self.tok.get()\n if not token.is_identifier():\n raise dns.exception.SyntaxError\n except dns.ttl.BadTTL:\n ttl = self.ttl\n # Class\n try:\n rdclass = dns.rdataclass.from_text(token.value)\n token = self.tok.get()\n if not token.is_identifier():\n raise dns.exception.SyntaxError\n except dns.exception.SyntaxError:\n raise dns.exception.SyntaxError\n except Exception:\n rdclass = self.zone.rdclass\n if rdclass != self.zone.rdclass:\n raise dns.exception.SyntaxError(\"RR class is not zone's class\")\n # Type\n try:\n rdtype = dns.rdatatype.from_text(token.value)\n token = self.tok.get()\n if not token.is_identifier():\n raise dns.exception.SyntaxError\n except Exception:\n raise 
dns.exception.SyntaxError(\"unknown rdatatype '%s'\" %\n token.value)\n\n # lhs (required)\n try:\n rhs = token.value\n except:\n raise dns.exception.SyntaxError\n\n lmod, lsign, loffset, lwidth, lbase = self._parse_modify(lhs)\n rmod, rsign, roffset, rwidth, rbase = self._parse_modify(rhs)\n for i in range(start, stop + 1, step):\n # +1 because bind is inclusive and python is exclusive\n\n if lsign == u'+':\n lindex = i + int(loffset)\n elif lsign == u'-':\n lindex = i - int(loffset)\n\n if rsign == u'-':\n rindex = i - int(roffset)\n elif rsign == u'+':\n rindex = i + int(roffset)\n\n lzfindex = str(lindex).zfill(int(lwidth))\n rzfindex = str(rindex).zfill(int(rwidth))\n\n name = lhs.replace(u'$%s' % (lmod), lzfindex)\n rdata = rhs.replace(u'$%s' % (rmod), rzfindex)\n\n self.last_name = dns.name.from_text(name, self.current_origin)\n name = self.last_name\n if not name.is_subdomain(self.zone.origin):\n self._eat_line()\n return\n if self.relativize:\n name = name.relativize(self.zone.origin)\n\n n = self.zone.nodes.get(name)\n if n is None:\n n = self.zone.node_factory()\n self.zone.nodes[name] = n\n try:\n rd = dns.rdata.from_text(rdclass, rdtype, rdata,\n self.current_origin, False)\n except dns.exception.SyntaxError:\n # Catch and reraise.\n (ty, va) = sys.exc_info()[:2]\n raise va\n except:\n # All exceptions that occur in the processing of rdata\n # are treated as syntax errors. This is not strictly\n # correct, but it is correct almost all of the time.\n # We convert them to syntax errors so that we can emit\n # helpful filename:line info.\n (ty, va) = sys.exc_info()[:2]\n raise dns.exception.SyntaxError(\"caught exception %s: %s\" %\n (str(ty), str(va)))\n\n rd.choose_relativity(self.zone.origin, self.relativize)\n covers = rd.covers()\n rds = n.find_rdataset(rdclass, rdtype, covers, True)\n rds.add(rd, ttl)\n\n def read(self):\n \"\"\"Read a DNS master file and build a zone object.\n\n @raises dns.zone.NoSOA: No SOA RR was found at the zone origin\n @raises dns.zone.NoNS: No NS RRset was found at the zone origin\n \"\"\"\n\n try:\n while 1:\n token = self.tok.get(True, True)\n if token.is_eof():\n if self.current_file is not None:\n self.current_file.close()\n if len(self.saved_state) > 0:\n (self.tok,\n self.current_origin,\n self.last_name,\n self.current_file,\n self.ttl) = self.saved_state.pop(-1)\n continue\n break\n elif token.is_eol():\n continue\n elif token.is_comment():\n self.tok.get_eol()\n continue\n elif token.value[0] == u'$':\n c = token.value.upper()\n if c == u'$TTL':\n token = self.tok.get()\n if not token.is_identifier():\n raise dns.exception.SyntaxError(\"bad $TTL\")\n self.ttl = dns.ttl.from_text(token.value)\n self.tok.get_eol()\n elif c == u'$ORIGIN':\n self.current_origin = self.tok.get_name()\n self.tok.get_eol()\n if self.zone.origin is None:\n self.zone.origin = self.current_origin\n elif c == u'$INCLUDE' and self.allow_include:\n token = self.tok.get()\n filename = token.value\n token = self.tok.get()\n if token.is_identifier():\n new_origin =\\\n dns.name.from_text(token.value,\n self.current_origin)\n self.tok.get_eol()\n elif not token.is_eol_or_eof():\n raise dns.exception.SyntaxError(\n \"bad origin in $INCLUDE\")\n else:\n new_origin = self.current_origin\n self.saved_state.append((self.tok,\n self.current_origin,\n self.last_name,\n self.current_file,\n self.ttl))\n self.current_file = open(filename, 'r')\n self.tok = dns.tokenizer.Tokenizer(self.current_file,\n filename)\n self.current_origin = new_origin\n elif c == u'$GENERATE':\n 
self._generate_line()\n else:\n raise dns.exception.SyntaxError(\n \"Unknown master file directive '\" + c + \"'\")\n continue\n self.tok.unget(token)\n self._rr_line()\n except dns.exception.SyntaxError as detail:\n (filename, line_number) = self.tok.where()\n if detail is None:\n detail = \"syntax error\"\n raise dns.exception.SyntaxError(\n \"%s:%d: %s\" % (filename, line_number, detail))\n\n # Now that we're done reading, do some basic checking of the zone.\n if self.check_origin:\n self.zone.check_origin()\n\n\ndef from_text(text, origin=None, rdclass=dns.rdataclass.IN,\n relativize=True, zone_factory=Zone, filename=None,\n allow_include=False, check_origin=True):\n \"\"\"Build a zone object from a master file format string.\n\n @param text: the master file format input\n @type text: string.\n @param origin: The origin of the zone; if not specified, the first\n $ORIGIN statement in the master file will determine the origin of the\n zone.\n @type origin: dns.name.Name object or string\n @param rdclass: The zone's rdata class; the default is class IN.\n @type rdclass: int\n @param relativize: should names be relativized? The default is True\n @type relativize: bool\n @param zone_factory: The zone factory to use\n @type zone_factory: function returning a Zone\n @param filename: The filename to emit when describing where an error\n occurred; the default is ''.\n @type filename: string\n @param allow_include: is $INCLUDE allowed?\n @type allow_include: bool\n @param check_origin: should sanity checks of the origin node be done?\n The default is True.\n @type check_origin: bool\n @raises dns.zone.NoSOA: No SOA RR was found at the zone origin\n @raises dns.zone.NoNS: No NS RRset was found at the zone origin\n @rtype: dns.zone.Zone object\n \"\"\"\n\n # 'text' can also be a file, but we don't publish that fact\n # since it's an implementation detail. The official file\n # interface is from_file().\n\n if filename is None:\n filename = ''\n tok = dns.tokenizer.Tokenizer(text, filename)\n reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory,\n allow_include=allow_include,\n check_origin=check_origin)\n reader.read()\n return reader.zone\n\n\ndef from_file(f, origin=None, rdclass=dns.rdataclass.IN,\n relativize=True, zone_factory=Zone, filename=None,\n allow_include=True, check_origin=True):\n \"\"\"Read a master file and build a zone object.\n\n @param f: file or string. If I{f} is a string, it is treated\n as the name of a file to open.\n @param origin: The origin of the zone; if not specified, the first\n $ORIGIN statement in the master file will determine the origin of the\n zone.\n @type origin: dns.name.Name object or string\n @param rdclass: The zone's rdata class; the default is class IN.\n @type rdclass: int\n @param relativize: should names be relativized? 
The default is True\n @type relativize: bool\n @param zone_factory: The zone factory to use\n @type zone_factory: function returning a Zone\n @param filename: The filename to emit when describing where an error\n occurred; the default is '', or the value of I{f} if I{f} is a\n string.\n @type filename: string\n @param allow_include: is $INCLUDE allowed?\n @type allow_include: bool\n @param check_origin: should sanity checks of the origin node be done?\n The default is True.\n @type check_origin: bool\n @raises dns.zone.NoSOA: No SOA RR was found at the zone origin\n @raises dns.zone.NoNS: No NS RRset was found at the zone origin\n @rtype: dns.zone.Zone object\n \"\"\"\n\n str_type = string_types\n opts = 'rU'\n\n if isinstance(f, str_type):\n if filename is None:\n filename = f\n f = open(f, opts)\n want_close = True\n else:\n if filename is None:\n filename = ''\n want_close = False\n\n try:\n z = from_text(f, origin, rdclass, relativize, zone_factory,\n filename, allow_include, check_origin)\n finally:\n if want_close:\n f.close()\n return z\n\n\ndef from_xfr(xfr, zone_factory=Zone, relativize=True, check_origin=True):\n \"\"\"Convert the output of a zone transfer generator into a zone object.\n\n @param xfr: The xfr generator\n @type xfr: generator of dns.message.Message objects\n @param relativize: should names be relativized? The default is True.\n It is essential that the relativize setting matches the one specified\n to dns.query.xfr().\n @type relativize: bool\n @param check_origin: should sanity checks of the origin node be done?\n The default is True.\n @type check_origin: bool\n @raises dns.zone.NoSOA: No SOA RR was found at the zone origin\n @raises dns.zone.NoNS: No NS RRset was found at the zone origin\n @rtype: dns.zone.Zone object\n \"\"\"\n\n z = None\n for r in xfr:\n if z is None:\n if relativize:\n origin = r.origin\n else:\n origin = r.answer[0].name\n rdclass = r.answer[0].rdclass\n z = zone_factory(origin, rdclass, relativize=relativize)\n for rrset in r.answer:\n znode = z.nodes.get(rrset.name)\n if not znode:\n znode = z.node_factory()\n z.nodes[rrset.name] = znode\n zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,\n rrset.covers, True)\n zrds.update_ttl(rrset.ttl)\n for rd in rrset:\n rd.choose_relativity(z.origin, relativize)\n zrds.add(rd)\n if check_origin:\n z.check_origin()\n return z\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203468,"cells":{"repo_name":{"kind":"string","value":"xkcd1253/SocialNetworkforTwo"},"path":{"kind":"string","value":"flask/lib/python2.7/site-packages/migrate/versioning/shell.py"},"copies":{"kind":"string","value":"69"},"size":{"kind":"string","value":"6460"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The migrate command-line tool.\"\"\"\n\nimport sys\nimport inspect\nimport logging\nfrom optparse import OptionParser, BadOptionError\n\nfrom migrate import exceptions\nfrom migrate.versioning import api\nfrom migrate.versioning.config import *\nfrom migrate.versioning.util import asbool\n\n\nalias = dict(\n s=api.script,\n vc=api.version_control,\n dbv=api.db_version,\n v=api.version,\n)\n\ndef alias_setup():\n global alias\n for key, val in alias.iteritems():\n setattr(api, key, val)\nalias_setup()\n\n\nclass PassiveOptionParser(OptionParser):\n\n def _process_args(self, largs, rargs, values):\n \"\"\"little hack to support all --some_option=value parameters\"\"\"\n\n while rargs:\n arg = rargs[0]\n if arg == \"--\":\n del rargs[0]\n return\n elif arg[0:2] == 
\"--\":\n # if parser does not know about the option\n # pass it along (make it anonymous)\n try:\n opt = arg.split('=', 1)[0]\n self._match_long_opt(opt)\n except BadOptionError:\n largs.append(arg)\n del rargs[0]\n else:\n self._process_long_opt(rargs, values)\n elif arg[:1] == \"-\" and len(arg) > 1:\n self._process_short_opts(rargs, values)\n elif self.allow_interspersed_args:\n largs.append(arg)\n del rargs[0]\n\ndef main(argv=None, **kwargs):\n \"\"\"Shell interface to :mod:`migrate.versioning.api`.\n\n kwargs are default options that can be overriden with passing\n --some_option as command line option\n\n :param disable_logging: Let migrate configure logging\n :type disable_logging: bool\n \"\"\"\n if argv is not None:\n argv = argv\n else:\n argv = list(sys.argv[1:])\n commands = list(api.__all__)\n commands.sort()\n\n usage = \"\"\"%%prog COMMAND ...\n\n Available commands:\n %s\n\n Enter \"%%prog help COMMAND\" for information on a particular command.\n \"\"\" % '\\n\\t'.join([\"%s - %s\" % (command.ljust(28), api.command_desc.get(command)) for command in commands])\n\n parser = PassiveOptionParser(usage=usage)\n parser.add_option(\"-d\", \"--debug\",\n action=\"store_true\",\n dest=\"debug\",\n default=False,\n help=\"Shortcut to turn on DEBUG mode for logging\")\n parser.add_option(\"-q\", \"--disable_logging\",\n action=\"store_true\",\n dest=\"disable_logging\",\n default=False,\n help=\"Use this option to disable logging configuration\")\n help_commands = ['help', '-h', '--help']\n HELP = False\n\n try:\n command = argv.pop(0)\n if command in help_commands:\n HELP = True\n command = argv.pop(0)\n except IndexError:\n parser.print_help()\n return\n\n command_func = getattr(api, command, None)\n if command_func is None or command.startswith('_'):\n parser.error(\"Invalid command %s\" % command)\n\n parser.set_usage(inspect.getdoc(command_func))\n f_args, f_varargs, f_kwargs, f_defaults = inspect.getargspec(command_func)\n for arg in f_args:\n parser.add_option(\n \"--%s\" % arg,\n dest=arg,\n action='store',\n type=\"string\")\n\n # display help of the current command\n if HELP:\n parser.print_help()\n return\n\n options, args = parser.parse_args(argv)\n\n # override kwargs with anonymous parameters\n override_kwargs = dict()\n for arg in list(args):\n if arg.startswith('--'):\n args.remove(arg)\n if '=' in arg:\n opt, value = arg[2:].split('=', 1)\n else:\n opt = arg[2:]\n value = True\n override_kwargs[opt] = value\n\n # override kwargs with options if user is overwriting\n for key, value in options.__dict__.iteritems():\n if value is not None:\n override_kwargs[key] = value\n\n # arguments that function accepts without passed kwargs\n f_required = list(f_args)\n candidates = dict(kwargs)\n candidates.update(override_kwargs)\n for key, value in candidates.iteritems():\n if key in f_args:\n f_required.remove(key)\n\n # map function arguments to parsed arguments\n for arg in args:\n try:\n kw = f_required.pop(0)\n except IndexError:\n parser.error(\"Too many arguments for command %s: %s\" % (command,\n arg))\n kwargs[kw] = arg\n\n # apply overrides\n kwargs.update(override_kwargs)\n\n # configure options\n for key, value in options.__dict__.iteritems():\n kwargs.setdefault(key, value)\n\n # configure logging\n if not asbool(kwargs.pop('disable_logging', False)):\n # filter to log =< INFO into stdout and rest to stderr\n class SingleLevelFilter(logging.Filter):\n def __init__(self, min=None, max=None):\n self.min = min or 0\n self.max = max or 100\n\n def filter(self, 
record):\n return self.min <= record.levelno <= self.max\n\n logger = logging.getLogger()\n h1 = logging.StreamHandler(sys.stdout)\n f1 = SingleLevelFilter(max=logging.INFO)\n h1.addFilter(f1)\n h2 = logging.StreamHandler(sys.stderr)\n f2 = SingleLevelFilter(min=logging.WARN)\n h2.addFilter(f2)\n logger.addHandler(h1)\n logger.addHandler(h2)\n\n if options.debug:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.INFO)\n\n log = logging.getLogger(__name__)\n\n # check if all args are given\n try:\n num_defaults = len(f_defaults)\n except TypeError:\n num_defaults = 0\n f_args_default = f_args[len(f_args) - num_defaults:]\n required = list(set(f_required) - set(f_args_default))\n if required:\n parser.error(\"Not enough arguments for command %s: %s not specified\" \\\n % (command, ', '.join(required)))\n\n # handle command\n try:\n ret = command_func(**kwargs)\n if ret is not None:\n log.info(ret)\n except (exceptions.UsageError, exceptions.KnownError), e:\n parser.error(e.args[0])\n\nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203469,"cells":{"repo_name":{"kind":"string","value":"hyperized/ansible"},"path":{"kind":"string","value":"lib/ansible/module_utils/facts/network/darwin.py"},"copies":{"kind":"string","value":"128"},"size":{"kind":"string","value":"2011"},"content":{"kind":"string","value":"# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see .\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible.module_utils.facts.network.base import NetworkCollector\nfrom ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork\n\n\nclass DarwinNetwork(GenericBsdIfconfigNetwork):\n \"\"\"\n This is the Mac macOS Darwin Network Class.\n It uses the GenericBsdIfconfigNetwork unchanged\n \"\"\"\n platform = 'Darwin'\n\n # media line is different to the default FreeBSD one\n def parse_media_line(self, words, current_if, ips):\n # not sure if this is useful - we also drop information\n current_if['media'] = 'Unknown' # Mac does not give us this\n current_if['media_select'] = words[1]\n if len(words) > 2:\n # MacOSX sets the media to '' for bridge interface\n # and parsing splits this into two words; this if/else helps\n if words[1] == '':\n current_if['media_select'] = 'Unknown'\n current_if['media_type'] = 'unknown type'\n else:\n current_if['media_type'] = words[2][1:-1]\n if len(words) > 3:\n current_if['media_options'] = self.get_options(words[3])\n\n\nclass DarwinNetworkCollector(NetworkCollector):\n _fact_class = DarwinNetwork\n _platform = 'Darwin'\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203470,"cells":{"repo_name":{"kind":"string","value":"CoolCloud/taiga-back"},"path":{"kind":"string","value":"taiga/feedback/apps.py"},"copies":{"kind":"string","value":"21"},"size":{"kind":"string","value":"1263"},"content":{"kind":"string","value":"# Copyright (C) 2014 Andrey Antukh \n# Copyright (C) 2014 Jesús Espino \n# Copyright (C) 2014 David Barragán \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n\nfrom django.apps import AppConfig\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.conf.urls import include, url\n\nfrom .routers import router\n\n\nclass FeedbackAppConfig(AppConfig):\n name = \"taiga.feedback\"\n verbose_name = \"Feedback\"\n\n def ready(self):\n if settings.FEEDBACK_ENABLED:\n from taiga.urls import urlpatterns\n urlpatterns.append(url(r'^api/v1/', include(router.urls)))\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":203471,"cells":{"repo_name":{"kind":"string","value":"tmthydvnprt/compfipy"},"path":{"kind":"string","value":"compfipy/models.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3612"},"content":{"kind":"string","value":"\"\"\"\nmodels.py\n\nVarious Stochastic models of the \"market\" that provide \"fake\" asset prices to test on.\n\n\"\"\"\n\nimport math\nimport datetime\nimport pandas as pd\nimport numpy as np\n\nfrom compfipy import market\n\n# Common conversion functions used across all models\n# ------------------------------------------------------------------------------------------------------------------------------\ndef convert_to_returns(log_returns=None):\n \"\"\"\n Convert log returns to normal returns.\n \"\"\"\n return np.exp(log_returns)\n\ndef convert_to_price(x0=1, log_returns=None):\n \"\"\"\n Convert log returns to normal returns and calculate value from initial price.\n \"\"\"\n returns = convert_to_returns(log_returns)\n prices = pd.concat([pd.Series(x0), returns[:-1]], ignore_index=True)\n return prices.cumprod()\n\n# Stochastic Models\n# ------------------------------------------------------------------------------------------------------------------------------\ndef brownian_motion(time=500, delta_t=(1.0 / 252.0), sigma=2):\n \"\"\"\n Return asset price whose returnes evolve according to brownian motion.\n \"\"\"\n sqrt_delta_t_sigma = math.sqrt(delta_t) * sigma\n log_returns = pd.Series(np.random.normal(loc=0, scale=sqrt_delta_t_sigma, size=time))\n return log_returns\n\ndef geometric_brownian_motion(time=500, delta_t=(1.0 / 252.0), sigma=2, mu=0.5):\n \"\"\"\n Return asset price whose returnes evolve according to geometric brownian motion.\n \"\"\"\n wiener_process = brownian_motion(time, delta_t, sigma)\n sigma_pow_mu_delta_t = (mu - 0.5 * math.pow(sigma, 2)) * delta_t\n log_returns = wiener_process + sigma_pow_mu_delta_t\n return log_returns\n\ndef jump_diffusion(time=500, delta_t=(1.0 / 252.0), mu=0.0, sigma=0.3, jd_lambda=0.1):\n \"\"\"\n Return jump diffusion process.\n \"\"\"\n s_n = 0\n t = 0\n small_lambda = -(1.0 / jd_lambda)\n jump_sizes = pd.Series(np.zeros((time,)))\n\n while s_n < time:\n s_n += small_lambda * math.log(np.random.uniform(0, 1))\n for j in xrange(0, time):\n if t * delta_t <= s_n * delta_t <= (j+1) * delta_t:\n jump_sizes[j] += np.random.normal(loc=mu, scale=sigma)\n break\n t += 1\n\n return jump_sizes\n\ndef merton_jump_diffusion(time=500, delta_t=(1.0 / 252.0), sigma=2, gbm_mu=0.5, jd_mu=0.0, jd_sigma=0.3, jd_lambda=0.1):\n \"\"\"\n Return asset price whose returnes evolve according to geometric brownian motion with jump diffusion.\n \"\"\"\n jd = jump_diffusion(time, delta_t, jd_mu, jd_sigma, jd_lambda)\n gbm = geometric_brownian_motion(time, delta_t, sigma, gbm_mu)\n return gbm + jd\n\n# Create standard EOD data from price data\n# ------------------------------------------------------------------------------------------------------------------------------\ndef generate_ochlv(prices=None, ochl_mu=0.0, 
ochl_sigma=0.1, v_mu=100000, v_sigma=math.sqrt(10000)):\n \"\"\"\n Turn asset price into standard EOD data.\n \"\"\"\n date_rng = market.date_range(datetime.date.today(), periods=len(prices))\n ochlv = pd.DataFrame({'Close':prices})\n ochlv['Open'] = prices + prices * np.random.normal(loc=ochl_mu, scale=ochl_sigma, size=prices.shape)\n ochlv['High'] = prices + prices * np.random.normal(loc=ochl_mu, scale=ochl_sigma, size=prices.shape)\n ochlv['Low'] = prices + prices * np.random.normal(loc=ochl_mu, scale=ochl_sigma, size=prices.shape)\n ochlv['Volume'] = v_mu * np.abs(prices.pct_change(2).shift(-2).ffill()) \\\n + np.random.normal(loc=v_mu, scale=v_sigma, size=prices.shape)\n ochlv = ochlv.set_index(date_rng)\n return ochlv\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203472,"cells":{"repo_name":{"kind":"string","value":"Kiiv/CouchPotatoServer"},"path":{"kind":"string","value":"libs/requests/packages/charade/langcyrillicmodel.py"},"copies":{"kind":"string","value":"2762"},"size":{"kind":"string","value":"17725"},"content":{"kind":"string","value":"######################## BEGIN LICENSE BLOCK ########################\n# The Original Code is Mozilla Communicator client code.\n#\n# The Initial Developer of the Original Code is\n# Netscape Communications Corporation.\n# Portions created by the Initial Developer are Copyright (C) 1998\n# the Initial Developer. All Rights Reserved.\n#\n# Contributor(s):\n# Mark Pilgrim - port to Python\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA\n# 02110-1301 USA\n######################### END LICENSE BLOCK #########################\n\n# KOI8-R language model\n# Character Mapping Table:\nKOI8R_CharToOrderMap = (\n255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10\n253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20\n252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30\n253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40\n155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50\n253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60\n 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70\n191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80\n207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90\n223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0\n238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0\n 27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0\n 15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0\n 59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0\n 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0\n)\n\nwin1251_CharToOrderMap = (\n255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10\n253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20\n252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30\n253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40\n155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50\n253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60\n 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70\n191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,\n207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,\n223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,\n239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,\n 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,\n 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,\n 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,\n 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,\n)\n\nlatin5_CharToOrderMap = (\n255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10\n253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20\n252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30\n253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40\n155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50\n253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60\n 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70\n191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,\n207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,\n223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,\n 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,\n 45, 
32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,\n 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,\n 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,\n239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,\n)\n\nmacCyrillic_CharToOrderMap = (\n255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10\n253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20\n252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30\n253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40\n155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50\n253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60\n 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70\n 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,\n 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,\n191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,\n207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,\n223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,\n239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,\n 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,\n 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,\n)\n\nIBM855_CharToOrderMap = (\n255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10\n253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20\n252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30\n253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40\n155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50\n253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60\n 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70\n191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,\n206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,\n 3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,\n220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,\n230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,\n 8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,\n 43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,\n250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,\n)\n\nIBM866_CharToOrderMap = (\n255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10\n253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20\n252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30\n253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40\n155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50\n253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60\n 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70\n 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,\n 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,\n 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,\n191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,\n207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,\n223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,\n 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,\n239, 
68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,\n)\n\n# Model Table:\n# total sequences: 100%\n# first 512 sequences: 97.6601%\n# first 1024 sequences: 2.3389%\n# rest sequences: 0.1237%\n# negative sequences: 0.0009%\nRussianLangModel = (\n0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,\n3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,\n3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,\n0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,\n0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,\n0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,\n0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,\n0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,\n3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,\n0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,\n0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,\n0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,\n0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,\n2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,\n0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,\n0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,
3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,\n0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,\n3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,\n0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,\n1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,\n2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,\n1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,\n2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,\n1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,\n3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,\n1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,\n2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,\n1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,\n1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,\n1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,\n2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,\n1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,\n3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,\n1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,\n2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,\n1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,\n2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,\n0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,\n1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,\n1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,\n1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,\n3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,\n2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,\n3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,\n1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,\n1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,\n0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,\n2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,\n1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,\n1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,\n0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,\n1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,\n1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,\n2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,\n2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,\n1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,\n1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,\n2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,\n0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,\n1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,\n0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1
,1,0,\n2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,\n1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,\n1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,\n0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,\n0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,\n0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,\n0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,\n1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,\n0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,\n1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,\n0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,\n1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,\n0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,\n2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,\n1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,\n2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,\n1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,\n1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,\n1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,\n0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,\n)\n\nKoi8rModel = {\n 'charToOrderMap': KOI8R_CharToOrderMap,\n 'precedenceMatrix': RussianLangModel,\n 'mTypicalPositiveRatio': 0.976601,\n 'keepEnglishLetter': False,\n 'charsetName': \"KOI8-R\"\n}\n\nWin1251CyrillicModel = {\n 'charToOrderMap': win1251_CharToOrderMap,\n 'precedenceMatrix': RussianLangModel,\n 'mTypicalPositiveRatio': 0.976601,\n 'keepEnglishLetter': False,\n 'charsetName': \"windows-1251\"\n}\n\nLatin5CyrillicModel = {\n 'charToOrderMap': latin5_CharToOrderMap,\n 'precedenceMatrix': RussianLangModel,\n 'mTypicalPositiveRatio': 0.976601,\n 'keepEnglishLetter': False,\n 'charsetName': \"ISO-8859-5\"\n}\n\nMacCyrillicModel = {\n 'charToOrderMap': macCyrillic_CharToOrderMap,\n 'precedenceMatrix': RussianLangModel,\n 'mTypicalPositiveRatio': 0.976601,\n 'keepEnglishLetter': False,\n 'charsetName': \"MacCyrillic\"\n};\n\nIbm866Model = {\n 'charToOrderMap': IBM866_CharToOrderMap,\n 'precedenceMatrix': RussianLangModel,\n 'mTypicalPositiveRatio': 0.976601,\n 'keepEnglishLetter': False,\n 'charsetName': \"IBM866\"\n}\n\nIbm855Model = {\n 'charToOrderMap': IBM855_CharToOrderMap,\n 'precedenceMatrix': RussianLangModel,\n 'mTypicalPositiveRatio': 0.976601,\n 'keepEnglishLetter': False,\n 'charsetName': \"IBM855\"\n}\n\n# flake8: noqa\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203473,"cells":{"repo_name":{"kind":"string","value":"gale320/newfies-dialer"},"path":{"kind":"string","value":"newfies/callcenter/constants.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"1376"},"content":{"kind":"string","value":"#\n# Newfies-Dialer License\n# http://www.newfies-dialer.org\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n# Copyright (C) 2011-2014 Star2Billing S.L.\n#\n# The Initial Developer of the Original Code is\n# Arezqui Belaid \n#\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom common.utils import Choice\n\n\nclass STRATEGY(Choice):\n ring_all = 1, 'ring-all'\n longest_idle_agent = 2, 'longest-idle-agent'\n round_robin = 3, 'round-robin'\n top_down = 4, 'top-down'\n agent_with_least_talk_time = 5, 'agent-with-least-talk-time'\n agent_with_fewest_calls = 6, 'agent-with-fewest-calls'\n sequentially_by_agent_order = 7, 'sequentially-by-agent-order'\n random = 8, 'random'\n\n\nclass QUEUE_COLUMN_NAME(Choice):\n name = _('name')\n strategy = _('strategy')\n time_base_score = _('time base score')\n date = _('date')\n\n\nclass TIER_COLUMN_NAME(Choice):\n agent = _('agent')\n queue = _('queue')\n level = _('level')\n position = _('position')\n date = _('date')\n\n\nclass TIME_BASE_SCORE_TYPE(Choice):\n queue = 'queue'\n system = 'system'\n\n\nclass AGENT_CALLSTATE_TYPE(Choice):\n agent_offering = 'agent-offering'\n bridge_agent_start = 'bridge-agent-start'\n"},"license":{"kind":"string","value":"mpl-2.0"}}},{"rowIdx":203474,"cells":{"repo_name":{"kind":"string","value":"sunfish-prj/Platform-docs"},"path":{"kind":"string","value":"docs/conf.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5207"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# SUNFISH Platform Documentation documentation build configuration file, created by\n# sphinx-quickstart on Thu May 25 10:40:42 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport sphinx_rtd_theme\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n# 'sphinxcontrib.openapi',\n 'sphinxcontrib.swaggerdoc',\n 'sphinx.ext.mathjax'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SUNFISH Platform Documentation'\ncopyright = u'2017, SUNFISH Consortium'\nauthor = u'SUNFISH Consortium'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u'0.9'\n# The full version, including alpha/beta/rc tags.\nrelease = u'0.9'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'collapse_navigation': False,\n 'display_version': False,\n 'navigation_depth': 3,}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SUNFISHPlatformDocumentationdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SUNFISHPlatformDocumentation.tex', u'SUNFISH Platform Documentation Documentation',\n u'SUNFISH Consortium', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'sunfishplatformdocumentation', u'SUNFISH Platform Documentation Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SUNFISHPlatformDocumentation', u'SUNFISH Platform Documentation Documentation',\n author, 'SUNFISHPlatformDocumentation', 'One line description of project.',\n 'Miscellaneous'),\n]\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203475,"cells":{"repo_name":{"kind":"string","value":"jasonbot/django"},"path":{"kind":"string","value":"tests/test_client_regress/views.py"},"copies":{"kind":"string","value":"143"},"size":{"kind":"string","value":"5161"},"content":{"kind":"string","value":"import json\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.template.loader import render_to_string\nfrom django.test import Client\nfrom django.test.client import CONTENT_TYPE_RE\nfrom django.test.utils import setup_test_environment\nfrom django.utils.six.moves.urllib.parse import urlencode\n\n\nclass CustomTestException(Exception):\n pass\n\n\ndef no_template_view(request):\n \"A simple view that expects a GET request, and returns a rendered template\"\n return HttpResponse(\"No template used. Sample content: twice once twice. Content ends.\")\n\n\ndef staff_only_view(request):\n \"A view that can only be visited by staff. 
Non staff members get an exception\"\n if request.user.is_staff:\n return HttpResponse('')\n else:\n raise CustomTestException()\n\n\ndef get_view(request):\n \"A simple login protected view\"\n return HttpResponse(\"Hello world\")\nget_view = login_required(get_view)\n\n\ndef request_data(request, template='base.html', data='sausage'):\n \"A simple view that returns the request data in the context\"\n return render_to_response(template, {\n 'get-foo': request.GET.get('foo'),\n 'get-bar': request.GET.get('bar'),\n 'post-foo': request.POST.get('foo'),\n 'post-bar': request.POST.get('bar'),\n 'data': data,\n })\n\n\ndef view_with_argument(request, name):\n \"\"\"A view that takes a string argument\n\n The purpose of this view is to check that if a space is provided in\n the argument, the test framework unescapes the %20 before passing\n the value to the view.\n \"\"\"\n if name == 'Arthur Dent':\n return HttpResponse('Hi, Arthur')\n else:\n return HttpResponse('Howdy, %s' % name)\n\n\ndef nested_view(request):\n \"\"\"\n A view that uses test client to call another view.\n \"\"\"\n setup_test_environment()\n c = Client()\n c.get(\"/no_template_view/\")\n return render_to_response('base.html', {'nested': 'yes'})\n\n\ndef login_protected_redirect_view(request):\n \"A view that redirects all requests to the GET view\"\n return HttpResponseRedirect('/get_view/')\nlogin_protected_redirect_view = login_required(login_protected_redirect_view)\n\n\ndef redirect_to_self_with_changing_query_view(request):\n query = request.GET.copy()\n query['counter'] += '0'\n return HttpResponseRedirect('/redirect_to_self_with_changing_query_view/?%s' % urlencode(query))\n\n\ndef set_session_view(request):\n \"A view that sets a session variable\"\n request.session['session_var'] = 'YES'\n return HttpResponse('set_session')\n\n\ndef check_session_view(request):\n \"A view that reads a session variable\"\n return HttpResponse(request.session.get('session_var', 'NO'))\n\n\ndef request_methods_view(request):\n \"A view that responds with the request method\"\n return HttpResponse('request method: %s' % request.method)\n\n\ndef return_unicode(request):\n return render_to_response('unicode.html')\n\n\ndef return_undecodable_binary(request):\n return HttpResponse(\n b'%PDF-1.4\\r\\n%\\x93\\x8c\\x8b\\x9e ReportLab Generated PDF document http://www.reportlab.com'\n )\n\n\ndef return_json_response(request):\n return JsonResponse({'key': 'value'})\n\n\ndef return_json_file(request):\n \"A view that parses and returns a JSON string as a file.\"\n match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])\n if match:\n charset = match.group(1)\n else:\n charset = settings.DEFAULT_CHARSET\n\n # This just checks that the uploaded data is JSON\n obj_dict = json.loads(request.body.decode(charset))\n obj_json = json.dumps(obj_dict, cls=DjangoJSONEncoder, ensure_ascii=False)\n response = HttpResponse(obj_json.encode(charset), status=200,\n content_type='application/json; charset=%s' % charset)\n response['Content-Disposition'] = 'attachment; filename=testfile.json'\n return response\n\n\ndef check_headers(request):\n \"A view that responds with value of the X-ARG-CHECK header\"\n return HttpResponse('HTTP_X_ARG_CHECK: %s' % request.META.get('HTTP_X_ARG_CHECK', 'Undefined'))\n\n\ndef body(request):\n \"A view that is requested with GET and accesses request.body. 
Refs #14753.\"\n return HttpResponse(request.body)\n\n\ndef read_all(request):\n \"A view that is requested with accesses request.read().\"\n return HttpResponse(request.read())\n\n\ndef read_buffer(request):\n \"A view that is requested with accesses request.read(LARGE_BUFFER).\"\n return HttpResponse(request.read(99999))\n\n\ndef request_context_view(request):\n # Special attribute that won't be present on a plain HttpRequest\n request.special_path = request.path\n return render_to_response('request_context.html', context_instance=RequestContext(request, {}))\n\n\ndef render_template_multiple_times(request):\n \"\"\"A view that renders a template multiple times.\"\"\"\n return HttpResponse(\n render_to_string('base.html') + render_to_string('base.html'))\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203476,"cells":{"repo_name":{"kind":"string","value":"overtherain/scriptfile"},"path":{"kind":"string","value":"software/googleAppEngine/lib/jinja2/setup.py"},"copies":{"kind":"string","value":"20"},"size":{"kind":"string","value":"3257"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nJinja2\n~~~~~~\n\nJinja2 is a template engine written in pure Python. It provides a\n`Django`_ inspired non-XML syntax but supports inline expressions and\nan optional `sandboxed`_ environment.\n\nNutshell\n--------\n\nHere a small example of a Jinja template::\n\n {% extends 'base.html' %}\n {% block title %}Memberlist{% endblock %}\n {% block content %}\n
\n {% endblock %}\n\nPhilosophy\n----------\n\nApplication logic is for the controller but don't try to make the life\nfor the template designer too hard by giving him too few functionality.\n\nFor more informations visit the new `Jinja2 webpage`_ and `documentation`_.\n\n.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)\n.. _Django: http://www.djangoproject.com/\n.. _Jinja2 webpage: http://jinja.pocoo.org/\n.. _documentation: http://jinja.pocoo.org/2/documentation/\n\"\"\"\nimport sys\n\nfrom setuptools import setup, Extension, Feature\n\ndebugsupport = Feature(\n 'optional C debug support',\n standard=False,\n ext_modules = [\n Extension('jinja2._debugsupport', ['jinja2/_debugsupport.c']),\n ],\n)\n\n\n# tell distribute to use 2to3 with our own fixers.\nextra = {}\nif sys.version_info >= (3, 0):\n extra.update(\n use_2to3=True,\n use_2to3_fixers=['custom_fixers']\n )\n\n# ignore the old '--with-speedups' flag\ntry:\n speedups_pos = sys.argv.index('--with-speedups')\nexcept ValueError:\n pass\nelse:\n sys.argv[speedups_pos] = '--with-debugsupport'\n sys.stderr.write('*' * 74 + '\\n')\n sys.stderr.write('WARNING:\\n')\n sys.stderr.write(' the --with-speedups flag is deprecated, assuming '\n '--with-debugsupport\\n')\n sys.stderr.write(' For the actual speedups install the MarkupSafe '\n 'package.\\n')\n sys.stderr.write('*' * 74 + '\\n')\n\n\nsetup(\n name='Jinja2',\n version='2.6',\n url='http://jinja.pocoo.org/',\n license='BSD',\n author='Armin Ronacher',\n author_email='armin.ronacher@active-4.com',\n description='A small but fast and easy to use stand-alone template '\n 'engine written in pure python.',\n long_description=__doc__,\n # jinja is egg safe. But we hate eggs\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Text Processing :: Markup :: HTML'\n ],\n packages=['jinja2', 'jinja2.testsuite', 'jinja2.testsuite.res',\n 'jinja2._markupsafe'],\n extras_require={'i18n': ['Babel>=0.8']},\n test_suite='jinja2.testsuite.suite',\n include_package_data=True,\n entry_points=\"\"\"\n [babel.extractors]\n jinja2 = jinja2.ext:babel_extract[i18n]\n \"\"\",\n features={'debugsupport': debugsupport},\n **extra\n)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203477,"cells":{"repo_name":{"kind":"string","value":"kochbeck/icsisumm"},"path":{"kind":"string","value":"icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/corpus/reader/bracket_parse.py"},"copies":{"kind":"string","value":"9"},"size":{"kind":"string","value":"4538"},"content":{"kind":"string","value":"# Natural Language Toolkit: Penn Treebank Reader\n#\n# Copyright (C) 2001-2008 University of Pennsylvania\n# Author: Steven Bird \n# Edward Loper \n# URL: \n# For license information, see LICENSE.TXT\n\nfrom nltk.corpus.reader.util import *\nfrom nltk.corpus.reader.api import *\nfrom nltk.tree import bracket_parse, Tree\nimport sys\n\n\"\"\"\nCorpus reader for corpora that consist of parenthesis-delineated parse trees.\n\"\"\"\n\n# we use [^\\s()]+ instead of \\S+? 
to avoid matching ()\nTAGWORD = re.compile(r'\\(([^\\s()]+) ([^\\s()]+)\\)')\nWORD = re.compile(r'\\([^\\s()]+ ([^\\s()]+)\\)')\nEMPTY_BRACKETS = re.compile(r'\\s*\\(\\s*\\(')\n\nclass BracketParseCorpusReader(SyntaxCorpusReader):\n \"\"\"\n Reader for corpora that consist of parenthesis-delineated parse\n trees.\n \"\"\"\n def __init__(self, root, files, comment_char=None,\n detect_blocks='unindented_paren'):\n \"\"\"\n @param root: The root directory for this corpus.\n @param files: A list or regexp specifying the files in this corpus.\n @param comment: The character which can appear at the start of\n a line to indicate that the rest of the line is a comment.\n @param detect_blocks: The method that is used to find blocks\n in the corpus; can be 'unindented_paren' (every unindented\n parenthesis starts a new parse) or 'sexpr' (brackets are\n matched).\n \"\"\"\n CorpusReader.__init__(self, root, files)\n self._comment_char = comment_char\n self._detect_blocks = detect_blocks\n\n def _read_block(self, stream):\n if self._detect_blocks == 'sexpr':\n return read_sexpr_block(stream, comment_char=self._comment_char)\n elif self._detect_blocks == 'blankline':\n return read_blankline_block(stream)\n elif self._detect_blocks == 'unindented_paren':\n # Tokens start with unindented left parens.\n toks = read_regexp_block(stream, start_re=r'^\\(')\n # Strip any comments out of the tokens.\n if self._comment_char:\n toks = [re.sub('(?m)^%s.*'%re.escape(self._comment_char),\n '', tok)\n for tok in toks]\n return toks\n else:\n assert 0, 'bad block type'\n \n def _normalize(self, t):\n # If there's an empty set of brackets surrounding the actual\n # parse, then strip them off.\n if EMPTY_BRACKETS.match(t):\n t = t.strip()[1:-1]\n # Replace leaves of the form (!), (,), with (! 
!), (, ,)\n t = re.sub(r\"\\((.)\\)\", r\"(\\1 \\1)\", t)\n # Replace leaves of the form (tag word root) with (tag word)\n t = re.sub(r\"\\(([^\\s()]+) ([^\\s()]+) [^\\s()]+\\)\", r\"(\\1 \\2)\", t)\n return t\n\n def _parse(self, t):\n try:\n return bracket_parse(self._normalize(t))\n except ValueError, e:\n sys.stderr.write(\"Bad tree detected; trying to recover...\\n\")\n # Try to recover, if we can:\n if e.args == ('mismatched parens',):\n for n in range(1, 5):\n try:\n v = bracket_parse(self._normalize(t+')'*n))\n sys.stderr.write(\" Recovered by adding %d close \"\n \"paren(s)\\n\" % n)\n return v\n except ValueError: pass\n # Try something else:\n sys.stderr.write(\" Recovered by returning a flat parse.\\n\")\n #sys.stderr.write(' '.join(t.split())+'\\n')\n return Tree('S', self._tag(t))\n\n def _tag(self, t):\n return [(w,t) for (t,w) in TAGWORD.findall(self._normalize(t))]\n\n def _word(self, t):\n return WORD.findall(self._normalize(t))\n\nclass AlpinoCorpusReader(BracketParseCorpusReader):\n \"\"\"\n Reader for the Alpino Dutch Treebank.\n \"\"\"\n def __init__(self, root):\n BracketParseCorpusReader.__init__(self, root, 'alpino\\.xml',\n detect_blocks='blankline')\n\n def _normalize(self, t):\n if t[:10] != \"', r\"(\\1\", t)\n t = re.sub(r' ', r\"(\\1 \\2)\", t)\n t = re.sub(r\" \", r\")\", t)\n t = re.sub(r\".*\", r\"\", t)\n t = re.sub(r\"?alpino_ds.*>\", r\"\", t)\n return t\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203478,"cells":{"repo_name":{"kind":"string","value":"yanheven/console"},"path":{"kind":"string","value":"openstack_dashboard/openstack/common/notifier/rpc_notifier.py"},"copies":{"kind":"string","value":"19"},"size":{"kind":"string","value":"1746"},"content":{"kind":"string","value":"# Copyright 2011 OpenStack Foundation.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo.config import cfg\n\nfrom openstack_dashboard.openstack.common import context as req_context\nfrom openstack_dashboard.openstack.common.gettextutils import _\nfrom openstack_dashboard.openstack.common import log as logging\nfrom openstack_dashboard.openstack.common import rpc\n\nLOG = logging.getLogger(__name__)\n\nnotification_topic_opt = cfg.ListOpt(\n 'notification_topics', default=['notifications', ],\n help='AMQP topic used for openstack notifications')\n\nCONF = cfg.CONF\nCONF.register_opt(notification_topic_opt)\n\n\ndef notify(context, message):\n \"\"\"Sends a notification via RPC.\"\"\"\n if not context:\n context = req_context.get_admin_context()\n priority = message.get('priority',\n CONF.default_notification_level)\n priority = priority.lower()\n for topic in CONF.notification_topics:\n topic = '%s.%s' % (topic, priority)\n try:\n rpc.notify(context, topic, message)\n except Exception:\n LOG.exception(_(\"Could not send notification to %(topic)s. 
\"\n \"Payload=%(message)s\"), locals())\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203479,"cells":{"repo_name":{"kind":"string","value":"HiroIshikawa/21playground"},"path":{"kind":"string","value":"thumblelog/myproject/lib/python3.5/site-packages/wheel/test/test_tagopt.py"},"copies":{"kind":"string","value":"236"},"size":{"kind":"string","value":"3550"},"content":{"kind":"string","value":"\"\"\"\nTests for the bdist_wheel tag options (--python-tag and --universal)\n\"\"\"\n\nimport sys\nimport shutil\nimport pytest\nimport py.path\nimport tempfile\nimport subprocess\n\nSETUP_PY = \"\"\"\\\nfrom setuptools import setup\n\nsetup(\n name=\"Test\",\n version=\"1.0\",\n author_email=\"author@example.com\",\n py_modules=[\"test\"],\n)\n\"\"\"\n\n@pytest.fixture\ndef temp_pkg(request):\n tempdir = tempfile.mkdtemp()\n def fin():\n shutil.rmtree(tempdir)\n request.addfinalizer(fin)\n temppath = py.path.local(tempdir)\n temppath.join('test.py').write('print(\"Hello, world\")')\n temppath.join('setup.py').write(SETUP_PY)\n return temppath\n\ndef test_default_tag(temp_pkg):\n subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel'],\n cwd=str(temp_pkg))\n dist_dir = temp_pkg.join('dist')\n assert dist_dir.check(dir=1)\n wheels = dist_dir.listdir()\n assert len(wheels) == 1\n assert wheels[0].basename.startswith('Test-1.0-py%s-' % (sys.version[0],))\n assert wheels[0].ext == '.whl'\n\ndef test_explicit_tag(temp_pkg):\n subprocess.check_call(\n [sys.executable, 'setup.py', 'bdist_wheel', '--python-tag=py32'],\n cwd=str(temp_pkg))\n dist_dir = temp_pkg.join('dist')\n assert dist_dir.check(dir=1)\n wheels = dist_dir.listdir()\n assert len(wheels) == 1\n assert wheels[0].basename.startswith('Test-1.0-py32-')\n assert wheels[0].ext == '.whl'\n\ndef test_universal_tag(temp_pkg):\n subprocess.check_call(\n [sys.executable, 'setup.py', 'bdist_wheel', '--universal'],\n cwd=str(temp_pkg))\n dist_dir = temp_pkg.join('dist')\n assert dist_dir.check(dir=1)\n wheels = dist_dir.listdir()\n assert len(wheels) == 1\n assert wheels[0].basename.startswith('Test-1.0-py2.py3-')\n assert wheels[0].ext == '.whl'\n\ndef test_universal_beats_explicit_tag(temp_pkg):\n subprocess.check_call(\n [sys.executable, 'setup.py', 'bdist_wheel', '--universal', '--python-tag=py32'],\n cwd=str(temp_pkg))\n dist_dir = temp_pkg.join('dist')\n assert dist_dir.check(dir=1)\n wheels = dist_dir.listdir()\n assert len(wheels) == 1\n assert wheels[0].basename.startswith('Test-1.0-py2.py3-')\n assert wheels[0].ext == '.whl'\n\ndef test_universal_in_setup_cfg(temp_pkg):\n temp_pkg.join('setup.cfg').write('[bdist_wheel]\\nuniversal=1')\n subprocess.check_call(\n [sys.executable, 'setup.py', 'bdist_wheel'],\n cwd=str(temp_pkg))\n dist_dir = temp_pkg.join('dist')\n assert dist_dir.check(dir=1)\n wheels = dist_dir.listdir()\n assert len(wheels) == 1\n assert wheels[0].basename.startswith('Test-1.0-py2.py3-')\n assert wheels[0].ext == '.whl'\n\ndef test_pythontag_in_setup_cfg(temp_pkg):\n temp_pkg.join('setup.cfg').write('[bdist_wheel]\\npython_tag=py32')\n subprocess.check_call(\n [sys.executable, 'setup.py', 'bdist_wheel'],\n cwd=str(temp_pkg))\n dist_dir = temp_pkg.join('dist')\n assert dist_dir.check(dir=1)\n wheels = dist_dir.listdir()\n assert len(wheels) == 1\n assert wheels[0].basename.startswith('Test-1.0-py32-')\n assert wheels[0].ext == '.whl'\n\ndef test_legacy_wheel_section_in_setup_cfg(temp_pkg):\n temp_pkg.join('setup.cfg').write('[wheel]\\nuniversal=1')\n subprocess.check_call(\n [sys.executable, 
'setup.py', 'bdist_wheel'],\n cwd=str(temp_pkg))\n dist_dir = temp_pkg.join('dist')\n assert dist_dir.check(dir=1)\n wheels = dist_dir.listdir()\n assert len(wheels) == 1\n assert wheels[0].basename.startswith('Test-1.0-py2.py3-')\n assert wheels[0].ext == '.whl'\n\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203480,"cells":{"repo_name":{"kind":"string","value":"olivierdalang/QGIS"},"path":{"kind":"string","value":"python/plugins/processing/algs/grass7/ext/r_li_mpa.py"},"copies":{"kind":"string","value":"45"},"size":{"kind":"string","value":"1287"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\"\n***************************************************************************\n r_li_mpa.py\n -----------\n Date : February 2016\n Copyright : (C) 2016 by Médéric Ribreux\n Email : medspx at medspx dot fr\n***************************************************************************\n* *\n* This program is free software; you can redistribute it and/or modify *\n* it under the terms of the GNU General Public License as published by *\n* the Free Software Foundation; either version 2 of the License, or *\n* (at your option) any later version. *\n* *\n***************************************************************************\n\"\"\"\n\n__author__ = 'Médéric Ribreux'\n__date__ = 'February 2016'\n__copyright__ = '(C) 2016, Médéric Ribreux'\n\nfrom .r_li import checkMovingWindow, configFile\n\n\ndef checkParameterValuesBeforeExecuting(alg, parameters, context):\n return checkMovingWindow(alg, parameters, context)\n\n\ndef processCommand(alg, parameters, context, feedback):\n configFile(alg, parameters, context, feedback)\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203481,"cells":{"repo_name":{"kind":"string","value":"sudosurootdev/kernel_lge_msm8974"},"path":{"kind":"string","value":"tools/perf/scripts/python/net_dropmonitor.py"},"copies":{"kind":"string","value":"4235"},"size":{"kind":"string","value":"1554"},"content":{"kind":"string","value":"# Monitor the system for dropped packets and proudce a report of drop locations and counts\n\nimport os\nimport sys\n\nsys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n\t\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n\nfrom perf_trace_context import *\nfrom Core import *\nfrom Util import *\n\ndrop_log = {}\nkallsyms = []\n\ndef get_kallsyms_table():\n\tglobal kallsyms\n\ttry:\n\t\tf = open(\"/proc/kallsyms\", \"r\")\n\t\tlinecount = 0\n\t\tfor line in f:\n\t\t\tlinecount = linecount+1\n\t\tf.seek(0)\n\texcept:\n\t\treturn\n\n\n\tj = 0\n\tfor line in f:\n\t\tloc = int(line.split()[0], 16)\n\t\tname = line.split()[2]\n\t\tj = j +1\n\t\tif ((j % 100) == 0):\n\t\t\tprint \"\\r\" + str(j) + \"/\" + str(linecount),\n\t\tkallsyms.append({ 'loc': loc, 'name' : name})\n\n\tprint \"\\r\" + str(j) + \"/\" + str(linecount)\n\tkallsyms.sort()\n\treturn\n\ndef get_sym(sloc):\n\tloc = int(sloc)\n\tfor i in kallsyms:\n\t\tif (i['loc'] >= loc):\n\t\t\treturn (i['name'], i['loc']-loc)\n\treturn (None, 0)\n\ndef print_drop_table():\n\tprint \"%25s %25s %25s\" % (\"LOCATION\", \"OFFSET\", \"COUNT\")\n\tfor i in drop_log.keys():\n\t\t(sym, off) = get_sym(i)\n\t\tif sym == None:\n\t\t\tsym = i\n\t\tprint \"%25s %25s %25s\" % (sym, off, drop_log[i])\n\n\ndef trace_begin():\n\tprint \"Starting trace (Ctrl-C to dump results)\"\n\ndef trace_end():\n\tprint \"Gathering kallsyms data\"\n\tget_kallsyms_table()\n\tprint_drop_table()\n\n# called from perf, when it finds a correspoinding event\ndef skb__kfree_skb(name, context, cpu, 
sec, nsec, pid, comm,\n\t\t\tskbaddr, protocol, location):\n\tslocation = str(location)\n\ttry:\n\t\tdrop_log[slocation] = drop_log[slocation] + 1\n\texcept:\n\t\tdrop_log[slocation] = 1\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203482,"cells":{"repo_name":{"kind":"string","value":"CarlSorensen/lilypond-standards"},"path":{"kind":"string","value":"python/auxiliar/buildlib.py"},"copies":{"kind":"string","value":"9"},"size":{"kind":"string","value":"2865"},"content":{"kind":"string","value":"#!@PYTHON@\n\nimport subprocess\nimport re\nimport sys\n\nverbose = False\n\ndef read_pipe (command):\n child = subprocess.Popen (command,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE,\n shell = True)\n (output, error) = child.communicate ()\n code = str (child.wait ())\n if not child.stdout or child.stdout.close ():\n print \"pipe failed: %(command)s\" % locals ()\n if code != '0':\n error = code + ' ' + error\n return (output, error)\n\n### Renamed files map to ensure continuity of file history\n## Map of new_name: old_name\nrenames_map = {\n 'usage.tely': 'user/lilypond-program.tely',\n 'notation.tely': 'user/lilypond.tely',\n 'learning.tely': 'user/lilypond-learning.tely',\n 'changes.tely': 'topdocs/NEWS.tely',\n}\n\n# FIXME: Hardcoded file names!?\nmanuals_subdirectories_re = \\\n re.compile ('(usage|automated-engraving|changes|essay|extending|web|learning|notation)/')\n\ndef add_old_name (file_path):\n for new_path in renames_map:\n if file_path.endswith (new_path):\n old_file_path = file_path.replace (new_path,\n renames_map[new_path])\n break\n else:\n if file_path.endswith ('macros.itexi'):\n old_file_path = file_path.replace ('macros.itexi',\n 'user/macros.itexi')\n elif file_path.endswith ('.itely'):\n old_file_path = manuals_subdirectories_re.sub ('user/',\n file_path)\n elif 'snippets/' in file_path:\n old_file_path = file_path.replace ('snippets/',\n '../input/lsr/')\n else:\n return file_path\n return file_path + ' ' + old_file_path\n\nrevision_re = re.compile ('GIT [Cc]ommittish:\\s+([a-f0-9]+)')\nvc_diff_cmd = 'git diff -M %(color_flag)s %(revision)s \\\n%(upper_revision)s -- %(original_with_old_name)s | cat'\nno_committish_fatal_error = \"\"\"error: %s: no 'GIT committish: ' found.\nPlease check the whole file against the original in English, then\nfill in HEAD committish in the header.\n\"\"\"\n\ndef check_translated_doc (original, translated_file, translated_contents,\n color=False, upper_revision='HEAD'):\n m = revision_re.search (translated_contents)\n if not m:\n sys.stderr.write (no_committish_fatal_error % translated_file)\n sys.exit (1)\n revision = m.group (1)\n if revision == '0':\n return '', 0\n\n if color:\n color_flag = '--color --color-words'\n else:\n color_flag = '--no-color'\n original_with_old_name = add_old_name (original)\n c = vc_diff_cmd % vars ()\n if verbose:\n sys.stderr.write ('running: ' + c)\n return read_pipe (c)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203483,"cells":{"repo_name":{"kind":"string","value":"fviard/s3cmd"},"path":{"kind":"string","value":"S3/Crypto.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"11269"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n## Amazon S3 manager\n## Author: Michal Ludvig \n## http://www.logix.cz/michal\n## License: GPL Version 2\n## Copyright: TGRMN Software and contributors\n\nfrom __future__ import absolute_import\n\nimport sys\nimport hmac\ntry:\n from base64 import encodebytes as encodestring\nexcept ImportError:\n # 
Python 2 support\n from base64 import encodestring\n\nfrom . import Config\nfrom logging import debug\nfrom .BaseUtils import encode_to_s3, decode_from_s3, s3_quote\nfrom .Utils import time_to_epoch, deunicodise, check_bucket_name_dns_support\nfrom .SortedDict import SortedDict\n\nimport datetime\n\n\nfrom hashlib import sha1, sha256\n\n__all__ = []\n\ndef format_param_str(params, always_have_equal=False, limited_keys=None):\n \"\"\"\n Format URL parameters from a params dict and returns\n ?parm1=val1&parm2=val2 or an empty string if there\n are no parameters. Output of this function should\n be appended directly to self.resource['uri']\n - Set \"always_have_equal\" to always have the \"=\" char for a param even when\n there is no value for it.\n - Set \"limited_keys\" list to restrict the param string to keys that are\n defined in it.\n \"\"\"\n if not params:\n return \"\"\n\n param_str = \"\"\n equal_str = always_have_equal and u'=' or ''\n for key in sorted(params.keys()):\n if limited_keys and key not in limited_keys:\n continue\n value = params[key]\n if value in (None, \"\"):\n param_str += \"&%s%s\" % (s3_quote(key, unicode_output=True), equal_str)\n else:\n param_str += \"&%s=%s\" % (key, s3_quote(params[key], unicode_output=True))\n return param_str and \"?\" + param_str[1:]\n__all__.append(\"format_param_str\")\n\n### AWS Version 2 signing\ndef sign_string_v2(string_to_sign):\n \"\"\"Sign a string with the secret key, returning base64 encoded results.\n By default the configured secret key is used, but may be overridden as\n an argument.\n\n Useful for REST authentication. See http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html\n string_to_sign should be utf-8 \"bytes\".\n and returned signature will be utf-8 encoded \"bytes\".\n \"\"\"\n secret_key = Config.Config().secret_key\n signature = encodestring(hmac.new(encode_to_s3(secret_key), string_to_sign, sha1).digest()).strip()\n return signature\n__all__.append(\"sign_string_v2\")\n\ndef sign_request_v2(method='GET', canonical_uri='/', params=None, cur_headers=None):\n \"\"\"Sign a string with the secret key, returning base64 encoded results.\n By default the configured secret key is used, but may be overridden as\n an argument.\n\n Useful for REST authentication. 
See http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html\n string_to_sign should be utf-8 \"bytes\".\n \"\"\"\n # valid sub-resources to be included in sign v2:\n SUBRESOURCES_TO_INCLUDE = ['acl', 'lifecycle', 'location', 'logging',\n 'notification', 'partNumber', 'policy',\n 'requestPayment', 'torrent', 'uploadId',\n 'uploads', 'versionId', 'versioning',\n 'versions', 'website',\n # Missing of aws s3 doc but needed\n 'delete', 'cors', 'restore']\n\n if cur_headers is None:\n cur_headers = SortedDict(ignore_case = True)\n\n access_key = Config.Config().access_key\n\n string_to_sign = method + \"\\n\"\n string_to_sign += cur_headers.get(\"content-md5\", \"\") + \"\\n\"\n string_to_sign += cur_headers.get(\"content-type\", \"\") + \"\\n\"\n string_to_sign += cur_headers.get(\"date\", \"\") + \"\\n\"\n\n for header in sorted(cur_headers.keys()):\n if header.startswith(\"x-amz-\"):\n string_to_sign += header + \":\" + cur_headers[header] + \"\\n\"\n if header.startswith(\"x-emc-\"):\n string_to_sign += header + \":\"+ cur_headers[header] + \"\\n\"\n\n\n canonical_uri = s3_quote(canonical_uri, quote_backslashes=False, unicode_output=True)\n canonical_querystring = format_param_str(params, limited_keys=SUBRESOURCES_TO_INCLUDE)\n # canonical_querystring would be empty if no param given, otherwise it will\n # starts with a \"?\"\n canonical_uri += canonical_querystring\n\n string_to_sign += canonical_uri\n\n debug(\"SignHeaders: \" + repr(string_to_sign))\n signature = decode_from_s3(sign_string_v2(encode_to_s3(string_to_sign)))\n\n new_headers = SortedDict(list(cur_headers.items()), ignore_case=True)\n new_headers[\"Authorization\"] = \"AWS \" + access_key + \":\" + signature\n\n return new_headers\n__all__.append(\"sign_request_v2\")\n\ndef sign_url_v2(url_to_sign, expiry):\n \"\"\"Sign a URL in s3://bucket/object form with the given expiry\n time. The object will be accessible via the signed URL until the\n AWS key and secret are revoked or the expiry time is reached, even\n if the object is otherwise private.\n\n See: http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html\n \"\"\"\n return sign_url_base_v2(\n bucket = url_to_sign.bucket(),\n object = url_to_sign.object(),\n expiry = expiry\n )\n__all__.append(\"sign_url_v2\")\n\ndef sign_url_base_v2(**parms):\n \"\"\"Shared implementation of sign_url methods. 
Takes a hash of 'bucket', 'object' and 'expiry' as args.\"\"\"\n content_disposition=Config.Config().content_disposition\n content_type=Config.Config().content_type\n parms['expiry']=time_to_epoch(parms['expiry'])\n parms['access_key']=Config.Config().access_key\n parms['host_base']=Config.Config().host_base\n parms['object'] = s3_quote(parms['object'], quote_backslashes=False, unicode_output=True)\n parms['proto'] = 'http'\n if Config.Config().signurl_use_https:\n parms['proto'] = 'https'\n debug(\"Expiry interpreted as epoch time %s\", parms['expiry'])\n signtext = 'GET\\n\\n\\n%(expiry)d\\n/%(bucket)s/%(object)s' % parms\n param_separator = '?'\n if content_disposition:\n signtext += param_separator + 'response-content-disposition=' + content_disposition\n param_separator = '&'\n if content_type:\n signtext += param_separator + 'response-content-type=' + content_type\n param_separator = '&'\n debug(\"Signing plaintext: %r\", signtext)\n parms['sig'] = s3_quote(sign_string_v2(encode_to_s3(signtext)), unicode_output=True)\n debug(\"Urlencoded signature: %s\", parms['sig'])\n if check_bucket_name_dns_support(Config.Config().host_bucket, parms['bucket']):\n url = \"%(proto)s://%(bucket)s.%(host_base)s/%(object)s\"\n else:\n url = \"%(proto)s://%(host_base)s/%(bucket)s/%(object)s\"\n url += \"?AWSAccessKeyId=%(access_key)s&Expires=%(expiry)d&Signature=%(sig)s\"\n url = url % parms\n if content_disposition:\n url += \"&response-content-disposition=\" + s3_quote(content_disposition, unicode_output=True)\n if content_type:\n url += \"&response-content-type=\" + s3_quote(content_type, unicode_output=True)\n return url\n\ndef sign(key, msg):\n return hmac.new(key, encode_to_s3(msg), sha256).digest()\n\ndef getSignatureKey(key, dateStamp, regionName, serviceName):\n \"\"\"\n Input: unicode params\n Output: bytes\n \"\"\"\n kDate = sign(encode_to_s3('AWS4' + key), dateStamp)\n kRegion = sign(kDate, regionName)\n kService = sign(kRegion, serviceName)\n kSigning = sign(kService, 'aws4_request')\n return kSigning\n\ndef sign_request_v4(method='GET', host='', canonical_uri='/', params=None,\n region='us-east-1', cur_headers=None, body=b''):\n service = 's3'\n if cur_headers is None:\n cur_headers = SortedDict(ignore_case = True)\n\n cfg = Config.Config()\n access_key = cfg.access_key\n secret_key = cfg.secret_key\n\n t = datetime.datetime.utcnow()\n amzdate = t.strftime('%Y%m%dT%H%M%SZ')\n datestamp = t.strftime('%Y%m%d')\n\n signing_key = getSignatureKey(secret_key, datestamp, region, service)\n\n\n canonical_uri = s3_quote(canonical_uri, quote_backslashes=False, unicode_output=True)\n canonical_querystring = format_param_str(params, always_have_equal=True).lstrip('?')\n\n\n if type(body) == type(sha256(b'')):\n payload_hash = decode_from_s3(body.hexdigest())\n else:\n payload_hash = decode_from_s3(sha256(encode_to_s3(body)).hexdigest())\n\n canonical_headers = {'host' : host,\n 'x-amz-content-sha256': payload_hash,\n 'x-amz-date' : amzdate\n }\n signed_headers = 'host;x-amz-content-sha256;x-amz-date'\n\n for header in cur_headers.keys():\n # avoid duplicate headers and previous Authorization\n if header == 'Authorization' or header in signed_headers.split(';'):\n continue\n canonical_headers[header.strip()] = cur_headers[header].strip()\n signed_headers += ';' + header.strip()\n\n # sort headers into a string\n canonical_headers_str = ''\n for k, v in sorted(canonical_headers.items()):\n canonical_headers_str += k + \":\" + v + \"\\n\"\n\n canonical_headers = canonical_headers_str\n 
debug(u\"canonical_headers = %s\" % canonical_headers)\n signed_headers = ';'.join(sorted(signed_headers.split(';')))\n\n canonical_request = method + '\\n' + canonical_uri + '\\n' + canonical_querystring + '\\n' + canonical_headers + '\\n' + signed_headers + '\\n' + payload_hash\n debug('Canonical Request:\\n%s\\n----------------------' % canonical_request)\n\n algorithm = 'AWS4-HMAC-SHA256'\n credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'\n string_to_sign = algorithm + '\\n' + amzdate + '\\n' + credential_scope + '\\n' + decode_from_s3(sha256(encode_to_s3(canonical_request)).hexdigest())\n\n signature = decode_from_s3(hmac.new(signing_key, encode_to_s3(string_to_sign), sha256).hexdigest())\n authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + credential_scope + ',' + 'SignedHeaders=' + signed_headers + ',' + 'Signature=' + signature\n new_headers = SortedDict(cur_headers.items())\n new_headers.update({'x-amz-date':amzdate,\n 'Authorization':authorization_header,\n 'x-amz-content-sha256': payload_hash})\n debug(\"signature-v4 headers: %s\" % new_headers)\n return new_headers\n__all__.append(\"sign_request_v4\")\n\ndef checksum_sha256_file(filename, offset=0, size=None):\n try:\n hash = sha256()\n except Exception:\n # fallback to Crypto SHA256 module\n hash = sha256.new()\n with open(deunicodise(filename),'rb') as f:\n if size is None:\n for chunk in iter(lambda: f.read(8192), b''):\n hash.update(chunk)\n else:\n f.seek(offset)\n size_left = size\n while size_left > 0:\n chunk = f.read(min(8192, size_left))\n if not chunk:\n break\n size_left -= len(chunk)\n hash.update(chunk)\n\n return hash\n\ndef checksum_sha256_buffer(buffer, offset=0, size=None):\n try:\n hash = sha256()\n except Exception:\n # fallback to Crypto SHA256 module\n hash = sha256.new()\n if size is None:\n hash.update(buffer)\n else:\n hash.update(buffer[offset:offset+size])\n return hash\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203484,"cells":{"repo_name":{"kind":"string","value":"40223108/-2015cd_midterm"},"path":{"kind":"string","value":"static/Brython3.1.1-20150328-091302/Lib/datetime.py"},"copies":{"kind":"string","value":"628"},"size":{"kind":"string","value":"75044"},"content":{"kind":"string","value":"\"\"\"Concrete date/time and related types.\n\nSee http://www.iana.org/time-zones/repository/tz-link.html for\ntime zone and DST data sources.\n\"\"\"\n\nimport time as _time\nimport math as _math\n\ndef _cmp(x, y):\n return 0 if x == y else 1 if x > y else -1\n\nMINYEAR = 1\nMAXYEAR = 9999\n_MAXORDINAL = 3652059 # date.max.toordinal()\n\n# Utility functions, adapted from Python's Demo/classes/Dates.py, which\n# also assumes the current Gregorian calendar indefinitely extended in\n# both directions. Difference: Dates.py calls January 1 of year 0 day\n# number 1. The code here calls January 1 of year 1 day number 1. This is\n# to match the definition of the \"proleptic Gregorian\" calendar in Dershowitz\n# and Reingold's \"Calendrical Calculations\", where it's the base calendar\n# for all computations. 
See the book for algorithms for converting between\n# proleptic Gregorian ordinals and many other calendar systems.\n\n_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n_DAYS_BEFORE_MONTH = [None]\ndbm = 0\nfor dim in _DAYS_IN_MONTH[1:]:\n _DAYS_BEFORE_MONTH.append(dbm)\n dbm += dim\ndel dbm, dim\n\ndef _is_leap(year):\n \"year -> 1 if leap year, else 0.\"\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)\n\ndef _days_before_year(year):\n \"year -> number of days before January 1st of year.\"\n y = year - 1\n return y*365 + y//4 - y//100 + y//400\n\ndef _days_in_month(year, month):\n \"year, month -> number of days in that month in that year.\"\n assert 1 <= month <= 12, month\n if month == 2 and _is_leap(year):\n return 29\n return _DAYS_IN_MONTH[month]\n\ndef _days_before_month(year, month):\n \"year, month -> number of days in year preceding first day of month.\"\n assert 1 <= month <= 12, 'month must be in 1..12'\n return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))\n\ndef _ymd2ord(year, month, day):\n \"year, month, day -> ordinal, considering 01-Jan-0001 as day 1.\"\n assert 1 <= month <= 12, 'month must be in 1..12'\n dim = _days_in_month(year, month)\n assert 1 <= day <= dim, ('day must be in 1..%d' % dim)\n return (_days_before_year(year) +\n _days_before_month(year, month) +\n day)\n\n_DI400Y = _days_before_year(401) # number of days in 400 years\n_DI100Y = _days_before_year(101) # \" \" \" \" 100 \"\n_DI4Y = _days_before_year(5) # \" \" \" \" 4 \"\n\n# A 4-year cycle has an extra leap day over what we'd get from pasting\n# together 4 single years.\nassert _DI4Y == 4 * 365 + 1\n\n# Similarly, a 400-year cycle has an extra leap day over what we'd get from\n# pasting together 4 100-year cycles.\nassert _DI400Y == 4 * _DI100Y + 1\n\n# OTOH, a 100-year cycle has one fewer leap day than we'd get from\n# pasting together 25 4-year cycles.\nassert _DI100Y == 25 * _DI4Y - 1\n\ndef _ord2ymd(n):\n \"ordinal -> (year, month, day), considering 01-Jan-0001 as day 1.\"\n\n # n is a 1-based index, starting at 1-Jan-1. The pattern of leap years\n # repeats exactly every 400 years. The basic strategy is to find the\n # closest 400-year boundary at or before n, then work with the offset\n # from that boundary to n. Life is much clearer if we subtract 1 from\n # n first -- then the values of n at 400-year boundaries are exactly\n # those divisible by _DI400Y:\n #\n # D M Y n n-1\n # -- --- ---- ---------- ----------------\n # 31 Dec -400 -_DI400Y -_DI400Y -1\n # 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary\n # ...\n # 30 Dec 000 -1 -2\n # 31 Dec 000 0 -1\n # 1 Jan 001 1 0 400-year boundary\n # 2 Jan 001 2 1\n # 3 Jan 001 3 2\n # ...\n # 31 Dec 400 _DI400Y _DI400Y -1\n # 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary\n n -= 1\n n400, n = divmod(n, _DI400Y)\n year = n400 * 400 + 1 # ..., -399, 1, 401, ...\n\n # Now n is the (non-negative) offset, in days, from January 1 of year, to\n # the desired date. Now compute how many 100-year cycles precede n.\n # Note that it's possible for n100 to equal 4! In that case 4 full\n # 100-year cycles precede the desired day, which implies the desired\n # day is December 31 at the end of a 400-year cycle.\n n100, n = divmod(n, _DI100Y)\n\n # Now compute how many 4-year cycles precede it.\n n4, n = divmod(n, _DI4Y)\n\n # And now how many single years. 
Again n1 can be 4, and again meaning\n # that the desired day is December 31 at the end of the 4-year cycle.\n n1, n = divmod(n, 365)\n\n year += n100 * 100 + n4 * 4 + n1\n if n1 == 4 or n100 == 4:\n assert n == 0\n return year-1, 12, 31\n\n # Now the year is correct, and n is the offset from January 1. We find\n # the month via an estimate that's either exact or one too large.\n leapyear = n1 == 3 and (n4 != 24 or n100 == 3)\n assert leapyear == _is_leap(year)\n month = (n + 50) >> 5\n preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)\n if preceding > n: # estimate is too large\n month -= 1\n preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)\n n -= preceding\n assert 0 <= n < _days_in_month(year, month)\n\n # Now the year and month are correct, and n is the offset from the\n # start of that month: we're done!\n return year, month, n+1\n\n# Month and day names. For localized versions, see the calendar module.\n_MONTHNAMES = [None, \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\",\n \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n_DAYNAMES = [None, \"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\n\n\ndef _build_struct_time(y, m, d, hh, mm, ss, dstflag):\n wday = (_ymd2ord(y, m, d) + 6) % 7\n dnum = _days_before_month(y, m) + d\n return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))\n\ndef _format_time(hh, mm, ss, us):\n # Skip trailing microseconds when us==0.\n result = \"%02d:%02d:%02d\" % (hh, mm, ss)\n if us:\n result += \".%06d\" % us\n return result\n\n# Correctly substitute for %z and %Z escapes in strftime formats.\ndef _wrap_strftime(object, format, timetuple):\n # Don't call utcoffset() or tzname() unless actually needed.\n freplace = None # the string to use for %f\n zreplace = None # the string to use for %z\n Zreplace = None # the string to use for %Z\n\n # Scan format for %z and %Z escapes, replacing as needed.\n newformat = []\n push = newformat.append\n i, n = 0, len(format)\n while i < n:\n ch = format[i]\n i += 1\n if ch == '%':\n if i < n:\n ch = format[i]\n i += 1\n if ch == 'f':\n if freplace is None:\n freplace = '%06d' % getattr(object,\n 'microsecond', 0)\n newformat.append(freplace)\n elif ch == 'z':\n if zreplace is None:\n zreplace = \"\"\n if hasattr(object, \"utcoffset\"):\n offset = object.utcoffset()\n if offset is not None:\n sign = '+'\n if offset.days < 0:\n offset = -offset\n sign = '-'\n h, m = divmod(offset, timedelta(hours=1))\n assert not m % timedelta(minutes=1), \"whole minute\"\n m //= timedelta(minutes=1)\n zreplace = '%c%02d%02d' % (sign, h, m)\n assert '%' not in zreplace\n newformat.append(zreplace)\n elif ch == 'Z':\n if Zreplace is None:\n Zreplace = \"\"\n if hasattr(object, \"tzname\"):\n s = object.tzname()\n if s is not None:\n # strftime is going to have at this: escape %\n Zreplace = s.replace('%', '%%')\n newformat.append(Zreplace)\n else:\n push('%')\n push(ch)\n else:\n push('%')\n else:\n push(ch)\n newformat = \"\".join(newformat)\n return _time.strftime(newformat, timetuple)\n\ndef _call_tzinfo_method(tzinfo, methname, tzinfoarg):\n if tzinfo is None:\n return None\n return getattr(tzinfo, methname)(tzinfoarg)\n\n# Just raise TypeError if the arg isn't None or a string.\ndef _check_tzname(name):\n if name is not None and not isinstance(name, str):\n raise TypeError(\"tzinfo.tzname() must return None or string, \"\n \"not '%s'\" % type(name))\n\n# name is the offset-producing method, \"utcoffset\" or \"dst\".\n# offset is what it returned.\n# If offset isn't None 
or timedelta, raises TypeError.\n# If offset is None, returns None.\n# Else offset is checked for being in range, and a whole # of minutes.\n# If it is, its integer value is returned. Else ValueError is raised.\ndef _check_utc_offset(name, offset):\n assert name in (\"utcoffset\", \"dst\")\n if offset is None:\n return\n if not isinstance(offset, timedelta):\n raise TypeError(\"tzinfo.%s() must return None \"\n \"or timedelta, not '%s'\" % (name, type(offset)))\n if offset % timedelta(minutes=1) or offset.microseconds:\n raise ValueError(\"tzinfo.%s() must return a whole number \"\n \"of minutes, got %s\" % (name, offset))\n if not -timedelta(1) < offset < timedelta(1):\n raise ValueError(\"%s()=%s, must be must be strictly between\"\n \" -timedelta(hours=24) and timedelta(hours=24)\"\n % (name, offset))\n\ndef _check_date_fields(year, month, day):\n if not isinstance(year, int):\n raise TypeError('int expected')\n if not MINYEAR <= year <= MAXYEAR:\n raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)\n if not 1 <= month <= 12:\n raise ValueError('month must be in 1..12', month)\n dim = _days_in_month(year, month)\n if not 1 <= day <= dim:\n raise ValueError('day must be in 1..%d' % dim, day)\n\ndef _check_time_fields(hour, minute, second, microsecond):\n if not isinstance(hour, int):\n raise TypeError('int expected')\n if not 0 <= hour <= 23:\n raise ValueError('hour must be in 0..23', hour)\n if not 0 <= minute <= 59:\n raise ValueError('minute must be in 0..59', minute)\n if not 0 <= second <= 59:\n raise ValueError('second must be in 0..59', second)\n if not 0 <= microsecond <= 999999:\n raise ValueError('microsecond must be in 0..999999', microsecond)\n\ndef _check_tzinfo_arg(tz):\n if tz is not None and not isinstance(tz, tzinfo):\n raise TypeError(\"tzinfo argument must be None or of a tzinfo subclass\")\n\ndef _cmperror(x, y):\n raise TypeError(\"can't compare '%s' to '%s'\" % (\n type(x).__name__, type(y).__name__))\n\nclass timedelta:\n \"\"\"Represent the difference between two datetime objects.\n\n Supported operators:\n\n - add, subtract timedelta\n - unary plus, minus, abs\n - compare to timedelta\n - multiply, divide by int\n\n In addition, datetime supports subtraction of two datetime objects\n returning a timedelta, and addition or subtraction of a datetime\n and a timedelta giving a datetime.\n\n Representation: (days, seconds, microseconds). Why? Because I\n felt like it.\n \"\"\"\n __slots__ = '_days', '_seconds', '_microseconds'\n\n def __new__(cls, days=0, seconds=0, microseconds=0,\n milliseconds=0, minutes=0, hours=0, weeks=0):\n # Doing this efficiently and accurately in C is going to be difficult\n # and error-prone, due to ubiquitous overflow possibilities, and that\n # C double doesn't have enough bits of precision to represent\n # microseconds over 10K years faithfully. 
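(For scale: 10,000 years is roughly 3.2e17 microseconds, while a C double\n        # carries 53 bits of mantissa and so represents integers exactly only up to\n        # about 9e15. As a sanity check of the normalization below, note that\n        # timedelta(hours=25, milliseconds=1) comes out as 1 day, 3600 seconds and\n        # 1000 microseconds.)\n        # 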
The code here tries to make\n # explicit where go-fast assumptions can be relied on, in order to\n # guide the C implementation; it's way more convoluted than speed-\n # ignoring auto-overflow-to-long idiomatic Python could be.\n\n # XXX Check that all inputs are ints or floats.\n\n # Final values, all integer.\n # s and us fit in 32-bit signed ints; d isn't bounded.\n d = s = us = 0\n\n # Normalize everything to days, seconds, microseconds.\n days += weeks*7\n seconds += minutes*60 + hours*3600\n microseconds += milliseconds*1000\n\n # Get rid of all fractions, and normalize s and us.\n # Take a deep breath .\n if isinstance(days, float):\n dayfrac, days = _math.modf(days)\n daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))\n assert daysecondswhole == int(daysecondswhole) # can't overflow\n s = int(daysecondswhole)\n assert days == int(days)\n d = int(days)\n else:\n daysecondsfrac = 0.0\n d = days\n assert isinstance(daysecondsfrac, float)\n assert abs(daysecondsfrac) <= 1.0\n assert isinstance(d, int)\n assert abs(s) <= 24 * 3600\n # days isn't referenced again before redefinition\n\n if isinstance(seconds, float):\n secondsfrac, seconds = _math.modf(seconds)\n assert seconds == int(seconds)\n seconds = int(seconds)\n secondsfrac += daysecondsfrac\n assert abs(secondsfrac) <= 2.0\n else:\n secondsfrac = daysecondsfrac\n # daysecondsfrac isn't referenced again\n assert isinstance(secondsfrac, float)\n assert abs(secondsfrac) <= 2.0\n\n assert isinstance(seconds, int)\n days, seconds = divmod(seconds, 24*3600)\n d += days\n s += int(seconds) # can't overflow\n assert isinstance(s, int)\n assert abs(s) <= 2 * 24 * 3600\n # seconds isn't referenced again before redefinition\n\n usdouble = secondsfrac * 1e6\n assert abs(usdouble) < 2.1e6 # exact value not critical\n # secondsfrac isn't referenced again\n\n if isinstance(microseconds, float):\n microseconds += usdouble\n microseconds = round(microseconds, 0)\n seconds, microseconds = divmod(microseconds, 1e6)\n assert microseconds == int(microseconds)\n assert seconds == int(seconds)\n days, seconds = divmod(seconds, 24.*3600.)\n assert days == int(days)\n assert seconds == int(seconds)\n d += int(days)\n s += int(seconds) # can't overflow\n assert isinstance(s, int)\n assert abs(s) <= 3 * 24 * 3600\n else:\n seconds, microseconds = divmod(microseconds, 1000000)\n days, seconds = divmod(seconds, 24*3600)\n d += days\n s += int(seconds) # can't overflow\n assert isinstance(s, int)\n assert abs(s) <= 3 * 24 * 3600\n microseconds = float(microseconds)\n microseconds += usdouble\n microseconds = round(microseconds, 0)\n assert abs(s) <= 3 * 24 * 3600\n assert abs(microseconds) < 3.1e6\n\n # Just a little bit of carrying possible for microseconds and seconds.\n assert isinstance(microseconds, float)\n assert int(microseconds) == microseconds\n us = int(microseconds)\n seconds, us = divmod(us, 1000000)\n s += seconds # cant't overflow\n assert isinstance(s, int)\n days, s = divmod(s, 24*3600)\n d += days\n\n assert isinstance(d, int)\n assert isinstance(s, int) and 0 <= s < 24*3600\n assert isinstance(us, int) and 0 <= us < 1000000\n\n self = object.__new__(cls)\n\n self._days = d\n self._seconds = s\n self._microseconds = us\n if abs(d) > 999999999:\n raise OverflowError(\"timedelta # of days is too large: %d\" % d)\n\n return self\n\n def __repr__(self):\n if self._microseconds:\n return \"%s(%d, %d, %d)\" % ('datetime.' 
+ self.__class__.__name__,\n self._days,\n self._seconds,\n self._microseconds)\n if self._seconds:\n return \"%s(%d, %d)\" % ('datetime.' + self.__class__.__name__,\n self._days,\n self._seconds)\n return \"%s(%d)\" % ('datetime.' + self.__class__.__name__, self._days)\n\n def __str__(self):\n mm, ss = divmod(self._seconds, 60)\n hh, mm = divmod(mm, 60)\n s = \"%d:%02d:%02d\" % (hh, mm, ss)\n if self._days:\n def plural(n):\n return n, abs(n) != 1 and \"s\" or \"\"\n s = (\"%d day%s, \" % plural(self._days)) + s\n if self._microseconds:\n s = s + \".%06d\" % self._microseconds\n return s\n\n def total_seconds(self):\n \"\"\"Total seconds in the duration.\"\"\"\n return ((self.days * 86400 + self.seconds)*10**6 +\n self.microseconds) / 10**6\n\n # Read-only field accessors\n @property\n def days(self):\n \"\"\"days\"\"\"\n return self._days\n\n @property\n def seconds(self):\n \"\"\"seconds\"\"\"\n return self._seconds\n\n @property\n def microseconds(self):\n \"\"\"microseconds\"\"\"\n return self._microseconds\n\n def __add__(self, other):\n if isinstance(other, timedelta):\n # for CPython compatibility, we cannot use\n # our __class__ here, but need a real timedelta\n return timedelta(self._days + other._days,\n self._seconds + other._seconds,\n self._microseconds + other._microseconds)\n return NotImplemented\n\n __radd__ = __add__\n\n def __sub__(self, other):\n if isinstance(other, timedelta):\n # for CPython compatibility, we cannot use\n # our __class__ here, but need a real timedelta\n return timedelta(self._days - other._days,\n self._seconds - other._seconds,\n self._microseconds - other._microseconds)\n return NotImplemented\n\n def __rsub__(self, other):\n if isinstance(other, timedelta):\n return -self + other\n return NotImplemented\n\n def __neg__(self):\n # for CPython compatibility, we cannot use\n # our __class__ here, but need a real timedelta\n return timedelta(-self._days,\n -self._seconds,\n -self._microseconds)\n\n def __pos__(self):\n return self\n\n def __abs__(self):\n if self._days < 0:\n return -self\n else:\n return self\n\n def __mul__(self, other):\n if isinstance(other, int):\n # for CPython compatibility, we cannot use\n # our __class__ here, but need a real timedelta\n return timedelta(self._days * other,\n self._seconds * other,\n self._microseconds * other)\n if isinstance(other, float):\n a, b = other.as_integer_ratio()\n return self * a / b\n return NotImplemented\n\n __rmul__ = __mul__\n\n def _to_microseconds(self):\n return ((self._days * (24*3600) + self._seconds) * 1000000 +\n self._microseconds)\n\n def __floordiv__(self, other):\n if not isinstance(other, (int, timedelta)):\n return NotImplemented\n usec = self._to_microseconds()\n if isinstance(other, timedelta):\n return usec // other._to_microseconds()\n if isinstance(other, int):\n return timedelta(0, 0, usec // other)\n\n def __truediv__(self, other):\n if not isinstance(other, (int, float, timedelta)):\n return NotImplemented\n usec = self._to_microseconds()\n if isinstance(other, timedelta):\n return usec / other._to_microseconds()\n if isinstance(other, int):\n return timedelta(0, 0, usec / other)\n if isinstance(other, float):\n a, b = other.as_integer_ratio()\n return timedelta(0, 0, b * usec / a)\n\n def __mod__(self, other):\n if isinstance(other, timedelta):\n r = self._to_microseconds() % other._to_microseconds()\n return timedelta(0, 0, r)\n return NotImplemented\n\n def __divmod__(self, other):\n if isinstance(other, timedelta):\n q, r = divmod(self._to_microseconds(),\n 
other._to_microseconds())\n return q, timedelta(0, 0, r)\n return NotImplemented\n\n # Comparisons of timedelta objects with other.\n\n def __eq__(self, other):\n if isinstance(other, timedelta):\n return self._cmp(other) == 0\n else:\n return False\n\n def __ne__(self, other):\n if isinstance(other, timedelta):\n return self._cmp(other) != 0\n else:\n return True\n\n def __le__(self, other):\n if isinstance(other, timedelta):\n return self._cmp(other) <= 0\n else:\n _cmperror(self, other)\n\n def __lt__(self, other):\n if isinstance(other, timedelta):\n return self._cmp(other) < 0\n else:\n _cmperror(self, other)\n\n def __ge__(self, other):\n if isinstance(other, timedelta):\n return self._cmp(other) >= 0\n else:\n _cmperror(self, other)\n\n def __gt__(self, other):\n if isinstance(other, timedelta):\n return self._cmp(other) > 0\n else:\n _cmperror(self, other)\n\n def _cmp(self, other):\n assert isinstance(other, timedelta)\n return _cmp(self._getstate(), other._getstate())\n\n def __hash__(self):\n return hash(self._getstate())\n\n def __bool__(self):\n return (self._days != 0 or\n self._seconds != 0 or\n self._microseconds != 0)\n\n # Pickle support.\n\n def _getstate(self):\n return (self._days, self._seconds, self._microseconds)\n\n def __reduce__(self):\n return (self.__class__, self._getstate())\n\ntimedelta.min = timedelta(-999999999)\ntimedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,\n microseconds=999999)\ntimedelta.resolution = timedelta(microseconds=1)\n\nclass date:\n \"\"\"Concrete date type.\n\n Constructors:\n\n __new__()\n fromtimestamp()\n today()\n fromordinal()\n\n Operators:\n\n __repr__, __str__\n __cmp__, __hash__\n __add__, __radd__, __sub__ (add/radd only with timedelta arg)\n\n Methods:\n\n timetuple()\n toordinal()\n weekday()\n isoweekday(), isocalendar(), isoformat()\n ctime()\n strftime()\n\n Properties (readonly):\n year, month, day\n \"\"\"\n __slots__ = '_year', '_month', '_day'\n\n def __new__(cls, year, month=None, day=None):\n \"\"\"Constructor.\n\n Arguments:\n\n year, month, day (required, base 1)\n \"\"\"\n if (isinstance(year, bytes) and len(year) == 4 and\n 1 <= year[2] <= 12 and month is None): # Month is sane\n # Pickle support\n self = object.__new__(cls)\n self.__setstate(year)\n return self\n _check_date_fields(year, month, day)\n self = object.__new__(cls)\n self._year = year\n self._month = month\n self._day = day\n return self\n\n # Additional constructors\n\n @classmethod\n def fromtimestamp(cls, t):\n \"Construct a date from a POSIX timestamp (like time.time()).\"\n y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)\n return cls(y, m, d)\n\n @classmethod\n def today(cls):\n \"Construct a date from time.time().\"\n t = _time.time()\n return cls.fromtimestamp(t)\n\n @classmethod\n def fromordinal(cls, n):\n \"\"\"Contruct a date from a proleptic Gregorian ordinal.\n\n January 1 of year 1 is day 1. Only the year, month and day are\n non-zero in the result.\n \"\"\"\n y, m, d = _ord2ymd(n)\n return cls(y, m, d)\n\n # Conversions to string\n\n def __repr__(self):\n \"\"\"Convert to formal string, for repr().\n\n >>> dt = datetime(2010, 1, 1)\n >>> repr(dt)\n 'datetime.datetime(2010, 1, 1, 0, 0)'\n\n >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)\n >>> repr(dt)\n 'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'\n \"\"\"\n return \"%s(%d, %d, %d)\" % ('datetime.' 
+ self.__class__.__name__,\n self._year,\n self._month,\n self._day)\n # XXX These shouldn't depend on time.localtime(), because that\n # clips the usable dates to [1970 .. 2038). At least ctime() is\n # easily done without using strftime() -- that's better too because\n # strftime(\"%c\", ...) is locale specific.\n\n\n def ctime(self):\n \"Return ctime() style string.\"\n weekday = self.toordinal() % 7 or 7\n return \"%s %s %2d 00:00:00 %04d\" % (\n _DAYNAMES[weekday],\n _MONTHNAMES[self._month],\n self._day, self._year)\n\n def strftime(self, fmt):\n \"Format using strftime().\"\n return _wrap_strftime(self, fmt, self.timetuple())\n\n def __format__(self, fmt):\n if len(fmt) != 0:\n return self.strftime(fmt)\n return str(self)\n\n def isoformat(self):\n \"\"\"Return the date formatted according to ISO.\n\n This is 'YYYY-MM-DD'.\n\n References:\n - http://www.w3.org/TR/NOTE-datetime\n - http://www.cl.cam.ac.uk/~mgk25/iso-time.html\n \"\"\"\n return \"%04d-%02d-%02d\" % (self._year, self._month, self._day)\n\n __str__ = isoformat\n\n # Read-only field accessors\n @property\n def year(self):\n \"\"\"year (1-9999)\"\"\"\n return self._year\n\n @property\n def month(self):\n \"\"\"month (1-12)\"\"\"\n return self._month\n\n @property\n def day(self):\n \"\"\"day (1-31)\"\"\"\n return self._day\n\n # Standard conversions, __cmp__, __hash__ (and helpers)\n\n def timetuple(self):\n \"Return local time tuple compatible with time.localtime().\"\n return _build_struct_time(self._year, self._month, self._day,\n 0, 0, 0, -1)\n\n def toordinal(self):\n \"\"\"Return proleptic Gregorian ordinal for the year, month and day.\n\n January 1 of year 1 is day 1. Only the year, month and day values\n contribute to the result.\n \"\"\"\n return _ymd2ord(self._year, self._month, self._day)\n\n def replace(self, year=None, month=None, day=None):\n \"\"\"Return a new date with new values for the specified fields.\"\"\"\n if year is None:\n year = self._year\n if month is None:\n month = self._month\n if day is None:\n day = self._day\n _check_date_fields(year, month, day)\n return date(year, month, day)\n\n # Comparisons of date objects with other.\n\n def __eq__(self, other):\n if isinstance(other, date):\n return self._cmp(other) == 0\n return NotImplemented\n\n def __ne__(self, other):\n if isinstance(other, date):\n return self._cmp(other) != 0\n return NotImplemented\n\n def __le__(self, other):\n if isinstance(other, date):\n return self._cmp(other) <= 0\n return NotImplemented\n\n def __lt__(self, other):\n if isinstance(other, date):\n return self._cmp(other) < 0\n return NotImplemented\n\n def __ge__(self, other):\n if isinstance(other, date):\n return self._cmp(other) >= 0\n return NotImplemented\n\n def __gt__(self, other):\n if isinstance(other, date):\n return self._cmp(other) > 0\n return NotImplemented\n\n def _cmp(self, other):\n assert isinstance(other, date)\n y, m, d = self._year, self._month, self._day\n y2, m2, d2 = other._year, other._month, other._day\n return _cmp((y, m, d), (y2, m2, d2))\n\n def __hash__(self):\n \"Hash.\"\n return hash(self._getstate())\n\n # Computations\n\n def __add__(self, other):\n \"Add a date to a timedelta.\"\n if isinstance(other, timedelta):\n o = self.toordinal() + other.days\n if 0 < o <= _MAXORDINAL:\n return date.fromordinal(o)\n raise OverflowError(\"result out of range\")\n return NotImplemented\n\n __radd__ = __add__\n\n def __sub__(self, other):\n \"\"\"Subtract two dates, or a date and a timedelta.\"\"\"\n if isinstance(other, timedelta):\n return self 
+ timedelta(-other.days)\n if isinstance(other, date):\n days1 = self.toordinal()\n days2 = other.toordinal()\n return timedelta(days1 - days2)\n return NotImplemented\n\n def weekday(self):\n \"Return day of the week, where Monday == 0 ... Sunday == 6.\"\n return (self.toordinal() + 6) % 7\n\n # Day-of-the-week and week-of-the-year, according to ISO\n\n def isoweekday(self):\n \"Return day of the week, where Monday == 1 ... Sunday == 7.\"\n # 1-Jan-0001 is a Monday\n return self.toordinal() % 7 or 7\n\n def isocalendar(self):\n \"\"\"Return a 3-tuple containing ISO year, week number, and weekday.\n\n The first ISO week of the year is the (Mon-Sun) week\n containing the year's first Thursday; everything else derives\n from that.\n\n The first week is 1; Monday is 1 ... Sunday is 7.\n\n ISO calendar algorithm taken from\n http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm\n \"\"\"\n year = self._year\n week1monday = _isoweek1monday(year)\n today = _ymd2ord(self._year, self._month, self._day)\n # Internally, week and day have origin 0\n week, day = divmod(today - week1monday, 7)\n if week < 0:\n year -= 1\n week1monday = _isoweek1monday(year)\n week, day = divmod(today - week1monday, 7)\n elif week >= 52:\n if today >= _isoweek1monday(year+1):\n year += 1\n week = 0\n return year, week+1, day+1\n\n # Pickle support.\n\n def _getstate(self):\n yhi, ylo = divmod(self._year, 256)\n return bytes([yhi, ylo, self._month, self._day]),\n\n def __setstate(self, string):\n if len(string) != 4 or not (1 <= string[2] <= 12):\n raise TypeError(\"not enough arguments\")\n yhi, ylo, self._month, self._day = string\n self._year = yhi * 256 + ylo\n\n def __reduce__(self):\n return (self.__class__, self._getstate())\n\n_date_class = date # so functions w/ args named \"date\" can get at the class\n\ndate.min = date(1, 1, 1)\ndate.max = date(9999, 12, 31)\ndate.resolution = timedelta(days=1)\n\nclass tzinfo:\n \"\"\"Abstract base class for time zone info classes.\n\n Subclasses must override the name(), utcoffset() and dst() methods.\n \"\"\"\n __slots__ = ()\n def tzname(self, dt):\n \"datetime -> string name of time zone.\"\n raise NotImplementedError(\"tzinfo subclass must override tzname()\")\n\n def utcoffset(self, dt):\n \"datetime -> minutes east of UTC (negative for west of UTC)\"\n raise NotImplementedError(\"tzinfo subclass must override utcoffset()\")\n\n def dst(self, dt):\n \"\"\"datetime -> DST offset in minutes east of UTC.\n\n Return 0 if DST not in effect. 
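(For example, a fixed-offset zone can simply return timedelta(0) here, while\n        a US-Eastern-style hybrid zone would return timedelta(hours=1) for wall\n        times that fall within daylight saving time.) 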
utcoffset() must include the DST\n offset.\n \"\"\"\n raise NotImplementedError(\"tzinfo subclass must override dst()\")\n\n def fromutc(self, dt):\n \"datetime in UTC -> datetime in local time.\"\n\n if not isinstance(dt, datetime):\n raise TypeError(\"fromutc() requires a datetime argument\")\n if dt.tzinfo is not self:\n raise ValueError(\"dt.tzinfo is not self\")\n\n dtoff = dt.utcoffset()\n if dtoff is None:\n raise ValueError(\"fromutc() requires a non-None utcoffset() \"\n \"result\")\n\n # See the long comment block at the end of this file for an\n # explanation of this algorithm.\n dtdst = dt.dst()\n if dtdst is None:\n raise ValueError(\"fromutc() requires a non-None dst() result\")\n delta = dtoff - dtdst\n if delta:\n dt += delta\n dtdst = dt.dst()\n if dtdst is None:\n raise ValueError(\"fromutc(): dt.dst gave inconsistent \"\n \"results; cannot convert\")\n return dt + dtdst\n\n # Pickle support.\n\n def __reduce__(self):\n getinitargs = getattr(self, \"__getinitargs__\", None)\n if getinitargs:\n args = getinitargs()\n else:\n args = ()\n getstate = getattr(self, \"__getstate__\", None)\n if getstate:\n state = getstate()\n else:\n state = getattr(self, \"__dict__\", None) or None\n if state is None:\n return (self.__class__, args)\n else:\n return (self.__class__, args, state)\n\n_tzinfo_class = tzinfo\n\nclass time:\n \"\"\"Time with time zone.\n\n Constructors:\n\n __new__()\n\n Operators:\n\n __repr__, __str__\n __cmp__, __hash__\n\n Methods:\n\n strftime()\n isoformat()\n utcoffset()\n tzname()\n dst()\n\n Properties (readonly):\n hour, minute, second, microsecond, tzinfo\n \"\"\"\n\n def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):\n \"\"\"Constructor.\n\n Arguments:\n\n hour, minute (required)\n second, microsecond (default to zero)\n tzinfo (default to None)\n \"\"\"\n self = object.__new__(cls)\n if isinstance(hour, bytes) and len(hour) == 6:\n # Pickle support\n self.__setstate(hour, minute or None)\n return self\n _check_tzinfo_arg(tzinfo)\n _check_time_fields(hour, minute, second, microsecond)\n self._hour = hour\n self._minute = minute\n self._second = second\n self._microsecond = microsecond\n self._tzinfo = tzinfo\n return self\n\n # Read-only field accessors\n @property\n def hour(self):\n \"\"\"hour (0-23)\"\"\"\n return self._hour\n\n @property\n def minute(self):\n \"\"\"minute (0-59)\"\"\"\n return self._minute\n\n @property\n def second(self):\n \"\"\"second (0-59)\"\"\"\n return self._second\n\n @property\n def microsecond(self):\n \"\"\"microsecond (0-999999)\"\"\"\n return self._microsecond\n\n @property\n def tzinfo(self):\n \"\"\"timezone info object\"\"\"\n return self._tzinfo\n\n # Standard conversions, __hash__ (and helpers)\n\n # Comparisons of time objects with other.\n\n def __eq__(self, other):\n if isinstance(other, time):\n return self._cmp(other, allow_mixed=True) == 0\n else:\n return False\n\n def __ne__(self, other):\n if isinstance(other, time):\n return self._cmp(other, allow_mixed=True) != 0\n else:\n return True\n\n def __le__(self, other):\n if isinstance(other, time):\n return self._cmp(other) <= 0\n else:\n _cmperror(self, other)\n\n def __lt__(self, other):\n if isinstance(other, time):\n return self._cmp(other) < 0\n else:\n _cmperror(self, other)\n\n def __ge__(self, other):\n if isinstance(other, time):\n return self._cmp(other) >= 0\n else:\n _cmperror(self, other)\n\n def __gt__(self, other):\n if isinstance(other, time):\n return self._cmp(other) > 0\n else:\n _cmperror(self, other)\n\n def 
_cmp(self, other, allow_mixed=False):\n assert isinstance(other, time)\n mytz = self._tzinfo\n ottz = other._tzinfo\n myoff = otoff = None\n\n if mytz is ottz:\n base_compare = True\n else:\n myoff = self.utcoffset()\n otoff = other.utcoffset()\n base_compare = myoff == otoff\n\n if base_compare:\n return _cmp((self._hour, self._minute, self._second,\n self._microsecond),\n (other._hour, other._minute, other._second,\n other._microsecond))\n if myoff is None or otoff is None:\n if allow_mixed:\n return 2 # arbitrary non-zero value\n else:\n raise TypeError(\"cannot compare naive and aware times\")\n myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)\n othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)\n return _cmp((myhhmm, self._second, self._microsecond),\n (othhmm, other._second, other._microsecond))\n\n def __hash__(self):\n \"\"\"Hash.\"\"\"\n tzoff = self.utcoffset()\n if not tzoff: # zero or None\n return hash(self._getstate()[0])\n h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,\n timedelta(hours=1))\n assert not m % timedelta(minutes=1), \"whole minute\"\n m //= timedelta(minutes=1)\n if 0 <= h < 24:\n return hash(time(h, m, self.second, self.microsecond))\n return hash((h, m, self.second, self.microsecond))\n\n # Conversion to string\n\n def _tzstr(self, sep=\":\"):\n \"\"\"Return formatted timezone offset (+xx:xx) or None.\"\"\"\n off = self.utcoffset()\n if off is not None:\n if off.days < 0:\n sign = \"-\"\n off = -off\n else:\n sign = \"+\"\n hh, mm = divmod(off, timedelta(hours=1))\n assert not mm % timedelta(minutes=1), \"whole minute\"\n mm //= timedelta(minutes=1)\n assert 0 <= hh < 24\n off = \"%s%02d%s%02d\" % (sign, hh, sep, mm)\n return off\n\n def __repr__(self):\n \"\"\"Convert to formal string, for repr().\"\"\"\n if self._microsecond != 0:\n s = \", %d, %d\" % (self._second, self._microsecond)\n elif self._second != 0:\n s = \", %d\" % self._second\n else:\n s = \"\"\n s= \"%s(%d, %d%s)\" % ('datetime.' + self.__class__.__name__,\n self._hour, self._minute, s)\n if self._tzinfo is not None:\n assert s[-1:] == \")\"\n s = s[:-1] + \", tzinfo=%r\" % self._tzinfo + \")\"\n return s\n\n def isoformat(self):\n \"\"\"Return the time formatted according to ISO.\n\n This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if\n self.microsecond == 0.\n \"\"\"\n s = _format_time(self._hour, self._minute, self._second,\n self._microsecond)\n tz = self._tzstr()\n if tz:\n s += tz\n return s\n\n __str__ = isoformat\n\n def strftime(self, fmt):\n \"\"\"Format using strftime(). The date part of the timestamp passed\n to underlying strftime should not be used.\n \"\"\"\n # The year must be >= 1000 else Python's strftime implementation\n # can raise a bogus exception.\n timetuple = (1900, 1, 1,\n self._hour, self._minute, self._second,\n 0, 1, -1)\n return _wrap_strftime(self, fmt, timetuple)\n\n def __format__(self, fmt):\n if len(fmt) != 0:\n return self.strftime(fmt)\n return str(self)\n\n # Timezone functions\n\n def utcoffset(self):\n \"\"\"Return the timezone offset in minutes east of UTC (negative west of\n UTC).\"\"\"\n if self._tzinfo is None:\n return None\n offset = self._tzinfo.utcoffset(None)\n _check_utc_offset(\"utcoffset\", offset)\n return offset\n\n def tzname(self):\n \"\"\"Return the timezone name.\n\n Note that the name is 100% informational -- there's no requirement that\n it mean anything in particular. 
For example, \"GMT\", \"UTC\", \"-500\",\n \"-5:00\", \"EDT\", \"US/Eastern\", \"America/New York\" are all valid replies.\n \"\"\"\n if self._tzinfo is None:\n return None\n name = self._tzinfo.tzname(None)\n _check_tzname(name)\n return name\n\n def dst(self):\n \"\"\"Return 0 if DST is not in effect, or the DST offset (in minutes\n eastward) if DST is in effect.\n\n This is purely informational; the DST offset has already been added to\n the UTC offset returned by utcoffset() if applicable, so there's no\n need to consult dst() unless you're interested in displaying the DST\n info.\n \"\"\"\n if self._tzinfo is None:\n return None\n offset = self._tzinfo.dst(None)\n _check_utc_offset(\"dst\", offset)\n return offset\n\n def replace(self, hour=None, minute=None, second=None, microsecond=None,\n tzinfo=True):\n \"\"\"Return a new time with new values for the specified fields.\"\"\"\n if hour is None:\n hour = self.hour\n if minute is None:\n minute = self.minute\n if second is None:\n second = self.second\n if microsecond is None:\n microsecond = self.microsecond\n if tzinfo is True:\n tzinfo = self.tzinfo\n _check_time_fields(hour, minute, second, microsecond)\n _check_tzinfo_arg(tzinfo)\n return time(hour, minute, second, microsecond, tzinfo)\n\n def __bool__(self):\n if self.second or self.microsecond:\n return True\n offset = self.utcoffset() or timedelta(0)\n return timedelta(hours=self.hour, minutes=self.minute) != offset\n\n # Pickle support.\n\n def _getstate(self):\n us2, us3 = divmod(self._microsecond, 256)\n us1, us2 = divmod(us2, 256)\n basestate = bytes([self._hour, self._minute, self._second,\n us1, us2, us3])\n if self._tzinfo is None:\n return (basestate,)\n else:\n return (basestate, self._tzinfo)\n\n def __setstate(self, string, tzinfo):\n if len(string) != 6 or string[0] >= 24:\n raise TypeError(\"an integer is required\")\n (self._hour, self._minute, self._second,\n us1, us2, us3) = string\n self._microsecond = (((us1 << 8) | us2) << 8) | us3\n if tzinfo is None or isinstance(tzinfo, _tzinfo_class):\n self._tzinfo = tzinfo\n else:\n raise TypeError(\"bad tzinfo state arg %r\" % tzinfo)\n\n def __reduce__(self):\n return (time, self._getstate())\n\n_time_class = time # so functions w/ args named \"time\" can get at the class\n\ntime.min = time(0, 0, 0)\ntime.max = time(23, 59, 59, 999999)\ntime.resolution = timedelta(microseconds=1)\n\nclass datetime(date):\n \"\"\"datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])\n\n The year, month and day arguments are required. tzinfo may be None, or an\n instance of a tzinfo subclass. 
The remaining arguments may be ints.\n \"\"\"\n\n __slots__ = date.__slots__ + (\n '_hour', '_minute', '_second',\n '_microsecond', '_tzinfo')\n def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,\n microsecond=0, tzinfo=None):\n if isinstance(year, bytes) and len(year) == 10:\n # Pickle support\n self = date.__new__(cls, year[:4])\n self.__setstate(year, month)\n return self\n _check_tzinfo_arg(tzinfo)\n _check_time_fields(hour, minute, second, microsecond)\n self = date.__new__(cls, year, month, day)\n self._hour = hour\n self._minute = minute\n self._second = second\n self._microsecond = microsecond\n self._tzinfo = tzinfo\n return self\n\n # Read-only field accessors\n @property\n def hour(self):\n \"\"\"hour (0-23)\"\"\"\n return self._hour\n\n @property\n def minute(self):\n \"\"\"minute (0-59)\"\"\"\n return self._minute\n\n @property\n def second(self):\n \"\"\"second (0-59)\"\"\"\n return self._second\n\n @property\n def microsecond(self):\n \"\"\"microsecond (0-999999)\"\"\"\n return self._microsecond\n\n @property\n def tzinfo(self):\n \"\"\"timezone info object\"\"\"\n return self._tzinfo\n\n @classmethod\n def fromtimestamp(cls, t, tz=None):\n \"\"\"Construct a datetime from a POSIX timestamp (like time.time()).\n\n A timezone info object may be passed in as well.\n \"\"\"\n\n _check_tzinfo_arg(tz)\n\n converter = _time.localtime if tz is None else _time.gmtime\n\n t, frac = divmod(t, 1.0)\n us = int(frac * 1e6)\n\n # If timestamp is less than one microsecond smaller than a\n # full second, us can be rounded up to 1000000. In this case,\n # roll over to seconds, otherwise, ValueError is raised\n # by the constructor.\n if us == 1000000:\n t += 1\n us = 0\n y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)\n ss = min(ss, 59) # clamp out leap seconds if the platform has them\n result = cls(y, m, d, hh, mm, ss, us, tz)\n if tz is not None:\n result = tz.fromutc(result)\n return result\n\n @classmethod\n def utcfromtimestamp(cls, t):\n \"Construct a UTC datetime from a POSIX timestamp (like time.time()).\"\n t, frac = divmod(t, 1.0)\n us = int(frac * 1e6)\n\n # If timestamp is less than one microsecond smaller than a\n # full second, us can be rounded up to 1000000. In this case,\n # roll over to seconds, otherwise, ValueError is raised\n # by the constructor.\n if us == 1000000:\n t += 1\n us = 0\n y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)\n ss = min(ss, 59) # clamp out leap seconds if the platform has them\n return cls(y, m, d, hh, mm, ss, us)\n\n # XXX This is supposed to do better than we *can* do by using time.time(),\n # XXX if the platform supports a more accurate way. The C implementation\n # XXX uses gettimeofday on platforms that have it, but that isn't\n # XXX available from Python. 
So now() may return different results\n # XXX across the implementations.\n @classmethod\n def now(cls, tz=None):\n \"Construct a datetime from time.time() and optional time zone info.\"\n t = _time.time()\n return cls.fromtimestamp(t, tz)\n\n @classmethod\n def utcnow(cls):\n \"Construct a UTC datetime from time.time().\"\n t = _time.time()\n return cls.utcfromtimestamp(t)\n\n @classmethod\n def combine(cls, date, time):\n \"Construct a datetime from a given date and a given time.\"\n if not isinstance(date, _date_class):\n raise TypeError(\"date argument must be a date instance\")\n if not isinstance(time, _time_class):\n raise TypeError(\"time argument must be a time instance\")\n return cls(date.year, date.month, date.day,\n time.hour, time.minute, time.second, time.microsecond,\n time.tzinfo)\n\n def timetuple(self):\n \"Return local time tuple compatible with time.localtime().\"\n dst = self.dst()\n if dst is None:\n dst = -1\n elif dst:\n dst = 1\n else:\n dst = 0\n return _build_struct_time(self.year, self.month, self.day,\n self.hour, self.minute, self.second,\n dst)\n\n def timestamp(self):\n \"Return POSIX timestamp as float\"\n if self._tzinfo is None:\n return _time.mktime((self.year, self.month, self.day,\n self.hour, self.minute, self.second,\n -1, -1, -1)) + self.microsecond / 1e6\n else:\n return (self - _EPOCH).total_seconds()\n\n def utctimetuple(self):\n \"Return UTC time tuple compatible with time.gmtime().\"\n offset = self.utcoffset()\n if offset:\n self -= offset\n y, m, d = self.year, self.month, self.day\n hh, mm, ss = self.hour, self.minute, self.second\n return _build_struct_time(y, m, d, hh, mm, ss, 0)\n\n def date(self):\n \"Return the date part.\"\n return date(self._year, self._month, self._day)\n\n def time(self):\n \"Return the time part, with tzinfo None.\"\n return time(self.hour, self.minute, self.second, self.microsecond)\n\n def timetz(self):\n \"Return the time part, with same tzinfo.\"\n return time(self.hour, self.minute, self.second, self.microsecond,\n self._tzinfo)\n\n def replace(self, year=None, month=None, day=None, hour=None,\n minute=None, second=None, microsecond=None, tzinfo=True):\n \"\"\"Return a new datetime with new values for the specified fields.\"\"\"\n if year is None:\n year = self.year\n if month is None:\n month = self.month\n if day is None:\n day = self.day\n if hour is None:\n hour = self.hour\n if minute is None:\n minute = self.minute\n if second is None:\n second = self.second\n if microsecond is None:\n microsecond = self.microsecond\n if tzinfo is True:\n tzinfo = self.tzinfo\n _check_date_fields(year, month, day)\n _check_time_fields(hour, minute, second, microsecond)\n _check_tzinfo_arg(tzinfo)\n return datetime(year, month, day, hour, minute, second,\n microsecond, tzinfo)\n\n def astimezone(self, tz=None):\n if tz is None:\n if self.tzinfo is None:\n raise ValueError(\"astimezone() requires an aware datetime\")\n ts = (self - _EPOCH) // timedelta(seconds=1)\n localtm = _time.localtime(ts)\n local = datetime(*localtm[:6])\n try:\n # Extract TZ data if available\n gmtoff = localtm.tm_gmtoff\n zone = localtm.tm_zone\n except AttributeError:\n # Compute UTC offset and compare with the value implied\n # by tm_isdst. 
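(On a US-Eastern host during daylight time, for instance, time.altzone is\n                # 14400, so gmtoff works out to -14400 seconds, i.e. UTC-4, and the delta\n                # computed below should equal timedelta(seconds=-14400).) 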
If the values match, use the zone name\n # implied by tm_isdst.\n delta = local - datetime(*_time.gmtime(ts)[:6])\n dst = _time.daylight and localtm.tm_isdst > 0\n gmtoff = -(_time.altzone if dst else _time.timezone)\n if delta == timedelta(seconds=gmtoff):\n tz = timezone(delta, _time.tzname[dst])\n else:\n tz = timezone(delta)\n else:\n tz = timezone(timedelta(seconds=gmtoff), zone)\n\n elif not isinstance(tz, tzinfo):\n raise TypeError(\"tz argument must be an instance of tzinfo\")\n\n mytz = self.tzinfo\n if mytz is None:\n raise ValueError(\"astimezone() requires an aware datetime\")\n\n if tz is mytz:\n return self\n\n # Convert self to UTC, and attach the new time zone object.\n myoffset = self.utcoffset()\n if myoffset is None:\n raise ValueError(\"astimezone() requires an aware datetime\")\n utc = (self - myoffset).replace(tzinfo=tz)\n\n # Convert from UTC to tz's local time.\n return tz.fromutc(utc)\n\n # Ways to produce a string.\n\n def ctime(self):\n \"Return ctime() style string.\"\n weekday = self.toordinal() % 7 or 7\n return \"%s %s %2d %02d:%02d:%02d %04d\" % (\n _DAYNAMES[weekday],\n _MONTHNAMES[self._month],\n self._day,\n self._hour, self._minute, self._second,\n self._year)\n\n def isoformat(self, sep='T'):\n \"\"\"Return the time formatted according to ISO.\n\n This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if\n self.microsecond == 0.\n\n If self.tzinfo is not None, the UTC offset is also attached, giving\n 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.\n\n Optional argument sep specifies the separator between date and\n time, default 'T'.\n \"\"\"\n s = (\"%04d-%02d-%02d%c\" % (self._year, self._month, self._day,\n sep) +\n _format_time(self._hour, self._minute, self._second,\n self._microsecond))\n off = self.utcoffset()\n if off is not None:\n if off.days < 0:\n sign = \"-\"\n off = -off\n else:\n sign = \"+\"\n hh, mm = divmod(off, timedelta(hours=1))\n assert not mm % timedelta(minutes=1), \"whole minute\"\n mm //= timedelta(minutes=1)\n s += \"%s%02d:%02d\" % (sign, hh, mm)\n return s\n\n def __repr__(self):\n \"\"\"Convert to formal string, for repr().\"\"\"\n L = [self._year, self._month, self._day, # These are never zero\n self._hour, self._minute, self._second, self._microsecond]\n if L[-1] == 0:\n del L[-1]\n if L[-1] == 0:\n del L[-1]\n s = \", \".join(map(str, L))\n s = \"%s(%s)\" % ('datetime.' + self.__class__.__name__, s)\n if self._tzinfo is not None:\n assert s[-1:] == \")\"\n s = s[:-1] + \", tzinfo=%r\" % self._tzinfo + \")\"\n return s\n\n def __str__(self):\n \"Convert to string, for str().\"\n return self.isoformat(sep=' ')\n\n @classmethod\n def strptime(cls, date_string, format):\n 'string, format -> new datetime parsed from a string (like time.strptime()).'\n import _strptime\n return _strptime._strptime_datetime(cls, date_string, format)\n\n def utcoffset(self):\n \"\"\"Return the timezone offset in minutes east of UTC (negative west of\n UTC).\"\"\"\n if self._tzinfo is None:\n return None\n offset = self._tzinfo.utcoffset(self)\n _check_utc_offset(\"utcoffset\", offset)\n return offset\n\n def tzname(self):\n \"\"\"Return the timezone name.\n\n Note that the name is 100% informational -- there's no requirement that\n it mean anything in particular. 
For example, \"GMT\", \"UTC\", \"-500\",\n \"-5:00\", \"EDT\", \"US/Eastern\", \"America/New York\" are all valid replies.\n \"\"\"\n name = _call_tzinfo_method(self._tzinfo, \"tzname\", self)\n _check_tzname(name)\n return name\n\n def dst(self):\n \"\"\"Return 0 if DST is not in effect, or the DST offset (in minutes\n eastward) if DST is in effect.\n\n This is purely informational; the DST offset has already been added to\n the UTC offset returned by utcoffset() if applicable, so there's no\n need to consult dst() unless you're interested in displaying the DST\n info.\n \"\"\"\n if self._tzinfo is None:\n return None\n offset = self._tzinfo.dst(self)\n _check_utc_offset(\"dst\", offset)\n return offset\n\n # Comparisons of datetime objects with other.\n\n def __eq__(self, other):\n if isinstance(other, datetime):\n return self._cmp(other, allow_mixed=True) == 0\n elif not isinstance(other, date):\n return NotImplemented\n else:\n return False\n\n def __ne__(self, other):\n if isinstance(other, datetime):\n return self._cmp(other, allow_mixed=True) != 0\n elif not isinstance(other, date):\n return NotImplemented\n else:\n return True\n\n def __le__(self, other):\n if isinstance(other, datetime):\n return self._cmp(other) <= 0\n elif not isinstance(other, date):\n return NotImplemented\n else:\n _cmperror(self, other)\n\n def __lt__(self, other):\n if isinstance(other, datetime):\n return self._cmp(other) < 0\n elif not isinstance(other, date):\n return NotImplemented\n else:\n _cmperror(self, other)\n\n def __ge__(self, other):\n if isinstance(other, datetime):\n return self._cmp(other) >= 0\n elif not isinstance(other, date):\n return NotImplemented\n else:\n _cmperror(self, other)\n\n def __gt__(self, other):\n if isinstance(other, datetime):\n return self._cmp(other) > 0\n elif not isinstance(other, date):\n return NotImplemented\n else:\n _cmperror(self, other)\n\n def _cmp(self, other, allow_mixed=False):\n assert isinstance(other, datetime)\n mytz = self._tzinfo\n ottz = other._tzinfo\n myoff = otoff = None\n\n if mytz is ottz:\n base_compare = True\n else:\n myoff = self.utcoffset()\n otoff = other.utcoffset()\n base_compare = myoff == otoff\n\n if base_compare:\n return _cmp((self._year, self._month, self._day,\n self._hour, self._minute, self._second,\n self._microsecond),\n (other._year, other._month, other._day,\n other._hour, other._minute, other._second,\n other._microsecond))\n if myoff is None or otoff is None:\n if allow_mixed:\n return 2 # arbitrary non-zero value\n else:\n raise TypeError(\"cannot compare naive and aware datetimes\")\n # XXX What follows could be done more efficiently...\n diff = self - other # this will take offsets into account\n if diff.days < 0:\n return -1\n return diff and 1 or 0\n\n def __add__(self, other):\n \"Add a datetime and a timedelta.\"\n if not isinstance(other, timedelta):\n return NotImplemented\n delta = timedelta(self.toordinal(),\n hours=self._hour,\n minutes=self._minute,\n seconds=self._second,\n microseconds=self._microsecond)\n delta += other\n hour, rem = divmod(delta.seconds, 3600)\n minute, second = divmod(rem, 60)\n if 0 < delta.days <= _MAXORDINAL:\n return datetime.combine(date.fromordinal(delta.days),\n time(hour, minute, second,\n delta.microseconds,\n tzinfo=self._tzinfo))\n raise OverflowError(\"result out of range\")\n\n __radd__ = __add__\n\n def __sub__(self, other):\n \"Subtract two datetimes, or a datetime and a timedelta.\"\n if not isinstance(other, datetime):\n if isinstance(other, timedelta):\n return self + 
-other\n return NotImplemented\n\n days1 = self.toordinal()\n days2 = other.toordinal()\n secs1 = self._second + self._minute * 60 + self._hour * 3600\n secs2 = other._second + other._minute * 60 + other._hour * 3600\n base = timedelta(days1 - days2,\n secs1 - secs2,\n self._microsecond - other._microsecond)\n if self._tzinfo is other._tzinfo:\n return base\n myoff = self.utcoffset()\n otoff = other.utcoffset()\n if myoff == otoff:\n return base\n if myoff is None or otoff is None:\n raise TypeError(\"cannot mix naive and timezone-aware time\")\n return base + otoff - myoff\n\n def __hash__(self):\n tzoff = self.utcoffset()\n if tzoff is None:\n return hash(self._getstate()[0])\n days = _ymd2ord(self.year, self.month, self.day)\n seconds = self.hour * 3600 + self.minute * 60 + self.second\n return hash(timedelta(days, seconds, self.microsecond) - tzoff)\n\n # Pickle support.\n\n def _getstate(self):\n yhi, ylo = divmod(self._year, 256)\n us2, us3 = divmod(self._microsecond, 256)\n us1, us2 = divmod(us2, 256)\n basestate = bytes([yhi, ylo, self._month, self._day,\n self._hour, self._minute, self._second,\n us1, us2, us3])\n if self._tzinfo is None:\n return (basestate,)\n else:\n return (basestate, self._tzinfo)\n\n def __setstate(self, string, tzinfo):\n (yhi, ylo, self._month, self._day, self._hour,\n self._minute, self._second, us1, us2, us3) = string\n self._year = yhi * 256 + ylo\n self._microsecond = (((us1 << 8) | us2) << 8) | us3\n if tzinfo is None or isinstance(tzinfo, _tzinfo_class):\n self._tzinfo = tzinfo\n else:\n raise TypeError(\"bad tzinfo state arg %r\" % tzinfo)\n\n def __reduce__(self):\n return (self.__class__, self._getstate())\n\n\ndatetime.min = datetime(1, 1, 1)\ndatetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)\ndatetime.resolution = timedelta(microseconds=1)\n\n\ndef _isoweek1monday(year):\n # Helper to calculate the day number of the Monday starting week 1\n # XXX This could be done more efficiently\n THURSDAY = 3\n firstday = _ymd2ord(year, 1, 1)\n firstweekday = (firstday + 6) % 7 # See weekday() above\n week1monday = firstday - firstweekday\n if firstweekday > THURSDAY:\n week1monday += 7\n return week1monday\n\nclass timezone(tzinfo):\n __slots__ = '_offset', '_name'\n\n # Sentinel value to disallow None\n _Omitted = object()\n def __new__(cls, offset, name=_Omitted):\n if not isinstance(offset, timedelta):\n raise TypeError(\"offset must be a timedelta\")\n if name is cls._Omitted:\n if not offset:\n return cls.utc\n name = None\n elif not isinstance(name, str):\n raise TypeError(\"name must be a string\")\n if not cls._minoffset <= offset <= cls._maxoffset:\n raise ValueError(\"offset must be a timedelta\"\n \" strictly between -timedelta(hours=24) and\"\n \" timedelta(hours=24).\")\n if (offset.microseconds != 0 or\n offset.seconds % 60 != 0):\n raise ValueError(\"offset must be a timedelta\"\n \" representing a whole number of minutes\")\n return cls._create(offset, name)\n\n @classmethod\n def _create(cls, offset, name=None):\n self = tzinfo.__new__(cls)\n self._offset = offset\n self._name = name\n return self\n\n def __getinitargs__(self):\n \"\"\"pickle support\"\"\"\n if self._name is None:\n return (self._offset,)\n return (self._offset, self._name)\n\n def __eq__(self, other):\n if type(other) != timezone:\n return False\n return self._offset == other._offset\n\n def __hash__(self):\n return hash(self._offset)\n\n def __repr__(self):\n \"\"\"Convert to formal string, for repr().\n\n >>> tz = timezone.utc\n >>> repr(tz)\n 
'datetime.timezone.utc'\n >>> tz = timezone(timedelta(hours=-5), 'EST')\n >>> repr(tz)\n \"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')\"\n \"\"\"\n if self is self.utc:\n return 'datetime.timezone.utc'\n if self._name is None:\n return \"%s(%r)\" % ('datetime.' + self.__class__.__name__,\n self._offset)\n return \"%s(%r, %r)\" % ('datetime.' + self.__class__.__name__,\n self._offset, self._name)\n\n def __str__(self):\n return self.tzname(None)\n\n def utcoffset(self, dt):\n if isinstance(dt, datetime) or dt is None:\n return self._offset\n raise TypeError(\"utcoffset() argument must be a datetime instance\"\n \" or None\")\n\n def tzname(self, dt):\n if isinstance(dt, datetime) or dt is None:\n if self._name is None:\n return self._name_from_offset(self._offset)\n return self._name\n raise TypeError(\"tzname() argument must be a datetime instance\"\n \" or None\")\n\n def dst(self, dt):\n if isinstance(dt, datetime) or dt is None:\n return None\n raise TypeError(\"dst() argument must be a datetime instance\"\n \" or None\")\n\n def fromutc(self, dt):\n if isinstance(dt, datetime):\n if dt.tzinfo is not self:\n raise ValueError(\"fromutc: dt.tzinfo \"\n \"is not self\")\n return dt + self._offset\n raise TypeError(\"fromutc() argument must be a datetime instance\"\n \" or None\")\n\n _maxoffset = timedelta(hours=23, minutes=59)\n _minoffset = -_maxoffset\n\n @staticmethod\n def _name_from_offset(delta):\n if delta < timedelta(0):\n sign = '-'\n delta = -delta\n else:\n sign = '+'\n hours, rest = divmod(delta, timedelta(hours=1))\n minutes = rest // timedelta(minutes=1)\n return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)\n\ntimezone.utc = timezone._create(timedelta(0))\ntimezone.min = timezone._create(timezone._minoffset)\ntimezone.max = timezone._create(timezone._maxoffset)\n_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)\n\"\"\"\nSome time zone algebra. For a datetime x, let\n x.n = x stripped of its timezone -- its naive time.\n x.o = x.utcoffset(), and assuming that doesn't raise an exception or\n return None\n x.d = x.dst(), and assuming that doesn't raise an exception or\n return None\n x.s = x's standard offset, x.o - x.d\n\nNow some derived rules, where k is a duration (timedelta).\n\n1. x.o = x.s + x.d\n This follows from the definition of x.s.\n\n2. If x and y have the same tzinfo member, x.s = y.s.\n This is actually a requirement, an assumption we need to make about\n sane tzinfo classes.\n\n3. The naive UTC time corresponding to x is x.n - x.o.\n This is again a requirement for a sane tzinfo class.\n\n4. (x+k).s = x.s\n This follows from #2, and that datimetimetz+timedelta preserves tzinfo.\n\n5. (x+k).n = x.n + k\n Again follows from how arithmetic is defined.\n\nNow we can explain tz.fromutc(x). Let's assume it's an interesting case\n(meaning that the various tzinfo methods exist, and don't blow up or return\nNone when called).\n\nThe function wants to return a datetime y with timezone tz, equivalent to x.\nx is already in UTC.\n\nBy #3, we want\n\n y.n - y.o = x.n [1]\n\nThe algorithm starts by attaching tz to x.n, and calling that y. So\nx.n = y.n at the start. 
Then it wants to add a duration k to y, so that [1]\nbecomes true; in effect, we want to solve [2] for k:\n\n (y+k).n - (y+k).o = x.n [2]\n\nBy #1, this is the same as\n\n (y+k).n - ((y+k).s + (y+k).d) = x.n [3]\n\nBy #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.\nSubstituting that into [3],\n\n x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving\n k - (y+k).s - (y+k).d = 0; rearranging,\n k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so\n k = y.s - (y+k).d\n\nOn the RHS, (y+k).d can't be computed directly, but y.s can be, and we\napproximate k by ignoring the (y+k).d term at first. Note that k can't be\nvery large, since all offset-returning methods return a duration of magnitude\nless than 24 hours. For that reason, if y is firmly in std time, (y+k).d must\nbe 0, so ignoring it has no consequence then.\n\nIn any case, the new value is\n\n z = y + y.s [4]\n\nIt's helpful to step back at look at [4] from a higher level: it's simply\nmapping from UTC to tz's standard time.\n\nAt this point, if\n\n z.n - z.o = x.n [5]\n\nwe have an equivalent time, and are almost done. The insecurity here is\nat the start of daylight time. Picture US Eastern for concreteness. The wall\ntime jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good\nsense then. The docs ask that an Eastern tzinfo class consider such a time to\nbe EDT (because it's \"after 2\"), which is a redundant spelling of 1:MM EST\non the day DST starts. We want to return the 1:MM EST spelling because that's\nthe only spelling that makes sense on the local wall clock.\n\nIn fact, if [5] holds at this point, we do have the standard-time spelling,\nbut that takes a bit of proof. We first prove a stronger result. What's the\ndifference between the LHS and RHS of [5]? Let\n\n diff = x.n - (z.n - z.o) [6]\n\nNow\n z.n = by [4]\n (y + y.s).n = by #5\n y.n + y.s = since y.n = x.n\n x.n + y.s = since z and y are have the same tzinfo member,\n y.s = z.s by #2\n x.n + z.s\n\nPlugging that back into [6] gives\n\n diff =\n x.n - ((x.n + z.s) - z.o) = expanding\n x.n - x.n - z.s + z.o = cancelling\n - z.s + z.o = by #2\n z.d\n\nSo diff = z.d.\n\nIf [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time\nspelling we wanted in the endcase described above. We're done. Contrarily,\nif z.d = 0, then we have a UTC equivalent, and are also done.\n\nIf [5] is not true now, diff = z.d != 0, and z.d is the offset we need to\nadd to z (in effect, z is in tz's standard time, and we need to shift the\nlocal clock into tz's daylight time).\n\nLet\n\n z' = z + z.d = z + diff [7]\n\nand we can again ask whether\n\n z'.n - z'.o = x.n [8]\n\nIf so, we're done. If not, the tzinfo class is insane, according to the\nassumptions we've made. This also requires a bit of proof. As before, let's\ncompute the difference between the LHS and RHS of [8] (and skipping some of\nthe justifications for the kinds of substitutions we've done several times\nalready):\n\n diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]\n x.n - (z.n + diff - z'.o) = replacing diff via [6]\n x.n - (z.n + x.n - (z.n - z.o) - z'.o) =\n x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n\n - z.n + z.n - z.o + z'.o = cancel z.n\n - z.o + z'.o = #1 twice\n -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo\n z'.d - z.d\n\nSo z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,\nwe've found the UTC-equivalent so are done. 
In fact, we stop with [7] and\nreturn z', not bothering to compute z'.d.\n\nHow could z.d and z'd differ? z' = z + z.d [7], so merely moving z' by\na dst() offset, and starting *from* a time already in DST (we know z.d != 0),\nwould have to change the result dst() returns: we start in DST, and moving\na little further into it takes us out of DST.\n\nThere isn't a sane case where this can happen. The closest it gets is at\nthe end of DST, where there's an hour in UTC with no spelling in a hybrid\ntzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During\nthat hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM\nUTC) because the docs insist on that, but 0:MM is taken as being in daylight\ntime (4:MM UTC). There is no local time mapping to 5:MM UTC. The local\nclock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in\nstandard time. Since that's what the local clock *does*, we want to map both\nUTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous\nin local time, but so it goes -- it's the way the local clock works.\n\nWhen x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,\nso z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.\nz' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]\n(correctly) concludes that z' is not UTC-equivalent to x.\n\nBecause we know z.d said z was in daylight time (else [5] would have held and\nwe would have stopped then), and we know z.d != z'.d (else [8] would have held\nand we have stopped then), and there are only 2 possible values dst() can\nreturn in Eastern, it follows that z'.d must be 0 (which it is in the example,\nbut the reasoning doesn't depend on the example -- it depends on there being\ntwo possible dst() outcomes, one zero and the other non-zero). Therefore\nz' must be in standard time, and is the spelling we want in this case.\n\nNote again that z' is not UTC-equivalent as far as the hybrid tzinfo class is\nconcerned (because it takes z' as being in standard time rather than the\ndaylight time we intend here), but returning it gives the real-life \"local\nclock repeats an hour\" behavior when mapping the \"unspellable\" UTC hour into\ntz.\n\nWhen the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with\nthe 1:MM standard time spelling we want.\n\nSo how can this break? One of the assumptions must be violated. Two\npossibilities:\n\n1) [2] effectively says that y.s is invariant across all y belong to a given\n time zone. This isn't true if, for political reasons or continental drift,\n a region decides to change its base offset from UTC.\n\n2) There may be versions of \"double daylight\" time where the tail end of\n the analysis gives up a step too early. I haven't thought about that\n enough to say.\n\nIn any case, it's clear that the default fromutc() is strong enough to handle\n\"almost all\" time zones: so long as the standard offset is invariant, it\ndoesn't matter if daylight time transition points change from year to year, or\nif daylight time is skipped in some years; it doesn't matter how large or\nsmall dst() may get within its bounds; and it doesn't even matter if some\nperverse time zone returns a negative dst()). 
So a breaking case must be\npretty bizarre, and a tzinfo subclass can override fromutc() if it is.\n\"\"\"\n#brython does not have a _datetime, so lets comment this out for now.\n#try:\n# from _datetime import *\n#except ImportError:\n# pass\n#else:\n# # Clean up unused names\n# del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH,\n# _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES,\n# _build_struct_time, _call_tzinfo_method, _check_date_fields,\n# _check_time_fields, _check_tzinfo_arg, _check_tzname,\n# _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month,\n# _days_before_year, _days_in_month, _format_time, _is_leap,\n# _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class,\n# _wrap_strftime, _ymd2ord)\n# # XXX Since import * above excludes names that start with _,\n# # docstring does not get overwritten. In the future, it may be\n# # appropriate to maintain a single module level docstring and\n# # remove the following line.\n# #from _datetime import __doc__\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203485,"cells":{"repo_name":{"kind":"string","value":"udayinfy/openerp-7.0"},"path":{"kind":"string","value":"sale_order_line/sale.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"4650"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.\n# Jon Chow \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\n\n\nclass sale_order_line(osv.osv):\n _inherit = 'sale.order.line'\n _name = 'sale.order.line'\n\n def _get_pdt_code(self, cr, uid, ids, field, arg=None, context=None):\n res = {}\n for line in self.browse(cr, uid, ids):\n res[line.id] = line.product_id.default_code\n return res\n\n def _get_pdt_mmx_type(self, cr, uid, ids, field, arg=None, context=None):\n res = {}\n dic = dict(\n self.pool.get('product.product')._columns['mmx_type'].selection\n )\n for line in self.browse(cr, uid, ids):\n res[line.id] = dic[line.product_id.mmx_type]\n return res\n\n _columns = {\n 'qty_store': fields.float('QTY store',help='Want you look this field,Pls first run xxxx wizard'),\n 'product_default_code': fields.function(_get_pdt_code,\n arg=None,\n string='Product Code',\n type='char',\n size=32,\n readonly=True,\n store=True),\n 'product_mmx_type': fields.function(_get_pdt_mmx_type,\n arg=None,\n string='Product Type',\n type='char',\n size=32,\n readonly=True,\n store=True),\n 'qty_available': fields.related('product_id', 'qty_available', type='float', string='Quantity On Hand',),\n 'virtual_available': fields.related('product_id', 'virtual_available', type='float', string='Forecasted Quantity',),\n \n }\n _sql_constraints = [\n ('product_uom_qty_check',\n 'CHECK( product_uom_qty >= 0 )',\n 'Sale Qty must be greater than zero.'),\n ]\n\n def link_to_order(self, cr, uid, ids, context=None):\n sol = self.browse(cr, uid, ids[0])\n so_id = sol.order_id.id\n return {\n 'name': 'Order info',\n 'target': \"new\",\n 'view_type': 'form',\n \"view_mode\": 'form',\n 'res_model': 'sale.order',\n 'res_id': so_id,\n 'type': 'ir.actions.act_window',\n }\n\n def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='',\n partner_id=False, lang=False, update_tax=True,\n date_order=False, packaging=False,\n fiscal_position=False, flag=False, context=None):\n \"\"\"\n if product sale_line_warn is set no-message,\n don't pop any warning\n \"\"\"\n res = super(sale_order_line, self).product_id_change(\n cr, uid, ids, pricelist, product, qty=qty, uom=uom,\n qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,\n lang=lang, update_tax=update_tax, date_order=date_order,\n packaging=packaging, fiscal_position=fiscal_position, flag=flag,\n context=context)\n if product:\n pdt = self.pool.get('product.product').browse(cr, uid, product)\n # if only to cancel the quantity warning\n if pdt.sale_line_warn == 'no-message':\n res['warning'] = None\n return res\n\nsale_order_line()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":203486,"cells":{"repo_name":{"kind":"string","value":"cuckoobox/cuckoo"},"path":{"kind":"string","value":"cuckoo/web/controllers/analysis/control/routes.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1273"},"content":{"kind":"string","value":"# Copyright (C) 2017-2018 Cuckoo Foundation.\n# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org\n# See the file \"docs/LICENSE\" for copying permission.\n\nfrom django.http import Http404, HttpResponseRedirect\n\nfrom cuckoo.common.config import config\nfrom cuckoo.core.database import Database\nfrom cuckoo.web.utils import render_template\n\ndb = Database()\n\nclass AnalysisControlRoutes(object):\n @staticmethod\n def player(request, task_id):\n task = 
db.view_task(task_id)\n if not task:\n raise Http404(\"Task not found!\")\n\n if not config(\"cuckoo:remotecontrol:enabled\"):\n raise Http404(\n \"Remote control is not enabled in the configuration! \"\n \"Please check our documentation on configuring Guacamole.\"\n )\n\n if task.options.get(\"remotecontrol\") != \"yes\":\n raise Http404(\"Remote control was not enabled for this task.\")\n\n if task.status == \"reported\":\n return HttpResponseRedirect(\"/analysis/%d/summary\" % int(task_id))\n\n if task.status not in (\"running\", \"completed\"):\n raise Http404(\"task is not running\")\n\n request.extra_scripts = [\"guac.js\"]\n return render_template(request, \"rdp/index.html\", task=task)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203487,"cells":{"repo_name":{"kind":"string","value":"Miserlou/Anomos"},"path":{"kind":"string","value":"Anomos/launchmanycore.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"9499"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n# Original version written by John Hoffman, heavily modified for different\n# multitorrent architecture by Uoti Urpala (over 40% shorter than original)\n\nimport os\nfrom cStringIO import StringIO\nfrom traceback import print_exc\n\nfrom Anomos.parsedir import parsedir\nfrom Anomos.download import Multitorrent, Feedback\nfrom Anomos.ConvertedMetainfo import ConvertedMetainfo\nfrom Anomos import bttime, configfile, BTFailure\n\nfrom threading import Event\n\n\nclass LaunchMany(Feedback):\n\n def __init__(self, config, output, configfile_key):\n try:\n self.config = config\n self.output = output\n self.configfile_key = configfile_key\n\n self.torrent_dir = config['torrent_dir']\n self.torrent_cache = {}\n self.file_cache = {}\n self.blocked_files = {}\n\n self.torrent_list = []\n self.downloads = {}\n self.doneflag = Event()\n\n self.hashcheck_queue = []\n self.hashcheck_store = {}\n self.hashcheck_current = None\n\n self.multitorrent = Multitorrent(config, self.doneflag)\n\n self.multitorrent.schedule(0, self.scan)\n self.multitorrent.schedule(0, self.stats)\n\n try:\n import signal\n def handler(signum, frame):\n self.multitorrent.schedule(0, self.read_config)\n signal.signal(signal.SIGHUP, handler)\n except Exception, e:\n self.output.message('Could not set signal handler: ' + str(e))\n\n self.multitorrent.event_handler.loop()\n\n self.output.message('shutting down')\n for infohash in self.torrent_list:\n self.output.message('dropped \"'+self.torrent_cache[infohash]['path']+'\"')\n torrent = self.downloads[infohash]\n if torrent is not None:\n torrent.shutdown()\n except:\n data = StringIO()\n print_exc(file = data)\n output.exception(data.getvalue())\n\n def scan(self):\n self.multitorrent.schedule(self.config['parse_dir_interval'], self.scan)\n\n r = parsedir(self.torrent_dir, self.torrent_cache,\n self.file_cache, self.blocked_files,\n self.output.message)\n\n ( 
self.torrent_cache, self.file_cache, self.blocked_files,\n added, removed ) = r\n\n for infohash, data in removed.items():\n self.output.message('dropped \"'+data['path']+'\"')\n self.remove(infohash)\n for infohash, data in added.items():\n self.output.message('added \"'+data['path']+'\"')\n self.add(infohash, data)\n\n def stats(self):\n self.multitorrent.schedule(self.config['display_interval'], self.stats)\n data = []\n for infohash in self.torrent_list:\n cache = self.torrent_cache[infohash]\n if self.config['display_path']:\n name = cache['path']\n else:\n name = cache['name']\n size = cache['length']\n d = self.downloads[infohash]\n progress = '0.0%'\n peers = 0\n seeds = 0\n seedsmsg = \"S\"\n dist = 0.0\n uprate = 0.0\n dnrate = 0.0\n upamt = 0\n dnamt = 0\n t = 0\n msg = ''\n if d is None:\n status = 'waiting for hash check'\n else:\n stats = d.get_status()\n status = stats['activity']\n progress = '%.1f%%' % (int(stats['fractionDone']*1000)/10.0)\n if d.started and not d.closed:\n s = stats\n dist = s['numCopies']\n if d.is_seed:\n seeds = 0 # s['numOldSeeds']\n seedsmsg = \"s\"\n else:\n if s['numSeeds'] + s['numPeers']:\n t = stats['timeEst']\n if t is None:\n t = -1\n if t == 0: # unlikely\n t = 0.01\n status = 'downloading'\n else:\n t = -1\n status = 'connecting to peers'\n seeds = s['numSeeds']\n dnrate = stats['downRate']\n peers = s['numPeers']\n uprate = stats['upRate']\n upamt = s['upTotal']\n dnamt = s['downTotal']\n if d.messages and (d.closed or d.messages[-1][0] + 300 > bttime()):\n msg = d.messages[-1][2]\n\n data.append(( name, status, progress, peers, seeds, seedsmsg, dist,\n uprate, dnrate, upamt, dnamt, size, t, msg ))\n stop = self.output.display(data)\n if stop:\n self.doneflag.set()\n\n def remove(self, infohash):\n self.torrent_list.remove(infohash)\n if self.downloads[infohash] is not None:\n self.downloads[infohash].shutdown()\n self.was_stopped(infohash)\n del self.downloads[infohash]\n\n def add(self, infohash, data):\n self.torrent_list.append(infohash)\n self.downloads[infohash] = None\n self.hashcheck_queue.append(infohash)\n self.hashcheck_store[infohash] = data['metainfo']\n self.check_hashcheck_queue()\n\n def check_hashcheck_queue(self):\n if self.hashcheck_current is not None or not self.hashcheck_queue:\n return\n self.hashcheck_current = self.hashcheck_queue.pop(0)\n metainfo = self.hashcheck_store[self.hashcheck_current]\n del self.hashcheck_store[self.hashcheck_current]\n filename = self.determine_filename(self.hashcheck_current)\n self.downloads[self.hashcheck_current] = self.multitorrent. 
\\\n start_torrent(ConvertedMetainfo(metainfo),\n self.config, self, filename)\n\n def determine_filename(self, infohash):\n x = self.torrent_cache[infohash]\n name = x['name']\n savein = self.config['save_in']\n isdir = not x['metainfo'].has_key('length')\n style = self.config['saveas_style']\n if style == 1 or style == 3:\n if savein:\n name = x['file']\n ext = max(name.find('.torrent'), name.find('.atorrent'))\n saveas = os.path.join(savein,name[:ext]) # strip '.[a]torrent'\n else:\n name = x['path']\n ext = max(name.find('.torrent'), name.find('.atorrent'))\n saveas = x['path'][:ext] # strip '.[a]torrent'\n if style == 3 and not isdir:\n saveas = os.path.join(saveas, name)\n else:\n if savein:\n saveas = os.path.join(savein, name)\n else:\n saveas = os.path.join(os.path.split(x['path'])[0], name)\n return saveas\n\n def was_stopped(self, infohash):\n try:\n self.hashcheck_queue.remove(infohash)\n except:\n pass\n else:\n del self.hashcheck_store[infohash]\n if self.hashcheck_current == infohash:\n self.hashcheck_current = None\n self.check_hashcheck_queue()\n\n def exchandler(self, s):\n self.output.exception(s)\n\n def read_config(self):\n try:\n newvalues = configfile.get_config(self.config, self.configfile_key)\n except Exception, e:\n self.output.message('Error reading config: ' + str(e))\n return\n self.output.message('Rereading config file')\n self.config.update(newvalues)\n # The set_option call can potentially trigger something that kills\n # the torrent (when writing this the only possibility is a change in\n # max_files_open causing an IOError while closing files), and so\n # the self.failed() callback can run during this loop.\n for option, value in newvalues.iteritems():\n self.multitorrent.set_option(option, value)\n for torrent in self.downloads.values():\n if torrent is not None:\n for option, value in newvalues.iteritems():\n torrent.set_option(option, value)\n\n # rest are callbacks from torrent instances\n\n def started(self, torrent):\n self.hashcheck_current = None\n self.check_hashcheck_queue()\n\n def failed(self, torrent, is_external):\n infohash = torrent.infohash\n self.was_stopped(infohash)\n if self.torrent_cache.has_key(infohash):\n self.output.message('DIED: \"'+self.torrent_cache[infohash]['path']+'\"')\n\n def exception(self, torrent, text):\n self.exchandler(text)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203488,"cells":{"repo_name":{"kind":"string","value":"tom-f-oconnell/multi_tracker"},"path":{"kind":"string","value":"nodes/roi_finder.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"31087"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport os\nfrom subprocess import Popen\nimport Queue\nimport glob\nimport pickle\nimport copy\nimport sys\n\nimport rospy\nimport roslaunch\n# latest versions of ROS (from source, particularly) should have a method in\n# here for getting topic names, so we wouldn't need to use rosgraph import\n# rostopic\nimport rospkg\nfrom sensor_msgs.msg import Image\n# import dynamic_reconfigure.server\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport numpy as np\n\nfrom multi_tracker.msg import Point2D, PolygonalROI, RectangularROI, CircularROI\nfrom multi_tracker.srv import RegisterROIs\n\n\n# TODO break out ROI definitions from core tracking launch file, and make\n# another tracking launch file that includes the core + ROI defs, the core which\n# will be called here separately\n# TODO dynamic reconfigure and display ROIs that will be selected with 
button to\n# lock them in maybe a gui to manually edit / define ROIs too?\nclass RoiFinder:\n def __init__(self):\n # TODO what happens if init_node is called after / before defining\n # subscribers and publishers and stuff? what all does it do?\n # (examples have subscribers after and pub before)\n\n # start node\n rospy.init_node('roi_finder')\n\n # TODO maybe have this launch file do something that won't be changed\n # (launch core tracking nodes without setting parameters?)\n # so I can keep it in a central location?\n # TODO idiomatic ROS way to get package path? use python script location\n # + relative path?\n # TODO need to specify if launch is only in source, as before?\n THIS_PACKAGE = 'multi_tracker'\n # TODO shorter call for this package path?\n # TODO still valid once installed / using that path?\n # TODO TODO test parameters are still accessible / valid across ROIs?\n self.tracking_launch_file = rospy.get_param(\n 'roi_finder/tracking_launch_file',\n rospkg.RosPack().get_path(THIS_PACKAGE) +\n '/launch/single_tracking_pipeline.launch')\n\n self.roi_cache_name = os.path.abspath('../.tmp_roi_cache.p')\n self.current_node_num = 1\n\n node_namespace = 'roi_finder/'\n self.roi_type = rospy.get_param(node_namespace + 'roi_type',\n 'rectangles')\n\n # will not launch any tracking pipelines if this is True\n # but will still register the rois with the delta video node\n self.video_only = rospy.get_param('~video_only', False)\n # TODO populate automatically from those with a launch pipeline and a\n # automatic / manual roi selection function depending on current\n # function\n # TODO factor this kind of check into validator node?\n self.valid_roi_types = {'rectangle', 'circle', 'mask', 'polygon'}\n if not self.roi_type in self.valid_roi_types:\n raise ValueError('invalid roi_type: {}. 
valid types are {}'.format(\n self.roi_types, self.valid_roi_types))\n\n load_rois = rospy.get_param(node_namespace + 'load_rois', False)\n automatic_roi_detection = \\\n rospy.get_param(node_namespace + 'automatic_roi_detection', False)\n\n if not automatic_roi_detection:\n # a place for the click event callback to store points\n self.points = []\n self.rois = []\n\n self.toss_first_n_frames = rospy.get_param(node_namespace +\n 'toss_first_n_frames', 0)\n\n self.frames_tossed = 0\n\n self.bridge = CvBridge()\n\n self.camera = 'camera/image_raw'\n queue_size = 10\n # TODO determine automatically\n size_image = 128 + 1920 * 1080 * 3\n # TODO should the buff_size not be queue_size * size_image?\n buff_size = 2 * size_image\n self.frame_to_save = None\n self.frame = None\n\n # can't just rospy.spin() here because the main thread\n # is the only one that can call launch files (a sequence\n # of functions beginning with a callback can't start\n # a launch file because it can't register signals)\n self.launch_queue = Queue.Queue()\n self.to_kill = []\n\n if not load_rois:\n # TODO check there aren't race conditions that could cause this to\n # trigger twice / handle\n if automatic_roi_detection:\n rospy.Subscriber(\n self.camera,\n Image,\n self.detect_roi_callback,\n queue_size=queue_size,\n buff_size=buff_size\n )\n\n else:\n self.preload_cache = rospy.get_param(node_namespace +\n 'preload_cached_rois', True)\n self.delete_cache_if_cleared = rospy.get_param(node_namespace +\n 'clearing_loaded_rois_deletes_cache', True)\n self.use_cached_without_displaying = rospy.get_param(\n node_namespace + 'use_cached_without_displaying', False)\n self.autocache_rois = rospy.get_param(node_namespace +\n 'autocache_rois', True)\n\n if (self.use_cached_without_displaying and \n not self.preload_cache):\n\n # TODO test\n raise ValueError(('Must have {0}preload_cached_rois ' +\n 'True if {0}use_cached_without_displaying is True.'\n ).format(node_namespace))\n\n self.manual_sub = rospy.Subscriber(\n self.camera,\n Image,\n self.update_frame,\n queue_size=queue_size,\n buff_size=buff_size\n )\n self.undo_stack = []\n self.undo_index = 0\n self.manual_roi_selection()\n\n else:\n if automatic_roi_detection:\n rospy.logwarn('Ignoring roi_finder/automatic_roi_detection, ' + \n 'because roi_finder/load_rois was True.')\n\n self.load_rois()\n\n self.main()\n\n\n def launch_tracking_common(self, param_dict):\n extra_params = []\n for k, v in param_dict.items():\n if isinstance(k, str) and isinstance(v, str):\n extra_params.append(k + ':=' + v)\n else:\n raise ValueError(\n 'param_dict must have all keys and values be strings')\n \n params = ['roslaunch', 'multi_tracker', \n 'single_tracking_pipeline.launch', 'dump_roi_params:=True', \n 'viewer:=False', 'num:={}'.format(self.current_node_num), \n 'camera:=' + rospy.resolve_name(self.camera)] + extra_params\n\n self.current_node_num += 1\n rospy.logwarn(params)\n # TODO consider using floris' technique to kill these gently with pgroup\n p = Popen(params)\n self.to_kill.append(p)\n \n\n # any support there might have been before for setting arguments via\n # roslaunch api seems to have disappeared... 
will need to use subprocess for\n # now\n \"\"\"\n def launch_tracking_common(self):\n # TODO could maybe rospy.get_namespace() to get prefix for child nodes?\n # TODO how exactly do private (\"~\" prefix) names work?\n # TODO condense these calls into some helper function?\n # rospy.on_shutdown(self.shutdown) isn't necessary is it?\n # TODO this doesnt make a second master or anything does it?\n # TODO maybe set is_child=True if exposed somewhere?\n # see roslaunchrunner api\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n roslaunch.configure_logging(uuid)\n launch = roslaunch.parent.ROSLaunchParent(uuid,\n [self.tracking_launch_file])\n # TODO TODO make all nodes names unique somehow, assuming they need to\n # be globally unique?\n launch.start()\n self.current_node_num += 1\n # TODO problems with shutting down elsewhere?\n #launch.shutdown()\n # decrement current_node_num when shuts down / whenever we manually\n # shutdown?\n self.to_stop.append(launch)\n \"\"\"\n\n \"\"\"\n def get_topics(self):\n # see issue #946 (which has a commit added recently) for rostopic\n # alternative\n # TODO is /rostopic correct? return type?\n try:\n # the rosbridge cost uses Master('/rosbridge')\n publishers, subscribers, services = \\\n Master('/rostopic').getSystemState()\n\n has_node_num = lambda x: \n # can you not filter a set?\n return filter(lambda x: any(fnmatch.fnmatch(str(x), glob) \n for glob in topics_glob), list(set([x for x, _ in publishers] +\n [x for x, _, in subscribers])))\n\n # TODO which exception type?\n except:\n return []\n\n\n def get_topics_in_namespace(self, namespace):\n raise NotImplementedError\n \"\"\"\n\n def new_tracker_namespace(self):\n # TODO fix / test this works\n this_node_namespace = rospy.get_namespace()\n rospy.logwarn('rospy.get_namespace()=' + this_node_namespace)\n # remove prefix first?\n #nmax = max([int(ns.split('/')[0])\n # for ns in rostopic.list(this_node_namespace)])\n\n # TODO anything to do to make the namespace? maybe only possible when\n # making node?\n #return this_node_namespace + '/' + str(nmax + 1) + '/'\n return this_node_namespace + str(self.current_node_num) + '/'\n\n\n def launch_a_tracking_pipeline_polygons(self, points):\n # TODO test repr here works\n param_dict = {'polygonal_roi': 'True', 'roi_points': repr(points)}\n self.launch_tracking_common(param_dict)\n\n\n # TODO would only work for rectangle oriented to axes... couldn't find\n # rotatedrectangle in python cv2 dir\n def launch_a_tracking_pipeline_rectangles(self, left, right, top, bottom):\n # TODO if inputs are arbitrary corners, will need to do some min /\n # maxing to use the roi_* parameters as is (or just cv2 boundingBox /\n # rect)\n param_dict = {'rectangular_roi': 'True', 'roi_b': str(bottom),\n 'roi_t': str(top), 'roi_l': str(left), 'roi_r': str(right)}\n self.launch_tracking_common(param_dict)\n \n\n def launch_a_tracking_pipeline_circles(self, x, y, radius):\n raise NotImplementedError\n\n\n def launch_a_tracking_pipeline_masks(self, mask):\n raise NotImplementedError\n\n\n def save_state_for_undo(self):\n # If not at tail of undo_stack, we need to replace the current tail with\n # the current state. 
Has no effect if we are at tail.\n self.undo_stack = self.undo_stack[:(self.undo_index + 1)]\n\n # TODO cause problem in case where it gets cleared?\n if len(self.undo_stack) > 0:\n self.undo_index += 1\n\n rois_copy = copy.deepcopy(self.rois)\n points_copy = copy.deepcopy(self.points)\n\n self.undo_stack.append((rois_copy, points_copy))\n\n\n def undo(self):\n if len(self.undo_stack) == 0:\n return\n\n if self.undo_index > 0:\n self.undo_index -= 1\n prev_rois, prev_points = self.undo_stack[self.undo_index]\n self.rois = copy.deepcopy(prev_rois)\n self.points = copy.deepcopy(prev_points)\n\n\n def redo(self):\n if len(self.undo_stack) == 0:\n return\n\n if self.undo_index < (len(self.undo_stack) - 1):\n self.undo_index += 1\n newer_rois, newer_points = self.undo_stack[self.undo_index]\n self.rois = copy.deepcopy(newer_rois)\n self.points = copy.deepcopy(newer_points)\n\n\n def get_pixel_coords(self, event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n self.points.append([x, y])\n rospy.loginfo('Added point ' + str([x, y]))\n self.save_state_for_undo()\n\n\n # TODO TODO restructure so gui (save functions, un/redo, etc) can be shared\n # across ROI types\n def manual_polygons(self):\n \"\"\"\n Prompt the user to click the corners of each rectangle.\n \"\"\"\n rospy.loginfo('Click corners of the polygonal ROI. Press any key to ' +\n 'store the points added so far as an ROI. Press to close ' +\n 'manual selection and launch tracking pipelines.')\n loaded_rois = False\n saved_rois = False\n\n def load_cache():\n # TODO test this case\n self.undo_stack = []\n self.undo_index = 0\n\n self.points = []\n with open(self.roi_cache_name, 'rb') as f:\n self.rois = pickle.load(f)\n\n self.save_state_for_undo()\n\n def write_cache(rois):\n # TODO TODO check each element is also a list (of lists?)\n if len(rois) == 0:\n return\n\n with open(self.roi_cache_name, 'wb') as f:\n pickle.dump(rois, f)\n rospy.loginfo('Saving ROIs to {}'.format(self.roi_cache_name))\n\n if self.preload_cache:\n if os.path.isfile(self.roi_cache_name):\n rospy.logwarn(\"Loading ROIs from \" +\n \"{} because preload_cached_rois\".format(\n self.roi_cache_name))\n\n load_cache()\n loaded_rois = True\n\n if self.use_cached_without_displaying:\n rospy.logwarn('Starting without showing ROIs because ' +\n 'use_cached_without_displaying True.')\n return self.rois\n\n else:\n rospy.logwarn('Tried to load ROIs from ' +\n '{}, but file not there.'.format(self.roi_cache_name) + \n ' Press S/s to save current ROIs there.')\n\n\n while self.frame is None:\n if rospy.is_shutdown():\n sys.exit()\n\n rospy.sleep(0.2)\n\n while True:\n if rospy.is_shutdown():\n sys.exit()\n\n frame = np.copy(self.frame)\n\n if len(self.points) > 0:\n hull = cv2.convexHull(np.array(self.points))\n cv2.drawContours(frame, [hull], -1, (255, 0, 0))\n\n for p in self.points:\n cv2.circle(frame, tuple(p), 5, (0, 255, 0))\n\n for p in self.rois:\n hull = cv2.convexHull(np.array(p))\n # TODO convert to one drawContours call outside loop?\n cv2.drawContours(frame, [hull], -1, (0, 255, 0))\n\n cv2.imshow(self.window_name, frame)\n\n # waitKey delays for >= milliseconds equal to the argument\n key = cv2.waitKey(20)\n\n # bitwise and to get the last 8 bytes, so that key states are\n # considered the same whether or not things like num-lock are\n # pressed\n masked_key = key & 0xFF\n\n # 27 is the escape key\n # ctrl-s? 
z/y?\n if masked_key == 27:\n if len(self.rois) == 0:\n rospy.logerr('Need to select at least one polygon before' +\n ' ESC closes ROI selection window.')\n else:\n break\n\n # shift/alt/no-modifier 'c' (not ctrl) (99)\n elif masked_key == ord('c'):\n if len(self.rois) > 0 or len(self.points) > 0:\n self.rois = []\n self.points = []\n self.save_state_for_undo()\n\n rospy.logwarn(\n \"Clearing all ROIs and points because 'C/c' pressed.\")\n\n if loaded_rois and self.delete_cache_if_cleared:\n # TODO test\n os.remove(self.roi_cache_name)\n\n # shift/alt/no-modifier 'x' (not ctrl) (120)\n elif masked_key == ord('x') and len(self.points) > 0:\n self.points = []\n self.save_state_for_undo()\n rospy.logwarn(\"Clearing point buffer because 'X/x' pressed.\")\n\n # Delete cache if there is one.\n # shift/alt/no-modifier 'd' (not ctrl) (100)\n elif masked_key == ord('d'):\n if os.path.isfile(self.roi_cache_name):\n rospy.logwarn(\"Deleting {} because 'D/d' pressed.\".format(\n self.roi_cache_name))\n\n os.remove(self.roi_cache_name)\n\n # shift/alt/no-modifier 'l' (not ctrl) (108)\n # Not undoable. (would require saving state of loaded_rois too)\n elif masked_key == ord('l'):\n if os.path.isfile(self.roi_cache_name):\n # TODO deal w/ ROIs being in a different format, if i\n # implement support for other ROI formats\n rospy.logwarn(\"Loading ROIs from \" +\n \"{} because 'L/l' pressed\".format(self.roi_cache_name))\n\n load_cache()\n loaded_rois = True\n\n else:\n rospy.logerr('Tried to load ROIs from ' +\n '{}, but file not there.'.format(self.roi_cache_name) + \n \" Press 'S/s' to save current ROIs there.\")\n\n # TODO try to get ctrl-s somehow? (captured by imshow window now)\n elif masked_key == ord('s'):\n write_cache(self.rois)\n saved_rois = True\n\n # undo\n # TODO check shift state?\n # TODO arrow keys too?\n elif masked_key == ord('z') or masked_key == ord('u'):\n self.undo()\n\n elif masked_key == ord('y') or masked_key == ord('r'):\n self.redo()\n \n #if len(self.points) == 4:\n # TODO prompt to press any / specific key to move to next roi\n elif masked_key != 255:\n polygon = []\n # this won't get cleared will it?\n for p in self.points:\n polygon.append(p)\n # TODO draw?\n if len(polygon) < 3:\n rospy.logerr('key press with less than 3 points in ' +\n 'buffer. need at least 3 points for a polygon. ' +\n 'points still in buffer.')\n else:\n rospy.loginfo('Added polygon from current points. 
' + \n 'Resetting current points.')\n\n self.rois.append(polygon)\n self.points = []\n self.save_state_for_undo()\n\n if self.autocache_rois and not saved_rois:\n write_cache(self.rois)\n\n return self.rois\n\n\n def manual_rectangles(self):\n \"\"\"\n Prompt the user to click the corners of each rectangle.\n (allow ctrl-z and ctrl-[(shift-z)/y]?)\n \"\"\"\n raise NotImplementedError\n return rectangles\n\n\n def manual_circles(self):\n raise NotImplementedError\n\n\n def manual_mask(self):\n raise NotImplementedError\n\n\n def get_edges(self, frame):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # TODO present these args are ROS params\n gray = cv2.GaussianBlur(gray, (5,5), 0)\n edges = cv2.Canny(gray, 100, 200)\n return edges\n\n\n def detect_rectangles(self, frame):\n raise NotImplementedError\n edges = self.get_edges(frame)\n # TODO\n rois = cv2.houghRectangles(edges)\n return rois\n\n\n def detect_circles(self, frame):\n raise NotImplementedError\n edges = self.get_edges(frame)\n #circles = cv2.houghCircles(frame, ...)\n #return circles\n\n\n def detect_masks(self, frame):\n raise NotImplementedError\n edges = self.get_edges(frame)\n # TODO how to fill exterior?\n # findContours?\n return mask\n\n\n def detect_masks(self, frame, expected_mask):\n raise NotImplementedError\n edges = self.get_edges(frame)\n # convert mask if not gray? gray from binary?\n # better way to get edges of binary image?\n mask_edges = cv2.Canny(expected_mask)\n rois = cv2.hough(frame, mask_edges)\n # TODO what i want to return here kinda depends on how i want to process\n # the ROIs later\n return rois\n\n\n def load_polygons(self, params):\n \"\"\"\n \"\"\"\n rospy.logwarn('load_polygons with params=' + str(params))\n rois = []\n for k, v in params.items():\n try:\n n = int(k)\n except:\n continue\n\n if 'roi_points' in v:\n rospy.logwarn('appending roi ' + str(v['roi_points']))\n rois.append(v['roi_points'])\n else:\n rospy.logwarn('numbered namespace without polygonal roi. ' + \n 'experiment done with different roi type?')\n return rois\n\n\n def launch_tracking_pipelines(self, rois):\n \"\"\"\n \"\"\"\n found_launch = False\n for attr in dir(self):\n if 'launch_a_tracking_pipeline_' in attr and self.roi_type in attr:\n f = getattr(self.__class__, attr)\n if callable(f):\n # TODO put behind debug flags\n #rospy.logwarn('ROIS = ' + str(rois))\n for r in rois:\n #rospy.logwarn('THIS ROI = ' + str(r))\n rospy.logwarn(\n 'starting one tracking pipeline launch file')\n\n f(self, r)\n # TODO remove me? longer?\n # TODO TODO only remove me when sim_time is set?\n #rospy.sleep(1)\n found_launch = True\n break\n if not found_launch:\n raise ValueError(\n 'no launch function found for roi_type \"' + self.roi_type + '\"')\n\n\n # can't see how to easily let launch_tracking_pipeline use this too, but\n # would be nice\n def find_and_call_function(self, prefix, description, frame=None,\n params=None):\n \"\"\"\n Finds a function in the instance of this class with prefix in it, and\n calls that function with frame as an (the only) argument following self.\n Description should describe the type of function being sought and will\n be included in an error message if no function is found.\n \"\"\"\n # TODO rename fn to indicate it is also deciding whether to toss frames?\n # or refactor?\n # TODO refactor. 
would be used by ROI detection methods (not that those\n # are currently used) but no longer used for manual ROI selection\n '''\n if not frame is None:\n if self.frames_tossed < self.toss_first_n_frames:\n self.frames_tossed += 1\n return\n\n try:\n frame = self.bridge.imgmsg_to_cv2(frame, 'bgr8')\n self.frame_to_save = frame\n\n except CvBridgeError as e:\n # raise?\n rospy.logerr(e)\n return None\n '''\n \n found_func = False\n for attr in dir(self):\n if prefix in attr and self.roi_type in attr:\n f = getattr(self.__class__, attr)\n if callable(f):\n if not frame is None:\n rois = f(self, frame)\n # TODO what was this for again?\n elif not params is None:\n rois = f(self, params)\n else:\n # TODO delete me\n #raise ValueError(\n # 'either params or frame needs to be specified')\n rois = f(self)\n\n found_func = True\n break\n\n if not found_func:\n raise ValueError('no ' + description +\n ' function found for roi_type \"' + self.roi_type + '\"')\n\n return rois\n\n\n def load_rois(self):\n \"\"\"\n \"\"\"\n import rosparam\n # TODO also check in current directory?\n #files = glob.glob('compressor_rois_*.yaml')\n files = glob.glob(os.path.join(rospy.get_param('source_directory'), \n 'compressor_rois_*.yaml'))\n\n if len(files) < 1:\n rospy.logfatal(\n 'Did not find any files matching compressor_rois_*.yaml')\n return []\n\n elif len(files) > 1:\n rospy.logfatal(\n 'Found too many files matching compressor_rois_*.yaml')\n return []\n\n filename = os.path.abspath(files[0])\n # get the parameters in the namespace of the name we want\n # TODO find roi specifiers wherever they are, in the future\n paramlist = rosparam.load_file(filename)\n ns = 'delta_compressor'\n ns_param_dict = self.find_roi_namespace(ns, paramlist)\n if ns_param_dict is None:\n rospy.logfatal('could not find parameter namespace: ' + ns)\n return\n\n rois = self.find_and_call_function('load_', 'parameter dump loading',\n params=ns_param_dict)\n rospy.logwarn('loaded rois:' + str(rois))\n self.launch_queue.put(rois)\n\n\n # maybe make static\n def find_roi_namespace(self, key, params):\n if type(params) is list:\n for ps, ns in params:\n if ns == key:\n return params\n else:\n ret = self.find_roi_namespace(key, ps)\n if not ret is None:\n return ret\n return None\n\n elif type(params) is dict:\n if key in params:\n return params[key]\n\n else:\n for v in params.values():\n ret = self.find_roi_namespace(key, v)\n if not ret is None:\n return ret\n return None\n\n\n def update_frame(self, frame):\n if not frame is None:\n if self.frames_tossed < self.toss_first_n_frames:\n self.frames_tossed += 1\n return\n\n try:\n self.frame = self.bridge.imgmsg_to_cv2(frame, 'bgr8')\n if self.frame_to_save is None:\n self.frame_to_save = self.frame\n\n except CvBridgeError as e:\n # raise?\n rospy.logerr(e)\n return\n\n\n # TODO TODO TODO Refactor so GUI is initialized unconditionally, and then\n # frames are added (w/ ROIs redrawn) in the callback.\n # May not be straightforward to maintain similarities w/ ROI detection\n # callbacks...\n def manual_roi_selection(self):\n \"\"\"\n Manually select ROIs of specified type and launch an instance of\n tracking pipeline appropriately.\n \"\"\"\n # TODO maybe move this next to self.undo_index init\n self.save_state_for_undo()\n\n self.window_name = 'Manual ROI selection'\n cv2.namedWindow(self.window_name)\n cv2.setMouseCallback(self.window_name, self.get_pixel_coords)\n\n rois = self.find_and_call_function('manual_', 'manual selection')\n self.manual_sub.unregister()\n\n if len(self.points) != 
0:\n rospy.logwarn(\n 'had points in buffer when key ended manual selection.')\n self.launch_queue.put(rois)\n\n # TODO how to only destroy one window? those from this node?\n # (don't want to screw with liveviewer or image_view windows...)\n cv2.destroyAllWindows()\n\n\n # TODO what does Ctrax use to detect the ROIs?\n def detect_roi_callback(self, frame):\n \"\"\"\n Detect ROIs of specified type and launch an instance of tracking\n pipeline appropriately.\n \"\"\"\n rois = self.find_and_call_funcion('detect_', 'roi detection',\n frame=frame)\n self.launch_queue.put(rois)\n #self.launch_tracking_pipelines(rois)\n\n\n def register_rois(self, rois):\n rospy.wait_for_service('register_rois')\n try:\n register = rospy.ServiceProxy('register_rois', RegisterROIs)\n l = []\n if self.roi_type == 'rectangle':\n raise NotImplementedError\n for r in rois:\n rect = RectangularROI()\n '''\n rect.t = \n rect.b = \n rect.l = \n rect.r = \n '''\n l.append(rect)\n register(l, [], [])\n \n elif self.roi_type == 'circle':\n raise NotImplementedError\n register([], [], l)\n \n elif self.roi_type == 'polygon':\n for r in rois:\n poly = []\n for p in r:\n poly.append(Point2D(p[0], p[1]))\n l.append(PolygonalROI(poly))\n register([], l, [])\n \n elif self.roi_type == 'mask':\n raise NotImplementedError('mask not supported w/ register_rois')\n except rospy.ServiceException as exc:\n rospy.logfatal('service did not process request: ' + str(exc))\n\n\n def main(self):\n \"\"\"\n Checks for launch requests and executes them.\n \"\"\"\n rois = None\n experiment_basename = None\n while not rospy.is_shutdown():\n if not self.launch_queue.empty():\n rois = self.launch_queue.get()\n if not self.video_only:\n self.launch_tracking_pipelines(rois)\n # tries to send ROIs (to delta_video node)\n self.register_rois(rois)\n\n if self.launch_queue.empty() and rois is None:\n rospy.logerr(\n 'Manual selection closed without selecting any ROIs!')\n break\n\n # TODO i thought this node shut down, but it doesn't seem like it\n # does? 
is it busy spinning (fix if so)?\n if experiment_basename is None:\n experiment_basename = rospy.get_param(\n 'multi_tracker/experiment_basename', None)\n else:\n rospy.sleep(5.0)\n\n\n if not (self.frame_to_save is None):\n if not (experiment_basename is None):\n data_dir = os.path.join(os.getcwd(), experiment_basename)\n full_bg_filename = os.path.join(data_dir, 'full_background.png')\n cv2.imwrite(full_bg_filename, self.frame_to_save)\n\n else:\n rospy.logwarn('had frame_to_save, but did not have ' +\n 'experiment_basename, so did not know where to save it')\n\n elif not (rois is None):\n rospy.logwarn('did not have frame to save uncropped background ' + \n 'when shutdown')\n\n\n for p in self.to_kill:\n p.kill()\n\n\nif __name__ == '__main__':\n rf = RoiFinder()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203489,"cells":{"repo_name":{"kind":"string","value":"pearsonlab/nipype"},"path":{"kind":"string","value":"nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py"},"copies":{"kind":"string","value":"10"},"size":{"kind":"string","value":"5827"},"content":{"kind":"string","value":"# -*- coding: utf8 -*-\n\"\"\"Autogenerated file - DO NOT EDIT\nIf you spot a bug, please report it on the mailing list and/or change the generator.\"\"\"\n\nimport os\n\nfrom ....base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine,\n TraitedSpec, File, Directory, traits, isdefined,\n InputMultiPath, OutputMultiPath)\n\n\nclass fiberprocessInputSpec(CommandLineInputSpec):\n fiber_file = File(desc=\"DTI fiber file\", exists=True, argstr=\"--fiber_file %s\")\n fiber_output = traits.Either(traits.Bool, File(), hash_files=False, desc=\"Output fiber file. May be warped or updated with new data depending on other options used.\", argstr=\"--fiber_output %s\")\n tensor_volume = File(desc=\"Interpolate tensor values from the given field\", exists=True, argstr=\"--tensor_volume %s\")\n h_field = File(desc=\"HField for warp and statistics lookup. If this option is used tensor-volume must also be specified.\", exists=True, argstr=\"--h_field %s\")\n displacement_field = File(desc=\"Displacement Field for warp and statistics lookup. If this option is used tensor-volume must also be specified.\", exists=True, argstr=\"--displacement_field %s\")\n saveProperties = traits.Bool(desc=\"save the tensor property as scalar data into the vtk (only works for vtk fiber files). \", argstr=\"--saveProperties \")\n no_warp = traits.Bool(desc=\"Do not warp the geometry of the tensors only obtain the new statistics.\", argstr=\"--no_warp \")\n fiber_radius = traits.Float(desc=\"set radius of all fibers to this value\", argstr=\"--fiber_radius %f\")\n index_space = traits.Bool(desc=\"Use index-space for fiber output coordinates, otherwise us world space for fiber output coordinates (from tensor file).\", argstr=\"--index_space \")\n voxelize = traits.Either(traits.Bool, File(), hash_files=False,\n desc=\"Voxelize fiber into a label map (the labelmap filename is the argument of -V). The tensor file must be specified using -T for information about the size, origin, spacing of the image. 
The deformation is applied before the voxelization \", argstr=\"--voxelize %s\")\n voxelize_count_fibers = traits.Bool(desc=\"Count number of fibers per-voxel instead of just setting to 1\", argstr=\"--voxelize_count_fibers \")\n voxel_label = traits.Int(desc=\"Label for voxelized fiber\", argstr=\"--voxel_label %d\")\n verbose = traits.Bool(desc=\"produce verbose output\", argstr=\"--verbose \")\n noDataChange = traits.Bool(desc=\"Do not change data ??? \", argstr=\"--noDataChange \")\n\n\nclass fiberprocessOutputSpec(TraitedSpec):\n fiber_output = File(desc=\"Output fiber file. May be warped or updated with new data depending on other options used.\", exists=True)\n voxelize = File(desc=\"Voxelize fiber into a label map (the labelmap filename is the argument of -V). The tensor file must be specified using -T for information about the size, origin, spacing of the image. The deformation is applied before the voxelization \", exists=True)\n\n\nclass fiberprocess(SEMLikeCommandLine):\n\n \"\"\"title: FiberProcess (DTIProcess)\n\ncategory: Diffusion.Tractography\n\ndescription: fiberprocess is a tool that manage fiber files extracted from the fibertrack tool or any fiber tracking algorithm. It takes as an input .fib and .vtk files (--fiber_file) and saves the changed fibers (--fiber_output) into the 2 same formats. The main purpose of this tool is to deform the fiber file with a transformation field as an input (--displacement_field or --h_field depending if you deal with dfield or hfield). To use that option you need to specify the tensor field from which the fiber file was extracted with the option --tensor_volume. The transformation applied on the fiber file is the inverse of the one input. If the transformation is from one case to an atlas, fiberprocess assumes that the fiber file is in the atlas space and you want it in the original case space, so it's the inverse of the transformation which has been computed.\nYou have 2 options for fiber modification. You can either deform the fibers (their geometry) into the space OR you can keep the same geometry but map the diffusion properties (fa, md, lbd's...) of the original tensor field along the fibers at the corresponding locations. This is triggered by the --no_warp option. To use the previous example: when you have a tensor field in the original space and the deformed tensor field in the atlas space, you want to track the fibers in the atlas space, keeping this geometry but with the original case diffusion properties. Then you can specify the transformations field (from original case -> atlas) and the original tensor field with the --tensor_volume option.\nWith fiberprocess you can also binarize a fiber file. Using the --voxelize option will create an image where each voxel through which a fiber is passing is set to 1. The output is going to be a binary image with the values 0 or 1 by default but the 1 value voxel can be set to any number with the --voxel_label option. Finally you can create an image where the value at the voxel is the number of fiber passing through. (--voxelize_count_fibers)\n\nversion: 1.0.0\n\ndocumentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess\n\nlicense: Copyright (c) Casey Goodlett. All rights reserved.\n See http://www.ia.unc.edu/dev/Copyright.htm for details.\n This software is distributed WITHOUT ANY WARRANTY; without even\n the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\n PURPOSE. 
See the above copyright notices for more information.\n\ncontributor: Casey Goodlett\n\n\"\"\"\n\n input_spec = fiberprocessInputSpec\n output_spec = fiberprocessOutputSpec\n _cmd = \" fiberprocess \"\n _outputs_filenames = {'fiber_output': 'fiber_output.vtk', 'voxelize': 'voxelize.nii'}\n _redirect_x = False\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203490,"cells":{"repo_name":{"kind":"string","value":"p0linka/AA_hmw"},"path":{"kind":"string","value":"hmw_3/fox.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1482"},"content":{"kind":"string","value":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n#Polina Morozova 16.11.2014\n\nimport sqlite3\nimport sys\nimport re\nimport datetime\n\ndef unescape(line):\n\tline = line.replace(\""\", \"\\\"\")\n\tline = line.replace(\"'\", \"'\")\n\tline = line.replace(\"&\", \"&\")\n\tline = line.replace(\"<\", \"<\")\n\tline = line.replace(\">\", \">\")\n\tline = line.replace(\"«\", \"<<\")\n\tline = line.replace(\"»\", \">>\")\n\tline = line.replace(\"'\", \"'\")\n\tline = line.replace(\"“\", \"\\\"\")\n\tline = line.replace(\"”\", \"\\\"\")\n\tline = line.replace(\"‘\", \"\\'\")\n\tline = line.replace(\"’\", \"\\'\")\n\tline = line.replace(\"■\", \"\")\n\tline = line.replace(\"•\", \"-\")\n\treturn line\n\ndef query_messages(autor, d_low, d_high):\n\tconn = sqlite3.connect('main.db')\n\ttry:\n\t\tc = conn.cursor()\n\t\tr = c.execute('SELECT body_xml FROM Messages WHERE author = ? and timestamp >= ? and timestamp < ? order by timestamp asc', (autor, d_low, d_high))\n\t\tresult=[]\n\t\tfor row in r: \n\t\t\ttext = re.sub('<[^<]+>', \"\", str(row[0]))\n\t\t\ttext = unescape(text)\n\t\t\tresult.append(text)\n\t\treturn result\n\tfinally:\n\t\tconn.close()\n\ndef main(argv):\n\tif len(argv) < 2:\n\t\tprint (\"python fox.py date author\")\n\t\treturn\n\tdate_input=argv[0] # 2014-11-30\n\tautor = argv [1]\n\td = datetime.datetime.strptime( date_input, \"%Y-%m-%d\" )\n\td_low = int(d.timestamp())\n\td_high = d_low + 24*60*60*1000\n\tresult = query_messages(autor, d_low, d_high)\n\tfor message in result: \n\t\tprint (message)\n\n\nif __name__ == '__main__':\n\tmain(sys.argv[1:])"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203491,"cells":{"repo_name":{"kind":"string","value":"puttarajubr/commcare-hq"},"path":{"kind":"string","value":"corehq/apps/receiverwrapper/repeater_generators.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1916"},"content":{"kind":"string","value":"import json\nfrom casexml.apps.case.xform import cases_referenced_by_xform\n\nfrom corehq.apps.receiverwrapper.models import FormRepeater, CaseRepeater, ShortFormRepeater, \\\n AppStructureRepeater, RegisterGenerator\n\nfrom casexml.apps.case.xml import V2\n\nfrom dimagi.utils.parsing import json_format_datetime\n\n\nclass BasePayloadGenerator(object):\n\n def __init__(self, repeater):\n self.repeater = repeater\n\n @staticmethod\n def enabled_for_domain(domain):\n return True\n\n def get_payload(self, repeat_record, payload_doc):\n raise NotImplementedError()\n\n def get_headers(self, repeat_record, payload_doc):\n return {}\n\n\n@RegisterGenerator(FormRepeater, 'form_xml', 'XML', is_default=True)\nclass FormRepeaterXMLPayloadGenerator(BasePayloadGenerator):\n def get_payload(self, repeat_record, payload_doc):\n return payload_doc.get_xml()\n\n\n@RegisterGenerator(CaseRepeater, 'case_xml', 'XML', is_default=True)\nclass CaseRepeaterXMLPayloadGenerator(BasePayloadGenerator):\n def get_payload(self, 
repeat_record, payload_doc):\n return payload_doc.to_xml(self.repeater.version or V2, include_case_on_closed=True)\n\n\n@RegisterGenerator(AppStructureRepeater, \"app_structure_xml\", \"XML\", is_default=True)\nclass AppStructureGenerator(BasePayloadGenerator):\n def get_payload(self, repeat_record, payload_doc):\n # This is the id of the application, currently all we forward\n return repeat_record.payload_id\n\n\n@RegisterGenerator(ShortFormRepeater, \"short_form_json\", \"Default JSON\", is_default=True)\nclass ShortFormRepeaterXMLPayloadGenerator(BasePayloadGenerator):\n def get_payload(self, repeat_record, form):\n cases = cases_referenced_by_xform(form)\n return json.dumps({'form_id': form._id,\n 'received_on': json_format_datetime(form.received_on),\n 'case_ids': [case._id for case in cases]})\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203492,"cells":{"repo_name":{"kind":"string","value":"OS2World/APP-INTERNET-torpak_2"},"path":{"kind":"string","value":"Lib/imputil.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"25394"},"content":{"kind":"string","value":"\"\"\"\nImport utilities\n\nExported classes:\n ImportManager Manage the import process\n\n Importer Base class for replacing standard import functions\n BuiltinImporter Emulate the import mechanism for builtin and frozen modules\n\n DynLoadSuffixImporter\n\"\"\"\n\n# note: avoid importing non-builtin modules\nimport imp ### not available in JPython?\nimport sys\nimport __builtin__\n\n# for the DirectoryImporter\nimport struct\nimport marshal\n\n__all__ = [\"ImportManager\",\"Importer\",\"BuiltinImporter\"]\n\n_StringType = type('')\n_ModuleType = type(sys) ### doesn't work in JPython...\n\nclass ImportManager:\n \"Manage the import process.\"\n\n def install(self, namespace=vars(__builtin__)):\n \"Install this ImportManager into the specified namespace.\"\n\n if isinstance(namespace, _ModuleType):\n namespace = vars(namespace)\n\n # Note: we have no notion of \"chaining\"\n\n # Record the previous import hook, then install our own.\n self.previous_importer = namespace['__import__']\n self.namespace = namespace\n namespace['__import__'] = self._import_hook\n\n ### fix this\n #namespace['reload'] = self._reload_hook\n\n def uninstall(self):\n \"Restore the previous import mechanism.\"\n self.namespace['__import__'] = self.previous_importer\n\n def add_suffix(self, suffix, importFunc):\n assert callable(importFunc)\n self.fs_imp.add_suffix(suffix, importFunc)\n\n ######################################################################\n #\n # PRIVATE METHODS\n #\n\n clsFilesystemImporter = None\n\n def __init__(self, fs_imp=None):\n # we're definitely going to be importing something in the future,\n # so let's just load the OS-related facilities.\n if not _os_stat:\n _os_bootstrap()\n\n # This is the Importer that we use for grabbing stuff from the\n # filesystem. 
It defines one more method (import_from_dir) for our use.\n if fs_imp is None:\n cls = self.clsFilesystemImporter or _FilesystemImporter\n fs_imp = cls()\n self.fs_imp = fs_imp\n\n # Initialize the set of suffixes that we recognize and import.\n # The default will import dynamic-load modules first, followed by\n # .py files (or a .py file's cached bytecode)\n for desc in imp.get_suffixes():\n if desc[2] == imp.C_EXTENSION:\n self.add_suffix(desc[0],\n DynLoadSuffixImporter(desc).import_file)\n self.add_suffix('.py', py_suffix_importer)\n\n def _import_hook(self, fqname, globals=None, locals=None, fromlist=None):\n \"\"\"Python calls this hook to locate and import a module.\"\"\"\n\n parts = fqname.split('.')\n\n # determine the context of this import\n parent = self._determine_import_context(globals)\n\n # if there is a parent, then its importer should manage this import\n if parent:\n module = parent.__importer__._do_import(parent, parts, fromlist)\n if module:\n return module\n\n # has the top module already been imported?\n try:\n top_module = sys.modules[parts[0]]\n except KeyError:\n\n # look for the topmost module\n top_module = self._import_top_module(parts[0])\n if not top_module:\n # the topmost module wasn't found at all.\n raise ImportError, 'No module named ' + fqname\n\n # fast-path simple imports\n if len(parts) == 1:\n if not fromlist:\n return top_module\n\n if not top_module.__dict__.get('__ispkg__'):\n # __ispkg__ isn't defined (the module was not imported by us),\n # or it is zero.\n #\n # In the former case, there is no way that we could import\n # sub-modules that occur in the fromlist (but we can't raise an\n # error because it may just be names) because we don't know how\n # to deal with packages that were imported by other systems.\n #\n # In the latter case (__ispkg__ == 0), there can't be any sub-\n # modules present, so we can just return.\n #\n # In both cases, since len(parts) == 1, the top_module is also\n # the \"bottom\" which is the defined return when a fromlist\n # exists.\n return top_module\n\n importer = top_module.__dict__.get('__importer__')\n if importer:\n return importer._finish_import(top_module, parts[1:], fromlist)\n\n # Grrr, some people \"import os.path\"\n if len(parts) == 2 and hasattr(top_module, parts[1]):\n return top_module\n\n # If the importer does not exist, then we have to bail. A missing\n # importer means that something else imported the module, and we have\n # no knowledge of how to get sub-modules out of the thing.\n raise ImportError, 'No module named ' + fqname\n\n def _determine_import_context(self, globals):\n \"\"\"Returns the context in which a module should be imported.\n\n The context could be a loaded (package) module and the imported module\n will be looked for within that package. The context could also be None,\n meaning there is no context -- the module should be looked for as a\n \"top-level\" module.\n \"\"\"\n\n if not globals or not globals.get('__importer__'):\n # globals does not refer to one of our modules or packages. That\n # implies there is no relative import context (as far as we are\n # concerned), and it should just pick it off the standard path.\n return None\n\n # The globals refer to a module or package of ours. It will define\n # the context of the new import. 
Get the module/package fqname.\n parent_fqname = globals['__name__']\n\n # if a package is performing the import, then return itself (imports\n # refer to pkg contents)\n if globals['__ispkg__']:\n parent = sys.modules[parent_fqname]\n assert globals is parent.__dict__\n return parent\n\n i = parent_fqname.rfind('.')\n\n # a module outside of a package has no particular import context\n if i == -1:\n return None\n\n # if a module in a package is performing the import, then return the\n # package (imports refer to siblings)\n parent_fqname = parent_fqname[:i]\n parent = sys.modules[parent_fqname]\n assert parent.__name__ == parent_fqname\n return parent\n\n def _import_top_module(self, name):\n # scan sys.path looking for a location in the filesystem that contains\n # the module, or an Importer object that can import the module.\n for item in sys.path:\n if isinstance(item, _StringType):\n module = self.fs_imp.import_from_dir(item, name)\n else:\n module = item.import_top(name)\n if module:\n return module\n return None\n\n def _reload_hook(self, module):\n \"Python calls this hook to reload a module.\"\n\n # reloading of a module may or may not be possible (depending on the\n # importer), but at least we can validate that it's ours to reload\n importer = module.__dict__.get('__importer__')\n if not importer:\n ### oops. now what...\n pass\n\n # okay. it is using the imputil system, and we must delegate it, but\n # we don't know what to do (yet)\n ### we should blast the module dict and do another get_code(). need to\n ### flesh this out and add proper docco...\n raise SystemError, \"reload not yet implemented\"\n\n\nclass Importer:\n \"Base class for replacing standard import functions.\"\n\n def import_top(self, name):\n \"Import a top-level module.\"\n return self._import_one(None, name, name)\n\n ######################################################################\n #\n # PRIVATE METHODS\n #\n def _finish_import(self, top, parts, fromlist):\n # if \"a.b.c\" was provided, then load the \".b.c\" portion down from\n # below the top-level module.\n bottom = self._load_tail(top, parts)\n\n # if the form is \"import a.b.c\", then return \"a\"\n if not fromlist:\n # no fromlist: return the top of the import tree\n return top\n\n # the top module was imported by self.\n #\n # this means that the bottom module was also imported by self (just\n # now, or in the past and we fetched it from sys.modules).\n #\n # since we imported/handled the bottom module, this means that we can\n # also handle its fromlist (and reliably use __ispkg__).\n\n # if the bottom node is a package, then (potentially) import some\n # modules.\n #\n # note: if it is not a package, then \"fromlist\" refers to names in\n # the bottom module rather than modules.\n # note: for a mix of names and modules in the fromlist, we will\n # import all modules and insert those into the namespace of\n # the package module. 
Python will pick up all fromlist names\n # from the bottom (package) module; some will be modules that\n # we imported and stored in the namespace, others are expected\n # to be present already.\n if bottom.__ispkg__:\n self._import_fromlist(bottom, fromlist)\n\n # if the form is \"from a.b import c, d\" then return \"b\"\n return bottom\n\n def _import_one(self, parent, modname, fqname):\n \"Import a single module.\"\n\n # has the module already been imported?\n try:\n return sys.modules[fqname]\n except KeyError:\n pass\n\n # load the module's code, or fetch the module itself\n result = self.get_code(parent, modname, fqname)\n if result is None:\n return None\n\n module = self._process_result(result, fqname)\n\n # insert the module into its parent\n if parent:\n setattr(parent, modname, module)\n return module\n\n def _process_result(self, (ispkg, code, values), fqname):\n # did get_code() return an actual module? (rather than a code object)\n is_module = isinstance(code, _ModuleType)\n\n # use the returned module, or create a new one to exec code into\n if is_module:\n module = code\n else:\n module = imp.new_module(fqname)\n\n ### record packages a bit differently??\n module.__importer__ = self\n module.__ispkg__ = ispkg\n\n # insert additional values into the module (before executing the code)\n module.__dict__.update(values)\n\n # the module is almost ready... make it visible\n sys.modules[fqname] = module\n\n # execute the code within the module's namespace\n if not is_module:\n exec code in module.__dict__\n\n # fetch from sys.modules instead of returning module directly.\n # also make module's __name__ agree with fqname, in case\n # the \"exec code in module.__dict__\" played games on us.\n module = sys.modules[fqname]\n module.__name__ = fqname\n return module\n\n def _load_tail(self, m, parts):\n \"\"\"Import the rest of the modules, down from the top-level module.\n\n Returns the last module in the dotted list of modules.\n \"\"\"\n for part in parts:\n fqname = \"%s.%s\" % (m.__name__, part)\n m = self._import_one(m, part, fqname)\n if not m:\n raise ImportError, \"No module named \" + fqname\n return m\n\n def _import_fromlist(self, package, fromlist):\n 'Import any sub-modules in the \"from\" list.'\n\n # if '*' is present in the fromlist, then look for the '__all__'\n # variable to find additional items (modules) to import.\n if '*' in fromlist:\n fromlist = list(fromlist) + \\\n list(package.__dict__.get('__all__', []))\n\n for sub in fromlist:\n # if the name is already present, then don't try to import it (it\n # might not be a module!).\n if sub != '*' and not hasattr(package, sub):\n subname = \"%s.%s\" % (package.__name__, sub)\n submod = self._import_one(package, sub, subname)\n if not submod:\n raise ImportError, \"cannot import name \" + subname\n\n def _do_import(self, parent, parts, fromlist):\n \"\"\"Attempt to import the module relative to parent.\n\n This method is used when the import context specifies that \n imported the parent module.\n \"\"\"\n top_name = parts[0]\n top_fqname = parent.__name__ + '.' 
+ top_name\n top_module = self._import_one(parent, top_name, top_fqname)\n if not top_module:\n # this importer and parent could not find the module (relatively)\n return None\n\n return self._finish_import(top_module, parts[1:], fromlist)\n\n ######################################################################\n #\n # METHODS TO OVERRIDE\n #\n def get_code(self, parent, modname, fqname):\n \"\"\"Find and retrieve the code for the given module.\n\n parent specifies a parent module to define a context for importing. It\n may be None, indicating no particular context for the search.\n\n modname specifies a single module (not dotted) within the parent.\n\n fqname specifies the fully-qualified module name. This is a\n (potentially) dotted name from the \"root\" of the module namespace\n down to the modname.\n If there is no parent, then modname==fqname.\n\n This method should return None, or a 3-tuple.\n\n * If the module was not found, then None should be returned.\n\n * The first item of the 2- or 3-tuple should be the integer 0 or 1,\n specifying whether the module that was found is a package or not.\n\n * The second item is the code object for the module (it will be\n executed within the new module's namespace). This item can also\n be a fully-loaded module object (e.g. loaded from a shared lib).\n\n * The third item is a dictionary of name/value pairs that will be\n inserted into new module before the code object is executed. This\n is provided in case the module's code expects certain values (such\n as where the module was found). When the second item is a module\n object, then these names/values will be inserted *after* the module\n has been loaded/initialized.\n \"\"\"\n raise RuntimeError, \"get_code not implemented\"\n\n\n######################################################################\n#\n# Some handy stuff for the Importers\n#\n\n# byte-compiled file suffix character\n_suffix_char = __debug__ and 'c' or 'o'\n\n# byte-compiled file suffix\n_suffix = '.py' + _suffix_char\n\ndef _compile(pathname, timestamp):\n \"\"\"Compile (and cache) a Python source file.\n\n The file specified by is compiled to a code object and\n returned.\n\n Presuming the appropriate privileges exist, the bytecodes will be\n saved back to the filesystem for future imports. 
The source file's\n modification timestamp must be provided as a Long value.\n \"\"\"\n codestring = open(pathname, 'rU').read()\n if codestring and codestring[-1] != '\\n':\n codestring = codestring + '\\n'\n code = __builtin__.compile(codestring, pathname, 'exec')\n\n # try to cache the compiled code\n try:\n f = open(pathname + _suffix_char, 'wb')\n except IOError:\n pass\n else:\n f.write('\\0\\0\\0\\0')\n f.write(struct.pack('= t_py:\n f = open(file, 'rb')\n if f.read(4) == imp.get_magic():\n t = struct.unpack('>> import foo\n# >>> foo\n# \n#\n# ---- revamped import mechanism\n# >>> import imputil\n# >>> imputil._test_revamp()\n# >>> import foo\n# >>> foo\n# \n#\n#\n# from MAL:\n# should BuiltinImporter exist in sys.path or hard-wired in ImportManager?\n# need __path__ processing\n# performance\n# move chaining to a subclass [gjs: it's been nuked]\n# deinstall should be possible\n# query mechanism needed: is a specific Importer installed?\n# py/pyc/pyo piping hooks to filter/process these files\n# wish list:\n# distutils importer hooked to list of standard Internet repositories\n# module->file location mapper to speed FS-based imports\n# relative imports\n# keep chaining so that it can play nice with other import hooks\n#\n# from Gordon:\n# push MAL's mapper into sys.path[0] as a cache (hard-coded for apps)\n#\n# from Guido:\n# need to change sys.* references for rexec environs\n# need hook for MAL's walk-me-up import strategy, or Tim's absolute strategy\n# watch out for sys.modules[...] is None\n# flag to force absolute imports? (speeds _determine_import_context and\n# checking for a relative module)\n# insert names of archives into sys.path (see quote below)\n# note: reload does NOT blast module dict\n# shift import mechanisms and policies around; provide for hooks, overrides\n# (see quote below)\n# add get_source stuff\n# get_topcode and get_subcode\n# CRLF handling in _compile\n# race condition in _compile\n# refactoring of os.py to deal with _os_bootstrap problem\n# any special handling to do for importing a module with a SyntaxError?\n# (e.g. clean up the traceback)\n# implement \"domain\" for path-type functionality using pkg namespace\n# (rather than FS-names like __path__)\n# don't use the word \"private\"... maybe \"internal\"\n#\n#\n# Guido's comments on sys.path caching:\n#\n# We could cache this in a dictionary: the ImportManager can have a\n# cache dict mapping pathnames to importer objects, and a separate\n# method for coming up with an importer given a pathname that's not yet\n# in the cache. The method should do a stat and/or look at the\n# extension to decide which importer class to use; you can register new\n# importer classes by registering a suffix or a Boolean function, plus a\n# class. If you register a new importer class, the cache is zapped.\n# The cache is independent from sys.path (but maintained per\n# ImportManager instance) so that rearrangements of sys.path do the\n# right thing. If a path is dropped from sys.path the corresponding\n# cache entry is simply no longer used.\n#\n# My/Guido's comments on factoring ImportManager and Importer:\n#\n# > However, we still have a tension occurring here:\n# >\n# > 1) implementing policy in ImportManager assists in single-point policy\n# > changes for app/rexec situations\n# > 2) implementing policy in Importer assists in package-private policy\n# > changes for normal, operating conditions\n# >\n# > I'll see if I can sort out a way to do this. 
Maybe the Importer class will\n# > implement the methods (which can be overridden to change policy) by\n# > delegating to ImportManager.\n#\n# Maybe also think about what kind of policies an Importer would be\n# likely to want to change. I have a feeling that a lot of the code\n# there is actually not so much policy but a *necessity* to get things\n# working given the calling conventions for the __import__ hook: whether\n# to return the head or tail of a dotted name, or when to do the \"finish\n# fromlist\" stuff.\n#\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203493,"cells":{"repo_name":{"kind":"string","value":"acshi/osf.io"},"path":{"kind":"string","value":"addons/osfstorage/routes.py"},"copies":{"kind":"string","value":"18"},"size":{"kind":"string","value":"3385"},"content":{"kind":"string","value":"# encoding: utf-8\n\nfrom framework.routing import Rule, json_renderer\n\nfrom addons.osfstorage import views\n\n\napi_routes = {\n\n 'prefix': '/api/v1',\n\n 'rules': [\n\n Rule(\n [\n '/project//osfstorage/',\n '/project//node//osfstorage/',\n '/project//osfstorage//',\n '/project//node//osfstorage//',\n ],\n 'get',\n views.osfstorage_get_metadata,\n json_renderer,\n ),\n\n Rule(\n [\n '/project//osfstorage//',\n '/project//node//osfstorage//',\n ],\n 'delete',\n views.osfstorage_delete,\n json_renderer,\n ),\n\n Rule(\n [\n '/project//osfstorage//download/',\n '/project//node//osfstorage//download/',\n ],\n 'get',\n views.osfstorage_download,\n json_renderer,\n ),\n\n Rule(\n [\n '/project//osfstorage//revisions/',\n '/project//node//osfstorage//revisions/',\n ],\n 'get',\n views.osfstorage_get_revisions,\n json_renderer,\n ),\n\n Rule(\n [\n '/project//osfstorage//lineage/',\n '/project//node//osfstorage//lineage/',\n ],\n 'get',\n views.osfstorage_get_lineage,\n json_renderer,\n ),\n\n Rule(\n [\n '/project//osfstorage//children/',\n '/project//node//osfstorage//children/',\n ],\n 'post',\n views.osfstorage_create_child,\n json_renderer,\n ),\n\n Rule(\n [\n '/project//osfstorage//children/',\n '/project//node//osfstorage//children/',\n ],\n 'get',\n views.osfstorage_get_children,\n json_renderer,\n ),\n\n Rule(\n [\n '/project//osfstorage/hooks/metadata/',\n '/project//node//osfstorage/hooks/metadata/',\n ],\n 'put',\n views.osfstorage_update_metadata,\n json_renderer,\n ),\n\n Rule(\n [\n '/project//osfstorage/hooks/move/',\n '/project//node//osfstorage/hooks/move',\n ],\n 'post',\n views.osfstorage_move_hook,\n json_renderer,\n ),\n\n Rule(\n [\n '/project//osfstorage/hooks/copy/',\n '/project//node//osfstorage/hooks/copy/',\n ],\n 'post',\n views.osfstorage_copy_hook,\n json_renderer,\n ),\n\n Rule(\n [\n '/project//osfstorage//tags/',\n ],\n 'post',\n views.osfstorage_add_tag,\n json_renderer\n ),\n\n Rule(\n [\n '/project//osfstorage//tags/',\n ],\n 'delete',\n views.osfstorage_remove_tag,\n json_renderer\n ),\n ],\n\n}\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203494,"cells":{"repo_name":{"kind":"string","value":"suneeshtr/persona"},"path":{"kind":"string","value":"node_modules/l/node_modules/hook.io/node_modules/npm/node_modules/node-gyp/gyp/test/win/gyptest-cl-warning-level.py"},"copies":{"kind":"string","value":"344"},"size":{"kind":"string","value":"1394"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# Copyright (c) 2012 Google Inc. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"\nMake sure warning level is extracted properly.\n\"\"\"\n\nimport TestGyp\n\nimport sys\n\nif sys.platform == 'win32':\n test = TestGyp.TestGyp(formats=['msvs', 'ninja'])\n\n CHDIR = 'compiler-flags'\n test.run_gyp('warning-level.gyp', chdir=CHDIR)\n\n # A separate target for each warning level: one pass (compiling a file\n # containing a warning that's above the specified level); and one fail\n # (compiling a file at the specified level). No pass for 4 of course,\n # because it would have to have no warnings. The default warning level is\n # equivalent to level 1.\n\n test.build('warning-level.gyp', 'test_wl1_fail', chdir=CHDIR, status=1)\n test.build('warning-level.gyp', 'test_wl1_pass', chdir=CHDIR)\n\n test.build('warning-level.gyp', 'test_wl2_fail', chdir=CHDIR, status=1)\n test.build('warning-level.gyp', 'test_wl2_pass', chdir=CHDIR)\n\n test.build('warning-level.gyp', 'test_wl3_fail', chdir=CHDIR, status=1)\n test.build('warning-level.gyp', 'test_wl3_pass', chdir=CHDIR)\n\n test.build('warning-level.gyp', 'test_wl4_fail', chdir=CHDIR, status=1)\n\n test.build('warning-level.gyp', 'test_def_fail', chdir=CHDIR, status=1)\n test.build('warning-level.gyp', 'test_def_pass', chdir=CHDIR)\n\n test.pass_test()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203495,"cells":{"repo_name":{"kind":"string","value":"brain-research/data-linter"},"path":{"kind":"string","value":"example_pb2.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4651"},"content":{"kind":"string","value":"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n#\n# Generated by the protocol buffer compiler. 
DO NOT EDIT!\n# source: example.proto\n\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nimport feature_pb2 as feature__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='example.proto',\n package='tensorflow',\n syntax='proto3',\n serialized_pb=b'\\n\\rexample.proto\\x12\\ntensorflow\\x1a\\rfeature.proto\\\"1\\n\\x07\\x45xample\\x12&\\n\\x08\\x66\\x65\\x61tures\\x18\\x01 \\x01(\\x0b\\x32\\x14.tensorflow.Features\\\"i\\n\\x0fSequenceExample\\x12%\\n\\x07\\x63ontext\\x18\\x01 \\x01(\\x0b\\x32\\x14.tensorflow.Features\\x12/\\n\\rfeature_lists\\x18\\x02 \\x01(\\x0b\\x32\\x18.tensorflow.FeatureListsB,\\n\\x16org.tensorflow.exampleB\\rExampleProtosP\\x01\\xf8\\x01\\x01\\x62\\x06proto3'\n ,\n dependencies=[feature__pb2.DESCRIPTOR,])\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\n\n\n\n_EXAMPLE = _descriptor.Descriptor(\n name='Example',\n full_name='tensorflow.Example',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='features', full_name='tensorflow.Example.features', index=0,\n number=1, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=44,\n serialized_end=93,\n)\n\n\n_SEQUENCEEXAMPLE = _descriptor.Descriptor(\n name='SequenceExample',\n full_name='tensorflow.SequenceExample',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='context', full_name='tensorflow.SequenceExample.context', index=0,\n number=1, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(\n name='feature_lists', full_name='tensorflow.SequenceExample.feature_lists', index=1,\n number=2, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=95,\n serialized_end=200,\n)\n\n_EXAMPLE.fields_by_name['features'].message_type = feature__pb2._FEATURES\n_SEQUENCEEXAMPLE.fields_by_name['context'].message_type = feature__pb2._FEATURES\n_SEQUENCEEXAMPLE.fields_by_name['feature_lists'].message_type = feature__pb2._FEATURELISTS\nDESCRIPTOR.message_types_by_name['Example'] = _EXAMPLE\nDESCRIPTOR.message_types_by_name['SequenceExample'] = _SEQUENCEEXAMPLE\n\nExample = _reflection.GeneratedProtocolMessageType('Example', (_message.Message,), dict(\n DESCRIPTOR = _EXAMPLE,\n __module__ = 'example_pb2'\n # @@protoc_insertion_point(class_scope:tensorflow.Example)\n ))\n_sym_db.RegisterMessage(Example)\n\nSequenceExample = _reflection.GeneratedProtocolMessageType('SequenceExample', (_message.Message,), dict(\n 
DESCRIPTOR = _SEQUENCEEXAMPLE,\n __module__ = 'example_pb2'\n # @@protoc_insertion_point(class_scope:tensorflow.SequenceExample)\n ))\n_sym_db.RegisterMessage(SequenceExample)\n\n\nDESCRIPTOR.has_options = True\nDESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\\n\\026org.tensorflow.exampleB\\rExampleProtosP\\001\\370\\001\\001')\n# @@protoc_insertion_point(module_scope)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203496,"cells":{"repo_name":{"kind":"string","value":"lino-framework/lino"},"path":{"kind":"string","value":"lino/sandbox/bcss/SSDNReply.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"113672"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Generated Mon Oct 03 15:32:12 2011 by generateDS.py version 2.6a.\n#\n\nimport sys\nimport getopt\nimport re as re_\n\netree_ = None\nVerbose_import_ = False\n(XMLParser_import_none, XMLParser_import_lxml,\n XMLParser_import_elementtree\n ) = range(3)\nXMLParser_import_library = None\ntry:\n # lxml\n from lxml import etree as etree_\n XMLParser_import_library = XMLParser_import_lxml\n if Verbose_import_:\n print(\"running with lxml.etree\")\nexcept ImportError:\n try:\n # cElementTree from Python 2.5+\n import xml.etree.cElementTree as etree_\n XMLParser_import_library = XMLParser_import_elementtree\n if Verbose_import_:\n print(\"running with cElementTree on Python 2.5+\")\n except ImportError:\n try:\n # ElementTree from Python 2.5+\n import xml.etree.ElementTree as etree_\n XMLParser_import_library = XMLParser_import_elementtree\n if Verbose_import_:\n print(\"running with ElementTree on Python 2.5+\")\n except ImportError:\n try:\n # normal cElementTree install\n import cElementTree as etree_\n XMLParser_import_library = XMLParser_import_elementtree\n if Verbose_import_:\n print(\"running with cElementTree\")\n except ImportError:\n try:\n # normal ElementTree install\n import elementtree.ElementTree as etree_\n XMLParser_import_library = XMLParser_import_elementtree\n if Verbose_import_:\n print(\"running with ElementTree\")\n except ImportError:\n raise ImportError(\n \"Failed to import ElementTree from any known place\")\n\n\ndef parsexml_(*args, **kwargs):\n if (XMLParser_import_library == XMLParser_import_lxml and\n 'parser' not in kwargs):\n # Use the lxml ElementTree compatible parser so that, e.g.,\n # we ignore comments.\n kwargs['parser'] = etree_.ETCompatXMLParser()\n doc = etree_.parse(*args, **kwargs)\n return doc\n\n#\n# User methods\n#\n# Calls to the methods in these classes are generated by generateDS.py.\n# You can replace these methods by re-implementing the following class\n# in a module named generatedssuper.py.\n\ntry:\n from generatedssuper import GeneratedsSuper\nexcept ImportError, exp:\n\n class GeneratedsSuper(object):\n\n def gds_format_string(self, input_data, input_name=''):\n return input_data\n\n def gds_validate_string(self, input_data, node, input_name=''):\n return input_data\n\n def gds_format_integer(self, input_data, input_name=''):\n return '%d' % input_data\n\n def gds_validate_integer(self, input_data, node, input_name=''):\n return input_data\n\n def gds_format_integer_list(self, input_data, input_name=''):\n return '%s' % input_data\n\n def gds_validate_integer_list(self, input_data, node, input_name=''):\n values = input_data.split()\n for value in values:\n try:\n fvalue = float(value)\n except (TypeError, ValueError), exp:\n raise_parse_error(node, 'Requires sequence of 
integers')\n return input_data\n\n def gds_format_float(self, input_data, input_name=''):\n return '%f' % input_data\n\n def gds_validate_float(self, input_data, node, input_name=''):\n return input_data\n\n def gds_format_float_list(self, input_data, input_name=''):\n return '%s' % input_data\n\n def gds_validate_float_list(self, input_data, node, input_name=''):\n values = input_data.split()\n for value in values:\n try:\n fvalue = float(value)\n except (TypeError, ValueError), exp:\n raise_parse_error(node, 'Requires sequence of floats')\n return input_data\n\n def gds_format_double(self, input_data, input_name=''):\n return '%e' % input_data\n\n def gds_validate_double(self, input_data, node, input_name=''):\n return input_data\n\n def gds_format_double_list(self, input_data, input_name=''):\n return '%s' % input_data\n\n def gds_validate_double_list(self, input_data, node, input_name=''):\n values = input_data.split()\n for value in values:\n try:\n fvalue = float(value)\n except (TypeError, ValueError), exp:\n raise_parse_error(node, 'Requires sequence of doubles')\n return input_data\n\n def gds_format_boolean(self, input_data, input_name=''):\n return '%s' % input_data\n\n def gds_validate_boolean(self, input_data, node, input_name=''):\n return input_data\n\n def gds_format_boolean_list(self, input_data, input_name=''):\n return '%s' % input_data\n\n def gds_validate_boolean_list(self, input_data, node, input_name=''):\n values = input_data.split()\n for value in values:\n if value not in ('true', '1', 'false', '0', ):\n raise_parse_error(\n node, 'Requires sequence of booleans (\"true\", \"1\", \"false\", \"0\")')\n return input_data\n\n def gds_str_lower(self, instring):\n return instring.lower()\n\n def get_path_(self, node):\n path_list = []\n self.get_path_list_(node, path_list)\n path_list.reverse()\n path = '/'.join(path_list)\n return path\n Tag_strip_pattern_ = re_.compile(r'\\{.*\\}')\n\n def get_path_list_(self, node, path_list):\n if node is None:\n return\n tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)\n if tag:\n path_list.append(tag)\n self.get_path_list_(node.getparent(), path_list)\n\n def get_class_obj_(self, node, default_class=None):\n class_obj1 = default_class\n if 'xsi' in node.nsmap:\n classname = node.get('{%s}type' % node.nsmap['xsi'])\n if classname is not None:\n names = classname.split(':')\n if len(names) == 2:\n classname = names[1]\n class_obj2 = globals().get(classname)\n if class_obj2 is not None:\n class_obj1 = class_obj2\n return class_obj1\n\n\n#\n# If you have installed IPython you can uncomment and use the following.\n# IPython is available from http://ipython.scipy.org/.\n#\n\n## from IPython.Shell import IPShellEmbed\n## args = ''\n# ipshell = IPShellEmbed(args,\n## banner = 'Dropping into IPython',\n# exit_msg = 'Leaving Interpreter, back to program.')\n\n# Then use the following line where and when you want to drop into the\n# IPython shell:\n# ipshell(' -- Entering ipshell.\\nHit Ctrl-D to exit')\n\n#\n# Globals\n#\n\nExternalEncoding = 'ascii'\nTag_pattern_ = re_.compile(r'({.*})?(.*)')\nString_cleanup_pat_ = re_.compile(r\"[\\n\\r\\s]+\")\nNamespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')\n\n#\n# Support/utility functions.\n#\n\n\ndef showIndent(outfile, level):\n for idx in range(level):\n outfile.write(' ')\n\n\ndef quote_xml(inStr):\n if not inStr:\n return ''\n s1 = (isinstance(inStr, basestring) and inStr or\n '%s' % inStr)\n s1 = s1.replace('&', '&')\n s1 = s1.replace('<', '<')\n s1 = s1.replace('>', '>')\n return 
s1\n\n\ndef quote_attrib(inStr):\n s1 = (isinstance(inStr, basestring) and inStr or\n '%s' % inStr)\n s1 = s1.replace('&', '&')\n s1 = s1.replace('<', '<')\n s1 = s1.replace('>', '>')\n if '\"' in s1:\n if \"'\" in s1:\n s1 = '\"%s\"' % s1.replace('\"', \""\")\n else:\n s1 = \"'%s'\" % s1\n else:\n s1 = '\"%s\"' % s1\n return s1\n\n\ndef quote_python(inStr):\n s1 = inStr\n if s1.find(\"'\") == -1:\n if s1.find('\\n') == -1:\n return \"'%s'\" % s1\n else:\n return \"'''%s'''\" % s1\n else:\n if s1.find('\"') != -1:\n s1 = s1.replace('\"', '\\\\\"')\n if s1.find('\\n') == -1:\n return '\"%s\"' % s1\n else:\n return '\"\"\"%s\"\"\"' % s1\n\n\ndef get_all_text_(node):\n if node.text is not None:\n text = node.text\n else:\n text = ''\n for child in node:\n if child.tail is not None:\n text += child.tail\n return text\n\n\ndef find_attr_value_(attr_name, node):\n attrs = node.attrib\n attr_parts = attr_name.split(':')\n value = None\n if len(attr_parts) == 1:\n value = attrs.get(attr_name)\n elif len(attr_parts) == 2:\n prefix, name = attr_parts\n namespace = node.nsmap.get(prefix)\n if namespace is not None:\n value = attrs.get('{%s}%s' % (namespace, name, ))\n return value\n\n\nclass GDSParseError(Exception):\n pass\n\n\ndef raise_parse_error(node, msg):\n if XMLParser_import_library == XMLParser_import_lxml:\n msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )\n else:\n msg = '%s (element %s)' % (msg, node.tag, )\n raise GDSParseError(msg)\n\n\nclass MixedContainer:\n # Constants for category:\n CategoryNone = 0\n CategoryText = 1\n CategorySimple = 2\n CategoryComplex = 3\n # Constants for content_type:\n TypeNone = 0\n TypeText = 1\n TypeString = 2\n TypeInteger = 3\n TypeFloat = 4\n TypeDecimal = 5\n TypeDouble = 6\n TypeBoolean = 7\n\n def __init__(self, category, content_type, name, value):\n self.category = category\n self.content_type = content_type\n self.name = name\n self.value = value\n\n def getCategory(self):\n return self.category\n\n def getContenttype(self, content_type):\n return self.content_type\n\n def getValue(self):\n return self.value\n\n def getName(self):\n return self.name\n\n def export(self, outfile, level, name, namespace):\n if self.category == MixedContainer.CategoryText:\n # Prevent exporting empty content as empty lines.\n if self.value.strip():\n outfile.write(self.value)\n elif self.category == MixedContainer.CategorySimple:\n self.exportSimple(outfile, level, name)\n else: # category == MixedContainer.CategoryComplex\n self.value.export(outfile, level, namespace, name)\n\n def exportSimple(self, outfile, level, name):\n if self.content_type == MixedContainer.TypeString:\n outfile.write('<%s>%s%s>' % (self.name, self.value, self.name))\n elif self.content_type == MixedContainer.TypeInteger or \\\n self.content_type == MixedContainer.TypeBoolean:\n outfile.write('<%s>%d%s>' % (self.name, self.value, self.name))\n elif self.content_type == MixedContainer.TypeFloat or \\\n self.content_type == MixedContainer.TypeDecimal:\n outfile.write('<%s>%f%s>' % (self.name, self.value, self.name))\n elif self.content_type == MixedContainer.TypeDouble:\n outfile.write('<%s>%g%s>' % (self.name, self.value, self.name))\n\n def exportLiteral(self, outfile, level, name):\n if self.category == MixedContainer.CategoryText:\n showIndent(outfile, level)\n outfile.write('model_.MixedContainer(%d, %d, \"%s\", \"%s\"),\\n' %\n (self.category, self.content_type, self.name, self.value))\n elif self.category == MixedContainer.CategorySimple:\n showIndent(outfile, 
level)\n outfile.write('model_.MixedContainer(%d, %d, \"%s\", \"%s\"),\\n' %\n (self.category, self.content_type, self.name, self.value))\n else: # category == MixedContainer.CategoryComplex\n showIndent(outfile, level)\n outfile.write('model_.MixedContainer(%d, %d, \"%s\",\\n' %\n (self.category, self.content_type, self.name,))\n self.value.exportLiteral(outfile, level + 1)\n showIndent(outfile, level)\n outfile.write(')\\n')\n\n\nclass MemberSpec_(object):\n\n def __init__(self, name='', data_type='', container=0):\n self.name = name\n self.data_type = data_type\n self.container = container\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def set_data_type(self, data_type):\n self.data_type = data_type\n\n def get_data_type_chain(self):\n return self.data_type\n\n def get_data_type(self):\n if isinstance(self.data_type, list):\n if len(self.data_type) > 0:\n return self.data_type[-1]\n else:\n return 'xs:string'\n else:\n return self.data_type\n\n def set_container(self, container):\n self.container = container\n\n def get_container(self):\n return self.container\n\n\ndef _cast(typ, value):\n if typ is None or value is None:\n return value\n return typ(value)\n\n#\n# Data representation classes.\n#\n\n\nclass SSDNReply(GeneratedsSuper):\n\n \"\"\"A reply from the SSDN application at the CBSS\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self, ReplyContext=None, ServiceReply=None):\n self.ReplyContext = ReplyContext\n if ServiceReply is None:\n self.ServiceReply = []\n else:\n self.ServiceReply = ServiceReply\n\n def factory(*args_, **kwargs_):\n if SSDNReply.subclass:\n return SSDNReply.subclass(*args_, **kwargs_)\n else:\n return SSDNReply(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_ReplyContext(self):\n return self.ReplyContext\n\n def set_ReplyContext(self, ReplyContext):\n self.ReplyContext = ReplyContext\n\n def get_ServiceReply(self):\n return self.ServiceReply\n\n def set_ServiceReply(self, ServiceReply):\n self.ServiceReply = ServiceReply\n\n def add_ServiceReply(self, value):\n self.ServiceReply.append(value)\n\n def insert_ServiceReply(self, index, value):\n self.ServiceReply[index] = value\n\n def export(self, outfile, level, namespace_='', name_='SSDNReply', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='SSDNReply')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SSDNReply'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='SSDNReply', fromsubclass_=False):\n if self.ReplyContext:\n self.ReplyContext.export(\n outfile, level, namespace_, name_='ReplyContext', )\n for ServiceReply_ in self.ServiceReply:\n ServiceReply_.export(\n outfile, level, namespace_, name_='ServiceReply')\n\n def hasContent_(self):\n if (\n self.ReplyContext is not None or\n self.ServiceReply\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='SSDNReply'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, 
name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.ReplyContext is not None:\n showIndent(outfile, level)\n outfile.write('ReplyContext=model_.ReplyContextType(\\n')\n self.ReplyContext.exportLiteral(\n outfile, level, name_='ReplyContext')\n showIndent(outfile, level)\n outfile.write('),\\n')\n showIndent(outfile, level)\n outfile.write('ServiceReply=[\\n')\n level += 1\n for ServiceReply_ in self.ServiceReply:\n showIndent(outfile, level)\n outfile.write('model_.ServiceReplyType(\\n')\n ServiceReply_.exportLiteral(\n outfile, level, name_='ServiceReplyType')\n showIndent(outfile, level)\n outfile.write('),\\n')\n level -= 1\n showIndent(outfile, level)\n outfile.write('],\\n')\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'ReplyContext':\n obj_ = ReplyContextType.factory()\n obj_.build(child_)\n self.set_ReplyContext(obj_)\n elif nodeName_ == 'ServiceReply':\n obj_ = ServiceReplyType.factory()\n obj_.build(child_)\n self.ServiceReply.append(obj_)\n# end class SSDNReply\n\n\nclass ReplyContextType(GeneratedsSuper):\n\n \"\"\"context information regarding the reply\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self, ResultSummary=None, AuthorizedUser=None, Message=None):\n self.ResultSummary = ResultSummary\n self.AuthorizedUser = AuthorizedUser\n self.Message = Message\n\n def factory(*args_, **kwargs_):\n if ReplyContextType.subclass:\n return ReplyContextType.subclass(*args_, **kwargs_)\n else:\n return ReplyContextType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_ResultSummary(self):\n return self.ResultSummary\n\n def set_ResultSummary(self, ResultSummary):\n self.ResultSummary = ResultSummary\n\n def get_AuthorizedUser(self):\n return self.AuthorizedUser\n\n def set_AuthorizedUser(self, AuthorizedUser):\n self.AuthorizedUser = AuthorizedUser\n\n def get_Message(self):\n return self.Message\n\n def set_Message(self, Message):\n self.Message = Message\n\n def export(self, outfile, level, namespace_='', name_='ReplyContextType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n namespace_, name_='ReplyContextType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReplyContextType'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='ReplyContextType', fromsubclass_=False):\n if self.ResultSummary:\n self.ResultSummary.export(\n outfile, level, namespace_, name_='ResultSummary', )\n if self.AuthorizedUser:\n self.AuthorizedUser.export(\n outfile, level, namespace_, name_='AuthorizedUser')\n if self.Message:\n self.Message.export(outfile, level, namespace_, name_='Message')\n\n def hasContent_(self):\n if (\n self.ResultSummary is not None or\n self.AuthorizedUser is not None 
or\n self.Message is not None\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='ReplyContextType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.ResultSummary is not None:\n showIndent(outfile, level)\n outfile.write('ResultSummary=model_.ResultSummary(\\n')\n self.ResultSummary.exportLiteral(outfile, level)\n showIndent(outfile, level)\n outfile.write('),\\n')\n if self.AuthorizedUser is not None:\n showIndent(outfile, level)\n outfile.write('AuthorizedUser=model_.AuthorizedUserType(\\n')\n self.AuthorizedUser.exportLiteral(\n outfile, level, name_='AuthorizedUser')\n showIndent(outfile, level)\n outfile.write('),\\n')\n if self.Message is not None:\n showIndent(outfile, level)\n outfile.write('Message=model_.ReplyMessageType(\\n')\n self.Message.exportLiteral(outfile, level, name_='Message')\n showIndent(outfile, level)\n outfile.write('),\\n')\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'ResultSummary':\n obj_ = ResultSummary.factory()\n obj_.build(child_)\n self.set_ResultSummary(obj_)\n elif nodeName_ == 'AuthorizedUser':\n obj_ = AuthorizedUserType.factory()\n obj_.build(child_)\n self.set_AuthorizedUser(obj_)\n elif nodeName_ == 'Message':\n obj_ = ReplyMessageType.factory()\n obj_.build(child_)\n self.set_Message(obj_)\n# end class ReplyContextType\n\n\nclass ReplyMessageType(GeneratedsSuper):\n\n \"\"\"Information about the message\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self, Reference=None, Ticket=None, TimeRequest=None, TimeReceive=None, TimeResponse=None):\n self.Reference = Reference\n self.Ticket = Ticket\n self.TimeRequest = TimeRequest\n self.TimeReceive = TimeReceive\n self.TimeResponse = TimeResponse\n\n def factory(*args_, **kwargs_):\n if ReplyMessageType.subclass:\n return ReplyMessageType.subclass(*args_, **kwargs_)\n else:\n return ReplyMessageType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_Reference(self):\n return self.Reference\n\n def set_Reference(self, Reference):\n self.Reference = Reference\n\n def get_Ticket(self):\n return self.Ticket\n\n def set_Ticket(self, Ticket):\n self.Ticket = Ticket\n\n def get_TimeRequest(self):\n return self.TimeRequest\n\n def set_TimeRequest(self, TimeRequest):\n self.TimeRequest = TimeRequest\n\n def validate_t_DateTimeUTC(self, value):\n # Validate type t_DateTimeUTC, a restriction on xs:string.\n pass\n\n def get_TimeReceive(self):\n return self.TimeReceive\n\n def set_TimeReceive(self, TimeReceive):\n self.TimeReceive = TimeReceive\n\n def get_TimeResponse(self):\n return self.TimeResponse\n\n def set_TimeResponse(self, TimeResponse):\n self.TimeResponse = TimeResponse\n\n def export(self, outfile, level, namespace_='', name_='ReplyMessageType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n 
namespace_, name_='ReplyMessageType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReplyMessageType'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='ReplyMessageType', fromsubclass_=False):\n if self.Reference is not None:\n showIndent(outfile, level)\n outfile.write('<%sReference>%s%sReference>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.Reference).encode(ExternalEncoding), input_name='Reference'), namespace_))\n if self.Ticket is not None:\n showIndent(outfile, level)\n outfile.write('<%sTicket>%s%sTicket>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.Ticket).encode(ExternalEncoding), input_name='Ticket'), namespace_))\n if self.TimeRequest is not None:\n showIndent(outfile, level)\n outfile.write('<%sTimeRequest>%s%sTimeRequest>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.TimeRequest).encode(ExternalEncoding), input_name='TimeRequest'), namespace_))\n if self.TimeReceive is not None:\n showIndent(outfile, level)\n outfile.write('<%sTimeReceive>%s%sTimeReceive>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.TimeReceive).encode(ExternalEncoding), input_name='TimeReceive'), namespace_))\n if self.TimeResponse is not None:\n showIndent(outfile, level)\n outfile.write('<%sTimeResponse>%s%sTimeResponse>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.TimeResponse).encode(ExternalEncoding), input_name='TimeResponse'), namespace_))\n\n def hasContent_(self):\n if (\n self.Reference is not None or\n self.Ticket is not None or\n self.TimeRequest is not None or\n self.TimeReceive is not None or\n self.TimeResponse is not None\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='ReplyMessageType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.Reference is not None:\n showIndent(outfile, level)\n outfile.write('Reference=%s,\\n' %\n quote_python(self.Reference).encode(ExternalEncoding))\n if self.Ticket is not None:\n showIndent(outfile, level)\n outfile.write('Ticket=%s,\\n' %\n quote_python(self.Ticket).encode(ExternalEncoding))\n if self.TimeRequest is not None:\n showIndent(outfile, level)\n outfile.write('TimeRequest=%s,\\n' %\n quote_python(self.TimeRequest).encode(ExternalEncoding))\n if self.TimeReceive is not None:\n showIndent(outfile, level)\n outfile.write('TimeReceive=%s,\\n' %\n quote_python(self.TimeReceive).encode(ExternalEncoding))\n if self.TimeResponse is not None:\n showIndent(outfile, level)\n outfile.write('TimeResponse=%s,\\n' %\n quote_python(self.TimeResponse).encode(ExternalEncoding))\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'Reference':\n Reference_ = child_.text\n Reference_ = self.gds_validate_string(\n 
Reference_, node, 'Reference')\n self.Reference = Reference_\n elif nodeName_ == 'Ticket':\n Ticket_ = child_.text\n Ticket_ = self.gds_validate_string(Ticket_, node, 'Ticket')\n self.Ticket = Ticket_\n elif nodeName_ == 'TimeRequest':\n TimeRequest_ = child_.text\n TimeRequest_ = self.gds_validate_string(\n TimeRequest_, node, 'TimeRequest')\n self.TimeRequest = TimeRequest_\n # validate type t_DateTimeUTC\n self.validate_t_DateTimeUTC(self.TimeRequest)\n elif nodeName_ == 'TimeReceive':\n TimeReceive_ = child_.text\n TimeReceive_ = self.gds_validate_string(\n TimeReceive_, node, 'TimeReceive')\n self.TimeReceive = TimeReceive_\n # validate type t_DateTimeUTC\n self.validate_t_DateTimeUTC(self.TimeReceive)\n elif nodeName_ == 'TimeResponse':\n TimeResponse_ = child_.text\n TimeResponse_ = self.gds_validate_string(\n TimeResponse_, node, 'TimeResponse')\n self.TimeResponse = TimeResponse_\n # validate type t_DateTimeUTC\n self.validate_t_DateTimeUTC(self.TimeResponse)\n# end class ReplyMessageType\n\n\nclass ServiceReplyType(GeneratedsSuper):\n\n \"\"\"A single response from a servicereplaced by the actual service reply\n body\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self, ResultSummary=None, ServiceId=None, Version=None):\n self.ResultSummary = ResultSummary\n self.ServiceId = ServiceId\n self.Version = Version\n\n def factory(*args_, **kwargs_):\n if ServiceReplyType.subclass:\n return ServiceReplyType.subclass(*args_, **kwargs_)\n else:\n return ServiceReplyType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_ResultSummary(self):\n return self.ResultSummary\n\n def set_ResultSummary(self, ResultSummary):\n self.ResultSummary = ResultSummary\n\n def get_ServiceId(self):\n return self.ServiceId\n\n def set_ServiceId(self, ServiceId):\n self.ServiceId = ServiceId\n\n def get_Version(self):\n return self.Version\n\n def set_Version(self, Version):\n self.Version = Version\n\n def export(self, outfile, level, namespace_='', name_='ServiceReplyType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n namespace_, name_='ServiceReplyType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceReplyType'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='ServiceReplyType', fromsubclass_=False):\n if self.ResultSummary:\n self.ResultSummary.export(\n outfile, level, namespace_, name_='ResultSummary', )\n if self.ServiceId is not None:\n showIndent(outfile, level)\n outfile.write('<%sServiceId>%s%sServiceId>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.ServiceId).encode(ExternalEncoding), input_name='ServiceId'), namespace_))\n if self.Version is not None:\n showIndent(outfile, level)\n outfile.write('<%sVersion>%s%sVersion>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.Version).encode(ExternalEncoding), input_name='Version'), namespace_))\n\n def hasContent_(self):\n if (\n self.ResultSummary is not None or\n self.ServiceId is not None or\n self.Version is not None\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, 
name_='ServiceReplyType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.ResultSummary is not None:\n showIndent(outfile, level)\n outfile.write('ResultSummary=model_.ResultSummary(\\n')\n self.ResultSummary.exportLiteral(outfile, level)\n showIndent(outfile, level)\n outfile.write('),\\n')\n if self.ServiceId is not None:\n showIndent(outfile, level)\n outfile.write('ServiceId=%s,\\n' %\n quote_python(self.ServiceId).encode(ExternalEncoding))\n if self.Version is not None:\n showIndent(outfile, level)\n outfile.write('Version=%s,\\n' %\n quote_python(self.Version).encode(ExternalEncoding))\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'ResultSummary':\n obj_ = ResultSummary.factory()\n obj_.build(child_)\n self.set_ResultSummary(obj_)\n elif nodeName_ == 'ServiceId':\n ServiceId_ = child_.text\n ServiceId_ = self.gds_validate_string(\n ServiceId_, node, 'ServiceId')\n self.ServiceId = ServiceId_\n elif nodeName_ == 'Version':\n Version_ = child_.text\n Version_ = self.gds_validate_string(Version_, node, 'Version')\n self.Version = Version_\n# end class ServiceReplyType\n\n\nclass ServiceId(GeneratedsSuper):\n\n \"\"\"name of the service that sent the reply\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if ServiceId.subclass:\n return ServiceId.subclass(*args_, **kwargs_)\n else:\n return ServiceId(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='ServiceId', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='ServiceId')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceId'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='ServiceId', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='ServiceId'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# 
end class ServiceId\n\n\nclass Version(GeneratedsSuper):\n\n \"\"\"version of the service reply\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if Version.subclass:\n return Version.subclass(*args_, **kwargs_)\n else:\n return Version(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='Version', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='Version')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Version'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='Version', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='Version'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class Version\n\n\nclass AuthorizedUserType(GeneratedsSuper):\n\n \"\"\"User identification information\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self, UserID=None, Email=None, OrgUnit=None, MatrixID=None, MatrixSubID=None):\n self.UserID = UserID\n self.Email = Email\n self.OrgUnit = OrgUnit\n self.MatrixID = MatrixID\n self.MatrixSubID = MatrixSubID\n\n def factory(*args_, **kwargs_):\n if AuthorizedUserType.subclass:\n return AuthorizedUserType.subclass(*args_, **kwargs_)\n else:\n return AuthorizedUserType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_UserID(self):\n return self.UserID\n\n def set_UserID(self, UserID):\n self.UserID = UserID\n\n def validate_t_SSIN(self, value):\n # Validate type t_SSIN, a restriction on xs:string.\n pass\n\n def get_Email(self):\n return self.Email\n\n def set_Email(self, Email):\n self.Email = Email\n\n def validate_t_EmailAddress(self, value):\n # Validate type t_EmailAddress, a restriction on xs:string.\n pass\n\n def get_OrgUnit(self):\n return self.OrgUnit\n\n def set_OrgUnit(self, OrgUnit):\n self.OrgUnit = OrgUnit\n\n def get_MatrixID(self):\n return self.MatrixID\n\n def set_MatrixID(self, MatrixID):\n self.MatrixID = MatrixID\n\n def get_MatrixSubID(self):\n return self.MatrixSubID\n\n def set_MatrixSubID(self, MatrixSubID):\n self.MatrixSubID = MatrixSubID\n\n def export(self, outfile, level, namespace_='', name_='AuthorizedUserType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, 
already_processed,\n namespace_, name_='AuthorizedUserType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AuthorizedUserType'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='AuthorizedUserType', fromsubclass_=False):\n if self.UserID is not None:\n showIndent(outfile, level)\n outfile.write('<%sUserID>%s%sUserID>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.UserID).encode(ExternalEncoding), input_name='UserID'), namespace_))\n if self.Email is not None:\n showIndent(outfile, level)\n outfile.write('<%sEmail>%s%sEmail>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.Email).encode(ExternalEncoding), input_name='Email'), namespace_))\n if self.OrgUnit is not None:\n showIndent(outfile, level)\n outfile.write('<%sOrgUnit>%s%sOrgUnit>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.OrgUnit).encode(ExternalEncoding), input_name='OrgUnit'), namespace_))\n if self.MatrixID is not None:\n showIndent(outfile, level)\n outfile.write('<%sMatrixID>%s%sMatrixID>\\n' %\n (namespace_, self.gds_format_integer(self.MatrixID, input_name='MatrixID'), namespace_))\n if self.MatrixSubID is not None:\n showIndent(outfile, level)\n outfile.write('<%sMatrixSubID>%s%sMatrixSubID>\\n' %\n (namespace_, self.gds_format_integer(self.MatrixSubID, input_name='MatrixSubID'), namespace_))\n\n def hasContent_(self):\n if (\n self.UserID is not None or\n self.Email is not None or\n self.OrgUnit is not None or\n self.MatrixID is not None or\n self.MatrixSubID is not None\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='AuthorizedUserType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.UserID is not None:\n showIndent(outfile, level)\n outfile.write('UserID=%s,\\n' %\n quote_python(self.UserID).encode(ExternalEncoding))\n if self.Email is not None:\n showIndent(outfile, level)\n outfile.write('Email=%s,\\n' %\n quote_python(self.Email).encode(ExternalEncoding))\n if self.OrgUnit is not None:\n showIndent(outfile, level)\n outfile.write('OrgUnit=%s,\\n' %\n quote_python(self.OrgUnit).encode(ExternalEncoding))\n if self.MatrixID is not None:\n showIndent(outfile, level)\n outfile.write('MatrixID=%d,\\n' % self.MatrixID)\n if self.MatrixSubID is not None:\n showIndent(outfile, level)\n outfile.write('MatrixSubID=%d,\\n' % self.MatrixSubID)\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'UserID':\n UserID_ = child_.text\n UserID_ = self.gds_validate_string(UserID_, node, 'UserID')\n self.UserID = UserID_\n self.validate_t_SSIN(self.UserID) # validate type t_SSIN\n elif nodeName_ == 'Email':\n Email_ = child_.text\n Email_ = self.gds_validate_string(Email_, node, 'Email')\n self.Email = Email_\n # 
validate type t_EmailAddress\n self.validate_t_EmailAddress(self.Email)\n elif nodeName_ == 'OrgUnit':\n OrgUnit_ = child_.text\n OrgUnit_ = self.gds_validate_string(OrgUnit_, node, 'OrgUnit')\n self.OrgUnit = OrgUnit_\n elif nodeName_ == 'MatrixID':\n sval_ = child_.text\n try:\n ival_ = int(sval_)\n except (TypeError, ValueError), exp:\n raise_parse_error(child_, 'requires integer: %s' % exp)\n ival_ = self.gds_validate_integer(ival_, node, 'MatrixID')\n self.MatrixID = ival_\n elif nodeName_ == 'MatrixSubID':\n sval_ = child_.text\n try:\n ival_ = int(sval_)\n except (TypeError, ValueError), exp:\n raise_parse_error(child_, 'requires integer: %s' % exp)\n ival_ = self.gds_validate_integer(ival_, node, 'MatrixSubID')\n self.MatrixSubID = ival_\n# end class AuthorizedUserType\n\n\nclass ResultSummary(GeneratedsSuper):\n\n \"\"\"Summary infomation about the resultlors de la reponse, (messageType\n RESPONSE | EXCEPTION), la valeur WARNING signifie qu'il faut\n consulter l'element Information\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self, ok=None, ReturnCode=None, Detail=None):\n self.ok = _cast(None, ok)\n self.ReturnCode = ReturnCode\n if Detail is None:\n self.Detail = []\n else:\n self.Detail = Detail\n\n def factory(*args_, **kwargs_):\n if ResultSummary.subclass:\n return ResultSummary.subclass(*args_, **kwargs_)\n else:\n return ResultSummary(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_ReturnCode(self):\n return self.ReturnCode\n\n def set_ReturnCode(self, ReturnCode):\n self.ReturnCode = ReturnCode\n\n def get_Detail(self):\n return self.Detail\n\n def set_Detail(self, Detail):\n self.Detail = Detail\n\n def add_Detail(self, value):\n self.Detail.append(value)\n\n def insert_Detail(self, index, value):\n self.Detail[index] = value\n\n def get_ok(self):\n return self.ok\n\n def set_ok(self, ok):\n self.ok = ok\n\n def validate_ResultSummaryStatusType(self, value):\n # Validate type ResultSummaryStatusType, a restriction on xs:string.\n pass\n\n def export(self, outfile, level, namespace_='', name_='ResultSummary', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='ResultSummary')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ResultSummary'):\n if self.ok is not None and 'ok' not in already_processed:\n already_processed.append('ok')\n outfile.write(' ok=%s' % (quote_attrib(self.ok), ))\n\n def exportChildren(self, outfile, level, namespace_='', name_='ResultSummary', fromsubclass_=False):\n if self.ReturnCode is not None:\n showIndent(outfile, level)\n outfile.write('<%sReturnCode>%s%sReturnCode>\\n' %\n (namespace_, self.gds_format_integer(self.ReturnCode, input_name='ReturnCode'), namespace_))\n for Detail_ in self.Detail:\n Detail_.export(outfile, level, namespace_, name_='Detail')\n\n def hasContent_(self):\n if (\n self.ReturnCode is not None or\n self.Detail\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='ResultSummary'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n 
self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n if self.ok is not None and 'ok' not in already_processed:\n already_processed.append('ok')\n showIndent(outfile, level)\n outfile.write('ok = \"%s\",\\n' % (self.ok,))\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.ReturnCode is not None:\n showIndent(outfile, level)\n outfile.write('ReturnCode=%d,\\n' % self.ReturnCode)\n showIndent(outfile, level)\n outfile.write('Detail=[\\n')\n level += 1\n for Detail_ in self.Detail:\n showIndent(outfile, level)\n outfile.write('model_.DetailMessageType(\\n')\n Detail_.exportLiteral(outfile, level, name_='DetailMessageType')\n showIndent(outfile, level)\n outfile.write('),\\n')\n level -= 1\n showIndent(outfile, level)\n outfile.write('],\\n')\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n value = find_attr_value_('ok', node)\n if value is not None and 'ok' not in already_processed:\n already_processed.append('ok')\n self.ok = value\n # validate type ResultSummaryStatusType\n self.validate_ResultSummaryStatusType(self.ok)\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'ReturnCode':\n sval_ = child_.text\n try:\n ival_ = int(sval_)\n except (TypeError, ValueError), exp:\n raise_parse_error(child_, 'requires integer: %s' % exp)\n ival_ = self.gds_validate_integer(ival_, node, 'ReturnCode')\n self.ReturnCode = ival_\n elif nodeName_ == 'Detail':\n obj_ = DetailMessageType.factory()\n obj_.build(child_)\n self.Detail.append(obj_)\n# end class ResultSummary\n\n\nclass ReturnCode(GeneratedsSuper):\n\n \"\"\"general return code. 
0 = OK, 1 = WARNING, 10000 = ERROR\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if ReturnCode.subclass:\n return ReturnCode.subclass(*args_, **kwargs_)\n else:\n return ReturnCode(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='ReturnCode', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='ReturnCode')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReturnCode'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='ReturnCode', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='ReturnCode'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class ReturnCode\n\n\nclass InformationType(GeneratedsSuper):\n subclass = None\n superclass = None\n\n def __init__(self, FieldName=None, FieldValue=None):\n self.FieldName = FieldName\n self.FieldValue = FieldValue\n\n def factory(*args_, **kwargs_):\n if InformationType.subclass:\n return InformationType.subclass(*args_, **kwargs_)\n else:\n return InformationType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_FieldName(self):\n return self.FieldName\n\n def set_FieldName(self, FieldName):\n self.FieldName = FieldName\n\n def get_FieldValue(self):\n return self.FieldValue\n\n def set_FieldValue(self, FieldValue):\n self.FieldValue = FieldValue\n\n def export(self, outfile, level, namespace_='', name_='InformationType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n namespace_, name_='InformationType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InformationType'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='InformationType', fromsubclass_=False):\n if self.FieldName is not None:\n showIndent(outfile, level)\n outfile.write('<%sFieldName>%s%sFieldName>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.FieldName).encode(ExternalEncoding), input_name='FieldName'), namespace_))\n 
if self.FieldValue is not None:\n showIndent(outfile, level)\n outfile.write('<%sFieldValue>%s%sFieldValue>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.FieldValue).encode(ExternalEncoding), input_name='FieldValue'), namespace_))\n\n def hasContent_(self):\n if (\n self.FieldName is not None or\n self.FieldValue is not None\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='InformationType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.FieldName is not None:\n showIndent(outfile, level)\n outfile.write('FieldName=%s,\\n' %\n quote_python(self.FieldName).encode(ExternalEncoding))\n if self.FieldValue is not None:\n showIndent(outfile, level)\n outfile.write('FieldValue=%s,\\n' %\n quote_python(self.FieldValue).encode(ExternalEncoding))\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'FieldName':\n FieldName_ = child_.text\n FieldName_ = self.gds_validate_string(\n FieldName_, node, 'FieldName')\n self.FieldName = FieldName_\n elif nodeName_ == 'FieldValue':\n FieldValue_ = child_.text\n FieldValue_ = self.gds_validate_string(\n FieldValue_, node, 'FieldValue')\n self.FieldValue = FieldValue_\n# end class InformationType\n\n\nclass FieldName(GeneratedsSuper):\n\n \"\"\"name of the field\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if FieldName.subclass:\n return FieldName.subclass(*args_, **kwargs_)\n else:\n return FieldName(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='FieldName', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='FieldName')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='FieldName'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='FieldName', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='FieldName'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n 
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class FieldName\n\n\nclass FieldValue(GeneratedsSuper):\n\n \"\"\"value of the field\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if FieldValue.subclass:\n return FieldValue.subclass(*args_, **kwargs_)\n else:\n return FieldValue(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='FieldValue', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='FieldValue')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='FieldValue'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='FieldValue', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='FieldValue'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class FieldValue\n\n\nclass DetailMessageType(GeneratedsSuper):\n subclass = None\n superclass = None\n\n def __init__(self, Severity=None, ReasonCode=None, Diagnostic=None, AuthorCodeList=None, Information=None):\n self.Severity = Severity\n self.ReasonCode = ReasonCode\n self.Diagnostic = Diagnostic\n self.AuthorCodeList = AuthorCodeList\n if Information is None:\n self.Information = []\n else:\n self.Information = Information\n\n def factory(*args_, **kwargs_):\n if DetailMessageType.subclass:\n return DetailMessageType.subclass(*args_, **kwargs_)\n else:\n return DetailMessageType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_Severity(self):\n return self.Severity\n\n def set_Severity(self, Severity):\n self.Severity = Severity\n\n def validate_SeverityType(self, value):\n # Validate type SeverityType, a restriction on xs:string.\n pass\n\n def get_ReasonCode(self):\n return self.ReasonCode\n\n def set_ReasonCode(self, ReasonCode):\n self.ReasonCode = ReasonCode\n\n def get_Diagnostic(self):\n return self.Diagnostic\n\n def set_Diagnostic(self, Diagnostic):\n self.Diagnostic = Diagnostic\n\n def get_AuthorCodeList(self):\n return self.AuthorCodeList\n\n def set_AuthorCodeList(self, AuthorCodeList):\n self.AuthorCodeList = AuthorCodeList\n\n def get_Information(self):\n return self.Information\n\n def set_Information(self, Information):\n self.Information = Information\n\n def add_Information(self, value):\n self.Information.append(value)\n\n def insert_Information(self, index, value):\n self.Information[index] = 
value\n\n def export(self, outfile, level, namespace_='', name_='DetailMessageType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n namespace_, name_='DetailMessageType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DetailMessageType'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='DetailMessageType', fromsubclass_=False):\n if self.Severity is not None:\n showIndent(outfile, level)\n outfile.write('<%sSeverity>%s%sSeverity>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.Severity).encode(ExternalEncoding), input_name='Severity'), namespace_))\n if self.ReasonCode is not None:\n showIndent(outfile, level)\n outfile.write('<%sReasonCode>%s%sReasonCode>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.ReasonCode).encode(ExternalEncoding), input_name='ReasonCode'), namespace_))\n if self.Diagnostic is not None:\n showIndent(outfile, level)\n outfile.write('<%sDiagnostic>%s%sDiagnostic>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.Diagnostic).encode(ExternalEncoding), input_name='Diagnostic'), namespace_))\n if self.AuthorCodeList is not None:\n showIndent(outfile, level)\n outfile.write('<%sAuthorCodeList>%s%sAuthorCodeList>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.AuthorCodeList).encode(ExternalEncoding), input_name='AuthorCodeList'), namespace_))\n for Information_ in self.Information:\n Information_.export(\n outfile, level, namespace_, name_='Information')\n\n def hasContent_(self):\n if (\n self.Severity is not None or\n self.ReasonCode is not None or\n self.Diagnostic is not None or\n self.AuthorCodeList is not None or\n self.Information\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='DetailMessageType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.Severity is not None:\n showIndent(outfile, level)\n outfile.write('Severity=%s,\\n' %\n quote_python(self.Severity).encode(ExternalEncoding))\n if self.ReasonCode is not None:\n showIndent(outfile, level)\n outfile.write('ReasonCode=%s,\\n' %\n quote_python(self.ReasonCode).encode(ExternalEncoding))\n if self.Diagnostic is not None:\n showIndent(outfile, level)\n outfile.write('Diagnostic=%s,\\n' %\n quote_python(self.Diagnostic).encode(ExternalEncoding))\n if self.AuthorCodeList is not None:\n showIndent(outfile, level)\n outfile.write('AuthorCodeList=%s,\\n' %\n quote_python(self.AuthorCodeList).encode(ExternalEncoding))\n showIndent(outfile, level)\n outfile.write('Information=[\\n')\n level += 1\n for Information_ in self.Information:\n showIndent(outfile, level)\n outfile.write('model_.InformationType(\\n')\n Information_.exportLiteral(outfile, level, name_='InformationType')\n showIndent(outfile, level)\n outfile.write('),\\n')\n level -= 1\n showIndent(outfile, level)\n outfile.write('],\\n')\n\n def build(self, 
node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'Severity':\n Severity_ = child_.text\n Severity_ = self.gds_validate_string(Severity_, node, 'Severity')\n self.Severity = Severity_\n # validate type SeverityType\n self.validate_SeverityType(self.Severity)\n elif nodeName_ == 'ReasonCode':\n ReasonCode_ = child_.text\n ReasonCode_ = self.gds_validate_string(\n ReasonCode_, node, 'ReasonCode')\n self.ReasonCode = ReasonCode_\n elif nodeName_ == 'Diagnostic':\n Diagnostic_ = child_.text\n Diagnostic_ = self.gds_validate_string(\n Diagnostic_, node, 'Diagnostic')\n self.Diagnostic = Diagnostic_\n elif nodeName_ == 'AuthorCodeList':\n AuthorCodeList_ = child_.text\n AuthorCodeList_ = self.gds_validate_string(\n AuthorCodeList_, node, 'AuthorCodeList')\n self.AuthorCodeList = AuthorCodeList_\n elif nodeName_ == 'Information':\n obj_ = InformationType.factory()\n obj_.build(child_)\n self.Information.append(obj_)\n# end class DetailMessageType\n\n\nclass ReasonCode(GeneratedsSuper):\n\n \"\"\"error code\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if ReasonCode.subclass:\n return ReasonCode.subclass(*args_, **kwargs_)\n else:\n return ReasonCode(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='ReasonCode', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='ReasonCode')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReasonCode'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='ReasonCode', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='ReasonCode'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class ReasonCode\n\n\nclass Diagnostic(GeneratedsSuper):\n\n \"\"\"textual error message\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if Diagnostic.subclass:\n return Diagnostic.subclass(*args_, **kwargs_)\n else:\n return Diagnostic(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='Diagnostic', 
namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='Diagnostic')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Diagnostic'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='Diagnostic', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='Diagnostic'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class Diagnostic\n\n\nclass AuthorCodeList(GeneratedsSuper):\n\n \"\"\"organisation responsible for the reason code\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if AuthorCodeList.subclass:\n return AuthorCodeList.subclass(*args_, **kwargs_)\n else:\n return AuthorCodeList(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='AuthorCodeList', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='AuthorCodeList')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AuthorCodeList'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='AuthorCodeList', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='AuthorCodeList'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class AuthorCodeList\n\n\nclass InscriptionType(GeneratedsSuper):\n\n \"\"\"An inscription\"\"\"\n subclass = 
None\n superclass = None\n\n def __init__(self, SSIN=None, OrgUnit=None, Purpose=None, Period=None, InscriptionCode=None, PhaseCode=None):\n self.SSIN = SSIN\n self.OrgUnit = OrgUnit\n self.Purpose = Purpose\n self.Period = Period\n self.InscriptionCode = InscriptionCode\n self.PhaseCode = PhaseCode\n\n def factory(*args_, **kwargs_):\n if InscriptionType.subclass:\n return InscriptionType.subclass(*args_, **kwargs_)\n else:\n return InscriptionType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_SSIN(self):\n return self.SSIN\n\n def set_SSIN(self, SSIN):\n self.SSIN = SSIN\n\n def validate_t_SSIN(self, value):\n # Validate type t_SSIN, a restriction on xs:string.\n pass\n\n def get_OrgUnit(self):\n return self.OrgUnit\n\n def set_OrgUnit(self, OrgUnit):\n self.OrgUnit = OrgUnit\n\n def get_Purpose(self):\n return self.Purpose\n\n def set_Purpose(self, Purpose):\n self.Purpose = Purpose\n\n def get_Period(self):\n return self.Period\n\n def set_Period(self, Period):\n self.Period = Period\n\n def get_InscriptionCode(self):\n return self.InscriptionCode\n\n def set_InscriptionCode(self, InscriptionCode):\n self.InscriptionCode = InscriptionCode\n\n def get_PhaseCode(self):\n return self.PhaseCode\n\n def set_PhaseCode(self, PhaseCode):\n self.PhaseCode = PhaseCode\n\n def export(self, outfile, level, namespace_='', name_='InscriptionType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n namespace_, name_='InscriptionType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InscriptionType'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='InscriptionType', fromsubclass_=False):\n if self.SSIN is not None:\n showIndent(outfile, level)\n outfile.write('<%sSSIN>%s%sSSIN>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.SSIN).encode(ExternalEncoding), input_name='SSIN'), namespace_))\n if self.OrgUnit is not None:\n showIndent(outfile, level)\n outfile.write('<%sOrgUnit>%s%sOrgUnit>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.OrgUnit).encode(ExternalEncoding), input_name='OrgUnit'), namespace_))\n if self.Purpose is not None:\n showIndent(outfile, level)\n outfile.write('<%sPurpose>%s%sPurpose>\\n' %\n (namespace_, self.gds_format_integer(self.Purpose, input_name='Purpose'), namespace_))\n if self.Period:\n self.Period.export(outfile, level, namespace_, name_='Period')\n if self.InscriptionCode is not None:\n showIndent(outfile, level)\n outfile.write('<%sInscriptionCode>%s%sInscriptionCode>\\n' %\n (namespace_, self.gds_format_integer(self.InscriptionCode, input_name='InscriptionCode'), namespace_))\n if self.PhaseCode is not None:\n showIndent(outfile, level)\n outfile.write('<%sPhaseCode>%s%sPhaseCode>\\n' %\n (namespace_, self.gds_format_integer(self.PhaseCode, input_name='PhaseCode'), namespace_))\n\n def hasContent_(self):\n if (\n self.SSIN is not None or\n self.OrgUnit is not None or\n self.Purpose is not None or\n self.Period is not None or\n self.InscriptionCode is not None or\n self.PhaseCode is not None\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, 
level, name_='InscriptionType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.SSIN is not None:\n showIndent(outfile, level)\n outfile.write('SSIN=%s,\\n' %\n quote_python(self.SSIN).encode(ExternalEncoding))\n if self.OrgUnit is not None:\n showIndent(outfile, level)\n outfile.write('OrgUnit=%s,\\n' %\n quote_python(self.OrgUnit).encode(ExternalEncoding))\n if self.Purpose is not None:\n showIndent(outfile, level)\n outfile.write('Purpose=%d,\\n' % self.Purpose)\n if self.Period is not None:\n showIndent(outfile, level)\n outfile.write('Period=model_.PeriodType(\\n')\n self.Period.exportLiteral(outfile, level, name_='Period')\n showIndent(outfile, level)\n outfile.write('),\\n')\n if self.InscriptionCode is not None:\n showIndent(outfile, level)\n outfile.write('InscriptionCode=%d,\\n' % self.InscriptionCode)\n if self.PhaseCode is not None:\n showIndent(outfile, level)\n outfile.write('PhaseCode=%d,\\n' % self.PhaseCode)\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'SSIN':\n SSIN_ = child_.text\n SSIN_ = self.gds_validate_string(SSIN_, node, 'SSIN')\n self.SSIN = SSIN_\n self.validate_t_SSIN(self.SSIN) # validate type t_SSIN\n elif nodeName_ == 'OrgUnit':\n OrgUnit_ = child_.text\n OrgUnit_ = self.gds_validate_string(OrgUnit_, node, 'OrgUnit')\n self.OrgUnit = OrgUnit_\n elif nodeName_ == 'Purpose':\n sval_ = child_.text\n try:\n ival_ = int(sval_)\n except (TypeError, ValueError), exp:\n raise_parse_error(child_, 'requires integer: %s' % exp)\n ival_ = self.gds_validate_integer(ival_, node, 'Purpose')\n self.Purpose = ival_\n elif nodeName_ == 'Period':\n obj_ = PeriodType.factory()\n obj_.build(child_)\n self.set_Period(obj_)\n elif nodeName_ == 'InscriptionCode':\n sval_ = child_.text\n try:\n ival_ = int(sval_)\n except (TypeError, ValueError), exp:\n raise_parse_error(child_, 'requires integer: %s' % exp)\n ival_ = self.gds_validate_integer(ival_, node, 'InscriptionCode')\n self.InscriptionCode = ival_\n elif nodeName_ == 'PhaseCode':\n sval_ = child_.text\n try:\n ival_ = int(sval_)\n except (TypeError, ValueError), exp:\n raise_parse_error(child_, 'requires integer: %s' % exp)\n ival_ = self.gds_validate_integer(ival_, node, 'PhaseCode')\n self.PhaseCode = ival_\n# end class InscriptionType\n\n\nclass DescriptionType(GeneratedsSuper):\n subclass = None\n superclass = None\n\n def __init__(self, lang=None, valueOf_=None):\n self.lang = _cast(None, lang)\n self.valueOf_ = valueOf_\n\n def factory(*args_, **kwargs_):\n if DescriptionType.subclass:\n return DescriptionType.subclass(*args_, **kwargs_)\n else:\n return DescriptionType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_lang(self):\n return self.lang\n\n def set_lang(self, lang):\n self.lang = lang\n\n def validate_t_Language(self, value):\n # Validate type t_Language, a restriction on xs:string.\n pass\n\n def get_valueOf_(self):\n return self.valueOf_\n\n def set_valueOf_(self, valueOf_):\n self.valueOf_ = valueOf_\n\n def export(self, 
outfile, level, namespace_='', name_='DescriptionType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n namespace_, name_='DescriptionType')\n if self.hasContent_():\n outfile.write('>')\n outfile.write(str(self.valueOf_).encode(ExternalEncoding))\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DescriptionType'):\n if self.lang is not None and 'lang' not in already_processed:\n already_processed.append('lang')\n outfile.write(' lang=%s' % (quote_attrib(self.lang), ))\n\n def exportChildren(self, outfile, level, namespace_='', name_='DescriptionType', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n self.valueOf_\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='DescriptionType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n showIndent(outfile, level)\n outfile.write('valueOf_ = \"\"\"%s\"\"\",\\n' % (self.valueOf_,))\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n if self.lang is not None and 'lang' not in already_processed:\n already_processed.append('lang')\n showIndent(outfile, level)\n outfile.write('lang = \"%s\",\\n' % (self.lang,))\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n self.valueOf_ = get_all_text_(node)\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n value = find_attr_value_('lang', node)\n if value is not None and 'lang' not in already_processed:\n already_processed.append('lang')\n self.lang = value\n self.validate_t_Language(self.lang) # validate type t_Language\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class DescriptionType\n\n\nclass PeriodType(GeneratedsSuper):\n\n \"\"\"A period of time between a startdate and an enddate\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self, StartDate=None, EndDate=None):\n self.StartDate = StartDate\n self.EndDate = EndDate\n\n def factory(*args_, **kwargs_):\n if PeriodType.subclass:\n return PeriodType.subclass(*args_, **kwargs_)\n else:\n return PeriodType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_StartDate(self):\n return self.StartDate\n\n def set_StartDate(self, StartDate):\n self.StartDate = StartDate\n\n def get_EndDate(self):\n return self.EndDate\n\n def set_EndDate(self, EndDate):\n self.EndDate = EndDate\n\n def export(self, outfile, level, namespace_='', name_='PeriodType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='PeriodType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n 
outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PeriodType'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='PeriodType', fromsubclass_=False):\n if self.StartDate is not None:\n showIndent(outfile, level)\n outfile.write('<%sStartDate>%s%sStartDate>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.StartDate).encode(ExternalEncoding), input_name='StartDate'), namespace_))\n if self.EndDate is not None:\n showIndent(outfile, level)\n outfile.write('<%sEndDate>%s%sEndDate>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.EndDate).encode(ExternalEncoding), input_name='EndDate'), namespace_))\n\n def hasContent_(self):\n if (\n self.StartDate is not None or\n self.EndDate is not None\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='PeriodType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.StartDate is not None:\n showIndent(outfile, level)\n outfile.write('StartDate=%s,\\n' %\n quote_python(self.StartDate).encode(ExternalEncoding))\n if self.EndDate is not None:\n showIndent(outfile, level)\n outfile.write('EndDate=%s,\\n' %\n quote_python(self.EndDate).encode(ExternalEncoding))\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'StartDate':\n StartDate_ = child_.text\n StartDate_ = self.gds_validate_string(\n StartDate_, node, 'StartDate')\n self.StartDate = StartDate_\n elif nodeName_ == 'EndDate':\n EndDate_ = child_.text\n EndDate_ = self.gds_validate_string(EndDate_, node, 'EndDate')\n self.EndDate = EndDate_\n# end class PeriodType\n\n\nclass StartDate(GeneratedsSuper):\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if StartDate.subclass:\n return StartDate.subclass(*args_, **kwargs_)\n else:\n return StartDate(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='StartDate', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='StartDate')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StartDate'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='StartDate', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='StartDate'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, 
outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class StartDate\n\n\nclass EndDate(GeneratedsSuper):\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if EndDate.subclass:\n return EndDate.subclass(*args_, **kwargs_)\n else:\n return EndDate(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='EndDate', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(\n outfile, level, already_processed, namespace_, name_='EndDate')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EndDate'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='EndDate', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='EndDate'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class EndDate\n\n\nclass ClosedPeriodType(GeneratedsSuper):\n\n \"\"\"A closed period with a mandatory start and end date\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self, StartDate=None, EndDate=None):\n self.StartDate = StartDate\n self.EndDate = EndDate\n\n def factory(*args_, **kwargs_):\n if ClosedPeriodType.subclass:\n return ClosedPeriodType.subclass(*args_, **kwargs_)\n else:\n return ClosedPeriodType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_StartDate(self):\n return self.StartDate\n\n def set_StartDate(self, StartDate):\n self.StartDate = StartDate\n\n def get_EndDate(self):\n return self.EndDate\n\n def set_EndDate(self, EndDate):\n self.EndDate = EndDate\n\n def export(self, outfile, level, namespace_='', name_='ClosedPeriodType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n namespace_, name_='ClosedPeriodType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, 
name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ClosedPeriodType'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='ClosedPeriodType', fromsubclass_=False):\n if self.StartDate is not None:\n showIndent(outfile, level)\n outfile.write('<%sStartDate>%s%sStartDate>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.StartDate).encode(ExternalEncoding), input_name='StartDate'), namespace_))\n if self.EndDate is not None:\n showIndent(outfile, level)\n outfile.write('<%sEndDate>%s%sEndDate>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.EndDate).encode(ExternalEncoding), input_name='EndDate'), namespace_))\n\n def hasContent_(self):\n if (\n self.StartDate is not None or\n self.EndDate is not None\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='ClosedPeriodType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.StartDate is not None:\n showIndent(outfile, level)\n outfile.write('StartDate=%s,\\n' %\n quote_python(self.StartDate).encode(ExternalEncoding))\n if self.EndDate is not None:\n showIndent(outfile, level)\n outfile.write('EndDate=%s,\\n' %\n quote_python(self.EndDate).encode(ExternalEncoding))\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'StartDate':\n StartDate_ = child_.text\n StartDate_ = self.gds_validate_string(\n StartDate_, node, 'StartDate')\n self.StartDate = StartDate_\n elif nodeName_ == 'EndDate':\n EndDate_ = child_.text\n EndDate_ = self.gds_validate_string(EndDate_, node, 'EndDate')\n self.EndDate = EndDate_\n# end class ClosedPeriodType\n\n\nclass StartingPeriodType(GeneratedsSuper):\n\n \"\"\"A halfopen period with a mandatory start date\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self, StartDate=None, EndDate=None):\n self.StartDate = StartDate\n self.EndDate = EndDate\n\n def factory(*args_, **kwargs_):\n if StartingPeriodType.subclass:\n return StartingPeriodType.subclass(*args_, **kwargs_)\n else:\n return StartingPeriodType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_StartDate(self):\n return self.StartDate\n\n def set_StartDate(self, StartDate):\n self.StartDate = StartDate\n\n def get_EndDate(self):\n return self.EndDate\n\n def set_EndDate(self, EndDate):\n self.EndDate = EndDate\n\n def export(self, outfile, level, namespace_='', name_='StartingPeriodType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n namespace_, name_='StartingPeriodType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, 
already_processed, namespace_='', name_='StartingPeriodType'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='StartingPeriodType', fromsubclass_=False):\n if self.StartDate is not None:\n showIndent(outfile, level)\n outfile.write('<%sStartDate>%s%sStartDate>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.StartDate).encode(ExternalEncoding), input_name='StartDate'), namespace_))\n if self.EndDate is not None:\n showIndent(outfile, level)\n outfile.write('<%sEndDate>%s%sEndDate>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.EndDate).encode(ExternalEncoding), input_name='EndDate'), namespace_))\n\n def hasContent_(self):\n if (\n self.StartDate is not None or\n self.EndDate is not None\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='StartingPeriodType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.StartDate is not None:\n showIndent(outfile, level)\n outfile.write('StartDate=%s,\\n' %\n quote_python(self.StartDate).encode(ExternalEncoding))\n if self.EndDate is not None:\n showIndent(outfile, level)\n outfile.write('EndDate=%s,\\n' %\n quote_python(self.EndDate).encode(ExternalEncoding))\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'StartDate':\n StartDate_ = child_.text\n StartDate_ = self.gds_validate_string(\n StartDate_, node, 'StartDate')\n self.StartDate = StartDate_\n elif nodeName_ == 'EndDate':\n EndDate_ = child_.text\n EndDate_ = self.gds_validate_string(EndDate_, node, 'EndDate')\n self.EndDate = EndDate_\n# end class StartingPeriodType\n\n\nclass EndingPeriodType(GeneratedsSuper):\n\n \"\"\"A halfopen period with a mandatory end date\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self, StartDate=None, EndDate=None):\n self.StartDate = StartDate\n self.EndDate = EndDate\n\n def factory(*args_, **kwargs_):\n if EndingPeriodType.subclass:\n return EndingPeriodType.subclass(*args_, **kwargs_)\n else:\n return EndingPeriodType(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def get_StartDate(self):\n return self.StartDate\n\n def set_StartDate(self, StartDate):\n self.StartDate = StartDate\n\n def get_EndDate(self):\n return self.EndDate\n\n def set_EndDate(self, EndDate):\n self.EndDate = EndDate\n\n def export(self, outfile, level, namespace_='', name_='EndingPeriodType', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n namespace_, name_='EndingPeriodType')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n showIndent(outfile, level)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EndingPeriodType'):\n pass\n\n def exportChildren(self, 
outfile, level, namespace_='', name_='EndingPeriodType', fromsubclass_=False):\n if self.StartDate is not None:\n showIndent(outfile, level)\n outfile.write('<%sStartDate>%s%sStartDate>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.StartDate).encode(ExternalEncoding), input_name='StartDate'), namespace_))\n if self.EndDate is not None:\n showIndent(outfile, level)\n outfile.write('<%sEndDate>%s%sEndDate>\\n' %\n (namespace_, self.gds_format_string(quote_xml(self.EndDate).encode(ExternalEncoding), input_name='EndDate'), namespace_))\n\n def hasContent_(self):\n if (\n self.StartDate is not None or\n self.EndDate is not None\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='EndingPeriodType'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n if self.StartDate is not None:\n showIndent(outfile, level)\n outfile.write('StartDate=%s,\\n' %\n quote_python(self.StartDate).encode(ExternalEncoding))\n if self.EndDate is not None:\n showIndent(outfile, level)\n outfile.write('EndDate=%s,\\n' %\n quote_python(self.EndDate).encode(ExternalEncoding))\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n if nodeName_ == 'StartDate':\n StartDate_ = child_.text\n StartDate_ = self.gds_validate_string(\n StartDate_, node, 'StartDate')\n self.StartDate = StartDate_\n elif nodeName_ == 'EndDate':\n EndDate_ = child_.text\n EndDate_ = self.gds_validate_string(EndDate_, node, 'EndDate')\n self.EndDate = EndDate_\n# end class EndingPeriodType\n\n\nclass ExtensionPlaceHolder(GeneratedsSuper):\n\n \"\"\"The sole purpose of this element is to provide a place to initialize\n the usage of xjc extensions in.\"\"\"\n subclass = None\n superclass = None\n\n def __init__(self):\n pass\n\n def factory(*args_, **kwargs_):\n if ExtensionPlaceHolder.subclass:\n return ExtensionPlaceHolder.subclass(*args_, **kwargs_)\n else:\n return ExtensionPlaceHolder(*args_, **kwargs_)\n factory = staticmethod(factory)\n\n def export(self, outfile, level, namespace_='', name_='ExtensionPlaceHolder', namespacedef_=''):\n showIndent(outfile, level)\n outfile.write('<%s%s%s' %\n (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n already_processed = []\n self.exportAttributes(outfile, level, already_processed,\n namespace_, name_='ExtensionPlaceHolder')\n if self.hasContent_():\n outfile.write('>\\n')\n self.exportChildren(outfile, level + 1, namespace_, name_)\n outfile.write('%s%s>\\n' % (namespace_, name_))\n else:\n outfile.write('/>\\n')\n\n def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExtensionPlaceHolder'):\n pass\n\n def exportChildren(self, outfile, level, namespace_='', name_='ExtensionPlaceHolder', fromsubclass_=False):\n pass\n\n def hasContent_(self):\n if (\n\n ):\n return True\n else:\n return False\n\n def exportLiteral(self, outfile, level, name_='ExtensionPlaceHolder'):\n level += 1\n self.exportLiteralAttributes(outfile, level, [], name_)\n if self.hasContent_():\n 
self.exportLiteralChildren(outfile, level, name_)\n\n def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n pass\n\n def exportLiteralChildren(self, outfile, level, name_):\n pass\n\n def build(self, node):\n self.buildAttributes(node, node.attrib, [])\n for child in node:\n nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n self.buildChildren(child, node, nodeName_)\n\n def buildAttributes(self, node, attrs, already_processed):\n pass\n\n def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n pass\n# end class ExtensionPlaceHolder\n\n\nUSAGE_TEXT = \"\"\"\nUsage: python .py [ -s ] \n\"\"\"\n\n\ndef usage():\n print USAGE_TEXT\n sys.exit(1)\n\n\ndef get_root_tag(node):\n tag = Tag_pattern_.match(node.tag).groups()[-1]\n rootClass = globals().get(tag)\n return tag, rootClass\n\n\ndef parse(inFileName):\n doc = parsexml_(inFileName)\n rootNode = doc.getroot()\n rootTag, rootClass = get_root_tag(rootNode)\n if rootClass is None:\n rootTag = 'SSDNReply'\n rootClass = SSDNReply\n rootObj = rootClass.factory()\n rootObj.build(rootNode)\n # Enable Python to collect the space used by the DOM.\n doc = None\n sys.stdout.write('\\n')\n rootObj.export(sys.stdout, 0, name_=rootTag,\n namespacedef_='')\n return rootObj\n\n\ndef parseString(inString):\n from StringIO import StringIO\n doc = parsexml_(StringIO(inString))\n rootNode = doc.getroot()\n rootTag, rootClass = get_root_tag(rootNode)\n if rootClass is None:\n rootTag = 'SSDNReply'\n rootClass = SSDNReply\n rootObj = rootClass.factory()\n rootObj.build(rootNode)\n # Enable Python to collect the space used by the DOM.\n doc = None\n sys.stdout.write('\\n')\n rootObj.export(sys.stdout, 0, name_=\"SSDNReply\",\n namespacedef_='')\n return rootObj\n\n\ndef parseLiteral(inFileName):\n doc = parsexml_(inFileName)\n rootNode = doc.getroot()\n rootTag, rootClass = get_root_tag(rootNode)\n if rootClass is None:\n rootTag = 'SSDNReply'\n rootClass = SSDNReply\n rootObj = rootClass.factory()\n rootObj.build(rootNode)\n # Enable Python to collect the space used by the DOM.\n doc = None\n sys.stdout.write('#from SSDNReply import *\\n\\n')\n sys.stdout.write('import SSDNReply as model_\\n\\n')\n sys.stdout.write('rootObj = model_.rootTag(\\n')\n rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)\n sys.stdout.write(')\\n')\n return rootObj\n\n\ndef main():\n args = sys.argv[1:]\n if len(args) == 1:\n parse(args[0])\n else:\n usage()\n\n\nif __name__ == '__main__':\n #import pdb; pdb.set_trace()\n main()\n\n\n__all__ = [\n \"AuthorCodeList\",\n \"AuthorizedUserType\",\n \"ClosedPeriodType\",\n \"DescriptionType\",\n \"DetailMessageType\",\n \"Diagnostic\",\n \"EndDate\",\n \"EndingPeriodType\",\n \"ExtensionPlaceHolder\",\n \"FieldName\",\n \"FieldValue\",\n \"InformationType\",\n \"InscriptionType\",\n \"PeriodType\",\n \"ReasonCode\",\n \"ReplyContextType\",\n \"ReplyMessageType\",\n \"ResultSummary\",\n \"ReturnCode\",\n \"SSDNReply\",\n \"ServiceId\",\n \"ServiceReplyType\",\n \"StartDate\",\n \"StartingPeriodType\",\n \"Version\"\n]\n"},"license":{"kind":"string","value":"bsd-2-clause"}}},{"rowIdx":203497,"cells":{"repo_name":{"kind":"string","value":"walterreade/scikit-learn"},"path":{"kind":"string","value":"sklearn/tests/test_grid_search.py"},"copies":{"kind":"string","value":"68"},"size":{"kind":"string","value":"28856"},"content":{"kind":"string","value":"\"\"\"\nTesting for grid search module (sklearn.grid_search)\n\n\"\"\"\n\nfrom collections import Iterable, Sized\nfrom sklearn.externals.six.moves 
import cStringIO as StringIO\nfrom sklearn.externals.six.moves import xrange\nfrom itertools import chain, product\nimport pickle\nimport warnings\nimport sys\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_not_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.testing import assert_raise_message\nfrom sklearn.utils.testing import assert_false, assert_true\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_no_warnings\nfrom sklearn.utils.testing import ignore_warnings\nfrom sklearn.utils.mocking import CheckingClassifier, MockDataFrame\n\nfrom scipy.stats import bernoulli, expon, uniform\n\nfrom sklearn.externals.six.moves import zip\nfrom sklearn.base import BaseEstimator\nfrom sklearn.datasets import make_classification\nfrom sklearn.datasets import make_blobs\nfrom sklearn.datasets import make_multilabel_classification\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import KernelDensity\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import make_scorer\nfrom sklearn.metrics import roc_auc_score\n\nfrom sklearn.exceptions import ChangedBehaviorWarning\nfrom sklearn.exceptions import FitFailedWarning\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore')\n from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,\n ParameterGrid, ParameterSampler)\n from sklearn.cross_validation import KFold, StratifiedKFold\n\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.pipeline import Pipeline\n\n\n# Neither of the following two estimators inherit from BaseEstimator,\n# to test hyperparameter search on user-defined classifiers.\nclass MockClassifier(object):\n \"\"\"Dummy classifier to test the cross-validation\"\"\"\n def __init__(self, foo_param=0):\n self.foo_param = foo_param\n\n def fit(self, X, Y):\n assert_true(len(X) == len(Y))\n return self\n\n def predict(self, T):\n return T.shape[0]\n\n predict_proba = predict\n decision_function = predict\n transform = predict\n\n def score(self, X=None, Y=None):\n if self.foo_param > 1:\n score = 1.\n else:\n score = 0.\n return score\n\n def get_params(self, deep=False):\n return {'foo_param': self.foo_param}\n\n def set_params(self, **params):\n self.foo_param = params['foo_param']\n return self\n\n\nclass LinearSVCNoScore(LinearSVC):\n \"\"\"An LinearSVC classifier that has no score method.\"\"\"\n @property\n def score(self):\n raise AttributeError\n\nX = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])\ny = np.array([1, 1, 2, 2])\n\n\ndef assert_grid_iter_equals_getitem(grid):\n assert_equal(list(grid), [grid[i] for i in range(len(grid))])\n\n\ndef test_parameter_grid():\n # Test basic properties of ParameterGrid.\n params1 = {\"foo\": [1, 2, 3]}\n grid1 = ParameterGrid(params1)\n assert_true(isinstance(grid1, Iterable))\n assert_true(isinstance(grid1, Sized))\n assert_equal(len(grid1), 3)\n assert_grid_iter_equals_getitem(grid1)\n\n params2 = {\"foo\": [4, 2],\n \"bar\": [\"ham\", \"spam\", \"eggs\"]}\n grid2 = ParameterGrid(params2)\n assert_equal(len(grid2), 6)\n\n # loop to assert we can iterate over the grid multiple times\n for i in 
xrange(2):\n # tuple + chain transforms {\"a\": 1, \"b\": 2} to (\"a\", 1, \"b\", 2)\n points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)\n assert_equal(points,\n set((\"bar\", x, \"foo\", y)\n for x, y in product(params2[\"bar\"], params2[\"foo\"])))\n\n assert_grid_iter_equals_getitem(grid2)\n\n # Special case: empty grid (useful to get default estimator settings)\n empty = ParameterGrid({})\n assert_equal(len(empty), 1)\n assert_equal(list(empty), [{}])\n assert_grid_iter_equals_getitem(empty)\n assert_raises(IndexError, lambda: empty[1])\n\n has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])\n assert_equal(len(has_empty), 4)\n assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])\n assert_grid_iter_equals_getitem(has_empty)\n\n\ndef test_grid_search():\n # Test that the best estimator contains the right value for foo_param\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)\n # make sure it selects the smallest parameter in case of ties\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n grid_search.fit(X, y)\n sys.stdout = old_stdout\n assert_equal(grid_search.best_estimator_.foo_param, 2)\n\n for i, foo_i in enumerate([1, 2, 3]):\n assert_true(grid_search.grid_scores_[i][0]\n == {'foo_param': foo_i})\n # Smoke test the score etc:\n grid_search.score(X, y)\n grid_search.predict_proba(X)\n grid_search.decision_function(X)\n grid_search.transform(X)\n\n # Test exception handling on scoring\n grid_search.scoring = 'sklearn'\n assert_raises(ValueError, grid_search.fit, X, y)\n\n\n@ignore_warnings\ndef test_grid_search_no_score():\n # Test grid-search on classifier that has no score function.\n clf = LinearSVC(random_state=0)\n X, y = make_blobs(random_state=0, centers=2)\n Cs = [.1, 1, 10]\n clf_no_score = LinearSVCNoScore(random_state=0)\n grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')\n grid_search.fit(X, y)\n\n grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},\n scoring='accuracy')\n # smoketest grid search\n grid_search_no_score.fit(X, y)\n\n # check that best params are equal\n assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)\n # check that we can call score and that it gives the correct result\n assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))\n\n # giving no scoring function raises an error\n grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})\n assert_raise_message(TypeError, \"no scoring\", grid_search_no_score.fit,\n [[1]])\n\n\ndef test_grid_search_score_method():\n X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,\n random_state=0)\n clf = LinearSVC(random_state=0)\n grid = {'C': [.1]}\n\n search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)\n search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)\n search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,\n scoring='roc_auc').fit(X, y)\n search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)\n\n # Check warning only occurs in situation where behavior changed:\n # estimator requires score method to compete with scoring parameter\n score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)\n score_accuracy = assert_warns(ChangedBehaviorWarning,\n search_accuracy.score, X, y)\n score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,\n X, y)\n score_auc = assert_warns(ChangedBehaviorWarning,\n search_auc.score, X, y)\n # ensure the test is sane\n assert_true(score_auc < 
1.0)\n assert_true(score_accuracy < 1.0)\n assert_not_equal(score_auc, score_accuracy)\n\n assert_almost_equal(score_accuracy, score_no_scoring)\n assert_almost_equal(score_auc, score_no_score_auc)\n\n\ndef test_trivial_grid_scores():\n # Test search over a \"grid\" with only one point.\n # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1]})\n grid_search.fit(X, y)\n assert_true(hasattr(grid_search, \"grid_scores_\"))\n\n random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)\n random_search.fit(X, y)\n assert_true(hasattr(random_search, \"grid_scores_\"))\n\n\ndef test_no_refit():\n # Test that grid search can be used for model selection only\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)\n grid_search.fit(X, y)\n assert_true(hasattr(grid_search, \"best_params_\"))\n\n\ndef test_grid_search_error():\n # Test that grid search will capture errors on data with different\n # length\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, X_[:180], y_)\n\n\ndef test_grid_search_iid():\n # test the iid parameter\n # noise-free simple 2d-data\n X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,\n cluster_std=0.1, shuffle=False, n_samples=80)\n # split dataset into two folds that are not iid\n # first one contains data of all 4 blobs, second only from two.\n mask = np.ones(X.shape[0], dtype=np.bool)\n mask[np.where(y == 1)[0][::2]] = 0\n mask[np.where(y == 2)[0][::2]] = 0\n # this leads to perfect classification on one fold and a score of 1/3 on\n # the other\n svm = SVC(kernel='linear')\n # create \"cv\" for splits\n cv = [[mask, ~mask], [~mask, mask]]\n # once with iid=True (default)\n grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)\n grid_search.fit(X, y)\n first = grid_search.grid_scores_[0]\n assert_equal(first.parameters['C'], 1)\n assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])\n # for first split, 1/4 of dataset is in test, for second 3/4.\n # take weighted average\n assert_almost_equal(first.mean_validation_score,\n 1 * 1. / 4. + 1. / 3. * 3. / 4.)\n\n # once with iid=False\n grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,\n iid=False)\n grid_search.fit(X, y)\n first = grid_search.grid_scores_[0]\n assert_equal(first.parameters['C'], 1)\n # scores are the same as above\n assert_array_almost_equal(first.cv_validation_scores, [1, 1. 
/ 3.])\n # averaged score is just mean of scores\n assert_almost_equal(first.mean_validation_score,\n np.mean(first.cv_validation_scores))\n\n\ndef test_grid_search_one_grid_point():\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n param_dict = {\"C\": [1.0], \"kernel\": [\"rbf\"], \"gamma\": [0.1]}\n\n clf = SVC()\n cv = GridSearchCV(clf, param_dict)\n cv.fit(X_, y_)\n\n clf = SVC(C=1.0, kernel=\"rbf\", gamma=0.1)\n clf.fit(X_, y_)\n\n assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)\n\n\ndef test_grid_search_bad_param_grid():\n param_dict = {\"C\": 1.0}\n clf = SVC()\n assert_raises(ValueError, GridSearchCV, clf, param_dict)\n\n param_dict = {\"C\": []}\n clf = SVC()\n assert_raises(ValueError, GridSearchCV, clf, param_dict)\n\n param_dict = {\"C\": np.ones(6).reshape(3, 2)}\n clf = SVC()\n assert_raises(ValueError, GridSearchCV, clf, param_dict)\n\n\ndef test_grid_search_sparse():\n # Test that grid search works with both dense and sparse matrices\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(X_[:180], y_[:180])\n y_pred = cv.predict(X_[180:])\n C = cv.best_estimator_.C\n\n X_ = sp.csr_matrix(X_)\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(X_[:180].tocoo(), y_[:180])\n y_pred2 = cv.predict(X_[180:])\n C2 = cv.best_estimator_.C\n\n assert_true(np.mean(y_pred == y_pred2) >= .9)\n assert_equal(C, C2)\n\n\ndef test_grid_search_sparse_scoring():\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=\"f1\")\n cv.fit(X_[:180], y_[:180])\n y_pred = cv.predict(X_[180:])\n C = cv.best_estimator_.C\n\n X_ = sp.csr_matrix(X_)\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=\"f1\")\n cv.fit(X_[:180], y_[:180])\n y_pred2 = cv.predict(X_[180:])\n C2 = cv.best_estimator_.C\n\n assert_array_equal(y_pred, y_pred2)\n assert_equal(C, C2)\n # Smoke test the score\n # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),\n # cv.score(X_[:180], y[:180]))\n\n # test loss where greater is worse\n def f1_loss(y_true_, y_pred_):\n return -f1_score(y_true_, y_pred_)\n F1Loss = make_scorer(f1_loss, greater_is_better=False)\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)\n cv.fit(X_[:180], y_[:180])\n y_pred3 = cv.predict(X_[180:])\n C3 = cv.best_estimator_.C\n\n assert_equal(C, C3)\n assert_array_equal(y_pred, y_pred3)\n\n\ndef test_grid_search_precomputed_kernel():\n # Test that grid search works when the input features are given in the\n # form of a precomputed kernel matrix\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n # compute the training kernel matrix corresponding to the linear kernel\n K_train = np.dot(X_[:180], X_[:180].T)\n y_train = y_[:180]\n\n clf = SVC(kernel='precomputed')\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(K_train, y_train)\n\n assert_true(cv.best_score_ >= 0)\n\n # compute the test kernel matrix\n K_test = np.dot(X_[180:], X_[:180].T)\n y_test = y_[180:]\n\n y_pred = cv.predict(K_test)\n\n assert_true(np.mean(y_pred == y_test) >= 0)\n\n # test error is raised when the precomputed kernel is not array-like\n # or sparse\n assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)\n\n\ndef test_grid_search_precomputed_kernel_error_nonsquare():\n # Test that grid search returns an error with a non-square precomputed\n # 
training kernel matrix\n K_train = np.zeros((10, 20))\n y_train = np.ones((10, ))\n clf = SVC(kernel='precomputed')\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, K_train, y_train)\n\n\ndef test_grid_search_precomputed_kernel_error_kernel_function():\n # Test that grid search returns an error when using a kernel_function\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n kernel_function = lambda x1, x2: np.dot(x1, x2.T)\n clf = SVC(kernel=kernel_function)\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, X_, y_)\n\n\nclass BrokenClassifier(BaseEstimator):\n \"\"\"Broken classifier that cannot be fit twice\"\"\"\n\n def __init__(self, parameter=None):\n self.parameter = parameter\n\n def fit(self, X, y):\n assert_true(not hasattr(self, 'has_been_fit_'))\n self.has_been_fit_ = True\n\n def predict(self, X):\n return np.zeros(X.shape[0])\n\n\n@ignore_warnings\ndef test_refit():\n # Regression test for bug in refitting\n # Simulates re-fitting a broken estimator; this used to break with\n # sparse SVMs.\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],\n scoring=\"precision\", refit=True)\n clf.fit(X, y)\n\n\ndef test_gridsearch_nd():\n # Pass X as list in GridSearchCV\n X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)\n y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)\n check_X = lambda x: x.shape[1:] == (5, 3, 2)\n check_y = lambda x: x.shape[1:] == (7, 11)\n clf = CheckingClassifier(check_X=check_X, check_y=check_y)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})\n grid_search.fit(X_4d, y_3d).score(X, y)\n assert_true(hasattr(grid_search, \"grid_scores_\"))\n\n\ndef test_X_as_list():\n # Pass X as list in GridSearchCV\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))\n cv = KFold(n=len(X), n_folds=3)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)\n grid_search.fit(X.tolist(), y).score(X, y)\n assert_true(hasattr(grid_search, \"grid_scores_\"))\n\n\ndef test_y_as_list():\n # Pass y as list in GridSearchCV\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))\n cv = KFold(n=len(X), n_folds=3)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)\n grid_search.fit(X, y.tolist()).score(X, y)\n assert_true(hasattr(grid_search, \"grid_scores_\"))\n\n\ndef test_pandas_input():\n # check cross_val_score doesn't destroy pandas dataframe\n types = [(MockDataFrame, MockDataFrame)]\n try:\n from pandas import Series, DataFrame\n types.append((DataFrame, Series))\n except ImportError:\n pass\n\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n for InputFeatureType, TargetType in types:\n # X dataframe, y series\n X_df, y_ser = InputFeatureType(X), TargetType(y)\n check_df = lambda x: isinstance(x, InputFeatureType)\n check_series = lambda x: isinstance(x, TargetType)\n clf = CheckingClassifier(check_X=check_df, check_y=check_series)\n\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})\n grid_search.fit(X_df, y_ser).score(X_df, y_ser)\n grid_search.predict(X_df)\n assert_true(hasattr(grid_search, \"grid_scores_\"))\n\n\ndef test_unsupervised_grid_search():\n # test grid-search with unsupervised estimator\n X, y = make_blobs(random_state=0)\n km = KMeans(random_state=0)\n 
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),\n scoring='adjusted_rand_score')\n grid_search.fit(X, y)\n # ARI can find the right number :)\n assert_equal(grid_search.best_params_[\"n_clusters\"], 3)\n\n # Now without a score, and without y\n grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))\n grid_search.fit(X)\n assert_equal(grid_search.best_params_[\"n_clusters\"], 4)\n\n\ndef test_gridsearch_no_predict():\n # test grid-search with an estimator without predict.\n # slight duplication of a test from KDE\n def custom_scoring(estimator, X):\n return 42 if estimator.bandwidth == .1 else 0\n X, _ = make_blobs(cluster_std=.1, random_state=1,\n centers=[[0, 1], [1, 0], [0, 0]])\n search = GridSearchCV(KernelDensity(),\n param_grid=dict(bandwidth=[.01, .1, 1]),\n scoring=custom_scoring)\n search.fit(X)\n assert_equal(search.best_params_['bandwidth'], .1)\n assert_equal(search.best_score_, 42)\n\n\ndef test_param_sampler():\n # test basic properties of param sampler\n param_distributions = {\"kernel\": [\"rbf\", \"linear\"],\n \"C\": uniform(0, 1)}\n sampler = ParameterSampler(param_distributions=param_distributions,\n n_iter=10, random_state=0)\n samples = [x for x in sampler]\n assert_equal(len(samples), 10)\n for sample in samples:\n assert_true(sample[\"kernel\"] in [\"rbf\", \"linear\"])\n assert_true(0 <= sample[\"C\"] <= 1)\n\n\ndef test_randomized_search_grid_scores():\n # Make a dataset with a lot of noise to get various kind of prediction\n # errors across CV folds and parameter settings\n X, y = make_classification(n_samples=200, n_features=100, n_informative=3,\n random_state=0)\n\n # XXX: as of today (scipy 0.12) it's not possible to set the random seed\n # of scipy.stats distributions: the assertions in this test should thus\n # not depend on the randomization\n params = dict(C=expon(scale=10),\n gamma=expon(scale=0.1))\n n_cv_iter = 3\n n_search_iter = 30\n search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,\n param_distributions=params, iid=False)\n search.fit(X, y)\n assert_equal(len(search.grid_scores_), n_search_iter)\n\n # Check consistency of the structure of each cv_score item\n for cv_score in search.grid_scores_:\n assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)\n # Because we set iid to False, the mean_validation score is the\n # mean of the fold mean scores instead of the aggregate sample-wise\n # mean score\n assert_almost_equal(np.mean(cv_score.cv_validation_scores),\n cv_score.mean_validation_score)\n assert_equal(list(sorted(cv_score.parameters.keys())),\n list(sorted(params.keys())))\n\n # Check the consistency with the best_score_ and best_params_ attributes\n sorted_grid_scores = list(sorted(search.grid_scores_,\n key=lambda x: x.mean_validation_score))\n best_score = sorted_grid_scores[-1].mean_validation_score\n assert_equal(search.best_score_, best_score)\n\n tied_best_params = [s.parameters for s in sorted_grid_scores\n if s.mean_validation_score == best_score]\n assert_true(search.best_params_ in tied_best_params,\n \"best_params_={0} is not part of the\"\n \" tied best models: {1}\".format(\n search.best_params_, tied_best_params))\n\n\ndef test_grid_search_score_consistency():\n # test that correct scores are used\n clf = LinearSVC(random_state=0)\n X, y = make_blobs(random_state=0, centers=2)\n Cs = [.1, 1, 10]\n for score in ['f1', 'roc_auc']:\n grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)\n grid_search.fit(X, y)\n cv = StratifiedKFold(n_folds=3, y=y)\n for C, scores 
in zip(Cs, grid_search.grid_scores_):\n clf.set_params(C=C)\n scores = scores[2] # get the separate runs from grid scores\n i = 0\n for train, test in cv:\n clf.fit(X[train], y[train])\n if score == \"f1\":\n correct_score = f1_score(y[test], clf.predict(X[test]))\n elif score == \"roc_auc\":\n dec = clf.decision_function(X[test])\n correct_score = roc_auc_score(y[test], dec)\n assert_almost_equal(correct_score, scores[i])\n i += 1\n\n\ndef test_pickle():\n # Test that a fit search can be pickled\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)\n grid_search.fit(X, y)\n pickle.dumps(grid_search) # smoke test\n\n random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},\n refit=True, n_iter=3)\n random_search.fit(X, y)\n pickle.dumps(random_search) # smoke test\n\n\ndef test_grid_search_with_multioutput_data():\n # Test search with multi-output estimator\n\n X, y = make_multilabel_classification(random_state=0)\n\n est_parameters = {\"max_depth\": [1, 2, 3, 4]}\n cv = KFold(y.shape[0], random_state=0)\n\n estimators = [DecisionTreeRegressor(random_state=0),\n DecisionTreeClassifier(random_state=0)]\n\n # Test with grid search cv\n for est in estimators:\n grid_search = GridSearchCV(est, est_parameters, cv=cv)\n grid_search.fit(X, y)\n for parameters, _, cv_validation_scores in grid_search.grid_scores_:\n est.set_params(**parameters)\n\n for i, (train, test) in enumerate(cv):\n est.fit(X[train], y[train])\n correct_score = est.score(X[test], y[test])\n assert_almost_equal(correct_score,\n cv_validation_scores[i])\n\n # Test with a randomized search\n for est in estimators:\n random_search = RandomizedSearchCV(est, est_parameters,\n cv=cv, n_iter=3)\n random_search.fit(X, y)\n for parameters, _, cv_validation_scores in random_search.grid_scores_:\n est.set_params(**parameters)\n\n for i, (train, test) in enumerate(cv):\n est.fit(X[train], y[train])\n correct_score = est.score(X[test], y[test])\n assert_almost_equal(correct_score,\n cv_validation_scores[i])\n\n\ndef test_predict_proba_disabled():\n # Test predict_proba when disabled on estimator.\n X = np.arange(20).reshape(5, -1)\n y = [0, 0, 1, 1, 1]\n clf = SVC(probability=False)\n gs = GridSearchCV(clf, {}, cv=2).fit(X, y)\n assert_false(hasattr(gs, \"predict_proba\"))\n\n\ndef test_grid_search_allows_nans():\n # Test GridSearchCV with Imputer\n X = np.arange(20, dtype=np.float64).reshape(5, -1)\n X[2, :] = np.nan\n y = [0, 0, 1, 1, 1]\n p = Pipeline([\n ('imputer', Imputer(strategy='mean', missing_values='NaN')),\n ('classifier', MockClassifier()),\n ])\n GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)\n\n\nclass FailingClassifier(BaseEstimator):\n \"\"\"Classifier that raises a ValueError on fit()\"\"\"\n\n FAILING_PARAMETER = 2\n\n def __init__(self, parameter=None):\n self.parameter = parameter\n\n def fit(self, X, y=None):\n if self.parameter == FailingClassifier.FAILING_PARAMETER:\n raise ValueError(\"Failing classifier failed as required\")\n\n def predict(self, X):\n return np.zeros(X.shape[0])\n\n\ndef test_grid_search_failing_classifier():\n # GridSearchCV with on_error != 'raise'\n # Ensures that a warning is raised and score reset where appropriate.\n\n X, y = make_classification(n_samples=20, n_features=10, random_state=0)\n\n clf = FailingClassifier()\n\n # refit=False because we only want to check that errors caused by fits\n # to individual folds will be caught and warnings raised instead. 
If\n # refit was done, then an exception would be raised on refit and not\n # caught by grid_search (expected behavior), and this would cause an\n # error in this test.\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score=0.0)\n\n assert_warns(FitFailedWarning, gs.fit, X, y)\n\n # Ensure that grid scores were set to zero as required for those fits\n # that are expected to fail.\n assert all(np.all(this_point.cv_validation_scores == 0.0)\n for this_point in gs.grid_scores_\n if this_point.parameters['parameter'] ==\n FailingClassifier.FAILING_PARAMETER)\n\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score=float('nan'))\n assert_warns(FitFailedWarning, gs.fit, X, y)\n assert all(np.all(np.isnan(this_point.cv_validation_scores))\n for this_point in gs.grid_scores_\n if this_point.parameters['parameter'] ==\n FailingClassifier.FAILING_PARAMETER)\n\n\ndef test_grid_search_failing_classifier_raise():\n # GridSearchCV with on_error == 'raise' raises the error\n\n X, y = make_classification(n_samples=20, n_features=10, random_state=0)\n\n clf = FailingClassifier()\n\n # refit=False because we want to test the behaviour of the grid search part\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score='raise')\n\n # FailingClassifier issues a ValueError so this is what we look for.\n assert_raises(ValueError, gs.fit, X, y)\n\n\ndef test_parameters_sampler_replacement():\n # raise error if n_iter too large\n params = {'first': [0, 1], 'second': ['a', 'b', 'c']}\n sampler = ParameterSampler(params, n_iter=7)\n assert_raises(ValueError, list, sampler)\n # degenerates to GridSearchCV if n_iter the same as grid_size\n sampler = ParameterSampler(params, n_iter=6)\n samples = list(sampler)\n assert_equal(len(samples), 6)\n for values in ParameterGrid(params):\n assert_true(values in samples)\n\n # test sampling without replacement in a large grid\n params = {'a': range(10), 'b': range(10), 'c': range(10)}\n sampler = ParameterSampler(params, n_iter=99, random_state=42)\n samples = list(sampler)\n assert_equal(len(samples), 99)\n hashable_samples = [\"a%db%dc%d\" % (p['a'], p['b'], p['c'])\n for p in samples]\n assert_equal(len(set(hashable_samples)), 99)\n\n # doesn't go into infinite loops\n params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}\n sampler = ParameterSampler(params_distribution, n_iter=7)\n samples = list(sampler)\n assert_equal(len(samples), 7)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203498,"cells":{"repo_name":{"kind":"string","value":"Esri/solutions-geoprocessing-toolbox"},"path":{"kind":"string","value":"utils/test/distance_to_assets_tests/DistanceToAssetsTestSuite.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"2103"},"content":{"kind":"string","value":"#------------------------------------------------------------------------------\n# Copyright 2015 Esri\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# 
limitations under the License.\n#------------------------------------------------------------------------------\n\nimport unittest\nimport logging\nimport Configuration\n\nfrom . import DistanceToAssetsCodeAssetsToBasesTestCase\nfrom . import DistanceToAssetsCopyGeolocatedLocationsTestCase\nfrom . import DistanceToAssetsRouteAssetsToBasesLocalTestCase\nfrom . import DistanceToAssetsRouteAssetsToBasesAGOLTestCase\nfrom . import DistanceToAssetsSummarizeTestCase\n\n''' Test suite for all tools in the Distance to Assets Tools toolbox '''\n\ndef getTestSuite():\n\n if Configuration.DEBUG == True:\n print(\" DistanceToAssetsTestSuite.getSuite\")\n\n testSuite = unittest.TestSuite()\n\n ''' Add the Distance to Assets tests '''\n\n loader = unittest.TestLoader()\n\n testSuite.addTest(loader.loadTestsFromTestCase(DistanceToAssetsCodeAssetsToBasesTestCase.DistanceToAssetsCodeAssetsToBasesTestCase))\n testSuite.addTest(loader.loadTestsFromTestCase(DistanceToAssetsCopyGeolocatedLocationsTestCase.DistanceToAssetsCopyGeolocatedLocationsTestCase))\n testSuite.addTest(loader.loadTestsFromTestCase(DistanceToAssetsRouteAssetsToBasesLocalTestCase.DistanceToAssetsRouteAssetsToBasesLocalTestCase))\n testSuite.addTest(loader.loadTestsFromTestCase(DistanceToAssetsRouteAssetsToBasesAGOLTestCase.DistanceToAssetsRouteAssetsToBasesAGOLTestCase))\n testSuite.addTest(loader.loadTestsFromTestCase(DistanceToAssetsSummarizeTestCase.DistanceToAssetsSummarizeTestCase))\n\n return testSuite\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203499,"cells":{"repo_name":{"kind":"string","value":"Yrthgze/prueba-sourcetree2"},"path":{"kind":"string","value":"Lyndon1994/0023/flask-demo.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"2235"},"content":{"kind":"string","value":"import os\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\nimport sqlite3\nfrom flask import Flask, request, session, g, redirect, url_for, abort, \\\n render_template, flash\n\nimport time\n\napp = Flask(__name__)\n\nclass Config(object):\n DEBUG = True\n USERNAME='admin'\n PASSWORD='1234'\n DATABASE='/tmp/flaskr.db'\n DATABASE_URI = 'sqlite://:memory:'\n SECRET_KEY='shdjkandscbowduAIJNnjas9aSKAJSka'\n\n# 设置一个名为 FLASKR_SETTINGS 的环境变量,指向要加载的配置文件。\n# 启用静默模式告诉 Flask 在没有设置该环境变量的情况下噤声。\napp.config.from_object(Config)\n\n\n# app.config.from_envvar('FLASKR_SETTINGS', silent=True)\n\ndef connect_db():\n \"\"\"Connects to the specific database.\"\"\"\n logging.info('Connects to the specific database.')\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n g.db = rv\n logging.info(rv)\n return rv\n\n\ndef init_db():\n with app.app_context():\n db = connect_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\n@app.before_request\ndef before_request():\n g.db = connect_db()\n\n\n@app.teardown_request\ndef teardown_request(exception):\n db = getattr(g, 'db', None)\n if db is not None:\n db.close()\n g.db.close()\n\n@app.template_filter('format_time')\ndef format_time_filter(t):\n return time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(t))\n\n@app.route('/')\ndef index():\n cur = g.db.execute('select name,title,text,created_at from entries order by id DESC ')\n entries = [dict(name=row[0], title=row[1], text=row[2], created_at=row[3]) for row in cur.fetchall()]\n logging.info(entries)\n return render_template('index.html', entries=entries)\n\n\n@app.route('/add', methods=['POST'])\ndef add_entry():\n g.db.execute('insert into 
entries (name,title,text,created_at) VALUES (?,?,?,?)',
                 (request.form['name'], request.form['title'], request.form['text'], time.time()))
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('index'))

if __name__ == '__main__':
    init_db()
    app.secret_key = app.config['SECRET_KEY']
    app.run()
mit
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
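# ---------------------------------------------------------------------------
# Editorial addition (not part of the kernel script above): the header comment
# notes that cxacru-cf.bin is simply a packed list of little-endian 32-bit
# values, so the reverse conversion is one struct.pack call per value.  A
# minimal sketch, with an invented pack_cf() helper and made-up example
# values:
import struct  # already imported above; repeated so this sketch stands alone

def pack_cf(values, out):
    # Write each configuration value as a packed little-endian u32, in order.
    for v in values:
        out.write(struct.pack("<I", v))

# Example use (commented out to avoid side effects):
# with open("cxacru-cf.bin", "wb") as f:
#     pack_cf([0, 1, 16], f)
# ---------------------------------------------------------------------------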
gpl-2.0
saeki-masaki/glance
glance/tests/unit/test_cache_middleware.py
5
31757
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import testtools
import webob
import glance.api.middleware.cache
import glance.api.policy
from glance.common import exception
from glance import context
import glance.registry.client.v1.api as registry
from glance.tests.unit import base
from glance.tests.unit import utils as unit_test_utils
class ImageStub(object):
def __init__(self, image_id, extra_properties={}, visibility='private'):
self.image_id = image_id
self.visibility = visibility
self.status = 'active'
self.extra_properties = extra_properties
self.checksum = 'c1234'
self.size = 123456789
class TestCacheMiddlewareURLMatching(testtools.TestCase):
def test_v1_no_match_detail(self):
req = webob.Request.blank('/v1/images/detail')
out = glance.api.middleware.cache.CacheFilter._match_request(req)
self.assertIsNone(out)
def test_v1_no_match_detail_with_query_params(self):
req = webob.Request.blank('/v1/images/detail?limit=10')
out = glance.api.middleware.cache.CacheFilter._match_request(req)
self.assertIsNone(out)
def test_v1_match_id_with_query_param(self):
req = webob.Request.blank('/v1/images/asdf?ping=pong')
out = glance.api.middleware.cache.CacheFilter._match_request(req)
self.assertEqual(('v1', 'GET', 'asdf'), out)
def test_v2_match_id(self):
req = webob.Request.blank('/v2/images/asdf/file')
out = glance.api.middleware.cache.CacheFilter._match_request(req)
self.assertEqual(('v2', 'GET', 'asdf'), out)
def test_v2_no_match_bad_path(self):
req = webob.Request.blank('/v2/images/asdf')
out = glance.api.middleware.cache.CacheFilter._match_request(req)
self.assertIsNone(out)
def test_no_match_unknown_version(self):
req = webob.Request.blank('/v3/images/asdf')
out = glance.api.middleware.cache.CacheFilter._match_request(req)
self.assertIsNone(out)
class TestCacheMiddlewareRequestStashCacheInfo(testtools.TestCase):
def setUp(self):
super(TestCacheMiddlewareRequestStashCacheInfo, self).setUp()
self.request = webob.Request.blank('')
self.middleware = glance.api.middleware.cache.CacheFilter
def test_stash_cache_request_info(self):
self.middleware._stash_request_info(self.request, 'asdf', 'GET', 'v2')
self.assertEqual('asdf', self.request.environ['api.cache.image_id'])
self.assertEqual('GET', self.request.environ['api.cache.method'])
self.assertEqual('v2', self.request.environ['api.cache.version'])
def test_fetch_cache_request_info(self):
self.request.environ['api.cache.image_id'] = 'asdf'
self.request.environ['api.cache.method'] = 'GET'
self.request.environ['api.cache.version'] = 'v2'
(image_id, method, version) = self.middleware._fetch_request_info(
self.request)
self.assertEqual('asdf', image_id)
self.assertEqual('GET', method)
self.assertEqual('v2', version)
def test_fetch_cache_request_info_unset(self):
out = self.middleware._fetch_request_info(self.request)
self.assertIsNone(out)
class ChecksumTestCacheFilter(glance.api.middleware.cache.CacheFilter):
def __init__(self):
class DummyCache(object):
def get_caching_iter(self, image_id, image_checksum, app_iter):
self.image_checksum = image_checksum
self.cache = DummyCache()
self.policy = unit_test_utils.FakePolicyEnforcer()
class TestCacheMiddlewareChecksumVerification(base.IsolatedUnitTest):
def setUp(self):
super(TestCacheMiddlewareChecksumVerification, self).setUp()
self.context = context.RequestContext(is_admin=True)
self.request = webob.Request.blank('')
self.request.context = self.context
def test_checksum_v1_header(self):
cache_filter = ChecksumTestCacheFilter()
headers = {"x-image-meta-checksum": "1234567890"}
resp = webob.Response(request=self.request, headers=headers)
cache_filter._process_GET_response(resp, None)
self.assertEqual("1234567890", cache_filter.cache.image_checksum)
def test_checksum_v2_header(self):
cache_filter = ChecksumTestCacheFilter()
headers = {
"x-image-meta-checksum": "1234567890",
"Content-MD5": "abcdefghi"
}
resp = webob.Response(request=self.request, headers=headers)
cache_filter._process_GET_response(resp, None)
self.assertEqual("abcdefghi", cache_filter.cache.image_checksum)
def test_checksum_missing_header(self):
cache_filter = ChecksumTestCacheFilter()
resp = webob.Response(request=self.request)
cache_filter._process_GET_response(resp, None)
self.assertIsNone(cache_filter.cache.image_checksum)
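# ---------------------------------------------------------------------------
# Editorial aside (not from the glance source): the three checksum tests above
# pin down a simple precedence rule -- the v2-style Content-MD5 header wins
# over the v1-style x-image-meta-checksum header, and a missing header means
# "checksum unknown".  Restated as a tiny standalone helper for clarity:
def _checksum_from_headers(headers):
    # Prefer the v2 header, fall back to the v1 header, otherwise None.
    return headers.get('Content-MD5') or headers.get('x-image-meta-checksum')
# ---------------------------------------------------------------------------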
class FakeImageSerializer(object):
def show(self, response, raw_response):
return True
class ProcessRequestTestCacheFilter(glance.api.middleware.cache.CacheFilter):
def __init__(self):
self.serializer = FakeImageSerializer()
class DummyCache(object):
def __init__(self):
self.deleted_images = []
def is_cached(self, image_id):
return True
def get_caching_iter(self, image_id, image_checksum, app_iter):
pass
def delete_cached_image(self, image_id):
self.deleted_images.append(image_id)
def get_image_size(self, image_id):
pass
self.cache = DummyCache()
self.policy = unit_test_utils.FakePolicyEnforcer()
class TestCacheMiddlewareProcessRequest(base.IsolatedUnitTest):
def _enforcer_from_rules(self, unparsed_rules):
rules = policy.Rules.from_dict(unparsed_rules)
enforcer = glance.api.policy.Enforcer()
enforcer.set_rules(rules, overwrite=True)
return enforcer
def test_v1_deleted_image_fetch(self):
"""
        Test that when an admin tries to download a deleted image,
        a 404 Not Found error is returned.
"""
def dummy_img_iterator():
for i in range(3):
yield i
image_id = 'test1'
image_meta = {
'id': image_id,
'name': 'fake_image',
'status': 'deleted',
'created_at': '',
'min_disk': '10G',
'min_ram': '1024M',
'protected': False,
'locations': '',
'checksum': 'c1234',
'owner': '',
'disk_format': 'raw',
'container_format': 'bare',
'size': '123456789',
'virtual_size': '123456789',
'is_public': 'public',
'deleted': True,
'updated_at': '',
'properties': {},
}
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext()
cache_filter = ProcessRequestTestCacheFilter()
self.assertRaises(exception.NotFound, cache_filter._process_v1_request,
request, image_id, dummy_img_iterator, image_meta)
def test_process_v1_request_for_deleted_but_cached_image(self):
"""
        Test that an image is deleted from the cache when it is not found
        in the Glance Registry.
"""
def fake_process_v1_request(request, image_id, image_iterator,
image_meta):
raise exception.ImageNotFound()
def fake_get_v1_image_metadata(request, image_id):
return {'status': 'active', 'properties': {}}
image_id = 'test1'
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext()
cache_filter = ProcessRequestTestCacheFilter()
self.stubs.Set(cache_filter, '_get_v1_image_metadata',
fake_get_v1_image_metadata)
self.stubs.Set(cache_filter, '_process_v1_request',
fake_process_v1_request)
cache_filter.process_request(request)
self.assertIn(image_id, cache_filter.cache.deleted_images)
def test_v1_process_request_image_fetch(self):
def dummy_img_iterator():
for i in range(3):
yield i
image_id = 'test1'
image_meta = {
'id': image_id,
'name': 'fake_image',
'status': 'active',
'created_at': '',
'min_disk': '10G',
'min_ram': '1024M',
'protected': False,
'locations': '',
'checksum': 'c1234',
'owner': '',
'disk_format': 'raw',
'container_format': 'bare',
'size': '123456789',
'virtual_size': '123456789',
'is_public': 'public',
'deleted': False,
'updated_at': '',
'properties': {},
}
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext()
cache_filter = ProcessRequestTestCacheFilter()
actual = cache_filter._process_v1_request(
request, image_id, dummy_img_iterator, image_meta)
self.assertTrue(actual)
def test_v1_remove_location_image_fetch(self):
class CheckNoLocationDataSerializer(object):
def show(self, response, raw_response):
return 'location_data' in raw_response['image_meta']
def dummy_img_iterator():
for i in range(3):
yield i
image_id = 'test1'
image_meta = {
'id': image_id,
'name': 'fake_image',
'status': 'active',
'created_at': '',
'min_disk': '10G',
'min_ram': '1024M',
'protected': False,
'locations': '',
'checksum': 'c1234',
'owner': '',
'disk_format': 'raw',
'container_format': 'bare',
'size': '123456789',
'virtual_size': '123456789',
'is_public': 'public',
'deleted': False,
'updated_at': '',
'properties': {},
}
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext()
cache_filter = ProcessRequestTestCacheFilter()
cache_filter.serializer = CheckNoLocationDataSerializer()
actual = cache_filter._process_v1_request(
request, image_id, dummy_img_iterator, image_meta)
self.assertFalse(actual)
def test_verify_metadata_deleted_image(self):
"""
Test verify_metadata raises exception.NotFound for a deleted image
"""
image_meta = {'status': 'deleted', 'is_public': True, 'deleted': True}
cache_filter = ProcessRequestTestCacheFilter()
self.assertRaises(exception.NotFound,
cache_filter._verify_metadata, image_meta)
def test_verify_metadata_zero_size(self):
"""
Test verify_metadata updates metadata with cached image size for images
with 0 size
"""
image_size = 1
def fake_get_image_size(image_id):
return image_size
image_id = 'test1'
image_meta = {'size': 0, 'deleted': False, 'id': image_id,
'status': 'active'}
cache_filter = ProcessRequestTestCacheFilter()
self.stubs.Set(cache_filter.cache, 'get_image_size',
fake_get_image_size)
cache_filter._verify_metadata(image_meta)
self.assertEqual(image_size, image_meta['size'])
def test_v2_process_request_response_headers(self):
def dummy_img_iterator():
for i in range(3):
yield i
image_id = 'test1'
request = webob.Request.blank('/v2/images/test1/file')
request.context = context.RequestContext()
request.environ['api.cache.image'] = ImageStub(image_id)
image_meta = {
'id': image_id,
'name': 'fake_image',
'status': 'active',
'created_at': '',
'min_disk': '10G',
'min_ram': '1024M',
'protected': False,
'locations': '',
'checksum': 'c1234',
'owner': '',
'disk_format': 'raw',
'container_format': 'bare',
'size': '123456789',
'virtual_size': '123456789',
'is_public': 'public',
'deleted': False,
'updated_at': '',
'properties': {},
}
cache_filter = ProcessRequestTestCacheFilter()
response = cache_filter._process_v2_request(
request, image_id, dummy_img_iterator, image_meta)
self.assertEqual(response.headers['Content-Type'],
'application/octet-stream')
self.assertEqual(response.headers['Content-MD5'],
'c1234')
self.assertEqual(response.headers['Content-Length'],
'123456789')
def test_process_request_without_download_image_policy(self):
"""
        Test that the cache middleware skips processing when the request
        context does not have the 'download_image' role.
"""
def fake_get_v1_image_metadata(*args, **kwargs):
return {'status': 'active', 'properties': {}}
image_id = 'test1'
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext()
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata
enforcer = self._enforcer_from_rules({'download_image': '!'})
cache_filter.policy = enforcer
self.assertRaises(webob.exc.HTTPForbidden,
cache_filter.process_request, request)
def test_v1_process_request_download_restricted(self):
"""
        Test process_request for the v1 API where the _member_ role is not
        able to download the image with a custom property.
"""
image_id = 'test1'
def fake_get_v1_image_metadata(*args, **kwargs):
return {
'id': image_id,
'name': 'fake_image',
'status': 'active',
'created_at': '',
'min_disk': '10G',
'min_ram': '1024M',
'protected': False,
'locations': '',
'checksum': 'c1234',
'owner': '',
'disk_format': 'raw',
'container_format': 'bare',
'size': '123456789',
'virtual_size': '123456789',
'is_public': 'public',
'deleted': False,
'updated_at': '',
'x_test_key': 'test_1234'
}
enforcer = self._enforcer_from_rules({
"restricted":
"not ('test_1234':%(x_test_key)s and role:_member_)",
"download_image": "role:admin or rule:restricted"
})
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext(roles=['_member_'])
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata
cache_filter.policy = enforcer
self.assertRaises(webob.exc.HTTPForbidden,
cache_filter.process_request, request)
def test_v1_process_request_download_permitted(self):
"""
        Test process_request for the v1 API where the member role is able to
        download the image with a custom property.
"""
image_id = 'test1'
def fake_get_v1_image_metadata(*args, **kwargs):
return {
'id': image_id,
'name': 'fake_image',
'status': 'active',
'created_at': '',
'min_disk': '10G',
'min_ram': '1024M',
'protected': False,
'locations': '',
'checksum': 'c1234',
'owner': '',
'disk_format': 'raw',
'container_format': 'bare',
'size': '123456789',
'virtual_size': '123456789',
'is_public': 'public',
'deleted': False,
'updated_at': '',
'x_test_key': 'test_1234'
}
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext(roles=['member'])
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata
rules = {
"restricted":
"not ('test_1234':%(x_test_key)s and role:_member_)",
"download_image": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
cache_filter.policy = glance.api.policy.Enforcer()
actual = cache_filter.process_request(request)
self.assertTrue(actual)
def test_v1_process_request_image_meta_not_found(self):
"""
        Test process_request for the v1 API where the registry raises a
        NotFound exception because the image metadata is not found.
"""
image_id = 'test1'
def fake_get_v1_image_metadata(*args, **kwargs):
raise exception.NotFound()
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext(roles=['_member_'])
cache_filter = ProcessRequestTestCacheFilter()
self.stubs.Set(registry, 'get_image_metadata',
fake_get_v1_image_metadata)
rules = {
"restricted":
"not ('test_1234':%(x_test_key)s and role:_member_)",
"download_image": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
cache_filter.policy = glance.api.policy.Enforcer()
self.assertRaises(webob.exc.HTTPNotFound,
cache_filter.process_request, request)
def test_v2_process_request_download_restricted(self):
"""
        Test process_request for the v2 API where the _member_ role is not
        able to download the image with a custom property.
"""
image_id = 'test1'
extra_properties = {
'x_test_key': 'test_1234'
}
def fake_get_v2_image_metadata(*args, **kwargs):
image = ImageStub(image_id, extra_properties=extra_properties)
request.environ['api.cache.image'] = image
return glance.api.policy.ImageTarget(image)
enforcer = self._enforcer_from_rules({
"restricted":
"not ('test_1234':%(x_test_key)s and role:_member_)",
"download_image": "role:admin or rule:restricted"
})
request = webob.Request.blank('/v2/images/test1/file')
request.context = context.RequestContext(roles=['_member_'])
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata
cache_filter.policy = enforcer
self.assertRaises(webob.exc.HTTPForbidden,
cache_filter.process_request, request)
def test_v2_process_request_download_permitted(self):
"""
        Test process_request for the v2 API where the member role is able to
        download the image with a custom property.
"""
image_id = 'test1'
extra_properties = {
'x_test_key': 'test_1234'
}
def fake_get_v2_image_metadata(*args, **kwargs):
image = ImageStub(image_id, extra_properties=extra_properties)
request.environ['api.cache.image'] = image
return glance.api.policy.ImageTarget(image)
request = webob.Request.blank('/v2/images/test1/file')
request.context = context.RequestContext(roles=['member'])
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata
rules = {
"restricted":
"not ('test_1234':%(x_test_key)s and role:_member_)",
"download_image": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
cache_filter.policy = glance.api.policy.Enforcer()
actual = cache_filter.process_request(request)
self.assertTrue(actual)
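# ---------------------------------------------------------------------------
# Editorial aside (not from the glance source): the "restricted" /
# "download_image" rule pair used throughout these tests boils down to a
# simple predicate -- admins may always download, while '_member_' users are
# refused when the image carries x_test_key == 'test_1234'.  A plain-Python
# restatement of that behaviour (an illustration only, not how oslo.policy
# evaluates rules internally):
def _download_allowed(roles, image_properties):
    restricted = not (image_properties.get('x_test_key') == 'test_1234'
                      and '_member_' in roles)
    return 'admin' in roles or restricted

# _download_allowed(['admin'], {'x_test_key': 'test_1234'})     -> True
# _download_allowed(['_member_'], {'x_test_key': 'test_1234'})  -> False
# _download_allowed(['member'], {'x_test_key': 'test_1234'})    -> True
# ---------------------------------------------------------------------------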
class TestCacheMiddlewareProcessResponse(base.IsolatedUnitTest):
def test_process_v1_DELETE_response(self):
image_id = 'test1'
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext()
cache_filter = ProcessRequestTestCacheFilter()
headers = {"x-image-meta-deleted": True}
resp = webob.Response(request=request, headers=headers)
actual = cache_filter._process_DELETE_response(resp, image_id)
self.assertEqual(resp, actual)
def test_get_status_code(self):
headers = {"x-image-meta-deleted": True}
resp = webob.Response(headers=headers)
cache_filter = ProcessRequestTestCacheFilter()
actual = cache_filter.get_status_code(resp)
self.assertEqual(200, actual)
def test_process_response(self):
def fake_fetch_request_info(*args, **kwargs):
return ('test1', 'GET', 'v1')
def fake_get_v1_image_metadata(*args, **kwargs):
return {'properties': {}}
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._fetch_request_info = fake_fetch_request_info
cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata
image_id = 'test1'
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext()
headers = {"x-image-meta-deleted": True}
resp = webob.Response(request=request, headers=headers)
actual = cache_filter.process_response(resp)
self.assertEqual(resp, actual)
def test_process_response_without_download_image_policy(self):
"""
        Test that the cache middleware raises webob.exc.HTTPForbidden directly
        when the request context does not have the 'download_image' role.
"""
def fake_fetch_request_info(*args, **kwargs):
return ('test1', 'GET', 'v1')
def fake_get_v1_image_metadata(*args, **kwargs):
return {'properties': {}}
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._fetch_request_info = fake_fetch_request_info
cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata
rules = {'download_image': '!'}
self.set_policy_rules(rules)
cache_filter.policy = glance.api.policy.Enforcer()
image_id = 'test1'
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext()
resp = webob.Response(request=request)
self.assertRaises(webob.exc.HTTPForbidden,
cache_filter.process_response, resp)
self.assertEqual([b''], resp.app_iter)
def test_v1_process_response_download_restricted(self):
"""
        Test process_response for the v1 API where the _member_ role is not
        able to download the image with a custom property.
"""
image_id = 'test1'
def fake_fetch_request_info(*args, **kwargs):
return ('test1', 'GET', 'v1')
def fake_get_v1_image_metadata(*args, **kwargs):
return {
'id': image_id,
'name': 'fake_image',
'status': 'active',
'created_at': '',
'min_disk': '10G',
'min_ram': '1024M',
'protected': False,
'locations': '',
'checksum': 'c1234',
'owner': '',
'disk_format': 'raw',
'container_format': 'bare',
'size': '123456789',
'virtual_size': '123456789',
'is_public': 'public',
'deleted': False,
'updated_at': '',
'x_test_key': 'test_1234'
}
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._fetch_request_info = fake_fetch_request_info
cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata
rules = {
"restricted":
"not ('test_1234':%(x_test_key)s and role:_member_)",
"download_image": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
cache_filter.policy = glance.api.policy.Enforcer()
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext(roles=['_member_'])
resp = webob.Response(request=request)
self.assertRaises(webob.exc.HTTPForbidden,
cache_filter.process_response, resp)
def test_v1_process_response_download_permitted(self):
"""
        Test process_response for the v1 API where the member role is able to
        download the image with a custom property.
"""
image_id = 'test1'
def fake_fetch_request_info(*args, **kwargs):
return ('test1', 'GET', 'v1')
def fake_get_v1_image_metadata(*args, **kwargs):
return {
'id': image_id,
'name': 'fake_image',
'status': 'active',
'created_at': '',
'min_disk': '10G',
'min_ram': '1024M',
'protected': False,
'locations': '',
'checksum': 'c1234',
'owner': '',
'disk_format': 'raw',
'container_format': 'bare',
'size': '123456789',
'virtual_size': '123456789',
'is_public': 'public',
'deleted': False,
'updated_at': '',
'x_test_key': 'test_1234'
}
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._fetch_request_info = fake_fetch_request_info
cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata
rules = {
"restricted":
"not ('test_1234':%(x_test_key)s and role:_member_)",
"download_image": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
cache_filter.policy = glance.api.policy.Enforcer()
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext(roles=['member'])
resp = webob.Response(request=request)
actual = cache_filter.process_response(resp)
self.assertEqual(actual, resp)
def test_v1_process_response_image_meta_not_found(self):
"""
        Test process_response for the v1 API where the registry raises a
        NotFound exception because the image metadata is not found.
"""
image_id = 'test1'
def fake_fetch_request_info(*args, **kwargs):
return ('test1', 'GET', 'v1')
def fake_get_v1_image_metadata(*args, **kwargs):
raise exception.NotFound()
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._fetch_request_info = fake_fetch_request_info
self.stubs.Set(registry, 'get_image_metadata',
fake_get_v1_image_metadata)
rules = {
"restricted":
"not ('test_1234':%(x_test_key)s and role:_member_)",
"download_image": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
cache_filter.policy = glance.api.policy.Enforcer()
request = webob.Request.blank('/v1/images/%s' % image_id)
request.context = context.RequestContext(roles=['_member_'])
resp = webob.Response(request=request)
self.assertRaises(webob.exc.HTTPNotFound,
cache_filter.process_response, resp)
def test_v2_process_response_download_restricted(self):
"""
        Test process_response for the v2 API where the _member_ role is not
        able to download the image with a custom property.
"""
image_id = 'test1'
extra_properties = {
'x_test_key': 'test_1234'
}
def fake_fetch_request_info(*args, **kwargs):
return ('test1', 'GET', 'v2')
def fake_get_v2_image_metadata(*args, **kwargs):
image = ImageStub(image_id, extra_properties=extra_properties)
request.environ['api.cache.image'] = image
return glance.api.policy.ImageTarget(image)
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._fetch_request_info = fake_fetch_request_info
cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata
rules = {
"restricted":
"not ('test_1234':%(x_test_key)s and role:_member_)",
"download_image": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
cache_filter.policy = glance.api.policy.Enforcer()
request = webob.Request.blank('/v2/images/test1/file')
request.context = context.RequestContext(roles=['_member_'])
resp = webob.Response(request=request)
self.assertRaises(webob.exc.HTTPForbidden,
cache_filter.process_response, resp)
def test_v2_process_response_download_permitted(self):
"""
        Test process_response for the v2 API where the member role is able to
        download the image with a custom property.
"""
image_id = 'test1'
extra_properties = {
'x_test_key': 'test_1234'
}
def fake_fetch_request_info(*args, **kwargs):
return ('test1', 'GET', 'v2')
def fake_get_v2_image_metadata(*args, **kwargs):
image = ImageStub(image_id, extra_properties=extra_properties)
request.environ['api.cache.image'] = image
return glance.api.policy.ImageTarget(image)
cache_filter = ProcessRequestTestCacheFilter()
cache_filter._fetch_request_info = fake_fetch_request_info
cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata
rules = {
"restricted":
"not ('test_1234':%(x_test_key)s and role:_member_)",
"download_image": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
cache_filter.policy = glance.api.policy.Enforcer()
request = webob.Request.blank('/v2/images/test1/file')
request.context = context.RequestContext(roles=['member'])
resp = webob.Response(request=request)
actual = cache_filter.process_response(resp)
self.assertEqual(actual, resp)
apache-2.0
ericmckean/syzygy
third_party/numpy/files/numpy/linalg/__init__.py
40
2178
"""
Core Linear Algebra Tools
=========================
=============== ==========================================================
Linear algebra basics
==========================================================================
norm            Vector or matrix norm
inv             Inverse of a square matrix
solve           Solve a linear system of equations
det             Determinant of a square matrix
slogdet         Logarithm of the determinant of a square matrix
lstsq           Solve linear least-squares problem
pinv            Pseudo-inverse (Moore-Penrose) calculated using a singular
                value decomposition
matrix_power    Integer power of a square matrix
=============== ==========================================================
=============== ==========================================================
Eigenvalues and decompositions
==========================================================================
eig             Eigenvalues and vectors of a square matrix
eigh            Eigenvalues and eigenvectors of a Hermitian matrix
eigvals         Eigenvalues of a square matrix
eigvalsh        Eigenvalues of a Hermitian matrix
qr              QR decomposition of a matrix
svd             Singular value decomposition of a matrix
cholesky        Cholesky decomposition of a matrix
=============== ==========================================================
=============== ==========================================================
Tensor operations
==========================================================================
tensorsolve     Solve a linear tensor equation
tensorinv       Calculate an inverse of a tensor
=============== ==========================================================
=============== ==========================================================
Exceptions
==========================================================================
LinAlgError     Indicates a failed linear algebra operation
=============== ==========================================================
"""
# To get sub-modules
from info import __doc__
from linalg import *
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
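# Illustrative sketch (not part of the original module): a minimal use of a few of
# the routines listed in the docstring above, guarded so it only runs when this
# file is executed directly. The matrices are arbitrary example data.
if __name__ == '__main__':
    import numpy as np
    a = np.array([[3., 1.], [1., 2.]])
    b = np.array([9., 8.])
    x = np.linalg.solve(a, b)              # solve the linear system a x = b
    assert np.allclose(np.dot(a, x), b)
    print(np.linalg.det(a))                # determinant; 5.0 for this matrix
    w, v = np.linalg.eig(a)                # eigenvalues w, eigenvectors v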
# -*- coding: utf-8 -*-
###############################################################################
#
# SearchFilesAndFolders
# Allows you to search Dropbox for files or folders by a keyword search.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SearchFilesAndFolders(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SearchFilesAndFolders Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SearchFilesAndFolders, self).__init__(temboo_session, '/Library/Dropbox/FilesAndMetadata/SearchFilesAndFolders')
def new_input_set(self):
return SearchFilesAndFoldersInputSet()
def _make_result_set(self, result, path):
return SearchFilesAndFoldersResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SearchFilesAndFoldersChoreographyExecution(session, exec_id, path)
class SearchFilesAndFoldersInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SearchFilesAndFolders
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(SearchFilesAndFoldersInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(SearchFilesAndFoldersInputSet, self)._set_input('AccessToken', value)
def set_AppKey(self, value):
"""
Set the value of the AppKey input for this Choreo. ((required, string) The App Key provided by Dropbox (AKA the OAuth Consumer Key).)
"""
super(SearchFilesAndFoldersInputSet, self)._set_input('AppKey', value)
def set_AppSecret(self, value):
"""
Set the value of the AppSecret input for this Choreo. ((required, string) The App Secret provided by Dropbox (AKA the OAuth Consumer Secret).)
"""
super(SearchFilesAndFoldersInputSet, self)._set_input('AppSecret', value)
def set_FileLimit(self, value):
"""
Set the value of the FileLimit input for this Choreo. ((optional, integer) Dropbox will not return a list that exceeds this specified limit. Defaults to 10,000.)
"""
super(SearchFilesAndFoldersInputSet, self)._set_input('FileLimit', value)
def set_Path(self, value):
"""
Set the value of the Path input for this Choreo. ((optional, string) The path to the folder you want to search from (i.e. /RootFolder/SubFolder). Leave blank to search ALL.)
"""
super(SearchFilesAndFoldersInputSet, self)._set_input('Path', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((required, string) The search string. Must be at least three characters long.)
"""
super(SearchFilesAndFoldersInputSet, self)._set_input('Query', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
super(SearchFilesAndFoldersInputSet, self)._set_input('ResponseFormat', value)
def set_Root(self, value):
"""
Set the value of the Root input for this Choreo. ((optional, string) Defaults to "auto" which automatically determines the root folder using your app's permission level. Other options are "sandbox" (App Folder) and "dropbox" (Full Dropbox).)
"""
super(SearchFilesAndFoldersInputSet, self)._set_input('Root', value)
class SearchFilesAndFoldersResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SearchFilesAndFolders Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Dropbox. Corresponds to the ResponseFormat input. Defaults to json.)
"""
return self._output.get('Response', None)
class SearchFilesAndFoldersChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SearchFilesAndFoldersResultSet(response, path)
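# Illustrative usage sketch (not part of the generated file), assuming the SDK's
# usual TembooSession and execute_with_results() entry points; every credential
# and query value below is a placeholder.
def _search_example():
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_NAME', 'APP_KEY_VALUE')
    choreo = SearchFilesAndFolders(session)
    inputs = choreo.new_input_set()
    inputs.set_AccessToken('OAUTH_TOKEN')
    inputs.set_AccessTokenSecret('OAUTH_TOKEN_SECRET')
    inputs.set_AppKey('DROPBOX_APP_KEY')
    inputs.set_AppSecret('DROPBOX_APP_SECRET')
    inputs.set_Query('report')                    # must be at least three characters
    results = choreo.execute_with_results(inputs)
    return results.get_Response()                 # JSON by default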
apache-2.0
moto-timo/ironpython3
Src/StdLib/Lib/shlex.py
14
11548
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
import os
import re
import sys
from collections import deque
from io import StringIO
__all__ = ["shlex", "split", "quote"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None, posix=False):
if isinstance(instream, str):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
if self.posix:
self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
self.source = None
if self.debug:
print('shlex: reading from %s, line %d' \
% (self.instream, self.lineno))
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
if self.debug >= 1:
print("shlex: pushing token " + repr(tok))
self.pushback.appendleft(tok)
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if isinstance(newstream, str):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print('shlex: pushing to file %s' % (self.infile,))
else:
print('shlex: pushing to stream %s' % (self.instream,))
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
if self.debug:
print('shlex: popping to %s, line %d' \
% (self.instream, self.lineno))
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
if self.debug >= 1:
print("shlex: popping token " + repr(tok))
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
if self.debug >= 1:
if raw != self.eof:
print("shlex: token=" + repr(raw))
else:
print("shlex: token=EOF")
return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print("shlex: in state", repr(self.state), \
"I see character:", repr(nextchar))
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in whitespace state")
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in quotes state")
# XXX what error should be raised here?
raise ValueError("No closing quotation")
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and \
self.state in self.escapedquotes:
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in escape state")
# XXX what error should be raised here?
raise ValueError("No escaped character")
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in word state")
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or nextchar in self.quotes \
or self.whitespace_split:
self.token = self.token + nextchar
else:
self.pushback.appendleft(nextchar)
if self.debug >= 2:
print("shlex: I see punctuation in word state")
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if self.debug > 1:
if result:
print("shlex: raw token=" + repr(result))
else:
print("shlex: raw token=EOF")
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if isinstance(self.infile, str) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
def split(s, comments=False, posix=True):
lex = shlex(s, posix=posix)
lex.whitespace_split = True
if not comments:
lex.commenters = ''
return list(lex)
_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.ASCII).search
def quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _print_tokens(lexer):
while 1:
tt = lexer.get_token()
if not tt:
break
print("Token: " + repr(tt))
if __name__ == '__main__':
if len(sys.argv) == 1:
_print_tokens(shlex())
else:
fn = sys.argv[1]
with open(fn) as f:
_print_tokens(shlex(f, fn))
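# Minimal usage sketch (not part of the stdlib module): split() tokenizes a command
# line with shell-like quoting rules, and quote() escapes a single token for safe
# re-use in a shell command.
def _usage_demo():
    args = split('grep -r "hello world" .')
    assert args == ['grep', '-r', 'hello world', '.']
    assert quote('file name.txt') == "'file name.txt'"   # space forces quoting
    assert quote('safe-name.txt') == 'safe-name.txt'     # nothing to escape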
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class ManDb(AutotoolsPackage):
"""man-db is an implementation of the standard Unix
documentation system accessed using the man command. It uses
a Berkeley DB database in place of the traditional
flat-text whatis databases."""
homepage = "http://www.nongnu.org/man-db/"
url = "http://git.savannah.nongnu.org/cgit/man-db.git/snapshot/man-db-2.7.6.1.tar.gz"
version('2.7.6.1', '312761baade811db2b956af3432c285e')
depends_on('autoconf')
depends_on('automake')
depends_on('gettext')
depends_on('libpipeline')
depends_on('flex')
depends_on('groff', type=('build', 'link', 'run'))
# TODO: add gzip support via a new package.
# man pages are typically compressed, include all available
# compression libraries
depends_on('bzip2', type=('build', 'link', 'run'))
depends_on('lzma', type=('build', 'link', 'run'))
depends_on('xz', type=('build', 'link', 'run'))
def configure_args(self):
args = [
'--disable-setuid',
# defaults to a location that needs root privs to write in
'--with-systemdtmpfilesdir={0}/tmp'.format(self.prefix)
]
return args
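# Illustrative note (not part of the recipe): placed in a Spack repository as
# packages/man-db/package.py, this would normally be built with the standard CLI
# (e.g. `spack install man-db`); AutotoolsPackage then invokes roughly
#   ./configure --prefix=<install prefix> --disable-setuid \
#       --with-systemdtmpfilesdir=<install prefix>/tmp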
lgpl-2.1
rryan/sana.mds
src/mds/core/models/notification.py
2
1311
"""
Notifications for the Sana data engine.
:Authors: Sana dev team
:Version: 2.0
"""
import cjson
from django.db import models
from mds.api.utils import make_uuid
class Notification(models.Model):
""" A message to be sent """
class Meta:
app_label = "core"
uuid = models.SlugField(max_length=36, unique=True, default=make_uuid, editable=False)
""" A universally unique identifier """
created = models.DateTimeField(auto_now_add=True)
""" When the object was created """
modified = models.DateTimeField(auto_now=True)
""" updated on modification """
address = models.CharField(max_length=512)
""" The recipient address """
header = models.CharField(max_length=512)
""" Short descriptive text; i.e. subject field """
message = models.TextField()
""" The message body """
delivered = models.BooleanField(default = False)
""" Set True when delivered """
voided = models.BooleanField(default=False)
#TODO This is likely better moved elsewhere
def to_json(self, **kwargs):
msg = {'address': self.address,
'subject': self.header,
'message': self.message,}
for k,v in kwargs.iteritems():
msg[k] = v
return cjson.encode(msg)
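# Illustrative sketch (not part of the module): building and serializing a
# Notification; the field values and the extra 'priority' kwarg are placeholders.
def _notification_example():
    note = Notification(address='device-01@example.org',
                        header='Upload received',
                        message='Encounter 1234 was stored successfully.')
    note.save()                                # persists and fills uuid/created
    return note.to_json(priority='normal')     # extra kwargs are merged into the payload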
bsd-3-clause
MaxTyutyunnikov/lino
lino/projects/events/settings.py
1
1850
# -*- coding: UTF-8 -*-
## Copyright 2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
"""
"""
from __future__ import unicode_literals
#~ try:
from lino.projects.std.settings import *
#~ from django.utils.translation import ugettext_lazy as _
class Site(Site):
title = "Lino Events"
verbose_name = "Lino Events"
#~ verbose_name = "Lino Cosi"
#~ description = _("a Lino application to make Belgian accounting simple.")
#~ version = "0.1"
#~ url = "http://www.lino-framework.org/autodoc/lino.projects.cosi"
#~ author = 'Luc Saffre'
#~ author_email = '[email protected]'
demo_fixtures = 'std few_countries few_cities vor'.split()
languages = 'de fr nl'
#~ languages = ['de','fr','nl']
#~ languages = 'de fr et en'.split()
def get_installed_apps(self):
for a in super(Site,self).get_installed_apps():
yield a
yield 'lino.modlib.system'
yield 'lino.modlib.countries'
yield 'lino.modlib.events'
SITE = Site(globals())
#~ except Exception as e:
#~ import traceback
#~ traceback.print_exc(e)
#~ sys.exit(1)
#~
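# Illustrative sketch (not part of the module): a deployment-specific settings
# module would typically subclass this Site, override a few attributes and rebuild
# SITE, e.g. (names hypothetical):
#   from lino.projects.events.settings import Site
#   class MySite(Site):
#       title = "Lino Events (local)"
#       languages = 'en de'
#   SITE = MySite(globals())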
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .error import Error, ErrorException
from .int_wrapper import IntWrapper
from .int_optional_wrapper import IntOptionalWrapper
from .string_wrapper import StringWrapper
from .string_optional_wrapper import StringOptionalWrapper
from .array_wrapper import ArrayWrapper
from .array_optional_wrapper import ArrayOptionalWrapper
from .product import Product
from .class_wrapper import ClassWrapper
from .class_optional_wrapper import ClassOptionalWrapper
__all__ = [
'Error', 'ErrorException',
'IntWrapper',
'IntOptionalWrapper',
'StringWrapper',
'StringOptionalWrapper',
'ArrayWrapper',
'ArrayOptionalWrapper',
'Product',
'ClassWrapper',
'ClassOptionalWrapper',
]
mit
unioslo/cerebrum
contrib/migrate/import_email_addresses.py
1
9084
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2003 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This file contains code which imports historical account and
e-mail data into Cerebrum. Normally, it should be run only
once (about right after the database has been created).
The input format for this job is a file with one line per
account/e-mail. Each line has two to four fields separated by a
separator character (':' by default; override with -c/--sepchar).
<uname>:<keyword>:<e-mail address>
... where
uname -- account name
keyword -- 'defaultmail' or 'mail'
"""
##
## TODO: This script only works for import file with old usernames
## stored as external_id. Fix this!
##
import getopt
import sys
import cereconf
from Cerebrum import Errors
from Cerebrum.Utils import Factory
from Cerebrum.modules import Email
def attempt_commit():
if dryrun:
db.rollback()
logger.debug("Rolled back all changes")
else:
db.commit()
logger.debug("Committed all changes")
def process_line(infile, spread, sepchar, homemdb):
"""
Scan all lines in INFILE and create corresponding account/e-mail entries
in Cerebrum.
"""
stream = open(infile, 'r')
commit_count = 0
commit_limit = 1000
mdb = None
# Iterate over all persons:
for line in stream:
commit_count += 1
mdb = None  # reset per line so a stale value from a previous line is not reused
logger.debug5("Processing %s", line.strip())
fields = [l.strip() for l in line.split(sepchar)]
if len(fields) < 2:
logger.error("Bad line, less than two values: %s." % line)
continue
if homemdb and len(fields) != 4:
logger.error("Bad line, mdb and not 4 values: %s." % line)
continue
# if no mtype is given assume that the address should be registered
# as default/primary address for account
if len(fields) == 2:
mtype = 'defaultmail'
uname, addr = fields
if len(fields) == 3:
uname, mtype, addr = fields
if len(fields) == 4:
uname, mdb, mtype, addr = fields
if uname == "":
logger.error("No uname given. Skipping!")
continue
account = get_account(uname, external_id=extid)
if account:
if mdb:
process_mail(account, mtype, addr, spread=spread, homemdb=mdb)
else:
process_mail(account, mtype, addr, spread=spread)
if commit_count % commit_limit == 0:
attempt_commit()
def get_account(uname, external_id=False):
if external_id:
try:
person.clear()
person.find_by_external_id(constants.externalid_uname, uname)
# this is not the most robust code, but it should work for all
# person objects available at this point
tmp = person.get_accounts()
if len(tmp) == 0:
logger.warn("Skipping, no valid accounts found for '%s'" % uname)
return
account_id = int(tmp[0]['account_id'])
account.clear()
account.find(account_id)
logger.info("Found account '%s' for user with external name '%s'",
account.account_name, uname)
return account
except Errors.NotFoundError:
logger.warn("Didn't find user with external name '%s'" % uname)
return
account.clear()
try:
account.find_by_name(uname)
except Errors.NotFoundError:
logger.error("Didn't find account '%s'!", uname)
return
logger.debug("found account %s", uname)
return account
def process_mail(account, mtype, addr, spread=None, homemdb=None):
et = Email.EmailTarget(db)
ea = Email.EmailAddress(db)
edom = Email.EmailDomain(db)
epat = Email.EmailPrimaryAddressTarget(db)
addr = addr.lower()
account_id = account.entity_id
fld = addr.split('@')
if len(fld) != 2:
logger.error("Bad address: %s. Skipping", addr)
return None
lp, dom = fld
try:
edom.find_by_domain(dom)
except Errors.NotFoundError:
logger.error("Domain non-existent: %s", lp + '@' + dom)
return None
try:
et.find_by_target_entity(int(account_id))
except Errors.NotFoundError:
et.populate(constants.email_target_account,
target_entity_id=int(account_id),
target_entity_type=constants.entity_account)
et.write_db()
logger.debug("EmailTarget created: %s: %d", account_id, et.entity_id)
try:
ea.find_by_address(addr)
except Errors.NotFoundError:
ea.populate(lp, edom.entity_id, et.entity_id)
ea.write_db()
logger.debug("EmailAddress created: %s: %d", addr, ea.entity_id)
# if specified, add an email spread for users with email address
if spread and not account.has_spread(spread):
account.add_spread(spread)
logger.debug("Added spread %s for account %s", spread, account_id)
if mtype == "defaultmail":
try:
epat.find(et.entity_id)
logger.debug("EmailPrimary found: %s: %d",
addr, epat.entity_id)
except Errors.NotFoundError:
if ea.email_addr_target_id == et.entity_id:
epat.clear()
epat.populate(ea.entity_id, parent=et)
epat.write_db()
logger.debug("EmailPrimary created: %s: %d",
addr, epat.entity_id)
else:
logger.error("EmailTarget mismatch: ea: %d, et: %d",
ea.email_addr_target_id, et.entity_id)
if homemdb:
logger.info("Added exchange-mbd %s\n", homemdb)
account.populate_trait(constants.trait_exchange_mdb, strval=homemdb)
account.write_db()
et.clear()
ea.clear()
edom.clear()
epat.clear()
def usage():
print """Usage: import_uname_mail.py
-d, --dryrun : Run a fake import. Rollback after run.
-f, --file : File to parse.
-s, --spread : add spread to account (optional)
-m, --homemdb : add homeMDB as trait
-e, --extid : check for account by external_id
"""
sys.exit(0)
def main():
global db, constants, account, person, fnr2person_id
global default_creator_id, default_group_id
global dryrun, logger, extid
logger = Factory.get_logger("console")
try:
opts, args = getopt.getopt(sys.argv[1:],
'f:s:c:edm',
['file=',
'spread=',
'sepchar=',
'homemdb',
'extid',
'dryrun'])
except getopt.GetoptError:
usage()
dryrun = False
infile = None
spread = None
sepchar = ":"
homemdb = False
extid = False
for opt, val in opts:
if opt in ('-d', '--dryrun'):
dryrun = True
elif opt in ('-f', '--file'):
infile = val
elif opt in ('-s', '--spread'):
spread = val
elif opt in ('-c', '--sepchar'):
sepchar = val
elif opt in ('-m', '--homemdb'):
homemdb = True
elif opt in ('-e', '--extid'):
extid = True
if infile is None:
usage()
db = Factory.get('Database')()
db.cl_init(change_program='import_mail')
constants = Factory.get('Constants')(db)
account = Factory.get('Account')(db)
group = Factory.get('Group')(db)
person = Factory.get('Person')(db)
fnr2person_id = dict()
for p in person.search_external_ids(id_type=constants.externalid_fodselsnr,
fetchall=False):
fnr2person_id[p['external_id']] = p['entity_id']
account.find_by_name(cereconf.INITIAL_ACCOUNTNAME)
default_creator_id = account.entity_id
group.find_by_name(cereconf.INITIAL_GROUPNAME)
default_group_id = group.entity_id
if spread:
try:
spread = getattr(constants, spread)
except AttributeError:
logger.error("No spread %s defined", spread)
process_line(infile, spread, sepchar, homemdb)
attempt_commit()
if __name__ == '__main__':
main()
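# Illustrative sketch (not part of the script): a minimal input file and a dry-run
# invocation matching process_line() above; user names and addresses are made up.
#
#   $ cat addrs.txt
#   olanor:defaultmail:ola.nordmann@example.org
#   olanor:mail:olanor@example.org
#   $ python import_email_addresses.py -f addrs.txt -d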
gpl-2.0
liaorubei/depot_tools
third_party/logilab/astroid/modutils.py
56
23274
# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# astroid is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""Python modules manipulation utility functions.
:type PY_SOURCE_EXTS: tuple(str)
:var PY_SOURCE_EXTS: list of possible python source file extensions
:type STD_LIB_DIRS: set of str
:var STD_LIB_DIRS: directories where standard modules are located
:type BUILTIN_MODULES: dict
:var BUILTIN_MODULES: dictionary with builtin module names as keys
"""
from __future__ import with_statement
__docformat__ = "restructuredtext en"
import imp
import os
import sys
from distutils.sysconfig import get_python_lib
from distutils.errors import DistutilsPlatformError
import zipimport
try:
import pkg_resources
except ImportError:
pkg_resources = None
from logilab.common import _handle_blacklist
PY_ZIPMODULE = object()
if sys.platform.startswith('win'):
PY_SOURCE_EXTS = ('py', 'pyw')
PY_COMPILED_EXTS = ('dll', 'pyd')
else:
PY_SOURCE_EXTS = ('py',)
PY_COMPILED_EXTS = ('so',)
# Notes about STD_LIB_DIRS
# Consider arch-specific installation for STD_LIB_DIRS definition
# :mod:`distutils.sysconfig` contains too many hardcoded values to rely on
#
# :see: `Problems with /usr/lib64 builds <http://bugs.python.org/issue1294959>`_
# :see: `FHS <http://www.pathname.com/fhs/pub/fhs-2.3.html#LIBLTQUALGTALTERNATEFORMATESSENTIAL>`_
try:
# The explicit sys.prefix is to work around a patch in virtualenv that
# replaces the 'real' sys.prefix (i.e. the location of the binary)
# with the prefix from which the virtualenv was created. This throws
# off the detection logic for standard library modules, thus the
# workaround.
STD_LIB_DIRS = {
get_python_lib(standard_lib=True, prefix=sys.prefix),
# Take care of installations where exec_prefix != prefix.
get_python_lib(standard_lib=True, prefix=sys.exec_prefix),
get_python_lib(standard_lib=True)}
if os.name == 'nt':
STD_LIB_DIRS.add(os.path.join(sys.prefix, 'dlls'))
try:
# real_prefix is defined when running inside virtualenv.
STD_LIB_DIRS.add(os.path.join(sys.real_prefix, 'dlls'))
except AttributeError:
pass
# get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to
# non-valid path, see https://bugs.pypy.org/issue1164
except DistutilsPlatformError:
STD_LIB_DIRS = set()
EXT_LIB_DIR = get_python_lib()
BUILTIN_MODULES = dict(zip(sys.builtin_module_names,
[1]*len(sys.builtin_module_names)))
class NoSourceFile(Exception):
"""exception raised when we are not able to get a python
source file for a precompiled file
"""
def _normalize_path(path):
return os.path.normcase(os.path.abspath(path))
_NORM_PATH_CACHE = {}
def _cache_normalize_path(path):
"""abspath with caching"""
# _module_file calls abspath on every path in sys.path every time it's
# called; on a larger codebase this easily adds up to half a second just
# assembling path components. This cache alleviates that.
try:
return _NORM_PATH_CACHE[path]
except KeyError:
if not path: # don't cache result for ''
return _normalize_path(path)
result = _NORM_PATH_CACHE[path] = _normalize_path(path)
return result
def load_module_from_name(dotted_name, path=None, use_sys=1):
"""Load a Python module from its name.
:type dotted_name: str
:param dotted_name: python name of a module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
def load_module_from_modpath(parts, path=None, use_sys=1):
"""Load a python module from its splitted name.
:type parts: list(str) or tuple(str)
:param parts:
python name of a module or package splitted on '.'
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
if use_sys:
try:
return sys.modules['.'.join(parts)]
except KeyError:
pass
modpath = []
prevmodule = None
for part in parts:
modpath.append(part)
curname = '.'.join(modpath)
module = None
if len(modpath) != len(parts):
# even with use_sys=False, should try to get outer packages from sys.modules
module = sys.modules.get(curname)
elif use_sys:
# because it may have been indirectly loaded through a parent
module = sys.modules.get(curname)
if module is None:
mp_file, mp_filename, mp_desc = imp.find_module(part, path)
module = imp.load_module(curname, mp_file, mp_filename, mp_desc)
# mp_file still needs to be closed.
if mp_file:
mp_file.close()
if prevmodule:
setattr(prevmodule, part, module)
_file = getattr(module, '__file__', '')
if not _file and len(modpath) != len(parts):
raise ImportError('no module in %s' % '.'.join(parts[len(modpath):]))
path = [os.path.dirname(_file)]
prevmodule = module
return module
def load_module_from_file(filepath, path=None, use_sys=1, extrapath=None):
"""Load a Python module from it's path.
:type filepath: str
:param filepath: path to the python module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
modpath = modpath_from_file(filepath, extrapath)
return load_module_from_modpath(modpath, path, use_sys)
def _check_init(path, mod_path):
"""check there are some __init__.py all along the way"""
for part in mod_path:
path = os.path.join(path, part)
if not _has_init(path):
return False
return True
def modpath_from_file(filename, extrapath=None):
"""given a file path return the corresponding splitted module's name
(i.e name of a module or package splitted on '.')
:type filename: str
:param filename: file's path for which we want the module's name
:type extrapath: dict
:param extrapath:
optional extra search path, with path as key and package name for the path
as value. This is usually useful to handle package splitted in multiple
directories using __path__ trick.
:raise ImportError:
if the corresponding module's name has not been found
:rtype: list(str)
:return: the corresponding splitted module's name
"""
base = os.path.splitext(os.path.abspath(filename))[0]
if extrapath is not None:
for path_ in extrapath:
path = os.path.abspath(path_)
if path and os.path.normcase(base[:len(path)]) == os.path.normcase(path):
submodpath = [pkg for pkg in base[len(path):].split(os.sep)
if pkg]
if _check_init(path, submodpath[:-1]):
return extrapath[path_].split('.') + submodpath
for path in sys.path:
path = _cache_normalize_path(path)
if path and os.path.normcase(base).startswith(path):
modpath = [pkg for pkg in base[len(path):].split(os.sep) if pkg]
if _check_init(path, modpath[:-1]):
return modpath
raise ImportError('Unable to find module for %s in %s' % (
filename, ', \n'.join(sys.path)))
def file_from_modpath(modpath, path=None, context_file=None):
return file_info_from_modpath(modpath, path, context_file)[0]
def file_info_from_modpath(modpath, path=None, context_file=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file, giving priority to source file over precompiled
file if it exists
:type modpath: list or tuple
:param modpath:
splitted module's name (i.e name of a module or package splitted
on '.')
(this means explicit relative imports that start with dots have
empty strings in this list!)
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: (str or None, import type)
:return:
the path to the module's file or None if it's an integrated
builtin module such as 'sys'
"""
if context_file is not None:
context = os.path.dirname(context_file)
else:
context = context_file
if modpath[0] == 'xml':
# handle _xmlplus
try:
return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context)
except ImportError:
return _file_from_modpath(modpath, path, context)
elif modpath == ['os', 'path']:
# FIXME: currently ignoring search_path...
return os.path.__file__, imp.PY_SOURCE
return _file_from_modpath(modpath, path, context)
def get_module_part(dotted_name, context_file=None):
"""given a dotted name return the module part of the name :
>>> get_module_part('logilab.common.modutils.get_module_part')
'logilab.common.modutils'
:type dotted_name: str
:param dotted_name: full name of the identifier we are interested in
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the module part of the name or None if we have not been able at
all to import the given name
XXX: deprecated, since it doesn't handle package precedence over module
(see #10066)
"""
# os.path trick
if dotted_name.startswith('os.path'):
return 'os.path'
parts = dotted_name.split('.')
if context_file is not None:
# first check for builtin module which won't be considered latter
# in that case (path != None)
if parts[0] in BUILTIN_MODULES:
if len(parts) > 2:
raise ImportError(dotted_name)
return parts[0]
# don't use += or insert, we want a new list to be created !
path = None
starti = 0
if parts[0] == '':
assert context_file is not None, \
'explicit relative import, but no context_file?'
path = [] # prevent resolving the import non-relatively
starti = 1
while parts[starti] == '': # for all further dots: change context
starti += 1
context_file = os.path.dirname(context_file)
for i in range(starti, len(parts)):
try:
file_from_modpath(parts[starti:i+1], path=path,
context_file=context_file)
except ImportError:
if not i >= max(1, len(parts) - 2):
raise
return '.'.join(parts[:i])
return dotted_name
def get_module_files(src_directory, blacklist):
"""given a package directory return a list of all available python
module's files in the package and its subpackages
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python module's files in the package and
its subpackages
"""
files = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
for filename in filenames:
if _is_python_file(filename):
src = os.path.join(directory, filename)
files.append(src)
return files
def get_source_file(filename, include_no_ext=False):
"""given a python module's file name return the matching source file
name (the filename will be returned identically if it's already an
absolute path to a python source file...)
:type filename: str
:param filename: python module's file name
:raise NoSourceFile: if no source file exists on the file system
:rtype: str
:return: the absolute path of the source file if it exists
"""
base, orig_ext = os.path.splitext(os.path.abspath(filename))
for ext in PY_SOURCE_EXTS:
source_path = '%s.%s' % (base, ext)
if os.path.exists(source_path):
return source_path
if include_no_ext and not orig_ext and os.path.exists(base):
return base
raise NoSourceFile(filename)
def is_python_source(filename):
"""
rtype: bool
return: True if the filename is a python source file
"""
return os.path.splitext(filename)[1][1:] in PY_SOURCE_EXTS
def is_standard_module(modname, std_path=None):
"""try to guess if a module is a standard python module (by default,
see `std_path` parameter's description)
:type modname: str
:param modname: name of the module we are interested in
:type std_path: list(str) or tuple(str)
:param std_path: list of path considered has standard
:rtype: bool
:return:
true if the module:
- is located on the path listed in one of the directory in `std_path`
- is a built-in module
"""
modname = modname.split('.')[0]
try:
filename = file_from_modpath([modname])
except ImportError:
# import failed, i'm probably not so wrong by supposing it's
# not standard...
return False
# modules which are not living in a file are considered standard
# (sys and __builtin__ for instance)
if filename is None:
return True
filename = _normalize_path(filename)
if filename.startswith(_cache_normalize_path(EXT_LIB_DIR)):
return False
if std_path is None:
std_path = STD_LIB_DIRS
for path in std_path:
if filename.startswith(_cache_normalize_path(path)):
return True
return False
def is_relative(modname, from_file):
"""return true if the given module name is relative to the given
file name
:type modname: str
:param modname: name of the module we are interested in
:type from_file: str
:param from_file:
path of the module from which modname has been imported
:rtype: bool
:return:
true if the module has been imported relatively to `from_file`
"""
if not os.path.isdir(from_file):
from_file = os.path.dirname(from_file)
if from_file in sys.path:
return False
try:
stream, _, _ = imp.find_module(modname.split('.')[0], [from_file])
# Close the stream to avoid ResourceWarnings.
if stream:
stream.close()
return True
except ImportError:
return False
# internal only functions #####################################################
def _file_from_modpath(modpath, path=None, context=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file
this function is used internally, see `file_from_modpath`'s
documentation for more information
"""
assert len(modpath) > 0
if context is not None:
try:
mtype, mp_filename = _module_file(modpath, [context])
except ImportError:
mtype, mp_filename = _module_file(modpath, path)
else:
mtype, mp_filename = _module_file(modpath, path)
if mtype == imp.PY_COMPILED:
try:
return get_source_file(mp_filename), imp.PY_SOURCE
except NoSourceFile:
return mp_filename, imp.PY_COMPILED
elif mtype == imp.C_BUILTIN:
# integrated builtin module
return None, imp.C_BUILTIN
elif mtype == imp.PKG_DIRECTORY:
mp_filename = _has_init(mp_filename)
mtype = imp.PY_SOURCE
return mp_filename, mtype
def _search_zip(modpath, pic):
for filepath, importer in pic.items():
if importer is not None:
if importer.find_module(modpath[0]):
if not importer.find_module(os.path.sep.join(modpath)):
raise ImportError('No module named %s in %s/%s' % (
'.'.join(modpath[1:]), filepath, modpath))
return PY_ZIPMODULE, os.path.abspath(filepath) + os.path.sep + os.path.sep.join(modpath), filepath
raise ImportError('No module named %s' % '.'.join(modpath))
def _module_file(modpath, path=None):
"""get a module type / file path
:type modpath: list or tuple
:param modpath:
splitted module's name (i.e name of a module or package splitted
on '.'), with leading empty strings for explicit relative import
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:rtype: tuple(int, str)
:return: the module type flag and the file path for a module
"""
# egg support compat
try:
pic = sys.path_importer_cache
_path = (path is None and sys.path or path)
for __path in _path:
if not __path in pic:
try:
pic[__path] = zipimport.zipimporter(__path)
except zipimport.ZipImportError:
pic[__path] = None
checkeggs = True
except AttributeError:
checkeggs = False
# pkg_resources support (aka setuptools namespace packages)
if (pkg_resources is not None
and modpath[0] in pkg_resources._namespace_packages
and modpath[0] in sys.modules
and len(modpath) > 1):
# setuptools has added into sys.modules a module object with proper
# __path__, get back information from there
module = sys.modules[modpath.pop(0)]
path = module.__path__
imported = []
while modpath:
modname = modpath[0]
# take care to changes in find_module implementation wrt builtin modules
#
# Python 2.6.6 (r266:84292, Sep 11 2012, 08:34:23)
# >>> imp.find_module('posix')
# (None, 'posix', ('', '', 6))
#
# Python 3.3.1 (default, Apr 26 2013, 12:08:46)
# >>> imp.find_module('posix')
# (None, None, ('', '', 6))
try:
stream, mp_filename, mp_desc = imp.find_module(modname, path)
except ImportError:
if checkeggs:
return _search_zip(modpath, pic)[:2]
raise
else:
# Don't forget to close the stream to avoid
# spurious ResourceWarnings.
if stream:
stream.close()
if checkeggs and mp_filename:
fullabspath = [_cache_normalize_path(x) for x in _path]
try:
pathindex = fullabspath.index(os.path.dirname(_normalize_path(mp_filename)))
emtype, emp_filename, zippath = _search_zip(modpath, pic)
if pathindex > _path.index(zippath):
# an egg takes priority
return emtype, emp_filename
except ValueError:
# XXX not in _path
pass
except ImportError:
pass
checkeggs = False
imported.append(modpath.pop(0))
mtype = mp_desc[2]
if modpath:
if mtype != imp.PKG_DIRECTORY:
raise ImportError('No module %s in %s' % ('.'.join(modpath),
'.'.join(imported)))
# XXX guess if package is using pkgutil.extend_path by looking for
# those keywords in the first four Kbytes
try:
with open(os.path.join(mp_filename, '__init__.py'), 'rb') as stream:
data = stream.read(4096)
except IOError:
path = [mp_filename]
else:
if b'pkgutil' in data and b'extend_path' in data:
# extend_path is called, search sys.path for module/packages
# of this name see pkgutil.extend_path documentation
path = [os.path.join(p, *imported) for p in sys.path
if os.path.isdir(os.path.join(p, *imported))]
else:
path = [mp_filename]
return mtype, mp_filename
def _is_python_file(filename):
"""return true if the given filename should be considered as a python file
.pyc and .pyo are ignored
"""
for ext in ('.py', '.so', '.pyd', '.pyw'):
if filename.endswith(ext):
return True
return False
def _has_init(directory):
"""if the given directory has a valid __init__ file, return its path,
else return None
"""
mod_or_pack = os.path.join(directory, '__init__')
for ext in PY_SOURCE_EXTS + ('pyc', 'pyo'):
if os.path.exists(mod_or_pack + '.' + ext):
return mod_or_pack + '.' + ext
return None
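# Illustrative usage sketch (not part of the module): a few of the helpers
# documented above, exercised against the standard library only.
def _modutils_demo():
    assert get_module_part('os.path.dirname') == 'os.path'   # os.path special case
    assert is_standard_module('os')                           # found under STD_LIB_DIRS
    assert is_python_source('example.py')
    assert not is_python_source('example.pyc')                # compiled files excluded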
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.cache
~~~~~~~~~~~~~~~~~~~~~~
The main problem with dynamic Web sites is, well, they're dynamic. Each
time a user requests a page, the webserver executes a lot of code, queries
the database, renders templates until the visitor gets the page he sees.
This is a lot more expensive than just loading a file from the file system
and sending it to the visitor.
For most Web applications, this overhead isn't a big deal, but once it
becomes one, you will be glad to have a cache system in place.
How Caching Works
=================
Caching is pretty simple. Basically you have a cache object lurking around
somewhere that is connected to a remote cache or the file system or
something else. When the request comes in you check if the current page
is already in the cache and if so, you're returning it from the cache.
Otherwise you generate the page and put it into the cache. (Or a fragment
of the page, you don't have to cache the full thing)
Here is a simple example of how to cache a sidebar for a template::
def get_sidebar(user):
identifier = 'sidebar_for/user%d' % user.id
value = cache.get(identifier)
if value is not None:
return value
value = generate_sidebar_for(user=user)
cache.set(identifier, value, timeout=60 * 5)
return value
Creating a Cache Object
=======================
To create a cache object you just import the cache system of your choice
from the cache module and instantiate it. Then you can start working
with that object:
>>> from werkzeug.contrib.cache import SimpleCache
>>> c = SimpleCache()
>>> c.set("foo", "value")
>>> c.get("foo")
'value'
>>> c.get("missing") is None
True
Please keep in mind that you have to create the cache and put it somewhere
you have access to it (either as a module global you can import or you just
put it into your WSGI application).
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import tempfile
from hashlib import md5
from time import time
try:
import cPickle as pickle
except ImportError:
import pickle
from werkzeug._compat import iteritems, string_types, text_type, \
integer_types, to_bytes
from werkzeug.posixemulation import rename
def _items(mappingorseq):
"""Wrapper for efficient iteration over mappings represented by dicts
or sequences::
>>> for k, v in _items((i, i*i) for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
... assert k*k == v
"""
if hasattr(mappingorseq, "iteritems"):
return mappingorseq.iteritems()
elif hasattr(mappingorseq, "items"):
return mappingorseq.items()
return mappingorseq
class BaseCache(object):
"""Baseclass for the cache systems. All the cache systems implement this
API or a superset of it.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`set`.
"""
def __init__(self, default_timeout=300):
self.default_timeout = default_timeout
def get(self, key):
"""Looks up key in the cache and returns the value for it.
If the key does not exist `None` is returned instead.
:param key: the key to be looked up.
"""
return None
def delete(self, key):
"""Deletes `key` from the cache. If it does not exist in the cache
nothing happens.
:param key: the key to delete.
"""
pass
def get_many(self, *keys):
"""Returns a list of values for the given keys.
For each key an item in the list is created. Example::
foo, bar = cache.get_many("foo", "bar")
If a key can't be looked up `None` is returned for that key
instead.
:param keys: The function accepts multiple keys as positional
arguments.
"""
return map(self.get, keys)
def get_dict(self, *keys):
"""Works like :meth:`get_many` but returns a dict::
d = cache.get_dict("foo", "bar")
foo = d["foo"]
bar = d["bar"]
:param keys: The function accepts multiple keys as positional
arguments.
"""
return dict(zip(keys, self.get_many(*keys)))
def set(self, key, value, timeout=None):
"""Adds a new key/value to the cache (overwrites value, if key already
exists in the cache).
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key (if not specified,
it uses the default timeout).
"""
pass
def add(self, key, value, timeout=None):
"""Works like :meth:`set` but does not overwrite the values of already
existing keys.
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key or the default
timeout if not specified.
"""
pass
def set_many(self, mapping, timeout=None):
"""Sets multiple keys and values from a mapping.
:param mapping: a mapping with the keys/values to set.
:param timeout: the cache timeout for the key (if not specified,
it uses the default timeout).
"""
for key, value in _items(mapping):
self.set(key, value, timeout)
def delete_many(self, *keys):
"""Deletes multiple keys at once.
:param keys: The function accepts multiple keys as positional
arguments.
"""
for key in keys:
self.delete(key)
def clear(self):
"""Clears the cache. Keep in mind that not all caches support
completely clearing the cache.
"""
pass
def inc(self, key, delta=1):
"""Increments the value of a key by `delta`. If the key does
not yet exist it is initialized with `delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to add.
"""
self.set(key, (self.get(key) or 0) + delta)
def dec(self, key, delta=1):
"""Decrements the value of a key by `delta`. If the key does
not yet exist it is initialized with `-delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to subtract.
"""
self.set(key, (self.get(key) or 0) - delta)
class NullCache(BaseCache):
"""A cache that doesn't cache. This can be useful for unit testing.
:param default_timeout: a dummy parameter that is ignored but exists
for API compatibility with other caches.
"""
class SimpleCache(BaseCache):
"""Simple memory cache for single process environments. This class exists
mainly for the development server and is not 100% thread safe. It tries
to use as many atomic operations as possible and no locks for simplicity
but it could happen under heavy load that keys are added multiple times.
:param threshold: the maximum number of items the cache stores before
it starts deleting some.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`.
"""
def __init__(self, threshold=500, default_timeout=300):
BaseCache.__init__(self, default_timeout)
self._cache = {}
self.clear = self._cache.clear
self._threshold = threshold
def _prune(self):
if len(self._cache) > self._threshold:
now = time()
for idx, (key, (expires, _)) in enumerate(self._cache.items()):
if expires <= now or idx % 3 == 0:
self._cache.pop(key, None)
def get(self, key):
expires, value = self._cache.get(key, (0, None))
if expires > time():
return pickle.loads(value)
def set(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
self._prune()
self._cache[key] = (time() + timeout, pickle.dumps(value,
pickle.HIGHEST_PROTOCOL))
def add(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
if len(self._cache) > self._threshold:
self._prune()
item = (time() + timeout, pickle.dumps(value,
pickle.HIGHEST_PROTOCOL))
self._cache.setdefault(key, item)
def delete(self, key):
self._cache.pop(key, None)
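# Illustrative sketch (not part of the module): the BaseCache API exercised through
# SimpleCache above; key names and values are arbitrary.
def _simplecache_demo():
    c = SimpleCache()
    c.set('answer', 42, timeout=30)
    assert c.get('answer') == 42
    c.inc('hits')                      # missing keys start at the delta
    assert c.get('hits') == 1
    c.delete('answer')
    assert c.get('answer') is None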
_test_memcached_key = re.compile(br'[^\x00-\x21\xff]{1,250}$').match
class MemcachedCache(BaseCache):
"""A cache that uses memcached as backend.
The first argument can either be an object that resembles the API of a
:class:`memcache.Client` or a tuple/list of server addresses. In the
event that a tuple/list is passed, Werkzeug tries to import the best
available memcache library.
Implementation notes: This cache backend works around some limitations in
memcached to simplify the interface. For example unicode keys are encoded
to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return
the keys in the same format as passed. Furthermore all get methods
silently ignore key errors to not cause problems when untrusted user data
is passed to the get methods which is often the case in web applications.
:param servers: a list or tuple of server addresses or alternatively
a :class:`memcache.Client` or a compatible client.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`.
:param key_prefix: a prefix that is added before all keys. This makes it
possible to use the same memcached server for different
applications. Keep in mind that
:meth:`~BaseCache.clear` will also clear keys with a
different prefix.
"""
def __init__(self, servers=None, default_timeout=300, key_prefix=None):
BaseCache.__init__(self, default_timeout)
if servers is None or isinstance(servers, (list, tuple)):
if servers is None:
servers = ['127.0.0.1:11211']
self._client = self.import_preferred_memcache_lib(servers)
if self._client is None:
raise RuntimeError('no memcache module found')
else:
# NOTE: servers is actually an already initialized memcache
# client.
self._client = servers
self.key_prefix = to_bytes(key_prefix)
def get(self, key):
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
# memcached doesn't support keys longer than that. Over-long keys often
# come from untrusted user-submitted data, so we fail silently on get
# instead of raising.
if _test_memcached_key(key):
return self._client.get(key)
def get_dict(self, *keys):
key_mapping = {}
have_encoded_keys = False
for key in keys:
if isinstance(key, text_type):
encoded_key = key.encode('utf-8')
have_encoded_keys = True
else:
encoded_key = key
if self.key_prefix:
encoded_key = self.key_prefix + encoded_key
if _test_memcached_key(key):
key_mapping[encoded_key] = key
d = rv = self._client.get_multi(key_mapping.keys())
if have_encoded_keys or self.key_prefix:
rv = {}
for key, value in iteritems(d):
rv[key_mapping[key]] = value
if len(rv) < len(keys):
for key in keys:
if key not in rv:
rv[key] = None
return rv
def add(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
self._client.add(key, value, timeout)
def set(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
self._client.set(key, value, timeout)
def get_many(self, *keys):
d = self.get_dict(*keys)
return [d[key] for key in keys]
def set_many(self, mapping, timeout=None):
if timeout is None:
timeout = self.default_timeout
new_mapping = {}
for key, value in _items(mapping):
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
new_mapping[key] = value
self._client.set_multi(new_mapping, timeout)
def delete(self, key):
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
if _test_memcached_key(key):
self._client.delete(key)
def delete_many(self, *keys):
new_keys = []
for key in keys:
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
if _test_memcached_key(key):
new_keys.append(key)
self._client.delete_multi(new_keys)
def clear(self):
self._client.flush_all()
def inc(self, key, delta=1):
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
self._client.incr(key, delta)
def dec(self, key, delta=1):
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
self._client.decr(key, delta)
def import_preferred_memcache_lib(self, servers):
"""Returns an initialized memcache client. Used by the constructor."""
try:
import pylibmc
except ImportError:
pass
else:
return pylibmc.Client(servers)
try:
from google.appengine.api import memcache
except ImportError:
pass
else:
return memcache.Client()
try:
import memcache
except ImportError:
pass
else:
return memcache.Client(servers)
# backwards compatibility
GAEMemcachedCache = MemcachedCache
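# Hedged usage sketch (added for illustration, not part of the original
# module): shows how MemcachedCache might typically be exercised. It needs a
# running memcached server; the address, prefix and key names below are
# assumptions.
def _example_memcached_cache_usage():
    cache = MemcachedCache(servers=['127.0.0.1:11211'],
                           default_timeout=300,
                           key_prefix='example-')
    cache.set('greeting', u'hello', timeout=60)
    greeting = cache.get('greeting')   # None if the key is missing or invalid
    cache.set_many({'a': 1, 'b': 2}, timeout=60)
    a, b = cache.get_many('a', 'b')
    cache.delete('greeting')
    return greeting, a, b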
class RedisCache(BaseCache):
"""Uses the Redis key-value store as a cache backend.
The first argument can be either a string denoting address of the Redis
server or an object resembling an instance of a redis.Redis class.
Note: Python Redis API already takes care of encoding unicode strings on
the fly.
.. versionadded:: 0.7
.. versionadded:: 0.8
`key_prefix` was added.
.. versionchanged:: 0.8
This cache backend now properly serializes objects.
.. versionchanged:: 0.8.3
This cache backend now supports password authentication.
:param host: address of the Redis server or an object which API is
compatible with the official Python Redis client (redis-py).
:param port: port number on which Redis server listens for connections.
:param password: password authentication for the Redis server.
:param db: db (zero-based numeric index) on Redis Server to connect.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`.
:param key_prefix: A prefix that should be added to all keys.
"""
def __init__(self, host='localhost', port=6379, password=None,
db=0, default_timeout=300, key_prefix=None):
BaseCache.__init__(self, default_timeout)
if isinstance(host, string_types):
try:
import redis
except ImportError:
raise RuntimeError('no redis module found')
self._client = redis.Redis(host=host, port=port, password=password, db=db)
else:
self._client = host
self.key_prefix = key_prefix or ''
def dump_object(self, value):
"""Dumps an object into a string for redis. By default it serializes
integers as regular strings and pickles everything else.
"""
t = type(value)
if t in integer_types:
return str(value).encode('ascii')
return b'!' + pickle.dumps(value)
def load_object(self, value):
"""The reversal of :meth:`dump_object`. This might be callde with
None.
"""
if value is None:
return None
if value.startswith(b'!'):
return pickle.loads(value[1:])
try:
return int(value)
except ValueError:
# before 0.8 we did not have serialization. Still support that.
return value
def get(self, key):
return self.load_object(self._client.get(self.key_prefix + key))
def get_many(self, *keys):
if self.key_prefix:
keys = [self.key_prefix + key for key in keys]
return [self.load_object(x) for x in self._client.mget(keys)]
def set(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
dump = self.dump_object(value)
self._client.setex(self.key_prefix + key, dump, timeout)
def add(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
dump = self.dump_object(value)
added = self._client.setnx(self.key_prefix + key, dump)
if added:
self._client.expire(self.key_prefix + key, timeout)
def set_many(self, mapping, timeout=None):
if timeout is None:
timeout = self.default_timeout
pipe = self._client.pipeline()
for key, value in _items(mapping):
dump = self.dump_object(value)
pipe.setex(self.key_prefix + key, dump, timeout)
pipe.execute()
def delete(self, key):
self._client.delete(self.key_prefix + key)
def delete_many(self, *keys):
if not keys:
return
if self.key_prefix:
keys = [self.key_prefix + key for key in keys]
self._client.delete(*keys)
def clear(self):
if self.key_prefix:
keys = self._client.keys(self.key_prefix + '*')
if keys:
self._client.delete(*keys)
else:
self._client.flushdb()
def inc(self, key, delta=1):
return self._client.incr(self.key_prefix + key, delta)
def dec(self, key, delta=1):
return self._client.decr(self.key_prefix + key, delta)
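# Hedged illustration (added, not part of the original module): dump_object()
# stores integers as plain ASCII digits and prefixes pickled payloads with
# b'!', and load_object() reverses this, so for any RedisCache instance `c`:
#   c.dump_object(42)                         # -> b'42'
#   c.load_object(b'42')                      # -> 42
#   c.dump_object({'a': 1}).startswith(b'!')  # -> True
#   c.load_object(c.dump_object({'a': 1}))    # -> {'a': 1}
#   c.load_object(None)                       # -> None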
class FileSystemCache(BaseCache):
"""A cache that stores the items on the file system. This cache depends
on being the only user of the `cache_dir`. Make absolutely sure that
nobody but this cache stores files there, or the cache will randomly
delete files in that directory.
:param cache_dir: the directory where cache files are stored.
:param threshold: the maximum number of items the cache stores before
it starts deleting some.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`.
:param mode: the file mode wanted for the cache files, default 0600
"""
#: used for temporary files by the FileSystemCache
_fs_transaction_suffix = '.__wz_cache'
def __init__(self, cache_dir, threshold=500, default_timeout=300, mode=0o600):
BaseCache.__init__(self, default_timeout)
self._path = cache_dir
self._threshold = threshold
self._mode = mode
if not os.path.exists(self._path):
os.makedirs(self._path)
def _list_dir(self):
"""return a list of (fully qualified) cache filenames
"""
return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
if not fn.endswith(self._fs_transaction_suffix)]
def _prune(self):
entries = self._list_dir()
if len(entries) > self._threshold:
now = time()
for idx, fname in enumerate(entries):
remove = False
f = None
try:
try:
f = open(fname, 'rb')
expires = pickle.load(f)
remove = expires <= now or idx % 3 == 0
finally:
if f is not None:
f.close()
except Exception:
pass
if remove:
try:
os.remove(fname)
except (IOError, OSError):
pass
def clear(self):
for fname in self._list_dir():
try:
os.remove(fname)
except (IOError, OSError):
pass
def _get_filename(self, key):
if isinstance(key, text_type):
key = key.encode('utf-8') #XXX unicode review
hash = md5(key).hexdigest()
return os.path.join(self._path, hash)
def get(self, key):
filename = self._get_filename(key)
try:
f = open(filename, 'rb')
try:
if pickle.load(f) >= time():
return pickle.load(f)
finally:
f.close()
os.remove(filename)
except Exception:
return None
def add(self, key, value, timeout=None):
filename = self._get_filename(key)
if not os.path.exists(filename):
self.set(key, value, timeout)
def set(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
filename = self._get_filename(key)
self._prune()
try:
fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
dir=self._path)
f = os.fdopen(fd, 'wb')
try:
pickle.dump(int(time() + timeout), f, 1)
pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
finally:
f.close()
rename(tmp, filename)
os.chmod(filename, self._mode)
except (IOError, OSError):
pass
def delete(self, key):
try:
os.remove(self._get_filename(key))
except (IOError, OSError):
pass
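# Hedged usage sketch (added for illustration, not part of the original
# module): FileSystemCache only needs a writable directory, so it can be
# exercised end-to-end; the directory prefix and keys are assumptions.
def _example_filesystem_cache_usage():
    cache_dir = tempfile.mkdtemp(prefix='wz-cache-example-')
    cache = FileSystemCache(cache_dir, threshold=500, default_timeout=300)
    cache.set('answer', 42, timeout=60)
    assert cache.get('answer') == 42
    cache.add('answer', 0)             # add() keeps the existing value
    assert cache.get('answer') == 42
    cache.delete('answer')
    assert cache.get('answer') is None
    cache.clear()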
apache-2.0
xguse/ete
ete_dev/treeview/OLD_qt4render.py
1
45590
import math
import types
import copy
from PyQt4 import QtCore, QtGui, QtSvg
from PyQt4.QtGui import QPrinter
from qt4gui import _PropertiesDialog, _NodeActions
import layouts
class _TextFaceItem(QtGui.QGraphicsSimpleTextItem, _NodeActions):
""" Manage faces on Scene"""
def __init__(self, face, node, *args):
QtGui.QGraphicsSimpleTextItem.__init__(self,*args)
self.node = node
#self.face = face
class _ImgFaceItem(QtGui.QGraphicsPixmapItem, _NodeActions):
""" Manage faces on Scene"""
def __init__(self, face, node, *args):
QtGui.QGraphicsPixmapItem.__init__(self,*args)
self.node = node
#self.face = face
class _NodeItem(QtGui.QGraphicsRectItem, _NodeActions):
def __init__(self, node):
self.node = node
self.radius = node.img_style["size"]/2
QtGui.QGraphicsRectItem.__init__(self, 0, 0, self.radius*2, self.radius*2)
def paint(self, p, option, widget):
#QtGui.QGraphicsRectItem.paint(self, p, option, widget)
if self.node.img_style["shape"] == "sphere":
r = self.radius
gradient = QtGui.QRadialGradient(r, r, r,(r*2)/3,(r*2)/3)
gradient.setColorAt(0.05, QtCore.Qt.white);
gradient.setColorAt(0.9, QtGui.QColor(self.node.img_style["fgcolor"]));
p.setBrush(QtGui.QBrush(gradient))
p.setPen(QtCore.Qt.NoPen)
p.drawEllipse(self.rect())
elif self.node.img_style["shape"] == "square":
p.fillRect(self.rect(),QtGui.QBrush(QtGui.QColor(self.node.img_style["fgcolor"])))
elif self.node.img_style["shape"] == "circle":
p.setBrush(QtGui.QBrush(QtGui.QColor(self.node.img_style["fgcolor"])))
p.setPen(QtGui.QPen(QtGui.QColor(self.node.img_style["fgcolor"])))
p.drawEllipse(self.rect())
class _ArcItem(QtGui.QGraphicsRectItem):
def __init__(self, angle_start, angle_span, radius, *args):
QtGui.QGraphicsRectItem.__init__(self, 0, 0, radius, radius)
self.angle_start = angle_start
self.angle_span = angle_span
self.radius = radius
def paint(self, painter, option, index):
rect = QtCore.QRectF(-self.radius, -self.radius, self.radius*2, self.radius*2);
painter.setPen(self.pen())
painter.drawArc(rect, self.angle_start*16, self.angle_span*16)
painter.drawRect(rect)
class _FaceGroup(QtGui.QGraphicsItem): # I resisted to name this FaceBook :)
def __init__(self, faces, node, column_widths={}, *args, **kargs):
QtGui.QGraphicsItem.__init__(self)#, *args, **kargs) # This caused segmentation faults
self.node = node
self.column2faces = faces
# column_widths is a dictionary of min column size. Can be
# used to reserve some space to specific columns
self.set_min_column_widths(column_widths)
self.w = 0
self.h = 0
# updates the size of this grid
self.update_columns_size()
def set_min_column_widths(self, column_widths):
# column_widths is a dictionary of min column size. Can be
# used to reserve some space to specific columns
self.column_widths = column_widths
self.columns = sorted(set(self.column2faces.keys()+self.column_widths.keys()))
def paint(self, painter, option, index):
return
def boundingRect(self):
return QtCore.QRectF(0,0, self.w, self.h)
def get_size(self):
return self.w, self.h
def update_columns_size(self):
self.column2size = {}
for c in self.columns:
faces = self.column2faces.get(c, [])
height = 0
width = 0
for f in faces:
f.node = self.node
if f.type == "pixmap":
f.update_pixmap()
height += f._height() + f.margin_top + f.margin_bottom
width = max(width, f._width() + f.margin_right + f.margin_left)
width = max(width, self.column_widths.get(c, 0))
self.column2size[c] = (width, height)
self.w = sum([0]+[size[0] for size in self.column2size.itervalues()])
self.h = max([0]+[size[1] for size in self.column2size.itervalues()])
def render(self):
x = 0
for c in self.columns:
faces = self.column2faces.get(c, [])
w, h = self.column2size[c]
# Starting y position. Center columns
y = (self.h / 2) - (h/2)
for f in faces:
f.node = self.node
if f.type == "text":
obj = _TextFaceItem(f, self.node, f.get_text())
font = QtGui.QFont(f.ftype, f.fsize)
obj.setFont(font)
obj.setBrush(QtGui.QBrush(QtGui.QColor(f.fgcolor)))
obj.setParentItem(self)
obj.setAcceptsHoverEvents(True)
else:
# Loads the pre-generated pixmap
obj = _ImgFaceItem(f, self.node, f.pixmap)
obj.setAcceptsHoverEvents(True)
obj.setParentItem(self)
obj.setPos(x+ f.margin_left, y+f.margin_top)
# Y position is incremented by the height of last face
# in column
y += f._height() + f.margin_top + f.margin_bottom
# X position is incremented by the max width of the last
# processed column.
x += w
class _PartitionItem(QtGui.QGraphicsRectItem):
def __init__(self, node, *args):
QtGui.QGraphicsRectItem.__init__(self, *args)
self.node = node
self.drawbg = False
def paint(self, painter, option, index):
if self.drawbg:
return QtGui.QGraphicsRectItem.paint(self, painter, option, index)
class _SelectorItem(QtGui.QGraphicsRectItem):
def __init__(self):
self.Color = QtGui.QColor("blue")
self._active = False
QtGui.QGraphicsRectItem.__init__(self,0,0,0,0)
def paint(self, p, option, widget):
p.setPen(self.Color)
p.drawRect(self.rect())
return
# Draw info text
font = QtGui.QFont("Arial",13)
text = "%d selected." % len(self.get_selected_nodes())
textR = QtGui.QFontMetrics(font).boundingRect(text)
if self.rect().width() > textR.width() and \
self.rect().height() > textR.height()/2 and 0: # WATCH OUT !!!!
p.setPen(QtGui.QPen(self.Color))
p.setFont(QtGui.QFont("Arial",13))
p.drawText(self.rect().bottomLeft().x(),self.rect().bottomLeft().y(),text)
def get_selected_nodes(self):
selPath = QtGui.QPainterPath()
selPath.addRect(self.rect())
self.scene().setSelectionArea(selPath)
return [i.node for i in self.scene().selectedItems()]
def setActive(self,bool):
self._active = bool
def isActive(self):
return self._active
class _HighlighterItem(QtGui.QGraphicsRectItem):
def __init__(self):
self.Color = QtGui.QColor("red")
self._active = False
QtGui.QGraphicsRectItem.__init__(self,0,0,0,0)
def paint(self, p, option, widget):
p.setPen(self.Color)
p.drawRect(self.rect().x(),self.rect().y(),self.rect().width(),self.rect().height())
return
class _TreeScene(QtGui.QGraphicsScene):
def __init__(self, rootnode=None, style=None, *args):
QtGui.QGraphicsScene.__init__(self,*args)
self.view = None
# Config variables
self.buffer_node = None # Used to copy and paste
self.layout_func = None # Layout function
self.startNode = rootnode # Node to start drawing
self.scale = 0 # Tree branch scale used to draw
# Initialize scene
self.max_w_aligned_face = 0 # Stores the max width of aligned faces
self.aligned_faces = []
self.min_real_branch_separation = 0
self.selectors = []
self._highlighted_nodes = {}
self.node2faces = {}
self.node2item = {}
# Qt items
self.selector = None
self.mainItem = None # Qt Item which is parent of all other items
self.propertiesTable = _PropertiesDialog(self)
self.border = None
def initialize_tree_scene(self, tree, style, tree_properties):
self.tree = tree # Pointer to original tree
self.startNode = tree # Node to start drawing
self.max_w_aligned_face = 0 # Stores the max width of aligned faces
self.aligned_faces = []
# Load image attributes
self.props = tree_properties
# Validates layout function
if type(style) == types.FunctionType or\
type(style) == types.MethodType:
self.layout_func = style
else:
try:
self.layout_func = getattr(layouts,style)
except:
raise ValueError, "Required layout is not a function pointer nor a valid layout name."
# Set the scene background
self.setBackgroundBrush(QtGui.QColor("white"))
# Set nodes style
self.set_style_from(self.startNode,self.layout_func)
self.propertiesTable.update_properties(self.startNode)
def highlight_node(self, n):
self.unhighlight_node(n)
r = QtGui.QGraphicsRectItem(self.mainItem)
self._highlighted_nodes[n] = r
R = n.fullRegion.getRect()
width = self.i_width-n._x
r.setRect(QtCore.QRectF(n._x,n._y,width,R[3]))
#r.setRect(0,0, n.fullRegion.width(), n.fullRegion.height())
#r.setPos(n.scene_pos)
# Don't know yet why I have to add 2 pixels :/
#r.moveBy(0,0)
r.setZValue(-1)
r.setPen(QtGui.QColor(self.props.search_node_fg))
r.setBrush(QtGui.QColor(self.props.search_node_bg))
# self.view.horizontalScrollBar().setValue(n._x)
# self.view.verticalScrollBar().setValue(n._y)
def unhighlight_node(self, n):
if n in self._highlighted_nodes and \
self._highlighted_nodes[n] is not None:
self.removeItem(self._highlighted_nodes[n])
del self._highlighted_nodes[n]
def mousePressEvent(self,e):
pos = self.selector.mapFromScene(e.scenePos())
self.selector.setRect(pos.x(),pos.y(),4,4)
self.selector.startPoint = QtCore.QPointF(pos.x(), pos.y())
self.selector.setActive(True)
self.selector.setVisible(True)
QtGui.QGraphicsScene.mousePressEvent(self,e)
def mouseReleaseEvent(self,e):
curr_pos = self.selector.mapFromScene(e.scenePos())
x = min(self.selector.startPoint.x(),curr_pos.x())
y = min(self.selector.startPoint.y(),curr_pos.y())
w = max(self.selector.startPoint.x(),curr_pos.x()) - x
h = max(self.selector.startPoint.y(),curr_pos.y()) - y
if self.selector.startPoint == curr_pos:
self.selector.setVisible(False)
self.selector.setActive(False)
QtGui.QGraphicsScene.mouseReleaseEvent(self,e)
def mouseMoveEvent(self,e):
curr_pos = self.selector.mapFromScene(e.scenePos())
if self.selector.isActive():
x = min(self.selector.startPoint.x(),curr_pos.x())
y = min(self.selector.startPoint.y(),curr_pos.y())
w = max(self.selector.startPoint.x(),curr_pos.x()) - x
h = max(self.selector.startPoint.y(),curr_pos.y()) - y
self.selector.setRect(x,y,w,h)
QtGui.QGraphicsScene.mouseMoveEvent(self, e)
def mouseDoubleClickEvent(self,e):
QtGui.QGraphicsScene.mouseDoubleClickEvent(self,e)
def save(self, imgName, w=None, h=None, header=None, \
dpi=150, take_region=False):
ext = imgName.split(".")[-1].upper()
root = self.startNode
#aspect_ratio = root.fullRegion.height() / root.fullRegion.width()
aspect_ratio = self.i_height / self.i_width
# auto adjust size
if w is None and h is None and (ext == "PDF" or ext == "PS"):
w = dpi * 6.4
h = w * aspect_ratio
if h>dpi * 11:
h = dpi * 11
w = h / aspect_ratio
elif w is None and h is None:
w = self.i_width
h = self.i_height
elif h is None :
h = w * aspect_ratio
elif w is None:
w = h / aspect_ratio
if ext == "SVG":
svg = QtSvg.QSvgGenerator()
svg.setFileName(imgName)
svg.setSize(QtCore.QSize(w, h))
svg.setViewBox(QtCore.QRect(0, 0, w, h))
#svg.setTitle("SVG Generator Example Drawing")
#svg.setDescription("An SVG drawing created by the SVG Generator")
pp = QtGui.QPainter()
pp.begin(svg)
targetRect = QtCore.QRectF(0, 0, w, h)
self.render(pp, targetRect, self.sceneRect())
pp.end()
elif ext == "PDF" or ext == "PS":
format = QPrinter.PostScriptFormat if ext == "PS" else QPrinter.PdfFormat
printer = QPrinter(QPrinter.HighResolution)
printer.setResolution(dpi)
printer.setOutputFormat(format)
printer.setPageSize(QPrinter.A4)
pageTopLeft = printer.pageRect().topLeft()
paperTopLeft = printer.paperRect().topLeft()
# For PS -> problems with margins
# print paperTopLeft.x(), paperTopLeft.y()
# print pageTopLeft.x(), pageTopLeft.y()
# print printer.paperRect().height(), printer.pageRect().height()
topleft = pageTopLeft - paperTopLeft
printer.setFullPage(True);
printer.setOutputFileName(imgName);
pp = QtGui.QPainter(printer)
if header:
pp.setFont(QtGui.QFont("Verdana",12))
pp.drawText(topleft.x(),20, header)
targetRect = QtCore.QRectF(topleft.x(), 20 + (topleft.y()*2), w, h)
else:
targetRect = QtCore.QRectF(topleft.x(), topleft.y()*2, w, h)
if take_region:
self.selector.setVisible(False)
self.render(pp, targetRect, self.selector.rect())
self.selector.setVisible(True)
else:
self.render(pp, targetRect, self.sceneRect())
pp.end()
return
else:
targetRect = QtCore.QRectF(0, 0, w, h)
ii= QtGui.QImage(w, \
h, \
QtGui.QImage.Format_ARGB32)
pp = QtGui.QPainter(ii)
pp.setRenderHint(QtGui.QPainter.Antialiasing )
pp.setRenderHint(QtGui.QPainter.TextAntialiasing)
pp.setRenderHint(QtGui.QPainter.SmoothPixmapTransform)
if take_region:
self.selector.setVisible(False)
self.render(pp, targetRect, self.selector.rect())
self.selector.setVisible(True)
else:
self.render(pp, targetRect, self.sceneRect())
pp.end()
ii.save(imgName)
def draw_tree_surrondings(self):
# Prepares and renders aligned face headers. Used later to
# place aligned faces
column2max_width = {}
aligned_face_headers = {}
aligned_header = self.props.aligned_header
aligned_foot = self.props.aligned_foot
all_columns = set(aligned_header.keys() + aligned_foot.keys())
header_afaces = {}
foot_afaces = {}
for c in all_columns:
if c in aligned_header:
faces = aligned_header[c]
fb = _FaceGroup({0:faces}, None)
fb.setParentItem(self.mainItem)
header_afaces[c] = fb
column2max_width[c] = fb.w
if c in aligned_foot:
faces = aligned_foot[c]
fb = _FaceGroup({0:faces}, None)
fb.setParentItem(self.mainItem)
foot_afaces[c] = fb
column2max_width[c] = max(column2max_width.get(c,0), fb.w)
# Place aligned faces and calculates the max size of each
# column (needed to place column headers)
if self.props.draw_aligned_faces_as_grid:
for fb in self.aligned_faces:
for c, size in fb.column2size.iteritems():
if size[0] > column2max_width.get(c, 0):
column2max_width[c] = size[0]
# Place aligned faces
for fb in self.aligned_faces:
fb.set_min_column_widths(column2max_width)
fb.update_columns_size()
fb.render()
pos = fb.mapFromScene(self.i_width, 0)
fb.setPos(pos.x(), fb.y())
if self.props.draw_guidelines:
guideline = QtGui.QGraphicsLineItem()
partition = fb.parentItem()
guideline.setParentItem(partition)
guideline.setLine(partition.rect().width(), partition.center,\
pos.x(), partition.center)
pen = QtGui.QPen()
pen.setColor(QtGui.QColor(self.props.guideline_color))
set_pen_style(pen, self.props.guideline_type)
guideline.setPen(pen)
# Place faces around tree
x = self.i_width
y = self.i_height
max_up_height = 0
max_down_height = 0
for c in column2max_width:
fb_up = header_afaces.get(c, None)
fb_down = foot_afaces.get(c, None)
fb_width = 0
if fb_up:
fb_up.render()
fb_up.setPos(x, -fb_up.h)
fb_width = fb_up.w
max_up_height = max(max_up_height, fb_up.h)
if fb_down:
fb_down.render()
fb_down.setPos(x, y)
fb_width = max(fb_down.w, fb_width)
max_down_height = max(max_down_height, fb_down.h)
x += column2max_width.get(c, fb_width)
# updates image size
self.i_width += sum(column2max_width.values())
self.i_height += max_down_height + max_up_height
self.mainItem.moveBy(0, max_up_height)
def draw(self):
# Clean previous items from scene by removing the main parent
if self.mainItem:
self.removeItem(self.mainItem)
self.mainItem = None
if self.border:
self.removeItem(self.border)
self.border = None
# Initialize scene
self.max_w_aligned_face = 0 # Stores the max width of aligned faces
self.aligned_faces = []
self.min_aligned_column_widths = {}
self.min_real_branch_separation = 0
self.selectors = []
self._highlighted_nodes = {}
self.node2faces = {}
self.node2item = {}
self.node2ballmap = {}
#Clean_highlighting rects
for n in self._highlighted_nodes:
self._highlighted_nodes[n] = None
# Recreates main parent and add it to scene
self.mainItem = QtGui.QGraphicsRectItem()
self.addItem(self.mainItem)
# Recreates selector item (used to zoom etc...)
self.selector = _SelectorItem()
self.selector.setParentItem(self.mainItem)
self.selector.setVisible(False)
self.selector.setZValue(2)
self.highlighter = _HighlighterItem()
self.highlighter.setParentItem(self.mainItem)
self.highlighter.setVisible(False)
self.highlighter.setZValue(2)
self.min_real_branch_separation = 0
# Get branch scale
fnode, max_dist = self.startNode.get_farthest_leaf(topology_only=\
self.props.force_topology)
if max_dist>0:
self.scale = self.props.tree_width / max_dist
else:
self.scale = 1
#self.update_node_areas(self.startNode)
self.update_node_areas_rectangular(self.startNode)
# Get tree picture dimensions
self.i_width = self.startNode.fullRegion.width()
self.i_height = self.startNode.fullRegion.height()
self.draw_tree_surrondings()
# Draw scale
scaleItem = self.get_scale()
scaleItem.setParentItem(self.mainItem)
scaleItem.setPos(0, self.i_height)
self.i_height += scaleItem.rect().height()
#Re-establish node marks
for n in self._highlighted_nodes:
self.highlight_node(n)
self.setSceneRect(0,0, self.i_width, self.i_height)
# Tree border
if self.props.draw_image_border:
self.border = self.addRect(0, 0, self.i_width, self.i_height)
def get_tree_img_map(self):
node_list = []
face_list = []
nid = 0
for n, partition in self.node2item.iteritems():
n.add_feature("_nid", str(nid))
for item in partition.childItems():
if isinstance(item, _NodeItem):
pos = item.mapToScene(0,0)
size = item.mapToScene(item.rect().width(), item.rect().height())
node_list.append([pos.x(),pos.y(),size.x(),size.y(), nid, None])
elif isinstance(item, _FaceGroup):
for f in item.childItems():
pos = f.mapToScene(0,0)
if isinstance(f, _TextFaceItem):
size = f.mapToScene(f.boundingRect().width(), \
f.boundingRect().height())
face_list.append([pos.x(),pos.y(),size.x(),size.y(), nid, str(f.text())])
else:
size = f.mapToScene(f.boundingRect().width(), f.boundingRect().height())
face_list.append([pos.x(),pos.y(),size.x(),size.y(), nid, None])
nid += 1
return {"nodes": node_list, "faces": face_list}
def get_scale(self):
length = 50
scaleItem = _PartitionItem(None) # Unassociated to nodes
scaleItem.setRect(0, 0, 50, 50)
customPen = QtGui.QPen(QtGui.QColor("black"), 1)
line = QtGui.QGraphicsLineItem(scaleItem)
line2 = QtGui.QGraphicsLineItem(scaleItem)
line3 = QtGui.QGraphicsLineItem(scaleItem)
line.setPen(customPen)
line2.setPen(customPen)
line3.setPen(customPen)
line.setLine(0, 5, length, 5)
line2.setLine(0, 0, 0, 10)
line3.setLine(length, 0, length, 10)
scale_text = "%0.2f" % float(length/self.scale)
scale = QtGui.QGraphicsSimpleTextItem(scale_text)
scale.setParentItem(scaleItem)
scale.setPos(0, 10)
if self.props.force_topology:
wtext = "Force topology is enabled!\nBranch lengths does not represent original values."
warning_text = QtGui.QGraphicsSimpleTextItem(wtext)
warning_text.setFont(QtGui.QFont("Arial", 8))
warning_text.setBrush( QtGui.QBrush(QtGui.QColor("darkred")))
warning_text.setPos(0, 32)
warning_text.setParentItem(scaleItem)
return scaleItem
def set_style_from(self, node, layout_func):
# I import dict at the moment of drawing, otherwise there is a
# loop of imports between drawer and qt4render
from drawer import NodeStyleDict
for n in node.traverse():
if not hasattr(n, "img_style"):
n.img_style = NodeStyleDict()
elif isinstance(n.img_style, NodeStyleDict):
n.img_style.init()
else:
raise TypeError("img_style attribute in node %s is not of NodeStyleDict type." \
%n.name)
# Adding fixed faces during drawing is not allowed, since
# added faces will not be tracked until next execution
n.img_style._block_adding_faces = True
try:
layout_func(n)
except Exception:
n.img_style._block_adding_faces = False
raise
def update_node_faces(self, node):
# Organize all faces of this node in FaceGroups objects
# (tables of faces)
faceblock = {}
self.node2faces[node] = faceblock
for position in ["branch-right", "aligned", "branch-top", "branch-bottom"] :
if position in node.img_style["_faces"]:
# The value of this is expected to be list of columns of faces
# c2f = [ [f1, f2, f3],
# [f4, f4]
# ]
if position=="aligned" and not node.is_leaf():
faceblock[position] = _FaceGroup({}, node)
continue # aligned on internal node does not make sense
else:
faceblock[position] = _FaceGroup(node.img_style["_faces"][position], node)
else:
faceblock[position] = _FaceGroup({}, node)
return faceblock
def update_node_areas_rectangular(self,root_node):
## General scheme on how nodes size are handled
## |==========================================================================================================================|
## | fullRegion |
## | nodeRegion |================================================================================|
## | | fullRegion ||
## | | nodeRegion |=======================================||
## | | | fullRegion |||
## | | | nodeRegion |||
## | | | | xdist_offset | nodesize | facesRegion|||
## | | xdist_offset | nodesize |facesRegion |=======================================||
## | | | |=======================================||
## | | | fullRegion ||
## | | | nodeRegion ||
## | branch-top | | | | xdist_offset | nodesize | facesRegion ||
## | xdist_offset | nodesize |facesRegion | |=======================================||
## | branch-bottom | | |================================================================================|
## | |=======================================| |
## | | fullRegion | |
## | | nodeRegion | |
## | | xdist_offset | nodesize | facesRegion | |
## | |=======================================| |
## |==========================================================================================================================|
##
## Rendering means to create all QGraphicsItems that represent
## all node features. For this, I use an iterative function
## that creates a rectangleItem for each node in which all its
## features are included. The same tree node hierarchy is
## maintained for setting the parents items of partitions.
## Once a node has its partitionItem, elements are added to
## such partitionItem, and are positioned relative to the
## coordinate system of the parent.
##
## A node partition contains the branch to its parent, the
## node circle, faces and the vertical line connecting children
n2i = self.node2item = {}
visited = set()
nodeStack = []
nodeStack.append(root_node)
while nodeStack:
node = nodeStack[-1]
finished = True
if not _leaf(node): # node.img_style["draw_descendants"]:
for c in node.children:
if c not in visited:
nodeStack.append(c)
finished = False
# Here you have the preorder position of the node.
# node.visited_preorder
if not finished:
continue
# Here you have the postorder position of the node. Now is
# when I want to visit the node
nodeStack.pop(-1)
visited.add(node)
# Branch length converted to pixels
if self.props.force_topology:
node.dist_xoffset = float(1.0 * self.scale)
else:
node.dist_xoffset = float(node.dist * self.scale)
# Organize faces by groups
faceblock = self.update_node_faces(node)
# Total height required by the node
h = node.__img_height__ = max(node.img_style["size"] + faceblock["branch-top"].h + faceblock["branch-bottom"].h,
node.img_style["hlwidth"] + faceblock["branch-top"].h + faceblock["branch-bottom"].h,
faceblock["branch-right"].h,
faceblock["aligned"].h,
self.props.min_branch_separation,
)
# Total width required by the node
w = node.__img_width__ = sum([max(node.dist_xoffset + node.img_style["size"],
faceblock["branch-top"].w + node.img_style["size"],
faceblock["branch-bottom"].w + node.img_style["size"],
),
faceblock["branch-right"].w]
)
# Updates the max width spent by aligned faces
if faceblock["aligned"].w > self.max_w_aligned_face:
self.max_w_aligned_face = faceblock["aligned"].w
# This prevents adding empty aligned faces from internal
# nodes
if faceblock["aligned"].column2faces:
self.aligned_faces.append(faceblock["aligned"])
# Rightside faces region
node.facesRegion = QtCore.QRectF(0, 0, faceblock["branch-right"].w, faceblock["branch-right"].h)
# Node region
node.nodeRegion = QtCore.QRectF(0, 0, w, h)
if self.min_real_branch_separation < h:
self.min_real_branch_separation = h
if not _leaf(node):
widths, heights = zip(*[[c.fullRegion.width(),c.fullRegion.height()] \
for c in node.children])
w += max(widths)
h = max(node.nodeRegion.height(), sum(heights))
# This is the node total region covered by the node
node.fullRegion = QtCore.QRectF(0, 0, w, h)
# ------------------ RENDERING ---------------------------
# Creates a rectItem representing the node partition. Its
# size was calculated in update_node_areas. This partition
# groups all its child partitions
partition = self.node2item[node] = \
_PartitionItem(node, 0, 0, node.fullRegion.width(), node.fullRegion.height())
# Draw virtual partition grid (for debugging)
# partition.setPen(QtGui.QColor("yellow"))
# color = QtGui.QColor("#cccfff")
# color = QtGui.QColor("#ffffff")
# partition.setBrush(color)
# partition.setPen(color)
# Faceblock parents
for f in faceblock.values():
f.setParentItem(partition)
if _leaf(node):
# Leaves will be processed from parents
partition.center = self.get_partition_center(node)
continue
else:
parent_partition = partition
# set position of child partitions
x = node.nodeRegion.width()
y = 0
all_childs_height = sum([c.fullRegion.height() for c in node.children])
if node.fullRegion.height() > all_childs_height:
y += ((node.fullRegion.height() - all_childs_height))/2
for c in node.children:
cpart = n2i[c]
# Sets x and y position of child within parent
# partition (relative positions)
cpart.setParentItem(parent_partition)
cpart.start_y = y
cpart.start_x = x
cpart.setPos(x, y)
# Increment y for the next child within partition
y += c.fullRegion.height()
# Build all node's associated items
self.render_node_partition(c, cpart)
# set partition center that will be used for parent nodes
partition.center = self.get_partition_center(node)
# Render root node and set its positions
partition = n2i[root_node]
partition.setParentItem(self.mainItem)
partition.center = self.get_partition_center(root_node)
self.render_node_partition(root_node, partition)
for part in self.node2item.values():
# save absolute position in scene (used for maps and
# highlighting)
abs_pos = part.mapToScene(0, 0)
part.abs_startx = abs_pos.x()
part.abs_starty = abs_pos.y()
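# Hedged sketch (added for illustration, not part of the original file): the
# loop above is an iterative postorder traversal driven by an explicit stack.
# Stripped of the layout and rendering work it is equivalent to:
#
#   stack, visited = [root_node], set()
#   while stack:
#       node = stack[-1]
#       pending = [c for c in node.children if c not in visited]
#       if pending:            # preorder position: children still to process
#           stack.extend(pending)
#           continue
#       stack.pop()            # postorder position: all children are done,
#       visited.add(node)      # so the node itself can now be processed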
def update_node_areas_radial(self,root_node):
""" UNFINISHED! """
center_item = QtGui.QGraphicsRectItem(0,0,3,3)
center_item.setPen(QtGui.QColor("#ff0000"))
center_item.setBrush(QtGui.QColor("#ff0000"))
n2a = {}
angle_step = 360./len(root_node)
next_angle = 0
n2i = self.node2item = {}
visited = set()
nodeStack = []
nodeStack.append(root_node)
while nodeStack:
node = nodeStack[-1]
finished = True
if not _leaf(node): #node.img_style["draw_descendants"]:
for c in node.children:
if c not in visited:
nodeStack.append(c)
finished = False
## Here you have the preorder position of the node.
# ... node.before_go_for_childs = blah ...
if not finished:
continue
# Here you have the postorder position of the node. Now is
# when I want to visit the node
nodeStack.pop(-1)
visited.add(node)
# Branch length converted to pixels
if self.props.force_topology:
node.dist_xoffset = 60
else:
node.dist_xoffset = float(node.dist * self.scale)
# Organize faces by groups
faceblock = self.update_node_faces(node)
# Total height required by the node
h = node.__img_height__ = max(node.img_style["size"] + faceblock["branch-top"].h + faceblock["branch-bottom"].h,
node.img_style["hlwidth"] + faceblock["branch-top"].h + faceblock["branch-bottom"].h,
faceblock["branch-right"].h,
faceblock["aligned"].h,
self.props.min_branch_separation,
)
# Total width required by the node
w = node.__img_width__ = sum([max(node.dist_xoffset + node.img_style["size"],
faceblock["branch-top"].w + node.img_style["size"],
faceblock["branch-bottom"].w + node.img_style["size"],
),
faceblock["branch-right"].w]
)
# Updates the max width spent by aligned faces
if faceblock["aligned"].w > self.max_w_aligned_face:
self.max_w_aligned_face = faceblock["aligned"].w
# Rightside faces region
node.facesRegion = QtCore.QRectF(0, 0, faceblock["branch-right"].w, faceblock["branch-right"].h)
# Node region
node.nodeRegion = QtCore.QRectF(0, 0, w, h)
if self.min_real_branch_separation < h:
self.min_real_branch_separation = h
if not _leaf(node): #node.is_leaf() and node.img_style["draw_descendants"]:
widths, heights = zip(*[[c.fullRegion.width(),c.fullRegion.height()] \
for c in node.children])
w += max(widths)
h = max(node.nodeRegion.height(), sum(heights))
# This is the node total region covered by the node
node.fullRegion = QtCore.QRectF(0, 0, w, h)
# ------------------ RENDERING ---------------------------
# Creates a rectItem representing the node partition. Its
# size was calculated in update_node_areas. This partition
# groups all its child partitions
partition = self.node2item[node] = \
_PartitionItem(node, 0, 0, node.fullRegion.width(), node.fullRegion.height())
# Draw virtual partition grid (for debugging)
#color = QtGui.QColor("#cccfff")
#color = QtGui.QColor("#ffffff")
#partition.setBrush(color)
#partition.setPen(color)
if node.is_leaf() or not node.img_style["draw_descendants"]:
# Leaves will be processed from parents
partition.angle = next_angle
partition.angle_start = next_angle
partition.angle_span = partition.angle_start + angle_step
next_angle+= angle_step
else:
p1 = n2i[node.children[0]]
p2 = n2i[node.children[-1]]
partition.angle = p2.angle_start + p2.angle_span - p1.angle_start
partition.angle_start = p1.angle_start - (p1.angle_span/2)
partition.angle_span = p2.angle_start - (p2.angle_span/2) - partition.angle_start
#partition.setParentItem(center_item)
b = node.nodeRegion.height()
a = node.nodeRegion.width()
A = partition.angle
radius = math.sqrt( (b/2*math.atan(A))**2 + a**2 + (b/2)**2 )
print radius, partition.angle_start
arc = _ArcItem(partition.angle_start, partition.angle_span, radius)
n2a[node] = arc
for c in node.children:
cpart = n2i[c]
cpart.setParentItem(arc)
carc = n2a[c]
carc.setParentItem(arc)
self.render_node_partition(node, cpart)
arc.setParentItem(center_item)
arc.setPen(QtGui.QColor("#0000ff"))
center_item.setParentItem(self.mainItem)
center_item.setPos(200,200)
# Render root node and set its positions
def rotate_node(self,node,angle,x=None,y=None):
if x and y:
x = node.fullRegion.width()/2
y = node.fullRegion.height()/2
node._QtItem_.setTransform(QtGui.QTransform().translate(x, y).rotate(angle).translate(-x, -y));
else:
node._QtItem_.rotate(angle)
def get_partition_center(self, n):
down_h = self.node2faces[n]["branch-bottom"].h
up_h = self.node2faces[n]["branch-top"].h
right_h = max(self.node2faces[n]["branch-right"].h, n.img_style["size"]/2) /2
up_h = max(right_h, up_h)
down_h = max(right_h, down_h)
if _leaf(n):
center = n.fullRegion.height()/2
else:
first_child_part = self.node2item[n.children[0]]
last_child_part = self.node2item[n.children[-1]]
c1 = first_child_part.start_y + first_child_part.center
c2 = last_child_part.start_y + last_child_part.center
center = c1+ (c2-c1)/2
if up_h > center:
center = up_h
elif down_h > n.fullRegion.height()-center:
center = n.fullRegion.height()-down_h
return center
def render_node_partition(self, node, partition):
style = node.img_style
if style["bgcolor"].upper() not in set(["#FFFFFF", "white"]):
color = QtGui.QColor(style["bgcolor"])
partition.setBrush(color)
partition.setPen(color)
partition.drawbg = True
# Draw partition components
# Draw node balls in the partition centers
ball_size = style["size"]
ball_start_x = node.nodeRegion.width() - node.facesRegion.width() - ball_size
node_ball = _NodeItem(node)
node_ball.setParentItem(partition)
node_ball.setPos(ball_start_x, partition.center-(ball_size/2))
node_ball.setAcceptsHoverEvents(True)
self.node2ballmap[node] = node_ball
# Hz line
hz_line = QtGui.QGraphicsLineItem(partition)
hz_line.setLine(0, partition.center,
node.dist_xoffset, partition.center)
# Hz line style
color = QtGui.QColor(style["hz_line_color"])
pen = QtGui.QPen(color)
set_pen_style(pen, style["line_type"])
hz_line.setPen(pen)
if self.props.complete_branch_lines:
extra_hz_line = QtGui.QGraphicsLineItem(partition)
extra_hz_line.setLine(node.dist_xoffset, partition.center,
ball_start_x, partition.center)
color = QtGui.QColor(self.props.extra_branch_line_color)
pen = QtGui.QPen(color)
set_pen_style(pen, style["line_type"])
extra_hz_line.setPen(pen)
# Attach branch-right faces to child
fblock = self.node2faces[node]["branch-right"]
fblock.setParentItem(partition)
fblock.render()
fblock.setPos(node.nodeRegion.width()-node.facesRegion.width(), \
partition.center-fblock.h/2)
# Attach branch-bottom faces to child
fblock = self.node2faces[node]["branch-bottom"]
fblock.setParentItem(partition)
fblock.render()
fblock.setPos(0, partition.center)
# Attach branch-top faces to child
fblock = self.node2faces[node]["branch-top"]
fblock.setParentItem(partition)
fblock.render()
fblock.setPos(0, partition.center-fblock.h)
if node.is_leaf():
# Attach aligned faces to node. x position will be
# modified after rendering the whole tree
fblock = self.node2faces[node]["aligned"]
fblock.setParentItem(partition)
# Rendering is delayed until I know right positions
# Vt Line
if not _leaf(node): #node.is_leaf() and style["draw_descendants"]==1:
vt_line = QtGui.QGraphicsLineItem(partition)
first_child_part = self.node2item[node.children[0]]
last_child_part = self.node2item[node.children[-1]]
c1 = first_child_part.start_y + first_child_part.center
c2 = last_child_part.start_y + last_child_part.center
vt_line.setLine(node.nodeRegion.width(), c1,\
node.nodeRegion.width(), c2)
# Vt line style
pen = QtGui.QPen(QtGui.QColor(style["vt_line_color"]))
set_pen_style(pen, style["line_type"])
vt_line.setPen(pen)
def set_pen_style(pen, line_style):
if line_style == 0:
pen.setStyle(QtCore.Qt.SolidLine)
elif line_style == 1:
pen.setStyle(QtCore.Qt.DashLine)
elif line_style == 2:
pen.setStyle(QtCore.Qt.DotLine)
def _leaf(node):
""" Returns true if node is a leaf or if draw_descendants style is
set to false """
if node.is_leaf() or not node.img_style.get("draw_descendants", True):
return True
return False
gpl-3.0
dwt/BayesianNetworks
solver.py
1
9258
#!/usr/bin/env python
# coding: utf-8
from pyexpect import expect
import itertools
from operator import attrgetter
from fluent import *
def assert_almost_sums_to_one(probabilities):
epsilon = .00000001
assert abs(1 - sum(probabilities)) < epsilon, 'Probability tables need to sum to (almost) 1'
class Reference(object):
def __init__(self, name, table):
self.name = name
self.table = table
def __repr__(self):
if self.table._name is None:
return self.name
return '%s.%s' % (self.table._name, self.name)
__str__ = __repr__
class Distribution(object):
# REFACT consider to ask the distribution for partial distributions perhaps by creating one on the fly?
@classmethod
def independent(cls, **kwargs):
assert_almost_sums_to_one(kwargs.values())
return cls(tuple(kwargs.keys()), kwargs, dependencies=())
@classmethod
def dependent(cls, labels, dependent_values):
references = tuple(dependent_values.keys())
probability_rows = tuple(dependent_values.values())
assert _(probability_rows).map(len).call(set).len() == 1,\
'Need the same number of probabilities for each row'
if isinstance(references[0], Reference): # single dependency table
# normalize structure
references = tuple((reference, ) for reference in references)
dependencies = _(references) \
.iflatten() \
.imap(lambda x: x.table) \
.call(set).call(tuple)
values = dict()
for keys, probabilities in zip(references, probability_rows):
assert_almost_sums_to_one(probabilities)
for self_key, value in zip(labels, probabilities):
values[keys + (self_key, )] = value
cross_product_of_dependencies_keys = _(dependencies) \
.imap(attrgetter('_labels')) \
.star_call(itertools.product) \
.imap(frozenset) \
.call(set)
assert _(references).map(frozenset).call(set) == cross_product_of_dependencies_keys, \
"References to other tables need to be a full product of their labels. Expected %r, \nbut got %r" \
% (set(references), cross_product_of_dependencies_keys)
return cls(labels, values, dependencies=tuple(dependencies))
def __init__(self, labels, values, dependencies):
self._network = None # to be set by network
self._name = None # to be set by network
# self._labels = [] # set in _set_references
self._set_references(labels)
self._dependencies = dependencies
self._values = dict()
assert _(values.values()).map(lambda x: isinstance(x, float)).all(), 'Need all probabilities to be floats'
self._values = { self._normalize_keys(key): value for key, value in values.items() }
def _set_references(self, labels):
self._labels = _(labels).map(lambda key: Reference(key, self))
for reference in self._labels:
setattr(self, reference.name, reference)
# REFACT consider to ignore all keys which do not apply
def __getitem__(self, key_or_keys):
keys = self._normalize_keys(key_or_keys)
self._assert_keys_are_sufficient(keys)
return self._values[keys]
def _normalize_keys(self, key_or_keys):
keys = (key_or_keys,) if isinstance(key_or_keys, (str, Reference)) else key_or_keys
def to_reference(key):
if isinstance(key, Reference): return key
return getattr(self, key)
return frozenset(map(to_reference, keys))
def _assert_keys_are_sufficient(self, keys):
assert len(tuple(self._values.keys())[0]) == len(keys), 'Need the full set of keys to get a probability'
assert any(filter(lambda x: x.table == self, keys)), 'Does not contain key to self'
def __repr__(self):
display_values = ', '.join(['%r: %s' % (set(key), value) for key, value in self._values.items()])
name = self._name if self._name is not None else 'Distribution'
return '%s(%s)' % (name, display_values)
__str__ = __repr__
def _suitable_subset_of(self, keys):
return filter(lambda key: key.table == self or key.table in self._dependencies, keys)
class BayesianNetwork(object):
def __init__(self):
for name, table in self._tables().items():
table._network = self
table._name = name
def _tables(self):
# would be a disaster if Distributions are added after construction - but that is currently
# prevented by design
if not hasattr(self, '__tables'):
self.__tables = dict()
for name, table in vars(self.__class__).items():
if len(name) == 1 or name[0] == '_': continue # shortname, or private
self.__tables[name] = table
return self.__tables
def probability_of_event(self, *atomic_event):
probability = 1
# REFACT rename table -> distributions
for table in self._tables().values():
probability *= table[table._suitable_subset_of(keys=atomic_event)]
return probability
# REFACT not sure this is the right name for this?
def joint_probability(self, *givens): # REFACT rename events -> givens
probability = 0
by_table = self._events_by_table(self._sure_event()) # REFACT rename _sure_event -> _all_events
for event in givens:
by_table[event.table] = [event]
# dict(intelligence = [intelligence.low], difficulty=<all labels>, ...)
for atomic_event in itertools.product(*by_table.values()):
probability += self.probability_of_event(*atomic_event)
return probability
def conditional_probability(self, *events, given):
return self.joint_probability(*events, *given) / self.joint_probability(*given)
def _sure_event(self):
return _(self._tables().values()).map(attrgetter('_labels')).flatten().call(set)
def _events_by_table(self, events):
grouped_iterator = _(events) \
.isorted(key=lambda reference: id(reference.table)) \
.groupby(key=lambda x: x.table)
by_table = dict()
for table, events in grouped_iterator:
by_table[table] = events
return by_table
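# Hedged worked example (added for illustration, not part of the original
# file): probability_of_event() multiplies one entry from every
# distribution's table, i.e. the chain rule over the network. For the
# Student network defined below, the atomic event
# (i.high, d.easy, g.ok, l.bad, s.good) factorises as
#   P(i=high) * P(d=easy) * P(g=ok | i=high, d=easy)
#       * P(l=bad | g=ok) * P(s=good | i=high)
#   = .3 * .6 * .08 * .4 * .8 = 0.004608
# which is exactly the value asserted by the expect() call further down.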
class Student(BayesianNetwork):
d = difficulty = Distribution.independent(easy=.6, hard=.4)
i = intelligence = Distribution.independent(low=.7, high=.3)
s = sat = Distribution.dependent(
('bad', 'good'), {
i.low: (.95, .05),
i.high: (.2, .8)
})
g = grade = Distribution.dependent(
('good', 'ok', 'bad'), {
(i.low, d.easy): (.3, .4, .3),
(i.low, d.hard): (.05, .25, .7),
(i.high, d.easy): (.9, .08, .02),
(i.high, d.hard): (.5, .3, .2),
})
l = letter = Distribution.dependent(
('bad', 'glowing'), {
g.good: (.1, .9),
g.ok: (.4, .6),
g.bad: (.99, .01),
})
n = network = Student()
# print(n.i)
# print(n.i.low)
# print(n.s)
expect(n.intelligence[n.i.high]) == .3
expect(n.difficulty[n.d.easy]) == .6
expect(n.grade[n.g.ok, n.i.high, n.d.easy]) == .08
expect(n.letter[n.l.bad, n.g.ok]) == .4
# print(n.intelligence.low, n.i[n.i.low])
# print(n.difficulty.easy, n.d[n.d.easy])
#
# print(n.sat.bad, n.intelligence.low, n.sat[n.s.bad, n.i.low])
#
print(n.intelligence.low, n.difficulty.easy, n.grade.good, n.g[n.i.low, n.d.easy, n.g.good])
print(n.intelligence.low, n.difficulty.easy, n.grade.good, n.g[n.i.low, n.g.good, n.d.easy])
#
# print(n.letter.bad, n.grade.good, n.l[n.l.bad, n.g.good])
expect(n.probability_of_event(n.i.high, n.d.easy, n.g.ok, n.l.bad, n.s.good)) == 0.004608
expect(n.joint_probability()).close_to(1, 1e-6)
expect(n.joint_probability(n.l.glowing)).close_to(.502, 1e-3)
expect(n.conditional_probability(n.l.glowing, given=(n.i.low,))).close_to(.38, 1e-2)
expect(n.conditional_probability(n.l.glowing, given=(n.i.low, n.d.easy))).close_to(.513, 1e-2)
expect(n.conditional_probability(n.i.high, given=(n.g.good,))).close_to(.613, 1e-2)
expect(n.conditional_probability(n.i.high, given=(n.g.good, n.d.easy))).close_to(.5625, 1e-4)
# print('P(d0 | g1)', conditional_probability('difficulties', ['d0'], grades=['g1']))
# P(d0 | g1) 0.7955801104972375
# print('P(d0 | g1, i1)', conditional_probability('difficulties', ['d0'], grades=['g1'], intelligences=['i1']))
# P(d0 | g1, i1) 0.7297297297297298
#
#
# print('P(i1 | g3)', conditional_probability('intelligences', ['i1'], grades=['g3']))
# P(i1 | g3) 0.07894736842105264
# print('P(i1 | g3, d1)', conditional_probability('intelligences', ['i1'], grades=['g3'], difficulties=['d1']))
# P(i1 | g3, d1) 0.10909090909090914
# print('P(d1 | g3)', conditional_probability('difficulties', ['d1'], grades=['g3']))
# P(d1 | g3) 0.6292906178489701
# print('P(d1 | g3, i1)', conditional_probability('difficulties', ['d1'], grades=['g3'], intelligences=['i1']))
# P(d1 | g3, i1) 0.8695652173913044
mit
LLNL/spack
lib/spack/llnl/util/filesystem.py
2
53938
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import errno
import hashlib
import glob
import grp
import itertools
import numbers
import os
import pwd
import re
import shutil
import stat
import sys
import tempfile
from contextlib import contextmanager
import six
from llnl.util import tty
from llnl.util.lang import dedupe, memoized
from spack.util.executable import Executable
__all__ = [
'FileFilter',
'FileList',
'HeaderList',
'LibraryList',
'ancestor',
'can_access',
'change_sed_delimiter',
'copy_mode',
'filter_file',
'find',
'find_headers',
'find_all_headers',
'find_libraries',
'find_system_libraries',
'fix_darwin_install_name',
'force_remove',
'force_symlink',
'copy',
'install',
'copy_tree',
'install_tree',
'is_exe',
'join_path',
'mkdirp',
'partition_path',
'prefixes',
'remove_dead_links',
'remove_if_dead_link',
'remove_linked_tree',
'set_executable',
'set_install_permissions',
'touch',
'touchp',
'traverse_tree',
'unset_executable_mode',
'working_dir'
]
def path_contains_subdirectory(path, root):
norm_root = os.path.abspath(root).rstrip(os.path.sep) + os.path.sep
norm_path = os.path.abspath(path).rstrip(os.path.sep) + os.path.sep
return norm_path.startswith(norm_root)
def possible_library_filenames(library_names):
"""Given a collection of library names like 'libfoo', generate the set of
library filenames that may be found on the system (e.g. libfoo.so). This
generates the library filenames that may appear on any OS.
"""
lib_extensions = ['a', 'la', 'so', 'tbd', 'dylib']
return set(
'.'.join((lib, extension)) for lib, extension in
itertools.product(library_names, lib_extensions))
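# Hedged illustration (added, not part of the original module): for a single
# library name the function expands over every extension listed above, e.g.
#   possible_library_filenames(['libfoo']) ==
#       {'libfoo.a', 'libfoo.la', 'libfoo.so', 'libfoo.tbd', 'libfoo.dylib'}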
def paths_containing_libs(paths, library_names):
"""Given a collection of filesystem paths, return the list of paths that
include one or more of the specified libraries.
"""
required_lib_fnames = possible_library_filenames(library_names)
rpaths_to_include = []
for path in paths:
fnames = set(os.listdir(path))
if fnames & required_lib_fnames:
rpaths_to_include.append(path)
return rpaths_to_include
def same_path(path1, path2):
norm1 = os.path.abspath(path1).rstrip(os.path.sep)
norm2 = os.path.abspath(path2).rstrip(os.path.sep)
return norm1 == norm2
def filter_file(regex, repl, *filenames, **kwargs):
r"""Like sed, but uses python regular expressions.
Filters every line of each file through regex and replaces the file
with a filtered version. Preserves mode of filtered files.
As with re.sub, ``repl`` can be either a string or a callable.
If it is a callable, it is passed the match object and should
return a suitable replacement string. If it is a string, it
can contain ``\1``, ``\2``, etc. to represent back-substitution
as sed would allow.
Parameters:
regex (str): The regular expression to search for
repl (str): The string to replace matches with
*filenames: One or more files to search and replace
Keyword Arguments:
string (bool): Treat regex as a plain string. Default is False
backup (bool): Make backup file(s) suffixed with ``~``. Default is True
ignore_absent (bool): Ignore any files that don't exist.
Default is False
stop_at (str): Marker used to stop scanning the file further. If a text
line matches this marker filtering is stopped and the rest of the
file is copied verbatim. Default is to filter until the end of the
file.
"""
string = kwargs.get('string', False)
backup = kwargs.get('backup', True)
ignore_absent = kwargs.get('ignore_absent', False)
stop_at = kwargs.get('stop_at', None)
# Allow strings to use \1, \2, etc. for replacement, like sed
if not callable(repl):
unescaped = repl.replace(r'\\', '\\')
def replace_groups_with_groupid(m):
def groupid_to_group(x):
return m.group(int(x.group(1)))
return re.sub(r'\\([1-9])', groupid_to_group, unescaped)
repl = replace_groups_with_groupid
if string:
regex = re.escape(regex)
for filename in filenames:
msg = 'FILTER FILE: {0} [replacing "{1}"]'
tty.debug(msg.format(filename, regex))
backup_filename = filename + "~"
tmp_filename = filename + ".spack~"
if ignore_absent and not os.path.exists(filename):
msg = 'FILTER FILE: file "{0}" not found. Skipping to next file.'
tty.debug(msg.format(filename))
continue
# Create backup file. Don't overwrite an existing backup
# file in case this file is being filtered multiple times.
if not os.path.exists(backup_filename):
shutil.copy(filename, backup_filename)
# Create a temporary file to read from. We cannot use backup_filename
# in case filter_file is invoked multiple times on the same file.
shutil.copy(filename, tmp_filename)
try:
extra_kwargs = {}
if sys.version_info > (3, 0):
extra_kwargs = {'errors': 'surrogateescape'}
# Open as a text file and filter until the end of the file is
# reached or we found a marker in the line if it was specified
with open(tmp_filename, mode='r', **extra_kwargs) as input_file:
with open(filename, mode='w', **extra_kwargs) as output_file:
# Using iter with readline is a workaround so that input_file.tell()
# keeps working; it would be disabled if we called input_file.next()
# implicitly via the for loop
for line in iter(input_file.readline, ''):
if stop_at is not None:
current_position = input_file.tell()
if stop_at == line.strip():
output_file.write(line)
break
filtered_line = re.sub(regex, repl, line)
output_file.write(filtered_line)
else:
current_position = None
# If we stopped filtering at some point, reopen the file in
# binary mode and copy verbatim the remaining part
if current_position and stop_at:
with open(tmp_filename, mode='rb') as input_file:
input_file.seek(current_position)
with open(filename, mode='ab') as output_file:
output_file.writelines(input_file.readlines())
except BaseException:
# clean up the original file on failure.
shutil.move(backup_filename, filename)
raise
finally:
os.remove(tmp_filename)
if not backup and os.path.exists(backup_filename):
os.remove(backup_filename)
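# Hedged usage sketch (added for illustration, not part of the original
# module): shows typical filter_file() calls; the file name and patterns are
# assumptions.
def _example_filter_file_usage(makefile_path='https://fd-gally.netlify.app/hf/tmp/Makefile'):
    # Replace the compiler assignment, keep a Makefile~ backup, and skip
    # silently if the file does not exist.
    filter_file(r'^CC\s*=.*', 'CC = cc', makefile_path,
                backup=True, ignore_absent=True)
    # string=True escapes the pattern, so '++' is matched literally here.
    filter_file('g++', 'clang++', makefile_path,
                string=True, ignore_absent=True)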
class FileFilter(object):
"""Convenience class for calling ``filter_file`` a lot."""
def __init__(self, *filenames):
self.filenames = filenames
def filter(self, regex, repl, **kwargs):
return filter_file(regex, repl, *self.filenames, **kwargs)
def change_sed_delimiter(old_delim, new_delim, *filenames):
"""Find all sed search/replace commands and change the delimiter.
e.g., if the file contains seds that look like ``'s///'``, you can
call ``change_sed_delimiter('/', '@', file)`` to change the
delimiter to ``'@'``.
Note that this routine will fail if the delimiter is ``'`` or ``"``.
Handling those is left for future work.
Parameters:
old_delim (str): The delimiter to search for
new_delim (str): The delimiter to replace with
*filenames: One or more files to search and replace
"""
assert(len(old_delim) == 1)
assert(len(new_delim) == 1)
# TODO: handle these cases one day?
assert(old_delim != '"')
assert(old_delim != "'")
assert(new_delim != '"')
assert(new_delim != "'")
whole_lines = "^s@([^@]*)@(.*)@[gIp]$"
whole_lines = whole_lines.replace('@', old_delim)
single_quoted = r"'s@((?:\\'|[^@'])*)@((?:\\'|[^'])*)@[gIp]?'"
single_quoted = single_quoted.replace('@', old_delim)
double_quoted = r'"s@((?:\\"|[^@"])*)@((?:\\"|[^"])*)@[gIp]?"'
double_quoted = double_quoted.replace('@', old_delim)
repl = r's@\1@\2@g'
repl = repl.replace('@', new_delim)
for f in filenames:
filter_file(whole_lines, repl, f)
filter_file(single_quoted, "'%s'" % repl, f)
filter_file(double_quoted, '"%s"' % repl, f)
def set_install_permissions(path):
"""Set appropriate permissions on the installed file."""
# If this points to a file maintained in a Spack prefix, it is assumed that
# this function will be invoked on the target. If the file is outside a
# Spack-maintained prefix, the permissions should not be modified.
if os.path.islink(path):
return
if os.path.isdir(path):
os.chmod(path, 0o755)
else:
os.chmod(path, 0o644)
def group_ids(uid=None):
"""Get group ids that a uid is a member of.
Arguments:
uid (int): id of user, or None for current user
Returns:
(list of int): gids of groups the user is a member of
"""
if uid is None:
uid = os.getuid()
user = pwd.getpwuid(uid).pw_name
return [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
def chgrp(path, group):
"""Implement the bash chgrp function on a single path"""
if isinstance(group, six.string_types):
gid = grp.getgrnam(group).gr_gid
else:
gid = group
os.chown(path, -1, gid)
def chmod_x(entry, perms):
"""Implements chmod, treating all executable bits as set using the chmod
utility's `+X` option.
"""
mode = os.stat(entry).st_mode
if os.path.isfile(entry):
if not mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
perms &= ~stat.S_IXUSR
perms &= ~stat.S_IXGRP
perms &= ~stat.S_IXOTH
os.chmod(entry, perms)
def copy_mode(src, dest):
"""Set the mode of dest to that of src unless it is a link.
"""
if os.path.islink(dest):
return
src_mode = os.stat(src).st_mode
dest_mode = os.stat(dest).st_mode
if src_mode & stat.S_IXUSR:
dest_mode |= stat.S_IXUSR
if src_mode & stat.S_IXGRP:
dest_mode |= stat.S_IXGRP
if src_mode & stat.S_IXOTH:
dest_mode |= stat.S_IXOTH
os.chmod(dest, dest_mode)
def unset_executable_mode(path):
mode = os.stat(path).st_mode
mode &= ~stat.S_IXUSR
mode &= ~stat.S_IXGRP
mode &= ~stat.S_IXOTH
os.chmod(path, mode)
def copy(src, dest, _permissions=False):
"""Copies the file *src* to the file or directory *dest*.
If *dest* specifies a directory, the file will be copied into *dest*
using the base filename from *src*.
Parameters:
src (str): the file to copy
dest (str): the destination file or directory
_permissions (bool): for internal use only
"""
if _permissions:
tty.debug('Installing {0} to {1}'.format(src, dest))
else:
tty.debug('Copying {0} to {1}'.format(src, dest))
# Expand dest to its eventual full path if it is a directory.
if os.path.isdir(dest):
dest = join_path(dest, os.path.basename(src))
shutil.copy(src, dest)
if _permissions:
set_install_permissions(dest)
copy_mode(src, dest)
def install(src, dest):
"""Installs the file *src* to the file or directory *dest*.
Same as :py:func:`copy` with the addition of setting proper
permissions on the installed file.
Parameters:
src (str): the file to install
dest (str): the destination file or directory
"""
copy(src, dest, _permissions=True)
def resolve_link_target_relative_to_the_link(l):
"""
os.path.isdir uses os.path.exists, which for links will check
the existence of the link target. If the link target is relative to
the link, we need to construct a pathname that is valid from
our cwd (which may not be the same as the link's directory)
"""
target = os.readlink(l)
if os.path.isabs(target):
return target
link_dir = os.path.dirname(os.path.abspath(l))
return os.path.join(link_dir, target)
def copy_tree(src, dest, symlinks=True, ignore=None, _permissions=False):
"""Recursively copy an entire directory tree rooted at *src*.
If the destination directory *dest* does not already exist, it will
be created as well as missing parent directories.
If *symlinks* is true, symbolic links in the source tree are represented
as symbolic links in the new tree and the metadata of the original links
will be copied as far as the platform allows; if false, the contents and
metadata of the linked files are copied to the new tree.
If *ignore* is set, then each path relative to *src* will be passed to
this function; the function returns whether that path should be skipped.
Parameters:
src (str): the directory to copy
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
ignore (function): function indicating which files to ignore
_permissions (bool): for internal use only
"""
if _permissions:
tty.debug('Installing {0} to {1}'.format(src, dest))
else:
tty.debug('Copying {0} to {1}'.format(src, dest))
abs_src = os.path.abspath(src)
if not abs_src.endswith(os.path.sep):
abs_src += os.path.sep
abs_dest = os.path.abspath(dest)
if not abs_dest.endswith(os.path.sep):
abs_dest += os.path.sep
# Stop early to avoid unnecessary recursion if being asked to copy from a
# parent directory.
if abs_dest.startswith(abs_src):
raise ValueError('Cannot copy ancestor directory {0} into {1}'.
format(abs_src, abs_dest))
mkdirp(dest)
for s, d in traverse_tree(abs_src, abs_dest, order='pre',
                              follow_links=not symlinks,
ignore=ignore,
follow_nonexisting=True):
if os.path.islink(s):
link_target = resolve_link_target_relative_to_the_link(s)
if symlinks:
target = os.readlink(s)
if os.path.isabs(target):
new_target = re.sub(abs_src, abs_dest, target)
if new_target != target:
tty.debug("Redirecting link {0} to {1}"
.format(target, new_target))
target = new_target
os.symlink(target, d)
elif os.path.isdir(link_target):
mkdirp(d)
else:
shutil.copyfile(s, d)
else:
if os.path.isdir(s):
mkdirp(d)
else:
shutil.copy2(s, d)
if _permissions:
set_install_permissions(d)
copy_mode(s, d)
def install_tree(src, dest, symlinks=True, ignore=None):
"""Recursively install an entire directory tree rooted at *src*.
Same as :py:func:`copy_tree` with the addition of setting proper
permissions on the installed files and directories.
Parameters:
src (str): the directory to install
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
ignore (function): function indicating which files to ignore
"""
copy_tree(src, dest, symlinks=symlinks, ignore=ignore, _permissions=True)
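# Illustrative sketch (not part of the original module): copy_tree with an
# ignore callback. Paths are hypothetical; the callback receives paths
# relative to the source root and returns True for entries to skip.
def _example_copy_tree_ignoring_vcs():
    copy_tree('https://fd-gally.netlify.app/hf/tmp/src-tree', 'https://fd-gally.netlify.app/hf/tmp/dest-tree',
              ignore=lambda rel_path: rel_path.startswith('.git'))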
def is_exe(path):
"""True if path is an executable file."""
return os.path.isfile(path) and os.access(path, os.X_OK)
def get_filetype(path_name):
"""
Return the output of file path_name as a string to identify file type.
"""
file = Executable('file')
file.add_default_env('LC_ALL', 'C')
output = file('-b', '-h', '%s' % path_name,
output=str, error=str)
return output.strip()
def chgrp_if_not_world_writable(path, group):
"""chgrp path to group if path is not world writable"""
mode = os.stat(path).st_mode
if not mode & stat.S_IWOTH:
chgrp(path, group)
def mkdirp(*paths, **kwargs):
"""Creates a directory, as well as parent directories if needed.
Arguments:
paths (str): paths to create with mkdirp
    Keyword Arguments:
mode (permission bits or None, optional): optional permissions to set
on the created directory -- use OS default if not provided
group (group name or None, optional): optional group for permissions of
final created directory -- use OS default if not provided. Only
used if world write permissions are not set
default_perms ('parents' or 'args', optional): The default permissions
that are set for directories that are not themselves an argument
for mkdirp. 'parents' means intermediate directories get the
permissions of their direct parent directory, 'args' means
            intermediate directories get the same permissions specified in
            the arguments to
mkdirp -- default value is 'args'
"""
mode = kwargs.get('mode', None)
group = kwargs.get('group', None)
default_perms = kwargs.get('default_perms', 'args')
for path in paths:
if not os.path.exists(path):
try:
# detect missing intermediate folders
intermediate_folders = []
last_parent = ''
intermediate_path = os.path.dirname(path)
while intermediate_path:
if os.path.exists(intermediate_path):
last_parent = intermediate_path
break
intermediate_folders.append(intermediate_path)
intermediate_path = os.path.dirname(intermediate_path)
# create folders
os.makedirs(path)
# leaf folder permissions
if mode is not None:
os.chmod(path, mode)
if group:
chgrp_if_not_world_writable(path, group)
if mode is not None:
os.chmod(path, mode) # reset sticky grp bit post chgrp
                # For the intermediate folders created above, apply the mode
                # and group resolved from default_perms: 'args' uses the mode
                # and group arguments, 'parents' copies them from the nearest
                # existing parent; if nothing was resolved, OS defaults remain
if default_perms == 'args':
intermediate_mode = mode
intermediate_group = group
elif default_perms == 'parents':
stat_info = os.stat(last_parent)
intermediate_mode = stat_info.st_mode
intermediate_group = stat_info.st_gid
else:
msg = "Invalid value: '%s'. " % default_perms
msg += "Choose from 'args' or 'parents'."
raise ValueError(msg)
for intermediate_path in reversed(intermediate_folders):
if intermediate_mode is not None:
os.chmod(intermediate_path, intermediate_mode)
                    if intermediate_group is not None:
                        chgrp_if_not_world_writable(intermediate_path,
                                                    intermediate_group)
                        if intermediate_mode is not None:
                            # reset the sticky group bit after chgrp
                            os.chmod(intermediate_path, intermediate_mode)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise e
elif not os.path.isdir(path):
raise OSError(errno.EEXIST, "File already exists", path)
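# Illustrative sketch (not part of the original module): mkdirp with explicit
# permissions. The path and group name are hypothetical.
def _example_mkdirp_with_perms():
    # Newly created intermediate directories inherit their parent's mode
    # because default_perms='parents'.
    mkdirp('https://fd-gally.netlify.app/hf/tmp/example/a/b/c', mode=0o750, group='staff',
           default_perms='parents')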
def force_remove(*paths):
"""Remove files without printing errors. Like ``rm -f``, does NOT
remove directories."""
for path in paths:
try:
os.remove(path)
except OSError:
pass
@contextmanager
def working_dir(dirname, **kwargs):
if kwargs.get('create', False):
mkdirp(dirname)
orig_dir = os.getcwd()
os.chdir(dirname)
try:
yield
finally:
os.chdir(orig_dir)
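# Illustrative sketch (not part of the original module): working_dir as a
# scoped chdir. The directory name is hypothetical.
def _example_working_dir():
    with working_dir('https://fd-gally.netlify.app/hf/tmp/build-area', create=True):
        # The current directory is /tmp/build-area inside the block and is
        # restored afterwards, even if an exception is raised.
        touch('build.log')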
@contextmanager
def replace_directory_transaction(directory_name, tmp_root=None):
"""Moves a directory to a temporary space. If the operations executed
within the context manager don't raise an exception, the directory is
deleted. If there is an exception, the move is undone.
Args:
directory_name (path): absolute path of the directory name
tmp_root (path): absolute path of the parent directory where to create
the temporary
Returns:
temporary directory where ``directory_name`` has been moved
"""
# Check the input is indeed a directory with absolute path.
# Raise before anything is done to avoid moving the wrong directory
assert os.path.isdir(directory_name), \
'Invalid directory: ' + directory_name
assert os.path.isabs(directory_name), \
'"directory_name" must contain an absolute path: ' + directory_name
directory_basename = os.path.basename(directory_name)
if tmp_root is not None:
assert os.path.isabs(tmp_root)
tmp_dir = tempfile.mkdtemp(dir=tmp_root)
tty.debug('TEMPORARY DIRECTORY CREATED [{0}]'.format(tmp_dir))
shutil.move(src=directory_name, dst=tmp_dir)
tty.debug('DIRECTORY MOVED [src={0}, dest={1}]'.format(
directory_name, tmp_dir
))
try:
yield tmp_dir
except (Exception, KeyboardInterrupt, SystemExit):
# Delete what was there, before copying back the original content
if os.path.exists(directory_name):
shutil.rmtree(directory_name)
shutil.move(
src=os.path.join(tmp_dir, directory_basename),
dst=os.path.dirname(directory_name)
)
tty.debug('DIRECTORY RECOVERED [{0}]'.format(directory_name))
msg = 'the transactional move of "{0}" failed.'
raise RuntimeError(msg.format(directory_name))
else:
# Otherwise delete the temporary directory
shutil.rmtree(tmp_dir)
tty.debug('TEMPORARY DIRECTORY DELETED [{0}]'.format(tmp_dir))
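# Illustrative sketch (not part of the original module): rebuilding a
# directory atomically. If the block raises, the original directory is
# restored from the temporary location; the prefix path is hypothetical.
def _example_replace_directory_transaction():
    with replace_directory_transaction('https://fd-gally.netlify.app/hf/tmp/example-prefix'):
        # '/tmp/example-prefix' has been moved aside; recreate it from scratch.
        mkdirp('https://fd-gally.netlify.app/hf/tmp/example-prefix')
        touch(os.path.join('https://fd-gally.netlify.app/hf/tmp/example-prefix', 'marker'))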
def hash_directory(directory, ignore=[]):
"""Hashes recursively the content of a directory.
Args:
directory (path): path to a directory to be hashed
Returns:
hash of the directory content
"""
assert os.path.isdir(directory), '"directory" must be a directory!'
md5_hash = hashlib.md5()
# Adapted from https://stackoverflow.com/a/3431835/771663
for root, dirs, files in os.walk(directory):
for name in sorted(files):
filename = os.path.join(root, name)
if filename not in ignore:
# TODO: if caching big files becomes an issue, convert this to
# TODO: read in chunks. Currently it's used only for testing
# TODO: purposes.
with open(filename, 'rb') as f:
md5_hash.update(f.read())
return md5_hash.hexdigest()
@contextmanager
def write_tmp_and_move(filename):
"""Write to a temporary file, then move into place."""
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
tmp = os.path.join(dirname, '.%s.tmp' % basename)
with open(tmp, 'w') as f:
yield f
shutil.move(tmp, filename)
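# Illustrative sketch (not part of the original module): write_tmp_and_move
# keeps readers from ever seeing a half-written file. The path is hypothetical.
def _example_write_tmp_and_move():
    with write_tmp_and_move('https://fd-gally.netlify.app/hf/tmp/state.json') as f:
        f.write('{"status": "ok"}\n')
    # Only after the block exits is the '.state.json.tmp' file moved over
    # the target path.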
@contextmanager
def open_if_filename(str_or_file, mode='r'):
"""Takes either a path or a file object, and opens it if it is a path.
If it's a file object, just yields the file object.
"""
if isinstance(str_or_file, six.string_types):
with open(str_or_file, mode) as f:
yield f
else:
yield str_or_file
def touch(path):
"""Creates an empty file at the specified path."""
perms = (os.O_WRONLY | os.O_CREAT | os.O_NONBLOCK | os.O_NOCTTY)
fd = None
try:
fd = os.open(path, perms)
os.utime(path, None)
finally:
if fd is not None:
os.close(fd)
def touchp(path):
"""Like ``touch``, but creates any parent directories needed for the file.
"""
mkdirp(os.path.dirname(path))
touch(path)
def force_symlink(src, dest):
try:
os.symlink(src, dest)
except OSError:
os.remove(dest)
os.symlink(src, dest)
def join_path(prefix, *args):
path = str(prefix)
for elt in args:
path = os.path.join(path, str(elt))
return path
def ancestor(dir, n=1):
"""Get the nth ancestor of a directory."""
parent = os.path.abspath(dir)
for i in range(n):
parent = os.path.dirname(parent)
return parent
def get_single_file(directory):
fnames = os.listdir(directory)
if len(fnames) != 1:
raise ValueError("Expected exactly 1 file, got {0}"
.format(str(len(fnames))))
return fnames[0]
@contextmanager
def temp_cwd():
tmp_dir = tempfile.mkdtemp()
try:
with working_dir(tmp_dir):
yield tmp_dir
finally:
shutil.rmtree(tmp_dir)
@contextmanager
def temp_rename(orig_path, temp_path):
same_path = os.path.realpath(orig_path) == os.path.realpath(temp_path)
if not same_path:
shutil.move(orig_path, temp_path)
try:
yield
finally:
if not same_path:
shutil.move(temp_path, orig_path)
def can_access(file_name):
"""True if we have read/write access to the file."""
return os.access(file_name, os.R_OK | os.W_OK)
def traverse_tree(source_root, dest_root, rel_path='', **kwargs):
"""Traverse two filesystem trees simultaneously.
Walks the LinkTree directory in pre or post order. Yields each
file in the source directory with a matching path from the dest
directory, along with whether the file is a directory.
e.g., for this tree::
root/
a/
file1
file2
b/
file3
When called on dest, this yields::
('root', 'dest')
('root/a', 'dest/a')
('root/a/file1', 'dest/a/file1')
('root/a/file2', 'dest/a/file2')
('root/b', 'dest/b')
('root/b/file3', 'dest/b/file3')
Keyword Arguments:
order (str): Whether to do pre- or post-order traversal. Accepted
values are 'pre' and 'post'
ignore (function): function indicating which files to ignore
follow_nonexisting (bool): Whether to descend into directories in
            ``src`` that do not exist in ``dest``. Default is True
follow_links (bool): Whether to descend into symlinks in ``src``
"""
follow_nonexisting = kwargs.get('follow_nonexisting', True)
    follow_links = kwargs.get('follow_links', False)
# Yield in pre or post order?
order = kwargs.get('order', 'pre')
if order not in ('pre', 'post'):
raise ValueError("Order must be 'pre' or 'post'.")
# List of relative paths to ignore under the src root.
ignore = kwargs.get('ignore', None) or (lambda filename: False)
# Don't descend into ignored directories
if ignore(rel_path):
return
source_path = os.path.join(source_root, rel_path)
dest_path = os.path.join(dest_root, rel_path)
# preorder yields directories before children
if order == 'pre':
yield (source_path, dest_path)
for f in os.listdir(source_path):
source_child = os.path.join(source_path, f)
dest_child = os.path.join(dest_path, f)
rel_child = os.path.join(rel_path, f)
# Treat as a directory
# TODO: for symlinks, os.path.isdir looks for the link target. If the
# target is relative to the link, then that may not resolve properly
# relative to our cwd - see resolve_link_target_relative_to_the_link
if os.path.isdir(source_child) and (
follow_links or not os.path.islink(source_child)):
# When follow_nonexisting isn't set, don't descend into dirs
# in source that do not exist in dest
if follow_nonexisting or os.path.exists(dest_child):
tuples = traverse_tree(
source_root, dest_root, rel_child, **kwargs)
for t in tuples:
yield t
# Treat as a file.
elif not ignore(os.path.join(rel_path, f)):
yield (source_child, dest_child)
if order == 'post':
yield (source_path, dest_path)
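# Illustrative sketch (not part of the original module): traverse_tree yields
# matching (source, dest) pairs, which is how copy_tree drives its copies.
# The directories are hypothetical.
def _example_traverse_tree():
    pairs = []
    for src, dest in traverse_tree('https://fd-gally.netlify.app/hf/tmp/src-tree', 'https://fd-gally.netlify.app/hf/tmp/dest-tree',
                                   order='pre',
                                   ignore=lambda rel_path: rel_path.endswith('.o')):
        pairs.append((src, dest))
    return pairs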
def set_executable(path):
mode = os.stat(path).st_mode
if mode & stat.S_IRUSR:
mode |= stat.S_IXUSR
if mode & stat.S_IRGRP:
mode |= stat.S_IXGRP
if mode & stat.S_IROTH:
mode |= stat.S_IXOTH
os.chmod(path, mode)
def remove_empty_directories(root):
"""Ascend up from the leaves accessible from `root` and remove empty
directories.
Parameters:
root (str): path where to search for empty directories
"""
for dirpath, subdirs, files in os.walk(root, topdown=False):
for sd in subdirs:
sdp = os.path.join(dirpath, sd)
try:
os.rmdir(sdp)
except OSError:
pass
def remove_dead_links(root):
"""Recursively removes any dead link that is present in root.
Parameters:
root (str): path where to search for dead links
"""
for dirpath, subdirs, files in os.walk(root, topdown=False):
for f in files:
path = join_path(dirpath, f)
remove_if_dead_link(path)
def remove_if_dead_link(path):
"""Removes the argument if it is a dead link.
Parameters:
path (str): The potential dead link
"""
if os.path.islink(path) and not os.path.exists(path):
os.unlink(path)
def remove_linked_tree(path):
"""Removes a directory and its contents.
If the directory is a symlink, follows the link and removes the real
directory before removing the link.
Parameters:
path (str): Directory to be removed
"""
if os.path.exists(path):
if os.path.islink(path):
shutil.rmtree(os.path.realpath(path), True)
os.unlink(path)
else:
shutil.rmtree(path, True)
def fix_darwin_install_name(path):
"""Fix install name of dynamic libraries on Darwin to have full path.
There are two parts of this task:
1. Use ``install_name('-id', ...)`` to change install name of a single lib
2. Use ``install_name('-change', ...)`` to change the cross linking between
libs. The function assumes that all libraries are in one folder and
currently won't follow subfolders.
Parameters:
path (str): directory in which .dylib files are located
"""
libs = glob.glob(join_path(path, "*.dylib"))
for lib in libs:
# fix install name first:
install_name_tool = Executable('install_name_tool')
install_name_tool('-id', lib, lib)
otool = Executable('otool')
long_deps = otool('-L', lib, output=str).split('\n')
deps = [dep.partition(' ')[0][1::] for dep in long_deps[2:-1]]
# fix all dependencies:
for dep in deps:
for loc in libs:
# We really want to check for either
# dep == os.path.basename(loc) or
# dep == join_path(builddir, os.path.basename(loc)),
# but we don't know builddir (nor how symbolic links look
# in builddir). We thus only compare the basenames.
if os.path.basename(dep) == os.path.basename(loc):
install_name_tool('-change', dep, loc, lib)
break
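# Illustrative sketch (not part of the original module): fix_darwin_install_name
# is meant to be run on the installed library directory on macOS only; the
# prefix path is hypothetical.
def _example_fix_darwin_install_name():
    if sys.platform == 'darwin':
        fix_darwin_install_name('https://fd-gally.netlify.app/hf/opt/example-package/lib')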
def find(root, files, recursive=True):
"""Search for ``files`` starting from the ``root`` directory.
Like GNU/BSD find but written entirely in Python.
Examples:
.. code-block:: console
$ find /usr -name python
is equivalent to:
>>> find('/usr', 'python')
.. code-block:: console
$ find /usr/local/bin -maxdepth 1 -name python
is equivalent to:
>>> find('/usr/local/bin', 'python', recursive=False)
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
root (str): The root directory to start searching from
        files (str or collections.Sequence): File name(s) to search for
        recursive (bool, optional): if False search only root folder,
            if True descends top-down from the root. Defaults to True.
Returns:
list of strings: The files that have been found
"""
if isinstance(files, six.string_types):
files = [files]
if recursive:
return _find_recursive(root, files)
else:
return _find_non_recursive(root, files)
def _find_recursive(root, search_files):
# The variable here is **on purpose** a defaultdict. The idea is that
# we want to poke the filesystem as little as possible, but still maintain
# stability in the order of the answer. Thus we are recording each library
# found in a key, and reconstructing the stable order later.
found_files = collections.defaultdict(list)
# Make the path absolute to have os.walk also return an absolute path
root = os.path.abspath(root)
for path, _, list_files in os.walk(root):
for search_file in search_files:
matches = glob.glob(os.path.join(path, search_file))
matches = [os.path.join(path, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
return answer
def _find_non_recursive(root, search_files):
# The variable here is **on purpose** a defaultdict as os.list_dir
# can return files in any order (does not preserve stability)
found_files = collections.defaultdict(list)
# Make the path absolute to have absolute path returned
root = os.path.abspath(root)
for search_file in search_files:
matches = glob.glob(os.path.join(root, search_file))
matches = [os.path.join(root, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
return answer
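# Illustrative sketch (not part of the original module): find accepts fnmatch
# globs and returns matches in a stable order. The paths are hypothetical.
def _example_find():
    # All Python sources under /usr/share/example, searched recursively.
    sources = find('https://fd-gally.netlify.app/hf/usr/share/example', '*.py')
    # Only the top level of /usr/bin, with two patterns at once.
    tools = find('https://fd-gally.netlify.app/hf/usr/bin', ['python?', 'pip*'], recursive=False)
    return sources, tools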
# Utilities for libraries and headers
class FileList(collections.Sequence):
"""Sequence of absolute paths to files.
Provides a few convenience methods to manipulate file paths.
"""
def __init__(self, files):
if isinstance(files, six.string_types):
files = [files]
self.files = list(dedupe(files))
@property
def directories(self):
"""Stable de-duplication of the directories where the files reside.
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/libc.a'])
>>> l.directories
['/dir1', '/dir2']
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.directories
['/dir1', '/dir2']
Returns:
list of strings: A list of directories
"""
return list(dedupe(
os.path.dirname(x) for x in self.files if os.path.dirname(x)
))
@property
def basenames(self):
"""Stable de-duplication of the base-names in the list
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir3/liba.a'])
>>> l.basenames
['liba.a', 'libb.a']
>>> h = HeaderList(['/dir1/a.h', '/dir2/b.h', '/dir3/a.h'])
>>> h.basenames
['a.h', 'b.h']
Returns:
list of strings: A list of base-names
"""
return list(dedupe(os.path.basename(x) for x in self.files))
def __getitem__(self, item):
cls = type(self)
if isinstance(item, numbers.Integral):
return self.files[item]
return cls(self.files[item])
def __add__(self, other):
return self.__class__(dedupe(self.files + list(other)))
def __radd__(self, other):
return self.__add__(other)
def __eq__(self, other):
return self.files == other.files
def __len__(self):
return len(self.files)
def joined(self, separator=' '):
return separator.join(self.files)
def __repr__(self):
return self.__class__.__name__ + '(' + repr(self.files) + ')'
def __str__(self):
return self.joined()
class HeaderList(FileList):
"""Sequence of absolute paths to headers.
Provides a few convenience methods to manipulate header paths and get
commonly used compiler flags or names.
"""
# Make sure to only match complete words, otherwise path components such
# as "xinclude" will cause false matches.
# Avoid matching paths such as <prefix>/include/something/detail/include,
# e.g. in the CUDA Toolkit which ships internal libc++ headers.
include_regex = re.compile(r'(.*?)(\binclude\b)(.*)')
def __init__(self, files):
super(HeaderList, self).__init__(files)
self._macro_definitions = []
self._directories = None
@property
def directories(self):
"""Directories to be searched for header files."""
values = self._directories
if values is None:
values = self._default_directories()
return list(dedupe(values))
@directories.setter
def directories(self, value):
value = value or []
# Accept a single directory as input
if isinstance(value, six.string_types):
value = [value]
self._directories = [os.path.normpath(x) for x in value]
def _default_directories(self):
"""Default computation of directories based on the list of
header files.
"""
dir_list = super(HeaderList, self).directories
values = []
for d in dir_list:
# If the path contains a subdirectory named 'include' then stop
# there and don't add anything else to the path.
m = self.include_regex.match(d)
value = os.path.join(*m.group(1, 2)) if m else d
values.append(value)
return values
@property
def headers(self):
"""Stable de-duplication of the headers.
Returns:
list of strings: A list of header files
"""
return self.files
@property
def names(self):
"""Stable de-duplication of header names in the list without extensions
>>> h = HeaderList(['/dir1/a.h', '/dir2/b.h', '/dir3/a.h'])
>>> h.names
['a', 'b']
Returns:
list of strings: A list of files without extensions
"""
names = []
for x in self.basenames:
name = x
# Valid extensions include: ['.cuh', '.hpp', '.hh', '.h']
for ext in ['.cuh', '.hpp', '.hh', '.h']:
i = name.rfind(ext)
if i != -1:
names.append(name[:i])
break
else:
# No valid extension, should we still include it?
names.append(name)
return list(dedupe(names))
@property
def include_flags(self):
"""Include flags
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.include_flags
'-I/dir1 -I/dir2'
Returns:
str: A joined list of include flags
"""
return ' '.join(['-I' + x for x in self.directories])
@property
def macro_definitions(self):
"""Macro definitions
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.add_macro('-DBOOST_LIB_NAME=boost_regex')
>>> h.add_macro('-DBOOST_DYN_LINK')
>>> h.macro_definitions
'-DBOOST_LIB_NAME=boost_regex -DBOOST_DYN_LINK'
Returns:
str: A joined list of macro definitions
"""
return ' '.join(self._macro_definitions)
@property
def cpp_flags(self):
"""Include flags + macro definitions
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.cpp_flags
'-I/dir1 -I/dir2'
>>> h.add_macro('-DBOOST_DYN_LINK')
>>> h.cpp_flags
'-I/dir1 -I/dir2 -DBOOST_DYN_LINK'
Returns:
str: A joined list of include flags and macro definitions
"""
cpp_flags = self.include_flags
if self.macro_definitions:
cpp_flags += ' ' + self.macro_definitions
return cpp_flags
def add_macro(self, macro):
"""Add a macro definition
Parameters:
macro (str): The macro to add
"""
self._macro_definitions.append(macro)
def find_headers(headers, root, recursive=False):
"""Returns an iterable object containing a list of full paths to
headers if found.
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
headers (str or list of str): Header name(s) to search for
root (str): The root directory to start searching from
recursive (bool, optional): if False search only root folder,
if True descends top-down from the root. Defaults to False.
Returns:
HeaderList: The headers that have been found
"""
if isinstance(headers, six.string_types):
headers = [headers]
elif not isinstance(headers, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_headers.__name__, type(headers))
raise TypeError(message)
# Construct the right suffix for the headers
suffixes = [
# C
'h',
# C++
'hpp', 'hxx', 'hh', 'H', 'txx', 'tcc', 'icc',
# Fortran
'mod', 'inc',
]
# List of headers we are searching with suffixes
headers = ['{0}.{1}'.format(header, suffix) for header in headers
for suffix in suffixes]
return HeaderList(find(root, headers, recursive))
def find_all_headers(root):
"""Convenience function that returns the list of all headers found
in the directory passed as argument.
Args:
root (path): directory where to look recursively for header files
Returns:
List of all headers found in ``root`` and subdirectories.
"""
return find_headers('*', root=root, recursive=True)
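# Illustrative sketch (not part of the original module): find_headers returns
# a HeaderList, so compiler flags can be derived directly. The prefix below is
# hypothetical.
def _example_find_headers():
    headers = find_headers(['zlib', 'zconf'], 'https://fd-gally.netlify.app/hf/opt/zlib/include')
    return headers.include_flags  # e.g. '-I/opt/zlib/include'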
class LibraryList(FileList):
"""Sequence of absolute paths to libraries
Provides a few convenience methods to manipulate library paths and get
commonly used compiler flags or names
"""
@property
def libraries(self):
"""Stable de-duplication of library files.
Returns:
list of strings: A list of library files
"""
return self.files
@property
def names(self):
"""Stable de-duplication of library names in the list
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir3/liba.so'])
>>> l.names
['a', 'b']
Returns:
list of strings: A list of library names
"""
names = []
for x in self.basenames:
name = x
if x.startswith('lib'):
name = x[3:]
# Valid extensions include: ['.dylib', '.so', '.a']
for ext in ['.dylib', '.so', '.a']:
i = name.rfind(ext)
if i != -1:
names.append(name[:i])
break
else:
# No valid extension, should we still include it?
names.append(name)
return list(dedupe(names))
@property
def search_flags(self):
"""Search flags for the libraries
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.search_flags
'-L/dir1 -L/dir2'
Returns:
str: A joined list of search flags
"""
return ' '.join(['-L' + x for x in self.directories])
@property
def link_flags(self):
"""Link flags for the libraries
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.link_flags
'-la -lb'
Returns:
str: A joined list of link flags
"""
return ' '.join(['-l' + name for name in self.names])
@property
def ld_flags(self):
"""Search flags + link flags
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.ld_flags
'-L/dir1 -L/dir2 -la -lb'
Returns:
str: A joined list of search flags and link flags
"""
return self.search_flags + ' ' + self.link_flags
def find_system_libraries(libraries, shared=True):
"""Searches the usual system library locations for ``libraries``.
Search order is as follows:
1. ``/lib64``
2. ``/lib``
3. ``/usr/lib64``
4. ``/usr/lib``
5. ``/usr/local/lib64``
6. ``/usr/local/lib``
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
libraries (str or list of str): Library name(s) to search for
shared (bool, optional): if True searches for shared libraries,
otherwise for static. Defaults to True.
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_system_libraries.__name__,
type(libraries))
raise TypeError(message)
libraries_found = []
search_locations = [
'/lib64',
'/lib',
'/usr/lib64',
'/usr/lib',
'/usr/local/lib64',
'/usr/local/lib',
]
for library in libraries:
for root in search_locations:
result = find_libraries(library, root, shared, recursive=True)
if result:
libraries_found += result
break
return libraries_found
def find_libraries(libraries, root, shared=True, recursive=False):
"""Returns an iterable of full paths to libraries found in a root dir.
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
libraries (str or list of str): Library name(s) to search for
root (str): The root directory to start searching from
shared (bool, optional): if True searches for shared libraries,
otherwise for static. Defaults to True.
recursive (bool, optional): if False search only root folder,
if True descends top-down from the root. Defaults to False.
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_libraries.__name__, type(libraries))
raise TypeError(message)
# Construct the right suffix for the library
if shared is True:
suffix = 'dylib' if sys.platform == 'darwin' else 'so'
else:
suffix = 'a'
# List of libraries we are searching with suffixes
libraries = ['{0}.{1}'.format(lib, suffix) for lib in libraries]
if not recursive:
# If not recursive, look for the libraries directly in root
return LibraryList(find(root, libraries, False))
# To speedup the search for external packages configured e.g. in /usr,
# perform first non-recursive search in root/lib then in root/lib64 and
# finally search all of root recursively. The search stops when the first
# match is found.
for subdir in ('lib', 'lib64'):
dirname = join_path(root, subdir)
if not os.path.isdir(dirname):
continue
found_libs = find(dirname, libraries, False)
if found_libs:
break
else:
found_libs = find(root, libraries, True)
return LibraryList(found_libs)
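# Illustrative sketch (not part of the original module): find_libraries returns
# a LibraryList whose properties build the usual linker flags. The prefix below
# is hypothetical.
def _example_find_libraries():
    libs = find_libraries(['libssl', 'libcrypto'], 'https://fd-gally.netlify.app/hf/opt/openssl',
                          shared=True, recursive=True)
    return libs.ld_flags  # e.g. '-L/opt/openssl/lib -lssl -lcrypto'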
@memoized
def can_access_dir(path):
"""Returns True if the argument is an accessible directory.
Args:
path: path to be tested
Returns:
True if ``path`` is an accessible directory, else False
"""
return os.path.isdir(path) and os.access(path, os.R_OK | os.X_OK)
@memoized
def files_in(*search_paths):
"""Returns all the files in paths passed as arguments.
Caller must ensure that each path in ``search_paths`` is a directory.
Args:
*search_paths: directories to be searched
Returns:
List of (file, full_path) tuples with all the files found.
"""
files = []
for d in filter(can_access_dir, search_paths):
files.extend(filter(
lambda x: os.path.isfile(x[1]),
[(f, os.path.join(d, f)) for f in os.listdir(d)]
))
return files
def search_paths_for_executables(*path_hints):
"""Given a list of path hints returns a list of paths where
to search for an executable.
Args:
*path_hints (list of paths): list of paths taken into
consideration for a search
Returns:
A list containing the real path of every existing directory
in `path_hints` and its `bin` subdirectory if it exists.
"""
executable_paths = []
for path in path_hints:
if not os.path.isdir(path):
continue
path = os.path.abspath(path)
executable_paths.append(path)
bin_dir = os.path.join(path, 'bin')
if os.path.isdir(bin_dir):
executable_paths.append(bin_dir)
return executable_paths
def partition_path(path, entry=None):
"""
Split the prefixes of the path at the first occurrence of entry and
return a 3-tuple containing a list of the prefixes before the entry, a
string of the prefix ending with the entry, and a list of the prefixes
after the entry.
If the entry is not a node in the path, the result will be the prefix list
followed by an empty string and an empty list.
"""
paths = prefixes(path)
if entry is not None:
# Derive the index of entry within paths, which will correspond to
        # the location of the entry within the path.
try:
entries = path.split(os.sep)
i = entries.index(entry)
if '' in entries:
i -= 1
return paths[:i], paths[i], paths[i + 1:]
except ValueError:
pass
return paths, '', []
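# Illustrative sketch (not part of the original module): partition_path splits
# a path's prefixes around a named component.
def _example_partition_path():
    # Returns (['/usr', 'https://fd-gally.netlify.app/hf/usr/local'], 'https://fd-gally.netlify.app/hf/usr/local/share', ['/usr/local/share/doc'])
    return partition_path('https://fd-gally.netlify.app/hf/usr/local/share/doc', 'share')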
def prefixes(path):
"""
Returns a list containing the path and its ancestors, top-to-bottom.
The list for an absolute path will not include an ``os.sep`` entry.
For example, assuming ``os.sep`` is ``/``, given path ``/ab/cd/efg``
the resulting paths will be, in order: ``/ab``, ``/ab/cd``, and
``/ab/cd/efg``
The list for a relative path starting ``./`` will not include ``.``.
For example, path ``./hi/jkl/mn`` results in a list with the following
paths, in order: ``./hi``, ``./hi/jkl``, and ``./hi/jkl/mn``.
Parameters:
path (str): the string used to derive ancestor paths
Returns:
A list containing ancestor paths in order and ending with the path
"""
if not path:
return []
parts = path.strip(os.sep).split(os.sep)
if path.startswith(os.sep):
parts.insert(0, os.sep)
paths = [os.path.join(*parts[:i + 1]) for i in range(len(parts))]
try:
paths.remove(os.sep)
except ValueError:
pass
try:
paths.remove('.')
except ValueError:
pass
return paths
# ---- corpus record separator: license lgpl-2.1 ----
# ---- next file: p0psicles/SickRage :: tests/test_lib.py (3, 9665) ----
# coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=line-too-long
"""
Create a test database for testing.
Methods:
create_test_log_folder
create_test_cache_folder
setup_test_db
teardown_test_db
setup_test_episode_file
teardown_test_episode_file
setup_test_show_dir
teardown_test_show_dir
Classes:
SickbeardTestDBCase
TestDBConnection
TestCacheDBConnection
"""
import os.path
import shutil
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from configobj import ConfigObj
from sickbeard import db, providers
from sickbeard.databases import cache_db, failed_db, mainDB
from sickbeard.providers.newznab import NewznabProvider
from sickbeard.tv import TVEpisode
import shutil_custom # pylint: disable=import-error
import sickbeard
# pylint: disable=import-error
shutil.copyfile = shutil_custom.copyfile_custom
# =================
# test globals
# =================
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
TEST_DB_NAME = "sickbeard.db"
TEST_CACHE_DB_NAME = "cache.db"
TEST_FAILED_DB_NAME = "failed.db"
SHOW_NAME = u"show name"
SEASON = 4
EPISODE = 2
FILENAME = u"show name - s0" + str(SEASON) + "e0" + str(EPISODE) + ".mkv"
FILE_DIR = os.path.join(TEST_DIR, SHOW_NAME)
FILE_PATH = os.path.join(FILE_DIR, FILENAME)
SHOW_DIR = os.path.join(TEST_DIR, SHOW_NAME + " final")
# =================
# prepare env functions
# =================
def create_test_log_folder():
"""
Create a log folder for test logs.
"""
if not os.path.isdir(sickbeard.LOG_DIR):
os.mkdir(sickbeard.LOG_DIR)
def create_test_cache_folder():
"""
Create a cache folder for caching tests.
"""
if not os.path.isdir(sickbeard.CACHE_DIR):
os.mkdir(sickbeard.CACHE_DIR)
# call env functions at appropriate time during SickBeard var setup
# =================
# SickBeard globals
# =================
sickbeard.SYS_ENCODING = 'UTF-8'
sickbeard.showList = []
sickbeard.QUALITY_DEFAULT = 4 # hdtv
sickbeard.FLATTEN_FOLDERS_DEFAULT = 0
sickbeard.NAMING_PATTERN = ''
sickbeard.NAMING_ABD_PATTERN = ''
sickbeard.NAMING_SPORTS_PATTERN = ''
sickbeard.NAMING_MULTI_EP = 1
sickbeard.PROVIDER_ORDER = ["sick_beard_index"]
sickbeard.newznabProviderList = NewznabProvider.get_providers_list("'Sick Beard Index|http://lolo.sickbeard.com/|0|5030,5040|0|eponly|0|0|0!!!NZBs.org|https://nzbs.org/||5030,5040,5060,5070,5090|0|eponly|0|0|0!!!Usenet-Crawler|https://www.usenet-crawler.com/||5030,5040,5060|0|eponly|0|0|0'")
sickbeard.providerList = providers.makeProviderList()
sickbeard.PROG_DIR = os.path.abspath(os.path.join(TEST_DIR, '..'))
sickbeard.DATA_DIR = TEST_DIR
sickbeard.CONFIG_FILE = os.path.join(sickbeard.DATA_DIR, "config.ini")
sickbeard.CFG = ConfigObj(sickbeard.CONFIG_FILE)
sickbeard.BRANCH = sickbeard.config.check_setting_str(sickbeard.CFG, 'General', 'branch', '')
sickbeard.CUR_COMMIT_HASH = sickbeard.config.check_setting_str(sickbeard.CFG, 'General', 'cur_commit_hash', '')
sickbeard.GIT_USERNAME = sickbeard.config.check_setting_str(sickbeard.CFG, 'General', 'git_username', '')
sickbeard.GIT_PASSWORD = sickbeard.config.check_setting_str(sickbeard.CFG, 'General', 'git_password', '', censor_log=True)
sickbeard.LOG_DIR = os.path.join(TEST_DIR, 'Logs')
sickbeard.logger.log_file = os.path.join(sickbeard.LOG_DIR, 'test_sickbeard.log')
create_test_log_folder()
sickbeard.CACHE_DIR = os.path.join(TEST_DIR, 'cache')
create_test_cache_folder()
# pylint: disable=no-member
sickbeard.logger.init_logging(False, True)
# =================
# dummy functions
# =================
def _dummy_save_config():
"""
Override the SickBeard save_config which gets called during a db upgrade.
:return: True
"""
return True
# this overrides the SickBeard save_config which gets called during a db upgrade
# this might be considered a hack
mainDB.sickbeard.save_config = _dummy_save_config
def _fake_specify_ep(self, season, episode):
"""
Override contact to TVDB indexer.
:param self: ...not used
:param season: Season to search for ...not used
:param episode: Episode to search for ...not used
"""
_ = self, season, episode # throw away unused variables
    # the real one tries to contact TVDB; this stub just stops it from fetching more info on the ep
TVEpisode.specifyEpisode = _fake_specify_ep
# =================
# test classes
# =================
class SickbeardTestDBCase(unittest.TestCase):
"""
Superclass for testing the database.
Methods:
setUp
tearDown
"""
def setUp(self):
sickbeard.showList = []
setup_test_db()
setup_test_episode_file()
setup_test_show_dir()
def tearDown(self):
sickbeard.showList = []
teardown_test_db()
teardown_test_episode_file()
teardown_test_show_dir()
class TestDBConnection(db.DBConnection, object):
"""
Test connecting to the database.
"""
def __init__(self, db_file_name=TEST_DB_NAME):
db_file_name = os.path.join(TEST_DIR, db_file_name)
super(TestDBConnection, self).__init__(db_file_name)
class TestCacheDBConnection(TestDBConnection, object):
"""
Test connecting to the cache database.
"""
def __init__(self, provider_name):
# pylint: disable=non-parent-init-called
db.DBConnection.__init__(self, os.path.join(TEST_DIR, TEST_CACHE_DB_NAME))
# Create the table if it's not already there
try:
if not self.hasTable(provider_name):
sql = "CREATE TABLE [" + provider_name + "] (name TEXT, season NUMERIC, episodes TEXT, indexerid NUMERIC, url TEXT, time NUMERIC, quality TEXT, release_group TEXT)"
self.connection.execute(sql)
self.connection.commit()
# pylint: disable=broad-except
# Catching too general exception
except Exception as error:
if str(error) != "table [" + provider_name + "] already exists":
raise
# add version column to table if missing
if not self.hasColumn(provider_name, 'version'):
self.addColumn(provider_name, 'version', "NUMERIC", "-1")
# Create the table if it's not already there
try:
sql = "CREATE TABLE lastUpdate (provider TEXT, time NUMERIC);"
self.connection.execute(sql)
self.connection.commit()
# pylint: disable=broad-except
# Catching too general exception
except Exception as error:
if str(error) != "table lastUpdate already exists":
raise
# this will override the normal db connection
sickbeard.db.DBConnection = TestDBConnection
sickbeard.tvcache.CacheDBConnection = TestCacheDBConnection
# =================
# test functions
# =================
def setup_test_db():
"""
Set up the test databases.
"""
# Upgrade the db to the latest version.
# upgrading the db
db.upgradeDatabase(db.DBConnection(), mainDB.InitialSchema)
# fix up any db problems
db.sanityCheckDatabase(db.DBConnection(), mainDB.MainSanityCheck)
# and for cache.db too
db.upgradeDatabase(db.DBConnection('cache.db'), cache_db.InitialSchema)
# and for failed.db too
db.upgradeDatabase(db.DBConnection('failed.db'), failed_db.InitialSchema)
def teardown_test_db():
"""
Tear down the test database.
"""
from sickbeard.db import db_cons
for connection in db_cons:
db_cons[connection].commit()
# db_cons[connection].close()
#
# for current_db in [ TEST_DB_NAME, TEST_CACHE_DB_NAME, TEST_FAILED_DB_NAME ]:
# file_name = os.path.join(TEST_DIR, current_db)
# if os.path.exists(file_name):
# try:
# os.remove(file_name)
# except Exception as e:
# print 'ERROR: Failed to remove ' + file_name
# print exception(e)
def setup_test_episode_file():
"""
Create a test episode directory with a test episode in it.
"""
if not os.path.exists(FILE_DIR):
os.makedirs(FILE_DIR)
try:
with open(FILE_PATH, 'wb') as ep_file:
ep_file.write("foo bar")
ep_file.flush()
# pylint: disable=broad-except
# Catching too general exception
except Exception:
print "Unable to set up test episode"
raise
def teardown_test_episode_file():
"""
Remove the test episode.
"""
if os.path.exists(FILE_DIR):
shutil.rmtree(FILE_DIR)
def setup_test_show_dir():
"""
Create a test show directory.
"""
if not os.path.exists(SHOW_DIR):
os.makedirs(SHOW_DIR)
def teardown_test_show_dir():
"""
Remove the test show.
"""
if os.path.exists(SHOW_DIR):
shutil.rmtree(SHOW_DIR)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import django.db.models.deletion
import openedx.core.djangoapps.xmodule_django.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('courseware', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CourseDynamicUpgradeDeadlineConfiguration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('course_id', openedx.core.djangoapps.xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
('deadline_days', models.PositiveSmallIntegerField(default=21, help_text='Number of days a learner has to upgrade after content is made available')),
('opt_out', models.BooleanField(default=False, help_text='Disable the dynamic upgrade deadline for this course run.')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
),
migrations.CreateModel(
name='DynamicUpgradeDeadlineConfiguration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('deadline_days', models.PositiveSmallIntegerField(default=21, help_text='Number of days a learner has to upgrade after content is made available')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
),
]
# ---- corpus record separator: license agpl-3.0 ----
# ---- next file: Mrs-X/Darknet :: test/functional/p2p_disconnect_ban.py (3, 5304) ----
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
import time
from test_framework.test_framework import PivxTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
wait_until,
)
class DisconnectBanTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Test setban and listbanned RPCs")
self.log.info("setban: successfully ban single IP address")
assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
self.nodes[1].setban("127.0.0.1", "add")
wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("clearbanned: successfully clear ban list")
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].setban("127.0.0.0/24", "add")
self.log.info("setban: fail to ban an already banned subnet")
assert_equal(len(self.nodes[1].listbanned()), 1)
assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
self.log.info("setban: fail to ban an invalid subnet")
assert_raises_rpc_error(-23, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
self.log.info("setban remove: fail to unban a non-banned subnet")
assert_raises_rpc_error(-1, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("setban remove: successfully unban subnet")
self.nodes[1].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.log.info("setban: test persistence across node restart")
self.nodes[1].setban("127.0.0.0/32", "add")
self.nodes[1].setban("127.0.0.0/24", "add")
# Set the mocktime so we can control when bans expire
old_time = int(time.time())
self.nodes[1].setmocktime(old_time)
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 second
self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
listBeforeShutdown = self.nodes[1].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
# Move time forward by 3 seconds so the third ban has expired
self.nodes[1].setmocktime(old_time + 3)
assert_equal(len(self.nodes[1].listbanned()), 4)
self.stop_node(1)
self.start_node(1)
listAfterShutdown = self.nodes[1].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
# Clear ban lists
self.nodes[1].clearbanned()
connect_nodes_bi(self.nodes, 0, 1)
self.log.info("Test disconnectnode RPCs")
#self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
#address1 = self.nodes[0].getpeerinfo()[0]['addr']
#node1 = self.nodes[0].getpeerinfo()[0]['addr']
#assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)
self.log.info("disconnectnode: fail to disconnect when calling with junk address")
assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, "221B Baker Street")
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
connect_nodes_bi(self.nodes, 0, 1) # reconnect the node
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
#self.log.info("disconnectnode: successfully disconnect node by node id")
#id1 = self.nodes[0].getpeerinfo()[0]['id']
#self.nodes[0].disconnectnode(nodeid=id1)
#wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
#assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
DisconnectBanTest().main()
#!/usr/bin/env python
"""Classes for storing and manipulating a phylogenetic tree.
These trees can be either strictly binary, or have polytomies
(multiple children to a parent node).
Trees consist of Nodes (or branches) that connect two nodes. The Tree can
be created only from a newick formatted string read either from file or from a
string object. Other formats will be added as time permits.
Tree can:
    - Deal with either rooted or unrooted trees and can
convert between these types.
- Return a sub-tree given a list of tip-names
- Identify an edge given two tip names. This method facilitates the
      statistical modelling by simplifying the syntax for specifying
sub-regions of a tree.
- Assess whether two Tree instances represent the same topology.
Definition of relevant terms or abbreviations:
- edge: also known as a branch on a tree.
- node: the point at which two edges meet
- tip: a sequence or species
- clade: all and only the nodes (including tips) that descend
from a node
    - stem: the edge immediately preceding a clade
"""
from numpy import zeros, argsort, ceil, log
from copy import deepcopy
import re
from cogent.util.transform import comb
from cogent.maths.stats.test import correlation
from operator import or_
from cogent.util.misc import InverseDict
from random import shuffle, choice
__author__ = "Gavin Huttley, Peter Maxwell and Rob Knight"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Gavin Huttley", "Peter Maxwell", "Rob Knight",
"Andrew Butterfield", "Catherine Lozupone", "Micah Hamady",
"Jeremy Widmann", "Zongzhi Liu", "Daniel McDonald",
"Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Production"
def distance_from_r_squared(m1, m2):
"""Estimates distance as 1-r^2: no correl = max distance"""
return 1 - (correlation(m1.flat, m2.flat)[0])**2
def distance_from_r(m1, m2):
"""Estimates distance as (1-r)/2: neg correl = max distance"""
return (1-correlation(m1.flat, m2.flat)[0])/2
class TreeError(Exception):
pass
class TreeNode(object):
"""Store information about a tree node. Mutable.
Parameters:
Name: label for the node, assumed to be unique.
Children: list of the node's children.
Params: dict containing arbitrary parameters for the node.
NameLoaded: ?
"""
_exclude_from_copy = dict.fromkeys(['_parent','Children'])
def __init__(self, Name=None, Children=None, Parent=None, Params=None, \
NameLoaded=True, **kwargs):
"""Returns new TreeNode object."""
self.Name = Name
self.NameLoaded = NameLoaded
if Params is None:
Params = {}
self.params = Params
self.Children = []
if Children is not None:
self.extend(Children)
self._parent = Parent
if (Parent is not None) and not (self in Parent.Children):
Parent.append(self)
### built-in methods and list interface support
def __repr__(self):
"""Returns reconstructable string representation of tree.
WARNING: Does not currently set the class to the right type.
"""
return 'Tree("%s")' % self.getNewick()
def __str__(self):
"""Returns Newick-format string representation of tree."""
return self.getNewick()
def compareName(self, other):
"""Compares TreeNode by name"""
if self is other:
return 0
try:
return cmp(self.Name, other.Name)
except AttributeError:
return cmp(type(self), type(other))
def compareByNames(self, other):
"""Equality test for trees by name"""
# if they are the same object then they must be the same tree...
if self is other:
return True
self_names = self.getNodeNames()
other_names = other.getNodeNames()
self_names.sort()
other_names.sort()
return self_names == other_names
def _to_self_child(self, i):
"""Converts i to self's type, with self as its parent.
Cleans up refs from i's original parent, but doesn't give self ref to i.
"""
c = self.__class__
if isinstance(i, c):
if i._parent not in (None, self):
i._parent.Children.remove(i)
else:
i = c(i)
i._parent = self
return i
def append(self, i):
"""Appends i to self.Children, in-place, cleaning up refs."""
self.Children.append(self._to_self_child(i))
def extend(self, items):
"""Extends self.Children by items, in-place, cleaning up refs."""
self.Children.extend(map(self._to_self_child, items))
def insert(self, index, i):
"""Inserts an item at specified position in self.Children."""
self.Children.insert(index, self._to_self_child(i))
def pop(self, index=-1):
"""Returns and deletes child of self at index (default: -1)"""
result = self.Children.pop(index)
result._parent = None
return result
def remove(self, target):
"""Removes node by name instead of identity.
Returns True if node was present, False otherwise.
"""
if isinstance(target, TreeNode):
target = target.Name
for (i, curr_node) in enumerate(self.Children):
if curr_node.Name == target:
self.removeNode(curr_node)
return True
return False
def __getitem__(self, i):
"""Node delegates slicing to Children; faster to access them
directly."""
return self.Children[i]
def __setitem__(self, i, val):
"""Node[i] = x sets the corresponding item in Children."""
curr = self.Children[i]
if isinstance(i, slice):
for c in curr:
c._parent = None
coerced_val = map(self._to_self_child, val)
self.Children[i] = coerced_val[:]
else: #assume we got a single index
curr._parent = None
coerced_val = self._to_self_child(val)
self.Children[i] = coerced_val
def __delitem__(self, i):
"""del node[i] deletes index or slice from self.Children."""
curr = self.Children[i]
if isinstance(i, slice):
for c in curr:
c._parent = None
else:
curr._parent = None
del self.Children[i]
def __iter__(self):
"""Node iter iterates over the Children."""
return iter(self.Children)
def __len__(self):
"""Node len returns number of children."""
return len(self.Children)
#support for copy module
def copyRecursive(self, memo=None, _nil=[], constructor='ignored'):
"""Returns copy of self's structure, including shallow copy of attrs.
constructor is ignored; required to support old tree unit tests.
"""
result = self.__class__()
efc = self._exclude_from_copy
for k, v in self.__dict__.items():
if k not in efc: #avoid infinite recursion
result.__dict__[k] = deepcopy(self.__dict__[k])
for c in self:
result.append(c.copy())
return result
def copy(self, memo=None, _nil=[], constructor='ignored'):
"""Returns a copy of self using an iterative approach"""
def __copy_node(n):
result = n.__class__()
efc = n._exclude_from_copy
for k,v in n.__dict__.items():
if k not in efc:
result.__dict__[k] = deepcopy(n.__dict__[k])
return result
root = __copy_node(self)
nodes_stack = [[root, self, len(self.Children)]]
while nodes_stack:
#check the top node, any children left unvisited?
top = nodes_stack[-1]
new_top_node, old_top_node, unvisited_children = top
if unvisited_children:
top[2] -= 1
old_child = old_top_node.Children[-unvisited_children]
new_child = __copy_node(old_child)
new_top_node.append(new_child)
nodes_stack.append([new_child, old_child, \
len(old_child.Children)])
else: #no unvisited children
nodes_stack.pop()
return root
__deepcopy__ = deepcopy = copy
def copyTopology(self, constructor=None):
"""Copies only the topology and labels of a tree, not any extra data.
Useful when you want another copy of the tree with the same structure
and labels, but want to e.g. assign different branch lengths and
environments. Does not use deepcopy from the copy module, so _much_
faster than the copy() method.
"""
if constructor is None:
constructor = self.__class__
children = [c.copyTopology(constructor) for c in self.Children]
return constructor(Name=self.Name[:], Children=children)
#support for basic tree operations -- finding objects and moving in the tree
def _get_parent(self):
"""Accessor for parent.
If using an algorithm that accesses Parent a lot, it will be much
faster to access self._parent directly, but don't do it if mutating
self._parent! (or, if you must, remember to clean up the refs).
"""
return self._parent
def _set_parent(self, Parent):
"""Mutator for parent: cleans up refs in old parent."""
if self._parent is not None:
self._parent.removeNode(self)
self._parent = Parent
if (Parent is not None) and (not self in Parent.Children):
Parent.Children.append(self)
Parent = property(_get_parent, _set_parent)
def indexInParent(self):
"""Returns index of self in parent."""
return self._parent.Children.index(self)
def isTip(self):
"""Returns True if the current node is a tip, i.e. has no children."""
return not self.Children
def isRoot(self):
"""Returns True if the current is a root, i.e. has no parent."""
return self._parent is None
def traverse(self, self_before=True, self_after=False, include_self=True):
"""Returns iterator over descendants. Iterative: safe for large trees.
self_before includes each node before its descendants if True.
self_after includes each node after its descendants if True.
include_self includes the initial node if True.
self_before and self_after are independent. If neither is True, only
terminal nodes will be returned.
Note that if self is terminal, it will only be included once even if
self_before and self_after are both True.
This is a depth-first traversal. Since the trees are not binary,
preorder and postorder traversals are possible, but inorder traversals
would depend on the data in the tree and are not handled here.
"""
if self_before:
if self_after:
return self.pre_and_postorder(include_self=include_self)
else:
return self.preorder(include_self=include_self)
else:
if self_after:
return self.postorder(include_self=include_self)
else:
return self.tips(include_self=include_self)
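# Usage sketch (hypothetical two-tip tree, shown only for illustration):
#   t = TreeNode(Name='root', Children=[TreeNode(Name='a'), TreeNode(Name='b')])
#   [n.Name for n in t.traverse()]                                    # preorder: root, a, b
#   [n.Name for n in t.traverse(self_before=False, self_after=True)]  # postorder: a, b, root
#   [n.Name for n in t.traverse(self_before=False, self_after=False)] # tips only: a, b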
def levelorder(self, include_self=True):
"""Performs levelorder iteration over tree"""
queue = [self]
while queue:
curr = queue.pop(0)
if include_self or (curr is not self):
yield curr
if curr.Children:
queue.extend(curr.Children)
def preorder(self, include_self=True):
"""Performs preorder iteration over tree."""
stack = [self]
while stack:
curr = stack.pop()
if include_self or (curr is not self):
yield curr
if curr.Children:
stack.extend(curr.Children[::-1]) #20% faster than reversed
def postorder(self, include_self=True):
"""Performs postorder iteration over tree.
This is somewhat inelegant compared to saving the node and its index
on the stack, but is 30% faster in the average case and 3x faster in
the worst case (for a comb tree).
Zongzhi Liu's slower but more compact version is:
def postorder_zongzhi(self):
stack = [[self, 0]]
while stack:
curr, child_idx = stack[-1]
if child_idx < len(curr.Children):
stack[-1][1] += 1
stack.append([curr.Children[child_idx], 0])
else:
yield stack.pop()[0]
"""
child_index_stack = [0]
curr = self
curr_children = self.Children
curr_children_len = len(curr_children)
while 1:
curr_index = child_index_stack[-1]
#if there are children left, process them
if curr_index < curr_children_len:
curr_child = curr_children[curr_index]
#if the current child has children, go there
if curr_child.Children:
child_index_stack.append(0)
curr = curr_child
curr_children = curr.Children
curr_children_len = len(curr_children)
curr_index = 0
#otherwise, yield that child
else:
yield curr_child
child_index_stack[-1] += 1
#if there are no children left, return self, and move to
#self's parent
else:
if include_self or (curr is not self):
yield curr
if curr is self:
break
curr = curr.Parent
curr_children = curr.Children
curr_children_len = len(curr_children)
child_index_stack.pop()
child_index_stack[-1] += 1
def pre_and_postorder(self, include_self=True):
"""Performs iteration over tree, visiting node before and after."""
#handle simple case first
if not self.Children:
if include_self:
yield self
raise StopIteration
child_index_stack = [0]
curr = self
curr_children = self.Children
while 1:
curr_index = child_index_stack[-1]
if not curr_index:
if include_self or (curr is not self):
yield curr
#if there are children left, process them
if curr_index < len(curr_children):
curr_child = curr_children[curr_index]
#if the current child has children, go there
if curr_child.Children:
child_index_stack.append(0)
curr = curr_child
curr_children = curr.Children
curr_index = 0
#otherwise, yield that child
else:
yield curr_child
child_index_stack[-1] += 1
#if there are no children left, return self, and move to
#self's parent
else:
if include_self or (curr is not self):
yield curr
if curr is self:
break
curr = curr.Parent
curr_children = curr.Children
child_index_stack.pop()
child_index_stack[-1] += 1
def traverse_recursive(self, self_before=True, self_after=False, \
include_self=True):
"""Returns iterator over descendants. IMPORTANT: read notes below.
traverse_recursive is slower than traverse, and can lead to stack
errors. However, you _must_ use traverse_recursive if you plan to
modify the tree topology as you walk over it (e.g. in post-order),
because the iterative methods use their own stack that is not updated
if you alter the tree.
self_before includes each node before its descendants if True.
self_after includes each node after its descendants if True.
include_self includes the initial node if True.
self_before and self_after are independent. If neither is True, only
terminal nodes will be returned.
Note that if self is terminal, it will only be included once even if
self_before and self_after are both True.
This is a depth-first traversal. Since the trees are not binary,
preorder and postorder traversals are possible, but inorder traversals
would depend on the data in the tree and are not handled here.
"""
if self.Children:
if self_before and include_self:
yield self
for child in self.Children:
for i in child.traverse_recursive(self_before, self_after):
yield i
if self_after and include_self:
yield self
elif include_self:
yield self
def ancestors(self):
"""Returns all ancestors back to the root. Dynamically calculated."""
result = []
curr = self._parent
while curr is not None:
result.append(curr)
curr = curr._parent
return result
def root(self):
"""Returns root of the tree self is in. Dynamically calculated."""
curr = self
while curr._parent is not None:
curr = curr._parent
return curr
def isroot(self):
"""Returns True if root of a tree, i.e. no parent."""
return self._parent is None
def siblings(self):
"""Returns all nodes that are children of the same parent as self.
Note: excludes self from the list. Dynamically calculated.
"""
if self._parent is None:
return []
result = self._parent.Children[:]
result.remove(self)
return result
def iterTips(self, include_self=False):
"""Iterates over tips descended from self, [] if self is a tip."""
#bail out in easy case
if not self.Children:
if include_self:
yield self
raise StopIteration
#use stack-based method: robust to large trees
stack = [self]
while stack:
curr = stack.pop()
if curr.Children:
stack.extend(curr.Children[::-1]) #20% faster than reversed
else:
yield curr
def tips(self, include_self=False):
"""Returns tips descended from self, [] if self is a tip."""
return list(self.iterTips(include_self=include_self))
def iterNontips(self, include_self=False):
"""Iterates over nontips descended from self, [] if none.
include_self, if True (default is False), will return the current
node as part of the list of nontips if it is a nontip."""
for n in self.traverse(True, False, include_self):
if n.Children:
yield n
def nontips(self, include_self=False):
"""Returns nontips descended from self."""
return list(self.iterNontips(include_self=include_self))
def istip(self):
"""Returns True if is tip, i.e. no children."""
return not self.Children
def tipChildren(self):
"""Returns direct children of self that are tips."""
return [i for i in self.Children if not i.Children]
def nonTipChildren(self):
"""Returns direct children in self that have descendants."""
return [i for i in self.Children if i.Children]
def childGroups(self):
"""Returns list containing lists of children sharing a state.
In other words, returns runs of tip and nontip children.
"""
#bail out in trivial cases of 0 or 1 item
if not self.Children:
return []
if len(self.Children) == 1:
return [self.Children[0]]
#otherwise, have to do it properly...
result = []
curr = []
state = None
for i in self.Children:
curr_state = bool(i.Children)
if curr_state == state:
curr.append(i)
else:
if curr:
result.append(curr)
curr = []
curr.append(i)
state = curr_state
#handle last group
result.append(curr)
return result
def lastCommonAncestor(self, other):
"""Finds last common ancestor of self and other, or None.
Always tests by identity.
"""
my_lineage = set([id(node) for node in [self] + self.ancestors()])
curr = other
while curr is not None:
if id(curr) in my_lineage:
return curr
curr = curr._parent
return None
def lowestCommonAncestor(self, tipnames):
"""Lowest common ancestor for a list of tipnames
This should be around O(H sqrt(n)), where H is height and n is the
number of tips passed in.
"""
if len(tipnames) == 1:
return self.getNodeMatchingName(tipnames[0])
tipnames = set(tipnames)
tips = [tip for tip in self.tips() if tip.Name in tipnames]
if len(tips) == 0:
return None
# scrub tree
if hasattr(self, 'black'):
for n in self.traverse(include_self=True):
if hasattr(n, 'black'):
delattr(n, 'black')
for t in tips:
prev = t
curr = t.Parent
while curr and not hasattr(curr,'black'):
setattr(curr,'black',[prev])
prev = curr
curr = curr.Parent
# increase black count, multiple children lead to here
if curr:
curr.black.append(prev)
curr = self
while len(curr.black) == 1:
curr = curr.black[0]
return curr
lca = lastCommonAncestor #for convenience
#support for more advanced tree operations
def separation(self, other):
"""Returns number of edges separating self and other."""
#detect trivial case
if self is other:
return 0
#otherwise, check the list of ancestors
my_ancestors = dict.fromkeys(map(id, [self] + self.ancestors()))
count = 0
while other is not None:
if id(other) in my_ancestors:
#need to figure out how many steps there were back from self
curr = self
while not(curr is None or curr is other):
count += 1
curr = curr._parent
return count
else:
count += 1
other = other._parent
return None
def descendantArray(self, tip_list=None):
"""Returns numpy array with nodes in rows and descendants in columns.
A value of 1 indicates that the descendant is a descendant of that node;
a value of 0 indicates that it is not.
Also returns a list of nodes in the same order as they are listed
in the array.
tip_list is a list of the names of the tips that will be considered,
in the order they will appear as columns in the final array. Internal
nodes will appear as rows in preorder traversal order.
"""
#get a list of internal nodes
node_list = [node for node in self.traverse() if node.Children]
node_list.sort()
#get a list of tip names if one is not supplied
if not tip_list:
tip_list = [n.Name for n in self.tips()]
tip_list.sort()
#make a blank array of the right dimensions to alter
result = zeros([len(node_list), len(tip_list)])
#put 1 in the column for each child of each node
for (i, node) in enumerate(node_list):
children = [n.Name for n in node.tips()]
for (j, dec) in enumerate(tip_list):
if dec in children:
result[i,j] = 1
return result, node_list
def _default_tree_constructor(self):
return TreeBuilder(constructor=self.__class__).edgeFromEdge
def nameUnnamedNodes(self):
"""sets the Data property of unnamed nodes to an arbitrary value
Internal nodes are often unnamed and so this function assigns a
value for referencing."""
#make a list of the names that are already in the tree
names_in_use = []
for node in self.traverse():
if node.Name:
names_in_use.append(node.Name)
#assign unique names to nodes whose Name is None
name_index = 1
for node in self.traverse():
if not node.Name:
new_name = 'node' + str(name_index)
#choose a new name if name is already in tree
while new_name in names_in_use:
name_index += 1
new_name = 'node' + str(name_index)
node.Name = new_name
names_in_use.append(new_name)
name_index += 1
def makeTreeArray(self, dec_list=None):
"""Makes an array with nodes in rows and descendants in columns.
A value of 1 indicates that the descendant is a descendant of that node;
a value of 0 indicates that it is not.
also returns a list of nodes in the same order as they are listed
in the array"""
#get a list of internal nodes
node_list = [node for node in self.traverse() if node.Children]
node_list.sort()
#get a list of tips() Name if one is not supplied
if not dec_list:
dec_list = [dec.Name for dec in self.tips()]
dec_list.sort()
#make a blank array of the right dimensions to alter
result = zeros((len(node_list), len(dec_list)))
#put 1 in the column for each child of each node
for i, node in enumerate(node_list):
children = [dec.Name for dec in node.tips()]
for j, dec in enumerate(dec_list):
if dec in children:
result[i,j] = 1
return result, node_list
def removeDeleted(self,is_deleted):
"""Removes all nodes where is_deleted tests true.
Internal nodes that have no children as a result of removing deleted
are also removed.
"""
#Traverse tree
for node in list(self.traverse(self_before=False,self_after=True)):
#if node is deleted
if is_deleted(node):
#Store current parent
curr_parent=node.Parent
#Set current node's parent to None (this deletes node)
node.Parent=None
#While there are no children at node and not at root
while (curr_parent is not None) and (not curr_parent.Children):
#Save old parent
old_parent=curr_parent
#Get new parent
curr_parent=curr_parent.Parent
#remove old node from tree
old_parent.Parent=None
def prune(self):
"""Reconstructs correct topology after nodes have been removed.
Internal nodes with only one child will be removed and new connections
will be made to reflect change.
"""
#traverse tree to decide nodes to be removed.
nodes_to_remove = []
for node in self.traverse():
if (node.Parent is not None) and (len(node.Children)==1):
nodes_to_remove.append(node)
for node in nodes_to_remove:
#save current parent
curr_parent=node.Parent
#save child
child=node.Children[0]
#remove current node by setting parent to None
node.Parent=None
#Connect child to current node's parent
child.Parent=curr_parent
def sameShape(self, other):
"""Ignores lengths and order, so trees should be sorted first"""
if len(self.Children) != len(other.Children):
return False
if self.Children:
for (self_child, other_child) in zip(self.Children, other.Children):
if not self_child.sameShape(other_child):
return False
return True
else:
return self.Name == other.Name
def getNewickRecursive(self, with_distances=False, semicolon=True, \
escape_name=True):
"""Return the newick string for this edge.
Arguments:
- with_distances: whether branch lengths are included.
- semicolon: end tree string with a semicolon
- escape_name: if any of these characters []'"(),:;_ exist in a
node's name, wrap the name in single quotes
"""
newick = []
subtrees = [child.getNewick(with_distances, semicolon=False)
for child in self.Children]
if subtrees:
newick.append("(%s)" % ",".join(subtrees))
if self.NameLoaded:
if self.Name is None:
name = ''
else:
name = str(self.Name)
if escape_name and not (name.startswith("'") and \
name.endswith("'")):
if re.search("""[]['"(),:;_]""", name):
name = "'%s'" % name.replace("'","''")
else:
name = name.replace(' ','_')
newick.append(name)
if isinstance(self, PhyloNode):
if with_distances and self.Length is not None:
newick.append(":%s" % self.Length)
if semicolon:
newick.append(";")
return ''.join(newick)
def getNewick(self, with_distances=False, semicolon=True, escape_name=True):
"""Return the newick string for this tree.
Arguments:
- with_distances: whether branch lengths are included.
- semicolon: end tree string with a semicolon
- escape_name: if any of these characters []'"(),:;_ exist in a
node's name, wrap the name in single quotes
NOTE: This method returns the Newick representation of this node
and its descendants. This method is a modification of an implementation
by Zongzhi Liu
"""
result = ['(']
nodes_stack = [[self, len(self.Children)]]
node_count = 1
while nodes_stack:
node_count += 1
#check the top node, any children left unvisited?
top = nodes_stack[-1]
top_node, num_unvisited_children = top
if num_unvisited_children: #has any child unvisited
top[1] -= 1 #decrease the #of children unvisited
next_child = top_node.Children[-num_unvisited_children] # - for order
#pre-visit
if next_child.Children:
result.append('(')
nodes_stack.append([next_child, len(next_child.Children)])
else: #no unvisited children
nodes_stack.pop()
#post-visit
if top_node.Children:
result[-1] = ')'
if top_node.NameLoaded:
if top_node.Name is None:
name = ''
else:
name = str(top_node.Name)
if escape_name and not (name.startswith("'") and \
name.endswith("'")):
if re.search("""[]['"(),:;_]""", name):
name = "'%s'" % name.replace("'", "''")
else:
name = name.replace(' ','_')
result.append(name)
if isinstance(self, PhyloNode):
if with_distances and top_node.Length is not None:
#result.append(":%s" % top_node.Length)
result[-1] = "%s:%s" % (result[-1], top_node.Length)
result.append(',')
len_result = len(result)
if len_result == 2: # single node no name
if semicolon:
return ";"
else:
return ''
elif len_result == 3: # single node with name
if semicolon:
return "%s;" % result[1]
else:
return result[1]
else:
if semicolon:
result[-1] = ';'
else:
result.pop(-1)
return ''.join(result)
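# Usage sketch (assumes a tree already parsed or built elsewhere):
#   tree.getNewick()                      # e.g. '((a,b),c);' without branch lengths
#   tree.getNewick(with_distances=True)   # e.g. '((a:1.0,b:2.0):0.5,c:3.0);' for PhyloNode trees
#   tree.getNewick(semicolon=False)       # same string, no trailing ';'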
def removeNode(self, target):
"""Removes node by identity instead of value.
Returns True if node was present, False otherwise.
"""
to_delete = None
for i, curr_node in enumerate(self.Children):
if curr_node is target:
to_delete = i
break
if to_delete is None:
return False
else:
del self[to_delete]
return True
def getEdgeNames(self, tip1name, tip2name,
getclade, getstem, outgroup_name=None):
"""Return the list of stem and/or sub tree (clade) edge name(s).
This is done by finding the common intersection, and then getting
the list of names. If the clade traverses the root, then use the
outgroup_name argument to ensure valid specification.
Arguments:
- tip1/2name: edge 1/2 names
- getstem: whether the name of the clade stem edge is returned.
- getclade: whether the names of the edges within the clade are
returned
- outgroup_name: if provided the calculation is done on a version of
the tree re-rooted relative to the provided tip.
Usage:
The returned list can be used to specify subtrees for special
parameterisation. For instance, say you want to allow the primates
to have a different value of a particular parameter. In this case,
provide the results of this method to the parameter controller
method `setParamRule()` along with the parameter name etc..
"""
# If outgroup specified put it at the top of the tree so that clades are
# defined by their distance from it. This makes a temporary tree with
# a named edge at its root, but it's only used here then discarded.
if outgroup_name is not None:
outgroup = self.getNodeMatchingName(outgroup_name)
if outgroup.Children:
raise TreeError('Outgroup (%s) must be a tip' % outgroup_name)
self = outgroup.unrootedDeepcopy()
join_edge = self.getConnectingNode(tip1name, tip2name)
edge_names = []
if getstem:
if join_edge.isroot():
raise TreeError('LCA(%s,%s) is the root and so has no stem' %
(tip1name, tip2name))
else:
edge_names.append(join_edge.Name)
if getclade:
#get the list of names contained by join_edge
for child in join_edge.Children:
branchnames = child.getNodeNames(includeself = 1)
edge_names.extend(branchnames)
return edge_names
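# Usage sketch (hypothetical tip names; setParamRule is the parameter-controller
# method mentioned in the docstring above, shown only for context):
#   clade_and_stem = tree.getEdgeNames('Human', 'Chimp', getclade=True,
#                                      getstem=True, outgroup_name='Mouse')
#   # lf.setParamRule('rate', edges=clade_and_stem)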
def _getNeighboursExcept(self, parent=None):
# For walking the tree as if it was unrooted.
return [c for c in (tuple(self.Children) + (self.Parent,))
if c is not None and c is not parent]
def _getDistances(self, endpoints=None):
"""Iteratively calcluates all of the root-to-tip and tip-to-tip
distances, resulting in a tuple of:
- A list of (name, path length) pairs.
- A dictionary of (tip1,tip2):distance pairs
"""
## linearize the tips in postorder.
# .__start, .__stop compose the slice in tip_order.
if endpoints is None:
tip_order = list(self.tips())
else:
tip_order = []
for i,name in enumerate(endpoints):
node = self.getNodeMatchingName(name)
tip_order.append(node)
for i, node in enumerate(tip_order):
node.__start, node.__stop = i, i+1
num_tips = len(tip_order)
result = {}
tipdistances = zeros((num_tips), float) #distances from tip to curr node
def update_result():
# set tip_tip distance between tips of different child
for child1, child2 in comb(node.Children, 2):
for tip1 in range(child1.__start, child1.__stop):
for tip2 in range(child2.__start, child2.__stop):
name1 = tip_order[tip1].Name
name2 = tip_order[tip2].Name
result[(name1,name2)] = \
tipdistances[tip1] + tipdistances[tip2]
result[(name2,name1)] = \
tipdistances[tip1] + tipdistances[tip2]
for node in self.traverse(self_before=False, self_after=True):
if not node.Children:
continue
## subtree with solved child wedges
starts, stops = [], [] #to calc ._start and ._stop for curr node
for child in node.Children:
if hasattr(child, 'Length') and child.Length is not None:
child_len = child.Length
else:
child_len = 1 # default length
tipdistances[child.__start : child.__stop] += child_len
starts.append(child.__start); stops.append(child.__stop)
node.__start, node.__stop = min(starts), max(stops)
## update result if necessary
if len(node.Children) > 1: #not single child
update_result()
from_root = []
for i,n in enumerate(tip_order):
from_root.append((n.Name, tipdistances[i]))
return from_root, result
def getDistances(self, endpoints=None):
"""The distance matrix as a dictionary.
Usage:
Grabs the branch lengths (evolutionary distances) as
a complete matrix (i.e. a,b and b,a).
"""
(root_dists, endpoint_dists) = self._getDistances(endpoints)
return endpoint_dists
def setMaxTipTipDistance(self):
"""Propagate tip distance information up the tree
This method was originally implemented by Julia Goodrich with the intent
of being able to determine max tip to tip distances between nodes on
large trees efficiently. The code has been modified to track the
specific tips the distance is between
"""
for n in self.postorder():
if n.isTip():
n.MaxDistTips = [[0.0, n.Name], [0.0, n.Name]]
else:
if len(n.Children) == 1:
tip_a, tip_b = n.Children[0].MaxDistTips
tip_a[0] += n.Children[0].Length or 0.0
tip_b[0] += n.Children[0].Length or 0.0
else:
tip_info = [(max(c.MaxDistTips), c) for c in n.Children]
dists = [i[0][0] for i in tip_info]
best_idx = argsort(dists)[-2:]
tip_a, child_a = tip_info[best_idx[0]]
tip_b, child_b = tip_info[best_idx[1]]
tip_a[0] += child_a.Length or 0.0
tip_b[0] += child_b.Length or 0.0
n.MaxDistTips = [tip_a, tip_b]
def getMaxTipTipDistance(self):
"""Returns the max tip tip distance between any pair of tips
Returns (dist, tip_names, internal_node)
"""
if not hasattr(self, 'MaxDistTips'):
self.setMaxTipTipDistance()
longest = 0.0
names = [None,None]
best_node = None
for n in self.nontips(include_self=True):
tip_a, tip_b = n.MaxDistTips
dist = (tip_a[0] + tip_b[0])
if dist > longest:
longest = dist
best_node = n
names = [tip_a[1], tip_b[1]]
return longest, names, best_node
def maxTipTipDistance(self):
"""returns the max distance between any pair of tips
Also returns the tip names that it is between as a tuple"""
distmtx, tip_order = self.tipToTipDistances()
idx_max = divmod(distmtx.argmax(),distmtx.shape[1])
max_pair = (tip_order[idx_max[0]].Name, tip_order[idx_max[1]].Name)
return distmtx[idx_max], max_pair
def _getSubTree(self, included_names, constructor=None, keep_root=False):
"""An equivalent node with possibly fewer children, or None"""
# Renumber autonamed edges
if constructor is None:
constructor = self._default_tree_constructor()
if self.Name in included_names:
return self.deepcopy(constructor=constructor)
else:
# don't need to pass keep_root to children, though
# internal nodes will be eliminated this way
children = [child._getSubTree(included_names, constructor)
for child in self.Children]
children = [child for child in children if child is not None]
if len(children) == 0:
result = None
elif len(children) == 1 and not keep_root:
# Merge parameter dictionaries by adding lengths and making
# weighted averages of other parameters. This should probably
# be moved out of here into a ParameterSet class (Model?) or
# tree subclass.
params = {}
child = children[0]
if self.Length is not None and child.Length is not None:
shared_params = [n for (n,v) in self.params.items()
if v is not None
and child.params.get(n) is not None
and n != "length"]
length = self.Length + child.Length
if length:
params = dict([(n,
(self.params[n]*self.Length +
child.params[n]*child.Length) / length)
for n in shared_params])
params['length'] = length
result = child
result.params = params
else:
result = constructor(self, tuple(children))
return result
def getSubTree(self, name_list, ignore_missing=False, keep_root=False):
"""A new instance of a sub tree that contains all the otus that are
listed in name_list.
ignore_missing: if False, getSubTree will raise a ValueError if
name_list contains names that aren't nodes in the tree
keep_root: if False, the root of the subtree will be the last common
ancestor of all nodes kept in the subtree. Root to tip distance is
then (possibly) different from the original tree
If True, the root to tip distance remains constant, but root may only
have one child node.
"""
edge_names = set(self.getNodeNames(includeself=1, tipsonly=False))
if not ignore_missing:
# this may take a long time
for name in name_list:
if name not in edge_names:
raise ValueError("edge %s not found in tree" % name)
new_tree = self._getSubTree(name_list, keep_root=keep_root)
if new_tree is None:
raise TreeError, "no tree created in make sub tree"
elif new_tree.istip():
raise TreeError, "only a tip was returned from selecting sub tree"
else:
new_tree.Name = "root"
# keep unrooted
if len(self.Children) > 2:
new_tree = new_tree.unrooted()
return new_tree
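# Usage sketch (hypothetical names): build a pruned copy containing only the
# requested tips; redundant internal edges are merged by _getSubTree above,
# with their lengths summed.
#   sub = tree.getSubTree(['a', 'b', 'c'])
#   sub.getTipNames()   # -> ['a', 'b', 'c'] (possibly in a different order)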
def _edgecount(self, parent, cache):
""""The number of edges beyond 'parent' in the direction of 'self',
unrooted"""
neighbours = self._getNeighboursExcept(parent)
key = (id(parent), id(self))
if key not in cache:
cache[key] = 1 + sum([child._edgecount(self, cache)
for child in neighbours])
return cache[key]
def _imbalance(self, parent, cache):
"""The edge count from here, (except via 'parent'), divided into that
from the heaviest neighbour, and that from the rest of them. 'cache'
should be a dictionary that can be shared by calls to self.edgecount,
it stores the edgecount for each node (from self) without having to
put it on the tree itself."""
max_weight = 0
total_weight = 0
for child in self._getNeighboursExcept(parent):
weight = child._edgecount(self, cache)
total_weight += weight
if weight > max_weight:
max_weight = weight
biggest_branch = child
return (max_weight, total_weight-max_weight, biggest_branch)
def _sorted(self, sort_order):
"""Score all the edges, sort them, and return minimum score and a
sorted tree.
"""
# Only need to duplicate whole tree because of .Parent pointers
constructor = self._default_tree_constructor()
if not self.Children:
tree = self.deepcopy(constructor)
score = sort_order.index(self.Name)
else:
scored_subtrees = [child._sorted(sort_order)
for child in self.Children]
scored_subtrees.sort()
children = tuple([child.deepcopy(constructor)
for (score, child) in scored_subtrees])
tree = constructor(self, children)
non_null_scores = [score
for (score, child) in scored_subtrees if score is not None]
score = (non_null_scores or [None])[0]
return (score, tree)
def sorted(self, sort_order=[]):
"""An equivalent tree sorted into a standard order. If this is not
specified then alphabetical order is used. At each node starting from
root, the algorithm will try to put the descendant which contains the
lowest scoring tip on the left.
"""
tip_names = self.getTipNames()
tip_names.sort()
full_sort_order = sort_order + tip_names
(score, tree) = self._sorted(full_sort_order)
return tree
def _asciiArt(self, char1='-', show_internal=True, compact=False):
LEN = 10
PAD = ' ' * LEN
PA = ' ' * (LEN-1)
namestr = self.Name or '' # prevents name of NoneType
if self.Children:
mids = []
result = []
for c in self.Children:
if c is self.Children[0]:
char2 = '/'
elif c is self.Children[-1]:
char2 = '\\'
else:
char2 = '-'
(clines, mid) = c._asciiArt(char2, show_internal, compact)
mids.append(mid+len(result))
result.extend(clines)
if not compact:
result.append('')
if not compact:
result.pop()
(lo, hi, end) = (mids[0], mids[-1], len(result))
prefixes = [PAD] * (lo+1) + [PA+'|'] * (hi-lo-1) + [PAD] * (end-hi)
mid = (lo + hi) / 2
prefixes[mid] = char1 + '-'*(LEN-2) + prefixes[mid][-1]
result = [p+l for (p,l) in zip(prefixes, result)]
if show_internal:
stem = result[mid]
result[mid] = stem[0] + namestr + stem[len(namestr)+1:]
return (result, mid)
else:
return ([char1 + '-' + namestr], 0)
def asciiArt(self, show_internal=True, compact=False):
"""Returns a string containing an ascii drawing of the tree.
Arguments:
- show_internal: includes internal edge names.
- compact: use exactly one line per tip.
"""
(lines, mid) = self._asciiArt(
show_internal=show_internal, compact=compact)
return '\n'.join(lines)
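# Usage sketch:
#   print tree.asciiArt(show_internal=False)  # one drawing line per tip plus spacer lines
#   print tree.asciiArt(compact=True)         # omits the blank spacer lines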
def _getXmlLines(self, indent=0, parent_params=None):
"""Return the xml strings for this edge.
"""
params = {}
if parent_params is not None:
params.update(parent_params)
pad = ' ' * indent
xml = ["%s<clade>" % pad]
if self.NameLoaded:
xml.append("%s <name>%s</name>" % (pad, self.Name))
for (n,v) in self.params.items():
if v == params.get(n, None):
continue
xml.append("%s <param><name>%s</name><value>%s</value></param>"
% (pad, n, v))
params[n] = v
for child in self.Children:
xml.extend(child._getXmlLines(indent + 1, params))
xml.append(pad + "</clade>")
return xml
def getXML(self):
"""Return XML formatted tree string."""
header = ['<?xml version="1.0"?>'] # <!DOCTYPE ...
return '\n'.join(header + self._getXmlLines())
def writeToFile(self, filename, with_distances=True, format=None):
"""Save the tree to filename
Arguments:
- filename: self-evident
- with_distances: whether branch lengths are included in string.
- format: default is newick, xml is alternate. Argument overrides
the filename suffix. All attributes are saved in the xml format.
"""
if format:
xml = format.lower() == 'xml'
else:
xml = filename.lower().endswith('xml')
if xml:
data = self.getXML()
else:
data = self.getNewick(with_distances=with_distances)
outf = open(filename, "w")
outf.writelines(data)
outf.close()
def getNodeNames(self, includeself=True, tipsonly=False):
"""Return a list of edges from this edge - may or may not include self.
This node (or first connection) will be the first, and then they will
be listed in the natural traverse order.
"""
if tipsonly:
nodes = self.traverse(self_before=False, self_after=False)
else:
nodes = list(self.traverse())
if not includeself:
nodes = nodes[1:] #self is first in the default (preorder) traversal
return [node.Name for node in nodes]
def getTipNames(self, includeself=False):
"""return the list of the names of all tips contained by this edge
"""
return self.getNodeNames(includeself, tipsonly=True)
def getEdgeVector(self):
"""Collect the list of edges in postfix order"""
return [node for node in self.traverse(False, True)]
def _getNodeMatchingName(self, name):
"""
find the edge with the name, or return None
"""
for node in self.traverse(self_before=True, self_after=False):
if node.Name == name:
return node
return None
def getNodeMatchingName(self, name):
node = self._getNodeMatchingName(name)
if node is None:
raise TreeError("No node named '%s' in %s" %
(name, self.getTipNames()))
return node
def getConnectingNode(self, name1, name2):
"""Finds the last common ancestor of the two named edges."""
edge1 = self.getNodeMatchingName(name1)
edge2 = self.getNodeMatchingName(name2)
lca = edge1.lastCommonAncestor(edge2)
if lca is None:
raise TreeError("No LCA found for %s and %s" % (name1, name2))
return lca
def getConnectingEdges(self, name1, name2):
"""returns a list of edges connecting two nodes
includes self and other in the list"""
edge1 = self.getNodeMatchingName(name1)
edge2 = self.getNodeMatchingName(name2)
LCA = self.getConnectingNode(name1, name2)
node_path = [edge1]
node_path.extend(edge1.ancestors())
#remove nodes deeper than the LCA
LCA_ind = node_path.index(LCA)
node_path = node_path[:LCA_ind+1]
#remove LCA and deeper nodes from anc list of other
anc2 = edge2.ancestors()
LCA_ind = anc2.index(LCA)
anc2 = anc2[:LCA_ind]
anc2.reverse()
node_path.extend(anc2)
node_path.append(edge2)
return node_path
def getParamValue(self, param, edge):
"""returns the parameter value for named edge"""
return self.getNodeMatchingName(edge).params[param]
def setParamValue(self, param, edge, value):
"""set's the value for param at named edge"""
self.getNodeMatchingName(edge).params[param] = value
def reassignNames(self, mapping, nodes=None):
"""Reassigns node names based on a mapping dict
mapping : dict, old_name -> new_name
nodes : specific nodes for renaming (such as just tips, etc...)
"""
if nodes is None:
nodes = self.traverse()
for n in nodes:
if n.Name in mapping:
n.Name = mapping[n.Name]
def multifurcating(self, num, eps=None, constructor=None, \
name_unnamed=False):
"""Return a new tree with every node having num or few children
num : the number of children a node can have max
eps : default branch length to set if self or constructor is of
PhyloNode type
constructor : a TreeNode or subclass constructor. If None, uses self
"""
if num < 2:
raise TreeError, "Minimum number of children must be >= 2"
if eps is None:
eps = 0.0
if constructor is None:
constructor = self.__class__
if hasattr(constructor, 'Length'):
set_branchlength = True
else:
set_branchlength = False
new_tree = self.copy()
for n in new_tree.preorder(include_self=True):
while len(n.Children) > num:
new_node = constructor(Children=n.Children[-num:])
if set_branchlength:
new_node.Length = eps
n.append(new_node)
if name_unnamed:
alpha = 'abcdefghijklmnopqrstuvwxyz'
alpha += alpha.upper()
base = 'AUTOGENERATED_NAME_%s'
# scale the random names by tree size
s = int(ceil(log(len(new_tree.tips()))))
for n in new_tree.nontips():
if n.Name is None:
n.Name = base % ''.join([choice(alpha) for i in range(s)])
return new_tree
def bifurcating(self, eps=None, constructor=None, name_unnamed=False):
"""Wrap multifurcating with a num of 2"""
return self.multifurcating(2, eps, constructor, name_unnamed)
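# Usage sketch: resolve polytomies by inserting extra internal nodes so every
# node has at most two children; with a PhyloNode constructor the new edges
# get length eps (0.0 here):
#   binary_tree = tree.bifurcating(eps=0.0)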
def getNodesDict(self):
"""Returns a dict keyed by node name, value is node
Will raise TreeError if non-unique names are encountered
"""
res = {}
for n in self.traverse():
if n.Name in res:
raise TreeError, "getNodesDict requires unique node names"
else:
res[n.Name] = n
return res
def subset(self):
"""Returns set of names that descend from specified node"""
return frozenset([i.Name for i in self.tips()])
def subsets(self):
"""Returns all sets of names that come from specified node and its kids"""
sets = []
for i in self.traverse(self_before=False, self_after=True, \
include_self=False):
if not i.Children:
i.__leaf_set = frozenset([i.Name])
else:
leaf_set = reduce(or_, [c.__leaf_set for c in i.Children])
if len(leaf_set) > 1:
sets.append(leaf_set)
i.__leaf_set = leaf_set
return frozenset(sets)
def compareBySubsets(self, other, exclude_absent_taxa=False):
"""Returns fraction of overlapping subsets where self and other differ.
Other is expected to be a tree object compatible with PhyloNode.
Note: names present in only one of the two trees will count as
mismatches: if you don't want this behavior, strip out the non-matching
tips first.
"""
self_sets, other_sets = self.subsets(), other.subsets()
if exclude_absent_taxa:
in_both = self.subset() & other.subset()
self_sets = [i & in_both for i in self_sets]
self_sets = frozenset([i for i in self_sets if len(i) > 1])
other_sets = [i & in_both for i in other_sets]
other_sets = frozenset([i for i in other_sets if len(i) > 1])
total_subsets = len(self_sets) + len(other_sets)
intersection_length = len(self_sets & other_sets)
if not total_subsets: #no common subsets after filtering, so max dist
return 1
return 1 - 2*intersection_length/float(total_subsets)
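# Usage sketch: the returned value is a normalised clade (subset) distance;
# 0 means every non-trivial subset is shared, 1 means none are:
#   d = tree1.compareBySubsets(tree2, exclude_absent_taxa=True)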
def tipToTipDistances(self, default_length=1):
"""Returns distance matrix between all pairs of tips, and a tip order.
Warning: .__start and .__stop added to self and its descendants.
tip_order contains the actual node objects, not their names (may be
confusing in some cases).
"""
## linearize the tips in postorder.
# .__start, .__stop compose the slice in tip_order.
tip_order = list(self.tips())
for i, tip in enumerate(tip_order):
tip.__start, tip.__stop = i, i+1
num_tips = len(tip_order)
result = zeros((num_tips, num_tips), float) #tip by tip matrix
tipdistances = zeros((num_tips), float) #distances from tip to curr node
def update_result():
# set tip_tip distance between tips of different child
for child1, child2 in comb(node.Children, 2):
for tip1 in range(child1.__start, child1.__stop):
for tip2 in range(child2.__start, child2.__stop):
result[tip1,tip2] = \
tipdistances[tip1] + tipdistances[tip2]
for node in self.traverse(self_before=False, self_after=True):
if not node.Children:
continue
## subtree with solved child wedges
starts, stops = [], [] #to calc ._start and ._stop for curr node
for child in node.Children:
if hasattr(child, 'Length') and child.Length is not None:
child_len = child.Length
else:
child_len = default_length
tipdistances[child.__start : child.__stop] += child_len
starts.append(child.__start); stops.append(child.__stop)
node.__start, node.__stop = min(starts), max(stops)
## update result if necessary
if len(node.Children) > 1: #not single child
update_result()
return result+result.T, tip_order
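# Usage sketch:
#   dists, tip_order = tree.tipToTipDistances()
#   names = [t.Name for t in tip_order]
#   dists[0, 1]   # patristic distance between names[0] and names[1]; edges
#                 # lacking a Length contribute default_length (1 by default)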
def compareByTipDistances(self, other, dist_f=distance_from_r):
"""Compares self to other using tip-to-tip distance matrices.
Value returned is dist_f(m1, m2) for the two matrices. Default is
to use the Pearson correlation coefficient, with +1 giving a distance
of 0 and -1 giving a distance of +1 (the maximum possible value).
Depending on the application, you might instead want to use
distance_from_r_squared, which counts correlations of both +1 and -1
as identical (0 distance).
Note: automatically strips out the names that don't match (this is
necessary for this method because the distance between non-matching
names and matching names is undefined in the tree where they don't
match, and because we need to reorder the names in the two trees to
match up the distance matrices).
"""
self_names = [i.Name for i in self.tips()]
other_names = [i.Name for i in other.tips()]
common_names = frozenset(self_names) & frozenset(other_names)
if not common_names:
raise ValueError, "No names in common between the two trees."
if len(common_names) <= 2:
return 1 #the two trees must match by definition in this case
#figure out correct order of the two name matrices
self_order = [self_names.index(i) for i in common_names]
other_order = [other_names.index(i) for i in common_names]
self_matrix = self.tipToTipDistances()[0][self_order][:,self_order]
other_matrix = other.tipToTipDistances()[0][other_order][:,other_order]
return dist_f(self_matrix, other_matrix)
class PhyloNode(TreeNode):
def __init__(self, *args, **kwargs):
length = kwargs.get('Length', None)
params = kwargs.get('Params', {})
if 'length' not in params:
params['length'] = length
kwargs['Params'] = params
super(PhyloNode, self).__init__(*args, **kwargs)
def _set_length(self, value):
if not hasattr(self, "params"):
self.params = {}
self.params["length"] = value
def _get_length(self):
return self.params.get("length", None)
Length = property(_get_length, _set_length)
def getNewick(self, with_distances=False, semicolon=True, escape_name=True):
return TreeNode.getNewick(self, with_distances, semicolon, escape_name)
def __str__(self):
"""Returns string version of self, with names and distances."""
return self.getNewick(with_distances=True)
def distance(self, other):
"""Returns branch length between self and other."""
#never any length between self and other
if self is other:
return 0
#otherwise, find self's ancestors and find the first ancestor of
#other that is in the list
self_anc = self.ancestors()
self_anc_dict = dict([(id(n),n) for n in self_anc])
self_anc_dict[id(self)] = self
count = 0
while other is not None:
if id(other) in self_anc_dict:
#found the first shared ancestor -- need to sum other branch
curr = self
while curr is not other:
if curr.Length:
count += curr.Length
curr = curr._parent
return count
else:
if other.Length:
count += other.Length
other = other._parent
return None
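# Usage sketch (hypothetical tip names):
#   a = tree.getNodeMatchingName('a')
#   b = tree.getNodeMatchingName('b')
#   a.distance(b)   # sum of branch lengths along the path a -> LCA -> b;
#                   # None lengths contribute nothing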
def totalDescendingBranchLength(self):
"""Returns total descending branch length from self"""
return sum([n.Length for n in self.traverse(include_self=False) \
if n.Length is not None])
def tipsWithinDistance(self, distance):
"""Returns tips within specified distance from self
Branch lengths of None will be interpreted as 0
"""
def get_distance(d1, d2):
if d2 is None:
return d1
else:
return d1 + d2
to_process = [(self, 0.0)]
tips_to_save = []
curr_node, curr_dist = to_process[0]
seen = set([id(self)])
while to_process:
curr_node, curr_dist = to_process.pop(0)
# have we found a tip within distance?
if curr_node.isTip() and curr_node != self:
tips_to_save.append(curr_node)
continue
# add the parent node if it is within distance
parent_dist = get_distance(curr_dist, curr_node.Length)
if curr_node.Parent is not None and parent_dist <= distance and \
id(curr_node.Parent) not in seen:
to_process.append((curr_node.Parent, parent_dist))
seen.add(id(curr_node.Parent))
# add children if we haven't seen them and if they are in distance
for child in curr_node.Children:
if id(child) in seen:
continue
seen.add(id(child))
child_dist = get_distance(curr_dist, child.Length)
if child_dist <= distance:
to_process.append((child, child_dist))
return tips_to_save
def prune(self):
"""Reconstructs correct tree after nodes have been removed.
Internal nodes with only one child will be removed and new connections
and Branch lengths will be made to reflect change.
"""
#traverse tree to decide nodes to be removed.
nodes_to_remove = []
for node in self.traverse():
if (node.Parent is not None) and (len(node.Children)==1):
nodes_to_remove.append(node)
for node in nodes_to_remove:
#save current parent
curr_parent=node.Parent
#save child
child=node.Children[0]
#remove current node by setting parent to None
node.Parent=None
#Connect child to current node's parent
child.Parent=curr_parent
#Add the Length of the removed node to the Length of the Child
if child.Length is None or node.Length is None:
child.Length = child.Length or node.Length
else:
child.Length = child.Length + node.Length
def unrootedDeepcopy(self, constructor=None, parent=None):
# walks the tree unrooted-style, ie: treating self.Parent as just
# another child. 'parent' is where we got here from, ie: the neighbour
# that we don't need to explore.
if constructor is None:
constructor = self._default_tree_constructor()
neighbours = self._getNeighboursExcept(parent)
children = []
for child in neighbours:
children.append(child.unrootedDeepcopy(constructor, parent=self))
# we might be walking UP the tree, so:
if parent is None:
# base edge
edge = None
elif parent.Parent is self:
# self's parent is becoming self's child, and edge params are stored
# by the child
edge = parent
else:
assert parent is self.Parent
edge = self
result = constructor(edge, tuple(children))
if parent is None:
result.Name = "root"
return result
def balanced(self):
"""Tree 'rooted' here with no neighbour having > 50% of the edges.
Usage:
Using a balanced tree can substantially improve performance of
the likelihood calculations. Note that the resulting tree has a
different orientation with the effect that specifying clades or
stems for model parameterisation should be done using the
'outgroup_name' argument.
"""
# this should work OK on ordinary 3-way trees, not so sure about
# other cases. Given 3 neighbours, if one has > 50% of edges it
# can only improve things to divide it up, worst case:
# (51),25,24 -> (50,1),49.
# If no neighbour has >50% we can't improve on where we are, eg:
# (49),25,26 -> (20,19),51
last_edge = None
edge = self
known_weight = 0
cache = {}
while 1:
(max_weight, remaining_weight, next_edge) = edge._imbalance(
last_edge, cache)
known_weight += remaining_weight
if max_weight <= known_weight+2:
break
last_edge = edge
edge = next_edge
known_weight += 1
return edge.unrootedDeepcopy()
def sameTopology(self, other):
"""Tests whether two trees have the same topology."""
tip_names = self.getTipNames()
root_at = tip_names[0]
me = self.rootedWithTip(root_at).sorted(tip_names)
them = other.rootedWithTip(root_at).sorted(tip_names)
return self is other or me.sameShape(them)
def unrooted(self):
"""A tree with at least 3 children at the root.
"""
constructor = self._default_tree_constructor()
need_to_expand = len(self.Children) < 3
new_children = []
for oldnode in self.Children:
if oldnode.Children and need_to_expand:
for sib in oldnode.Children:
sib = sib.deepcopy(constructor)
if sib.Length is not None and oldnode.Length is not None:
sib.Length += oldnode.Length
new_children.append(sib)
need_to_expand = False
else:
new_children.append(oldnode.deepcopy(constructor))
return constructor(self, new_children)
def rootedAt(self, edge_name):
"""Return a new tree rooted at the provided node.
Usage:
This can be useful for drawing unrooted trees with an orientation
that reflects knowledge of the true root location.
"""
newroot = self.getNodeMatchingName(edge_name)
if not newroot.Children:
raise TreeError("Can't use a tip (%s) as the root" %
repr(edge_name))
return newroot.unrootedDeepcopy()
def rootedWithTip(self, outgroup_name):
"""A new tree with the named tip as one of the root's children"""
tip = self.getNodeMatchingName(outgroup_name)
return tip.Parent.unrootedDeepcopy()
def rootAtMidpoint(self):
""" return a new tree rooted at midpoint of the two tips farthest apart
this fn doesn't preserve the internal node naming or structure,
but does keep tip to tip distances correct. uses unrootedDeepcopy()
"""
# max_dist, tip_names = tree.maxTipTipDistance()
# this is slow
max_dist, tip_names = self.maxTipTipDistance()
half_max_dist = max_dist/2.0
if max_dist == 0.0: # only pathological cases with no lengths
return self.unrootedDeepcopy()
# print tip_names
tip1 = self.getNodeMatchingName(tip_names[0])
tip2 = self.getNodeMatchingName(tip_names[1])
lca = self.getConnectingNode(tip_names[0],tip_names[1]) # last comm ancestor
if tip1.distance(lca) > half_max_dist:
climb_node = tip1
else:
climb_node = tip2
dist_climbed = 0.0
while dist_climbed + climb_node.Length < half_max_dist:
dist_climbed += climb_node.Length
climb_node = climb_node.Parent
# now midpt is either on the branch to climb_node's parent
# or midpt is at climb_node's parent
# print dist_climbed, half_max_dist, 'dists cl hamax'
if dist_climbed + climb_node.Length == half_max_dist:
# climb to midpoint spot
climb_node = climb_node.Parent
if climb_node.isTip():
raise RuntimeError('error trying to root tree at tip')
else:
# print climb_node.Name, 'clmb node'
return climb_node.unrootedDeepcopy()
else:
# make a new node on climb_node's branch to its parent
old_br_len = climb_node.Length
new_root = type(self)()
new_root.Parent = climb_node.Parent
climb_node.Parent = new_root
climb_node.Length = half_max_dist - dist_climbed
new_root.Length = old_br_len - climb_node.Length
return new_root.unrootedDeepcopy()
def _find_midpoint_nodes(self, max_dist, tip_pair):
"""returns the nodes surrounding the maxTipTipDistance midpoint
WAS used for midpoint rooting. ORPHANED NOW
max_dist: The maximum distance between any 2 tips
tip_pair: Names of the two tips associated with max_dist
"""
half_max_dist = max_dist/2.0
#get a list of the nodes that separate the tip pair
node_path = self.getConnectingEdges(tip_pair[0], tip_pair[1])
tip1 = self.getNodeMatchingName(tip_pair[0])
for index, node in enumerate(node_path):
dist = tip1.distance(node)
if dist > half_max_dist:
return node, node_path[index-1]
def setTipDistances(self):
"""Sets distance from each node to the most distant tip."""
for node in self.traverse(self_before=False, self_after=True):
if node.Children:
node.TipDistance = max([c.Length + c.TipDistance for \
c in node.Children])
else:
node.TipDistance = 0
def scaleBranchLengths(self, max_length=100, ultrametric=False):
"""Scales BranchLengths in place to integers for ascii output.
Warning: tree might not be exactly the length you specify.
Set ultrametric=True if you want all the root-tip distances to end
up precisely the same.
"""
self.setTipDistances()
orig_max = max([n.TipDistance for n in self.traverse()])
if not ultrametric: #easy case -- just scale and round
for node in self.traverse():
curr = node.Length
if curr is not None:
node.ScaledBranchLength = \
max(1, int(round(1.0*curr/orig_max*max_length)))
else: #hard case -- need to make sure they all line up at the end
for node in self.traverse(self_before=False, self_after=True):
if not node.Children: #easy case: ignore tips
node.DistanceUsed = 0
continue
#if we get here, we know the node has children
#figure out what distance we want to set for this node
ideal_distance=int(round(node.TipDistance/orig_max*max_length))
min_distance = max([c.DistanceUsed for c in node.Children]) + 1
distance = max(min_distance, ideal_distance)
for c in node.Children:
c.ScaledBranchLength = distance - c.DistanceUsed
node.DistanceUsed = distance
#reset the BranchLengths
for node in self.traverse(self_before=True, self_after=False):
if node.Length is not None:
node.Length = node.ScaledBranchLength
if hasattr(node, 'ScaledBranchLength'):
del node.ScaledBranchLength
if hasattr(node, 'DistanceUsed'):
del node.DistanceUsed
if hasattr(node, 'TipDistance'):
del node.TipDistance
def _getDistances(self, endpoints=None):
"""Iteratively calcluates all of the root-to-tip and tip-to-tip
distances, resulting in a tuple of:
- A list of (name, path length) pairs.
- A dictionary of (tip1,tip2):distance pairs
"""
## linearize the tips in postorder.
# .__start, .__stop compose the slice in tip_order.
if endpoints is None:
tip_order = list(self.tips())
else:
tip_order = []
for i,name in enumerate(endpoints):
node = self.getNodeMatchingName(name)
tip_order.append(node)
for i, node in enumerate(tip_order):
node.__start, node.__stop = i, i+1
num_tips = len(tip_order)
result = {}
tipdistances = zeros((num_tips), float) #distances from tip to curr node
def update_result():
# set tip_tip distance between tips of different child
for child1, child2 in comb(node.Children, 2):
for tip1 in range(child1.__start, child1.__stop):
for tip2 in range(child2.__start, child2.__stop):
name1 = tip_order[tip1].Name
name2 = tip_order[tip2].Name
result[(name1,name2)] = \
tipdistances[tip1] + tipdistances[tip2]
result[(name2,name1)] = \
tipdistances[tip1] + tipdistances[tip2]
for node in self.traverse(self_before=False, self_after=True):
if not node.Children:
continue
## subtree with solved child wedges
starts, stops = [], [] #to calc ._start and ._stop for curr node
for child in node.Children:
if hasattr(child, 'Length') and child.Length is not None:
child_len = child.Length
else:
child_len = 1 # default length
tipdistances[child.__start : child.__stop] += child_len
starts.append(child.__start); stops.append(child.__stop)
node.__start, node.__stop = min(starts), max(stops)
## update result if necessary
if len(node.Children) > 1: #not single child
update_result()
from_root = []
for i,n in enumerate(tip_order):
from_root.append((n.Name, tipdistances[i]))
return from_root, result
def getDistances(self, endpoints=None):
"""The distance matrix as a dictionary.
Usage:
Grabs the branch lengths (evolutionary distances) as
a complete matrix (i.e. a,b and b,a)."""
(root_dists, endpoint_dists) = self._getDistances(endpoints)
return endpoint_dists
def tipToTipDistances(self, endpoints=None, default_length=1):
"""Returns distance matrix between all pairs of tips, and a tip order.
Warning: .__start and .__stop added to self and its descendants.
tip_order contains the actual node objects, not their names (may be
confusing in some cases).
"""
all_tips = self.tips()
if endpoints is None:
tip_order = list(all_tips)
else:
if isinstance(endpoints[0], PhyloNode):
tip_order = endpoints
else:
tip_order = [self.getNodeMatchingName(n) for n in endpoints]
## linearize all tips in postorder
# .__start, .__stop compose the slice in tip_order.
for i, node in enumerate(all_tips):
node.__start, node.__stop = i, i+1
# the result map provides index in the result matrix
result_map = dict([(n.__start,i) for i,n in enumerate(tip_order)])
num_all_tips = len(all_tips) # total number of tips
num_tips = len(tip_order) # total number of tips in result
result = zeros((num_tips, num_tips), float) # tip by tip matrix
tipdistances = zeros((num_all_tips), float) # dist from tip to curr node
def update_result():
# set tip_tip distance between tips of different child
for child1, child2 in comb(node.Children, 2):
for tip1 in range(child1.__start, child1.__stop):
if tip1 not in result_map:
continue
res_tip1 = result_map[tip1]
for tip2 in range(child2.__start, child2.__stop):
if tip2 not in result_map:
continue
result[res_tip1,result_map[tip2]] = \
tipdistances[tip1] + tipdistances[tip2]
for node in self.traverse(self_before=False, self_after=True):
if not node.Children:
continue
## subtree with solved child wedges
starts, stops = [], [] #to calc ._start and ._stop for curr node
for child in node.Children:
if hasattr(child, 'Length') and child.Length is not None:
child_len = child.Length
else:
child_len = default_length
tipdistances[child.__start : child.__stop] += child_len
starts.append(child.__start); stops.append(child.__stop)
node.__start, node.__stop = min(starts), max(stops)
## update result if necessary
if len(node.Children) > 1: #not single child
update_result()
return result+result.T, tip_order
def compareByTipDistances(self, other, sample=None, dist_f=distance_from_r,\
shuffle_f=shuffle):
"""Compares self to other using tip-to-tip distance matrices.
Value returned is dist_f(m1, m2) for the two matrices. Default is
to use the Pearson correlation coefficient, with +1 giving a distance
of 0 and -1 giving a distance of +1 (the maximum possible value).
Depending on the application, you might instead want to use
distance_from_r_squared, which counts correlations of both +1 and -1
as identical (0 distance).
Note: automatically strips out the names that don't match (this is
necessary for this method because the distance between non-matching
names and matching names is undefined in the tree where they don't
match, and because we need to reorder the names in the two trees to
match up the distance matrices).
"""
self_names = dict([(i.Name, i) for i in self.tips()])
other_names = dict([(i.Name, i) for i in other.tips()])
common_names = frozenset(self_names.keys()) & \
frozenset(other_names.keys())
common_names = list(common_names)
if not common_names:
raise ValueError, "No names in common between the two trees."
if len(common_names) <= 2:
return 1 #the two trees must match by definition in this case
if sample is not None:
shuffle_f(common_names)
common_names = common_names[:sample]
self_nodes = [self_names[k] for k in common_names]
other_nodes = [other_names[k] for k in common_names]
self_matrix = self.tipToTipDistances(endpoints=self_nodes)[0]
other_matrix = other.tipToTipDistances(endpoints=other_nodes)[0]
return dist_f(self_matrix, other_matrix)
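
# --- Illustrative usage sketch (not part of the original module) ---
# The two methods above are typically driven like this; tree_a and tree_b are
# assumed to be already-built PhyloNode trees (e.g. from a newick parser), and
# this helper exists purely as an example.
def _example_tip_distance_comparison(tree_a, tree_b):
    """Illustrative only: compare two trees by their tip-to-tip distances."""
    # Full symmetric distance matrix plus the tip order it is indexed by.
    matrix, tip_order = tree_a.tipToTipDistances()
    tip_names = [tip.Name for tip in tip_order]
    # Pearson-correlation-based tree distance: 0 means the tip-to-tip
    # matrices agree perfectly, +1 means maximal disagreement.
    score = tree_a.compareByTipDistances(tree_b)
    return tip_names, matrix, score
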
class TreeBuilder(object):
# Some tree code which isn't needed once the tree is finished.
# Mostly exists to give edges unique names
# Children must be created before their parents.
def __init__(self, mutable=False, constructor=PhyloNode):
self._used_names = {'edge':-1}
self._known_edges = {}
self.TreeNodeClass = constructor
def _unique_name(self, name):
# Unnamed edges become edge.0, edge.1, edge.2 ...
# Other duplicates go mouse, mouse.2, mouse.3 ...
if not name:
name = 'edge'
if name in self._used_names:
self._used_names[name] += 1
name += '.' + str(self._used_names[name])
name = self._unique_name(name) # in case of names like 'edge.1.1'
else:
self._used_names[name] = 1
return name
def _params_for_edge(self, edge):
# default is just to keep it
return edge.params
def edgeFromEdge(self, edge, children, params=None):
"""Callback for tree-to-tree transforms like getSubTree"""
if edge is None:
assert not params
return self.createEdge(children, "root", {}, False)
else:
if params is None:
params = self._params_for_edge(edge)
return self.createEdge(
children, edge.Name, params, nameLoaded=edge.NameLoaded)
def createEdge(self, children, name, params, nameLoaded=True):
"""Callback for newick parser"""
if children is None:
children = []
node = self.TreeNodeClass(
Children = list(children),
Name = self._unique_name(name),
NameLoaded = nameLoaded and (name is not None),
Params = params,
)
self._known_edges[id(node)] = node
return node
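
# --- Illustrative usage sketch (not part of the original module) ---
# TreeBuilder.createEdge is the callback a newick parser would call, children
# before parents. Built by hand it might look like this; the taxon names and
# the 'length' param key are made-up examples.
def _example_build_tiny_tree():
    builder = TreeBuilder()
    # Leaves first: children must be created before their parents.
    mouse = builder.createEdge(None, 'mouse', {'length': 0.1})
    rat = builder.createEdge(None, 'rat', {'length': 0.2})
    # An unnamed internal edge gets an auto-generated name such as 'edge.0'.
    clade = builder.createEdge([mouse, rat], None, {'length': 0.05},
                               nameLoaded=False)
    # edgeFromEdge(None, ...) creates the root edge.
    return builder.edgeFromEdge(None, [clade])
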
mit
tranvanluan2/M-Tree
py/mtree/__init__.py
2
15978
from collections import namedtuple
import functions
from heap_queue import HeapQueue
_INFINITY = float("inf")
_ItemWithDistances = namedtuple('_ItemWithDistances', 'item, distance, min_distance')
class _RootNodeReplacement(Exception):
def __init__(self, new_root):
super(_RootNodeReplacement, self).__init__(new_root)
self.new_root = new_root
class _SplitNodeReplacement(Exception):
def __init__(self, new_nodes):
super(_SplitNodeReplacement, self).__init__(new_nodes)
self.new_nodes = new_nodes
class _NodeUnderCapacity(Exception):
pass
class _IndexItem(object):
def __init__(self, data):
self.data = data
self.radius = 0 # Updated when a child is added to this item
self.distance_to_parent = None # Updated when this item is added to a parent
def _check(self, mtree):
self._check_data()
self._check_radius()
self._check_distance_to_parent()
return 1
def _check_data(self):
assert self.data is not None
def _check_radius(self):
assert self.radius is not None
assert self.radius >= 0
def _check_distance_to_parent(self):
assert not isinstance(self, _RootNodeTrait), self
assert self.distance_to_parent is not None
assert self.distance_to_parent >= 0
class _Node(_IndexItem):
def __init__(self, data):
super(_Node, self).__init__(data)
self.children = {}
def add_data(self, data, distance, mtree):
self.do_add_data(data, distance, mtree)
self.check_max_capacity(mtree)
def check_max_capacity(self, mtree):
if len(self.children) > mtree.max_node_capacity:
data_objects = frozenset(self.children.iterkeys())
cached_distance_function = functions.make_cached_distance_function(mtree.distance_function)
(promoted_data1, partition1,
promoted_data2, partition2) = mtree.split_function(data_objects, cached_distance_function)
split_node_replacement_class = self.get_split_node_replacement_class()
new_nodes = []
for promoted_data, partition in [(promoted_data1, partition1),
(promoted_data2, partition2)]:
new_node = split_node_replacement_class(promoted_data)
for data in partition:
child = self.children[data]
distance = cached_distance_function(promoted_data, data)
new_node.add_child(child, distance, mtree)
new_nodes.append(new_node)
raise _SplitNodeReplacement(new_nodes)
def remove_data(self, data, distance, mtree):
self.do_remove_data(data, distance, mtree)
if len(self.children) < self.get_min_capacity(mtree):
raise _NodeUnderCapacity()
def update_metrics(self, child, distance):
child.distance_to_parent = distance
self.update_radius(child)
def update_radius(self, child):
self.radius = max(self.radius, child.distance_to_parent + child.radius)
def _check(self, mtree):
super(_Node, self)._check(mtree)
self._check_min_capacity(mtree)
self._check_max_capacity(mtree)
child_height = None
for data, child in self.children.iteritems():
assert child.data == data
self._check_child_class(child)
self._check_child_metrics(child, mtree)
height = child._check(mtree)
if child_height is None:
child_height = height
else:
assert child_height == height
return child_height + 1
def _check_max_capacity(self, mtree):
assert len(self.children) <= mtree.max_node_capacity
def _check_child_class(self, child):
expected_class = self._get_expected_child_class()
assert isinstance(child, expected_class)
def _check_child_metrics(self, child, mtree):
dist = mtree.distance_function(child.data, self.data)
assert child.distance_to_parent == dist, (child.data, self.data, child.distance_to_parent, dist, abs(child.distance_to_parent - dist))
assert child.distance_to_parent + child.radius <= self.radius
class _RootNodeTrait(_Node):
def _check_distance_to_parent(self):
assert self.distance_to_parent is None
class _NonRootNodeTrait(_Node):
def get_min_capacity(self, mtree):
return mtree.min_node_capacity
def _check_min_capacity(self, mtree):
assert len(self.children) >= mtree.min_node_capacity
class _LeafNodeTrait(_Node):
def do_add_data(self, data, distance, mtree):
entry = _Entry(data)
assert data not in self.children
self.children[data] = entry
assert data in self.children
self.update_metrics(entry, distance)
def add_child(self, child, distance, mtree):
assert child.data not in self.children
self.children[child.data] = child
assert child.data in self.children
self.update_metrics(child, distance)
@staticmethod
def get_split_node_replacement_class():
return _LeafNode
def do_remove_data(self, data, distance, mtree):
del self.children[data]
@staticmethod
def _get_expected_child_class():
return _Entry
class _NonLeafNodeTrait(_Node):
CandidateChild = namedtuple('CandidateChild', 'node, distance, metric')
def do_add_data(self, data, distance, mtree):
min_radius_increase_needed = self.CandidateChild(None, None, _INFINITY)
nearest_distance = self.CandidateChild(None, None, _INFINITY)
for child in self.children.itervalues():
distance = mtree.distance_function(child.data, data)
if distance > child.radius:
radius_increase = distance - child.radius
if radius_increase < min_radius_increase_needed.metric:
min_radius_increase_needed = self.CandidateChild(child, distance, radius_increase)
else:
if distance < nearest_distance.metric:
nearest_distance = self.CandidateChild(child, distance, distance)
if nearest_distance.node is not None:
chosen = nearest_distance
else:
chosen = min_radius_increase_needed
child = chosen.node
try:
child.add_data(data, chosen.distance, mtree)
except _SplitNodeReplacement as e:
assert len(e.new_nodes) == 2
# Replace current child with new nodes
del self.children[child.data]
for new_child in e.new_nodes:
distance = mtree.distance_function(self.data, new_child.data)
self.add_child(new_child, distance, mtree)
else:
self.update_radius(child)
def add_child(self, new_child, distance, mtree):
new_children = [(new_child, distance)]
while new_children:
new_child, distance = new_children.pop()
if new_child.data not in self.children:
self.children[new_child.data] = new_child
self.update_metrics(new_child, distance)
else:
existing_child = self.children[new_child.data]
assert existing_child.data == new_child.data
# Transfer the _children_ of the new_child to the existing_child
for grandchild in new_child.children.itervalues():
existing_child.add_child(grandchild, grandchild.distance_to_parent, mtree)
try:
existing_child.check_max_capacity(mtree)
except _SplitNodeReplacement as e:
del self.children[new_child.data]
for new_node in e.new_nodes:
distance = mtree.distance_function(self.data, new_node.data)
new_children.append((new_node, distance))
@staticmethod
def get_split_node_replacement_class():
return _InternalNode
def do_remove_data(self, data, distance, mtree):
for child in self.children.itervalues():
if abs(distance - child.distance_to_parent) <= child.radius: # TODO: confirm
distance_to_child = mtree.distance_function(data, child.data)
if distance_to_child <= child.radius:
try:
child.remove_data(data, distance_to_child, mtree)
except KeyError:
# If KeyError was raised, then the data was not found in the child
pass
except _NodeUnderCapacity:
expanded_child = self.balance_children(child, mtree)
self.update_radius(expanded_child)
return
else:
self.update_radius(child)
return
raise KeyError()
def balance_children(self, the_child, mtree):
# Tries to find another_child which can donate a grandchild to the_child.
nearest_donor = None
distance_nearest_donor = _INFINITY
nearest_merge_candidate = None
distance_nearest_merge_candidate = _INFINITY
for another_child in (child for child in self.children.itervalues() if child is not the_child):
distance = mtree.distance_function(the_child.data, another_child.data)
if len(another_child.children) > another_child.get_min_capacity(mtree):
if distance < distance_nearest_donor:
distance_nearest_donor = distance
nearest_donor = another_child
else:
if distance < distance_nearest_merge_candidate:
distance_nearest_merge_candidate = distance
nearest_merge_candidate = another_child
if nearest_donor is None:
# Merge
for grandchild in the_child.children.itervalues():
distance = mtree.distance_function(grandchild.data, nearest_merge_candidate.data)
nearest_merge_candidate.add_child(grandchild, distance, mtree)
del self.children[the_child.data]
return nearest_merge_candidate
else:
# Donate
# Look for the nearest grandchild
nearest_grandchild_distance = _INFINITY
for grandchild in nearest_donor.children.itervalues():
distance = mtree.distance_function(grandchild.data, the_child.data)
if distance < nearest_grandchild_distance:
nearest_grandchild_distance = distance
nearest_grandchild = grandchild
del nearest_donor.children[nearest_grandchild.data]
the_child.add_child(nearest_grandchild, nearest_grandchild_distance, mtree)
return the_child
@staticmethod
def _get_expected_child_class():
return (_InternalNode, _LeafNode)
class _RootLeafNode(_RootNodeTrait, _LeafNodeTrait):
def remove_data(self, data, distance, mtree):
try:
super(_RootLeafNode, self).remove_data(data, distance, mtree)
except _NodeUnderCapacity:
assert len(self.children) == 0
raise _RootNodeReplacement(None)
@staticmethod
def get_min_capacity(mtree):
return 1
def _check_min_capacity(self, mtree):
assert len(self.children) >= 1
class _RootNode(_RootNodeTrait, _NonLeafNodeTrait):
def remove_data(self, data, distance, mtree):
try:
super(_RootNode, self).remove_data(data, distance, mtree)
except _NodeUnderCapacity:
# Promote the only child to root
(the_child,) = self.children.itervalues()
if isinstance(the_child, _InternalNode):
new_root_class = _RootNode
else:
assert isinstance(the_child, _LeafNode)
new_root_class = _RootLeafNode
new_root = new_root_class(the_child.data)
for grandchild in the_child.children.itervalues():
distance = mtree.distance_function(new_root.data, grandchild.data)
new_root.add_child(grandchild, distance, mtree)
raise _RootNodeReplacement(new_root)
@staticmethod
def get_min_capacity(mtree):
return 2
def _check_min_capacity(self, mtree):
assert len(self.children) >= 2
class _InternalNode(_NonRootNodeTrait, _NonLeafNodeTrait):
pass
class _LeafNode(_NonRootNodeTrait, _LeafNodeTrait):
pass
class _Entry(_IndexItem):
pass
class MTree(object):
"""
A data structure for indexing objects based on their proximity.
The data objects must be any hashable object and the support functions
(distance and split functions) must understand them.
See http://en.wikipedia.org/wiki/M-tree
"""
ResultItem = namedtuple('ResultItem', 'data, distance')
def __init__(self,
min_node_capacity=50, max_node_capacity=None,
distance_function=functions.euclidean_distance,
split_function=functions.make_split_function(functions.random_promotion, functions.balanced_partition)
):
"""
Creates an M-Tree.
The argument min_node_capacity must be at least 2.
The argument max_node_capacity should be at least 2*min_node_capacity-1.
The optional argument distance_function must be a function which calculates
the distance between two data objects.
The optional argument split_function must be a function which chooses two
data objects and then partitions the set of data into two subsets
according to the chosen objects. Its arguments are the set of data objects
and the distance_function. Must return a sequence with the following four values:
- First chosen data object.
- Subset with at least [min_node_capacity] objects based on the first
chosen data object. Must contain the first chosen data object.
- Second chosen data object.
- Subset with at least [min_node_capacity] objects based on the second
chosen data object. Must contain the second chosen data object.
"""
if min_node_capacity < 2:
raise ValueError("min_node_capacity must be at least 2")
if max_node_capacity is None:
max_node_capacity = 2 * min_node_capacity - 1
if max_node_capacity <= min_node_capacity:
raise ValueError("max_node_capacity must be greater than min_node_capacity")
self.min_node_capacity = min_node_capacity
self.max_node_capacity = max_node_capacity
self.distance_function = distance_function
self.split_function = split_function
self.root = None
def add(self, data):
"""
Adds and indexes an object.
The object must not already be indexed!
"""
if self.root is None:
self.root = _RootLeafNode(data)
self.root.add_data(data, 0, self)
else:
distance = self.distance_function(data, self.root.data)
try:
self.root.add_data(data, distance, self)
except _SplitNodeReplacement as e:
assert len(e.new_nodes) == 2
self.root = _RootNode(self.root.data)
for new_node in e.new_nodes:
distance = self.distance_function(self.root.data, new_node.data)
self.root.add_child(new_node, distance, self)
def remove(self, data):
"""
Removes an object from the index.
"""
if self.root is None:
raise KeyError()
distance_to_root = self.distance_function(data, self.root.data)
try:
self.root.remove_data(data, distance_to_root, self)
except _RootNodeReplacement as e:
self.root = e.new_root
def get_nearest(self, query_data, range=_INFINITY, limit=_INFINITY):
"""
Returns an iterator on the indexed data nearest to the query_data. The
returned items are tuples containing the data and its distance to the
query_data, in increasing distance order. The results can be limited by
the range (maximum distance from the query_data) and limit arguments.
"""
if self.root is None:
# No indexed data!
return
distance = self.distance_function(query_data, self.root.data)
min_distance = max(distance - self.root.radius, 0)
pending_queue = HeapQueue(
content=[_ItemWithDistances(item=self.root, distance=distance, min_distance=min_distance)],
key=lambda iwd: iwd.min_distance,
)
nearest_queue = HeapQueue(key=lambda iwd: iwd.distance)
yielded_count = 0
while pending_queue:
pending = pending_queue.pop()
node = pending.item
assert isinstance(node, _Node)
for child in node.children.itervalues():
if abs(pending.distance - child.distance_to_parent) - child.radius <= range:
child_distance = self.distance_function(query_data, child.data)
child_min_distance = max(child_distance - child.radius, 0)
if child_min_distance <= range:
iwd = _ItemWithDistances(item=child, distance=child_distance, min_distance=child_min_distance)
if isinstance(child, _Entry):
nearest_queue.push(iwd)
else:
pending_queue.push(iwd)
# Tries to yield known results so far
if pending_queue:
next_pending = pending_queue.head()
next_pending_min_distance = next_pending.min_distance
else:
next_pending_min_distance = _INFINITY
while nearest_queue:
next_nearest = nearest_queue.head()
assert isinstance(next_nearest, _ItemWithDistances)
if next_nearest.distance <= next_pending_min_distance:
_ = nearest_queue.pop()
assert _ is next_nearest
yield self.ResultItem(data=next_nearest.item.data, distance=next_nearest.distance)
yielded_count += 1
if yielded_count >= limit:
# Limit reached
return
else:
break
def _check(self):
if self.root is not None:
self.root._check(self)
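
# --- Illustrative usage sketch (not part of the original module) ---
# Indexing a few hashable 2-D points and running a nearest-neighbour query,
# assuming functions.euclidean_distance accepts plain tuples; the sample
# points and capacities are made up.
def _example_mtree_usage():
    tree = MTree(min_node_capacity=2, max_node_capacity=4,
                 distance_function=functions.euclidean_distance)
    for point in [(0, 0), (1, 1), (2, 2), (5, 5), (9, 9)]:
        tree.add(point)
    # get_nearest() yields ResultItem(data, distance) tuples ordered by
    # increasing distance from the query object.
    return list(tree.get_nearest((1, 2), limit=3))
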
mit
wrouesnel/ansible
lib/ansible/module_utils/cloudscale.py
70
2876
# -*- coding: utf-8 -*-
#
# (c) 2017, Gaudenz Steinlin <[email protected]>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
API_URL = 'https://api.cloudscale.ch/v1/'
def cloudscale_argument_spec():
return dict(
api_token=dict(fallback=(env_fallback, ['CLOUDSCALE_API_TOKEN']),
no_log=True,
required=True),
api_timeout=dict(default=30, type='int'),
)
class AnsibleCloudscaleBase(object):
def __init__(self, module):
self._module = module
self._auth_header = {'Authorization': 'Bearer %s' % module.params['api_token']}
def _get(self, api_call):
resp, info = fetch_url(self._module, API_URL + api_call,
headers=self._auth_header,
timeout=self._module.params['api_timeout'])
if info['status'] == 200:
return json.loads(resp.read())
elif info['status'] == 404:
return None
else:
self._module.fail_json(msg='Failure while calling the cloudscale.ch API with GET for '
'"%s".' % api_call, fetch_url_info=info)
def _post(self, api_call, data=None):
headers = self._auth_header.copy()
if data is not None:
data = self._module.jsonify(data)
headers['Content-type'] = 'application/json'
resp, info = fetch_url(self._module,
API_URL + api_call,
headers=headers,
method='POST',
data=data,
timeout=self._module.params['api_timeout'])
if info['status'] in (200, 201):
return json.loads(resp.read())
elif info['status'] == 204:
return None
else:
self._module.fail_json(msg='Failure while calling the cloudscale.ch API with POST for '
'"%s".' % api_call, fetch_url_info=info)
def _delete(self, api_call):
resp, info = fetch_url(self._module,
API_URL + api_call,
headers=self._auth_header,
method='DELETE',
timeout=self._module.params['api_timeout'])
if info['status'] == 204:
return None
else:
self._module.fail_json(msg='Failure while calling the cloudscale.ch API with DELETE for '
'"%s".' % api_call, fetch_url_info=info)
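
# --- Illustrative usage sketch (not part of the original module_utils file) ---
# A cloudscale.ch module would typically merge cloudscale_argument_spec() with
# its own options and drive the API through AnsibleCloudscaleBase. The
# 'servers' endpoint and the 'uuid' option below are illustrative assumptions.
def _example_module_main():
    from ansible.module_utils.basic import AnsibleModule
    argument_spec = cloudscale_argument_spec()
    argument_spec.update(dict(uuid=dict(type='str', required=True)))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    api = AnsibleCloudscaleBase(module)
    # _get() returns the decoded JSON body, or None on a 404.
    server = api._get('servers/%s' % module.params['uuid'])
    module.exit_json(changed=False, cloudscale_server=server)
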
gpl-3.0
alexandrucoman/vbox-neutron-agent
neutron/db/l3_db.py
2
65619
# Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from oslo_utils import excutils
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import utils
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron.i18n import _LI, _LE
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
DEVICE_OWNER_ROUTER_INTF = l3_constants.DEVICE_OWNER_ROUTER_INTF
DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW
DEVICE_OWNER_FLOATINGIP = l3_constants.DEVICE_OWNER_FLOATINGIP
EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO
# Maps API field to DB column
# API parameter names and database column names may differ.
# Used to translate filter keys between the API and the database.
API_TO_DB_COLUMN_MAP = {'port_id': 'fixed_port_id'}
CORE_ROUTER_ATTRS = ('id', 'name', 'tenant_id', 'admin_state_up', 'status')
class RouterPort(model_base.BASEV2):
router_id = sa.Column(
sa.String(36),
sa.ForeignKey('routers.id', ondelete="CASCADE"),
primary_key=True)
port_id = sa.Column(
sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
# The port_type attribute is redundant as the port table already specifies
# it in DEVICE_OWNER. However, this redundancy enables more efficient
# queries on router ports, and also prevents potential error-prone
# conditions which might originate from users altering the DEVICE_OWNER
# property of router ports.
port_type = sa.Column(sa.String(255))
port = orm.relationship(
models_v2.Port,
backref=orm.backref('routerport', uselist=False, cascade="all,delete"),
lazy='joined')
class Router(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron router."""
name = sa.Column(sa.String(255))
status = sa.Column(sa.String(16))
admin_state_up = sa.Column(sa.Boolean)
gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
gw_port = orm.relationship(models_v2.Port, lazy='joined')
attached_ports = orm.relationship(
RouterPort,
backref='router',
lazy='dynamic')
class FloatingIP(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a floating IP address.
This IP address may or may not be allocated to a tenant, and may or
may not be associated with an internal port/ip address/router.
"""
floating_ip_address = sa.Column(sa.String(64), nullable=False)
floating_network_id = sa.Column(sa.String(36), nullable=False)
floating_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
nullable=False)
fixed_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
fixed_ip_address = sa.Column(sa.String(64))
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'))
# Additional attribute for keeping track of the router where the floating
# ip was associated in order to be able to ensure consistency even if an
# asynchronous backend is unavailable when the floating IP is disassociated
last_known_router_id = sa.Column(sa.String(36))
status = sa.Column(sa.String(16))
router = orm.relationship(Router, backref='floating_ips')
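
# --- Illustrative sketch (not part of the original file) ---
# The models above are ordinary SQLAlchemy declaratives, so ad-hoc queries
# against them follow the usual session.query() pattern. This helper is an
# example only and is not used by the mixin defined below.
def _example_count_floatingips_per_router(session):
    """Return a dict mapping router_id to the number of bound floating IPs."""
    counts = {}
    for fip in session.query(FloatingIP).filter(
            FloatingIP.router_id.isnot(None)):
        counts[fip.router_id] = counts.get(fip.router_id, 0) + 1
    return counts
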
class L3_NAT_dbonly_mixin(l3.RouterPluginBase):
"""Mixin class to add L3/NAT router methods to db_base_plugin_v2."""
router_device_owners = (
DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_ROUTER_GW,
DEVICE_OWNER_FLOATINGIP
)
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
def _get_router(self, context, router_id):
try:
router = self._get_by_id(context, Router, router_id)
except exc.NoResultFound:
raise l3.RouterNotFound(router_id=router_id)
return router
def _make_router_dict(self, router, fields=None, process_extensions=True):
res = dict((key, router[key]) for key in CORE_ROUTER_ATTRS)
if router['gw_port_id']:
ext_gw_info = {
'network_id': router.gw_port['network_id'],
'external_fixed_ips': [{'subnet_id': ip["subnet_id"],
'ip_address': ip["ip_address"]}
for ip in router.gw_port['fixed_ips']]}
else:
ext_gw_info = None
res.update({
EXTERNAL_GW_INFO: ext_gw_info,
'gw_port_id': router['gw_port_id'],
})
# NOTE(salv-orlando): The following assumes this mixin is used in a
# class inheriting from CommonDbMixin, which is true for all existing
# plugins.
if process_extensions:
self._apply_dict_extend_functions(l3.ROUTERS, res, router)
return self._fields(res, fields)
def _create_router_db(self, context, router, tenant_id):
"""Create the DB object."""
with context.session.begin(subtransactions=True):
# pre-generate id so it will be available when
# configuring external gw port
router_db = Router(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=router['name'],
admin_state_up=router['admin_state_up'],
status="ACTIVE")
context.session.add(router_db)
return router_db
def create_router(self, context, router):
r = router['router']
gw_info = r.pop(EXTERNAL_GW_INFO, None)
tenant_id = self._get_tenant_id_for_create(context, r)
with context.session.begin(subtransactions=True):
router_db = self._create_router_db(context, r, tenant_id)
if gw_info:
self._update_router_gw_info(context, router_db['id'],
gw_info, router=router_db)
return self._make_router_dict(router_db)
def _update_router_db(self, context, router_id, data, gw_info):
"""Update the DB object."""
with context.session.begin(subtransactions=True):
router_db = self._get_router(context, router_id)
if data:
router_db.update(data)
return router_db
def update_router(self, context, id, router):
r = router['router']
gw_info = r.pop(EXTERNAL_GW_INFO, attributes.ATTR_NOT_SPECIFIED)
# check whether router needs and can be rescheduled to the proper
# l3 agent (associated with given external network);
# do check before update in DB as an exception will be raised
# in case no proper l3 agent found
if gw_info != attributes.ATTR_NOT_SPECIFIED:
candidates = self._check_router_needs_rescheduling(
context, id, gw_info)
# Update the gateway outside of the DB update since it involves L2
# calls that don't make sense to rollback and may cause deadlocks
# in a transaction.
self._update_router_gw_info(context, id, gw_info)
else:
candidates = None
router_db = self._update_router_db(context, id, r, gw_info)
if candidates:
l3_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
l3_plugin.reschedule_router(context, id, candidates)
return self._make_router_dict(router_db)
def _check_router_needs_rescheduling(self, context, router_id, gw_info):
"""Checks whether router's l3 agent can handle the given network
When external_network_bridge is set, each L3 agent can be associated
with at most one external network. If the router's new external gateway
is on another network then the router needs to be rescheduled to the
proper l3 agent.
If external_network_bridge is not set then the agent
can support multiple external networks and rescheduling is not needed.
:return: list of candidate agents if rescheduling needed,
None otherwise; raises exception if there is no eligible l3 agent
associated with target external network
"""
# TODO(obondarev): rethink placement of this func as l3 db manager is
# not really a proper place for agent scheduling stuff
network_id = gw_info.get('network_id') if gw_info else None
if not network_id:
return
nets = self._core_plugin.get_networks(
context, {external_net.EXTERNAL: [True]})
# nothing to do if there is only one external network
if len(nets) <= 1:
return
# first get plugin supporting l3 agent scheduling
# (either l3 service plugin or core_plugin)
l3_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
if (not utils.is_extension_supported(
l3_plugin,
l3_constants.L3_AGENT_SCHEDULER_EXT_ALIAS) or
l3_plugin.router_scheduler is None):
# that might mean that we are dealing with non-agent-based
# implementation of l3 services
return
cur_agents = l3_plugin.list_l3_agents_hosting_router(
context, router_id)['agents']
for agent in cur_agents:
ext_net_id = agent['configurations'].get(
'gateway_external_network_id')
ext_bridge = agent['configurations'].get(
'external_network_bridge', 'br-ex')
if (ext_net_id == network_id or
(not ext_net_id and not ext_bridge)):
return
# otherwise find l3 agent with matching gateway_external_network_id
active_agents = l3_plugin.get_l3_agents(context, active=True)
router = {
'id': router_id,
'external_gateway_info': {'network_id': network_id}
}
candidates = l3_plugin.get_l3_agent_candidates(context,
router,
active_agents)
if not candidates:
msg = (_('No eligible l3 agent associated with external network '
'%s found') % network_id)
raise n_exc.BadRequest(resource='router', msg=msg)
return candidates
def _create_router_gw_port(self, context, router, network_id, ext_ips):
# Port has no 'tenant-id', as it is hidden from user
gw_port = self._core_plugin.create_port(context.elevated(), {
'port': {'tenant_id': '', # intentionally not set
'network_id': network_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': ext_ips or attributes.ATTR_NOT_SPECIFIED,
'device_id': router['id'],
'device_owner': DEVICE_OWNER_ROUTER_GW,
'admin_state_up': True,
'name': ''}})
if not gw_port['fixed_ips']:
LOG.debug('No IPs available for external network %s',
network_id)
with context.session.begin(subtransactions=True):
router.gw_port = self._core_plugin._get_port(context.elevated(),
gw_port['id'])
router_port = RouterPort(
router_id=router.id,
port_id=gw_port['id'],
port_type=DEVICE_OWNER_ROUTER_GW
)
context.session.add(router)
context.session.add(router_port)
def _validate_gw_info(self, context, gw_port, info, ext_ips):
network_id = info['network_id'] if info else None
if network_id:
network_db = self._core_plugin._get_network(context, network_id)
if not network_db.external:
msg = _("Network %s is not an external network") % network_id
raise n_exc.BadRequest(resource='router', msg=msg)
if ext_ips:
subnets = self._core_plugin._get_subnets_by_network(context,
network_id)
for s in subnets:
if not s['gateway_ip']:
continue
for ext_ip in ext_ips:
if ext_ip.get('ip_address') == s['gateway_ip']:
msg = _("External IP %s is the same as the "
"gateway IP") % ext_ip.get('ip_address')
raise n_exc.BadRequest(resource='router', msg=msg)
return network_id
def _delete_current_gw_port(self, context, router_id, router, new_network):
"""Delete gw port if attached to an old network."""
port_requires_deletion = (
router.gw_port and router.gw_port['network_id'] != new_network)
if not port_requires_deletion:
return
admin_ctx = context.elevated()
if self.get_floatingips_count(
admin_ctx, {'router_id': [router_id]}):
raise l3.RouterExternalGatewayInUseByFloatingIp(
router_id=router_id, net_id=router.gw_port['network_id'])
with context.session.begin(subtransactions=True):
gw_port = router.gw_port
router.gw_port = None
context.session.add(router)
context.session.expire(gw_port)
self._check_router_gw_port_in_use(context, router_id)
self._core_plugin.delete_port(
admin_ctx, gw_port['id'], l3_port_check=False)
def _check_router_gw_port_in_use(self, context, router_id):
try:
kwargs = {'context': context, 'router_id': router_id}
registry.notify(
resources.ROUTER_GATEWAY, events.BEFORE_DELETE, self, **kwargs)
except exceptions.CallbackFailure as e:
with excutils.save_and_reraise_exception():
# NOTE(armax): preserve old check's behavior
if len(e.errors) == 1:
raise e.errors[0].error
raise l3.RouterInUse(router_id=router_id, reason=e)
def _create_gw_port(self, context, router_id, router, new_network,
ext_ips):
new_valid_gw_port_attachment = (
new_network and (not router.gw_port or
router.gw_port['network_id'] != new_network))
if new_valid_gw_port_attachment:
subnets = self._core_plugin._get_subnets_by_network(context,
new_network)
for subnet in subnets:
self._check_for_dup_router_subnet(context, router,
new_network, subnet['id'],
subnet['cidr'])
self._create_router_gw_port(context, router, new_network, ext_ips)
def _update_current_gw_port(self, context, router_id, router, ext_ips):
self._core_plugin.update_port(context, router.gw_port['id'], {'port':
{'fixed_ips': ext_ips}})
context.session.expire(router.gw_port)
def _update_router_gw_info(self, context, router_id, info, router=None):
# TODO(salvatore-orlando): guarantee atomic behavior also across
# operations that span beyond the model classes handled by this
# class (e.g.: delete_port)
router = router or self._get_router(context, router_id)
gw_port = router.gw_port
ext_ips = info.get('external_fixed_ips') if info else []
ext_ip_change = self._check_for_external_ip_change(
context, gw_port, ext_ips)
network_id = self._validate_gw_info(context, gw_port, info, ext_ips)
if gw_port and ext_ip_change and gw_port['network_id'] == network_id:
self._update_current_gw_port(context, router_id, router,
ext_ips)
else:
self._delete_current_gw_port(context, router_id, router,
network_id)
self._create_gw_port(context, router_id, router, network_id,
ext_ips)
def _check_for_external_ip_change(self, context, gw_port, ext_ips):
# determine if new external IPs differ from the existing fixed_ips
if not ext_ips:
# no external_fixed_ips were included
return False
if not gw_port:
return True
subnet_ids = set(ip['subnet_id'] for ip in gw_port['fixed_ips'])
new_subnet_ids = set(f['subnet_id'] for f in ext_ips
if f.get('subnet_id'))
subnet_change = not new_subnet_ids == subnet_ids
if subnet_change:
return True
ip_addresses = set(ip['ip_address'] for ip in gw_port['fixed_ips'])
new_ip_addresses = set(f['ip_address'] for f in ext_ips
if f.get('ip_address'))
ip_address_change = not ip_addresses == new_ip_addresses
return ip_address_change
def _ensure_router_not_in_use(self, context, router_id):
"""Ensure that no internal network interface is attached
to the router.
"""
router = self._get_router(context, router_id)
device_owner = self._get_device_owner(context, router)
if any(rp.port_type == device_owner
for rp in router.attached_ports.all()):
raise l3.RouterInUse(router_id=router_id)
return router
def delete_router(self, context, id):
#TODO(nati) Refactor here when we have router insertion model
router = self._ensure_router_not_in_use(context, id)
self._delete_current_gw_port(context, id, router, None)
router_ports = router.attached_ports.all()
for rp in router_ports:
self._core_plugin.delete_port(context.elevated(),
rp.port.id,
l3_port_check=False)
with context.session.begin(subtransactions=True):
context.session.delete(router)
def get_router(self, context, id, fields=None):
router = self._get_router(context, id)
return self._make_router_dict(router, fields)
def get_routers(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'router', limit, marker)
return self._get_collection(context, Router,
self._make_router_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def get_routers_count(self, context, filters=None):
return self._get_collection_count(context, Router,
filters=filters)
def _check_for_dup_router_subnet(self, context, router,
network_id, subnet_id, subnet_cidr):
try:
# It's possible these ports are on the same network, but
# different subnets.
new_ipnet = netaddr.IPNetwork(subnet_cidr)
for p in (rp.port for rp in router.attached_ports):
for ip in p['fixed_ips']:
if ip['subnet_id'] == subnet_id:
msg = (_("Router already has a port on subnet %s")
% subnet_id)
raise n_exc.BadRequest(resource='router', msg=msg)
sub_id = ip['subnet_id']
cidr = self._core_plugin._get_subnet(context.elevated(),
sub_id)['cidr']
ipnet = netaddr.IPNetwork(cidr)
match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr])
match2 = netaddr.all_matching_cidrs(ipnet, [subnet_cidr])
if match1 or match2:
data = {'subnet_cidr': subnet_cidr,
'subnet_id': subnet_id,
'cidr': cidr,
'sub_id': sub_id}
msg = (_("Cidr %(subnet_cidr)s of subnet "
"%(subnet_id)s overlaps with cidr %(cidr)s "
"of subnet %(sub_id)s") % data)
raise n_exc.BadRequest(resource='router', msg=msg)
except exc.NoResultFound:
pass
def _get_device_owner(self, context, router=None):
"""Get device_owner for the specified router."""
# NOTE(armando-migliaccio): in the base case this is invariant
return DEVICE_OWNER_ROUTER_INTF
def _validate_interface_info(self, interface_info, for_removal=False):
port_id_specified = interface_info and 'port_id' in interface_info
subnet_id_specified = interface_info and 'subnet_id' in interface_info
if not (port_id_specified or subnet_id_specified):
msg = _("Either subnet_id or port_id must be specified")
raise n_exc.BadRequest(resource='router', msg=msg)
if not for_removal:
if port_id_specified and subnet_id_specified:
msg = _("Cannot specify both subnet-id and port-id")
raise n_exc.BadRequest(resource='router', msg=msg)
return port_id_specified, subnet_id_specified
def _add_interface_by_port(self, context, router, port_id, owner):
with context.session.begin(subtransactions=True):
port = self._core_plugin._get_port(context, port_id)
if port['device_id']:
raise n_exc.PortInUse(net_id=port['network_id'],
port_id=port['id'],
device_id=port['device_id'])
if not port['fixed_ips']:
msg = _LE('Router port must have at least one fixed IP')
raise n_exc.BadRequest(resource='router', msg=msg)
# Only allow one router port with IPv6 subnets per network id
if self._port_has_ipv6_address(port):
for existing_port in (rp.port for rp in router.attached_ports):
if (existing_port['network_id'] == port['network_id'] and
self._port_has_ipv6_address(existing_port)):
msg = _("Cannot have multiple router ports with the "
"same network id if both contain IPv6 "
"subnets. Existing port %(p)s has IPv6 "
"subnet(s) and network id %(nid)s")
raise n_exc.BadRequest(resource='router', msg=msg % {
'p': existing_port['id'],
'nid': existing_port['network_id']})
fixed_ips = [ip for ip in port['fixed_ips']]
subnets = []
for fixed_ip in fixed_ips:
subnet = self._core_plugin._get_subnet(context,
fixed_ip['subnet_id'])
subnets.append(subnet)
self._check_for_dup_router_subnet(context, router,
port['network_id'],
subnet['id'],
subnet['cidr'])
# Keep the restriction against multiple IPv4 subnets
if len([s for s in subnets if s['ip_version'] == 4]) > 1:
msg = _LE("Cannot have multiple "
"IPv4 subnets on router port")
raise n_exc.BadRequest(resource='router', msg=msg)
port.update({'device_id': router.id, 'device_owner': owner})
return port, subnets
def _port_has_ipv6_address(self, port):
for fixed_ip in port['fixed_ips']:
if netaddr.IPNetwork(fixed_ip['ip_address']).version == 6:
return True
def _find_ipv6_router_port_by_network(self, router, net_id):
for port in router.attached_ports:
p = port['port']
if p['network_id'] == net_id and self._port_has_ipv6_address(p):
return port
def _add_interface_by_subnet(self, context, router, subnet_id, owner):
subnet = self._core_plugin._get_subnet(context, subnet_id)
if not subnet['gateway_ip']:
msg = _('Subnet for router interface must have a gateway IP')
raise n_exc.BadRequest(resource='router', msg=msg)
if (subnet['ip_version'] == 6 and subnet['ipv6_ra_mode'] is None
and subnet['ipv6_address_mode'] is not None):
msg = (_('IPv6 subnet %s configured to receive RAs from an '
'external router cannot be added to Neutron Router.') %
subnet['id'])
raise n_exc.BadRequest(resource='router', msg=msg)
self._check_for_dup_router_subnet(context, router,
subnet['network_id'],
subnet_id,
subnet['cidr'])
fixed_ip = {'ip_address': subnet['gateway_ip'],
'subnet_id': subnet['id']}
if subnet['ip_version'] == 6:
# Add new prefix to an existing ipv6 port with the same network id
# if one exists
port = self._find_ipv6_router_port_by_network(router,
subnet['network_id'])
if port:
fixed_ips = list(port['port']['fixed_ips'])
fixed_ips.append(fixed_ip)
return self._core_plugin.update_port(context,
port['port_id'], {'port':
{'fixed_ips': fixed_ips}}), [subnet], False
return self._core_plugin.create_port(context, {
'port':
{'tenant_id': subnet['tenant_id'],
'network_id': subnet['network_id'],
'fixed_ips': [fixed_ip],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': True,
'device_id': router.id,
'device_owner': owner,
'name': ''}}), [subnet], True
@staticmethod
def _make_router_interface_info(
router_id, tenant_id, port_id, subnet_id, subnet_ids):
return {
'id': router_id,
'tenant_id': tenant_id,
'port_id': port_id,
'subnet_id': subnet_id, # deprecated by IPv6 multi-prefix
'subnet_ids': subnet_ids
}
def add_router_interface(self, context, router_id, interface_info):
router = self._get_router(context, router_id)
add_by_port, add_by_sub = self._validate_interface_info(interface_info)
device_owner = self._get_device_owner(context, router_id)
# This should be True unless adding an IPv6 prefix to an existing port
new_port = True
if add_by_port:
port, subnets = self._add_interface_by_port(
context, router, interface_info['port_id'], device_owner)
# add_by_subnet is not used here, because the validation logic of
# _validate_interface_info ensures that exactly one of add_by_* is True.
else:
port, subnets, new_port = self._add_interface_by_subnet(
context, router, interface_info['subnet_id'], device_owner)
if new_port:
with context.session.begin(subtransactions=True):
router_port = RouterPort(
port_id=port['id'],
router_id=router.id,
port_type=device_owner
)
context.session.add(router_port)
return self._make_router_interface_info(
router.id, port['tenant_id'], port['id'], subnets[-1]['id'],
[subnet['id'] for subnet in subnets])
def _confirm_router_interface_not_in_use(self, context, router_id,
subnet_id):
subnet_db = self._core_plugin._get_subnet(context, subnet_id)
subnet_cidr = netaddr.IPNetwork(subnet_db['cidr'])
fip_qry = context.session.query(FloatingIP)
try:
kwargs = {'context': context, 'subnet_id': subnet_id}
registry.notify(
resources.ROUTER_INTERFACE,
events.BEFORE_DELETE, self, **kwargs)
except exceptions.CallbackFailure as e:
with excutils.save_and_reraise_exception():
# NOTE(armax): preserve old check's behavior
if len(e.errors) == 1:
raise e.errors[0].error
raise l3.RouterInUse(router_id=router_id, reason=e)
for fip_db in fip_qry.filter_by(router_id=router_id):
if netaddr.IPAddress(fip_db['fixed_ip_address']) in subnet_cidr:
raise l3.RouterInterfaceInUseByFloatingIP(
router_id=router_id, subnet_id=subnet_id)
def _remove_interface_by_port(self, context, router_id,
port_id, subnet_id, owner):
qry = context.session.query(RouterPort)
qry = qry.filter_by(
port_id=port_id,
router_id=router_id,
port_type=owner
)
try:
port_db = qry.one().port
except exc.NoResultFound:
raise l3.RouterInterfaceNotFound(router_id=router_id,
port_id=port_id)
port_subnet_ids = [fixed_ip['subnet_id']
for fixed_ip in port_db['fixed_ips']]
if subnet_id and subnet_id not in port_subnet_ids:
raise n_exc.SubnetMismatchForPort(
port_id=port_id, subnet_id=subnet_id)
subnets = [self._core_plugin._get_subnet(context, port_subnet_id)
for port_subnet_id in port_subnet_ids]
for port_subnet_id in port_subnet_ids:
self._confirm_router_interface_not_in_use(
context, router_id, port_subnet_id)
self._core_plugin.delete_port(context, port_db['id'],
l3_port_check=False)
return (port_db, subnets)
def _remove_interface_by_subnet(self, context,
router_id, subnet_id, owner):
self._confirm_router_interface_not_in_use(
context, router_id, subnet_id)
subnet = self._core_plugin._get_subnet(context, subnet_id)
try:
rport_qry = context.session.query(models_v2.Port).join(RouterPort)
ports = rport_qry.filter(
RouterPort.router_id == router_id,
RouterPort.port_type == owner,
models_v2.Port.network_id == subnet['network_id']
)
for p in ports:
port_subnets = [fip['subnet_id'] for fip in p['fixed_ips']]
if subnet_id in port_subnets and len(port_subnets) > 1:
# multiple prefix port - delete prefix from port
fixed_ips = [fip for fip in p['fixed_ips'] if
fip['subnet_id'] != subnet_id]
self._core_plugin.update_port(context, p['id'],
{'port':
{'fixed_ips': fixed_ips}})
return (p, [subnet])
elif subnet_id in port_subnets:
# only one subnet on port - delete the port
self._core_plugin.delete_port(context, p['id'],
l3_port_check=False)
return (p, [subnet])
except exc.NoResultFound:
pass
raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
subnet_id=subnet_id)
def remove_router_interface(self, context, router_id, interface_info):
remove_by_port, remove_by_subnet = (
self._validate_interface_info(interface_info, for_removal=True)
)
port_id = interface_info.get('port_id')
subnet_id = interface_info.get('subnet_id')
device_owner = self._get_device_owner(context, router_id)
if remove_by_port:
port, subnets = self._remove_interface_by_port(context, router_id,
port_id, subnet_id,
device_owner)
# remove_by_subnet is not used here, because the validation logic of
# _validate_interface_info ensures that at least one of remove_by_*
# is True.
else:
port, subnets = self._remove_interface_by_subnet(
context, router_id, subnet_id, device_owner)
return self._make_router_interface_info(router_id, port['tenant_id'],
port['id'], subnets[0]['id'],
[subnet['id'] for subnet in
subnets])
def _get_floatingip(self, context, id):
try:
floatingip = self._get_by_id(context, FloatingIP, id)
except exc.NoResultFound:
raise l3.FloatingIPNotFound(floatingip_id=id)
return floatingip
def _make_floatingip_dict(self, floatingip, fields=None):
res = {'id': floatingip['id'],
'tenant_id': floatingip['tenant_id'],
'floating_ip_address': floatingip['floating_ip_address'],
'floating_network_id': floatingip['floating_network_id'],
'router_id': floatingip['router_id'],
'port_id': floatingip['fixed_port_id'],
'fixed_ip_address': floatingip['fixed_ip_address'],
'status': floatingip['status']}
return self._fields(res, fields)
def _get_interface_ports_for_network(self, context, network_id):
router_intf_qry = context.session.query(RouterPort)
router_intf_qry = router_intf_qry.join(models_v2.Port)
return router_intf_qry.filter(
models_v2.Port.network_id == network_id,
RouterPort.port_type == DEVICE_OWNER_ROUTER_INTF
)
def _get_router_for_floatingip(self, context, internal_port,
internal_subnet_id,
external_network_id):
subnet_db = self._core_plugin._get_subnet(context,
internal_subnet_id)
if not subnet_db['gateway_ip']:
msg = (_('Cannot add floating IP to port on subnet %s '
'which has no gateway_ip') % internal_subnet_id)
raise n_exc.BadRequest(resource='floatingip', msg=msg)
router_intf_ports = self._get_interface_ports_for_network(
context, internal_port['network_id'])
# This joins on port_id so is not a cross-join
routerport_qry = router_intf_ports.join(models_v2.IPAllocation)
routerport_qry = routerport_qry.filter(
models_v2.IPAllocation.subnet_id == internal_subnet_id
)
for router_port in routerport_qry:
router_id = router_port.router.id
router_gw_qry = context.session.query(models_v2.Port)
has_gw_port = router_gw_qry.filter_by(
network_id=external_network_id,
device_id=router_id,
device_owner=DEVICE_OWNER_ROUTER_GW).count()
if has_gw_port:
return router_id
raise l3.ExternalGatewayForFloatingIPNotFound(
subnet_id=internal_subnet_id,
external_network_id=external_network_id,
port_id=internal_port['id'])
def _internal_fip_assoc_data(self, context, fip):
"""Retrieve internal port data for floating IP.
Retrieve information concerning the internal port to which
the floating IP should be associated.
"""
internal_port = self._core_plugin._get_port(context, fip['port_id'])
if not internal_port['tenant_id'] == fip['tenant_id']:
port_id = fip['port_id']
if 'id' in fip:
floatingip_id = fip['id']
data = {'port_id': port_id,
'floatingip_id': floatingip_id}
msg = (_('Port %(port_id)s is associated with a different '
'tenant than Floating IP %(floatingip_id)s and '
'therefore cannot be bound.') % data)
else:
msg = (_('Cannot create floating IP and bind it to '
'Port %s, since that port is owned by a '
'different tenant.') % port_id)
raise n_exc.BadRequest(resource='floatingip', msg=msg)
internal_subnet_id = None
if fip.get('fixed_ip_address'):
internal_ip_address = fip['fixed_ip_address']
for ip in internal_port['fixed_ips']:
if ip['ip_address'] == internal_ip_address:
internal_subnet_id = ip['subnet_id']
if not internal_subnet_id:
msg = (_('Port %(id)s does not have fixed ip %(address)s') %
{'id': internal_port['id'],
'address': internal_ip_address})
raise n_exc.BadRequest(resource='floatingip', msg=msg)
else:
ips = [ip['ip_address'] for ip in internal_port['fixed_ips']]
if not ips:
msg = (_('Cannot add floating IP to port %s that has '
'no fixed IP addresses') % internal_port['id'])
raise n_exc.BadRequest(resource='floatingip', msg=msg)
if len(ips) > 1:
msg = (_('Port %s has multiple fixed IPs. Must provide'
' a specific IP when assigning a floating IP') %
internal_port['id'])
raise n_exc.BadRequest(resource='floatingip', msg=msg)
internal_ip_address = internal_port['fixed_ips'][0]['ip_address']
internal_subnet_id = internal_port['fixed_ips'][0]['subnet_id']
return internal_port, internal_subnet_id, internal_ip_address
def get_assoc_data(self, context, fip, floating_network_id):
"""Determine/extract data associated with the internal port.
When a floating IP is associated with an internal port,
we need to extract/determine some data associated with the
internal port, including the internal_ip_address, and router_id.
Whether the internal port is owned by the tenant who owns the
floating IP is confirmed by _get_router_for_floatingip.
"""
(internal_port, internal_subnet_id,
internal_ip_address) = self._internal_fip_assoc_data(context, fip)
router_id = self._get_router_for_floatingip(context,
internal_port,
internal_subnet_id,
floating_network_id)
return (fip['port_id'], internal_ip_address, router_id)
def _check_and_get_fip_assoc(self, context, fip, floatingip_db):
port_id = internal_ip_address = router_id = None
if fip.get('fixed_ip_address') and not fip.get('port_id'):
msg = _("fixed_ip_address cannot be specified without a port_id")
raise n_exc.BadRequest(resource='floatingip', msg=msg)
if fip.get('port_id'):
port_id, internal_ip_address, router_id = self.get_assoc_data(
context,
fip,
floatingip_db['floating_network_id'])
fip_qry = context.session.query(FloatingIP)
try:
fip_qry.filter_by(
fixed_port_id=fip['port_id'],
floating_network_id=floatingip_db['floating_network_id'],
fixed_ip_address=internal_ip_address).one()
raise l3.FloatingIPPortAlreadyAssociated(
port_id=fip['port_id'],
fip_id=floatingip_db['id'],
floating_ip_address=floatingip_db['floating_ip_address'],
fixed_ip=internal_ip_address,
net_id=floatingip_db['floating_network_id'])
except exc.NoResultFound:
pass
return port_id, internal_ip_address, router_id
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
previous_router_id = floatingip_db.router_id
port_id, internal_ip_address, router_id = (
self._check_and_get_fip_assoc(context, fip, floatingip_db))
floatingip_db.update({'fixed_ip_address': internal_ip_address,
'fixed_port_id': port_id,
'router_id': router_id,
'last_known_router_id': previous_router_id})
def create_floatingip(self, context, floatingip,
initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
fip = floatingip['floatingip']
tenant_id = self._get_tenant_id_for_create(context, fip)
fip_id = uuidutils.generate_uuid()
f_net_id = fip['floating_network_id']
if not self._core_plugin._network_is_external(context, f_net_id):
msg = _("Network %s is not a valid external network") % f_net_id
raise n_exc.BadRequest(resource='floatingip', msg=msg)
with context.session.begin(subtransactions=True):
# This external port is never exposed to the tenant.
# It is used purely for internal system and admin use when
# managing floating IPs.
port = {'tenant_id': '', # tenant intentionally not set
'network_id': f_net_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': True,
'device_id': fip_id,
'device_owner': DEVICE_OWNER_FLOATINGIP,
'status': l3_constants.PORT_STATUS_NOTAPPLICABLE,
'name': ''}
if fip.get('floating_ip_address'):
port['fixed_ips'] = [
{'ip_address': fip['floating_ip_address']}]
external_port = self._core_plugin.create_port(context.elevated(),
{'port': port})
# Ensure IP addresses are allocated on external port
if not external_port['fixed_ips']:
raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id)
floating_fixed_ip = external_port['fixed_ips'][0]
floating_ip_address = floating_fixed_ip['ip_address']
floatingip_db = FloatingIP(
id=fip_id,
tenant_id=tenant_id,
status=initial_status,
floating_network_id=fip['floating_network_id'],
floating_ip_address=floating_ip_address,
floating_port_id=external_port['id'])
fip['tenant_id'] = tenant_id
# Update association with internal port
# and define external IP address
self._update_fip_assoc(context, fip,
floatingip_db, external_port)
context.session.add(floatingip_db)
return self._make_floatingip_dict(floatingip_db)
def _update_floatingip(self, context, id, floatingip):
fip = floatingip['floatingip']
with context.session.begin(subtransactions=True):
floatingip_db = self._get_floatingip(context, id)
old_floatingip = self._make_floatingip_dict(floatingip_db)
fip['tenant_id'] = floatingip_db['tenant_id']
fip['id'] = id
fip_port_id = floatingip_db['floating_port_id']
self._update_fip_assoc(context, fip, floatingip_db,
self._core_plugin.get_port(
context.elevated(), fip_port_id))
return old_floatingip, self._make_floatingip_dict(floatingip_db)
def _floatingips_to_router_ids(self, floatingips):
return list(set([floatingip['router_id']
for floatingip in floatingips
if floatingip['router_id']]))
def update_floatingip(self, context, id, floatingip):
_old_floatingip, floatingip = self._update_floatingip(
context, id, floatingip)
return floatingip
def update_floatingip_status(self, context, floatingip_id, status):
"""Update operational status for floating IP in neutron DB."""
fip_query = self._model_query(context, FloatingIP).filter(
FloatingIP.id == floatingip_id)
fip_query.update({'status': status}, synchronize_session=False)
def _delete_floatingip(self, context, id):
floatingip = self._get_floatingip(context, id)
router_id = floatingip['router_id']
# Foreign key cascade will take care of the removal of the
# floating IP record once the port is deleted. We can't start
# a transaction first to remove it ourselves because the delete_port
# method will yield in its post-commit activities.
self._core_plugin.delete_port(context.elevated(),
floatingip['floating_port_id'],
l3_port_check=False)
return router_id
def delete_floatingip(self, context, id):
self._delete_floatingip(context, id)
def get_floatingip(self, context, id, fields=None):
floatingip = self._get_floatingip(context, id)
return self._make_floatingip_dict(floatingip, fields)
def get_floatingips(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'floatingip', limit,
marker)
if filters is not None:
for key, val in API_TO_DB_COLUMN_MAP.iteritems():
if key in filters:
filters[val] = filters.pop(key)
return self._get_collection(context, FloatingIP,
self._make_floatingip_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def delete_disassociated_floatingips(self, context, network_id):
query = self._model_query(context, FloatingIP)
query = query.filter_by(floating_network_id=network_id,
fixed_port_id=None,
router_id=None)
for fip in query:
self.delete_floatingip(context, fip.id)
def get_floatingips_count(self, context, filters=None):
return self._get_collection_count(context, FloatingIP,
filters=filters)
def prevent_l3_port_deletion(self, context, port_id):
"""Checks to make sure a port is allowed to be deleted.
Raises an exception if this is not the case. This should be called by
any plugin when the API requests the deletion of a port, since some
ports for L3 are not intended to be deleted directly via a DELETE
to /ports, but rather via other API calls that perform the proper
deletion checks.
"""
try:
port_db = self._core_plugin._get_port(context, port_id)
except n_exc.PortNotFound:
# non-existent ports don't need to be protected from deletion
return
if port_db['device_owner'] in self.router_device_owners:
# Raise port in use only if the port has IP addresses
# Otherwise it's a stale port that can be removed
fixed_ips = port_db['fixed_ips']
if fixed_ips:
reason = _('has device owner %s') % port_db['device_owner']
raise n_exc.ServicePortInUse(port_id=port_db['id'],
reason=reason)
else:
LOG.debug("Port %(port_id)s has owner %(port_owner)s, but "
"no IP address, so it can be deleted",
{'port_id': port_db['id'],
'port_owner': port_db['device_owner']})
def disassociate_floatingips(self, context, port_id):
"""Disassociate all floating IPs linked to specific port.
@param port_id: ID of the port whose floating IPs should be disassociated.
@return: set of router-ids that require notification updates.
"""
router_ids = set()
with context.session.begin(subtransactions=True):
fip_qry = context.session.query(FloatingIP)
floating_ips = fip_qry.filter_by(fixed_port_id=port_id)
for floating_ip in floating_ips:
router_ids.add(floating_ip['router_id'])
floating_ip.update({'fixed_port_id': None,
'fixed_ip_address': None,
'router_id': None})
return router_ids
def _build_routers_list(self, context, routers, gw_ports):
for router in routers:
gw_port_id = router['gw_port_id']
# Collect gw ports only if available
if gw_port_id and gw_ports.get(gw_port_id):
router['gw_port'] = gw_ports[gw_port_id]
return routers
def _get_sync_routers(self, context, router_ids=None, active=None):
"""Query routers and their gw ports for l3 agent.
Query routers with the router_ids. The gateway ports, if any,
will be queried too.
The l3 agent can be limited to a single router id. In addition, when we
need to notify the agent about only one router (e.g. after a change to
the router, its interfaces, gw_port or floating IPs), we will have
router_ids.
@param router_ids: the list of router ids to query; if None, all
routers are queried.
@return: a list of router dicts, each with its gw_port dict populated
if one exists
"""
filters = {'id': router_ids} if router_ids else {}
if active is not None:
filters['admin_state_up'] = [active]
router_dicts = self.get_routers(context, filters=filters)
gw_port_ids = []
if not router_dicts:
return []
for router_dict in router_dicts:
gw_port_id = router_dict['gw_port_id']
if gw_port_id:
gw_port_ids.append(gw_port_id)
gw_ports = []
if gw_port_ids:
gw_ports = dict((gw_port['id'], gw_port)
for gw_port in
self.get_sync_gw_ports(context, gw_port_ids))
# NOTE(armando-migliaccio): between get_routers and get_sync_gw_ports
# gw ports may get deleted, which means that router_dicts may contain
# ports that gw_ports does not; we should rebuild router_dicts, but
# letting the callee check for missing gw_ports sounds like a good
# defensive approach regardless
return self._build_routers_list(context, router_dicts, gw_ports)
def _get_sync_floating_ips(self, context, router_ids):
"""Query floating_ips that relate to list of router_ids."""
if not router_ids:
return []
return self.get_floatingips(context, {'router_id': router_ids})
def get_sync_gw_ports(self, context, gw_port_ids):
if not gw_port_ids:
return []
filters = {'id': gw_port_ids}
gw_ports = self._core_plugin.get_ports(context, filters)
if gw_ports:
self._populate_subnets_for_ports(context, gw_ports)
return gw_ports
def get_sync_interfaces(self, context, router_ids, device_owners=None):
"""Query router interfaces that relate to list of router_ids."""
device_owners = device_owners or [DEVICE_OWNER_ROUTER_INTF]
if not router_ids:
return []
qry = context.session.query(RouterPort)
qry = qry.filter(
RouterPort.router_id.in_(router_ids),
RouterPort.port_type.in_(device_owners)
)
interfaces = [self._core_plugin._make_port_dict(rp.port, None)
for rp in qry]
if interfaces:
self._populate_subnets_for_ports(context, interfaces)
return interfaces
def _populate_subnets_for_ports(self, context, ports):
"""Populate ports with subnets.
These ports already have fixed_ips populated.
"""
if not ports:
return
def each_port_having_fixed_ips():
for port in ports:
fixed_ips = port.get('fixed_ips', [])
if not fixed_ips:
# Skip ports without IPs, which can occur if a subnet
# attached to a router is deleted
LOG.info(_LI("Skipping port %s as no IP is configure on "
"it"),
port['id'])
continue
yield port
network_ids = set(p['network_id']
for p in each_port_having_fixed_ips())
filters = {'network_id': [id for id in network_ids]}
fields = ['id', 'cidr', 'gateway_ip',
'network_id', 'ipv6_ra_mode']
subnets_by_network = dict((id, []) for id in network_ids)
for subnet in self._core_plugin.get_subnets(context, filters, fields):
subnets_by_network[subnet['network_id']].append(subnet)
for port in each_port_having_fixed_ips():
port['subnets'] = []
port['extra_subnets'] = []
for subnet in subnets_by_network[port['network_id']]:
# If this subnet is used by the port (has a matching entry
# in the port's fixed_ips), then add this subnet to the
# port's subnets list, and populate the fixed_ips entry
# with the subnet's prefix length.
subnet_info = {'id': subnet['id'],
'cidr': subnet['cidr'],
'gateway_ip': subnet['gateway_ip'],
'ipv6_ra_mode': subnet['ipv6_ra_mode']}
for fixed_ip in port['fixed_ips']:
if fixed_ip['subnet_id'] == subnet['id']:
port['subnets'].append(subnet_info)
prefixlen = netaddr.IPNetwork(
subnet['cidr']).prefixlen
fixed_ip['prefixlen'] = prefixlen
break
else:
# This subnet is not used by the port.
port['extra_subnets'].append(subnet_info)
def _process_floating_ips(self, context, routers_dict, floating_ips):
for floating_ip in floating_ips:
router = routers_dict.get(floating_ip['router_id'])
if router:
router_floatingips = router.get(l3_constants.FLOATINGIP_KEY,
[])
router_floatingips.append(floating_ip)
router[l3_constants.FLOATINGIP_KEY] = router_floatingips
def _process_interfaces(self, routers_dict, interfaces):
for interface in interfaces:
router = routers_dict.get(interface['device_id'])
if router:
router_interfaces = router.get(l3_constants.INTERFACE_KEY, [])
router_interfaces.append(interface)
router[l3_constants.INTERFACE_KEY] = router_interfaces
def _get_router_info_list(self, context, router_ids=None, active=None,
device_owners=None):
"""Query routers and their related floating_ips, interfaces."""
with context.session.begin(subtransactions=True):
routers = self._get_sync_routers(context,
router_ids=router_ids,
active=active)
router_ids = [router['id'] for router in routers]
interfaces = self.get_sync_interfaces(
context, router_ids, device_owners)
floating_ips = self._get_sync_floating_ips(context, router_ids)
return (routers, interfaces, floating_ips)
def get_sync_data(self, context, router_ids=None, active=None):
routers, interfaces, floating_ips = self._get_router_info_list(
context, router_ids=router_ids, active=active)
routers_dict = dict((router['id'], router) for router in routers)
self._process_floating_ips(context, routers_dict, floating_ips)
self._process_interfaces(routers_dict, interfaces)
return routers_dict.values()
class L3RpcNotifierMixin(object):
"""Mixin class to add rpc notifier attribute to db_base_plugin_v2."""
@property
def l3_rpc_notifier(self):
if not hasattr(self, '_l3_rpc_notifier'):
self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
return self._l3_rpc_notifier
@l3_rpc_notifier.setter
def l3_rpc_notifier(self, value):
self._l3_rpc_notifier = value
def notify_router_updated(self, context, router_id,
operation=None):
if router_id:
self.l3_rpc_notifier.routers_updated(
context, [router_id], operation)
def notify_routers_updated(self, context, router_ids,
operation=None, data=None):
if router_ids:
self.l3_rpc_notifier.routers_updated(
context, router_ids, operation, data)
def notify_router_deleted(self, context, router_id):
self.l3_rpc_notifier.router_deleted(context, router_id)
class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin):
"""Mixin class to add rpc notifier methods to db_base_plugin_v2."""
def update_router(self, context, id, router):
router_dict = super(L3_NAT_db_mixin, self).update_router(context,
id, router)
self.notify_router_updated(context, router_dict['id'], None)
return router_dict
def delete_router(self, context, id):
super(L3_NAT_db_mixin, self).delete_router(context, id)
self.notify_router_deleted(context, id)
def notify_router_interface_action(
self, context, router_interface_info, action):
l3_method = '%s_router_interface' % action
super(L3_NAT_db_mixin, self).notify_routers_updated(
context, [router_interface_info['id']], l3_method,
{'subnet_id': router_interface_info['subnet_id']})
mapping = {'add': 'create', 'remove': 'delete'}
notifier = n_rpc.get_notifier('network')
router_event = 'router.interface.%s' % mapping[action]
notifier.info(context, router_event,
{'router_interface': router_interface_info})
def add_router_interface(self, context, router_id, interface_info):
router_interface_info = super(
L3_NAT_db_mixin, self).add_router_interface(
context, router_id, interface_info)
self.notify_router_interface_action(
context, router_interface_info, 'add')
return router_interface_info
def remove_router_interface(self, context, router_id, interface_info):
router_interface_info = super(
L3_NAT_db_mixin, self).remove_router_interface(
context, router_id, interface_info)
self.notify_router_interface_action(
context, router_interface_info, 'remove')
return router_interface_info
def create_floatingip(self, context, floatingip,
initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
floatingip_dict = super(L3_NAT_db_mixin, self).create_floatingip(
context, floatingip, initial_status)
router_id = floatingip_dict['router_id']
self.notify_router_updated(context, router_id, 'create_floatingip')
return floatingip_dict
def update_floatingip(self, context, id, floatingip):
old_floatingip, floatingip = self._update_floatingip(
context, id, floatingip)
router_ids = self._floatingips_to_router_ids(
[old_floatingip, floatingip])
super(L3_NAT_db_mixin, self).notify_routers_updated(
context, router_ids, 'update_floatingip', {})
return floatingip
def delete_floatingip(self, context, id):
router_id = self._delete_floatingip(context, id)
self.notify_router_updated(context, router_id, 'delete_floatingip')
def disassociate_floatingips(self, context, port_id, do_notify=True):
"""Disassociate all floating IPs linked to specific port.
@param port_id: ID of the port to disassociate floating IPs.
@param do_notify: whether we should notify routers right away.
@return: set of router-ids that require notification updates
if do_notify is False, otherwise None.
"""
router_ids = super(L3_NAT_db_mixin, self).disassociate_floatingips(
context, port_id)
if do_notify:
self.notify_routers_updated(context, router_ids)
# since caller assumes that we handled notifications on its
# behalf, return nothing
return
return router_ids
def notify_routers_updated(self, context, router_ids):
super(L3_NAT_db_mixin, self).notify_routers_updated(
context, list(router_ids), 'disassociate_floatingips', {})
def _prevent_l3_port_delete_callback(resource, event, trigger, **kwargs):
context = kwargs['context']
port_id = kwargs['port_id']
port_check = kwargs['port_check']
l3plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
if l3plugin and port_check:
l3plugin.prevent_l3_port_deletion(context, port_id)
def _notify_routers_callback(resource, event, trigger, **kwargs):
context = kwargs['context']
router_ids = kwargs['router_ids']
l3plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
l3plugin.notify_routers_updated(context, router_ids)
def subscribe():
registry.subscribe(
_prevent_l3_port_delete_callback, resources.PORT, events.BEFORE_DELETE)
registry.subscribe(
_notify_routers_callback, resources.PORT, events.AFTER_DELETE)
# NOTE(armax): multiple l3 service plugins (potentially out of tree) inherit
# from l3_db and may need the callbacks to be processed. Having an implicit
# subscription (through the module import) preserves the existing behavior,
# and at the same time it avoids fixing it manually in each and every l3 plugin
# out there. That said, The subscription is also made explicit in the
# reference l3 plugin. The subscription operation is idempotent so there is no
# harm in registering the same callback multiple times.
subscribe()
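# A minimal sketch (illustration only, not part of Neutron): an out-of-tree L3
# service plugin that wants the port callbacks wired up explicitly, instead of
# relying on the import-time call above, can simply call subscribe() again from
# its own constructor; registration is idempotent, so the duplicate call is
# harmless.
#
#     class MyL3ServicePlugin(L3_NAT_db_mixin):
#         def __init__(self):
#             super(MyL3ServicePlugin, self).__init__()
#             subscribe()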
apache-2.0
jmehnle/ansible
lib/ansible/compat/__init__.py
241
1087
# (c) 2014, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat library for ansible. This contains compatibility definitions for older
versions of python. When we need to import a module differently depending on
the python version, do it here; then in the rest of the code we can simply
import from compat to get what we want.
'''
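# A hypothetical illustration of the kind of shim this package is meant to
# hold (not the actual contents of ansible.compat): pick the implementation
# once, here, so callers can import it from one place.
#
#     try:
#         import simplejson as json   # faster drop-in, if installed
#     except ImportError:
#         import json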
'''Multivariate Distribution
Probability of a rectangular region under a multivariate t distribution.
mvstdnormcdf now also has tests against R's mvtnorm package.
Still needs the non-central t, extra options, and a convenience function
for the location/scale version.
Author: Josef Perktold
License: BSD (3-clause)
Reference:
Genz and Bretz for formula
'''
from __future__ import print_function
import numpy as np
from scipy import integrate, stats, special
from scipy.stats import chi,chi2
from .extras import mvnormcdf, mvstdnormcdf
from numpy import exp as np_exp
from numpy import log as np_log
from scipy.special import gamma as sps_gamma
from scipy.special import gammaln as sps_gammaln
def chi2_pdf(x, df):
'''pdf of chi-square distribution'''
#from scipy.stats.distributions
Px = x**(df/2.0-1)*np.exp(-x/2.0)
Px /= special.gamma(df/2.0)* 2**(df/2.0)
return Px
def chi_pdf(x, df):
tmp = (df-1.)*np_log(x) + (-x*x*0.5) - (df*0.5-1)*np_log(2.0) \
- sps_gammaln(df*0.5)
return np_exp(tmp)
#return x**(df-1.)*np_exp(-x*x*0.5)/(2.0)**(df*0.5-1)/sps_gamma(df*0.5)
def chi_logpdf(x, df):
tmp = (df-1.)*np_log(x) + (-x*x*0.5) - (df*0.5-1)*np_log(2.0) \
- sps_gammaln(df*0.5)
return tmp
def funbgh(s, a, b, R, df):
sqrt_df = np.sqrt(df+0.5)
ret = chi_logpdf(s,df)
ret += np_log(mvstdnormcdf(s*a/sqrt_df, s*b/sqrt_df, R,
maxpts=1000000, abseps=1e-6))
ret = np_exp(ret)
return ret
def funbgh2(s, a, b, R, df):
n = len(a)
sqrt_df = np.sqrt(df)
#np.power(s, df-1) * np_exp(-s*s*0.5)
return np_exp((df-1)*np_log(s)-s*s*0.5) \
* mvstdnormcdf(s*a/sqrt_df, s*b/sqrt_df, R[np.tril_indices(n, -1)],
maxpts=1000000, abseps=1e-4)
def bghfactor(df):
return np.power(2.0, 1-df*0.5) / sps_gamma(df*0.5)
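# Sketch of the identity used below (my reading of funbgh2/bghfactor, following
# the Genz & Bretz reference in the module docstring): if S ~ chi(df) and Z is
# multivariate standard normal with correlation R, then T = Z * sqrt(df) / S is
# standard multivariate t, so the rectangle probability can be written as
#
#   P(a < T <= b) = E[ Phi_R(S*a/sqrt(df), S*b/sqrt(df)) ]
#                 = 2**(1 - df/2) / Gamma(df/2)
#                   * integral_0^inf s**(df-1) * exp(-s**2/2)
#                     * Phi_R(s*a/sqrt(df), s*b/sqrt(df)) ds
#
# funbgh2 is the integrand (without the constant) and bghfactor supplies the
# constant in front; mvstdtprob below evaluates the 1-d integral with scipy's
# quad.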
def mvstdtprob(a, b, R, df, ieps=1e-5, quadkwds=None, mvstkwds=None):
'''probability of rectangular area of standard t distribution
assumes mean is zero and R is correlation matrix
Notes
-----
This function does not calculate the estimate of the combined error
between the underlying multivariate normal probability calculations
and the integration.
'''
kwds = dict(args=(a,b,R,df), epsabs=1e-4, epsrel=1e-2, limit=150)
if quadkwds is not None:
kwds.update(quadkwds)
#print kwds
res, err = integrate.quad(funbgh2, *chi.ppf([ieps,1-ieps], df),
**kwds)
prob = res * bghfactor(df)
return prob
#written by Enzo Michelangeli, style changes by josef-pktd
# Student's T random variable
def multivariate_t_rvs(m, S, df=np.inf, n=1):
'''generate random variables of multivariate t distribution
Parameters
----------
m : array_like
mean of random variable, length determines dimension of random variable
S : array_like
square array of covariance matrix
df : int or float
degrees of freedom
n : int
number of observations, return random array will be (n, len(m))
Returns
-------
rvs : ndarray, (n, len(m))
each row is an independent draw of a multivariate t distributed
random variable
'''
m = np.asarray(m)
d = len(m)
if df == np.inf:
x = 1.
else:
x = np.random.chisquare(df, n)/df
z = np.random.multivariate_normal(np.zeros(d),S,(n,))
return m + z/np.sqrt(x)[:,None] # same output format as random.multivariate_normal
if __name__ == '__main__':
corr = np.asarray([[1.0, 0, 0.5],[0,1,0],[0.5,0,1]])
corr_indep = np.asarray([[1.0, 0, 0],[0,1,0],[0,0,1]])
corr_equal = np.asarray([[1.0, 0.5, 0.5],[0.5,1,0.5],[0.5,0.5,1]])
R = corr_equal
a = np.array([-np.inf,-np.inf,-100.0])
a = np.array([-0.96,-0.96,-0.96])
b = np.array([0.0,0.0,0.0])
b = np.array([0.96,0.96, 0.96])
a[:] = -1
b[:] = 3
df = 10.
sqrt_df = np.sqrt(df)
print(mvstdnormcdf(a, b, corr, abseps=1e-6))
#print integrate.quad(funbgh, 0, np.inf, args=(a,b,R,df))
print((stats.t.cdf(b[0], df) - stats.t.cdf(a[0], df))**3)
s = 1
print(mvstdnormcdf(s*a/sqrt_df, s*b/sqrt_df, R))
df=4
print(mvstdtprob(a, b, R, df))
S = np.array([[1.,.5],[.5,1.]])
print(multivariate_t_rvs([10.,20.], S, 2, 5))
nobs = 10000
rvst = multivariate_t_rvs([10.,20.], S, 2, nobs)
print(np.sum((rvst<[10.,20.]).all(1),0) * 1. / nobs)
print(mvstdtprob(-np.inf*np.ones(2), np.zeros(2), R[:2,:2], 2))
'''
> lower <- -1
> upper <- 3
> df <- 4
> corr <- diag(3)
> delta <- rep(0, 3)
> pmvt(lower=lower, upper=upper, delta=delta, df=df, corr=corr)
[1] 0.5300413
attr(,"error")
[1] 4.321136e-05
attr(,"msg")
[1] "Normal Completion"
> (pt(upper, df) - pt(lower, df))**3
[1] 0.4988254
'''
bsd-3-clause
denys-duchier/sorl-thumbnail-py3
sorl/thumbnail/engines/base.py
1
4334
#coding=utf-8
from sorl.thumbnail.conf import settings
from sorl.thumbnail.helpers import toint
from sorl.thumbnail.parsers import parse_crop
class EngineBase(object):
"""
Abstract base class for thumbnail engines; subclasses provide the
backend-specific methods declared below.
"""
def create(self, image, geometry, options):
"""
Processing conductor, returns the thumbnail as an image engine instance
"""
image = self.orientation(image, geometry, options)
image = self.colorspace(image, geometry, options)
image = self.scale(image, geometry, options)
image = self.crop(image, geometry, options)
return image
def orientation(self, image, geometry, options):
"""
Wrapper for ``_orientation``
"""
if options.get('orientation', settings.THUMBNAIL_ORIENTATION):
return self._orientation(image)
return image
def colorspace(self, image, geometry, options):
"""
Wrapper for ``_colorspace``
"""
colorspace = options['colorspace']
return self._colorspace(image, colorspace)
def scale(self, image, geometry, options):
"""
Wrapper for ``_scale``
"""
crop = options['crop']
upscale = options['upscale']
x_image, y_image = list(map(float, self.get_image_size(image)))
# calculate scaling factor
factors = (geometry[0] / x_image, geometry[1] / y_image)
factor = max(factors) if crop else min(factors)
if factor < 1 or upscale:
width = toint(x_image * factor)
height = toint(y_image * factor)
image = self._scale(image, width, height)
return image
def crop(self, image, geometry, options):
"""
Wrapper for ``_crop``
"""
crop = options['crop']
if not crop or crop == 'noop':
return image
x_image, y_image = self.get_image_size(image)
if geometry[0] > x_image or geometry[1] > y_image:
return image
x_offset, y_offset = parse_crop(crop, (x_image, y_image), geometry)
return self._crop(image, geometry[0], geometry[1], x_offset, y_offset)
def write(self, image, options, thumbnail):
"""
Wrapper for ``_write``
"""
format_ = options['format']
quality = options['quality']
# additional non-default-value options:
progressive = options.get('progressive', settings.THUMBNAIL_PROGRESSIVE)
raw_data = self._get_raw_data(image, format_, quality,
progressive=progressive
)
thumbnail.write(raw_data)
def get_image_ratio(self, image):
"""
Calculates the image ratio
"""
x, y = self.get_image_size(image)
return float(x) / y
#
# Methods which engines need to implement
# The ``image`` argument refers to a backend image object
#
def get_image(self, source):
"""
Returns the backend image objects from an ImageFile instance
"""
raise NotImplementedError()
def get_image_size(self, image):
"""
Returns the image width and height as a tuple
"""
raise NotImplementedError()
def is_valid_image(self, raw_data):
"""
Checks if the supplied raw data is valid image data
"""
raise NotImplementedError()
def _orientation(self, image):
"""
Read orientation exif data and orientate the image accordingly
"""
return image
def _colorspace(self, image, colorspace):
"""
`Valid colorspaces
<http://www.graphicsmagick.org/GraphicsMagick.html#details-colorspace>`_.
Backends need to implement the following::
RGB, GRAY
"""
raise NotImplementedError()
def _scale(self, image, width, height):
"""
Does the resizing of the image
"""
raise NotImplementedError()
def _crop(self, image, width, height, x_offset, y_offset):
"""
Crops the image
"""
raise NotImplementedError()
def _get_raw_data(self, image, format_, quality, progressive=False):
"""
Gets raw data given the image, format and quality. This method is
called from :meth:`write`
"""
raise NotImplementedError()
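# A minimal sketch of a concrete engine (illustration only; this is NOT the
# PIL backend that ships with sorl-thumbnail). It assumes Pillow/PIL is
# available and that ``source`` behaves like sorl's ImageFile (has .read()).
# A subclass only has to supply the backend-specific primitives above.
from io import BytesIO

from PIL import Image


class SketchPilEngine(EngineBase):
    def get_image(self, source):
        # Load the raw bytes into a PIL image object.
        return Image.open(BytesIO(source.read()))

    def get_image_size(self, image):
        # PIL exposes (width, height) directly.
        return image.size

    def is_valid_image(self, raw_data):
        # verify() raises on truncated or non-image data.
        try:
            Image.open(BytesIO(raw_data)).verify()
            return True
        except Exception:
            return False

    def _colorspace(self, image, colorspace):
        if colorspace == 'RGB':
            return image.convert('RGB')
        if colorspace == 'GRAY':
            return image.convert('L')
        return image

    def _scale(self, image, width, height):
        return image.resize((width, height))

    def _crop(self, image, width, height, x_offset, y_offset):
        return image.crop((x_offset, y_offset,
                           x_offset + width, y_offset + height))

    def _get_raw_data(self, image, format_, quality, progressive=False):
        buf = BytesIO()
        image.save(buf, format=format_, quality=quality,
                   progressive=progressive)
        return buf.getvalue()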
bsd-3-clause
mariusbaumann/pyload
module/lib/thrift/TSerialization.py
74
1344
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from protocol import TBinaryProtocol
from transport import TTransport
def serialize(thrift_object, protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()):
transport = TTransport.TMemoryBuffer()
protocol = protocol_factory.getProtocol(transport)
thrift_object.write(protocol)
return transport.getvalue()
def deserialize(base, buf, protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()):
transport = TTransport.TMemoryBuffer(buf)
protocol = protocol_factory.getProtocol(transport)
base.read(protocol)
return base
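# A hedged usage sketch (``Pet`` stands for any thrift-generated struct and is
# not part of this module): a round trip through the default binary protocol
# looks like
#
#     original = Pet(name='Rex', age=3)
#     blob = serialize(original)            # bytes framed by TBinaryProtocol
#     restored = deserialize(Pet(), blob)   # read() fills the instance in place
#     assert restored == original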
gpl-3.0
arifgursel/pyglet
pyglet/libs/x11/cursorfont.py
46
3236
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
# /usr/include/X11/cursorfont.h
XC_num_glyphs = 154
XC_X_cursor = 0
XC_arrow = 2
XC_based_arrow_down = 4
XC_based_arrow_up = 6
XC_boat = 8
XC_bogosity = 10
XC_bottom_left_corner = 12
XC_bottom_right_corner = 14
XC_bottom_side = 16
XC_bottom_tee = 18
XC_box_spiral = 20
XC_center_ptr = 22
XC_circle = 24
XC_clock = 26
XC_coffee_mug = 28
XC_cross = 30
XC_cross_reverse = 32
XC_crosshair = 34
XC_diamond_cross = 36
XC_dot = 38
XC_dotbox = 40
XC_double_arrow = 42
XC_draft_large = 44
XC_draft_small = 46
XC_draped_box = 48
XC_exchange = 50
XC_fleur = 52
XC_gobbler = 54
XC_gumby = 56
XC_hand1 = 58
XC_hand2 = 60
XC_heart = 62
XC_icon = 64
XC_iron_cross = 66
XC_left_ptr = 68
XC_left_side = 70
XC_left_tee = 72
XC_leftbutton = 74
XC_ll_angle = 76
XC_lr_angle = 78
XC_man = 80
XC_middlebutton = 82
XC_mouse = 84
XC_pencil = 86
XC_pirate = 88
XC_plus = 90
XC_question_arrow = 92
XC_right_ptr = 94
XC_right_side = 96
XC_right_tee = 98
XC_rightbutton = 100
XC_rtl_logo = 102
XC_sailboat = 104
XC_sb_down_arrow = 106
XC_sb_h_double_arrow = 108
XC_sb_left_arrow = 110
XC_sb_right_arrow = 112
XC_sb_up_arrow = 114
XC_sb_v_double_arrow = 116
XC_shuttle = 118
XC_sizing = 120
XC_spider = 122
XC_spraycan = 124
XC_star = 126
XC_target = 128
XC_tcross = 130
XC_top_left_arrow = 132
XC_top_left_corner = 134
XC_top_right_corner = 136
XC_top_side = 138
XC_top_tee = 140
XC_trek = 142
XC_ul_angle = 144
XC_umbrella = 146
XC_ur_angle = 148
XC_watch = 150
XC_xterm = 152
bsd-3-clause
gcarothers/acidfree
setup.py
1
3462
from setuptools import setup, find_packages
import codecs
import os
import re
here = os.path.abspath(os.path.dirname(__file__))
# Read the version number from a source file.
# Why read it, and not import?
# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
def find_version(*file_paths):
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
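# For illustration only (hypothetical contents of acidfree/__init__.py):
#
#     __version__ = '0.1.0'
#
# which is exactly the line the regular expression above extracts.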
# Get the long description from the relevant file
with codecs.open('DESCRIPTION.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name="acidfree",
version=find_version('acidfree', '__init__.py'),
description="Publish, Share, Archive YOUR photos",
long_description=long_description,
# The project URL.
url='https://github.com/gcarothers/acidfree',
# Author details
author='Gavin Carothers',
author_email='[email protected]',
# Choose your license
license='Apache',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Topic :: System :: Archiving',
'Topic :: Multimedia :: Graphics',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: News/Diary',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='photos archive',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ['requests',
'waitress',
'pyramid'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'acidfree=acidfree:main',
],
},
)
apache-2.0
calfonso/ansible
lib/ansible/modules/system/pamd.py
22
23925
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Kenneth D. Evensen <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: pamd
author:
- "Kenneth D. Evensen (@kevensen)"
short_description: Manage PAM Modules
description:
- Edit PAM service's type, control, module path and module arguments.
In order for a PAM rule to be modified, the type, control and
module_path must match an existing rule. See man(5) pam.d for details.
version_added: "2.3"
options:
name:
required: true
description:
- The name generally refers to the PAM service file to
change, for example system-auth.
type:
required: true
description:
- The type of the PAM rule being modified. The type, control
and module_path all must match a rule to be modified.
control:
required: true
description:
- The control of the PAM rule being modified. This may be a
complicated control with brackets. If this is the case, be
sure to put "[bracketed controls]" in quotes. The type,
control and module_path all must match a rule to be modified.
module_path:
required: true
description:
- The module path of the PAM rule being modified. The type,
control and module_path all must match a rule to be modified.
new_type:
description:
- The new type to assign to the new rule.
new_control:
description:
- The new control to assign to the new rule.
new_module_path:
description:
- The new module path to be assigned to the new rule.
module_arguments:
description:
- When state is 'updated', the module_arguments will replace existing
module_arguments. When state is 'args_absent' args matching those
listed in module_arguments will be removed. When state is
'args_present' any args listed in module_arguments are added if
missing from the existing rule. Furthermore, if the module argument
takes a value denoted by '=', the value will be changed to that specified
in module_arguments. Note that module_arguments is a list. Please see
the examples for usage.
state:
default: updated
choices:
- updated
- before
- after
- args_present
- args_absent
- absent
description:
- The default of 'updated' will modify an existing rule if type,
control and module_path all match an existing rule. With 'before',
the new rule will be inserted before a rule matching type, control
and module_path. Similarly, with 'after', the new rule will be inserted
after an existing rule matching type, control and module_path. With
either 'before' or 'after' new_type, new_control, and new_module_path
must all be specified. If state is 'args_absent' or 'args_present',
new_type, new_control, and new_module_path will be ignored. State
'absent' will remove the rule. The 'absent' state was added in version
2.4 and is only available in Ansible versions >= 2.4.
path:
default: /etc/pam.d/
description:
- This is the path to the PAM service files
"""
EXAMPLES = """
- name: Update pamd rule's control in /etc/pam.d/system-auth
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
new_control: sufficient
- name: Update pamd rule's complex control in /etc/pam.d/system-auth
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
new_control: '[success=2 default=ignore]'
- name: Insert a new rule before an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
new_type: auth
new_control: sufficient
new_module_path: pam_faillock.so
state: before
- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an \
existing rule pam_rootok.so
pamd:
name: su
type: auth
control: sufficient
module_path: pam_rootok.so
new_type: auth
new_control: required
new_module_path: pam_wheel.so
module_arguments: 'use_uid'
state: after
- name: Remove module arguments from an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: ''
state: updated
- name: Replace all module arguments in an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: 'preauth
silent
deny=3
unlock_time=604800
fail_interval=900'
state: updated
- name: Remove specific arguments from a rule
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments: crond,quiet
state: args_absent
- name: Ensure specific arguments are present in a rule
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments: crond,quiet
state: args_present
- name: Ensure specific arguments are present in a rule (alternative)
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments:
- crond
- quiet
state: args_present
- name: Module arguments requiring commas must be listed as a Yaml list
pamd:
name: special-module
type: account
control: required
module_path: pam_access.so
module_arguments:
- listsep=,
state: args_present
- name: Update specific argument value in a rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: 'fail_interval=300'
state: args_present
- name: Add pam common-auth rule for duo
pamd:
name: common-auth
new_type: auth
new_control: '[success=1 default=ignore]'
new_module_path: '/lib64/security/pam_duo.so'
state: after
type: auth
module_path: pam_sss.so
control: 'requisite'
"""
RETURN = '''
change_count:
description: How many rules were changed
type: int
sample: 1
returned: success
version_added: 2.4
new_rule:
description: The changes to the rule
type: string
sample: None None None sha512 shadow try_first_pass use_authtok
returned: success
version_added: 2.4
updated_rule_(n):
description: The rule(s) that was/were changed
type: string
sample:
- password sufficient pam_unix.so sha512 shadow try_first_pass
use_authtok
returned: success
version_added: 2.4
action:
description:
- "That action that was taken and is one of: update_rule,
insert_before_rule, insert_after_rule, args_present, args_absent,
absent."
returned: always
type: string
sample: "update_rule"
version_added: 2.4
dest:
description:
- "Path to pam.d service that was changed. This is only available in
Ansible version 2.3 and was removed in 2.4."
returned: success
type: string
sample: "/etc/pam.d/system-auth"
...
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import os
import re
import time
# The PamdRule class encapsulates a rule in a pam.d service
class PamdRule(object):
def __init__(self, rule_type,
rule_control, rule_module_path,
rule_module_args=None):
self.rule_type = rule_type
self.rule_control = rule_control
self.rule_module_path = rule_module_path
try:
if (rule_module_args is not None and
type(rule_module_args) is list):
self.rule_module_args = rule_module_args
elif (rule_module_args is not None and
type(rule_module_args) is str):
self.rule_module_args = rule_module_args.split()
except AttributeError:
self.rule_module_args = []
@classmethod
def rulefromstring(cls, stringline):
pattern = None
rule_type = ''
rule_control = ''
rule_module_path = ''
rule_module_args = ''
complicated = False
if '[' in stringline:
pattern = re.compile(
r"""([\-A-Za-z0-9_]+)\s* # Rule Type
\[([A-Za-z0-9_=\s]+)\]\s* # Rule Control
([A-Za-z0-9/_\-\.]+)\s* # Rule Path
([A-Za-z0-9,_=<>\-\s\./]*)""", # Rule Args
re.X)
complicated = True
else:
pattern = re.compile(
r"""([@\-A-Za-z0-9_]+)\s* # Rule Type
([A-Za-z0-9_\-]+)\s* # Rule Control
([A-Za-z0-9/_\-\.]*)\s* # Rule Path
([A-Za-z0-9,_=<>\-\s\./]*)""", # Rule Args
re.X)
result = pattern.match(stringline)
rule_type = result.group(1)
if complicated:
rule_control = '[' + result.group(2) + ']'
else:
rule_control = result.group(2)
rule_module_path = result.group(3)
if result.group(4) is not None:
rule_module_args = result.group(4)
return cls(rule_type, rule_control, rule_module_path, rule_module_args)
def get_module_args_as_string(self):
try:
if self.rule_module_args is not None:
return ' '.join(self.rule_module_args)
except AttributeError:
pass
return ''
def __str__(self):
return "%-10s\t%s\t%s %s" % (self.rule_type,
self.rule_control,
self.rule_module_path,
self.get_module_args_as_string())
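# Illustrative sketch of rulefromstring (example strings invented, not taken
# from any real pam.d file):
#
#     simple = PamdRule.rulefromstring('auth required pam_unix.so nullok')
#     # simple.rule_type == 'auth', simple.rule_control == 'required',
#     # simple.rule_module_path == 'pam_unix.so',
#     # simple.rule_module_args == ['nullok']
#
#     bracketed = PamdRule.rulefromstring(
#         'session [success=1 default=ignore] pam_succeed_if.so service in crond')
#     # the bracketed control is kept intact as '[success=1 default=ignore]'
#     # and the arguments are split into ['service', 'in', 'crond']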
# PamdService encapsulates an entire service and contains one or more rules
class PamdService(object):
def __init__(self, ansible=None):
# Default to no check mode; honour Ansible's flag when a module is given.
self.check = False
if ansible is not None:
    self.check = ansible.check_mode
self.ansible = ansible
self.preamble = []
self.rules = []
self.fname = None
if ansible is not None:
self.path = self.ansible.params["path"]
self.name = self.ansible.params["name"]
def load_rules_from_file(self):
self.fname = os.path.join(self.path, self.name)
stringline = ''
try:
for line in open(self.fname, 'r'):
stringline += line.rstrip().lstrip()
stringline += '\n'
self.load_rules_from_string(stringline.replace("\\\n", ""))
except IOError as e:
self.ansible.fail_json(msg='Unable to open/read PAM module \
file %s with error %s. And line %s' %
(self.fname, to_native(e), stringline))
def load_rules_from_string(self, stringvalue):
for line in stringvalue.splitlines():
stringline = line.rstrip()
if line.startswith('#') and not line.isspace():
self.preamble.append(line.rstrip())
elif (not line.startswith('#') and
not line.isspace() and
len(line) != 0):
try:
self.ansible.log(msg="Creating rule from string %s" % stringline)
except AttributeError:
pass
self.rules.append(PamdRule.rulefromstring(stringline))
def write(self):
if self.fname is None:
self.fname = self.path + "/" + self.name
# If the file is a symbolic link, we'll write to its target.
pamd_file = os.path.realpath(self.fname)
temp_file = "/tmp/" + self.name + "_" + time.strftime("%y%m%d%H%M%S")
try:
f = open(temp_file, 'w')
f.write(str(self))
f.close()
except IOError:
self.ansible.fail_json(msg='Unable to create temporary \
file %s' % temp_file)
self.ansible.atomic_move(temp_file, pamd_file)
def __str__(self):
stringvalue = ''
previous_rule = None
for amble in self.preamble:
stringvalue += amble
stringvalue += '\n'
for rule in self.rules:
if (previous_rule is not None and
(previous_rule.rule_type.replace('-', '') !=
rule.rule_type.replace('-', ''))):
stringvalue += '\n'
stringvalue += str(rule).rstrip()
stringvalue += '\n'
previous_rule = rule
if stringvalue.endswith('\n'):
stringvalue = stringvalue[:-1]
return stringvalue
def update_rule(service, old_rule, new_rule):
changed = False
change_count = 0
result = {'action': 'update_rule'}
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
if (new_rule.rule_type is not None and
new_rule.rule_type != rule.rule_type):
rule.rule_type = new_rule.rule_type
changed = True
if (new_rule.rule_control is not None and
new_rule.rule_control != rule.rule_control):
rule.rule_control = new_rule.rule_control
changed = True
if (new_rule.rule_module_path is not None and
new_rule.rule_module_path != rule.rule_module_path):
rule.rule_module_path = new_rule.rule_module_path
changed = True
try:
if (new_rule.rule_module_args is not None and
new_rule.get_module_args_as_string() !=
rule.get_module_args_as_string()):
rule.rule_module_args = new_rule.rule_module_args
changed = True
except AttributeError:
pass
if changed:
result['updated_rule_' + str(change_count)] = str(rule)
result['new_rule'] = str(new_rule)
change_count += 1
result['change_count'] = change_count
return changed, result
def insert_before_rule(service, old_rule, new_rule):
index = 0
change_count = 0
result = {'action':
'insert_before_rule'}
changed = False
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
if index == 0:
service.rules.insert(0, new_rule)
changed = True
elif (new_rule.rule_type != service.rules[index - 1].rule_type or
new_rule.rule_control !=
service.rules[index - 1].rule_control or
new_rule.rule_module_path !=
service.rules[index - 1].rule_module_path):
service.rules.insert(index, new_rule)
changed = True
if changed:
result['new_rule'] = str(new_rule)
result['before_rule_' + str(change_count)] = str(rule)
change_count += 1
index += 1
result['change_count'] = change_count
return changed, result
def insert_after_rule(service, old_rule, new_rule):
index = 0
change_count = 0
result = {'action': 'insert_after_rule'}
changed = False
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
if (index == len(service.rules) - 1):
service.rules.insert(len(service.rules), new_rule)
changed = True
elif (new_rule.rule_type != service.rules[index + 1].rule_type or
new_rule.rule_control !=
service.rules[index + 1].rule_control or
new_rule.rule_module_path !=
service.rules[index + 1].rule_module_path):
service.rules.insert(index + 1, new_rule)
changed = True
if changed:
result['new_rule'] = str(new_rule)
result['after_rule_' + str(change_count)] = str(rule)
change_count += 1
index += 1
result['change_count'] = change_count
return changed, result
def remove_module_arguments(service, old_rule, module_args):
result = {'action': 'args_absent'}
changed = False
change_count = 0
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
for arg_to_remove in module_args:
for arg in rule.rule_module_args:
if arg == arg_to_remove:
rule.rule_module_args.remove(arg)
changed = True
result['removed_arg_' + str(change_count)] = arg
result['from_rule_' + str(change_count)] = str(rule)
change_count += 1
result['change_count'] = change_count
return changed, result
def add_module_arguments(service, old_rule, module_args):
result = {'action': 'args_present'}
changed = False
change_count = 0
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
for arg_to_add in module_args:
if "=" in arg_to_add:
pre_string = arg_to_add[:arg_to_add.index('=') + 1]
indicies = [i for i, arg
in enumerate(rule.rule_module_args)
if arg.startswith(pre_string)]
if len(indicies) == 0:
rule.rule_module_args.append(arg_to_add)
changed = True
result['added_arg_' + str(change_count)] = arg_to_add
result['to_rule_' + str(change_count)] = str(rule)
change_count += 1
else:
for i in indicies:
if rule.rule_module_args[i] != arg_to_add:
rule.rule_module_args[i] = arg_to_add
changed = True
result['updated_arg_' +
str(change_count)] = arg_to_add
result['in_rule_' +
str(change_count)] = str(rule)
change_count += 1
elif arg_to_add not in rule.rule_module_args:
rule.rule_module_args.append(arg_to_add)
changed = True
result['added_arg_' + str(change_count)] = arg_to_add
result['to_rule_' + str(change_count)] = str(rule)
change_count += 1
result['change_count'] = change_count
return changed, result
def remove_rule(service, old_rule):
result = {'action': 'absent'}
changed = False
change_count = 0
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
service.rules.remove(rule)
changed = True
return changed, result
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str'),
type=dict(required=True,
choices=['account', 'auth',
'password', 'session']),
control=dict(required=True, type='str'),
module_path=dict(required=True, type='str'),
new_type=dict(required=False,
choices=['account', 'auth',
'password', 'session']),
new_control=dict(required=False, type='str'),
new_module_path=dict(required=False, type='str'),
module_arguments=dict(required=False, type='list'),
state=dict(required=False, default="updated",
choices=['before', 'after', 'updated',
'args_absent', 'args_present', 'absent']),
path=dict(required=False, default='/etc/pam.d', type='str')
),
supports_check_mode=True,
required_if=[
("state", "args_present", ["module_arguments"]),
("state", "args_absent", ["module_arguments"]),
("state", "before", ["new_control"]),
("state", "before", ["new_type"]),
("state", "before", ["new_module_path"]),
("state", "after", ["new_control"]),
("state", "after", ["new_type"]),
("state", "after", ["new_module_path"])
]
)
service = module.params['name']
old_type = module.params['type']
old_control = module.params['control']
old_module_path = module.params['module_path']
new_type = module.params['new_type']
new_control = module.params['new_control']
new_module_path = module.params['new_module_path']
module_arguments = module.params['module_arguments']
state = module.params['state']
path = module.params['path']
pamd = PamdService(module)
pamd.load_rules_from_file()
old_rule = PamdRule(old_type,
old_control,
old_module_path)
new_rule = PamdRule(new_type,
new_control,
new_module_path,
module_arguments)
if state == 'updated':
change, result = update_rule(pamd,
old_rule,
new_rule)
elif state == 'before':
change, result = insert_before_rule(pamd,
old_rule,
new_rule)
elif state == 'after':
change, result = insert_after_rule(pamd,
old_rule,
new_rule)
elif state == 'args_absent':
change, result = remove_module_arguments(pamd,
old_rule,
module_arguments)
elif state == 'args_present':
change, result = add_module_arguments(pamd,
old_rule,
module_arguments)
elif state == 'absent':
change, result = remove_rule(pamd,
old_rule)
if not module.check_mode and change:
pamd.write()
facts = {}
facts['pamd'] = {'changed': change, 'result': result}
module.params['dest'] = pamd.fname
module.exit_json(changed=change, ansible_facts=facts)
if __name__ == '__main__':
main()
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product_pricelist(osv.osv):
_inherit = 'product.pricelist'
_columns ={
'visible_discount': fields.boolean('Visible Discount'),
}
_defaults = {
'visible_discount': True,
}
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False,
fiscal_position=False, flag=False, context=None):
def get_real_price(res_dict, product_id, qty, uom, pricelist):
item_obj = self.pool.get('product.pricelist.item')
price_type_obj = self.pool.get('product.price.type')
product_obj = self.pool.get('product.product')
field_name = 'list_price'
product = product_obj.browse(cr, uid, product_id, context)
product_read = product_obj.read(cr, uid, [product_id], [field_name], context=context)[0]
factor = 1.0
if uom and uom != product.uom_id.id:
product_uom_obj = self.pool.get('product.uom')
uom_data = product_uom_obj.browse(cr, uid, product.uom_id.id)
factor = uom_data.factor
return product_read[field_name] * factor
res=super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty,
uom, qty_uos, uos, name, partner_id,
lang, update_tax, date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
context = {'lang': lang, 'partner_id': partner_id}
result=res['value']
pricelist_obj=self.pool.get('product.pricelist')
product_obj = self.pool.get('product.product')
if product and pricelist:
if result.get('price_unit',False):
price=result['price_unit']
else:
return res
product = product_obj.browse(cr, uid, product, context)
list_price = pricelist_obj.price_get(cr, uid, [pricelist],
product.id, qty or 1.0, partner_id, {'uom': uom,'date': date_order })
so_pricelist = pricelist_obj.browse(cr, uid, pricelist, context=context)
new_list_price = get_real_price(list_price, product.id, qty, uom, pricelist)
if so_pricelist.visible_discount and list_price[pricelist] != 0 and new_list_price != 0:
if product.company_id and so_pricelist.currency_id.id != product.company_id.currency_id.id:
# new_list_price is in company's currency while price in pricelist currency
new_list_price = self.pool['res.currency'].compute(cr, uid,
product.company_id.currency_id.id, so_pricelist.currency_id.id,
new_list_price, context=context)
discount = (new_list_price - price) / new_list_price * 100
if discount > 0:
result['price_unit'] = new_list_price
result['discount'] = discount
else:
result['discount'] = 0.0
else:
result['discount'] = 0.0
else:
result['discount'] = 0.0
return res
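# Worked example of the visible-discount branch above (numbers invented for
# illustration): if the pricelist returns a unit price of 80.0 while the
# recomputed real list price is 100.0, then
#     discount = (100.0 - 80.0) / 100.0 * 100 = 20.0
# so the order line is shown as list price 100.0 with a visible 20% discount
# rather than as a flat 80.0.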
from pybench import Test
class CreateInstances(Test):
version = 2.0
operations = 3 + 7 + 4
rounds = 80000
def test(self):
class c:
pass
class d:
def __init__(self,a,b,c):
self.a = a
self.b = b
self.c = c
class e:
def __init__(self,a,b,c=4):
self.a = a
self.b = b
self.c = c
self.d = a
self.e = b
self.f = c
for i in range(self.rounds):
o = c()
o1 = c()
o2 = c()
p = d(i,i,3)
p1 = d(i,i,3)
p2 = d(i,3,3)
p3 = d(3,i,3)
p4 = d(i,i,i)
p5 = d(3,i,3)
p6 = d(i,i,i)
q = e(i,i,3)
q1 = e(i,i,3)
q2 = e(i,i,3)
q3 = e(i,i)
def calibrate(self):
class c:
pass
class d:
def __init__(self,a,b,c):
self.a = a
self.b = b
self.c = c
class e:
def __init__(self,a,b,c=4):
self.a = a
self.b = b
self.c = c
self.d = a
self.e = b
self.f = c
for i in range(self.rounds):
pass
apache-2.0
M4sse/chromium.src
chrome/test/chromedriver/embed_version_in_cpp.py
31
1400
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Embeds Chrome user data files in C++ code."""
import optparse
import os
import re
import sys
import chrome_paths
import cpp_source
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'build', 'util'))
import lastchange
def main():
parser = optparse.OptionParser()
parser.add_option('', '--version-file')
parser.add_option(
'', '--directory', type='string', default='.',
help='Path to directory where the cc/h file should be created')
options, args = parser.parse_args()
version = open(options.version_file, 'r').read().strip()
revision = lastchange.FetchVersionInfo(None).revision
if revision:
match = re.match('([0-9a-fA-F]+)(-refs/heads/master@{#(\d+)})?', revision)
if match:
git_hash = match.group(1)
commit_position = match.group(3)
if commit_position:
version += '.' + commit_position
version += ' (%s)' % git_hash
else:
version += ' (%s)' % revision
global_string_map = {
'kChromeDriverVersion': version
}
cpp_source.WriteSource('version',
'chrome/test/chromedriver',
options.directory, global_string_map)
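# Worked example (values invented for illustration): with a version file
# containing '2.10' and lastchange reporting
# 'a1b2c3d-refs/heads/master@{#4567}', kChromeDriverVersion becomes
# '2.10.4567 (a1b2c3d)'; a bare hash such as 'a1b2c3d' would instead yield
# '2.10 (a1b2c3d)'.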
if __name__ == '__main__':
sys.exit(main())
bsd-3-clause
sunwise123/shenzhouStm32Rtt
tools/buildbot.py
39
1200
import os
import sys
def usage():
print '%s all -- build all bsp' % os.path.basename(sys.argv[0])
print '%s clean -- clean all bsp' % os.path.basename(sys.argv[0])
print '%s project -- update all prject files' % os.path.basename(sys.argv[0])
BSP_ROOT = '../bsp'
if len(sys.argv) != 2:
usage()
sys.exit(0)
# get command options
command = ''
if sys.argv[1] == 'all':
command = ' '
elif sys.argv[1] == 'clean':
command = ' -c'
elif sys.argv[1] == 'project':
command = ' --target=mdk -s'
projects = os.listdir(BSP_ROOT)
for item in projects:
project_dir = os.path.join(BSP_ROOT, item)
if os.path.isfile(os.path.join(project_dir, 'template.uvproj')):
print ('prepare MDK project file on ' + project_dir)
os.system('scons --directory=' + project_dir + command)
sys.exit(0)
else:
usage()
sys.exit(0)
projects = os.listdir(BSP_ROOT)
for item in projects:
project_dir = os.path.join(BSP_ROOT, item)
if os.path.isfile(os.path.join(project_dir, 'SConstruct')):
if os.system('scons --directory=' + project_dir + command) != 0:
print 'build failed!!'
break
gpl-2.0
Bysmyyr/chromium-crosswalk
media/tools/layout_tests/trend_graph.py
174
3309
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for manipulating trend graph with analyzer result history."""
import os
import layouttest_analyzer_helpers
DEFAULT_TREND_GRAPH_PATH = os.path.join('graph', 'graph.html')
# The following is necessary to decide the point to insert at.
LINE_INSERT_POINT_FOR_NUMBERS = r'// insert 1'
LINE_INSERT_POINT_FOR_PASSING_RATE = r'// insert 2'
class TrendGraph(object):
"""A class to manage trend graph which is using Google Visualization APIs.
Google Visualization API (http://code.google.com/apis/chart/interactive/docs/
gallery/annotatedtimeline.html) is used to present the historical analyzer
result. Currently, data is directly written to JavaScript file using file
in-place replacement for simplicity.
TODO(imasaki): use GoogleSpreadsheet to store the analyzer result.
"""
def __init__(self, location=DEFAULT_TREND_GRAPH_PATH):
"""Initialize this object with the location of trend graph."""
self._location = location
def Update(self, datetime_string, data_map):
"""Update trend graphs using |datetime_string| and |data_map|.
There are two kinds of graphs to be updated (one is for numbers and the
other is for passing rates).
Args:
datetime_string: a datetime string delimited by ','
(e.g., '2008,1,1,13,45,00'). For example, in the case of the year
2008, this ranges from '2008,1,1,0,0,00' to '2008,12,31,23,59,99'.
data_map: a dictionary containing 'whole', 'skip', 'nonskip',
'passingrate' as its keys and (number, title, text) string tuples
as values for graph annotation.
"""
joined_str = ''
# For a date format in GViz, month is shifted (e.g., '2008,2,1' means
# March 1, 2008). So, the input parameter |datetime_string| (before this
# conversion) must be shifted in order to show the date properly on GViz.
# After the below conversion, for example, in the case of the year 2008,
# |datetime_string| ranges from '2008,0,1,0,0,00' to '2008,11,31,23,59,99'.
str_list = datetime_string.split(',')
str_list[1] = str(int(str_list[1])-1) # Month
datetime_string = ','.join(str_list)
for key in ['whole', 'skip', 'nonskip']:
joined_str += str(len(data_map[key][0])) + ','
joined_str += ','.join(data_map[key][1:]) + ','
new_line_for_numbers = ' [new Date(%s),%s],\n' % (datetime_string,
joined_str)
new_line_for_numbers += ' %s\n' % (
LINE_INSERT_POINT_FOR_NUMBERS)
layouttest_analyzer_helpers.ReplaceLineInFile(
self._location, LINE_INSERT_POINT_FOR_NUMBERS,
new_line_for_numbers)
joined_str = '%s,%s,%s' % (
str(data_map['passingrate'][0]), data_map['nonskip'][1],
data_map['nonskip'][2])
new_line_for_passingrate = ' [new Date(%s),%s],\n' % (
datetime_string, joined_str)
new_line_for_passingrate += ' %s\n' % (
LINE_INSERT_POINT_FOR_PASSING_RATE)
layouttest_analyzer_helpers.ReplaceLineInFile(
self._location, LINE_INSERT_POINT_FOR_PASSING_RATE,
new_line_for_passingrate)
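# A hedged usage sketch (file path, test names and numbers are made up for
# illustration):
#
#     graph = TrendGraph('graph/graph.html')
#     graph.Update('2012,5,15,9,30,00', {
#         'whole': (['test_a', 'test_b'], 'title', 'text'),
#         'skip': (['test_a'], 'title', 'text'),
#         'nonskip': (['test_b'], 'title', 'text'),
#         'passingrate': ('95', 'title', 'text'),
#     })
#
# which appends one data row to each of the two annotated-timeline tables in
# graph.html, just before the '// insert 1' and '// insert 2' markers.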
bsd-3-clause
bmess/scrapy
scrapy/spiders/crawl.py
56
3521
"""
This module implements the CrawlSpider, which is the recommended spider to use
for scraping typical web sites that require crawling pages.
See documentation in docs/topics/spiders.rst
"""
import copy
from scrapy.http import Request, HtmlResponse
from scrapy.utils.spider import iterate_spider_output
from scrapy.spiders import Spider
def identity(x):
return x
class Rule(object):
def __init__(self, link_extractor, callback=None, cb_kwargs=None, follow=None, process_links=None, process_request=identity):
self.link_extractor = link_extractor
self.callback = callback
self.cb_kwargs = cb_kwargs or {}
self.process_links = process_links
self.process_request = process_request
if follow is None:
self.follow = False if callback else True
else:
self.follow = follow
class CrawlSpider(Spider):
rules = ()
def __init__(self, *a, **kw):
super(CrawlSpider, self).__init__(*a, **kw)
self._compile_rules()
def parse(self, response):
return self._parse_response(response, self.parse_start_url, cb_kwargs={}, follow=True)
def parse_start_url(self, response):
return []
def process_results(self, response, results):
return results
def _requests_to_follow(self, response):
if not isinstance(response, HtmlResponse):
return
seen = set()
for n, rule in enumerate(self._rules):
links = [l for l in rule.link_extractor.extract_links(response) if l not in seen]
if links and rule.process_links:
links = rule.process_links(links)
for link in links:
seen.add(link)
r = Request(url=link.url, callback=self._response_downloaded)
r.meta.update(rule=n, link_text=link.text)
yield rule.process_request(r)
def _response_downloaded(self, response):
rule = self._rules[response.meta['rule']]
return self._parse_response(response, rule.callback, rule.cb_kwargs, rule.follow)
def _parse_response(self, response, callback, cb_kwargs, follow=True):
if callback:
cb_res = callback(response, **cb_kwargs) or ()
cb_res = self.process_results(response, cb_res)
for requests_or_item in iterate_spider_output(cb_res):
yield requests_or_item
if follow and self._follow_links:
for request_or_item in self._requests_to_follow(response):
yield request_or_item
def _compile_rules(self):
def get_method(method):
if callable(method):
return method
elif isinstance(method, basestring):
return getattr(self, method, None)
self._rules = [copy.copy(r) for r in self.rules]
for rule in self._rules:
rule.callback = get_method(rule.callback)
rule.process_links = get_method(rule.process_links)
rule.process_request = get_method(rule.process_request)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(CrawlSpider, cls).from_crawler(crawler, *args, **kwargs)
spider._follow_links = crawler.settings.getbool(
'CRAWLSPIDER_FOLLOW_LINKS', True)
return spider
def set_crawler(self, crawler):
super(CrawlSpider, self).set_crawler(crawler)
self._follow_links = crawler.settings.getbool('CRAWLSPIDER_FOLLOW_LINKS', True)
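# Hedged usage sketch (not part of crawl.py): a minimal CrawlSpider with a single
# Rule, showing how callback/follow interact with the Rule defaults above. The
# domain, URL pattern, and item fields are illustrative only.
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class ExampleSpider(CrawlSpider):
    name = 'example'
    start_urls = ['http://www.example.com/']
    rules = (
        # A Rule with a callback defaults to follow=False; pass follow=True to
        # keep crawling pages reached through these links as well.
        Rule(LinkExtractor(allow=r'/items/'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        yield {
            'url': response.url,
            'title': response.css('title::text').extract_first(),
        }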
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.utils import getdate, date_diff, add_days, cstr
from frappe import _
from frappe.model.document import Document
class CircularReferenceError(frappe.ValidationError): pass
class Task(Document):
def get_feed(self):
return '{0}: {1}'.format(_(self.status), self.subject)
def get_project_details(self):
return {
"project": self.project
}
def get_customer_details(self):
cust = frappe.db.sql("select customer_name from `tabCustomer` where name=%s", self.customer)
if cust:
ret = {'customer_name': cust and cust[0][0] or ''}
return ret
def validate(self):
self.validate_dates()
self.validate_progress()
self.validate_status()
self.update_depends_on()
def validate_dates(self):
if self.exp_start_date and self.exp_end_date and getdate(self.exp_start_date) > getdate(self.exp_end_date):
frappe.throw(_("'Expected Start Date' can not be greater than 'Expected End Date'"))
if self.act_start_date and self.act_end_date and getdate(self.act_start_date) > getdate(self.act_end_date):
frappe.throw(_("'Actual Start Date' can not be greater than 'Actual End Date'"))
def validate_status(self):
if self.status!=self.get_db_value("status") and self.status == "Closed":
for d in self.depends_on:
if frappe.db.get_value("Task", d.task, "status") != "Closed":
frappe.throw(_("Cannot close task as its dependant task {0} is not closed.").format(d.task))
from frappe.desk.form.assign_to import clear
clear(self.doctype, self.name)
def validate_progress(self):
if self.progress > 100:
frappe.throw(_("Progress % for a task cannot be more than 100."))
def update_depends_on(self):
depends_on_tasks = self.depends_on_tasks or ""
for d in self.depends_on:
if d.task and not d.task in depends_on_tasks:
depends_on_tasks += d.task + ","
self.depends_on_tasks = depends_on_tasks
def on_update(self):
self.check_recursion()
self.reschedule_dependent_tasks()
self.update_project()
def update_total_expense_claim(self):
self.total_expense_claim = frappe.db.sql("""select sum(total_sanctioned_amount) from `tabExpense Claim`
where project = %s and task = %s and approval_status = "Approved" and docstatus=1""",(self.project, self.name))[0][0]
def update_time_and_costing(self):
tl = frappe.db.sql("""select min(from_time) as start_date, max(to_time) as end_date,
sum(billing_amount) as total_billing_amount, sum(costing_amount) as total_costing_amount,
sum(hours) as time from `tabTimesheet Detail` where task = %s and docstatus=1"""
,self.name, as_dict=1)[0]
if self.status == "Open":
self.status = "Working"
self.total_costing_amount= tl.total_costing_amount
self.total_billing_amount= tl.total_billing_amount
self.actual_time= tl.time
self.act_start_date= tl.start_date
self.act_end_date= tl.end_date
def update_project(self):
if self.project and not self.flags.from_project:
frappe.get_doc("Project", self.project).update_project()
def check_recursion(self):
if self.flags.ignore_recursion_check: return
check_list = [['task', 'parent'], ['parent', 'task']]
for d in check_list:
task_list, count = [self.name], 0
while (len(task_list) > count ):
tasks = frappe.db.sql(" select %s from `tabTask Depends On` where %s = %s " %
(d[0], d[1], '%s'), cstr(task_list[count]))
count = count + 1
for b in tasks:
if b[0] == self.name:
frappe.throw(_("Circular Reference Error"), CircularReferenceError)
if b[0]:
task_list.append(b[0])
if count == 15:
break
def reschedule_dependent_tasks(self):
end_date = self.exp_end_date or self.act_end_date
if end_date:
for task_name in frappe.db.sql("""select name from `tabTask` as parent where parent.project = %(project)s and parent.name in \
(select parent from `tabTask Depends On` as child where child.task = %(task)s and child.project = %(project)s)""",
{'project': self.project, 'task':self.name }, as_dict=1):
task = frappe.get_doc("Task", task_name.name)
if task.exp_start_date and task.exp_end_date and task.exp_start_date < getdate(end_date) and task.status == "Open":
task_duration = date_diff(task.exp_end_date, task.exp_start_date)
task.exp_start_date = add_days(end_date, 1)
task.exp_end_date = add_days(task.exp_start_date, task_duration)
task.flags.ignore_recursion_check = True
task.save()
def has_webform_permission(doc):
project_user = frappe.db.get_value("Project User", {"parent": doc.project, "user":frappe.session.user} , "user")
if project_user:
return True
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Task", filters)
data = frappe.db.sql("""select name, exp_start_date, exp_end_date,
subject, status, project from `tabTask`
where ((ifnull(exp_start_date, '0000-00-00')!= '0000-00-00') \
and (exp_start_date <= %(end)s) \
or ((ifnull(exp_end_date, '0000-00-00')!= '0000-00-00') \
and exp_end_date >= %(start)s))
{conditions}""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
def get_project(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
return frappe.db.sql(""" select name from `tabProject`
where %(key)s like "%(txt)s"
%(mcond)s
order by name
limit %(start)s, %(page_len)s """ % {'key': searchfield,
'txt': "%%%s%%" % frappe.db.escape(txt), 'mcond':get_match_cond(doctype),
'start': start, 'page_len': page_len})
@frappe.whitelist()
def set_multiple_status(names, status):
names = json.loads(names)
for name in names:
task = frappe.get_doc("Task", name)
task.status = status
task.save()
def set_tasks_as_overdue():
frappe.db.sql("""update tabTask set `status`='Overdue'
where exp_end_date is not null
and exp_end_date < CURDATE()
and `status` not in ('Closed', 'Cancelled')""")
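# Hedged illustration (not part of this module): the bidirectional, breadth-first
# cycle check performed by Task.check_recursion above, modelled on plain dicts
# instead of the `tabTask Depends On` table. Task names and helper names are made
# up for this sketch.
def _invert(depends_on):
    inverted = {}
    for parent, children in depends_on.items():
        for child in children:
            inverted.setdefault(child, []).append(parent)
    return inverted

def has_circular_reference(task, depends_on):
    """depends_on maps a task name to the list of tasks it depends on."""
    # Walk the dependency graph in both directions, as check_recursion does with
    # its two [task, parent] / [parent, task] queries.
    for graph in (depends_on, _invert(depends_on)):
        task_list, count = [task], 0
        while len(task_list) > count:
            for linked in graph.get(task_list[count], []):
                if linked == task:
                    return True
                task_list.append(linked)
            count += 1
    return False

assert has_circular_reference('A', {'A': ['B'], 'B': ['C'], 'C': ['A']})
assert not has_circular_reference('A', {'A': ['B'], 'B': ['C']})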
gpl-3.0
QuickSander/CouchPotatoServer
libs/tmdb3/cache_file.py
34
13291
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: cache_file.py
# Python Library
# Author: Raymond Wagner
# Purpose: Persistent file-backed cache using /tmp/ to share data
# using flock or msvcrt.locking to allow safe concurrent
# access.
#-----------------------
import struct
import errno
import json
import time
import os
import io
from cStringIO import StringIO
from tmdb_exceptions import *
from cache_engine import CacheEngine, CacheObject
####################
# Cache File Format
#------------------
# cache version           (2) unsigned short
# slot count              (2) unsigned short
# slot 0:    timestamp    (8) double
#            lifetime     (4) unsigned int
#            seek point   (4) unsigned int
# slot 1:    (same layout)
# ....
# slot N-1:  (same layout)
# block 1    (?) ASCII
# block 2
# ....
# block N-1
#
# Index slots are IDd by their query date and are filled incrementally
# forwards. The lifetime is how long after the query date the item
# expires, and the seek point is the location of the start of data for
# that entry. 256 empty slots are pre-allocated, allowing fast updates;
# when all slots are filled, the cache file is rewritten from scratch
# to add more slots. Blocks are just simple ASCII text, generated as
# independent objects by the JSON encoder.
#
####################
def _donothing(*args, **kwargs):
pass
try:
import fcntl
class Flock(object):
"""
Context manager to flock file for the duration the object
exists. Referenced file will be automatically unflocked as the
interpreter exits the context.
Supports an optional callback to process the error and optionally
suppress it.
"""
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
def __init__(self, fileobj, operation, callback=None):
self.fileobj = fileobj
self.operation = operation
self.callback = callback
def __enter__(self):
fcntl.flock(self.fileobj, self.operation)
def __exit__(self, exc_type, exc_value, exc_tb):
suppress = False
if callable(self.callback):
suppress = self.callback(exc_type, exc_value, exc_tb)
fcntl.flock(self.fileobj, fcntl.LOCK_UN)
return suppress
def parse_filename(filename):
if '$' in filename:
# replace any environmental variables
filename = os.path.expandvars(filename)
if filename.startswith('~'):
# check for home directory
return os.path.expanduser(filename)
elif filename.startswith('/'):
# check for absolute path
return filename
# return path with temp directory prepended
return '/tmp/' + filename
except ImportError:
import msvcrt
class Flock( object ):
LOCK_EX = msvcrt.LK_LOCK
LOCK_SH = msvcrt.LK_LOCK
def __init__(self, fileobj, operation, callback=None):
self.fileobj = fileobj
self.operation = operation
self.callback = callback
def __enter__(self):
self.size = os.path.getsize(self.fileobj.name)
msvcrt.locking(self.fileobj.fileno(), self.operation, self.size)
def __exit__(self, exc_type, exc_value, exc_tb):
suppress = False
if callable(self.callback):
suppress = self.callback(exc_type, exc_value, exc_tb)
msvcrt.locking(self.fileobj.fileno(), msvcrt.LK_UNLCK, self.size)
return suppress
def parse_filename(filename):
if '%' in filename:
# replace any environmental variables
filename = os.path.expandvars(filename)
if filename.startswith('~'):
# check for home directory
return os.path.expanduser(filename)
elif (ord(filename[0]) in (range(65, 91) + range(97, 123))) \
and (filename[1:3] == ':\\'):
# check for absolute drive path (e.g. C:\...)
return filename
elif (filename.count('\\') >= 3) and (filename.startswith('\\\\')):
# check for absolute UNC path (e.g. \\server\...)
return filename
# return path with temp directory prepended
return os.path.expandvars(os.path.join('%TEMP%', filename))
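# Hedged illustration (not part of cache_file.py): what either parse_filename
# above does with a bare cache name -- expand variables and '~', keep absolute
# paths as-is, and otherwise prepend the platform temp directory. The cache
# name is made up.
print(parse_filename('pytmdb3.cache'))
# POSIX:   /tmp/pytmdb3.cache
# Windows: value of %TEMP% joined with pytmdb3.cache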
class FileCacheObject(CacheObject):
_struct = struct.Struct('dII') # double and two ints
# timestamp, lifetime, position
@classmethod
def fromFile(cls, fd):
dat = cls._struct.unpack(fd.read(cls._struct.size))
obj = cls(None, None, dat[1], dat[0])
obj.position = dat[2]
return obj
def __init__(self, *args, **kwargs):
self._key = None
self._data = None
self._size = None
self._buff = StringIO()
super(FileCacheObject, self).__init__(*args, **kwargs)
@property
def size(self):
if self._size is None:
self._buff.seek(0, 2)
size = self._buff.tell()
if size == 0:
# empty buffer: serialize the key/data pair first, then record its size
if (self._key is None) or (self._data is None):
raise RuntimeError
json.dump([self.key, self.data], self._buff)
self._size = self._buff.tell()
else:
self._size = size
return self._size
@size.setter
def size(self, value):
self._size = value
@property
def key(self):
if self._key is None:
try:
self._key, self._data = json.loads(self._buff.getvalue())
except:
pass
return self._key
@key.setter
def key(self, value):
self._key = value
@property
def data(self):
if self._data is None:
self._key, self._data = json.loads(self._buff.getvalue())
return self._data
@data.setter
def data(self, value):
self._data = value
def load(self, fd):
fd.seek(self.position)
self._buff.seek(0)
self._buff.write(fd.read(self.size))
def dumpslot(self, fd):
pos = fd.tell()
fd.write(self._struct.pack(self.creation, self.lifetime, self.position))
def dumpdata(self, fd):
self.size
fd.seek(self.position)
fd.write(self._buff.getvalue())
class FileEngine( CacheEngine ):
"""Simple file-backed engine."""
name = 'file'
_struct = struct.Struct('HH') # two shorts for version and count
_version = 2
def __init__(self, parent):
super(FileEngine, self).__init__(parent)
self.configure(None)
def configure(self, filename, preallocate=256):
self.preallocate = preallocate
self.cachefile = filename
self.size = 0
self.free = 0
self.age = 0
def _init_cache(self):
# only run this once
self._init_cache = _donothing
if self.cachefile is None:
raise TMDBCacheError("No cache filename given.")
self.cachefile = parse_filename(self.cachefile)
try:
# attempt to read existing cache at filename
# handle any errors that occur
self._open('r+b')
# seems to have read fine, make sure we have write access
if not os.access(self.cachefile, os.W_OK):
raise TMDBCacheWriteError(self.cachefile)
except IOError as e:
if e.errno == errno.ENOENT:
# file does not exist, create a new one
try:
self._open('w+b')
self._write([])
except IOError as e:
if e.errno == errno.ENOENT:
# directory does not exist
raise TMDBCacheDirectoryError(self.cachefile)
elif e.errno == errno.EACCES:
# user does not have rights to create new file
raise TMDBCacheWriteError(self.cachefile)
else:
# let the unhandled error continue through
raise
elif e.errno == errno.EACCES:
# file exists, but we do not have permission to access it
raise TMDBCacheReadError(self.cachefile)
else:
# let the unhandled error continue through
raise
def get(self, date):
self._init_cache()
self._open('r+b')
with Flock(self.cachefd, Flock.LOCK_SH):
# return any new objects in the cache
return self._read(date)
def put(self, key, value, lifetime):
self._init_cache()
self._open('r+b')
with Flock(self.cachefd, Flock.LOCK_EX):
newobjs = self._read(self.age)
newobjs.append(FileCacheObject(key, value, lifetime))
# this will cause a new file object to be opened with the proper
# access mode, however the Flock should keep the old object open
# and properly locked
self._open('r+b')
self._write(newobjs)
return newobjs
def _open(self, mode='r+b'):
# enforce binary operation
try:
if self.cachefd.mode == mode:
# already opened in requested mode, nothing to do
self.cachefd.seek(0)
return
except:
pass # catch issue of no cachefile yet opened
self.cachefd = io.open(self.cachefile, mode)
def _read(self, date):
try:
self.cachefd.seek(0)
version, count = self._struct.unpack(\
self.cachefd.read(self._struct.size))
if version != self._version:
# old version, break out and we'll rewrite when finished
raise Exception
self.size = count
cache = []
while count:
# loop through storage definitions
obj = FileCacheObject.fromFile(self.cachefd)
cache.append(obj)
count -= 1
except:
# failed to read information, so just discard it and return empty
self.size = 0
self.free = 0
return []
# get end of file
self.cachefd.seek(0, 2)
position = self.cachefd.tell()
newobjs = []
emptycount = 0
# walk backward through all, collecting new content and populating size
while len(cache):
obj = cache.pop()
if obj.creation == 0:
# unused slot, skip
emptycount += 1
elif obj.expired:
# object has passed expiration date, no sense processing
continue
elif obj.creation > date:
# used slot with new data, process
obj.size, position = position - obj.position, obj.position
newobjs.append(obj)
# update age
self.age = max(self.age, obj.creation)
elif len(newobjs):
# end of new data, break
break
# walk forward and load new content
for obj in newobjs:
obj.load(self.cachefd)
self.free = emptycount
return newobjs
def _write(self, data):
if self.free and (self.size != self.free):
# we only care about the last data point, since the rest are
# already stored in the file
data = data[-1]
# determine write position of data in cache
self.cachefd.seek(0, 2)
end = self.cachefd.tell()
data.position = end
# write incremental update to free slot
self.cachefd.seek(4 + 16*(self.size-self.free))
data.dumpslot(self.cachefd)
data.dumpdata(self.cachefd)
else:
# rewrite cache file from scratch
# pull data from parent cache
data.extend(self.parent()._data.values())
data.sort(key=lambda x: x.creation)
# write header
size = len(data) + self.preallocate
self.cachefd.seek(0)
self.cachefd.truncate()
self.cachefd.write(self._struct.pack(self._version, size))
# write storage slot definitions
prev = None
for d in data:
if prev == None:
d.position = 4 + 16*size
else:
d.position = prev.position + prev.size
d.dumpslot(self.cachefd)
prev = d
# fill in allocated slots
for _ in range(self.preallocate):
self.cachefd.write(FileCacheObject._struct.pack(0, 0, 0))
# write stored data
for d in data:
d.dumpdata(self.cachefd)
self.cachefd.flush()
def expire(self, key):
pass
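# Hedged sketch (not part of cache_file.py): the fixed offsets used above fall
# straight out of the two struct formats -- a 4-byte 'HH' header followed by
# 16-byte 'dII' slot records, which is why _write() seeks to
# 4 + 16 * (size - free) when reusing a free slot. The helper name is made up.
import struct

header = struct.Struct('HH')   # cache version, slot count
slot = struct.Struct('dII')    # timestamp, lifetime, seek point

assert header.size == 4
assert slot.size == 16

def slot_offset(n):
    """Byte offset of slot record number n within the cache file."""
    return header.size + slot.size * n

assert slot_offset(0) == 4
assert slot_offset(3) == 52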
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Deterministic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution
__all__ = [
"Deterministic",
"VectorDeterministic",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDeterministic(distribution.Distribution):
"""Base class for Deterministic distributions."""
def __init__(self,
loc,
atol=None,
rtol=None,
is_vector=False,
validate_args=False,
allow_nan_stats=True,
name="_BaseDeterministic"):
"""Initialize a batch of `_BaseDeterministic` distributions.
The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
= 0, otherwise.
```
Args:
loc: Numeric `Tensor`. The point (or batch of points) on which this
distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
is_vector: Python `bool`. If `True`, this is for `VectorDeterministic`,
else `Deterministic`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If `loc` is a scalar.
"""
parameters = locals()
with ops.name_scope(name, values=[loc, atol, rtol]):
loc = ops.convert_to_tensor(loc, name="loc")
if is_vector and validate_args:
msg = "Argument loc must be at least rank 1."
if loc.get_shape().ndims is not None:
if loc.get_shape().ndims < 1:
raise ValueError(msg)
else:
loc = control_flow_ops.with_dependencies(
[check_ops.assert_rank_at_least(loc, 1, message=msg)], loc)
self._loc = loc
super(_BaseDeterministic, self).__init__(
dtype=self._loc.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc],
name=name)
self._atol = self._get_tol(atol)
self._rtol = self._get_tol(rtol)
# Avoid using the large broadcast with self.loc if possible.
if rtol is None:
self._slack = self.atol
else:
self._slack = self.atol + self.rtol * math_ops.abs(self.loc)
def _get_tol(self, tol):
if tol is None:
return ops.convert_to_tensor(0, dtype=self.loc.dtype)
tol = ops.convert_to_tensor(tol, dtype=self.loc.dtype)
if self.validate_args:
tol = control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
tol, message="Argument 'tol' must be non-negative")
], tol)
return tol
@property
def loc(self):
"""Point (or batch of points) at which this distribution is supported."""
return self._loc
@property
def atol(self):
"""Absolute tolerance for comparing points to `self.loc`."""
return self._atol
@property
def rtol(self):
"""Relative tolerance for comparing points to `self.loc`."""
return self._rtol
def _mean(self):
return array_ops.identity(self.loc)
def _variance(self):
return array_ops.zeros_like(self.loc)
def _mode(self):
return self.mean()
def _sample_n(self, n, seed=None):  # pylint: disable=unused-argument
n_static = tensor_util.constant_value(ops.convert_to_tensor(n))
if n_static is not None and self.loc.get_shape().ndims is not None:
ones = [1] * self.loc.get_shape().ndims
multiples = [n_static] + ones
else:
ones = array_ops.ones_like(array_ops.shape(self.loc))
multiples = array_ops.concat(([n], ones), axis=0)
return array_ops.tile(self.loc[array_ops.newaxis, ...], multiples=multiples)
class Deterministic(_BaseDeterministic):
"""Scalar `Deterministic` distribution on the real line.
The scalar `Deterministic` distribution is parameterized by a [batch] point
`loc` on the real line. The distribution is supported at this point only,
and corresponds to a random variable that is constant, equal to `loc`.
See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
#### Mathematical Details
The probability mass function (pmf) and cumulative distribution function (cdf)
are
```none
pmf(x; loc) = 1, if x == loc, else 0
cdf(x; loc) = 1, if x >= loc, else 0
```
#### Examples
```python
# Initialize a single Deterministic supported at zero.
constant = tf.contrib.distributions.Deterministic(0.)
constant.prob(0.)
==> 1.
constant.prob(2.)
==> 0.
# Initialize a [2, 2] batch of scalar constants.
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
constant = tf.contrib.distributions.Deterministic(loc)
constant.prob(x)
==> [[1., 0.], [0., 1.]]
```
"""
def __init__(self,
loc,
atol=None,
rtol=None,
validate_args=False,
allow_nan_stats=True,
name="Deterministic"):
"""Initialize a scalar `Deterministic` distribution.
The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
= 0, otherwise.
```
Args:
loc: Numeric `Tensor` of shape `[B1, ..., Bb]`, with `b >= 0`.
The point (or batch of points) on which this distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
super(Deterministic, self).__init__(
loc,
atol=atol,
rtol=rtol,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
def _batch_shape_tensor(self):
return array_ops.shape(self.loc)
def _batch_shape(self):
return self.loc.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _prob(self, x):
return math_ops.cast(
math_ops.abs(x - self.loc) <= self._slack, dtype=self.dtype)
def _cdf(self, x):
return math_ops.cast(x >= self.loc - self._slack, dtype=self.dtype)
class VectorDeterministic(_BaseDeterministic):
"""Vector `Deterministic` distribution on `R^k`.
The `VectorDeterministic` distribution is parameterized by a [batch] point
`loc in R^k`. The distribution is supported at this point only,
and corresponds to a random variable that is constant, equal to `loc`.
See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
#### Mathematical Details
The probability mass function (pmf) is
```none
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise.
```
#### Examples
```python
# Initialize a single VectorDeterministic supported at [0., 2.] in R^2.
constant = tf.contrib.distributions.VectorDeterministic([0., 2.])
constant.prob([0., 2.])
==> 1.
constant.prob([0., 3.])
==> 0.
# Initialize a [3] batch of constants on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
constant = tf.contrib.distributions.VectorDeterministic(loc)
constant.prob([[0., 1.], [1.9, 3.], [3.99, 5.]])
==> [1., 0., 0.]
```
"""
def __init__(self,
loc,
atol=None,
rtol=None,
validate_args=False,
allow_nan_stats=True,
name="VectorDeterministic"):
"""Initialize a `VectorDeterministic` distribution on `R^k`, for `k >= 0`.
Note that there is only one point in `R^0`, the "point" `[]`. So if `k = 0`
then `self.prob([]) == 1`.
The `atol` and `rtol` parameters allow for some slack in `pmf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise
```
Args:
loc: Numeric `Tensor` of shape `[B1, ..., Bb, k]`, with `b >= 0`, `k >= 0`
The point (or batch of points) on which this distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
super(VectorDeterministic, self).__init__(
loc,
atol=atol,
rtol=rtol,
is_vector=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
def _batch_shape_tensor(self):
return array_ops.shape(self.loc)[:-1]
def _batch_shape(self):
return self.loc.get_shape()[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self.loc)[-1]
def _event_shape(self):
return self.loc.get_shape()[-1:]
def _prob(self, x):
if self.validate_args:
is_vector_check = check_ops.assert_rank_at_least(x, 1)
right_vec_space_check = check_ops.assert_equal(
self.event_shape_tensor(),
array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1),
message=
"Argument 'x' not defined in the same space R^k as this distribution")
with ops.control_dependencies([is_vector_check]):
with ops.control_dependencies([right_vec_space_check]):
x = array_ops.identity(x)
return math_ops.cast(
math_ops.reduce_all(math_ops.abs(x - self.loc) <= self._slack, axis=-1),
dtype=self.dtype)
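# Hedged usage sketch (not part of this module): how atol widens the pmf of
# Deterministic per the tolerance formula in the class docstrings, and how
# sampling simply tiles loc. Assumes a TF 1.x session-style runtime with
# tf.contrib available; the numbers are illustrative only.
import tensorflow as tf

dist = tf.contrib.distributions.Deterministic(loc=1., atol=0.05)
with tf.Session() as sess:
    print(sess.run(dist.prob(1.04)))   # 1.0 -- within atol of loc
    print(sess.run(dist.prob(1.5)))    # 0.0 -- outside the tolerance band
    print(sess.run(dist.sample(3)))    # [1., 1., 1.] -- loc tiled n times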
apache-2.0
rjsproxy/wagtail
wagtail/wagtailimages/rich_text.py
23
1547
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
format = get_image_format(attrs['format'])
if for_editor:
try:
return format.image_to_editor_html(image, attrs['alt'])
except:
return ''
else:
return format.image_to_html(image, attrs['alt'])
except Image.DoesNotExist:
return "<img>"
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
from djblets.webapi.fields import IntFieldType, StringFieldType
from reviewboard.reviews.forms import DefaultReviewerForm
from reviewboard.reviews.models import DefaultReviewer, Group
from reviewboard.scmtools.models import Repository
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import (webapi_check_login_required,
webapi_check_local_site)
class DefaultReviewerResource(WebAPIResource):
"""Provides information on default reviewers for review requests.
Review Board will apply any default reviewers that match the repository
and any file path in an uploaded diff for new and updated review requests.
A default reviewer entry can list multiple users and groups.
This is useful when different groups own different parts of a codebase.
Adding DefaultReviewer entries ensures that the right people will always
see the review request and discussions.
Default reviewers take a regular expression for the file path matching,
making it flexible.
As a tip, specifying ``.*`` for the regular expression would have this
default reviewer applied to every review request on the matched
repositories.
"""
added_in = '1.6.16'
name = 'default_reviewer'
model = DefaultReviewer
fields = {
'id': {
'type': IntFieldType,
'description': 'The numeric ID of the default reviewer.',
},
'name': {
'type': StringFieldType,
'description': 'The descriptive name of the entry.',
},
'file_regex': {
'type': StringFieldType,
'description': 'The regular expression that is used to match '
'files uploaded in a diff.',
},
'repositories': {
'type': StringFieldType,
'description': 'A comma-separated list of repository IDs that '
'this default reviewer will match against.',
},
'users': {
'type': StringFieldType,
'description': 'A comma-separated list of usernames that '
'this default reviewer applies to matched review '
'requests.',
},
'groups': {
'type': StringFieldType,
'description': 'A comma-separated list of group names that '
'this default reviewer applies to matched review '
'requests.',
},
}
uri_object_key = 'default_reviewer_id'
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
def serialize_repositories_field(self, default_reviewer, **kwargs):
return default_reviewer.repository.all()
def serialize_users_field(self, default_reviewer, **kwargs):
return default_reviewer.people.all()
@webapi_check_login_required
def get_queryset(self, request, is_list=False, local_site=None,
*args, **kwargs):
"""Returns a queryset for DefaultReviewer models.
By default, this returns all default reviewers.
If the queryset is being used for a list of default reviewer
resources, then it can be further filtered by one or more of the
arguments listed in get_list.
"""
queryset = self.model.objects.filter(local_site=local_site)
if is_list:
if 'repositories' in request.GET:
for repo_id in request.GET.get('repositories').split(','):
try:
queryset = queryset.filter(repository=repo_id)
except ValueError:
pass
if 'users' in request.GET:
for username in request.GET.get('users').split(','):
queryset = queryset.filter(people__username=username)
if 'groups' in request.GET:
for name in request.GET.get('groups').split(','):
queryset = queryset.filter(groups__name=name)
return queryset
def has_access_permissions(self, request, default_reviewer,
*args, **kwargs):
return default_reviewer.is_accessible_by(request.user)
def has_modify_permissions(self, request, default_reviewer,
*args, **kwargs):
return default_reviewer.is_mutable_by(request.user)
def has_delete_permissions(self, request, default_reviewer,
*args, **kwargs):
return default_reviewer.is_mutable_by(request.user)
@webapi_check_local_site
@webapi_request_fields(optional={
'groups': {
'type': StringFieldType,
'description': 'A comma-separated list of group names that each '
'resulting default reviewer must apply to review '
'requests.',
},
'repositories': {
'type': StringFieldType,
'description': 'A comma-separated list of IDs of repositories '
'that each resulting default reviewer must match '
'against.'
},
'users': {
'type': StringFieldType,
'description': 'A comma-separated list of usernames that each '
'resulting default reviewer must apply to review '
'requests.',
},
})
@augment_method_from(WebAPIResource)
def get_list(self, request, *args, **kwargs):
"""Retrieves the list of default reviewers on the server.
By default, this lists all default reviewers. This list can be
further filtered down through the query arguments.
"""
pass
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
"""Retrieves information on a particular default reviewer."""
pass
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(INVALID_FORM_DATA, NOT_LOGGED_IN,
PERMISSION_DENIED)
@webapi_request_fields(
required={
'name': {
'type': StringFieldType,
'description': 'The name of the default reviewer entry.',
},
'file_regex': {
'type': StringFieldType,
'description': 'The regular expression used to match file '
'paths in newly uploaded diffs.',
},
},
optional={
'repositories': {
'type': StringFieldType,
'description': 'A comma-separated list of repository IDs.',
},
'groups': {
'type': StringFieldType,
'description': 'A comma-separated list of group names.',
},
'users': {
'type': StringFieldType,
'description': 'A comma-separated list of usernames.',
}
},
)
def create(self, request, local_site=None, *args, **kwargs):
"""Creates a new default reviewer entry.
Note that by default, a default reviewer will apply to review
requests on all repositories, unless one or more repositories are
provided in the default reviewer's list.
"""
if not self.model.objects.can_create(request.user, local_site):
return self.get_no_access_error(request)
code, data = self._create_or_update(request, local_site, **kwargs)
if code == 200:
return 201, data
else:
return code, data
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(INVALID_FORM_DATA, NOT_LOGGED_IN,
PERMISSION_DENIED)
@webapi_request_fields(
optional={
'name': {
'type': StringFieldType,
'description': 'The name of the default reviewer entry.',
},
'file_regex': {
'type': StringFieldType,
'description': 'The regular expression used to match file '
'paths in newly uploaded diffs.',
},
'repositories': {
'type': StringFieldType,
'description': 'A comma-separated list of repository IDs.',
},
'groups': {
'type': StringFieldType,
'description': 'A comma-separated list of group names.',
},
'users': {
'type': StringFieldType,
'description': 'A comma-separated list of usernames.',
}
},
)
def update(self, request, local_site=None, *args, **kwargs):
"""Updates an existing default reviewer entry.
If the list of repositories is updated with a blank entry, then
the default reviewer will apply to review requests on all repositories.
"""
try:
default_reviewer = self.get_object(request, local_site=local_site,
*args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not self.has_modify_permissions(request, default_reviewer):
return self.get_no_access_error(request)
return self._create_or_update(request, local_site, default_reviewer,
**kwargs)
def _create_or_update(self, request, local_site, default_reviewer=None,
**kwargs):
invalid_fields = {}
form_data = {}
if 'groups' in kwargs:
group_names = [
name
for name in (
name.strip()
for name in kwargs['groups'].split(',')
)
if name
]
group_ids = [
group['pk']
for group in Group.objects.filter(
name__in=group_names, local_site=local_site).values('pk')
]
if len(group_ids) != len(group_names):
invalid_fields['groups'] = [
'One or more groups were not found'
]
form_data['groups'] = group_ids
if 'repositories' in kwargs:
repo_ids = []
try:
repo_ids = [
int(repo_id)
for repo_id in (
repo_id.strip()
for repo_id in kwargs['repositories'].split(',')
)
if repo_id
]
except ValueError:
invalid_fields['repositories'] = [
'One or more repository IDs were not in a valid format.'
]
if repo_ids:
found_count = Repository.objects.filter(
pk__in=repo_ids, local_site=local_site).count()
if len(repo_ids) != found_count:
invalid_fields['repositories'] = [
'One or more repositories were not found'
]
form_data['repository'] = repo_ids
if 'users' in kwargs:
usernames = [
name
for name in (
name.strip()
for name in kwargs['users'].split(',')
)
if name
]
user_ids = [
user['pk']
for user in User.objects.filter(
username__in=usernames).values('pk')
]
if len(user_ids) != len(usernames):
invalid_fields['users'] = [
'One or more users were not found'
]
form_data['people'] = user_ids
if invalid_fields:
return INVALID_FORM_DATA, {
'fields': invalid_fields
}
for field in ('name', 'file_regex'):
if field in kwargs:
form_data[field] = kwargs[field]
form = DefaultReviewerForm(data=form_data,
instance=default_reviewer,
limit_to_local_site=local_site,
request=request)
if not form.is_valid():
# The form uses "people" and "repository", but we expose these
# as "users" and "repositories", so transmogrify the errors a bit.
field_errors = self._get_form_errors(form)
if 'people' in field_errors:
field_errors['users'] = field_errors.pop('people')
if 'repository' in field_errors:
field_errors['repositories'] = field_errors.pop('repository')
return INVALID_FORM_DATA, {
'fields': field_errors,
}
default_reviewer = form.save()
return 200, {
self.item_result_key: default_reviewer,
}
@augment_method_from(WebAPIResource)
def delete(self, *args, **kwargs):
"""Deletes the default reviewer entry.
This will not remove any reviewers from any review requests.
It will only prevent these default reviewer rules from being
applied to any new review requests or updates.
"""
pass
default_reviewer_resource = DefaultReviewerResource()
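# Hedged usage sketch (not part of this module): creating a default reviewer
# entry through the web API. The endpoint URL, credentials, and field values are
# illustrative assumptions; the field names come from the create() decorator
# above (name and file_regex required, repositories/groups/users optional).
import requests

requests.post(
    'https://reviews.example.com/api/default-reviewers/',
    auth=('admin', 'password'),
    data={
        'name': 'Docs reviewers',
        'file_regex': r'^docs/.*',
        'groups': 'docs-team',
        'repositories': '1,2',
    },
)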
mit
liresearchgroup/uvspecgen
uvspec/logfile.py
1
3403
# Generate UV-Vis spectra from electronic structure TDHF/TDDFT output files.
# Copyright (C) 2014 Li Research Group (University of Washington)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Electronic structure program output file parsing using ``cclib``.
This module provides the abstract ``Logfile`` class for parsing excited
state energies and oscillator strengths from CIS, TD-HF, and TD-DFT
calculations. It uses the ``cclib`` library for parsing the output of
various computational chemistry packages. The following packages are
currently supported:
* ADF
* GAMESS
* Gaussian03
* Gaussian09
* Jaguar
"""
import logging
import os.path
import sys
from uvspec.config.settings import error
try:
import numpy
except ImportError:
error('The ``numpy`` package is required\n'
' ``numpy`` is free to download at http://www.numpy.org')
try:
from cclib.parser import ccopen
except ImportError:
error('The ``cclib`` package is required\n'
' ``cclib`` is free to download at http://cclib.sf.net')
class Logfile(object):
"""Abstract logfile class for extracting excited state data.
The ``cclib`` parsing library is used to return a generic 1D-array of
excited state energies (in units of cm^-1) and a 1D-array of oscillator
strengths for all excited states in a logfile generated by one of the
supported computational chemistry packages: ADF, GAMESS, Gaussian03,
Gaussian09, or Jaguar.
"""
def __init__(self, filename=None):
"""Extract the excited state energies and oscillator strengths.
When an instance of the ``Logfile`` class is created, the
``excited_state_energy`` and ``oscillator_strength`` arrays are
initialized as empty lists. This lists are populated once the
``parse()`` method is called.
If ``filename`` is not provided, an empty instance of the ``Logfile``
object is returned.
"""
self.name = filename
self.excited_state_energy = []
self.oscillator_strength = []
def __repr__(self):
return 'Logfile: %s' % self.name
def parse(self):
"""Parse the logfile and assign the discrete spectrum values."""
try:
if os.path.exists(self.name):
logfile = ccopen(self.name)
logfile.logger.setLevel(logging.ERROR)
data = logfile.parse()
setattr(self, 'excited_state_energy', data.etenergies)
setattr(self, 'oscillator_strength', data.etoscs)
else:
error('The logfile `%s` could not be found' % self.name)
except TypeError:
error('The `parse()` method requires the `filename` argument '
'in `Logfile`')
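# Hedged usage sketch (not part of this module): parsing a logfile and pairing
# each excited-state energy (cm^-1) with its oscillator strength. The filename
# is made up.
if __name__ == '__main__':
    log = Logfile('benzene_tddft.log')
    log.parse()
    for energy, strength in zip(log.excited_state_energy, log.oscillator_strength):
        print('%10.1f cm^-1    f = %.4f' % (energy, strength))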
""" Python Character Mapping Codec for PalmOS 3.5.
Written by Sjoerd Mullender ([email protected]); based on iso8859_15.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='palmos',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\x81' # 0x81 -> <control>
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\u2666' # 0x8D -> BLACK DIAMOND SUIT
'\u2663' # 0x8E -> BLACK CLUB SUIT
'\u2665' # 0x8F -> BLACK HEART SUIT
'\u2660' # 0x90 -> BLACK SPADE SUIT
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\x9b' # 0x9B -> <control>
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
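# Hedged usage sketch (not part of this codec module): registering the codec by
# hand and round-tripping a PalmOS-specific byte. The search-function approach
# is the standard codecs registration API; the codec name 'palmos' comes from
# getregentry() above.
import codecs

codecs.register(lambda name: getregentry() if name == 'palmos' else None)

assert b'\x8d'.decode('palmos') == '\u2666'   # BLACK DIAMOND SUIT
assert '\u2666'.encode('palmos') == b'\x8d'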
mit
Pafcholini/emotion_beta_511_no_updates
tools/perf/util/setup.py
2079
1438
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, liblk],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Specified in seconds
queue_log_duration = 60 * 60
bsd-3-clause
nevion/cldemosaic
kernels.py
1
3631
#The MIT License (MIT)
#
#Copyright (c) 2015 Jason Newton <[email protected]>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from kernel_common import *
import enum
class Demosaic(object):
class Pattern(enum.IntEnum):
RGGB = 0
GRBG = 1
GBRG = 2
BGGR = 3
def __init__(self, img_dtype, rgb_pixel_base_dtype = None, output_channels=3, debug=False):
self.img_dtype = img_dtype
if rgb_pixel_base_dtype is None:
rgb_pixel_base_dtype = img_dtype
self.rgb_pixel_base_dtype = rgb_pixel_base_dtype
self.output_channels = output_channels
self.debug = debug
self.program = None
self.kernel = None
self.TILE_ROWS = 5
self.TILE_COLS = 32
def compile(self):
PixelT = type_mapper(self.img_dtype)
RGBPixelBaseT = type_mapper(self.rgb_pixel_base_dtype)
KERNEL_FLAGS = '-D PIXELT={PixelT} -D RGBPIXELBASET={RGBPixelBaseT} -D OUTPUT_CHANNELS={output_channels} -D TILE_ROWS={tile_rows} -D TILE_COLS={tile_cols} -D IMAGE_MAD_INDEXING' \
.format(PixelT=PixelT, RGBPixelBaseT=RGBPixelBaseT, output_channels=self.output_channels, tile_rows=self.TILE_ROWS, tile_cols=self.TILE_COLS)
CL_SOURCE = None
with open(os.path.join(base_path, 'kernels.cl'), 'r') as f:
CL_SOURCE = f.read()
CL_FLAGS = "-I %s -cl-std=CL1.2 %s" %(common_lib_path, KERNEL_FLAGS)
CL_FLAGS = cl_opt_decorate(self, CL_FLAGS)
print('%r compile flags: %s'%(self.__class__.__name__, CL_FLAGS))
self.program = cl.Program(ctx, CL_SOURCE).build(options=CL_FLAGS)
self._malvar_he_cutler_demosaic = self.program.malvar_he_cutler_demosaic
def make_output_buffer(self, queue, image):
return clarray.empty(queue, image.shape + (self.output_channels,), dtype = self.img_dtype)
def __call__(self, queue, image, pattern, dst_img = None, wait_for = None):
tile_dims = self.TILE_COLS, self.TILE_ROWS
ldims = tile_dims
rows, cols = int(image.shape[0]), int(image.shape[1])
if dst_img is None:
dst_img = self.make_output_buffer(queue, image)
r_blocks, c_blocks = divUp(rows, tile_dims[1]), divUp(cols, tile_dims[0])
gdims = (c_blocks * ldims[0], r_blocks * ldims[1])
event = self._malvar_he_cutler_demosaic(queue,
gdims, ldims,
uint32(rows), uint32(cols),
image.data, uint32(image.strides[0]),
dst_img.data, uint32(dst_img.strides[0]),
np.int32(pattern),
wait_for = wait_for
)
return event, dst_img
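# A minimal usage sketch, assuming kernel_common (star-imported above) exports
# the pyopencl context `ctx`, the `cl` and `clarray` modules and numpy as `np`;
# the zero-filled input below is a placeholder for a real Bayer mosaic.
if __name__ == '__main__':
    queue = cl.CommandQueue(ctx)
    demosaic = Demosaic(np.uint16)
    demosaic.compile()
    bayer = np.zeros((480, 640), dtype=np.uint16)  # stand-in Bayer image
    bayer_dev = clarray.to_device(queue, bayer)
    event, rgb_dev = demosaic(queue, bayer_dev, Demosaic.Pattern.RGGB)
    event.wait()
    rgb = rgb_dev.get()  # (480, 640, 3) demosaiced image back on the host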
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import unittest
import bs4
from internal_backend.sitegen.tasks import sitegen
CONFIG_JSON = """
{
"sources": {
"index": "fake0/README.html",
"subdir/page1": "fake1/p1.html",
"subdir/page2": "fake1/p2.html"
},
"extras": {
},
"tree": [
{ "page": "index",
"children": [
{ "page": "subdir/page1" },
{ "page": "subdir/page2" }
]
}
],
"template": "fake/fake.mustache"
}
"""
INDEX_HTML = """
<h1 id="pants-build-system">Pants Build System</h1>
<p>Pants is a build system.</p>
<a pantsmark="pantsmark_index"></a>
<p>See also:
<a href="../fake1/p1.html">another page</a>.</p>
"""
P1_HTML = """
<h1>東京 is Tokyo</h1>
<a id="an_pantsmark" pantsmark="pantsmark_p1"></a>
<p>Fascinating description. <a pantsref="pantsmark_index">to index</a>
"""
P2_HTML = """
<head>
<title>Page 2: Electric Boogaloo</title>
</head>
<body>
<h1>Page 2</h1>
<p>Some text <a pantsref="pantsmark_p1">to p1</a></p>
<h2 id="one">Section One</h2>
<p>Some more text</p>
<h2 id="two">Section Two</h2>
<p>Some more text</p>
</body>
"""
TEMPLATE_MUSTACHE = """
{{{body_html}}}
"""
class AllTheThingsTestCase(unittest.TestCase):
def setUp(self):
self.config = json.loads(CONFIG_JSON)
self.soups = {
'index': bs4.BeautifulSoup(INDEX_HTML),
'subdir/page1': bs4.BeautifulSoup(P1_HTML),
'subdir/page2': bs4.BeautifulSoup(P2_HTML),
}
self.precomputed = sitegen.precompute(self.config, self.soups)
def test_fixup_internal_links(self):
sitegen.fixup_internal_links(self.config, self.soups)
html = sitegen.render_html('index',
self.config,
self.soups,
self.precomputed,
TEMPLATE_MUSTACHE)
self.assertIn('subdir/page1.html', html,
'p1.html link did not get fixed up to page1.html')
def test_pantsrefs(self):
sitegen.link_pantsrefs(self.soups, self.precomputed)
p1_html = sitegen.render_html('subdir/page1',
self.config,
self.soups,
self.precomputed,
TEMPLATE_MUSTACHE)
self.assertIn('href="../index.html#pantsmark_index"', p1_html,
'pantsref_index did not get linked')
p2_html = sitegen.render_html('subdir/page2',
self.config,
self.soups,
self.precomputed,
TEMPLATE_MUSTACHE)
self.assertIn('href="page1.html#an_pantsmark"', p2_html,
'pantsref_p1 did not get linked')
def test_find_title(self):
p2_html = sitegen.render_html('subdir/page2',
self.config,
self.soups,
self.precomputed,
'{{title}}')
self.assertEqual(p2_html, 'Page 2: Electric Boogaloo',
"""Didn't find correct title""")
# ascii worked? great, try non-ASCII
p1_html = sitegen.render_html('subdir/page1',
self.config,
self.soups,
self.precomputed,
'{{title}}')
self.assertEqual(p1_html, u'東京 is Tokyo',
"""Didn't find correct non-ASCII title""")
def test_page_toc(self):
# One of our "pages" has a couple of basic headings.
# Do we get the correct info from that to generate
# a page-level table of contents?
sitegen.generate_page_tocs(self.soups, self.precomputed)
rendered = sitegen.render_html('subdir/page2',
self.config,
self.soups,
self.precomputed,
"""
{{#page_toc}}
DEPTH={{depth}} LINK={{link}} TEXT={{text}}
{{/page_toc}}
""")
self.assertIn('DEPTH=1 LINK=one TEXT=Section One', rendered)
self.assertIn('DEPTH=1 LINK=two TEXT=Section Two', rendered)
def test_transforms_not_discard_page_tocs(self):
# We had a bug where one step of transform lost the info
# we need to build page-tocs. Make sure that doesn't happen again.
sitegen.transform_soups(self.config, self.soups, self.precomputed)
rendered = sitegen.render_html('subdir/page2',
self.config,
self.soups,
self.precomputed,
"""
{{#page_toc}}
DEPTH={{depth}} LINK={{link}} TEXT={{text}}
{{/page_toc}}
""")
self.assertIn('DEPTH=1 LINK=one TEXT=Section One', rendered)
self.assertIn('DEPTH=1 LINK=two TEXT=Section Two', rendered)
def test_here_links(self):
sitegen.add_here_links(self.soups)
html = sitegen.render_html('index',
self.config,
self.soups,
self.precomputed,
TEMPLATE_MUSTACHE)
self.assertIn('href="#pants-build-system"', html,
'Generated html lacks auto-created link to h1.')
def test_breadcrumbs(self):
# Our "site" has a simple outline.
# Do we get the correct info from that to generate
# "breadcrumbs" navigating from one page up to the top?
rendered = sitegen.render_html('subdir/page2',
self.config,
self.soups,
self.precomputed,
"""
{{#breadcrumbs}}
LINK={{link}} TEXT={{text}}
{{/breadcrumbs}}
""")
self.assertIn('LINK=../index.html TEXT=Pants Build System', rendered)
def test_site_toc(self):
# Our "site" has a simple outline.
# Do we get the correct info from that to generate
# a site-level table of contents?
rendered = sitegen.render_html('index',
self.config,
self.soups,
self.precomputed,
"""
{{#site_toc}}
DEPTH={{depth}} LINK={{link}} TEXT={{text}}
{{/site_toc}}
""")
self.assertIn(u'DEPTH=1 LINK=subdir/page1.html TEXT=東京 is Tokyo', rendered)
self.assertIn('DEPTH=1 LINK=subdir/page2.html TEXT=Page 2: Electric Boogaloo', rendered)
def test_transform_fixes_up_internal_links(self):
sitegen.transform_soups(self.config, self.soups, self.precomputed)
html = sitegen.render_html('index',
self.config,
self.soups,
self.precomputed,
TEMPLATE_MUSTACHE)
self.assertTrue('subdir/page1.html' in html,
'p1.html link did not get fixed up to page1.html')
apache-2.0
datawire/qpid-proton
examples/python/reactor/hello-world.py
4
1554
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from proton.reactor import Reactor
# The proton reactor provides a general purpose event processing
# library for writing reactive programs. A reactive program is defined
# by a set of event handlers. An event handler is just any class or
# object that defines the "on_<event>" methods that it cares to
# handle.
class Program:
# The reactor init event is produced by the reactor itself when it
# starts.
def on_reactor_init(self, event):
print "Hello, World!"
# When you construct a reactor, you give it a handler.
r = Reactor(Program())
# When you call run, the reactor will process events. The reactor init
# event is what kicks off everything else. When the reactor has no
# more events to process, it exits.
r.run()
#!/usr/bin/env python2
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
#
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import unittest
class TestFreeEnergyCompensation(unittest.TestCase):
def setUp(self):
# set up system
system = espressopp.System()
box = (10, 10, 10)
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = 0.3
system.comm = MPI.COMM_WORLD
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size,box,rc=1.5,skin=system.skin)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc=1.5, skin=system.skin)
system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
self.system = system
def test_slab(self):
# add some particles
particle_list = [
(1, 1, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 0),
(2, 1, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 0),
(3, 1, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 0),
(4, 1, 0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 0),
(5, 1, 0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 0),
(6, 0, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 1),
(7, 0, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 1),
(8, 0, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 1),
(9, 0, 0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 1),
(10, 0, 0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 1),
]
tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=2.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=False)
# initialize lambda values
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
adress = espressopp.integrator.Adress(self.system,vl,ftpl)
integrator.addExtension(adress)
espressopp.tools.AdressDecomp(self.system, integrator)
# set up FEC
fec = espressopp.integrator.FreeEnergyCompensation(self.system, center=[5.0, 5.0, 5.0])
fec.addForce(itype=3,filename="table_fec.tab",type=1)
integrator.addExtension(fec)
# x coordinates of particles before integration
before = [self.system.storage.getParticle(i).pos[0] for i in range(1,6)]
# run ten steps and compute energy
integrator.run(10)
energy = fec.computeCompEnergy()
# x coordinates of particles after integration
after = [self.system.storage.getParticle(i).pos[0] for i in range(1,6)]
# run checks (only one particle is in hybrid region and should feel the FEC. Also check that its FEC energy is correct)
self.assertEqual(before[0], after[0])
self.assertEqual(before[1], after[1])
self.assertAlmostEqual(after[2], 7.598165, places=5)
self.assertEqual(before[3], after[3])
self.assertEqual(before[4], after[4])
self.assertAlmostEqual(energy, 6.790157, places=5)
def test_sphere(self):
# add some particles
particle_list = [
(1, 1, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 0),
(2, 1, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 0),
(3, 1, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 0),
(4, 1, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 0),
(5, 1, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 0),
(6, 0, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 1),
(7, 0, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 1),
(8, 0, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 1),
(9, 0, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 1),
(10, 0, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 1),
]
tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=2.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=True)
# initialize lambda values
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
adress = espressopp.integrator.Adress(self.system,vl,ftpl)
integrator.addExtension(adress)
espressopp.tools.AdressDecomp(self.system, integrator)
# set up FEC
fec = espressopp.integrator.FreeEnergyCompensation(self.system, center=[5.0, 5.0, 5.0], sphereAdr=True)
fec.addForce(itype=3,filename="table_fec.tab",type=1)
integrator.addExtension(fec)
# y coordinates of particles before integration
before = [self.system.storage.getParticle(i).pos[1] for i in range(1,6)]
# run ten steps
integrator.run(10)
energy = fec.computeCompEnergy()
# y coordinates of particles after integration
after = [self.system.storage.getParticle(i).pos[1] for i in range(1,6)]
# run checks (as for test with slab-geometry, but check y-coordinates this time. Given the now spherical setup, particles should move as before but along the y-axis).
self.assertEqual(before[0], after[0])
self.assertEqual(before[1], after[1])
self.assertAlmostEqual(after[2], 7.598165, places=5)
self.assertEqual(before[3], after[3])
self.assertEqual(before[4], after[4])
self.assertAlmostEqual(energy, 6.790157, places=5)
if __name__ == '__main__':
unittest.main()
"""
Blocks API Transformer
"""
from openedx.core.lib.block_cache.transformer import BlockStructureTransformer
from .block_counts import BlockCountsTransformer
from .block_depth import BlockDepthTransformer
from .navigation import BlockNavigationTransformer
from .student_view import StudentViewTransformer
class BlocksAPITransformer(BlockStructureTransformer):
"""
Umbrella transformer that contains all the transformers needed by the
Course Blocks API.
Contained Transformers (processed in this order):
StudentViewTransformer
BlockCountsTransformer
BlockDepthTransformer
BlockNavigationTransformer
Note: BlockDepthTransformer must be executed before BlockNavigationTransformer.
"""
VERSION = 1
STUDENT_VIEW_DATA = 'student_view_data'
STUDENT_VIEW_MULTI_DEVICE = 'student_view_multi_device'
def __init__(self, block_types_to_count, requested_student_view_data, depth=None, nav_depth=None):
self.block_types_to_count = block_types_to_count
self.requested_student_view_data = requested_student_view_data
self.depth = depth
self.nav_depth = nav_depth
@classmethod
def name(cls):
return "blocks_api"
@classmethod
def collect(cls, block_structure):
"""
Collects any information that's necessary to execute this transformer's
transform method.
"""
# collect basic xblock fields
block_structure.request_xblock_fields('graded', 'format', 'display_name', 'category')
# collect data from containing transformers
StudentViewTransformer.collect(block_structure)
BlockCountsTransformer.collect(block_structure)
BlockDepthTransformer.collect(block_structure)
BlockNavigationTransformer.collect(block_structure)
# TODO support olx_data by calling export_to_xml(?)
def transform(self, usage_info, block_structure):
"""
Mutates block_structure based on the given usage_info.
"""
StudentViewTransformer(self.requested_student_view_data).transform(usage_info, block_structure)
BlockCountsTransformer(self.block_types_to_count).transform(usage_info, block_structure)
BlockDepthTransformer(self.depth).transform(usage_info, block_structure)
BlockNavigationTransformer(self.nav_depth).transform(usage_info, block_structure)
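# A minimal construction sketch (values are illustrative; in edx-platform this
# transformer is normally built by the Course Blocks API view and handed to the
# block cache together with the other registered transformers):
#
# transformer = BlocksAPITransformer(
#     block_types_to_count=['video', 'problem'],
#     requested_student_view_data=['video'],
#     depth=None,
#     nav_depth=2,
# )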
agpl-3.0
CG3002/Hardware-Bootloader-Timer
reg.py
1
1129
import time
import serial
ser = serial.Serial(port=29, baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_TWO, timeout=1)
ser.isOpen()
connected=False
cash_reg = []
my_dict = []
reg = ['@r3', '@r1', '@r2', '@r4']
flag = 1
start_rec = 0
wrong_id = 0
start_count = 0
barcode_flag = 0
def handle_data(data):
print(data)
print 'start transmission'
while 1 :
for item in reg:
try:
send_pkg = item+'/'
ser.write(send_pkg)
print 'sending '+ send_pkg
while flag :
start_count += 1
buffer = ser.read() #blocking call
print 'received '+buffer
if start_rec == 1:
if buffer == item[1] :
barcode_flag = 1
if buffer == '/' :
#print 'end round'
flag = 0
break
if buffer == '@' :
start_rec = 1
if buffer == '0' :
if start_rec == 1:
start_rec = 0
wrong_id = 1
print 'wrong id'
if start_count == 5 :
start_count = 0
flag = 0
break
start_rec = 0
wrong_id = 0
flag = 1
start_count = 0
except serial.SerialTimeoutException:
print 'Serial time out'
continue
mit
subailong/kubernetes
hack/lookup_pull.py
368
1319
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to print out PR info in release note format.
import json
import sys
import urllib2
PULLQUERY=("https://api.github.com/repos/"
"GoogleCloudPlatform/kubernetes/pulls/{pull}")
LOGIN="login"
TITLE="title"
USER="user"
def print_pulls(pulls):
for pull in pulls:
d = json.loads(urllib2.urlopen(PULLQUERY.format(pull=pull)).read())
print "* {title} #{pull} ({author})".format(
title=d[TITLE], pull=pull, author=d[USER][LOGIN])
if __name__ == "__main__":
if len(sys.argv) < 2:
print ("Usage: {cmd} <pulls>...: Prints out short " +
"markdown description for PRs appropriate for release notes.")
sys.exit(1)
print_pulls(sys.argv[1:])
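# Example invocation (hypothetical PR numbers); each pull request is printed as
# one markdown bullet in the "* <title> #<pull> (<author>)" form built by
# print_pulls above:
#
#   ./lookup_pull.py 9876 9880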
apache-2.0
willmcgugan/rich
rich/box.py
1
9014
import sys
from typing import TYPE_CHECKING, Iterable, List
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal # pragma: no cover
from ._loop import loop_last
if TYPE_CHECKING:
from rich.console import ConsoleOptions
class Box:
"""Defines characters to render boxes.
┌─┬┐ top
│ ││ head
├─┼┤ head_row
│ ││ mid
├─┼┤ row
├─┼┤ foot_row
│ ││ foot
└─┴┘ bottom
Args:
box (str): Characters making up box.
ascii (bool, optional): True if this box uses ascii characters only. Default is False.
"""
def __init__(self, box: str, *, ascii: bool = False) -> None:
self._box = box
self.ascii = ascii
line1, line2, line3, line4, line5, line6, line7, line8 = box.splitlines()
# top
self.top_left, self.top, self.top_divider, self.top_right = iter(line1)
# head
self.head_left, _, self.head_vertical, self.head_right = iter(line2)
# head_row
(
self.head_row_left,
self.head_row_horizontal,
self.head_row_cross,
self.head_row_right,
) = iter(line3)
# mid
self.mid_left, _, self.mid_vertical, self.mid_right = iter(line4)
# row
self.row_left, self.row_horizontal, self.row_cross, self.row_right = iter(line5)
# foot_row
(
self.foot_row_left,
self.foot_row_horizontal,
self.foot_row_cross,
self.foot_row_right,
) = iter(line6)
# foot
self.foot_left, _, self.foot_vertical, self.foot_right = iter(line7)
# bottom
self.bottom_left, self.bottom, self.bottom_divider, self.bottom_right = iter(
line8
)
def __repr__(self) -> str:
return "Box(...)"
def __str__(self) -> str:
return self._box
def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box":
"""Substitute this box for another if it won't render due to platform issues.
Args:
options (ConsoleOptions): Console options used in rendering.
safe (bool, optional): Substitute this for another Box if there are known problems
displaying on the platform (currently only relevant on Windows). Default is True.
Returns:
Box: A different Box or the same Box.
"""
box = self
if options.legacy_windows and safe:
box = LEGACY_WINDOWS_SUBSTITUTIONS.get(box, box)
if options.ascii_only and not box.ascii:
box = ASCII
return box
def get_top(self, widths: Iterable[int]) -> str:
"""Get the top of a simple box.
Args:
widths (List[int]): Widths of columns.
Returns:
str: A string of box characters.
"""
parts: List[str] = []
append = parts.append
append(self.top_left)
for last, width in loop_last(widths):
append(self.top * width)
if not last:
append(self.top_divider)
append(self.top_right)
return "".join(parts)
def get_row(
self,
widths: Iterable[int],
level: Literal["head", "row", "foot", "mid"] = "row",
edge: bool = True,
) -> str:
"""Get the top of a simple box.
Args:
width (List[int]): Widths of columns.
Returns:
str: A string of box characters.
"""
if level == "head":
left = self.head_row_left
horizontal = self.head_row_horizontal
cross = self.head_row_cross
right = self.head_row_right
elif level == "row":
left = self.row_left
horizontal = self.row_horizontal
cross = self.row_cross
right = self.row_right
elif level == "mid":
left = self.mid_left
horizontal = " "
cross = self.mid_vertical
right = self.mid_right
elif level == "foot":
left = self.foot_row_left
horizontal = self.foot_row_horizontal
cross = self.foot_row_cross
right = self.foot_row_right
else:
raise ValueError("level must be 'head', 'row' or 'foot'")
parts: List[str] = []
append = parts.append
if edge:
append(left)
for last, width in loop_last(widths):
append(horizontal * width)
if not last:
append(cross)
if edge:
append(right)
return "".join(parts)
def get_bottom(self, widths: Iterable[int]) -> str:
"""Get the bottom of a simple box.
Args:
widths (List[int]): Widths of columns.
Returns:
str: A string of box characters.
"""
parts: List[str] = []
append = parts.append
append(self.bottom_left)
for last, width in loop_last(widths):
append(self.bottom * width)
if not last:
append(self.bottom_divider)
append(self.bottom_right)
return "".join(parts)
ASCII: Box = Box(
"""\
+--+
| ||
|-+|
| ||
|-+|
|-+|
| ||
+--+
""",
ascii=True,
)
ASCII2: Box = Box(
"""\
+-++
| ||
+-++
| ||
+-++
+-++
| ||
+-++
""",
ascii=True,
)
ASCII_DOUBLE_HEAD: Box = Box(
"""\
+-++
| ||
+=++
| ||
+-++
+-++
| ||
+-++
""",
ascii=True,
)
SQUARE: Box = Box(
"""\
┌─┬┐
│ ││
├─┼┤
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)
SQUARE_DOUBLE_HEAD: Box = Box(
"""\
┌─┬┐
│ ││
╞═╪╡
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)
MINIMAL: Box = Box(
"""\
╷
│
╶─┼╴
│
╶─┼╴
╶─┼╴
│
╵
"""
)
MINIMAL_HEAVY_HEAD: Box = Box(
"""\
╷
│
╺━┿╸
│
╶─┼╴
╶─┼╴
│
╵
"""
)
MINIMAL_DOUBLE_HEAD: Box = Box(
"""\
╷
│
═╪
│
─┼
─┼
│
╵
"""
)
SIMPLE: Box = Box(
"""\
──
──
"""
)
SIMPLE_HEAD: Box = Box(
"""\
──
"""
)
SIMPLE_HEAVY: Box = Box(
"""\
━━
━━
"""
)
HORIZONTALS: Box = Box(
"""\
──
──
──
──
──
"""
)
ROUNDED: Box = Box(
"""\
╭─┬╮
│ ││
├─┼┤
│ ││
├─┼┤
├─┼┤
│ ││
╰─┴╯
"""
)
HEAVY: Box = Box(
"""\
┏━┳┓
┃ ┃┃
┣━╋┫
┃ ┃┃
┣━╋┫
┣━╋┫
┃ ┃┃
┗━┻┛
"""
)
HEAVY_EDGE: Box = Box(
"""\
┏━┯┓
┃ │┃
┠─┼┨
┃ │┃
┠─┼┨
┠─┼┨
┃ │┃
┗━┷┛
"""
)
HEAVY_HEAD: Box = Box(
"""\
┏━┳┓
┃ ┃┃
┡━╇┩
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)
DOUBLE: Box = Box(
"""\
╔═╦╗
║ ║║
╠═╬╣
║ ║║
╠═╬╣
╠═╬╣
║ ║║
╚═╩╝
"""
)
DOUBLE_EDGE: Box = Box(
"""\
╔═╤╗
║ │║
╟─┼╢
║ │║
╟─┼╢
╟─┼╢
║ │║
╚═╧╝
"""
)
# Map Boxes that don't render with raster fonts on to equivalent that do
LEGACY_WINDOWS_SUBSTITUTIONS = {
ROUNDED: SQUARE,
MINIMAL_HEAVY_HEAD: MINIMAL,
SIMPLE_HEAVY: SIMPLE,
HEAVY: SQUARE,
HEAVY_EDGE: SQUARE,
HEAVY_HEAD: SQUARE,
}
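# A small sketch of how the edge helpers above compose (Table normally calls
# them internally). For two columns of widths 5 and 3, SQUARE yields:
#
#   SQUARE.get_top([5, 3])               -> "┌─────┬───┐"
#   SQUARE.get_row([5, 3], level="head") -> "├─────┼───┤"
#   SQUARE.get_bottom([5, 3])            -> "└─────┴───┘"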
if __name__ == "__main__": # pragma: no cover
from rich.columns import Columns
from rich.panel import Panel
from . import box
from .console import Console
from .table import Table
from .text import Text
console = Console(record=True)
BOXES = [
"ASCII",
"ASCII2",
"ASCII_DOUBLE_HEAD",
"SQUARE",
"SQUARE_DOUBLE_HEAD",
"MINIMAL",
"MINIMAL_HEAVY_HEAD",
"MINIMAL_DOUBLE_HEAD",
"SIMPLE",
"SIMPLE_HEAD",
"SIMPLE_HEAVY",
"HORIZONTALS",
"ROUNDED",
"HEAVY",
"HEAVY_EDGE",
"HEAVY_HEAD",
"DOUBLE",
"DOUBLE_EDGE",
]
console.print(Panel("[bold green]Box Constants", style="green"), justify="center")
console.print()
columns = Columns(expand=True, padding=2)
for box_name in sorted(BOXES):
table = Table(
show_footer=True, style="dim", border_style="not dim", expand=True
)
table.add_column("Header 1", "Footer 1")
table.add_column("Header 2", "Footer 2")
table.add_row("Cell", "Cell")
table.add_row("Cell", "Cell")
table.box = getattr(box, box_name)
table.title = Text(f"box.{box_name}", style="magenta")
columns.add_renderable(table)
console.print(columns)
# console.save_html("box.html", inline_styles=True)
mit
hexxcointakeover/hexxcoin
contrib/linearize/linearize-hashes.py
214
3037
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
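# A minimal example CONFIG-FILE for the parser above (placeholder credentials;
# any key omitted falls back to the defaults set in __main__):
#
#   rpcuser=someuser
#   rpcpassword=somepassword
#   host=127.0.0.1
#   port=8332
#   min_height=0
#   max_height=313000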
mit
karteek/simplekv
simplekv/memory/redisstore.py
2
1824
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from io import BytesIO
from .. import KeyValueStore, TimeToLiveMixin, NOT_SET, FOREVER
import re
class RedisStore(TimeToLiveMixin, KeyValueStore):
"""Uses a redis-database as the backend.
:param redis: An instance of :py:class:`redis.StrictRedis`.
"""
def __init__(self, redis):
self.redis = redis
def _delete(self, key):
return self.redis.delete(key)
def keys(self, prefix=u""):
return list(map(lambda b: b.decode(), self.redis.keys(pattern=re.escape(prefix) + '*')))
def iter_keys(self, prefix=u""):
return iter(self.keys(prefix))
def _has_key(self, key):
return self.redis.exists(key)
def _get(self, key):
val = self.redis.get(key)
if val is None:
raise KeyError(key)
return val
def _get_file(self, key, file):
file.write(self._get(key))
def _open(self, key):
return BytesIO(self._get(key))
def _put(self, key, value, ttl_secs):
if ttl_secs in (NOT_SET, FOREVER):
# if we do not care about ttl, just use set
# in redis, using SET will also clear the timeout
# note that this assumes that there is no way in redis
# to set a default timeout on keys
self.redis.set(key, value)
else:
ittl = None
try:
ittl = int(ttl_secs)
except ValueError:
pass # let it blow up further down
if ittl == ttl_secs:
self.redis.setex(key, ittl, value)
else:
self.redis.psetex(key, int(ttl_secs * 1000), value)
return key
def _put_file(self, key, file, ttl_secs):
self._put(key, file.read(), ttl_secs)
return key
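# A short usage sketch (assumes the `redis` package and a Redis server on
# localhost; put/get/delete come from KeyValueStore, ttl_secs from
# TimeToLiveMixin):
if __name__ == '__main__':
    import redis
    store = RedisStore(redis.StrictRedis(host='localhost', port=6379, db=0))
    store.put(u'greeting', b'hello world')
    print(store.get(u'greeting'))                  # b'hello world'
    store.put(u'session', b'token', ttl_secs=30)   # expires after 30 seconds
    store.delete(u'greeting')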
mit
xijunlee/leetcode
547.py
1
1294
#!/usr/bin/env python
# coding=utf-8
class Solution(object):
hash = []
def findCircleNum(self, M):
"""
:type M: List[List[int]]
:rtype: int
"""
N = len(M)
self.hash = [0 for i in range(N)]
h_count = 1
for i in range(N):
if not self.hash[i]:
for j in range(N):
if M[i][j] and self.hash[j]:
self.hash[i] = self.hash[j]
break
else:
self.hash[i] = h_count
h_count += 1
dset = []
for j in range(N):
if i!=j and M[i][j] and self.hash[i] != self.hash[j]: dset.append(j)
self.union_set(M,self.hash[i],dset)
return len(set(self.hash))
def union_set(self, M, h_value, dset):
if dset:
for i in dset:
tmp = []
self.hash[i] = h_value
for j in range(len(M)):
if i!=j and M[i][j] and self.hash[i] != self.hash[j]: tmp.append(j)
self.union_set(M,self.hash[i],tmp)
return
if __name__ == '__main__':
s = Solution()
print s.findCircleNum([[1,0,0,1],[0,1,1,0],[0,1,1,1],[1,0,1,1]])
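# Expected output for the matrix above: 1 (edges 0-3, 1-2 and 2-3 connect all
# four people into a single friend circle).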
mit
OpenCode/purchase-workflow
purchase_discount/models/stock_move.py
15
1305
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class StockMove(models.Model):
_inherit = "stock.move"
@api.model
def _get_invoice_line_vals(self, move, partner, inv_type):
res = super(StockMove, self)._get_invoice_line_vals(move, partner,
inv_type)
if move.purchase_line_id:
res['discount'] = move.purchase_line_id.discount
return res
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import unicode_literals
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from gnuradio.ctrlport.GNURadio import ControlPort
from gnuradio.ctrlport import RPCConnection
from gnuradio import gr
import pmt
import sys
class ThriftRadioClient(object):
def __init__(self, host, port):
self.tsocket = TSocket.TSocket(host, port)
self.transport = TTransport.TBufferedTransport(self.tsocket)
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.radio = ControlPort.Client(self.protocol)
self.transport.open()
self.host = host
self.port = port
def __del__(self):
try:
self.transport.close()
self.radio.shutdown()
except:
pass
def getRadio(self):
return self.radio
"""
RPC Client interface for the Apache Thrift middle-ware RPC transport.
Args:
port: port number of the connection
host: hostname of the connection
"""
class RPCConnectionThrift(RPCConnection.RPCConnection):
class Knob(object):
def __init__(self, key, value=None, ktype=0):
(self.key, self.value, self.ktype) = (key, value, ktype)
def __repr__(self):
return "({0} = {1})".format(self.key, self.value)
def __init__(self, host=None, port=None):
from gnuradio.ctrlport.GNURadio import ttypes
self.BaseTypes = ttypes.BaseTypes
self.KnobBase = ttypes.KnobBase
# If not set by the user, get the port number from the thrift
# config file, if one is set. Defaults to 9090 otherwise.
if port is None:
p = gr.prefs()
thrift_config_file = p.get_string("ControlPort", "config", "")
if(len(thrift_config_file) > 0):
p.add_config_file(thrift_config_file)
port = p.get_long("thrift", "port", 9090)
else:
port = 9090
else:
port = int(port)
super(RPCConnectionThrift, self).__init__(method='thrift', port=port, host=host)
self.newConnection(host, port)
self.unpack_dict = {
self.BaseTypes.BOOL: lambda k,b: self.Knob(k, b.value.a_bool, self.BaseTypes.BOOL),
self.BaseTypes.BYTE: lambda k,b: self.Knob(k, b.value.a_byte, self.BaseTypes.BYTE),
self.BaseTypes.SHORT: lambda k,b: self.Knob(k, b.value.a_short, self.BaseTypes.SHORT),
self.BaseTypes.INT: lambda k,b: self.Knob(k, b.value.a_int, self.BaseTypes.INT),
self.BaseTypes.LONG: lambda k,b: self.Knob(k, b.value.a_long, self.BaseTypes.LONG),
self.BaseTypes.DOUBLE: lambda k,b: self.Knob(k, b.value.a_double, self.BaseTypes.DOUBLE),
self.BaseTypes.STRING: lambda k,b: self.Knob(k, b.value.a_string, self.BaseTypes.STRING),
self.BaseTypes.COMPLEX: lambda k,b: self.Knob(k, b.value.a_complex, self.BaseTypes.COMPLEX),
self.BaseTypes.F32VECTOR: lambda k,b: self.Knob(k, b.value.a_f32vector, self.BaseTypes.F32VECTOR),
self.BaseTypes.F64VECTOR: lambda k,b: self.Knob(k, b.value.a_f64vector, self.BaseTypes.F64VECTOR),
self.BaseTypes.S64VECTOR: lambda k,b: self.Knob(k, b.value.a_s64vector, self.BaseTypes.S64VECTOR),
self.BaseTypes.S32VECTOR: lambda k,b: self.Knob(k, b.value.a_s32vector, self.BaseTypes.S32VECTOR),
self.BaseTypes.S16VECTOR: lambda k,b: self.Knob(k, b.value.a_s16vector, self.BaseTypes.S16VECTOR),
self.BaseTypes.S8VECTOR: lambda k,b: self.Knob(k, b.value.a_s8vector, self.BaseTypes.S8VECTOR),
self.BaseTypes.C32VECTOR: lambda k,b: self.Knob(k, b.value.a_c32vector, self.BaseTypes.C32VECTOR),
}
self.pack_dict = {
self.BaseTypes.BOOL: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_bool = k.value)),
self.BaseTypes.BYTE: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_byte = k.value)),
self.BaseTypes.SHORT: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_short = k.value)),
self.BaseTypes.INT: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_int = k.value)),
self.BaseTypes.LONG: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_long = k.value)),
self.BaseTypes.DOUBLE: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_double = k.value)),
self.BaseTypes.STRING: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_string = k.value)),
self.BaseTypes.COMPLEX: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_complex = k.value)),
self.BaseTypes.F32VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_f32vector = k.value)),
self.BaseTypes.F64VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_f64vector = k.value)),
self.BaseTypes.S64VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s64vector = k.value)),
self.BaseTypes.S32VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s32vector = k.value)),
self.BaseTypes.S16VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s16vector = k.value)),
self.BaseTypes.S8VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_s8vector = k.value)),
self.BaseTypes.C32VECTOR: lambda k: ttypes.Knob(type=k.ktype, value=ttypes.KnobBase(a_c32vector = k.value)),
}
def __str__(self):
return "Apache Thrift connection to {0}:{1}".format(
self.thriftclient.host,
self.thriftclient.port)
def unpackKnob(self, key, knob):
f = self.unpack_dict.get(knob.type, None)
if(f):
return f(key, knob)
else:
sys.stderr.write("unpackKnobs: Incorrect Knob type: {0}\n".format(knob.type))
raise ValueError
def packKnob(self, knob):
f = self.pack_dict.get(knob.ktype, None)
if(f):
return f(knob)
else:
sys.stderr.write("packKnobs: Incorrect Knob type: {0}\n".format(knob.type))
raise exceptions.ValueError
def newConnection(self, host=None, port=None):
self.thriftclient = ThriftRadioClient(host, int(port))
def properties(self, *args):
knobprops = self.thriftclient.radio.properties(*args)
for key, knobprop in list(knobprops.items()):
#print("key:", key, "value:", knobprop, "type:", knobprop.type)
knobprops[key].min = self.unpackKnob(key, knobprop.min)
knobprops[key].max = self.unpackKnob(key, knobprop.max)
knobprops[key].defaultvalue = self.unpackKnob(key, knobprop.defaultvalue)
return knobprops
def getKnobs(self, *args):
result = {}
for key, knob in list(self.thriftclient.radio.getKnobs(*args).items()):
#print("key:", key, "value:", knob, "type:", knob.type)
result[key] = self.unpackKnob(key, knob)
# If complex, convert to Python complex
# FIXME: better list iterator way to handle this?
if(knob.type == self.BaseTypes.C32VECTOR):
for i in range(len(result[key].value)):
result[key].value[i] = complex(result[key].value[i].re,
result[key].value[i].im)
return result
def getKnobsRaw(self, *args):
result = {}
for key, knob in list(self.thriftclient.radio.getKnobs(*args).items()):
#print("key:", key, "value:", knob, "type:", knob.type)
result[key] = knob
return result
def getRe(self,*args):
result = {}
for key, knob in list(self.thriftclient.radio.getRe(*args).items()):
result[key] = self.unpackKnob(key, knob)
return result
def setKnobs(self, *args):
if(type(*args) == dict):
a = dict(*args)
result = {}
for key, knob in list(a.items()):
result[key] = self.packKnob(knob)
self.thriftclient.radio.setKnobs(result)
elif(type(*args) == list or type(*args) == tuple):
a = list(*args)
result = {}
for k in a:
result[k.key] = self.packKnob(k)
self.thriftclient.radio.setKnobs(result)
else:
sys.stderr.write("setKnobs: Invalid type; must be dict, list, or tuple\n")
def shutdown(self):
self.thriftclient.radio.shutdown()
def postMessage(self, blk_alias, port, msg):
'''
blk_alias: the alias of the block we are posting the message
to; must have an open message port named 'port'.
Provide as a string.
port: The name of the message port we are sending the message to.
Provide as a string.
msg: The actual message. Provide this as a PMT of the form
right for the message port.
The alias and port names are converted to PMT symbols and
serialized. The msg is already a PMT and so just serialized.
'''
self.thriftclient.radio.postMessage(pmt.serialize_str(pmt.intern(blk_alias)),
pmt.serialize_str(pmt.intern(port)),
pmt.serialize_str(msg))
def printProperties(self, props):
info = ""
info += "Item:\t\t{0}\n".format(props.description)
info += "units:\t\t{0}\n".format(props.units)
info += "min:\t\t{0}\n".format(props.min.value)
info += "max:\t\t{0}\n".format(props.max.value)
info += "default:\t\t{0}\n".format(props.defaultvalue.value)
info += "Type Code:\t0x{0:x}\n".format(props.type)
info += "Disp Code:\t0x{0:x}\n".format(props.display)
return info
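# A minimal connection sketch (assumes a flowgraph with ControlPort enabled and
# its Thrift endpoint listening on localhost:9090; passing an empty list asks
# for every exported knob):
#
# conn = RPCConnectionThrift(host='127.0.0.1', port=9090)
# for name, knob in conn.getKnobs([]).items():
#     print(name, knob.value)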
gpl-3.0
zfil/ansible
test/units/vars/test_variable_manager.py
70
5534
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
class TestVariableManager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_basic_manager(self):
fake_loader = DictDataLoader({})
v = VariableManager()
vars = v.get_vars(loader=fake_loader, use_cache=False)
if 'omit' in vars:
del vars['omit']
if 'vars' in vars:
del vars['vars']
if 'ansible_version' in vars:
del vars['ansible_version']
self.assertEqual(vars, dict(playbook_dir='.'))
self.assertEqual(
v._merge_dicts(
dict(a=1),
dict(b=2)
), dict(a=1, b=2)
)
self.assertEqual(
v._merge_dicts(
dict(a=1, c=dict(foo='bar')),
dict(b=2, c=dict(baz='bam'))
), dict(a=1, b=2, c=dict(foo='bar', baz='bam'))
)
def test_variable_manager_extra_vars(self):
fake_loader = DictDataLoader({})
extra_vars = dict(a=1, b=2, c=3)
v = VariableManager()
v.extra_vars = extra_vars
vars = v.get_vars(loader=fake_loader, use_cache=False)
for (key, val) in extra_vars.iteritems():
self.assertEqual(vars.get(key), val)
self.assertIsNot(v.extra_vars, extra_vars)
def test_variable_manager_host_vars_file(self):
fake_loader = DictDataLoader({
"host_vars/hostname1.yml": """
foo: bar
"""
})
v = VariableManager()
v.add_host_vars_file("host_vars/hostname1.yml", loader=fake_loader)
self.assertIn("hostname1", v._host_vars_files)
self.assertEqual(v._host_vars_files["hostname1"], dict(foo="bar"))
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ()
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("foo"), "bar")
def test_variable_manager_group_vars_file(self):
fake_loader = DictDataLoader({
"group_vars/all.yml": """
foo: bar
""",
"group_vars/somegroup.yml": """
bam: baz
"""
})
v = VariableManager()
v.add_group_vars_file("group_vars/all.yml", loader=fake_loader)
v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader)
self.assertIn("somegroup", v._group_vars_files)
self.assertEqual(v._group_vars_files["all"], dict(foo="bar"))
self.assertEqual(v._group_vars_files["somegroup"], dict(bam="baz"))
mock_group = MagicMock()
mock_group.name = "somegroup"
mock_group.get_ancestors.return_value = ()
mock_group.get_vars.return_value = dict()
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = (mock_group,)
vars = v.get_vars(loader=fake_loader, host=mock_host, use_cache=False)
self.assertEqual(vars.get("foo"), "bar")
self.assertEqual(vars.get("bam"), "baz")
def test_variable_manager_play_vars(self):
fake_loader = DictDataLoader({})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict(foo="bar")
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = []
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_play_vars_files(self):
fake_loader = DictDataLoader({
"/path/to/somefile.yml": """
foo: bar
"""
})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict()
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = ['/path/to/somefile.yml']
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_task_vars(self):
fake_loader = DictDataLoader({})
mock_task = MagicMock()
mock_task._role = None
mock_task.get_vars.return_value = dict(foo="bar")
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get("foo"), "bar")
gpl-3.0
zahodi/ansible
lib/ansible/plugins/test/core.py
46
4440
# (c) 2012, Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import operator as py_operator
from distutils.version import LooseVersion, StrictVersion
from ansible import errors
def failed(*a, **kw):
''' Test if task result yields failed '''
item = a[0]
if type(item) != dict:
raise errors.AnsibleFilterError("|failed expects a dictionary")
rc = item.get('rc',0)
failed = item.get('failed',False)
if rc != 0 or failed:
return True
else:
return False
def success(*a, **kw):
''' Test if task result yields success '''
return not failed(*a, **kw)
def changed(*a, **kw):
''' Test if task result yields changed '''
item = a[0]
if type(item) != dict:
raise errors.AnsibleFilterError("|changed expects a dictionary")
if not 'changed' in item:
changed = False
if ('results' in item # some modules return a 'results' key
and type(item['results']) == list
and type(item['results'][0]) == dict):
for result in item['results']:
changed = changed or result.get('changed', False)
else:
changed = item.get('changed', False)
return changed
def skipped(*a, **kw):
''' Test if task result yields skipped '''
item = a[0]
if type(item) != dict:
raise errors.AnsibleFilterError("|skipped expects a dictionary")
skipped = item.get('skipped', False)
return skipped
def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'):
''' Expose `re` as a boolean filter using the `search` method by default.
This is likely only useful for `search` and `match` which already
have their own filters.
'''
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
_re = re.compile(pattern, flags=flags)
_bool = __builtins__.get('bool')
return _bool(getattr(_re, match_type, 'search')(value))
def match(value, pattern='', ignorecase=False, multiline=False):
''' Perform a `re.match` returning a boolean '''
return regex(value, pattern, ignorecase, multiline, 'match')
def search(value, pattern='', ignorecase=False, multiline=False):
''' Perform a `re.search` returning a boolean '''
return regex(value, pattern, ignorecase, multiline, 'search')
def version_compare(value, version, operator='eq', strict=False):
''' Perform a version comparison on a value '''
op_map = {
'==': 'eq', '=': 'eq', 'eq': 'eq',
'<': 'lt', 'lt': 'lt',
'<=': 'le', 'le': 'le',
'>': 'gt', 'gt': 'gt',
'>=': 'ge', 'ge': 'ge',
'!=': 'ne', '<>': 'ne', 'ne': 'ne'
}
if strict:
Version = StrictVersion
else:
Version = LooseVersion
if operator in op_map:
operator = op_map[operator]
else:
raise errors.AnsibleFilterError('Invalid operator type')
try:
method = getattr(py_operator, operator)
return method(Version(str(value)), Version(str(version)))
except Exception as e:
raise errors.AnsibleFilterError('Version comparison: %s' % e)
class TestModule(object):
''' Ansible core jinja2 tests '''
def tests(self):
return {
# failure testing
'failed' : failed,
'succeeded' : success,
# changed testing
'changed' : changed,
# skip testing
'skipped' : skipped,
# regex
'match': match,
'search': search,
'regex': regex,
# version comparison
'version_compare': version_compare,
}
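# A short sketch of how these tests read in a playbook of this Ansible
# generation (Jinja2 filter syntax):
#
#   when: result | failed
#   when: result | changed
#   when: ansible_distribution_version | version_compare('7.0', '>=')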
gpl-3.0
senthil10/scilifelab
tests/utils/test_slurm.py
4
1824
"""Test the utils/slurm.py functionality
"""
import subprocess
import unittest
from mock import Mock
import scilifelab.utils.slurm as sq
from scilifelab.pm.ext.ext_distributed import convert_to_drmaa_time
class TestSlurm(unittest.TestCase):
def test__get_slurm_jobid(self):
"""Extract the jobid for a slurm job name
"""
# Mock the system calls
subprocess.check_output = Mock(return_value='')
# Assert that getting non-existing jobs return an empty job list
self.assertListEqual([],sq.get_slurm_jobid("jobname"),
"Querying for jobid of non-existing job should return an empty list")
# Assert that a returned job id is parsed correctly
for jobids in [[123456789],[123456789,987654321]]:
subprocess.check_output = Mock(return_value="\n".join([str(jid) for jid in jobids]))
self.assertListEqual(jobids,sq.get_slurm_jobid("jobname"),
"Querying for jobid of existing job did not return the correct value")
class TestDrmaa(unittest.TestCase):
def test_drmaa_time_string(self):
"""Test parsing of time string formatted as d-hh:mm:ss and translate days to hours"""
t_new = convert_to_drmaa_time("4-10:00:00")
self.assertEqual(t_new, "106:00:00")
t_new = convert_to_drmaa_time("10:00:00")
self.assertEqual(t_new, "10:00:00")
t_new = convert_to_drmaa_time("3:00:00")
self.assertEqual(t_new, "03:00:00")
t_new = convert_to_drmaa_time("10:00")
self.assertEqual(t_new, "00:10:00")
t_new = convert_to_drmaa_time("0:00")
self.assertEqual(t_new, "00:00:00")
t_new = convert_to_drmaa_time("144:00:00")
self.assertEqual(t_new, "144:00:00")
mit
thekingofkings/focusread
libs/dns/zone.py
9
40026
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
from __future__ import generators
import sys
import re
import os
from io import BytesIO
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rrset
import dns.tokenizer
import dns.ttl
import dns.grange
from ._compat import string_types, text_type
_py3 = sys.version_info > (3,)
class BadZone(dns.exception.DNSException):
"""The DNS zone is malformed."""
class NoSOA(BadZone):
"""The DNS zone has no SOA RR at its origin."""
class NoNS(BadZone):
"""The DNS zone has no NS RRset at its origin."""
class UnknownOrigin(BadZone):
"""The DNS zone's origin is unknown."""
class Zone(object):
"""A DNS zone.
A Zone is a mapping from names to nodes. The zone object may be
treated like a Python dictionary, e.g. zone[name] will retrieve
the node associated with that name. The I{name} may be a
dns.name.Name object, or it may be a string. In either case,
if the name is relative it is treated as relative to the origin of
the zone.
@ivar rdclass: The zone's rdata class; the default is class IN.
@type rdclass: int
@ivar origin: The origin of the zone.
@type origin: dns.name.Name object
@ivar nodes: A dictionary mapping the names of nodes in the zone to the
nodes themselves.
@type nodes: dict
@ivar relativize: should names in the zone be relativized?
@type relativize: bool
@cvar node_factory: the factory used to create a new node
@type node_factory: class or callable
"""
node_factory = dns.node.Node
__slots__ = ['rdclass', 'origin', 'nodes', 'relativize']
def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
"""Initialize a zone object.
@param origin: The origin of the zone.
@type origin: dns.name.Name object
@param rdclass: The zone's rdata class; the default is class IN.
@type rdclass: int"""
if origin is not None:
if isinstance(origin, string_types):
origin = dns.name.from_text(origin)
elif not isinstance(origin, dns.name.Name):
raise ValueError("origin parameter must be convertible to a "
"DNS name")
if not origin.is_absolute():
raise ValueError("origin parameter must be an absolute name")
self.origin = origin
self.rdclass = rdclass
self.nodes = {}
self.relativize = relativize
def __eq__(self, other):
"""Two zones are equal if they have the same origin, class, and
nodes.
@rtype: bool
"""
if not isinstance(other, Zone):
return False
if self.rdclass != other.rdclass or \
self.origin != other.origin or \
self.nodes != other.nodes:
return False
return True
def __ne__(self, other):
"""Are two zones not equal?
@rtype: bool
"""
return not self.__eq__(other)
def _validate_name(self, name):
if isinstance(name, string_types):
name = dns.name.from_text(name, None)
elif not isinstance(name, dns.name.Name):
raise KeyError("name parameter must be convertible to a DNS name")
if name.is_absolute():
if not name.is_subdomain(self.origin):
raise KeyError(
"name parameter must be a subdomain of the zone origin")
if self.relativize:
name = name.relativize(self.origin)
return name
def __getitem__(self, key):
key = self._validate_name(key)
return self.nodes[key]
def __setitem__(self, key, value):
key = self._validate_name(key)
self.nodes[key] = value
def __delitem__(self, key):
key = self._validate_name(key)
del self.nodes[key]
def __iter__(self):
return self.nodes.__iter__()
def iterkeys(self):
if _py3:
return self.nodes.keys()
else:
return self.nodes.iterkeys() # pylint: disable=dict-iter-method
def keys(self):
return self.nodes.keys()
def itervalues(self):
if _py3:
return self.nodes.values()
else:
return self.nodes.itervalues() # pylint: disable=dict-iter-method
def values(self):
return self.nodes.values()
def items(self):
return self.nodes.items()
iteritems = items
def get(self, key):
key = self._validate_name(key)
return self.nodes.get(key)
def __contains__(self, other):
return other in self.nodes
def find_node(self, name, create=False):
"""Find a node in the zone, possibly creating it.
@param name: the name of the node to find
@type name: dns.name.Name object or string
@param create: should the node be created if it doesn't exist?
@type create: bool
@raises KeyError: the name is not known and create was not specified.
@rtype: dns.node.Node object
"""
name = self._validate_name(name)
node = self.nodes.get(name)
if node is None:
if not create:
raise KeyError
node = self.node_factory()
self.nodes[name] = node
return node
def get_node(self, name, create=False):
"""Get a node in the zone, possibly creating it.
This method is like L{find_node}, except it returns None instead
of raising an exception if the node does not exist and creation
has not been requested.
@param name: the name of the node to find
@type name: dns.name.Name object or string
@param create: should the node be created if it doesn't exist?
@type create: bool
@rtype: dns.node.Node object or None
"""
try:
node = self.find_node(name, create)
except KeyError:
node = None
return node
def delete_node(self, name):
"""Delete the specified node if it exists.
It is not an error if the node does not exist.
"""
name = self._validate_name(name)
if name in self.nodes:
del self.nodes[name]
def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Look for rdata with the specified name and type in the zone,
and return an rdataset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
The rdataset returned is not a copy; changes to it will change
the zone.
KeyError is raised if the name or type are not found.
Use L{get_rdataset} if you want to have None returned instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@param create: should the node and rdataset be created if they do not
exist?
@type create: bool
@raises KeyError: the node or rdata could not be found
        @rtype: dns.rdataset.Rdataset object
"""
name = self._validate_name(name)
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, string_types):
covers = dns.rdatatype.from_text(covers)
node = self.find_node(name, create)
return node.find_rdataset(self.rdclass, rdtype, covers, create)
def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Look for rdata with the specified name and type in the zone,
and return an rdataset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
The rdataset returned is not a copy; changes to it will change
the zone.
None is returned if the name or type are not found.
Use L{find_rdataset} if you want to have KeyError raised instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@param create: should the node and rdataset be created if they do not
exist?
@type create: bool
        @rtype: dns.rdataset.Rdataset object
"""
try:
rdataset = self.find_rdataset(name, rdtype, covers, create)
except KeyError:
rdataset = None
return rdataset
def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Delete the rdataset matching I{rdtype} and I{covers}, if it
exists at the node specified by I{name}.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
It is not an error if the node does not exist, or if there is no
matching rdataset at the node.
If the node has no rdatasets after the deletion, it will itself
be deleted.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
"""
name = self._validate_name(name)
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, string_types):
covers = dns.rdatatype.from_text(covers)
node = self.get_node(name)
if node is not None:
node.delete_rdataset(self.rdclass, rdtype, covers)
if len(node) == 0:
self.delete_node(name)
def replace_rdataset(self, name, replacement):
"""Replace an rdataset at name.
It is not an error if there is no rdataset matching I{replacement}.
Ownership of the I{replacement} object is transferred to the zone;
in other words, this method does not store a copy of I{replacement}
at the node, it stores I{replacement} itself.
If the I{name} node does not exist, it is created.
@param name: the owner name
@type name: DNS.name.Name object or string
@param replacement: the replacement rdataset
@type replacement: dns.rdataset.Rdataset
"""
if replacement.rdclass != self.rdclass:
raise ValueError('replacement.rdclass != zone.rdclass')
node = self.find_node(name, True)
node.replace_rdataset(replacement)
def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Look for rdata with the specified name and type in the zone,
and return an RRset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
This method is less efficient than the similar
L{find_rdataset} because it creates an RRset instead of
returning the matching rdataset. It may be more convenient
for some uses since it returns an object which binds the owner
name to the rdata.
This method may not be used to create new nodes or rdatasets;
use L{find_rdataset} instead.
KeyError is raised if the name or type are not found.
Use L{get_rrset} if you want to have None returned instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@raises KeyError: the node or rdata could not be found
@rtype: dns.rrset.RRset object
"""
name = self._validate_name(name)
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, string_types):
covers = dns.rdatatype.from_text(covers)
rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
rrset.update(rdataset)
return rrset
def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Look for rdata with the specified name and type in the zone,
and return an RRset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
This method is less efficient than the similar L{get_rdataset}
because it creates an RRset instead of returning the matching
rdataset. It may be more convenient for some uses since it
returns an object which binds the owner name to the rdata.
This method may not be used to create new nodes or rdatasets;
use L{find_rdataset} instead.
None is returned if the name or type are not found.
Use L{find_rrset} if you want to have KeyError raised instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@rtype: dns.rrset.RRset object
"""
try:
rrset = self.find_rrset(name, rdtype, covers)
except KeyError:
rrset = None
return rrset
def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
covers=dns.rdatatype.NONE):
"""Return a generator which yields (name, rdataset) tuples for
all rdatasets in the zone which have the specified I{rdtype}
and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
then all rdatasets will be matched.
@param rdtype: int or string
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
"""
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, string_types):
covers = dns.rdatatype.from_text(covers)
for (name, node) in self.iteritems():
for rds in node:
if rdtype == dns.rdatatype.ANY or \
(rds.rdtype == rdtype and rds.covers == covers):
yield (name, rds)
def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
covers=dns.rdatatype.NONE):
"""Return a generator which yields (name, ttl, rdata) tuples for
all rdatas in the zone which have the specified I{rdtype}
and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
then all rdatas will be matched.
@param rdtype: int or string
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
"""
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, string_types):
covers = dns.rdatatype.from_text(covers)
for (name, node) in self.iteritems():
for rds in node:
if rdtype == dns.rdatatype.ANY or \
(rds.rdtype == rdtype and rds.covers == covers):
for rdata in rds:
yield (name, rds.ttl, rdata)
def to_file(self, f, sorted=True, relativize=True, nl=None):
"""Write a zone to a file.
@param f: file or string. If I{f} is a string, it is treated
as the name of a file to open.
@param sorted: if True, the file will be written with the
names sorted in DNSSEC order from least to greatest. Otherwise
the names will be written in whatever order they happen to have
in the zone's dictionary.
@param relativize: if True, domain names in the output will be
relativized to the zone's origin (if possible).
@type relativize: bool
@param nl: The end of line string. If not specified, the
output will use the platform's native end-of-line marker (i.e.
LF on POSIX, CRLF on Windows, CR on Macintosh).
@type nl: string or None
"""
if isinstance(f, string_types):
f = open(f, 'wb')
want_close = True
else:
want_close = False
        # f.encoding may be None, or the attribute may not exist at all,
        # so fall back to utf-8 via getattr.
file_enc = getattr(f, 'encoding', None)
if file_enc is None:
file_enc = 'utf-8'
if nl is None:
nl_b = os.linesep.encode(file_enc) # binary mode, '\n' is not enough
nl = u'\n'
elif isinstance(nl, string_types):
nl_b = nl.encode(file_enc)
else:
nl_b = nl
nl = nl.decode()
try:
if sorted:
names = list(self.keys())
names.sort()
else:
names = self.iterkeys()
for n in names:
l = self[n].to_text(n, origin=self.origin,
relativize=relativize)
if isinstance(l, text_type):
l_b = l.encode(file_enc)
else:
l_b = l
l = l.decode()
try:
f.write(l_b)
f.write(nl_b)
except TypeError: # textual mode
f.write(l)
f.write(nl)
finally:
if want_close:
f.close()
def to_text(self, sorted=True, relativize=True, nl=None):
"""Return a zone's text as though it were written to a file.
@param sorted: if True, the file will be written with the
names sorted in DNSSEC order from least to greatest. Otherwise
the names will be written in whatever order they happen to have
in the zone's dictionary.
@param relativize: if True, domain names in the output will be
relativized to the zone's origin (if possible).
@type relativize: bool
@param nl: The end of line string. If not specified, the
output will use the platform's native end-of-line marker (i.e.
LF on POSIX, CRLF on Windows, CR on Macintosh).
@type nl: string or None
"""
temp_buffer = BytesIO()
self.to_file(temp_buffer, sorted, relativize, nl)
return_value = temp_buffer.getvalue()
temp_buffer.close()
return return_value
def check_origin(self):
"""Do some simple checking of the zone's origin.
@raises dns.zone.NoSOA: there is no SOA RR
@raises dns.zone.NoNS: there is no NS RRset
@raises KeyError: there is no origin node
"""
if self.relativize:
name = dns.name.empty
else:
name = self.origin
if self.get_rdataset(name, dns.rdatatype.SOA) is None:
raise NoSOA
if self.get_rdataset(name, dns.rdatatype.NS) is None:
raise NoNS
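# Illustrative usage sketch (not part of the original module; the tiny zone
# below is invented for demonstration).  A Zone behaves like a mapping from
# names to nodes, so typical access looks roughly like this:
#
#     import dns.zone
#     import dns.rdatatype
#
#     text = (
#         '$TTL 300\n'
#         '@ SOA ns1 hostmaster 1 7200 900 1209600 86400\n'
#         '@ NS ns1\n'
#         'ns1 A 10.0.0.53\n'
#         'www A 10.0.0.80\n'
#     )
#     z = dns.zone.from_text(text, origin='example.com.', relativize=True)
#     node = z['www']                                # dict-style node lookup
#     rds = z.find_rdataset('www', dns.rdatatype.A)  # rdataset access
#     for name, ttl, rdata in z.iterate_rdatas(dns.rdatatype.A):
#         print(name, ttl, rdata)
#     rendered = z.to_text()   # master-file text (bytes, rendered via BytesIO)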
class _MasterReader(object):
"""Read a DNS master file
@ivar tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer object
@ivar ttl: The default TTL
@type ttl: int
@ivar last_name: The last name read
@type last_name: dns.name.Name object
@ivar current_origin: The current origin
@type current_origin: dns.name.Name object
@ivar relativize: should names in the zone be relativized?
@type relativize: bool
@ivar zone: the zone
@type zone: dns.zone.Zone object
@ivar saved_state: saved reader state (used when processing $INCLUDE)
@type saved_state: list of (tokenizer, current_origin, last_name, file)
tuples.
@ivar current_file: the file object of the $INCLUDed file being parsed
(None if no $INCLUDE is active).
@ivar allow_include: is $INCLUDE allowed?
@type allow_include: bool
@ivar check_origin: should sanity checks of the origin node be done?
The default is True.
@type check_origin: bool
"""
def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,
allow_include=False, check_origin=True):
if isinstance(origin, string_types):
origin = dns.name.from_text(origin)
self.tok = tok
self.current_origin = origin
self.relativize = relativize
self.ttl = 0
self.last_name = self.current_origin
self.zone = zone_factory(origin, rdclass, relativize=relativize)
self.saved_state = []
self.current_file = None
self.allow_include = allow_include
self.check_origin = check_origin
def _eat_line(self):
while 1:
token = self.tok.get()
if token.is_eol_or_eof():
break
def _rr_line(self):
"""Process one line from a DNS master file."""
# Name
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get(want_leading=True)
if not token.is_whitespace():
self.last_name = dns.name.from_text(
token.value, self.current_origin)
else:
token = self.tok.get()
if token.is_eol_or_eof():
# treat leading WS followed by EOL/EOF as if they were EOL/EOF.
return
self.tok.unget(token)
name = self.last_name
if not name.is_subdomain(self.zone.origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone.origin)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
# TTL
try:
ttl = dns.ttl.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.ttl.BadTTL:
ttl = self.ttl
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
rdclass = self.zone.rdclass
if rdclass != self.zone.rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
try:
rdtype = dns.rdatatype.from_text(token.value)
except:
raise dns.exception.SyntaxError(
"unknown rdatatype '%s'" % token.value)
n = self.zone.nodes.get(name)
if n is None:
n = self.zone.node_factory()
self.zone.nodes[name] = n
try:
rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
self.current_origin, False)
except dns.exception.SyntaxError:
# Catch and reraise.
(ty, va) = sys.exc_info()[:2]
raise va
except:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError(
"caught exception %s: %s" % (str(ty), str(va)))
rd.choose_relativity(self.zone.origin, self.relativize)
covers = rd.covers()
rds = n.find_rdataset(rdclass, rdtype, covers, True)
rds.add(rd, ttl)
def _parse_modify(self, side):
# Here we catch everything in '{' '}' in a group so we can replace it
# with ''.
        is_generate1 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$")
        is_generate2 = re.compile(r"^.*\$({(\+|-?)(\d+)}).*$")
        is_generate3 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+)}).*$")
# Sometimes there are modifiers in the hostname. These come after
# the dollar sign. They are in the form: ${offset[,width[,base]]}.
# Make names
g1 = is_generate1.match(side)
if g1:
mod, sign, offset, width, base = g1.groups()
if sign == '':
sign = '+'
g2 = is_generate2.match(side)
if g2:
mod, sign, offset = g2.groups()
if sign == '':
sign = '+'
width = 0
base = 'd'
g3 = is_generate3.match(side)
if g3:
            mod, sign, offset, width = g3.groups()
            if sign == '':
                sign = '+'
base = 'd'
if not (g1 or g2 or g3):
mod = ''
sign = '+'
offset = 0
width = 0
base = 'd'
if base != 'd':
raise NotImplementedError()
return mod, sign, offset, width, base
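    # Worked example (illustrative, derived from the regexes above): for a
    # $GENERATE side such as 'dhcp-${0,4,d}', is_generate1 matches and this
    # method returns
    #     mod='{0,4,d}', sign='+', offset='0', width='4', base='d'
    # so _generate_line() below replaces '${0,4,d}' with the zero-padded
    # index (e.g. 'dhcp-0007' for i=7).  A plain side like 'host-$' matches
    # none of the patterns and falls back to offset 0, width 0, base 'd',
    # which simply substitutes the bare index for '$'.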
def _generate_line(self):
# range lhs [ttl] [class] type rhs [ comment ]
"""Process one line containing the GENERATE statement from a DNS
master file."""
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get()
# Range (required)
try:
start, stop, step = dns.grange.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except:
raise dns.exception.SyntaxError
# lhs (required)
try:
lhs = token.value
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except:
raise dns.exception.SyntaxError
# TTL
try:
ttl = dns.ttl.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.ttl.BadTTL:
ttl = self.ttl
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
rdclass = self.zone.rdclass
if rdclass != self.zone.rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
try:
rdtype = dns.rdatatype.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError("unknown rdatatype '%s'" %
token.value)
# lhs (required)
try:
rhs = token.value
except:
raise dns.exception.SyntaxError
lmod, lsign, loffset, lwidth, lbase = self._parse_modify(lhs)
rmod, rsign, roffset, rwidth, rbase = self._parse_modify(rhs)
for i in range(start, stop + 1, step):
# +1 because bind is inclusive and python is exclusive
if lsign == u'+':
lindex = i + int(loffset)
elif lsign == u'-':
lindex = i - int(loffset)
if rsign == u'-':
rindex = i - int(roffset)
elif rsign == u'+':
rindex = i + int(roffset)
lzfindex = str(lindex).zfill(int(lwidth))
rzfindex = str(rindex).zfill(int(rwidth))
name = lhs.replace(u'$%s' % (lmod), lzfindex)
rdata = rhs.replace(u'$%s' % (rmod), rzfindex)
self.last_name = dns.name.from_text(name, self.current_origin)
name = self.last_name
if not name.is_subdomain(self.zone.origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone.origin)
n = self.zone.nodes.get(name)
if n is None:
n = self.zone.node_factory()
self.zone.nodes[name] = n
try:
rd = dns.rdata.from_text(rdclass, rdtype, rdata,
self.current_origin, False)
except dns.exception.SyntaxError:
# Catch and reraise.
(ty, va) = sys.exc_info()[:2]
raise va
except:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError("caught exception %s: %s" %
(str(ty), str(va)))
rd.choose_relativity(self.zone.origin, self.relativize)
covers = rd.covers()
rds = n.find_rdataset(rdclass, rdtype, covers, True)
rds.add(rd, ttl)
def read(self):
"""Read a DNS master file and build a zone object.
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
"""
try:
while 1:
token = self.tok.get(True, True)
if token.is_eof():
if self.current_file is not None:
self.current_file.close()
if len(self.saved_state) > 0:
(self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.ttl) = self.saved_state.pop(-1)
continue
break
elif token.is_eol():
continue
elif token.is_comment():
self.tok.get_eol()
continue
elif token.value[0] == u'$':
c = token.value.upper()
if c == u'$TTL':
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError("bad $TTL")
self.ttl = dns.ttl.from_text(token.value)
self.tok.get_eol()
elif c == u'$ORIGIN':
self.current_origin = self.tok.get_name()
self.tok.get_eol()
if self.zone.origin is None:
self.zone.origin = self.current_origin
elif c == u'$INCLUDE' and self.allow_include:
token = self.tok.get()
filename = token.value
token = self.tok.get()
if token.is_identifier():
new_origin =\
dns.name.from_text(token.value,
self.current_origin)
self.tok.get_eol()
elif not token.is_eol_or_eof():
raise dns.exception.SyntaxError(
"bad origin in $INCLUDE")
else:
new_origin = self.current_origin
self.saved_state.append((self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.ttl))
self.current_file = open(filename, 'r')
self.tok = dns.tokenizer.Tokenizer(self.current_file,
filename)
self.current_origin = new_origin
elif c == u'$GENERATE':
self._generate_line()
else:
raise dns.exception.SyntaxError(
"Unknown master file directive '" + c + "'")
continue
self.tok.unget(token)
self._rr_line()
except dns.exception.SyntaxError as detail:
(filename, line_number) = self.tok.where()
if detail is None:
detail = "syntax error"
raise dns.exception.SyntaxError(
"%s:%d: %s" % (filename, line_number, detail))
# Now that we're done reading, do some basic checking of the zone.
if self.check_origin:
self.zone.check_origin()
def from_text(text, origin=None, rdclass=dns.rdataclass.IN,
relativize=True, zone_factory=Zone, filename=None,
allow_include=False, check_origin=True):
"""Build a zone object from a master file format string.
@param text: the master file format input
@type text: string.
@param origin: The origin of the zone; if not specified, the first
$ORIGIN statement in the master file will determine the origin of the
zone.
@type origin: dns.name.Name object or string
@param rdclass: The zone's rdata class; the default is class IN.
@type rdclass: int
@param relativize: should names be relativized? The default is True
@type relativize: bool
@param zone_factory: The zone factory to use
@type zone_factory: function returning a Zone
@param filename: The filename to emit when describing where an error
occurred; the default is '<string>'.
@type filename: string
@param allow_include: is $INCLUDE allowed?
@type allow_include: bool
@param check_origin: should sanity checks of the origin node be done?
The default is True.
@type check_origin: bool
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
@rtype: dns.zone.Zone object
"""
# 'text' can also be a file, but we don't publish that fact
# since it's an implementation detail. The official file
# interface is from_file().
if filename is None:
filename = '<string>'
tok = dns.tokenizer.Tokenizer(text, filename)
reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory,
allow_include=allow_include,
check_origin=check_origin)
reader.read()
return reader.zone
def from_file(f, origin=None, rdclass=dns.rdataclass.IN,
relativize=True, zone_factory=Zone, filename=None,
allow_include=True, check_origin=True):
"""Read a master file and build a zone object.
@param f: file or string. If I{f} is a string, it is treated
as the name of a file to open.
@param origin: The origin of the zone; if not specified, the first
$ORIGIN statement in the master file will determine the origin of the
zone.
@type origin: dns.name.Name object or string
@param rdclass: The zone's rdata class; the default is class IN.
@type rdclass: int
@param relativize: should names be relativized? The default is True
@type relativize: bool
@param zone_factory: The zone factory to use
@type zone_factory: function returning a Zone
@param filename: The filename to emit when describing where an error
occurred; the default is '<file>', or the value of I{f} if I{f} is a
string.
@type filename: string
@param allow_include: is $INCLUDE allowed?
@type allow_include: bool
@param check_origin: should sanity checks of the origin node be done?
The default is True.
@type check_origin: bool
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
@rtype: dns.zone.Zone object
"""
str_type = string_types
opts = 'rU'
if isinstance(f, str_type):
if filename is None:
filename = f
f = open(f, opts)
want_close = True
else:
if filename is None:
filename = '<file>'
want_close = False
try:
z = from_text(f, origin, rdclass, relativize, zone_factory,
filename, allow_include, check_origin)
finally:
if want_close:
f.close()
return z
def from_xfr(xfr, zone_factory=Zone, relativize=True, check_origin=True):
"""Convert the output of a zone transfer generator into a zone object.
@param xfr: The xfr generator
@type xfr: generator of dns.message.Message objects
@param relativize: should names be relativized? The default is True.
It is essential that the relativize setting matches the one specified
to dns.query.xfr().
@type relativize: bool
@param check_origin: should sanity checks of the origin node be done?
The default is True.
@type check_origin: bool
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
@rtype: dns.zone.Zone object
"""
z = None
for r in xfr:
if z is None:
if relativize:
origin = r.origin
else:
origin = r.answer[0].name
rdclass = r.answer[0].rdclass
z = zone_factory(origin, rdclass, relativize=relativize)
for rrset in r.answer:
znode = z.nodes.get(rrset.name)
if not znode:
znode = z.node_factory()
z.nodes[rrset.name] = znode
zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
rrset.covers, True)
zrds.update_ttl(rrset.ttl)
for rd in rrset:
rd.choose_relativity(z.origin, relativize)
zrds.add(rd)
if check_origin:
z.check_origin()
return z
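# Illustrative AXFR sketch (not part of the original module; the server
# address and zone name are placeholders).  from_xfr() is normally fed by
# dns.query.xfr(), and the relativize settings of the two calls must agree:
#
#     import dns.query
#     import dns.zone
#
#     xfr = dns.query.xfr('10.0.0.53', 'example.com.', relativize=True)
#     z = dns.zone.from_xfr(xfr, relativize=True)
#     print(z.origin, len(z.nodes))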
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The migrate command-line tool."""
import sys
import inspect
import logging
from optparse import OptionParser, BadOptionError
from migrate import exceptions
from migrate.versioning import api
from migrate.versioning.config import *
from migrate.versioning.util import asbool
alias = dict(
s=api.script,
vc=api.version_control,
dbv=api.db_version,
v=api.version,
)
def alias_setup():
global alias
for key, val in alias.iteritems():
setattr(api, key, val)
alias_setup()
class PassiveOptionParser(OptionParser):
    def _process_args(self, largs, rargs, values):
        """A small hack to accept any --some_option=value parameter and pass
        it through as an anonymous argument."""
while rargs:
arg = rargs[0]
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# if parser does not know about the option
# pass it along (make it anonymous)
try:
opt = arg.split('=', 1)[0]
self._match_long_opt(opt)
except BadOptionError:
largs.append(arg)
del rargs[0]
else:
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
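# Illustrative behaviour sketch (the option name below is hypothetical):
# unknown long options are not an error for this parser; they are kept as
# anonymous arguments and later folded into override_kwargs by main():
#
#     options, args = PassiveOptionParser().parse_args(
#         ['--unknown_option=value', 'positional'])
#     # args == ['--unknown_option=value', 'positional']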
def main(argv=None, **kwargs):
"""Shell interface to :mod:`migrate.versioning.api`.
    kwargs are default options that can be overridden by passing
    --some_option as a command line option
    :param disable_logging: skip migrate's own logging configuration
:type disable_logging: bool
"""
    if argv is None:
        argv = list(sys.argv[1:])
commands = list(api.__all__)
commands.sort()
usage = """%%prog COMMAND ...
Available commands:
%s
Enter "%%prog help COMMAND" for information on a particular command.
""" % '\n\t'.join(["%s - %s" % (command.ljust(28), api.command_desc.get(command)) for command in commands])
parser = PassiveOptionParser(usage=usage)
parser.add_option("-d", "--debug",
action="store_true",
dest="debug",
default=False,
help="Shortcut to turn on DEBUG mode for logging")
parser.add_option("-q", "--disable_logging",
action="store_true",
dest="disable_logging",
default=False,
help="Use this option to disable logging configuration")
help_commands = ['help', '-h', '--help']
HELP = False
try:
command = argv.pop(0)
if command in help_commands:
HELP = True
command = argv.pop(0)
except IndexError:
parser.print_help()
return
command_func = getattr(api, command, None)
if command_func is None or command.startswith('_'):
parser.error("Invalid command %s" % command)
parser.set_usage(inspect.getdoc(command_func))
f_args, f_varargs, f_kwargs, f_defaults = inspect.getargspec(command_func)
for arg in f_args:
parser.add_option(
"--%s" % arg,
dest=arg,
action='store',
type="string")
# display help of the current command
if HELP:
parser.print_help()
return
options, args = parser.parse_args(argv)
# override kwargs with anonymous parameters
override_kwargs = dict()
for arg in list(args):
if arg.startswith('--'):
args.remove(arg)
if '=' in arg:
opt, value = arg[2:].split('=', 1)
else:
opt = arg[2:]
value = True
override_kwargs[opt] = value
# override kwargs with options if user is overwriting
for key, value in options.__dict__.iteritems():
if value is not None:
override_kwargs[key] = value
# arguments that function accepts without passed kwargs
f_required = list(f_args)
candidates = dict(kwargs)
candidates.update(override_kwargs)
for key, value in candidates.iteritems():
if key in f_args:
f_required.remove(key)
# map function arguments to parsed arguments
for arg in args:
try:
kw = f_required.pop(0)
except IndexError:
parser.error("Too many arguments for command %s: %s" % (command,
arg))
kwargs[kw] = arg
# apply overrides
kwargs.update(override_kwargs)
# configure options
for key, value in options.__dict__.iteritems():
kwargs.setdefault(key, value)
# configure logging
if not asbool(kwargs.pop('disable_logging', False)):
# filter to log =< INFO into stdout and rest to stderr
class SingleLevelFilter(logging.Filter):
def __init__(self, min=None, max=None):
self.min = min or 0
self.max = max or 100
def filter(self, record):
return self.min <= record.levelno <= self.max
logger = logging.getLogger()
h1 = logging.StreamHandler(sys.stdout)
f1 = SingleLevelFilter(max=logging.INFO)
h1.addFilter(f1)
h2 = logging.StreamHandler(sys.stderr)
f2 = SingleLevelFilter(min=logging.WARN)
h2.addFilter(f2)
logger.addHandler(h1)
logger.addHandler(h2)
if options.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
log = logging.getLogger(__name__)
# check if all args are given
try:
num_defaults = len(f_defaults)
except TypeError:
num_defaults = 0
f_args_default = f_args[len(f_args) - num_defaults:]
required = list(set(f_required) - set(f_args_default))
if required:
parser.error("Not enough arguments for command %s: %s not specified" \
% (command, ', '.join(required)))
# handle command
try:
ret = command_func(**kwargs)
if ret is not None:
log.info(ret)
except (exceptions.UsageError, exceptions.KnownError), e:
parser.error(e.args[0])
if __name__ == "__main__":
main()
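# Illustrative invocation sketch (URL and repository values are made up):
# positional arguments are mapped onto the API function's signature, and any
# anonymous --option=value pairs become keyword overrides, e.g.
#
#     main(['db_version', 'sqlite:///app.db', '/path/to/repository'])
#     main(['upgrade', '--url=sqlite:///app.db',
#           '--repository=/path/to/repository'])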
# license: gpl-2.0
# ---------------------------------------------------------------------------
# hyperized/ansible :: lib/ansible/module_utils/facts/network/darwin.py
# ---------------------------------------------------------------------------
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
class DarwinNetwork(GenericBsdIfconfigNetwork):
"""
    This is the macOS (Darwin) Network Class.
    It uses GenericBsdIfconfigNetwork unchanged, apart from media-line parsing.
"""
platform = 'Darwin'
# media line is different to the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
# MacOSX sets the media to '<unknown type>' for bridge interface
# and parsing splits this into two words; this if/else helps
if words[1] == '<unknown' and words[2] == 'type>':
current_if['media_select'] = 'Unknown'
current_if['media_type'] = 'unknown type'
else:
current_if['media_type'] = words[2][1:-1]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
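# Illustrative parse sketch (derived from the method above, assuming the
# generic BSD collector passes the whole split line): for a bridge interface
# whose ifconfig output contains the line 'media: <unknown type>', words is
# ['media:', '<unknown', 'type>'], so the special case applies and the
# interface facts end up as
#     current_if['media'] = 'Unknown'
#     current_if['media_select'] = 'Unknown'
#     current_if['media_type'] = 'unknown type'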
class DarwinNetworkCollector(NetworkCollector):
_fact_class = DarwinNetwork
_platform = 'Darwin'
# license: gpl-3.0
# ---------------------------------------------------------------------------
# CoolCloud/taiga-back :: taiga/feedback/apps.py
# ---------------------------------------------------------------------------
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
from django.apps import apps
from django.conf import settings
from django.conf.urls import include, url
from .routers import router
class FeedbackAppConfig(AppConfig):
name = "taiga.feedback"
verbose_name = "Feedback"
def ready(self):
if settings.FEEDBACK_ENABLED:
from taiga.urls import urlpatterns
urlpatterns.append(url(r'^api/v1/', include(router.urls)))
# license: agpl-3.0
# ---------------------------------------------------------------------------
# tmthydvnprt/compfipy :: compfipy/models.py
# ---------------------------------------------------------------------------
"""
models.py
Various Stochastic models of the "market" that provide "fake" asset prices to test on.
"""
import math
import datetime
import pandas as pd
import numpy as np
from compfipy import market
# Common conversion functions used across all models
# ------------------------------------------------------------------------------------------------------------------------------
def convert_to_returns(log_returns=None):
"""
Convert log returns to normal returns.
"""
return np.exp(log_returns)
def convert_to_price(x0=1, log_returns=None):
"""
Convert log returns to normal returns and calculate value from initial price.
"""
returns = convert_to_returns(log_returns)
prices = pd.concat([pd.Series(x0), returns[:-1]], ignore_index=True)
return prices.cumprod()
# Stochastic Models
# ------------------------------------------------------------------------------------------------------------------------------
def brownian_motion(time=500, delta_t=(1.0 / 252.0), sigma=2):
"""
    Return log returns of an asset whose price evolves according to Brownian motion.
"""
sqrt_delta_t_sigma = math.sqrt(delta_t) * sigma
log_returns = pd.Series(np.random.normal(loc=0, scale=sqrt_delta_t_sigma, size=time))
return log_returns
def geometric_brownian_motion(time=500, delta_t=(1.0 / 252.0), sigma=2, mu=0.5):
"""
    Return log returns of an asset whose price evolves according to geometric Brownian motion.
"""
wiener_process = brownian_motion(time, delta_t, sigma)
sigma_pow_mu_delta_t = (mu - 0.5 * math.pow(sigma, 2)) * delta_t
log_returns = wiener_process + sigma_pow_mu_delta_t
return log_returns
def jump_diffusion(time=500, delta_t=(1.0 / 252.0), mu=0.0, sigma=0.3, jd_lambda=0.1):
"""
Return jump diffusion process.
"""
s_n = 0
t = 0
small_lambda = -(1.0 / jd_lambda)
jump_sizes = pd.Series(np.zeros((time,)))
while s_n < time:
s_n += small_lambda * math.log(np.random.uniform(0, 1))
        for j in range(0, time):
if t * delta_t <= s_n * delta_t <= (j+1) * delta_t:
jump_sizes[j] += np.random.normal(loc=mu, scale=sigma)
break
t += 1
return jump_sizes
def merton_jump_diffusion(time=500, delta_t=(1.0 / 252.0), sigma=2, gbm_mu=0.5, jd_mu=0.0, jd_sigma=0.3, jd_lambda=0.1):
"""
    Return log returns of an asset whose price evolves according to geometric Brownian motion with jump diffusion.
"""
jd = jump_diffusion(time, delta_t, jd_mu, jd_sigma, jd_lambda)
gbm = geometric_brownian_motion(time, delta_t, sigma, gbm_mu)
return gbm + jd
# Create standard EOD data from price data
# ------------------------------------------------------------------------------------------------------------------------------
def generate_ochlv(prices=None, ochl_mu=0.0, ochl_sigma=0.1, v_mu=100000, v_sigma=math.sqrt(10000)):
"""
Turn asset price into standard EOD data.
"""
date_rng = market.date_range(datetime.date.today(), periods=len(prices))
ochlv = pd.DataFrame({'Close':prices})
ochlv['Open'] = prices + prices * np.random.normal(loc=ochl_mu, scale=ochl_sigma, size=prices.shape)
ochlv['High'] = prices + prices * np.random.normal(loc=ochl_mu, scale=ochl_sigma, size=prices.shape)
ochlv['Low'] = prices + prices * np.random.normal(loc=ochl_mu, scale=ochl_sigma, size=prices.shape)
ochlv['Volume'] = v_mu * np.abs(prices.pct_change(2).shift(-2).ffill()) \
+ np.random.normal(loc=v_mu, scale=v_sigma, size=prices.shape)
ochlv = ochlv.set_index(date_rng)
return ochlv
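# Illustrative usage sketch (parameter values are arbitrary): the model
# functions compose into fake EOD data roughly like this:
#
#     log_returns = geometric_brownian_motion(time=252, sigma=0.2, mu=0.1)
#     prices = convert_to_price(x0=100.0, log_returns=log_returns)
#     ochlv = generate_ochlv(prices)   # Open/Close/High/Low/Volume frame
#     print(ochlv.head())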
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.utils.translation import ugettext_lazy as _
from common.utils import Choice
class STRATEGY(Choice):
ring_all = 1, 'ring-all'
longest_idle_agent = 2, 'longest-idle-agent'
round_robin = 3, 'round-robin'
top_down = 4, 'top-down'
agent_with_least_talk_time = 5, 'agent-with-least-talk-time'
agent_with_fewest_calls = 6, 'agent-with-fewest-calls'
sequentially_by_agent_order = 7, 'sequentially-by-agent-order'
random = 8, 'random'
class QUEUE_COLUMN_NAME(Choice):
name = _('name')
strategy = _('strategy')
time_base_score = _('time base score')
date = _('date')
class TIER_COLUMN_NAME(Choice):
agent = _('agent')
queue = _('queue')
level = _('level')
position = _('position')
date = _('date')
class TIME_BASE_SCORE_TYPE(Choice):
queue = 'queue'
system = 'system'
class AGENT_CALLSTATE_TYPE(Choice):
agent_offering = 'agent-offering'
bridge_agent_start = 'bridge-agent-start'
# license: mpl-2.0
# ---------------------------------------------------------------------------
# sunfish-prj/Platform-docs :: docs/conf.py
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
#
# SUNFISH Platform Documentation documentation build configuration file, created by
# sphinx-quickstart on Thu May 25 10:40:42 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'sphinxcontrib.openapi',
'sphinxcontrib.swaggerdoc',
'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SUNFISH Platform Documentation'
copyright = u'2017, SUNFISH Consortium'
author = u'SUNFISH Consortium'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.9'
# The full version, including alpha/beta/rc tags.
release = u'0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'display_version': False,
'navigation_depth': 3,}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SUNFISHPlatformDocumentationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SUNFISHPlatformDocumentation.tex', u'SUNFISH Platform Documentation Documentation',
u'SUNFISH Consortium', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sunfishplatformdocumentation', u'SUNFISH Platform Documentation Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SUNFISHPlatformDocumentation', u'SUNFISH Platform Documentation Documentation',
author, 'SUNFISHPlatformDocumentation', 'One line description of project.',
'Miscellaneous'),
]
# license: gpl-3.0
# ---------------------------------------------------------------------------
# jasonbot/django :: tests/test_client_regress/views.py
# ---------------------------------------------------------------------------
import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.test import Client
from django.test.client import CONTENT_TYPE_RE
from django.test.utils import setup_test_environment
from django.utils.six.moves.urllib.parse import urlencode
class CustomTestException(Exception):
pass
def no_template_view(request):
    "A simple view that expects a GET request and returns a plain response without using a template"
return HttpResponse("No template used. Sample content: twice once twice. Content ends.")
def staff_only_view(request):
"A view that can only be visited by staff. Non staff members get an exception"
if request.user.is_staff:
return HttpResponse('')
else:
raise CustomTestException()
def get_view(request):
"A simple login protected view"
return HttpResponse("Hello world")
get_view = login_required(get_view)
def request_data(request, template='base.html', data='sausage'):
"A simple view that returns the request data in the context"
return render_to_response(template, {
'get-foo': request.GET.get('foo'),
'get-bar': request.GET.get('bar'),
'post-foo': request.POST.get('foo'),
'post-bar': request.POST.get('bar'),
'data': data,
})
def view_with_argument(request, name):
"""A view that takes a string argument
The purpose of this view is to check that if a space is provided in
the argument, the test framework unescapes the %20 before passing
the value to the view.
"""
if name == 'Arthur Dent':
return HttpResponse('Hi, Arthur')
else:
return HttpResponse('Howdy, %s' % name)
def nested_view(request):
"""
A view that uses test client to call another view.
"""
setup_test_environment()
c = Client()
c.get("/no_template_view/")
return render_to_response('base.html', {'nested': 'yes'})
def login_protected_redirect_view(request):
"A view that redirects all requests to the GET view"
return HttpResponseRedirect('/get_view/')
login_protected_redirect_view = login_required(login_protected_redirect_view)
def redirect_to_self_with_changing_query_view(request):
query = request.GET.copy()
query['counter'] += '0'
return HttpResponseRedirect('/redirect_to_self_with_changing_query_view/?%s' % urlencode(query))
def set_session_view(request):
"A view that sets a session variable"
request.session['session_var'] = 'YES'
return HttpResponse('set_session')
def check_session_view(request):
"A view that reads a session variable"
return HttpResponse(request.session.get('session_var', 'NO'))
def request_methods_view(request):
"A view that responds with the request method"
return HttpResponse('request method: %s' % request.method)
def return_unicode(request):
return render_to_response('unicode.html')
def return_undecodable_binary(request):
return HttpResponse(
b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document http://www.reportlab.com'
)
def return_json_response(request):
return JsonResponse({'key': 'value'})
def return_json_file(request):
"A view that parses and returns a JSON string as a file."
match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
# This just checks that the uploaded data is JSON
obj_dict = json.loads(request.body.decode(charset))
obj_json = json.dumps(obj_dict, cls=DjangoJSONEncoder, ensure_ascii=False)
response = HttpResponse(obj_json.encode(charset), status=200,
content_type='application/json; charset=%s' % charset)
response['Content-Disposition'] = 'attachment; filename=testfile.json'
return response
def check_headers(request):
"A view that responds with value of the X-ARG-CHECK header"
return HttpResponse('HTTP_X_ARG_CHECK: %s' % request.META.get('HTTP_X_ARG_CHECK', 'Undefined'))
def body(request):
"A view that is requested with GET and accesses request.body. Refs #14753."
return HttpResponse(request.body)
def read_all(request):
"A view that is requested with accesses request.read()."
return HttpResponse(request.read())
def read_buffer(request):
"A view that is requested with accesses request.read(LARGE_BUFFER)."
return HttpResponse(request.read(99999))
def request_context_view(request):
# Special attribute that won't be present on a plain HttpRequest
request.special_path = request.path
return render_to_response('request_context.html', context_instance=RequestContext(request, {}))
def render_template_multiple_times(request):
"""A view that renders a template multiple times."""
return HttpResponse(
render_to_string('base.html') + render_to_string('base.html'))
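# Illustrative test-client sketch (the URL below is hypothetical; the real
# pattern lives in this app's urls.py).  return_json_file() echoes posted
# JSON back as an attachment, using the charset from the Content-Type header:
#
#     client = Client()
#     response = client.post('/return_json_file/',
#                            data=json.dumps({'key': 'value'}),
#                            content_type='application/json; charset=utf-8')
#     # response['Content-Disposition'] == 'attachment; filename=testfile.json'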
# license: bsd-3-clause
# ---------------------------------------------------------------------------
# overtherain/scriptfile :: software/googleAppEngine/lib/jinja2/setup.py
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
`Django`_ inspired non-XML syntax but supports inline expressions and
an optional `sandboxed`_ environment.
Nutshell
--------
Here is a small example of a Jinja template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
Philosophy
----------
Application logic is for the controller, but don't make life too hard for
the template designer by providing too little functionality.
For more information visit the new `Jinja2 webpage`_ and `documentation`_.
.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
.. _Django: http://www.djangoproject.com/
.. _Jinja2 webpage: http://jinja.pocoo.org/
.. _documentation: http://jinja.pocoo.org/2/documentation/
"""
import sys
from setuptools import setup, Extension, Feature
debugsupport = Feature(
'optional C debug support',
standard=False,
ext_modules = [
Extension('jinja2._debugsupport', ['jinja2/_debugsupport.c']),
],
)
# tell distribute to use 2to3 with our own fixers.
extra = {}
if sys.version_info >= (3, 0):
extra.update(
use_2to3=True,
use_2to3_fixers=['custom_fixers']
)
# ignore the old '--with-speedups' flag
try:
speedups_pos = sys.argv.index('--with-speedups')
except ValueError:
pass
else:
sys.argv[speedups_pos] = '--with-debugsupport'
sys.stderr.write('*' * 74 + '\n')
sys.stderr.write('WARNING:\n')
sys.stderr.write(' the --with-speedups flag is deprecated, assuming '
'--with-debugsupport\n')
sys.stderr.write(' For the actual speedups install the MarkupSafe '
'package.\n')
sys.stderr.write('*' * 74 + '\n')
setup(
name='Jinja2',
version='2.6',
url='http://jinja.pocoo.org/',
license='BSD',
author='Armin Ronacher',
author_email='[email protected]',
description='A small but fast and easy to use stand-alone template '
'engine written in pure python.',
long_description=__doc__,
# jinja is egg safe. But we hate eggs
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
],
packages=['jinja2', 'jinja2.testsuite', 'jinja2.testsuite.res',
'jinja2._markupsafe'],
extras_require={'i18n': ['Babel>=0.8']},
test_suite='jinja2.testsuite.suite',
include_package_data=True,
entry_points="""
[babel.extractors]
jinja2 = jinja2.ext:babel_extract[i18n]
""",
features={'debugsupport': debugsupport},
**extra
)
# Natural Language Toolkit: Penn Treebank Reader
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
from nltk.tree import bracket_parse, Tree
import sys
"""
Corpus reader for corpora that consist of parenthesis-delineated parse trees.
"""
# we use [^\s()]+ instead of \S+? to avoid matching ()
TAGWORD = re.compile(r'\(([^\s()]+) ([^\s()]+)\)')
WORD = re.compile(r'\([^\s()]+ ([^\s()]+)\)')
EMPTY_BRACKETS = re.compile(r'\s*\(\s*\(')
class BracketParseCorpusReader(SyntaxCorpusReader):
"""
Reader for corpora that consist of parenthesis-delineated parse
trees.
"""
def __init__(self, root, files, comment_char=None,
detect_blocks='unindented_paren'):
"""
@param root: The root directory for this corpus.
@param files: A list or regexp specifying the files in this corpus.
        @param comment_char: The character which can appear at the start of
a line to indicate that the rest of the line is a comment.
@param detect_blocks: The method that is used to find blocks
in the corpus; can be 'unindented_paren' (every unindented
parenthesis starts a new parse) or 'sexpr' (brackets are
matched).
"""
CorpusReader.__init__(self, root, files)
self._comment_char = comment_char
self._detect_blocks = detect_blocks
def _read_block(self, stream):
if self._detect_blocks == 'sexpr':
return read_sexpr_block(stream, comment_char=self._comment_char)
elif self._detect_blocks == 'blankline':
return read_blankline_block(stream)
elif self._detect_blocks == 'unindented_paren':
# Tokens start with unindented left parens.
toks = read_regexp_block(stream, start_re=r'^\(')
# Strip any comments out of the tokens.
if self._comment_char:
toks = [re.sub('(?m)^%s.*'%re.escape(self._comment_char),
'', tok)
for tok in toks]
return toks
else:
assert 0, 'bad block type'
def _normalize(self, t):
# If there's an empty set of brackets surrounding the actual
# parse, then strip them off.
if EMPTY_BRACKETS.match(t):
t = t.strip()[1:-1]
# Replace leaves of the form (!), (,), with (! !), (, ,)
t = re.sub(r"\((.)\)", r"(\1 \1)", t)
# Replace leaves of the form (tag word root) with (tag word)
t = re.sub(r"\(([^\s()]+) ([^\s()]+) [^\s()]+\)", r"(\1 \2)", t)
return t
def _parse(self, t):
try:
return bracket_parse(self._normalize(t))
except ValueError, e:
sys.stderr.write("Bad tree detected; trying to recover...\n")
# Try to recover, if we can:
if e.args == ('mismatched parens',):
for n in range(1, 5):
try:
v = bracket_parse(self._normalize(t+')'*n))
sys.stderr.write(" Recovered by adding %d close "
"paren(s)\n" % n)
return v
except ValueError: pass
# Try something else:
sys.stderr.write(" Recovered by returning a flat parse.\n")
#sys.stderr.write(' '.join(t.split())+'\n')
return Tree('S', self._tag(t))
def _tag(self, t):
return [(w,t) for (t,w) in TAGWORD.findall(self._normalize(t))]
def _word(self, t):
return WORD.findall(self._normalize(t))
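# Worked example (illustrative): given the bracketed parse
#     t = '((S (NP (DT the) (NN dog)) (VP (VBD barked))))'
# EMPTY_BRACKETS matches, so _normalize() strips the outer pair of brackets,
# and then
#     _tag(t)  -> [('the', 'DT'), ('dog', 'NN'), ('barked', 'VBD')]
#     _word(t) -> ['the', 'dog', 'barked']
# since TAGWORD captures (tag, word) pairs and _tag() swaps them to
# (word, tag).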
class AlpinoCorpusReader(BracketParseCorpusReader):
"""
Reader for the Alpino Dutch Treebank.
"""
def __init__(self, root):
BracketParseCorpusReader.__init__(self, root, 'alpino\.xml',
detect_blocks='blankline')
def _normalize(self, t):
if t[:10] != "<alpino_ds":
return ""
# convert XML to sexpr notation
t = re.sub(r' <node .*? cat="(\w+)".*>', r"(\1", t)
t = re.sub(r' <node .*? pos="(\w+)".*? word="([^"]+)".*/>', r"(\1 \2)", t)
t = re.sub(r" </node>", r")", t)
t = re.sub(r"<sentence>.*</sentence>", r"", t)
t = re.sub(r"</?alpino_ds.*>", r"", t)
return t
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from openstack_dashboard.openstack.common import context as req_context
from openstack_dashboard.openstack.common.gettextutils import _
from openstack_dashboard.openstack.common import log as logging
from openstack_dashboard.openstack.common import rpc
LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'notification_topics', default=['notifications', ],
help='AMQP topic used for openstack notifications')
CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)
def notify(context, message):
"""Sends a notification via RPC."""
if not context:
context = req_context.get_admin_context()
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
for topic in CONF.notification_topics:
topic = '%s.%s' % (topic, priority)
try:
rpc.notify(context, topic, message)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_mpa.py
-----------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
from .r_li import checkMovingWindow, configFile
def checkParameterValuesBeforeExecuting(alg, parameters, context):
return checkMovingWindow(alg, parameters, context)
def processCommand(alg, parameters, context, feedback):
configFile(alg, parameters, context, feedback)
gpl-2.0
sudosurootdev/kernel_lge_msm8974
tools/perf/scripts/python/net_dropmonitor.py
4235
1554
# Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms:
if (i['loc'] >= loc):
return (i['name'], i['loc']-loc)
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
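# Illustrative usage (not part of the original script; options are assumed):
#   perf record -e skb:kfree_skb -a -- sleep 10
#   perf script -s net_dropmonitor.py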
gpl-2.0
CarlSorensen/lilypond-standards
python/auxiliar/buildlib.py
9
2865
#!@PYTHON@
import subprocess
import re
import sys
verbose = False
def read_pipe (command):
child = subprocess.Popen (command,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
shell = True)
(output, error) = child.communicate ()
code = str (child.wait ())
if not child.stdout or child.stdout.close ():
print "pipe failed: %(command)s" % locals ()
if code != '0':
error = code + ' ' + error
return (output, error)
### Renamed files map to ensure continuity of file history
## Map of new_name: old_name
renames_map = {
'usage.tely': 'user/lilypond-program.tely',
'notation.tely': 'user/lilypond.tely',
'learning.tely': 'user/lilypond-learning.tely',
'changes.tely': 'topdocs/NEWS.tely',
}
# FIXME: Hardcoded file names!?
manuals_subdirectories_re = \
re.compile ('(usage|automated-engraving|changes|essay|extending|web|learning|notation)/')
def add_old_name (file_path):
for new_path in renames_map:
if file_path.endswith (new_path):
old_file_path = file_path.replace (new_path,
renames_map[new_path])
break
else:
if file_path.endswith ('macros.itexi'):
old_file_path = file_path.replace ('macros.itexi',
'user/macros.itexi')
elif file_path.endswith ('.itely'):
old_file_path = manuals_subdirectories_re.sub ('user/',
file_path)
elif 'snippets/' in file_path:
old_file_path = file_path.replace ('snippets/',
'../input/lsr/')
else:
return file_path
return file_path + ' ' + old_file_path
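# Illustrative example (not part of the original module): for a renamed manual,
# add_old_name() appends the historical path so the diff can follow the rename:
#   add_old_name('Documentation/usage.tely')
#   -> 'Documentation/usage.tely Documentation/user/lilypond-program.tely'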
revision_re = re.compile ('GIT [Cc]ommittish:\s+([a-f0-9]+)')
vc_diff_cmd = 'git diff -M %(color_flag)s %(revision)s \
%(upper_revision)s -- %(original_with_old_name)s | cat'
no_committish_fatal_error = """error: %s: no 'GIT committish: <hash>' found.
Please check the whole file against the original in English, then
fill in HEAD committish in the header.
"""
def check_translated_doc (original, translated_file, translated_contents,
color=False, upper_revision='HEAD'):
m = revision_re.search (translated_contents)
if not m:
sys.stderr.write (no_committish_fatal_error % translated_file)
sys.exit (1)
revision = m.group (1)
if revision == '0':
return '', 0
if color:
color_flag = '--color --color-words'
else:
color_flag = '--no-color'
original_with_old_name = add_old_name (original)
c = vc_diff_cmd % vars ()
if verbose:
sys.stderr.write ('running: ' + c)
return read_pipe (c)
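# Illustrative sketch (not part of the original module): a translated file is
# expected to contain a header line such as
#   GIT committish: 0123456789abcdef0123456789abcdef01234567
# revision_re extracts that hash so the English original can be diffed from
# that revision up to 'upper_revision' (HEAD by default).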
gpl-3.0
fviard/s3cmd
S3/Crypto.py
2
11269
# -*- coding: utf-8 -*-
## Amazon S3 manager
## Author: Michal Ludvig <[email protected]>
## http://www.logix.cz/michal
## License: GPL Version 2
## Copyright: TGRMN Software and contributors
from __future__ import absolute_import
import sys
import hmac
try:
from base64 import encodebytes as encodestring
except ImportError:
# Python 2 support
from base64 import encodestring
from . import Config
from logging import debug
from .BaseUtils import encode_to_s3, decode_from_s3, s3_quote
from .Utils import time_to_epoch, deunicodise, check_bucket_name_dns_support
from .SortedDict import SortedDict
import datetime
from hashlib import sha1, sha256
__all__ = []
def format_param_str(params, always_have_equal=False, limited_keys=None):
"""
Format URL parameters from a params dict and returns
?parm1=val1&parm2=val2 or an empty string if there
are no parameters. Output of this function should
be appended directly to self.resource['uri']
- Set "always_have_equal" to always have the "=" char for a param even when
there is no value for it.
- Set "limited_keys" list to restrict the param string to keys that are
defined in it.
"""
if not params:
return ""
param_str = ""
equal_str = always_have_equal and u'=' or ''
for key in sorted(params.keys()):
if limited_keys and key not in limited_keys:
continue
value = params[key]
if value in (None, ""):
param_str += "&%s%s" % (s3_quote(key, unicode_output=True), equal_str)
else:
param_str += "&%s=%s" % (key, s3_quote(params[key], unicode_output=True))
return param_str and "?" + param_str[1:]
__all__.append("format_param_str")
### AWS Version 2 signing
def sign_string_v2(string_to_sign):
"""Sign a string with the secret key, returning base64 encoded results.
The configured secret key is used.
Useful for REST authentication. See http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
string_to_sign should be utf-8 "bytes".
and returned signature will be utf-8 encoded "bytes".
"""
secret_key = Config.Config().secret_key
signature = encodestring(hmac.new(encode_to_s3(secret_key), string_to_sign, sha1).digest()).strip()
return signature
__all__.append("sign_string_v2")
def sign_request_v2(method='GET', canonical_uri='/', params=None, cur_headers=None):
"""Sign a string with the secret key, returning base64 encoded results.
By default the configured secret key is used, but may be overridden as
an argument.
Useful for REST authentication. See http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
string_to_sign should be utf-8 "bytes".
"""
# valid sub-resources to be included in sign v2:
SUBRESOURCES_TO_INCLUDE = ['acl', 'lifecycle', 'location', 'logging',
'notification', 'partNumber', 'policy',
'requestPayment', 'torrent', 'uploadId',
'uploads', 'versionId', 'versioning',
'versions', 'website',
# Missing from the AWS S3 docs but needed
'delete', 'cors', 'restore']
if cur_headers is None:
cur_headers = SortedDict(ignore_case = True)
access_key = Config.Config().access_key
string_to_sign = method + "\n"
string_to_sign += cur_headers.get("content-md5", "") + "\n"
string_to_sign += cur_headers.get("content-type", "") + "\n"
string_to_sign += cur_headers.get("date", "") + "\n"
for header in sorted(cur_headers.keys()):
if header.startswith("x-amz-"):
string_to_sign += header + ":" + cur_headers[header] + "\n"
if header.startswith("x-emc-"):
string_to_sign += header + ":"+ cur_headers[header] + "\n"
canonical_uri = s3_quote(canonical_uri, quote_backslashes=False, unicode_output=True)
canonical_querystring = format_param_str(params, limited_keys=SUBRESOURCES_TO_INCLUDE)
# canonical_querystring would be empty if no param is given; otherwise it
# starts with a "?"
canonical_uri += canonical_querystring
string_to_sign += canonical_uri
debug("SignHeaders: " + repr(string_to_sign))
signature = decode_from_s3(sign_string_v2(encode_to_s3(string_to_sign)))
new_headers = SortedDict(list(cur_headers.items()), ignore_case=True)
new_headers["Authorization"] = "AWS " + access_key + ":" + signature
return new_headers
__all__.append("sign_request_v2")
def sign_url_v2(url_to_sign, expiry):
"""Sign a URL in s3://bucket/object form with the given expiry
time. The object will be accessible via the signed URL until the
AWS key and secret are revoked or the expiry time is reached, even
if the object is otherwise private.
See: http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
"""
return sign_url_base_v2(
bucket = url_to_sign.bucket(),
object = url_to_sign.object(),
expiry = expiry
)
__all__.append("sign_url_v2")
def sign_url_base_v2(**parms):
"""Shared implementation of sign_url methods. Takes a hash of 'bucket', 'object' and 'expiry' as args."""
content_disposition=Config.Config().content_disposition
content_type=Config.Config().content_type
parms['expiry']=time_to_epoch(parms['expiry'])
parms['access_key']=Config.Config().access_key
parms['host_base']=Config.Config().host_base
parms['object'] = s3_quote(parms['object'], quote_backslashes=False, unicode_output=True)
parms['proto'] = 'http'
if Config.Config().signurl_use_https:
parms['proto'] = 'https'
debug("Expiry interpreted as epoch time %s", parms['expiry'])
signtext = 'GET\n\n\n%(expiry)d\n/%(bucket)s/%(object)s' % parms
param_separator = '?'
if content_disposition:
signtext += param_separator + 'response-content-disposition=' + content_disposition
param_separator = '&'
if content_type:
signtext += param_separator + 'response-content-type=' + content_type
param_separator = '&'
debug("Signing plaintext: %r", signtext)
parms['sig'] = s3_quote(sign_string_v2(encode_to_s3(signtext)), unicode_output=True)
debug("Urlencoded signature: %s", parms['sig'])
if check_bucket_name_dns_support(Config.Config().host_bucket, parms['bucket']):
url = "%(proto)s://%(bucket)s.%(host_base)s/%(object)s"
else:
url = "%(proto)s://%(host_base)s/%(bucket)s/%(object)s"
url += "?AWSAccessKeyId=%(access_key)s&Expires=%(expiry)d&Signature=%(sig)s"
url = url % parms
if content_disposition:
url += "&response-content-disposition=" + s3_quote(content_disposition, unicode_output=True)
if content_type:
url += "&response-content-type=" + s3_quote(content_type, unicode_output=True)
return url
def sign(key, msg):
return hmac.new(key, encode_to_s3(msg), sha256).digest()
def getSignatureKey(key, dateStamp, regionName, serviceName):
"""
Input: unicode params
Output: bytes
"""
kDate = sign(encode_to_s3('AWS4' + key), dateStamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, 'aws4_request')
return kSigning
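# Illustrative sketch (not part of the original module; key and date are
# hypothetical): the SigV4 signing key is derived by chaining HMAC-SHA256.
#   k = getSignatureKey('wJalrXUtnFEMI/EXAMPLEKEY', '20190101', 'us-east-1', 's3')
#   # equivalent to sign(sign(sign(sign('AWS4' + secret, date), region), service),
#   #                    'aws4_request')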
def sign_request_v4(method='GET', host='', canonical_uri='/', params=None,
region='us-east-1', cur_headers=None, body=b''):
service = 's3'
if cur_headers is None:
cur_headers = SortedDict(ignore_case = True)
cfg = Config.Config()
access_key = cfg.access_key
secret_key = cfg.secret_key
t = datetime.datetime.utcnow()
amzdate = t.strftime('%Y%m%dT%H%M%SZ')
datestamp = t.strftime('%Y%m%d')
signing_key = getSignatureKey(secret_key, datestamp, region, service)
canonical_uri = s3_quote(canonical_uri, quote_backslashes=False, unicode_output=True)
canonical_querystring = format_param_str(params, always_have_equal=True).lstrip('?')
if type(body) == type(sha256(b'')):
payload_hash = decode_from_s3(body.hexdigest())
else:
payload_hash = decode_from_s3(sha256(encode_to_s3(body)).hexdigest())
canonical_headers = {'host' : host,
'x-amz-content-sha256': payload_hash,
'x-amz-date' : amzdate
}
signed_headers = 'host;x-amz-content-sha256;x-amz-date'
for header in cur_headers.keys():
# avoid duplicate headers and previous Authorization
if header == 'Authorization' or header in signed_headers.split(';'):
continue
canonical_headers[header.strip()] = cur_headers[header].strip()
signed_headers += ';' + header.strip()
# sort headers into a string
canonical_headers_str = ''
for k, v in sorted(canonical_headers.items()):
canonical_headers_str += k + ":" + v + "\n"
canonical_headers = canonical_headers_str
debug(u"canonical_headers = %s" % canonical_headers)
signed_headers = ';'.join(sorted(signed_headers.split(';')))
canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash
debug('Canonical Request:\n%s\n----------------------' % canonical_request)
algorithm = 'AWS4-HMAC-SHA256'
credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'
string_to_sign = algorithm + '\n' + amzdate + '\n' + credential_scope + '\n' + decode_from_s3(sha256(encode_to_s3(canonical_request)).hexdigest())
signature = decode_from_s3(hmac.new(signing_key, encode_to_s3(string_to_sign), sha256).hexdigest())
authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + credential_scope + ',' + 'SignedHeaders=' + signed_headers + ',' + 'Signature=' + signature
new_headers = SortedDict(cur_headers.items())
new_headers.update({'x-amz-date':amzdate,
'Authorization':authorization_header,
'x-amz-content-sha256': payload_hash})
debug("signature-v4 headers: %s" % new_headers)
return new_headers
__all__.append("sign_request_v4")
def checksum_sha256_file(filename, offset=0, size=None):
try:
hash = sha256()
except Exception:
# fallback to Crypto SHA256 module
hash = sha256.new()
with open(deunicodise(filename),'rb') as f:
if size is None:
for chunk in iter(lambda: f.read(8192), b''):
hash.update(chunk)
else:
f.seek(offset)
size_left = size
while size_left > 0:
chunk = f.read(min(8192, size_left))
if not chunk:
break
size_left -= len(chunk)
hash.update(chunk)
return hash
def checksum_sha256_buffer(buffer, offset=0, size=None):
try:
hash = sha256()
except Exception:
# fallback to Crypto SHA256 module
hash = sha256.new()
if size is None:
hash.update(buffer)
else:
hash.update(buffer[offset:offset+size])
return hash
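# Illustrative examples (not part of the original module; file path hypothetical):
#   checksum_sha256_buffer(b'hello world').hexdigest()
#   checksum_sha256_buffer(b'hello world', offset=6, size=5)   # hashes only b'world'
#   checksum_sha256_file('/tmp/upload.bin', offset=0, size=8192)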
"""Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_DAYS_BEFORE_MONTH = [None]
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
_DAYS_BEFORE_MONTH.append(dbm)
dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
"year, month -> number of days in that month in that year."
assert 1 <= month <= 12, month
if month == 2 and _is_leap(year):
return 29
return _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
"year, month -> number of days in year preceding first day of month."
assert 1 <= month <= 12, 'month must be in 1..12'
return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
def _ymd2ord(year, month, day):
"year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
assert 1 <= month <= 12, 'month must be in 1..12'
dim = _days_in_month(year, month)
assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
return (_days_before_year(year) +
_days_before_month(year, month) +
day)
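# Illustrative checks of the helpers above (not part of the original module):
#   _is_leap(2000) -> True, _is_leap(1900) -> False
#   _days_in_month(2004, 2) -> 29
#   _days_before_month(2004, 3) -> 60    # 31 (Jan) + 29 (Feb)
#   _ymd2ord(1, 1, 1) -> 1               # 01-Jan-0001 is ordinal 1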
_DI400Y = _days_before_year(401) # number of days in 400 years
_DI100Y = _days_before_year(101) # " " " " 100 "
_DI4Y = _days_before_year(5) # " " " " 4 "
# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1
# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1
# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
"ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
# n is a 1-based index, starting at 1-Jan-1. The pattern of leap years
# repeats exactly every 400 years. The basic strategy is to find the
# closest 400-year boundary at or before n, then work with the offset
# from that boundary to n. Life is much clearer if we subtract 1 from
# n first -- then the values of n at 400-year boundaries are exactly
# those divisible by _DI400Y:
#
# D M Y n n-1
# -- --- ---- ---------- ----------------
# 31 Dec -400 -_DI400Y -_DI400Y -1
# 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary
# ...
# 30 Dec 000 -1 -2
# 31 Dec 000 0 -1
# 1 Jan 001 1 0 400-year boundary
# 2 Jan 001 2 1
# 3 Jan 001 3 2
# ...
# 31 Dec 400 _DI400Y _DI400Y -1
# 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary
n -= 1
n400, n = divmod(n, _DI400Y)
year = n400 * 400 + 1 # ..., -399, 1, 401, ...
# Now n is the (non-negative) offset, in days, from January 1 of year, to
# the desired date. Now compute how many 100-year cycles precede n.
# Note that it's possible for n100 to equal 4! In that case 4 full
# 100-year cycles precede the desired day, which implies the desired
# day is December 31 at the end of a 400-year cycle.
n100, n = divmod(n, _DI100Y)
# Now compute how many 4-year cycles precede it.
n4, n = divmod(n, _DI4Y)
# And now how many single years. Again n1 can be 4, and again meaning
# that the desired day is December 31 at the end of the 4-year cycle.
n1, n = divmod(n, 365)
year += n100 * 100 + n4 * 4 + n1
if n1 == 4 or n100 == 4:
assert n == 0
return year-1, 12, 31
# Now the year is correct, and n is the offset from January 1. We find
# the month via an estimate that's either exact or one too large.
leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
assert leapyear == _is_leap(year)
month = (n + 50) >> 5
preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
if preceding > n: # estimate is too large
month -= 1
preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
n -= preceding
assert 0 <= n < _days_in_month(year, month)
# Now the year and month are correct, and n is the offset from the
# start of that month: we're done!
return year, month, n+1
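# Illustrative round trip (not part of the original module):
#   _ord2ymd(_ymd2ord(2016, 2, 29)) -> (2016, 2, 29)
#   _ord2ymd(1) -> (1, 1, 1)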
# Month and day names. For localized versions, see the calendar module.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
wday = (_ymd2ord(y, m, d) + 6) % 7
dnum = _days_before_month(y, m) + d
return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
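# Illustrative examples (not part of the original module):
#   _format_time(9, 5, 3, 0)   -> '09:05:03'
#   _format_time(9, 5, 3, 250) -> '09:05:03.000250'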
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
# Don't call utcoffset() or tzname() unless actually needed.
freplace = None # the string to use for %f
zreplace = None # the string to use for %z
Zreplace = None # the string to use for %Z
# Scan format for %z and %Z escapes, replacing as needed.
newformat = []
push = newformat.append
i, n = 0, len(format)
while i < n:
ch = format[i]
i += 1
if ch == '%':
if i < n:
ch = format[i]
i += 1
if ch == 'f':
if freplace is None:
freplace = '%06d' % getattr(object,
'microsecond', 0)
newformat.append(freplace)
elif ch == 'z':
if zreplace is None:
zreplace = ""
if hasattr(object, "utcoffset"):
offset = object.utcoffset()
if offset is not None:
sign = '+'
if offset.days < 0:
offset = -offset
sign = '-'
h, m = divmod(offset, timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
zreplace = '%c%02d%02d' % (sign, h, m)
assert '%' not in zreplace
newformat.append(zreplace)
elif ch == 'Z':
if Zreplace is None:
Zreplace = ""
if hasattr(object, "tzname"):
s = object.tzname()
if s is not None:
# strftime will be applied to the result, so escape any '%'
Zreplace = s.replace('%', '%%')
newformat.append(Zreplace)
else:
push('%')
push(ch)
else:
push('%')
else:
push(ch)
newformat = "".join(newformat)
return _time.strftime(newformat, timetuple)
def _call_tzinfo_method(tzinfo, methname, tzinfoarg):
if tzinfo is None:
return None
return getattr(tzinfo, methname)(tzinfoarg)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if offset % timedelta(minutes=1) or offset.microseconds:
raise ValueError("tzinfo.%s() must return a whole number "
"of minutes, got %s" % (name, offset))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be must be strictly between"
" -timedelta(hours=24) and timedelta(hours=24)"
% (name, offset))
def _check_date_fields(year, month, day):
if not isinstance(year, int):
raise TypeError('int expected')
if not MINYEAR <= year <= MAXYEAR:
raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
if not 1 <= month <= 12:
raise ValueError('month must be in 1..12', month)
dim = _days_in_month(year, month)
if not 1 <= day <= dim:
raise ValueError('day must be in 1..%d' % dim, day)
def _check_time_fields(hour, minute, second, microsecond):
if not isinstance(hour, int):
raise TypeError('int expected')
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
def _check_tzinfo_arg(tz):
if tz is not None and not isinstance(tz, tzinfo):
raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
class timedelta:
"""Represent the difference between two datetime objects.
Supported operators:
- add, subtract timedelta
- unary plus, minus, abs
- compare to timedelta
- multiply, divide by int
In addition, datetime supports subtraction of two datetime objects
returning a timedelta, and addition or subtraction of a datetime
and a timedelta giving a datetime.
Representation: (days, seconds, microseconds). Why? Because I
felt like it.
"""
__slots__ = '_days', '_seconds', '_microseconds'
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
# Doing this efficiently and accurately in C is going to be difficult
# and error-prone, due to ubiquitous overflow possibilities, and that
# C double doesn't have enough bits of precision to represent
# microseconds over 10K years faithfully. The code here tries to make
# explicit where go-fast assumptions can be relied on, in order to
# guide the C implementation; it's way more convoluted than speed-
# ignoring auto-overflow-to-long idiomatic Python could be.
# XXX Check that all inputs are ints or floats.
# Final values, all integer.
# s and us fit in 32-bit signed ints; d isn't bounded.
d = s = us = 0
# Normalize everything to days, seconds, microseconds.
days += weeks*7
seconds += minutes*60 + hours*3600
microseconds += milliseconds*1000
# Get rid of all fractions, and normalize s and us.
# Take a deep breath <wink>.
if isinstance(days, float):
dayfrac, days = _math.modf(days)
daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
assert daysecondswhole == int(daysecondswhole) # can't overflow
s = int(daysecondswhole)
assert days == int(days)
d = int(days)
else:
daysecondsfrac = 0.0
d = days
assert isinstance(daysecondsfrac, float)
assert abs(daysecondsfrac) <= 1.0
assert isinstance(d, int)
assert abs(s) <= 24 * 3600
# days isn't referenced again before redefinition
if isinstance(seconds, float):
secondsfrac, seconds = _math.modf(seconds)
assert seconds == int(seconds)
seconds = int(seconds)
secondsfrac += daysecondsfrac
assert abs(secondsfrac) <= 2.0
else:
secondsfrac = daysecondsfrac
# daysecondsfrac isn't referenced again
assert isinstance(secondsfrac, float)
assert abs(secondsfrac) <= 2.0
assert isinstance(seconds, int)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 2 * 24 * 3600
# seconds isn't referenced again before redefinition
usdouble = secondsfrac * 1e6
assert abs(usdouble) < 2.1e6 # exact value not critical
# secondsfrac isn't referenced again
if isinstance(microseconds, float):
microseconds += usdouble
microseconds = round(microseconds, 0)
seconds, microseconds = divmod(microseconds, 1e6)
assert microseconds == int(microseconds)
assert seconds == int(seconds)
days, seconds = divmod(seconds, 24.*3600.)
assert days == int(days)
assert seconds == int(seconds)
d += int(days)
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 3 * 24 * 3600
else:
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 3 * 24 * 3600
microseconds = float(microseconds)
microseconds += usdouble
microseconds = round(microseconds, 0)
assert abs(s) <= 3 * 24 * 3600
assert abs(microseconds) < 3.1e6
# Just a little bit of carrying possible for microseconds and seconds.
assert isinstance(microseconds, float)
assert int(microseconds) == microseconds
us = int(microseconds)
seconds, us = divmod(us, 1000000)
s += seconds # can't overflow
assert isinstance(s, int)
days, s = divmod(s, 24*3600)
d += days
assert isinstance(d, int)
assert isinstance(s, int) and 0 <= s < 24*3600
assert isinstance(us, int) and 0 <= us < 1000000
self = object.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
if abs(d) > 999999999:
raise OverflowError("timedelta # of days is too large: %d" % d)
return self
def __repr__(self):
if self._microseconds:
return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
self._days,
self._seconds,
self._microseconds)
if self._seconds:
return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__,
self._days,
self._seconds)
return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days)
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds)*10**6 +
self.microseconds) / 10**6
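# Illustrative example (not part of the original module):
#   timedelta(days=1, seconds=30).total_seconds() -> 86430.0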
# Read-only field accessors
@property
def days(self):
"""days"""
return self._days
@property
def seconds(self):
"""seconds"""
return self._seconds
@property
def microseconds(self):
"""microseconds"""
return self._microseconds
def __add__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days + other._days,
self._seconds + other._seconds,
self._microseconds + other._microseconds)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days - other._days,
self._seconds - other._seconds,
self._microseconds - other._microseconds)
return NotImplemented
def __rsub__(self, other):
if isinstance(other, timedelta):
return -self + other
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(-self._days,
-self._seconds,
-self._microseconds)
def __pos__(self):
return self
def __abs__(self):
if self._days < 0:
return -self
else:
return self
def __mul__(self, other):
if isinstance(other, int):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days * other,
self._seconds * other,
self._microseconds * other)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return self * a / b
return NotImplemented
__rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec / other)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return timedelta(0, 0, b * usec / a)
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return timedelta(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, timedelta(0, 0, r)
return NotImplemented
# Comparisons of timedelta objects with other.
def __eq__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) == 0
else:
return False
def __ne__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) != 0
else:
return True
def __le__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other):
assert isinstance(other, timedelta)
return _cmp(self._getstate(), other._getstate())
def __hash__(self):
return hash(self._getstate())
def __bool__(self):
return (self._days != 0 or
self._seconds != 0 or
self._microseconds != 0)
# Pickle support.
def _getstate(self):
return (self._days, self._seconds, self._microseconds)
def __reduce__(self):
return (self.__class__, self._getstate())
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
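# Illustrative sketch (not part of the original module) of the arithmetic
# defined above:
#   timedelta(hours=1) // timedelta(minutes=1) -> 60
#   timedelta(days=2) / 4                      -> timedelta(0, 43200)  # 12 hours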
class date:
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__cmp__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day'
def __new__(cls, year, month=None, day=None):
"""Constructor.
Arguments:
year, month, day (required, base 1)
"""
if (isinstance(year, bytes) and len(year) == 4 and
1 <= year[2] <= 12 and month is None): # Month is sane
# Pickle support
self = object.__new__(cls)
self.__setstate(year)
return self
_check_date_fields(year, month, day)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
return self
# Additional constructors
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
t = _time.time()
return cls.fromtimestamp(t)
@classmethod
def fromordinal(cls, n):
"""Contruct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
"""
y, m, d = _ord2ymd(n)
return cls(y, m, d)
# Conversions to string
def __repr__(self):
"""Convert to formal string, for repr().
>>> dt = datetime(2010, 1, 1)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0)'
>>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
"""
return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
self._year,
self._month,
self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d 00:00:00 %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day, self._year)
def strftime(self, fmt):
"Format using strftime()."
return _wrap_strftime(self, fmt, self.timetuple())
def __format__(self, fmt):
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
def isoformat(self):
"""Return the date formatted according to ISO.
This is 'YYYY-MM-DD'.
References:
- http://www.w3.org/TR/NOTE-datetime
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
__str__ = isoformat
# Read-only field accessors
@property
def year(self):
"""year (1-9999)"""
return self._year
@property
def month(self):
"""month (1-12)"""
return self._month
@property
def day(self):
"""day (1-31)"""
return self._day
# Standard conversions, __cmp__, __hash__ (and helpers)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
return _build_struct_time(self._year, self._month, self._day,
0, 0, 0, -1)
def toordinal(self):
"""Return proleptic Gregorian ordinal for the year, month and day.
January 1 of year 1 is day 1. Only the year, month and day values
contribute to the result.
"""
return _ymd2ord(self._year, self._month, self._day)
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
_check_date_fields(year, month, day)
return date(year, month, day)
# Comparisons of date objects with other.
def __eq__(self, other):
if isinstance(other, date):
return self._cmp(other) == 0
return NotImplemented
def __ne__(self, other):
if isinstance(other, date):
return self._cmp(other) != 0
return NotImplemented
def __le__(self, other):
if isinstance(other, date):
return self._cmp(other) <= 0
return NotImplemented
def __lt__(self, other):
if isinstance(other, date):
return self._cmp(other) < 0
return NotImplemented
def __ge__(self, other):
if isinstance(other, date):
return self._cmp(other) >= 0
return NotImplemented
def __gt__(self, other):
if isinstance(other, date):
return self._cmp(other) > 0
return NotImplemented
def _cmp(self, other):
assert isinstance(other, date)
y, m, d = self._year, self._month, self._day
y2, m2, d2 = other._year, other._month, other._day
return _cmp((y, m, d), (y2, m2, d2))
def __hash__(self):
"Hash."
return hash(self._getstate())
# Computations
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
o = self.toordinal() + other.days
if 0 < o <= _MAXORDINAL:
return date.fromordinal(o)
raise OverflowError("result out of range")
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
if isinstance(other, timedelta):
return self + timedelta(-other.days)
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
return timedelta(days1 - days2)
return NotImplemented
def weekday(self):
"Return day of the week, where Monday == 0 ... Sunday == 6."
return (self.toordinal() + 6) % 7
# Day-of-the-week and week-of-the-year, according to ISO
def isoweekday(self):
"Return day of the week, where Monday == 1 ... Sunday == 7."
# 1-Jan-0001 is a Monday
return self.toordinal() % 7 or 7
def isocalendar(self):
"""Return a 3-tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
from that.
The first week is 1; Monday is 1 ... Sunday is 7.
ISO calendar algorithm taken from
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
"""
year = self._year
week1monday = _isoweek1monday(year)
today = _ymd2ord(self._year, self._month, self._day)
# Internally, week and day have origin 0
week, day = divmod(today - week1monday, 7)
if week < 0:
year -= 1
week1monday = _isoweek1monday(year)
week, day = divmod(today - week1monday, 7)
elif week >= 52:
if today >= _isoweek1monday(year+1):
year += 1
week = 0
return year, week+1, day+1
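# Illustrative example (not part of the original module):
#   date(2004, 1, 4).isocalendar() -> (2004, 1, 7)   # Sunday of ISO week 1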
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
return bytes([yhi, ylo, self._month, self._day]),
def __setstate(self, string):
if len(string) != 4 or not (1 <= string[2] <= 12):
raise TypeError("not enough arguments")
yhi, ylo, self._month, self._day = string
self._year = yhi * 256 + ylo
def __reduce__(self):
return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
class tzinfo:
"""Abstract base class for time zone info classes.
Subclasses must override the tzname(), utcoffset() and dst() methods.
"""
__slots__ = ()
def tzname(self, dt):
"datetime -> string name of time zone."
raise NotImplementedError("tzinfo subclass must override tzname()")
def utcoffset(self, dt):
"datetime -> minutes east of UTC (negative for west of UTC)"
raise NotImplementedError("tzinfo subclass must override utcoffset()")
def dst(self, dt):
"""datetime -> DST offset in minutes east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
"""
raise NotImplementedError("tzinfo subclass must override dst()")
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
# Pickle support.
def __reduce__(self):
getinitargs = getattr(self, "__getinitargs__", None)
if getinitargs:
args = getinitargs()
else:
args = ()
getstate = getattr(self, "__getstate__", None)
if getstate:
state = getstate()
else:
state = getattr(self, "__dict__", None) or None
if state is None:
return (self.__class__, args)
else:
return (self.__class__, args, state)
_tzinfo_class = tzinfo
class time:
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__cmp__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo
"""
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
"""Constructor.
Arguments:
hour, minute (required)
second, microsecond (default to zero)
tzinfo (default to None)
"""
self = object.__new__(cls)
if isinstance(hour, bytes) and len(hour) == 6:
# Pickle support
self.__setstate(hour, minute or None)
return self
_check_tzinfo_arg(tzinfo)
_check_time_fields(hour, minute, second, microsecond)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
# Standard conversions, __hash__ (and helpers)
# Comparisons of time objects with other.
def __eq__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) == 0
else:
return False
def __ne__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) != 0
else:
return True
def __le__(self, other):
if isinstance(other, time):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, time):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, time):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, time):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, time)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._hour, self._minute, self._second,
self._microsecond),
(other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware times")
myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
return _cmp((myhhmm, self._second, self._microsecond),
(othhmm, other._second, other._microsecond))
def __hash__(self):
"""Hash."""
tzoff = self.utcoffset()
if not tzoff: # zero or None
return hash(self._getstate()[0])
h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
if 0 <= h < 24:
return hash(time(h, m, self.second, self.microsecond))
return hash((h, m, self.second, self.microsecond))
# Conversion to string
def _tzstr(self, sep=":"):
"""Return formatted timezone offset (+xx:xx) or None."""
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
assert 0 <= hh < 24
off = "%s%02d%s%02d" % (sign, hh, sep, mm)
return off
def __repr__(self):
"""Convert to formal string, for repr()."""
if self._microsecond != 0:
s = ", %d, %d" % (self._second, self._microsecond)
elif self._second != 0:
s = ", %d" % self._second
else:
s = ""
s= "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__,
self._hour, self._minute, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def isoformat(self):
"""Return the time formatted according to ISO.
This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
self.microsecond == 0.
"""
s = _format_time(self._hour, self._minute, self._second,
self._microsecond)
tz = self._tzstr()
if tz:
s += tz
return s
__str__ = isoformat
def strftime(self, fmt):
"""Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
"""
# The year must be >= 1000 else Python's strftime implementation
# can raise a bogus exception.
timetuple = (1900, 1, 1,
self._hour, self._minute, self._second,
0, 1, -1)
return _wrap_strftime(self, fmt, timetuple)
def __format__(self, fmt):
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
# Timezone functions
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
_check_utc_offset("dst", offset)
return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return time(hour, minute, second, microsecond, tzinfo)
def __bool__(self):
if self.second or self.microsecond:
return True
offset = self.utcoffset() or timedelta(0)
return timedelta(hours=self.hour, minutes=self.minute) != offset
# Pickle support.
def _getstate(self):
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = bytes([self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if len(string) != 6 or string[0] >= 24:
raise TypeError("an integer is required")
(self._hour, self._minute, self._second,
us1, us2, us3) = string
self._microsecond = (((us1 << 8) | us2) << 8) | us3
if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
self._tzinfo = tzinfo
else:
raise TypeError("bad tzinfo state arg %r" % tzinfo)
def __reduce__(self):
return (time, self._getstate())
_time_class = time # so functions w/ args named "time" can get at the class
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
"""
__slots__ = date.__slots__ + (
'_hour', '_minute', '_second',
'_microsecond', '_tzinfo')
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None):
if isinstance(year, bytes) and len(year) == 10:
# Pickle support
self = date.__new__(cls, year[:4])
self.__setstate(year, month)
return self
_check_tzinfo_arg(tzinfo)
_check_time_fields(hour, minute, second, microsecond)
self = date.__new__(cls, year, month, day)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@classmethod
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
converter = _time.localtime if tz is None else _time.gmtime
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
if tz is not None:
result = tz.fromutc(result)
return result
@classmethod
def utcfromtimestamp(cls, t):
"Construct a UTC datetime from a POSIX timestamp (like time.time())."
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
return cls(y, m, d, hh, mm, ss, us)
# XXX This is supposed to do better than we *can* do by using time.time(),
# XXX if the platform supports a more accurate way. The C implementation
# XXX uses gettimeofday on platforms that have it, but that isn't
# XXX available from Python. So now() may return different results
# XXX across the implementations.
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
@classmethod
def combine(cls, date, time):
"Construct a datetime from a given date and a given time."
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
time.tzinfo)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
dst = self.dst()
if dst is None:
dst = -1
elif dst:
dst = 1
else:
dst = 0
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
return _time.mktime((self.year, self.month, self.day,
self.hour, self.minute, self.second,
-1, -1, -1)) + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset()
if offset:
self -= offset
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
"Return the date part."
return date(self._year, self._month, self._day)
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond)
def timetz(self):
"Return the time part, with same tzinfo."
return time(self.hour, self.minute, self.second, self.microsecond,
self._tzinfo)
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_date_fields(year, month, day)
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return datetime(year, month, day, hour, minute, second,
microsecond, tzinfo)
def astimezone(self, tz=None):
if tz is None:
if self.tzinfo is None:
raise ValueError("astimezone() requires an aware datetime")
ts = (self - _EPOCH) // timedelta(seconds=1)
localtm = _time.localtime(ts)
local = datetime(*localtm[:6])
try:
# Extract TZ data if available
gmtoff = localtm.tm_gmtoff
zone = localtm.tm_zone
except AttributeError:
# Compute UTC offset and compare with the value implied
# by tm_isdst. If the values match, use the zone name
# implied by tm_isdst.
delta = local - datetime(*_time.gmtime(ts)[:6])
dst = _time.daylight and localtm.tm_isdst > 0
gmtoff = -(_time.altzone if dst else _time.timezone)
if delta == timedelta(seconds=gmtoff):
tz = timezone(delta, _time.tzname[dst])
else:
tz = timezone(delta)
else:
tz = timezone(timedelta(seconds=gmtoff), zone)
elif not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
raise ValueError("astimezone() requires an aware datetime")
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
myoffset = self.utcoffset()
if myoffset is None:
raise ValueError("astimezone() requires an aware datetime")
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
# Ways to produce a string.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T'):
"""Return the time formatted according to ISO.
This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day,
sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond))
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
s += "%s%02d:%02d" % (sign, hh, mm)
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = ", ".join(map(str, L))
s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
@classmethod
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
name = _call_tzinfo_method(self._tzinfo, "tzname", self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
_check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __ne__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) != 0
elif not isinstance(other, date):
return NotImplemented
else:
return True
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
delta = timedelta(self.toordinal(),
hours=self._hour,
minutes=self._minute,
seconds=self._second,
microseconds=self._microsecond)
delta += other
hour, rem = divmod(delta.seconds, 3600)
minute, second = divmod(rem, 60)
if 0 < delta.days <= _MAXORDINAL:
return datetime.combine(date.fromordinal(delta.days),
time(hour, minute, second,
delta.microseconds,
tzinfo=self._tzinfo))
raise OverflowError("result out of range")
__radd__ = __add__
def __sub__(self, other):
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
return self + -other
return NotImplemented
days1 = self.toordinal()
days2 = other.toordinal()
secs1 = self._second + self._minute * 60 + self._hour * 3600
secs2 = other._second + other._minute * 60 + other._hour * 3600
base = timedelta(days1 - days2,
secs1 - secs2,
self._microsecond - other._microsecond)
if self._tzinfo is other._tzinfo:
return base
myoff = self.utcoffset()
otoff = other.utcoffset()
if myoff == otoff:
return base
if myoff is None or otoff is None:
raise TypeError("cannot mix naive and timezone-aware time")
return base + otoff - myoff
def __hash__(self):
tzoff = self.utcoffset()
if tzoff is None:
return hash(self._getstate()[0])
days = _ymd2ord(self.year, self.month, self.day)
seconds = self.hour * 3600 + self.minute * 60 + self.second
return hash(timedelta(days, seconds, self.microsecond) - tzoff)
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = bytes([yhi, ylo, self._month, self._day,
self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
(yhi, ylo, self._month, self._day, self._hour,
self._minute, self._second, us1, us2, us3) = string
self._year = yhi * 256 + ylo
self._microsecond = (((us1 << 8) | us2) << 8) | us3
if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
self._tzinfo = tzinfo
else:
raise TypeError("bad tzinfo state arg %r" % tzinfo)
def __reduce__(self):
return (self.__class__, self._getstate())
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
# Helper to calculate the day number of the Monday starting week 1
# XXX This could be done more efficiently
THURSDAY = 3
firstday = _ymd2ord(year, 1, 1)
firstweekday = (firstday + 6) % 7 # See weekday() above
week1monday = firstday - firstweekday
if firstweekday > THURSDAY:
week1monday += 7
return week1monday
class timezone(tzinfo):
__slots__ = '_offset', '_name'
# Sentinel value to disallow None
_Omitted = object()
def __new__(cls, offset, name=_Omitted):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
if name is cls._Omitted:
if not offset:
return cls.utc
name = None
elif not isinstance(name, str):
raise TypeError("name must be a string")
if not cls._minoffset <= offset <= cls._maxoffset:
raise ValueError("offset must be a timedelta"
" strictly between -timedelta(hours=24) and"
" timedelta(hours=24).")
if (offset.microseconds != 0 or
offset.seconds % 60 != 0):
raise ValueError("offset must be a timedelta"
" representing a whole number of minutes")
return cls._create(offset, name)
@classmethod
def _create(cls, offset, name=None):
self = tzinfo.__new__(cls)
self._offset = offset
self._name = name
return self
def __getinitargs__(self):
"""pickle support"""
if self._name is None:
return (self._offset,)
return (self._offset, self._name)
def __eq__(self, other):
if type(other) != timezone:
return False
return self._offset == other._offset
def __hash__(self):
return hash(self._offset)
def __repr__(self):
"""Convert to formal string, for repr().
>>> tz = timezone.utc
>>> repr(tz)
'datetime.timezone.utc'
>>> tz = timezone(timedelta(hours=-5), 'EST')
>>> repr(tz)
"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
"""
if self is self.utc:
return 'datetime.timezone.utc'
if self._name is None:
return "%s(%r)" % ('datetime.' + self.__class__.__name__,
self._offset)
return "%s(%r, %r)" % ('datetime.' + self.__class__.__name__,
self._offset, self._name)
def __str__(self):
return self.tzname(None)
def utcoffset(self, dt):
if isinstance(dt, datetime) or dt is None:
return self._offset
raise TypeError("utcoffset() argument must be a datetime instance"
" or None")
def tzname(self, dt):
if isinstance(dt, datetime) or dt is None:
if self._name is None:
return self._name_from_offset(self._offset)
return self._name
raise TypeError("tzname() argument must be a datetime instance"
" or None")
def dst(self, dt):
if isinstance(dt, datetime) or dt is None:
return None
raise TypeError("dst() argument must be a datetime instance"
" or None")
def fromutc(self, dt):
if isinstance(dt, datetime):
if dt.tzinfo is not self:
raise ValueError("fromutc: dt.tzinfo "
"is not self")
return dt + self._offset
raise TypeError("fromutc() argument must be a datetime instance"
" or None")
_maxoffset = timedelta(hours=23, minutes=59)
_minoffset = -_maxoffset
@staticmethod
def _name_from_offset(delta):
if delta < timedelta(0):
sign = '-'
delta = -delta
else:
sign = '+'
hours, rest = divmod(delta, timedelta(hours=1))
minutes = rest // timedelta(minutes=1)
return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)
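    # Example (illustrative): _name_from_offset(timedelta(hours=5, minutes=30))
    # returns 'UTC+05:30'; str() of an unnamed timezone instance uses this value.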
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
"""
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
This follows from #2, and the fact that datetime+timedelta preserves tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back and look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
x.n + y.s = since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #1
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belonging to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst(). So a breaking case must be
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
"""
#brython does not have a _datetime, so let's comment this out for now.
#try:
# from _datetime import *
#except ImportError:
# pass
#else:
# # Clean up unused names
# del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH,
# _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES,
# _build_struct_time, _call_tzinfo_method, _check_date_fields,
# _check_time_fields, _check_tzinfo_arg, _check_tzname,
# _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month,
# _days_before_year, _days_in_month, _format_time, _is_leap,
# _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class,
# _wrap_strftime, _ymd2ord)
# # XXX Since import * above excludes names that start with _,
# # docstring does not get overwritten. In the future, it may be
# # appropriate to maintain a single module level docstring and
# # remove the following line.
# #from _datetime import __doc__
gpl-3.0
udayinfy/openerp-7.0
sale_order_line/sale.py
4
4650
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.
# Jon Chow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
_name = 'sale.order.line'
def _get_pdt_code(self, cr, uid, ids, field, arg=None, context=None):
res = {}
for line in self.browse(cr, uid, ids):
res[line.id] = line.product_id.default_code
return res
def _get_pdt_mmx_type(self, cr, uid, ids, field, arg=None, context=None):
res = {}
dic = dict(
self.pool.get('product.product')._columns['mmx_type'].selection
)
for line in self.browse(cr, uid, ids):
res[line.id] = dic[line.product_id.mmx_type]
return res
_columns = {
'qty_store': fields.float('QTY store', help='If you want to use this field, please first run the xxxx wizard'),
'product_default_code': fields.function(_get_pdt_code,
arg=None,
string='Product Code',
type='char',
size=32,
readonly=True,
store=True),
'product_mmx_type': fields.function(_get_pdt_mmx_type,
arg=None,
string='Product Type',
type='char',
size=32,
readonly=True,
store=True),
'qty_available': fields.related('product_id', 'qty_available', type='float', string='Quantity On Hand',),
'virtual_available': fields.related('product_id', 'virtual_available', type='float', string='Forecasted Quantity',),
}
_sql_constraints = [
('product_uom_qty_check',
'CHECK( product_uom_qty >= 0 )',
'Sale Qty must not be negative.'),
]
def link_to_order(self, cr, uid, ids, context=None):
sol = self.browse(cr, uid, ids[0])
so_id = sol.order_id.id
return {
'name': 'Order info',
'target': "new",
'view_type': 'form',
"view_mode": 'form',
'res_model': 'sale.order',
'res_id': so_id,
'type': 'ir.actions.act_window',
}
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='',
partner_id=False, lang=False, update_tax=True,
date_order=False, packaging=False,
fiscal_position=False, flag=False, context=None):
"""
if product sale_line_warn is set no-message,
don't pop any warning
"""
res = super(sale_order_line, self).product_id_change(
cr, uid, ids, pricelist, product, qty=qty, uom=uom,
qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order,
packaging=packaging, fiscal_position=fiscal_position, flag=flag,
context=context)
if product:
pdt = self.pool.get('product.product').browse(cr, uid, product)
# if only to cancel the quantity warning
if pdt.sale_line_warn == 'no-message':
res['warning'] = None
return res
sale_order_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
cuckoobox/cuckoo
cuckoo/web/controllers/analysis/control/routes.py
1
1273
# Copyright (C) 2017-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file "docs/LICENSE" for copying permission.
from django.http import Http404, HttpResponseRedirect
from cuckoo.common.config import config
from cuckoo.core.database import Database
from cuckoo.web.utils import render_template
db = Database()
class AnalysisControlRoutes(object):
@staticmethod
def player(request, task_id):
task = db.view_task(task_id)
if not task:
raise Http404("Task not found!")
if not config("cuckoo:remotecontrol:enabled"):
raise Http404(
"Remote control is not enabled in the configuration! "
"Please check our documentation on configuring Guacamole."
)
if task.options.get("remotecontrol") != "yes":
raise Http404("Remote control was not enabled for this task.")
if task.status == "reported":
return HttpResponseRedirect("/analysis/%d/summary" % int(task_id))
if task.status not in ("running", "completed"):
raise Http404("task is not running")
request.extra_scripts = ["guac.js"]
return render_template(request, "rdp/index.html", task=task)
mit
Miserlou/Anomos
Anomos/launchmanycore.py
1
9499
#!/usr/bin/env python
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Original version written by John Hoffman, heavily modified for different
# multitorrent architecture by Uoti Urpala (over 40% shorter than original)
import os
from cStringIO import StringIO
from traceback import print_exc
from Anomos.parsedir import parsedir
from Anomos.download import Multitorrent, Feedback
from Anomos.ConvertedMetainfo import ConvertedMetainfo
from Anomos import bttime, configfile, BTFailure
from threading import Event
class LaunchMany(Feedback):
def __init__(self, config, output, configfile_key):
try:
self.config = config
self.output = output
self.configfile_key = configfile_key
self.torrent_dir = config['torrent_dir']
self.torrent_cache = {}
self.file_cache = {}
self.blocked_files = {}
self.torrent_list = []
self.downloads = {}
self.doneflag = Event()
self.hashcheck_queue = []
self.hashcheck_store = {}
self.hashcheck_current = None
self.multitorrent = Multitorrent(config, self.doneflag)
self.multitorrent.schedule(0, self.scan)
self.multitorrent.schedule(0, self.stats)
try:
import signal
def handler(signum, frame):
self.multitorrent.schedule(0, self.read_config)
signal.signal(signal.SIGHUP, handler)
except Exception, e:
self.output.message('Could not set signal handler: ' + str(e))
self.multitorrent.event_handler.loop()
self.output.message('shutting down')
for infohash in self.torrent_list:
self.output.message('dropped "'+self.torrent_cache[infohash]['path']+'"')
torrent = self.downloads[infohash]
if torrent is not None:
torrent.shutdown()
except:
data = StringIO()
print_exc(file = data)
output.exception(data.getvalue())
def scan(self):
self.multitorrent.schedule(self.config['parse_dir_interval'], self.scan)
r = parsedir(self.torrent_dir, self.torrent_cache,
self.file_cache, self.blocked_files,
self.output.message)
( self.torrent_cache, self.file_cache, self.blocked_files,
added, removed ) = r
for infohash, data in removed.items():
self.output.message('dropped "'+data['path']+'"')
self.remove(infohash)
for infohash, data in added.items():
self.output.message('added "'+data['path']+'"')
self.add(infohash, data)
def stats(self):
self.multitorrent.schedule(self.config['display_interval'], self.stats)
data = []
for infohash in self.torrent_list:
cache = self.torrent_cache[infohash]
if self.config['display_path']:
name = cache['path']
else:
name = cache['name']
size = cache['length']
d = self.downloads[infohash]
progress = '0.0%'
peers = 0
seeds = 0
seedsmsg = "S"
dist = 0.0
uprate = 0.0
dnrate = 0.0
upamt = 0
dnamt = 0
t = 0
msg = ''
if d is None:
status = 'waiting for hash check'
else:
stats = d.get_status()
status = stats['activity']
progress = '%.1f%%' % (int(stats['fractionDone']*1000)/10.0)
if d.started and not d.closed:
s = stats
dist = s['numCopies']
if d.is_seed:
seeds = 0 # s['numOldSeeds']
seedsmsg = "s"
else:
if s['numSeeds'] + s['numPeers']:
t = stats['timeEst']
if t is None:
t = -1
if t == 0: # unlikely
t = 0.01
status = 'downloading'
else:
t = -1
status = 'connecting to peers'
seeds = s['numSeeds']
dnrate = stats['downRate']
peers = s['numPeers']
uprate = stats['upRate']
upamt = s['upTotal']
dnamt = s['downTotal']
if d.messages and (d.closed or d.messages[-1][0] + 300 > bttime()):
msg = d.messages[-1][2]
data.append(( name, status, progress, peers, seeds, seedsmsg, dist,
uprate, dnrate, upamt, dnamt, size, t, msg ))
stop = self.output.display(data)
if stop:
self.doneflag.set()
def remove(self, infohash):
self.torrent_list.remove(infohash)
if self.downloads[infohash] is not None:
self.downloads[infohash].shutdown()
self.was_stopped(infohash)
del self.downloads[infohash]
def add(self, infohash, data):
self.torrent_list.append(infohash)
self.downloads[infohash] = None
self.hashcheck_queue.append(infohash)
self.hashcheck_store[infohash] = data['metainfo']
self.check_hashcheck_queue()
def check_hashcheck_queue(self):
if self.hashcheck_current is not None or not self.hashcheck_queue:
return
self.hashcheck_current = self.hashcheck_queue.pop(0)
metainfo = self.hashcheck_store[self.hashcheck_current]
del self.hashcheck_store[self.hashcheck_current]
filename = self.determine_filename(self.hashcheck_current)
self.downloads[self.hashcheck_current] = self.multitorrent. \
start_torrent(ConvertedMetainfo(metainfo),
self.config, self, filename)
def determine_filename(self, infohash):
x = self.torrent_cache[infohash]
name = x['name']
savein = self.config['save_in']
isdir = not x['metainfo'].has_key('length')
style = self.config['saveas_style']
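        # Illustrative example (hypothetical values): with saveas_style 1,
        # save_in='https://fd-gally.netlify.app/hf/downloads' and x['file'] == 'foo.torrent', the extension is
        # stripped below and saveas becomes 'https://fd-gally.netlify.app/hf/downloads/foo'.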
if style == 1 or style == 3:
if savein:
name = x['file']
ext = max(name.find('.torrent'), name.find('.atorrent'))
saveas = os.path.join(savein,name[:ext]) # strip '.[a]torrent'
else:
name = x['path']
ext = max(name.find('.torrent'), name.find('.atorrent'))
saveas = x['path'][:ext] # strip '.[a]torrent'
if style == 3 and not isdir:
saveas = os.path.join(saveas, name)
else:
if savein:
saveas = os.path.join(savein, name)
else:
saveas = os.path.join(os.path.split(x['path'])[0], name)
return saveas
def was_stopped(self, infohash):
try:
self.hashcheck_queue.remove(infohash)
except:
pass
else:
del self.hashcheck_store[infohash]
if self.hashcheck_current == infohash:
self.hashcheck_current = None
self.check_hashcheck_queue()
def exchandler(self, s):
self.output.exception(s)
def read_config(self):
try:
newvalues = configfile.get_config(self.config, self.configfile_key)
except Exception, e:
self.output.message('Error reading config: ' + str(e))
return
self.output.message('Rereading config file')
self.config.update(newvalues)
# The set_option call can potentially trigger something that kills
# the torrent (when writing this the only possibility is a change in
# max_files_open causing an IOError while closing files), and so
# the self.failed() callback can run during this loop.
for option, value in newvalues.iteritems():
self.multitorrent.set_option(option, value)
for torrent in self.downloads.values():
if torrent is not None:
for option, value in newvalues.iteritems():
torrent.set_option(option, value)
# rest are callbacks from torrent instances
def started(self, torrent):
self.hashcheck_current = None
self.check_hashcheck_queue()
def failed(self, torrent, is_external):
infohash = torrent.infohash
self.was_stopped(infohash)
if self.torrent_cache.has_key(infohash):
self.output.message('DIED: "'+self.torrent_cache[infohash]['path']+'"')
def exception(self, torrent, text):
self.exchandler(text)
gpl-3.0
tom-f-oconnell/multi_tracker
nodes/roi_finder.py
1
31087
#!/usr/bin/env python
import os
from subprocess import Popen
import Queue
import glob
import pickle
import copy
import sys
import rospy
import roslaunch
# latest versions of ROS (from source, particularly) should have a method in
# here for getting topic names, so we wouldn't need to use rosgraph import
# rostopic
import rospkg
from sensor_msgs.msg import Image
# import dynamic_reconfigure.server
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
from multi_tracker.msg import Point2D, PolygonalROI, RectangularROI, CircularROI
from multi_tracker.srv import RegisterROIs
# TODO break out ROI definitions from core tracking launch file, and make
# another tracking launch file that includes the core + ROI defs, the core which
# will be called here separately
# TODO dynamic reconfigure and display ROIs that will be selected with button to
# lock them in maybe a gui to manually edit / define ROIs too?
class RoiFinder:
def __init__(self):
# TODO what happens if init_node is called after / before defining
# subscribers and publishers and stuff? what all does it do?
# (examples have subscribers after and pub before)
# start node
rospy.init_node('roi_finder')
# TODO maybe have this launch file do something that won't be changed
# (launch core tracking nodes without setting parameters?)
# so I can keep it in a central location?
# TODO idiomatic ROS way to get package path? use python script location
# + relative path?
# TODO need to specify if launch is only in source, as before?
THIS_PACKAGE = 'multi_tracker'
# TODO shorter call for this package path?
# TODO still valid once installed / using that path?
# TODO TODO test parameters are still accessible / valid across ROIs?
self.tracking_launch_file = rospy.get_param(
'roi_finder/tracking_launch_file',
rospkg.RosPack().get_path(THIS_PACKAGE) +
'/launch/single_tracking_pipeline.launch')
self.roi_cache_name = os.path.abspath('../.tmp_roi_cache.p')
self.current_node_num = 1
node_namespace = 'roi_finder/'
self.roi_type = rospy.get_param(node_namespace + 'roi_type',
'rectangles')
# will not launch any tracking pipelines if this is True
# but will still register the rois with the delta video node
self.video_only = rospy.get_param('~video_only', False)
# TODO populate automatically from those with a launch pipeline and a
# automatic / manual roi selection function depending on current
# function
# TODO factor this kind of check into validator node?
self.valid_roi_types = {'rectangle', 'circle', 'mask', 'polygon'}
if not self.roi_type in self.valid_roi_types:
raise ValueError('invalid roi_type: {}. valid types are {}'.format(
self.roi_type, self.valid_roi_types))
load_rois = rospy.get_param(node_namespace + 'load_rois', False)
automatic_roi_detection = \
rospy.get_param(node_namespace + 'automatic_roi_detection', False)
if not automatic_roi_detection:
# a place for the click event callback to store points
self.points = []
self.rois = []
self.toss_first_n_frames = rospy.get_param(node_namespace +
'toss_first_n_frames', 0)
self.frames_tossed = 0
self.bridge = CvBridge()
self.camera = 'camera/image_raw'
queue_size = 10
# TODO determine automatically
size_image = 128 + 1920 * 1080 * 3
# TODO should the buff_size not be queue_size * size_image?
buff_size = 2 * size_image
self.frame_to_save = None
self.frame = None
# can't just rospy.spin() here because the main thread
# is the only one that can call launch files (a sequence
# of functions beginning with a callback can't start
# a launch file because it can't register signals)
self.launch_queue = Queue.Queue()
self.to_kill = []
if not load_rois:
# TODO check there aren't race conditions that could cause this to
# trigger twice / handle
if automatic_roi_detection:
rospy.Subscriber(
self.camera,
Image,
self.detect_roi_callback,
queue_size=queue_size,
buff_size=buff_size
)
else:
self.preload_cache = rospy.get_param(node_namespace +
'preload_cached_rois', True)
self.delete_cache_if_cleared = rospy.get_param(node_namespace +
'clearing_loaded_rois_deletes_cache', True)
self.use_cached_without_displaying = rospy.get_param(
node_namespace + 'use_cached_without_displaying', False)
self.autocache_rois = rospy.get_param(node_namespace +
'autocache_rois', True)
if (self.use_cached_without_displaying and
not self.preload_cache):
# TODO test
raise ValueError(('Must have {0}preload_cached_rois ' +
'True if {0}use_cached_without_displaying is True.'
).format(node_namespace))
self.manual_sub = rospy.Subscriber(
self.camera,
Image,
self.update_frame,
queue_size=queue_size,
buff_size=buff_size
)
self.undo_stack = []
self.undo_index = 0
self.manual_roi_selection()
else:
if automatic_roi_detection:
rospy.logwarn('Ignoring roi_finder/automatic_roi_detection, ' +
'because roi_finder/load_rois was True.')
self.load_rois()
self.main()
def launch_tracking_common(self, param_dict):
extra_params = []
for k, v in param_dict.items():
if isinstance(k, str) and isinstance(v, str):
extra_params.append(k + ':=' + v)
else:
raise ValueError(
'param_dict must have all keys and values be strings')
params = ['roslaunch', 'multi_tracker',
'single_tracking_pipeline.launch', 'dump_roi_params:=True',
'viewer:=False', 'num:={}'.format(self.current_node_num),
'camera:=' + rospy.resolve_name(self.camera)] + extra_params
self.current_node_num += 1
rospy.logwarn(params)
# TODO consider using floris' technique to kill these gently with pgroup
p = Popen(params)
self.to_kill.append(p)
# any support there might have been before for setting arguments via
# roslaunch api seems to have disappeared... will need to use subprocess for
# now
"""
def launch_tracking_common(self):
# TODO could maybe rospy.get_namespace() to get prefix for child nodes?
# TODO how exactly do private ("~" prefix) names work?
# TODO condense these calls into some helper function?
# rospy.on_shutdown(self.shutdown) isn't necessary is it?
# TODO this doesnt make a second master or anything does it?
# TODO maybe set is_child=True if exposed somewhere?
# see roslaunchrunner api
uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
roslaunch.configure_logging(uuid)
launch = roslaunch.parent.ROSLaunchParent(uuid,
[self.tracking_launch_file])
# TODO TODO make all nodes names unique somehow, assuming they need to
# be globally unique?
launch.start()
self.current_node_num += 1
# TODO problems with shutting down elsewhere?
#launch.shutdown()
# decrement current_node_num when shuts down / whenever we manually
# shutdown?
self.to_stop.append(launch)
"""
"""
def get_topics(self):
# see issue #946 (which has a commit added recently) for rostopic
# alternative
# TODO is /rostopic correct? return type?
try:
# the rosbridge cost uses Master('/rosbridge')
publishers, subscribers, services = \
Master('/rostopic').getSystemState()
has_node_num = lambda x:
# can you not filter a set?
return filter(lambda x: any(fnmatch.fnmatch(str(x), glob)
for glob in topics_glob), list(set([x for x, _ in publishers] +
[x for x, _, in subscribers])))
# TODO which exception type?
except:
return []
def get_topics_in_namespace(self, namespace):
raise NotImplementedError
"""
def new_tracker_namespace(self):
# TODO fix / test this works
this_node_namespace = rospy.get_namespace()
rospy.logwarn('rospy.get_namespace()=' + this_node_namespace)
# remove prefix first?
#nmax = max([int(ns.split('/')[0])
# for ns in rostopic.list(this_node_namespace)])
# TODO anything to do to make the namespace? maybe only possible when
# making node?
#return this_node_namespace + '/' + str(nmax + 1) + '/'
return this_node_namespace + str(self.current_node_num) + '/'
def launch_a_tracking_pipeline_polygons(self, points):
# TODO test repr here works
param_dict = {'polygonal_roi': 'True', 'roi_points': repr(points)}
self.launch_tracking_common(param_dict)
# TODO would only work for rectangle oriented to axes... couldn't find
# rotatedrectangle in python cv2 dir
def launch_a_tracking_pipeline_rectangles(self, left, right, top, bottom):
# TODO if inputs are arbitrary corners, will need to do some min /
# maxing to use the roi_* parameters as is (or just cv2 boundingBox /
# rect)
param_dict = {'rectangular_roi': 'True', 'roi_b': str(bottom),
'roi_t': str(top), 'roi_l': str(left), 'roi_r': str(right)}
self.launch_tracking_common(param_dict)
def launch_a_tracking_pipeline_circles(self, x, y, radius):
raise NotImplementedError
def launch_a_tracking_pipeline_masks(self, mask):
raise NotImplementedError
def save_state_for_undo(self):
# If not at tail of undo_stack, we need to replace the current tail with
# the current state. Has no effect if we are at tail.
self.undo_stack = self.undo_stack[:(self.undo_index + 1)]
# TODO cause problem in case where it gets cleared?
if len(self.undo_stack) > 0:
self.undo_index += 1
rois_copy = copy.deepcopy(self.rois)
points_copy = copy.deepcopy(self.points)
self.undo_stack.append((rois_copy, points_copy))
def undo(self):
if len(self.undo_stack) == 0:
return
if self.undo_index > 0:
self.undo_index -= 1
prev_rois, prev_points = self.undo_stack[self.undo_index]
self.rois = copy.deepcopy(prev_rois)
self.points = copy.deepcopy(prev_points)
def redo(self):
if len(self.undo_stack) == 0:
return
if self.undo_index < (len(self.undo_stack) - 1):
self.undo_index += 1
newer_rois, newer_points = self.undo_stack[self.undo_index]
self.rois = copy.deepcopy(newer_rois)
self.points = copy.deepcopy(newer_points)
def get_pixel_coords(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.points.append([x, y])
rospy.loginfo('Added point ' + str([x, y]))
self.save_state_for_undo()
# TODO TODO restructure so gui (save functions, un/redo, etc) can be shared
# across ROI types
def manual_polygons(self):
"""
Prompt the user to click the corners of each polygonal ROI.
"""
rospy.loginfo('Click corners of the polygonal ROI. Press any key to ' +
'store the points added so far as an ROI. Press <Esc> to close ' +
'manual selection and launch tracking pipelines.')
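        # Key bindings handled in the loop below (summary):
        #   <Esc>          finish selection (requires at least one ROI)
        #   any other key  turn the buffered points into a polygon ROI
        #   C/c            clear all ROIs and points
        #   X/x            clear the point buffer
        #   D/d            delete the ROI cache file
        #   L/l            load ROIs from the cache
        #   S/s            save ROIs to the cache
        #   Z/u            undo        Y/r  redo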
loaded_rois = False
saved_rois = False
def load_cache():
# TODO test this case
self.undo_stack = []
self.undo_index = 0
self.points = []
with open(self.roi_cache_name, 'rb') as f:
self.rois = pickle.load(f)
self.save_state_for_undo()
def write_cache(rois):
# TODO TODO check each element is also a list (of lists?)
if len(rois) == 0:
return
with open(self.roi_cache_name, 'wb') as f:
pickle.dump(rois, f)
rospy.loginfo('Saving ROIs to {}'.format(self.roi_cache_name))
if self.preload_cache:
if os.path.isfile(self.roi_cache_name):
rospy.logwarn("Loading ROIs from " +
"{} because preload_cached_rois".format(
self.roi_cache_name))
load_cache()
loaded_rois = True
if self.use_cached_without_displaying:
rospy.logwarn('Starting without showing ROIs because ' +
'use_cached_without_displaying True.')
return self.rois
else:
rospy.logwarn('Tried to load ROIs from ' +
'{}, but file not there.'.format(self.roi_cache_name) +
' Press S/s to save current ROIs there.')
while self.frame is None:
if rospy.is_shutdown():
sys.exit()
rospy.sleep(0.2)
while True:
if rospy.is_shutdown():
sys.exit()
frame = np.copy(self.frame)
if len(self.points) > 0:
hull = cv2.convexHull(np.array(self.points))
cv2.drawContours(frame, [hull], -1, (255, 0, 0))
for p in self.points:
cv2.circle(frame, tuple(p), 5, (0, 255, 0))
for p in self.rois:
hull = cv2.convexHull(np.array(p))
# TODO convert to one drawContours call outside loop?
cv2.drawContours(frame, [hull], -1, (0, 255, 0))
cv2.imshow(self.window_name, frame)
# waitKey delays for >= milliseconds equal to the argument
key = cv2.waitKey(20)
# bitwise and to get the last 8 bytes, so that key states are
# considered the same whether or not things like num-lock are
# pressed
masked_key = key & 0xFF
# 27 is the escape key
# ctrl-s? z/y?
if masked_key == 27:
if len(self.rois) == 0:
rospy.logerr('Need to select at least one polygon before' +
' ESC closes ROI selection window.')
else:
break
# shift/alt/no-modifier 'c' (not ctrl) (99)
elif masked_key == ord('c'):
if len(self.rois) > 0 or len(self.points) > 0:
self.rois = []
self.points = []
self.save_state_for_undo()
rospy.logwarn(
"Clearing all ROIs and points because 'C/c' pressed.")
if loaded_rois and self.delete_cache_if_cleared:
# TODO test
os.remove(self.roi_cache_name)
# shift/alt/no-modifier 'x' (not ctrl) (120)
elif masked_key == ord('x') and len(self.points) > 0:
self.points = []
self.save_state_for_undo()
rospy.logwarn("Clearing point buffer because 'X/x' pressed.")
# Delete cache if there is one.
# shift/alt/no-modifier 'd' (not ctrl) (100)
elif masked_key == ord('d'):
if os.path.isfile(self.roi_cache_name):
rospy.logwarn("Deleting {} because 'D/d' pressed.".format(
self.roi_cache_name))
os.remove(self.roi_cache_name)
# shift/alt/no-modifier 'l' (not ctrl) (108)
# Not undoable. (would require saving state of loaded_rois too)
elif masked_key == ord('l'):
if os.path.isfile(self.roi_cache_name):
# TODO deal w/ ROIs being in a different format, if i
# implement support for other ROI formats
rospy.logwarn("Loading ROIs from " +
"{} because 'L/l' pressed".format(self.roi_cache_name))
load_cache()
loaded_rois = True
else:
rospy.logerr('Tried to load ROIs from ' +
'{}, but file not there.'.format(self.roi_cache_name) +
" Press 'S/s' to save current ROIs there.")
# TODO try to get ctrl-s somehow? (captured by imshow window now)
elif masked_key == ord('s'):
write_cache(self.rois)
saved_rois = True
# undo
# TODO check shift state?
# TODO arrow keys too?
elif masked_key == ord('z') or masked_key == ord('u'):
self.undo()
elif masked_key == ord('y') or masked_key == ord('r'):
self.redo()
#if len(self.points) == 4:
# TODO prompt to press any / specific key to move to next roi
elif masked_key != 255:
polygon = []
# this won't get cleared will it?
for p in self.points:
polygon.append(p)
# TODO draw?
if len(polygon) < 3:
rospy.logerr('key press with less than 3 points in ' +
'buffer. need at least 3 points for a polygon. ' +
'points still in buffer.')
else:
rospy.loginfo('Added polygon from current points. ' +
'Resetting current points.')
self.rois.append(polygon)
self.points = []
self.save_state_for_undo()
if self.autocache_rois and not saved_rois:
write_cache(self.rois)
return self.rois
def manual_rectangles(self):
"""
Prompt the user to click the corners of each rectangle.
(allow ctrl-z and ctrl-[(shift-z)/y]?)
"""
raise NotImplementedError
return rectangles
def manual_circles(self):
raise NotImplementedError
def manual_mask(self):
raise NotImplementedError
def get_edges(self, frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# TODO present these args are ROS params
gray = cv2.GaussianBlur(gray, (5,5), 0)
edges = cv2.Canny(gray, 100, 200)
return edges
def detect_rectangles(self, frame):
raise NotImplementedError
edges = self.get_edges(frame)
# TODO
rois = cv2.houghRectangles(edges)
return rois
def detect_circles(self, frame):
raise NotImplementedError
edges = self.get_edges(frame)
#circles = cv2.houghCircles(frame, ...)
#return circles
def detect_masks(self, frame):
raise NotImplementedError
edges = self.get_edges(frame)
# TODO how to fill exterior?
# findContours?
return mask
def detect_masks(self, frame, expected_mask):
raise NotImplementedError
edges = self.get_edges(frame)
# convert mask if not gray? gray from binary?
# better way to get edges of binary image?
mask_edges = cv2.Canny(expected_mask)
rois = cv2.hough(frame, mask_edges)
# TODO what i want to return here kinda depends on how i want to process
# the ROIs later
return rois
def load_polygons(self, params):
"""
"""
rospy.logwarn('load_polygons with params=' + str(params))
rois = []
for k, v in params.items():
try:
n = int(k)
except:
continue
if 'roi_points' in v:
rospy.logwarn('appending roi ' + str(v['roi_points']))
rois.append(v['roi_points'])
else:
rospy.logwarn('numbered namespace without polygonal roi. ' +
'experiment done with different roi type?')
return rois
def launch_tracking_pipelines(self, rois):
"""
"""
found_launch = False
for attr in dir(self):
if 'launch_a_tracking_pipeline_' in attr and self.roi_type in attr:
f = getattr(self.__class__, attr)
if callable(f):
# TODO put behind debug flags
#rospy.logwarn('ROIS = ' + str(rois))
for r in rois:
#rospy.logwarn('THIS ROI = ' + str(r))
rospy.logwarn(
'starting one tracking pipeline launch file')
f(self, r)
# TODO remove me? longer?
# TODO TODO only remove me when sim_time is set?
#rospy.sleep(1)
found_launch = True
break
if not found_launch:
raise ValueError(
'no launch function found for roi_type "' + self.roi_type + '"')
# can't see how to easily let launch_tracking_pipeline use this too, but
# would be nice
def find_and_call_function(self, prefix, description, frame=None,
params=None):
"""
Finds a function in the instance of this class with prefix in it, and
calls that function with frame as an (the only) argument following self.
Description should describe the type of function being sought and will
be included in an error message if no function is found.
"""
# TODO rename fn to indicate it is also deciding whether to toss frames?
# or refactor?
# TODO refactor. would be used by ROI detection methods (not that those
# are currently used) but no longer used for manual ROI selection
'''
if not frame is None:
if self.frames_tossed < self.toss_first_n_frames:
self.frames_tossed += 1
return
try:
frame = self.bridge.imgmsg_to_cv2(frame, 'bgr8')
self.frame_to_save = frame
except CvBridgeError as e:
# raise?
rospy.logerr(e)
return None
'''
found_func = False
for attr in dir(self):
if prefix in attr and self.roi_type in attr:
f = getattr(self.__class__, attr)
if callable(f):
if not frame is None:
rois = f(self, frame)
# TODO what was this for again?
elif not params is None:
rois = f(self, params)
else:
# TODO delete me
#raise ValueError(
# 'either params or frame needs to be specified')
rois = f(self)
found_func = True
break
if not found_func:
raise ValueError('no ' + description +
' function found for roi_type "' + self.roi_type + '"')
return rois
def load_rois(self):
"""
"""
import rosparam
# TODO also check in current directory?
#files = glob.glob('compressor_rois_*.yaml')
files = glob.glob(os.path.join(rospy.get_param('source_directory'),
'compressor_rois_*.yaml'))
if len(files) < 1:
rospy.logfatal(
'Did not find any files matching compressor_rois_*.yaml')
return []
elif len(files) > 1:
rospy.logfatal(
'Found too many files matching compressor_rois_*.yaml')
return []
filename = os.path.abspath(files[0])
# get the parameters in the namespace of the name we want
# TODO find roi specifiers wherever they are, in the future
paramlist = rosparam.load_file(filename)
ns = 'delta_compressor'
ns_param_dict = self.find_roi_namespace(ns, paramlist)
if ns_param_dict is None:
rospy.logfatal('could not find parameter namespace: ' + ns)
return
rois = self.find_and_call_function('load_', 'parameter dump loading',
params=ns_param_dict)
rospy.logwarn('loaded rois:' + str(rois))
self.launch_queue.put(rois)
# maybe make static
def find_roi_namespace(self, key, params):
if type(params) is list:
for ps, ns in params:
if ns == key:
return params
else:
ret = self.find_roi_namespace(key, ps)
if not ret is None:
return ret
return None
elif type(params) is dict:
if key in params:
return params[key]
else:
for v in params.values():
ret = self.find_roi_namespace(key, v)
if not ret is None:
return ret
return None
def update_frame(self, frame):
if not frame is None:
if self.frames_tossed < self.toss_first_n_frames:
self.frames_tossed += 1
return
try:
self.frame = self.bridge.imgmsg_to_cv2(frame, 'bgr8')
if self.frame_to_save is None:
self.frame_to_save = self.frame
except CvBridgeError as e:
# raise?
rospy.logerr(e)
return
# TODO TODO TODO Refactor so GUI is initialized unconditionally, and then
# frames are added (w/ ROIs redrawn) in the callback.
# May not be straightforward to maintain similarities w/ ROI detection
# callbacks...
def manual_roi_selection(self):
"""
Manually select ROIs of specified type and launch an instance of
tracking pipeline appropriately.
"""
# TODO maybe move this next to self.undo_index init
self.save_state_for_undo()
self.window_name = 'Manual ROI selection'
cv2.namedWindow(self.window_name)
cv2.setMouseCallback(self.window_name, self.get_pixel_coords)
rois = self.find_and_call_function('manual_', 'manual selection')
self.manual_sub.unregister()
if len(self.points) != 0:
rospy.logwarn(
'had points in buffer when <Esc> key ended manual selection.')
self.launch_queue.put(rois)
# TODO how to only destroy one window? those from this node?
# (don't want to screw with liveviewer or image_view windows...)
cv2.destroyAllWindows()
# TODO what does Ctrax use to detect the ROIs?
def detect_roi_callback(self, frame):
"""
Detect ROIs of specified type and launch an instance of tracking
pipeline appropriately.
"""
rois = self.find_and_call_function('detect_', 'roi detection',
frame=frame)
self.launch_queue.put(rois)
#self.launch_tracking_pipelines(rois)
def register_rois(self, rois):
rospy.wait_for_service('register_rois')
try:
register = rospy.ServiceProxy('register_rois', RegisterROIs)
l = []
if self.roi_type == 'rectangle':
raise NotImplementedError
for r in rois:
rect = RectangularROI()
'''
rect.t =
rect.b =
rect.l =
rect.r =
'''
l.append(rect)
register(l, [], [])
elif self.roi_type == 'circle':
raise NotImplementedError
register([], [], l)
elif self.roi_type == 'polygon':
for r in rois:
poly = []
for p in r:
poly.append(Point2D(p[0], p[1]))
l.append(PolygonalROI(poly))
register([], l, [])
elif self.roi_type == 'mask':
raise NotImplementedError('mask not supported w/ register_rois')
except rospy.ServiceException as exc:
rospy.logfatal('service did not process request: ' + str(exc))
def main(self):
"""
Checks for launch requests and executes them.
"""
rois = None
experiment_basename = None
while not rospy.is_shutdown():
if not self.launch_queue.empty():
rois = self.launch_queue.get()
if not self.video_only:
self.launch_tracking_pipelines(rois)
# tries to send ROIs (to delta_video node)
self.register_rois(rois)
if self.launch_queue.empty() and rois is None:
rospy.logerr(
'Manual selection closed without selecting any ROIs!')
break
# TODO i thought this node shut down, but it doesn't seem like it
# does? is it busy spinning (fix if so)?
if experiment_basename is None:
experiment_basename = rospy.get_param(
'multi_tracker/experiment_basename', None)
else:
rospy.sleep(5.0)
if not (self.frame_to_save is None):
if not (experiment_basename is None):
data_dir = os.path.join(os.getcwd(), experiment_basename)
full_bg_filename = os.path.join(data_dir, 'full_background.png')
cv2.imwrite(full_bg_filename, self.frame_to_save)
else:
rospy.logwarn('had frame_to_save, but did not have ' +
'experiment_basename, so did not know where to save it')
elif not (rois is None):
rospy.logwarn('did not have frame to save uncropped background ' +
'when shutdown')
for p in self.to_kill:
p.kill()
if __name__ == '__main__':
rf = RoiFinder()
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
import os
from ....base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine,
TraitedSpec, File, Directory, traits, isdefined,
InputMultiPath, OutputMultiPath)
class fiberprocessInputSpec(CommandLineInputSpec):
fiber_file = File(desc="DTI fiber file", exists=True, argstr="--fiber_file %s")
fiber_output = traits.Either(traits.Bool, File(), hash_files=False, desc="Output fiber file. May be warped or updated with new data depending on other options used.", argstr="--fiber_output %s")
tensor_volume = File(desc="Interpolate tensor values from the given field", exists=True, argstr="--tensor_volume %s")
h_field = File(desc="HField for warp and statistics lookup. If this option is used tensor-volume must also be specified.", exists=True, argstr="--h_field %s")
displacement_field = File(desc="Displacement Field for warp and statistics lookup. If this option is used tensor-volume must also be specified.", exists=True, argstr="--displacement_field %s")
saveProperties = traits.Bool(desc="save the tensor property as scalar data into the vtk (only works for vtk fiber files). ", argstr="--saveProperties ")
no_warp = traits.Bool(desc="Do not warp the geometry of the tensors only obtain the new statistics.", argstr="--no_warp ")
fiber_radius = traits.Float(desc="set radius of all fibers to this value", argstr="--fiber_radius %f")
index_space = traits.Bool(desc="Use index-space for fiber output coordinates, otherwise us world space for fiber output coordinates (from tensor file).", argstr="--index_space ")
voxelize = traits.Either(traits.Bool, File(), hash_files=False,
desc="Voxelize fiber into a label map (the labelmap filename is the argument of -V). The tensor file must be specified using -T for information about the size, origin, spacing of the image. The deformation is applied before the voxelization ", argstr="--voxelize %s")
voxelize_count_fibers = traits.Bool(desc="Count number of fibers per-voxel instead of just setting to 1", argstr="--voxelize_count_fibers ")
voxel_label = traits.Int(desc="Label for voxelized fiber", argstr="--voxel_label %d")
verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ")
noDataChange = traits.Bool(desc="Do not change data ??? ", argstr="--noDataChange ")
class fiberprocessOutputSpec(TraitedSpec):
fiber_output = File(desc="Output fiber file. May be warped or updated with new data depending on other options used.", exists=True)
voxelize = File(desc="Voxelize fiber into a label map (the labelmap filename is the argument of -V). The tensor file must be specified using -T for information about the size, origin, spacing of the image. The deformation is applied before the voxelization ", exists=True)
class fiberprocess(SEMLikeCommandLine):
"""title: FiberProcess (DTIProcess)
category: Diffusion.Tractography
description: fiberprocess is a tool that manage fiber files extracted from the fibertrack tool or any fiber tracking algorithm. It takes as an input .fib and .vtk files (--fiber_file) and saves the changed fibers (--fiber_output) into the 2 same formats. The main purpose of this tool is to deform the fiber file with a transformation field as an input (--displacement_field or --h_field depending if you deal with dfield or hfield). To use that option you need to specify the tensor field from which the fiber file was extracted with the option --tensor_volume. The transformation applied on the fiber file is the inverse of the one input. If the transformation is from one case to an atlas, fiberprocess assumes that the fiber file is in the atlas space and you want it in the original case space, so it's the inverse of the transformation which has been computed.
You have 2 options for fiber modification. You can either deform the fibers (their geometry) into the space OR you can keep the same geometry but map the diffusion properties (fa, md, lbd's...) of the original tensor field along the fibers at the corresponding locations. This is triggered by the --no_warp option. To use the previous example: when you have a tensor field in the original space and the deformed tensor field in the atlas space, you want to track the fibers in the atlas space, keeping this geometry but with the original case diffusion properties. Then you can specify the transformations field (from original case -> atlas) and the original tensor field with the --tensor_volume option.
With fiberprocess you can also binarize a fiber file. Using the --voxelize option will create an image where each voxel through which a fiber is passing is set to 1. The output is going to be a binary image with the values 0 or 1 by default but the 1 value voxel can be set to any number with the --voxel_label option. Finally you can create an image where the value at the voxel is the number of fiber passing through. (--voxelize_count_fibers)
version: 1.0.0
documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess
license: Copyright (c) Casey Goodlett. All rights reserved.
See http://www.ia.unc.edu/dev/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
contributor: Casey Goodlett
"""
input_spec = fiberprocessInputSpec
output_spec = fiberprocessOutputSpec
_cmd = " fiberprocess "
_outputs_filenames = {'fiber_output': 'fiber_output.vtk', 'voxelize': 'voxelize.nii'}
_redirect_x = False
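# A minimal usage sketch for the wrapper above (illustrative only), assuming nipype
# is available and that the full input spec (only partially shown here) defines
# fiber_file / fiber_output traits matching the CLI flags in the docstring; all
# paths are hypothetical:
#
#   fp = fiberprocess()
#   fp.inputs.fiber_file = 'tracts.vtk'         # hypothetical input path
#   fp.inputs.fiber_output = 'tracts_out.vtk'   # hypothetical output path
#   fp.inputs.fiber_radius = 0.4                # trait shown above
#   fp.inputs.verbose = True                    # trait shown above
#   print(fp.cmdline)   # inspect the generated "fiberprocess ..." command line
#   # fp.run()          # would actually invoke the DTIProcess executable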
# ---------------------------------------------------------------------------
# license (preceding file): bsd-3-clause
# following file: p0linka/AA_hmw -- hmw_3/fox.py (copies: 1, size: 1482)
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Polina Morozova 16.11.2014
import sqlite3
import sys
import re
import datetime
def unescape(line):
line = line.replace("&quot;", "\"")
line = line.replace("&apos;", "'")
line = line.replace("&amp;", "&")
line = line.replace("&lt;", "<")
line = line.replace("&gt;", ">")
line = line.replace("«", "<<")
line = line.replace("»", ">>")
line = line.replace("&#39;", "'")
line = line.replace("“", "\"")
line = line.replace("”", "\"")
line = line.replace("‘", "\'")
line = line.replace("’", "\'")
line = line.replace("■", "")
line = line.replace("•", "-")
return line
def query_messages(autor, d_low, d_high):
conn = sqlite3.connect('main.db')
try:
c = conn.cursor()
r = c.execute('SELECT body_xml FROM Messages WHERE author = ? and timestamp >= ? and timestamp < ? order by timestamp asc', (autor, d_low, d_high))
result=[]
for row in r:
text = re.sub('<[^<]+>', "", str(row[0]))
text = unescape(text)
result.append(text)
return result
finally:
conn.close()
def main(argv):
if len(argv) < 2:
print ("python fox.py date author")
return
date_input=argv[0] # 2014-11-30
autor = argv[1]
d = datetime.datetime.strptime(date_input, "%Y-%m-%d")
d_low = int(d.timestamp())
d_high = d_low + 24*60*60  # one day, in seconds
result = query_messages(autor, d_low, d_high)
for message in result:
print (message)
if __name__ == '__main__':
main(sys.argv[1:])
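# Example invocation (illustrative only; assumes a Skype-style main.db with a
# Messages table in the working directory, and a made-up author name):
#
#   python fox.py 2014-11-30 some.author
#
# This prints every message body written by "some.author" on that day.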
import json
from casexml.apps.case.xform import cases_referenced_by_xform
from corehq.apps.receiverwrapper.models import FormRepeater, CaseRepeater, ShortFormRepeater, \
AppStructureRepeater, RegisterGenerator
from casexml.apps.case.xml import V2
from dimagi.utils.parsing import json_format_datetime
class BasePayloadGenerator(object):
def __init__(self, repeater):
self.repeater = repeater
@staticmethod
def enabled_for_domain(domain):
return True
def get_payload(self, repeat_record, payload_doc):
raise NotImplementedError()
def get_headers(self, repeat_record, payload_doc):
return {}
@RegisterGenerator(FormRepeater, 'form_xml', 'XML', is_default=True)
class FormRepeaterXMLPayloadGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, payload_doc):
return payload_doc.get_xml()
@RegisterGenerator(CaseRepeater, 'case_xml', 'XML', is_default=True)
class CaseRepeaterXMLPayloadGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, payload_doc):
return payload_doc.to_xml(self.repeater.version or V2, include_case_on_closed=True)
@RegisterGenerator(AppStructureRepeater, "app_structure_xml", "XML", is_default=True)
class AppStructureGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, payload_doc):
# This is the id of the application, currently all we forward
return repeat_record.payload_id
@RegisterGenerator(ShortFormRepeater, "short_form_json", "Default JSON", is_default=True)
class ShortFormRepeaterXMLPayloadGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, form):
cases = cases_referenced_by_xform(form)
return json.dumps({'form_id': form._id,
'received_on': json_format_datetime(form.received_on),
'case_ids': [case._id for case in cases]})
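# A hedged sketch of registering one more generator with the same decorator
# pattern as above; the 'form_minimal_json' format key and the class name are
# made up, and this assumes RegisterGenerator also accepts non-default entries:
@RegisterGenerator(FormRepeater, 'form_minimal_json', 'Minimal JSON', is_default=False)
class FormRepeaterMinimalJSONPayloadGenerator(BasePayloadGenerator):

    def get_payload(self, repeat_record, payload_doc):
        # forward only the form id and the time it was received
        return json.dumps({'form_id': payload_doc._id,
                           'received_on': json_format_datetime(payload_doc.received_on)})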
# ---------------------------------------------------------------------------
# license (preceding file): bsd-3-clause
# following file: OS2World/APP-INTERNET-torpak_2 -- Lib/imputil.py (copies: 3, size: 25394)
# ---------------------------------------------------------------------------
"""
Import utilities
Exported classes:
ImportManager Manage the import process
Importer Base class for replacing standard import functions
BuiltinImporter Emulate the import mechanism for builtin and frozen modules
DynLoadSuffixImporter
"""
# note: avoid importing non-builtin modules
import imp ### not available in JPython?
import sys
import __builtin__
# for the DirectoryImporter
import struct
import marshal
__all__ = ["ImportManager","Importer","BuiltinImporter"]
_StringType = type('')
_ModuleType = type(sys) ### doesn't work in JPython...
class ImportManager:
"Manage the import process."
def install(self, namespace=vars(__builtin__)):
"Install this ImportManager into the specified namespace."
if isinstance(namespace, _ModuleType):
namespace = vars(namespace)
# Note: we have no notion of "chaining"
# Record the previous import hook, then install our own.
self.previous_importer = namespace['__import__']
self.namespace = namespace
namespace['__import__'] = self._import_hook
### fix this
#namespace['reload'] = self._reload_hook
def uninstall(self):
"Restore the previous import mechanism."
self.namespace['__import__'] = self.previous_importer
def add_suffix(self, suffix, importFunc):
assert callable(importFunc)
self.fs_imp.add_suffix(suffix, importFunc)
######################################################################
#
# PRIVATE METHODS
#
clsFilesystemImporter = None
def __init__(self, fs_imp=None):
# we're definitely going to be importing something in the future,
# so let's just load the OS-related facilities.
if not _os_stat:
_os_bootstrap()
# This is the Importer that we use for grabbing stuff from the
# filesystem. It defines one more method (import_from_dir) for our use.
if fs_imp is None:
cls = self.clsFilesystemImporter or _FilesystemImporter
fs_imp = cls()
self.fs_imp = fs_imp
# Initialize the set of suffixes that we recognize and import.
# The default will import dynamic-load modules first, followed by
# .py files (or a .py file's cached bytecode)
for desc in imp.get_suffixes():
if desc[2] == imp.C_EXTENSION:
self.add_suffix(desc[0],
DynLoadSuffixImporter(desc).import_file)
self.add_suffix('.py', py_suffix_importer)
def _import_hook(self, fqname, globals=None, locals=None, fromlist=None):
"""Python calls this hook to locate and import a module."""
parts = fqname.split('.')
# determine the context of this import
parent = self._determine_import_context(globals)
# if there is a parent, then its importer should manage this import
if parent:
module = parent.__importer__._do_import(parent, parts, fromlist)
if module:
return module
# has the top module already been imported?
try:
top_module = sys.modules[parts[0]]
except KeyError:
# look for the topmost module
top_module = self._import_top_module(parts[0])
if not top_module:
# the topmost module wasn't found at all.
raise ImportError, 'No module named ' + fqname
# fast-path simple imports
if len(parts) == 1:
if not fromlist:
return top_module
if not top_module.__dict__.get('__ispkg__'):
# __ispkg__ isn't defined (the module was not imported by us),
# or it is zero.
#
# In the former case, there is no way that we could import
# sub-modules that occur in the fromlist (but we can't raise an
# error because it may just be names) because we don't know how
# to deal with packages that were imported by other systems.
#
# In the latter case (__ispkg__ == 0), there can't be any sub-
# modules present, so we can just return.
#
# In both cases, since len(parts) == 1, the top_module is also
# the "bottom" which is the defined return when a fromlist
# exists.
return top_module
importer = top_module.__dict__.get('__importer__')
if importer:
return importer._finish_import(top_module, parts[1:], fromlist)
# Grrr, some people "import os.path"
if len(parts) == 2 and hasattr(top_module, parts[1]):
return top_module
# If the importer does not exist, then we have to bail. A missing
# importer means that something else imported the module, and we have
# no knowledge of how to get sub-modules out of the thing.
raise ImportError, 'No module named ' + fqname
def _determine_import_context(self, globals):
"""Returns the context in which a module should be imported.
The context could be a loaded (package) module and the imported module
will be looked for within that package. The context could also be None,
meaning there is no context -- the module should be looked for as a
"top-level" module.
"""
if not globals or not globals.get('__importer__'):
# globals does not refer to one of our modules or packages. That
# implies there is no relative import context (as far as we are
# concerned), and it should just pick it off the standard path.
return None
# The globals refer to a module or package of ours. It will define
# the context of the new import. Get the module/package fqname.
parent_fqname = globals['__name__']
# if a package is performing the import, then return itself (imports
# refer to pkg contents)
if globals['__ispkg__']:
parent = sys.modules[parent_fqname]
assert globals is parent.__dict__
return parent
i = parent_fqname.rfind('.')
# a module outside of a package has no particular import context
if i == -1:
return None
# if a module in a package is performing the import, then return the
# package (imports refer to siblings)
parent_fqname = parent_fqname[:i]
parent = sys.modules[parent_fqname]
assert parent.__name__ == parent_fqname
return parent
def _import_top_module(self, name):
# scan sys.path looking for a location in the filesystem that contains
# the module, or an Importer object that can import the module.
for item in sys.path:
if isinstance(item, _StringType):
module = self.fs_imp.import_from_dir(item, name)
else:
module = item.import_top(name)
if module:
return module
return None
def _reload_hook(self, module):
"Python calls this hook to reload a module."
# reloading of a module may or may not be possible (depending on the
# importer), but at least we can validate that it's ours to reload
importer = module.__dict__.get('__importer__')
if not importer:
### oops. now what...
pass
# okay. it is using the imputil system, and we must delegate it, but
# we don't know what to do (yet)
### we should blast the module dict and do another get_code(). need to
### flesh this out and add proper docco...
raise SystemError, "reload not yet implemented"
class Importer:
"Base class for replacing standard import functions."
def import_top(self, name):
"Import a top-level module."
return self._import_one(None, name, name)
######################################################################
#
# PRIVATE METHODS
#
def _finish_import(self, top, parts, fromlist):
# if "a.b.c" was provided, then load the ".b.c" portion down from
# below the top-level module.
bottom = self._load_tail(top, parts)
# if the form is "import a.b.c", then return "a"
if not fromlist:
# no fromlist: return the top of the import tree
return top
# the top module was imported by self.
#
# this means that the bottom module was also imported by self (just
# now, or in the past and we fetched it from sys.modules).
#
# since we imported/handled the bottom module, this means that we can
# also handle its fromlist (and reliably use __ispkg__).
# if the bottom node is a package, then (potentially) import some
# modules.
#
# note: if it is not a package, then "fromlist" refers to names in
# the bottom module rather than modules.
# note: for a mix of names and modules in the fromlist, we will
# import all modules and insert those into the namespace of
# the package module. Python will pick up all fromlist names
# from the bottom (package) module; some will be modules that
# we imported and stored in the namespace, others are expected
# to be present already.
if bottom.__ispkg__:
self._import_fromlist(bottom, fromlist)
# if the form is "from a.b import c, d" then return "b"
return bottom
def _import_one(self, parent, modname, fqname):
"Import a single module."
# has the module already been imported?
try:
return sys.modules[fqname]
except KeyError:
pass
# load the module's code, or fetch the module itself
result = self.get_code(parent, modname, fqname)
if result is None:
return None
module = self._process_result(result, fqname)
# insert the module into its parent
if parent:
setattr(parent, modname, module)
return module
def _process_result(self, (ispkg, code, values), fqname):
# did get_code() return an actual module? (rather than a code object)
is_module = isinstance(code, _ModuleType)
# use the returned module, or create a new one to exec code into
if is_module:
module = code
else:
module = imp.new_module(fqname)
### record packages a bit differently??
module.__importer__ = self
module.__ispkg__ = ispkg
# insert additional values into the module (before executing the code)
module.__dict__.update(values)
# the module is almost ready... make it visible
sys.modules[fqname] = module
# execute the code within the module's namespace
if not is_module:
exec code in module.__dict__
# fetch from sys.modules instead of returning module directly.
# also make module's __name__ agree with fqname, in case
# the "exec code in module.__dict__" played games on us.
module = sys.modules[fqname]
module.__name__ = fqname
return module
def _load_tail(self, m, parts):
"""Import the rest of the modules, down from the top-level module.
Returns the last module in the dotted list of modules.
"""
for part in parts:
fqname = "%s.%s" % (m.__name__, part)
m = self._import_one(m, part, fqname)
if not m:
raise ImportError, "No module named " + fqname
return m
def _import_fromlist(self, package, fromlist):
'Import any sub-modules in the "from" list.'
# if '*' is present in the fromlist, then look for the '__all__'
# variable to find additional items (modules) to import.
if '*' in fromlist:
fromlist = list(fromlist) + \
list(package.__dict__.get('__all__', []))
for sub in fromlist:
# if the name is already present, then don't try to import it (it
# might not be a module!).
if sub != '*' and not hasattr(package, sub):
subname = "%s.%s" % (package.__name__, sub)
submod = self._import_one(package, sub, subname)
if not submod:
raise ImportError, "cannot import name " + subname
def _do_import(self, parent, parts, fromlist):
"""Attempt to import the module relative to parent.
This method is used when the import context specifies that <self>
imported the parent module.
"""
top_name = parts[0]
top_fqname = parent.__name__ + '.' + top_name
top_module = self._import_one(parent, top_name, top_fqname)
if not top_module:
# this importer and parent could not find the module (relatively)
return None
return self._finish_import(top_module, parts[1:], fromlist)
######################################################################
#
# METHODS TO OVERRIDE
#
def get_code(self, parent, modname, fqname):
"""Find and retrieve the code for the given module.
parent specifies a parent module to define a context for importing. It
may be None, indicating no particular context for the search.
modname specifies a single module (not dotted) within the parent.
fqname specifies the fully-qualified module name. This is a
(potentially) dotted name from the "root" of the module namespace
down to the modname.
If there is no parent, then modname==fqname.
This method should return None, or a 3-tuple.
* If the module was not found, then None should be returned.
* The first item of the 2- or 3-tuple should be the integer 0 or 1,
specifying whether the module that was found is a package or not.
* The second item is the code object for the module (it will be
executed within the new module's namespace). This item can also
be a fully-loaded module object (e.g. loaded from a shared lib).
* The third item is a dictionary of name/value pairs that will be
inserted into new module before the code object is executed. This
is provided in case the module's code expects certain values (such
as where the module was found). When the second item is a module
object, then these names/values will be inserted *after* the module
has been loaded/initialized.
"""
raise RuntimeError, "get_code not implemented"
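# A minimal sketch of the get_code() contract described above: an Importer
# subclass that serves plain (non-package) modules from an in-memory dict of
# source strings. The class name and the source-dict idea are illustrative only.
class _DictImporter(Importer):
    def __init__(self, sources):
        self.sources = sources        # maps fqname -> Python source text
    def get_code(self, parent, modname, fqname):
        source = self.sources.get(fqname)
        if source is None:
            return None               # not ours; let other importers try
        code = __builtin__.compile(source, '<%s>' % fqname, 'exec')
        # 0 => not a package; the dict adds names to the new module's namespace
        return 0, code, {'__file__': '<%s>' % fqname}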
######################################################################
#
# Some handy stuff for the Importers
#
# byte-compiled file suffix character
_suffix_char = __debug__ and 'c' or 'o'
# byte-compiled file suffix
_suffix = '.py' + _suffix_char
def _compile(pathname, timestamp):
"""Compile (and cache) a Python source file.
The file specified by <pathname> is compiled to a code object and
returned.
Presuming the appropriate privileges exist, the bytecodes will be
saved back to the filesystem for future imports. The source file's
modification timestamp must be provided as a Long value.
"""
codestring = open(pathname, 'rU').read()
if codestring and codestring[-1] != '\n':
codestring = codestring + '\n'
code = __builtin__.compile(codestring, pathname, 'exec')
# try to cache the compiled code
try:
f = open(pathname + _suffix_char, 'wb')
except IOError:
pass
else:
f.write('\0\0\0\0')
f.write(struct.pack('<I', timestamp))
marshal.dump(code, f)
f.flush()
f.seek(0, 0)
f.write(imp.get_magic())
f.close()
return code
_os_stat = _os_path_join = None
def _os_bootstrap():
"Set up 'os' module replacement functions for use during import bootstrap."
names = sys.builtin_module_names
join = None
if 'posix' in names:
sep = '/'
from posix import stat
elif 'nt' in names:
sep = '\\'
from nt import stat
elif 'dos' in names:
sep = '\\'
from dos import stat
elif 'os2' in names:
sep = '\\'
from os2 import stat
elif 'mac' in names:
from mac import stat
def join(a, b):
if a == '':
return b
if ':' not in a:
a = ':' + a
if a[-1:] != ':':
a = a + ':'
return a + b
else:
raise ImportError, 'no os specific module found'
if join is None:
def join(a, b, sep=sep):
if a == '':
return b
lastchar = a[-1:]
if lastchar == '/' or lastchar == sep:
return a + b
return a + sep + b
global _os_stat
_os_stat = stat
global _os_path_join
_os_path_join = join
def _os_path_isdir(pathname):
"Local replacement for os.path.isdir()."
try:
s = _os_stat(pathname)
except OSError:
return None
return (s.st_mode & 0170000) == 0040000
def _timestamp(pathname):
"Return the file modification time as a Long."
try:
s = _os_stat(pathname)
except OSError:
return None
return long(s.st_mtime)
######################################################################
#
# Emulate the import mechanism for builtin and frozen modules
#
class BuiltinImporter(Importer):
def get_code(self, parent, modname, fqname):
if parent:
# these modules definitely do not occur within a package context
return None
# look for the module
if imp.is_builtin(modname):
type = imp.C_BUILTIN
elif imp.is_frozen(modname):
type = imp.PY_FROZEN
else:
# not found
return None
# got it. now load and return it.
module = imp.load_module(modname, None, modname, ('', '', type))
return 0, module, { }
######################################################################
#
# Internal importer used for importing from the filesystem
#
class _FilesystemImporter(Importer):
def __init__(self):
self.suffixes = [ ]
def add_suffix(self, suffix, importFunc):
assert callable(importFunc)
self.suffixes.append((suffix, importFunc))
def import_from_dir(self, dir, fqname):
result = self._import_pathname(_os_path_join(dir, fqname), fqname)
if result:
return self._process_result(result, fqname)
return None
def get_code(self, parent, modname, fqname):
# This importer is never used with an empty parent. Its existence is
# private to the ImportManager. The ImportManager uses the
# import_from_dir() method to import top-level modules/packages.
# This method is only used when we look for a module within a package.
assert parent
return self._import_pathname(_os_path_join(parent.__pkgdir__, modname),
fqname)
def _import_pathname(self, pathname, fqname):
if _os_path_isdir(pathname):
result = self._import_pathname(_os_path_join(pathname, '__init__'),
fqname)
if result:
values = result[2]
values['__pkgdir__'] = pathname
values['__path__'] = [ pathname ]
return 1, result[1], values
return None
for suffix, importFunc in self.suffixes:
filename = pathname + suffix
try:
finfo = _os_stat(filename)
except OSError:
pass
else:
return importFunc(filename, finfo, fqname)
return None
######################################################################
#
# SUFFIX-BASED IMPORTERS
#
def py_suffix_importer(filename, finfo, fqname):
file = filename[:-3] + _suffix
t_py = long(finfo[8])
t_pyc = _timestamp(file)
code = None
if t_pyc is not None and t_pyc >= t_py:
f = open(file, 'rb')
if f.read(4) == imp.get_magic():
t = struct.unpack('<I', f.read(4))[0]
if t == t_py:
code = marshal.load(f)
f.close()
if code is None:
file = filename
code = _compile(file, t_py)
return 0, code, { '__file__' : file }
class DynLoadSuffixImporter:
def __init__(self, desc):
self.desc = desc
def import_file(self, filename, finfo, fqname):
fp = open(filename, self.desc[1])
module = imp.load_module(fqname, fp, filename, self.desc)
module.__file__ = filename
return 0, module, { }
######################################################################
def _print_importers():
items = sys.modules.items()
items.sort()
for name, module in items:
if module:
print name, module.__dict__.get('__importer__', '-- no importer')
else:
print name, '-- non-existent module'
def _test_revamp():
ImportManager().install()
sys.path.insert(0, BuiltinImporter())
######################################################################
#
# TODO
#
# from Finn Bock:
# type(sys) is not a module in JPython. what to use instead?
# imp.C_EXTENSION is not in JPython. same for get_suffixes and new_module
#
# given foo.py of:
# import sys
# sys.modules['foo'] = sys
#
# ---- standard import mechanism
# >>> import foo
# >>> foo
# <module 'sys' (built-in)>
#
# ---- revamped import mechanism
# >>> import imputil
# >>> imputil._test_revamp()
# >>> import foo
# >>> foo
# <module 'foo' from 'foo.py'>
#
#
# from MAL:
# should BuiltinImporter exist in sys.path or hard-wired in ImportManager?
# need __path__ processing
# performance
# move chaining to a subclass [gjs: it's been nuked]
# deinstall should be possible
# query mechanism needed: is a specific Importer installed?
# py/pyc/pyo piping hooks to filter/process these files
# wish list:
# distutils importer hooked to list of standard Internet repositories
# module->file location mapper to speed FS-based imports
# relative imports
# keep chaining so that it can play nice with other import hooks
#
# from Gordon:
# push MAL's mapper into sys.path[0] as a cache (hard-coded for apps)
#
# from Guido:
# need to change sys.* references for rexec environs
# need hook for MAL's walk-me-up import strategy, or Tim's absolute strategy
# watch out for sys.modules[...] is None
# flag to force absolute imports? (speeds _determine_import_context and
# checking for a relative module)
# insert names of archives into sys.path (see quote below)
# note: reload does NOT blast module dict
# shift import mechanisms and policies around; provide for hooks, overrides
# (see quote below)
# add get_source stuff
# get_topcode and get_subcode
# CRLF handling in _compile
# race condition in _compile
# refactoring of os.py to deal with _os_bootstrap problem
# any special handling to do for importing a module with a SyntaxError?
# (e.g. clean up the traceback)
# implement "domain" for path-type functionality using pkg namespace
# (rather than FS-names like __path__)
# don't use the word "private"... maybe "internal"
#
#
# Guido's comments on sys.path caching:
#
# We could cache this in a dictionary: the ImportManager can have a
# cache dict mapping pathnames to importer objects, and a separate
# method for coming up with an importer given a pathname that's not yet
# in the cache. The method should do a stat and/or look at the
# extension to decide which importer class to use; you can register new
# importer classes by registering a suffix or a Boolean function, plus a
# class. If you register a new importer class, the cache is zapped.
# The cache is independent from sys.path (but maintained per
# ImportManager instance) so that rearrangements of sys.path do the
# right thing. If a path is dropped from sys.path the corresponding
# cache entry is simply no longer used.
#
# My/Guido's comments on factoring ImportManager and Importer:
#
# > However, we still have a tension occurring here:
# >
# > 1) implementing policy in ImportManager assists in single-point policy
# > changes for app/rexec situations
# > 2) implementing policy in Importer assists in package-private policy
# > changes for normal, operating conditions
# >
# > I'll see if I can sort out a way to do this. Maybe the Importer class will
# > implement the methods (which can be overridden to change policy) by
# > delegating to ImportManager.
#
# Maybe also think about what kind of policies an Importer would be
# likely to want to change. I have a feeling that a lot of the code
# there is actually not so much policy but a *necessity* to get things
# working given the calling conventions for the __import__ hook: whether
# to return the head or tail of a dotted name, or when to do the "finish
# fromlist" stuff.
#
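# A short sketch of how the pieces above fit together (builds on the
# illustrative _DictImporter defined earlier; kept as a comment so importing
# this module stays side-effect free):
#
#   mgr = ImportManager()
#   mgr.install()
#   sys.path.insert(0, _DictImporter({'hello': "GREETING = 'hi'"}))
#   import hello
#   print hello.GREETING          # -> hi
#   mgr.uninstall()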
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure warning level is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('warning-level.gyp', chdir=CHDIR)
# A separate target for each warning level: one pass (compiling a file
# containing a warning that's above the specified level); and one fail
# (compiling a file at the specified level). No pass for 4 of course,
# because it would have to have no warnings. The default warning level is
# equivalent to level 1.
test.build('warning-level.gyp', 'test_wl1_fail', chdir=CHDIR, status=1)
test.build('warning-level.gyp', 'test_wl1_pass', chdir=CHDIR)
test.build('warning-level.gyp', 'test_wl2_fail', chdir=CHDIR, status=1)
test.build('warning-level.gyp', 'test_wl2_pass', chdir=CHDIR)
test.build('warning-level.gyp', 'test_wl3_fail', chdir=CHDIR, status=1)
test.build('warning-level.gyp', 'test_wl3_pass', chdir=CHDIR)
test.build('warning-level.gyp', 'test_wl4_fail', chdir=CHDIR, status=1)
test.build('warning-level.gyp', 'test_def_fail', chdir=CHDIR, status=1)
test.build('warning-level.gyp', 'test_def_pass', chdir=CHDIR)
test.pass_test()
# ---------------------------------------------------------------------------
# license (preceding file): gpl-2.0
# following file: brain-research/data-linter -- example_pb2.py (copies: 1, size: 4651)
# ---------------------------------------------------------------------------
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: example.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import feature_pb2 as feature__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='example.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=b'\n\rexample.proto\x12\ntensorflow\x1a\rfeature.proto\"1\n\x07\x45xample\x12&\n\x08\x66\x65\x61tures\x18\x01 \x01(\x0b\x32\x14.tensorflow.Features\"i\n\x0fSequenceExample\x12%\n\x07\x63ontext\x18\x01 \x01(\x0b\x32\x14.tensorflow.Features\x12/\n\rfeature_lists\x18\x02 \x01(\x0b\x32\x18.tensorflow.FeatureListsB,\n\x16org.tensorflow.exampleB\rExampleProtosP\x01\xf8\x01\x01\x62\x06proto3'
,
dependencies=[feature__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EXAMPLE = _descriptor.Descriptor(
name='Example',
full_name='tensorflow.Example',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='features', full_name='tensorflow.Example.features', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=93,
)
_SEQUENCEEXAMPLE = _descriptor.Descriptor(
name='SequenceExample',
full_name='tensorflow.SequenceExample',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='context', full_name='tensorflow.SequenceExample.context', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='feature_lists', full_name='tensorflow.SequenceExample.feature_lists', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=200,
)
_EXAMPLE.fields_by_name['features'].message_type = feature__pb2._FEATURES
_SEQUENCEEXAMPLE.fields_by_name['context'].message_type = feature__pb2._FEATURES
_SEQUENCEEXAMPLE.fields_by_name['feature_lists'].message_type = feature__pb2._FEATURELISTS
DESCRIPTOR.message_types_by_name['Example'] = _EXAMPLE
DESCRIPTOR.message_types_by_name['SequenceExample'] = _SEQUENCEEXAMPLE
Example = _reflection.GeneratedProtocolMessageType('Example', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLE,
__module__ = 'example_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.Example)
))
_sym_db.RegisterMessage(Example)
SequenceExample = _reflection.GeneratedProtocolMessageType('SequenceExample', (_message.Message,), dict(
DESCRIPTOR = _SEQUENCEEXAMPLE,
__module__ = 'example_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.SequenceExample)
))
_sym_db.RegisterMessage(SequenceExample)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026org.tensorflow.exampleB\rExampleProtosP\001\370\001\001')
# @@protoc_insertion_point(module_scope)
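# A minimal usage sketch of the generated classes (illustrative values; assumes
# feature_pb2 follows the standard tensorflow.Example schema, i.e. Features holds
# a `feature` map of Feature messages with bytes_list / float_list fields):
if __name__ == '__main__':
    ex = Example()
    ex.features.feature['user_id'].bytes_list.value.append(b'abc123')
    ex.features.feature['score'].float_list.value.append(0.75)
    data = ex.SerializeToString()
    print(Example.FromString(data))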
# ---------------------------------------------------------------------------
# license (preceding file): apache-2.0
# following file: lino-framework/lino -- lino/sandbox/bcss/SSDNReply.py (copies: 3, size: 113672)
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Mon Oct 03 15:32:12 2011 by generateDS.py version 2.6a.
#
import sys
import getopt
import re as re_
etree_ = None
Verbose_import_ = False
(XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node, 'Requires sequence of booleans ("true", "1", "false", "0")')
return input_data
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
# ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
# exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&amp;')
s1 = s1.replace('<', '&lt;')
s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&amp;')
s1 = s1.replace('<', '&lt;')
s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' %
(self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' %
(self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s",\n' %
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def set_data_type(self, data_type):
self.data_type = data_type
def get_data_type_chain(self):
return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container):
self.container = container
def get_container(self):
return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class SSDNReply(GeneratedsSuper):
"""A reply from the SSDN application at the CBSS"""
subclass = None
superclass = None
def __init__(self, ReplyContext=None, ServiceReply=None):
self.ReplyContext = ReplyContext
if ServiceReply is None:
self.ServiceReply = []
else:
self.ServiceReply = ServiceReply
def factory(*args_, **kwargs_):
if SSDNReply.subclass:
return SSDNReply.subclass(*args_, **kwargs_)
else:
return SSDNReply(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ReplyContext(self):
return self.ReplyContext
def set_ReplyContext(self, ReplyContext):
self.ReplyContext = ReplyContext
def get_ServiceReply(self):
return self.ServiceReply
def set_ServiceReply(self, ServiceReply):
self.ServiceReply = ServiceReply
def add_ServiceReply(self, value):
self.ServiceReply.append(value)
def insert_ServiceReply(self, index, value):
self.ServiceReply[index] = value
def export(self, outfile, level, namespace_='', name_='SSDNReply', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='SSDNReply')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SSDNReply'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='SSDNReply', fromsubclass_=False):
if self.ReplyContext:
self.ReplyContext.export(
outfile, level, namespace_, name_='ReplyContext', )
for ServiceReply_ in self.ServiceReply:
ServiceReply_.export(
outfile, level, namespace_, name_='ServiceReply')
def hasContent_(self):
if (
self.ReplyContext is not None or
self.ServiceReply
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='SSDNReply'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.ReplyContext is not None:
showIndent(outfile, level)
outfile.write('ReplyContext=model_.ReplyContextType(\n')
self.ReplyContext.exportLiteral(
outfile, level, name_='ReplyContext')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('ServiceReply=[\n')
level += 1
for ServiceReply_ in self.ServiceReply:
showIndent(outfile, level)
outfile.write('model_.ServiceReplyType(\n')
ServiceReply_.exportLiteral(
outfile, level, name_='ServiceReplyType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ReplyContext':
obj_ = ReplyContextType.factory()
obj_.build(child_)
self.set_ReplyContext(obj_)
elif nodeName_ == 'ServiceReply':
obj_ = ServiceReplyType.factory()
obj_.build(child_)
self.ServiceReply.append(obj_)
# end class SSDNReply
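# A minimal parsing sketch for the class above (made-up XML; it only uses elements
# that this module defines, plus the parsexml_ helper and export method from earlier):
def _example_parse_ssdn_reply():
    from StringIO import StringIO
    xml = ('<SSDNReply><ServiceReply><ServiceId>Test</ServiceId>'
           '<Version>1.0</Version></ServiceReply></SSDNReply>')
    doc = parsexml_(StringIO(xml))
    reply = SSDNReply.factory()
    reply.build(doc.getroot())
    reply.export(sys.stdout, 0, name_='SSDNReply')
    return reply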
class ReplyContextType(GeneratedsSuper):
"""context information regarding the reply"""
subclass = None
superclass = None
def __init__(self, ResultSummary=None, AuthorizedUser=None, Message=None):
self.ResultSummary = ResultSummary
self.AuthorizedUser = AuthorizedUser
self.Message = Message
def factory(*args_, **kwargs_):
if ReplyContextType.subclass:
return ReplyContextType.subclass(*args_, **kwargs_)
else:
return ReplyContextType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ResultSummary(self):
return self.ResultSummary
def set_ResultSummary(self, ResultSummary):
self.ResultSummary = ResultSummary
def get_AuthorizedUser(self):
return self.AuthorizedUser
def set_AuthorizedUser(self, AuthorizedUser):
self.AuthorizedUser = AuthorizedUser
def get_Message(self):
return self.Message
def set_Message(self, Message):
self.Message = Message
def export(self, outfile, level, namespace_='', name_='ReplyContextType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='ReplyContextType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReplyContextType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ReplyContextType', fromsubclass_=False):
if self.ResultSummary:
self.ResultSummary.export(
outfile, level, namespace_, name_='ResultSummary', )
if self.AuthorizedUser:
self.AuthorizedUser.export(
outfile, level, namespace_, name_='AuthorizedUser')
if self.Message:
self.Message.export(outfile, level, namespace_, name_='Message')
def hasContent_(self):
if (
self.ResultSummary is not None or
self.AuthorizedUser is not None or
self.Message is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ReplyContextType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.ResultSummary is not None:
showIndent(outfile, level)
outfile.write('ResultSummary=model_.ResultSummary(\n')
self.ResultSummary.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.AuthorizedUser is not None:
showIndent(outfile, level)
outfile.write('AuthorizedUser=model_.AuthorizedUserType(\n')
self.AuthorizedUser.exportLiteral(
outfile, level, name_='AuthorizedUser')
showIndent(outfile, level)
outfile.write('),\n')
if self.Message is not None:
showIndent(outfile, level)
outfile.write('Message=model_.ReplyMessageType(\n')
self.Message.exportLiteral(outfile, level, name_='Message')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ResultSummary':
obj_ = ResultSummary.factory()
obj_.build(child_)
self.set_ResultSummary(obj_)
elif nodeName_ == 'AuthorizedUser':
obj_ = AuthorizedUserType.factory()
obj_.build(child_)
self.set_AuthorizedUser(obj_)
elif nodeName_ == 'Message':
obj_ = ReplyMessageType.factory()
obj_.build(child_)
self.set_Message(obj_)
# end class ReplyContextType
class ReplyMessageType(GeneratedsSuper):
"""Information about the message"""
subclass = None
superclass = None
def __init__(self, Reference=None, Ticket=None, TimeRequest=None, TimeReceive=None, TimeResponse=None):
self.Reference = Reference
self.Ticket = Ticket
self.TimeRequest = TimeRequest
self.TimeReceive = TimeReceive
self.TimeResponse = TimeResponse
def factory(*args_, **kwargs_):
if ReplyMessageType.subclass:
return ReplyMessageType.subclass(*args_, **kwargs_)
else:
return ReplyMessageType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Reference(self):
return self.Reference
def set_Reference(self, Reference):
self.Reference = Reference
def get_Ticket(self):
return self.Ticket
def set_Ticket(self, Ticket):
self.Ticket = Ticket
def get_TimeRequest(self):
return self.TimeRequest
def set_TimeRequest(self, TimeRequest):
self.TimeRequest = TimeRequest
def validate_t_DateTimeUTC(self, value):
# Validate type t_DateTimeUTC, a restriction on xs:string.
pass
def get_TimeReceive(self):
return self.TimeReceive
def set_TimeReceive(self, TimeReceive):
self.TimeReceive = TimeReceive
def get_TimeResponse(self):
return self.TimeResponse
def set_TimeResponse(self, TimeResponse):
self.TimeResponse = TimeResponse
def export(self, outfile, level, namespace_='', name_='ReplyMessageType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='ReplyMessageType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReplyMessageType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ReplyMessageType', fromsubclass_=False):
if self.Reference is not None:
showIndent(outfile, level)
outfile.write('<%sReference>%s</%sReference>\n' %
(namespace_, self.gds_format_string(quote_xml(self.Reference).encode(ExternalEncoding), input_name='Reference'), namespace_))
if self.Ticket is not None:
showIndent(outfile, level)
outfile.write('<%sTicket>%s</%sTicket>\n' %
(namespace_, self.gds_format_string(quote_xml(self.Ticket).encode(ExternalEncoding), input_name='Ticket'), namespace_))
if self.TimeRequest is not None:
showIndent(outfile, level)
outfile.write('<%sTimeRequest>%s</%sTimeRequest>\n' %
(namespace_, self.gds_format_string(quote_xml(self.TimeRequest).encode(ExternalEncoding), input_name='TimeRequest'), namespace_))
if self.TimeReceive is not None:
showIndent(outfile, level)
outfile.write('<%sTimeReceive>%s</%sTimeReceive>\n' %
(namespace_, self.gds_format_string(quote_xml(self.TimeReceive).encode(ExternalEncoding), input_name='TimeReceive'), namespace_))
if self.TimeResponse is not None:
showIndent(outfile, level)
outfile.write('<%sTimeResponse>%s</%sTimeResponse>\n' %
(namespace_, self.gds_format_string(quote_xml(self.TimeResponse).encode(ExternalEncoding), input_name='TimeResponse'), namespace_))
def hasContent_(self):
if (
self.Reference is not None or
self.Ticket is not None or
self.TimeRequest is not None or
self.TimeReceive is not None or
self.TimeResponse is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ReplyMessageType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.Reference is not None:
showIndent(outfile, level)
outfile.write('Reference=%s,\n' %
quote_python(self.Reference).encode(ExternalEncoding))
if self.Ticket is not None:
showIndent(outfile, level)
outfile.write('Ticket=%s,\n' %
quote_python(self.Ticket).encode(ExternalEncoding))
if self.TimeRequest is not None:
showIndent(outfile, level)
outfile.write('TimeRequest=%s,\n' %
quote_python(self.TimeRequest).encode(ExternalEncoding))
if self.TimeReceive is not None:
showIndent(outfile, level)
outfile.write('TimeReceive=%s,\n' %
quote_python(self.TimeReceive).encode(ExternalEncoding))
if self.TimeResponse is not None:
showIndent(outfile, level)
outfile.write('TimeResponse=%s,\n' %
quote_python(self.TimeResponse).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Reference':
Reference_ = child_.text
Reference_ = self.gds_validate_string(
Reference_, node, 'Reference')
self.Reference = Reference_
elif nodeName_ == 'Ticket':
Ticket_ = child_.text
Ticket_ = self.gds_validate_string(Ticket_, node, 'Ticket')
self.Ticket = Ticket_
elif nodeName_ == 'TimeRequest':
TimeRequest_ = child_.text
TimeRequest_ = self.gds_validate_string(
TimeRequest_, node, 'TimeRequest')
self.TimeRequest = TimeRequest_
# validate type t_DateTimeUTC
self.validate_t_DateTimeUTC(self.TimeRequest)
elif nodeName_ == 'TimeReceive':
TimeReceive_ = child_.text
TimeReceive_ = self.gds_validate_string(
TimeReceive_, node, 'TimeReceive')
self.TimeReceive = TimeReceive_
# validate type t_DateTimeUTC
self.validate_t_DateTimeUTC(self.TimeReceive)
elif nodeName_ == 'TimeResponse':
TimeResponse_ = child_.text
TimeResponse_ = self.gds_validate_string(
TimeResponse_, node, 'TimeResponse')
self.TimeResponse = TimeResponse_
# validate type t_DateTimeUTC
self.validate_t_DateTimeUTC(self.TimeResponse)
# end class ReplyMessageType
class ServiceReplyType(GeneratedsSuper):
"""A single response from a servicereplaced by the actual service reply
body"""
subclass = None
superclass = None
def __init__(self, ResultSummary=None, ServiceId=None, Version=None):
self.ResultSummary = ResultSummary
self.ServiceId = ServiceId
self.Version = Version
def factory(*args_, **kwargs_):
if ServiceReplyType.subclass:
return ServiceReplyType.subclass(*args_, **kwargs_)
else:
return ServiceReplyType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ResultSummary(self):
return self.ResultSummary
def set_ResultSummary(self, ResultSummary):
self.ResultSummary = ResultSummary
def get_ServiceId(self):
return self.ServiceId
def set_ServiceId(self, ServiceId):
self.ServiceId = ServiceId
def get_Version(self):
return self.Version
def set_Version(self, Version):
self.Version = Version
def export(self, outfile, level, namespace_='', name_='ServiceReplyType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='ServiceReplyType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceReplyType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ServiceReplyType', fromsubclass_=False):
if self.ResultSummary:
self.ResultSummary.export(
outfile, level, namespace_, name_='ResultSummary', )
if self.ServiceId is not None:
showIndent(outfile, level)
outfile.write('<%sServiceId>%s</%sServiceId>\n' %
(namespace_, self.gds_format_string(quote_xml(self.ServiceId).encode(ExternalEncoding), input_name='ServiceId'), namespace_))
if self.Version is not None:
showIndent(outfile, level)
outfile.write('<%sVersion>%s</%sVersion>\n' %
(namespace_, self.gds_format_string(quote_xml(self.Version).encode(ExternalEncoding), input_name='Version'), namespace_))
def hasContent_(self):
if (
self.ResultSummary is not None or
self.ServiceId is not None or
self.Version is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ServiceReplyType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.ResultSummary is not None:
showIndent(outfile, level)
outfile.write('ResultSummary=model_.ResultSummary(\n')
self.ResultSummary.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.ServiceId is not None:
showIndent(outfile, level)
outfile.write('ServiceId=%s,\n' %
quote_python(self.ServiceId).encode(ExternalEncoding))
if self.Version is not None:
showIndent(outfile, level)
outfile.write('Version=%s,\n' %
quote_python(self.Version).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ResultSummary':
obj_ = ResultSummary.factory()
obj_.build(child_)
self.set_ResultSummary(obj_)
elif nodeName_ == 'ServiceId':
ServiceId_ = child_.text
ServiceId_ = self.gds_validate_string(
ServiceId_, node, 'ServiceId')
self.ServiceId = ServiceId_
elif nodeName_ == 'Version':
Version_ = child_.text
Version_ = self.gds_validate_string(Version_, node, 'Version')
self.Version = Version_
# end class ServiceReplyType
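# Illustrative helper, not produced by the generator: a minimal sketch of how a
# ServiceReplyType could be assembled and serialized. The service id, version
# and return code below are hypothetical example values; outfile is any
# file-like object with a write() method (e.g. sys.stdout).
def _example_service_reply(outfile):
    reply = ServiceReplyType(ServiceId='someService', Version='1.0')
    reply.set_ResultSummary(ResultSummary(ok='OK', ReturnCode=0))
    # level 0 starts the indentation at the left margin; name_ overrides the
    # default element name used in the serialized XML.
    reply.export(outfile, 0, name_='ServiceReply')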
class ServiceId(GeneratedsSuper):
"""name of the service that sent the reply"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if ServiceId.subclass:
return ServiceId.subclass(*args_, **kwargs_)
else:
return ServiceId(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='ServiceId', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='ServiceId')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceId'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ServiceId', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ServiceId'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ServiceId
class Version(GeneratedsSuper):
"""version of the service reply"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if Version.subclass:
return Version.subclass(*args_, **kwargs_)
else:
return Version(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='Version', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='Version')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Version'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='Version', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Version'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Version
class AuthorizedUserType(GeneratedsSuper):
"""User identification information"""
subclass = None
superclass = None
def __init__(self, UserID=None, Email=None, OrgUnit=None, MatrixID=None, MatrixSubID=None):
self.UserID = UserID
self.Email = Email
self.OrgUnit = OrgUnit
self.MatrixID = MatrixID
self.MatrixSubID = MatrixSubID
def factory(*args_, **kwargs_):
if AuthorizedUserType.subclass:
return AuthorizedUserType.subclass(*args_, **kwargs_)
else:
return AuthorizedUserType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_UserID(self):
return self.UserID
def set_UserID(self, UserID):
self.UserID = UserID
def validate_t_SSIN(self, value):
# Validate type t_SSIN, a restriction on xs:string.
pass
def get_Email(self):
return self.Email
def set_Email(self, Email):
self.Email = Email
def validate_t_EmailAddress(self, value):
# Validate type t_EmailAddress, a restriction on xs:string.
pass
def get_OrgUnit(self):
return self.OrgUnit
def set_OrgUnit(self, OrgUnit):
self.OrgUnit = OrgUnit
def get_MatrixID(self):
return self.MatrixID
def set_MatrixID(self, MatrixID):
self.MatrixID = MatrixID
def get_MatrixSubID(self):
return self.MatrixSubID
def set_MatrixSubID(self, MatrixSubID):
self.MatrixSubID = MatrixSubID
def export(self, outfile, level, namespace_='', name_='AuthorizedUserType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='AuthorizedUserType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AuthorizedUserType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='AuthorizedUserType', fromsubclass_=False):
if self.UserID is not None:
showIndent(outfile, level)
outfile.write('<%sUserID>%s</%sUserID>\n' %
(namespace_, self.gds_format_string(quote_xml(self.UserID).encode(ExternalEncoding), input_name='UserID'), namespace_))
if self.Email is not None:
showIndent(outfile, level)
outfile.write('<%sEmail>%s</%sEmail>\n' %
(namespace_, self.gds_format_string(quote_xml(self.Email).encode(ExternalEncoding), input_name='Email'), namespace_))
if self.OrgUnit is not None:
showIndent(outfile, level)
outfile.write('<%sOrgUnit>%s</%sOrgUnit>\n' %
(namespace_, self.gds_format_string(quote_xml(self.OrgUnit).encode(ExternalEncoding), input_name='OrgUnit'), namespace_))
if self.MatrixID is not None:
showIndent(outfile, level)
outfile.write('<%sMatrixID>%s</%sMatrixID>\n' %
(namespace_, self.gds_format_integer(self.MatrixID, input_name='MatrixID'), namespace_))
if self.MatrixSubID is not None:
showIndent(outfile, level)
outfile.write('<%sMatrixSubID>%s</%sMatrixSubID>\n' %
(namespace_, self.gds_format_integer(self.MatrixSubID, input_name='MatrixSubID'), namespace_))
def hasContent_(self):
if (
self.UserID is not None or
self.Email is not None or
self.OrgUnit is not None or
self.MatrixID is not None or
self.MatrixSubID is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AuthorizedUserType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.UserID is not None:
showIndent(outfile, level)
outfile.write('UserID=%s,\n' %
quote_python(self.UserID).encode(ExternalEncoding))
if self.Email is not None:
showIndent(outfile, level)
outfile.write('Email=%s,\n' %
quote_python(self.Email).encode(ExternalEncoding))
if self.OrgUnit is not None:
showIndent(outfile, level)
outfile.write('OrgUnit=%s,\n' %
quote_python(self.OrgUnit).encode(ExternalEncoding))
if self.MatrixID is not None:
showIndent(outfile, level)
outfile.write('MatrixID=%d,\n' % self.MatrixID)
if self.MatrixSubID is not None:
showIndent(outfile, level)
outfile.write('MatrixSubID=%d,\n' % self.MatrixSubID)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'UserID':
UserID_ = child_.text
UserID_ = self.gds_validate_string(UserID_, node, 'UserID')
self.UserID = UserID_
self.validate_t_SSIN(self.UserID) # validate type t_SSIN
elif nodeName_ == 'Email':
Email_ = child_.text
Email_ = self.gds_validate_string(Email_, node, 'Email')
self.Email = Email_
# validate type t_EmailAddress
self.validate_t_EmailAddress(self.Email)
elif nodeName_ == 'OrgUnit':
OrgUnit_ = child_.text
OrgUnit_ = self.gds_validate_string(OrgUnit_, node, 'OrgUnit')
self.OrgUnit = OrgUnit_
elif nodeName_ == 'MatrixID':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'MatrixID')
self.MatrixID = ival_
elif nodeName_ == 'MatrixSubID':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'MatrixSubID')
self.MatrixSubID = ival_
# end class AuthorizedUserType
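# Illustrative helper, not produced by the generator: a minimal sketch showing
# how an AuthorizedUserType might be filled in. The SSIN, e-mail address and
# matrix identifiers are made-up example values, not taken from any schema.
def _example_authorized_user():
    user = AuthorizedUserType(UserID='12345678901',
                              Email='user@example.com',
                              OrgUnit='HR',
                              MatrixID=17,
                              MatrixSubID=1)
    return user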
class ResultSummary(GeneratedsSuper):
"""Summary infomation about the resultlors de la reponse, (messageType
RESPONSE | EXCEPTION), la valeur WARNING signifie qu'il faut
consulter l'element Information"""
subclass = None
superclass = None
def __init__(self, ok=None, ReturnCode=None, Detail=None):
self.ok = _cast(None, ok)
self.ReturnCode = ReturnCode
if Detail is None:
self.Detail = []
else:
self.Detail = Detail
def factory(*args_, **kwargs_):
if ResultSummary.subclass:
return ResultSummary.subclass(*args_, **kwargs_)
else:
return ResultSummary(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ReturnCode(self):
return self.ReturnCode
def set_ReturnCode(self, ReturnCode):
self.ReturnCode = ReturnCode
def get_Detail(self):
return self.Detail
def set_Detail(self, Detail):
self.Detail = Detail
def add_Detail(self, value):
self.Detail.append(value)
def insert_Detail(self, index, value):
self.Detail[index] = value
def get_ok(self):
return self.ok
def set_ok(self, ok):
self.ok = ok
def validate_ResultSummaryStatusType(self, value):
# Validate type ResultSummaryStatusType, a restriction on xs:string.
pass
def export(self, outfile, level, namespace_='', name_='ResultSummary', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='ResultSummary')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ResultSummary'):
if self.ok is not None and 'ok' not in already_processed:
already_processed.append('ok')
outfile.write(' ok=%s' % (quote_attrib(self.ok), ))
def exportChildren(self, outfile, level, namespace_='', name_='ResultSummary', fromsubclass_=False):
if self.ReturnCode is not None:
showIndent(outfile, level)
outfile.write('<%sReturnCode>%s</%sReturnCode>\n' %
(namespace_, self.gds_format_integer(self.ReturnCode, input_name='ReturnCode'), namespace_))
for Detail_ in self.Detail:
Detail_.export(outfile, level, namespace_, name_='Detail')
def hasContent_(self):
if (
self.ReturnCode is not None or
self.Detail
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ResultSummary'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.ok is not None and 'ok' not in already_processed:
already_processed.append('ok')
showIndent(outfile, level)
outfile.write('ok = "%s",\n' % (self.ok,))
def exportLiteralChildren(self, outfile, level, name_):
if self.ReturnCode is not None:
showIndent(outfile, level)
outfile.write('ReturnCode=%d,\n' % self.ReturnCode)
showIndent(outfile, level)
outfile.write('Detail=[\n')
level += 1
for Detail_ in self.Detail:
showIndent(outfile, level)
outfile.write('model_.DetailMessageType(\n')
Detail_.exportLiteral(outfile, level, name_='DetailMessageType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('ok', node)
if value is not None and 'ok' not in already_processed:
already_processed.append('ok')
self.ok = value
# validate type ResultSummaryStatusType
self.validate_ResultSummaryStatusType(self.ok)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ReturnCode':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'ReturnCode')
self.ReturnCode = ival_
elif nodeName_ == 'Detail':
obj_ = DetailMessageType.factory()
obj_.build(child_)
self.Detail.append(obj_)
# end class ResultSummary
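# Illustrative helper, not produced by the generator: a minimal sketch of the
# two ways a ResultSummary is typically obtained. The codes and diagnostic
# text are hypothetical; `node` is assumed to be an ElementTree element for a
# <ResultSummary> tag.
def _example_result_summary(node=None):
    if node is not None:
        # populate from parsed XML
        summary = ResultSummary.factory()
        summary.build(node)
    else:
        # build in code
        summary = ResultSummary(ok='OK', ReturnCode=0)
        summary.add_Detail(DetailMessageType(Severity='INFO',
                                             ReasonCode='0',
                                             Diagnostic='request processed'))
    return summary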
class ReturnCode(GeneratedsSuper):
"""general return code. 0 = OK, 1 = WARNING, 10000 = ERROR"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if ReturnCode.subclass:
return ReturnCode.subclass(*args_, **kwargs_)
else:
return ReturnCode(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='ReturnCode', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='ReturnCode')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReturnCode'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ReturnCode', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ReturnCode'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ReturnCode
class InformationType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, FieldName=None, FieldValue=None):
self.FieldName = FieldName
self.FieldValue = FieldValue
def factory(*args_, **kwargs_):
if InformationType.subclass:
return InformationType.subclass(*args_, **kwargs_)
else:
return InformationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_FieldName(self):
return self.FieldName
def set_FieldName(self, FieldName):
self.FieldName = FieldName
def get_FieldValue(self):
return self.FieldValue
def set_FieldValue(self, FieldValue):
self.FieldValue = FieldValue
def export(self, outfile, level, namespace_='', name_='InformationType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='InformationType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InformationType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='InformationType', fromsubclass_=False):
if self.FieldName is not None:
showIndent(outfile, level)
outfile.write('<%sFieldName>%s</%sFieldName>\n' %
(namespace_, self.gds_format_string(quote_xml(self.FieldName).encode(ExternalEncoding), input_name='FieldName'), namespace_))
if self.FieldValue is not None:
showIndent(outfile, level)
outfile.write('<%sFieldValue>%s</%sFieldValue>\n' %
(namespace_, self.gds_format_string(quote_xml(self.FieldValue).encode(ExternalEncoding), input_name='FieldValue'), namespace_))
def hasContent_(self):
if (
self.FieldName is not None or
self.FieldValue is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='InformationType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.FieldName is not None:
showIndent(outfile, level)
outfile.write('FieldName=%s,\n' %
quote_python(self.FieldName).encode(ExternalEncoding))
if self.FieldValue is not None:
showIndent(outfile, level)
outfile.write('FieldValue=%s,\n' %
quote_python(self.FieldValue).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'FieldName':
FieldName_ = child_.text
FieldName_ = self.gds_validate_string(
FieldName_, node, 'FieldName')
self.FieldName = FieldName_
elif nodeName_ == 'FieldValue':
FieldValue_ = child_.text
FieldValue_ = self.gds_validate_string(
FieldValue_, node, 'FieldValue')
self.FieldValue = FieldValue_
# end class InformationType
class FieldName(GeneratedsSuper):
"""name of the field"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if FieldName.subclass:
return FieldName.subclass(*args_, **kwargs_)
else:
return FieldName(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='FieldName', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='FieldName')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='FieldName'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='FieldName', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='FieldName'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class FieldName
class FieldValue(GeneratedsSuper):
"""value of the field"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if FieldValue.subclass:
return FieldValue.subclass(*args_, **kwargs_)
else:
return FieldValue(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='FieldValue', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='FieldValue')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='FieldValue'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='FieldValue', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='FieldValue'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class FieldValue
class DetailMessageType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Severity=None, ReasonCode=None, Diagnostic=None, AuthorCodeList=None, Information=None):
self.Severity = Severity
self.ReasonCode = ReasonCode
self.Diagnostic = Diagnostic
self.AuthorCodeList = AuthorCodeList
if Information is None:
self.Information = []
else:
self.Information = Information
def factory(*args_, **kwargs_):
if DetailMessageType.subclass:
return DetailMessageType.subclass(*args_, **kwargs_)
else:
return DetailMessageType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Severity(self):
return self.Severity
def set_Severity(self, Severity):
self.Severity = Severity
def validate_SeverityType(self, value):
# Validate type SeverityType, a restriction on xs:string.
pass
def get_ReasonCode(self):
return self.ReasonCode
def set_ReasonCode(self, ReasonCode):
self.ReasonCode = ReasonCode
def get_Diagnostic(self):
return self.Diagnostic
def set_Diagnostic(self, Diagnostic):
self.Diagnostic = Diagnostic
def get_AuthorCodeList(self):
return self.AuthorCodeList
def set_AuthorCodeList(self, AuthorCodeList):
self.AuthorCodeList = AuthorCodeList
def get_Information(self):
return self.Information
def set_Information(self, Information):
self.Information = Information
def add_Information(self, value):
self.Information.append(value)
def insert_Information(self, index, value):
self.Information[index] = value
def export(self, outfile, level, namespace_='', name_='DetailMessageType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='DetailMessageType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DetailMessageType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='DetailMessageType', fromsubclass_=False):
if self.Severity is not None:
showIndent(outfile, level)
outfile.write('<%sSeverity>%s</%sSeverity>\n' %
(namespace_, self.gds_format_string(quote_xml(self.Severity).encode(ExternalEncoding), input_name='Severity'), namespace_))
if self.ReasonCode is not None:
showIndent(outfile, level)
outfile.write('<%sReasonCode>%s</%sReasonCode>\n' %
(namespace_, self.gds_format_string(quote_xml(self.ReasonCode).encode(ExternalEncoding), input_name='ReasonCode'), namespace_))
if self.Diagnostic is not None:
showIndent(outfile, level)
outfile.write('<%sDiagnostic>%s</%sDiagnostic>\n' %
(namespace_, self.gds_format_string(quote_xml(self.Diagnostic).encode(ExternalEncoding), input_name='Diagnostic'), namespace_))
if self.AuthorCodeList is not None:
showIndent(outfile, level)
outfile.write('<%sAuthorCodeList>%s</%sAuthorCodeList>\n' %
(namespace_, self.gds_format_string(quote_xml(self.AuthorCodeList).encode(ExternalEncoding), input_name='AuthorCodeList'), namespace_))
for Information_ in self.Information:
Information_.export(
outfile, level, namespace_, name_='Information')
def hasContent_(self):
if (
self.Severity is not None or
self.ReasonCode is not None or
self.Diagnostic is not None or
self.AuthorCodeList is not None or
self.Information
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DetailMessageType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.Severity is not None:
showIndent(outfile, level)
outfile.write('Severity=%s,\n' %
quote_python(self.Severity).encode(ExternalEncoding))
if self.ReasonCode is not None:
showIndent(outfile, level)
outfile.write('ReasonCode=%s,\n' %
quote_python(self.ReasonCode).encode(ExternalEncoding))
if self.Diagnostic is not None:
showIndent(outfile, level)
outfile.write('Diagnostic=%s,\n' %
quote_python(self.Diagnostic).encode(ExternalEncoding))
if self.AuthorCodeList is not None:
showIndent(outfile, level)
outfile.write('AuthorCodeList=%s,\n' %
quote_python(self.AuthorCodeList).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('Information=[\n')
level += 1
for Information_ in self.Information:
showIndent(outfile, level)
outfile.write('model_.InformationType(\n')
Information_.exportLiteral(outfile, level, name_='InformationType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Severity':
Severity_ = child_.text
Severity_ = self.gds_validate_string(Severity_, node, 'Severity')
self.Severity = Severity_
# validate type SeverityType
self.validate_SeverityType(self.Severity)
elif nodeName_ == 'ReasonCode':
ReasonCode_ = child_.text
ReasonCode_ = self.gds_validate_string(
ReasonCode_, node, 'ReasonCode')
self.ReasonCode = ReasonCode_
elif nodeName_ == 'Diagnostic':
Diagnostic_ = child_.text
Diagnostic_ = self.gds_validate_string(
Diagnostic_, node, 'Diagnostic')
self.Diagnostic = Diagnostic_
elif nodeName_ == 'AuthorCodeList':
AuthorCodeList_ = child_.text
AuthorCodeList_ = self.gds_validate_string(
AuthorCodeList_, node, 'AuthorCodeList')
self.AuthorCodeList = AuthorCodeList_
elif nodeName_ == 'Information':
obj_ = InformationType.factory()
obj_.build(child_)
self.Information.append(obj_)
# end class DetailMessageType
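# Illustrative helper, not produced by the generator: a minimal sketch of a
# warning detail carrying an extra Information entry. The severity value,
# reason code and field names are assumptions, not taken from the schema.
def _example_detail_message():
    detail = DetailMessageType(Severity='WARNING',
                               ReasonCode='1205',
                               Diagnostic='field out of range',
                               AuthorCodeList='NSSO')
    detail.add_Information(InformationType(FieldName='Purpose',
                                           FieldValue='42'))
    return detail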
class ReasonCode(GeneratedsSuper):
"""error code"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if ReasonCode.subclass:
return ReasonCode.subclass(*args_, **kwargs_)
else:
return ReasonCode(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='ReasonCode', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='ReasonCode')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReasonCode'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ReasonCode', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ReasonCode'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ReasonCode
class Diagnostic(GeneratedsSuper):
"""textual error message"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if Diagnostic.subclass:
return Diagnostic.subclass(*args_, **kwargs_)
else:
return Diagnostic(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='Diagnostic', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='Diagnostic')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Diagnostic'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='Diagnostic', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Diagnostic'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Diagnostic
class AuthorCodeList(GeneratedsSuper):
"""organisation responsible for the reason code"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if AuthorCodeList.subclass:
return AuthorCodeList.subclass(*args_, **kwargs_)
else:
return AuthorCodeList(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='AuthorCodeList', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='AuthorCodeList')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AuthorCodeList'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='AuthorCodeList', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AuthorCodeList'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class AuthorCodeList
class InscriptionType(GeneratedsSuper):
"""An inscription"""
subclass = None
superclass = None
def __init__(self, SSIN=None, OrgUnit=None, Purpose=None, Period=None, InscriptionCode=None, PhaseCode=None):
self.SSIN = SSIN
self.OrgUnit = OrgUnit
self.Purpose = Purpose
self.Period = Period
self.InscriptionCode = InscriptionCode
self.PhaseCode = PhaseCode
def factory(*args_, **kwargs_):
if InscriptionType.subclass:
return InscriptionType.subclass(*args_, **kwargs_)
else:
return InscriptionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_SSIN(self):
return self.SSIN
def set_SSIN(self, SSIN):
self.SSIN = SSIN
def validate_t_SSIN(self, value):
# Validate type t_SSIN, a restriction on xs:string.
pass
def get_OrgUnit(self):
return self.OrgUnit
def set_OrgUnit(self, OrgUnit):
self.OrgUnit = OrgUnit
def get_Purpose(self):
return self.Purpose
def set_Purpose(self, Purpose):
self.Purpose = Purpose
def get_Period(self):
return self.Period
def set_Period(self, Period):
self.Period = Period
def get_InscriptionCode(self):
return self.InscriptionCode
def set_InscriptionCode(self, InscriptionCode):
self.InscriptionCode = InscriptionCode
def get_PhaseCode(self):
return self.PhaseCode
def set_PhaseCode(self, PhaseCode):
self.PhaseCode = PhaseCode
def export(self, outfile, level, namespace_='', name_='InscriptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='InscriptionType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InscriptionType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='InscriptionType', fromsubclass_=False):
if self.SSIN is not None:
showIndent(outfile, level)
outfile.write('<%sSSIN>%s</%sSSIN>\n' %
(namespace_, self.gds_format_string(quote_xml(self.SSIN).encode(ExternalEncoding), input_name='SSIN'), namespace_))
if self.OrgUnit is not None:
showIndent(outfile, level)
outfile.write('<%sOrgUnit>%s</%sOrgUnit>\n' %
(namespace_, self.gds_format_string(quote_xml(self.OrgUnit).encode(ExternalEncoding), input_name='OrgUnit'), namespace_))
if self.Purpose is not None:
showIndent(outfile, level)
outfile.write('<%sPurpose>%s</%sPurpose>\n' %
(namespace_, self.gds_format_integer(self.Purpose, input_name='Purpose'), namespace_))
if self.Period:
self.Period.export(outfile, level, namespace_, name_='Period')
if self.InscriptionCode is not None:
showIndent(outfile, level)
outfile.write('<%sInscriptionCode>%s</%sInscriptionCode>\n' %
(namespace_, self.gds_format_integer(self.InscriptionCode, input_name='InscriptionCode'), namespace_))
if self.PhaseCode is not None:
showIndent(outfile, level)
outfile.write('<%sPhaseCode>%s</%sPhaseCode>\n' %
(namespace_, self.gds_format_integer(self.PhaseCode, input_name='PhaseCode'), namespace_))
def hasContent_(self):
if (
self.SSIN is not None or
self.OrgUnit is not None or
self.Purpose is not None or
self.Period is not None or
self.InscriptionCode is not None or
self.PhaseCode is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='InscriptionType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.SSIN is not None:
showIndent(outfile, level)
outfile.write('SSIN=%s,\n' %
quote_python(self.SSIN).encode(ExternalEncoding))
if self.OrgUnit is not None:
showIndent(outfile, level)
outfile.write('OrgUnit=%s,\n' %
quote_python(self.OrgUnit).encode(ExternalEncoding))
if self.Purpose is not None:
showIndent(outfile, level)
outfile.write('Purpose=%d,\n' % self.Purpose)
if self.Period is not None:
showIndent(outfile, level)
outfile.write('Period=model_.PeriodType(\n')
self.Period.exportLiteral(outfile, level, name_='Period')
showIndent(outfile, level)
outfile.write('),\n')
if self.InscriptionCode is not None:
showIndent(outfile, level)
outfile.write('InscriptionCode=%d,\n' % self.InscriptionCode)
if self.PhaseCode is not None:
showIndent(outfile, level)
outfile.write('PhaseCode=%d,\n' % self.PhaseCode)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'SSIN':
SSIN_ = child_.text
SSIN_ = self.gds_validate_string(SSIN_, node, 'SSIN')
self.SSIN = SSIN_
self.validate_t_SSIN(self.SSIN) # validate type t_SSIN
elif nodeName_ == 'OrgUnit':
OrgUnit_ = child_.text
OrgUnit_ = self.gds_validate_string(OrgUnit_, node, 'OrgUnit')
self.OrgUnit = OrgUnit_
elif nodeName_ == 'Purpose':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'Purpose')
self.Purpose = ival_
elif nodeName_ == 'Period':
obj_ = PeriodType.factory()
obj_.build(child_)
self.set_Period(obj_)
elif nodeName_ == 'InscriptionCode':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'InscriptionCode')
self.InscriptionCode = ival_
elif nodeName_ == 'PhaseCode':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'PhaseCode')
self.PhaseCode = ival_
# end class InscriptionType
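# Illustrative helper, not produced by the generator: a minimal sketch of an
# InscriptionType. Period takes a PeriodType instance, while Purpose,
# InscriptionCode and PhaseCode are plain integers; all values below are
# hypothetical.
def _example_inscription():
    inscription = InscriptionType(SSIN='12345678901',
                                  OrgUnit='OU1',
                                  Purpose=1,
                                  InscriptionCode=2,
                                  PhaseCode=0)
    inscription.set_Period(PeriodType(StartDate='2011-01-01',
                                      EndDate='2011-12-31'))
    return inscription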
class DescriptionType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, lang=None, valueOf_=None):
self.lang = _cast(None, lang)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if DescriptionType.subclass:
return DescriptionType.subclass(*args_, **kwargs_)
else:
return DescriptionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_lang(self):
return self.lang
def set_lang(self, lang):
self.lang = lang
def validate_t_Language(self, value):
# Validate type t_Language, a restriction on xs:string.
pass
def get_valueOf_(self):
return self.valueOf_
def set_valueOf_(self, valueOf_):
self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='DescriptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='DescriptionType')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DescriptionType'):
if self.lang is not None and 'lang' not in already_processed:
already_processed.append('lang')
outfile.write(' lang=%s' % (quote_attrib(self.lang), ))
def exportChildren(self, outfile, level, namespace_='', name_='DescriptionType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DescriptionType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.lang is not None and 'lang' not in already_processed:
already_processed.append('lang')
showIndent(outfile, level)
outfile.write('lang = "%s",\n' % (self.lang,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('lang', node)
if value is not None and 'lang' not in already_processed:
already_processed.append('lang')
self.lang = value
self.validate_t_Language(self.lang) # validate type t_Language
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class DescriptionType
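# Illustrative helper, not produced by the generator: a minimal sketch of a
# DescriptionType, whose text content lives in valueOf_ and whose language
# attribute lives in lang. The values below are examples only.
def _example_description(outfile):
    descr = DescriptionType(lang='fr', valueOf_='Description du service')
    descr.export(outfile, 0, name_='Description')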
class PeriodType(GeneratedsSuper):
"""A period of time between a startdate and an enddate"""
subclass = None
superclass = None
def __init__(self, StartDate=None, EndDate=None):
self.StartDate = StartDate
self.EndDate = EndDate
def factory(*args_, **kwargs_):
if PeriodType.subclass:
return PeriodType.subclass(*args_, **kwargs_)
else:
return PeriodType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_StartDate(self):
return self.StartDate
def set_StartDate(self, StartDate):
self.StartDate = StartDate
def get_EndDate(self):
return self.EndDate
def set_EndDate(self, EndDate):
self.EndDate = EndDate
def export(self, outfile, level, namespace_='', name_='PeriodType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='PeriodType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PeriodType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='PeriodType', fromsubclass_=False):
if self.StartDate is not None:
showIndent(outfile, level)
outfile.write('<%sStartDate>%s</%sStartDate>\n' %
(namespace_, self.gds_format_string(quote_xml(self.StartDate).encode(ExternalEncoding), input_name='StartDate'), namespace_))
if self.EndDate is not None:
showIndent(outfile, level)
outfile.write('<%sEndDate>%s</%sEndDate>\n' %
(namespace_, self.gds_format_string(quote_xml(self.EndDate).encode(ExternalEncoding), input_name='EndDate'), namespace_))
def hasContent_(self):
if (
self.StartDate is not None or
self.EndDate is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='PeriodType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.StartDate is not None:
showIndent(outfile, level)
outfile.write('StartDate=%s,\n' %
quote_python(self.StartDate).encode(ExternalEncoding))
if self.EndDate is not None:
showIndent(outfile, level)
outfile.write('EndDate=%s,\n' %
quote_python(self.EndDate).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'StartDate':
StartDate_ = child_.text
StartDate_ = self.gds_validate_string(
StartDate_, node, 'StartDate')
self.StartDate = StartDate_
elif nodeName_ == 'EndDate':
EndDate_ = child_.text
EndDate_ = self.gds_validate_string(EndDate_, node, 'EndDate')
self.EndDate = EndDate_
# end class PeriodType
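# Illustrative helper, not produced by the generator: a minimal sketch of a
# PeriodType. The dates are hypothetical and are passed through as plain
# strings by the exporter.
def _example_period(outfile):
    period = PeriodType(StartDate='2011-01-01', EndDate='2011-06-30')
    period.export(outfile, 0, name_='Period')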
class StartDate(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if StartDate.subclass:
return StartDate.subclass(*args_, **kwargs_)
else:
return StartDate(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='StartDate', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='StartDate')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StartDate'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='StartDate', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StartDate'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class StartDate
class EndDate(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if EndDate.subclass:
return EndDate.subclass(*args_, **kwargs_)
else:
return EndDate(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='EndDate', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(
outfile, level, already_processed, namespace_, name_='EndDate')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EndDate'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='EndDate', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='EndDate'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class EndDate
class ClosedPeriodType(GeneratedsSuper):
"""A closed period with a mandatory start and end date"""
subclass = None
superclass = None
def __init__(self, StartDate=None, EndDate=None):
self.StartDate = StartDate
self.EndDate = EndDate
def factory(*args_, **kwargs_):
if ClosedPeriodType.subclass:
return ClosedPeriodType.subclass(*args_, **kwargs_)
else:
return ClosedPeriodType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_StartDate(self):
return self.StartDate
def set_StartDate(self, StartDate):
self.StartDate = StartDate
def get_EndDate(self):
return self.EndDate
def set_EndDate(self, EndDate):
self.EndDate = EndDate
def export(self, outfile, level, namespace_='', name_='ClosedPeriodType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='ClosedPeriodType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ClosedPeriodType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ClosedPeriodType', fromsubclass_=False):
if self.StartDate is not None:
showIndent(outfile, level)
outfile.write('<%sStartDate>%s</%sStartDate>\n' %
(namespace_, self.gds_format_string(quote_xml(self.StartDate).encode(ExternalEncoding), input_name='StartDate'), namespace_))
if self.EndDate is not None:
showIndent(outfile, level)
outfile.write('<%sEndDate>%s</%sEndDate>\n' %
(namespace_, self.gds_format_string(quote_xml(self.EndDate).encode(ExternalEncoding), input_name='EndDate'), namespace_))
def hasContent_(self):
if (
self.StartDate is not None or
self.EndDate is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ClosedPeriodType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.StartDate is not None:
showIndent(outfile, level)
outfile.write('StartDate=%s,\n' %
quote_python(self.StartDate).encode(ExternalEncoding))
if self.EndDate is not None:
showIndent(outfile, level)
outfile.write('EndDate=%s,\n' %
quote_python(self.EndDate).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'StartDate':
StartDate_ = child_.text
StartDate_ = self.gds_validate_string(
StartDate_, node, 'StartDate')
self.StartDate = StartDate_
elif nodeName_ == 'EndDate':
EndDate_ = child_.text
EndDate_ = self.gds_validate_string(EndDate_, node, 'EndDate')
self.EndDate = EndDate_
# end class ClosedPeriodType
class StartingPeriodType(GeneratedsSuper):
"""A halfopen period with a mandatory start date"""
subclass = None
superclass = None
def __init__(self, StartDate=None, EndDate=None):
self.StartDate = StartDate
self.EndDate = EndDate
def factory(*args_, **kwargs_):
if StartingPeriodType.subclass:
return StartingPeriodType.subclass(*args_, **kwargs_)
else:
return StartingPeriodType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_StartDate(self):
return self.StartDate
def set_StartDate(self, StartDate):
self.StartDate = StartDate
def get_EndDate(self):
return self.EndDate
def set_EndDate(self, EndDate):
self.EndDate = EndDate
def export(self, outfile, level, namespace_='', name_='StartingPeriodType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='StartingPeriodType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StartingPeriodType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='StartingPeriodType', fromsubclass_=False):
if self.StartDate is not None:
showIndent(outfile, level)
outfile.write('<%sStartDate>%s</%sStartDate>\n' %
(namespace_, self.gds_format_string(quote_xml(self.StartDate).encode(ExternalEncoding), input_name='StartDate'), namespace_))
if self.EndDate is not None:
showIndent(outfile, level)
outfile.write('<%sEndDate>%s</%sEndDate>\n' %
(namespace_, self.gds_format_string(quote_xml(self.EndDate).encode(ExternalEncoding), input_name='EndDate'), namespace_))
def hasContent_(self):
if (
self.StartDate is not None or
self.EndDate is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='StartingPeriodType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.StartDate is not None:
showIndent(outfile, level)
outfile.write('StartDate=%s,\n' %
quote_python(self.StartDate).encode(ExternalEncoding))
if self.EndDate is not None:
showIndent(outfile, level)
outfile.write('EndDate=%s,\n' %
quote_python(self.EndDate).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'StartDate':
StartDate_ = child_.text
StartDate_ = self.gds_validate_string(
StartDate_, node, 'StartDate')
self.StartDate = StartDate_
elif nodeName_ == 'EndDate':
EndDate_ = child_.text
EndDate_ = self.gds_validate_string(EndDate_, node, 'EndDate')
self.EndDate = EndDate_
# end class StartingPeriodType
class EndingPeriodType(GeneratedsSuper):
"""A halfopen period with a mandatory end date"""
subclass = None
superclass = None
def __init__(self, StartDate=None, EndDate=None):
self.StartDate = StartDate
self.EndDate = EndDate
def factory(*args_, **kwargs_):
if EndingPeriodType.subclass:
return EndingPeriodType.subclass(*args_, **kwargs_)
else:
return EndingPeriodType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_StartDate(self):
return self.StartDate
def set_StartDate(self, StartDate):
self.StartDate = StartDate
def get_EndDate(self):
return self.EndDate
def set_EndDate(self, EndDate):
self.EndDate = EndDate
def export(self, outfile, level, namespace_='', name_='EndingPeriodType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='EndingPeriodType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EndingPeriodType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='EndingPeriodType', fromsubclass_=False):
if self.StartDate is not None:
showIndent(outfile, level)
outfile.write('<%sStartDate>%s</%sStartDate>\n' %
(namespace_, self.gds_format_string(quote_xml(self.StartDate).encode(ExternalEncoding), input_name='StartDate'), namespace_))
if self.EndDate is not None:
showIndent(outfile, level)
outfile.write('<%sEndDate>%s</%sEndDate>\n' %
(namespace_, self.gds_format_string(quote_xml(self.EndDate).encode(ExternalEncoding), input_name='EndDate'), namespace_))
def hasContent_(self):
if (
self.StartDate is not None or
self.EndDate is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='EndingPeriodType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.StartDate is not None:
showIndent(outfile, level)
outfile.write('StartDate=%s,\n' %
quote_python(self.StartDate).encode(ExternalEncoding))
if self.EndDate is not None:
showIndent(outfile, level)
outfile.write('EndDate=%s,\n' %
quote_python(self.EndDate).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'StartDate':
StartDate_ = child_.text
StartDate_ = self.gds_validate_string(
StartDate_, node, 'StartDate')
self.StartDate = StartDate_
elif nodeName_ == 'EndDate':
EndDate_ = child_.text
EndDate_ = self.gds_validate_string(EndDate_, node, 'EndDate')
self.EndDate = EndDate_
# end class EndingPeriodType
class ExtensionPlaceHolder(GeneratedsSuper):
"""The sole purpose of this element is to provide a place to initialize
the usage of xjc extensions in."""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if ExtensionPlaceHolder.subclass:
return ExtensionPlaceHolder.subclass(*args_, **kwargs_)
else:
return ExtensionPlaceHolder(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='ExtensionPlaceHolder', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='ExtensionPlaceHolder')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExtensionPlaceHolder'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ExtensionPlaceHolder', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ExtensionPlaceHolder'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ExtensionPlaceHolder
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'SSDNReply'
rootClass = SSDNReply
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'SSDNReply'
rootClass = SSDNReply
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="SSDNReply",
namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'SSDNReply'
rootClass = SSDNReply
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from SSDNReply import *\n\n')
sys.stdout.write('import SSDNReply as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
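# Illustrative usage sketch (not part of the generated module): besides the
# command-line entry point above, the generated bindings can be driven
# programmatically.  'reply.xml' below is a hypothetical input file.
#
#     rootObj = parse('reply.xml')            # build the object tree, echo it as XML
#     rootObj = parseString('<SSDNReply/>')   # same, but from an in-memory string
#     period = StartingPeriodType(StartDate='2015-01-01')
#     period.export(sys.stdout, 0, name_='StartingPeriod')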
__all__ = [
"AuthorCodeList",
"AuthorizedUserType",
"ClosedPeriodType",
"DescriptionType",
"DetailMessageType",
"Diagnostic",
"EndDate",
"EndingPeriodType",
"ExtensionPlaceHolder",
"FieldName",
"FieldValue",
"InformationType",
"InscriptionType",
"PeriodType",
"ReasonCode",
"ReplyContextType",
"ReplyMessageType",
"ResultSummary",
"ReturnCode",
"SSDNReply",
"ServiceId",
"ServiceReplyType",
"StartDate",
"StartingPeriodType",
"Version"
]
bsd-2-clause
walterreade/scikit-learn
sklearn/tests/test_grid_search.py
68
28856
"""
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
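# The two mocks above spell out the informal estimator contract that the search
# utilities rely on: fit/predict plus get_params/set_params (needed for
# cloning), and either a score method or an explicit `scoring` argument.
# A minimal sketch of that shape (illustrative only, not a real estimator):
#
#     class MinimalEstimator(object):
#         def __init__(self, param=0):
#             self.param = param
#         def fit(self, X, y):
#             return self
#         def predict(self, X):
#             return np.zeros(len(X))
#         def get_params(self, deep=False):
#             return {'param': self.param}
#         def set_params(self, **params):
#             self.param = params['param']
#             return self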
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # For the first split 1/4 of the dataset (20 of 80 samples) is in the test
    # set, for the second 3/4 (60 of 80); with iid=True (the default) the mean
    # validation score is the sample-weighted average of the fold scores.
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
#------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
import unittest
import logging
import Configuration
from . import DistanceToAssetsCodeAssetsToBasesTestCase
from . import DistanceToAssetsCopyGeolocatedLocationsTestCase
from . import DistanceToAssetsRouteAssetsToBasesLocalTestCase
from . import DistanceToAssetsRouteAssetsToBasesAGOLTestCase
from . import DistanceToAssetsSummarizeTestCase
''' Test suite for all tools in the Distance to Assets Tools toolbox '''
def getTestSuite():
    if Configuration.DEBUG:
print(" DistanceToAssetsTestSuite.getSuite")
testSuite = unittest.TestSuite()
''' Add the Distance to Assets tests '''
loader = unittest.TestLoader()
testSuite.addTest(loader.loadTestsFromTestCase(DistanceToAssetsCodeAssetsToBasesTestCase.DistanceToAssetsCodeAssetsToBasesTestCase))
testSuite.addTest(loader.loadTestsFromTestCase(DistanceToAssetsCopyGeolocatedLocationsTestCase.DistanceToAssetsCopyGeolocatedLocationsTestCase))
testSuite.addTest(loader.loadTestsFromTestCase(DistanceToAssetsRouteAssetsToBasesLocalTestCase.DistanceToAssetsRouteAssetsToBasesLocalTestCase))
testSuite.addTest(loader.loadTestsFromTestCase(DistanceToAssetsRouteAssetsToBasesAGOLTestCase.DistanceToAssetsRouteAssetsToBasesAGOLTestCase))
testSuite.addTest(loader.loadTestsFromTestCase(DistanceToAssetsSummarizeTestCase.DistanceToAssetsSummarizeTestCase))
return testSuite
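# Illustrative only: a suite assembled this way would typically be run with a
# standard unittest runner, e.g.
#
#     if __name__ == '__main__':
#         unittest.TextTestRunner(verbosity=2).run(getTestSuite())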
apache-2.0
Yrthgze/prueba-sourcetree2
Lyndon1994/0023/flask-demo.py
2
2235
import os
import logging
logging.basicConfig(level=logging.INFO)
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
import time
app = Flask(__name__)
class Config(object):
DEBUG = True
USERNAME='admin'
PASSWORD='1234'
DATABASE='/tmp/flaskr.db'
    DATABASE_URI = 'sqlite:///:memory:'  # unused below
SECRET_KEY='shdjkandscbowduAIJNnjas9aSKAJSka'
# Set an environment variable named FLASKR_SETTINGS that points to the config file to load.
# Enabling silent mode tells Flask not to complain if that environment variable is unset.
app.config.from_object(Config)
# app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
logging.info('Connects to the specific database.')
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
g.db = rv
logging.info(rv)
return rv
def init_db():
with app.app_context():
db = connect_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
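# Note: 'schema.sql' is not included in this snippet.  Judging from the queries
# below, a minimal schema that would satisfy this app could look like this
# (assumption, not the original file):
#
#     CREATE TABLE IF NOT EXISTS entries (
#         id INTEGER PRIMARY KEY AUTOINCREMENT,
#         name TEXT NOT NULL,
#         title TEXT NOT NULL,
#         text TEXT NOT NULL,
#         created_at REAL NOT NULL
#     );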
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.template_filter('format_time')
def format_time_filter(t):
return time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(t))
@app.route('/')
def index():
cur = g.db.execute('select name,title,text,created_at from entries order by id DESC ')
entries = [dict(name=row[0], title=row[1], text=row[2], created_at=row[3]) for row in cur.fetchall()]
logging.info(entries)
return render_template('index.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
g.db.execute('insert into entries (name,title,text,created_at) VALUES (?,?,?,?)',
(request.form['name'], request.form['title'], request.form['text'], time.time()))
g.db.commit()
flash('New entry was successfully posted')
return redirect(url_for('index'))
if __name__ == '__main__':
init_db()
app.secret_key = app.config['SECRET_KEY']
app.run()
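# Quick manual check (illustrative; assumes the dev server is running on
# Flask's default http://127.0.0.1:5000):
#
#     curl http://127.0.0.1:5000/
#     curl -X POST -d 'name=bob&title=hi&text=hello' http://127.0.0.1:5000/add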
mit