| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
| 
	hbhdytf/mac2 | 
	build/lib.linux-x86_64-2.7/swift/common/middleware/account_quotas.py | 
	39 | 
	5676 | 
	# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
``account_quotas`` is middleware that blocks write requests (PUT, POST) if a
given account quota (in bytes) is exceeded, while DELETE requests are still
allowed.
``account_quotas`` uses the ``x-account-meta-quota-bytes`` metadata entry to
store the quota. Write requests to this metadata entry are only permitted for
resellers. There is no quota limit if ``x-account-meta-quota-bytes`` is not
set.
The ``account_quotas`` middleware should be added to the pipeline in your
``/etc/swift/proxy-server.conf`` file just after any auth middleware.
For example::
    [pipeline:main]
    pipeline = catch_errors cache tempauth account_quotas proxy-server
    [filter:account_quotas]
    use = egg:swift#account_quotas
To set the quota on an account::
    swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \
post -m quota-bytes:10000
Remove the quota::
    swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \
post -m quota-bytes:
The same limitations apply to account quotas as to container quotas. For
example, when uploading an object without a content-length header, the proxy
server doesn't know the final size of the object being uploaded, so the
upload is allowed as long as the current account size is within the quota.
Due to eventual consistency, further uploads might be possible until the
account size has been updated.
"""
from swift.common.constraints import check_copy_from_header
from swift.common.swob import HTTPForbidden, HTTPBadRequest, \
    HTTPRequestEntityTooLarge, wsgify
from swift.common.utils import register_swift_info
from swift.proxy.controllers.base import get_account_info, get_object_info
class AccountQuotaMiddleware(object):
    """Account quota middleware
    See above for a full description.
    """
    def __init__(self, app, *args, **kwargs):
        self.app = app
    @wsgify
    def __call__(self, request):
        if request.method not in ("POST", "PUT", "COPY"):
            return self.app
        try:
            ver, account, container, obj = request.split_path(
                2, 4, rest_with_last=True)
        except ValueError:
            return self.app
        if not container:
            # account request, so we pay attention to the quotas
            new_quota = request.headers.get(
                'X-Account-Meta-Quota-Bytes')
            remove_quota = request.headers.get(
                'X-Remove-Account-Meta-Quota-Bytes')
        else:
            # container or object request; even if the quota headers are set
            # in the request, they're meaningless
            new_quota = remove_quota = None
        if remove_quota:
            new_quota = 0    # X-Remove dominates if both are present
        if request.environ.get('reseller_request') is True:
            if new_quota and not new_quota.isdigit():
                return HTTPBadRequest()
            return self.app
        # deny quota set for non-reseller
        if new_quota is not None:
            return HTTPForbidden()
        if request.method == "POST" or not obj:
            return self.app
        if request.method == 'COPY':
            copy_from = container + '/' + obj
        else:
            if 'x-copy-from' in request.headers:
                src_cont, src_obj = check_copy_from_header(request)
                copy_from = "%s/%s" % (src_cont, src_obj)
            else:
                copy_from = None
        content_length = (request.content_length or 0)
        account_info = get_account_info(request.environ, self.app)
        if not account_info or not account_info['bytes']:
            return self.app
        try:
            quota = int(account_info['meta'].get('quota-bytes', -1))
        except ValueError:
            return self.app
        if quota < 0:
            return self.app
        if copy_from:
            path = '/' + ver + '/' + account + '/' + copy_from
            object_info = get_object_info(request.environ, self.app, path)
            if not object_info or not object_info['length']:
                content_length = 0
            else:
                content_length = int(object_info['length'])
        new_size = int(account_info['bytes']) + content_length
        if quota < new_size:
            resp = HTTPRequestEntityTooLarge(body='Upload exceeds quota.')
            if 'swift.authorize' in request.environ:
                orig_authorize = request.environ['swift.authorize']
                def reject_authorize(*args, **kwargs):
                    aresp = orig_authorize(*args, **kwargs)
                    if aresp:
                        return aresp
                    return resp
                request.environ['swift.authorize'] = reject_authorize
            else:
                return resp
        return self.app
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    register_swift_info('account_quotas')
    def account_quota_filter(app):
        return AccountQuotaMiddleware(app)
    return account_quota_filter
 | 
	apache-2.0 | 
| 
	druuu/django | 
	django/contrib/flatpages/templatetags/flatpages.py | 
	472 | 
	3632 | 
	from django import template
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
register = template.Library()
class FlatpageNode(template.Node):
    def __init__(self, context_name, starts_with=None, user=None):
        self.context_name = context_name
        if starts_with:
            self.starts_with = template.Variable(starts_with)
        else:
            self.starts_with = None
        if user:
            self.user = template.Variable(user)
        else:
            self.user = None
    def render(self, context):
        if 'request' in context:
            site_pk = get_current_site(context['request']).pk
        else:
            site_pk = settings.SITE_ID
        flatpages = FlatPage.objects.filter(sites__id=site_pk)
        # If a prefix was specified, add a filter
        if self.starts_with:
            flatpages = flatpages.filter(
                url__startswith=self.starts_with.resolve(context))
        # If the provided user is not authenticated, or no user
        # was provided, filter the list to only public flatpages.
        if self.user:
            user = self.user.resolve(context)
            if not user.is_authenticated():
                flatpages = flatpages.filter(registration_required=False)
        else:
            flatpages = flatpages.filter(registration_required=False)
        context[self.context_name] = flatpages
        return ''
@register.tag
def get_flatpages(parser, token):
    """
    Retrieves all flatpage objects available for the current site and
    visible to the specific user (or visible to all users if no user is
    specified). Populates the template context with them in a variable
    whose name is defined by the ``as`` clause.
    An optional ``for`` clause can be used to control the user whose
    permissions are to be used in determining which flatpages are visible.
    An optional argument, ``starts_with``, can be applied to limit the
    returned flatpages to those beginning with a particular base URL.
    This argument can be passed as a variable or a string, as it resolves
    from the template context.
    Syntax::
        {% get_flatpages ['url_starts_with'] [for user] as context_name %}
    Example usage::
        {% get_flatpages as flatpages %}
        {% get_flatpages for someuser as flatpages %}
        {% get_flatpages '/about/' as about_pages %}
        {% get_flatpages prefix as about_pages %}
        {% get_flatpages '/about/' for someuser as about_pages %}
    """
    bits = token.split_contents()
    syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
                      "['url_starts_with'] [for user] as context_name" %
                      dict(tag_name=bits[0]))
    # Must have 3-6 bits in the tag
    if len(bits) >= 3 and len(bits) <= 6:
        # An even number of bits means a prefix was supplied
        if len(bits) % 2 == 0:
            prefix = bits[1]
        else:
            prefix = None
        # The very last bit must be the context name
        if bits[-2] != 'as':
            raise template.TemplateSyntaxError(syntax_message)
        context_name = bits[-1]
        # If there are 5 or 6 bits, a user was specified
        if len(bits) >= 5:
            if bits[-4] != 'for':
                raise template.TemplateSyntaxError(syntax_message)
            user = bits[-3]
        else:
            user = None
        return FlatpageNode(context_name, starts_with=prefix, user=user)
    else:
        raise template.TemplateSyntaxError(syntax_message)
 | 
	bsd-3-clause | 
| 
	oaklen/Shelf | 
	node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py | 
	1558 | 
	4945 | 
	# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
  """ Writes the XML content to disk, touching the file only if it has changed.
  Visual Studio files have a lot of pre-defined structures.  This function makes
  it easy to represent these structures as Python data structures, instead of
  having to create a lot of function calls.
  Each XML element of the content is represented as a list composed of:
  1. The name of the element, a string,
  2. The attributes of the element, a dictionary (optional), and
  3+. The content of the element, if any.  Strings are simple text nodes and
      lists are child elements.
  Example 1:
      <test/>
  becomes
      ['test']
  Example 2:
      <myelement a='value1' b='value2'>
         <childtype>This is</childtype>
         <childtype>it!</childtype>
      </myelement>
  becomes
      ['myelement', {'a':'value1', 'b':'value2'},
         ['childtype', 'This is'],
         ['childtype', 'it!'],
      ]
  Args:
    content:  The structured content to be converted.
    encoding: The encoding to report on the first XML line.
    pretty: True if we want pretty printing with indents and new lines.
  Returns:
    The XML content as a string.
  """
  # We create a huge list of all the elements of the file.
  xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
  if pretty:
    xml_parts.append('\n')
  _ConstructContentList(xml_parts, content, pretty)
  # Convert it to a string
  return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
  """ Appends the XML parts corresponding to the specification.
  Args:
    xml_parts: A list of XML parts to be appended to.
    specification:  The specification of the element.  See EasyXml docs.
    pretty: True if we want pretty printing with indents and new lines.
    level: Indentation level.
  """
  # The first item in a specification is the name of the element.
  if pretty:
    indentation = '  ' * level
    new_line = '\n'
  else:
    indentation = ''
    new_line = ''
  name = specification[0]
  if not isinstance(name, str):
    raise Exception('The first item of an EasyXml specification should be '
                    'a string.  Specification was ' + str(specification))
  xml_parts.append(indentation + '<' + name)
  # Optionally in second position is a dictionary of the attributes.
  rest = specification[1:]
  if rest and isinstance(rest[0], dict):
    for at, val in sorted(rest[0].iteritems()):
      xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
    rest = rest[1:]
  if rest:
    xml_parts.append('>')
    all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
    multi_line = not all_strings
    if multi_line and new_line:
      xml_parts.append(new_line)
    for child_spec in rest:
      # If it's a string, append a text node.
      # Otherwise recurse over that child definition
      if isinstance(child_spec, str):
        xml_parts.append(_XmlEscape(child_spec))
      else:
        _ConstructContentList(xml_parts, child_spec, pretty, level + 1)
    if multi_line and indentation:
      xml_parts.append(indentation)
    xml_parts.append('</%s>%s' % (name, new_line))
  else:
    xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
                      win32=False):
  """ Writes the XML content to disk, touching the file only if it has changed.
  Args:
    content:  The structured content to be written.
    path: Location of the file.
    encoding: The encoding to report on the first line of the XML file.
    pretty: True if we want pretty printing with indents and new lines.
  """
  xml_string = XmlToString(content, encoding, pretty)
  if win32 and os.linesep != '\r\n':
    xml_string = xml_string.replace('\n', '\r\n')
    
  try:
    xml_string = xml_string.encode(encoding)
  except Exception:
    xml_string = unicode(xml_string, 'latin-1').encode(encoding)
  # Get the old content
  try:
    f = open(path, 'r')
    existing = f.read()
    f.close()
  except:
    existing = None
  # It has changed, write it
  if existing != xml_string:
    f = open(path, 'w')
    f.write(xml_string)
    f.close()
_xml_escape_map = {
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '&': '&amp;',
    '\n': '&#xA;',
    '\r': '&#xD;',
}
_xml_escape_re = re.compile(
    "(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
  """ Escape a string for inclusion in XML."""
  def replace(match):
    m = match.string[match.start() : match.end()]
    # don't replace single quotes in attrs
    if attr and m == "'":
      return m
    return _xml_escape_map[m]
  return _xml_escape_re.sub(replace, value)
 | 
	mit | 
| 
	macosforge/ccs-calendarserver | 
	txdav/caldav/datastore/scheduling/ischedule/remoteservers.py | 
	1 | 
	6936 | 
	##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.python.filepath import CachingFilePath as FilePath
from twext.python.log import Logger
from twistedcaldav.config import config, fullServerPath
from twistedcaldav import xmlutil
"""
XML based iSchedule configuration file handling. This is for handling of remote servers. The localservers.py module
handles servers that are local (podded).
"""
__all__ = [
    "IScheduleServers",
]
log = Logger()
class IScheduleServers(object):
    _fileInfo = None
    _xmlFile = None
    _servers = None
    _domainMap = None
    def __init__(self):
        if IScheduleServers._servers is None:
            self._loadConfig()
    def _loadConfig(self):
        if config.Scheduling.iSchedule.RemoteServers:
            if IScheduleServers._servers is None:
                IScheduleServers._xmlFile = FilePath(
                    fullServerPath(
                        config.ConfigRoot,
                        config.Scheduling.iSchedule.RemoteServers,
                    )
                )
            if IScheduleServers._xmlFile.exists():
                IScheduleServers._xmlFile.restat()
                fileInfo = (IScheduleServers._xmlFile.getmtime(), IScheduleServers._xmlFile.getsize())
                if fileInfo != IScheduleServers._fileInfo:
                    parser = IScheduleServersParser(IScheduleServers._xmlFile)
                    IScheduleServers._servers = parser.servers
                    self._mapDomains()
                    IScheduleServers._fileInfo = fileInfo
            else:
                IScheduleServers._servers = ()
                IScheduleServers._domainMap = {}
        else:
            IScheduleServers._servers = ()
            IScheduleServers._domainMap = {}
    def _mapDomains(self):
        IScheduleServers._domainMap = {}
        for server in IScheduleServers._servers:
            for domain in server.domains:
                IScheduleServers._domainMap[domain] = server
    def mapDomain(self, domain):
        """
        Map a calendar user address domain to a suitable server that can
        handle server-to-server requests for that user.
        """
        return IScheduleServers._domainMap.get(domain)
ELEMENT_SERVERS = "servers"
ELEMENT_SERVER = "server"
ELEMENT_URI = "uri"
ELEMENT_AUTHENTICATION = "authentication"
ATTRIBUTE_TYPE = "type"
ATTRIBUTE_BASICAUTH = "basic"
ELEMENT_USER = "user"
ELEMENT_PASSWORD = "password"
ELEMENT_ALLOW_REQUESTS_FROM = "allow-requests-from"
ELEMENT_ALLOW_REQUESTS_TO = "allow-requests-to"
ELEMENT_DOMAINS = "domains"
ELEMENT_DOMAIN = "domain"
ELEMENT_CLIENT_HOSTS = "hosts"
ELEMENT_HOST = "host"
class IScheduleServersParser(object):
    """
    Server-to-server configuration file parser.
    """
    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self.xmlFile)
    def __init__(self, xmlFile):
        self.servers = []
        # Read in XML
        _ignore_etree, servers_node = xmlutil.readXML(xmlFile.path, ELEMENT_SERVERS)
        self._parseXML(servers_node)
    def _parseXML(self, node):
        """
        Parse the XML root node from the server-to-server configuration document.
        @param node: the L{Node} to parse.
        """
        for child in node:
            if child.tag == ELEMENT_SERVER:
                self.servers.append(IScheduleServerRecord())
                self.servers[-1].parseXML(child)
class IScheduleServerRecord (object):
    """
    Contains server-to-server details.
    """
    def __init__(self, uri=None, rewriteCUAddresses=True, moreHeaders=[], podding=False):
        """
        @param recordType: record type for directory entry.
        """
        self.uri = ""
        self.authentication = None
        self.allow_from = False
        self.allow_to = True
        self.domains = []
        self.client_hosts = []
        self.rewriteCUAddresses = rewriteCUAddresses
        self.moreHeaders = moreHeaders
        self._podding = podding
        if uri:
            self.uri = uri
            self._parseDetails()
    def details(self):
        return (self.ssl, self.host, self.port, self.path,)
    def podding(self):
        return self._podding
    def redirect(self, location):
        """
        Permanent redirect for the lifetime of this record.
        """
        self.uri = location
        self._parseDetails()
    def parseXML(self, node):
        for child in node:
            if child.tag == ELEMENT_URI:
                self.uri = child.text
            elif child.tag == ELEMENT_AUTHENTICATION:
                self._parseAuthentication(child)
            elif child.tag == ELEMENT_ALLOW_REQUESTS_FROM:
                self.allow_from = True
            elif child.tag == ELEMENT_ALLOW_REQUESTS_TO:
                self.allow_to = True
            elif child.tag == ELEMENT_DOMAINS:
                self._parseList(child, ELEMENT_DOMAIN, self.domains)
            elif child.tag == ELEMENT_CLIENT_HOSTS:
                self._parseList(child, ELEMENT_HOST, self.client_hosts)
            else:
                raise RuntimeError("[{}] Unknown attribute: {}".format(self.__class__, child.tag,))
        self._parseDetails()
    def _parseList(self, node, element_name, appendto):
        for child in node:
            if child.tag == element_name:
                appendto.append(child.text)
    def _parseAuthentication(self, node):
        if node.get(ATTRIBUTE_TYPE) != ATTRIBUTE_BASICAUTH:
            return
        for child in node:
            if child.tag == ELEMENT_USER:
                user = child.text
            elif child.tag == ELEMENT_PASSWORD:
                password = child.text
        self.authentication = ("basic", user, password,)
    def _parseDetails(self):
        # Extract scheme, host, port and path
        if self.uri.startswith("http://"):
            self.ssl = False
            rest = self.uri[7:]
        elif self.uri.startswith("https://"):
            self.ssl = True
            rest = self.uri[8:]
        splits = rest.split("/", 1)
        hostport = splits[0].split(":")
        self.host = hostport[0]
        if len(hostport) > 1:
            self.port = int(hostport[1])
        else:
            self.port = {False: 80, True: 443}[self.ssl]
        self.path = "/"
        if len(splits) > 1:
            self.path += splits[1]
 | 
	apache-2.0 | 
| 
	rocky/python3-trepan | 
	test/unit/test-cmdfns.py | 
	1 | 
	2471 | 
	#!/usr/bin/env python3
'Unit test for trepan.processor.command.cmdfns'
import unittest
from trepan.processor import cmdfns as Mcmdfns
class TestCommandHelper(unittest.TestCase):
    def setUp(self):
        self.errors = []
        return
    def errmsg(self, msg):
        self.errors.append(msg)
        return
    def test_get_an_int(self):
        self.assertEqual(0, Mcmdfns.get_an_int(self.errmsg, '0', 'foo', 0))
        self.assertEqual(0, len(self.errors))
        self.assertEqual(6, Mcmdfns.get_an_int(self.errmsg, '6*1', 'foo', 5))
        self.assertEqual(0, len(self.errors))
        self.assertEqual(None, Mcmdfns.get_an_int(self.errmsg, '0',
                                                  '0 is too small', 5))
        self.assertEqual(1, len(self.errors))
        self.assertEqual(None, Mcmdfns.get_an_int(self.errmsg, '4+a',
                                                  '4+a is invalid', 5))
        self.assertEqual('4+a is invalid', self.errors[-1])
        return
    def test_get_int(self):
        self.assertEqual(1, Mcmdfns.get_int(self.errmsg, '1', 5))
        self.assertEqual(3, Mcmdfns.get_int(self.errmsg, '1+2', 5))
        self.assertEqual(5, Mcmdfns.get_int(self.errmsg, None, 5))
        self.assertEqual(1, Mcmdfns.get_int(self.errmsg, None))
        self.assertRaises(ValueError, Mcmdfns.get_int,
                          *(self.errmsg, 'Foo', 5))
        return
    def test_get_onoff(self):
        for arg in ('1', 'on'):
            self.assertEqual(True, Mcmdfns.get_onoff(self.errmsg, arg))
            pass
        for arg in ('0', 'off'):
            self.assertEqual(False, Mcmdfns.get_onoff(self.errmsg, arg))
            pass
        for result in (True, False):
            self.assertEqual(result, Mcmdfns.get_onoff(self.errmsg, None,
                                                       result))
            pass
        self.assertRaises(ValueError, Mcmdfns.get_onoff, *(self.errmsg,
                                                           'Foo'))
        return
    def test_want_different_line(self):
        for cmd, default, expected in [
        ('s+', False, True),
        ('s-', True,  False),
        ('s',  False, False),
        ('n',  True,  True) ]:
            self.assertEqual(expected,
                             Mcmdfns.want_different_line(cmd, default),
                             cmd)
            pass
        return
    pass
if __name__ == '__main__':
    unittest.main()
 | 
	gpl-3.0 | 
| 
	BrandonY/python-docs-samples | 
	appengine/standard/multitenancy/datastore_test.py | 
	9 | 
	1124 | 
	# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webtest
import datastore
def test_datastore(testbed):
    app = webtest.TestApp(datastore.app)
    response = app.get('/datastore')
    assert response.status_int == 200
    assert 'Global: 1' in response.body
    response = app.get('/datastore/a')
    assert response.status_int == 200
    assert 'Global: 2' in response.body
    assert 'a: 1' in response.body
    response = app.get('/datastore/b')
    assert response.status_int == 200
    assert 'Global: 3' in response.body
    assert 'b: 1' in response.body
 | 
	apache-2.0 | 
| 
	darmaa/odoo | 
	addons/edi/models/edi.py | 
	44 | 
	31991 | 
	# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import hashlib
import simplejson as json
import logging
import re
import time
import urllib2
import openerp
import openerp.release as release
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
EXTERNAL_ID_PATTERN = re.compile(r'^([^.:]+)(?::([^.]+))?\.(\S+)$')
EDI_VIEW_WEB_URL = '%s/edi/view?db=%s&token=%s'
EDI_PROTOCOL_VERSION = 1 # arbitrary ever-increasing version number
EDI_GENERATOR = 'OpenERP ' + release.major_version
EDI_GENERATOR_VERSION = release.version_info
def split_external_id(ext_id):
    match = EXTERNAL_ID_PATTERN.match(ext_id)
    assert match, \
            _("'%s' is an invalid external ID") % (ext_id)
    return {'module': match.group(1),
            'db_uuid': match.group(2),
            'id': match.group(3),
            'full': match.group(0)}
def safe_unique_id(database_id, model, record_id):
    """Generate a unique string to represent a (database_uuid,model,record_id) pair
    without being too long, and with a very low probability of collisions.
    """
    msg = "%s-%s-%s-%s" % (time.time(), database_id, model, record_id)
    digest = hashlib.sha1(msg).digest()
    # fold the sha1 20 bytes digest to 9 bytes
    digest = ''.join(chr(ord(x) ^ ord(y)) for (x,y) in zip(digest[:9], digest[9:-2]))
    # b64-encode the 9-bytes folded digest to a reasonable 12 chars ASCII ID
    digest = base64.urlsafe_b64encode(digest)
    return '%s-%s' % (model.replace('.','_'), digest)
def last_update_for(record):
    """Returns the last update timestamp for the given record,
       if available, otherwise False
    """
    if record._model._log_access:
        record_log = record.perm_read()[0]
        return record_log.get('write_date') or record_log.get('create_date') or False
    return False
class edi(osv.AbstractModel):
    _name = 'edi.edi'
    _description = 'EDI Subsystem'
    def new_edi_token(self, cr, uid, record):
        """Return a new, random unique token to identify this model record,
        and to be used as token when exporting it as an EDI document.
        :param browse_record record: model record for which a token is needed
        """
        db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
        edi_token = hashlib.sha256('%s-%s-%s-%s' % (time.time(), db_uuid, record._name, record.id)).hexdigest()
        return edi_token
    def serialize(self, edi_documents):
        """Serialize the given EDI document structures (Python dicts holding EDI data),
        using JSON serialization.
        :param [dict] edi_documents: list of EDI document structures to serialize
        :return: UTF-8 encoded string containing the serialized document
        """
        serialized_list = json.dumps(edi_documents)
        return serialized_list
    def generate_edi(self, cr, uid, records, context=None):
        """Generates a final EDI document containing the EDI serialization
        of the given records, which should all be instances of a Model
        that has the :meth:`~.edi` mixin. The document is not saved in the
        database.
        :param list(browse_record) records: records to export as EDI
        :return: UTF-8 encoded string containing the serialized records
        """
        edi_list = []
        for record in records:
            record_model = record._model
            edi_list += record_model.edi_export(cr, uid, [record], context=context)
        return self.serialize(edi_list)
    def load_edi(self, cr, uid, edi_documents, context=None):
        """Import the given EDI document structures into the system, using
        :meth:`~.import_edi`.
        :param edi_documents: list of Python dicts containing the deserialized
                              version of EDI documents
        :return: list of (model, id, action) tuple containing the model and database ID
                 of all records that were imported in the system, plus a suggested
                 action definition dict for displaying each document.
        """
        ir_module = self.pool.get('ir.module.module')
        res = []
        for edi_document in edi_documents:
            module = edi_document.get('__import_module') or edi_document.get('__module')
            assert module, 'a `__module` or `__import_module` attribute is required in each EDI document.'
            if module != 'base' and not ir_module.search(cr, uid, [('name','=',module),('state','=','installed')]):
                raise osv.except_osv(_('Missing Application.'),
                            _("The document you are trying to import requires the OpenERP `%s` application. "
                              "You can install it by connecting as the administrator and opening the configuration assistant.")%(module,))
            model = edi_document.get('__import_model') or edi_document.get('__model')
            assert model, 'a `__model` or `__import_model` attribute is required in each EDI document.'
            assert model in self.pool, 'model `%s` cannot be found, despite module `%s` being available - '\
                              'this EDI document seems invalid or unsupported.' % (model,module)
            model_obj = self.pool[model]
            record_id = model_obj.edi_import(cr, uid, edi_document, context=context)
            record_action = model_obj._edi_record_display_action(cr, uid, record_id, context=context)
            res.append((model, record_id, record_action))
        return res
    def deserialize(self, edi_documents_string):
        """Return deserialized version of the given EDI Document string.
        :param str|unicode edi_documents_string: UTF-8 string (or unicode) containing
                                                 JSON-serialized EDI document(s)
        :return: Python object representing the EDI document(s) (usually a list of dicts)
        """
        return json.loads(edi_documents_string)
    def import_edi(self, cr, uid, edi_document=None, edi_url=None, context=None):
        """Import a JSON serialized EDI Document string into the system, first retrieving it
        from the given ``edi_url`` if provided.
        :param str|unicode edi_document: UTF-8 string or unicode containing JSON-serialized
                                         EDI Document to import. Must not be provided if
                                         ``edi_url`` is given.
        :param str|unicode edi_url: URL where the EDI document (same format as ``edi``)
                                    may be retrieved, without authentication.
        """
        if edi_url:
            assert not edi_document, 'edi must not be provided if edi_url is given.'
            edi_document = urllib2.urlopen(edi_url).read()
        assert edi_document, 'EDI Document is empty!'
        edi_documents = self.deserialize(edi_document)
        return self.load_edi(cr, uid, edi_documents, context=context)
class EDIMixin(object):
    """Mixin class for Model objects that want be exposed as EDI documents.
       Classes that inherit from this mixin class should override the
       ``edi_import()`` and ``edi_export()`` methods to implement their
       specific behavior, based on the primitives provided by this mixin."""
    def _edi_requires_attributes(self, attributes, edi):
        model_name = edi.get('__imported_model') or edi.get('__model') or self._name
        for attribute in attributes:
            assert edi.get(attribute),\
                 'Attribute `%s` is required in %s EDI documents.' % (attribute, model_name)
    # private method, not RPC-exposed as it creates ir.model.data entries as
    # SUPERUSER based on its parameters
    def _edi_external_id(self, cr, uid, record, existing_id=None, existing_module=None,
                        context=None):
        """Generate/Retrieve unique external ID for ``record``.
        Each EDI record and each relationship attribute in it is identified by a
        unique external ID, which includes the database's UUID, as a way to
        refer to any record within any OpenERP instance, without conflict.
        For OpenERP records that have an existing "External ID" (i.e. an entry in
        ir.model.data), the EDI unique identifier for this record will be made of
        "%s:%s:%s" % (module, database UUID, ir.model.data ID). The database's
        UUID MUST NOT contain colon characters (this is guaranteed by the
        UUID algorithm).
        For records that have no existing ir.model.data entry, a new one will be
        created during the EDI export. It is recommended that the generated external ID
        contains a readable reference to the record model, plus a unique value that
        hides the database ID. If ``existing_id`` is provided (because it came from
        an import), it will be used instead of generating a new one.
        If ``existing_module`` is provided (because it came from
        an import), it will be used instead of using local values.
        :param browse_record record: any browse_record needing an EDI external ID
        :param string existing_id: optional existing external ID value, usually coming
                                   from a just-imported EDI record, to be used instead
                                   of generating a new one
        :param string existing_module: optional existing module name, usually in the
                                       format ``module:db_uuid`` and coming from a
                                       just-imported EDI record, to be used instead
                                       of local values
        :return: the full unique External ID to use for record
        """
        ir_model_data = self.pool.get('ir.model.data')
        db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
        ext_id = record.get_external_id()[record.id]
        if not ext_id:
            ext_id = existing_id or safe_unique_id(db_uuid, record._name, record.id)
            # ID is unique cross-db thanks to db_uuid (already included in existing_module)
            module = existing_module or "%s:%s" % (record._original_module, db_uuid)
            _logger.debug("%s: Generating new external ID `%s.%s` for %r.", self._name,
                          module, ext_id, record)
            ir_model_data.create(cr, openerp.SUPERUSER_ID,
                                 {'name': ext_id,
                                  'model': record._name,
                                  'module': module,
                                  'res_id': record.id})
        else:
            module, ext_id = ext_id.split('.')
            if not ':' in module:
                # this record was not previously EDI-imported
                if not module == record._original_module:
                    # this could happen for data records defined in a module that depends
                    # on the module that owns the model, e.g. purchase defines
                    # product.pricelist records.
                    _logger.debug('Mismatching module: expected %s, got %s, for %s.',
                                  module, record._original_module, record)
                # ID is unique cross-db thanks to db_uuid
                module = "%s:%s" % (module, db_uuid)
        return '%s.%s' % (module, ext_id)
    def _edi_record_display_action(self, cr, uid, id, context=None):
        """Returns an appropriate action definition dict for displaying
           the record with ID ``rec_id``.
           :param int id: database ID of record to display
           :return: action definition dict
        """
        return {'type': 'ir.actions.act_window',
                'view_mode': 'form,tree',
                'view_type': 'form',
                'res_model': self._name,
                'res_id': id}
    def edi_metadata(self, cr, uid, records, context=None):
        """Return a list containing the boilerplate EDI structures for
           exporting ``records`` as EDI, including
           the metadata fields
        The metadata fields always include::
            {
               '__model': 'some.model',                # record model
               '__module': 'module',                   # required module
               '__id': 'module:db-uuid:model.id',      # unique global external ID for the record
               '__last_update': '2011-01-01 10:00:00', # last update date in UTC!
               '__version': 1,                         # EDI spec version
               '__generator' : 'OpenERP',              # EDI generator
               '__generator_version' : [6,1,0],        # server version, to check compatibility.
               '__attachments': [],                    # optional list of attachment dicts (name, content, file_name)
           }
        :param list(browse_record) records: records to export
        :return: list of dicts containing boilerplate EDI metadata for each record,
                 at the corresponding index from ``records``.
        """
        ir_attachment = self.pool.get('ir.attachment')
        results = []
        for record in records:
            ext_id = self._edi_external_id(cr, uid, record, context=context)
            edi_dict = {
                '__id': ext_id,
                '__last_update': last_update_for(record),
                '__model' : record._name,
                '__module' : record._original_module,
                '__version': EDI_PROTOCOL_VERSION,
                '__generator': EDI_GENERATOR,
                '__generator_version': EDI_GENERATOR_VERSION,
            }
            attachment_ids = ir_attachment.search(cr, uid, [('res_model','=', record._name), ('res_id', '=', record.id)])
            if attachment_ids:
                attachments = []
                for attachment in ir_attachment.browse(cr, uid, attachment_ids, context=context):
                    attachments.append({
                            'name' : attachment.name,
                            'content': attachment.datas, # already base64 encoded!
                            'file_name': attachment.datas_fname,
                    })
                edi_dict.update(__attachments=attachments)
            results.append(edi_dict)
        return results
    def edi_m2o(self, cr, uid, record, context=None):
        """Return a m2o EDI representation for the given record.
        The EDI format for a many2one is::
            ['unique_external_id', 'Document Name']
        """
        edi_ext_id = self._edi_external_id(cr, uid, record, context=context)
        relation_model = record._model
        name = relation_model.name_get(cr, uid, [record.id], context=context)
        name = name and name[0][1] or False
        return [edi_ext_id, name]
    def edi_o2m(self, cr, uid, records, edi_struct=None, context=None):
        """Return a list representing a O2M EDI relationship containing
           all the given records, according to the given ``edi_struct``.
           This is basically the same as exporting all the record using
           :meth:`~.edi_export` with the given ``edi_struct``, and wrapping
           the results in a list.
           Example::
             [                                # O2M fields would be a list of dicts, with their
               { '__id': 'module:db-uuid.id', # own __id.
                 '__last_update': 'iso date', # update date
                 'name': 'some name',
                 #...
               },
               # ...
             ],
        """
        result = []
        for record in records:
            result += record._model.edi_export(cr, uid, [record], edi_struct=edi_struct, context=context)
        return result
    def edi_m2m(self, cr, uid, records, context=None):
        """Return a list representing a M2M EDI relationship directed towards
           all the given records.
           This is basically the same as exporting all the record using
           :meth:`~.edi_m2o` and wrapping the results in a list.
            Example::
                # M2M fields are exported as a list of pairs, like a list of M2O values
                [
                      ['module:db-uuid.id1', 'Task 01: bla bla'],
                      ['module:db-uuid.id2', 'Task 02: bla bla']
                ]
        """
        return [self.edi_m2o(cr, uid, r, context=context) for r in records]
    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Returns a list of dicts representing EDI documents containing the
           records, and matching the given ``edi_struct``, if provided.
           :param edi_struct: if provided, edi_struct should be a dictionary
                              with a skeleton of the fields to export.
                              Basic fields can have any key as value, but o2m
                              values should have a sample skeleton dict as value,
                              to act like a recursive export.
                              For example, for a res.partner record::
                                  edi_struct: {
                                       'name': True,
                                       'company_id': True,
                                       'address': {
                                           'name': True,
                                           'street': True,
                                           }
                                  }
                              Any field not specified in the edi_struct will not
                              be included in the exported data. Fields with no
                              value (False) will be omitted in the EDI struct.
                              If edi_struct is omitted, no fields will be exported
        """
        if edi_struct is None:
            edi_struct = {}
        fields_to_export = edi_struct.keys()
        results = []
        for record in records:
            edi_dict = self.edi_metadata(cr, uid, [record], context=context)[0]
            for field in fields_to_export:
                column = self._all_columns[field].column
                value = getattr(record, field)
                if not value and value not in ('', 0):
                    continue
                elif column._type == 'many2one':
                    value = self.edi_m2o(cr, uid, value, context=context)
                elif column._type == 'many2many':
                    value = self.edi_m2m(cr, uid, value, context=context)
                elif column._type == 'one2many':
                    value = self.edi_o2m(cr, uid, value, edi_struct=edi_struct.get(field, {}), context=context)
                edi_dict[field] = value
            results.append(edi_dict)
        return results
    def _edi_get_object_by_name(self, cr, uid, name, model_name, context=None):
        model = self.pool[model_name]
        search_results = model.name_search(cr, uid, name, operator='=', context=context)
        if len(search_results) == 1:
            return model.browse(cr, uid, search_results[0][0], context=context)
        return False
    def _edi_generate_report_attachment(self, cr, uid, record, context=None):
        """Utility method to generate the first PDF-type report declared for the
           current model with ``usage`` attribute set to ``default``.
           This must be called explicitly by models that need it, usually
           at the beginning of ``edi_export``, before the call to ``super()``."""
        ir_actions_report = self.pool.get('ir.actions.report.xml')
        matching_reports = ir_actions_report.search(cr, uid, [('model','=',self._name),
                                                              ('report_type','=','pdf'),
                                                              ('usage','=','default')])
        if matching_reports:
            report = ir_actions_report.browse(cr, uid, matching_reports[0])
            result, format = openerp.report.render_report(cr, uid, [record.id], report.report_name, {'model': self._name}, context=context)
            eval_context = {'time': time, 'object': record}
            if not report.attachment or not eval(report.attachment, eval_context):
                # no auto-saving of report as attachment, need to do it manually
                result = base64.b64encode(result)
                file_name = record.name_get()[0][1]
                file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
                file_name += ".pdf"
                self.pool.get('ir.attachment').create(cr, uid,
                                                      {
                                                       'name': file_name,
                                                       'datas': result,
                                                       'datas_fname': file_name,
                                                       'res_model': self._name,
                                                       'res_id': record.id,
                                                       'type': 'binary'
                                                      },
                                                      context=context)
    def _edi_import_attachments(self, cr, uid, record_id, edi, context=None):
        ir_attachment = self.pool.get('ir.attachment')
        for attachment in edi.get('__attachments', []):
            # check attachment data is non-empty and valid
            file_data = None
            try:
                file_data = base64.b64decode(attachment.get('content'))
            except TypeError:
                pass
            assert file_data, 'Incorrect/Missing attachment file content.'
            assert attachment.get('name'), 'Incorrect/Missing attachment name.'
            assert attachment.get('file_name'), 'Incorrect/Missing attachment file name.'
            ir_attachment.create(cr, uid, {'name': attachment['name'],
                                           'datas_fname': attachment['file_name'],
                                           'res_model': self._name,
                                           'res_id': record_id,
                                           # should be pure 7bit ASCII
                                           'datas': str(attachment['content']),
                                           }, context=context)
    def _edi_get_object_by_external_id(self, cr, uid, external_id, model, context=None):
        """Returns browse_record representing object identified by the model and external_id,
           or None if no record was found with this external id.
           :param external_id: fully qualified external id, in the EDI form
                               ``module:db_uuid:identifier``.
           :param model: model name the record belongs to.
        """
        ir_model_data = self.pool.get('ir.model.data')
        # external_id is expected to have the form: ``module:db_uuid:model.random_name``
        ext_id_members = split_external_id(external_id)
        db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
        module = ext_id_members['module']
        ext_id = ext_id_members['id']
        modules = []
        ext_db_uuid = ext_id_members['db_uuid']
        if ext_db_uuid:
            modules.append('%s:%s' % (module, ext_id_members['db_uuid']))
        if ext_db_uuid is None or ext_db_uuid == db_uuid:
            # local records may also be registered without the db_uuid
            modules.append(module)
        data_ids = ir_model_data.search(cr, uid, [('model','=',model),
                                                  ('name','=',ext_id),
                                                  ('module','in',modules)])
        if data_ids:
            model = self.pool[model]
            data = ir_model_data.browse(cr, uid, data_ids[0], context=context)
            if model.exists(cr, uid, [data.res_id]):
                return model.browse(cr, uid, data.res_id, context=context)
            # stale external-id, cleanup to allow re-import, as the corresponding record is gone
            ir_model_data.unlink(cr, 1, [data_ids[0]])
    def edi_import_relation(self, cr, uid, model, value, external_id, context=None):
        """Imports a M2O/M2M relation EDI specification ``[external_id,value]`` for the
           given model, returning the corresponding database ID:
           * First, checks if the ``external_id`` is already known, in which case the corresponding
             database ID is directly returned, without doing anything else;
           * If the ``external_id`` is unknown, attempts to locate an existing record
             with the same ``value`` via name_search(). If found, the given external_id will
             be assigned to this local record (in addition to any existing one)
           * If previous steps gave no result, create a new record with the given
             value in the target model, assign it the given external_id, and return
             the new database ID
           :param str value: display name of the record to import
           :param str external_id: fully-qualified external ID of the record
           :return: database id of newly-imported or pre-existing record
        """
        _logger.debug("%s: Importing EDI relationship [%r,%r]", model, external_id, value)
        target = self._edi_get_object_by_external_id(cr, uid, external_id, model, context=context)
        need_new_ext_id = False
        if not target:
            _logger.debug("%s: Importing EDI relationship [%r,%r] - ID not found, trying name_get.",
                          self._name, external_id, value)
            target = self._edi_get_object_by_name(cr, uid, value, model, context=context)
            need_new_ext_id = True
        if not target:
            _logger.debug("%s: Importing EDI relationship [%r,%r] - name not found, creating it.",
                          self._name, external_id, value)
            # need_new_ext_id also applies here, but it was already set above
            model = self.pool[model]
            res_id, _ = model.name_create(cr, uid, value, context=context)
            target = model.browse(cr, uid, res_id, context=context)
        else:
            _logger.debug("%s: Importing EDI relationship [%r,%r] - record already exists with ID %s, using it",
                          self._name, external_id, value, target.id)
        if need_new_ext_id:
            ext_id_members = split_external_id(external_id)
            # module name is never used bare when creating ir.model.data entries, in order
            # to avoid being taken as part of the module's data, and cleaned up at next update
            module = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
            # create a new ir.model.data entry for this value
            self._edi_external_id(cr, uid, target, existing_id=ext_id_members['id'], existing_module=module, context=context)
        return target.id
    def edi_import(self, cr, uid, edi, context=None):
        """Imports a dict representing an EDI document into the system.
           :param dict edi: EDI document to import
           :return: the database ID of the imported record
        """
        assert self._name == edi.get('__import_model') or \
                ('__import_model' not in edi and self._name == edi.get('__model')), \
                "EDI Document Model and current model do not match: '%s' (EDI) vs '%s' (current)." % \
                   (edi.get('__model'), self._name)
        # First check whether the record is already known in the database, in which case it is ignored
        ext_id_members = split_external_id(edi['__id'])
        existing = self._edi_get_object_by_external_id(cr, uid, ext_id_members['full'], self._name, context=context)
        if existing:
            _logger.info("'%s' EDI Document with ID '%s' is already known, skipping import!", self._name, ext_id_members['full'])
            return existing.id
        record_values = {}
        o2m_todo = {} # o2m values are processed after their parent already exists
        for field_name, field_value in edi.iteritems():
            # skip metadata and empty fields
            if field_name.startswith('__') or field_value is None or field_value is False:
                continue
            field_info = self._all_columns.get(field_name)
            if not field_info:
                _logger.warning('Ignoring unknown field `%s` when importing `%s` EDI document.', field_name, self._name)
                continue
            field = field_info.column
            # skip function/related fields
            if isinstance(field, fields.function):
                _logger.warning("Unexpected function field value is found in '%s' EDI document: '%s'." % (self._name, field_name))
                continue
            relation_model = field._obj
            if field._type == 'many2one':
                record_values[field_name] = self.edi_import_relation(cr, uid, relation_model,
                                                                      field_value[1], field_value[0],
                                                                      context=context)
            elif field._type == 'many2many':
                record_values[field_name] = [self.edi_import_relation(cr, uid, relation_model, m2m_value[1],
                                                                       m2m_value[0], context=context)
                                             for m2m_value in field_value]
            elif field._type == 'one2many':
                # must wait until the parent record is imported, as the parent relationship
                # is often required in o2m child records
                o2m_todo[field_name] = field_value
            else:
                record_values[field_name] = field_value
        module_ref = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
        record_id = self.pool.get('ir.model.data')._update(cr, uid, self._name, module_ref, record_values,
                                                           xml_id=ext_id_members['id'], context=context)
        record_display, = self.name_get(cr, uid, [record_id], context=context)
        # process o2m values, connecting them to their parent on-the-fly
        for o2m_field, o2m_value in o2m_todo.iteritems():
            field = self._all_columns[o2m_field].column
            dest_model = self.pool[field._obj]
            for o2m_line in o2m_value:
                # link to parent record: expects an (ext_id, name) pair
                o2m_line[field._fields_id] = (ext_id_members['full'], record_display[1])
                dest_model.edi_import(cr, uid, o2m_line, context=context)
        # process the attachments, if any
        self._edi_import_attachments(cr, uid, record_id, edi, context=context)
        return record_id
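# Illustrative sketch (added for clarity; not part of the original module): the
# shape of the EDI dict that edi_import() expects, as read by the code above.
# The model, external ids and values below are hypothetical examples. Keys
# starting with '__' are metadata; many2one and many2many values are
# [external_id, name] pairs; one2many values are lists of nested EDI dicts.
_EDI_IMPORT_EXAMPLE = {
    '__model': 'res.partner',
    '__id': 'base:5af1272e-dd26-11e0-b65e-701a04e25543.partner_demo',
    'name': 'Example Partner',
    'country_id': ['base:5af1272e-dd26-11e0-b65e-701a04e25543.be', 'Belgium'],
    'category_id': [
        ['base:5af1272e-dd26-11e0-b65e-701a04e25543.res_partner_category_0', 'Customer'],
    ],
}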
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
 | 
	agpl-3.0 | 
| 
	matthiasrichter/AliceO2 | 
	Analysis/Scripts/update_ccdb.py | 
	3 | 
	6042 | 
	#!/usr/bin/env python3
# Copyright 2019-2020 CERN and copyright holders of ALICE O2.
# See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
# All rights not expressly granted are reserved.
#
# This software is distributed under the terms of the GNU General Public
# License v3 (GPL Version 3), copied verbatim in the file "COPYING".
#
# In applying this license CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""
Script to update the CCDB with timestamp non-overlapping objects.
If an object is found in the specified range, the object is split into two.
If the requested range overlaps an existing object, three objects are uploaded to CCDB:
1) the latest object with the requested timestamp validity
2) the old object with validity [old_lower_validity, requested_lower_bound]
3) the old object with validity [requested_upper_bound, old_upper_validity]
Author: Nicolo' Jacazio on 2020-06-22
TODO add support for 3 files update
"""
import subprocess
from datetime import datetime
import matplotlib.pyplot as plt
import argparse
def convert_timestamp(ts):
    """
    Converts a timestamp in milliseconds into a human-readable format
    """
    return datetime.utcfromtimestamp(ts/1000).strftime('%Y-%m-%d %H:%M:%S')
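# Illustrative example (added; not in the original script): for the timestamp
# used in the argparse help below, convert_timestamp(1588956517161) returns
# '2020-05-08 16:48:37' (UTC).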
def get_ccdb_obj(path, timestamp, dest="/tmp/", verbose=0):
    """
    Gets the ccdb object from 'path' and 'timestamp' and downloads it into 'dest'
    """
    if verbose:
        print("Getting obj", path, "with timestamp",
              timestamp, convert_timestamp(timestamp))
    cmd = f"o2-ccdb-downloadccdbfile --path {path} --dest {dest} --timestamp {timestamp}"
    subprocess.run(cmd.split())
def get_ccdb_obj_validity(path, dest="/tmp/", verbose=0):
    """
    Gets the timestamp validity for an object downloaded from CCDB.
    Returns a list with the initial and end timestamps.
    """
    cmd = f"o2-ccdb-inspectccdbfile {dest}{path}/snapshot.root"
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    output = output.decode("utf-8").split("\n")
    error = error.decode("utf-8").split("\n") if error is not None else error
    if verbose:
        print("out:")
        print(*output, "\n")
        print("err:")
        print(error)
    result = list(filter(lambda x: x.startswith('Valid-'), output))
    ValidFrom = result[0].split()
    ValidUntil = result[1].split()
    return [int(ValidFrom[-1]), int(ValidUntil[-1])]
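# Note added for clarity: the parsing above assumes the inspector output
# contains lines of the form 'Valid-From ... 1588956517161' and
# 'Valid-Until ... 1588986517161', and takes the last whitespace-separated
# token of each as the timestamp in milliseconds.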
def upload_ccdb_obj(path, timestamp_from, timestamp_until, dest="/tmp/", meta=""):
    """
    Uploads a new object to CCDB in the 'path' using the validity timestamp specified
    """
    print("Uploading obj", path, "with timestamp", [timestamp_from, timestamp_until],
          convert_timestamp(timestamp_from), convert_timestamp(timestamp_until))
    key = path.split("/")[-1]
    cmd = f"o2-ccdb-upload -f {dest}{path}/snapshot.root "
    cmd += f"--key {key} --path {path} "
    cmd += f"--starttimestamp {timestamp_from} --endtimestamp {timestamp_until} --meta \"{meta}\""
    subprocess.run(cmd.split())
def main(path, timestamp_from, timestamp_until, verbose=0, show=False):
    """
    Used to upload a new object to CCDB in 'path' valid from 'timestamp_from' to 'timestamp_until'
    Gets the object from CCDB specified in 'path' and for 'timestamp_from-1'
    Gets the object from CCDB specified in 'path' and for 'timestamp_until+1'
    If required, plots the situation before and after the update
    """
    get_ccdb_obj(path, timestamp_from-1)
    val_before = get_ccdb_obj_validity(path, verbose=verbose)
    get_ccdb_obj(path, timestamp_until+1)
    val_after = get_ccdb_obj_validity(path, verbose=verbose)
    overlap_before = val_before[1] > timestamp_from
    overlap_after = val_after[0] < timestamp_until
    if verbose:
        if overlap_before:
            print("Previous object overlaps")
        if overlap_after:
            print("Next object overlaps")
    trimmed_before = val_before if not overlap_before else [
        val_before[0], timestamp_from - 1]
    trimmed_after = val_after if not overlap_after else [
        timestamp_until+1, val_after[1]]
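    # Worked example (illustrative, added for clarity): if the existing object
    # is valid over [1000, 5000] and the new object covers [2000, 3000], both
    # overlaps are detected and the old validity is trimmed to [1000, 1999]
    # before and [3001, 5000] after the new object.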
    if show:
        fig, ax = plt.subplots()
        def bef_af(v, y):
            return [v[0] - 1] + v + [v[1] + 1], [0, y, y, 0]
        if True:
            ax.plot(*bef_af(val_before, 0.95), label='before')
            ax.plot(*bef_af(val_after, 1.05), label='after')
        if False:
            ax.plot(*bef_af(trimmed_before, 0.9), label='trimmed before')
            ax.plot(*bef_af(trimmed_after, 1.1), label='trimmed after')
        ax.plot(*bef_af([timestamp_from, timestamp_until], 1), label='object')
        xlim = 10000000
        plt.xlim([timestamp_from-xlim, timestamp_until+xlim])
        plt.ylim(0, 2)
        plt.xlabel('Timestamp')
        plt.ylabel('Validity')
        plt.legend()
        plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Uploads timestamp non-overlapping objects to CCDB. "
        "Basic example: `./update_ccdb.py qc/TOF/TOFTaskCompressed/hDiagnostic 1588956517161 1588986517161 --show --verbose`")
    parser.add_argument('path', metavar='path_to_object', type=str,
                        help='Path of the object in the CCDB repository')
    parser.add_argument('timestamp_from', metavar='from_timestamp', type=int,
                        help='Timestamp of start for the new object to use')
    parser.add_argument('timestamp_until', metavar='until_timestamp', type=int,
                        help='Timestamp of stop for the new object to use')
    parser.add_argument('--verbose', '-v', action='count', default=0)
    parser.add_argument('--show', '-s', action='count', default=0)
    args = parser.parse_args()
    main(path=args.path,
         timestamp_from=args.timestamp_from,
         timestamp_until=args.timestamp_until,
         verbose=args.verbose,
         show=args.show)
 | 
	gpl-3.0 | 
| 
	Aravinthu/odoo | 
	addons/website_event_sale/models/sale_order.py | 
	16 | 
	4747 | 
	# -*- coding: utf-8 -*-
from odoo import api, models, _
from odoo.exceptions import UserError
class SaleOrder(models.Model):
    _inherit = "sale.order"
    @api.multi
    def _cart_find_product_line(self, product_id=None, line_id=None, **kwargs):
        self.ensure_one()
        lines = super(SaleOrder, self)._cart_find_product_line(product_id, line_id)
        if line_id:
            return lines
        domain = [('id', 'in', lines.ids)]
        if self.env.context.get("event_ticket_id"):
            domain.append(('event_ticket_id', '=', self.env.context.get("event_ticket_id")))
        return self.env['sale.order.line'].sudo().search(domain)
    @api.multi
    def _website_product_id_change(self, order_id, product_id, qty=0):
        order = self.env['sale.order'].sudo().browse(order_id)
        if self._context.get('pricelist') != order.pricelist_id.id:
            self = self.with_context(pricelist=order.pricelist_id.id)
        values = super(SaleOrder, self)._website_product_id_change(order_id, product_id, qty=qty)
        event_ticket_id = None
        if self.env.context.get("event_ticket_id"):
            event_ticket_id = self.env.context.get("event_ticket_id")
        else:
            product = self.env['product.product'].browse(product_id)
            if product.event_ticket_ids:
                event_ticket_id = product.event_ticket_ids[0].id
        if event_ticket_id:
            ticket = self.env['event.event.ticket'].browse(event_ticket_id)
            if product_id != ticket.product_id.id:
                raise UserError(_("The ticket doesn't match with this product."))
            values['product_id'] = ticket.product_id.id
            values['event_id'] = ticket.event_id.id
            values['event_ticket_id'] = ticket.id
            values['price_unit'] = ticket.price_reduce or ticket.price
            values['name'] = "%s\n%s" % (ticket.event_id.display_name, ticket.name)
        # avoid writing related values that end up locking the product record
        values.pop('event_ok', None)
        return values
    @api.multi
    def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs):
        OrderLine = self.env['sale.order.line']
        if line_id:
            line = OrderLine.browse(line_id)
            ticket = line.event_ticket_id
            old_qty = int(line.product_uom_qty)
            if ticket.id:
                self = self.with_context(event_ticket_id=ticket.id, fixed_price=1)
        else:
            line = None
            ticket = self.env['event.event.ticket'].search([('product_id', '=', product_id)], limit=1)
            old_qty = 0
        new_qty = set_qty if set_qty else ((add_qty or 0) + old_qty)
        # case: buying tickets for a sold-out ticket
        values = {}
        if ticket and ticket.seats_availability == 'limited' and ticket.seats_available <= 0:
            values['warning'] = _('Sorry, the %(ticket)s tickets for the %(event)s event are sold out.') % {
                'ticket': ticket.name,
                'event': ticket.event_id.name}
            new_qty, set_qty, add_qty = 0, 0, 0
        # case: buying tickets, too many attendees
        elif ticket and ticket.seats_availability == 'limited' and new_qty > ticket.seats_available:
            values['warning'] = _('Sorry, only %(remaining_seats)d seats are still available for the %(ticket)s ticket for the %(event)s event.') % {
                'remaining_seats': ticket.seats_available,
                'ticket': ticket.name,
                'event': ticket.event_id.name}
            new_qty, set_qty, add_qty = ticket.seats_available, ticket.seats_available, 0
        values.update(super(SaleOrder, self)._cart_update(product_id, line_id, add_qty, set_qty, **kwargs))
        # removing attendees
        if ticket and new_qty < old_qty:
            attendees = self.env['event.registration'].search([
                ('state', '!=', 'cancel'),
                ('sale_order_id', 'in', self.ids),  # to avoid breaking on a multi-record set
                ('event_ticket_id', '=', ticket.id),
            ], offset=new_qty, limit=(old_qty - new_qty), order='create_date asc')
            attendees.button_reg_cancel()
        # adding attendees
        elif ticket and new_qty > old_qty:
            line = OrderLine.browse(values['line_id'])
            line._update_registrations(confirm=False, cancel_to_draft=True, registration_data=kwargs.get('registration_data', []))
            # add the registrations to the returned values, so they can be displayed on the website (or not)
            values['attendee_ids'] = self.env['event.registration'].search([('sale_order_line_id', '=', line.id), ('state', '!=', 'cancel')]).ids
        return values
 | 
	agpl-3.0 | 
| 
	CGATOxford/bioconda-recipes | 
	recipes/topas/topas.py | 
	38 | 
	2648 | 
	#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
jar_file = 'TOPAS.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    return os.path.dirname(os.path.realpath(path))
def java_executable():
    """Return the executable name of the Java interpreter."""
    java_home = getenv('JAVA_HOME')
    java_bin = os.path.join('bin', 'java')
    if java_home and access(os.path.join(java_home, java_bin), X_OK):
        return os.path.join(java_home, java_bin)
    else:
        return 'java'
def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.
    The argument list passed in argv must not include the script name.
    The return value is a 3-tuple of lists of strings of the form:
      (memory_options, prop_options, passthrough_options)
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    for arg in argv:
        if arg.startswith('-D'):
            prop_opts.append(arg)
        elif arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        else:
            pass_args.append(arg)
    # In the original shell script the test coded below read:
    #   if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
    # in the second condition, so an unset envar counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') == None:
        mem_opts = default_jvm_mem_opts
    return (mem_opts, prop_opts, pass_args)
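# Illustrative example (added; not part of the original wrapper): with
# _JAVA_OPTIONS unset, jvm_opts(['-Dkey=value', '-Xmx2g', 'input.txt'])
# returns (['-Xmx2g'], ['-Dkey=value'], ['input.txt']).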
def main():
    java = java_executable()
    jar_dir = real_dirname(sys.argv[0])
    (mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
    if '--jar_dir' in sys.argv[1:]:
        print(jar_path)
    else:
        sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
    main()
 | 
	mit | 
| 
	jaxkodex/odoo | 
	addons/point_of_sale/report/pos_order_report.py | 
	283 | 
	4297 | 
	# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class pos_order_report(osv.osv):
    _name = "report.pos.order"
    _description = "Point of Sale Orders Statistics"
    _auto = False
    _columns = {
        'date': fields.datetime('Date Order', readonly=True),
        'partner_id':fields.many2one('res.partner', 'Partner', readonly=True),
        'product_id':fields.many2one('product.product', 'Product', readonly=True),
        'state': fields.selection([('draft', 'New'), ('paid', 'Closed'), ('done', 'Synchronized'), ('invoiced', 'Invoiced'), ('cancel', 'Cancelled')],
                                  'Status'),
        'user_id':fields.many2one('res.users', 'Salesperson', readonly=True),
        'price_total':fields.float('Total Price', readonly=True),
        'total_discount':fields.float('Total Discount', readonly=True),
        'average_price': fields.float('Average Price', readonly=True,group_operator="avg"),
        'location_id':fields.many2one('stock.location', 'Location', readonly=True),
        'company_id':fields.many2one('res.company', 'Company', readonly=True),
        'nbr':fields.integer('# of Lines', readonly=True),  # TDE FIXME master: rename into nbr_lines
        'product_qty':fields.integer('Product Quantity', readonly=True),
        'journal_id': fields.many2one('account.journal', 'Journal'),
        'delay_validation': fields.integer('Delay Validation'),
        'product_categ_id': fields.many2one('product.category', 'Product Category', readonly=True),
    }
    _order = 'date desc'
    def init(self, cr):
        tools.drop_view_if_exists(cr, 'report_pos_order')
        cr.execute("""
            create or replace view report_pos_order as (
                select
                    min(l.id) as id,
                    count(*) as nbr,
                    s.date_order as date,
                    sum(l.qty * u.factor) as product_qty,
                    sum(l.qty * l.price_unit) as price_total,
                    sum((l.qty * l.price_unit) * (l.discount / 100)) as total_discount,
                    (sum(l.qty*l.price_unit)/sum(l.qty * u.factor))::decimal as average_price,
                    sum(cast(to_char(date_trunc('day',s.date_order) - date_trunc('day',s.create_date),'DD') as int)) as delay_validation,
                    s.partner_id as partner_id,
                    s.state as state,
                    s.user_id as user_id,
                    s.location_id as location_id,
                    s.company_id as company_id,
                    s.sale_journal as journal_id,
                    l.product_id as product_id,
                    pt.categ_id as product_categ_id
                from pos_order_line as l
                    left join pos_order s on (s.id=l.order_id)
                    left join product_product p on (p.id=l.product_id)
                    left join product_template pt on (pt.id=p.product_tmpl_id)
                    left join product_uom u on (u.id=pt.uom_id)
                group by
                    s.date_order, s.partner_id,s.state, pt.categ_id,
                    s.user_id,s.location_id,s.company_id,s.sale_journal,l.product_id,s.create_date
                having
                    sum(l.qty * u.factor) != 0)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
 | 
	agpl-3.0 | 
| 
	fabianrost84/cython | 
	Cython/Plex/Errors.py | 
	33 | 
	1169 | 
	#=======================================================================
#
#   Python Lexical Analyser
#
#   Exception classes
#
#=======================================================================
class PlexError(Exception):
    message = ""
class PlexTypeError(PlexError, TypeError):
    pass
class PlexValueError(PlexError, ValueError):
    pass
class InvalidRegex(PlexError):
    pass
class InvalidToken(PlexError):
    def __init__(self, token_number, message):
        PlexError.__init__(self, "Token number %d: %s" % (token_number, message))
class InvalidScanner(PlexError):
    pass
class AmbiguousAction(PlexError):
    message = "Two tokens with different actions can match the same string"
    def __init__(self):
        pass
class UnrecognizedInput(PlexError):
    scanner = None
    position = None
    state_name = None
    def __init__(self, scanner, state_name):
        self.scanner = scanner
        self.position = scanner.get_position()
        self.state_name = state_name
    def __str__(self):
        return ("'%s', line %d, char %d: Token not recognised in state %r" % (
            self.position + (self.state_name,)))
 | 
	apache-2.0 | 
| 
	analurandis/Tur | 
	backend/venv/Lib/site-packages/sphinx/builders/qthelp.py | 
	11 | 
	10819 | 
	# -*- coding: utf-8 -*-
"""
    sphinx.builders.qthelp
    ~~~~~~~~~~~~~~~~~~~~~~
    Build input files for the Qt collection generator.
    :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import os
import re
import codecs
import posixpath
from os import path
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util import force_decode
from sphinx.util.pycompat import htmlescape
_idpattern = re.compile(
    r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$')
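# Illustrative matches for the pattern above (added for clarity): an index
# entry like 'MyClass (class in mymodule)' yields title='MyClass' and
# id='mymodule'; 'run() (mymodule.MyClass method)' yields title='run()',
# id='mymodule.MyClass' and descr='method'.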
# Qt Help Collection Project (.qhcp).
# It is the input file for the help collection generator.
# It contains references to compressed help files which should be
# included in the collection.
# It may contain various other information for customizing Qt Assistant.
collection_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QHelpCollectionProject version="1.0">
    <assistant>
        <title>%(title)s</title>
        <homePage>%(homepage)s</homePage>
        <startPage>%(startpage)s</startPage>
    </assistant>
    <docFiles>
        <generate>
            <file>
                <input>%(outname)s.qhp</input>
                <output>%(outname)s.qch</output>
            </file>
        </generate>
        <register>
            <file>%(outname)s.qch</file>
        </register>
    </docFiles>
</QHelpCollectionProject>
'''
# Qt Help Project (.qhp)
# This is the input file for the help generator.
# It contains the table of contents, indices and references to the
# actual documentation files (*.html).
# In addition it defines a unique namespace for the documentation.
project_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QtHelpProject version="1.0">
    <namespace>%(namespace)s</namespace>
    <virtualFolder>doc</virtualFolder>
    <customFilter name="%(project)s %(version)s">
        <filterAttribute>%(outname)s</filterAttribute>
        <filterAttribute>%(version)s</filterAttribute>
    </customFilter>
    <filterSection>
        <filterAttribute>%(outname)s</filterAttribute>
        <filterAttribute>%(version)s</filterAttribute>
        <toc>
            <section title="%(title)s" ref="%(masterdoc)s.html">
%(sections)s
            </section>
        </toc>
        <keywords>
%(keywords)s
        </keywords>
        <files>
%(files)s
        </files>
    </filterSection>
</QtHelpProject>
'''
section_template = '<section title="%(title)s" ref="%(ref)s"/>'
file_template = ' '*12 + '<file>%(filename)s</file>'
class QtHelpBuilder(StandaloneHTMLBuilder):
    """
    Builder that also outputs Qt help project, contents and index files.
    """
    name = 'qthelp'
    # don't copy the reST source
    copysource = False
    supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
                             'image/jpeg']
    # don't add links
    add_permalinks = False
    # don't add sidebar etc.
    embedded = True
    def init(self):
        StandaloneHTMLBuilder.init(self)
        # the output files for HTML help must be .html only
        self.out_suffix = '.html'
        #self.config.html_style = 'traditional.css'
    def handle_finish(self):
        self.build_qhp(self.outdir, self.config.qthelp_basename)
    def build_qhp(self, outdir, outname):
        self.info('writing project file...')
        # sections
        tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
                                                  prune_toctrees=False)
        istoctree = lambda node: (
                        isinstance(node, addnodes.compact_paragraph)
                            and node.has_key('toctree'))
        sections = []
        for node in tocdoc.traverse(istoctree):
            sections.extend(self.write_toc(node))
        for indexname, indexcls, content, collapse in self.domain_indices:
            item = section_template % {'title': indexcls.localname,
                                       'ref': '%s.html' % indexname}
            sections.append(' ' * 4 * 4 + item)
        # sections may be unicode strings or byte strings, we have to make sure
        # they are all unicode strings before joining them
        new_sections = []
        for section in sections:
            if not isinstance(section, unicode):
                new_sections.append(force_decode(section, None))
            else:
                new_sections.append(section)
        sections = u'\n'.join(new_sections)
        # keywords
        keywords = []
        index = self.env.create_index(self, group_entries=False)
        for (key, group) in index:
            for title, (refs, subitems) in group:
                keywords.extend(self.build_keywords(title, refs, subitems))
        keywords = u'\n'.join(keywords)
        # files
        if not outdir.endswith(os.sep):
            outdir += os.sep
        olen = len(outdir)
        projectfiles = []
        staticdir = path.join(outdir, '_static')
        imagesdir = path.join(outdir, '_images')
        for root, dirs, files in os.walk(outdir):
            resourcedir = root.startswith(staticdir) or \
                          root.startswith(imagesdir)
            for fn in files:
                if (resourcedir and not fn.endswith('.js')) or \
                       fn.endswith('.html'):
                    filename = path.join(root, fn)[olen:]
                    projectfiles.append(file_template %
                                        {'filename': htmlescape(filename)})
        projectfiles = '\n'.join(projectfiles)
        # it seems that the "namespace" may not contain non-alphanumeric
        # characters; more than one successive dot, and leading or trailing
        # dots, are also forbidden
        nspace = 'org.sphinx.%s.%s' % (outname, self.config.version)
        nspace = re.sub('[^a-zA-Z0-9.]', '', nspace)
        nspace = re.sub(r'\.+', '.', nspace).strip('.')
        nspace = nspace.lower()
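        # e.g. (illustrative) outname='My Project', version='1.0' gives
        # 'org.sphinx.MyProject.1.0' and finally 'org.sphinx.myproject.1.0'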
        # write the project file
        f = codecs.open(path.join(outdir, outname+'.qhp'), 'w', 'utf-8')
        try:
            f.write(project_template % {
                'outname': htmlescape(outname),
                'title': htmlescape(self.config.html_title),
                'version': htmlescape(self.config.version),
                'project': htmlescape(self.config.project),
                'namespace': htmlescape(nspace),
                'masterdoc': htmlescape(self.config.master_doc),
                'sections': sections,
                'keywords': keywords,
                'files': projectfiles})
        finally:
            f.close()
        homepage = 'qthelp://' + posixpath.join(
            nspace, 'doc', self.get_target_uri(self.config.master_doc))
        startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')
        self.info('writing collection project file...')
        f = codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8')
        try:
            f.write(collection_template % {
                'outname': htmlescape(outname),
                'title': htmlescape(self.config.html_short_title),
                'homepage': htmlescape(homepage),
                'startpage': htmlescape(startpage)})
        finally:
            f.close()
    def isdocnode(self, node):
        if not isinstance(node, nodes.list_item):
            return False
        if len(node.children) != 2:
            return False
        if not isinstance(node.children[0], addnodes.compact_paragraph):
            return False
        if not isinstance(node.children[0][0], nodes.reference):
            return False
        if not isinstance(node.children[1], nodes.bullet_list):
            return False
        return True
    def write_toc(self, node, indentlevel=4):
        # XXX this should return a Unicode string, not a bytestring
        parts = []
        if self.isdocnode(node):
            refnode = node.children[0][0]
            link = refnode['refuri']
            title = htmlescape(refnode.astext()).replace('"', '&quot;')
            item = '<section title="%(title)s" ref="%(ref)s">' % \
                {'title': title, 'ref': link}
            parts.append(' '*4*indentlevel + item)
            for subnode in node.children[1]:
                parts.extend(self.write_toc(subnode, indentlevel+1))
            parts.append(' '*4*indentlevel + '</section>')
        elif isinstance(node, nodes.list_item):
            for subnode in node:
                parts.extend(self.write_toc(subnode, indentlevel))
        elif isinstance(node, nodes.reference):
            link = node['refuri']
            title = htmlescape(node.astext()).replace('"','&quot;')
            item = section_template % {'title': title, 'ref': link}
            item = u' ' * 4 * indentlevel + item
            parts.append(item.encode('ascii', 'xmlcharrefreplace'))
        elif isinstance(node, nodes.bullet_list):
            for subnode in node:
                parts.extend(self.write_toc(subnode, indentlevel))
        elif isinstance(node, addnodes.compact_paragraph):
            for subnode in node:
                parts.extend(self.write_toc(subnode, indentlevel))
        return parts
    def keyword_item(self, name, ref):
        matchobj = _idpattern.match(name)
        if matchobj:
            groupdict = matchobj.groupdict()
            shortname = groupdict['title']
            id = groupdict.get('id')
            #descr = groupdict.get('descr')
            if shortname.endswith('()'):
                shortname = shortname[:-2]
            id = '%s.%s' % (id, shortname)
        else:
            id = None
        if id:
            item = ' '*12 + '<keyword name="%s" id="%s" ref="%s"/>' % (
                name, id, ref[1])
        else:
            item = ' '*12 + '<keyword name="%s" ref="%s"/>' % (name, ref[1])
        item.encode('ascii', 'xmlcharrefreplace')
        return item
    def build_keywords(self, title, refs, subitems):
        keywords = []
        title = htmlescape(title)
#        if len(refs) == 0: # XXX
#            write_param('See Also', title)
        if len(refs) == 1:
            keywords.append(self.keyword_item(title, refs[0]))
        elif len(refs) > 1:
            for i, ref in enumerate(refs):  # XXX
#                item = (' '*12 +
#                        '<keyword name="%s [%d]" ref="%s"/>' % (
#                                                        title, i, ref))
#                item.encode('ascii', 'xmlcharrefreplace')
#                keywords.append(item)
                keywords.append(self.keyword_item(title, ref))
        if subitems:
            for subitem in subitems:
                keywords.extend(self.build_keywords(subitem[0], subitem[1], []))
        return keywords
 | 
	mit | 
| 
	jaingaurav/Diamond | 
	src/diamond/handler/test/teststatsdhandler.py | 
	20 | 
	3122 | 
	#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import unittest
from test import run_only
from mock import patch
import configobj
from diamond.handler.stats_d import StatsdHandler
from diamond.metric import Metric
def run_only_if_statsd_is_available(func):
    try:
        import statsd
    except ImportError:
        statsd = None
    pred = lambda: statsd is not None
    return run_only(func, pred)
class TestStatsdHandler(unittest.TestCase):
    @run_only_if_statsd_is_available
    @patch('statsd.StatsClient')
    def test_single_gauge(self, mock_client):
        config = configobj.ConfigObj()
        config['host'] = 'localhost'
        config['port'] = '9999'
        config['batch'] = 1
        metric = Metric('servers.com.example.www.cpu.total.idle',
                        123, raw_value=123, timestamp=1234567,
                        host='will-be-ignored', metric_type='GAUGE')
        expected_data = ('servers.com.example.www.cpu.total.idle', 123)
        handler = StatsdHandler(config)
        handler.process(metric)
        handler.connection.gauge.assert_called_with(*expected_data)
        handler.connection.send.assert_called_with()
    @run_only_if_statsd_is_available
    @patch('statsd.StatsClient')
    def test_single_counter(self, mock_client):
        config = configobj.ConfigObj()
        config['host'] = 'localhost'
        config['port'] = '9999'
        config['batch'] = 1
        metric = Metric('servers.com.example.www.cpu.total.idle',
                        5, raw_value=123, timestamp=1234567,
                        host='will-be-ignored', metric_type='COUNTER')
        expected_data = ('servers.com.example.www.cpu.total.idle', 123)
        handler = StatsdHandler(config)
        handler.process(metric)
        handler.connection.incr.assert_called_with(*expected_data)
        handler.connection.send.assert_called_with()
    @run_only_if_statsd_is_available
    @patch('statsd.StatsClient')
    def test_multiple_counter(self, mock_client):
        config = configobj.ConfigObj()
        config['host'] = 'localhost'
        config['port'] = '9999'
        config['batch'] = 1
        metric1 = Metric('servers.com.example.www.cpu.total.idle',
                         5, raw_value=123, timestamp=1234567,
                         host='will-be-ignored', metric_type='COUNTER')
        metric2 = Metric('servers.com.example.www.cpu.total.idle',
                         7, raw_value=128, timestamp=1234567,
                         host='will-be-ignored', metric_type='COUNTER')
        expected_data1 = ('servers.com.example.www.cpu.total.idle', 123)
        expected_data2 = ('servers.com.example.www.cpu.total.idle', 5)
        handler = StatsdHandler(config)
        handler.process(metric1)
        handler.connection.incr.assert_called_with(*expected_data1)
        handler.connection.send.assert_called_with()
        handler.process(metric2)
        handler.connection.incr.assert_called_with(*expected_data2)
        handler.connection.send.assert_called_with()
 | 
	mit | 
| 
	ckjoshi9/Auto-Mate-for-Tinder | 
	Django App/tinderapp/src/Pixel.py | 
	2 | 
	1491 | 
	from __future__ import division
from colorsys import *
class Pixel:
    def __init__(self, x, y, red, green, blue):
        # store coordinates and region in private attributes so that the
        # read-only properties below do not recurse into themselves
        self._x = x
        self._y = y
        self.red = red
        self.green = green
        self.blue = blue
        self._region = None
    @property
    def region(self):
        return self._region
    @region.setter
    def region(self, value):
        self._region = value
    @property
    def x(self):
        return self._x
    @property
    def y(self):
        return self._y
    def in_region(self):
        return self.region is not None
    def is_skin(self):
        r = self.red
        g = self.green
        b = self.blue
        rgbClassifier = ((r > 95) and (g > 40 and g < 100) and (b > 20) and ((max(r, g, b) - min(r, g, b)) > 15) and (abs(r-g) > 15) and (r > g) and (r > b))
        normalizedRGBClassifier = False
        if r != 0 and g != 0 and b != 0:
            normR = (r/(r + g + b))
            normG = (g/(r + g + b))
            normB = (b/(r + g + b))
            normalizedRGBClassifier = (((normR/normG) > 1.185) and (((r * b)/(pow(r + g + b, 2))) > 0.107) and (((r * g)/(pow(r + g + b,2))) > 0.112))
        hsv = rgb_to_hsv(r, g, b)
        hsvClassifier = (hsv[0] > 0 and hsv[0] < 35 and hsv[1] > 0.23 and hsv[1] < 0.68)
        return (rgbClassifier or normalizedRGBClassifier or hsvClassifier)
    def intensity(self):
        return (self.red + self.green + self.blue)/3
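# Minimal usage sketch (added for illustration; the pixel values are made up):
if __name__ == "__main__":
    p = Pixel(10, 20, 220, 170, 140)
    print(p.is_skin())    # True: the normalized-RGB classifier accepts this triple
    print(p.in_region())  # False: no region assigned yet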
 | 
	mit | 
| 
	pquentin/libcloud | 
	libcloud/storage/drivers/s3.py | 
	3 | 
	42505 | 
	# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hmac
import time
from hashlib import sha1
import libcloud.utils.py3
try:
    if libcloud.utils.py3.DEFAULT_LXML:
        from lxml.etree import Element, SubElement
    else:
        from xml.etree.ElementTree import Element, SubElement
except ImportError:
    from xml.etree.ElementTree import Element, SubElement
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlquote
from libcloud.utils.py3 import b
from libcloud.utils.py3 import tostring
from libcloud.utils.xml import fixxpath, findtext
from libcloud.utils.files import read_in_chunks
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.base import ConnectionUserAndKey, RawResponse
from libcloud.common.aws import AWSBaseResponse, AWSDriver, \
    AWSTokenConnection, SignedAWSConnection
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
# How long before the token expires
EXPIRATION_SECONDS = 15 * 60
S3_US_STANDARD_HOST = 's3.amazonaws.com'
S3_US_EAST2_HOST = 's3-us-east-2.amazonaws.com'
S3_US_WEST_HOST = 's3-us-west-1.amazonaws.com'
S3_US_WEST_OREGON_HOST = 's3-us-west-2.amazonaws.com'
S3_US_GOV_WEST_HOST = 's3-us-gov-west-1.amazonaws.com'
S3_CN_NORTH_HOST = 's3.cn-north-1.amazonaws.com.cn'
S3_EU_WEST_HOST = 's3-eu-west-1.amazonaws.com'
S3_EU_WEST2_HOST = 's3-eu-west-2.amazonaws.com'
S3_EU_CENTRAL_HOST = 's3-eu-central-1.amazonaws.com'
S3_AP_SOUTH_HOST = 's3-ap-south-1.amazonaws.com'
S3_AP_SOUTHEAST_HOST = 's3-ap-southeast-1.amazonaws.com'
S3_AP_SOUTHEAST2_HOST = 's3-ap-southeast-2.amazonaws.com'
S3_AP_NORTHEAST1_HOST = 's3-ap-northeast-1.amazonaws.com'
S3_AP_NORTHEAST2_HOST = 's3-ap-northeast-2.amazonaws.com'
S3_AP_NORTHEAST_HOST = S3_AP_NORTHEAST1_HOST
S3_SA_EAST_HOST = 's3-sa-east-1.amazonaws.com'
S3_SA_SOUTHEAST2_HOST = 's3-sa-east-2.amazonaws.com'
S3_CA_CENTRAL_HOST = 's3-ca-central-1.amazonaws.com'
API_VERSION = '2006-03-01'
NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION)
# AWS multi-part chunks must be minimum 5MB
CHUNK_SIZE = 5 * 1024 * 1024
# Desired number of items in each response inside a paginated request in
# ex_iterate_multipart_uploads.
RESPONSES_PER_REQUEST = 100
class S3Response(AWSBaseResponse):
    namespace = None
    valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,
                            httplib.BAD_REQUEST]
    def success(self):
        i = int(self.status)
        return i >= 200 and i <= 299 or i in self.valid_response_codes
    def parse_error(self):
        if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
            raise InvalidCredsError(self.body)
        elif self.status == httplib.MOVED_PERMANENTLY:
            raise LibcloudError('This bucket is located in a different ' +
                                'region. Please use the correct driver.',
                                driver=S3StorageDriver)
        raise LibcloudError('Unknown error. Status code: %d' % (self.status),
                            driver=S3StorageDriver)
class S3RawResponse(S3Response, RawResponse):
    pass
class BaseS3Connection(ConnectionUserAndKey):
    """
    Represents a single connection to the S3 Endpoint
    """
    host = 's3.amazonaws.com'
    responseCls = S3Response
    rawResponseCls = S3RawResponse
    @staticmethod
    def get_auth_signature(method, headers, params, expires, secret_key, path,
                           vendor_prefix):
        """
        Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID,
                                    UTF-8-Encoding-Of( StringToSign ) ) ) );
        StringToSign = HTTP-VERB + "\n" +
            Content-MD5 + "\n" +
            Content-Type + "\n" +
            Expires + "\n" +
            CanonicalizedVendorHeaders +
            CanonicalizedResource;
        """
        special_headers = {'content-md5': '', 'content-type': '', 'date': ''}
        vendor_headers = {}
        for key, value in list(headers.items()):
            key_lower = key.lower()
            if key_lower in special_headers:
                special_headers[key_lower] = value.strip()
            elif key_lower.startswith(vendor_prefix):
                vendor_headers[key_lower] = value.strip()
        if expires:
            special_headers['date'] = str(expires)
        buf = [method]
        for _, value in sorted(special_headers.items()):
            buf.append(value)
        string_to_sign = '\n'.join(buf)
        buf = []
        for key, value in sorted(vendor_headers.items()):
            buf.append('%s:%s' % (key, value))
        header_string = '\n'.join(buf)
        values_to_sign = []
        for value in [string_to_sign, header_string, path]:
            if value:
                values_to_sign.append(value)
        string_to_sign = '\n'.join(values_to_sign)
        b64_hmac = base64.b64encode(
            hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest()
        )
        return b64_hmac.decode('utf-8')
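    # Illustrative example (added; not from the original driver): a GET of
    # '/my-bucket/my-object' with no extra headers and Expires=1400000000
    # yields StringToSign = "GET\n\n\n1400000000\n/my-bucket/my-object",
    # which is then HMAC-SHA1 signed and base64 encoded above.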
    def add_default_params(self, params):
        expires = str(int(time.time()) + EXPIRATION_SECONDS)
        params['AWSAccessKeyId'] = self.user_id
        params['Expires'] = expires
        return params
    def pre_connect_hook(self, params, headers):
        params['Signature'] = self.get_auth_signature(
            method=self.method, headers=headers, params=params,
            expires=params['Expires'], secret_key=self.key, path=self.action,
            vendor_prefix=self.driver.http_vendor_prefix)
        return params, headers
class S3Connection(AWSTokenConnection, BaseS3Connection):
    """
    Represents a single connection to the S3 endpoint, with AWS-specific
    features.
    """
    pass
class S3SignatureV4Connection(SignedAWSConnection, BaseS3Connection):
    service_name = 's3'
    version = API_VERSION
    def __init__(self, user_id, key, secure=True, host=None, port=None,
                 url=None, timeout=None, proxy_url=None, token=None,
                 retry_delay=None, backoff=None):
        super(S3SignatureV4Connection, self).__init__(
            user_id, key, secure, host,
            port, url, timeout, proxy_url,
            token, retry_delay, backoff,
            4)  # force version 4
class S3MultipartUpload(object):
    """
    Class representing an amazon s3 multipart upload
    """
    def __init__(self, key, id, created_at, initiator, owner):
        """
        Class representing an amazon s3 multipart upload
        :param key: The object/key that was being uploaded
        :type key: ``str``
        :param id: The upload id assigned by amazon
        :type id: ``str``
        :param created_at: The date/time at which the upload was started
        :type created_at: ``str``
        :param initiator: The AWS owner/IAM user who initiated this
        :type initiator: ``str``
        :param owner: The AWS owner/IAM who will own this object
        :type owner: ``str``
        """
        self.key = key
        self.id = id
        self.created_at = created_at
        self.initiator = initiator
        self.owner = owner
    def __repr__(self):
        return ('<S3MultipartUpload: key=%s>' % (self.key))
class BaseS3StorageDriver(StorageDriver):
    name = 'Amazon S3 (standard)'
    website = 'http://aws.amazon.com/s3/'
    connectionCls = BaseS3Connection
    hash_type = 'md5'
    supports_chunked_encoding = False
    supports_s3_multipart_upload = True
    ex_location_name = ''
    namespace = NAMESPACE
    http_vendor_prefix = 'x-amz'
    def iterate_containers(self):
        response = self.connection.request('/')
        if response.status == httplib.OK:
            containers = self._to_containers(obj=response.object,
                                             xpath='Buckets/Bucket')
            return containers
        raise LibcloudError('Unexpected status code: %s' % (response.status),
                            driver=self)
    def list_container_objects(self, container, ex_prefix=None):
        """
        Return a list of objects for the given container.
        :param container: Container instance.
        :type container: :class:`Container`
        :param ex_prefix: Only return objects starting with ex_prefix
        :type ex_prefix: ``str``
        :return: A list of Object instances.
        :rtype: ``list`` of :class:`Object`
        """
        return list(self.iterate_container_objects(container,
                                                   ex_prefix=ex_prefix))
    def iterate_container_objects(self, container, ex_prefix=None):
        """
        Return a generator of objects for the given container.
        :param container: Container instance
        :type container: :class:`Container`
        :param ex_prefix: Only return objects starting with ex_prefix
        :type ex_prefix: ``str``
        :return: A generator of Object instances.
        :rtype: ``generator`` of :class:`Object`
        """
        params = {}
        if ex_prefix:
            params['prefix'] = ex_prefix
        last_key = None
        exhausted = False
        container_path = self._get_container_path(container)
        while not exhausted:
            if last_key:
                params['marker'] = last_key
            response = self.connection.request(container_path,
                                               params=params)
            if response.status != httplib.OK:
                raise LibcloudError('Unexpected status code: %s' %
                                    (response.status), driver=self)
            objects = self._to_objs(obj=response.object,
                                    xpath='Contents', container=container)
            is_truncated = response.object.findtext(fixxpath(
                xpath='IsTruncated', namespace=self.namespace)).lower()
            exhausted = (is_truncated == 'false')
            last_key = None
            for obj in objects:
                last_key = obj.name
                yield obj
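    # Illustrative usage of the iterator above (added; names are hypothetical):
    #   for obj in driver.iterate_container_objects(container, ex_prefix='logs/'):
    #       print(obj.name)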
    def get_container(self, container_name):
        try:
            response = self.connection.request('/%s' % container_name,
                                               method='HEAD')
            if response.status == httplib.NOT_FOUND:
                raise ContainerDoesNotExistError(value=None, driver=self,
                                                 container_name=container_name)
        except InvalidCredsError:
            # This just means the user doesn't have IAM permissions to do a
            # HEAD request but other requests might work.
            pass
        return Container(name=container_name, extra=None, driver=self)
    def get_object(self, container_name, object_name):
        container = self.get_container(container_name=container_name)
        object_path = self._get_object_path(container, object_name)
        response = self.connection.request(object_path, method='HEAD')
        if response.status == httplib.OK:
            obj = self._headers_to_object(object_name=object_name,
                                          container=container,
                                          headers=response.headers)
            return obj
        raise ObjectDoesNotExistError(value=None, driver=self,
                                      object_name=object_name)
    def _get_container_path(self, container):
        """
        Return a container path
        :param container: Container instance
        :type  container: :class:`Container`
        :return: A path for this container.
        :rtype: ``str``
        """
        return '/%s' % (container.name)
    def _get_object_path(self, container, object_name):
        """
        Return an object's CDN path.
        :param container: Container instance
        :type  container: :class:`Container`
        :param object_name: Object name
        :type  object_name: :class:`str`
        :return: A  path for this object.
        :rtype: ``str``
        """
        container_url = self._get_container_path(container)
        object_name_cleaned = self._clean_object_name(object_name)
        object_path = '%s/%s' % (container_url, object_name_cleaned)
        return object_path
    def create_container(self, container_name):
        if self.ex_location_name:
            root = Element('CreateBucketConfiguration')
            child = SubElement(root, 'LocationConstraint')
            child.text = self.ex_location_name
            data = tostring(root)
        else:
            data = ''
        response = self.connection.request('/%s' % (container_name),
                                           data=data,
                                           method='PUT')
        if response.status == httplib.OK:
            container = Container(name=container_name, extra=None, driver=self)
            return container
        elif response.status == httplib.CONFLICT:
            raise InvalidContainerNameError(
                value='Container with this name already exists. The name must '
                      'be unique among all the containers in the system',
                container_name=container_name, driver=self)
        elif response.status == httplib.BAD_REQUEST:
            raise ContainerError(
                value='Bad request when creating container: %s' %
                      response.body,
                container_name=container_name, driver=self)
        raise LibcloudError('Unexpected status code: %s' % (response.status),
                            driver=self)
    def delete_container(self, container):
        # Note: All the objects in the container must be deleted first
        response = self.connection.request('/%s' % (container.name),
                                           method='DELETE')
        if response.status == httplib.NO_CONTENT:
            return True
        elif response.status == httplib.CONFLICT:
            raise ContainerIsNotEmptyError(
                value='Container must be empty before it can be deleted.',
                container_name=container.name, driver=self)
        elif response.status == httplib.NOT_FOUND:
            raise ContainerDoesNotExistError(value=None,
                                             driver=self,
                                             container_name=container.name)
        return False
    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        obj_path = self._get_object_path(obj.container, obj.name)
        response = self.connection.request(obj_path, method='GET', raw=True)
        return self._get_object(obj=obj, callback=self._save_object,
                                response=response,
                                callback_kwargs={
                                    'obj': obj,
                                    'response': response.response,
                                    'destination_path': destination_path,
                                    'overwrite_existing': overwrite_existing,
                                    'delete_on_failure': delete_on_failure},
                                success_status_code=httplib.OK)
    def download_object_as_stream(self, obj, chunk_size=None):
        obj_path = self._get_object_path(obj.container, obj.name)
        response = self.connection.request(obj_path, method='GET',
                                           stream=True, raw=True)
        return self._get_object(
            obj=obj, callback=read_in_chunks,
            response=response,
            callback_kwargs={'iterator': response.iter_content(CHUNK_SIZE),
                             'chunk_size': chunk_size},
            success_status_code=httplib.OK)
    def upload_object(self, file_path, container, object_name, extra=None,
                      verify_hash=True, ex_storage_class=None):
        """
        @inherits: :class:`StorageDriver.upload_object`
        :param ex_storage_class: Storage class
        :type ex_storage_class: ``str``
        """
        return self._put_object(container=container, object_name=object_name,
                                extra=extra, file_path=file_path,
                                verify_hash=verify_hash,
                                storage_class=ex_storage_class)
    def _initiate_multipart(self, container, object_name, headers=None):
        """
        Initiates a multipart upload to S3
        :param container: The destination container
        :type container: :class:`Container`
        :param object_name: The name of the object which we are uploading
        :type object_name: ``str``
        :keyword headers: Additional headers to send with the request
        :type headers: ``dict``
        :return: The id of the newly created multipart upload
        :rtype: ``str``
        """
        headers = headers or {}
        request_path = self._get_object_path(container, object_name)
        params = {'uploads': ''}
        response = self.connection.request(request_path, method='POST',
                                           headers=headers, params=params)
        if response.status != httplib.OK:
            raise LibcloudError('Error initiating multipart upload',
                                driver=self)
        return findtext(element=response.object, xpath='UploadId',
                        namespace=self.namespace)
    def _upload_multipart_chunks(self, container, object_name, upload_id,
                                 stream, calculate_hash=True):
        """
        Uploads data from an iterator in fixed sized chunks to S3
        :param container: The destination container
        :type container: :class:`Container`
        :param object_name: The name of the object which we are uploading
        :type object_name: ``str``
        :param upload_id: The upload id allocated for this multipart upload
        :type upload_id: ``str``
        :param stream: The generator for fetching the upload data
        :type stream: ``generator``
        :keyword calculate_hash: Indicates if we must calculate the data hash
        :type calculate_hash: ``bool``
        :return: A tuple of (chunks, checksum, bytes transferred), where
            chunks is a list of (part number, server etag) pairs
        :rtype: ``tuple``
        """
        data_hash = None
        if calculate_hash:
            data_hash = self._get_hash_function()
        bytes_transferred = 0
        count = 1
        chunks = []
        params = {'uploadId': upload_id}
        request_path = self._get_object_path(container, object_name)
        # Read the input data in chunk sizes suitable for AWS
        for data in read_in_chunks(stream, chunk_size=CHUNK_SIZE,
                                   fill_size=True, yield_empty=True):
            bytes_transferred += len(data)
            if calculate_hash:
                data_hash.update(data)
            chunk_hash = self._get_hash_function()
            chunk_hash.update(data)
            chunk_hash = base64.b64encode(chunk_hash.digest()).decode('utf-8')
            # The Content-MD5 header provides an extra level of data check and
            # is recommended by amazon
            headers = {
                'Content-Length': len(data),
                'Content-MD5': chunk_hash,
            }
            params['partNumber'] = count
            resp = self.connection.request(request_path, method='PUT',
                                           data=data, headers=headers,
                                           params=params)
            if resp.status != httplib.OK:
                raise LibcloudError('Error uploading chunk', driver=self)
            server_hash = resp.headers['etag'].replace('"', '')
            # Keep this data for a later commit
            chunks.append((count, server_hash))
            count += 1
        if calculate_hash:
            data_hash = data_hash.hexdigest()
        return (chunks, data_hash, bytes_transferred)
    def _commit_multipart(self, container, object_name, upload_id, chunks):
        """
        Makes a final commit of the data.
        :param container: The destination container
        :type container: :class:`Container`
        :param object_name: The name of the object which we are uploading
        :type object_name: ``str``
        :param upload_id: The upload id allocated for this multipart upload
        :type upload_id: ``str``
        :param chunks: A list of (chunk_number, chunk_hash) tuples.
        :type chunks: ``list``
        :return: The server side hash of the uploaded data
        :rtype: ``str``
        """
        root = Element('CompleteMultipartUpload')
        for (count, etag) in chunks:
            part = SubElement(root, 'Part')
            part_no = SubElement(part, 'PartNumber')
            part_no.text = str(count)
            etag_id = SubElement(part, 'ETag')
            etag_id.text = str(etag)
        data = tostring(root)
        headers = {'Content-Length': len(data)}
        params = {'uploadId': upload_id}
        request_path = self._get_object_path(container, object_name)
        response = self.connection.request(request_path, headers=headers,
                                           params=params, data=data,
                                           method='POST')
        if response.status != httplib.OK:
            element = response.object
            # pylint: disable=maybe-no-member
            code, message = response._parse_error_details(element=element)
            msg = 'Error in multipart commit: %s (%s)' % (message, code)
            raise LibcloudError(msg, driver=self)
        # Get the server's etag to be passed back to the caller
        body = response.parse_body()
        server_hash = body.find(fixxpath(xpath='ETag',
                                         namespace=self.namespace)).text
        return server_hash
    def _abort_multipart(self, container, object_name, upload_id):
        """
        Aborts an already initiated multipart upload
        :param container: The destination container
        :type container: :class:`Container`
        :param object_name: The name of the object which we are uploading
        :type object_name: ``str``
        :param upload_id: The upload id allocated for this multipart upload
        :type upload_id: ``str``
        """
        params = {'uploadId': upload_id}
        request_path = self._get_object_path(container, object_name)
        resp = self.connection.request(request_path, method='DELETE',
                                       params=params)
        if resp.status != httplib.NO_CONTENT:
            raise LibcloudError('Error in multipart abort. status_code=%d' %
                                (resp.status), driver=self)
    def upload_object_via_stream(self, iterator, container, object_name,
                                 extra=None, ex_storage_class=None):
        """
        @inherits: :class:`StorageDriver.upload_object_via_stream`
        :param ex_storage_class: Storage class
        :type ex_storage_class: ``str``
        """
        method = 'PUT'
        params = None
        # This driver is also used by other S3 API-compatible drivers.
        # Amazon provides a different (more involved) mechanism for multipart
        # uploads.
        if self.supports_s3_multipart_upload:
            return self._put_object_multipart(container=container,
                                              object_name=object_name,
                                              extra=extra,
                                              stream=iterator,
                                              verify_hash=False,
                                              storage_class=ex_storage_class)
        return self._put_object(container=container, object_name=object_name,
                                extra=extra, method=method, query_args=params,
                                stream=iterator, verify_hash=False,
                                storage_class=ex_storage_class)
    def delete_object(self, obj):
        object_path = self._get_object_path(obj.container, obj.name)
        response = self.connection.request(object_path, method='DELETE')
        if response.status == httplib.NO_CONTENT:
            return True
        elif response.status == httplib.NOT_FOUND:
            raise ObjectDoesNotExistError(value=None, driver=self,
                                          object_name=obj.name)
        return False
    def ex_iterate_multipart_uploads(self, container, prefix=None,
                                     delimiter=None):
        """
        Extension method for listing all in-progress S3 multipart uploads.
        Each multipart upload which has not been committed or aborted is
        considered in-progress.
        :param container: The container holding the uploads
        :type container: :class:`Container`
        :keyword prefix: Return only uploads of objects with this prefix
        :type prefix: ``str``
        :keyword delimiter: The object/key names are grouped based on
            being split by this delimiter
        :type delimiter: ``str``
        :return: A generator of S3MultipartUpload instances.
        :rtype: ``generator`` of :class:`S3MultipartUpload`
        """
        if not self.supports_s3_multipart_upload:
            raise LibcloudError('Feature not supported', driver=self)
        # Get the data for a specific container
        request_path = self._get_container_path(container)
        params = {'max-uploads': RESPONSES_PER_REQUEST, 'uploads': ''}
        if prefix:
            params['prefix'] = prefix
        if delimiter:
            params['delimiter'] = delimiter
        def finder(node, text):
            return node.findtext(fixxpath(xpath=text,
                                          namespace=self.namespace))
        while True:
            response = self.connection.request(request_path, params=params)
            if response.status != httplib.OK:
                raise LibcloudError('Error fetching multipart uploads. '
                                    'Got code: %s' % response.status,
                                    driver=self)
            body = response.parse_body()
            # pylint: disable=maybe-no-member
            for node in body.findall(fixxpath(xpath='Upload',
                                              namespace=self.namespace)):
                initiator = node.find(fixxpath(xpath='Initiator',
                                               namespace=self.namespace))
                owner = node.find(fixxpath(xpath='Owner',
                                           namespace=self.namespace))
                key = finder(node, 'Key')
                upload_id = finder(node, 'UploadId')
                created_at = finder(node, 'Initiated')
                initiator = finder(initiator, 'DisplayName')
                owner = finder(owner, 'DisplayName')
                yield S3MultipartUpload(key, upload_id, created_at,
                                        initiator, owner)
            # Check if this is the last entry in the listing
            # pylint: disable=maybe-no-member
            is_truncated = body.findtext(fixxpath(xpath='IsTruncated',
                                                  namespace=self.namespace))
            if is_truncated.lower() == 'false':
                break
            # Provide params for the next request
            upload_marker = body.findtext(fixxpath(xpath='NextUploadIdMarker',
                                                   namespace=self.namespace))
            key_marker = body.findtext(fixxpath(xpath='NextKeyMarker',
                                                namespace=self.namespace))
            params['key-marker'] = key_marker
            params['upload-id-marker'] = upload_marker
    def ex_cleanup_all_multipart_uploads(self, container, prefix=None):
        """
        Extension method for removing all partially completed S3 multipart
        uploads.
        :param container: The container holding the uploads
        :type container: :class:`Container`
        :keyword prefix: Delete only uploads of objects with this prefix
        :type prefix: ``str``
        """
        # Iterate through the container and delete the upload ids
        for upload in self.ex_iterate_multipart_uploads(container, prefix,
                                                        delimiter=None):
            self._abort_multipart(container, upload.key, upload.id)
    def _clean_object_name(self, name):
        name = urlquote(name)
        return name
    def _put_object(self, container, object_name, method='PUT',
                    query_args=None, extra=None, file_path=None,
                    stream=None, verify_hash=True, storage_class=None):
        headers = {}
        extra = extra or {}
        headers.update(self._to_storage_class_headers(storage_class))
        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)
        acl = extra.get('acl', None)
        if meta_data:
            for key, value in list(meta_data.items()):
                key = self.http_vendor_prefix + '-meta-%s' % (key)
                headers[key] = value
        if acl:
            headers[self.http_vendor_prefix + '-acl'] = acl
        request_path = self._get_object_path(container, object_name)
        if query_args:
            request_path = '?'.join((request_path, query_args))
        result_dict = self._upload_object(
            object_name=object_name, content_type=content_type,
            request_path=request_path, request_method=method,
            headers=headers, file_path=file_path, stream=stream)
        response = result_dict['response']
        bytes_transferred = result_dict['bytes_transferred']
        headers = response.headers
        server_hash = headers.get('etag', '').replace('"', '')
        if (verify_hash and result_dict['data_hash'] != server_hash):
            raise ObjectHashMismatchError(
                value='MD5 hash {0} checksum does not match {1}'.format(
                    server_hash, result_dict['data_hash']),
                object_name=object_name, driver=self)
        elif response.status == httplib.OK:
            obj = Object(
                name=object_name, size=bytes_transferred, hash=server_hash,
                extra={'acl': acl}, meta_data=meta_data, container=container,
                driver=self)
            return obj
        else:
            raise LibcloudError(
                'Unexpected status code, status_code=%s' % (response.status),
                driver=self)
    def _put_object_multipart(self, container, object_name, stream,
                              extra=None, verify_hash=False,
                              storage_class=None):
        """
        Uploads an object using the S3 multipart algorithm.
        :param container: The destination container
        :type container: :class:`Container`
        :param object_name: The name of the object which we are uploading
        :type object_name: ``str``
        :param stream: The generator for fetching the upload data
        :type stream: ``generator``
        :keyword verify_hash: Indicates if we must calculate the data hash
        :type verify_hash: ``bool``
        :keyword extra: Additional options
        :type extra: ``dict``
        :keyword storage_class: The name of the S3 object's storage class
        :type storage_class: ``str``
        :return: The uploaded object
        :rtype: :class:`Object`
        """
        headers = {}
        extra = extra or {}
        headers.update(self._to_storage_class_headers(storage_class))
        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)
        acl = extra.get('acl', None)
        if content_type:
            headers['Content-Type'] = content_type
        if meta_data:
            for key, value in list(meta_data.items()):
                key = self.http_vendor_prefix + '-meta-%s' % (key)
                headers[key] = value
        if acl:
            headers[self.http_vendor_prefix + '-acl'] = acl
        upload_id = self._initiate_multipart(container, object_name,
                                             headers=headers)
        try:
            result = self._upload_multipart_chunks(container, object_name,
                                                   upload_id, stream,
                                                   calculate_hash=verify_hash)
            chunks, data_hash, bytes_transferred = result
            # Commit the chunk info and complete the upload
            etag = self._commit_multipart(container, object_name, upload_id,
                                          chunks)
        except Exception:
            # Amazon provides a mechanism for aborting an upload.
            self._abort_multipart(container, object_name, upload_id)
            raise
        return Object(
            name=object_name, size=bytes_transferred, hash=etag,
            extra={'acl': acl}, meta_data=meta_data, container=container,
            driver=self)
    def _to_storage_class_headers(self, storage_class):
        """
        Generates request headers given a storage class name.
        :keyword storage_class: The name of the S3 object's storage class
        :type storage_class: ``str``
        :return: Headers to include in a request
        :rtype: ``dict``
        """
        headers = {}
        storage_class = storage_class or 'standard'
        if storage_class not in ['standard', 'reduced_redundancy']:
            raise ValueError(
                'Invalid storage class value: %s' % (storage_class))
        key = self.http_vendor_prefix + '-storage-class'
        headers[key] = storage_class.upper()
        return headers
    def _to_containers(self, obj, xpath):
        for element in obj.findall(fixxpath(xpath=xpath,
                                            namespace=self.namespace)):
            yield self._to_container(element)
    def _to_objs(self, obj, xpath, container):
        return [self._to_obj(element, container) for element in
                obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))]
    def _to_container(self, element):
        extra = {
            'creation_date': findtext(element=element, xpath='CreationDate',
                                      namespace=self.namespace)
        }
        container = Container(name=findtext(element=element, xpath='Name',
                                            namespace=self.namespace),
                              extra=extra,
                              driver=self
                              )
        return container
    def _headers_to_object(self, object_name, container, headers):
        hash = headers['etag'].replace('"', '')
        extra = {'content_type': headers['content-type'],
                 'etag': headers['etag']}
        meta_data = {}
        if 'last-modified' in headers:
            extra['last_modified'] = headers['last-modified']
        for key, value in headers.items():
            if not key.lower().startswith(self.http_vendor_prefix + '-meta-'):
                continue
            key = key.replace(self.http_vendor_prefix + '-meta-', '')
            meta_data[key] = value
        obj = Object(name=object_name, size=headers['content-length'],
                     hash=hash, extra=extra,
                     meta_data=meta_data,
                     container=container,
                     driver=self)
        return obj
    def _to_obj(self, element, container):
        owner_id = findtext(element=element, xpath='Owner/ID',
                            namespace=self.namespace)
        owner_display_name = findtext(element=element,
                                      xpath='Owner/DisplayName',
                                      namespace=self.namespace)
        meta_data = {'owner': {'id': owner_id,
                               'display_name': owner_display_name}}
        last_modified = findtext(element=element,
                                 xpath='LastModified',
                                 namespace=self.namespace)
        extra = {'last_modified': last_modified}
        obj = Object(name=findtext(element=element, xpath='Key',
                                   namespace=self.namespace),
                     size=int(findtext(element=element, xpath='Size',
                                       namespace=self.namespace)),
                     hash=findtext(element=element, xpath='ETag',
                                   namespace=self.namespace).replace('"', ''),
                     extra=extra,
                     meta_data=meta_data,
                     container=container,
                     driver=self
                     )
        return obj
class S3StorageDriver(AWSDriver, BaseS3StorageDriver):
    name = 'Amazon S3 (us-east-1)'
    connectionCls = S3SignatureV4Connection
    region_name = 'us-east-1'
class S3USEast2Connection(S3SignatureV4Connection):
    host = S3_US_EAST2_HOST
class S3USEast2StorageDriver(S3StorageDriver):
    name = 'Amazon S3 (us-east-2)'
    connectionCls = S3USEast2Connection
    ex_location_name = 'us-east-2'
    region_name = 'us-east-2'
class S3USWestConnection(S3SignatureV4Connection):
    host = S3_US_WEST_HOST
class S3USWestStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (us-west-1)'
    connectionCls = S3USWestConnection
    ex_location_name = 'us-west-1'
    region_name = 'us-west-1'
class S3USWestOregonConnection(S3SignatureV4Connection):
    host = S3_US_WEST_OREGON_HOST
class S3USWestOregonStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (us-west-2)'
    connectionCls = S3USWestOregonConnection
    ex_location_name = 'us-west-2'
    region_name = 'us-west-2'
class S3USGovWestConnection(S3SignatureV4Connection):
    host = S3_US_GOV_WEST_HOST
class S3USGovWestStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (us-gov-west-1)'
    connectionCls = S3USGovWestConnection
    ex_location_name = 'us-gov-west-1'
    region_name = 'us-gov-west-1'
class S3CNNorthConnection(S3SignatureV4Connection):
    host = S3_CN_NORTH_HOST
class S3CNNorthStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (cn-north-1)'
    connectionCls = S3CNNorthConnection
    ex_location_name = 'cn-north-1'
    region_name = 'cn-north-1'
class S3EUWestConnection(S3SignatureV4Connection):
    host = S3_EU_WEST_HOST
class S3EUWestStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (eu-west-1)'
    connectionCls = S3EUWestConnection
    ex_location_name = 'EU'
    region_name = 'eu-west-1'
class S3EUWest2Connection(S3SignatureV4Connection):
    host = S3_EU_WEST2_HOST
class S3EUWest2StorageDriver(S3StorageDriver):
    name = 'Amazon S3 (eu-west-2)'
    connectionCls = S3EUWest2Connection
    ex_location_name = 'eu-west-2'
    region_name = 'eu-west-2'
class S3EUCentralConnection(S3SignatureV4Connection):
    host = S3_EU_CENTRAL_HOST
class S3EUCentralStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (eu-central-1)'
    connectionCls = S3EUCentralConnection
    ex_location_name = 'eu-central-1'
    region_name = 'eu-central-1'
class S3APSEConnection(S3SignatureV4Connection):
    host = S3_AP_SOUTHEAST_HOST
class S3APSEStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-southeast-1)'
    connectionCls = S3APSEConnection
    ex_location_name = 'ap-southeast-1'
    region_name = 'ap-southeast-1'
class S3APSE2Connection(S3SignatureV4Connection):
    host = S3_AP_SOUTHEAST2_HOST
class S3APSE2StorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-southeast-2)'
    connectionCls = S3APSE2Connection
    ex_location_name = 'ap-southeast-2'
    region_name = 'ap-southeast-2'
class S3APNE1Connection(S3SignatureV4Connection):
    host = S3_AP_NORTHEAST1_HOST
S3APNEConnection = S3APNE1Connection
class S3APNE1StorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-northeast-1)'
    connectionCls = S3APNEConnection
    ex_location_name = 'ap-northeast-1'
    region_name = 'ap-northeast-1'
S3APNEStorageDriver = S3APNE1StorageDriver
class S3APNE2Connection(S3SignatureV4Connection):
    host = S3_AP_NORTHEAST2_HOST
class S3APNE2StorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-northeast-2)'
    connectionCls = S3APNE2Connection
    ex_location_name = 'ap-northeast-2'
    region_name = 'ap-northeast-2'
class S3APSouthConnection(S3SignatureV4Connection):
    host = S3_AP_SOUTH_HOST
class S3APSouthStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-south-1)'
    connectionCls = S3APSouthConnection
    ex_location_name = 'ap-south-1'
    region_name = 'ap-south-1'
class S3SAEastConnection(S3SignatureV4Connection):
    host = S3_SA_EAST_HOST
class S3SAEastStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (sa-east-1)'
    connectionCls = S3SAEastConnection
    ex_location_name = 'sa-east-1'
    region_name = 'sa-east-1'
class S3CACentralConnection(S3SignatureV4Connection):
    host = S3_CA_CENTRAL_HOST
class S3CACentralStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ca-central-1)'
    connectionCls = S3CACentralConnection
    ex_location_name = 'ca-central-1'
    region_name = 'ca-central-1'
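# --- Illustrative usage sketch (not part of the upstream driver) ---
# A minimal, hedged example of how the streaming/multipart upload path above
# might be exercised. The bucket name, object name and credentials are
# hypothetical placeholders; get_driver(), get_container() and
# upload_object_via_stream() are the public libcloud storage API.
def _example_stream_upload(key, secret):
    from libcloud.storage.types import Provider
    from libcloud.storage.providers import get_driver

    driver = get_driver(Provider.S3)(key, secret)        # us-east-1 driver defined above
    container = driver.get_container('example-bucket')   # hypothetical bucket
    data = iter([b'chunk-one', b'chunk-two'])             # any iterator of bytes works
    # For S3 drivers this goes through _put_object_multipart() when
    # supports_s3_multipart_upload is True, otherwise through _put_object().
    return driver.upload_object_via_stream(
        iterator=data, container=container, object_name='example-object')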
 | 
	apache-2.0 | 
| 
	dims/nova | 
	nova/scheduler/filters/trusted_filter.py | 
	14 | 
	9323 | 
	# Copyright (c) 2012 Intel, Inc.
# Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""
Filter to add support for Trusted Computing Pools (EXPERIMENTAL).
Filter that only schedules tasks on a host if the integrity (trust)
of that host matches the trust requested in the ``extra_specs`` for the
flavor.  The ``extra_specs`` will contain a key/value pair where the
key is ``trust``.  The value of this pair (``trusted``/``untrusted``) must
match the integrity of that host (obtained from the Attestation
service) before the task can be scheduled on that host.
Note that the parameters to control access to the Attestation Service
are in the ``nova.conf`` file in a separate ``trust`` section.  For example,
the config file will look something like:
    [DEFAULT]
    verbose=True
    ...
    [trust]
    server=attester.mynetwork.com
Details on the specific parameters can be found in the file
``trust_attest.py``.
Details on setting up and using an Attestation Service can be found at
the Open Attestation project at:
    https://github.com/OpenAttestation/OpenAttestation
"""
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import requests
import nova.conf
from nova import context
from nova.i18n import _LW
from nova import objects
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class AttestationService(object):
    # Provide access wrapper to attestation server to get integrity report.
    def __init__(self):
        self.api_url = CONF.trusted_computing.attestation_api_url
        self.host = CONF.trusted_computing.attestation_server
        self.port = CONF.trusted_computing.attestation_port
        self.auth_blob = CONF.trusted_computing.attestation_auth_blob
        self.key_file = None
        self.cert_file = None
        self.ca_file = CONF.trusted_computing.attestation_server_ca_file
        self.request_count = 100
        # If the CA file is not provided, fall back to checking the
        # certificate when verification is requested
        self.verify = (not CONF.trusted_computing.attestation_insecure_ssl
                       and self.ca_file or True)
        self.cert = (self.cert_file, self.key_file)
    def _do_request(self, method, action_url, body, headers):
        # Connects to the server and issues a request.
        # :returns: result data
        # :raises: IOError if the request fails
        action_url = "https://%s:%s%s/%s" % (self.host, self.port,
                                             self.api_url, action_url)
        try:
            res = requests.request(method, action_url, data=body,
                                   headers=headers, cert=self.cert,
                                   verify=self.verify)
            status_code = res.status_code
            if status_code in (requests.codes.OK,
                               requests.codes.CREATED,
                               requests.codes.ACCEPTED,
                               requests.codes.NO_CONTENT):
                try:
                    return requests.codes.OK, jsonutils.loads(res.text)
                except (TypeError, ValueError):
                    return requests.codes.OK, res.text
            return status_code, None
        except requests.exceptions.RequestException:
            return IOError, None
    def _request(self, cmd, subcmd, hosts):
        body = {}
        body['count'] = len(hosts)
        body['hosts'] = hosts
        cooked = jsonutils.dumps(body)
        headers = {}
        headers['content-type'] = 'application/json'
        headers['Accept'] = 'application/json'
        if self.auth_blob:
            headers['x-auth-blob'] = self.auth_blob
        status, res = self._do_request(cmd, subcmd, cooked, headers)
        return status, res
    def do_attestation(self, hosts):
        """Attests compute nodes through OAT service.
        :param hosts: hosts list to be attested
        :returns: dictionary for trust level and validate time
        """
        result = None
        status, data = self._request("POST", "PollHosts", hosts)
        if data is not None:
            result = data.get('hosts')
        return result
class ComputeAttestationCache(object):
    """Cache for compute node attestation
    Cache a compute node's trust level for some time; if the cache is out
    of date, poll the OAT service to refresh it.
    The OAT service may also keep its own cache. Its cache validity time
    should be set shorter than the trusted filter's cache validity time.
    """
    def __init__(self):
        self.attestservice = AttestationService()
        self.compute_nodes = {}
        admin = context.get_admin_context()
        # Fetch the compute node list to initialize compute_nodes, so that
        # we don't need to poll the OAT service host by host in the first
        # round that the scheduler invokes us.
        computes = objects.ComputeNodeList.get_all(admin)
        for compute in computes:
            host = compute.hypervisor_hostname
            self._init_cache_entry(host)
    def _cache_valid(self, host):
        cachevalid = False
        if host in self.compute_nodes:
            node_stats = self.compute_nodes.get(host)
            if not timeutils.is_older_than(
                             node_stats['vtime'],
                             CONF.trusted_computing.attestation_auth_timeout):
                cachevalid = True
        return cachevalid
    def _init_cache_entry(self, host):
        self.compute_nodes[host] = {
            'trust_lvl': 'unknown',
            'vtime': timeutils.normalize_time(
                        timeutils.parse_isotime("1970-01-01T00:00:00Z"))}
    def _invalidate_caches(self):
        for host in self.compute_nodes:
            self._init_cache_entry(host)
    def _update_cache_entry(self, state):
        entry = {}
        host = state['host_name']
        entry['trust_lvl'] = state['trust_lvl']
        try:
            # Normalize as naive object to interoperate with utcnow().
            entry['vtime'] = timeutils.normalize_time(
                            timeutils.parse_isotime(state['vtime']))
        except ValueError:
            try:
                # Mt. Wilson does not necessarily return an ISO8601 formatted
                # `vtime`, so we should try to parse it as a string formatted
                # datetime.
                vtime = timeutils.parse_strtime(state['vtime'], fmt="%c")
                entry['vtime'] = timeutils.normalize_time(vtime)
            except ValueError:
                # Mark the system as untrusted if we get an invalid vtime.
                entry['trust_lvl'] = 'unknown'
                entry['vtime'] = timeutils.utcnow()
        self.compute_nodes[host] = entry
    def _update_cache(self):
        self._invalidate_caches()
        states = self.attestservice.do_attestation(
            list(self.compute_nodes.keys()))
        if states is None:
            return
        for state in states:
            self._update_cache_entry(state)
    def get_host_attestation(self, host):
        """Check host's trust level."""
        if host not in self.compute_nodes:
            self._init_cache_entry(host)
        if not self._cache_valid(host):
            self._update_cache()
        level = self.compute_nodes.get(host).get('trust_lvl')
        return level
class ComputeAttestation(object):
    def __init__(self):
        self.caches = ComputeAttestationCache()
    def is_trusted(self, host, trust):
        level = self.caches.get_host_attestation(host)
        return trust == level
class TrustedFilter(filters.BaseHostFilter):
    """Trusted filter to support Trusted Compute Pools."""
    def __init__(self):
        self.compute_attestation = ComputeAttestation()
        LOG.warning(_LW('The TrustedFilter is considered experimental '
                        'by the OpenStack project because it receives much '
                        'less testing than the rest of Nova. This may change '
                        'in the future, but current deployers should be aware '
                        'that the use of it in production right now may be '
                        'risky.'))
    # The hosts the instances are running on don't change within a request
    run_filter_once_per_request = True
    def host_passes(self, host_state, spec_obj):
        instance_type = spec_obj.flavor
        extra = (instance_type.extra_specs
                 if 'extra_specs' in instance_type else {})
        trust = extra.get('trust:trusted_host')
        host = host_state.nodename
        if trust:
            return self.compute_attestation.is_trusted(host, trust)
        return True
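# --- Illustrative sketch (not part of the upstream filter) ---
# A hedged example of how the 'trust:trusted_host' extra_specs key drives the
# filter decision above. The dict and attestation level are hypothetical
# stand-ins for spec_obj.flavor.extra_specs and the OAT response; they are
# not Nova APIs.
def _example_trust_decision(extra_specs, attested_level):
    trust = extra_specs.get('trust:trusted_host')
    if trust:
        # Mirrors TrustedFilter.host_passes() / ComputeAttestation.is_trusted()
        return trust == attested_level
    return True

# _example_trust_decision({'trust:trusted_host': 'trusted'}, 'trusted')  -> True
# _example_trust_decision({'trust:trusted_host': 'trusted'}, 'unknown')  -> False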
 | 
	apache-2.0 | 
| 
	liu602348184/django | 
	django/contrib/postgres/fields/ranges.py | 
	172 | 
	5636 | 
	import json
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range
from django.contrib.postgres import forms, lookups
from django.db import models
from django.utils import six
from .utils import AttributeSetter
__all__ = [
    'RangeField', 'IntegerRangeField', 'BigIntegerRangeField',
    'FloatRangeField', 'DateTimeRangeField', 'DateRangeField',
]
class RangeField(models.Field):
    empty_strings_allowed = False
    def get_prep_value(self, value):
        if value is None:
            return None
        elif isinstance(value, Range):
            return value
        elif isinstance(value, (list, tuple)):
            return self.range_type(value[0], value[1])
        return value
    def to_python(self, value):
        if isinstance(value, six.string_types):
            # Assume we're deserializing
            vals = json.loads(value)
            for end in ('lower', 'upper'):
                if end in vals:
                    vals[end] = self.base_field.to_python(vals[end])
            value = self.range_type(**vals)
        elif isinstance(value, (list, tuple)):
            value = self.range_type(value[0], value[1])
        return value
    def set_attributes_from_name(self, name):
        super(RangeField, self).set_attributes_from_name(name)
        self.base_field.set_attributes_from_name(name)
    def value_to_string(self, obj):
        value = self.value_from_object(obj)
        if value is None:
            return None
        if value.isempty:
            return json.dumps({"empty": True})
        base_field = self.base_field
        result = {"bounds": value._bounds}
        for end in ('lower', 'upper'):
            obj = AttributeSetter(base_field.attname, getattr(value, end))
            result[end] = base_field.value_to_string(obj)
        return json.dumps(result)
    def formfield(self, **kwargs):
        kwargs.setdefault('form_class', self.form_field)
        return super(RangeField, self).formfield(**kwargs)
class IntegerRangeField(RangeField):
    base_field = models.IntegerField()
    range_type = NumericRange
    form_field = forms.IntegerRangeField
    def db_type(self, connection):
        return 'int4range'
class BigIntegerRangeField(RangeField):
    base_field = models.BigIntegerField()
    range_type = NumericRange
    form_field = forms.IntegerRangeField
    def db_type(self, connection):
        return 'int8range'
class FloatRangeField(RangeField):
    base_field = models.FloatField()
    range_type = NumericRange
    form_field = forms.FloatRangeField
    def db_type(self, connection):
        return 'numrange'
class DateTimeRangeField(RangeField):
    base_field = models.DateTimeField()
    range_type = DateTimeTZRange
    form_field = forms.DateTimeRangeField
    def db_type(self, connection):
        return 'tstzrange'
class DateRangeField(RangeField):
    base_field = models.DateField()
    range_type = DateRange
    form_field = forms.DateRangeField
    def db_type(self, connection):
        return 'daterange'
RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)
class RangeContainedBy(models.Lookup):
    lookup_name = 'contained_by'
    type_mapping = {
        'integer': 'int4range',
        'bigint': 'int8range',
        'double precision': 'numrange',
        'date': 'daterange',
        'timestamp with time zone': 'tstzrange',
    }
    def as_sql(self, qn, connection):
        field = self.lhs.output_field
        if isinstance(field, models.FloatField):
            sql = '%s::numeric <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        else:
            sql = '%s <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        return sql % (lhs, rhs), params
    def get_prep_lookup(self):
        return RangeField().get_prep_lookup(self.lookup_name, self.rhs)
models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.BigIntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)
@RangeField.register_lookup
class FullyLessThan(lookups.PostgresSimpleLookup):
    lookup_name = 'fully_lt'
    operator = '<<'
@RangeField.register_lookup
class FullGreaterThan(lookups.PostgresSimpleLookup):
    lookup_name = 'fully_gt'
    operator = '>>'
@RangeField.register_lookup
class NotLessThan(lookups.PostgresSimpleLookup):
    lookup_name = 'not_lt'
    operator = '&>'
@RangeField.register_lookup
class NotGreaterThan(lookups.PostgresSimpleLookup):
    lookup_name = 'not_gt'
    operator = '&<'
@RangeField.register_lookup
class AdjacentToLookup(lookups.PostgresSimpleLookup):
    lookup_name = 'adjacent_to'
    operator = '-|-'
@RangeField.register_lookup
class RangeStartsWith(lookups.FunctionTransform):
    lookup_name = 'startswith'
    function = 'lower'
    @property
    def output_field(self):
        return self.lhs.output_field.base_field
@RangeField.register_lookup
class RangeEndsWith(lookups.FunctionTransform):
    lookup_name = 'endswith'
    function = 'upper'
    @property
    def output_field(self):
        return self.lhs.output_field.base_field
@RangeField.register_lookup
class IsEmpty(lookups.FunctionTransform):
    lookup_name = 'isempty'
    function = 'isempty'
    output_field = models.BooleanField()
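# --- Illustrative sketch (not part of Django) ---
# A hedged example of how the prep/deserialization hooks above behave. It only
# relies on psycopg2's range types and the fields defined in this module; no
# database connection is needed. The literal values are hypothetical.
def _example_range_roundtrip():
    field = IntegerRangeField()
    # A (lower, upper) tuple is promoted to the field's range_type.
    prepared = field.get_prep_value((1, 10))          # NumericRange(1, 10, '[)')
    # to_python() re-hydrates a serialized JSON payload via the base field.
    restored = field.to_python('{"lower": "1", "upper": "10", "bounds": "[)"}')
    return prepared, restored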
 | 
	bsd-3-clause | 
| 
	spencerlyon2/pygments | 
	pygments/lexers/data.py | 
	2 | 
	17895 | 
	# -*- coding: utf-8 -*-
"""
    pygments.lexers.data
    ~~~~~~~~~~~~~~~~~~~~
    Lexers for data file formats.
    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
    include, bygroups
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
    Punctuation, Literal
__all__ = ['YamlLexer', 'JsonLexer']
class YamlLexerContext(LexerContext):
    """Indentation context for the YAML lexer."""
    def __init__(self, *args, **kwds):
        super(YamlLexerContext, self).__init__(*args, **kwds)
        self.indent_stack = []
        self.indent = -1
        self.next_indent = 0
        self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
    """
    Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
    language.
    .. versionadded:: 0.11
    """
    name = 'YAML'
    aliases = ['yaml']
    filenames = ['*.yaml', '*.yml']
    mimetypes = ['text/x-yaml']
    def something(token_class):
        """Do not produce empty tokens."""
        def callback(lexer, match, context):
            text = match.group()
            if not text:
                return
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def reset_indent(token_class):
        """Reset the indentation levels."""
        def callback(lexer, match, context):
            text = match.group()
            context.indent_stack = []
            context.indent = -1
            context.next_indent = 0
            context.block_scalar_indent = None
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def save_indent(token_class, start=False):
        """Save a possible indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            extra = ''
            if start:
                context.next_indent = len(text)
                if context.next_indent < context.indent:
                    while context.next_indent < context.indent:
                        context.indent = context.indent_stack.pop()
                    if context.next_indent > context.indent:
                        extra = text[context.indent:]
                        text = text[:context.indent]
            else:
                context.next_indent += len(text)
            if text:
                yield match.start(), token_class, text
            if extra:
                yield match.start()+len(text), token_class.Error, extra
            context.pos = match.end()
        return callback
    def set_indent(token_class, implicit=False):
        """Set the previously saved indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            if context.indent < context.next_indent:
                context.indent_stack.append(context.indent)
                context.indent = context.next_indent
            if not implicit:
                context.next_indent += len(text)
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def set_block_scalar_indent(token_class):
        """Set an explicit indentation level for a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            context.block_scalar_indent = None
            if not text:
                return
            increment = match.group(1)
            if increment:
                current_indent = max(context.indent, 0)
                increment = int(increment)
                context.block_scalar_indent = current_indent + increment
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback
    def parse_block_scalar_empty_line(indent_token_class, content_token_class):
        """Process an empty line in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if (context.block_scalar_indent is None or
                    len(text) <= context.block_scalar_indent):
                if text:
                    yield match.start(), indent_token_class, text
            else:
                indentation = text[:context.block_scalar_indent]
                content = text[context.block_scalar_indent:]
                yield match.start(), indent_token_class, indentation
                yield (match.start()+context.block_scalar_indent,
                       content_token_class, content)
            context.pos = match.end()
        return callback
    def parse_block_scalar_indent(token_class):
        """Process indentation spaces in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if context.block_scalar_indent is None:
                if len(text) <= max(context.indent, 0):
                    context.stack.pop()
                    context.stack.pop()
                    return
                context.block_scalar_indent = len(text)
            else:
                if len(text) < context.block_scalar_indent:
                    context.stack.pop()
                    context.stack.pop()
                    return
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback
    def parse_plain_scalar_indent(token_class):
        """Process indentation spaces in a plain scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if len(text) <= context.indent:
                context.stack.pop()
                context.stack.pop()
                return
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback
    tokens = {
        # the root rules
        'root': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # the '%YAML' directive
            (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
            # the %TAG directive
            (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
            # document start and document end indicators
            (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
             'block-line'),
            # indentation spaces
            (r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
             ('block-line', 'indentation')),
        ],
        # trailing whitespaces after directives or a block scalar indicator
        'ignored-line': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # line break
            (r'\n', Text, '#pop:2'),
        ],
        # the %YAML directive
        'yaml-directive': [
            # the version number
            (r'([ ]+)([0-9]+\.[0-9]+)',
             bygroups(Text, Number), 'ignored-line'),
        ],
        # the %TAG directive
        'tag-directive': [
            # a tag handle and the corresponding prefix
            (r'([ ]+)(!|![0-9A-Za-z_-]*!)'
             r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
             bygroups(Text, Keyword.Type, Text, Keyword.Type),
             'ignored-line'),
        ],
        # block scalar indicators and indentation spaces
        'indentation': [
            # trailing whitespaces are ignored
            (r'[ ]*$', something(Text), '#pop:2'),
            # whitespaces preceding block collection indicators
            (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
            # block collection indicators
            (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning of a block line
            (r'[ ]*', save_indent(Text), '#pop'),
        ],
        # an indented line in the block context
        'block-line': [
            # the line end
            (r'[ ]*(?=#|$)', something(Text), '#pop'),
            # whitespaces separating tokens
            (r'[ ]+', Text),
            # tags, anchors and aliases
            include('descriptors'),
            # block collections and scalars
            include('block-nodes'),
            # flow collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
             something(Name.Variable),
             'plain-scalar-in-block-context'),
        ],
        # tags, anchors, aliases
        'descriptors': [
            # a full-form tag
            (r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
            # a tag in the form '!', '!suffix' or '!handle!suffix'
            (r'!(?:[0-9A-Za-z_-]+)?'
             r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
            # an anchor
            (r'&[0-9A-Za-z_-]+', Name.Label),
            # an alias
            (r'\*[0-9A-Za-z_-]+', Name.Variable),
        ],
        # block collections and scalars
        'block-nodes': [
            # implicit key
            (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
            # literal and folded scalars
            (r'[|>]', Punctuation.Indicator,
             ('block-scalar-content', 'block-scalar-header')),
        ],
        # flow collections and quoted scalars
        'flow-nodes': [
            # a flow sequence
            (r'\[', Punctuation.Indicator, 'flow-sequence'),
            # a flow mapping
            (r'\{', Punctuation.Indicator, 'flow-mapping'),
            # a single-quoted scalar
            (r'\'', String, 'single-quoted-scalar'),
            # a double-quoted scalar
            (r'\"', String, 'double-quoted-scalar'),
        ],
        # the content of a flow collection
        'flow-collection': [
            # whitespaces
            (r'[ ]+', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # simple indicators
            (r'[?:,]', Punctuation.Indicator),
            # tags, anchors and aliases
            include('descriptors'),
            # nested collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
             something(Name.Variable),
             'plain-scalar-in-flow-context'),
        ],
        # a flow sequence indicated by '[' and ']'
        'flow-sequence': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\]', Punctuation.Indicator, '#pop'),
        ],
        # a flow mapping indicated by '{' and '}'
        'flow-mapping': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\}', Punctuation.Indicator, '#pop'),
        ],
        # block scalar lines
        'block-scalar-content': [
            # line break
            (r'\n', Text),
            # empty line
            (r'^[ ]+$',
             parse_block_scalar_empty_line(Text, Name.Constant)),
            # indentation spaces (we may leave the state here)
            (r'^[ ]*', parse_block_scalar_indent(Text)),
            # line content
            (r'[^\n\r\f\v]+', Name.Constant),
        ],
        # the content of a literal or folded scalar
        'block-scalar-header': [
            # indentation indicator followed by chomping flag
            (r'([1-9])?[+-]?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
            # chomping flag followed by indentation indicator
            (r'[+-]?([1-9])?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
        ],
        # ignored and regular whitespaces in quoted scalars
        'quoted-scalar-whitespaces': [
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', Text),
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
        ],
        # single-quoted scalars
        'single-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of the quote character
            (r'\'\'', String.Escape),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v\']+', String),
            # the closing quote
            (r'\'', String, '#pop'),
        ],
        # double-quoted scalars
        'double-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of special characters
            (r'\\[0abt\tn\nvfre "\\N_LP]', String),
            # escape codes
            (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
             String.Escape),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v\"\\]+', String),
            # the closing quote
            (r'"', String, '#pop'),
        ],
        # the beginning of a new line while scanning a plain scalar
        'plain-scalar-in-block-context-new-line': [
            # empty lines
            (r'^[ ]+$', Text),
            # line breaks
            (r'\n+', Text),
            # document start and document end indicators
            (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
            # indentation spaces (we may leave the block line state here)
            (r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
        ],
        # a plain scalar in the block context
        'plain-scalar-in-block-context': [
            # the scalar ends with the ':' indicator
            (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
            # the scalar ends with whitespaces followed by a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # trailing whitespaces are ignored
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
            # other whitespaces are a part of the value
            (r'[ ]+', Literal.Scalar.Plain),
            # regular non-whitespace characters
            (r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
        ],
        # a plain scalar in the flow context
        'plain-scalar-in-flow-context': [
            # the scalar ends with an indicator character
            (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
            # the scalar ends with a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', Text),
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
        ],
    }
    def get_tokens_unprocessed(self, text=None, context=None):
        if context is None:
            context = YamlLexerContext(text, 0)
        return super(YamlLexer, self).get_tokens_unprocessed(text, context)
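# --- Illustrative sketch (not part of Pygments) ---
# A hedged example of driving the YAML lexer defined above through the
# standard Lexer.get_tokens() API; the sample document is arbitrary.
def _example_yaml_tokens():
    sample = "key: value\nitems:\n  - one\n  - two\n"
    return list(YamlLexer().get_tokens(sample))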
class JsonLexer(RegexLexer):
    """
    For JSON data structures.
    .. versionadded:: 1.5
    """
    name = 'JSON'
    aliases = ['json']
    filenames = ['*.json']
    mimetypes = ['application/json']
    flags = re.DOTALL
    # integer part of a number
    int_part = r'-?(0|[1-9]\d*)'
    # fractional part of a number
    frac_part = r'\.\d+'
    # exponential part of a number
    exp_part = r'[eE](\+|-)?\d+'
    tokens = {
        'whitespace': [
            (r'\s+', Text),
        ],
        # represents a simple terminal value
        'simplevalue': [
            (r'(true|false|null)\b', Keyword.Constant),
            (('%(int_part)s(%(frac_part)s%(exp_part)s|'
              '%(exp_part)s|%(frac_part)s)') % vars(),
             Number.Float),
            (int_part, Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
        ],
        # the right hand side of an object, after the attribute name
        'objectattribute': [
            include('value'),
            (r':', Punctuation),
            # comma terminates the attribute but expects more
            (r',', Punctuation, '#pop'),
            # a closing bracket terminates the entire object, so pop twice
            (r'}', Punctuation, ('#pop', '#pop')),
        ],
        # a json object - { attr, attr, ... }
        'objectvalue': [
            include('whitespace'),
            (r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
            (r'}', Punctuation, '#pop'),
        ],
        # json array - [ value, value, ... ]
        'arrayvalue': [
            include('whitespace'),
            include('value'),
            (r',', Punctuation),
            (r']', Punctuation, '#pop'),
        ],
        # a json value - either a simple value or a complex value (object or array)
        'value': [
            include('whitespace'),
            include('simplevalue'),
            (r'{', Punctuation, 'objectvalue'),
            (r'\[', Punctuation, 'arrayvalue'),
        ],
        # the root of a json document should be a value
        'root': [
            include('value'),
        ],
    }
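# --- Illustrative sketch (not part of Pygments) ---
# A hedged example combining the JSON lexer above with the public highlight()
# helper; the formatter choice and the input string are arbitrary.
def _example_highlight_json():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    return highlight('{"name": "value", "nums": [1, 2.5, true]}',
                     JsonLexer(), TerminalFormatter())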
 | 
	bsd-2-clause | 
| 
	MQQiang/kbengine | 
	kbe/src/lib/python/Lib/threading.py | 
	61 | 
	48900 | 
	"""Thread module emulating a subset of Java's threading model."""
import sys as _sys
import _thread
try:
    from time import monotonic as _time
except ImportError:
    from time import time as _time
from traceback import format_exc as _format_exc
from _weakrefset import WeakSet
from itertools import islice as _islice
try:
    from _collections import deque as _deque
except ImportError:
    from collections import deque as _deque
# Note regarding PEP 8 compliant names
#  This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k), so this module provides them as an
# alias for the PEP 8 compliant names
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
           'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier',
           'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
_set_sentinel = _thread._set_sentinel
get_ident = _thread.get_ident
ThreadError = _thread.error
try:
    _CRLock = _thread.RLock
except AttributeError:
    _CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
    """Set a profile function for all threads started from the threading module.
    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.
    """
    global _profile_hook
    _profile_hook = func
def settrace(func):
    """Set a trace function for all threads started from the threading module.
    The func will be passed to sys.settrace() for each thread, before its run()
    method is called.
    """
    global _trace_hook
    _trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
    """Factory function that returns a new reentrant lock.
    A reentrant lock must be released by the thread that acquired it. Once a
    thread has acquired a reentrant lock, the same thread may acquire it again
    without blocking; the thread must release it once for each time it has
    acquired it.
    """
    if _CRLock is None:
        return _PyRLock(*args, **kwargs)
    return _CRLock(*args, **kwargs)
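# --- Illustrative sketch (not part of the module) ---
# A hedged example of reentrant acquisition with the RLock factory defined
# above: the owning thread may acquire the lock again without deadlocking and
# must release it once per acquisition (handled here by nested `with` blocks).
def _example_reentrant_lock():
    lock = RLock()
    with lock:
        with lock:      # re-acquire from the owning thread; count becomes 2
            pass        # both releases happen when the with-blocks exit
    return lock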
class _RLock:
    """This class implements reentrant lock objects.
    A reentrant lock must be released by the thread that acquired it. Once a
    thread has acquired a reentrant lock, the same thread may acquire it
    again without blocking; the thread must release it once for each time it
    has acquired it.
    """
    def __init__(self):
        self._block = _allocate_lock()
        self._owner = None
        self._count = 0
    def __repr__(self):
        owner = self._owner
        try:
            owner = _active[owner].name
        except KeyError:
            pass
        return "<%s owner=%r count=%d>" % (
                self.__class__.__name__, owner, self._count)
    def acquire(self, blocking=True, timeout=-1):
        """Acquire a lock, blocking or non-blocking.
        When invoked without arguments: if this thread already owns the lock,
        increment the recursion level by one, and return immediately. Otherwise,
        if another thread owns the lock, block until the lock is unlocked. Once
        the lock is unlocked (not owned by any thread), then grab ownership, set
        the recursion level to one, and return. If more than one thread is
        blocked waiting until the lock is unlocked, only one at a time will be
        able to grab ownership of the lock. There is no return value in this
        case.
        When invoked with the blocking argument set to true, do the same thing
        as when called without arguments, and return true.
        When invoked with the blocking argument set to false, do not block. If a
        call without an argument would block, return false immediately;
        otherwise, do the same thing as when called without arguments, and
        return true.
        When invoked with the floating-point timeout argument set to a positive
        value, block for at most the number of seconds specified by timeout
        and as long as the lock cannot be acquired.  Return true if the lock has
        been acquired, false if the timeout has elapsed.
        """
        me = get_ident()
        if self._owner == me:
            self._count += 1
            return 1
        rc = self._block.acquire(blocking, timeout)
        if rc:
            self._owner = me
            self._count = 1
        return rc
    __enter__ = acquire
    def release(self):
        """Release a lock, decrementing the recursion level.
        If after the decrement it is zero, reset the lock to unlocked (not owned
        by any thread), and if any other threads are blocked waiting for the
        lock to become unlocked, allow exactly one of them to proceed. If after
        the decrement the recursion level is still nonzero, the lock remains
        locked and owned by the calling thread.
        Only call this method when the calling thread owns the lock. A
        RuntimeError is raised if this method is called when the lock is
        unlocked.
        There is no return value.
        """
        if self._owner != get_ident():
            raise RuntimeError("cannot release un-acquired lock")
        self._count = count = self._count - 1
        if not count:
            self._owner = None
            self._block.release()
    def __exit__(self, t, v, tb):
        self.release()
    # Internal methods used by condition variables
    def _acquire_restore(self, state):
        self._block.acquire()
        self._count, self._owner = state
    def _release_save(self):
        if self._count == 0:
            raise RuntimeError("cannot release un-acquired lock")
        count = self._count
        self._count = 0
        owner = self._owner
        self._owner = None
        self._block.release()
        return (count, owner)
    def _is_owned(self):
        return self._owner == get_ident()
_PyRLock = _RLock
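# A minimal sketch of the reentrancy described above: the owning thread may
# acquire the lock again without blocking and must release it once per
# acquisition (illustrative helper; defined here but never called).
def _example_rlock_reentrancy():
    lock = RLock()
    with lock:              # recursion level 1
        with lock:          # same thread, recursion level 2 - does not block
            pass
    # fully released here; another thread could now acquire the lock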
class Condition:
    """Class that implements a condition variable.
    A condition variable allows one or more threads to wait until they are
    notified by another thread.
    If the lock argument is given and not None, it must be a Lock or RLock
    object, and it is used as the underlying lock. Otherwise, a new RLock object
    is created and used as the underlying lock.
    """
    def __init__(self, lock=None):
        if lock is None:
            lock = RLock()
        self._lock = lock
        # Export the lock's acquire() and release() methods
        self.acquire = lock.acquire
        self.release = lock.release
        # If the lock defines _release_save() and/or _acquire_restore(),
        # these override the default implementations (which just call
        # release() and acquire() on the lock).  Ditto for _is_owned().
        try:
            self._release_save = lock._release_save
        except AttributeError:
            pass
        try:
            self._acquire_restore = lock._acquire_restore
        except AttributeError:
            pass
        try:
            self._is_owned = lock._is_owned
        except AttributeError:
            pass
        self._waiters = _deque()
    def __enter__(self):
        return self._lock.__enter__()
    def __exit__(self, *args):
        return self._lock.__exit__(*args)
    def __repr__(self):
        return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
    def _release_save(self):
        self._lock.release()           # No state to save
    def _acquire_restore(self, x):
        self._lock.acquire()           # Ignore saved state
    def _is_owned(self):
        # Return True if lock is owned by current_thread.
        # This method is called only if __lock doesn't have _is_owned().
        if self._lock.acquire(0):
            self._lock.release()
            return False
        else:
            return True
    def wait(self, timeout=None):
        """Wait until notified or until a timeout occurs.
        If the calling thread has not acquired the lock when this method is
        called, a RuntimeError is raised.
        This method releases the underlying lock, and then blocks until it is
        awakened by a notify() or notify_all() call for the same condition
        variable in another thread, or until the optional timeout occurs. Once
        awakened or timed out, it re-acquires the lock and returns.
        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof).
        When the underlying lock is an RLock, it is not released using its
        release() method, since this may not actually unlock the lock when it
        was acquired multiple times recursively. Instead, an internal interface
        of the RLock class is used, which really unlocks it even when it has
        been recursively acquired several times. Another internal interface is
        then used to restore the recursion level when the lock is reacquired.
        """
        if not self._is_owned():
            raise RuntimeError("cannot wait on un-acquired lock")
        waiter = _allocate_lock()
        waiter.acquire()
        self._waiters.append(waiter)
        saved_state = self._release_save()
        gotit = False
        try:    # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                waiter.acquire()
                gotit = True
            else:
                if timeout > 0:
                    gotit = waiter.acquire(True, timeout)
                else:
                    gotit = waiter.acquire(False)
            return gotit
        finally:
            self._acquire_restore(saved_state)
            if not gotit:
                try:
                    self._waiters.remove(waiter)
                except ValueError:
                    pass
    def wait_for(self, predicate, timeout=None):
        """Wait until a condition evaluates to True.
        predicate should be a callable whose result will be interpreted as a
        boolean value.  A timeout may be provided giving the maximum time to
        wait.
        """
        endtime = None
        waittime = timeout
        result = predicate()
        while not result:
            if waittime is not None:
                if endtime is None:
                    endtime = _time() + waittime
                else:
                    waittime = endtime - _time()
                    if waittime <= 0:
                        break
            self.wait(waittime)
            result = predicate()
        return result
    def notify(self, n=1):
        """Wake up one or more threads waiting on this condition, if any.
        If the calling thread has not acquired the lock when this method is
        called, a RuntimeError is raised.
        This method wakes up at most n of the threads waiting for the condition
        variable; it is a no-op if no threads are waiting.
        """
        if not self._is_owned():
            raise RuntimeError("cannot notify on un-acquired lock")
        all_waiters = self._waiters
        waiters_to_notify = _deque(_islice(all_waiters, n))
        if not waiters_to_notify:
            return
        for waiter in waiters_to_notify:
            waiter.release()
            try:
                all_waiters.remove(waiter)
            except ValueError:
                pass
    def notify_all(self):
        """Wake up all threads waiting on this condition.
        If the calling thread has not acquired the lock when this method
        is called, a RuntimeError is raised.
        """
        self.notify(len(self._waiters))
    notifyAll = notify_all
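# A minimal producer/consumer sketch of the wait()/notify() protocol described
# above; the deque and the item are placeholders and the returned helpers are
# illustrative only (defined here but never called).
def _example_condition_usage():
    cond = Condition()
    items = _deque()
    def consumer():
        with cond:
            # wait_for() re-evaluates the predicate each time it is notified
            cond.wait_for(lambda: len(items) > 0)
            return items.popleft()
    def producer(item):
        with cond:
            items.append(item)
            cond.notify()   # wake one waiting consumer, if any
    return producer, consumer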
class Semaphore:
    """This class implements semaphore objects.
    Semaphores manage a counter representing the number of release() calls minus
    the number of acquire() calls, plus an initial value. The acquire() method
    blocks if necessary until it can return without making the counter
    negative. If not given, value defaults to 1.
    """
    # After Tim Peters' semaphore class, but not quite the same (no maximum)
    def __init__(self, value=1):
        if value < 0:
            raise ValueError("semaphore initial value must be >= 0")
        self._cond = Condition(Lock())
        self._value = value
    def acquire(self, blocking=True, timeout=None):
        """Acquire a semaphore, decrementing the internal counter by one.
        When invoked without arguments: if the internal counter is larger than
        zero on entry, decrement it by one and return immediately. If it is zero
        on entry, block, waiting until some other thread has called release() to
        make it larger than zero. This is done with proper interlocking so that
        if multiple acquire() calls are blocked, release() will wake exactly one
        of them up. The implementation may pick one at random, so the order in
        which blocked threads are awakened should not be relied on. There is no
        return value in this case.
        When invoked with blocking set to true, do the same thing as when called
        without arguments, and return true.
        When invoked with blocking set to false, do not block. If a call without
        an argument would block, return false immediately; otherwise, do the
        same thing as when called without arguments, and return true.
        When invoked with a timeout other than None, it will block for at
        most timeout seconds.  If acquire does not complete successfully in
        that interval, return false.  Return true otherwise.
        """
        if not blocking and timeout is not None:
            raise ValueError("can't specify timeout for non-blocking acquire")
        rc = False
        endtime = None
        with self._cond:
            while self._value == 0:
                if not blocking:
                    break
                if timeout is not None:
                    if endtime is None:
                        endtime = _time() + timeout
                    else:
                        timeout = endtime - _time()
                        if timeout <= 0:
                            break
                self._cond.wait(timeout)
            else:
                self._value -= 1
                rc = True
        return rc
    __enter__ = acquire
    def release(self):
        """Release a semaphore, incrementing the internal counter by one.
        When the counter is zero on entry and another thread is waiting for it
        to become larger than zero again, wake up that thread.
        """
        with self._cond:
            self._value += 1
            self._cond.notify()
    def __exit__(self, t, v, tb):
        self.release()
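# A minimal sketch of using a Semaphore to cap concurrency: at most 'limit'
# callers hold the semaphore at once (illustrative; defined here but never
# called).
def _example_semaphore_limit(do_work, limit=3):
    sem = Semaphore(limit)
    def worker():
        with sem:           # blocks once 'limit' workers are inside
            do_work()
    return worker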
class BoundedSemaphore(Semaphore):
    """Implements a bounded semaphore.
    A bounded semaphore checks to make sure its current value doesn't exceed its
    initial value. If it does, ValueError is raised. In most situations
    semaphores are used to guard resources with limited capacity.
    If the semaphore is released too many times it's a sign of a bug. If not
    given, value defaults to 1.
    Like regular semaphores, bounded semaphores manage a counter representing
    the number of release() calls minus the number of acquire() calls, plus an
    initial value. The acquire() method blocks if necessary until it can return
    without making the counter negative. If not given, value defaults to 1.
    """
    def __init__(self, value=1):
        Semaphore.__init__(self, value)
        self._initial_value = value
    def release(self):
        """Release a semaphore, incrementing the internal counter by one.
        When the counter is zero on entry and another thread is waiting for it
        to become larger than zero again, wake up that thread.
        If the number of releases exceeds the number of acquires,
        raise a ValueError.
        """
        with self._cond:
            if self._value >= self._initial_value:
                raise ValueError("Semaphore released too many times")
            self._value += 1
            self._cond.notify()
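# A minimal sketch of the bound being enforced: one release too many raises
# ValueError (illustrative; defined here but never called).
def _example_bounded_semaphore_overrelease():
    sem = BoundedSemaphore(1)
    sem.acquire()
    sem.release()           # counter is back at its initial value
    try:
        sem.release()       # exceeds the initial value
    except ValueError:
        return True
    return False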
class Event:
    """Class implementing event objects.
    Events manage a flag that can be set to true with the set() method and reset
    to false with the clear() method. The wait() method blocks until the flag is
    true.  The flag is initially false.
    """
    # After Tim Peters' event class (without is_posted())
    def __init__(self):
        self._cond = Condition(Lock())
        self._flag = False
    def _reset_internal_locks(self):
        # private!  called by Thread._reset_internal_locks by _after_fork()
        self._cond.__init__()
    def is_set(self):
        """Return true if and only if the internal flag is true."""
        return self._flag
    isSet = is_set
    def set(self):
        """Set the internal flag to true.
        All threads waiting for it to become true are awakened. Threads
        that call wait() once the flag is true will not block at all.
        """
        self._cond.acquire()
        try:
            self._flag = True
            self._cond.notify_all()
        finally:
            self._cond.release()
    def clear(self):
        """Reset the internal flag to false.
        Subsequently, threads calling wait() will block until set() is called to
        set the internal flag to true again.
        """
        self._cond.acquire()
        try:
            self._flag = False
        finally:
            self._cond.release()
    def wait(self, timeout=None):
        """Block until the internal flag is true.
        If the internal flag is true on entry, return immediately. Otherwise,
        block until another thread calls set() to set the flag to true, or until
        the optional timeout occurs.
        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof).
        This method returns the internal flag on exit, so it will always return
        True except if a timeout is given and the operation times out.
        """
        self._cond.acquire()
        try:
            signaled = self._flag
            if not signaled:
                signaled = self._cond.wait(timeout)
            return signaled
        finally:
            self._cond.release()
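# A minimal sketch of the flag protocol described above: waiters block in
# wait() until another thread calls set() (illustrative; defined here but
# never called).
def _example_event_usage():
    ready = Event()
    def waiter():
        # True once the flag is set, False if the timeout expires first.
        return ready.wait(timeout=5.0)
    def signaller():
        ready.set()         # wakes every thread blocked in wait()
    return waiter, signaller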
# A barrier class.  Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java.  See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
#        CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic.  Threads are not allowed into it until it has fully drained
# since the previous cycle.  In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
    """Implements a Barrier.
    Useful for synchronizing a fixed number of threads at known synchronization
    points.  Threads block on 'wait()' and are simultaneously released once
    they have all made that call.
    """
    def __init__(self, parties, action=None, timeout=None):
        """Create a barrier, initialised to 'parties' threads.
        'action' is a callable which, when supplied, will be called by one of
        the threads after they have all entered the barrier and just prior to
        releasing them all. If a 'timeout' is provided, it is used as the
        default for all subsequent 'wait()' calls.
        """
        self._cond = Condition(Lock())
        self._action = action
        self._timeout = timeout
        self._parties = parties
        self._state = 0  # 0 filling, 1 draining, -1 resetting, -2 broken
        self._count = 0
    def wait(self, timeout=None):
        """Wait for the barrier.
        When the specified number of threads have started waiting, they are all
        simultaneously awoken. If an 'action' was provided for the barrier, one
        of the threads will have executed that callback prior to returning.
        Returns an individual index number from 0 to 'parties-1'.
        """
        if timeout is None:
            timeout = self._timeout
        with self._cond:
            self._enter() # Block while the barrier drains.
            index = self._count
            self._count += 1
            try:
                if index + 1 == self._parties:
                    # We release the barrier
                    self._release()
                else:
                    # We wait until someone releases us
                    self._wait(timeout)
                return index
            finally:
                self._count -= 1
                # Wake up any threads waiting for barrier to drain.
                self._exit()
    # Block until the barrier is ready for us, or raise an exception
    # if it is broken.
    def _enter(self):
        while self._state in (-1, 1):
            # It is draining or resetting, wait until done
            self._cond.wait()
        #see if the barrier is in a broken state
        if self._state < 0:
            raise BrokenBarrierError
        assert self._state == 0
    # Optionally run the 'action' and release the threads waiting
    # in the barrier.
    def _release(self):
        try:
            if self._action:
                self._action()
            # enter draining state
            self._state = 1
            self._cond.notify_all()
        except:
            #an exception during the _action handler.  Break and reraise
            self._break()
            raise
    # Wait in the barrier until we are released.  Raise an exception
    # if the barrier is reset or broken.
    def _wait(self, timeout):
        if not self._cond.wait_for(lambda : self._state != 0, timeout):
            #timed out.  Break the barrier
            self._break()
            raise BrokenBarrierError
        if self._state < 0:
            raise BrokenBarrierError
        assert self._state == 1
    # If we are the last thread to exit the barrier, signal any threads
    # waiting for the barrier to drain.
    def _exit(self):
        if self._count == 0:
            if self._state in (-1, 1):
                #resetting or draining
                self._state = 0
                self._cond.notify_all()
    def reset(self):
        """Reset the barrier to the initial state.
        Any threads currently waiting will get the BrokenBarrier exception
        raised.
        """
        with self._cond:
            if self._count > 0:
                if self._state == 0:
                    #reset the barrier, waking up threads
                    self._state = -1
                elif self._state == -2:
                    #was broken, set it to reset state
                    #which clears when the last thread exits
                    self._state = -1
            else:
                self._state = 0
            self._cond.notify_all()
    def abort(self):
        """Place the barrier into a 'broken' state.
        Useful in case of error.  Any currently waiting threads and threads
        attempting to 'wait()' will have BrokenBarrierError raised.
        """
        with self._cond:
            self._break()
    def _break(self):
        # An internal error was detected.  The barrier is set to
        # a broken state and all waiting parties are awakened.
        self._state = -2
        self._cond.notify_all()
    @property
    def parties(self):
        """Return the number of threads required to trip the barrier."""
        return self._parties
    @property
    def n_waiting(self):
        """Return the number of threads currently waiting at the barrier."""
        # We don't need synchronization here since this is an ephemeral result
        # anyway.  It returns the correct value in the steady state.
        if self._state == 0:
            return self._count
        return 0
    @property
    def broken(self):
        """Return True if the barrier is in a broken state."""
        return self._state == -2
# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
    pass
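# A minimal sketch of the barrier cycle described above: each of 'parties'
# threads calls wait() and all are released together once the last one
# arrives (illustrative; defined here but never called).
def _example_barrier_usage(parties=3):
    barrier = Barrier(parties)
    def worker():
        index = barrier.wait()  # blocks until all 'parties' threads arrive
        return index            # unique value in range(parties)
    return worker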
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
    global _counter
    _counter += 1
    return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {}    # maps thread id to Thread object
_limbo = {}
_dangling = WeakSet()
# Main class for threads
class Thread:
    """A class that represents a thread of control.
    This class can be safely subclassed in a limited fashion. There are two ways
    to specify the activity: by passing a callable object to the constructor, or
    by overriding the run() method in a subclass.
    """
    __initialized = False
    # Need to store a reference to sys.exc_info for printing
    # out exceptions when a thread tries to use a global var. during interp.
    # shutdown and thus raises an exception about trying to perform some
    # operation on/with a NoneType
    __exc_info = _sys.exc_info
    # Keep sys.exc_clear too to clear the exception just before
    # allowing .join() to return.
    #XXX __exc_clear = _sys.exc_clear
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, *, daemon=None):
        """This constructor should always be called with keyword arguments. Arguments are:
        *group* should be None; reserved for future extension when a ThreadGroup
        class is implemented.
        *target* is the callable object to be invoked by the run()
        method. Defaults to None, meaning nothing is called.
        *name* is the thread name. By default, a unique name is constructed of
        the form "Thread-N" where N is a small decimal number.
        *args* is the argument tuple for the target invocation. Defaults to ().
        *kwargs* is a dictionary of keyword arguments for the target
        invocation. Defaults to {}.
        If a subclass overrides the constructor, it must make sure to invoke
        the base class constructor (Thread.__init__()) before doing anything
        else to the thread.
        """
        assert group is None, "group argument must be None for now"
        if kwargs is None:
            kwargs = {}
        self._target = target
        self._name = str(name or _newname())
        self._args = args
        self._kwargs = kwargs
        if daemon is not None:
            self._daemonic = daemon
        else:
            self._daemonic = current_thread().daemon
        self._ident = None
        self._tstate_lock = None
        self._started = Event()
        self._is_stopped = False
        self._initialized = True
        # sys.stderr is not stored in the class like
        # sys.exc_info since it can be changed between instances
        self._stderr = _sys.stderr
        # For debugging and _after_fork()
        _dangling.add(self)
    def _reset_internal_locks(self, is_alive):
        # private!  Called by _after_fork() to reset our internal locks as
        # they may be in an invalid state leading to a deadlock or crash.
        self._started._reset_internal_locks()
        if is_alive:
            self._set_tstate_lock()
        else:
            # The thread isn't alive after fork: it doesn't have a tstate
            # anymore.
            self._is_stopped = True
            self._tstate_lock = None
    def __repr__(self):
        assert self._initialized, "Thread.__init__() was not called"
        status = "initial"
        if self._started.is_set():
            status = "started"
        self.is_alive() # easy way to get ._is_stopped set when appropriate
        if self._is_stopped:
            status = "stopped"
        if self._daemonic:
            status += " daemon"
        if self._ident is not None:
            status += " %s" % self._ident
        return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
    def start(self):
        """Start the thread's activity.
        It must be called at most once per thread object. It arranges for the
        object's run() method to be invoked in a separate thread of control.
        This method will raise a RuntimeError if called more than once on the
        same thread object.
        """
        if not self._initialized:
            raise RuntimeError("thread.__init__() not called")
        if self._started.is_set():
            raise RuntimeError("threads can only be started once")
        with _active_limbo_lock:
            _limbo[self] = self
        try:
            _start_new_thread(self._bootstrap, ())
        except Exception:
            with _active_limbo_lock:
                del _limbo[self]
            raise
        self._started.wait()
    def run(self):
        """Method representing the thread's activity.
        You may override this method in a subclass. The standard run() method
        invokes the callable object passed to the object's constructor as the
        target argument, if any, with sequential and keyword arguments taken
        from the args and kwargs arguments, respectively.
        """
        try:
            if self._target:
                self._target(*self._args, **self._kwargs)
        finally:
            # Avoid a refcycle if the thread is running a function with
            # an argument that has a member that points to the thread.
            del self._target, self._args, self._kwargs
    def _bootstrap(self):
        # Wrapper around the real bootstrap code that ignores
        # exceptions during interpreter cleanup.  Those typically
        # happen when a daemon thread wakes up at an unfortunate
        # moment, finds the world around it destroyed, and raises some
        # random exception *** while trying to report the exception in
        # _bootstrap_inner() below ***.  Those random exceptions
        # don't help anybody, and they confuse users, so we suppress
        # them.  We suppress them only when it appears that the world
        # indeed has already been destroyed, so that exceptions in
        # _bootstrap_inner() during normal business hours are properly
        # reported.  Also, we only suppress them for daemonic threads;
        # if a non-daemonic encounters this, something else is wrong.
        try:
            self._bootstrap_inner()
        except:
            if self._daemonic and _sys is None:
                return
            raise
    def _set_ident(self):
        self._ident = get_ident()
    def _set_tstate_lock(self):
        """
        Set a lock object which will be released by the interpreter when
        the underlying thread state (see pystate.h) gets deleted.
        """
        self._tstate_lock = _set_sentinel()
        self._tstate_lock.acquire()
    def _bootstrap_inner(self):
        try:
            self._set_ident()
            self._set_tstate_lock()
            self._started.set()
            with _active_limbo_lock:
                _active[self._ident] = self
                del _limbo[self]
            if _trace_hook:
                _sys.settrace(_trace_hook)
            if _profile_hook:
                _sys.setprofile(_profile_hook)
            try:
                self.run()
            except SystemExit:
                pass
            except:
                # If sys.stderr is no more (most likely from interpreter
                # shutdown) use self._stderr.  Otherwise still use sys (as in
                # _sys) in case sys.stderr was redefined since the creation of
                # self.
                if _sys:
                    _sys.stderr.write("Exception in thread %s:\n%s\n" %
                                      (self.name, _format_exc()))
                else:
                    # Do the best job possible w/o a huge amt. of code to
                    # approximate a traceback (code ideas from
                    # Lib/traceback.py)
                    exc_type, exc_value, exc_tb = self._exc_info()
                    try:
                        print((
                            "Exception in thread " + self.name +
                            " (most likely raised during interpreter shutdown):"), file=self._stderr)
                        print((
                            "Traceback (most recent call last):"), file=self._stderr)
                        while exc_tb:
                            print((
                                '  File "%s", line %s, in %s' %
                                (exc_tb.tb_frame.f_code.co_filename,
                                    exc_tb.tb_lineno,
                                    exc_tb.tb_frame.f_code.co_name)), file=self._stderr)
                            exc_tb = exc_tb.tb_next
                        print(("%s: %s" % (exc_type, exc_value)), file=self._stderr)
                    # Make sure that exc_tb gets deleted since it is a memory
                    # hog; deleting everything else is just for thoroughness
                    finally:
                        del exc_type, exc_value, exc_tb
            finally:
                # Prevent a race in
                # test_threading.test_no_refcycle_through_target when
                # the exception keeps the target alive past when we
                # assert that it's dead.
                #XXX self.__exc_clear()
                pass
        finally:
            with _active_limbo_lock:
                try:
                    # We don't call self._delete() because it also
                    # grabs _active_limbo_lock.
                    del _active[get_ident()]
                except:
                    pass
    def _stop(self):
        # After calling ._stop(), .is_alive() returns False and .join() returns
        # immediately.  ._tstate_lock must be released before calling ._stop().
        #
        # Normal case:  C code at the end of the thread's life
        # (release_sentinel in _threadmodule.c) releases ._tstate_lock, and
        # that's detected by our ._wait_for_tstate_lock(), called by .join()
        # and .is_alive().  Any number of threads _may_ call ._stop()
        # simultaneously (for example, if multiple threads are blocked in
        # .join() calls), and they're not serialized.  That's harmless -
        # they'll just make redundant rebindings of ._is_stopped and
        # ._tstate_lock.  Obscure:  we rebind ._tstate_lock last so that the
        # "assert self._is_stopped" in ._wait_for_tstate_lock() always works
        # (the assert is executed only if ._tstate_lock is None).
        #
        # Special case:  _main_thread releases ._tstate_lock via this
        # module's _shutdown() function.
        lock = self._tstate_lock
        if lock is not None:
            assert not lock.locked()
        self._is_stopped = True
        self._tstate_lock = None
    def _delete(self):
        "Remove current thread from the dict of currently running threads."
        # Notes about running with _dummy_thread:
        #
        # Must take care to not raise an exception if _dummy_thread is being
        # used (and thus this module is being used as an instance of
        # dummy_threading).  _dummy_thread.get_ident() always returns -1 since
        # there is only one thread if _dummy_thread is being used.  Thus
        # len(_active) is always <= 1 here, and any Thread instance created
        # overwrites the (if any) thread currently registered in _active.
        #
        # An instance of _MainThread is always created by 'threading'.  This
        # gets overwritten the instant an instance of Thread is created; both
        # threads return -1 from _dummy_thread.get_ident() and thus have the
        # same key in the dict.  So when the _MainThread instance created by
        # 'threading' tries to clean itself up when atexit calls this method
        # it gets a KeyError if another Thread instance was created.
        #
        # This all means that KeyError from trying to delete something from
        # _active if dummy_threading is being used is a red herring.  But
        # since it isn't if dummy_threading is *not* being used then don't
        # hide the exception.
        try:
            with _active_limbo_lock:
                del _active[get_ident()]
                # There must not be any python code between the previous line
                # and after the lock is released.  Otherwise a tracing function
                # could try to acquire the lock again in the same thread, (in
                # current_thread()), and would block.
        except KeyError:
            if 'dummy_threading' not in _sys.modules:
                raise
    def join(self, timeout=None):
        """Wait until the thread terminates.
        This blocks the calling thread until the thread whose join() method is
        called terminates -- either normally or through an unhandled exception
        or until the optional timeout occurs.
        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof). As join() always returns None, you must call
        isAlive() after join() to decide whether a timeout happened -- if the
        thread is still alive, the join() call timed out.
        When the timeout argument is not present or None, the operation will
        block until the thread terminates.
        A thread can be join()ed many times.
        join() raises a RuntimeError if an attempt is made to join the current
        thread as that would cause a deadlock. It is also an error to join() a
        thread before it has been started, and attempts to do so raise the same
        exception.
        """
        if not self._initialized:
            raise RuntimeError("Thread.__init__() not called")
        if not self._started.is_set():
            raise RuntimeError("cannot join thread before it is started")
        if self is current_thread():
            raise RuntimeError("cannot join current thread")
        if timeout is None:
            self._wait_for_tstate_lock()
        else:
            # the behavior of a negative timeout isn't documented, but
            # historically .join(timeout=x) for x<0 has acted as if timeout=0
            self._wait_for_tstate_lock(timeout=max(timeout, 0))
    def _wait_for_tstate_lock(self, block=True, timeout=-1):
        # Issue #18808: wait for the thread state to be gone.
        # At the end of the thread's life, after all knowledge of the thread
        # is removed from C data structures, C code releases our _tstate_lock.
        # This method passes its arguments to _tstate_lock.acquire().
        # If the lock is acquired, the C code is done, and self._stop() is
        # called.  That sets ._is_stopped to True, and ._tstate_lock to None.
        lock = self._tstate_lock
        if lock is None:  # already determined that the C code is done
            assert self._is_stopped
        elif lock.acquire(block, timeout):
            lock.release()
            self._stop()
    @property
    def name(self):
        """A string used for identification purposes only.
        It has no semantics. Multiple threads may be given the same name. The
        initial name is set by the constructor.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._name
    @name.setter
    def name(self, name):
        assert self._initialized, "Thread.__init__() not called"
        self._name = str(name)
    @property
    def ident(self):
        """Thread identifier of this thread or None if it has not been started.
        This is a nonzero integer. See the thread.get_ident() function. Thread
        identifiers may be recycled when a thread exits and another thread is
        created. The identifier is available even after the thread has exited.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._ident
    def is_alive(self):
        """Return whether the thread is alive.
        This method returns True just before the run() method starts until just
        after the run() method terminates. The module function enumerate()
        returns a list of all alive threads.
        """
        assert self._initialized, "Thread.__init__() not called"
        if self._is_stopped or not self._started.is_set():
            return False
        self._wait_for_tstate_lock(False)
        return not self._is_stopped
    isAlive = is_alive
    @property
    def daemon(self):
        """A boolean value indicating whether this thread is a daemon thread.
        This must be set before start() is called, otherwise RuntimeError is
        raised. Its initial value is inherited from the creating thread; the
        main thread is not a daemon thread and therefore all threads created in
        the main thread default to daemon = False.
        The entire Python program exits when no alive non-daemon threads are
        left.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._daemonic
    @daemon.setter
    def daemon(self, daemonic):
        if not self._initialized:
            raise RuntimeError("Thread.__init__() not called")
        if self._started.is_set():
            raise RuntimeError("cannot set daemon status of active thread")
        self._daemonic = daemonic
    def isDaemon(self):
        return self.daemon
    def setDaemon(self, daemonic):
        self.daemon = daemonic
    def getName(self):
        return self.name
    def setName(self, name):
        self.name = name
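# A minimal sketch of the two usage styles named in the class docstring:
# passing a target callable, or subclassing and overriding run()
# (illustrative; defined here but never called).
def _example_thread_usage():
    results = []
    # Style 1: pass a target callable.
    t = Thread(target=results.append, args=(1,), name="worker-1")
    t.start()
    t.join()                    # wait for the thread to finish
    # Style 2: subclass Thread and override run().
    class _Worker(Thread):
        def run(self):
            results.append(2)
    w = _Worker(daemon=True)
    w.start()
    w.join()
    return results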
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
    """Call a function after a specified number of seconds:
            t = Timer(30.0, f, args=None, kwargs=None)
            t.start()
            t.cancel()     # stop the timer's action if it's still waiting
    """
    def __init__(self, interval, function, args=None, kwargs=None):
        Thread.__init__(self)
        self.interval = interval
        self.function = function
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.finished = Event()
    def cancel(self):
        """Stop the timer if it hasn't finished yet."""
        self.finished.set()
    def run(self):
        self.finished.wait(self.interval)
        if not self.finished.is_set():
            self.function(*self.args, **self.kwargs)
        self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
    def __init__(self):
        Thread.__init__(self, name="MainThread", daemon=False)
        self._set_tstate_lock()
        self._started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[self._ident] = self
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to previous semantics).
class _DummyThread(Thread):
    def __init__(self):
        Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
        self._started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[self._ident] = self
    def _stop(self):
        pass
    def join(self, timeout=None):
        assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
    """Return the current Thread object, corresponding to the caller's thread of control.
    If the caller's thread of control was not created through the threading
    module, a dummy thread object with limited functionality is returned.
    """
    try:
        return _active[get_ident()]
    except KeyError:
        return _DummyThread()
currentThread = current_thread
def active_count():
    """Return the number of Thread objects currently alive.
    The returned count is equal to the length of the list returned by
    enumerate().
    """
    with _active_limbo_lock:
        return len(_active) + len(_limbo)
activeCount = active_count
def _enumerate():
    # Same as enumerate(), but without the lock. Internal use only.
    return list(_active.values()) + list(_limbo.values())
def enumerate():
    """Return a list of all Thread objects currently alive.
    The list includes daemonic threads, dummy thread objects created by
    current_thread(), and the main thread. It excludes terminated threads and
    threads that have not yet been started.
    """
    with _active_limbo_lock:
        return list(_active.values()) + list(_limbo.values())
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_main_thread = _MainThread()
def _shutdown():
    # Obscure:  other threads may be waiting to join _main_thread.  That's
    # dubious, but some code does it.  We can't wait for C code to release
    # the main thread's tstate_lock - that won't happen until the interpreter
    # is nearly dead.  So we release it here.  Note that just calling _stop()
    # isn't enough:  other threads may already be waiting on _tstate_lock.
    tlock = _main_thread._tstate_lock
    # The main thread isn't finished yet, so its thread state lock can't have
    # been released.
    assert tlock is not None
    assert tlock.locked()
    tlock.release()
    _main_thread._stop()
    t = _pickSomeNonDaemonThread()
    while t:
        t.join()
        t = _pickSomeNonDaemonThread()
    _main_thread._delete()
def _pickSomeNonDaemonThread():
    for t in enumerate():
        if not t.daemon and t.is_alive():
            return t
    return None
def main_thread():
    """Return the main thread object.
    In normal conditions, the main thread is the thread from which the
    Python interpreter was started.
    """
    return _main_thread
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
    from _thread import _local as local
except ImportError:
    from _threading_local import local
def _after_fork():
    # This function is called by Python/ceval.c:PyEval_ReInitThreads which
    # is called from PyOS_AfterFork.  Here we cleanup threading module state
    # that should not exist after a fork.
    # Reset _active_limbo_lock, in case we forked while the lock was held
    # by another (non-forked) thread.  http://bugs.python.org/issue874900
    global _active_limbo_lock, _main_thread
    _active_limbo_lock = _allocate_lock()
    # fork() only copied the current thread; clear references to others.
    new_active = {}
    current = current_thread()
    _main_thread = current
    with _active_limbo_lock:
        # Dangling thread instances must still have their locks reset,
        # because someone may join() them.
        threads = set(_enumerate())
        threads.update(_dangling)
        for thread in threads:
            # Any lock/condition variable may be currently locked or in an
            # invalid state, so we reinitialize them.
            if thread is current:
                # There is only one active thread. We reset the ident to
                # its new value since it can have changed.
                thread._reset_internal_locks(True)
                ident = get_ident()
                thread._ident = ident
                new_active[ident] = thread
            else:
                # All the others are already stopped.
                thread._reset_internal_locks(False)
                thread._stop()
        _limbo.clear()
        _active.clear()
        _active.update(new_active)
        assert len(_active) == 1
 | 
	lgpl-3.0 | 
| 
	zzragida/PythonExamples | 
	MemberShip/deploy/qa-taiwan/web/db/__init__.py | 
	4 | 
	1563 | 
	# -*- coding:utf-8 -*-
from SQLRelay import PySQLRClient
from SQLRelay import PySQLRDB
from config import SQLRELAYS
INSTANCES = []
for sqlrelay in SQLRELAYS:
    INSTANCES.append([0, sqlrelay])
def sqlrelay_cursor():
    ''' Connect sqlrelay rdb '''
    info = sorted(INSTANCES, key=lambda x: x[0])[0]
    try:
        con = PySQLRDB.connect(
                info[1]['host'],
                info[1]['port'],
                '',
                info[1]['user'],
                info[1]['pass'],
                0, 1)
        cur = con.cursor()
    except PySQLRDB.DatabaseError, e:
        raise
    
    info[0] += 1
    return con, cur
def sqlrelay_close(cur, con):
    ''' Close sqlrelay rdb '''
    if cur:
        cur.close()
        del cur
    if con:
        con.close()
        del con
    import gc; gc.collect()
def sqlrelay_client_cursor(debug=False):
    ''' Connect sqlrelay client '''
    info = sorted(INSTANCES, key=lambda x: x[0])[0]
    try:
        con = PySQLRClient.sqlrconnection(
                info[1]['host'],
                info[1]['port'],
                '',
                info[1]['user'],
                info[1]['pass'],
                0, 1)
        cur = PySQLRClient.sqlrcursor(con)
        if debug:
            con.debugOn()
    except Exception, e:
        raise
    
    info[0] += 1
    return con, cur
def sqlrelay_client_close(cur, con):
    ''' Close sqlrelay client '''
    if cur:
        del cur
    if con:
        con.debugOff()
        con.endSession()
        del con
    import gc; gc.collect()
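# A minimal usage sketch for the helpers above; the statement text is
# illustrative and assumes PySQLRDB cursors follow the DB-API interface.
def sqlrelay_example():
    con, cur = sqlrelay_cursor()
    try:
        cur.execute('SELECT 1')
        rows = cur.fetchall()
    finally:
        sqlrelay_close(cur, con)
    return rows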
 | 
	mit | 
| 
	algorythmic/bash-completion | 
	test/t/unit/test_unit_count_args.py | 
	2 | 
	2035 | 
	import pytest
from conftest import TestUnitBase, assert_bash_exec
@pytest.mark.bashcomp(
    cmd=None, ignore_env=r"^[+-](args|COMP_(WORDS|CWORD|LINE|POINT))="
)
class TestUnitCountArgs(TestUnitBase):
    def _test(self, *args, **kwargs):
        return self._test_unit("_count_args %s; echo $args", *args, **kwargs)
    def test_1(self, bash):
        assert_bash_exec(bash, "COMP_CWORD= _count_args >/dev/null")
    def test_2(self, bash):
        """a b| should set args to 1"""
        output = self._test(bash, "(a b)", 1, "a b", 3)
        assert output == "1"
    def test_3(self, bash):
        """a b|c should set args to 1"""
        output = self._test(bash, "(a bc)", 1, "a bc", 3)
        assert output == "1"
    def test_4(self, bash):
        """a b c| should set args to 2"""
        output = self._test(bash, "(a b c)", 2, "a b c", 4)
        assert output == "2"
    def test_5(self, bash):
        """a b| c should set args to 1"""
        output = self._test(bash, "(a b c)", 1, "a b c", 3)
        assert output == "1"
    def test_6(self, bash):
        """a b -c| d should set args to 2"""
        output = self._test(bash, "(a b -c d)", 2, "a b -c d", 6)
        assert output == "2"
    def test_7(self, bash):
        """a b -c d e| with -c arg excluded should set args to 2"""
        output = self._test(
            bash, "(a b -c d e)", 4, "a b -c d e", 10, arg='"" "@(-c|--foo)"'
        )
        assert output == "2"
    def test_8(self, bash):
        """a -b -c d e| with -c arg excluded
        and -b included should set args to 1"""
        output = self._test(
            bash,
            "(a -b -c d e)",
            4,
            "a -b -c d e",
            11,
            arg='"" "@(-c|--foo)" "-[b]"',
        )
        assert output == "2"
    def test_9(self, bash):
        """a -b -c d e| with -b included should set args to 3"""
        output = self._test(
            bash, "(a -b -c d e)", 4, "a -b -c d e", 11, arg='"" "" "-b"'
        )
        assert output == "3"
 | 
	gpl-2.0 | 
| 
	gioman/QGIS | 
	python/plugins/processing/gui/MultipleInputDialog.py | 
	2 | 
	4243 | 
	# -*- coding: utf-8 -*-
"""
***************************************************************************
    MultipleInputDialog.py
    ---------------------
    Date                 : August 2012
    Copyright            : (C) 2012 by Victor Olaya
    Email                : volayaf at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""
from builtins import range
from builtins import basestring
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QDialog, QAbstractItemView, QPushButton, QDialogButtonBox
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
    os.path.join(pluginPath, 'ui', 'DlgMultipleSelection.ui'))
class MultipleInputDialog(BASE, WIDGET):
    def __init__(self, options, selectedoptions=None):
        super(MultipleInputDialog, self).__init__(None)
        self.setupUi(self)
        self.lstLayers.setSelectionMode(QAbstractItemView.NoSelection)
        self.options = []
        for i, option in enumerate(options):
            if option is None or isinstance(option, basestring):
                self.options.append((i, option))
            else:
                self.options.append((option[0], option[1]))
        self.selectedoptions = selectedoptions or []
        # Additional buttons
        self.btnSelectAll = QPushButton(self.tr('Select all'))
        self.buttonBox.addButton(self.btnSelectAll,
                                 QDialogButtonBox.ActionRole)
        self.btnClearSelection = QPushButton(self.tr('Clear selection'))
        self.buttonBox.addButton(self.btnClearSelection,
                                 QDialogButtonBox.ActionRole)
        self.btnToggleSelection = QPushButton(self.tr('Toggle selection'))
        self.buttonBox.addButton(self.btnToggleSelection,
                                 QDialogButtonBox.ActionRole)
        self.btnSelectAll.clicked.connect(lambda: self.selectAll(True))
        self.btnClearSelection.clicked.connect(lambda: self.selectAll(False))
        self.btnToggleSelection.clicked.connect(self.toggleSelection)
        self.populateList()
    def populateList(self):
        model = QStandardItemModel()
        for value, text in self.options:
            item = QStandardItem(text)
            item.setData(value, Qt.UserRole)
            item.setCheckState(Qt.Checked if value in self.selectedoptions else Qt.Unchecked)
            item.setCheckable(True)
            model.appendRow(item)
        self.lstLayers.setModel(model)
    def accept(self):
        self.selectedoptions = []
        model = self.lstLayers.model()
        for i in range(model.rowCount()):
            item = model.item(i)
            if item.checkState() == Qt.Checked:
                self.selectedoptions.append(item.data(Qt.UserRole))
        QDialog.accept(self)
    def reject(self):
        self.selectedoptions = None
        QDialog.reject(self)
    def selectAll(self, value):
        model = self.lstLayers.model()
        for i in range(model.rowCount()):
            item = model.item(i)
            item.setCheckState(Qt.Checked if value else Qt.Unchecked)
    def toggleSelection(self):
        model = self.lstLayers.model()
        for i in range(model.rowCount()):
            item = model.item(i)
            checked = item.checkState() == Qt.Checked
            item.setCheckState(Qt.Unchecked if checked else Qt.Checked)
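# A minimal usage sketch (assumes a running QApplication, e.g. inside QGIS;
# the option labels are illustrative).
def _exampleUsage():
    dlg = MultipleInputDialog(['layer A', 'layer B', 'layer C'],
                              selectedoptions=[0, 2])
    if dlg.exec_():                      # modal; truthy when accepted
        return dlg.selectedoptions       # values left checked by the user
    return None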
 | 
	gpl-2.0 | 
| 
	en0/Supybot_sasl | 
	plugins/String/config.py | 
	8 | 
	2799 | 
	###
# Copyright (c) 2003-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions, and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions, and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#   * Neither the name of the author of this software nor the name of
#     contributors to this software may be used to endorse or promote products
#     derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
    # This will be called by supybot to configure this module.  advanced is
    # a bool that specifies whether the user identified himself as an advanced
    # user or not.  You should effect your configuration by manipulating the
    # registry as appropriate.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('String', True)
String = conf.registerPlugin('String')
conf.registerGroup(String, 'levenshtein')
conf.registerGlobalValue(String.levenshtein, 'max',
    registry.PositiveInteger(256, """Determines the maximum size of a string
    given to the levenshtein command.  The levenshtein command uses an O(m*n)
    algorithm, which means that with strings of length 256, it can take 1.5
    seconds to finish; with strings of length 384, though, it can take 4
    seconds to finish, and with strings of much larger lengths, it takes more
    and more time.  Using nested commands, strings can get quite large, hence
    this variable, to limit the size of arguments passed to the levenshtein
    command."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
 | 
	bsd-3-clause | 
| 
	Xeralux/tensorflow | 
	tensorflow/python/keras/_impl/keras/engine/training.py | 
	1 | 
	72917 | 
	# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import losses
from tensorflow.python.keras._impl.keras import metrics as metrics_module
from tensorflow.python.keras._impl.keras import optimizers
from tensorflow.python.keras._impl.keras.engine import training_arrays
from tensorflow.python.keras._impl.keras.engine import training_eager
from tensorflow.python.keras._impl.keras.engine import training_generator
from tensorflow.python.keras._impl.keras.engine import training_utils
from tensorflow.python.keras._impl.keras.engine.base_layer import Layer
from tensorflow.python.keras._impl.keras.engine.network import Network
from tensorflow.python.keras._impl.keras.utils.generic_utils import slice_arrays
from tensorflow.python.layers.base import _DeferredTensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.models.Model', 'keras.Model')
class Model(Network):
  """`Model` groups layers into an object with training and inference features.
  There are two ways to instantiate a `Model`:
  1 - With the "functional API", where you start from `Input`,
  you chain layer calls to specify the model's forward pass,
  and finally you create your model from inputs and outputs:
  ```python
  import tensorflow as tf
  inputs = tf.keras.Input(shape=(3,))
  x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
  outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
  model = tf.keras.Model(inputs=inputs, outputs=outputs)
  ```
  2 - By subclassing the `Model` class: in that case, you should define your
  layers in `__init__` and you should implement the model's forward pass
  in `call`.
  ```python
  import tensorflow as tf
  class MyModel(tf.keras.Model):
    def __init__(self):
      super(MyModel, self).__init__()
      self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
      self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
    def call(self, inputs):
      x = self.dense1(inputs)
      return self.dense2(x)
  model = MyModel()
  ```
  If you subclass `Model`, you can optionally have
  a `training` argument (boolean) in `call`, which you can use to specify
  a different behavior in training and inference:
  ```python
  import tensorflow as tf
  class MyModel(tf.keras.Model):
    def __init__(self):
      super(MyModel, self).__init__()
      self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
      self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
      self.dropout = tf.keras.layers.Dropout(0.5)
    def call(self, inputs, training=False):
      x = self.dense1(inputs)
      if training:
        x = self.dropout(x, training=training)
      return self.dense2(x)
  model = MyModel()
  ```
  """
  def compile(self,
              optimizer,
              loss=None,
              metrics=None,
              loss_weights=None,
              sample_weight_mode=None,
              weighted_metrics=None,
              target_tensors=None,
              **kwargs):
    """Configures the model for training.
    Arguments:
        optimizer: String (name of optimizer) or optimizer instance.
            See [optimizers](/optimizers).
        loss: String (name of objective function) or objective function.
            See [losses](/losses).
            If the model has multiple outputs, you can use a different loss
            on each output by passing a dictionary or a list of losses.
            The loss value that will be minimized by the model
            will then be the sum of all individual losses.
        metrics: List of metrics to be evaluated by the model
            during training and testing.
            Typically you will use `metrics=['accuracy']`.
            To specify different metrics for different outputs of a
            multi-output model, you could also pass a dictionary,
            such as `metrics={'output_a': 'accuracy'}`.
        loss_weights: Optional list or dictionary specifying scalar
            coefficients (Python floats) to weight the loss contributions
            of different model outputs.
            The loss value that will be minimized by the model
            will then be the *weighted sum* of all individual losses,
            weighted by the `loss_weights` coefficients.
            If a list, it is expected to have a 1:1 mapping
            to the model's outputs. If a dict, it is expected to map
            output names (strings) to scalar coefficients.
        sample_weight_mode: If you need to do timestep-wise
            sample weighting (2D weights), set this to `"temporal"`.
            `None` defaults to sample-wise weights (1D).
            If the model has multiple outputs, you can use a different
            `sample_weight_mode` on each output by passing a
            dictionary or a list of modes.
        weighted_metrics: List of metrics to be evaluated and weighted
            by sample_weight or class_weight during training and testing.
        target_tensors: By default, Keras will create placeholders for the
            model's target, which will be fed with the target data during
            training. If instead you would like to use your own
            target tensors (in turn, Keras will not expect external
            Numpy data for these targets at training time), you
            can specify them via the `target_tensors` argument. It can be
            a single tensor (for a single-output model), a list of tensors,
            or a dict mapping output names to target tensors.
        **kwargs: These arguments are passed to `tf.Session.run`.
    Raises:
        ValueError: In case of invalid arguments for
            `optimizer`, `loss`, `metrics` or `sample_weight_mode`.
    """
    loss = loss or {}
    if context.executing_eagerly() and not isinstance(
        optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
      raise ValueError('Only TF native optimizers are supported in Eager mode.')
    self.optimizer = optimizers.get(optimizer)
    self.loss = loss
    self.metrics = metrics or []
    self.loss_weights = loss_weights
    if context.executing_eagerly() and sample_weight_mode is not None:
      raise ValueError('sample_weight_mode is not supported in Eager mode.')
    self.sample_weight_mode = sample_weight_mode
    if context.executing_eagerly() and weighted_metrics is not None:
      raise ValueError('weighted_metrics is not supported in Eager mode.')
    self.weighted_metrics = weighted_metrics
    if context.executing_eagerly() and target_tensors is not None:
      raise ValueError('target_tensors is not supported in Eager mode.')
    self.target_tensors = target_tensors
    if not self.built:
      # Model is not compilable because it does not know its number of inputs
      # and outputs, nor their shapes and names. We will compile after the first
      # time the model gets called on training data.
      return
    self._is_compiled = True
    # Prepare loss functions.
    if isinstance(loss, dict):
      for name in loss:
        if name not in self.output_names:
          raise ValueError(
              'Unknown entry in loss '
              'dictionary: "' + name + '". '
              'Only expected the following keys: ' + str(self.output_names))
      loss_functions = []
      for name in self.output_names:
        if name not in loss:
          logging.warning(
              'Output "' + name + '" missing from loss dictionary. '
              'We assume this was done on purpose, '
              'and we will not be expecting '
              'any data to be passed to "' + name + '" during training.')
        loss_functions.append(losses.get(loss.get(name)))
    elif isinstance(loss, list):
      if len(loss) != len(self.outputs):
        raise ValueError('When passing a list as loss, '
                         'it should have one entry per model output. '
                         'The model has ' + str(len(self.outputs)) +
                         ' outputs, but you passed loss=' + str(loss))
      loss_functions = [losses.get(l) for l in loss]
    else:
      loss_function = losses.get(loss)
      loss_functions = [loss_function for _ in range(len(self.outputs))]
    self.loss_functions = loss_functions
    weighted_losses = [training_utils.weighted_masked_objective(fn)
                       for fn in loss_functions]
    skip_target_indices = []
    skip_target_weighing_indices = []
    self._feed_outputs = []
    self._feed_output_names = []
    self._feed_output_shapes = []
    self._feed_loss_fns = []
    for i in range(len(weighted_losses)):
      if weighted_losses[i] is None:
        skip_target_indices.append(i)
        skip_target_weighing_indices.append(i)
    # Prepare output masks.
    if not context.executing_eagerly():
      masks = self.compute_mask(self.inputs, mask=None)
      if masks is None:
        masks = [None for _ in self.outputs]
      if not isinstance(masks, list):
        masks = [masks]
    # Prepare loss weights.
    if loss_weights is None:
      loss_weights_list = [1. for _ in range(len(self.outputs))]
    elif isinstance(loss_weights, dict):
      for name in loss_weights:
        if name not in self.output_names:
          raise ValueError(
              'Unknown entry in loss_weights '
              'dictionary: "' + name + '". '
              'Only expected the following keys: ' + str(self.output_names))
      loss_weights_list = []
      for name in self.output_names:
        loss_weights_list.append(loss_weights.get(name, 1.))
    elif isinstance(loss_weights, list):
      if len(loss_weights) != len(self.outputs):
        raise ValueError(
            'When passing a list as loss_weights, '
            'it should have one entry per model output. '
            'The model has ' + str(len(self.outputs)) +
            ' outputs, but you passed loss_weights=' + str(loss_weights))
      loss_weights_list = loss_weights
    else:
      raise TypeError('Could not interpret loss_weights argument: ' +
                      str(loss_weights) + ' - expected a list or dict.')
    self.loss_weights_list = loss_weights_list
    # initialization for Eager mode execution
    if context.executing_eagerly():
      if target_tensors is not None:
        raise ValueError('target_tensors are not currently supported in Eager '
                         'mode.')
      self.total_loss = None
      self.metrics_tensors = []
      self.metrics_names = ['loss']
      for i in range(len(self.outputs)):
        if len(self.outputs) > 1:
          self.metrics_names.append(self.output_names[i] + '_loss')
      self.nested_metrics = training_utils.collect_metrics(metrics,
                                                           self.output_names)
      self._feed_sample_weight_modes = []
      for i in range(len(self.outputs)):
        self._feed_sample_weight_modes.append(None)
      self.sample_weights = []
      self.targets = []
      for i in range(len(self.outputs)):
        self._feed_output_names.append(self.output_names[i])
      self._collected_trainable_weights = self.trainable_weights
      return
    # Prepare targets of model.
    self.targets = []
    self._feed_targets = []
    if target_tensors not in (None, []):
      if isinstance(target_tensors, list):
        if len(target_tensors) != len(self.outputs):
          raise ValueError(
              'When passing a list as `target_tensors`, '
              'it should have one entry per model output. '
              'The model has ' + str(len(self.outputs)) +
              ' outputs, but you passed target_tensors=' + str(target_tensors))
      elif isinstance(target_tensors, dict):
        for name in target_tensors:
          if name not in self.output_names:
            raise ValueError(
                'Unknown entry in `target_tensors` '
                'dictionary: "' + name + '". '
                'Only expected the following keys: ' + str(self.output_names))
        tmp_target_tensors = []
        for name in self.output_names:
          tmp_target_tensors.append(target_tensors.get(name, None))
        target_tensors = tmp_target_tensors
      else:
        raise TypeError('Expected `target_tensors` to be '
                        'a list or dict, but got:', target_tensors)
    for i in range(len(self.outputs)):
      if i in skip_target_indices:
        self.targets.append(None)
      else:
        shape = K.int_shape(self.outputs[i])
        name = self.output_names[i]
        if target_tensors not in (None, []):
          target = target_tensors[i]
        else:
          target = None
        if target is None or K.is_placeholder(target):
          if target is None:
            target = K.placeholder(
                ndim=len(shape),
                name=name + '_target',
                sparse=K.is_sparse(self.outputs[i]),
                dtype=K.dtype(self.outputs[i]))
          self._feed_targets.append(target)
          self._feed_outputs.append(self.outputs[i])
          self._feed_output_names.append(name)
          self._feed_output_shapes.append(shape)
          self._feed_loss_fns.append(self.loss_functions[i])
        else:
          skip_target_weighing_indices.append(i)
        self.targets.append(target)
    # Prepare sample weights.
    sample_weights = []
    sample_weight_modes = []
    if isinstance(sample_weight_mode, dict):
      for name in sample_weight_mode:
        if name not in self.output_names:
          raise ValueError(
              'Unknown entry in '
              'sample_weight_mode dictionary: "' + name + '". '
              'Only expected the following keys: ' + str(self.output_names))
      for i, name in enumerate(self.output_names):
        if i in skip_target_weighing_indices:
          weight = None
          sample_weight_modes.append(None)
        else:
          if name not in sample_weight_mode:
            raise ValueError(
                'Output "' + name + '" missing from sample_weight_modes '
                'dictionary')
          if sample_weight_mode.get(name) == 'temporal':
            weight = K.placeholder(ndim=2, name=name + '_sample_weights')
            sample_weight_modes.append('temporal')
          else:
            weight = K.placeholder(ndim=1, name=name + '_sample_weights')
            sample_weight_modes.append(None)
        sample_weights.append(weight)
    elif isinstance(sample_weight_mode, list):
      if len(sample_weight_mode) != len(self.outputs):
        raise ValueError('When passing a list as sample_weight_mode, '
                         'it should have one entry per model output. '
                         'The model has ' + str(len(self.outputs)) +
                         ' outputs, but you passed '
                         'sample_weight_mode=' + str(sample_weight_mode))
      for i in range(len(self.output_names)):
        if i in skip_target_weighing_indices:
          weight = None
          sample_weight_modes.append(None)
        else:
          mode = sample_weight_mode[i]
          name = self.output_names[i]
          if mode == 'temporal':
            weight = K.placeholder(ndim=2, name=name + '_sample_weights')
            sample_weight_modes.append('temporal')
          else:
            weight = K.placeholder(ndim=1, name=name + '_sample_weights')
            sample_weight_modes.append(None)
        sample_weights.append(weight)
    else:
      for i, name in enumerate(self.output_names):
        if i in skip_target_weighing_indices:
          sample_weight_modes.append(None)
          sample_weights.append(None)
        else:
          if sample_weight_mode == 'temporal':
            sample_weights.append(array_ops.placeholder_with_default(
                [[1.]], shape=[None, None], name=name + '_sample_weights'))
            sample_weight_modes.append('temporal')
          else:
            sample_weights.append(array_ops.placeholder_with_default(
                [1.], shape=[None], name=name + '_sample_weights'))
            sample_weight_modes.append(None)
    self.sample_weight_modes = sample_weight_modes
    self._feed_sample_weight_modes = []
    for i in range(len(self.outputs)):
      if i not in skip_target_weighing_indices:
        self._feed_sample_weight_modes.append(self.sample_weight_modes[i])
    # Prepare metrics.
    self.weighted_metrics = weighted_metrics
    self.metrics_names = ['loss']
    self.metrics_tensors = []
    # Compute total loss.
    total_loss = None
    with K.name_scope('loss'):
      for i in range(len(self.outputs)):
        if i in skip_target_indices:
          continue
        y_true = self.targets[i]
        y_pred = self.outputs[i]
        weighted_loss = weighted_losses[i]
        sample_weight = sample_weights[i]
        mask = masks[i]
        loss_weight = loss_weights_list[i]
        with K.name_scope(self.output_names[i] + '_loss'):
          output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
        if len(self.outputs) > 1:
          self.metrics_tensors.append(output_loss)
          self.metrics_names.append(self.output_names[i] + '_loss')
        if total_loss is None:
          total_loss = loss_weight * output_loss
        else:
          total_loss += loss_weight * output_loss
      if total_loss is None:
        if not self.losses:
          raise ValueError('The model cannot be compiled '
                           'because it has no loss to optimize.')
        else:
          total_loss = 0.
      # Add regularization penalties
      # and other layer-specific losses.
      for loss_tensor in self.losses:
        total_loss += loss_tensor
    # List of the same size as output_names,
    # containing tuples (metrics for output, names of metrics).
    nested_metrics = training_utils.collect_metrics(metrics, self.output_names)
    nested_weighted_metrics = training_utils.collect_metrics(weighted_metrics,
                                                             self.output_names)
    self.metrics_updates = []
    self.stateful_metric_names = []
    with K.name_scope('metrics'):
      for i in range(len(self.outputs)):
        if i in skip_target_indices:
          continue
        y_true = self.targets[i]
        y_pred = self.outputs[i]
        weights = sample_weights[i]
        output_metrics = nested_metrics[i]
        output_weighted_metrics = nested_weighted_metrics[i]
        def handle_metrics(metrics, weights=None):
          metric_name_prefix = 'weighted_' if weights is not None else ''
          for metric in metrics:
            if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):
              # custom handling of accuracy/crossentropy
              # (because of class mode duality)
              output_shape = self.outputs[i].get_shape().as_list()
              if (output_shape[-1] == 1 or
                  self.loss_functions[i] == losses.binary_crossentropy):
                # case: binary accuracy/crossentropy
                if metric in ('accuracy', 'acc'):
                  metric_fn = metrics_module.binary_accuracy
                elif metric in ('crossentropy', 'ce'):
                  metric_fn = metrics_module.binary_crossentropy
              elif self.loss_functions[
                  i] == losses.sparse_categorical_crossentropy:
                # case: categorical accuracy/crossentropy with sparse targets
                if metric in ('accuracy', 'acc'):
                  metric_fn = metrics_module.sparse_categorical_accuracy
                elif metric in ('crossentropy', 'ce'):
                  metric_fn = metrics_module.sparse_categorical_crossentropy
              else:
                # case: categorical accuracy/crossentropy
                if metric in ('accuracy', 'acc'):
                  metric_fn = metrics_module.categorical_accuracy
                elif metric in ('crossentropy', 'ce'):
                  metric_fn = metrics_module.categorical_crossentropy
              if metric in ('accuracy', 'acc'):
                suffix = 'acc'
              elif metric in ('crossentropy', 'ce'):
                suffix = 'ce'
              weighted_metric_fn = training_utils.weighted_masked_objective(
                  metric_fn)
              metric_name = metric_name_prefix + suffix
            else:
              metric_fn = metrics_module.get(metric)
              weighted_metric_fn = training_utils.weighted_masked_objective(
                  metric_fn)
              # Get metric name as string
              if hasattr(metric_fn, 'name'):
                metric_name = metric_fn.name
              else:
                metric_name = metric_fn.__name__
              metric_name = metric_name_prefix + metric_name
            with K.name_scope(metric_name):
              metric_result = weighted_metric_fn(
                  y_true, y_pred, weights=weights, mask=masks[i])
            # Append to self.metrics_names, self.metric_tensors,
            # self.stateful_metric_names
            if len(self.output_names) > 1:
              metric_name = '%s_%s' % (self.output_names[i], metric_name)
            # Dedupe name
            j = 1
            base_metric_name = metric_name
            while metric_name in self.metrics_names:
              metric_name = '%s_%d' % (base_metric_name, j)
              j += 1
            self.metrics_names.append(metric_name)
            self.metrics_tensors.append(metric_result)
            # Keep track of state updates created by
            # stateful metrics (i.e. metrics layers).
            if isinstance(metric_fn, Layer):
              self.stateful_metric_names.append(metric_name)
              self.metrics_updates += metric_fn.updates
        handle_metrics(output_metrics)
        handle_metrics(output_weighted_metrics, weights=weights)
    # Prepare gradient updates and state updates.
    self.total_loss = total_loss
    self.sample_weights = sample_weights
    self._feed_sample_weights = []
    for i in range(len(self.sample_weights)):
      if i not in skip_target_weighing_indices:
        self._feed_sample_weights.append(self.sample_weights[i])
    # Functions for train, test and predict will
    # be compiled lazily when required.
    # This saves time when the user is not using all functions.
    self._function_kwargs = kwargs
    self.train_function = None
    self.test_function = None
    self.predict_function = None
    # Collected trainable weights, sorted in topological order.
    trainable_weights = self.trainable_weights
    self._collected_trainable_weights = trainable_weights
  def _check_trainable_weights_consistency(self):
    """Check trainable weights count consistency.
    This will raise a warning if `trainable_weights` and
    `_collected_trainable_weights` are inconsistent (i.e. have different
    number of parameters).
    Inconsistency will typically arise when one modifies `model.trainable`
    without calling `model.compile` again.
    """
    if not hasattr(self, '_collected_trainable_weights'):
      return
    if len(self.trainable_weights) != len(self._collected_trainable_weights):
      logging.warning(
          UserWarning(
              'Discrepancy between trainable weights and collected trainable'
              ' weights, did you set `model.trainable` without calling'
              ' `model.compile` afterwards?'))
  def _make_train_function(self):
    if not hasattr(self, 'train_function'):
      raise RuntimeError('You must compile your model before using it.')
    self._check_trainable_weights_consistency()
    if self.train_function is None:
      inputs = (self._feed_inputs +
                self._feed_targets +
                self._feed_sample_weights)
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs += [K.learning_phase()]
      with K.name_scope('training'):
        with K.name_scope(self.optimizer.__class__.__name__):
          # Training updates
          updates = self.optimizer.get_updates(
              params=self._collected_trainable_weights, loss=self.total_loss)
        # Unconditional updates
        updates += self.get_updates_for(None)
        # Conditional updates relevant to this model
        updates += self.get_updates_for(self._feed_inputs)
        # Stateful metrics updates
        updates += self.metrics_updates
        # Gets loss and metrics. Updates weights at each call.
        self.train_function = K.function(
            inputs, [self.total_loss] + self.metrics_tensors,
            updates=updates,
            name='train_function',
            **self._function_kwargs)
  def _make_test_function(self):
    if not hasattr(self, 'test_function'):
      raise RuntimeError('You must compile your model before using it.')
    if self.test_function is None:
      inputs = (self._feed_inputs +
                self._feed_targets +
                self._feed_sample_weights)
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs += [K.learning_phase()]
      # Return loss and metrics, no gradient updates.
      # Does update the network states.
      self.test_function = K.function(
          inputs, [self.total_loss] + self.metrics_tensors,
          updates=self.state_updates + self.metrics_updates,
          name='test_function',
          **self._function_kwargs)
  def _make_predict_function(self):
    if not hasattr(self, 'predict_function'):
      self.predict_function = None
    if self.predict_function is None:
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs = self._feed_inputs + [K.learning_phase()]
      else:
        inputs = self._feed_inputs
      # Gets network outputs. Does not update weights.
      # Does update the network states.
      kwargs = getattr(self, '_function_kwargs', {})
      self.predict_function = K.function(
          inputs,
          self.outputs,
          updates=self.state_updates,
          name='predict_function',
          **kwargs)
  def _standardize_user_data(self,
                             x,
                             y=None,
                             sample_weight=None,
                             class_weight=None,
                             batch_size=None):
    """Runs validation checks on input and target data passed by the user.
    Also standardizes the data to lists of arrays, in order.
    Also builds and compiles the model on the fly if it is a subclassed model
    that has never been called before (and thus has no inputs/outputs).
    This is a purely internal method, subject to refactoring at any time.
    Args:
      x: An array or list of arrays, to be used as input data. If the model
       has known, named inputs, this could also be a dict mapping input names
       to the corresponding array.
      y: An array or list of arrays, to be used as target data. If the model
       has known, named outputs, this could also be a dict mapping output names
       to the corresponding array.
      sample_weight: An optional sample-weight array passed by the user to
        weight the importance of each sample in `x`.
      class_weight: An optional class-weight array passed by the user to
        weight the importance of samples in `x` based on the class they belong
        to, as conveyed by `y`.
      batch_size: Integer batch size. If provided, it is used to run additional
        validation checks on stateful models.
    Returns:
      A tuple of 3 lists: input arrays, target arrays, sample-weight arrays.
      If the model's input and targets are symbolic, these lists are empty
      (since the model takes no user-provided data, instead the data comes
      from the symbolic inputs/targets).
    Raises:
      ValueError: In case of invalid user-provided data.
      RuntimeError: If the model was never compiled.
    """
    # First, we build/compile the model on the fly if necessary.
    all_inputs = []
    if not self.built:
      # We need to use `x` to set the model inputs.
      # We type-check that `x` and `y` are either single arrays
      # or lists of arrays.
      if isinstance(x, (list, tuple)):
        if not all(isinstance(v, np.ndarray) or
                   tensor_util.is_tensor(v) for v in x):
          raise ValueError('Please provide as model inputs either a single '
                           'array or a list of arrays. You passed: x=' + str(x))
        all_inputs += list(x)
      elif isinstance(x, dict):
        raise ValueError('Please do not pass a dictionary as model inputs.')
      else:
        if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x):
          raise ValueError('Please provide as model inputs either a single '
                           'array or a list of arrays. You passed: x=' + str(x))
        all_inputs.append(x)
      # Build the model using the retrieved inputs (value or symbolic).
      # If values, then in symbolic-mode placeholders will be created
      # to match the value shapes.
      if not self.inputs:
        self._set_inputs(x)
    if y is not None:
      if not self.optimizer:
        raise RuntimeError('You must compile a model before '
                           'training/testing. '
                           'Use `model.compile(optimizer, loss)`.')
      if not self._is_compiled:
        # On-the-fly compilation of the model.
        # We need to use `y` to set the model targets.
        if isinstance(y, (list, tuple)):
          if not all(isinstance(v, np.ndarray) or
                     tensor_util.is_tensor(v) for v in y):
            raise ValueError('Please provide as model targets either a single '
                             'array or a list of arrays. '
                             'You passed: y=' + str(y))
        elif isinstance(y, dict):
          raise ValueError('Please do not pass a dictionary as model targets.')
        else:
          if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y):
            raise ValueError('Please provide as model targets either a single '
                             'array or a list of arrays. '
                             'You passed: y=' + str(y))
        # Typecheck that all inputs are *either* value *or* symbolic.
        # TODO(fchollet): this check could be removed in Eager mode?
        if y is not None:
          if isinstance(y, (list, tuple)):
            all_inputs += list(y)
          else:
            all_inputs.append(y)
        if any(tensor_util.is_tensor(v) for v in all_inputs):
          if not all(tensor_util.is_tensor(v) for v in all_inputs):
            raise ValueError('Do not pass inputs that mix Numpy arrays and '
                             'TensorFlow tensors. '
                             'You passed: x=' + str(x) + '; y=' + str(y))
        if context.executing_eagerly():
          target_tensors = None
        else:
          # Handle target tensors if any passed.
          if not isinstance(y, (list, tuple)):
            y = [y]
          target_tensors = [v for v in y if tensor_util.is_tensor(v)]
        self.compile(optimizer=self.optimizer,
                     loss=self.loss,
                     metrics=self.metrics,
                     loss_weights=self.loss_weights,
                     target_tensors=target_tensors)
    # If `x` and `y` were all symbolic, then the model should not be fed any
    # inputs and targets.
    # Note: in this case, `any` and `all` are equivalent since we disallow
    # mixed symbolic/value inputs.
    if any(tensor_util.is_tensor(v) for v in all_inputs):
      return [], [], []
    # What follows is input validation and standardization to list format,
    # in the case where all inputs are value arrays.
    if context.executing_eagerly():
      # In eager mode, do not do shape validation.
      feed_input_names = self.input_names
      feed_input_shapes = None
    elif not self._is_graph_network:
      # Case: symbolic-mode subclassed network. Do not do shape validation.
      feed_input_names = self._feed_input_names
      feed_input_shapes = None
    else:
      # Case: symbolic-mode graph network.
      # In this case, we run extensive shape validation checks.
      feed_input_names = self._feed_input_names
      feed_input_shapes = self._feed_input_shapes
    # Standardize the inputs.
    x = training_utils.standardize_input_data(
        x,
        feed_input_names,
        feed_input_shapes,
        check_batch_axis=False,  # Don't enforce the batch size.
        exception_prefix='input')
    if y is not None:
      if context.executing_eagerly():
        feed_output_names = self.output_names
        feed_output_shapes = None
        # Sample weighting not supported in this case.
        # TODO(fchollet): consider supporting it.
        feed_sample_weight_modes = [None for _ in self.outputs]
      elif not self._is_graph_network:
        feed_output_names = self._feed_output_names
        feed_output_shapes = None
        # Sample weighting not supported in this case.
        # TODO(fchollet): consider supporting it.
        feed_sample_weight_modes = [None for _ in self.outputs]
      else:
        feed_output_names = self._feed_output_names
        feed_sample_weight_modes = self._feed_sample_weight_modes
        feed_output_shapes = []
        for output_shape, loss_fn in zip(self._feed_output_shapes,
                                         self._feed_loss_fns):
          if loss_fn is losses.sparse_categorical_crossentropy:
            feed_output_shapes.append(output_shape[:-1] + (1,))
          elif (not hasattr(loss_fn, '__name__') or
                getattr(losses, loss_fn.__name__, None) is None):
            # If `loss_fn` is not a function (e.g. callable class)
            # or if it not in the `losses` module, then
            # it is a user-defined loss and we make no assumptions
            # about it.
            feed_output_shapes.append(None)
          else:
            feed_output_shapes.append(output_shape)
      # Standardize the outputs.
      y = training_utils.standardize_input_data(
          y,
          feed_output_names,
          feed_output_shapes,
          check_batch_axis=False,  # Don't enforce the batch size.
          exception_prefix='target')
      # Generate sample-wise weight values given the `sample_weight` and
      # `class_weight` arguments.
      sample_weights = training_utils.standardize_sample_weights(
          sample_weight, feed_output_names)
      class_weights = training_utils.standardize_class_weights(
          class_weight, feed_output_names)
      sample_weights = [
          training_utils.standardize_weights(ref, sw, cw, mode)
          for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
                                         feed_sample_weight_modes)
      ]
      # Check that all arrays have the same length.
      training_utils.check_array_lengths(x, y, sample_weights)
      if self._is_graph_network and not context.executing_eagerly():
        # Additional checks to avoid users mistakenly using improper loss fns.
        training_utils.check_loss_and_target_compatibility(
            y, self._feed_loss_fns, feed_output_shapes)
    else:
      y = []
      sample_weights = []
    if self.stateful and batch_size:
      # Check that for stateful networks, number of samples is a multiple
      # of the static batch size.
      if x[0].shape[0] % batch_size != 0:
        raise ValueError('In a stateful network, '
                         'you should only pass inputs with '
                         'a number of samples that can be '
                         'divided by the batch size. Found: ' +
                         str(x[0].shape[0]) + ' samples')
    return x, y, sample_weights
  def _set_inputs(self, inputs, training=None):
    """Set model's input and output specs based on the input data received.
    This is to be used for Model subclasses, which do not know at instantiation
    time what their inputs look like.
    Args:
      inputs: Single array, or list of arrays. The arrays could be placeholders,
        Numpy arrays, or data tensors.
        - if placeholders: the model is built on top of these placeholders,
          and we expect Numpy data to be fed for them when calling `fit`/etc.
        - if Numpy data: we create placeholders matching the shape of the Numpy
          arrays. We expect Numpy data to be fed for these placeholders
          when calling `fit`/etc.
        - if data tensors: the model is built on top of these tensors.
          We do not expect any Numpy data to be provided when calling `fit`/etc.
      training: Boolean or None. Only relevant in symbolic mode. Specifies
        whether to build the model's graph in inference mode (False), training
        mode (True), or using the Keras learning phase (None).
    """
    if self.__class__.__name__ == 'Sequential':
      # Note: we can't test whether the model is `Sequential` via `isinstance`
      # since `Sequential` depends on `Model`.
      if isinstance(inputs, list):
        assert len(inputs) == 1
        inputs = inputs[0]
      self.build(input_shape=(None,) + inputs.shape[1:])
    elif context.executing_eagerly():
      self._eager_set_inputs(inputs)
    else:
      self._symbolic_set_inputs(inputs, training=training)
  def _set_scope(self, scope=None):
    """Modify the Layer scope creation logic to create ResourceVariables."""
    super(Model, self)._set_scope(scope=scope)
    # Subclassed Models create ResourceVariables by default. This makes it
    # easier to use Models in an eager/graph agnostic way (since eager execution
    # always uses ResourceVariables).
    if not self._is_graph_network:
      self._scope.set_use_resource(True)
  def _eager_set_inputs(self, inputs):
    """Set model's input and output specs based on the input data received.
    This is to be used for Model subclasses, which do not know at instantiation
    time what their inputs look like.
    We assume the number and ndim of outputs
    does not change over different calls.
    Args:
      inputs: Argument `x` (input data) passed by the user upon first model use.
    Raises:
      ValueError: If the model's inputs are already set.
    """
    assert context.executing_eagerly()
    if self.inputs:
      raise ValueError('Model inputs are already set.')
    # On-the-fly setting of model inputs/outputs as DeferredTensors,
    # to keep track of number of inputs and outputs and their ndim.
    if isinstance(inputs, (list, tuple)):
      dummy_output_values = self.call(
          [ops.convert_to_tensor(v, dtype=K.floatx()) for v in inputs])
      dummy_input_values = list(inputs)
    else:
      dummy_output_values = self.call(
          ops.convert_to_tensor(inputs, dtype=K.floatx()))
      dummy_input_values = [inputs]
    if isinstance(dummy_output_values, (list, tuple)):
      dummy_output_values = list(dummy_output_values)
    else:
      dummy_output_values = [dummy_output_values]
    self.outputs = [
        _DeferredTensor(shape=(None for _ in v.shape),
                        dtype=v.dtype) for v in dummy_output_values]
    self.inputs = [
        _DeferredTensor(shape=(None for _ in v.shape),
                        dtype=v.dtype) for v in dummy_input_values]
    self.input_names = [
        'input_%d' % (i + 1) for i in range(len(dummy_input_values))]
    self.output_names = [
        'output_%d' % (i + 1) for i in range(len(dummy_output_values))]
    self.built = True
  def _symbolic_set_inputs(self, inputs, outputs=None, training=None):
    """Set model's inputs and output specs based.
    This is to be used for Model subclasses, which do not know at instantiation
    time what their inputs look like.
    Args:
      inputs: Argument `x` (input data) passed by the user upon first model use.
      outputs: None, a data tensor, or a list of data tensors. If None, the
        outputs will be determined by invoking self.call(), otherwise the
        provided value will be used.
      training: Boolean or None. Only relevant in symbolic mode. Specifies
        whether to build the model's graph in inference mode (False), training
        mode (True), or using the Keras learning phase (None).
    Raises:
      ValueError: If the model's inputs are already set.
    """
    assert not context.executing_eagerly()
    if self.inputs:
      raise ValueError('Model inputs are already set.')
    # On-the-fly setting of symbolic model inputs (either by using the tensor
    # provided, or by creating a placeholder if Numpy data was provided).
    self.inputs = []
    self.input_names = []
    self._feed_inputs = []
    self._feed_input_names = []
    self._feed_input_shapes = []
    if isinstance(inputs, (list, tuple)):
      inputs = list(inputs)
    else:
      inputs = [inputs]
    for i, v in enumerate(inputs):
      name = 'input_%d' % (i + 1)
      self.input_names.append(name)
      if isinstance(v, list):
        v = np.asarray(v)
        if v.ndim == 1:
          v = np.expand_dims(v, 1)
      if isinstance(v, (np.ndarray)):
        # We fix the placeholder shape except the batch size.
        # This is suboptimal, but it is the best we can do with the info
        # we have. The user should call `model._set_inputs(placeholders)`
        # to specify custom placeholders if the need arises.
        shape = (None,) + v.shape[1:]
        placeholder = K.placeholder(shape=shape, name=name)
        self.inputs.append(placeholder)
        self._feed_inputs.append(placeholder)
        self._feed_input_names.append(name)
        self._feed_input_shapes.append(shape)
      else:
        # Assumed tensor - TODO(fchollet) additional type check?
        self.inputs.append(v)
        if K.is_placeholder(v):
          self._feed_inputs.append(v)
          self._feed_input_names.append(name)
          self._feed_input_shapes.append(K.int_shape(v))
    if outputs is None:
      # Obtain symbolic outputs by calling the model.
      if len(self.inputs) == 1:
        if self._expects_training_arg:
          outputs = self.call(self.inputs[0], training=training)
        else:
          outputs = self.call(self.inputs[0])
      else:
        if self._expects_training_arg:
          outputs = self.call(self.inputs, training=training)
        else:
          outputs = self.call(self.inputs)
    if isinstance(outputs, (list, tuple)):
      outputs = list(outputs)
    else:
      outputs = [outputs]
    self.outputs = outputs
    self.output_names = [
        'output_%d' % (i + 1) for i in range(len(self.outputs))]
    self.built = True
  def fit(self,
          x=None,
          y=None,
          batch_size=None,
          epochs=1,
          verbose=1,
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0,
          steps_per_epoch=None,
          validation_steps=None,
          **kwargs):
    """Trains the model for a fixed number of epochs (iterations on a dataset).
    Arguments:
        x: Numpy array of training data (if the model has a single input),
            or list of Numpy arrays (if the model has multiple inputs).
            If input layers in the model are named, you can also pass a
            dictionary mapping input names to Numpy arrays.
            `x` can be `None` (default) if feeding from
            TensorFlow data tensors.
        y: Numpy array of target (label) data
            (if the model has a single output),
            or list of Numpy arrays (if the model has multiple outputs).
            If output layers in the model are named, you can also pass a
            dictionary mapping output names to Numpy arrays.
            `y` can be `None` (default) if feeding from
            TensorFlow data tensors.
        batch_size: Integer or `None`.
            Number of samples per gradient update.
            If unspecified, `batch_size` will default to 32.
        epochs: Integer. Number of epochs to train the model.
            An epoch is an iteration over the entire `x` and `y`
            data provided.
            Note that in conjunction with `initial_epoch`,
            `epochs` is to be understood as "final epoch".
            The model is not trained for a number of iterations
            given by `epochs`, but merely until the epoch
            of index `epochs` is reached.
        verbose: Integer. 0, 1, or 2. Verbosity mode.
            0 = silent, 1 = progress bar, 2 = one line per epoch.
        callbacks: List of `keras.callbacks.Callback` instances.
            List of callbacks to apply during training.
            See [callbacks](/callbacks).
        validation_split: Float between 0 and 1.
            Fraction of the training data to be used as validation data.
            The model will set apart this fraction of the training data,
            will not train on it, and will evaluate
            the loss and any model metrics
            on this data at the end of each epoch.
            The validation data is selected from the last samples
            in the `x` and `y` data provided, before shuffling.
        validation_data: tuple `(x_val, y_val)` or tuple
            `(x_val, y_val, val_sample_weights)` on which to evaluate
            the loss and any model metrics at the end of each epoch.
            The model will not be trained on this data.
            `validation_data` will override `validation_split`.
        shuffle: Boolean (whether to shuffle the training data
            before each epoch) or str (for 'batch').
            'batch' is a special option for dealing with the
            limitations of HDF5 data; it shuffles in batch-sized chunks.
            Has no effect when `steps_per_epoch` is not `None`.
        class_weight: Optional dictionary mapping class indices (integers)
            to a weight (float) value, used for weighting the loss function
            (during training only).
            This can be useful to tell the model to
            "pay more attention" to samples from
            an under-represented class.
        sample_weight: Optional Numpy array of weights for
            the training samples, used for weighting the loss function
            (during training only). You can either pass a flat (1D)
            Numpy array with the same length as the input samples
            (1:1 mapping between weights and samples),
            or in the case of temporal data,
            you can pass a 2D array with shape
            `(samples, sequence_length)`,
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify
            `sample_weight_mode="temporal"` in `compile()`.
        initial_epoch: Integer.
            Epoch at which to start training
            (useful for resuming a previous training run).
        steps_per_epoch: Integer or `None`.
            Total number of steps (batches of samples)
            before declaring one epoch finished and starting the
            next epoch. When training with input tensors such as
            TensorFlow data tensors, the default `None` is equal to
            the number of samples in your dataset divided by
            the batch size, or 1 if that cannot be determined.
        validation_steps: Only relevant if `steps_per_epoch`
            is specified. Total number of steps (batches of samples)
            to validate before stopping.
        **kwargs: Used for backwards compatibility.
    Returns:
        A `History` object. Its `History.history` attribute is
        a record of training loss values and metrics values
        at successive epochs, as well as validation loss values
        and validation metrics values (if applicable).
    Raises:
        RuntimeError: If the model was never compiled.
        ValueError: In case of mismatch between the provided input data
            and what the model expects.
    """
    # TODO(fchollet): this method may be creating reference cycles, which would
    # lead to accumulating garbage in memory when called in a loop. Investigate.
    # Backwards compatibility
    if batch_size is None and steps_per_epoch is None:
      batch_size = 32
    # Legacy support
    if 'nb_epoch' in kwargs:
      logging.warning(
          'The `nb_epoch` argument in `fit` '
          'has been renamed `epochs`.')
      epochs = kwargs.pop('nb_epoch')
    if kwargs:
      raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
    if x is None and y is None and steps_per_epoch is None:
      raise ValueError('If fitting from data tensors, '
                       'you should specify the `steps_per_epoch` '
                       'argument.')
    # Validate user data.
    x, y, sample_weights = self._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        class_weight=class_weight,
        batch_size=batch_size)
    # Prepare validation data.
    if validation_data:
      if len(validation_data) == 2:
        val_x, val_y = validation_data  # pylint: disable=unpacking-non-sequence
        val_sample_weight = None
      elif len(validation_data) == 3:
        val_x, val_y, val_sample_weight = validation_data  # pylint: disable=unpacking-non-sequence
      else:
        raise ValueError(
            'When passing validation_data, '
            'it must contain 2 (x_val, y_val) '
            'or 3 (x_val, y_val, val_sample_weights) '
            'items, however it contains %d items' % len(validation_data))
      val_x, val_y, val_sample_weights = self._standardize_user_data(
          val_x,
          val_y,
          sample_weight=val_sample_weight,
          batch_size=batch_size)
    elif validation_split and 0. < validation_split < 1.:
      if hasattr(x[0], 'shape'):
        split_at = int(x[0].shape[0] * (1. - validation_split))
      else:
        split_at = int(len(x[0]) * (1. - validation_split))
      x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
      y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
      sample_weights, val_sample_weights = (slice_arrays(
          sample_weights, 0, split_at), slice_arrays(sample_weights, split_at))
    elif validation_steps:
      val_x = []
      val_y = []
      val_sample_weights = []
    else:
      val_x = None
      val_y = None
      val_sample_weights = None
    if context.executing_eagerly():
      return training_eager.fit_loop(
          self,
          inputs=x,
          targets=y,
          sample_weights=sample_weights,
          batch_size=batch_size,
          epochs=epochs,
          verbose=verbose,
          callbacks=callbacks,
          val_inputs=val_x,
          val_targets=val_y,
          val_sample_weights=val_sample_weights,
          shuffle=shuffle,
          initial_epoch=initial_epoch,
          steps_per_epoch=steps_per_epoch,
          validation_steps=validation_steps)
    else:
      return training_arrays.fit_loop(
          self, x, y,
          sample_weights=sample_weights,
          batch_size=batch_size,
          epochs=epochs,
          verbose=verbose,
          callbacks=callbacks,
          val_inputs=val_x,
          val_targets=val_y,
          val_sample_weights=val_sample_weights,
          shuffle=shuffle,
          initial_epoch=initial_epoch,
          steps_per_epoch=steps_per_epoch,
          validation_steps=validation_steps)
  def evaluate(self,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None):
    """Returns the loss value & metrics values for the model in test mode.
    Computation is done in batches.
    Arguments:
        x: Numpy array of test data (if the model has a single input),
            or list of Numpy arrays (if the model has multiple inputs).
            If input layers in the model are named, you can also pass a
            dictionary mapping input names to Numpy arrays.
            `x` can be `None` (default) if feeding from
            TensorFlow data tensors.
        y: Numpy array of target (label) data
            (if the model has a single output),
            or list of Numpy arrays (if the model has multiple outputs).
            If output layers in the model are named, you can also pass a
            dictionary mapping output names to Numpy arrays.
            `y` can be `None` (default) if feeding from
            TensorFlow data tensors.
        batch_size: Integer or `None`.
            Number of samples per evaluation step.
            If unspecified, `batch_size` will default to 32.
        verbose: 0 or 1. Verbosity mode.
            0 = silent, 1 = progress bar.
        sample_weight: Optional Numpy array of weights for
            the test samples, used for weighting the loss function.
            You can either pass a flat (1D)
            Numpy array with the same length as the input samples
            (1:1 mapping between weights and samples),
            or in the case of temporal data,
            you can pass a 2D array with shape
            `(samples, sequence_length)`,
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify
            `sample_weight_mode="temporal"` in `compile()`.
        steps: Integer or `None`.
            Total number of steps (batches of samples)
            before declaring the evaluation round finished.
            Ignored with the default value of `None`.
    Returns:
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    Raises:
        ValueError: in case of invalid arguments.
    """
    # Backwards compatibility.
    if batch_size is None and steps is None:
      batch_size = 32
    if x is None and y is None and steps is None:
      raise ValueError('If evaluating from data tensors, '
                       'you should specify the `steps` '
                       'argument.')
    # Validate user data.
    x, y, sample_weights = self._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        batch_size=batch_size)
    if context.executing_eagerly():
      return training_eager.test_loop(
          self, inputs=x, targets=y, sample_weights=sample_weights,
          batch_size=batch_size, verbose=verbose, steps=steps)
    else:
      return training_arrays.test_loop(
          self, inputs=x, targets=y, sample_weights=sample_weights,
          batch_size=batch_size, verbose=verbose, steps=steps)
  def predict(self, x, batch_size=None, verbose=0, steps=None):
    """Generates output predictions for the input samples.
    Computation is done in batches.
    Arguments:
        x: The input data, as a Numpy array
            (or list of Numpy arrays if the model has multiple inputs).
        batch_size: Integer. If unspecified, it will default to 32.
        verbose: Verbosity mode, 0 or 1.
        steps: Total number of steps (batches of samples)
            before declaring the prediction round finished.
            Ignored with the default value of `None`.
    Returns:
        Numpy array(s) of predictions.
    Raises:
        ValueError: In case of mismatch between the provided
            input data and the model's expectations,
            or in case a stateful model receives a number of samples
            that is not a multiple of the batch size.
    """
    # Backwards compatibility.
    if batch_size is None and steps is None:
      batch_size = 32
    if x is None and steps is None:
      raise ValueError('If predicting from data tensors, '
                       'you should specify the `steps` '
                       'argument.')
    x, _, _ = self._standardize_user_data(x)
    if context.executing_eagerly():
      return training_eager.predict_loop(
          self, x, batch_size=batch_size, verbose=verbose, steps=steps)
    else:
      return training_arrays.predict_loop(
          self, x, batch_size=batch_size, verbose=verbose, steps=steps)
  def train_on_batch(self, x, y, sample_weight=None, class_weight=None):
    """Runs a single gradient update on a single batch of data.
    Arguments:
        x: Numpy array of training data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named,
            you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named,
            you can also pass a dictionary
            mapping output names to Numpy arrays.
        sample_weight: Optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            In the case of temporal data, you can pass a 2D array
            with shape (samples, sequence_length),
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify
            sample_weight_mode="temporal" in compile().
        class_weight: Optional dictionary mapping
            class indices (integers) to
            a weight (float) to apply to the model's loss for the samples
            from this class during training.
            This can be useful to tell the model to "pay more attention" to
            samples from an under-represented class.
    Returns:
        Scalar training loss
        (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
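    Example:
        A minimal usage sketch (the model and the single batch of random
        data below are illustrative assumptions):
    ```python
        import numpy as np
        x_batch = np.random.random((32, 10))
        y_batch = np.random.randint(2, size=(32, 1))
        loss = model.train_on_batch(x_batch, y_batch)
    ```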
    Raises:
      ValueError: In case of invalid user-provided arguments.
    """
    x, y, sample_weights = self._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        class_weight=class_weight)
    if context.executing_eagerly():
      outputs = training_eager.train_on_batch(
          self, x, y, sample_weights=sample_weights)
    else:
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        ins = x + y + sample_weights + [1]
      else:
        ins = x + y + sample_weights
      self._make_train_function()
      outputs = self.train_function(ins)
    if len(outputs) == 1:
      return outputs[0]
    return outputs
  def test_on_batch(self, x, y, sample_weight=None):
    """Test the model on a single batch of samples.
    Arguments:
        x: Numpy array of test data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named,
            you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named,
            you can also pass a dictionary
            mapping output names to Numpy arrays.
        sample_weight: Optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            In the case of temporal data, you can pass a 2D array
            with shape (samples, sequence_length),
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify
            sample_weight_mode="temporal" in compile().
    Returns:
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
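    Example:
        A minimal usage sketch (the model and the random batch below are
        illustrative assumptions; no weight update is performed):
    ```python
        import numpy as np
        x_batch = np.random.random((32, 10))
        y_batch = np.random.randint(2, size=(32, 1))
        loss = model.test_on_batch(x_batch, y_batch)
    ```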
    Raises:
        ValueError: In case of invalid user-provided arguments.
    """
    x, y, sample_weights = self._standardize_user_data(
        x, y, sample_weight=sample_weight)
    if context.executing_eagerly():
      outputs = training_eager.test_on_batch(
          self, x, y, sample_weights=sample_weights)
    else:
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        ins = x + y + sample_weights + [0]
      else:
        ins = x + y + sample_weights
      self._make_test_function()
      outputs = self.test_function(ins)
    if len(outputs) == 1:
      return outputs[0]
    return outputs
  def predict_on_batch(self, x):
    """Returns predictions for a single batch of samples.
    Arguments:
        x: Input samples, as a Numpy array.
    Returns:
        Numpy array(s) of predictions.
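    Example:
        A minimal usage sketch (the model and the random batch below are
        illustrative assumptions):
    ```python
        import numpy as np
        x_batch = np.random.random((8, 10))
        preds = model.predict_on_batch(x_batch)
    ```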
    """
    x, _, _ = self._standardize_user_data(x)
    if context.executing_eagerly():
      inputs = [ops.convert_to_tensor(val, dtype=K.floatx()) for val in x]
      return self(inputs)  # pylint: disable=not-callable
    if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
      ins = x + [0]
    else:
      ins = x
    self._make_predict_function()
    outputs = self.predict_function(ins)
    if len(outputs) == 1:
      return outputs[0]
    return outputs
  def fit_generator(self,
                    generator,
                    steps_per_epoch=None,
                    epochs=1,
                    verbose=1,
                    callbacks=None,
                    validation_data=None,
                    validation_steps=None,
                    class_weight=None,
                    max_queue_size=10,
                    workers=1,
                    use_multiprocessing=False,
                    shuffle=True,
                    initial_epoch=0):
    """Fits the model on data yielded batch-by-batch by a Python generator.
    The generator is run in parallel to the model, for efficiency.
    For instance, this allows you to do real-time data augmentation
    on images on CPU in parallel to training your model on GPU.
    Using `keras.utils.Sequence` guarantees the ordering of the batches
    and guarantees that every input is used exactly once per epoch when
    `use_multiprocessing=True`.
    Arguments:
        generator: A generator or an instance of `Sequence`
          (`keras.utils.Sequence`)
            object in order to avoid duplicate data
            when using multiprocessing.
            The output of the generator must be either
            - a tuple `(inputs, targets)`
            - a tuple `(inputs, targets, sample_weights)`.
            This tuple (a single output of the generator) makes a single batch.
            Therefore, all arrays in this tuple must have the same length
            (equal to the size of this batch). Different batches may have
            different sizes. For example, the last batch of the epoch is
            commonly smaller than the others, if the size of the dataset is
            not divisible by the batch size.
            The generator is expected to loop over its data
            indefinitely. An epoch finishes when `steps_per_epoch`
            batches have been seen by the model.
        steps_per_epoch: Total number of steps (batches of samples)
            to yield from `generator` before declaring one epoch
            finished and starting the next epoch. It should typically
            be equal to the number of samples of your dataset
            divided by the batch size.
            Optional for `Sequence`: if unspecified, will use
            the `len(generator)` as a number of steps.
        epochs: Integer, total number of epochs (complete passes over the
            data) to train the model for.
        verbose: Verbosity mode, 0, 1, or 2.
        callbacks: List of callbacks to be called during training.
        validation_data: This can be either
            - a generator for the validation data
            - a tuple (inputs, targets)
            - a tuple (inputs, targets, sample_weights).
        validation_steps: Only relevant if `validation_data`
            is a generator. Total number of steps (batches of samples)
            to yield from `generator` before stopping.
            Optional for `Sequence`: if unspecified, will use
            the `len(validation_data)` as a number of steps.
        class_weight: Dictionary mapping class indices to a weight
            for the class.
        max_queue_size: Integer. Maximum size for the generator queue.
            If unspecified, `max_queue_size` will default to 10.
        workers: Integer. Maximum number of processes to spin up
            when using process-based threading.
            If unspecified, `workers` will default to 1. If 0, will
            execute the generator on the main thread.
        use_multiprocessing: Boolean.
            If `True`, use process-based threading.
            If unspecified, `use_multiprocessing` will default to `False`.
            Note that because this implementation relies on multiprocessing,
            you should not pass non-picklable arguments to the generator
            as they can't be passed easily to children processes.
        shuffle: Boolean. Whether to shuffle the order of the batches at
            the beginning of each epoch. Only used with instances
            of `Sequence` (`keras.utils.Sequence`).
            Has no effect when `steps_per_epoch` is not `None`.
        initial_epoch: Epoch at which to start training
            (useful for resuming a previous training run)
    Returns:
        A `History` object.
    Example:
    ```python
        def generate_arrays_from_file(path):
            while 1:
                f = open(path)
                for line in f:
                    # create numpy arrays of input data
                    # and labels, from each line in the file
                    x1, x2, y = process_line(line)
                    yield ({'input_1': x1, 'input_2': x2}, {'output': y})
                f.close()
        model.fit_generator(generate_arrays_from_file('/my_file.txt'),
                            steps_per_epoch=10000, epochs=10)
    ```
    Raises:
        ValueError: In case the generator yields
            data in an invalid format.
    """
    if not self.built and not self._is_graph_network:
      raise NotImplementedError(
          '`fit_generator` is not yet enabled for unbuilt Model subclasses')
    return training_generator.fit_generator(
        self,
        generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        validation_data=validation_data,
        validation_steps=validation_steps,
        class_weight=class_weight,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing,
        shuffle=shuffle,
        initial_epoch=initial_epoch)
  def evaluate_generator(self,
                         generator,
                         steps=None,
                         max_queue_size=10,
                         workers=1,
                         use_multiprocessing=False):
    """Evaluates the model on a data generator.
    The generator should return the same kind of data
    as accepted by `test_on_batch`.
    Arguments:
        generator: Generator yielding tuples (inputs, targets)
            or (inputs, targets, sample_weights)
            or an instance of Sequence (keras.utils.Sequence)
            object in order to avoid duplicate data
            when using multiprocessing.
        steps: Total number of steps (batches of samples)
            to yield from `generator` before stopping.
            Optional for `Sequence`: if unspecified, will use
            the `len(generator)` as a number of steps.
        max_queue_size: maximum size for the generator queue
        workers: Integer. Maximum number of processes to spin up
            when using process-based threading.
            If unspecified, `workers` will default to 1. If 0, will
            execute the generator on the main thread.
        use_multiprocessing: Boolean.
            If `True`, use process-based threading.
            If unspecified, `use_multiprocessing` will default to `False`.
            Note that because this implementation relies on multiprocessing,
            you should not pass non-picklable arguments to the generator
            as they can't be passed easily to children processes.
    Returns:
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
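    Example:
        A minimal usage sketch (`batch_generator` is an assumed generator
        yielding `(inputs, targets)` tuples, not part of this API):
    ```python
        scores = model.evaluate_generator(batch_generator(), steps=100)
    ```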
    Raises:
        ValueError: In case of invalid arguments, or in case the generator
            yields data in an invalid format.
    """
    if not self.built and not self._is_graph_network:
      raise NotImplementedError(
          '`evaluate_generator` is not yet enabled for '
          'unbuilt Model subclasses')
    return training_generator.evaluate_generator(
        self,
        generator,
        steps=steps,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing)
  def predict_generator(self,
                        generator,
                        steps=None,
                        max_queue_size=10,
                        workers=1,
                        use_multiprocessing=False,
                        verbose=0):
    """Generates predictions for the input samples from a data generator.
    The generator should return the same kind of data as accepted by
    `predict_on_batch`.
    Arguments:
        generator: Generator yielding batches of input samples
            or an instance of Sequence (keras.utils.Sequence)
            object in order to avoid duplicate data
            when using multiprocessing.
        steps: Total number of steps (batches of samples)
            to yield from `generator` before stopping.
            Optional for `Sequence`: if unspecified, will use
            the `len(generator)` as a number of steps.
        max_queue_size: Maximum size for the generator queue.
        workers: Integer. Maximum number of processes to spin up
            when using process-based threading.
            If unspecified, `workers` will default to 1. If 0, will
            execute the generator on the main thread.
        use_multiprocessing: Boolean.
            If `True`, use process-based threading.
            If unspecified, `use_multiprocessing` will default to `False`.
            Note that because this implementation relies on multiprocessing,
            you should not pass non-picklable arguments to the generator
            as they can't be passed easily to children processes.
        verbose: verbosity mode, 0 or 1.
    Returns:
        Numpy array(s) of predictions.
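    Example:
        A minimal usage sketch (`sample_generator` is an assumed generator
        yielding batches of input samples):
    ```python
        predictions = model.predict_generator(sample_generator(), steps=50)
    ```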
    Raises:
        ValueError: In case the generator yields
            data in an invalid format.
    """
    if not self.built and not self._is_graph_network:
      raise NotImplementedError(
          '`predict_generator` is not yet enabled for unbuilt Model subclasses')
    return training_generator.predict_generator(
        self,
        generator,
        steps=steps,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing,
        verbose=verbose)
 | 
	apache-2.0 | 
| 
	awkspace/ansible | 
	lib/ansible/plugins/callback/slack.py | 
	40 | 
	8260 | 
	# (C) 2014-2015, Matt Martz <[email protected]>
# (C) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
    callback: slack
    callback_type: notification
    requirements:
      - whitelist in configuration
      - prettytable (python library)
    short_description: Sends play events to a Slack channel
    version_added: "2.1"
    description:
        - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
        - Before 2.4 only environment variables were available for configuring this plugin
    options:
      webhook_url:
        required: True
        description: Slack Webhook URL
        env:
          - name: SLACK_WEBHOOK_URL
        ini:
          - section: callback_slack
            key: webhook_url
      channel:
        default: "#ansible"
        description: Slack room to post in.
        env:
          - name: SLACK_CHANNEL
        ini:
          - section: callback_slack
            key: channel
      username:
        description: Username to post as.
        env:
          - name: SLACK_USERNAME
        default: ansible
        ini:
          - section: callback_slack
            key: username
      validate_certs:
        description: validate the SSL certificate of the Slack server. (For HTTPS URLs)
        version_added: "2.8"
        env:
          - name: SLACK_VALIDATE_CERTS
        ini:
          - section: callback_slack
            key: validate_certs
        default: True
        type: bool
'''
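# A minimal example of enabling and configuring this callback through
# ansible.cfg (the webhook URL below is a placeholder; every option can
# alternatively be supplied via the environment variables listed above):
#
#   [defaults]
#   callback_whitelist = slack
#
#   [callback_slack]
#   webhook_url = https://hooks.slack.com/services/T000/B000/XXXXXXXX
#   channel = #ansible
#   username = ansible
#   validate_certs = True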
import json
import os
import uuid
from ansible import context
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import open_url
from ansible.plugins.callback import CallbackBase
try:
    import prettytable
    HAS_PRETTYTABLE = True
except ImportError:
    HAS_PRETTYTABLE = False
class CallbackModule(CallbackBase):
    """This is an ansible callback plugin that sends status
    updates to a Slack channel during playbook execution.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'slack'
    CALLBACK_NEEDS_WHITELIST = True
    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        if not HAS_PRETTYTABLE:
            self.disabled = True
            self._display.warning('The `prettytable` python module is not '
                                  'installed. Disabling the Slack callback '
                                  'plugin.')
        self.playbook_name = None
        # This is a 6 character identifier provided with each message.
        # It makes it easier to correlate messages when there is more
        # than one playbook running simultaneously.
        self.guid = uuid.uuid4().hex[:6]
    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
        self.webhook_url = self.get_option('webhook_url')
        self.channel = self.get_option('channel')
        self.username = self.get_option('username')
        self.show_invocation = (self._display.verbosity > 1)
        self.validate_certs = self.get_option('validate_certs')
        if self.webhook_url is None:
            self.disabled = True
            self._display.warning('Slack Webhook URL was not provided. The '
                                  'Slack Webhook URL can be provided using '
                                  'the `SLACK_WEBHOOK_URL` environment '
                                  'variable.')
    def send_msg(self, attachments):
        headers = {
            'Content-type': 'application/json',
        }
        payload = {
            'channel': self.channel,
            'username': self.username,
            'attachments': attachments,
            'parse': 'none',
            'icon_url': ('http://cdn2.hubspot.net/hub/330046/'
                         'file-449187601-png/ansible_badge.png'),
        }
        data = json.dumps(payload)
        self._display.debug(data)
        self._display.debug(self.webhook_url)
        try:
            response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs,
                                headers=headers)
            return response.read()
        except Exception as e:
            self._display.warning(u'Could not submit message to Slack: %s' %
                                  to_text(e))
    def v2_playbook_on_start(self, playbook):
        self.playbook_name = os.path.basename(playbook._file_name)
        title = [
            '*Playbook initiated* (_%s_)' % self.guid
        ]
        invocation_items = []
        if context.CLIARGS and self.show_invocation:
            tags = context.CLIARGS['tags']
            skip_tags = context.CLIARGS['skip_tags']
            extra_vars = context.CLIARGS['extra_vars']
            subset = context.CLIARGS['subset']
            inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']]
            invocation_items.append('Inventory:  %s' % ', '.join(inventory))
            if tags and tags != ['all']:
                invocation_items.append('Tags:       %s' % ', '.join(tags))
            if skip_tags:
                invocation_items.append('Skip Tags:  %s' % ', '.join(skip_tags))
            if subset:
                invocation_items.append('Limit:      %s' % subset)
            if extra_vars:
                invocation_items.append('Extra Vars: %s' %
                                        ' '.join(extra_vars))
            title.append('by *%s*' % context.CLIARGS['remote_user'])
        title.append('\n\n*%s*' % self.playbook_name)
        msg_items = [' '.join(title)]
        if invocation_items:
            msg_items.append('```\n%s\n```' % '\n'.join(invocation_items))
        msg = '\n'.join(msg_items)
        attachments = [{
            'fallback': msg,
            'fields': [
                {
                    'value': msg
                }
            ],
            'color': 'warning',
            'mrkdwn_in': ['text', 'fallback', 'fields'],
        }]
        self.send_msg(attachments=attachments)
    def v2_playbook_on_play_start(self, play):
        """Display Play start messages"""
        name = play.name or 'Play name not specified (%s)' % play._uuid
        msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name)
        attachments = [
            {
                'fallback': msg,
                'text': msg,
                'color': 'warning',
                'mrkdwn_in': ['text', 'fallback', 'fields'],
            }
        ]
        self.send_msg(attachments=attachments)
    def v2_playbook_on_stats(self, stats):
        """Display info about playbook statistics"""
        hosts = sorted(stats.processed.keys())
        t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
                                     'Failures', 'Rescued', 'Ignored'])
        failures = False
        unreachable = False
        for h in hosts:
            s = stats.summarize(h)
            if s['failures'] > 0:
                failures = True
            if s['unreachable'] > 0:
                unreachable = True
            t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
                                            'failures', 'rescued', 'ignored']])
        attachments = []
        msg_items = [
            '*Playbook Complete* (_%s_)' % self.guid
        ]
        if failures or unreachable:
            color = 'danger'
            msg_items.append('\n*Failed!*')
        else:
            color = 'good'
            msg_items.append('\n*Success!*')
        msg_items.append('```\n%s\n```' % t)
        msg = '\n'.join(msg_items)
        attachments.append({
            'fallback': msg,
            'fields': [
                {
                    'value': msg
                }
            ],
            'color': color,
            'mrkdwn_in': ['text', 'fallback', 'fields']
        })
        self.send_msg(attachments=attachments)
 | 
	gpl-3.0 | 
| 
	annahs/atmos_research | 
	WHI_long_term_2min_data_to_db.py | 
	1 | 
	8596 | 
	import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import calendar
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import dates
                   
start = datetime(2009,7,15,4)  #2009 - 20090628  2010 - 20100610   2012 - 20100405
end =   datetime(2009,8,17)  #2009 - 20090816  2010 - 20100726   2012 - 20100601
timestep = 6.#1./30 #hours
sample_min = 117  #117 for all 2009-2012
sample_max = 123  #123 for all 2009-2012
yag_min = 3.8  #3.8 for all 2009-2012
yag_max = 6	 #6  for all 2009-2012
BC_VED_min = 70
BC_VED_max = 220
min_scat_pkht = 20
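#convert the rBC VED size limits (nm) to single-particle mass limits (fg):
#mass = (pi/6)*d^3*rho with d = VED/1e7 (nm to cm) and rho = 1.8 g/cm3, then *1e15 to go from g to fg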
mass_min = ((BC_VED_min/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
mass_max = ((BC_VED_max/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
lag_threshold_2009 = 0.1
lag_threshold_2010 = 0.25
lag_threshold_2012 = 1.5
print 'mass limits', mass_min, mass_max
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
 
def check_spike_times(particle_start_time,particle_end_time):
	cursor.execute('''SELECT count(*)
					  FROM whi_spike_times_2009to2012
					  WHERE (spike_start_UTC <= %s AND spike_end_UTC > %s)
					  OR (spike_start_UTC <= %s AND spike_end_UTC > %s)
					  ''',
					  (particle_start_time,particle_start_time,particle_end_time,particle_end_time))
		
	spike_count = cursor.fetchall()[0][0]
	return spike_count
	
def get_hysplit_id(particle_start_time):
	cursor.execute('''SELECT id
					  FROM whi_hysplit_hourly_data
					  WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
					  ''',
					  (particle_start_time,particle_start_time))
		
	hy_id_list = cursor.fetchall()
	if hy_id_list == []:
		hy_id = None
	else:
		hy_id = hy_id_list[0][0]
	return hy_id
	
def get_met_info(particle_start_time):
	cursor.execute('''SELECT id,pressure_Pa,room_temp_C
					  FROM whi_sampling_conditions
					  WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
					  ''',
					  (particle_start_time,particle_start_time))
		
	met_list = cursor.fetchall()
	if met_list == []:
		met_list = [[np.nan,np.nan,np.nan]]
	
	return met_list[0]
	
def get_gc_id(particle_start_time):
	cursor.execute('''SELECT id
					  FROM whi_gc_hourly_bc_data
					  WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
					  ''',
					  (particle_start_time,particle_start_time))
		
	gc_id_list = cursor.fetchall()
	if gc_id_list == []:
		gc_id = None
	else:
		gc_id = gc_id_list[0][0]
	return gc_id
	
	
def get_sample_factor(UNIX_start):
	date_time = datetime.utcfromtimestamp(UNIX_start)
	
	sample_factors_2012 = [
		[datetime(2012,4,4,19,43,4), datetime(2012,4,5,13,47,9),   3.0],
		[datetime(2012,4,5,13,47,9), datetime(2012,4,10,3,3,25),   1.0],
		[datetime(2012,4,10,3,3,25), datetime(2012,5,16,6,9,13),   3.0],
		[datetime(2012,5,16,6,9,13), datetime(2012,6,7,18,14,39), 10.0],
		]
	
	if date_time.year in [2009,2010]:
		sample_factor = 1.0
	if date_time.year == 2012:
		for date_range in sample_factors_2012:
			start_date = date_range[0]
			end_date = date_range[1]
			range_sample_factor = date_range[2]
			if start_date<= date_time < end_date:
				sample_factor = range_sample_factor
				
	return sample_factor
	
def lag_time_calc(BB_incand_pk_pos,BB_scat_pk_pos):
	long_lags = 0
	short_lags = 0
	#lag time between incandescence and scattering peaks, 0.2us per sample point
	lag_time = (BB_incand_pk_pos-BB_scat_pk_pos)*0.2   #us
	
	if (-10 < lag_time < 10):
		if start.year == 2009 and lag_time > lag_threshold_2009:
			long_lags = 1
		elif start.year == 2010 and lag_time > lag_threshold_2010:
			long_lags = 1
		elif start.year == 2012 and lag_time > lag_threshold_2012:
			long_lags = 1
		else:
			short_lags = 1
	else:
		lag_time = np.nan
		
	return [lag_time,long_lags,short_lags]
	
#query to add 2min mass conc data
add_data = ('''INSERT INTO whi_sp2_2min_data
			  (UNIX_UTC_start_time,UNIX_UTC_end_time,number_particles,rBC_mass_conc,rBC_mass_conc_err,volume_air_sampled,sampling_duration,mean_lag_time,sample_factor,hysplit_hourly_id,whi_sampling_cond_id,gc_hourly_id)
			  VALUES (%(UNIX_UTC_start_time)s,%(UNIX_UTC_end_time)s,%(number_particles)s,%(rBC_mass_conc)s,%(rBC_mass_conc_err)s,%(volume_air_sampled)s,%(sampling_duration)s,%(mean_lag_time)s,%(sample_factor)s,%(hysplit_hourly_id)s,%(whi_sampling_cond_id)s,%(gc_hourly_id)s)'''
			  )
	
#
multiple_records = []
i=1
while start <= end:
	long_lags = 0
	short_lags = 0
	if (4 <= start.hour < 16):
		UNIX_start = calendar.timegm(start.utctimetuple())
		UNIX_end = UNIX_start + timestep*3600.0
		print start, UNIX_start+60
		print datetime.utcfromtimestamp(UNIX_end)
		#filter on hk data here
		cursor.execute('''(SELECT 
		mn.UNIX_UTC_ts_int_start,
		mn.UNIX_UTC_ts_int_end,
		mn.rBC_mass_fg_BBHG,
		mn.rBC_mass_fg_BBHG_err,
		mn.BB_incand_pk_pos,
		mn.BB_scat_pk_pos,
		mn.BB_scat_pkht,
		hk.sample_flow,
		mn.BB_incand_HG
		FROM whi_sp2_particle_data mn
		FORCE INDEX (hourly_binning)
		JOIN whi_hk_data hk on mn.HK_id = hk.id
		WHERE
		mn.UNIX_UTC_ts_int_start >= %s
		AND mn.UNIX_UTC_ts_int_end < %s
		AND hk.sample_flow >= %s
		AND hk.sample_flow < %s
		AND hk.yag_power >= %s
		AND hk.yag_power < %s)''',
		(UNIX_start,UNIX_end,sample_min,sample_max,yag_min,yag_max))
		
		ind_data = cursor.fetchall()
		data={
		'rBC_mass_fg':[],
		'rBC_mass_fg_err':[],
		'lag_time':[]
		}
		
		total_sample_vol = 0
		for row in ind_data:
			ind_start_time = float(row[0])
			ind_end_time = float(row[1])
			bbhg_mass_corr11 = float(row[2])
			bbhg_mass_corr_err = float(row[3])
			BB_incand_pk_pos = float(row[4])
			BB_scat_pk_pos = float(row[5])	
			BB_scat_pk_ht = float(row[6])	
			sample_flow = float(row[7])  #in vccm
			incand_pkht = float(row[8])  
			
			#filter spike times here 
			if check_spike_times(ind_start_time,ind_end_time):
				print 'spike'
				continue
			
			#skip the long interval
			if (ind_end_time - ind_start_time) > 540:
				print 'long interval'
				continue
					
			#skip if no sample flow
			if sample_flow == None:
				print 'no flow'
				continue
			
			
			#get sampling conditions id and met conditions
			met_data = get_met_info(UNIX_start)
			met_id =  met_data[0]
			pressure = met_data[1]
			temperature = met_data[2]+273.15		
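			#ideal gas correction of the sampled volume to STP: V_STP = V*(P/101325 Pa)*(273 K/T)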
			correction_factor_for_STP = (273*pressure)/(101325*temperature)
			
			sample_vol =  (sample_flow*(ind_end_time-ind_start_time)/60)*correction_factor_for_STP   #/60 b/c sccm and time in secs  
			total_sample_vol = total_sample_vol + sample_vol
			
			bbhg_mass_corr = 0.01244+0.0172*incand_pkht
			
			if (mass_min <= bbhg_mass_corr < mass_max):
				#get sample factor
				sample_factor = get_sample_factor(UNIX_start)
				
				data['rBC_mass_fg'].append(bbhg_mass_corr*sample_factor)  
				data['rBC_mass_fg_err'].append(bbhg_mass_corr_err)  
				#only calc lag time if there is a scattering signal
				if BB_scat_pk_ht > min_scat_pkht:
					lags = lag_time_calc(BB_incand_pk_pos,BB_scat_pk_pos)
					data['lag_time'].append(lags[0]) 
					long_lags += lags[1]
					short_lags += lags[2]
			
				
									
		tot_rBC_mass_fg =  sum(data['rBC_mass_fg'])
		tot_rBC_mass_uncer =  sum(data['rBC_mass_fg_err'])
		rBC_number =  len(data['rBC_mass_fg'])
		mean_lag =  float(np.mean(data['lag_time']))
		if np.isnan(mean_lag):
			mean_lag = None
		
		
		#get hysplit_id
		hysplit_id = None #get_hysplit_id(UNIX_start)
		
		#get GC id 
		gc_id = None #get_gc_id(UNIX_start)
		
		
		if total_sample_vol != 0:
			mass_conc = (tot_rBC_mass_fg/total_sample_vol)
			mass_conc_uncer = (tot_rBC_mass_uncer/total_sample_vol)
			
			#add to db
			single_record = {
				'UNIX_UTC_start_time'	:UNIX_start,
				'UNIX_UTC_end_time'     :UNIX_end,
				'number_particles'      :rBC_number,
				'rBC_mass_conc'         :mass_conc,
				'rBC_mass_conc_err'     :mass_conc_uncer,
				'volume_air_sampled'    :total_sample_vol,
				'sampling_duration'     :(total_sample_vol/2),
				'mean_lag_time'         :mean_lag,
				'number_long_lag'       :long_lags,
				'number_short_lag'      :short_lags,
				'sample_factor'         :sample_factor,
				'hysplit_hourly_id'     :hysplit_id,
				'whi_sampling_cond_id'  :met_id,
				'gc_hourly_id'          :gc_id,
			}
			multiple_records.append((single_record))
		
		#bulk insert to db table
		if i%1 == 0:
			cursor.executemany(add_data, multiple_records)
			cnx.commit()
			multiple_records = []
		#increment count 
		i+= 1
		
	start += timedelta(hours = timestep)
	
#bulk insert of remaining records to db
if multiple_records != []:
	cursor.executemany(add_data, multiple_records)
	cnx.commit()
	multiple_records = []	
	
	
	
	
cnx.close()
 | 
	mit | 
| 
	turicas/outputty | 
	tests/test_Table_html.py | 
	2 | 
	5790 | 
	#!/usr/bin/env python
# coding: utf-8
# Copyright 2011 Álvaro Justen
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
import unittest
import tempfile
import os
from textwrap import dedent
from outputty import Table
class TestTableHtml(unittest.TestCase):
    def test_to_html_without_parameters_should_return_string(self):
        my_table = Table(headers=['ham', 'spam', 'eggs'])
        self.assertTrue(isinstance(my_table.write('html'), str))
    def test_to_html_with_only_headers(self):
        my_table = Table(headers=['ham', 'spam', 'eggs', 'blah'])
        output = my_table.write('html', css_classes=False)
        expected = dedent('''
        <table>
          <thead>
            <tr>
              <th>ham</th>
              <th>spam</th>
              <th>eggs</th>
              <th>blah</th>
            </tr>
          </thead>
        </table>
        ''').strip()
        self.assertEquals(output, expected)
    def test_to_html_with_headers_and_some_rows(self):
        my_table = Table(headers=['ham', 'spam', 'eggs'])
        my_table.append(['python', 'rules', '!'])
        my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'})
        output = my_table.write('html', css_classes=False)
        expected = dedent('''
        <table>
          <thead>
            <tr>
              <th>ham</th>
              <th>spam</th>
              <th>eggs</th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td>python</td>
              <td>rules</td>
              <td>!</td>
            </tr>
            <tr>
              <td>spam</td>
              <td>eggs</td>
              <td>ham</td>
            </tr>
          </tbody>
        </table>
        ''').strip()
        self.assertEquals(output, expected)
    def test_to_html_with_headers_and_rows_with_some_columns_empty(self):
        my_table = Table(headers=['ham', 'spam', 'eggs'])
        my_table.append({'ham': 'spam'})
        my_table.append({'spam': 'eggs'})
        my_table.append({'eggs': 'ham'})
        output = my_table.write('html', css_classes=False)
        expected = dedent('''
        <table>
          <thead>
            <tr>
              <th>ham</th>
              <th>spam</th>
              <th>eggs</th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td>spam</td>
              <td></td>
              <td></td>
            </tr>
            <tr>
              <td></td>
              <td>eggs</td>
              <td></td>
            </tr>
            <tr>
              <td></td>
              <td></td>
              <td>ham</td>
            </tr>
          </tbody>
        </table>
        ''').strip()
        self.assertEquals(output, expected)
    def test_to_html_with_a_parameter_should_save_a_file(self):
        temp_fp = tempfile.NamedTemporaryFile(delete=False)
        temp_fp.close()
        my_table = Table(headers=['ham', 'spam', 'eggs'])
        my_table.append(['python', 'rules', '!'])
        my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'})
        my_table.write('html', temp_fp.name, css_classes=False)
        temp_fp = open(temp_fp.name)
        output = temp_fp.read()
        temp_fp.close()
        os.remove(temp_fp.name)
        expected = dedent('''
        <table>
          <thead>
            <tr>
              <th>ham</th>
              <th>spam</th>
              <th>eggs</th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td>python</td>
              <td>rules</td>
              <td>!</td>
            </tr>
            <tr>
              <td>spam</td>
              <td>eggs</td>
              <td>ham</td>
            </tr>
          </tbody>
        </table>
        ''').strip()
        self.assertEquals(output, expected)
    def test_to_html_should_create_CSS_classes_for_odd_and_even_rows(self):
        my_table = Table(headers=['ham', 'spam', 'eggs'])
        my_table.append(['python', 'rules', '!'])
        my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'})
        my_table.append(['python', 'rules', '!'])
        my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'})
        output = my_table.write('html', css_classes=True)
        expected = dedent('''
        <table>
          <thead>
            <tr class="header">
              <th>ham</th>
              <th>spam</th>
              <th>eggs</th>
            </tr>
          </thead>
          <tbody>
            <tr class="odd">
              <td>python</td>
              <td>rules</td>
              <td>!</td>
            </tr>
            <tr class="even">
              <td>spam</td>
              <td>eggs</td>
              <td>ham</td>
            </tr>
            <tr class="odd">
              <td>python</td>
              <td>rules</td>
              <td>!</td>
            </tr>
            <tr class="even">
              <td>spam</td>
              <td>eggs</td>
              <td>ham</td>
            </tr>
          </tbody>
        </table>
        ''').strip()
        self.assertEquals(output, expected)
    #TODO: test input and output encoding
 | 
	gpl-3.0 | 
| 
	varunarya10/tempest | 
	tempest/tests/fake_http.py | 
	42 | 
	2411 | 
	# Copyright 2013 IBM Corp.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
import copy
import httplib2
class fake_httplib2(object):
    def __init__(self, return_type=None, *args, **kwargs):
        self.return_type = return_type
    def request(self, uri, method="GET", body=None, headers=None,
                redirections=5, connection_type=None):
        if not self.return_type:
            fake_headers = httplib2.Response(headers)
            return_obj = {
                'uri': uri,
                'method': method,
                'body': body,
                'headers': headers
            }
            return (fake_headers, return_obj)
        elif isinstance(self.return_type, int):
            body = "fake_body"
            header_info = {
                'content-type': 'text/plain',
                'status': str(self.return_type),
                'content-length': len(body)
            }
            resp_header = httplib2.Response(header_info)
            return (resp_header, body)
        else:
            msg = "unsupported return type %s" % self.return_type
            raise TypeError(msg)
class fake_httplib(object):
    def __init__(self, headers, body=None,
                 version=1.0, status=200, reason="Ok"):
        """
        :param headers: dict representing HTTP response headers
        :param body: file-like object
        :param version: HTTP Version
        :param status: Response status code
        :param reason: Status code related message.
        """
        self.body = body
        self.status = status
        self.reason = reason
        self.version = version
        self.headers = headers
    def getheaders(self):
        return copy.deepcopy(self.headers).items()
    def getheader(self, key, default):
        return self.headers.get(key, default)
    def read(self, amt):
        return self.body.read(amt)
 | 
	apache-2.0 | 
| 
	Saicheg/omim | 
	3party/Alohalytics/tests/googletest/xcode/Scripts/versiongenerate.py | 
	3088 | 
	4536 | 
	#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
  This script extracts the version information from the configure.ac file and
  uses it to generate a header file containing the same information. The
  #defines in this header file will be included in during the generation of
  the Info.plist of the framework, giving the correct value to the version
  shown in the Finder.
  This script makes the following assumptions (these are faults of the script,
  not problems with the Autoconf):
    1. The AC_INIT macro will be contained within the first 1024 characters
       of configure.ac
    2. The version string will be 3 integers separated by periods and will be
       surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
       segment represents the major version, the second represents the minor
       version and the third represents the fix version.
    3. No ")" character exists between the opening "(" and closing ")" of
       AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
  print "Usage: versiongenerate.py input_dir output_dir"
  sys.exit(1)
else:
  input_dir = sys.argv[1]
  output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
#   The following init_expression means:
#     Extract three integers separated by periods and surrounded by square
#     brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
#     (*? is the non-greedy flag) since that would pull in everything between
#     the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
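# For example, an AC_INIT line such as
#   AC_INIT([Google C++ Testing Framework], [1.6.0], ...)
# (the exact project name and version are illustrative) yields the
# groups "1", "6" and "0".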
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
 | 
	apache-2.0 | 
| 
	inviwo/inviwo | 
	data/scripts/matplotlib_create_transferfunction.py | 
	2 | 
	1270 | 
	# Inviwo Python script 
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import inviwopy
from inviwopy.glm import vec2,vec3,vec4
#http://matplotlib.org/examples/color/colormaps_reference.html
#Perceptually Uniform Sequential :  #['viridis', 'inferno', 'plasma', 'magma'] 
#Sequential  :  #['Blues', 'BuGn', 'BuPu','GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu','Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
#Sequential (2) :  #['afmhot', 'autumn', 'bone', 'cool','copper', 'gist_heat', 'gray', 'hot','pink', 'spring', 'summer', 'winter']
#Diverging :  #['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral', 'seismic']
#Qualitative :  #['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3']
#Miscellaneous :  #['gist_earth', 'terrain', 'ocean', 'gist_stern','brg', 'CMRmap', 'cubehelix','gnuplot', 'gnuplot2', 'gist_ncar', 'nipy_spectral', 'jet', 'rainbow', 'gist_rainbow', 'hsv', 'flag', 'prism']
tf = inviwopy.app.network.VolumeRaycaster.transferFunction
tf.clear()
cmapName = "viridis"
cmap=plt.get_cmap(cmapName)
N = 128
for i in range(0,N,1):
   x = i / (N-1)
   a = 1.0
   color = cmap(x)
   tf.add(x, vec4(color[0],color[1],color[2], a)) 
 | 
	bsd-2-clause | 
| 
	Codefans-fan/odoo | 
	openerp/addons/base/tests/test_orm.py | 
	20 | 
	17911 | 
	from collections import defaultdict
from openerp.tools import mute_logger
from openerp.tests import common
UID = common.ADMIN_USER_ID
DB = common.DB
class TestORM(common.TransactionCase):
    """ test special behaviors of ORM CRUD functions
    
        TODO: use real Exception types instead of Exception """
    def setUp(self):
        super(TestORM, self).setUp()
        cr, uid = self.cr, self.uid
        self.partner = self.registry('res.partner')
        self.users = self.registry('res.users')
        self.p1 = self.partner.name_create(cr, uid, 'W')[0]
        self.p2 = self.partner.name_create(cr, uid, 'Y')[0]
        self.ir_rule = self.registry('ir.rule')
        # sample unprivileged user
        employee_gid = self.ref('base.group_user')
        self.uid2 = self.users.create(cr, uid, {'name': 'test user', 'login': 'test', 'groups_id': [4,employee_gid]})
    @mute_logger('openerp.models')
    def testAccessDeletedRecords(self):
        """ Verify that accessing deleted records works as expected """
        cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
        self.partner.unlink(cr, uid, [p1])
        # read() is expected to skip deleted records because our API is not
        # transactional for a sequence of search()->read() performed from the
        # client-side... a concurrent deletion could therefore cause spurious
        # exceptions even when simply opening a list view!
        # /!\ Using unprivileged user to detect former side effects of ir.rules!
        self.assertEqual([{'id': p2, 'name': 'Y'}], self.partner.read(cr, uid2, [p1,p2], ['name']), "read() should skip deleted records")
        self.assertEqual([], self.partner.read(cr, uid2, [p1], ['name']), "read() should skip deleted records")
        # Deleting an already deleted record should be simply ignored
        self.assertTrue(self.partner.unlink(cr, uid, [p1]), "Re-deleting should be a no-op")
        # Updating an already deleted record should raise, even as admin
        with self.assertRaises(Exception):
            self.partner.write(cr, uid, [p1], {'name': 'foo'})
    @mute_logger('openerp.models')
    def testAccessFilteredRecords(self):
        """ Verify that accessing filtered records works as expected for non-admin user """
        cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
        partner_model = self.registry('ir.model').search(cr, uid, [('model','=','res.partner')])[0]
        self.ir_rule.create(cr, uid, {'name': 'Y is invisible',
                                      'domain_force': [('id', '!=', p1)],
                                      'model_id': partner_model})
        # search as unprivileged user
        partners = self.partner.search(cr, uid2, [])
        self.assertFalse(p1 in partners, "W should not be visible...")
        self.assertTrue(p2 in partners, "... but Y should be visible")
        # read as unprivileged user
        with self.assertRaises(Exception):
            self.partner.read(cr, uid2, [p1], ['name'])
        # write as unprivileged user
        with self.assertRaises(Exception):
            self.partner.write(cr, uid2, [p1], {'name': 'foo'})
        # unlink as unprivileged user
        with self.assertRaises(Exception):
            self.partner.unlink(cr, uid2, [p1])
        # Prepare mixed case 
        self.partner.unlink(cr, uid, [p2])
        # read mixed records: some deleted and some filtered
        with self.assertRaises(Exception):
            self.partner.read(cr, uid2, [p1,p2], ['name'])
        # delete mixed records: some deleted and some filtered
        with self.assertRaises(Exception):
            self.partner.unlink(cr, uid2, [p1,p2])
    def test_multi_read(self):
        record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
        records = self.partner.read(self.cr, UID, [record_id])
        self.assertIsInstance(records, list)
    def test_one_read(self):
        record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
        record = self.partner.read(self.cr, UID, record_id)
        self.assertIsInstance(record, dict)
    @mute_logger('openerp.models')
    def test_search_read(self):
        # simple search_read
        self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
        found = self.partner.search_read(self.cr, UID, [['name', '=', 'MyPartner1']], ['name'])
        self.assertEqual(len(found), 1)
        self.assertEqual(found[0]['name'], 'MyPartner1')
        self.assertTrue('id' in found[0])
        # search_read correct order
        self.partner.create(self.cr, UID, {'name': 'MyPartner2'})
        found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name")
        self.assertEqual(len(found), 2)
        self.assertEqual(found[0]['name'], 'MyPartner1')
        self.assertEqual(found[1]['name'], 'MyPartner2')
        found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name desc")
        self.assertEqual(len(found), 2)
        self.assertEqual(found[0]['name'], 'MyPartner2')
        self.assertEqual(found[1]['name'], 'MyPartner1')
        # search_read that finds nothing
        found = self.partner.search_read(self.cr, UID, [['name', '=', 'Does not exists']], ['name'])
        self.assertEqual(len(found), 0)
    def test_exists(self):
        partner = self.partner.browse(self.cr, UID, [])
        # check that records obtained from search exist
        recs = partner.search([])
        self.assertTrue(recs)
        self.assertEqual(recs.exists(), recs)
        # check that there is no record with id 0
        recs = partner.browse([0])
        self.assertFalse(recs.exists())
    def test_groupby_date(self):
        partners = dict(
            A='2012-11-19',
            B='2012-12-17',
            C='2012-12-31',
            D='2013-01-07',
            E='2013-01-14',
            F='2013-01-28',
            G='2013-02-11',
        )
        all_partners = []
        partners_by_day = defaultdict(set)
        partners_by_month = defaultdict(set)
        partners_by_year = defaultdict(set)
        for name, date in partners.items():
            p = self.partner.create(self.cr, UID, dict(name=name, date=date))
            all_partners.append(p)
            partners_by_day[date].add(p)
            partners_by_month[date.rsplit('-', 1)[0]].add(p)
            partners_by_year[date.split('-', 1)[0]].add(p)
        def read_group(interval, domain=None):
            main_domain = [('id', 'in', all_partners)]
            if domain:
                domain = ['&'] + main_domain + domain
            else:
                domain = main_domain
            rg = self.partner.read_group(self.cr, self.uid, domain, ['date'], 'date' + ':' + interval)
            result = {}
            for r in rg:
                result[r['date:' + interval]] = set(self.partner.search(self.cr, self.uid, r['__domain']))
            return result
        self.assertEqual(len(read_group('day')), len(partners_by_day))
        self.assertEqual(len(read_group('month')), len(partners_by_month))
        self.assertEqual(len(read_group('year')), len(partners_by_year))
        rg = self.partner.read_group(self.cr, self.uid, [('id', 'in', all_partners)], 
                        ['date'], ['date:month', 'date:day'], lazy=False)
        self.assertEqual(len(rg), len(all_partners))
class TestInherits(common.TransactionCase):
    """ test the behavior of the orm for models that use _inherits;
        specifically: res.users, that inherits from res.partner
    """
    def setUp(self):
        super(TestInherits, self).setUp()
        self.partner = self.registry('res.partner')
        self.user = self.registry('res.users')
    def test_default(self):
        """ `default_get` cannot return a dictionary or a new id """
        defaults = self.user.default_get(self.cr, UID, ['partner_id'])
        if 'partner_id' in defaults:
            self.assertIsInstance(defaults['partner_id'], (bool, int, long))
    def test_create(self):
        """ creating a user should automatically create a new partner """
        partners_before = self.partner.search(self.cr, UID, [])
        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
        foo = self.user.browse(self.cr, UID, foo_id)
        self.assertNotIn(foo.partner_id.id, partners_before)
    def test_create_with_ancestor(self):
        """ creating a user with a specific 'partner_id' should not create a new partner """
        par_id = self.partner.create(self.cr, UID, {'name': 'Foo'})
        partners_before = self.partner.search(self.cr, UID, [])
        foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'})
        partners_after = self.partner.search(self.cr, UID, [])
        self.assertEqual(set(partners_before), set(partners_after))
        foo = self.user.browse(self.cr, UID, foo_id)
        self.assertEqual(foo.name, 'Foo')
        self.assertEqual(foo.partner_id.id, par_id)
    @mute_logger('openerp.models')
    def test_read(self):
        """ inherited fields should be read without any indirection """
        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
        foo_values, = self.user.read(self.cr, UID, [foo_id])
        partner_id = foo_values['partner_id'][0]
        partner_values, = self.partner.read(self.cr, UID, [partner_id])
        self.assertEqual(foo_values['name'], partner_values['name'])
        foo = self.user.browse(self.cr, UID, foo_id)
        self.assertEqual(foo.name, foo.partner_id.name)
    @mute_logger('openerp.models')
    def test_copy(self):
        """ copying a user should automatically copy its partner, too """
        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
        foo_before, = self.user.read(self.cr, UID, [foo_id])
        del foo_before['__last_update']
        bar_id = self.user.copy(self.cr, UID, foo_id, {'login': 'bar', 'password': 'bar'})
        foo_after, = self.user.read(self.cr, UID, [foo_id])
        del foo_after['__last_update']
        self.assertEqual(foo_before, foo_after)
        foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
        self.assertEqual(bar.login, 'bar')
        self.assertNotEqual(foo.id, bar.id)
        self.assertNotEqual(foo.partner_id.id, bar.partner_id.id)
    @mute_logger('openerp.models')
    def test_copy_with_ancestor(self):
        """ copying a user with 'parent_id' in defaults should not duplicate the partner """
        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo',
                                                 'login_date': '2016-01-01', 'signature': 'XXX'})
        par_id = self.partner.create(self.cr, UID, {'name': 'Bar'})
        foo_before, = self.user.read(self.cr, UID, [foo_id])
        del foo_before['__last_update']
        partners_before = self.partner.search(self.cr, UID, [])
        bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'})
        foo_after, = self.user.read(self.cr, UID, [foo_id])
        del foo_after['__last_update']
        partners_after = self.partner.search(self.cr, UID, [])
        self.assertEqual(foo_before, foo_after)
        self.assertEqual(set(partners_before), set(partners_after))
        foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
        self.assertNotEqual(foo.id, bar.id)
        self.assertEqual(bar.partner_id.id, par_id)
        self.assertEqual(bar.login, 'bar', "login is given from copy parameters")
        self.assertFalse(bar.login_date, "login_date should not be copied from original record")
        self.assertEqual(bar.name, 'Bar', "name is given from specific partner")
        self.assertEqual(bar.signature, foo.signature, "signature should be copied")
CREATE = lambda values: (0, False, values)
UPDATE = lambda id, values: (1, id, values)
DELETE = lambda id: (2, id, False)
FORGET = lambda id: (3, id, False)
LINK_TO = lambda id: (4, id, False)
DELETE_ALL = lambda: (5, False, False)
REPLACE_WITH = lambda ids: (6, False, ids)
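# Illustrative sketch of how these command tuples are used when writing a
# one2many field (the field name and ids below are hypothetical):
#
#     partner.write(cr, UID, [parent_id], {'child_ids': [
#         CREATE({'name': 'new child'}),          # (0, False, values)
#         UPDATE(child_id, {'name': 'renamed'}),  # (1, id, values)
#         LINK_TO(existing_id),                   # (4, id, False)
#     ]})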
def sorted_by_id(list_of_dicts):
    "sort dictionaries by their 'id' field; useful for comparisons"
    return sorted(list_of_dicts, key=lambda d: d.get('id'))
class TestO2MSerialization(common.TransactionCase):
    """ test the orm method 'write' on one2many fields """
    def setUp(self):
        super(TestO2MSerialization, self).setUp()
        self.partner = self.registry('res.partner')
    def test_no_command(self):
        " empty list of commands yields an empty list of records "
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', [])
        self.assertEqual(results, [])
    def test_CREATE_commands(self):
        " returns the VALUES dict as-is "
        values = [{'foo': 'bar'}, {'foo': 'baz'}, {'foo': 'baq'}]
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', map(CREATE, values))
        self.assertEqual(results, values)
    def test_LINK_TO_command(self):
        " reads the records from the database, records are returned with their ids. "
        ids = [
            self.partner.create(self.cr, UID, {'name': 'foo'}),
            self.partner.create(self.cr, UID, {'name': 'bar'}),
            self.partner.create(self.cr, UID, {'name': 'baz'})
        ]
        commands = map(LINK_TO, ids)
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', commands, ['name'])
        self.assertEqual(sorted_by_id(results), sorted_by_id([
            {'id': ids[0], 'name': 'foo'},
            {'id': ids[1], 'name': 'bar'},
            {'id': ids[2], 'name': 'baz'}
        ]))
    def test_bare_ids_command(self):
        " same as the equivalent LINK_TO commands "
        ids = [
            self.partner.create(self.cr, UID, {'name': 'foo'}),
            self.partner.create(self.cr, UID, {'name': 'bar'}),
            self.partner.create(self.cr, UID, {'name': 'baz'})
        ]
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', ids, ['name'])
        self.assertEqual(sorted_by_id(results), sorted_by_id([
            {'id': ids[0], 'name': 'foo'},
            {'id': ids[1], 'name': 'bar'},
            {'id': ids[2], 'name': 'baz'}
        ]))
    def test_UPDATE_command(self):
        " take the in-db records and merge the provided information in "
        id_foo = self.partner.create(self.cr, UID, {'name': 'foo'})
        id_bar = self.partner.create(self.cr, UID, {'name': 'bar'})
        id_baz = self.partner.create(self.cr, UID, {'name': 'baz', 'city': 'tag'})
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', [
                LINK_TO(id_foo),
                UPDATE(id_bar, {'name': 'qux', 'city': 'tagtag'}),
                UPDATE(id_baz, {'name': 'quux'})
            ], ['name', 'city'])
        self.assertEqual(sorted_by_id(results), sorted_by_id([
            {'id': id_foo, 'name': 'foo', 'city': False},
            {'id': id_bar, 'name': 'qux', 'city': 'tagtag'},
            {'id': id_baz, 'name': 'quux', 'city': 'tag'}
        ]))
    def test_DELETE_command(self):
        " deleted records are not returned at all. "
        ids = [
            self.partner.create(self.cr, UID, {'name': 'foo'}),
            self.partner.create(self.cr, UID, {'name': 'bar'}),
            self.partner.create(self.cr, UID, {'name': 'baz'})
        ]
        commands = [DELETE(ids[0]), DELETE(ids[1]), DELETE(ids[2])]
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', commands, ['name'])
        self.assertEqual(results, [])
    def test_mixed_commands(self):
        ids = [
            self.partner.create(self.cr, UID, {'name': name})
            for name in ['NObar', 'baz', 'qux', 'NOquux', 'NOcorge', 'garply']
        ]
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', [
                CREATE({'name': 'foo'}),
                UPDATE(ids[0], {'name': 'bar'}),
                LINK_TO(ids[1]),
                DELETE(ids[2]),
                UPDATE(ids[3], {'name': 'quux',}),
                UPDATE(ids[4], {'name': 'corge'}),
                CREATE({'name': 'grault'}),
                LINK_TO(ids[5])
            ], ['name'])
        self.assertEqual(sorted_by_id(results), sorted_by_id([
            {'name': 'foo'},
            {'id': ids[0], 'name': 'bar'},
            {'id': ids[1], 'name': 'baz'},
            {'id': ids[3], 'name': 'quux'},
            {'id': ids[4], 'name': 'corge'},
            {'name': 'grault'},
            {'id': ids[5], 'name': 'garply'}
        ]))
    def test_LINK_TO_pairs(self):
        "LINK_TO commands can be written as pairs, instead of triplets"
        ids = [
            self.partner.create(self.cr, UID, {'name': 'foo'}),
            self.partner.create(self.cr, UID, {'name': 'bar'}),
            self.partner.create(self.cr, UID, {'name': 'baz'})
        ]
        commands = map(lambda id: (4, id), ids)
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', commands, ['name'])
        self.assertEqual(sorted_by_id(results), sorted_by_id([
            {'id': ids[0], 'name': 'foo'},
            {'id': ids[1], 'name': 'bar'},
            {'id': ids[2], 'name': 'baz'}
        ]))
    def test_singleton_commands(self):
        "DELETE_ALL can appear as a singleton"
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', [DELETE_ALL()], ['name'])
        self.assertEqual(results, [])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
 | 
	agpl-3.0 | 
| 
	alirizakeles/memopol-core | 
	memopol/reps/api.py | 
	2 | 
	2647 | 
	from tastypie import fields
from tastypie.resources import ModelResource
from memopol.reps.models import Party,\
                                Opinion,\
                                Representative,\
                                PartyRepresentative,\
                                Email,\
                                CV,\
                                WebSite,\
                                OpinionREP
class REPPartyResource(ModelResource):
    partyrepresentative_set = fields.ToManyField("memopol.reps.api.REPPartyRepresentativeResource", "partyrepresentative_set")
    class Meta:
        queryset = Party.objects.all()
class REPOpinionResource(ModelResource):
    opinionrep_set = fields.ToManyField("memopol.reps.api.REPOpinionREPResource", "opinionrep_set")
    class Meta:
        queryset = Opinion.objects.all()
class REPRepresentativeResource(ModelResource):
    opinionrep_set = fields.ToManyField("memopol.reps.api.REPOpinionREPResource", "opinionrep_set")
    email_set = fields.ToManyField("memopol.reps.api.REPEmailResource", "email_set")
    website_set = fields.ToManyField("memopol.reps.api.REPWebSiteResource", "website_set")
    cv_set = fields.ToManyField("memopol.reps.api.REPCVResource", "cv_set")
    partyrepresentative_set = fields.ToManyField("memopol.reps.api.REPPartyRepresentativeResource", "partyrepresentative_set")
    score_set = fields.ToManyField("votes.api.ScoreResource", "score_set")
    vote_set = fields.ToManyField("votes.api.VoteResource", "vote_set")
    class Meta:
        queryset = Representative.objects.all()
class REPPartyRepresentativeResource(ModelResource):
    representative = fields.ForeignKey(REPRepresentativeResource, "representative")
    party = fields.ForeignKey(REPPartyResource, "party")
    class Meta:
        queryset = PartyRepresentative.objects.all()
class REPEmailResource(ModelResource):
    representative = fields.ForeignKey(REPRepresentativeResource, "representative")
    class Meta:
        queryset = Email.objects.all()
class REPCVResource(ModelResource):
    representative = fields.ForeignKey(REPRepresentativeResource, "representative")
    class Meta:
        queryset = CV.objects.all()
class REPWebSiteResource(ModelResource):
    representative = fields.ForeignKey(REPRepresentativeResource, "representative")
    class Meta:
        queryset = WebSite.objects.all()
class REPOpinionREPResource(ModelResource):
    representative = fields.ForeignKey(REPRepresentativeResource, "representative")
    opinion = fields.ForeignKey(REPOpinionResource, "opinion")
    class Meta:
        queryset = OpinionREP.objects.all()
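# Illustrative wiring sketch (hypothetical, not part of this module): these
# resources would typically be registered on a tastypie Api object and exposed
# through the Django URLconf, roughly like:
#
#     from tastypie.api import Api
#     v1_api = Api(api_name='v1')
#     for resource_cls in (REPPartyResource, REPOpinionResource,
#                          REPRepresentativeResource,
#                          REPPartyRepresentativeResource, REPEmailResource,
#                          REPCVResource, REPWebSiteResource,
#                          REPOpinionREPResource):
#         v1_api.register(resource_cls())
#     # urls.py (hypothetical): url(r'^api/', include(v1_api.urls))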
 | 
	gpl-3.0 | 
| 
	ndtran/compassion-switzerland | 
	sponsorship_switzerland/__openerp__.py | 
	2 | 
	1853 | 
	# -*- encoding: utf-8 -*-
##############################################################################
#
#       ______ Releasing children from poverty      _
#      / ____/___  ____ ___  ____  ____ ___________(_)___  ____
#     / /   / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
#    / /___/ /_/ / / / / / / /_/ / /_/ (__  |__  ) / /_/ / / / /
#    \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
#                        /_/
#                            in Jesus' name
#
#    Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
#    @author: Emanuel Cino <[email protected]>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Tailor Sponsorships to Compassion CH needs',
    'version': '1.0',
    'category': 'Other',
    'author': 'Compassion CH',
    'website': 'http://www.compassion.ch',
    'depends': ['sponsorship_tracking'],
    'data': [
        'view/contracts_view.xml',
        'data/install.xml'],
    'js': ['static/src/js/sponsorship_tracking_kanban.js'],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
 | 
	agpl-3.0 | 
| 
	redhat-openstack/swift | 
	swift/common/middleware/cname_lookup.py | 
	29 | 
	6766 | 
	# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CNAME Lookup Middleware
Middleware that translates an unknown domain in the host header to
something that ends with the configured storage_domain by looking up
the given domain's CNAME record in DNS.
This middleware will continue to follow a CNAME chain in DNS until it finds
a record ending in the configured storage domain or it reaches the configured
maximum lookup depth. If a match is found, the environment's Host header is
rewritten and the request is passed further down the WSGI chain.
"""
from six.moves import range
import socket
from swift import gettext_ as _
try:
    import dns.resolver
    from dns.exception import DNSException
    from dns.resolver import NXDOMAIN, NoAnswer
except ImportError:
    # catch this to allow docs to be built without the dependency
    MODULE_DEPENDENCY_MET = False
else:  # executed if the try block finishes with no errors
    MODULE_DEPENDENCY_MET = True
from swift.common.swob import Request, HTTPBadRequest
from swift.common.utils import cache_from_env, get_logger, list_from_csv
def lookup_cname(domain):  # pragma: no cover
    """
    Given a domain, returns its DNS CNAME mapping and DNS ttl.
    :param domain: domain to query on
    :returns: (ttl, result)
    """
    try:
        answer = dns.resolver.query(domain, 'CNAME').rrset
        ttl = answer.ttl
        result = answer.items[0].to_text()
        result = result.rstrip('.')
        return ttl, result
    except (DNSException, NXDOMAIN, NoAnswer):
        return 0, None
def is_ip(domain):
    try:
        socket.inet_pton(socket.AF_INET, domain)
        return True
    except socket.error:
        try:
            socket.inet_pton(socket.AF_INET6, domain)
            return True
        except socket.error:
            return False
class CNAMELookupMiddleware(object):
    """
    CNAME Lookup Middleware
    See above for a full description.
    :param app: The next WSGI filter or app in the paste.deploy
                chain.
    :param conf: The configuration dict for the middleware.
    """
    def __init__(self, app, conf):
        if not MODULE_DEPENDENCY_MET:
            # raise ImportError if the optional dnspython dependency is missing
            raise ImportError('dnspython is required for this module')
        self.app = app
        storage_domain = conf.get('storage_domain', 'example.com')
        self.storage_domain = ['.' + s for s in
                               list_from_csv(storage_domain)
                               if not s.startswith('.')]
        self.storage_domain += [s for s in list_from_csv(storage_domain)
                                if s.startswith('.')]
        self.lookup_depth = int(conf.get('lookup_depth', '1'))
        self.memcache = None
        self.logger = get_logger(conf, log_route='cname-lookup')
    def _domain_endswith_in_storage_domain(self, a_domain):
        for domain in self.storage_domain:
            if a_domain.endswith(domain):
                return True
        return False
    def __call__(self, env, start_response):
        if not self.storage_domain:
            return self.app(env, start_response)
        if 'HTTP_HOST' in env:
            given_domain = env['HTTP_HOST']
        else:
            given_domain = env['SERVER_NAME']
        port = ''
        if ':' in given_domain:
            given_domain, port = given_domain.rsplit(':', 1)
        if is_ip(given_domain):
            return self.app(env, start_response)
        a_domain = given_domain
        if not self._domain_endswith_in_storage_domain(a_domain):
            if self.memcache is None:
                self.memcache = cache_from_env(env)
            error = True
            for tries in range(self.lookup_depth):
                found_domain = None
                if self.memcache:
                    memcache_key = ''.join(['cname-', a_domain])
                    found_domain = self.memcache.get(memcache_key)
                if not found_domain:
                    ttl, found_domain = lookup_cname(a_domain)
                    if self.memcache:
                        memcache_key = ''.join(['cname-', given_domain])
                        self.memcache.set(memcache_key, found_domain,
                                          time=ttl)
                if found_domain is None or found_domain == a_domain:
                    # no CNAME records or we're at the last lookup
                    error = True
                    found_domain = None
                    break
                elif self._domain_endswith_in_storage_domain(found_domain):
                    # Found it!
                    self.logger.info(
                        _('Mapped %(given_domain)s to %(found_domain)s') %
                        {'given_domain': given_domain,
                         'found_domain': found_domain})
                    if port:
                        env['HTTP_HOST'] = ':'.join([found_domain, port])
                    else:
                        env['HTTP_HOST'] = found_domain
                    error = False
                    break
                else:
                    # try one level deeper in the chain
                    self.logger.debug(
                        _('Following CNAME chain for '
                          '%(given_domain)s to %(found_domain)s') %
                        {'given_domain': given_domain,
                         'found_domain': found_domain})
                    a_domain = found_domain
            if error:
                if found_domain:
                    msg = 'CNAME lookup failed after %d tries' % \
                        self.lookup_depth
                else:
                    msg = 'CNAME lookup failed to resolve to a valid domain'
                resp = HTTPBadRequest(request=Request(env), body=msg,
                                      content_type='text/plain')
                return resp(env, start_response)
        return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):  # pragma: no cover
    conf = global_conf.copy()
    conf.update(local_conf)
    def cname_filter(app):
        return CNAMELookupMiddleware(app, conf)
    return cname_filter
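# Illustrative proxy-server.conf snippet for this filter (values shown are
# examples; only storage_domain and lookup_depth are read in __init__ above):
#
#     [filter:cname_lookup]
#     use = egg:swift#cname_lookup
#     storage_domain = example.com
#     lookup_depth = 1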
 | 
	apache-2.0 | 
| 
	tzewangdorje/SIPserv | 
	Twisted-13.1.0/twisted/test/test_hook.py | 
	41 | 
	4250 | 
	
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.hook module.
"""
from twisted.python import hook
from twisted.trial import unittest
class BaseClass:
    """
    dummy class to help in testing.
    """
    def __init__(self):
        """
        dummy initializer
        """
        self.calledBasePre = 0
        self.calledBasePost = 0
        self.calledBase = 0
    def func(self, a, b):
        """
        dummy method
        """
        assert a == 1
        assert b == 2
        self.calledBase = self.calledBase + 1
class SubClass(BaseClass):
    """
    another dummy class
    """
    def __init__(self):
        """
        another dummy initializer
        """
        BaseClass.__init__(self)
        self.calledSubPre = 0
        self.calledSubPost = 0
        self.calledSub = 0
    def func(self, a, b):
        """
        another dummy function
        """
        assert a == 1
        assert b == 2
        BaseClass.func(self, a, b)
        self.calledSub = self.calledSub + 1
_clean_BaseClass = BaseClass.__dict__.copy()
_clean_SubClass = SubClass.__dict__.copy()
def basePre(base, a, b):
    """
    a pre-hook for the base class
    """
    base.calledBasePre = base.calledBasePre + 1
def basePost(base, a, b):
    """
    a post-hook for the base class
    """
    base.calledBasePost = base.calledBasePost + 1
def subPre(sub, a, b):
    """
    a pre-hook for the subclass
    """
    sub.calledSubPre = sub.calledSubPre + 1
def subPost(sub, a, b):
    """
    a post-hook for the subclass
    """
    sub.calledSubPost = sub.calledSubPost + 1
class HookTestCase(unittest.TestCase):
    """
    test case to make sure hooks are called
    """
    def setUp(self):
        """Make sure we have clean versions of our classes."""
        BaseClass.__dict__.clear()
        BaseClass.__dict__.update(_clean_BaseClass)
        SubClass.__dict__.clear()
        SubClass.__dict__.update(_clean_SubClass)
    def testBaseHook(self):
        """make sure that the base class's hook is called reliably
        """
        base = BaseClass()
        self.assertEqual(base.calledBase, 0)
        self.assertEqual(base.calledBasePre, 0)
        base.func(1,2)
        self.assertEqual(base.calledBase, 1)
        self.assertEqual(base.calledBasePre, 0)
        hook.addPre(BaseClass, "func", basePre)
        base.func(1, b=2)
        self.assertEqual(base.calledBase, 2)
        self.assertEqual(base.calledBasePre, 1)
        hook.addPost(BaseClass, "func", basePost)
        base.func(1, b=2)
        self.assertEqual(base.calledBasePost, 1)
        self.assertEqual(base.calledBase, 3)
        self.assertEqual(base.calledBasePre, 2)
        hook.removePre(BaseClass, "func", basePre)
        hook.removePost(BaseClass, "func", basePost)
        base.func(1, b=2)
        self.assertEqual(base.calledBasePost, 1)
        self.assertEqual(base.calledBase, 4)
        self.assertEqual(base.calledBasePre, 2)
    def testSubHook(self):
        """test interactions between base-class hooks and subclass hooks
        """
        sub = SubClass()
        self.assertEqual(sub.calledSub, 0)
        self.assertEqual(sub.calledBase, 0)
        sub.func(1, b=2)
        self.assertEqual(sub.calledSub, 1)
        self.assertEqual(sub.calledBase, 1)
        hook.addPre(SubClass, 'func', subPre)
        self.assertEqual(sub.calledSub, 1)
        self.assertEqual(sub.calledBase, 1)
        self.assertEqual(sub.calledSubPre, 0)
        self.assertEqual(sub.calledBasePre, 0)
        sub.func(1, b=2)
        self.assertEqual(sub.calledSub, 2)
        self.assertEqual(sub.calledBase, 2)
        self.assertEqual(sub.calledSubPre, 1)
        self.assertEqual(sub.calledBasePre, 0)
        # let the pain begin
        hook.addPre(BaseClass, 'func', basePre)
        BaseClass.func(sub, 1, b=2)
        # sub.func(1, b=2)
        self.assertEqual(sub.calledBase, 3)
        self.assertEqual(sub.calledBasePre, 1, str(sub.calledBasePre))
        sub.func(1, b=2)
        self.assertEqual(sub.calledBasePre, 2)
        self.assertEqual(sub.calledBase, 4)
        self.assertEqual(sub.calledSubPre, 2)
        self.assertEqual(sub.calledSub, 3)
testCases = [HookTestCase]
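# Illustrative recap of the hook API exercised by the tests above (names are
# the ones defined in this module):
#
#     hook.addPre(BaseClass, "func", basePre)     # run basePre before func()
#     hook.addPost(BaseClass, "func", basePost)   # run basePost after func()
#     BaseClass().func(1, 2)
#     hook.removePre(BaseClass, "func", basePre)
#     hook.removePost(BaseClass, "func", basePost)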
 | 
	gpl-3.0 | 
| 
	cursesun/scrapy | 
	tests/test_commands.py | 
	105 | 
	8613 | 
	import os
import sys
import subprocess
import tempfile
from time import sleep
from os.path import exists, join, abspath
from shutil import rmtree
from tempfile import mkdtemp
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.test import get_testenv
from scrapy.utils.testsite import SiteTest
from scrapy.utils.testproc import ProcessTest
class ProjectTest(unittest.TestCase):
    project_name = 'testproject'
    def setUp(self):
        self.temp_path = mkdtemp()
        self.cwd = self.temp_path
        self.proj_path = join(self.temp_path, self.project_name)
        self.proj_mod_path = join(self.proj_path, self.project_name)
        self.env = get_testenv()
    def tearDown(self):
        rmtree(self.temp_path)
    def call(self, *new_args, **kwargs):
        with tempfile.TemporaryFile() as out:
            args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
            return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd,
                env=self.env, **kwargs)
    def proc(self, *new_args, **kwargs):
        args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
        p = subprocess.Popen(args, cwd=self.cwd, env=self.env,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             **kwargs)
        waited = 0
        interval = 0.2
        while p.poll() is None:
            sleep(interval)
            waited += interval
            if waited > 15:
                p.kill()
                assert False, 'Command took too much time to complete'
        return p
class StartprojectTest(ProjectTest):
    def test_startproject(self):
        self.assertEqual(0, self.call('startproject', self.project_name))
        assert exists(join(self.proj_path, 'scrapy.cfg'))
        assert exists(join(self.proj_path, 'testproject'))
        assert exists(join(self.proj_mod_path, '__init__.py'))
        assert exists(join(self.proj_mod_path, 'items.py'))
        assert exists(join(self.proj_mod_path, 'pipelines.py'))
        assert exists(join(self.proj_mod_path, 'settings.py'))
        assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))
        self.assertEqual(1, self.call('startproject', self.project_name))
        self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
        self.assertEqual(1, self.call('startproject', 'sys'))
class CommandTest(ProjectTest):
    def setUp(self):
        super(CommandTest, self).setUp()
        self.call('startproject', self.project_name)
        self.cwd = join(self.temp_path, self.project_name)
        self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name
class GenspiderCommandTest(CommandTest):
    def test_arguments(self):
        # only pass one argument. spider script shouldn't be created
        self.assertEqual(2, self.call('genspider', 'test_name'))
        assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
        # pass two arguments <name> <domain>. spider script should be created
        self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
        assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
    def test_template(self, tplname='crawl'):
        args = ['--template=%s' % tplname] if tplname else []
        spname = 'test_spider'
        p = self.proc('genspider', spname, 'test.com', *args)
        out = retry_on_eintr(p.stdout.read)
        self.assertIn("Created spider %r using template %r in module" % (spname, tplname), out)
        self.assertTrue(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))
        p = self.proc('genspider', spname, 'test.com', *args)
        out = retry_on_eintr(p.stdout.read)
        self.assertIn("Spider %r already exists in module" % spname, out)
    def test_template_basic(self):
        self.test_template('basic')
    def test_template_csvfeed(self):
        self.test_template('csvfeed')
    def test_template_xmlfeed(self):
        self.test_template('xmlfeed')
    def test_list(self):
        self.assertEqual(0, self.call('genspider', '--list'))
    def test_dump(self):
        self.assertEqual(0, self.call('genspider', '--dump=basic'))
        self.assertEqual(0, self.call('genspider', '-d', 'basic'))
    def test_same_name_as_project(self):
        self.assertEqual(2, self.call('genspider', self.project_name))
        assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name))
class MiscCommandsTest(CommandTest):
    def test_list(self):
        self.assertEqual(0, self.call('list'))
class RunSpiderCommandTest(CommandTest):
    def test_runspider(self):
        tmpdir = self.mktemp()
        os.mkdir(tmpdir)
        fname = abspath(join(tmpdir, 'myspider.py'))
        with open(fname, 'w') as f:
            f.write("""
import scrapy
class MySpider(scrapy.Spider):
    name = 'myspider'
    def start_requests(self):
        self.logger.debug("It Works!")
        return []
""")
        p = self.proc('runspider', fname)
        log = p.stderr.read()
        self.assertIn("DEBUG: It Works!", log)
        self.assertIn("INFO: Spider opened", log)
        self.assertIn("INFO: Closing spider (finished)", log)
        self.assertIn("INFO: Spider closed (finished)", log)
    def test_runspider_no_spider_found(self):
        tmpdir = self.mktemp()
        os.mkdir(tmpdir)
        fname = abspath(join(tmpdir, 'myspider.py'))
        with open(fname, 'w') as f:
            f.write("""
from scrapy.spiders import Spider
""")
        p = self.proc('runspider', fname)
        log = p.stderr.read()
        self.assertIn("No spider found in file", log)
    def test_runspider_file_not_found(self):
        p = self.proc('runspider', 'some_non_existent_file')
        log = p.stderr.read()
        self.assertIn("File not found: some_non_existent_file", log)
    def test_runspider_unable_to_load(self):
        tmpdir = self.mktemp()
        os.mkdir(tmpdir)
        fname = abspath(join(tmpdir, 'myspider.txt'))
        with open(fname, 'w') as f:
            f.write("")
        p = self.proc('runspider', fname)
        log = p.stderr.read()
        self.assertIn("Unable to load", log)
class ParseCommandTest(ProcessTest, SiteTest, CommandTest):
    command = 'parse'
    def setUp(self):
        super(ParseCommandTest, self).setUp()
        self.spider_name = 'parse_spider'
        fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))
        with open(fname, 'w') as f:
            f.write("""
import scrapy
class MySpider(scrapy.Spider):
    name = '{0}'
    def parse(self, response):
        if getattr(self, 'test_arg', None):
            self.logger.debug('It Works!')
        return [scrapy.Item(), dict(foo='bar')]
""".format(self.spider_name))
        fname = abspath(join(self.proj_mod_path, 'pipelines.py'))
        with open(fname, 'w') as f:
            f.write("""
import logging
class MyPipeline(object):
    component_name = 'my_pipeline'
    def process_item(self, item, spider):
        logging.info('It Works!')
        return item
""")
        fname = abspath(join(self.proj_mod_path, 'settings.py'))
        with open(fname, 'a') as f:
            f.write("""
ITEM_PIPELINES = {'%s.pipelines.MyPipeline': 1}
""" % self.project_name)
    @defer.inlineCallbacks
    def test_spider_arguments(self):
        _, _, stderr = yield self.execute(['--spider', self.spider_name,
                                           '-a', 'test_arg=1',
                                           '-c', 'parse',
                                           self.url('/html')])
        self.assertIn("DEBUG: It Works!", stderr)
    @defer.inlineCallbacks
    def test_pipelines(self):
        _, _, stderr = yield self.execute(['--spider', self.spider_name,
                                           '--pipelines',
                                           '-c', 'parse',
                                           self.url('/html')])
        self.assertIn("INFO: It Works!", stderr)
    @defer.inlineCallbacks
    def test_parse_items(self):
        status, out, stderr = yield self.execute(
            ['--spider', self.spider_name, '-c', 'parse', self.url('/html')]
        )
        self.assertIn("""[{}, {'foo': 'bar'}]""", out)
class BenchCommandTest(CommandTest):
    def test_run(self):
        p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',
                '-s', 'CLOSESPIDER_TIMEOUT=0.01')
        log = p.stderr.read()
        self.assertIn('INFO: Crawled', log)
 | 
	bsd-3-clause | 
| 
	xyguo/scikit-learn | 
	examples/svm/plot_svm_nonlinear.py | 
	268 | 
	1091 | 
	"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
                     np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
           origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
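# Illustrative follow-up (not part of the original plot): the fitted classifier
# can also label new points; for the XOR-style target above, a point in the
# (+, -) quadrant would typically be predicted as True.
#
#     print(clf.predict([[1.0, -1.0]]))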
 | 
	bsd-3-clause | 
| 
	dbremner/bite-project | 
	deps/gdata-python-client/src/gdata/blogger/data.py | 
	61 | 
	4551 | 
	#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the Blogger API."""
__author__ = '[email protected] (Jeff Scudder)'
import re
import urlparse
import atom.core
import gdata.data
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_TEMPLATE = '{http://purl.org/syndication/thread/1.0}%s'
BLOG_NAME_PATTERN = re.compile('(http://)(\w*)')
BLOG_ID_PATTERN = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
BLOG_ID2_PATTERN = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')
POST_ID_PATTERN = re.compile(
    '(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')
PAGE_ID_PATTERN = re.compile(
    '(tag:blogger.com,1999:blog-)(\w*)(.page-)(\w*)')
COMMENT_ID_PATTERN = re.compile('.*-(\w*)$')
class BloggerEntry(gdata.data.GDEntry):
  """Adds convenience methods inherited by all Blogger entries."""
  def get_blog_id(self):
    """Extracts the Blogger id of this blog.
    This method is useful when constructing URLs by hand. The blog id is
    often used in blogger operation URLs. This should not be confused with
    the id member of a BloggerBlog. The id element is the Atom id XML element.
    The blog id which this method returns is a part of the Atom id.
    Returns:
      The blog's unique id as a string.
    """
    if self.id.text:
      match = BLOG_ID_PATTERN.match(self.id.text)
      if match:
        return match.group(2)
      else:
        return BLOG_ID2_PATTERN.match(self.id.text).group(2)
    return None
  GetBlogId = get_blog_id
  def get_blog_name(self):
    """Finds the name of this blog as used in the 'alternate' URL.
    An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
    entry representing the above example, this method would return 'blogName'.
    Returns:
      The blog's URL name component as a string.
    """
    for link in self.link:
      if link.rel == 'alternate':
        return urlparse.urlparse(link.href)[1].split(".", 1)[0]
    return None
  GetBlogName = get_blog_name
class Blog(BloggerEntry):
  """Represents a blog which belongs to the user."""
class BlogFeed(gdata.data.GDFeed):
  entry = [Blog]
class BlogPost(BloggerEntry):
  """Represents a single post on a blog."""
  def add_label(self, label):
    """Adds a label to the blog post.
    The label is represented by an Atom category element, so this method
    is shorthand for appending a new atom.Category object.
    Args:
      label: str
    """
    self.category.append(atom.data.Category(scheme=LABEL_SCHEME, term=label))
  AddLabel = add_label
  def get_post_id(self):
    """Extracts the postID string from the entry's Atom id.
    Returns: A string of digits which identify this post within the blog.
    """
    if self.id.text:
      return POST_ID_PATTERN.match(self.id.text).group(4)
    return None
  GetPostId = get_post_id
class BlogPostFeed(gdata.data.GDFeed):
  entry = [BlogPost]
class BlogPage(BloggerEntry):
  """Represents a single page on a blog."""
  def get_page_id(self):
    """Extracts the pageID string from entry's Atom id.
    Returns: A string of digits which identify this post within the blog.
    """
    if self.id.text:
      return PAGE_ID_PATTERN.match(self.id.text).group(4)
    return None
  GetPageId = get_page_id
class BlogPageFeed(gdata.data.GDFeed):
  entry = [BlogPage]
class InReplyTo(atom.core.XmlElement):
  _qname = THR_TEMPLATE % 'in-reply-to'
  href = 'href'
  ref = 'ref'
  source = 'source'
  type = 'type'
class Comment(BloggerEntry):
  """Blog post comment entry in a feed listing comments on a post or blog."""
  in_reply_to = InReplyTo
  def get_comment_id(self):
    """Extracts the commentID string from the entry's Atom id.
    Returns: A string of digits which identify this comment within the blog.
    """
    if self.id.text:
      return COMMENT_ID_PATTERN.match(self.id.text).group(1)
    return None
  GetCommentId = get_comment_id
class CommentFeed(gdata.data.GDFeed):
  entry = [Comment]
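# Illustrative sketch of the Atom-id parsing done by the entry classes above
# (the id strings are made-up examples in the documented Blogger format):
#
#     POST_ID_PATTERN.match('tag:blogger.com,1999:blog-123.post-456').group(4)
#     # -> '456'
#     BLOG_ID_PATTERN.match('tag:blogger.com,1999:blog-123').group(2)
#     # -> '123'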
 | 
	apache-2.0 | 
| 
	qenter/vlc-android | 
	toolchains/arm/lib/python2.7/test/test_descrtut.py | 
	75 | 
	12052 | 
	# This contains most of the executable examples from Guido's descr
# tutorial, once at
#
#     http://www.python.org/2.2/descrintro.html
#
# A few examples left implicit in the writeup were fleshed out, a few were
# skipped due to lack of interest (e.g., faking super() by hand isn't
# of much interest anymore), and a few were fiddled to make the output
# deterministic.
from test.test_support import sortdict
import pprint
class defaultdict(dict):
    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default
    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.default
    def get(self, key, *args):
        if not args:
            args = (self.default,)
        return dict.get(self, key, *args)
    def merge(self, other):
        for key in other:
            if key not in self:
                self[key] = other[key]
test_1 = """
Here's the new type at work:
    >>> print defaultdict               # show our type
    <class 'test.test_descrtut.defaultdict'>
    >>> print type(defaultdict)         # its metatype
    <type 'type'>
    >>> a = defaultdict(default=0.0)    # create an instance
    >>> print a                         # show the instance
    {}
    >>> print type(a)                   # show its type
    <class 'test.test_descrtut.defaultdict'>
    >>> print a.__class__               # show its class
    <class 'test.test_descrtut.defaultdict'>
    >>> print type(a) is a.__class__    # its type is its class
    True
    >>> a[1] = 3.25                     # modify the instance
    >>> print a                         # show the new value
    {1: 3.25}
    >>> print a[1]                      # show the new item
    3.25
    >>> print a[0]                      # a non-existent item
    0.0
    >>> a.merge({1:100, 2:200})         # use a dict method
    >>> print sortdict(a)               # show the result
    {1: 3.25, 2: 200}
    >>>
We can also use the new type in contexts where classic only allows "real"
dictionaries, such as the locals/globals dictionaries for the exec
statement or the built-in function eval():
    >>> def sorted(seq):
    ...     seq.sort(key=str)
    ...     return seq
    >>> print sorted(a.keys())
    [1, 2]
    >>> exec "x = 3; print x" in a
    3
    >>> print sorted(a.keys())
    [1, 2, '__builtins__', 'x']
    >>> print a['x']
    3
    >>>
Now I'll show that defaultdict instances have dynamic instance variables,
just like classic classes:
    >>> a.default = -1
    >>> print a["noway"]
    -1
    >>> a.default = -1000
    >>> print a["noway"]
    -1000
    >>> 'default' in dir(a)
    True
    >>> a.x1 = 100
    >>> a.x2 = 200
    >>> print a.x1
    100
    >>> d = dir(a)
    >>> 'default' in d and 'x1' in d and 'x2' in d
    True
    >>> print sortdict(a.__dict__)
    {'default': -1000, 'x1': 100, 'x2': 200}
    >>>
"""
class defaultdict2(dict):
    __slots__ = ['default']
    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default
    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.default
    def get(self, key, *args):
        if not args:
            args = (self.default,)
        return dict.get(self, key, *args)
    def merge(self, other):
        for key in other:
            if key not in self:
                self[key] = other[key]
test_2 = """
The __slots__ declaration takes a list of instance variables, and reserves
space for exactly these in the instance. When __slots__ is used, other
instance variables cannot be assigned to:
    >>> a = defaultdict2(default=0.0)
    >>> a[1]
    0.0
    >>> a.default = -1
    >>> a[1]
    -1
    >>> a.x1 = 1
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    AttributeError: 'defaultdict2' object has no attribute 'x1'
    >>>
"""
test_3 = """
Introspecting instances of built-in types
For instance of built-in types, x.__class__ is now the same as type(x):
    >>> type([])
    <type 'list'>
    >>> [].__class__
    <type 'list'>
    >>> list
    <type 'list'>
    >>> isinstance([], list)
    True
    >>> isinstance([], dict)
    False
    >>> isinstance([], object)
    True
    >>>
Under the new proposal, the __methods__ attribute no longer exists:
    >>> [].__methods__
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    AttributeError: 'list' object has no attribute '__methods__'
    >>>
Instead, you can get the same information from the list type:
    >>> pprint.pprint(dir(list))    # like list.__dict__.keys(), but sorted
    ['__add__',
     '__class__',
     '__contains__',
     '__delattr__',
     '__delitem__',
     '__delslice__',
     '__doc__',
     '__eq__',
     '__format__',
     '__ge__',
     '__getattribute__',
     '__getitem__',
     '__getslice__',
     '__gt__',
     '__hash__',
     '__iadd__',
     '__imul__',
     '__init__',
     '__iter__',
     '__le__',
     '__len__',
     '__lt__',
     '__mul__',
     '__ne__',
     '__new__',
     '__reduce__',
     '__reduce_ex__',
     '__repr__',
     '__reversed__',
     '__rmul__',
     '__setattr__',
     '__setitem__',
     '__setslice__',
     '__sizeof__',
     '__str__',
     '__subclasshook__',
     'append',
     'count',
     'extend',
     'index',
     'insert',
     'pop',
     'remove',
     'reverse',
     'sort']
The new introspection API gives more information than the old one:  in
addition to the regular methods, it also shows the methods that are
normally invoked through special notations, e.g. __iadd__ (+=), __len__
(len), __ne__ (!=). You can invoke any method from this list directly:
    >>> a = ['tic', 'tac']
    >>> list.__len__(a)          # same as len(a)
    2
    >>> a.__len__()              # ditto
    2
    >>> list.append(a, 'toe')    # same as a.append('toe')
    >>> a
    ['tic', 'tac', 'toe']
    >>>
This is just like it is for user-defined classes.
"""
test_4 = """
Static methods and class methods
The new introspection API makes it possible to add static methods and class
methods. Static methods are easy to describe: they behave pretty much like
static methods in C++ or Java. Here's an example:
    >>> class C:
    ...
    ...     @staticmethod
    ...     def foo(x, y):
    ...         print "staticmethod", x, y
    >>> C.foo(1, 2)
    staticmethod 1 2
    >>> c = C()
    >>> c.foo(1, 2)
    staticmethod 1 2
Class methods use a similar pattern to declare methods that receive an
implicit first argument that is the *class* for which they are invoked.
    >>> class C:
    ...     @classmethod
    ...     def foo(cls, y):
    ...         print "classmethod", cls, y
    >>> C.foo(1)
    classmethod test.test_descrtut.C 1
    >>> c = C()
    >>> c.foo(1)
    classmethod test.test_descrtut.C 1
    >>> class D(C):
    ...     pass
    >>> D.foo(1)
    classmethod test.test_descrtut.D 1
    >>> d = D()
    >>> d.foo(1)
    classmethod test.test_descrtut.D 1
This prints "classmethod __main__.D 1" both times; in other words, the
class passed as the first argument of foo() is the class involved in the
call, not the class involved in the definition of foo().
But notice this:
    >>> class E(C):
    ...     @classmethod
    ...     def foo(cls, y): # override C.foo
    ...         print "E.foo() called"
    ...         C.foo(y)
    >>> E.foo(1)
    E.foo() called
    classmethod test.test_descrtut.C 1
    >>> e = E()
    >>> e.foo(1)
    E.foo() called
    classmethod test.test_descrtut.C 1
In this example, the call to C.foo() from E.foo() will see class C as its
first argument, not class E. This is to be expected, since the call
specifies the class C. But it stresses the difference between these class
methods and methods defined in metaclasses (where an upcall to a metamethod
would pass the target class as an explicit first argument).
"""
test_5 = """
Attributes defined by get/set methods
    >>> class property(object):
    ...
    ...     def __init__(self, get, set=None):
    ...         self.__get = get
    ...         self.__set = set
    ...
    ...     def __get__(self, inst, type=None):
    ...         return self.__get(inst)
    ...
    ...     def __set__(self, inst, value):
    ...         if self.__set is None:
    ...             raise AttributeError, "this attribute is read-only"
    ...         return self.__set(inst, value)
Now let's define a class with an attribute x defined by a pair of methods,
getx() and setx():
    >>> class C(object):
    ...
    ...     def __init__(self):
    ...         self.__x = 0
    ...
    ...     def getx(self):
    ...         return self.__x
    ...
    ...     def setx(self, x):
    ...         if x < 0: x = 0
    ...         self.__x = x
    ...
    ...     x = property(getx, setx)
Here's a small demonstration:
    >>> a = C()
    >>> a.x = 10
    >>> print a.x
    10
    >>> a.x = -10
    >>> print a.x
    0
    >>>
Hmm -- property is builtin now, so let's try it that way too.
    >>> del property  # unmask the builtin
    >>> property
    <type 'property'>
    >>> class C(object):
    ...     def __init__(self):
    ...         self.__x = 0
    ...     def getx(self):
    ...         return self.__x
    ...     def setx(self, x):
    ...         if x < 0: x = 0
    ...         self.__x = x
    ...     x = property(getx, setx)
    >>> a = C()
    >>> a.x = 10
    >>> print a.x
    10
    >>> a.x = -10
    >>> print a.x
    0
    >>>
"""
test_6 = """
Method resolution order
This example is implicit in the writeup.
>>> class A:    # classic class
...     def save(self):
...         print "called A.save()"
>>> class B(A):
...     pass
>>> class C(A):
...     def save(self):
...         print "called C.save()"
>>> class D(B, C):
...     pass
>>> D().save()
called A.save()
>>> class A(object):  # new class
...     def save(self):
...         print "called A.save()"
>>> class B(A):
...     pass
>>> class C(A):
...     def save(self):
...         print "called C.save()"
>>> class D(B, C):
...     pass
>>> D().save()
called C.save()
"""
class A(object):
    def m(self):
        return "A"
class B(A):
    def m(self):
        return "B" + super(B, self).m()
class C(A):
    def m(self):
        return "C" + super(C, self).m()
class D(C, B):
    def m(self):
        return "D" + super(D, self).m()
test_7 = """
Cooperative methods and "super"
>>> print D().m() # "DCBA"
DCBA
"""
test_8 = """
Backwards incompatibilities
>>> class A:
...     def foo(self):
...         print "called A.foo()"
>>> class B(A):
...     pass
>>> class C(A):
...     def foo(self):
...         B.foo(self)
>>> C().foo()
Traceback (most recent call last):
 ...
TypeError: unbound method foo() must be called with B instance as first argument (got C instance instead)
>>> class C(A):
...     def foo(self):
...         A.foo(self)
>>> C().foo()
called A.foo()
"""
__test__ = {"tut1": test_1,
            "tut2": test_2,
            "tut3": test_3,
            "tut4": test_4,
            "tut5": test_5,
            "tut6": test_6,
            "tut7": test_7,
            "tut8": test_8}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    # Obscure:  import this module as test.test_descrtut instead of as
    # plain test_descrtut because the name of this module works its way
    # into the doctest examples, and unless the full test.test_descrtut
    # business is used the name can change depending on how the test is
    # invoked.
    from test import test_support, test_descrtut
    test_support.run_doctest(test_descrtut, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
    test_main(1)
 | 
	gpl-2.0 | 
| 
	xxhank/namebench | 
	nb_third_party/jinja2/meta.py | 
	406 | 
	4144 | 
	# -*- coding: utf-8 -*-
"""
    jinja2.meta
    ~~~~~~~~~~~
    This module implements various functions that exposes information about
    templates that might be interesting for various kinds of applications.
    :copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
class TrackingCodeGenerator(CodeGenerator):
    """We abuse the code generator for introspection."""
    def __init__(self, environment):
        CodeGenerator.__init__(self, environment, '<introspection>',
                               '<introspection>')
        self.undeclared_identifiers = set()
    def write(self, x):
        """Don't write."""
    def pull_locals(self, frame):
        """Remember all undeclared identifiers."""
        self.undeclared_identifiers.update(frame.identifiers.undeclared)
def find_undeclared_variables(ast):
    """Returns a set of all variables in the AST that will be looked up from
    the context at runtime.  Because at compile time it's not known which
    variables will be used depending on the path the execution takes at
    runtime, all variables are returned.
    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
    >>> meta.find_undeclared_variables(ast)
    set(['bar'])
    .. admonition:: Implementation
       Internally the code generator is used for finding undeclared variables.
       This is good to know because the code generator might raise a
       :exc:`TemplateAssertionError` during compilation and as a matter of
       fact this function can currently raise that exception as well.
    """
    codegen = TrackingCodeGenerator(ast.environment)
    codegen.visit(ast)
    return codegen.undeclared_identifiers
def find_referenced_templates(ast):
    """Finds all the referenced templates from the AST.  This will return an
    iterator over all the hardcoded template extensions, inclusions and
    imports.  If dynamic inheritance or inclusion is used, `None` will be
    yielded.
    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
    >>> list(meta.find_referenced_templates(ast))
    ['layout.html', None]
    This function is useful for dependency tracking.  For example if you want
    to rebuild parts of the website after a layout template has changed.
    """
    for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
                              nodes.Include)):
        if not isinstance(node.template, nodes.Const):
            # a tuple with some non consts in there
            if isinstance(node.template, (nodes.Tuple, nodes.List)):
                for template_name in node.template.items:
                    # something const, only yield the strings and ignore
                    # non-string consts that really just make no sense
                    if isinstance(template_name, nodes.Const):
                        if isinstance(template_name.value, basestring):
                            yield template_name.value
                    # something dynamic in there
                    else:
                        yield None
            # something dynamic we don't know about here
            else:
                yield None
            continue
        # constant is a basestring, direct template name
        if isinstance(node.template.value, basestring):
            yield node.template.value
        # a tuple or list (latter *should* not happen) made of consts,
        # yield the consts that are strings.  We could warn here for
        # non string values
        elif isinstance(node, nodes.Include) and \
             isinstance(node.template.value, (tuple, list)):
            for template_name in node.template.value:
                if isinstance(template_name, basestring):
                    yield template_name
        # something else we don't care about, we could warn here
        else:
            yield None
 | 
	apache-2.0 | 
| 
	Luxoft/Twister | 
	binaries/GitPlugin/Git/GITPlugin.py | 
	3 | 
	9694 | 
	
# version: 2.006
import os, sys
import shutil
import time
import pexpect
from BasePlugin import BasePlugin
#
class Plugin(BasePlugin):
    """
    GIT Plugin has a few parameters:
    - server complete path
    - branch used for clone
    - user and password to connect to server
    - snapshot folder, where all data is cloned
    If command is Snapshot, execute a GIT clone;
      if the Snapshot folder is already present, delete it, then GIT clone.
    If command is Update and Overwrite is false, execute a GIT checkout and GIT pull on the specified branch;
      if Overwrite is true, delete the folder, then GIT clone for the specified branch.
    """
    def run(self, args):
        src = self.data.get('server')
        dst = self.data.get('snapshot')
        if not args.get('command'):
            return '*ERROR* Must specify a command like `snapshot` or `update` !'
        if args['command'] == ['snapshot']:
            return self.execCheckout(src, dst, 'clone', overwrite=True)
        elif args['command'] == ['update'] and args['overwrite'] == ['false']:
            return self.execCheckout(src, dst, 'pull', overwrite=False)
        elif args['command'] == ['update'] and args['overwrite'] == ['true']:
            return self.execCheckout(src, dst, 'pull', overwrite=True)
        elif args['command'] == ['delete']:
            return self.execCheckout('', '', '', overwrite=True)
        else:
            return 'Invalid command: `{} & {}`!'.format(args['command'], args['overwrite'])
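    # Illustrative sketch of the args dict handled by run() above (the
    # list-wrapped string values mirror the comparisons made there):
    #
    #     plugin.run({'command': ['snapshot']})
    #     plugin.run({'command': ['update'], 'overwrite': ['false']})
    #     plugin.run({'command': ['update'], 'overwrite': ['true']})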
    def execCheckout(self, src, dst, command, overwrite=False):
        
        usr = self.data['username']
        pwd = self.data['password']
        
        child = pexpect.spawn('bash')
        child.logfile = sys.stdout
        
        child.sendline('su {}'.format(self.user))
        try:
            child.expect('.*$')
        except Exception as e:
            print 'Error: Unable to switch to user {}'.format(self.user)
            return 'Error on switching to user {usr}'.format(usr=self.user)
        
        time.sleep(1)
        
        child.sendline('cd')
        try:
            child.expect('.*')
        except Exception as e:
            print 'Error: Unable to navigate to user {}\'s home folder.'.format(self.user)
            return 'Error on navigating to user {usr}\'s home folder.'.format(usr=self.user)
        
        time.sleep(1)
        
        if not src:
            return '*ERROR* Git source folder is NULL !'
        if '//' not in src:
            return '*ERROR* Git source folder `{}` is invalid !'.format(src)
        if not dst:
            return '*ERROR* Git destination folder is NULL !'
        src = src.replace('//', '//{}@'.format(usr))
        branch = self.data['branch']
        if not branch: 
            return 'You must specify a branch for snapshot/update!'
        
        # Normal Git clone operation
        if command == 'clone' or (command == 'pull' and overwrite):
            if overwrite and os.path.exists(dst):
                print 'GIT Plugin: Deleting folder `{}` ...'.format(dst)
                shutil.rmtree(dst, ignore_errors=True)
            to_exec = 'git clone -b {branch} {src} {dst}'.format(branch=branch, src=src, dst=dst)
            print('GIT Plugin: Exec `{}` .'.format(to_exec.strip()))
            child.sendline(to_exec.strip())
            try:
                i = child.expect(['.*password:','Are you sure.*','Permission denied'], 10)
                if i == 0 and pwd:
                    child.sendline(pwd)
                elif i == 1 and pwd:
                    child.sendline('yes')
                    time.sleep(1)
                    try:
                        child.expect('.*password:')
                    except Exception as e:
                        return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`): `{e}`!'.format(
                            cmd=command, src=src, dst=dst, e=e)
                    time.sleep(1)
                    child.sendline(pwd)
                    
                elif i == 2:
                    print 'Error on calling GIT {cmd} (from `{src}` to `{dst}`): `{e}`!'.format(
                            cmd=command, src=src, dst=dst, e='Permission denied!')
                    return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`): `{e}`!'.format(
                            cmd=command, src=src, dst=dst, e='Permission denied!')         
            except Exception as e:
                return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`): `{e}`!'.format(
                    cmd=command, src=src, dst=dst, e=e)
            time.sleep(1)
            try:
                i = child.expect(['Resolving deltas.*done\.',
                                  'fatal: The remote end hung up unexpectedly',
                                  'Permission denied',
                                  'Could not read from remote repository'], None)
                if i == 1:
                    # fatal: Remote branch branch_name not found in upstream origin
                    print 'Error on calling GIT clone: branch {} does not exist.'.format(branch)
                    return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`)! Branch {br} does not exist!'.format(
                                cmd=command, src=src, dst=dst, br=branch)
                elif i == 2:
                    # that password is incorrect
                    print 'Error on calling GIT clone: Incorrect username or password for GIT repository.'
                    return 'Error on calling GIT clone: Incorrect username or password for GIT repository.'
                elif i == 3:
                    # the path to the repository is incorrect
                    print 'Error on calling GIT clone: Incorrect path for GIT repository.'
                    return 'Error on calling GIT clone: Incorrect path for GIT repository.'
            except Exception as e:
                return 'Error after calling GIT {cmd}: `{e}`!'.format(cmd=command, e=e)
            
            child.sendline('\n\n')
            time.sleep(1)
            print('-'*40)
        # Git pull operation
        elif command == 'pull':
            if not os.path.exists(dst):
                return 'Error: path `{}` does not exist!'.format(dst)
            child.sendline('cd {}'.format(dst))
            try:
                i = child.expect(['Permission denied', 'No such file or directory', '{}'.format(dst)])
                if i == 0:
                    return 'Error: cannot enter directory: {}. Permission denied!'.format(dst)
                elif i == 1:
                    return 'Error: cannot enter directory: {}. No such file or directory!'.format(dst)
            except Exception as e:
                print 'Error: cannot enter directory: {}'.format(dst)
                return 'Error: cannot enter directory: `{dst}`!\n{e}'.format(dst=dst, e=e)
            time.sleep(1)
            to_exec = 'git checkout {}'.format(branch)
            print('GIT Plugin: Exec `{}` .'.format(to_exec.strip()))
            child.sendline(to_exec.strip())
            time.sleep(1)
            try:
                i = child.expect(['Switched to.*',
                                  'Your branch is up-to-date with',
                                  'error',
                                  'Not a git repository',
                                  'Already on.*'], 30)
                if i == 2:
                    # error: pathspec branch_name did not match any file(s) known to git.
                    # the specified branch does not exist on the repository
                    print 'Error on calling GIT checkout: branch {} does not exist.'.format(branch)
                    return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`)!\n\
 Branch `{br}` does not exist!'.format(cmd=command, src=src, dst=dst, br=branch)
                elif i == 3:
                    # fatal: Not a git repository (or any of the parent directories): .git
                    # Trying to make a checkout without making a clone first
                    print 'Error on calling GIT checkout: repository {} does not exist.'.format(dst)
                    return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`)!\n\
 Make a snapshot before doing an update!'.format(cmd=command, src=src, dst=dst, br=branch)
            except Exception as e:
                print 'Error on calling {}. Got unexpected response from GIT.'.format(to_exec)
                return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`): `{e}`!'.format(
                    cmd=command, src=src, dst=dst, e=e)
            time.sleep(1)
            child.sendline('git pull -f')
            try:
                child.expect('.*password:')
            except Exception as e:
                print 'Error after calling GIT pull -f'
                return 'Error after calling GIT {cmd}: `{e}`!'.format(cmd=command, e=e)
            
            time.sleep(1)
            child.sendline(pwd)
            time.sleep(1)
            try:
                i = child.expect(['up-to-date', 'files changed', 'Permission denied'], 120)
                if i == 2:
                    print 'Error on calling GIT pull: Incorrect password'
                    return 'Error on calling GIT pull: Incorrect password'
            except Exception as e:
                return 'Error after calling GIT {cmd}: `{e}`!'.format(cmd=command, e=e)
            
            child.sendline('\n\n')
            time.sleep(1)
            print('-'*40)
        else:
            return '*ERROR* Unknown plugin command `{}`!'.format(command)
        return 'true'
# | 
	apache-2.0 | 
| 
	topix-hackademy/social-listener | 
	application/twitter/tweets/collector.py | 
	1 | 
	3236 | 
	from application.mongo import Connection
from application.twitter.interface import TwitterInterface
from application.twitter.tweets.fetcher import TweetsFetcher
from application.processmanager import ProcessManager
from application.utils.helpers import what_time_is_it
import logging
class TweetCollector(TwitterInterface):
    def __init__(self, user, *args, **kwargs):
        """
        Twitter Collector. This class is used to retrieve tweets from a specific user.
        """
        super(TweetCollector, self).__init__(*args, **kwargs)
        self.user = user
        self.process_name = "Tweets Collector: <%s>" % user
        self.fetcherInstance = TweetsFetcher(self.auth, self.user, self.process_name)
    def __str__(self):
        """
        String representation
        :return:
        """
        return "Tweet Collector for user <{user}>".format(user=self.user)
    def start(self, process_manager):
        """
        Start async job for user's tweets
        :param process_manager: Process manager instance
        :return:
        """
        try:
            process_manager.create_process(target=self.fetcher,
                                           name=self.process_name,
                                           ptype='twitter_collector')
        except Exception:
            raise Exception('Error Creating new Process')
    def fetcher(self):
        """
        Tweets loader
        :return:
        """
        for page in self.fetcherInstance.get_tweets():
            for tweet in page:
                try:
                    if not Connection.Instance().db.twitter.find_one({'user': tweet.user.screen_name,
                                                                      'source': 'collector',
                                                                      'data.id': tweet.id}):
                        Connection.Instance().db.twitter.insert_one({
                            'source': 'collector',
                            'data': {
                                'created_at': tweet.created_at,
                                'favorite_count': tweet.favorite_count,
                                'geo': tweet.geo,
                                'id': tweet.id,
                                'source': tweet.source,
                                'in_reply_to_screen_name': tweet.in_reply_to_screen_name,
                                'in_reply_to_status_id': tweet.in_reply_to_status_id,
                                'in_reply_to_user_id': tweet.in_reply_to_user_id,
                                'retweet_count': tweet.retweet_count,
                                'retweeted': tweet.retweeted,
                                'text': tweet.text,
                                'entities': tweet.entities
                            },
                            'user': tweet.user.screen_name,
                            'created': what_time_is_it()
                        })
                except Exception as genericException:
                    logging.error("MongoDB Insert Error in collector: %s" % genericException)
        import multiprocessing
        ProcessManager.terminate_process(multiprocessing.current_process().pid, True)
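# A minimal usage sketch, assuming the TwitterInterface base class accepts the
# Twitter API credentials and that a process manager instance is available
# (the variable names below are hypothetical):
#
#   collector = TweetCollector('some_screen_name', auth_args)
#   collector.start(process_manager)   # spawns the async 'twitter_collector' process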
 | 
	mit | 
| 
	bristy/login_app_flask | 
	env/lib/python2.7/site-packages/pip/pep425tags.py | 
	469 | 
	2969 | 
	"""Generate and work with PEP 425 Compatibility Tags."""
import sys
import warnings
try:
    import sysconfig
except ImportError:  # pragma nocover
    # Python < 2.7
    import distutils.sysconfig as sysconfig
import distutils.util
def get_abbr_impl():
    """Return abbreviated implementation name."""
    if hasattr(sys, 'pypy_version_info'):
        pyimpl = 'pp'
    elif sys.platform.startswith('java'):
        pyimpl = 'jy'
    elif sys.platform == 'cli':
        pyimpl = 'ip'
    else:
        pyimpl = 'cp'
    return pyimpl
def get_impl_ver():
    """Return implementation version."""
    return ''.join(map(str, sys.version_info[:2]))
def get_platform():
    """Return our platform name 'win32', 'linux_x86_64'"""
    # XXX remove distutils dependency
    return distutils.util.get_platform().replace('.', '_').replace('-', '_')
def get_supported(versions=None, noarch=False):
    """Return a list of supported tags for each version specified in
    `versions`.
    :param versions: a list of string versions, of the form ["33", "32"],
        or None. The first version will be assumed to support our ABI.
    """
    supported = []
    # Versions must be given with respect to the preference
    if versions is None:
        versions = []
        major = sys.version_info[0]
        # Support all previous minor Python versions.
        for minor in range(sys.version_info[1], -1, -1):
            versions.append(''.join(map(str, (major, minor))))
    impl = get_abbr_impl()
    abis = []
    try:
        soabi = sysconfig.get_config_var('SOABI')
    except IOError as e: # Issue #1074
        warnings.warn("{0}".format(e), RuntimeWarning)
        soabi = None
    if soabi and soabi.startswith('cpython-'):
        abis[0:0] = ['cp' + soabi.split('-', 1)[-1]]
    abi3s = set()
    import imp
    for suffix in imp.get_suffixes():
        if suffix[0].startswith('.abi'):
            abi3s.add(suffix[0].split('.', 2)[1])
    abis.extend(sorted(list(abi3s)))
    abis.append('none')
    if not noarch:
        arch = get_platform()
        # Current version, current API (built specifically for our Python):
        for abi in abis:
            supported.append(('%s%s' % (impl, versions[0]), abi, arch))
    # No abi / arch, but requires our implementation:
    for i, version in enumerate(versions):
        supported.append(('%s%s' % (impl, version), 'none', 'any'))
        if i == 0:
            # Tagged specifically as being cross-version compatible
            # (with just the major version specified)
            supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
    # No abi / arch, generic Python
    for i, version in enumerate(versions):
        supported.append(('py%s' % (version,), 'none', 'any'))
        if i == 0:
            supported.append(('py%s' % (version[0]), 'none', 'any'))
    return supported
supported_tags = get_supported()
supported_tags_noarch = get_supported(noarch=True)
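# A minimal sketch of what the precomputed lists look like on a CPython 2.7
# Linux build whose SOABI is 'cpython-27mu' (an assumption; exact values
# depend on the interpreter and platform):
#
#   supported_tags[:2]       == [('cp27', 'cp27mu', 'linux_x86_64'),
#                                ('cp27', 'none', 'linux_x86_64')]
#   supported_tags_noarch[0] == ('cp27', 'none', 'any')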
 | 
	mit | 
| 
	Manuel4131/swampdragon | 
	swampdragon/serializers/serializer_tools.py | 
	9 | 
	3428 | 
	from collections import namedtuple
from django.db.models.fields.related import ForeignKey, ReverseSingleRelatedObjectDescriptor, \
    ManyRelatedObjectsDescriptor, ReverseManyRelatedObjectsDescriptor, ForeignRelatedObjectsDescriptor, \
    SingleRelatedObjectDescriptor
# from django.db.models.related import RelatedObject
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.fields.related import ManyToManyField
class FieldType(namedtuple('FieldType', 'field, model, fk, m2m')):
    '''
    Determine if a field is an m2m, reverse m2m, fk or reverse fk
    '''
    @property
    def is_m2m(self):
        return self.fk is False and self.m2m is True and isinstance(self.field, ForeignObjectRel)
    @property
    def is_reverse_m2m(self):
        return self.fk is True and self.m2m is True and isinstance(self.field, ManyToManyField)
    @property
    def is_fk(self):
        return self.fk is True and self.m2m is False and isinstance(self.field, ForeignKey)
    @property
    def is_reverse_fk(self):
        return self.fk is False and self.m2m is False and isinstance(self.field, ForeignObjectRel)
def get_serializer_relationship_field(serializer, related_serializer):
    if isinstance(serializer, type):
        model = serializer().opts.model
    else:
        model = serializer.opts.model
    if isinstance(related_serializer, type):
        related_model = related_serializer().opts.model
    else:
        related_model = related_serializer.opts.model
    for field_name in related_model._meta.get_all_field_names():
        field_type = FieldType(*related_model._meta.get_field_by_name(field_name))
        field = field_type.field
        # Foreign key
        if field_type.is_fk and field.rel.to is model:
            return field.verbose_name
        # Reverse foreign key
        if field_type.is_reverse_fk and field.model is model:
            return field.var_name
        # M2m fields
        if field_type.is_m2m and field.model is model:
            return field.var_name
        # Reverse m2m field
        if field_type.is_reverse_m2m and field.rel.to is model:
            return field.attname
def get_id_mappings(serializer):
    if not serializer.instance:
        return {}
    data = {}
    for field_name in serializer.opts.publish_fields:
        if not hasattr(serializer, field_name):
            continue
        serializable_field = serializer._get_related_serializer(field_name)
        if not hasattr(serializable_field, 'serialize'):
            continue
        field_type = getattr(serializer.opts.model, field_name)
        is_fk = isinstance(field_type, ReverseSingleRelatedObjectDescriptor)
        is_o2o = isinstance(field_type, SingleRelatedObjectDescriptor)
        is_reverse_fk = isinstance(field_type, ForeignRelatedObjectsDescriptor)
        is_m2m = isinstance(field_type, ManyRelatedObjectsDescriptor)
        is_reverse_m2m = isinstance(field_type, ReverseManyRelatedObjectsDescriptor)
        try:
            val = getattr(serializer.instance, field_name)
        except:
            continue
        if not val:
            continue
        if is_fk or is_o2o:
            data['{}'.format(field_name)] = val.pk
            continue
        if is_reverse_fk or is_m2m or is_reverse_m2m:
            data['{}'.format(field_name)] = list(val.all().values_list('pk', flat=True))
            continue
    return data
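# A minimal sketch of the shape get_id_mappings() produces, assuming a
# serializer whose instance exposes a foreign key `author` and an m2m `tags`
# among its publish_fields (field names here are hypothetical):
#
#   get_id_mappings(serializer) == {'author': 3, 'tags': [7, 9]}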
 | 
	bsd-3-clause | 
| 
	crakensio/django_training | 
	lib/python2.7/site-packages/pygments/lexers/_mapping.py | 
	68 | 
	36995 | 
	# -*- coding: utf-8 -*-
"""
    pygments.lexers._mapping
    ~~~~~~~~~~~~~~~~~~~~~~~~
    Lexer mapping definitions. This file is generated by itself. Every time
    you change something on a builtin lexer definition, run this script from
    the lexers folder to update it.
    Do not alter the LEXERS dictionary by hand.
    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
LEXERS = {
    'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
    'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
    'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
    'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
    'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
    'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
    'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
    'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
    'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
    'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
    'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
    'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
    'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
    'ApacheConfLexer': ('pygments.lexers.text', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
    'AppleScriptLexer': ('pygments.lexers.other', 'AppleScript', ('applescript',), ('*.applescript',), ()),
    'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
    'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
    'AutoItLexer': ('pygments.lexers.other', 'AutoIt', ('autoit', 'Autoit'), ('*.au3',), ('text/x-autoit',)),
    'AutohotkeyLexer': ('pygments.lexers.other', 'autohotkey', ('ahk',), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
    'AwkLexer': ('pygments.lexers.other', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
    'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
    'BaseMakefileLexer': ('pygments.lexers.text', 'Base Makefile', ('basemake',), (), ()),
    'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*'), ('application/x-sh', 'application/x-shellscript')),
    'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
    'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat',), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
    'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
    'BlitzMaxLexer': ('pygments.lexers.compiled', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
    'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
    'BrainfuckLexer': ('pygments.lexers.other', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
    'BroLexer': ('pygments.lexers.other', 'Bro', ('bro',), ('*.bro',), ()),
    'BugsLexer': ('pygments.lexers.math', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
    'CLexer': ('pygments.lexers.compiled', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
    'CMakeLexer': ('pygments.lexers.text', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
    'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
    'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
    'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
    'Ca65Lexer': ('pygments.lexers.asm', 'ca65', ('ca65',), ('*.s',), ()),
    'CbmBasicV2Lexer': ('pygments.lexers.other', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
    'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
    'Cfengine3Lexer': ('pygments.lexers.other', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
    'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire'), (), ('text/html+cheetah', 'text/html+spitfire')),
    'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
    'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
    'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
    'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
    'CobolFreeformatLexer': ('pygments.lexers.compiled', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
    'CobolLexer': ('pygments.lexers.compiled', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
    'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript'), ('*.coffee',), ('text/coffeescript',)),
    'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
    'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
    'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
    'CoqLexer': ('pygments.lexers.functional', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
    'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
    'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
    'CrocLexer': ('pygments.lexers.agile', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
    'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
    'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
    'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
    'CssLexer': ('pygments.lexers.web', 'CSS', ('css',), ('*.css',), ('text/css',)),
    'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
    'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
    'CudaLexer': ('pygments.lexers.compiled', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
    'CythonLexer': ('pygments.lexers.compiled', 'Cython', ('cython', 'pyx'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
    'DLexer': ('pygments.lexers.compiled', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
    'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
    'DarcsPatchLexer': ('pygments.lexers.text', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
    'DartLexer': ('pygments.lexers.web', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
    'DebianControlLexer': ('pygments.lexers.text', 'Debian Control file', ('control',), ('control',), ()),
    'DelphiLexer': ('pygments.lexers.compiled', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
    'DgLexer': ('pygments.lexers.agile', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
    'DiffLexer': ('pygments.lexers.text', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
    'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
    'DtdLexer': ('pygments.lexers.web', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
    'DuelLexer': ('pygments.lexers.web', 'Duel', ('duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
    'DylanConsoleLexer': ('pygments.lexers.compiled', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
    'DylanLexer': ('pygments.lexers.compiled', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
    'DylanLidLexer': ('pygments.lexers.compiled', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
    'ECLLexer': ('pygments.lexers.other', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
    'ECLexer': ('pygments.lexers.compiled', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
    'ElixirConsoleLexer': ('pygments.lexers.functional', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
    'ElixirLexer': ('pygments.lexers.functional', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
    'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
    'ErlangLexer': ('pygments.lexers.functional', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
    'ErlangShellLexer': ('pygments.lexers.functional', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
    'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
    'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
    'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
    'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
    'FactorLexer': ('pygments.lexers.agile', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
    'FancyLexer': ('pygments.lexers.agile', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
    'FantomLexer': ('pygments.lexers.compiled', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
    'FelixLexer': ('pygments.lexers.compiled', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
    'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
    'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('Clipper', 'XBase'), ('*.PRG', '*.prg'), ()),
    'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
    'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas',), ('*.s', '*.S'), ('text/x-gas',)),
    'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
    'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
    'GettextLexer': ('pygments.lexers.text', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
    'GherkinLexer': ('pygments.lexers.other', 'Gherkin', ('Cucumber', 'cucumber', 'Gherkin', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
    'GnuplotLexer': ('pygments.lexers.other', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
    'GoLexer': ('pygments.lexers.compiled', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
    'GoodDataCLLexer': ('pygments.lexers.other', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
    'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
    'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
    'GroffLexer': ('pygments.lexers.text', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
    'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
    'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml', 'HAML'), ('*.haml',), ('text/x-haml',)),
    'HaskellLexer': ('pygments.lexers.functional', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
    'HaxeLexer': ('pygments.lexers.web', 'haXe', ('hx', 'haXe'), ('*.hx',), ('text/haxe',)),
    'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja'), (), ('text/html+django', 'text/html+jinja')),
    'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
    'HtmlLexer': ('pygments.lexers.web', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
    'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
    'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
    'HttpLexer': ('pygments.lexers.text', 'HTTP', ('http',), (), ()),
    'HxmlLexer': ('pygments.lexers.text', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
    'HybrisLexer': ('pygments.lexers.other', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
    'IDLLexer': ('pygments.lexers.math', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
    'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg'), ('*.ini', '*.cfg'), ('text/x-ini',)),
    'IoLexer': ('pygments.lexers.agile', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
    'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
    'IrcLogsLexer': ('pygments.lexers.text', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
    'JadeLexer': ('pygments.lexers.web', 'Jade', ('jade', 'JADE'), ('*.jade',), ('text/x-jade',)),
    'JagsLexer': ('pygments.lexers.math', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
    'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
    'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
    'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
    'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
    'JavascriptLexer': ('pygments.lexers.web', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
    'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
    'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
    'JsonLexer': ('pygments.lexers.web', 'JSON', ('json',), ('*.json',), ('application/json',)),
    'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
    'JuliaConsoleLexer': ('pygments.lexers.math', 'Julia console', ('jlcon',), (), ()),
    'JuliaLexer': ('pygments.lexers.math', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
    'KconfigLexer': ('pygments.lexers.other', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
    'KokaLexer': ('pygments.lexers.functional', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
    'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
    'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
    'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
    'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
    'LassoLexer': ('pygments.lexers.web', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
    'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
    'LighttpdConfLexer': ('pygments.lexers.text', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
    'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell'), ('*.lhs',), ('text/x-literate-haskell',)),
    'LiveScriptLexer': ('pygments.lexers.web', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
    'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
    'LogosLexer': ('pygments.lexers.compiled', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
    'LogtalkLexer': ('pygments.lexers.other', 'Logtalk', ('logtalk',), ('*.lgt',), ('text/x-logtalk',)),
    'LuaLexer': ('pygments.lexers.agile', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
    'MOOCodeLexer': ('pygments.lexers.other', 'MOOCode', ('moocode',), ('*.moo',), ('text/x-moocode',)),
    'MakefileLexer': ('pygments.lexers.text', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
    'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
    'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
    'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
    'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
    'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
    'MaqlLexer': ('pygments.lexers.other', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
    'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
    'MatlabLexer': ('pygments.lexers.math', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
    'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()),
    'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)),
    'ModelicaLexer': ('pygments.lexers.other', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
    'Modula2Lexer': ('pygments.lexers.compiled', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
    'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
    'MonkeyLexer': ('pygments.lexers.compiled', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
    'MoonScriptLexer': ('pygments.lexers.agile', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
    'MscgenLexer': ('pygments.lexers.other', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
    'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
    'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),
    'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
    'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
    'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
    'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
    'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
    'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
    'NSISLexer': ('pygments.lexers.other', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
    'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
    'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
    'NewLispLexer': ('pygments.lexers.functional', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
    'NewspeakLexer': ('pygments.lexers.other', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
    'NginxConfLexer': ('pygments.lexers.text', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
    'NimrodLexer': ('pygments.lexers.compiled', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
    'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()),
    'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
    'ObjectiveCLexer': ('pygments.lexers.compiled', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
    'ObjectiveCppLexer': ('pygments.lexers.compiled', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
    'ObjectiveJLexer': ('pygments.lexers.web', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
    'OcamlLexer': ('pygments.lexers.functional', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
    'OctaveLexer': ('pygments.lexers.math', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
    'OocLexer': ('pygments.lexers.compiled', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
    'OpaLexer': ('pygments.lexers.functional', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
    'OpenEdgeLexer': ('pygments.lexers.other', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
    'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm'), ('text/x-perl', 'application/x-perl')),
    'PhpLexer': ('pygments.lexers.web', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
    'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
    'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript',), ('*.ps', '*.eps'), ('application/postscript',)),
    'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
    'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
    'PovrayLexer': ('pygments.lexers.other', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
    'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1'), ('*.ps1',), ('text/x-powershell',)),
    'PrologLexer': ('pygments.lexers.compiled', 'Prolog', ('prolog',), ('*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
    'PropertiesLexer': ('pygments.lexers.text', 'Properties', ('properties',), ('*.properties',), ('text/x-java-properties',)),
    'ProtoBufLexer': ('pygments.lexers.other', 'Protocol Buffer', ('protobuf',), ('*.proto',), ()),
    'PuppetLexer': ('pygments.lexers.other', 'Puppet', ('puppet',), ('*.pp',), ()),
    'PyPyLogLexer': ('pygments.lexers.text', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
    'Python3Lexer': ('pygments.lexers.agile', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
    'Python3TracebackLexer': ('pygments.lexers.agile', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
    'PythonConsoleLexer': ('pygments.lexers.agile', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
    'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
    'PythonTracebackLexer': ('pygments.lexers.agile', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
    'QmlLexer': ('pygments.lexers.web', 'QML', ('qml', 'Qt Meta Language', 'Qt modeling Language'), ('*.qml',), ('application/x-qml',)),
    'RConsoleLexer': ('pygments.lexers.math', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
    'RPMSpecLexer': ('pygments.lexers.other', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
    'RacketLexer': ('pygments.lexers.functional', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktl'), ('text/x-racket', 'application/x-racket')),
    'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
    'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
    'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
    'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
    'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
    'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
    'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
    'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
    'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
    'RdLexer': ('pygments.lexers.math', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
    'RebolLexer': ('pygments.lexers.other', 'REBOL', ('rebol',), ('*.r', '*.r3'), ('text/x-rebol',)),
    'RedcodeLexer': ('pygments.lexers.other', 'Redcode', ('redcode',), ('*.cw',), ()),
    'RegeditLexer': ('pygments.lexers.text', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
    'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
    'RobotFrameworkLexer': ('pygments.lexers.other', 'RobotFramework', ('RobotFramework', 'robotframework'), ('*.txt', '*.robot'), ('text/x-robotframework',)),
    'RstLexer': ('pygments.lexers.text', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
    'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
    'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
    'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs', '*.rc'), ('text/x-rustsrc',)),
    'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
    'SMLLexer': ('pygments.lexers.functional', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
    'SassLexer': ('pygments.lexers.web', 'Sass', ('sass', 'SASS'), ('*.sass',), ('text/x-sass',)),
    'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
    'ScamlLexer': ('pygments.lexers.web', 'Scaml', ('scaml', 'SCAML'), ('*.scaml',), ('text/x-scaml',)),
    'SchemeLexer': ('pygments.lexers.functional', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
    'ScilabLexer': ('pygments.lexers.math', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
    'ScssLexer': ('pygments.lexers.web', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
    'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
    'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
    'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak'), ('*.st',), ('text/x-smalltalk',)),
    'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
    'SnobolLexer': ('pygments.lexers.other', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
    'SourcePawnLexer': ('pygments.lexers.other', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
    'SourcesListLexer': ('pygments.lexers.text', 'Debian Sourcelist', ('sourceslist', 'sources.list'), ('sources.list',), ()),
    'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
    'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
    'SquidConfLexer': ('pygments.lexers.text', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
    'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
    'StanLexer': ('pygments.lexers.math', 'Stan', ('stan',), ('*.stan',), ()),
    'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
    'TclLexer': ('pygments.lexers.agile', 'Tcl', ('tcl',), ('*.tcl',), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
    'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
    'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
    'TexLexer': ('pygments.lexers.text', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
    'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
    'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
    'TypeScriptLexer': ('pygments.lexers.web', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
    'UrbiscriptLexer': ('pygments.lexers.other', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
    'VGLLexer': ('pygments.lexers.other', 'VGL', ('vgl',), ('*.rpf',), ()),
    'ValaLexer': ('pygments.lexers.compiled', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
    'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
    'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
    'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
    'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
    'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
    'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
    'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
    'VimLexer': ('pygments.lexers.text', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
    'XQueryLexer': ('pygments.lexers.web', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
    'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
    'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
    'XmlLexer': ('pygments.lexers.web', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
    'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
    'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
    'XsltLexer': ('pygments.lexers.web', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
    'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
    'YamlLexer': ('pygments.lexers.text', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
}
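# A minimal sketch of how this mapping is typically read: each value unpacks
# into the defining module, display name, aliases, filename patterns and
# mimetypes, so a caller can import the named module on demand.
#
#   module_name, name, aliases, filenames, mimetypes = LEXERS['PythonLexer']
#   # module_name == 'pygments.lexers.agile' and 'python' in aliases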
if __name__ == '__main__':
    import sys
    import os
    # lookup lexers
    found_lexers = []
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for filename in os.listdir('.'):
        if filename.endswith('.py') and not filename.startswith('_'):
            module_name = 'pygments.lexers.%s' % filename[:-3]
            print module_name
            module = __import__(module_name, None, None, [''])
            for lexer_name in module.__all__:
                lexer = getattr(module, lexer_name)
                found_lexers.append(
                    '%r: %r' % (lexer_name,
                                (module_name,
                                 lexer.name,
                                 tuple(lexer.aliases),
                                 tuple(lexer.filenames),
                                 tuple(lexer.mimetypes))))
    # sort them, that should make the diff files for svn smaller
    found_lexers.sort()
    # extract useful sourcecode from this file
    f = open(__file__)
    try:
        content = f.read()
    finally:
        f.close()
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]
    # write new file
    f = open(__file__, 'wb')
    f.write(header)
    f.write('LEXERS = {\n    %s,\n}\n\n' % ',\n    '.join(found_lexers))
    f.write(footer)
    f.close()
 | 
	cc0-1.0 | 
| 
	Mistobaan/tensorflow | 
	tensorflow/contrib/layers/python/layers/regularizers.py | 
	82 | 
	7339 | 
	# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regularizers for use with layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.platform import tf_logging as logging
__all__ = ['l1_regularizer',
           'l2_regularizer',
           'l1_l2_regularizer',
           'sum_regularizer',
           'apply_regularization']
def l1_regularizer(scale, scope=None):
  """Returns a function that can be used to apply L1 regularization to weights.
  L1 regularization encourages sparsity.
  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: An optional scope name.
  Returns:
    A function with signature `l1(weights)` that applies L1 regularization.
  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % scale)
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
                       scale)
    if scale == 0.:
      logging.info('Scale of 0 disables regularizer.')
      return lambda _: None
  def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
      my_scale = ops.convert_to_tensor(scale,
                                       dtype=weights.dtype.base_dtype,
                                       name='scale')
      return standard_ops.multiply(
          my_scale,
          standard_ops.reduce_sum(standard_ops.abs(weights)),
          name=name)
  return l1
def l2_regularizer(scale, scope=None):
  """Returns a function that can be used to apply L2 regularization to weights.
  Small values of L2 can help prevent overfitting the training data.
  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: An optional scope name.
  Returns:
    A function with signature `l2(weights)` that applies L2 regularization.
  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % (scale,))
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g.' %
                       scale)
    if scale == 0.:
      logging.info('Scale of 0 disables regularizer.')
      return lambda _: None
  def l2(weights):
    """Applies l2 regularization to weights."""
    with ops.name_scope(scope, 'l2_regularizer', [weights]) as name:
      my_scale = ops.convert_to_tensor(scale,
                                       dtype=weights.dtype.base_dtype,
                                       name='scale')
      return standard_ops.multiply(my_scale, nn.l2_loss(weights), name=name)
  return l2
def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
  """Returns a function that can be used to apply L1 L2 regularizations.
  Args:
    scale_l1: A scalar multiplier `Tensor` for L1 regularization.
    scale_l2: A scalar multiplier `Tensor` for L2 regularization.
    scope: An optional scope name.
  Returns:
    A function with signature `l1_l2(weights)` that applies a weighted sum of
    L1 and L2 regularization.
  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if isinstance(scale_l1, numbers.Integral):
    raise ValueError('scale_l1 cannot be an integer: %s' % (scale_l1,))
  if isinstance(scale_l2, numbers.Integral):
    raise ValueError('scale_l2 cannot be an integer: %s' % (scale_l2,))
  scope = scope or 'l1_l2_regularizer'
  if scale_l1 == 0.:
    return l2_regularizer(scale_l2, scope)
  if scale_l2 == 0.:
    return l1_regularizer(scale_l1, scope)
  return sum_regularizer([l1_regularizer(scale_l1),
                          l2_regularizer(scale_l2)],
                         scope=scope)
def sum_regularizer(regularizer_list, scope=None):
  """Returns a function that applies the sum of multiple regularizers.
  Args:
    regularizer_list: A list of regularizers to apply.
    scope: An optional scope name
  Returns:
    A function with signature `sum_reg(weights)` that applies the
    sum of all the input regularizers.
  """
  regularizer_list = [reg for reg in regularizer_list if reg is not None]
  if not regularizer_list:
    return None
  def sum_reg(weights):
    """Applies the sum of all the input regularizers."""
    with ops.name_scope(scope, 'sum_regularizer', [weights]) as name:
      regularizer_tensors = [reg(weights) for reg in regularizer_list]
      return math_ops.add_n(regularizer_tensors, name=name)
  return sum_reg
def apply_regularization(regularizer, weights_list=None):
  """Returns the summed penalty by applying `regularizer` to the `weights_list`.
  Adding a regularization penalty over the layer weights and embedding weights
  can help prevent overfitting the training data. Regularization over layer
  biases is less common/useful, but assuming proper data preprocessing/mean
  subtraction, it usually shouldn't hurt much either.
  Args:
    regularizer: A function that takes a single `Tensor` argument and returns
      a scalar `Tensor` output.
    weights_list: List of weights `Tensors` or `Variables` to apply
      `regularizer` over. Defaults to the `GraphKeys.WEIGHTS` collection if
      `None`.
  Returns:
    A scalar representing the overall regularization penalty.
  Raises:
    ValueError: If `regularizer` does not return a scalar output, or if we find
        no weights.
  """
  if not weights_list:
    weights_list = ops.get_collection(ops.GraphKeys.WEIGHTS)
  if not weights_list:
    raise ValueError('No weights to regularize.')
  with ops.name_scope('get_regularization_penalty',
                      values=weights_list) as scope:
    penalties = [regularizer(w) for w in weights_list]
    penalties = [
        p if p is not None else constant_op.constant(0.0) for p in penalties
    ]
    for p in penalties:
      if p.get_shape().ndims != 0:
        raise ValueError('regularizer must return a scalar Tensor instead of a '
                         'Tensor with rank %d.' % p.get_shape().ndims)
    summed_penalty = math_ops.add_n(penalties, name=scope)
    ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, summed_penalty)
    return summed_penalty
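# A hedged usage sketch (not part of the original module): combining the factory
# functions above on a couple of toy variables.  The variable names below are
# invented for illustration, and a TF1-style graph-mode API is assumed.
if __name__ == '__main__':
  import tensorflow as tf
  weights_a = tf.get_variable('weights_a', shape=[10, 5])
  weights_b = tf.get_variable('weights_b', shape=[5, 1])
  regularizer = l1_l2_regularizer(scale_l1=0.001, scale_l2=0.01)
  penalty = apply_regularization(regularizer, [weights_a, weights_b])
  # `penalty` is a scalar Tensor; it would typically be added to the loss.
  print(penalty)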
 | 
	apache-2.0 | 
| 
	baris/pushmanager | 
	testing/testdb.py | 
	1 | 
	4248 | 
	#!/usr/bin/python
from datetime import datetime, timedelta
import os
import sqlite3
import tempfile
import time
from core import db
def create_temp_db_file():
    fd, db_file_path = tempfile.mkstemp(suffix="pushmanager.db")
    os.close(fd)
    return db_file_path
def get_temp_db_uri(dbfile=None):
    if not dbfile:
        dbfile = create_temp_db_file()
    return "sqlite:///" + dbfile
def make_test_db(dbfile=None):
    if not dbfile:
        dbfile = create_temp_db_file()
    testsql = open(
        os.path.join(
            os.path.dirname(__file__),
            "testdb.sql"
        )
    ).read()
    test_db = sqlite3.connect(dbfile)
    test_db.cursor().executescript(testsql)
    test_db.commit()
    test_db.close()
    return dbfile
class FakeDataMixin(object):
    now = time.time()
    yesterday = time.mktime((datetime.now() - timedelta(days=1)).timetuple())
    push_data = [
        [10, 'OnePush', 'bmetin', 'deploy-1', 'abc', 'live', yesterday, now, 'regular', ''],
        [11, 'TwoPush', 'troscoe', 'deploy-2', 'def', 'accepting', now, now, 'regular', ''],
        [12, 'RedPush', 'heyjoe', 'deploy-3', 'ghi', 'accepting', now, now, 'regular', ''],
        [13, 'BluePush', 'humpty', 'deploy-4', 'jkl', 'accepting', now, now, 'regular', ''],
    ]
    push_keys = [
        'id', 'title', 'user', 'branch', 'revision', 'state',
        'created', 'modified', 'pushtype', 'extra_pings'
    ]
    fake_revision = "0"*40
    request_data = [
        [10, 'keysersoze', 'requested', 'keysersoze', 'usual_fix', '', now, now, 'Fix stuff', 'no comment', 12345, '', fake_revision],
        [11, 'bmetin', 'requested', 'bmetin', 'fix1', '', now, now, 'Fixing more stuff', 'yes comment', 234, '', fake_revision],
        [12, 'testuser1', 'requested', 'testuser2', 'fix1', 'search', now, now, 'Fixing1', 'no comment', 123, '', fake_revision],
        [13, 'testuser2', 'requested', 'testuser2', 'fix2', 'search', now, now, 'Fixing2', 'yes comment', 456, '', fake_revision],
    ]
    request_keys = [
        'id', 'user', 'state', 'repo', 'branch', 'tags', 'created', 'modified',
        'title', 'comments', 'reviewid', 'description', 'revision'
    ]
    def on_db_return(self, success, db_results):
        assert success
    def make_push_dict(self, data):
        return dict(zip(self.push_keys, data))
    def make_request_dict(self, data):
        return dict(zip(self.request_keys, data))
    def insert_pushes(self):
        push_queries = []
        for pd in self.push_data:
            push_queries.append(db.push_pushes.insert(self.make_push_dict(pd)))
        db.execute_transaction_cb(push_queries, self.on_db_return)
    def insert_requests(self):
        request_queries = []
        for rd in self.request_data:
            request_queries.append(db.push_requests.insert(self.make_request_dict(rd)))
        db.execute_transaction_cb(request_queries, self.on_db_return)
    def insert_pushcontent(self, requestid, pushid):
        db.execute_cb(
            db.push_pushcontents.insert({'request': requestid, 'push': pushid}),
            self.on_db_return
        )
    def get_push_for_request(self, requestid):
        pushid = [None]
        def on_select_return(success, db_results):
            assert success
            _, pushid[0] = db_results.fetchone()
        # check whether there is already a push associated with this request
        first_pushcontent_query = db.push_pushcontents.select(
            db.push_pushcontents.c.request == requestid
        )
        db.execute_cb(first_pushcontent_query, on_select_return)
        return pushid[0]
    def get_pushes(self):
        pushes = [None]
        def on_select_return(success, db_results):
            assert success
            pushes[0] = db_results.fetchall()
        db.execute_cb(db.push_pushes.select(), on_select_return)
        return pushes[0]
    def get_requests(self):
        requests = [None]
        def on_select_return(success, db_results):
            assert success
            requests[0] = db_results.fetchall()
        db.execute_cb(db.push_requests.select(), on_select_return)
        return requests[0]
    def get_requests_by_user(self, user):
        return [req for req in self.get_requests() if req['user'] == user]
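if __name__ == "__main__":
    # Hedged usage sketch (not part of pushmanager's helpers): build a
    # throwaway SQLite database from testdb.sql, print its SQLAlchemy URI and
    # remove it again.  Assumes testdb.sql lives next to this file and that
    # pushmanager's core package is importable, as the imports above require.
    db_path = make_test_db()
    print(get_temp_db_uri(db_path))
    os.unlink(db_path)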
 | 
	apache-2.0 | 
| 
	c2theg/DDoS_Information_Sharing | 
	libraries/suds-jurko-0.6/suds/properties.py | 
	18 | 
	15900 | 
	# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Properties classes.
"""
class AutoLinker(object):
    """
    Base class, provides interface for I{automatic} link
    management between a L{Properties} object and the L{Properties}
    contained within I{values}.
    """
    def updated(self, properties, prev, next):
        """
        Notification that a value was updated and the linkage
        between the I{properties} contained within I{prev} needs to
        be relinked to the L{Properties} contained within the
        I{next} value.
        """
        pass
class Link(object):
    """
    Property link object.
    @ivar endpoints: A tuple of the (2) endpoints of the link.
    @type endpoints: tuple(2)
    """
    def __init__(self, a, b):
        """
        @param a: Property (A) to link.
        @type a: L{Property}
        @param b: Property (B) to link.
        @type b: L{Property}
        """
        pA = Endpoint(self, a)
        pB = Endpoint(self, b)
        self.endpoints = (pA, pB)
        self.validate(a, b)
        a.links.append(pB)
        b.links.append(pA)
    def validate(self, pA, pB):
        """
        Validate that the two properties may be linked.
        @param pA: Endpoint (A) to link.
        @type pA: L{Endpoint}
        @param pB: Endpoint (B) to link.
        @type pB: L{Endpoint}
        @return: self
        @rtype: L{Link}
        """
        if pA in pB.links or \
           pB in pA.links:
            raise Exception, 'Already linked'
        dA = pA.domains()
        dB = pB.domains()
        for d in dA:
            if d in dB:
                raise Exception, 'Duplicate domain "%s" found' % d
        for d in dB:
            if d in dA:
                raise Exception, 'Duplicate domain "%s" found' % d
        kA = pA.keys()
        kB = pB.keys()
        for k in kA:
            if k in kB:
                raise Exception, 'Duplicate key %s found' % k
        for k in kB:
            if k in kA:
                raise Exception, 'Duplicate key %s found' % k
        return self
    def teardown(self):
        """
        Teardown the link.
        Removes endpoints from properties I{links} collection.
        @return: self
        @rtype: L{Link}
        """
        pA, pB = self.endpoints
        if pA in pB.links:
            pB.links.remove(pA)
        if pB in pA.links:
            pA.links.remove(pB)
        return self
class Endpoint(object):
    """
    Link endpoint (wrapper).
    @ivar link: The associated link.
    @type link: L{Link}
    @ivar target: The properties object.
    @type target: L{Property}
    """
    def __init__(self, link, target):
        self.link = link
        self.target = target
    def teardown(self):
        return self.link.teardown()
    def __eq__(self, rhs):
        return ( self.target == rhs )
    def __hash__(self):
        return hash(self.target)
    def __getattr__(self, name):
        return getattr(self.target, name)
class Definition:
    """
    Property definition.
    @ivar name: The property name.
    @type name: str
    @ivar classes: The (class) list of permitted values
    @type classes: tuple
    @ivar default: The default value.
    @type default: any
    """
    def __init__(self, name, classes, default, linker=AutoLinker()):
        """
        @param name: The property name.
        @type name: str
        @param classes: The (class) list of permitted values
        @type classes: tuple
        @param default: The default value.
        @type default: any
        """
        if not isinstance(classes, (list, tuple)):
            classes = (classes,)
        self.name = name
        self.classes = classes
        self.default = default
        self.linker = linker
    def nvl(self, value=None):
        """
        Convert the I{value} into the default when I{None}.
        @param value: The proposed value.
        @type value: any
        @return: The I{default} when I{value} is I{None}, else I{value}.
        @rtype: any
        """
        if value is None:
            return self.default
        else:
            return value
    def validate(self, value):
        """
        Validate the I{value} is of the correct class.
        @param value: The value to validate.
        @type value: any
        @raise AttributeError: When I{value} is invalid.
        """
        if value is None:
            return
        if len(self.classes) and \
            not isinstance(value, self.classes):
                msg = '"%s" must be: %s' % (self.name, self.classes)
                raise AttributeError,msg
    def __repr__(self):
        return '%s: %s' % (self.name, str(self))
    def __str__(self):
        s = []
        if len(self.classes):
            s.append('classes=%s' % str(self.classes))
        else:
            s.append('classes=*')
        s.append("default=%s" % str(self.default))
        return ', '.join(s)
class Properties:
    """
    Represents basic application properties.
    Provides basic type validation, default values and
    link/synchronization behavior.
    @ivar domain: The domain name.
    @type domain: str
    @ivar definitions: A table of property definitions.
    @type definitions: {name: L{Definition}}
    @ivar links: A list of linked property objects used to create
        a network of properties.
    @type links: [L{Property},..]
    @ivar defined: A dict of property values.
    @type defined: dict
    """
    def __init__(self, domain, definitions, kwargs):
        """
        @param domain: The property domain name.
        @type domain: str
        @param definitions: A table of property definitions.
        @type definitions: {name: L{Definition}}
        @param kwargs: A list of property name/values to set.
        @type kwargs: dict
        """
        self.definitions = {}
        for d in definitions:
            self.definitions[d.name] = d
        self.domain = domain
        self.links = []
        self.defined = {}
        self.modified = set()
        self.prime()
        self.update(kwargs)
    def definition(self, name):
        """
        Get the definition for the property I{name}.
        @param name: The property I{name} to find the definition for.
        @type name: str
        @return: The property definition
        @rtype: L{Definition}
        @raise AttributeError: On not found.
        """
        d = self.definitions.get(name)
        if d is None:
            raise AttributeError(name)
        return d
    def update(self, other):
        """
        Update the property values as specified by keyword/value.
        @param other: An object to update from.
        @type other: (dict|L{Properties})
        @return: self
        @rtype: L{Properties}
        """
        if isinstance(other, Properties):
            other = other.defined
        for n,v in other.items():
            self.set(n, v)
        return self
    def notset(self, name):
        """
        Get whether a property has never been set by I{name}.
        @param name: A property name.
        @type name: str
        @return: True if never been set.
        @rtype: bool
        """
        return self.provider(name).__notset(name)
    def set(self, name, value):
        """
        Set the I{value} of a property by I{name}.
        The value is validated against the definition and set
        to the default when I{value} is None.
        @param name: The property name.
        @type name: str
        @param value: The new property value.
        @type value: any
        @return: self
        @rtype: L{Properties}
        """
        self.provider(name).__set(name, value)
        return self
    def unset(self, name):
        """
        Unset a property by I{name}.
        @param name: A property name.
        @type name: str
        @return: self
        @rtype: L{Properties}
        """
        self.provider(name).__set(name, None)
        return self
    def get(self, name, *df):
        """
        Get the value of a property by I{name}.
        @param name: The property name.
        @type name: str
        @param df: An optional value to be returned when the value
            is not set
        @type df: [1].
        @return: The stored value, or I{df[0]} if not set.
        @rtype: any
        """
        return self.provider(name).__get(name, *df)
    def link(self, other):
        """
        Link (associate) this object with an I{other} properties object
        to create a network of properties.  Links are bidirectional.
        @param other: The object to link.
        @type other: L{Properties}
        @return: self
        @rtype: L{Properties}
        """
        Link(self, other)
        return self
    def unlink(self, *others):
        """
        Unlink (disassociate) the specified properties object.
        @param others: The list of objects to unlink.  Unspecified means unlink all.
        @type others: [L{Properties},..]
        @return: self
        @rtype: L{Properties}
        """
        if not len(others):
            others = self.links[:]
        for p in self.links[:]:
            if p in others:
                p.teardown()
        return self
    def provider(self, name, history=None):
        """
        Find the provider of the property by I{name}.
        @param name: The property name.
        @type name: str
        @param history: A history of nodes checked to prevent
            circular hunting.
        @type history: [L{Properties},..]
        @return: The provider when found.  Otherwise, None (when nested)
            and I{self} when not nested.
        @rtype: L{Properties}
        """
        if history is None:
            history = []
        history.append(self)
        if name in self.definitions:
            return self
        for x in self.links:
            if x in history:
                continue
            provider = x.provider(name, history)
            if provider is not None:
                return provider
        history.remove(self)
        if len(history):
            return None
        return self
    def keys(self, history=None):
        """
        Get the set of I{all} property names.
        @param history: A history of nodes checked to prevent
            circular hunting.
        @type history: [L{Properties},..]
        @return: A set of property names.
        @rtype: list
        """
        if history is None:
            history = []
        history.append(self)
        keys = set()
        keys.update(self.definitions.keys())
        for x in self.links:
            if x in history:
                continue
            keys.update(x.keys(history))
        history.remove(self)
        return keys
    def domains(self, history=None):
        """
        Get the set of I{all} domain names.
        @param history: A history of nodes checked to prevent
            circular hunting.
        @type history: [L{Properties},..]
        @return: A set of domain names.
        @rtype: list
        """
        if history is None:
            history = []
        history.append(self)
        domains = set()
        domains.add(self.domain)
        for x in self.links:
            if x in history:
                continue
            domains.update(x.domains(history))
        history.remove(self)
        return domains
    def prime(self):
        """
        Prime the stored values based on default values
        found in property definitions.
        @return: self
        @rtype: L{Properties}
        """
        for d in self.definitions.values():
            self.defined[d.name] = d.default
        return self
    def __notset(self, name):
        return not (name in self.modified)
    def __set(self, name, value):
        d = self.definition(name)
        d.validate(value)
        value = d.nvl(value)
        prev = self.defined[name]
        self.defined[name] = value
        self.modified.add(name)
        d.linker.updated(self, prev, value)
    def __get(self, name, *df):
        d = self.definition(name)
        value = self.defined.get(name)
        if value == d.default and len(df):
            value = df[0]
        return value
    def str(self, history):
        s = []
        s.append('Definitions:')
        for d in self.definitions.values():
            s.append('\t%s' % repr(d))
        s.append('Content:')
        for d in self.defined.items():
            s.append('\t%s' % str(d))
        if self not in history:
            history.append(self)
            s.append('Linked:')
            for x in self.links:
                s.append(x.str(history))
            history.remove(self)
        return '\n'.join(s)
    def __repr__(self):
        return str(self)
    def __str__(self):
        return self.str([])
class Skin(object):
    """
    The meta-programming I{skin} around the L{Properties} object.
    @ivar __pts__: The wrapped object.
    @type __pts__: L{Properties}.
    """
    def __init__(self, domain, definitions, kwargs):
        self.__pts__ = Properties(domain, definitions, kwargs)
    def __setattr__(self, name, value):
        builtin = name.startswith('__') and name.endswith('__')
        if builtin:
            self.__dict__[name] = value
            return
        self.__pts__.set(name, value)
    def __getattr__(self, name):
        return self.__pts__.get(name)
    def __repr__(self):
        return str(self)
    def __str__(self):
        return str(self.__pts__)
class Unskin(object):
    def __new__(self, *args, **kwargs):
        return args[0].__pts__
class Inspector:
    """
    Wrapper inspector.
    """
    def __init__(self, options):
        self.properties = options.__pts__
    def get(self, name, *df):
        """
        Get the value of a property by I{name}.
        @param name: The property name.
        @type name: str
        @param df: An optional value to be returned when the value
            is not set
        @type df: [1].
        @return: The stored value, or I{df[0]} if not set.
        @rtype: any
        """
        return self.properties.get(name, *df)
    def update(self, **kwargs):
        """
        Update the property values as specified by keyword/value.
        @param kwargs: A list of property name/values to set.
        @type kwargs: dict
        @return: self
        @rtype: L{Properties}
        """
        return self.properties.update(kwargs)
    def link(self, other):
        """
        Link (associate) this object with an I{other} properties object
        to create a network of properties.  Links are bidirectional.
        @param other: The object to link.
        @type other: L{Properties}
        @return: self
        @rtype: L{Properties}
        """
        p = other.__pts__
        return self.properties.link(p)
    def unlink(self, other):
        """
        Unlink (disassociate) the specified properties object.
        @param other: The object to unlink.
        @type other: L{Properties}
        @return: self
        @rtype: L{Properties}
        """
        p = other.__pts__
        return self.properties.unlink(p)
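if __name__ == '__main__':
    # Hedged usage sketch (illustrative only, not part of suds): define two
    # properties and read them back through the Skin wrapper.  The property
    # names 'timeout' and 'faults' are invented for this example.
    definitions = [
        Definition('timeout', (int, float), 90),
        Definition('faults', (bool,), True),
    ]
    options = Skin('example', definitions, dict(timeout=30))
    print(options.timeout)  # 30   (explicitly set, validated against (int, float))
    print(options.faults)   # True (falls back to the definition default)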
 | 
	mit | 
| 
	andreashorn/lead_dbs | 
	ext_libs/SlicerNetstim/WarpDrive/WarpDriveLib/Effects/Effect.py | 
	1 | 
	4254 | 
	import vtk, qt, slicer
class AbstractEffect():
  """
  One instance of this will be created per-view when the effect
  is selected.  It is responsible for implementing feedback and
  label map changes in response to user input.
  This class observes the editor parameter node to configure itself
  and queries the current view for background and label volume
  nodes to operate on.
  """
  def __init__(self,sliceWidget):
    # sliceWidget to operate on and convenience variables
    # to access the internals
    self.sliceWidget = sliceWidget
    self.sliceLogic = sliceWidget.sliceLogic()
    self.sliceView = self.sliceWidget.sliceView()
    self.interactor = self.sliceView.interactorStyle().GetInteractor()
    self.renderWindow = self.sliceWidget.sliceView().renderWindow()
    self.renderer = self.renderWindow.GetRenderers().GetItemAsObject(0)
    #self.editUtil = EditUtil.EditUtil()
    # optionally set by users of the class
    self.undoRedo = None
    # actors in the renderer that need to be cleaned up on destruction
    self.actors = []
    # the current operation
    self.actionState = None
    # set up observers on the interactor
    # - keep track of tags so these can be removed later
    # - currently all editor effects are restricted to these events
    # - make the observers high priority so they can override other
    #   event processors
    self.interactorObserverTags = []
    events = ( vtk.vtkCommand.LeftButtonPressEvent,
      vtk.vtkCommand.LeftButtonReleaseEvent,
      vtk.vtkCommand.MiddleButtonPressEvent,
      vtk.vtkCommand.MiddleButtonReleaseEvent,
      vtk.vtkCommand.RightButtonPressEvent,
      vtk.vtkCommand.RightButtonReleaseEvent,
      vtk.vtkCommand.LeftButtonDoubleClickEvent,
      vtk.vtkCommand.MouseMoveEvent,
      vtk.vtkCommand.KeyPressEvent,
      vtk.vtkCommand.KeyReleaseEvent,
      vtk.vtkCommand.EnterEvent,
      vtk.vtkCommand.LeaveEvent,
      vtk.vtkCommand.MouseWheelForwardEvent,
      vtk.vtkCommand.MouseWheelBackwardEvent)
    for e in events:
      tag = self.interactor.AddObserver(e, self.processEvent, 1.0)
      self.interactorObserverTags.append(tag)
    self.sliceNodeTags = []
    sliceNode = self.sliceLogic.GetSliceNode()
    tag = sliceNode.AddObserver(vtk.vtkCommand.ModifiedEvent, self.processEvent, 1.0)
    self.sliceNodeTags.append(tag)
    # spot for tracking the current cursor while it is turned off for painting
    self.savedCursor = None
  def processEvent(self, caller=None, event=None):
    """Event filter that lisens for certain key events that
    should be responded to by all events.
    Currently:
      '\\' - pick up paint color from current location (eyedropper)
    """
    if event == "KeyPressEvent":
      key = self.interactor.GetKeySym()
      if key.lower() == 's':
        return True
    return False
  def cursorOff(self):
    """Turn off and save the current cursor so
    the user can see the background image during editing"""
    qt.QApplication.setOverrideCursor(qt.QCursor(10))
    #self.savedCursor = self.sliceWidget.cursor
    #qt_BlankCursor = 10
    #self.sliceWidget.setCursor(qt.QCursor(qt_BlankCursor))
  def cursorOn(self):
    """Restore the saved cursor if it exists, otherwise
    just restore the default cursor"""
    qt.QApplication.restoreOverrideCursor()
    #if self.savedCursor:
    #  self.sliceWidget.setCursor(self.savedCursor)
    #else:
    #  self.sliceWidget.unsetCursor()
  def abortEvent(self,event):
    """Set the AbortFlag on the vtkCommand associated
    with the event - causes other things listening to the
    interactor not to receive the events"""
    # TODO: make interactorObserverTags a map so we can
    # explicitly abort just the event we handled - it will
    # be slightly more efficient
    for tag in self.interactorObserverTags:
      cmd = self.interactor.GetCommand(tag)
      cmd.SetAbortFlag(1)
  def cleanup(self):
    """clean up actors and observers"""
    for a in self.actors:
      self.renderer.RemoveActor2D(a)
    self.sliceView.scheduleRender()
    for tag in self.interactorObserverTags:
      self.interactor.RemoveObserver(tag)
    sliceNode = self.sliceLogic.GetSliceNode()
    for tag in self.sliceNodeTags:
      sliceNode.RemoveObserver(tag)
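# Hedged subclass sketch (illustrative only, not part of WarpDrive): a concrete
# effect derives from AbstractEffect and overrides processEvent(), e.g.:
#
#   class MyEffect(AbstractEffect):
#     def processEvent(self, caller=None, event=None):
#       if event == "LeftButtonPressEvent":
#         xy = self.interactor.GetEventPosition()
#         # ... react to the click in this slice view ...
#         self.abortEvent(event)
#         return True
#       return AbstractEffect.processEvent(self, caller, event)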
 | 
	gpl-3.0 | 
| 
	quattor/aquilon | 
	lib/aquilon/worker/commands/add_service.py | 
	2 | 
	1676 | 
	# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2016  Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq add service`."""
from aquilon.exceptions_ import AuthorizationException
from aquilon.aqdb.model import Service
from aquilon.worker.broker import BrokerCommand
class CommandAddService(BrokerCommand):
    requires_plenaries = True
    required_parameters = ["service"]
    def render(self, session, plenaries, dbuser, service, need_client_list,
               allow_alias_bindings, comments, **_):
        Service.get_unique(session, service, preclude=True)
        if dbuser.role.name != 'aqd_admin' and allow_alias_bindings is not None:
            raise AuthorizationException("Only AQD admin can set allowing alias bindings")
        dbservice = Service(name=service, comments=comments,
                            need_client_list=need_client_list, allow_alias_bindings=allow_alias_bindings)
        session.add(dbservice)
        plenaries.add(dbservice)
        session.flush()
        plenaries.write()
        return
 | 
	apache-2.0 | 
| 
	bertucho/epic-movie-quotes-quiz | 
	dialogos/build/Twisted/twisted/internet/test/test_glibbase.py | 
	39 | 
	2284 | 
	# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.internet.glibbase.
"""
from __future__ import division, absolute_import
import sys
from twisted.trial.unittest import TestCase
from twisted.internet._glibbase import ensureNotImported
class EnsureNotImportedTests(TestCase):
    """
    L{ensureNotImported} protects against unwanted past and future imports.
    """
    def test_ensureWhenNotImported(self):
        """
        If the specified modules have never been imported, and import
        prevention is requested, L{ensureNotImported} makes sure they will not
        be imported in the future.
        """
        modules = {}
        self.patch(sys, "modules", modules)
        ensureNotImported(["m1", "m2"], "A message.",
                          preventImports=["m1", "m2", "m3"])
        self.assertEqual(modules, {"m1": None, "m2": None, "m3": None})
    def test_ensureWhenNotImportedDontPrevent(self):
        """
        If the specified modules have never been imported, and import
        prevention is not requested, L{ensureNotImported} has no effect.
        """
        modules = {}
        self.patch(sys, "modules", modules)
        ensureNotImported(["m1", "m2"], "A message.")
        self.assertEqual(modules, {})
    def test_ensureWhenFailedToImport(self):
        """
        If the specified modules have been set to C{None} in C{sys.modules},
        L{ensureNotImported} does not complain.
        """
        modules = {"m2": None}
        self.patch(sys, "modules", modules)
        ensureNotImported(["m1", "m2"], "A message.", preventImports=["m1", "m2"])
        self.assertEqual(modules, {"m1": None, "m2": None})
    def test_ensureFailsWhenImported(self):
        """
        If one of the specified modules has been previously imported,
        L{ensureNotImported} raises an exception.
        """
        module = object()
        modules = {"m2": module}
        self.patch(sys, "modules", modules)
        e = self.assertRaises(ImportError, ensureNotImported,
                              ["m1", "m2"], "A message.",
                              preventImports=["m1", "m2"])
        self.assertEqual(modules, {"m2": module})
        self.assertEqual(e.args, ("A message.",))
 | 
	mit | 
| 
	marcoantoniooliveira/labweb | 
	oscar/apps/order/south_migrations/0027_no_null_in_charfields.py | 
	8 | 
	47820 | 
	# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(DataMigration):
    def forwards(self, orm):
        orm.Line.objects.filter(partner_name__isnull=True).update(partner_name='')
        orm.Line.objects.filter(status__isnull=True).update(status='')
        orm.Line.objects.filter(partner_line_reference__isnull=True).update(partner_line_reference='')
        orm.Line.objects.filter(partner_line_notes__isnull=True).update(partner_line_notes='')
        orm.OrderDiscount.objects.filter(offer_name__isnull=True).update(offer_name='')
        orm.OrderDiscount.objects.filter(voucher_code__isnull=True).update(voucher_code='')
        orm.OrderDiscount.objects.filter(message__isnull=True).update(message='')
        orm.OrderNote.objects.filter(note_type__isnull=True).update(note_type='')
        orm.Order.objects.filter(status__isnull=True).update(status='')
        orm.Order.objects.filter(shipping_method__isnull=True).update(shipping_method='')
        orm.Order.objects.filter(guest_email__isnull=True).update(guest_email='')
        orm.BillingAddress.objects.filter(first_name__isnull=True).update(first_name='')
        orm.BillingAddress.objects.filter(title__isnull=True).update(title='')
        orm.BillingAddress.objects.filter(line4__isnull=True).update(line4='')
        orm.BillingAddress.objects.filter(line3__isnull=True).update(line3='')
        orm.BillingAddress.objects.filter(line2__isnull=True).update(line2='')
        orm.BillingAddress.objects.filter(state__isnull=True).update(state='')
        orm.BillingAddress.objects.filter(postcode__isnull=True).update(postcode='')
        orm.ShippingEvent.objects.filter(notes__isnull=True).update(notes='')
        orm.ShippingAddress.objects.filter(first_name__isnull=True).update(first_name='')
        orm.ShippingAddress.objects.filter(title__isnull=True).update(title='')
        orm.ShippingAddress.objects.filter(notes__isnull=True).update(notes='')
        orm.ShippingAddress.objects.filter(line4__isnull=True).update(line4='')
        orm.ShippingAddress.objects.filter(line3__isnull=True).update(line3='')
        orm.ShippingAddress.objects.filter(line2__isnull=True).update(line2='')
        orm.ShippingAddress.objects.filter(state__isnull=True).update(state='')
        orm.ShippingAddress.objects.filter(postcode__isnull=True).update(postcode='')
    def backwards(self, orm):
        raise RuntimeError("Cannot reverse this migration.")
    models = {
        u'address.country': {
            'Meta': {'ordering': "('-display_order', 'name')", 'object_name': 'Country'},
            'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
            'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
            'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        AUTH_USER_MODEL: {
            'Meta': {'object_name': AUTH_USER_MODEL_NAME},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'basket.basket': {
            'Meta': {'object_name': 'Basket'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_merged': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_submitted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'baskets'", 'null': 'True', 'to': u"orm['{0}']".format(AUTH_USER_MODEL)}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '128'}),
            'vouchers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['voucher.Voucher']", 'null': 'True', 'blank': 'True'})
        },
        u'catalogue.attributeentity': {
            'Meta': {'object_name': 'AttributeEntity'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': u"orm['catalogue.AttributeEntityType']"})
        },
        u'catalogue.attributeentitytype': {
            'Meta': {'object_name': 'AttributeEntityType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
        },
        u'catalogue.attributeoption': {
            'Meta': {'object_name': 'AttributeOption'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': u"orm['catalogue.AttributeOptionGroup']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'catalogue.attributeoptiongroup': {
            'Meta': {'object_name': 'AttributeOptionGroup'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'catalogue.category': {
            'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
        },
        u'catalogue.option': {
            'Meta': {'object_name': 'Option'},
            'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
        },
        u'catalogue.product': {
            'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
            'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.ProductAttribute']", 'through': u"orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Category']", 'through': u"orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': u"orm['catalogue.Product']"}),
            'product_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['catalogue.ProductClass']"}),
            'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
            'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Product']", 'symmetrical': 'False', 'through': u"orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
            'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'upc': ('oscar.models.fields.NullCharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        u'catalogue.productattribute': {
            'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
            'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
            'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
            'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
        },
        u'catalogue.productattributevalue': {
            'Meta': {'object_name': 'ProductAttributeValue'},
            'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ProductAttribute']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': u"orm['catalogue.Product']"}),
            'value_boolean': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
            'value_file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'value_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
            'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'catalogue.productcategory': {
            'Meta': {'ordering': "['product', 'category']", 'object_name': 'ProductCategory'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Category']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
        },
        u'catalogue.productclass': {
            'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
            'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        u'catalogue.productrecommendation': {
            'Meta': {'object_name': 'ProductRecommendation'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': u"orm['catalogue.Product']"}),
            'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'customer.communicationeventtype': {
            'Meta': {'object_name': 'CommunicationEventType'},
            'category': ('django.db.models.fields.CharField', [], {'default': "u'Order related'", 'max_length': '255'}),
            'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'null': 'True', 'blank': 'True'})
        },
        u'offer.benefit': {
            'Meta': {'object_name': 'Benefit'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
        },
        u'offer.condition': {
            'Meta': {'object_name': 'Condition'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
        },
        u'offer.conditionaloffer': {
            'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'},
            'benefit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Benefit']"}),
            'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Condition']"}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_basket_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'max_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'max_global_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'max_user_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'num_applications': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}),
            'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
            'slug': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '64'}),
            'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
        },
        u'offer.range': {
            'Meta': {'object_name': 'Range'},
            'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': u"orm['catalogue.ProductClass']"}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': u"orm['catalogue.Category']"}),
            'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'through': u"orm['offer.RangeProduct']", 'to': u"orm['catalogue.Product']"}),
            'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True'})
        },
        u'offer.rangeproduct': {
            'Meta': {'unique_together': "(('range', 'product'),)", 'object_name': 'RangeProduct'},
            'display_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"}),
            'range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Range']"})
        },
        u'order.billingaddress': {
            'Meta': {'object_name': 'BillingAddress'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['address.Country']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'postcode': ('oscar.models.fields.UppercaseCharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        u'order.communicationevent': {
            'Meta': {'ordering': "['-date_created']", 'object_name': 'CommunicationEvent'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['customer.CommunicationEventType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'communication_events'", 'to': u"orm['order.Order']"})
        },
        u'order.line': {
            'Meta': {'object_name': 'Line'},
            'est_dispatch_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line_price_before_discounts_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'line_price_before_discounts_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'line_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'line_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': u"orm['order.Order']"}),
            'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['partner.Partner']"}),
            'partner_line_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'partner_line_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'partner_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'stockrecord': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['partner.StockRecord']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'unit_cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'unit_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'unit_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'unit_retail_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'upc': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
        },
        u'order.lineattribute': {
            'Meta': {'object_name': 'LineAttribute'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': u"orm['order.Line']"}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_attributes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['catalogue.Option']"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'order.lineprice': {
            'Meta': {'ordering': "('id',)", 'object_name': 'LinePrice'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': u"orm['order.Line']"}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_prices'", 'to': u"orm['order.Order']"}),
            'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
            'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'})
        },
        u'order.order': {
            'Meta': {'ordering': "['-date_placed']", 'object_name': 'Order'},
            'basket': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['basket.Basket']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.BillingAddress']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
            'date_placed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'guest_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.ShippingAddress']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'shipping_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
            'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
            'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
            'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'total_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'total_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['{0}']".format(AUTH_USER_MODEL)})
        },
        u'order.orderdiscount': {
            'Meta': {'object_name': 'OrderDiscount'},
            'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
            'category': ('django.db.models.fields.CharField', [], {'default': "'Basket'", 'max_length': '64'}),
            'frequency': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'offer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'offer_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discounts'", 'to': u"orm['order.Order']"}),
            'voucher_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'voucher_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'order.ordernote': {
            'Meta': {'object_name': 'OrderNote'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'note_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': u"orm['order.Order']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['{0}']".format(AUTH_USER_MODEL), 'null': 'True'})
        },
        u'order.paymentevent': {
            'Meta': {'ordering': "['-date_created']", 'object_name': 'PaymentEvent'},
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.PaymentEventType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['order.Line']", 'through': u"orm['order.PaymentEventQuantity']", 'symmetrical': 'False'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'to': u"orm['order.Order']"}),
            'reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'shipping_event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'null': 'True', 'to': u"orm['order.ShippingEvent']"})
        },
        u'order.paymenteventquantity': {
            'Meta': {'object_name': 'PaymentEventQuantity'},
            'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': u"orm['order.PaymentEvent']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_event_quantities'", 'to': u"orm['order.Line']"}),
            'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'order.paymenteventtype': {
            'Meta': {'ordering': "('name',)", 'object_name': 'PaymentEventType'},
            'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        u'order.shippingaddress': {
            'Meta': {'object_name': 'ShippingAddress'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['address.Country']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'phone_number': ('oscar.models.fields.PhoneNumberField', [], {'max_length': '128', 'blank': 'True'}),
            'postcode': ('oscar.models.fields.UppercaseCharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        u'order.shippingevent': {
            'Meta': {'ordering': "['-date_created']", 'object_name': 'ShippingEvent'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.ShippingEventType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lines': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shipping_events'", 'symmetrical': 'False', 'through': u"orm['order.ShippingEventQuantity']", 'to': u"orm['order.Line']"}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_events'", 'to': u"orm['order.Order']"})
        },
        u'order.shippingeventquantity': {
            'Meta': {'object_name': 'ShippingEventQuantity'},
            'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': u"orm['order.ShippingEvent']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_event_quantities'", 'to': u"orm['order.Line']"}),
            'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'order.shippingeventtype': {
            'Meta': {'ordering': "('name',)", 'object_name': 'ShippingEventType'},
            'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'partner.partner': {
            'Meta': {'object_name': 'Partner'},
            'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['{0}']".format(AUTH_USER_MODEL)})
        },
        u'partner.stockrecord': {
            'Meta': {'unique_together': "(('partner', 'partner_sku'),)", 'object_name': 'StockRecord'},
            'cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'low_stock_threshold': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_allocated': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_in_stock': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'partner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': u"orm['partner.Partner']"}),
            'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'price_currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
            'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'price_retail': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': u"orm['catalogue.Product']"})
        },
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'voucher.voucher': {
            'Meta': {'object_name': 'Voucher'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'end_datetime': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'num_basket_additions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'offers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'vouchers'", 'symmetrical': 'False', 'to': u"orm['offer.ConditionalOffer']"}),
            'start_datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
            'usage': ('django.db.models.fields.CharField', [], {'default': "'Multi-use'", 'max_length': '128'})
        }
    }
    complete_apps = ['order']
    symmetrical = True
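
# ---------------------------------------------------------------------------
# Illustrative note, not part of the original migration: each entry in the
# frozen ``models`` dict above is a South field triple of the form
# (dotted field class path, positional args, keyword args serialized as
# strings).  A minimal sketch of how one such triple maps back to an
# ordinary model field, assuming standard South freezing conventions:
#
#     # frozen form, as stored above
#     'name': ('django.db.models.fields.CharField', [],
#              {'unique': 'True', 'max_length': '128'})
#
#     # equivalent model declaration
#     name = models.CharField(unique=True, max_length=128)
#
# ``complete_apps = ['order']`` marks every model of the ``order`` app as
# frozen here, so accessors like ``orm['order.Order']`` are available inside
# forwards()/backwards().
# ---------------------------------------------------------------------------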
 | 
	bsd-3-clause | 
| 
	dhruvsrivastava/OJ | 
	flask/lib/python2.7/site-packages/requests/packages/chardet/big5freq.py | 
	3133 | 
	82594 | 
	######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128  --> 0.42261
# 256  --> 0.57851
# 512  --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
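# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original chardet source: how the
# figures quoted above fit together.  The coverage table says the 512 most
# frequent characters account for ~74.851% of typical Big5 text, which is
# where the ideal ratio comes from; the shipped constant is roughly a
# quarter of that ideal, as the comments state.  The ``_ILLUSTRATIVE_*``
# names below are additions for exposition only.
_ILLUSTRATIVE_IDEAL_RATIO = 0.74851 / (1 - 0.74851)            # ~2.98
_ILLUSTRATIVE_RANDOM_RATIO = 512.0 / (5401 - 512)               # ~0.105
_ILLUSTRATIVE_TYPICAL_RATIO = 0.25 * _ILLUSTRATIVE_IDEAL_RATIO  # ~0.75
# During detection, characters whose value in the table below is under 512
# count as "frequent"; the observed frequent-to-rare ratio is weighed
# against BIG5_TYPICAL_DISTRIBUTION_RATIO when scoring confidence.
# ---------------------------------------------------------------------------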
Big5CharToFreqOrder = (
   1,1801,1506, 255,1431, 198,   9,  82,   6,5008, 177, 202,3681,1256,2821, 110, #   16
3814,  33,3274, 261,  76,  44,2114,  16,2946,2187,1176, 659,3971,  26,3451,2653, #   32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964,   8, 204,  58,4510,5009,1932, #   48
  63,5010,5011, 317,1614,  75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, #   64
3682,   3,  10,3973,1471,  29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, #   80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947,  34,3556,3204,  64, 604, #   96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337,  72, 406,5017,  80, #  112
 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449,  69,2987, 591, #  128
 179,2096, 471, 115,2035,1844,  60,  50,2988, 134, 806,1869, 734,2036,3454, 180, #  144
 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, #  160
2502,  90,2716,1338, 663,  11, 906,1099,2553,  20,2441, 182, 532,1716,5019, 732, #  176
1376,4204,1311,1420,3206,  25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, #  192
3276, 475,1447,3683,5020, 117,  21, 656, 810,1297,2300,2334,3557,5021, 126,4205, #  208
 706, 456, 150, 613,4513,  71,1118,2037,4206, 145,3092,  85, 835, 486,2115,1246, #  224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, #  240
3558,3135,5023,1956,1153,4207,  83, 296,1199,3093, 192, 624,  93,5024, 822,1898, #  256
2823,3136, 795,2065, 991,1554,1542,1592,  27,  43,2867, 859, 139,1456, 860,4514, #  272
 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, #  288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, #  304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, #  320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277,  88,1723,2038,3978,1951, 212, #  336
 266, 152, 149, 468,1899,4208,4516,  77, 187,5028,3038,  37,   5,2990,5029,3979, #  352
5030,5031,  39,2524,4517,2908,3208,2079,  55, 148,  74,4518, 545, 483,1474,1029, #  368
1665, 217,1870,1531,3138,1104,2655,4209,  24, 172,3562, 900,3980,3563,3564,4519, #  384
  32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683,   4,3039,3351,1427,1789, #  400
 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, #  416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439,  38,5037,1063,5038, 794, #  432
3982,1435,2301,  46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804,  35, 707, #  448
 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, #  464
2129,1363,3689,1423, 697, 100,3094,  48,  70,1231, 495,3139,2196,5043,1294,5044, #  480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, #  496
 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, #  512
 287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, #  528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, #  544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, #  560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, #  576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381,   7, #  592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, #  608
 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, #  624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, #  640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, #  656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, #  672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, #  688
 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, #  704
  98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, #  720
 523,2789,2790,2658,5061, 141,2235,1333,  68, 176, 441, 876, 907,4220, 603,2602, #  736
 710, 171,3464, 404, 549,  18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, #  752
5063,2991, 368,5064, 146, 366,  99, 871,3693,1543, 748, 807,1586,1185,  22,2263, #  768
 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, #  784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068,  59,5069, #  800
 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, #  816
 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, #  832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, #  848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, #  864
 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, #  880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, #  896
4224,  57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, #  912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, #  928
 279,3145,  51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, #  944
 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, #  960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, #  976
4227,2475,1436, 953,4228,2055,4545, 671,2400,  79,4229,2446,3285, 608, 567,2689, #  992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045,   0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102,  96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127,  67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
  78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
 165, 243,4559,3703,2528, 123, 683,4239, 764,4560,  36,3998,1793, 589,2916, 816, # 1232
 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875,   2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198,  13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133,  92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252,  94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068,  23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
 598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863,  41, # 1520
5170,5171,4575,5172,1657,2338,  19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733,  40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596,  17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551,  30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168,  87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
 730,1515, 184,2840,  66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186,  15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286,  89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278,  54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
  97,  81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587,  14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547,  62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
 391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294,  86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885,  28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131,  95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654,  53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309,  84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873,  65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371,  12,2668,  45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859,  91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771,  61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063,  56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076,  49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629,  31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
 194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412,  42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116,  52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
 510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550,  47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
 919,2352,2975,2353,1270,4727,4115,  73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
 121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
 479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376  #last 512
# Everything below is of no interest for detection purposes
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
 | 
	bsd-3-clause | 
| 
	clagiordano/projectDeploy | 
	modules/utils.py | 
	1 | 
	1458 | 
	#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
import shlex
import socket
import modules.outputUtils as out
def getSessionInfo():
    """Collect username, client IP address and hostname for the current session."""
    info = {}
    # "who am i" reports the current login line; its last field is the
    # originating host in parentheses, e.g. "(192.168.0.10)" or "(:0)".
    output = subprocess.Popen(["who", "am", "i"], stdout=subprocess.PIPE).communicate()
    output = output[0].strip().split(' ')
    info['username'] = os.getlogin()
    info['ipaddress'] = output[-1][1:-1]
    info['hostname'] = socket.gethostname()
    if info['ipaddress'] != ":0":
        # Remote session: try to resolve the client address to a name.
        try:
            # gethostbyaddr() returns (name, aliases, addresses); keep the name only.
            info['hostname'] = socket.gethostbyaddr(info['ipaddress'])[0]
        except socket.error:
            try:
                info['hostname'] = getNetbiosHostname(info['ipaddress'])
            except Exception:
                info['hostname'] = info['ipaddress']
    return info
def getNetbiosHostname(ipaddress):
    """Resolve a hostname via NetBIOS using the nmblookup tool."""
    output = runShellCommand("nmblookup -A " + ipaddress, False)
    # The NetBIOS name is expected in the first whitespace-separated field of
    # the second output line; a failed lookup yields a line starting with "No".
    hostname = output[0].split('\n')[1].split(' ')[0].strip()
    if hostname == 'No':
        hostname = output[0]
    return hostname
def runShellCommand(command, shell=True):
    """Run a command and return a (stdout, exit_status, stderr) tuple."""
    try:
        # With shell=True the command must be handed to the shell as a single
        # string; otherwise split it into an argument list.
        p = subprocess.Popen(
            command if shell else shlex.split(command),
            shell=shell,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        command_output, command_error = p.communicate()
        exit_status = p.returncode
    except Exception:
        # out.fatalError() is expected to terminate the process, so the
        # return below is only reached on success.
        out.fatalError("Failed to execute command " + command)
    return command_output, exit_status, command_error
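# Minimal usage sketch (assumption, not part of the original module): run this
# file as a script on a host where the "who" command is available.
if __name__ == "__main__":
    session = getSessionInfo()
    print("user: %s  host: %s  ip: %s" % (
        session['username'], session['hostname'], session['ipaddress']))
    stdout, status, stderr = runShellCommand("uname -a")
    print("uname exited with status %d: %s" % (status, stdout.strip()))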
 | 
	lgpl-3.0 | 
| 
	mkuron/espresso | 
	testsuite/python/dpd.py | 
	1 | 
	15785 | 
	#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import unittest as ut
import unittest_decorators as utx
from itertools import product
import espressomd
from espressomd.observables import DPDStress
from tests_common import single_component_maxwell
@utx.skipIfMissingFeatures("DPD")
class DPDThermostat(ut.TestCase):
    """Tests the velocity distribution created by the dpd thermostat against
       the single component Maxwell distribution."""
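    # Background (textbook DPD form, not taken from this file): for each
    # interacting pair the thermostat applies a dissipative force
    #   F_D = -gamma * w(r) * (v_ij . r_hat) * r_hat
    # and a random force
    #   F_R = sigma * sqrt(w(r)) * theta_ij * r_hat,  with sigma^2 = 2 * gamma * kT,
    # so that in equilibrium the velocities follow the Maxwell distribution
    # checked by the tests below.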
    s = espressomd.System(box_l=3*[10.0])
    s.time_step = 0.01
    s.cell_system.skin = 0.4
    def setUp(self):
        self.s.seed = range(self.s.cell_system.get_state()["n_nodes"])
        np.random.seed(16)
    def tearDown(self):
        s = self.s
        s.part.clear()
    def check_velocity_distribution(self, vel, minmax, n_bins, error_tol, kT):
        """check the recorded particle distributions in velocity against a
           histogram with n_bins bins. Drop velocities outside minmax. Check
           individual histogram bins up to an accuracy of error_tol against
           the analytical result for kT."""
        for i in range(3):
            hist = np.histogram(vel[:, i], range=(-minmax, minmax), bins=n_bins, density=False)
            data = hist[0]/float(vel.shape[0])
            bins = hist[1]
            for j in range(n_bins):
                found = data[j]
                expected = single_component_maxwell(bins[j], bins[j+1], kT)
                self.assertLessEqual(abs(found - expected), error_tol)
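    # single_component_maxwell() is imported from tests_common; presumably it
    # integrates the normalized one-dimensional Maxwell velocity density
    #   p(v) = exp(-v^2 / (2 kT)) / sqrt(2 pi kT)
    # between the two bin edges, which is what each histogram bin is compared
    # against above (test_aa_verify_single_component_maxwell checks that the
    # integral over (-10, 10) is close to 1).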
    def test_aa_verify_single_component_maxwell(self):
        """Verifies the normalization of the analytical expression."""
        self.assertLessEqual(
            abs(single_component_maxwell(-10, 10, 4.)-1.), 1E-4)
    def check_total_zero(self):
        # Compare the magnitude of each component so that a negative total
        # momentum also fails the check.
        v_total = np.sum(self.s.part[:].v, axis=0)
        self.assertLess(abs(v_total[0]), 1e-11)
        self.assertLess(abs(v_total[1]), 1e-11)
        self.assertLess(abs(v_total[2]), 1e-11)
    def test_single(self):
        """Test velocity distribution of a dpd fluid with a single type."""
        N = 200
        s = self.s
        s.part.add(pos=s.box_l * np.random.random((N, 3)))
        kT = 2.3
        gamma = 1.5
        s.thermostat.set_dpd(kT=kT, seed=42)
        s.non_bonded_inter[0, 0].dpd.set_params(
            weight_function=0, gamma=gamma, r_cut=1.5,
            trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.5)
        s.integrator.run(100)
        loops = 250
        v_stored = np.zeros((N*loops, 3))
        for i in range(loops):
            s.integrator.run(10)
            v_stored[i*N:(i+1)*N, :] = s.part[:].v
        v_minmax = 5
        bins = 5
        error_tol = 0.01
        self.check_velocity_distribution(
            v_stored, v_minmax, bins, error_tol, kT)
        self.check_total_zero()
    def test_binary(self):
        """Test velocity distribution of binary dpd fluid"""
        N = 200
        s = self.s
        s.part.add(pos=s.box_l * np.random.random((N // 2, 3)), type=N//2*[0])
        s.part.add(pos=s.box_l * np.random.random((N // 2, 3)), type=N//2*[1])
        kT = 2.3
        gamma = 1.5
        s.thermostat.set_dpd(kT=kT, seed=42)
        s.non_bonded_inter[0, 0].dpd.set_params(
            weight_function=0, gamma=gamma, r_cut=1.0,
            trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.0)
        s.non_bonded_inter[1, 1].dpd.set_params(
            weight_function=0, gamma=gamma, r_cut=1.0,
            trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.0)
        s.non_bonded_inter[0, 1].dpd.set_params(
            weight_function=0, gamma=gamma, r_cut=1.5,
            trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.5)
        s.integrator.run(100)
        loops = 400
        v_stored = np.zeros((N*loops, 3))
        for i in range(loops):
            s.integrator.run(10)
            v_stored[i*N:(i+1)*N,:] = s.part[:].v
        v_minmax = 5
        bins = 5
        error_tol = 0.01
        self.check_velocity_distribution(
            v_stored, v_minmax, bins, error_tol, kT)
        self.check_total_zero()
    def test_disable(self):
        N = 200
        s = self.s
        s.time_step = 0.01
        s.part.add(pos=s.box_l * np.random.random((N, 3)))
        kT = 2.3
        gamma = 1.5
        s.thermostat.set_dpd(kT=kT, seed=42)
        s.non_bonded_inter[0, 0].dpd.set_params(
            weight_function=0, gamma=gamma, r_cut=1.5,
            trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.5)
        s.integrator.run(10)
        s.thermostat.turn_off()
        # Reset velocities
        s.part[:].v = [1., 2., 3.]
        s.integrator.run(10)
        # Check that there was neither noise nor friction
        for v in s.part[:].v:
            for i in range(3):
                self.assertTrue(v[i] == float(i + 1))
        # Turn back on
        s.thermostat.set_dpd(kT=kT, seed=42)
        # Reset velocities for faster convergence
        s.part[:].v = [0., 0., 0.]
        # Equilibrate
        s.integrator.run(250)
        loops = 250
        v_stored = np.zeros((N*loops, 3))
        for i in range(loops):
            s.integrator.run(10)
            v_stored[i*N:(i+1)*N,:] = s.part[:].v
        v_minmax = 5
        bins = 5
        error_tol = 0.012
        self.check_velocity_distribution(
            v_stored, v_minmax, bins, error_tol, kT)
    def test_const_weight_function(self):
        s = self.s
        kT = 0.
        gamma = 1.42
        s.thermostat.set_dpd(kT=kT, seed=42)
        s.non_bonded_inter[0, 0].dpd.set_params(
            weight_function=0, gamma=gamma, r_cut=1.2,
            trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.4)
        s.part.add(id=0, pos=[5, 5, 5], type=0, v=[0, 0, 0])
        v = [.5, .8, .3]
        s.part.add(id=1, pos=[3, 5, 5], type=0, v=v)
        s.integrator.run(0)
        # Outside of both cutoffs, forces should be 0
        for f in s.part[:].f:
            self.assertTrue(f[0] == 0.)
            self.assertTrue(f[1] == 0.)
            self.assertTrue(f[2] == 0.)
        # Only trans
        s.part[1].pos = [5. - 1.3, 5, 5]
        s.integrator.run(0)
        # Only trans, so x component should be zero
        self.assertLess(abs(s.part[0].f[0]), 1e-16)
        # f = gamma * v_ij
        self.assertTrue(abs(s.part[0].f[1] - gamma * v[1]) < 1e-11)
        self.assertTrue(abs(s.part[0].f[2] - gamma * v[2]) < 1e-11)
        # Momentum conservation
        self.assertLess(abs(s.part[1].f[0]), 1e-16)
        self.assertTrue(abs(s.part[1].f[1] + gamma * v[1]) < 1e-11)
        self.assertTrue(abs(s.part[1].f[2] + gamma * v[2]) < 1e-11)
        # Trans and parallel
        s.part[1].pos = [5. - 1.1, 5, 5]
        s.integrator.run(0)
        self.assertTrue(abs(s.part[0].f[0] - gamma * v[0]) < 1e-11)
        self.assertTrue(abs(s.part[0].f[1] - gamma * v[1]) < 1e-11)
        self.assertTrue(abs(s.part[0].f[2] - gamma * v[2]) < 1e-11)
        self.assertTrue(abs(s.part[1].f[0] + gamma * v[0]) < 1e-11)
        self.assertTrue(abs(s.part[1].f[1] + gamma * v[1]) < 1e-11)
        self.assertTrue(abs(s.part[1].f[2] + gamma * v[2]) < 1e-11)
    def test_linear_weight_function(self):
        s = self.s
        kT = 0.
        gamma = 1.42
        s.thermostat.set_dpd(kT=kT, seed=42)
        s.non_bonded_inter[0, 0].dpd.set_params(
            weight_function=1, gamma=gamma, r_cut=1.2,
            trans_weight_function=1, trans_gamma=gamma, trans_r_cut=1.4)
        def omega(dist, r_cut):
            return (1. - dist / r_cut)
        s.part.add(id=0, pos=[5, 5, 5], type=0, v=[0, 0, 0])
        v = [.5, .8, .3]
        s.part.add(id=1, pos=[3, 5, 5], type=0, v=v)
        s.integrator.run(0)
        # Outside of both cutoffs, forces should be 0
        for f in s.part[:].f:
            self.assertTrue(f[0] == 0.)
            self.assertTrue(f[1] == 0.)
            self.assertTrue(f[2] == 0.)
        # Only trans
        s.part[1].pos = [5. - 1.3, 5, 5]
        s.integrator.run(0)
        # Only trans, so x component should be zero
        self.assertLess(abs(s.part[0].f[0]), 1e-16)
        # f = gamma * v_ij
        self.assertTrue(
            abs(s.part[0].f[1] - omega(1.3, 1.4)**2*gamma*v[1]) < 1e-11)
        self.assertTrue(
            abs(s.part[0].f[2] - omega(1.3, 1.4)**2*gamma*v[2]) < 1e-11)
        # Momentum conservation
        self.assertLess(abs(s.part[1].f[0]), 1e-16)
        self.assertTrue(
            abs(s.part[1].f[1] + omega(1.3, 1.4)**2*gamma*v[1]) < 1e-11)
        self.assertTrue(
            abs(s.part[1].f[2] + omega(1.3, 1.4)**2*gamma*v[2]) < 1e-11)
        # Trans and parallel
        s.part[1].pos = [5. - 1.1, 5, 5]
        s.integrator.run(0)
        self.assertTrue(
            abs(s.part[0].f[0] - omega(1.1, 1.2)**2*gamma*v[0]) < 1e-11)
        self.assertTrue(
            abs(s.part[0].f[1] - omega(1.1, 1.4)**2*gamma*v[1]) < 1e-11)
        self.assertTrue(
            abs(s.part[0].f[2] - omega(1.1, 1.4)**2*gamma*v[2]) < 1e-11)
        self.assertTrue(
            abs(s.part[1].f[0] + omega(1.1, 1.2)**2*gamma*v[0]) < 1e-11)
        self.assertTrue(
            abs(s.part[1].f[1] + omega(1.1, 1.4)**2*gamma*v[1]) < 1e-11)
        self.assertTrue(
            abs(s.part[1].f[2] + omega(1.1, 1.4)**2*gamma*v[2]) < 1e-11)
        # Trans and parallel 2nd point
        s.part[1].pos = [5. - 0.5, 5, 5]
        s.integrator.run(0)
        self.assertTrue(
            abs(s.part[0].f[0] - omega(0.5, 1.2)**2*gamma*v[0]) < 1e-11)
        self.assertTrue(
            abs(s.part[0].f[1] - omega(0.5, 1.4)**2*gamma*v[1]) < 1e-11)
        self.assertTrue(
            abs(s.part[0].f[2] - omega(0.5, 1.4)**2*gamma*v[2]) < 1e-11)
        self.assertTrue(
            abs(s.part[1].f[0] + omega(0.5, 1.2)**2*gamma*v[0]) < 1e-11)
        self.assertTrue(
            abs(s.part[1].f[1] + omega(0.5, 1.4)**2*gamma*v[1]) < 1e-11)
        self.assertTrue(
            abs(s.part[1].f[2] + omega(0.5, 1.4)**2*gamma*v[2]) < 1e-11)
    def test_ghosts_have_v(self):
        s = self.s
        r_cut = 1.5
        dx = 0.25 * r_cut
        def f(i):
            if i == 0:
                return dx
            return 10. - dx
        # Put a particle in every corner
        for ind in product([0, 1], [0, 1], [0, 1]):
            pos = [f(x) for x in ind]
            v = ind
            s.part.add(pos=pos, v=v)
        gamma = 1.0
        s.thermostat.set_dpd(kT=0.0, seed=42)
        s.non_bonded_inter[0, 0].dpd.set_params(
            weight_function=0, gamma=gamma, r_cut=r_cut,
            trans_weight_function=0, trans_gamma=gamma, trans_r_cut=r_cut)
        s.integrator.run(0)
        p_id = 0
        for ind in product([0, 1], [0, 1], [0, 1]):
            for i in range(3):
                if ind[i] == 0:
                    sgn = 1
                else:
                    sgn = -1
                self.assertAlmostEqual(sgn * 4.0, s.part[p_id].f[i])
            p_id += 1
    def test_constraint(self):
        import espressomd.shapes
        s = self.s
        s.constraints.add(shape=espressomd.shapes.Wall(
            dist=0, normal=[1, 0, 0]), particle_type=0, particle_velocity=[1, 2, 3])
        s.thermostat.set_dpd(kT=0.0, seed=42)
        s.non_bonded_inter[0, 0].dpd.set_params(
            weight_function=0, gamma=1., r_cut=1.0,
            trans_weight_function=0, trans_gamma=1., trans_r_cut=1.0)
        p = s.part.add(pos=[0.5, 0, 0], type=0, v=[0, 0, 0])
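        # With kT=0, constant weight functions, gamma=1 and the resting
        # particle within r_cut of the wall, the DPD drag against the
        # constraint's particle_velocity (1, 2, 3) is expected to equal
        # gamma * v, which the assertions below check component-wise.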
        s.integrator.run(0)
        self.assertAlmostEqual(p.f[0], 1.)
        self.assertAlmostEqual(p.f[1], 2.)
        self.assertAlmostEqual(p.f[2], 3.)
        for c in s.constraints:
            s.constraints.remove(c)
    def test_dpd_stress(self):
        def calc_omega(dist):
            return (1./dist - 1./r_cut) ** 2.0
        def diss_force_1(dist, vel_diff):
            f = np.zeros(3)
            vel12dotd12 = 0.
            dist_norm = np.linalg.norm(dist)
            for d in range(3):
                vel12dotd12 += vel_diff[d] * dist[d]
            friction = gamma * calc_omega(dist_norm) * vel12dotd12
            for d in range(3):
                f[d] -= (dist[d] * friction)
            return f
        def diss_force_2(dist, vel_diff):
            dist_norm = np.linalg.norm(dist)
            mat = np.identity(3) * (dist_norm**2.0)
            f = np.zeros(3)
            for d1 in range(3):
                for d2 in range(3):
                    mat[d1, d2] -= dist[d1] * dist[d2]
            for d1 in range(3):
                for d2 in range(3):
                    f[d1] += mat[d1, d2] * vel_diff[d2]
                f[d1] *= - 1.0 * gamma/2.0 * calc_omega(dist_norm)
            return f
        def calc_stress(dist, vel_diff):
            force_pair = diss_force_1(dist, vel_diff) +\
                         diss_force_2(dist, vel_diff)
            stress_pair = np.outer(dist, force_pair)
            return stress_pair
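        # diss_force_1 is the parallel DPD dissipative force,
        # -gamma * w(r)^2 * (v_ij . r_hat) r_hat, and diss_force_2 the
        # transverse one, -(gamma/2) * w(r)^2 * (v_ij - (v_ij . r_hat) r_hat),
        # matching trans_gamma = gamma / 2 below. calc_omega folds the linear
        # weight w(r) = 1 - r/r_cut and the 1/r normalization into one factor.
        # calc_stress accumulates the per-pair virial np.outer(dist, force),
        # which is normalized by the box volume further down.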
        n_part = 1000
        r_cut = 1.0
        gamma = 5.
        s = self.s
        s.part.clear()
        s.non_bonded_inter[0, 0].dpd.set_params(
          weight_function=1, gamma=gamma, r_cut=r_cut,
          trans_weight_function=1, trans_gamma=gamma/2.0, trans_r_cut=r_cut)
        pos = s.box_l * np.random.random((n_part, 3))
        s.part.add(pos=pos)
        s.integrator.run(10)
        s.thermostat.set_dpd(kT=0.0)
        s.integrator.run(steps=0, recalc_forces=True)
        pairs = s.part.pairs()
        stress = np.zeros([3, 3])
        for pair in pairs:
            dist = s.distance_vec(pair[0], pair[1])
            if np.linalg.norm(dist) < r_cut:
                vel_diff = pair[1].v - pair[0].v
                stress += calc_stress(dist, vel_diff)
        stress /= s.box_l[0] ** 3.0
        dpd_stress = s.analysis.dpd_stress()
        dpd_obs = DPDStress()
        obs_stress = dpd_obs.calculate()
        obs_stress = np.array([[obs_stress[0], obs_stress[1], obs_stress[2]],
                               [obs_stress[3], obs_stress[4], obs_stress[5]],
                               [obs_stress[6], obs_stress[7], obs_stress[8]]])
        np.testing.assert_array_almost_equal(np.copy(dpd_stress), stress)
        np.testing.assert_array_almost_equal(np.copy(obs_stress), stress)
    
    def test_momentum_conservation(self):
        gamma = 5.
        r_cut = 2.9
        s = self.s
        s.thermostat.set_dpd(kT=1.3, seed=42)
        s.part.clear()
        s.part.add(pos=((0, 0, 0), (0.1, 0.1, 0.1), (0.1, 0, 0)), mass=(1, 2, 3))
        s.non_bonded_inter[0, 0].dpd.set_params(
          weight_function=1, gamma=gamma, r_cut=r_cut,
          trans_weight_function=1, trans_gamma=gamma/2.0, trans_r_cut=r_cut)
        momentum = np.matmul(s.part[:].v.T, s.part[:].mass)
        for i in range(10):
            s.integrator.run(25)
            np.testing.assert_array_less(np.zeros((3, 3)), np.abs(s.part[:].f))
            np.testing.assert_allclose(np.matmul(s.part[:].v.T, s.part[:].mass), momentum, atol=1E-12)
if __name__ == "__main__":
    ut.main()
 | 
	gpl-3.0 | 
| 
	m4dcoder/cortex | 
	setup.py | 
	1 | 
	1799 | 
	#!/usr/bin/env python2.7
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from setuptools import setup, find_packages
PKG_ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
PKG_REQ_FILE = '%s/requirements.txt' % PKG_ROOT_DIR
os.chdir(PKG_ROOT_DIR)
def get_version_string():
    version = None
    sys.path.insert(0, PKG_ROOT_DIR)
    from cortex import __version__
    version = __version__
    sys.path.pop(0)
    return version
def get_requirements():
    with open(PKG_REQ_FILE) as f:
        required = f.read().splitlines()
    # Ignore comments in the requirements file
    required = [line for line in required if not line.startswith('#')]
    return required
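# Illustrative only: a requirements.txt containing
#   # core deps
#   six>=1.9.0
#   eventlet
# would make get_requirements() return ['six>=1.9.0', 'eventlet'] (the package
# names here are made up, not cortex's actual dependencies).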
setup(
    name='cortex',
    version=get_version_string(),
    packages=find_packages(exclude=[]),
    install_requires=get_requirements(),
    license='Apache License (2.0)',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7'
    ]
)
 | 
	apache-2.0 | 
| 
	theguardian/CherryStrap | 
	lib/pkg_resources/__init__.py | 
	2 | 
	107034 | 
	"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof.  The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is.  Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files.  It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
    import _imp
except ImportError:
    # Python 3.2 compatibility
    import imp as _imp
PY3 = sys.version_info > (3,)
PY2 = not PY3
if PY3:
    from urllib.parse import urlparse, urlunparse
if PY2:
    from urlparse import urlparse, urlunparse
if PY3:
    string_types = str,
else:
    string_types = str, eval('unicode')
iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems()
# capture these to bypass sandboxing
from os import utime
try:
    from os import mkdir, rename, unlink
    WRITE_SUPPORT = True
except ImportError:
    # no write support, probably under GAE
    WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
    import importlib.machinery as importlib_machinery
else:
    importlib_machinery = None
try:
    import parser
except ImportError:
    pass
try:
    import lib.pkg_resources._vendor.packaging.version
    import lib.pkg_resources._vendor.packaging.specifiers
    from lib.pkg_resources._vendor import packaging
    #packaging = lib.pkg_resources._vendor.packaging
except ImportError:
    # fallback to naturally-installed version; allows system packagers to
    #  omit vendored packages.
    import packaging.version
    import packaging.specifiers
if (3, 0) < sys.version_info < (3, 3):
    msg = (
        "Support for Python 3.0-3.2 has been dropped. Future versions "
        "will fail here."
    )
    warnings.warn(msg)
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
    """
    Used when there is an issue with a version or specifier not complying with
    PEP 440.
    """
class _SetuptoolsVersionMixin(object):
    def __hash__(self):
        return super(_SetuptoolsVersionMixin, self).__hash__()
    def __lt__(self, other):
        if isinstance(other, tuple):
            return tuple(self) < other
        else:
            return super(_SetuptoolsVersionMixin, self).__lt__(other)
    def __le__(self, other):
        if isinstance(other, tuple):
            return tuple(self) <= other
        else:
            return super(_SetuptoolsVersionMixin, self).__le__(other)
    def __eq__(self, other):
        if isinstance(other, tuple):
            return tuple(self) == other
        else:
            return super(_SetuptoolsVersionMixin, self).__eq__(other)
    def __ge__(self, other):
        if isinstance(other, tuple):
            return tuple(self) >= other
        else:
            return super(_SetuptoolsVersionMixin, self).__ge__(other)
    def __gt__(self, other):
        if isinstance(other, tuple):
            return tuple(self) > other
        else:
            return super(_SetuptoolsVersionMixin, self).__gt__(other)
    def __ne__(self, other):
        if isinstance(other, tuple):
            return tuple(self) != other
        else:
            return super(_SetuptoolsVersionMixin, self).__ne__(other)
    def __getitem__(self, key):
        return tuple(self)[key]
    def __iter__(self):
        component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
        replace = {
            'pre': 'c',
            'preview': 'c',
            '-': 'final-',
            'rc': 'c',
            'dev': '@',
        }.get
        def _parse_version_parts(s):
            for part in component_re.split(s):
                part = replace(part, part)
                if not part or part == '.':
                    continue
                if part[:1] in '0123456789':
                    # pad for numeric comparison
                    yield part.zfill(8)
                else:
                    yield '*'+part
            # ensure that alpha/beta/candidate are before final
            yield '*final'
        def old_parse_version(s):
            parts = []
            for part in _parse_version_parts(s.lower()):
                if part.startswith('*'):
                    # remove '-' before a prerelease tag
                    if part < '*final':
                        while parts and parts[-1] == '*final-':
                            parts.pop()
                    # remove trailing zeros from each series of numeric parts
                    while parts and parts[-1] == '00000000':
                        parts.pop()
                parts.append(part)
            return tuple(parts)
        # Warn for use of this function
        warnings.warn(
            "You have iterated over the result of "
            "pkg_resources.parse_version. This is a legacy behavior which is "
            "inconsistent with the new version class introduced in setuptools "
            "8.0. In most cases, conversion to a tuple is unnecessary. For "
            "comparison of versions, sort the Version instances directly. If "
            "you have another use case requiring the tuple, please file a "
            "bug with the setuptools project describing that need.",
            RuntimeWarning,
            stacklevel=1,
        )
        for part in old_parse_version(str(self)):
            yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
    pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
                              packaging.version.LegacyVersion):
    pass
def parse_version(v):
    try:
        return SetuptoolsVersion(v)
    except packaging.version.InvalidVersion:
        return SetuptoolsLegacyVersion(v)
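# Usage note: parse_version() yields objects that order according to PEP 440
# whenever the string is a valid version, e.g. parse_version('1.10') >
# parse_version('1.9') and parse_version('1.0.dev1') < parse_version('1.0');
# anything unparseable falls back to SetuptoolsLegacyVersion ordering.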
_state_vars = {}
def _declare_state(vartype, **kw):
    globals().update(kw)
    _state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
    state = {}
    g = globals()
    for k, v in _state_vars.items():
        state[k] = g['_sget_'+v](g[k])
    return state
def __setstate__(state):
    g = globals()
    for k, v in state.items():
        g['_sset_'+_state_vars[k]](k, g[k], v)
    return state
def _sget_dict(val):
    return val.copy()
def _sset_dict(key, ob, state):
    ob.clear()
    ob.update(state)
def _sget_object(val):
    return val.__getstate__()
def _sset_object(key, ob, state):
    ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.
    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils.  But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*.  To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.
    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        try:
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            # not Mac OS X
            pass
    return plat
__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider',  'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info',
    'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',
    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',
    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',
    # Exceptions
    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
    'UnknownExtra', 'ExtractionError',
    # Warnings
    'PEP440Warning',
    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
    # filesystem utilities
    'ensure_directory', 'normalize_path',
    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',
    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""
    def __repr__(self):
        return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
    """
    An already-installed version conflicts with the requested version.
    Should be initialized with the installed Distribution and the requested
    Requirement.
    """
    _template = "{self.dist} is installed but {self.req} is required"
    @property
    def dist(self):
        return self.args[0]
    @property
    def req(self):
        return self.args[1]
    def report(self):
        return self._template.format(**locals())
    def with_context(self, required_by):
        """
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        """
        if not required_by:
            return self
        args = self.args + (required_by,)
        return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
    """
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    """
    _template = VersionConflict._template + ' by {self.required_by}'
    @property
    def required_by(self):
        return self.args[2]
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""
    _template = ("The '{self.req}' distribution was not found "
                 "and is required by {self.requirers_str}")
    @property
    def req(self):
        return self.args[0]
    @property
    def requirers(self):
        return self.args[1]
    @property
    def requirers_str(self):
        if not self.requirers:
            return 'the application'
        return ', '.join(self.requirers)
    def report(self):
        return self._template.format(**locals())
    def __str__(self):
        return self.report()
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`
    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    if not _cache:
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
    return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions
    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform
    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?
    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided==required:
        # easy case
        return True
    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)
        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                if dversion == 7 and macosversion >= "10.3" or \
                        dversion == 8 and macosversion >= "10.4":
                    return True
            # egg isn't macosx or legacy darwin
            return False
        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
                provMac.group(3) != reqMac.group(3):
            return False
        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False
        return True
    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    if isinstance(dist, string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
       Leading and trailing whitespace is stripped from each line, and lines
       with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory?  (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws
        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            return cls._build_from_requirements(__requires__)
        return ws
    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)
        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)
        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]
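    # Usage sketch for iter_entry_points() (the group name "console_scripts"
    # is the conventional one; any group string works):
    #
    #     for ep in working_set.iter_entry_points('console_scripts'):
    #         print(ep.name, ep.module_name)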
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)
        if entry is None:
            entry = dist.location
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None,
            replace_conflicting=False):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
        if supplied, should be an ``Environment`` instance.  If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set.  `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception if
        any requirements are found on the path that have the correct name but
        the wrong version.  Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.
        """
        # set up the stack
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []
        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)
        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(req, ws, installer)
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)
            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)
            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)
            processed[req] = True
        # return list of distros to activate
        return to_activate
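    # Usage sketch for resolve(); "example_pkg" is a made-up project name:
    #
    #     reqs = list(parse_requirements('example_pkg>=1.0'))
    #     for dist in working_set.resolve(reqs):
    #         working_set.add(dist)
    #
    # DistributionNotFound / VersionConflict are raised as documented above.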
    def find_plugins(self, plugin_env, full_env=None, installer=None,
            fallback=True):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions.  If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies.  `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        # scan project names in alphabetic order
        plugin_projects.sort()
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        shadow_set = self.__class__([])
        # put all our entries in shadow_set
        list(map(shadow_set.add, self))
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError as v:
                    # save error info
                    error_info[dist] = v
                    if fallback:
                        # try the next older version of project
                        continue
                    else:
                        # give up on this project, keep going
                        break
                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated
        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required.  The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        for callback in self.callbacks:
            callback(dist)
    def __getstate__(self):
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )
    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class Environment(object):
    """Searchable snapshot of distributions on a search path"""
    def __init__(self, search_path=None, platform=get_supported_platform(),
            python=PY_MAJOR):
        """Snapshot distributions available on a search path
        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.
        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'3.3'``);
        it defaults to the current version.
        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
            or dist.py_version==self.python) \
            and compatible_platforms(dist.platform, self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment
        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.
        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])
    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`
        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)  If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`.  If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        for dist in self[req.key]:
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)
        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]:
                yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource
    The following attributes are available from instances of this exception:
    manager
        The resource manager that raised this exception
    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """
class ResourceManager:
    """Manage resource extraction and packages"""
    extraction_path = None
    def __init__(self):
        self.cached_files = {}
    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)
    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )
    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )
    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )
    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )
    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )
    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
  %s
The Python egg cache directory is currently set to:
  %s
Perhaps your account does not have write access to this directory?  You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`
        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.
        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except:
            self.extraction_error()
        self._warn_unsafe_extraction_path(extract_path)
        self.cached_files[target_path] = 1
        return target_path
    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.
        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            #  and temp directories are not writable by other users, so
            #  bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = ("%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path)
            warnings.warn(msg, UserWarning)
    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`
        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.
        Resource providers should call this method ONLY after successfully
        extracting a compressed resource.  They must NOT call it on resources
        that are already in the filesystem.
        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
            os.chmod(tempname, mode)
    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.
        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``.  (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.  See that routine's documentation for more
        details.)
        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.  There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.
        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path
    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process.  This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX
def get_default_cache():
    """Determine the default cache location
    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
    "Application Data" directory.  On all other systems, it's "~/.python-eggs".
    """
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass
    if os.name!='nt':
        return os.path.expanduser('~/.python-eggs')
    # XXX this may be locale-specific!
    app_data = 'Application Data'
    app_homes = [
        # best option, should be locale-safe
        (('APPDATA',), None),
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE','HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        # 95/98/ME
        (('WINDIR',), app_data),
    ]
    for keys, subdir in app_homes:
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                break
        else:
            if subdir:
                dirname = os.path.join(dirname, subdir)
            return os.path.join(dirname, 'Python-Eggs')
    else:
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE enviroment variable"
        )
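# Example (illustrative): with PYTHON_EGG_CACHE unset on a POSIX system,
# get_default_cache() should return os.path.expanduser('~/.python-eggs');
# setting PYTHON_EGG_CACHE (e.g. to '/tmp/eggs') makes it return that value
# verbatim. The example path is hypothetical.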
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name
    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # normalize the version
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        version = version.replace(' ','.')
        return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name
    Any runs of non-alphanumeric characters other than '.' are replaced with
    a single '_', and the result is always lowercased.
    """
    return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form
    Any '-' characters are currently replaced with '_'.
    """
    return name.replace('-','_')
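# Examples (illustrative) of the helpers above:
#     safe_name('foo bar!baz')   -> 'foo-bar-baz'
#     safe_version('1.2.3')      -> '1.2.3'   (normalized via packaging)
#     safe_extra('My Extra')     -> 'my_extra'
#     to_filename('foo-bar')     -> 'foo_bar'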
class MarkerEvaluation(object):
    values = {
        'os_name': lambda: os.name,
        'sys_platform': lambda: sys.platform,
        'python_full_version': platform.python_version,
        'python_version': lambda: platform.python_version()[:3],
        'platform_version': platform.version,
        'platform_machine': platform.machine,
        'platform_python_implementation': platform.python_implementation,
        'python_implementation': platform.python_implementation,
    }
    @classmethod
    def is_invalid_marker(cls, text):
        """
        Validate text as a PEP 426 environment marker; return an exception
        if invalid or False otherwise.
        """
        try:
            cls.evaluate_marker(text)
        except SyntaxError as e:
            return cls.normalize_exception(e)
        return False
    @staticmethod
    def normalize_exception(exc):
        """
        Given a SyntaxError from a marker evaluation, normalize the error
        message:
         - Remove indications of filename and line number.
         - Replace platform-specific error messages with standard error
           messages.
        """
        subs = {
            'unexpected EOF while parsing': 'invalid syntax',
            'parenthesis is never closed': 'invalid syntax',
        }
        exc.filename = None
        exc.lineno = None
        exc.msg = subs.get(exc.msg, exc.msg)
        return exc
    @classmethod
    def and_test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        items = [
            cls.interpret(nodelist[i])
            for i in range(1, len(nodelist), 2)
        ]
        return functools.reduce(operator.and_, items)
    @classmethod
    def test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        items = [
            cls.interpret(nodelist[i])
            for i in range(1, len(nodelist), 2)
        ]
        return functools.reduce(operator.or_, items)
    @classmethod
    def atom(cls, nodelist):
        t = nodelist[1][0]
        if t == token.LPAR:
            if nodelist[2][0] == token.RPAR:
                raise SyntaxError("Empty parentheses")
            return cls.interpret(nodelist[2])
        msg = "Language feature not supported in environment markers"
        raise SyntaxError(msg)
    @classmethod
    def comparison(cls, nodelist):
        if len(nodelist) > 4:
            msg = "Chained comparison not allowed in environment markers"
            raise SyntaxError(msg)
        comp = nodelist[2][1]
        cop = comp[1]
        if comp[0] == token.NAME:
            if len(nodelist[2]) == 3:
                if cop == 'not':
                    cop = 'not in'
                else:
                    cop = 'is not'
        try:
            cop = cls.get_op(cop)
        except KeyError:
            msg = repr(cop) + " operator not allowed in environment markers"
            raise SyntaxError(msg)
        return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
    @classmethod
    def get_op(cls, op):
        ops = {
            symbol.test: cls.test,
            symbol.and_test: cls.and_test,
            symbol.atom: cls.atom,
            symbol.comparison: cls.comparison,
            'not in': lambda x, y: x not in y,
            'in': lambda x, y: x in y,
            '==': operator.eq,
            '!=': operator.ne,
            '<':  operator.lt,
            '>':  operator.gt,
            '<=': operator.le,
            '>=': operator.ge,
        }
        if hasattr(symbol, 'or_test'):
            ops[symbol.or_test] = cls.test
        return ops[op]
    @classmethod
    def evaluate_marker(cls, text, extra=None):
        """
        Evaluate a PEP 426 environment marker on CPython 2.4+.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
        This implementation uses the 'parser' module, which is not implemented
        on Jython and has been superseded by the 'ast' module in Python 2.6
        and later.
        """
        return cls.interpret(parser.expr(text).totuple(1)[1])
    @staticmethod
    def _translate_metadata2(env):
        """
        Markerlib implements Metadata 1.2 (PEP 345) environment markers.
        Translate the variables to Metadata 2.0 (PEP 426).
        """
        return dict(
            (key.replace('.', '_'), value)
            for key, value in env
        )
    @classmethod
    def _markerlib_evaluate(cls, text):
        """
        Evaluate a PEP 426 environment marker using markerlib.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
        """
        import _markerlib
        env = cls._translate_metadata2(_markerlib.default_environment())
        try:
            result = _markerlib.interpret(text, env)
        except NameError as e:
            raise SyntaxError(e.args[0])
        return result
    if 'parser' not in globals():
        # Fall back to less-complete _markerlib implementation if 'parser' module
        # is not available.
        evaluate_marker = _markerlib_evaluate
    @classmethod
    def interpret(cls, nodelist):
        while len(nodelist)==2: nodelist = nodelist[1]
        try:
            op = cls.get_op(nodelist[0])
        except KeyError:
            raise SyntaxError("Comparison or logical expression expected")
        return op(nodelist)
    @classmethod
    def evaluate(cls, nodelist):
        while len(nodelist)==2: nodelist = nodelist[1]
        kind = nodelist[0]
        name = nodelist[1]
        if kind==token.NAME:
            try:
                op = cls.values[name]
            except KeyError:
                raise SyntaxError("Unknown name %r" % name)
            return op()
        if kind==token.STRING:
            s = nodelist[1]
            if not cls._safe_string(s):
                raise SyntaxError(
                    "Only plain strings allowed in environment markers")
            return s[1:-1]
        msg = "Language feature not supported in environment markers"
        raise SyntaxError(msg)
    @staticmethod
    def _safe_string(cand):
        return (
            cand[:1] in "'\"" and
            not cand.startswith('"""') and
            not cand.startswith("'''") and
            '\\' not in cand
        )
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
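# Example (illustrative): evaluate_marker("os_name == 'posix'") evaluates the
# marker against the values table in MarkerEvaluation (True on POSIX systems),
# while invalid_marker("os_name ==") returns the normalized SyntaxError
# instance rather than raising it.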
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    egg_name = None
    egg_info = None
    loader = None
    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)
    def get_resource_stream(self, manager, resource_name):
        return io.BytesIO(self.get_resource_string(manager, resource_name))
    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))
    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))
    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info, name))
    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name))
    else:
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name)).decode("utf-8")
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))
    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))
    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))
    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []
    def run_script(self, script_name, namespace):
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename,'exec')
            exec(script_code, namespace, namespace)
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _fn(self, base, resource_name):
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base
    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""
    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()
    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        old = None
        while path!=old:
            if _is_unpacked_egg(path):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""
    def _has(self, path):
        return os.path.exists(path)
    def _isdir(self, path):
        return os.path.isdir(path)
    def _listdir(self, path):
        return os.listdir(path)
    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')
    def _get(self, path):
        with open(path, 'rb') as stream:
            return stream.read()
register_loader_type(type(None), DefaultProvider)
if importlib_machinery is not None:
    register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""
    _isdir = _has = lambda self, path: False
    _get = lambda self, path: ''
    _listdir = lambda self, path: []
    module_path = None
    def __init__(self):
        pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
    """
    zip manifest builder
    """
    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.
        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        """
        with ContextualZipFile(path) as zfile:
            items = (
                (
                    name.replace('/', os.sep),
                    zfile.getinfo(name),
                )
                for name in zfile.namelist()
            )
            return dict(items)
    load = build
class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """
    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime
        if path not in self or self[path].mtime != mtime:
            manifest = self.build(path)
            self[path] = self.manifest_mod(manifest, mtime)
        return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
    """
    Supplement ZipFile class to support context manager for Python 2.6
    """
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def __new__(cls, *args, **kwargs):
        """
        Construct a ZipFile or ContextualZipFile as appropriate
        """
        if hasattr(zipfile.ZipFile, '__exit__'):
            return zipfile.ZipFile(*args, **kwargs)
        return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""
    eagers = None
    _zip_manifests = MemoizedZipManifests()
    def __init__(self, module):
        EggProvider.__init__(self, module)
        self.zip_pre = self.loader.archive+os.sep
    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )
    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre+zip_path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )
    @property
    def zipinfo(self):
        return self._zip_manifests.load(self.loader.archive)
    def get_resource_filename(self, manager, resource_name):
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)
    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size
    def _extract_resource(self, manager, zip_path):
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if self._is_current(real_path, zip_path):
                return real_path
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        #  so proceed.
                        return real_path
                    # Windows, del old file and retry
                    elif os.name=='nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            # report a user-friendly error
            manager.extraction_error()
        return real_path
    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size!=size or stat.st_mtime!=timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents
    def _get_eager_resources(self):
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers
    def _index(self):
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind
    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()
    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()
    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))
    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))
    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files
    Usage::
        metadata = FileMetadata("/path/to/PKG-INFO")
    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """
    def __init__(self, path):
        self.path = path
    def has_metadata(self, name):
        return name=='PKG-INFO'
    def get_metadata(self, name):
        if name=='PKG-INFO':
            with open(self.path,'rU') as f:
                metadata = f.read()
            return metadata
        raise KeyError("No metadata except PKG-INFO is available")
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories
    Usage::
        # Development eggs:
        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
        # Unpacked egg directories:
        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """
    def __init__(self, path, egg_info):
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""
    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        self.zip_pre = importer.archive+os.sep
        self.loader = importer
        if importer.prefix:
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item.  See ``pkg_resources.find_on_path`` for an example."""
    _distribution_finders[importer_type] = distribution_finder
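# Example (illustrative; the path is hypothetical): the finders registered
# below follow this contract, e.g. find_on_path(importer, '/some/dir', only=False)
# yields a Distribution for every .egg, .egg-info and .dist-info entry found
# in that directory.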
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    importer = get_importer(path_item)
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir('/'):
        if _is_unpacked_egg(subitem):
            subpath = os.path.join(path_item, subitem)
            for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
                yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if _is_unpacked_egg(path_item):
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item, entry, metadata, precedence=DEVELOP_DIST
                    )
                elif not only and _is_unpacked_egg(entry):
                    dists = find_distributions(os.path.join(path_item, entry))
                    for dist in dists:
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    with open(os.path.join(path_item, entry)) as entry_file:
                        entry_lines = entry_file.readlines()
                    for line in entry_lines:
                        if not line.strip():
                            continue
                        path = os.path.join(path_item, line.rstrip())
                        dists = find_distributions(path)
                        for item in dists:
                            yield item
                        break
register_finder(pkgutil.ImpImporter, find_on_path)
if importlib_machinery is not None:
    register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::
        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages
    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module,'__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        for path_item in path:
            if path_item not in module.__path__:
                module.__path__.append(path_item)
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return
        path, parent = sys.path, None
        if '.' in packageName:
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        _imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    _imp.acquire_lock()
    try:
        for package in _namespace_packages.get(parent,()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                fixup_namespace_packages(subpath, package)
    finally:
        _imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for item in module.__path__:
        if _normalize_cached(item)==normalized:
            break
    else:
        # Only return the path if it's not already there
        return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if importlib_machinery is not None:
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
    try:
        return _cache[filename]
    except KeyError:
        _cache[filename] = result = normalize_path(filename)
        return result
def _is_unpacked_egg(path):
    """
    Determine if given path appears to be an unpacked egg.
    """
    return (
        path.lower().endswith('.egg')
    )
def _set_parent_ns(packageName):
    parts = packageName.split('.')
    name = parts.pop()
    if parts:
        parent = '.'.join(parts)
        setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence"""
    if isinstance(strs, string_types):
        for s in strs.splitlines():
            s = s.strip()
            # skip blank lines/comments
            if s and not s.startswith('#'):
                yield s
    else:
        for ss in strs:
            for s in yield_lines(ss):
                yield s
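# Example (illustrative):
#     list(yield_lines("foo\n# a comment\n\n  bar  "))  ->  ['foo', 'bar']
# Nested sequences are flattened, so yield_lines(['a\nb', ['c']]) yields
# 'a', 'b', 'c'.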
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
    r"""
    (?P<name>[^-]+) (
        -(?P<ver>[^-]+) (
            -py(?P<pyver>[^-]+) (
                -(?P<plat>.+)
            )?
        )?
    )?
    """,
    re.VERBOSE | re.IGNORECASE,
).match
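# Example (illustrative; the project name is hypothetical):
#     EGG_NAME('FooBar-1.2-py2.7-linux_x86_64').group('name', 'ver', 'pyver', 'plat')
# yields ('FooBar', '1.2', '2.7', 'linux_x86_64'); trailing parts may be
# omitted, e.g. EGG_NAME('FooBar') matches with only the 'name' group set.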
class EntryPoint(object):
    """Object representing an advertised importable object"""
    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist
    def __str__(self):
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s
    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)
    def load(self, require=True, *args, **kwargs):
        """
        Require packages for this EntryPoint, then resolve it.
        """
        if not require or args or kwargs:
            warnings.warn(
                "Parameters to load are deprecated.  Call .resolve and "
                ".require separately.",
                DeprecationWarning,
                stacklevel=2,
            )
        if require:
            self.require(*args, **kwargs)
        return self.resolve()
    def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))
    def require(self, env=None, installer=None):
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer)
        list(map(working_set.add, items))
    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )
    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`
        Entry point syntax follows the form::
            name = some.module:some.attr [extra1, extra2]
        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)
    @classmethod
    def _parse_extras(cls, extras_spec):
        if not extras_spec:
            return ()
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras
    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name]=ep
        return this
    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data, dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
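# Example (illustrative; module and names are hypothetical):
#     ep = EntryPoint.parse('main = mypkg.cli:run [extra1,extra2]')
# gives ep.name == 'main', ep.module_name == 'mypkg.cli', ep.attrs == ('run',)
# and ep.extras == ('extra1', 'extra2'); ep.resolve() would import mypkg.cli
# and return its 'run' attribute.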
def _remove_md5_fragment(location):
    if not location:
        return ''
    parsed = urlparse(location)
    if parsed[-1].startswith('md5='):
        return urlunparse(parsed[:-1] + ('',))
    return location
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    PKG_INFO = 'PKG-INFO'
    def __init__(self, location=None, metadata=None, project_name=None,
            version=None, py_version=PY_MAJOR, platform=None,
            precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        self._provider = metadata or empty_provider
    @classmethod
    def from_location(cls, location, basename, metadata=None,**kw):
        project_name, version, py_version, platform = [None]*4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            # .dist-info gets much metadata differently
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name','ver','pyver','plat'
                )
            cls = _distributionImpl[ext.lower()]
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )
    @property
    def hashcmp(self):
        return (
            self.parsed_version,
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version or '',
            self.platform or '',
        )
    def __hash__(self):
        return hash(self.hashcmp)
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp
    def __le__(self, other):
        return self.hashcmp <= other.hashcmp
    def __gt__(self, other):
        return self.hashcmp > other.hashcmp
    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp
    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed.  (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)
    @property
    def key(self):
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key
    @property
    def parsed_version(self):
        if not hasattr(self, "_parsed_version"):
            self._parsed_version = parse_version(self.version)
        return self._parsed_version
    def _warn_legacy_version(self):
        LV = packaging.version.LegacyVersion
        is_legacy = isinstance(self._parsed_version, LV)
        if not is_legacy:
            return
        # While an empty version is technically a legacy version and
        # is not a valid PEP 440 version, it's also unlikely to
        # actually come from someone and instead it is more likely that
        # it comes from setuptools attempting to parse a filename and
        # including it in the list. So for that we'll gate this warning
        # on if the version is anything at all or not.
        if not self.version:
            return
        tmpl = textwrap.dedent("""
            '{project_name} ({version})' is being parsed as a legacy,
            non PEP 440,
            version. You may find odd behavior and sort order.
            In particular it will be sorted as less than 0.0. It
            is recommended to migrate to PEP 440 compatible
            versions.
            """).strip().replace('\n', ' ')
        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
    @property
    def version(self):
        try:
            return self._version
        except AttributeError:
            for line in self._get_metadata(self.PKG_INFO):
                if line.lower().startswith('version:'):
                    self._version = safe_version(line.split(':',1)[1].strip())
                    return self._version
            else:
                tmpl = "Missing 'Version:' header and/or %s file"
                raise ValueError(tmpl % self.PKG_INFO, self)
    @property
    def _dep_map(self):
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra, reqs in split_sections(self._get_metadata(name)):
                    if extra:
                        if ':' in extra:
                            extra, marker = extra.split(':', 1)
                            if invalid_marker(marker):
                                # XXX warn
                                reqs=[]
                            elif not evaluate_marker(marker):
                                reqs=[]
                        extra = safe_extra(extra) or None
                    dm.setdefault(extra,[]).extend(parse_requirements(reqs))
            return dm
    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps
    def _get_metadata(self, name):
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line
    def activate(self, path=None):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None:
            path = sys.path
        self.insert_on(path)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            for pkg in self._get_metadata('namespace_packages.txt'):
                if pkg in sys.modules:
                    declare_namespace(pkg)
    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )
        if self.platform:
            filename += '-' + self.platform
        return filename
    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self, self.location)
        else:
            return str(self)
    def __str__(self):
        try:
            version = getattr(self, 'version', None)
        except ValueError:
            version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name, version)
    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)
    @classmethod
    def from_filename(cls, filename, metadata=None, **kw):
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )
    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        if isinstance(self.parsed_version, packaging.version.Version):
            spec = "%s==%s" % (self.project_name, self.parsed_version)
        else:
            spec = "%s===%s" % (self.project_name, self.parsed_version)
        return Requirement.parse(spec)
    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group, name),))
        return ep.load()
    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group,{})
        return ep_map
    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc = None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        npath= [(p and _normalize_cached(p) or p) for p in path]
        for p, item in enumerate(npath):
            if item == nloc:
                break
            elif item == bdir and self.precedence == EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            if path is sys.path:
                self.check_version_conflict()
            path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while True:
            try:
                np = npath.index(nloc, p+1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                # ha!
                p = np
        return
    def check_version_conflict(self):
        if self.key == 'setuptools':
            # ignore the inevitable setuptools self-conflicts  :(
            return
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                    or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
    def has_version(self):
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for " + repr(self))
            return False
        return True
    def clone(self,**kw):
        """Copy this distribution, substituting in any changed keyword args"""
        names = 'project_name version py_version platform location precedence'
        for attr in names.split():
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)
    @property
    def extras(self):
        return [dep for dep in self._dep_map if dep]
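# Example (illustrative; the path and project name are hypothetical):
#     dist = Distribution.from_filename('/path/to/FooBar-1.2-py2.7.egg')
# produces a Distribution with project_name 'FooBar', version '1.2',
# py_version '2.7' and key 'foobar'; dist.egg_name() reconstructs
# 'FooBar-1.2-py2.7' from those fields.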
class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
    PKG_INFO = 'METADATA'
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info
    @property
    def _dep_map(self):
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map
    def _preparse_requirement(self, requires_dist):
        """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
        Split environment marker, add == prefix to version specifiers as
        necessary, and remove parenthesis.
        """
        parts = requires_dist.split(';', 1) + ['']
        distvers = parts[0].strip()
        mark = parts[1].strip()
        distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
        distvers = distvers.replace('(', '').replace(')', '')
        return (distvers, mark)
    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        from _markerlib import compile as compile_marker
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            distvers, mark = self._preparse_requirement(req)
            parsed = next(parse_requirements(distvers))
            parsed.marker_fn = compile_marker(mark)
            reqs.append(parsed)
        def reqs_for_extra(extra):
            for req in reqs:
                if req.marker_fn(override={'extra':extra}):
                    yield req
        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            extra = safe_extra(extra.strip())
            dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
        return dm
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': Distribution,
    '.dist-info': DistInfoDistribution,
    }
def issue_warning(*args,**kw):
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        pass
    warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
    def __str__(self):
        return ' '.join(self.args)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`
    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
        items = []
        while not TERMINATOR(line, p):
            if CONTINUE(line, p):
                try:
                    line = next(lines)
                    p = 0
                except StopIteration:
                    msg = "\\ must not appear on the last nonblank line"
                    raise RequirementParseError(msg)
            match = ITEM(line, p)
            if not match:
                msg = "Expected " + item_name + " in"
                raise RequirementParseError(msg, line, "at", line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line, p)
            if match:
                # skip the comma
                p = match.end()
            elif not TERMINATOR(line, p):
                msg = "Expected ',' or end-of-list in"
                raise RequirementParseError(msg, line, "at", line[p:])
        match = TERMINATOR(line, p)
        # skip the terminator, if any
        if match:
            p = match.end()
        return line, p, items
    for line in lines:
        match = DISTRO(line)
        if not match:
            raise RequirementParseError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        match = OBRACKET(line, p)
        if match:
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
            "version spec")
        specs = [(op, val) for op, val in specs]
        yield Requirement(project_name, specs, extras)
class Requirement:
    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        self.specifier = packaging.specifiers.SpecifierSet(
            ",".join(["".join([x, y]) for x, y in specs])
        )
        self.specs = specs
        self.extras = tuple(map(safe_extra, extras))
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
        )
        self.__hash = hash(self.hashCmp)
    def __str__(self):
        extras = ','.join(self.extras)
        if extras:
            extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, self.specifier)
    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )
    def __ne__(self, other):
        return not self == other
    def __contains__(self, item):
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False
            item = item.version
        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)
    def __hash__(self):
        return self.__hash
    def __repr__(self): return "Requirement.parse(%r)" % str(self)
    @staticmethod
    def parse(s):
        req, = parse_requirements(s)
        return req
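# Illustrative use of the class above (a sketch, not part of the original
# module; the project name, extra and versions are hypothetical):
#   req = Requirement.parse("Foo[bar]>=1.0,<2.0")
#   req.project_name   # 'Foo'
#   req.extras         # ('bar',)
#   "1.5" in req       # True -- the version satisfies the specifier set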
def _get_mro(cls):
    """Get an mro for a type or classic class"""
    if not isinstance(cls, type):
        class cls(cls, object): pass
        return cls.__mro__[1:]
    return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    for t in _get_mro(getattr(ob, '__class__', type(ob))):
        if t in registry:
            return registry[t]
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
def _bypass_ensure_directory(path):
    """Sandbox-bypassing version of ensure_directory()"""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        _bypass_ensure_directory(dirname)
        mkdir(dirname, 0o755)
def split_sections(s):
    """Split a string or iterable thereof into (section, content) pairs
    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        if line.startswith("["):
            if line.endswith("]"):
                if section or content:
                    yield section, content
                section = line[1:-1].strip()
                content = []
            else:
                raise ValueError("Invalid section heading", line)
        else:
            content.append(line)
    # wrap up last segment
    yield section, content
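# Example (sketch): feeding entry-points style text through split_sections:
#   list(split_sections(['setup', '[console_scripts]', 'foo = pkg:main']))
#   -> [(None, ['setup']), ('console_scripts', ['foo = pkg:main'])]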
def _mkstemp(*args,**kw):
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args,**kw)
    finally:
        # and then put it back
        os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
    f(*args, **kwargs)
    return f
@_call_aside
def _initialize(g=globals()):
    "Set up global resource manager (deliberately not state-saved)"
    manager = ResourceManager()
    g['_manager'] = manager
    for name in dir(manager):
        if not name.startswith('_'):
            g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
    """
    Prepare the master working set and make the ``require()``
    API available.
    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.
    Invocation by other packages is unsupported and done
    at their own risk.
    """
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    # backward compatibility
    run_main = run_script
    # Activate all distributions already on sys.path, and ensure that
    # all distributions added to the working set in the future (e.g. by
    # calling ``require()``) will get activated as well.
    add_activation_listener(lambda dist: dist.activate())
    working_set.entries=[]
    # match order
    list(map(working_set.add_entry, sys.path))
    globals().update(locals())
 | 
	gpl-2.0 | 
| 
	donald-pinckney/EM-Simulator | 
	EM Sim/EM Sim/py_lib/formatter.py | 
	252 | 
	14911 | 
	"""Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import sys
AS_IS = None
class NullFormatter:
    """A formatter which does nothing.
    If the writer parameter is omitted, a NullWriter instance is created.
    No methods of the writer are called by NullFormatter instances.
    Implementations should inherit from this class if implementing a writer
    interface but don't need to inherit any implementation.
    """
    def __init__(self, writer=None):
        if writer is None:
            writer = NullWriter()
        self.writer = writer
    def end_paragraph(self, blankline): pass
    def add_line_break(self): pass
    def add_hor_rule(self, *args, **kw): pass
    def add_label_data(self, format, counter, blankline=None): pass
    def add_flowing_data(self, data): pass
    def add_literal_data(self, data): pass
    def flush_softspace(self): pass
    def push_alignment(self, align): pass
    def pop_alignment(self): pass
    def push_font(self, x): pass
    def pop_font(self): pass
    def push_margin(self, margin): pass
    def pop_margin(self): pass
    def set_spacing(self, spacing): pass
    def push_style(self, *styles): pass
    def pop_style(self, n=1): pass
    def assert_line_data(self, flag=1): pass
class AbstractFormatter:
    """The standard formatter.
    This implementation has demonstrated wide applicability to many writers,
    and may be used directly in most circumstances.  It has been used to
    implement a full-featured World Wide Web browser.
    """
    #  Space handling policy:  blank spaces at the boundary between elements
    #  are handled by the outermost context.  "Literal" data is not checked
    #  to determine context, so spaces in literal data are handled directly
    #  in all circumstances.
    def __init__(self, writer):
        self.writer = writer            # Output device
        self.align = None               # Current alignment
        self.align_stack = []           # Alignment stack
        self.font_stack = []            # Font state
        self.margin_stack = []          # Margin state
        self.spacing = None             # Vertical spacing state
        self.style_stack = []           # Other state, e.g. color
        self.nospace = 1                # Should leading space be suppressed
        self.softspace = 0              # Should a space be inserted
        self.para_end = 1               # Just ended a paragraph
        self.parskip = 0                # Skipped space between paragraphs?
        self.hard_break = 1             # Have a hard break
        self.have_label = 0
    def end_paragraph(self, blankline):
        if not self.hard_break:
            self.writer.send_line_break()
            self.have_label = 0
        if self.parskip < blankline and not self.have_label:
            self.writer.send_paragraph(blankline - self.parskip)
            self.parskip = blankline
            self.have_label = 0
        self.hard_break = self.nospace = self.para_end = 1
        self.softspace = 0
    def add_line_break(self):
        if not (self.hard_break or self.para_end):
            self.writer.send_line_break()
            self.have_label = self.parskip = 0
        self.hard_break = self.nospace = 1
        self.softspace = 0
    def add_hor_rule(self, *args, **kw):
        if not self.hard_break:
            self.writer.send_line_break()
        self.writer.send_hor_rule(*args, **kw)
        self.hard_break = self.nospace = 1
        self.have_label = self.para_end = self.softspace = self.parskip = 0
    def add_label_data(self, format, counter, blankline = None):
        if self.have_label or not self.hard_break:
            self.writer.send_line_break()
        if not self.para_end:
            self.writer.send_paragraph((blankline and 1) or 0)
        if isinstance(format, str):
            self.writer.send_label_data(self.format_counter(format, counter))
        else:
            self.writer.send_label_data(format)
        self.nospace = self.have_label = self.hard_break = self.para_end = 1
        self.softspace = self.parskip = 0
    def format_counter(self, format, counter):
        label = ''
        for c in format:
            if c == '1':
                label = label + ('%d' % counter)
            elif c in 'aA':
                if counter > 0:
                    label = label + self.format_letter(c, counter)
            elif c in 'iI':
                if counter > 0:
                    label = label + self.format_roman(c, counter)
            else:
                label = label + c
        return label
    def format_letter(self, case, counter):
        label = ''
        while counter > 0:
            counter, x = divmod(counter-1, 26)
            # This makes a strong assumption that lowercase letters
            # and uppercase letters form two contiguous blocks, with
            # letters in order!
            s = chr(ord(case) + x)
            label = s + label
        return label
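    # Sketch: format_letter('a', 1) -> 'a', format_letter('a', 28) -> 'ab';
    # i.e. a bijective base-26 labelling ('a'..'z', 'aa', 'ab', ...).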
    def format_roman(self, case, counter):
        ones = ['i', 'x', 'c', 'm']
        fives = ['v', 'l', 'd']
        label, index = '', 0
        # This will die of IndexError when counter is too big
        while counter > 0:
            counter, x = divmod(counter, 10)
            if x == 9:
                label = ones[index] + ones[index+1] + label
            elif x == 4:
                label = ones[index] + fives[index] + label
            else:
                if x >= 5:
                    s = fives[index]
                    x = x-5
                else:
                    s = ''
                s = s + ones[index]*x
                label = s + label
            index = index + 1
        if case == 'I':
            return label.upper()
        return label
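    # Sketch: format_roman('i', 4) -> 'iv', format_roman('I', 1994) -> 'MCMXCIV'.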
    def add_flowing_data(self, data):
        if not data: return
        prespace = data[:1].isspace()
        postspace = data[-1:].isspace()
        data = " ".join(data.split())
        if self.nospace and not data:
            return
        elif prespace or self.softspace:
            if not data:
                if not self.nospace:
                    self.softspace = 1
                    self.parskip = 0
                return
            if not self.nospace:
                data = ' ' + data
        self.hard_break = self.nospace = self.para_end = \
                          self.parskip = self.have_label = 0
        self.softspace = postspace
        self.writer.send_flowing_data(data)
    def add_literal_data(self, data):
        if not data: return
        if self.softspace:
            self.writer.send_flowing_data(" ")
        self.hard_break = data[-1:] == '\n'
        self.nospace = self.para_end = self.softspace = \
                       self.parskip = self.have_label = 0
        self.writer.send_literal_data(data)
    def flush_softspace(self):
        if self.softspace:
            self.hard_break = self.para_end = self.parskip = \
                              self.have_label = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
    def push_alignment(self, align):
        if align and align != self.align:
            self.writer.new_alignment(align)
            self.align = align
            self.align_stack.append(align)
        else:
            self.align_stack.append(self.align)
    def pop_alignment(self):
        if self.align_stack:
            del self.align_stack[-1]
        if self.align_stack:
            self.align = align = self.align_stack[-1]
            self.writer.new_alignment(align)
        else:
            self.align = None
            self.writer.new_alignment(None)
    def push_font(self, font):
        size, i, b, tt = font
        if self.softspace:
            self.hard_break = self.para_end = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
        if self.font_stack:
            csize, ci, cb, ctt = self.font_stack[-1]
            if size is AS_IS: size = csize
            if i is AS_IS: i = ci
            if b is AS_IS: b = cb
            if tt is AS_IS: tt = ctt
        font = (size, i, b, tt)
        self.font_stack.append(font)
        self.writer.new_font(font)
    def pop_font(self):
        if self.font_stack:
            del self.font_stack[-1]
        if self.font_stack:
            font = self.font_stack[-1]
        else:
            font = None
        self.writer.new_font(font)
    def push_margin(self, margin):
        self.margin_stack.append(margin)
        fstack = filter(None, self.margin_stack)
        if not margin and fstack:
            margin = fstack[-1]
        self.writer.new_margin(margin, len(fstack))
    def pop_margin(self):
        if self.margin_stack:
            del self.margin_stack[-1]
        fstack = filter(None, self.margin_stack)
        if fstack:
            margin = fstack[-1]
        else:
            margin = None
        self.writer.new_margin(margin, len(fstack))
    def set_spacing(self, spacing):
        self.spacing = spacing
        self.writer.new_spacing(spacing)
    def push_style(self, *styles):
        if self.softspace:
            self.hard_break = self.para_end = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
        for style in styles:
            self.style_stack.append(style)
        self.writer.new_styles(tuple(self.style_stack))
    def pop_style(self, n=1):
        del self.style_stack[-n:]
        self.writer.new_styles(tuple(self.style_stack))
    def assert_line_data(self, flag=1):
        self.nospace = self.hard_break = not flag
        self.para_end = self.parskip = self.have_label = 0
class NullWriter:
    """Minimal writer interface to use in testing & inheritance.
    A writer which only provides the interface definition; no actions are
    taken on any methods.  This should be the base class for all writers
    which do not need to inherit any implementation methods.
    """
    def __init__(self): pass
    def flush(self): pass
    def new_alignment(self, align): pass
    def new_font(self, font): pass
    def new_margin(self, margin, level): pass
    def new_spacing(self, spacing): pass
    def new_styles(self, styles): pass
    def send_paragraph(self, blankline): pass
    def send_line_break(self): pass
    def send_hor_rule(self, *args, **kw): pass
    def send_label_data(self, data): pass
    def send_flowing_data(self, data): pass
    def send_literal_data(self, data): pass
class AbstractWriter(NullWriter):
    """A writer which can be used in debugging formatters, but not much else.
    Each method simply announces itself by printing its name and
    arguments on standard output.
    """
    def new_alignment(self, align):
        print "new_alignment(%r)" % (align,)
    def new_font(self, font):
        print "new_font(%r)" % (font,)
    def new_margin(self, margin, level):
        print "new_margin(%r, %d)" % (margin, level)
    def new_spacing(self, spacing):
        print "new_spacing(%r)" % (spacing,)
    def new_styles(self, styles):
        print "new_styles(%r)" % (styles,)
    def send_paragraph(self, blankline):
        print "send_paragraph(%r)" % (blankline,)
    def send_line_break(self):
        print "send_line_break()"
    def send_hor_rule(self, *args, **kw):
        print "send_hor_rule()"
    def send_label_data(self, data):
        print "send_label_data(%r)" % (data,)
    def send_flowing_data(self, data):
        print "send_flowing_data(%r)" % (data,)
    def send_literal_data(self, data):
        print "send_literal_data(%r)" % (data,)
class DumbWriter(NullWriter):
    """Simple writer class which writes output on the file object passed in
    as the file parameter or, if file is omitted, on standard output.  The
    output is simply word-wrapped to the number of columns specified by
    the maxcol parameter.  This class is suitable for reflowing a sequence
    of paragraphs.
    """
    def __init__(self, file=None, maxcol=72):
        self.file = file or sys.stdout
        self.maxcol = maxcol
        NullWriter.__init__(self)
        self.reset()
    def reset(self):
        self.col = 0
        self.atbreak = 0
    def send_paragraph(self, blankline):
        self.file.write('\n'*blankline)
        self.col = 0
        self.atbreak = 0
    def send_line_break(self):
        self.file.write('\n')
        self.col = 0
        self.atbreak = 0
    def send_hor_rule(self, *args, **kw):
        self.file.write('\n')
        self.file.write('-'*self.maxcol)
        self.file.write('\n')
        self.col = 0
        self.atbreak = 0
    def send_literal_data(self, data):
        self.file.write(data)
        i = data.rfind('\n')
        if i >= 0:
            self.col = 0
            data = data[i+1:]
        data = data.expandtabs()
        self.col = self.col + len(data)
        self.atbreak = 0
    def send_flowing_data(self, data):
        if not data: return
        atbreak = self.atbreak or data[0].isspace()
        col = self.col
        maxcol = self.maxcol
        write = self.file.write
        for word in data.split():
            if atbreak:
                if col + len(word) >= maxcol:
                    write('\n')
                    col = 0
                else:
                    write(' ')
                    col = col + 1
            write(word)
            col = col + len(word)
            atbreak = 1
        self.col = col
        self.atbreak = data[-1].isspace()
def test(file = None):
    w = DumbWriter()
    f = AbstractFormatter(w)
    if file is not None:
        fp = open(file)
    elif sys.argv[1:]:
        fp = open(sys.argv[1])
    else:
        fp = sys.stdin
    for line in fp:
        if line == '\n':
            f.end_paragraph(1)
        else:
            f.add_flowing_data(line)
    f.end_paragraph(0)
if __name__ == '__main__':
    test()
 | 
	apache-2.0 | 
| 
	kkk669/mxnet | 
	python/mxnet/visualization.py | 
	12 | 
	13772 | 
	# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, too-many-locals, fixme
# pylint: disable=too-many-branches, too-many-statements
# pylint: disable=too-many-arguments
# pylint: disable=dangerous-default-value
"""Visualization module"""
from __future__ import absolute_import
import re
import copy
import json
from .symbol import Symbol
def _str2tuple(string):
    """Convert shape string to list, internal use only.
    Parameters
    ----------
    string: str
        Shape string.
    Returns
    -------
    list of str
        Represents shape.
    """
    return re.findall(r"\d+", string)
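# Example (sketch): _str2tuple("(3, 3)") -> ['3', '3']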
def print_summary(symbol, shape=None, line_length=120, positions=[.44, .64, .74, 1.]):
    """Convert symbol for detail information.
    Parameters
    ----------
    symbol: Symbol
        Symbol to be visualized.
    shape: dict
        A dict of shapes, str->shape (tuple), given input shapes.
    line_length: int
        Total length of printed lines.
    positions: list
        Relative or absolute positions of log elements in each line.
    Returns
    ------
    None
    """
    if not isinstance(symbol, Symbol):
        raise TypeError("symbol must be Symbol")
    show_shape = False
    if shape is not None:
        show_shape = True
        interals = symbol.get_internals()
        _, out_shapes, _ = interals.infer_shape(**shape)
        if out_shapes is None:
            raise ValueError("Input shape is incomplete")
        shape_dict = dict(zip(interals.list_outputs(), out_shapes))
    conf = json.loads(symbol.tojson())
    nodes = conf["nodes"]
    heads = set(conf["heads"][0])
    if positions[-1] <= 1:
        positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Previous Layer']
    def print_row(fields, positions):
        """Print format row.
        Parameters
        ----------
        fields: list
            Information field.
        positions: list
            Field length ratio.
        Returns
        ------
        None
        """
        line = ''
        for i, field in enumerate(fields):
            line += str(field)
            line = line[:positions[i]]
            line += ' ' * (positions[i] - len(line))
        print(line)
    print('_' * line_length)
    print_row(to_display, positions)
    print('=' * line_length)
    def print_layer_summary(node, out_shape):
        """print layer information
        Parameters
        ----------
        node: dict
            Node information.
        out_shape: dict
            Node shape information.
        Returns
        ------
            Node total parameters.
        """
        op = node["op"]
        pre_node = []
        pre_filter = 0
        if op != "null":
            inputs = node["inputs"]
            for item in inputs:
                input_node = nodes[item[0]]
                input_name = input_node["name"]
                if input_node["op"] != "null" or item[0] in heads:
                    # add the preceding layer's name
                    pre_node.append(input_name)
                    if show_shape:
                        if input_node["op"] != "null":
                            key = input_name + "_output"
                        else:
                            key = input_name
                        if key in shape_dict:
                            shape = shape_dict[key][1:]
                            pre_filter = pre_filter + int(shape[0])
        cur_param = 0
        if op == 'Convolution':
            if ("no_bias" in node["attrs"]) and int(node["attrs"]["no_bias"]):
                cur_param = pre_filter * int(node["attrs"]["num_filter"])
                for k in _str2tuple(node["attrs"]["kernel"]):
                    cur_param *= int(k)
            else:
                cur_param = pre_filter * int(node["attrs"]["num_filter"])
                for k in _str2tuple(node["attrs"]["kernel"]):
                    cur_param *= int(k)
                cur_param += int(node["attrs"]["num_filter"])
        elif op == 'FullyConnected':
            if ("no_bias" in node["attrs"]) and int(node["attrs"]["no_bias"]):
                cur_param = pre_filter * (int(node["attrs"]["num_hidden"]))
            else:
                cur_param = (pre_filter+1) * (int(node["attrs"]["num_hidden"]))
        elif op == 'BatchNorm':
            key = node["name"] + "_output"
            if show_shape:
                num_filter = shape_dict[key][1]
                cur_param = int(num_filter) * 2
        if not pre_node:
            first_connection = ''
        else:
            first_connection = pre_node[0]
        fields = [node['name'] + '(' + op + ')',
                  "x".join([str(x) for x in out_shape]),
                  cur_param,
                  first_connection]
        print_row(fields, positions)
        if len(pre_node) > 1:
            for i in range(1, len(pre_node)):
                fields = ['', '', '', pre_node[i]]
                print_row(fields, positions)
        return cur_param
    total_params = 0
    for i, node in enumerate(nodes):
        out_shape = []
        op = node["op"]
        if op == "null" and i > 0:
            continue
        if op != "null" or i in heads:
            if show_shape:
                if op != "null":
                    key = node["name"] + "_output"
                else:
                    key = node["name"]
                if key in shape_dict:
                    out_shape = shape_dict[key][1:]
        total_params += print_layer_summary(nodes[i], out_shape)
        if i == len(nodes) - 1:
            print('=' * line_length)
        else:
            print('_' * line_length)
    print('Total params: %s' % total_params)
    print('_' * line_length)
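# Illustrative call from user code (a sketch; the tiny network below is
# hypothetical and uses the public mx.sym / mx.viz entry points):
#   data = mx.sym.Variable('data')
#   net = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=10)
#   mx.viz.print_summary(net, shape={'data': (1, 100)})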
def plot_network(symbol, title="plot", save_format='pdf', shape=None, node_attrs={},
                 hide_weights=True):
    """Creates a visualization (Graphviz digraph object) of the given computation graph.
    Graphviz must be installed for this function to work.
    Parameters
    ----------
    title: str, optional
        Title of the generated visualization.
    symbol: Symbol
        A symbol from the computation graph. The generated digraph will visualize the part
        of the computation graph required to compute `symbol`.
    shape: dict, optional
        Specifies the shape of the input tensors. If specified, the visualization will include
        the shape of the tensors between the nodes. `shape` is a dictionary mapping
        input symbol names (str) to the corresponding tensor shape (tuple).
    node_attrs: dict, optional
        Specifies the attributes for nodes in the generated visualization. `node_attrs` is
        a dictionary of Graphviz attribute names and values. For example,
            ``node_attrs={"shape":"oval","fixedsize":"false"}``
            will use oval shape for nodes and allow variable sized nodes in the visualization.
    hide_weights: bool, optional
        If True (default), then inputs with names of form *_weight (corresponding to weight
        tensors) or *_bias (corresponding to bias vectors) will be hidden for a cleaner
        visualization.
    Returns
    -------
    dot: Digraph
        A Graphviz digraph object visualizing the computation graph to compute `symbol`.
    Example
    -------
    >>> net = mx.sym.Variable('data')
    >>> net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=128)
    >>> net = mx.sym.Activation(data=net, name='relu1', act_type="relu")
    >>> net = mx.sym.FullyConnected(data=net, name='fc2', num_hidden=10)
    >>> net = mx.sym.SoftmaxOutput(data=net, name='out')
    >>> digraph = mx.viz.plot_network(net, shape={'data':(100,200)},
    ... node_attrs={"fixedsize":"false"})
    >>> digraph.view()
    """
    # todo add shape support
    try:
        from graphviz import Digraph
    except ImportError:
        raise ImportError("Draw network requires graphviz library")
    if not isinstance(symbol, Symbol):
        raise TypeError("symbol must be a Symbol")
    draw_shape = False
    if shape is not None:
        draw_shape = True
        interals = symbol.get_internals()
        _, out_shapes, _ = interals.infer_shape(**shape)
        if out_shapes is None:
            raise ValueError("Input shape is incomplete")
        shape_dict = dict(zip(interals.list_outputs(), out_shapes))
    conf = json.loads(symbol.tojson())
    nodes = conf["nodes"]
    # default attributes of node
    node_attr = {"shape": "box", "fixedsize": "true",
                 "width": "1.3", "height": "0.8034", "style": "filled"}
    # merge the dict provided by user and the default one
    node_attr.update(node_attrs)
    dot = Digraph(name=title, format=save_format)
    # color map
    cm = ("#8dd3c7", "#fb8072", "#ffffb3", "#bebada", "#80b1d3",
          "#fdb462", "#b3de69", "#fccde5")
    def looks_like_weight(name):
        """Internal helper to figure out if node should be hidden with `hide_weights`.
        """
        if name.endswith("_weight"):
            return True
        if name.endswith("_bias"):
            return True
        if name.endswith("_beta") or name.endswith("_gamma") or \
	   name.endswith("_moving_var") or name.endswith("_moving_mean"):
            return True
        return False
    # make nodes
    hidden_nodes = set()
    for node in nodes:
        op = node["op"]
        name = node["name"]
        # input data
        attr = copy.deepcopy(node_attr)
        label = name
        if op == "null":
            if looks_like_weight(node["name"]):
                if hide_weights:
                    hidden_nodes.add(node["name"])
                # else we don't render a node, but
                # don't add it to the hidden_nodes set
                # so it gets rendered as an empty oval
                continue
            attr["shape"] = "oval" # inputs get their own shape
            label = node["name"]
            attr["fillcolor"] = cm[0]
        elif op == "Convolution":
            label = r"Convolution\n%s/%s, %s" % ("x".join(_str2tuple(node["attrs"]["kernel"])),
                                                 "x".join(_str2tuple(node["attrs"]["stride"]))
                                                 if "stride" in node["attrs"] else "1",
                                                 node["attrs"]["num_filter"])
            attr["fillcolor"] = cm[1]
        elif op == "FullyConnected":
            label = r"FullyConnected\n%s" % node["attrs"]["num_hidden"]
            attr["fillcolor"] = cm[1]
        elif op == "BatchNorm":
            attr["fillcolor"] = cm[3]
        elif op == "Activation" or op == "LeakyReLU":
            label = r"%s\n%s" % (op, node["attrs"]["act_type"])
            attr["fillcolor"] = cm[2]
        elif op == "Pooling":
            label = r"Pooling\n%s, %s/%s" % (node["attrs"]["pool_type"],
                                             "x".join(_str2tuple(node["attrs"]["kernel"])),
                                             "x".join(_str2tuple(node["attrs"]["stride"]))
                                             if "stride" in node["attrs"] else "1")
            attr["fillcolor"] = cm[4]
        elif op == "Concat" or op == "Flatten" or op == "Reshape":
            attr["fillcolor"] = cm[5]
        elif op == "Softmax":
            attr["fillcolor"] = cm[6]
        else:
            attr["fillcolor"] = cm[7]
            if op == "Custom":
                label = node["attrs"]["op_type"]
        dot.node(name=name, label=label, **attr)
    # add edges
    for node in nodes:          # pylint: disable=too-many-nested-blocks
        op = node["op"]
        name = node["name"]
        if op == "null":
            continue
        else:
            inputs = node["inputs"]
            for item in inputs:
                input_node = nodes[item[0]]
                input_name = input_node["name"]
                if input_name not in hidden_nodes:
                    attr = {"dir": "back", 'arrowtail':'open'}
                    # add shapes
                    if draw_shape:
                        if input_node["op"] != "null":
                            key = input_name + "_output"
                            if "attrs" in input_node:
                                params = input_node["attrs"]
                                if "num_outputs" in params:
                                    key += str(int(params["num_outputs"]) - 1)
                            shape = shape_dict[key][1:]
                            label = "x".join([str(x) for x in shape])
                            attr["label"] = label
                        else:
                            key = input_name
                            shape = shape_dict[key][1:]
                            label = "x".join([str(x) for x in shape])
                            attr["label"] = label
                    dot.edge(tail_name=name, head_name=input_name, **attr)
    return dot
 | 
	apache-2.0 | 
| 
	acimmarusti/isl_exercises | 
	chap3/chap3ex8.py | 
	1 | 
	1315 | 
	from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.tools.plotting import scatter_matrix
import statsmodels.formula.api as smf
#from sklearn.linear_model import LinearRegression
#import scipy, scipy.stats
#from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import variance_inflation_factor, summary_table
filename = '../Auto.csv'
data = pd.read_csv(filename, na_values='?').dropna()
#Quantitative and qualitative predictors#
print(data.dtypes)
#Simple linear regression#
slinreg = smf.ols('mpg ~ horsepower', data=data).fit()
print(slinreg.summary())
st, fitdat, ss2 = summary_table(slinreg, alpha=0.05)
fittedvalues = fitdat[:,2]
predict_mean_se  = fitdat[:,3]
predict_mean_ci_low, predict_mean_ci_upp = fitdat[:,4:6].T
predict_ci_low, predict_ci_upp = fitdat[:,6:8].T
x = data['horsepower']
y = data['mpg']
#Residuals#
resd1 = y - fittedvalues
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y, 'o')
ax1.plot(x, fittedvalues, 'g-')
ax1.plot(x, predict_ci_low, 'r--')
ax1.plot(x, predict_ci_upp, 'r--')
ax1.plot(x, predict_mean_ci_low, 'b--')
ax1.plot(x, predict_mean_ci_upp, 'b--')
ax2.plot(resd1, fittedvalues, 'o')
plt.show()
 | 
	gpl-3.0 | 
| 
	Kwentar/ImageDownloader | 
	vk.py | 
	1 | 
	7993 | 
	import json
import random
from urllib.error import URLError
from urllib.parse import urlencode
from urllib.request import urlopen, http, Request
import time
from datetime import date
from Profiler import Profiler
import __setup_photo__ as setup
class VkError(Exception):
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
class VkUser:
    def __init__(self, uid, name, last_name, day_b, month_b, sex, city_id, age=-1, year_b=-1):
        self.uid = uid
        self.name = name
        self.last_name = last_name
        self.day_b = day_b
        self.month_b = month_b
        if year_b == -1:
            year_b = date.today().year - age
            if month_b < date.today().month or month_b == date.today().month and day_b < date.today().day:
                year_b -= 1
        self.year_b = year_b
        self.sex = sex
        self.city_id = city_id
    def __str__(self):
        return ";".join([self.uid, self.name, self.last_name,
                         self.day_b.__str__(), self.month_b.__str__(),
                         self.year_b.__str__(), self.sex.__str__(),
                         self.city_id.__str__()])
    def get_age(self):
        return date.today().year - self.year_b
class Vk:
    tokens = setup.user_tokens
    curr_token = ''
    p = Profiler()
    @staticmethod
    def check_time(value=0.5):
        if Vk.p.get_time() < value:
            time.sleep(value)
        Vk.p.start()
    @staticmethod
    def set_token(token):
        Vk.tokens.clear()
        Vk.tokens.append(token)
    @staticmethod
    def get_token():
        while True:
            el = random.choice(Vk.tokens)
            if el != Vk.curr_token:
                test_url = 'https://api.vk.com/method/getProfiles?uid=66748&v=5.103&access_token=' + el
                Vk.check_time(1)
                try:
                    response = urlopen(test_url).read()
                    result = json.loads(response.decode('utf-8'))
                    if 'response' in result.keys():
                        print('now I use the ' + el + ' token')
                        Vk.curr_token = el
                        return el
                except http.client.BadStatusLine as err_:
                    print("".join(['ERROR Vk.get_token', err_.__str__()]))
        raise VkError('all tokens are invalid: ' + result['error']['error_msg'].__str__())
    @staticmethod
    def call_api(method, params):
        Vk.check_time()
        while not Vk.curr_token:
            Vk.get_token()
        if isinstance(params, list):
            params_list = params[:]
        elif isinstance(params, dict):
            params_list = params.items()
        else:
            params_list = [params]
        params_list += [('access_token', Vk.curr_token), ('v', '5.103')]
        url = 'https://api.vk.com/method/%s?%s' % (method, urlencode(params_list))
        try:
            req = Request(url=url, headers={'User-agent': random.choice(setup.user_agents)})
            response = urlopen(req).read()
            result = json.loads(response.decode('utf-8'))
            try:
                if 'response' in result.keys():
                    return result['response']
                else:
                    raise VkError('no response on answer: ' + result['error']['error_msg'].__str__())
            except VkError as err_:
                print(err_.value)
                Vk.curr_token = Vk.get_token()
                # Vk.call_api(method, params)
        except URLError as err_:
            print('URLError: ' + err_.errno.__str__() + ", " + err_.reason.__str__())
        except http.client.BadStatusLine as err_:
            print("".join(['ERROR Vk.call_api', err_.__str__()]))
        except ConnectionResetError as err_:
            print("".join(['ERROR ConnectionResetError', err_.__str__()]))
        except ConnectionAbortedError as err_:
            print("".join(['ERROR ConnectionAbortedError', err_.__str__()]))
        return list()
    @staticmethod
    def get_uids(age, month, day, city_id, fields='sex'):
        search_q = list()
        search_q.append(('offset', '0'))
        search_q.append(('count', '300'))
        search_q.append(('city', city_id))
        search_q.append(('fields', fields))
        search_q.append(('age_from', age))
        search_q.append(('age_to', age))
        search_q.append(('has_photo', '1'))
        search_q.append(('birth_day', day))
        search_q.append(('birth_month', month))
        r = Vk.call_api('users.search', search_q)
        count = r['count']
        users = list()
        for el in r['items']:
            if 'id' in el.keys() and not el['is_closed']:
                user = VkUser(uid=el['id'].__str__(), name=el['first_name'],
                              last_name=el['last_name'], sex=el['sex'],
                              day_b=day, month_b=month, age=age, city_id=city_id)
                users.append(user)
        if count > 1000:
            Vk.warning('Count more than 1000, count = {}, age = {}, '
                       'month = {}, day = {}'.format(count, age, month, day))
        return users
    @staticmethod
    def create_user_from_response(response):
        if 'user_id' in response.keys():
            uid = response['user_id'].__str__()
        elif 'uid' in response.keys():
            uid = response['uid'].__str__()
        else:
            return None
        if 'deactivated' in response.keys():
            return None
        last_name = 'None'
        sex = 'None'
        name = 'None'
        city_id = 'None'
        day, month, age = [0, 0, 0]
        if 'last_name' in response.keys():
            last_name = response['last_name'].__str__()
        if 'first_name' in response.keys():
            name = response['first_name'].__str__()
        if 'sex' in response.keys():
            sex = response['sex'].__str__()
        if 'city' in response.keys():
            city_id = response['city'].__str__()
        if 'bdate' in response.keys():
            bdate = response['bdate'].__str__().split('.')
            if len(bdate) > 2:
                day, month, age = map(int, bdate)
                age = date.today().year - age
            else:
                day, month = map(int, bdate)
        user = VkUser(uid=uid, name=name, last_name=last_name, sex=sex, day_b=day,
                      month_b=month, age=age, city_id=city_id)
        return user
    @staticmethod
    def get_user_info(uid, fields='city,bdate,sex'):
        search_q = list()
        search_q.append(('user_id', uid))
        search_q.append(('fields', fields))
        r = Vk.call_api('users.get', search_q)
        for el in r:
            user = Vk.create_user_from_response(el)
            if user is not None:
                return user
    @staticmethod
    def get_friends(uid, fields='city,bdate,sex'):
        search_q = list()
        search_q.append(('user_id', uid))
        search_q.append(('offset', '0'))
        search_q.append(('count', '1000'))
        search_q.append(('fields', fields))
        r = Vk.call_api('friends.get', search_q)
        count = len(r)
        users = list()
        for el in r:
            user = Vk.create_user_from_response(el)
            if user is not None:
                users.append(user)
        if count > 1000:
            Vk.warning('Count more than 1000')
        return users
    @staticmethod
    def get_profile_photos(id_):
        q = list()
        q.append(('owner_id', id_))
        q.append(('count', '10'))
        q.append(('rev', '1'))
        q.append(('extended', '1'))
        q.append(('photos_size', '0'))
        r = Vk.call_api('photos.getAll', q)
        images = []
        for photo in r['items']:
            max_photo = max(photo['sizes'], key=lambda x: x['width']*x['height'])
            images.append(max_photo['url'])
        return images
    @staticmethod
    def warning(msg):
        print(msg)
 | 
	mit | 
| 
	svirt/tp-libvirt | 
	libvirt/tests/src/virsh_cmd/filter/virsh_nwfilter_dumpxml.py | 
	4 | 
	3574 | 
	import logging
from autotest.client.shared import error
from virttest import virsh
from virttest import libvirt_xml
from provider import libvirt_version
def check_list(uuid, name):
    """
    Return True if filter found in nwfilter-list
    :param uuid: filter uuid
    :param name: filter name
    :return: True if found, False if not found
    """
    cmd_result = virsh.nwfilter_list(options="",
                                     ignore_status=True, debug=True)
    output = cmd_result.stdout.strip().split('\n')
    for i in range(2, len(output)):
        if output[i].split() == [uuid, name]:
            return True
    return False
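# The slicing above assumes `virsh nwfilter-list` output of roughly this
# shape (an assumption for illustration: two header lines, then one
# "<uuid> <name>" row per filter):
#  UUID                                  Name
# ------------------------------------------------------------------
#  11111111-2222-3333-4444-555555555555  example-filter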
def run(test, params, env):
    """
    Test command: virsh nwfilter-dumpxml.
    1) Prepare parameters.
    2) Run dumpxml command.
    3) Check result.
    """
    # Prepare parameters
    filter_name = params.get("dumpxml_filter_name", "")
    options_ref = params.get("dumpxml_options_ref", "")
    status_error = params.get("status_error", "no")
    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = 'testacl'
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")
    virsh_dargs = {'ignore_status': True, 'debug': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri
    # Run command
    cmd_result = virsh.nwfilter_dumpxml(filter_name, options=options_ref,
                                        **virsh_dargs)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status
    # Check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command.")
    elif status_error == "no":
        if status:
            raise error.TestFail("Run failed with right command.")
        # Get uuid and name from output xml and compare with nwfilter-list
        # output
        new_filter = libvirt_xml.NwfilterXML()
        new_filter['xml'] = output
        uuid = new_filter.uuid
        name = new_filter.filter_name
        if check_list(uuid, name):
            logging.debug("The filter with uuid %s and name %s" % (uuid, name) +
                          " from nwfilter-dumpxml was found in"
                          " nwfilter-list output")
        else:
            raise error.TestFail("The uuid %s with name %s from" % (uuid, name) +
                                 " nwfilter-dumpxml did not match with"
                                 " nwfilter-list output")
        # Run command second time with uuid
        cmd_result = virsh.nwfilter_dumpxml(uuid, options=options_ref,
                                            **virsh_dargs)
        output1 = cmd_result.stdout.strip()
        status1 = cmd_result.exit_status
        if status_error == "yes":
            if status1 == 0:
                raise error.TestFail("Run successfully with wrong command.")
        elif status_error == "no":
            if status1:
                raise error.TestFail("Run failed with right command.")
        if output1 != output:
            raise error.TestFail("nwfilter dumpxml output was different" +
                                 " between using filter uuid and name")
 | 
	gpl-2.0 | 
| 
	mapennell/ansible | 
	test/units/mock/loader.py | 
	50 | 
	2876 | 
	# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
class DictDataLoader(DataLoader):
    def __init__(self, file_mapping=dict()):
        assert type(file_mapping) == dict
        self._file_mapping = file_mapping
        self._build_known_directories()
        super(DictDataLoader, self).__init__()
    def load_from_file(self, path):
        if path in self._file_mapping:
            return self.load(self._file_mapping[path], path)
        return None
    def _get_file_contents(self, path):
        if path in self._file_mapping:
            return (self._file_mapping[path], False)
        else:
            raise AnsibleParserError("file not found: %s" % path)
    def path_exists(self, path):
        return path in self._file_mapping or path in self._known_directories
    def is_file(self, path):
        return path in self._file_mapping
    def is_directory(self, path):
        return path in self._known_directories
    def list_directory(self, path):
        return [x for x in self._known_directories]
    def _add_known_directory(self, directory):
        if directory not in self._known_directories:
            self._known_directories.append(directory)
    def _build_known_directories(self):
        self._known_directories  = []
        for path in self._file_mapping:
            dirname = os.path.dirname(path)
            while dirname not in ('/', ''):
                self._add_known_directory(dirname)
                dirname = os.path.dirname(dirname)
    def push(self, path, content):
        rebuild_dirs = False
        if path not in self._file_mapping:
            rebuild_dirs = True
    
        self._file_mapping[path] = content
        if rebuild_dirs:
            self._build_known_directories()
    def pop(self, path):
        if path in self._file_mapping:
            del self._file_mapping[path]
            self._build_known_directories()
    def clear(self):
        self._file_mapping = dict()
        self._known_directories = []
 | 
	gpl-3.0 | 
| 
	strint/tensorflow | 
	tensorflow/examples/image_retraining/retrain.py | 
	19 | 
	43141 | 
	# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple transfer learning with an Inception v3 architecture model which
displays summaries in TensorBoard.
This example shows how to take an Inception v3 architecture model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector for each image. We
train a softmax layer on top of this representation. Assuming the softmax layer
contains N labels, this corresponds to learning N + 2048*N model parameters
corresponding to the learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import hashlib
import os.path
import random
import re
import struct
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
FLAGS = None
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1  # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
  """Builds a list of training images from the file system.
  Analyzes the sub folders in the image directory, splits them into stable
  training, testing, and validation sets, and returns a data structure
  describing the lists of images for each label and their paths.
  Args:
    image_dir: String path to a folder containing subfolders of images.
    testing_percentage: Integer percentage of the images to reserve for tests.
    validation_percentage: Integer percentage of images reserved for validation.
  Returns:
    A dictionary containing an entry for each label subfolder, with images split
    into training, testing, and validation sets within each label.
  """
  if not gfile.Exists(image_dir):
    print("Image directory '" + image_dir + "' not found.")
    return None
  result = {}
  sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
  # The root directory comes first, so skip it.
  is_root_dir = True
  for sub_dir in sub_dirs:
    if is_root_dir:
      is_root_dir = False
      continue
    extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
    file_list = []
    dir_name = os.path.basename(sub_dir)
    if dir_name == image_dir:
      continue
    print("Looking for images in '" + dir_name + "'")
    for extension in extensions:
      file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
      file_list.extend(gfile.Glob(file_glob))
    if not file_list:
      print('No files found')
      continue
    if len(file_list) < 20:
      print('WARNING: Folder has less than 20 images, which may cause issues.')
    elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
      print('WARNING: Folder {} has more than {} images. Some images will '
            'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
    label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
    training_images = []
    testing_images = []
    validation_images = []
    for file_name in file_list:
      base_name = os.path.basename(file_name)
      # We want to ignore anything after '_nohash_' in the file name when
      # deciding which set to put an image in, so that the data set creator has a way of
      # grouping photos that are close variations of each other. For example
      # this is used in the plant disease data set to group multiple pictures of
      # the same leaf.
      hash_name = re.sub(r'_nohash_.*$', '', file_name)
      # This looks a bit magical, but we need to decide whether this file should
      # go into the training, testing, or validation sets, and we want to keep
      # existing files in the same set even if more files are subsequently
      # added.
      # To do that, we need a stable way of deciding based on just the file name
      # itself, so we do a hash of that and then use that to generate a
      # probability value that we use to assign it.
      hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
      percentage_hash = ((int(hash_name_hashed, 16) %
                          (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                         (100.0 / MAX_NUM_IMAGES_PER_CLASS))
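      # For illustration (hypothetical numbers): a file whose percentage_hash
      # works out to 12.3 lands in 'validation' when validation_percentage is
      # 15, and in 'testing' when validation_percentage and testing_percentage
      # are both 10.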
      if percentage_hash < validation_percentage:
        validation_images.append(base_name)
      elif percentage_hash < (testing_percentage + validation_percentage):
        testing_images.append(base_name)
      else:
        training_images.append(base_name)
    result[label_name] = {
        'dir': dir_name,
        'training': training_images,
        'testing': testing_images,
        'validation': validation_images,
    }
  return result
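# A hypothetical result for an image_dir containing 'daisy' and 'roses'
# subfolders would look like:
#   {'daisy': {'dir': 'daisy', 'training': [...], 'testing': [...],
#              'validation': [...]},
#    'roses': {...}}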
def get_image_path(image_lists, label_name, index, image_dir, category):
  """Returns a path to an image for a label at the given index.
  Args:
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Int offset of the image we want. This will be moduloed by the
    available number of images for the label, so it can be arbitrarily large.
    image_dir: Root folder string of the subfolders containing the training
    images.
    category: Name string of set to pull images from - training, testing, or
    validation.
  Returns:
    File system path string to an image that meets the requested parameters.
  """
  if label_name not in image_lists:
    tf.logging.fatal('Label does not exist %s.', label_name)
  label_lists = image_lists[label_name]
  if category not in label_lists:
    tf.logging.fatal('Category does not exist %s.', category)
  category_list = label_lists[category]
  if not category_list:
    tf.logging.fatal('Label %s has no images in the category %s.',
                     label_name, category)
  mod_index = index % len(category_list)
  base_name = category_list[mod_index]
  sub_dir = label_lists['dir']
  full_path = os.path.join(image_dir, sub_dir, base_name)
  return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
                        category):
  """Returns a path to a bottleneck file for a label at the given index.
  Args:
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Integer offset of the image we want. This will be moduloed by the
    available number of images for the label, so it can be arbitrarily large.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    category: Name string of set to pull images from - training, testing, or
    validation.
  Returns:
    File system path string to an image that meets the requested parameters.
  """
  return get_image_path(image_lists, label_name, index, bottleneck_dir,
                        category) + '.txt'
def create_inception_graph():
  """Creates a graph from the saved GraphDef file and returns a Graph object.
  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
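# Note: the three tensors returned above are looked up by the names defined at
# the top of this file (BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
# RESIZED_INPUT_TENSOR_NAME), so they only exist once the frozen
# classify_image_graph_def.pb fetched by maybe_download_and_extract() is loaded.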
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
                            bottleneck_tensor):
  """Runs inference on an image to extract the 'bottleneck' summary layer.
  Args:
    sess: Current active TensorFlow Session.
    image_data: String of raw JPEG data.
    image_data_tensor: Input data layer in the graph.
    bottleneck_tensor: Layer before the final softmax.
  Returns:
    Numpy array of bottleneck values.
  """
  bottleneck_values = sess.run(
      bottleneck_tensor,
      {image_data_tensor: image_data})
  bottleneck_values = np.squeeze(bottleneck_values)
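  # The squeeze drops the leading batch dimension, leaving a 1-D vector of
  # BOTTLENECK_TENSOR_SIZE (2048) float values for this single image.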
  return bottleneck_values
def maybe_download_and_extract():
  """Download and extract model tar file.
  If the pretrained model we're using doesn't already exist, this function
  downloads it from the TensorFlow.org website and unpacks it into a directory.
  """
  dest_directory = FLAGS.model_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' %
                       (filename,
                        float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL,
                                             filepath,
                                             _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
  """Makes sure the folder exists on disk.
  Args:
    dir_name: Path string to the folder we want to create.
  """
  if not os.path.exists(dir_name):
    os.makedirs(dir_name)
def write_list_of_floats_to_file(list_of_floats, file_path):
  """Writes a given list of floats to a binary file.
  Args:
    list_of_floats: List of floats we want to write to a file.
    file_path: Path to a file where list of floats will be stored.
  """
  s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)
  with open(file_path, 'wb') as f:
    f.write(s)
def read_list_of_floats_from_file(file_path):
  """Reads list of floats from a given file.
  Args:
    file_path: Path to a file where list of floats was stored.
  Returns:
    Array of bottleneck values (list of floats).
  """
  with open(file_path, 'rb') as f:
    s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())
    return list(s)
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor):
  print('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index, image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  bottleneck_values = run_bottleneck_on_image(sess, image_data, jpeg_data_tensor, bottleneck_tensor)
  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)
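# The cache written above lives at bottleneck_dir/<label folder>/<image name>.txt
# as a single line of comma-separated floats, which is what
# get_or_create_bottleneck() below parses back into a list.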
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
                             category, bottleneck_dir, jpeg_data_tensor,
                             bottleneck_tensor):
  """Retrieves or calculates bottleneck values for an image.
  If a cached version of the bottleneck data exists on-disk, return that,
  otherwise calculate the data and save it to disk for future use.
  Args:
    sess: The current active TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Integer offset of the image we want. This will be modulo-ed by the
    available number of images for the label, so it can be arbitrarily large.
    image_dir: Root folder string  of the subfolders containing the training
    images.
    category: Name string of which  set to pull images from - training, testing,
    or validation.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: The tensor to feed loaded jpeg data into.
    bottleneck_tensor: The output tensor for the bottleneck values.
  Returns:
    Numpy array of values produced by the bottleneck layer for the image.
  """
  label_lists = image_lists[label_name]
  sub_dir = label_lists['dir']
  sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
  ensure_dir_exists(sub_dir_path)
  bottleneck_path = get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category)
  if not os.path.exists(bottleneck_path):
    create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor)
  with open(bottleneck_path, 'r') as bottleneck_file:
    bottleneck_string = bottleneck_file.read()
  did_hit_error = False
  try:
    bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
  except ValueError:
    print("Invalid float found, recreating bottleneck")
    did_hit_error = True
  if did_hit_error:
    create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor)
    with open(bottleneck_path, 'r') as bottleneck_file:
      bottleneck_string = bottleneck_file.read()
    # Allow exceptions to propagate here, since they shouldn't happen after a fresh creation
    bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
  return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
                      jpeg_data_tensor, bottleneck_tensor):
  """Ensures all the training, testing, and validation bottlenecks are cached.
  Because we're likely to read the same image multiple times (if there are no
  distortions applied during training) it can speed things up a lot if we
  calculate the bottleneck layer values once for each image during
  preprocessing, and then just read those cached values repeatedly during
  training. Here we go through all the images we've found, calculate those
  values, and save them off.
  Args:
    sess: The current active TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    image_dir: Root folder string of the subfolders containing the training
    images.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: Input tensor for jpeg data from file.
    bottleneck_tensor: The penultimate output layer of the graph.
  Returns:
    Nothing.
  """
  how_many_bottlenecks = 0
  ensure_dir_exists(bottleneck_dir)
  for label_name, label_lists in image_lists.items():
    for category in ['training', 'testing', 'validation']:
      category_list = label_lists[category]
      for index, unused_base_name in enumerate(category_list):
        get_or_create_bottleneck(sess, image_lists, label_name, index,
                                 image_dir, category, bottleneck_dir,
                                 jpeg_data_tensor, bottleneck_tensor)
        how_many_bottlenecks += 1
        if how_many_bottlenecks % 100 == 0:
          print(str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
                                  bottleneck_dir, image_dir, jpeg_data_tensor,
                                  bottleneck_tensor):
  """Retrieves bottleneck values for cached images.
  If no distortions are being applied, this function can retrieve the cached
  bottleneck values directly from disk for images. It picks a random set of
  images from the specified category.
  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: If positive, a random sample of this size will be chosen.
    If negative, all bottlenecks will be retrieved.
    category: Name string of which set to pull from - training, testing, or
    validation.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    image_dir: Root folder string of the subfolders containing the training
    images.
    jpeg_data_tensor: The layer to feed jpeg image data into.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.
  Returns:
    List of bottleneck arrays, their corresponding ground truths, and the
    relevant filenames.
  """
  class_count = len(image_lists.keys())
  bottlenecks = []
  ground_truths = []
  filenames = []
  if how_many >= 0:
    # Retrieve a random sample of bottlenecks.
    for unused_i in range(how_many):
      label_index = random.randrange(class_count)
      label_name = list(image_lists.keys())[label_index]
      image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
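      # The random index can be much larger than the number of images for this
      # label; get_image_path() and get_or_create_bottleneck() reduce it modulo
      # the category's image count, so every cached image remains reachable.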
      image_name = get_image_path(image_lists, label_name, image_index,
                                  image_dir, category)
      bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
                                            image_index, image_dir, category,
                                            bottleneck_dir, jpeg_data_tensor,
                                            bottleneck_tensor)
      ground_truth = np.zeros(class_count, dtype=np.float32)
      ground_truth[label_index] = 1.0
      bottlenecks.append(bottleneck)
      ground_truths.append(ground_truth)
      filenames.append(image_name)
  else:
    # Retrieve all bottlenecks.
    for label_index, label_name in enumerate(image_lists.keys()):
      for image_index, image_name in enumerate(
          image_lists[label_name][category]):
        image_name = get_image_path(image_lists, label_name, image_index,
                                    image_dir, category)
        bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
                                              image_index, image_dir, category,
                                              bottleneck_dir, jpeg_data_tensor,
                                              bottleneck_tensor)
        ground_truth = np.zeros(class_count, dtype=np.float32)
        ground_truth[label_index] = 1.0
        bottlenecks.append(bottleneck)
        ground_truths.append(ground_truth)
        filenames.append(image_name)
  return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
    sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
    distorted_image, resized_input_tensor, bottleneck_tensor):
  """Retrieves bottleneck values for training images, after distortions.
  If we're training with distortions like crops, scales, or flips, we have to
  recalculate the full model for every image, and so we can't use cached
  bottleneck values. Instead we find random images for the requested category,
  run them through the distortion graph, and then the full graph to get the
  bottleneck results for each.
  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: The integer number of bottleneck values to return.
    category: Name string of which set of images to fetch - training, testing,
    or validation.
    image_dir: Root folder string of the subfolders containing the training
    images.
    input_jpeg_tensor: The input layer we feed the image data to.
    distorted_image: The output node of the distortion graph.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.
  Returns:
    List of bottleneck arrays and their corresponding ground truths.
  """
  class_count = len(image_lists.keys())
  bottlenecks = []
  ground_truths = []
  for unused_i in range(how_many):
    label_index = random.randrange(class_count)
    label_name = list(image_lists.keys())[label_index]
    image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
    image_path = get_image_path(image_lists, label_name, image_index, image_dir,
                                category)
    if not gfile.Exists(image_path):
      tf.logging.fatal('File does not exist %s', image_path)
    jpeg_data = gfile.FastGFile(image_path, 'rb').read()
    # Note that we materialize the distorted_image_data as a numpy array before
    # running inference on the image. This involves two memory copies and might
    # be optimized in other implementations.
    distorted_image_data = sess.run(distorted_image,
                                    {input_jpeg_tensor: jpeg_data})
    bottleneck = run_bottleneck_on_image(sess, distorted_image_data,
                                         resized_input_tensor,
                                         bottleneck_tensor)
    ground_truth = np.zeros(class_count, dtype=np.float32)
    ground_truth[label_index] = 1.0
    bottlenecks.append(bottleneck)
    ground_truths.append(ground_truth)
  return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Whether any distortions are enabled, from the input flags.
  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.
  Returns:
    Boolean value indicating whether any distortions should be applied.
  """
  return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
          (random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Creates the operations to apply the specified distortions.
  During training it can help to improve the results if we run the images
  through simple distortions like crops, scales, and flips. These reflect the
  kind of variations we expect in the real world, and so can help train the
  model to cope with natural data more effectively. Here we take the supplied
  parameters and construct a network of operations to apply them to an image.
  Cropping
  ~~~~~~~~
  Cropping is done by placing a bounding box at a random position in the full
  image. The cropping parameter controls the size of that box relative to the
  input image. If it's zero, then the box is the same size as the input and no
  cropping is performed. If the value is 50%, then the crop box will be half the
  width and height of the input. In a diagram it looks like this:
  <       width         >
  +---------------------+
  |                     |
  |   width - crop%     |
  |    <      >         |
  |    +------+         |
  |    |      |         |
  |    |      |         |
  |    |      |         |
  |    +------+         |
  |                     |
  |                     |
  +---------------------+
  Scaling
  ~~~~~~~
  Scaling is a lot like cropping, except that the bounding box is always
  centered and its size varies randomly within the given range. For example if
  the scale percentage is zero, then the bounding box is the same size as the
  input and no scaling is applied. If it's 50%, then the bounding box will be
  sized at random between half and the full width and height of the input.
  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.
  Returns:
    The jpeg input layer and the distorted result tensor.
  """
  jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
  decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
  decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
  decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
  margin_scale = 1.0 + (random_crop / 100.0)
  resize_scale = 1.0 + (random_scale / 100.0)
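  # As a concrete (hypothetical) setting, random_crop=10 and random_scale=20
  # give margin_scale=1.1 and resize_scale=1.2, so the image is resized below
  # to between 1.1x and 1.32x of the model input size before the random crop.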
  margin_scale_value = tf.constant(margin_scale)
  resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
                                         minval=1.0,
                                         maxval=resize_scale)
  scale_value = tf.multiply(margin_scale_value, resize_scale_value)
  precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)
  precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)
  precrop_shape = tf.stack([precrop_height, precrop_width])
  precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
  precropped_image = tf.image.resize_bilinear(decoded_image_4d,
                                              precrop_shape_as_int)
  precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
  cropped_image = tf.random_crop(precropped_image_3d,
                                 [MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
                                  MODEL_INPUT_DEPTH])
  if flip_left_right:
    flipped_image = tf.image.random_flip_left_right(cropped_image)
  else:
    flipped_image = cropped_image
  brightness_min = 1.0 - (random_brightness / 100.0)
  brightness_max = 1.0 + (random_brightness / 100.0)
  brightness_value = tf.random_uniform(tensor_shape.scalar(),
                                       minval=brightness_min,
                                       maxval=brightness_max)
  brightened_image = tf.multiply(flipped_image, brightness_value)
  distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
  return jpeg_data, distort_result
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
  """Adds a new softmax and fully-connected layer for training.
  We need to retrain the top layer to identify our new classes, so this function
  adds the right operations to the graph, along with some variables to hold the
  weights, and then sets up all the gradients for the backward pass.
  The set up for the softmax and fully-connected layers is based on:
  https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
  Args:
    class_count: Integer of how many categories of things we're trying to
    recognize.
    final_tensor_name: Name string for the new final node that produces results.
    bottleneck_tensor: The output of the main CNN graph.
  Returns:
    The tensors for the training and cross entropy results, and tensors for the
    bottleneck input and ground truth input.
  """
  with tf.name_scope('input'):
    bottleneck_input = tf.placeholder_with_default(
        bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
        name='BottleneckInputPlaceholder')
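    # placeholder_with_default lets training feed cached bottleneck values here,
    # while an unfed graph still wires straight through to the live
    # bottleneck_tensor for single-image inference.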
    ground_truth_input = tf.placeholder(tf.float32,
                                        [None, class_count],
                                        name='GroundTruthInput')
  # Organizing the following ops as `final_training_ops` so they're easier
  # to see in TensorBoard
  layer_name = 'final_training_ops'
  with tf.name_scope(layer_name):
    with tf.name_scope('weights'):
      layer_weights = tf.Variable(
          tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count],
                              stddev=0.001),
          name='final_weights')
      variable_summaries(layer_weights)
    with tf.name_scope('biases'):
      layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
      variable_summaries(layer_biases)
    with tf.name_scope('Wx_plus_b'):
      logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
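      # logits has shape [batch_size, class_count]; the softmax below turns it
      # into per-class probabilities published under final_tensor_name.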
      tf.summary.histogram('pre_activations', logits)
  final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
  tf.summary.histogram('activations', final_tensor)
  with tf.name_scope('cross_entropy'):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        labels=ground_truth_input, logits=logits)
    with tf.name_scope('total'):
      cross_entropy_mean = tf.reduce_mean(cross_entropy)
  tf.summary.scalar('cross_entropy', cross_entropy_mean)
  with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
        cross_entropy_mean)
  return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
          final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.
  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data
    into.
  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      prediction = tf.argmax(result_tensor, 1)
      correct_prediction = tf.equal(
          prediction, tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction
def main(_):
  # Set up the directory we'll write summaries to for TensorBoard.
  if tf.gfile.Exists(FLAGS.summaries_dir):
    tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
  tf.gfile.MakeDirs(FLAGS.summaries_dir)
  # Set up the pre-trained graph.
  maybe_download_and_extract()
  graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
      create_inception_graph())
  # Look at the folder structure, and create lists of all the images.
  image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
                                   FLAGS.validation_percentage)
  class_count = len(image_lists.keys())
  if class_count == 0:
    print('No valid folders of images found at ' + FLAGS.image_dir)
    return -1
  if class_count == 1:
    print('Only one valid folder of images found at ' + FLAGS.image_dir +
          ' - multiple classes are needed for classification.')
    return -1
  # See if the command-line flags mean we're applying any distortions.
  do_distort_images = should_distort_images(
      FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
      FLAGS.random_brightness)
  sess = tf.Session()
  if do_distort_images:
    # We will be applying distortions, so set up the operations we'll need.
    distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(
        FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
        FLAGS.random_brightness)
  else:
    # We'll make sure we've calculated the 'bottleneck' values for every image
    # and cached them on disk.
    cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir,
                      jpeg_data_tensor, bottleneck_tensor)
  # Add the new layer that we'll be training.
  (train_step, cross_entropy, bottleneck_input, ground_truth_input,
   final_tensor) = add_final_training_ops(len(image_lists.keys()),
                                          FLAGS.final_tensor_name,
                                          bottleneck_tensor)
  # Create the operations we need to evaluate the accuracy of our new layer.
  evaluation_step, prediction = add_evaluation_step(
      final_tensor, ground_truth_input)
  # Merge all the summaries and write them out to /tmp/retrain_logs (by default)
  merged = tf.summary.merge_all()
  train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                       sess.graph)
  validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
  # Set up all our weights to their initial default values.
  init = tf.global_variables_initializer()
  sess.run(init)
  # Run the training for as many cycles as requested on the command line.
  for i in range(FLAGS.how_many_training_steps):
    # Get a batch of input bottleneck values, either calculated fresh every time
    # with distortions applied, or from the cache stored on disk.
    if do_distort_images:
      train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(
          sess, image_lists, FLAGS.train_batch_size, 'training',
          FLAGS.image_dir, distorted_jpeg_data_tensor,
          distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
    else:
      train_bottlenecks, train_ground_truth, _ = get_random_cached_bottlenecks(
          sess, image_lists, FLAGS.train_batch_size, 'training',
          FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
          bottleneck_tensor)
    # Feed the bottlenecks and ground truth into the graph, and run a training
    # step. Capture training summaries for TensorBoard with the `merged` op.
    train_summary, _ = sess.run([merged, train_step],
             feed_dict={bottleneck_input: train_bottlenecks,
                        ground_truth_input: train_ground_truth})
    train_writer.add_summary(train_summary, i)
    # Every so often, print out how well the graph is training.
    is_last_step = (i + 1 == FLAGS.how_many_training_steps)
    if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
      train_accuracy, cross_entropy_value = sess.run(
          [evaluation_step, cross_entropy],
          feed_dict={bottleneck_input: train_bottlenecks,
                     ground_truth_input: train_ground_truth})
      print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
                                                      train_accuracy * 100))
      print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
                                                 cross_entropy_value))
      validation_bottlenecks, validation_ground_truth, _ = (
          get_random_cached_bottlenecks(
              sess, image_lists, FLAGS.validation_batch_size, 'validation',
              FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
              bottleneck_tensor))
      # Run a validation step and capture training summaries for TensorBoard
      # with the `merged` op.
      validation_summary, validation_accuracy = sess.run(
          [merged, evaluation_step],
          feed_dict={bottleneck_input: validation_bottlenecks,
                     ground_truth_input: validation_ground_truth})
      validation_writer.add_summary(validation_summary, i)
      print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
            (datetime.now(), i, validation_accuracy * 100,
             len(validation_bottlenecks)))
  # We've completed all our training, so run a final test evaluation on
  # some new images we haven't used before.
  test_bottlenecks, test_ground_truth, test_filenames = (
      get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,
                                    'testing', FLAGS.bottleneck_dir,
                                    FLAGS.image_dir, jpeg_data_tensor,
                                    bottleneck_tensor))
  test_accuracy, predictions = sess.run(
      [evaluation_step, prediction],
      feed_dict={bottleneck_input: test_bottlenecks,
                 ground_truth_input: test_ground_truth})
  print('Final test accuracy = %.1f%% (N=%d)' % (
      test_accuracy * 100, len(test_bottlenecks)))
  if FLAGS.print_misclassified_test_images:
    print('=== MISCLASSIFIED TEST IMAGES ===')
    for i, test_filename in enumerate(test_filenames):
      if predictions[i] != test_ground_truth[i].argmax():
        print('%70s  %s' % (test_filename,
                            list(image_lists.keys())[predictions[i]]))
  # Write out the trained graph and labels with the weights stored as constants.
  output_graph_def = graph_util.convert_variables_to_constants(
      sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
  with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
    f.write(output_graph_def.SerializeToString())
  with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
    f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--image_dir',
      type=str,
      default='',
      help='Path to folders of labeled images.'
  )
  parser.add_argument(
      '--output_graph',
      type=str,
      default='/tmp/output_graph.pb',
      help='Where to save the trained graph.'
  )
  parser.add_argument(
      '--output_labels',
      type=str,
      default='/tmp/output_labels.txt',
      help='Where to save the trained graph\'s labels.'
  )
  parser.add_argument(
      '--summaries_dir',
      type=str,
      default='/tmp/retrain_logs',
      help='Where to save summary logs for TensorBoard.'
  )
  parser.add_argument(
      '--how_many_training_steps',
      type=int,
      default=4000,
      help='How many training steps to run before ending.'
  )
  parser.add_argument(
      '--learning_rate',
      type=float,
      default=0.01,
      help='How large a learning rate to use when training.'
  )
  parser.add_argument(
      '--testing_percentage',
      type=int,
      default=10,
      help='What percentage of images to use as a test set.'
  )
  parser.add_argument(
      '--validation_percentage',
      type=int,
      default=10,
      help='What percentage of images to use as a validation set.'
  )
  parser.add_argument(
      '--eval_step_interval',
      type=int,
      default=10,
      help='How often to evaluate the training results.'
  )
  parser.add_argument(
      '--train_batch_size',
      type=int,
      default=100,
      help='How many images to train on at a time.'
  )
  parser.add_argument(
      '--test_batch_size',
      type=int,
      default=-1,
      help="""\
      How many images to test on. This test set is only used once, to evaluate
      the final accuracy of the model after training completes.
      A value of -1 causes the entire test set to be used, which leads to more
      stable results across runs.\
      """
  )
  parser.add_argument(
      '--validation_batch_size',
      type=int,
      default=100,
      help="""\
      How many images to use in an evaluation batch. This validation set is
      used much more often than the test set, and is an early indicator of how
      accurate the model is during training.
      A value of -1 causes the entire validation set to be used, which leads to
      more stable results across training iterations, but may be slower on large
      training sets.\
      """
  )
  parser.add_argument(
      '--print_misclassified_test_images',
      default=False,
      help="""\
      Whether to print out a list of all misclassified test images.\
      """,
      action='store_true'
  )
  parser.add_argument(
      '--model_dir',
      type=str,
      default='/tmp/imagenet',
      help="""\
      Path to classify_image_graph_def.pb,
      imagenet_synset_to_human_label_map.txt, and
      imagenet_2012_challenge_label_map_proto.pbtxt.\
      """
  )
  parser.add_argument(
      '--bottleneck_dir',
      type=str,
      default='/tmp/bottleneck',
      help='Path to cache bottleneck layer values as files.'
  )
  parser.add_argument(
      '--final_tensor_name',
      type=str,
      default='final_result',
      help="""\
      The name of the output classification layer in the retrained graph.\
      """
  )
  parser.add_argument(
      '--flip_left_right',
      default=False,
      help="""\
      Whether to randomly flip half of the training images horizontally.\
      """,
      action='store_true'
  )
  parser.add_argument(
      '--random_crop',
      type=int,
      default=0,
      help="""\
      A percentage determining how much of a margin to randomly crop off the
      training images.\
      """
  )
  parser.add_argument(
      '--random_scale',
      type=int,
      default=0,
      help="""\
      A percentage determining how much to randomly scale up the size of the
      training images by.\
      """
  )
  parser.add_argument(
      '--random_brightness',
      type=int,
      default=0,
      help="""\
      A percentage determining how much to randomly multiply the training image
      input pixels up or down by.\
      """
  )
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
 | 
	apache-2.0 | 
| 
	Velociraptor85/pyload | 
	module/plugins/hoster/NovafileCom.py | 
	8 | 
	1270 | 
	# -*- coding: utf-8 -*-
#
# Test links:
# http://novafile.com/vfun4z6o2cit
# http://novafile.com/s6zrr5wemuz4
from ..internal.XFSHoster import XFSHoster
class NovafileCom(XFSHoster):
    __name__ = "NovafileCom"
    __type__ = "hoster"
    __version__ = "0.11"
    __status__ = "testing"
    __pattern__ = r'http://(?:www\.)?novafile\.com/\w{12}'
    __config__ = [("activated", "bool", "Activated", True),
                  ("use_premium", "bool", "Use premium account if available", True),
                  ("fallback", "bool",
                   "Fallback to free download if premium fails", True),
                  ("chk_filesize", "bool", "Check file size", True),
                  ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
    __description__ = """Novafile.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "[email protected]"),
                   ("stickell", "[email protected]")]
    PLUGIN_DOMAIN = "novafile.com"
    ERROR_PATTERN = r'class="alert.+?alert-separate".*?>\s*(?:<p>)?(.*?)\s*</'
    WAIT_PATTERN = r'<p>Please wait <span id="count".*?>(\d+)</span> seconds</p>'
    LINK_PATTERN = r'<a href="(http://s\d+\.novafile\.com/.*?)" class="btn btn-green">Download File</a>'
 | 
	gpl-3.0 | 
| 
	naturali/tensorflow | 
	tensorflow/examples/skflow/iris_run_config.py | 
	86 | 
	2087 | 
	#  Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
  # Load dataset.
  iris = datasets.load_iris()
  x_train, x_test, y_train, y_test = cross_validation.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your configuration by providing a RunConfig object to the
  # estimator to control session configurations, e.g. num_cores and
  # gpu_memory_fraction.
  run_config = tf.contrib.learn.estimators.RunConfig(
      num_cores=3, gpu_memory_fraction=0.6)
  # Build a 3-layer DNN with 10, 20 and 10 units respectively.
  feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
      x_train)
  classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                              hidden_units=[10, 20, 10],
                                              n_classes=3,
                                              config=run_config)
  # Fit and predict.
  classifier.fit(x_train, y_train, steps=200)
  predictions = list(classifier.predict(x_test, as_iterable=True))
  score = metrics.accuracy_score(y_test, predictions)
  print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
  tf.app.run()
 | 
	apache-2.0 | 
| 
	googleapis/python-compute | 
	google/cloud/compute_v1/services/target_instances/pagers.py | 
	1 | 
	5740 | 
	# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
    Any,
    AsyncIterable,
    Awaitable,
    Callable,
    Iterable,
    Sequence,
    Tuple,
    Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
    """A pager for iterating through ``aggregated_list`` requests.
    This class thinly wraps an initial
    :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` object, and
    provides an ``__iter__`` method to iterate through its
    ``items`` field.
    If there are more pages, the ``__iter__`` method will make additional
    ``AggregatedList`` requests and continue to iterate
    through the ``items`` field on the
    corresponding responses.
    All the usual :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(
        self,
        method: Callable[..., compute.TargetInstanceAggregatedList],
        request: compute.AggregatedListTargetInstancesRequest,
        response: compute.TargetInstanceAggregatedList,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.
        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.TargetInstanceAggregatedList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = compute.AggregatedListTargetInstancesRequest(request)
        self._response = response
        self._metadata = metadata
    def __getattr__(self, name: str) -> Any:
        return getattr(self._response, name)
    @property
    def pages(self) -> Iterable[compute.TargetInstanceAggregatedList]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response
    def __iter__(self) -> Iterable[Tuple[str, compute.TargetInstancesScopedList]]:
        for page in self.pages:
            yield from page.items.items()
    def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]:
        return self._response.items.get(key)
    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
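# Hypothetical usage sketch (client name assumed): iterating the pager returned
# by a TargetInstancesClient.aggregated_list() call yields (scope, scoped_list)
# pairs across every page without any manual page_token handling.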
class ListPager:
    """A pager for iterating through ``list`` requests.
    This class thinly wraps an initial
    :class:`google.cloud.compute_v1.types.TargetInstanceList` object, and
    provides an ``__iter__`` method to iterate through its
    ``items`` field.
    If there are more pages, the ``__iter__`` method will make additional
    ``List`` requests and continue to iterate
    through the ``items`` field on the
    corresponding responses.
    All the usual :class:`google.cloud.compute_v1.types.TargetInstanceList`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(
        self,
        method: Callable[..., compute.TargetInstanceList],
        request: compute.ListTargetInstancesRequest,
        response: compute.TargetInstanceList,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.
        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListTargetInstancesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.TargetInstanceList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = compute.ListTargetInstancesRequest(request)
        self._response = response
        self._metadata = metadata
    def __getattr__(self, name: str) -> Any:
        return getattr(self._response, name)
    @property
    def pages(self) -> Iterable[compute.TargetInstanceList]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response
    def __iter__(self) -> Iterable[compute.TargetInstance]:
        for page in self.pages:
            yield from page.items
    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
 | 
	apache-2.0 | 