repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
---|---|---|---|---|---|---|---|---|
tkammer/infrared
|
tools/ksgen/ksgen/settings.py
|
Python
|
gpl-3.0
| 15,945 | 0.000815 |
from configure import Configuration, ConfigurationError
from ksgen import docstring, yaml_utils, utils
from ksgen.tree import OrderedTree
from ksgen.yaml_utils import LookupDirective
from docopt import docopt, DocoptExit
import logging
import os
import re
import yaml
VALUES_KEY = '!value'
DEFAULTS_TAG = 'defaults'
logger = logging.getLogger(__name__)
class KeyValueError(Exception):
    """Signals a key=value command-line pair that could not be parsed."""

    def __init__(self, arg, reason, *args, **kwargs):
        super(KeyValueError, self).__init__(*args, **kwargs)
        self.arg = arg
        self.reason = reason

    def __str__(self):
        return "Invalid key-value pair: {0}, - {1}".format(self.arg, self.reason)
class OptionError(Exception):
    """Signals option values that reference files which do not exist."""

    def __init__(self, paths, *args, **kwargs):
        super(OptionError, self).__init__(*args, **kwargs)
        self._paths = paths

    def __str__(self):
        return "Invalid values passed, files : {0} not found".format(self._paths)
class ArgsConflictError(Exception):
    """Raised when command-line args conflict with args in the rules file."""
    pass
class Generator(object):
"""
Usage:
generate [options] <output-file>
generate [--extra-vars=KEY_PAIR]... [options] <output-file>
Options:
--rules-file=<file> Rules file that contains generation rules
Process the rules first, so the additional
args will override args in rules file
--extra-vars=<val>... Provide extra vars {options}
"""
def __init__(self, config_dir, args):
    """Prepare the generator.

    Args:
        config_dir: Root directory holding the option definition tree.
        args: Raw command-line arguments; normalized before parsing.
    """
    self.config_dir = config_dir
    # _normalize_args is defined elsewhere in this module (not visible here).
    self.args = _normalize_args(args)
    logger.debug("Generator: config_dir: %s, args: %s", config_dir, args)
    # Inject the dynamically generated per-directory option docs into the
    # class docstring that docopt will parse.
    self._doc_string = Generator.__doc__.format(
        options=docstring.Generator(config_dir).generate())
    self.settings = None
    self.output_file = None
    self.rules_file = None
    self._rules = None
    self.parsed = None
    self.extra_vars = None
    self.all_settings = None
    self.defaults = []
def run(self):
    """Parse args, load settings and write them as YAML to the output file.

    Returns:
        0 on success, 1 when argument parsing/validation fails.
    """
    if not self._parse():
        return 1
    # Loader is defined elsewhere in this module (not visible here).
    loader = Loader(self.config_dir, self.settings)
    self._merge_rules_file_exports(loader)
    loader.load()
    self._merge_extra_vars(loader)
    self.all_settings = loader.settings()
    self._replace_in_string_lookup()
    logger.debug(yaml_utils.to_yaml("All Settings", self.all_settings))
    logger.info("Writing to file: %s", self.output_file)
    with open(self.output_file, 'w') as out:
        out.write(
            yaml.safe_dump(self.all_settings, default_flow_style=False))
    return 0
def _prepare_defaults(self):
    """Collect default values for every top-level option in self.args.

    Only top-level option names (no '-' inside the name) seed the
    recursive default lookup; compound names like '--foo-bar' are handled
    by the recursion in _load_defaults.
    """
    for arg in self.args:
        if not arg.startswith('--'):
            continue
        arg = arg[2:].split('=')[0]
        if '-' not in arg:
            # Fix: use the module-level `logger` (as everywhere else in
            # this module) instead of the root logger, with lazy %-args.
            logger.debug("Preparing defaults for %s:", arg)
            self._load_defaults(self.config_dir + os.sep + arg,
                                self.parsed['--' + arg])
    self._merge_defaults()
    logger.info("Defaults \n%s", self.defaults)
def _load_defaults(self, path, value):
    """Recursively queue '--name=value' defaults for the option at *path*.

    The option name is rebuilt from the path by taking every other
    component below config_dir and joining them with '-'.  If the user did
    not pass the option, the default is queued; otherwise the user's value
    wins.  The chosen value's YAML file may itself declare a 'defaults'
    mapping, which is descended into recursively.
    """
    param = '-'.join(path[len(self.config_dir + os.sep):].split('/')[::2])
    if not self.parsed['--' + param]:
        # NOTE(review): uses the root logger; the rest of the module uses
        # the module-level `logger` — consider unifying.
        logging.warning(
            "\'--%s\' hasn't been provided, using \'%s\' as default" % (
                param, value))
        self.defaults.append(''.join(['--', param, '=', str(value)]))
    else:
        value = self.parsed['--' + param]
    file_path = path + os.sep + str(value) + '.yml'
    loaded_file = Configuration.from_file(file_path)
    if loaded_file.get(DEFAULTS_TAG):
        path += os.sep + str(value)
        # Python 2 API (iteritems); would need .items() under Python 3.
        for sub_key, sub_value in loaded_file[DEFAULTS_TAG].iteritems():
            self._load_defaults(path + os.sep + sub_key, sub_value)
def _merge_defaults(self):
    """Apply queued defaults to parsed options the user did not set.

    Each entry in self.defaults has the form '--<option>=<value>'.  Only
    options that are still falsy in self.parsed are filled in, so explicit
    command-line values always win.
    """
    for element in self.defaults:
        # Fix: split on the first '=' only, so default values that
        # themselves contain '=' no longer raise ValueError on unpacking.
        (option, value) = element.split('=', 1)
        if not self.parsed[option]:
            self.parsed[option] = value
def _merge_rules_file_exports(self, loader):
    """Merge the 'export' section of the rules file into *loader*.

    No-op when no rules file was given; a rules file without an export
    section is tolerated and only logged at debug level.

    (Reconstructed: the final logger.debug call was garbled by the
    extraction split.)
    """
    if not self._rules:
        return
    try:
        logger.debug("Try loading exports in rules file %s",
                     self.rules_file)
        loader.merge(self._rules.export)
        logger.info('Loaded exports from rules file: %s', self.rules_file)
    except KeyError:
        logger.debug("No 'exports' in rules file %s", self.rules_file)
def _parse(self):
    """Parse self.args with docopt, apply rules file and defaults.

    Returns:
        True when parsing succeeds and self.settings has been built,
        False on docopt failure or rule-validation failure.

    (The explanatory comment below was garbled by the extraction split
    and has been reconstructed; the code is unchanged.)
    """
    # create the settings tree and preserve the order in which arguments
    # are passed. Convert all args into an ordered tree so that
    #   --foo fooz --too moo --foo-bar baaz --foo-arg val
    # will be like the ordered tree below
    # foo:
    #     <special-key>: fooz
    #     bar:              ### <-- foo-bar is not foo/bar
    #         <special-key>: baaz
    #     arg:              ### <-- arg comes after bar
    #         <special-key>: val
    # too:
    #     <special-key>: moo
    logger.debug("Parsing: %s", self.args)
    logger.debug("DocString for Generate: %s", self._doc_string)
    try:
        self.parsed = docopt(self._doc_string,
                             options_first=True, argv=self.args)
    except DocoptExit:
        logger.error(self._doc_string)
        return False
    logger.info("Parsed \n%s", self.parsed)
    self._prepare_defaults()
    if not self._apply_rules():
        logger.error("Error while validating rules: check args %s",
                     ' \n'.join(self.args))
        return False
    logger.debug("New Args: %s", self.args)
    logger.info("After applying rules Parsed: \n%s", self.parsed)
    self.output_file = utils.extract_value(
        self.parsed, '<output-file>', optional=False)
    self.extra_vars = utils.extract_value(self.parsed, '--extra-vars')
    # filter only options; [ --foo, fooz, --bar baz ] -> [--foo, --bar]
    options = [x for x in self.args + self.defaults if x.startswith('--')]
    settings = OrderedTree(delimiter='-')
    for option in options:  # iterate options to preserve order of args
        option = option.split('=')[0]
        value = self.parsed.get(option)
        if not value:
            continue
        key = option[2:] + settings.delimiter + VALUES_KEY
        settings[key] = value
        logger.debug("%s: %s", key, value)
    logger.debug(yaml_utils.to_yaml(
        "Directory structure from args:", settings))
    self.settings = settings
    return True
def _apply_rules(self):
self.rules_file = utils.extract_value(self.parsed, '--rules-file')
if not self.rules_file:
return True # No rules to be applied
self.rules_file = os.path.abspath(self.rules_file)
logger.debug('Rule file: %s', self.rules_file)
self._rules = load_configuration(self.rules_file, os.path.curdir)
# create --key=value pairs from the rules.args
args_in_rules = self._rules.get('args', {})
extra_vars = utils.extract_value(args_in_rules, 'extra-vars')
args = ['--%s=%s' % (k, v) for k, v in args_in_rules.iteritems()]
if extra_vars:
extra_vars = utils.to_list(extra_vars)
args.extend(['--extra-vars=%s' % x for x in extra_vars])
logger.debug('Args in rules file: %s', args)
logger.debug('Args in self: %s', self.args)
logger.debug('Args in rules: %s', args_in_rules)
# get the key part without first two -- from --key=value
def _key(x):
return x.split('=')[0][2:]
# filter out all args present in rules file
conflicting_keys = [_key(x) for x in self.args
if _key(x) in args_in_rules]
if conflicting_keys:
raise ArgsConflictError(
"Command line args: '{0}' are in conflict with args defined "
"in the rules file.".format(
', '.join(conflicting_keys)))
# prepend the args from t
|
rob-smallshire/cartouche
|
cartouche/parser.py
|
Python
|
bsd-3-clause
| 17,022 | 0.001646 |
# -*- cod
|
ing: utf-8 -*-
from __future__ import print_function
from contextlib import contextmanager
import re
from cartouche._portability import u
from .errors import CartoucheError
from .nodes import (Node, Raises, Except, Note, Warning
|
, Returns, Arg, Yields,
Attribute, Usage, ensure_terminal_blank)
# Optional leading bullet marker (*, +, -, or a Unicode bullet) before an entry.
OPTIONAL_BULLET_PATTERN = u(r'(?:[\*\+\-\•\‣\⁃]\s+)?')
# Args/Attributes entries look like:  name (type): description
# (the '(type)' group is optional; names may carry * or ** prefixes)
ARGS_PATTERN = u(r'(\*{0,2}\w+)(\s+\(([\.\w]+)\))?\s*:\s*(.*)')
ATTRIBUTES_PATTERN = u(r'(\*{0,2}\w+)(\s+\(([\.\w]+)\))?\s*:\s*(.*)')
# Raises entries look like:  some.ExceptionType: description
RAISES_PATTERN = u(r'([\w\.]+)\s*:\s*(.*)')
ARGS_REGEX = re.compile(ARGS_PATTERN)
ATTRIBUTES_REGEX = re.compile(ATTRIBUTES_PATTERN)
RAISES_REGEX = re.compile(RAISES_PATTERN)
class CartoucheSyntaxError(CartoucheError):
    """Raised when a docstring section does not match the expected syntax."""
    pass
def parse_cartouche_text(lines):
    '''Parse text in cartouche format and return a reStructuredText equivalent
    Args:
        lines: A sequence of strings representing the lines of a single
            docstring as read from the source by Sphinx. This string should be
            in a format that can be parsed by cartouche.
    Returns:
        A list of lines containing the transformed docstring as
        reStructuredText as produced by cartouche.
    Raises:
        RuntimeError: If the docstring cannot be parsed.
    '''
    # Pipeline: normalize indentation, group lines into paragraphs, build a
    # parse tree, convert it to an AST, then render reStructuredText.
    indent_lines = unindent(lines)
    indent_lines = pad_blank_lines(indent_lines)
    indent_lines = first_paragraph_indent(indent_lines)
    indent_paragraphs = gather_lines(indent_lines)
    parse_tree = group_paragraphs(indent_paragraphs)
    syntax_tree = extract_structure(parse_tree)
    result = syntax_tree.render_rst()
    # NOTE(review): ensure_terminal_blank's return value is discarded —
    # presumably it mutates `result` in place; confirm against nodes.py.
    ensure_terminal_blank(result)
    return result
def unindent(lines):
    '''Strip leading whitespace, remembering how much was removed.

    Args:
        lines: An iterable of docstring lines.

    Returns:
        A list with one ``(indent, text)`` tuple per input line, where
        ``indent`` is the number of leading characters removed and
        ``text`` is the line without its leading whitespace.
    '''
    return [(len(line) - len(line.lstrip()), line.lstrip())
            for line in lines]
def pad_blank_lines(indent_texts):
    '''Propagate the previous line's indent onto blank lines.

    Args:
        indent_texts: An iterable of ``(indent, text)`` tuples.

    Returns:
        A list of ``(indent, text)`` tuples where every empty-text line
        carries the indent of the nearest preceding non-empty line.
    '''
    padded = []
    previous_indent = 0
    for indent, text in indent_texts:
        if text:
            previous_indent = indent
        padded.append((previous_indent, text))
    return padded
def extract_structure(parse_tree):
    '''Build the Abstract Syntax Tree for *parse_tree*.

    Delegates to convert_node, which dispatches on each node's section
    header ('Args:', 'Returns:', ...).

    Args:
        parse_tree: The root node produced by group_paragraphs.

    Returns:
        A Node which is the root of the resulting AST.

    Raises:
        CartoucheError: When the parse tree cannot be understood.
    '''
    return convert_node(parse_tree)
def convert_node(node):
    """Convert one parse-tree node into the matching AST node.

    The root (indent 0, no lines) simply converts its children; nodes
    whose first line opens a known section are handed to the matching
    converter; anything else becomes a plain Node carrying its lines.
    """
    if node.indent == 0 and len(node.lines) == 0:
        return convert_children(node)
    section_converters = (
        ('Args:', convert_args),
        ('Returns:', convert_returns),
        ('Yields:', convert_yields),
        ('Raises:', convert_raises),
        ('Note:', convert_note),
        ('Warning:', convert_warning),
        ('Attributes:', convert_attributes),
        ('Usage:', convert_usage),
    )
    first_line = node.lines[0]
    for header, converter in section_converters:
        if first_line.startswith(header):
            return converter(node)
    result = convert_children(node)
    result.lines = node.lines
    result.indent = node.indent
    return result
def convert_children(node):
    """Convert each child of *node* and collect the results in a new Node."""
    result = Node()
    result.children = [convert_node(child) for child in node.children]
    return result
def append_child_to_args_group_node(child, group_node, indent):
    """Parse one 'Args:' child paragraph into Arg nodes under *group_node*.

    Each non-empty line must match ARGS_REGEX ('name (type): text'; type is
    optional).  Continuation paragraphs (child.children) are attached to the
    last Arg created — to its description node when one exists, otherwise to
    the Arg itself.

    Raises:
        CartoucheSyntaxError: If a line does not match the argument syntax.
    """
    arg = None
    non_empty_lines = (line for line in child.lines if line)
    for line in non_empty_lines:
        m = ARGS_REGEX.match(line)
        if m is None:
            raise CartoucheSyntaxError('Cartouche: Invalid argument syntax "{line}" for Args block'.format(line=line))
        param_name = m.group(1)
        param_type = m.group(3)  # may be None when no '(type)' was given
        param_text = m.group(4)
        arg = Arg(indent, param_name)
        group_node.children.append(arg)
        arg.type = param_type
        if param_text is not None:
            arg.children.append(Node(indent, [param_text], arg))
    if arg is not None:
        last_child = arg.children[-1] if len(arg.children) != 0 else arg
        for grandchild in child.children:
            last_child.children.append(grandchild)
def append_child_to_attributes_group_node(child, group_node, indent):
    """Parse one 'Attributes:' child paragraph into Attribute nodes.

    Mirrors append_child_to_args_group_node: each non-empty line must match
    ATTRIBUTES_REGEX ('name (type): text'; type optional), and continuation
    paragraphs are attached to the last Attribute created.

    Raises:
        CartoucheSyntaxError: If a line does not match the attribute syntax.
    """
    attribute = None
    non_empty_lines = (line for line in child.lines if line)
    for line in non_empty_lines:
        m = ATTRIBUTES_REGEX.match(line)
        if m is None:
            raise CartoucheSyntaxError('Cartouche: Invalid attribute syntax "{line}" for Attributes block'.format(line=line))
        attribute_name = m.group(1)
        attribute_type = m.group(3)  # may be None when no '(type)' was given
        attribute_text = m.group(4)
        attribute = Attribute(indent, attribute_name)
        group_node.children.append(attribute)
        attribute.type = attribute_type
        if attribute_text is not None:
            attribute.children.append(Node(indent, [attribute_text], attribute))
    if attribute is not None:
        last_child = attribute.children[-1] if len(attribute.children) != 0 else attribute
        for grandchild in child.children:
            last_child.children.append(grandchild)
def convert_args(node):
    """Convert an 'Args:' section into a group Node of Arg children."""
    assert node.lines[0].startswith('Args:')
    group = Node()
    for child in node.children:
        append_child_to_args_group_node(child, group, node.indent)
    return group
def convert_returns(node):
    """Convert a 'Returns:' section into a Returns AST node."""
    assert node.lines[0].startswith('Returns:')
    section = Returns(node.indent)
    section.line = node.lines[0][len('Returns:'):].strip()
    section.children = node.children
    return section
def convert_yields(node):
    """Convert a 'Yields:' section into a Yields AST node.

    Bug fix: the header text was sliced with ``[8:]``, but 'Yields:' is
    only 7 characters, so the first character of the description was lost
    whenever no space followed the colon.  Slice at len('Yields:') instead
    (identical output for the usual 'Yields: text' form).
    """
    assert node.lines[0].startswith('Yields:')
    yields = Yields(node.indent)
    yields.line = node.lines[0][len('Yields:'):].strip()
    yields.children = node.children
    return yields
def convert_note(node):
    """Convert a 'Note:' section into a Note AST node."""
    assert node.lines[0].startswith('Note:')
    section = Note(node.indent)
    section.line = node.lines[0][len('Note:'):].strip()
    section.children = node.children
    return section
def convert_warning(node):
    """Convert a 'Warning:' section into a Warning AST node.

    (Warning here is cartouche's node class, not the builtin.)
    """
    assert node.lines[0].startswith('Warning:')
    section = Warning(node.indent)
    section.line = node.lines[0][len('Warning:'):].strip()
    section.children = node.children
    return section
def convert_raises(node):
    """Convert a 'Raises:' section into a Raises AST node."""
    assert node.lines[0].startswith('Raises:')
    raises = Raises(node.indent)
    for child in node.children:
        append_child_to_raise_node(child, raises)
    return raises
def convert_attributes(node):
    """Convert an 'Attributes:' section into a group Node of Attributes."""
    assert node.lines[0].startswith('Attributes:')
    group = Node()
    for child in node.children:
        append_child_to_attributes_group_node(child, group, node.indent)
    return group
def convert_usage(node):
    """Convert a 'Usage:' section into a Usage AST node.

    Unlike the other sections, the header line carries no inline text;
    only the children are kept.
    """
    assert node.lines[0].startswith('Usage:')
    usage = Usage(node.indent)
    usage.children = node.children
    return usage
def parse_exception(line):
'''Parse the first line of a Cartouche exception description.
Args:
line (str): A single line Cartouche exception description.
R
|
bugcrowd/vulnerability-rating-taxonomy
|
lib/generate_artifacts.py
|
Python
|
apache-2.0
| 228 | 0 |
from utils import utils
from artifacts import scw_artifact

# Mapping passed into generate_urls; shared across the generation run.
url_mapping = {}

# Load the current VRT JSON and write the SCW artifact file from it.
# (Reconstructed: the import and assignment lines were garbled by the
# extraction split.)
current_vrt = utils.get_json(utils.VRT_FILENAME)
scw_artifact.write_artifact_file(
    scw_artifact.generate_urls(current_vrt['content'], url_mapping)
)
|
ktan2020/legacy-automation
|
win/Lib/encodings/rot_13.py
|
Python
|
mit
| 2,697 | 0.011123 |
#!/usr/bin/env python
""" Python Character Mapping Codec for ROT13.
See http://ucsub.colorado.edu/~kominek/rot13/ for details.
Written by Marc-Andre Lemburg ([email protected]).
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ROT13 codec backed by the module-level character maps."""

    def encode(self,input,errors='strict'):
        # charmap_encode returns (output, length) per the stateless codec API.
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; ROT13 is stateless, so each chunk is independent."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; ROT13 is stateless, so each chunk is independent."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer; inherits the stateless encode from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader; inherits the stateless decode from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry consumed by the encodings package registry."""
    return codecs.CodecInfo(
        name='rot-13',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
# Rotate each ASCII letter 13 places within its own case ('A'..'Z' and
# 'a'..'z'); every other byte value maps to itself via the identity dict.
# (Rebuilt programmatically: several literal entries of the original dict
# were garbled by the extraction split.  The resulting mapping is the
# standard ROT13 table and is its own inverse.)
decoding_map.update(dict(
    (base + i, base + (i + 13) % 26)
    for base in (0x41, 0x61)  # ord('A'), ord('a')
    for i in range(26)
))
### Encoding Map
# ROT13 is its own inverse, so the encoding map is derived directly from
# the decoding map.
encoding_map = codecs.make_encoding_map(decoding_map)
### Filter API
def rot13(infile, outfile):
    # Copy infile to outfile, ROT13-encoding the content.
    # NOTE(review): str.encode('rot-13') is Python 2 only; Python 3 removed
    # text-to-text encoding via str.encode.
    outfile.write(infile.read().encode('rot-13'))
if __name__ == '__main__':
    import sys
    # Filter stdin to stdout through ROT13 when run as a script.
    rot13(sys.stdin, sys.stdout)
|
Connexions/cnx-upgrade
|
cnxupgrade/tests/__init__.py
|
Python
|
agpl-3.0
| 2,755 | 0.001452 |
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
from __future__ import print_function
import os
import sys
import psycopg2
__all__ = ('DB_CONNECTION_STRING', 'TESTING_DATA_DIRECTORY',
'postgresql_fixture', 'db_connect',
)
here = os.path.abspath(os.path.dirname(__file__))
_DB_CONNECTION_STRING_ENV_VAR_NAME = 'DB_CONNECTION_STRING'
_DB_CONNECTION_STRING_CLI_OPT_NAME = '--db-conn-str'

# Resolve the test-database connection string, in order of precedence:
#   1. the DB_CONNECTION_STRING environment variable,
#   2. the value following the --db-conn-str command-line option,
#   3. a built-in default (announced on stderr).
try:
    DB_CONNECTION_STRING = os.environ[_DB_CONNECTION_STRING_ENV_VAR_NAME]
except KeyError:  # fix: was a bare 'except:', which hid unrelated errors
    try:
        arg_pos = sys.argv.index(_DB_CONNECTION_STRING_CLI_OPT_NAME)
    except ValueError:
        # Use default...
        print("Using default database connection string.",
              file=sys.stderr)
        DB_CONNECTION_STRING = "dbname=cnxarchive-testing user=cnxarchive password=cnxarchive"
    else:
        DB_CONNECTION_STRING = sys.argv[arg_pos+1]
TESTING_DATA_DIRECTORY = os.path.join(here, 'data')
class PostgresqlFixture:
"""A testing fixture for a live (same as production) SQL database.
This will set up the database once for a test case. After each test
case has completed, the database will be cleaned (all tables dropped).
On a personal note, this seems archaic... Why can't I rollback to a
transaction?
"""
def __init__(self):
    """Capture the connection string and start from an empty database."""
    # Configure the database connection.
    self.connection_string = DB_CONNECTION_STRING
    # Drop all existing tables from the database.
    self._drop_all()
def _drop_all(self):
    """Drop all tables in the database."""
    # Dropping and recreating the public schema removes every object in
    # one shot, without enumerating tables.
    with psycopg2.connect(self.connection_string) as db_connection:
        with db_connection.cursor() as cursor:
            cursor.execute("DROP SCHEMA public CASCADE")
            cursor.execute("CREATE SCHEMA public")
def setUp(self):
    """Initialize the database schema for a test case.

    (Reconstructed: the settings dict literal was garbled by the
    extraction split.)
    """
    # Imported lazily so importing this package does not require
    # cnxarchive at collection time.
    from cnxarchive.database import initdb
    settings = {'db-connection-string': self.connection_string}
    initdb(settings)
def tearDown(self):
    """Clean the database after a test case."""
    # Drop all tables.
    self._drop_all()
postgresql_fixture = PostgresqlFixture()
def db_connect(method):
    """Decorator for methods that need to use the database.

    The wrapped method receives an open cursor after ``self``; the
    psycopg2 connection context manager commits on success and rolls back
    on error.

    Example:
      @db_connect
      def setUp(self, cursor):
          cursor.execute(some_sql)
          # some other code
    """
    def wrapped(self, *args, **kwargs):
        with psycopg2.connect(DB_CONNECTION_STRING) as db_connection:
            with db_connection.cursor() as cursor:
                return method(self, cursor, *args, **kwargs)
        # Fix: the original called db_connection.commit() here, AFTER the
        # return — unreachable dead code, now removed; the connection
        # context manager already commits on success.
    return wrapped
|
dayatz/taiga-back
|
tests/integration/resources_permissions/test_auth_resources.py
|
Python
|
agpl-3.0
| 2,107 | 0 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2017 Jesús Espino <[email protected]>
# Copyright (C) 2014-2017 David Barragán <[email protected]>
# Copyright (C) 2014-2017 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2017 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from tests import factories as f
from tests.utils import disconnect_signals, reconnect_signals
import pytest
pytestmark = pytest.mark.django_db
def setup_module(module):
    # Detach signal handlers (see tests.utils) for the whole module's tests.
    disconnect_signals()
def teardown_module(module):
    # Restore the signal handlers detached in setup_module.
    reconnect_signals()
def test_auth_create(client):
    """Login with valid credentials returns HTTP 200."""
    url = reverse('auth-list')
    user = f.UserFactory.create()
    login_data = json.dumps({
        "type": "normal",
        "username": user.username,
        # presumably UserFactory sets the password equal to the username —
        # verify against the factory definition.
        "password": user.username,
    })
    result = client.post(url, login_data, content_type="application/json")
    assert result.status_code == 200
def test_auth_action_register(client, settings):
    """Public registration creates the user and returns HTTP 201.

    (Reconstructed: the register_data dict literal was garbled by the
    extraction split.)
    """
    settings.PUBLIC_REGISTER_ENABLED = True
    url = reverse('auth-register')
    register_data = json.dumps({
        "type": "public",
        "username": "test",
        "password": "test",
        "full_name": "test",
        "email": "[email protected]",
    })
    result = client.post(url, register_data, content_type="application/json")
    assert result.status_code == 201
|
mitdbg/mdindex
|
scripts/fabfile/utils.py
|
Python
|
mit
| 2,214 | 0.006775 |
from fabric.api import *
from fabric.contrib.files import exists
import fabric
counter = 0
@roles('master')
def start_spark():
    # Start the cluster services via the helper script on the master host.
    run('/home/mdindex/scripts/startSystems.sh')
@roles('master')
def stop_spark():
    # Stop the cluster services via the helper script on the master host.
    run('/home/mdindex/scripts/stopSystems.sh')
@roles('master')
def start_zookeeper():
    # Start Zookeeper on the master host.
    run('/home/mdindex/scripts/startZookeeper.sh')
@roles('master')
def stop_zookeeper():
    # Stop Zookeeper on the master host.
    run('/home/mdindex/scripts/stopZookeeper.sh')
def run_bg(cmd, before=None, sockname="dtach", use_sudo=False):
    """Run a command in the background using dtach

    :param cmd: The command to run
    :param before: The command to run before the dtach. E.g. exporting
                   environment variable
    :param sockname: The socket name to use for the temp file
    :param use_sudo: Whether or not to use sudo

    (Fixes: reconstructed the else-branch format string garbled by the
    extraction split; removed the stale ``:param output_file:`` entry for
    a parameter that does not exist; parenthesized print for py2/py3
    compatibility.)
    """
    if not exists("/usr/bin/dtach"):
        print("Install dtach first !")
        return
    if before:
        cmd = "{}; dtach -n `mktemp -u /tmp/{}.XXXX` {}".format(
            before, sockname, cmd)
    else:
        cmd = "dtach -n `mktemp -u /tmp/{}.XXXX` {}".format(sockname, cmd)
    if use_sudo:
        return sudo(cmd)
    else:
        return run(cmd)
@runs_once
def build_jar():
    # Build the shadow (fat) jar locally; @runs_once limits this to a
    # single execution per fab invocation regardless of host count.
    local('cd /Users/anil/Dev/repos/mdindex/; gradle shadowJar')
@parallel
def update_jar():
    """Push the built amoeba jar to every host in parallel.

    (Reconstructed: the destination path was garbled by the extraction
    split; it matches update_master_jar below.)
    """
    if not exists('/data/mdindex/jars'):
        run('mkdir -p /data/mdindex/jars')
    put('../build/libs/amoeba-all.jar', '/data/mdindex/jars/')
@roles('master')
def update_master_jar():
    # Push the built amoeba jar to the master host only.
    if not exists('/data/mdindex/jars'):
        run('mkdir -p /data/mdindex/jars')
    put('../build/libs/amoeba-all.jar', '/data/mdindex/jars/')
@serial
def update_config():
    # Push the server config and append a unique MACHINE_ID per host.
    # @serial runs hosts one at a time, so the global counter hands out
    # distinct ids.
    global counter
    put('server/server.properties', '/home/mdindex/amoeba.properties')
    run('echo "MACHINE_ID = %d" >> /home/mdindex/amoeba.properties' % counter)
    counter += 1
@parallel
def clean_cluster():
    # Remove Hadoop logs and Spark logs/work directories on every host.
    run('rm -R /data/mdindex/logs/hadoop/')
    run('rm -R /home/mdindex/spark-1.6.0-bin-hadoop2.6/logs/')
    run('rm -R /home/mdindex/spark-1.6.0-bin-hadoop2.6/work/')
@parallel
def parallel_shell():
    # Ad-hoc parallel command; currently relocates the TPC-H download script.
    run('mv /data/mdindex/tpchd100/download_data.sh ~/')
|
j00bar/django-widgy
|
widgy/contrib/form_builder/south_migrations/0010_auto__add_field_emailuserhandler_to_ident__chg_field_formsubmission_cr.py
|
Python
|
apache-2.0
| 9,301 | 0.007526 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply: add EmailUserHandler.to_ident; created_at becomes auto_now_add."""
    # Adding field 'EmailUserHandler.to_ident'
    db.add_column('form_builder_emailuserhandler', 'to_ident',
                  self.gf('django.db.models.fields.CharField')(default='', max_length=36),
                  keep_default=False)

    # Changing field 'FormSubmission.created_at'
    db.alter_column('form_builder_formsubmission', 'created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
def backwards(self, orm):
    """Revert: drop EmailUserHandler.to_ident; created_at loses auto_now_add."""
    # Deleting field 'EmailUserHandler.to_ident'
    db.delete_column('form_builder_emailuserhandler', 'to_ident')

    # Changing field 'FormSubmission.created_at'
    db.alter_column('form_builder_formsubmission', 'created_at', self.gf('django.db.models.fields.DateTimeField')())
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100
|
'})
},
'form_builder.choicefield': {
'Meta': {'object_name': 'ChoiceField'},
'choices': ('django.db.models.fields.TextField', [], {}),
'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'prima
|
ry_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'form_builder.emailsuccesshandler': {
'Meta': {'object_name': 'EmailSuccessHandler'},
'content': ('widgy.contrib.page_builder.db.fields.MarkdownField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to': ('django.db.models.fields.EmailField', [], {'max_length': '75'})
},
'form_builder.emailuserhandler': {
'Meta': {'object_name': 'EmailUserHandler'},
'content': ('widgy.contrib.page_builder.db.fields.MarkdownField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'to': "orm['widgy.Node']"}),
'to_ident': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'form_builder.form': {
'Meta': {'object_name': 'Form'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u'Untitled form 8'", 'max_length': '255'})
},
'form_builder.formbody': {
'Meta': {'object_name': 'FormBody'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'form_builder.forminput': {
'Meta': {'object_name': 'FormInput'},
'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'form_builder.formmeta': {
'Meta': {'object_name': 'FormMeta'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'form_builder.formsubmission': {
'Meta': {'object_name': 'FormSubmission'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'form_ident': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
'form_node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'form_submissions'", 'on_delete': 'models.PROTECT', 'to': "orm['widgy.Node']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'form_builder.formvalue': {
'Meta': {'object_name': 'FormValue'},
'field_ident': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'field_node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['widgy.Node']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'values'", 'to': "orm['form_builder.FormSubmission']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'form_builder.multiplechoicefield': {
'Meta': {'object_name': 'MultipleChoiceField'},
'choices': ('django.db.models.fields.TextField', [], {}),
'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'form_builder.savedatahandler': {
'Meta': {'object_name': 'SaveDataHandler'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'form_builder.submitbutton': {
'Meta': {'object_name': 'SubmitButton'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'default': "u'submit'", 'max_length': '255'})
},
'form_builder.successhandlers': {
'Meta': {'object_name': 'SuccessHandlers'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'form_builder.successmessagebucket': {
'Meta': {'object_name': 'SuccessMessageBucket'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'form_builder.textarea': {
'Meta': {'object_name': 'Textarea'},
'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'label': ('django.db.mo
|
Endika/l10n-spain
|
l10n_es_aeat_mod303/models/mod303.py
|
Python
|
agpl-3.0
| 9,035 | 0 |
# -*- coding: utf-8 -*-
# © 2013 - Guadaltech - Alberto Martín Cortada
# © 2015 - AvanzOSC - Ainara Galdona
# © 2014-2016 - Serv. Tecnol. Avanzados - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, _
class L10nEsAeatMod303Report(models.Model):
_inherit = "l10n.es.aeat.report.tax.mapping"
_name = "l10n.es.aeat.mod303.report"
_description = "AEAT 303 Report"
def _get_export_conf(self):
    """Default for export_config: the id of the mod303 export configuration.

    Falls back to an empty aeat.model.export.config recordset when the
    XML id is not loaded (e.g. during module installation).
    """
    try:
        return self.env.ref(
            'l10n_es_aeat_mod303.aeat_mod303_main_export_config').id
    except ValueError:
        return self.env['aeat.model.export.config']
def _default_counterpart_303(self):
    """Default counterpart: first non-view account whose code starts 4750."""
    return self.env['account.account'].search(
        [('code', 'like', '4750%'), ('type', '!=', 'view')])[:1]
@api.multi
@api.depends('tax_lines', 'tax_lines.amount')
def _compute_total_devengado(self):
    """[27] Accrued VAT: sum of tax-line amounts in the accrual boxes."""
    casillas_devengado = (3, 6, 9, 11, 13, 15, 18, 21, 24, 26)
    for report in self:
        tax_lines = report.tax_lines.filtered(
            lambda x: x.field_number in casillas_devengado)
        report.total_devengado = sum(tax_lines.mapped('amount'))
@api.multi
@api.depends('tax_lines', 'tax_lines.amount')
def _compute_total_deducir(self):
    """[45] Deductible VAT: sum of tax-line amounts in the deduction boxes."""
    casillas_deducir = (29, 31, 33, 35, 37, 39, 41, 42, 43, 44)
    for report in self:
        tax_lines = report.tax_lines.filtered(
            lambda x: x.field_number in casillas_deducir)
        report.total_deducir = sum(tax_lines.mapped('amount'))
@api.multi
@api.depends('total_devengado', 'total_deducir')
def _compute_casilla_46(self):
    """[46] General regime result: accrued VAT minus deductible VAT."""
    for report in self:
        report.casilla_46 = report.total_devengado - report.total_deducir
@api.multi
@api.depends('porcentaje_atribuible_estado', 'casilla_46')
def _compute_atribuible_estado(self):
for report in self:
report.atribuible_estado = (
report.casilla_46 * report.porcentaje_atribuible_estado / 100)
@api.multi
@api.depends('atribuible_estado', 'cuota_compensar',
'regularizacion_anual', 'casilla_77')
def _compute_casilla_69(self):
for report in self:
report.casilla_69 = (
report.atribuible_estado + report.casilla_77 +
report.cuota_compensar + report.regularizacion_anual)
@api.multi
@api.depends('casilla_69', 'previous_result')
def _compute_resultado_liquidacion(self):
for report in self:
report.resultado_liquidacion = (
report.casilla_69 - report.previous_result)
currency_id = fields.Many2one(
comodel_name='res.currency', string='Currency',
related='company_id.currency_id', store=True, readonly=True)
number = fields.Char(default='303')
export_config = fields.Many2one(default=_get_export_conf)
company_partner_id = fields.Many2one('res.partner', string='Partner',
relation='company_id.partner_id',
store=True)
devolucion_mensual = fields.Boolean(
string="Devolución mensual", states={'done': [('readonly', True)]},
help="Inscrito en el Registro de Devolución Mensual")
total_devengado = fields.Float(
string="[27] IVA devengado", readonly=True,
compute="_compute_total_devengado", store=True)
total_deducir = fields.Float(
string="[45] IVA a deducir", readonly=True,
compute="_compute_total_deducir", store=True)
casilla_46 = fields.Float(
string="[46] Resultado régimen general", readonly=True, store=True,
help="(IVA devengado - IVA deducible)", compute="_compute_casilla_46")
porcentaje_atribuible_estado = fields.Float(
string="[65] % atribuible al Estado",
states={'done': [('readonly', True)]},
help="Los sujetos pasivos que tributen conjuntamente a la "
"Administración del Estado y a las Diputaciones Forales del País "
"Vasco o a la Comunidad Foral de Navarra, consignarán en esta "
"casilla el porcentaje del volumen de operaciones en territorio "
"común. Los demás sujetos pasivos consignarán en est
|
a casilla el "
"100%", default=100)
atribuible
|
_estado = fields.Float(
string="[66] Atribuible a la Administración", readonly=True,
compute="_compute_atribuible_estado", store=True)
cuota_compensar = fields.Float(
string="[67] Cuotas a compensar", default=0,
states={'done': [('readonly', True)]},
help="Cuota a compensar de periodos anteriores, en los que su "
"declaración fue a devolver y se escogió la opción de "
"compensación posterior")
regularizacion_anual = fields.Float(
string="[68] Regularización anual",
states={'done': [('readonly', True)]},
help="En la última autoliquidación del año (la del período 4T o mes "
"12) se hará constar, con el signo que corresponda, el resultado "
"de la regularización anual conforme disponen las Leyes por las "
"que se aprueban el Concierto Económico entre el Estado y la "
"Comunidad Autónoma del País Vasco y el Convenio Económico entre "
"el Estado y la Comunidad Foral de Navarra.""")
casilla_69 = fields.Float(
string="[69] Resultado", readonly=True, compute="_compute_casilla_69",
help="Atribuible a la Administración [66] - Cuotas a compensar [67] + "
"Regularización anual [68]""", store=True)
casilla_77 = fields.Float(
string="[77] Iva Diferido (Liquidado por aduana)",
help="Se hará constar el importe de las cuotas del Impuesto a la "
"importación incluidas en los documentos en los que conste la "
"liquidación practicada por la Administración recibidos en el "
"periodo de liquidación. Solamente podrá cumplimentarse esta "
"casilla cuando se cumplan los requisitos establecidos en el "
"artículo 74.1 del Reglamento del Impuesto sobre el Valor "
"Añadido. ")
previous_result = fields.Float(
string="[70] A deducir",
help="Resultado de la anterior o anteriores declaraciones del mismo "
"concepto, ejercicio y periodo",
states={'done': [('readonly', True)]})
resultado_liquidacion = fields.Float(
string="[71] Result. liquidación", readonly=True,
compute="_compute_resultado_liquidacion", store=True)
result_type = fields.Selection(
selection=[('I', 'A ingresar'),
('D', 'A devolver'),
('N', 'Sin actividad/Resultado cero')],
compute="_compute_result_type")
compensate = fields.Boolean(
string="Compensate", states={'done': [('readonly', True)]},
help="Si se marca, indicará que el importe a devolver se compensará "
"en posteriores declaraciones")
bank_account = fields.Many2one(
comodel_name="res.partner.bank", string="Bank account",
states={'done': [('readonly', True)]})
counterpart_account = fields.Many2one(default=_default_counterpart_303)
allow_posting = fields.Boolean(default=True)
def __init__(self, pool, cr):
self._aeat_number = '303'
super(L10nEsAeatMod303Report, self).__init__(pool, cr)
@api.one
def _compute_allow_posting(self):
self.allow_posting = True
@api.one
@api.depends('resultado_liquidacion')
def _compute_result_type(self):
if self.resultado_liquidacion == 0:
self.result_type = 'N'
elif self.resultado_liquidacion > 0:
self.result_type = 'I'
else:
self.result_type = 'D'
@api.onchange('period_type', 'fiscalyear_id')
def onchange_period_type(self):
super(L10nEsAeatMod303Report, self).onchange_period_type()
if self.period_type not in ('4T', '12'):
self.regularizacion_anual = 0
@api.onchange('type')
def onchange_type(self
|
siwells/teaching_set09103
|
code/topic_11/decrypt.py
|
Python
|
gpl-3.0
| 616 | 0.00487 |
from Crypto.PublicKey import RSA
from C
|
rypto.Cipher import AES, PKCS1_OAEP
file_in = open("encrypted_data.bin", "rb")
private_key = RSA.import_key(open("private.pem").read())
enc_session_key, nonce, tag, ciphertext = [ file_in.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1) ]
# Decrypt the session key with the private RSA key
cipher_rsa = PKCS1_OAEP.new(private_key)
session_key = cipher_rsa.decrypt(enc_session_key)
# Decrypt the data with the AES session key
cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)
data = ciph
|
er_aes.decrypt_and_verify(ciphertext, tag)
print(data.decode("utf-8"))
|
jplusplus/thenmap-v0
|
generators/utils/convert-csv-to-json.py
|
Python
|
gpl-2.0
| 2,991 | 0.030181 |
# coding=utf-8
#Converts a csv contining country codes and numerical values, to json
#Years should be given in the header, like this:
#
# land, 1980, 1981, 1982
# se, 12, 13, 11
# fi 7, 10, 14
import csv
import json
import argparse
import os.path
import sys
import math
#Check if file exists
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
#Check if values are numbers, for our purposes
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def doRounding(s,d):
if d > 0:
return round(float(s),d)
else:
return int(str(round(float(s),d))[:-2])
#Check if values are years
def isYear(s):
try:
float(s)
if 4 is len(s):
return True
else:
return False
except ValueError:
return False
#Define command line arguments
parser = argparse.ArgumentParser(description='Converts a csv contining country codes and numerical values, to json.')
#Input file
parser.add_argument("-i", "--input", dest="infile", required=True,
help="input file", metavar="FILE",
type=lambda x: is_valid_file(parser,x))
#Output file
parser.add_argument("-o", "--output", dest="outfile",
help="output file", metavar="FILE")
#Column
parser.add_argument("-c", "--column", dest="column",
help="column containing nation codes. The first column is “0”", type=int, default=0)
#Rounding
parser.add_argument("-d", "--decimals", dest="decimals",
help="Number of decimals to keep. Default is -1, meaning “keep all”", type=int, default=-1)
args = parser.parse_args()
inputFile = args.infile #"/home/leo/Världen/demo/patents/raw-pre.csv"
if args.outfile is None:
outputFile = os.path.splitext(inputFile)[0] + ".json"
print "No output file given, using %s" % outputFile
else:
outputFile = args.outfile
if os.path.isfile(outputFile):
print "File %s already exists. Overwrite? [y/N]" % outputFile
choice = raw_input().lower()
if not choice in ('y', 'yes'):
sys.exit()
indataColumn = args.column
print indataColumn
outdata = {}
headers = []
#Open file
try:
with open(inputFile, 'rb') as csvfile:
datacsv = csv.reader(csvfile,delimiter=',',quotechar='"')
firstRow = True
for row in datacsv:
if firstRow:
firstRow = False
for col in row:
headers.append(col)
else:
currentNation = row[indataColumn]
outdata[currentNation] = []
i = 0
for c
|
ol in row:
currentHeader = headers[i]
if isYear(currentHeader):
if is_number(col):
if (args.decimals > -1):
outdata[currentNation].append(doRounding(col,args.decimals))
else:
outdata[currentNation].append(col)
else:
outdata[currentNation].append(None)
i += 1
except IOError:
pr
|
int ("Could not open input file")
print "Writing %s..." % outputFile
with open(outputFile, 'w') as outfile:
json.dump(outdata, outfile)
print "done"
|
Amandil/django-tech-test
|
loans/tests/tests_model_business.py
|
Python
|
bsd-3-clause
| 5,699 | 0.031409 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
from django.core.exceptions import ValidationError
from loans.models import Business
class BusinessTestCase(TestCase):
'''
Creating two business owners
'''
def setUp(self):
self.john = User.objects.create(username="johndoe", first_name="John", last_name="Doe")
self.john.borrower.is_borrower = True
self.john.borrower.telephone_number = '+44 7762 25 4775'
|
self.john.save()
# self.jane = User.objects.create(username="janedoe", first_name="Jane", last_name="Doe")
# self.jane.borrower.is_borrower
|
= True
# self.jane.borrower.telephone_number = '+40 745 497 778'
# self.jane.save()
'''
We must be able to create a business
'''
def test_model_create(self):
acme = Business.objects.create(
crn = '09264172',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
acme.full_clean()
'''
Two businesses cannot have the same company number
'''
def test_company_number_duplicate(self):
acme = Business.objects.create(
crn = '09264172',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
acme.full_clean()
with self.assertRaises(IntegrityError):
duplicate = Business.objects.create(
crn = '09264172',
owner = self.john,
name = "ACME Duplicate Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'Manchester',
postcode = 'M14 5SZ',
)
'''
The company number must be added in a valid format
'''
def test_company_number_format(self):
# 8 character number should be accepted
acme = Business.objects.create(
crn = '09264172',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
acme.full_clean()
# > 8 characters should not be accepted
acme = Business.objects.create(
crn = '09264172123123',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
with self.assertRaises(ValidationError):
acme.full_clean()
# < 8 characters should not be accepted
acme = Business.objects.create(
crn = '0926',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
with self.assertRaises(ValidationError):
acme.full_clean()
'''
The address must be added in a valid format
'''
def test_address_format(self):
# First address field must have some characters
acme = Business.objects.create(
crn = '09260926',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = '',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
with self.assertRaises(ValidationError):
acme.full_clean()
# Second address field can be empty
acme = Business.objects.create(
crn = '09260927',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = '',
city = 'London',
postcode = 'W8 5EH',
)
acme.full_clean()
# Postcode must be valid
acme = Business.objects.create(
crn = '09260928',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = '',
city = 'London',
postcode = 'INVALID POSTCODE',
)
with self.assertRaises(ValidationError):
acme.full_clean()
acme = Business.objects.create(
crn = '09260929',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = '',
city = 'London',
postcode = ' 2NP',
)
with self.assertRaises(ValidationError):
acme.full_clean()
acme = Business.objects.create(
crn = '09260930',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = '',
city = 'London',
postcode = 'M145S',
)
with self.assertRaises(ValidationError):
acme.full_clean()
|
diogo149/treeano
|
treeano/core/variable.py
|
Python
|
apache-2.0
| 7,794 | 0 |
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import toolz
import numpy as np
import theano
import theano.tensor as T
from .. import utils
from .inits import ZeroInit
ENABLE_TEST_VALUE = theano.config.compute_test_value != "off"
VALID_TAGS = set("""
input
output
weight
bias
parameter
hyperparameter
monitor
state
tied
""".split())
class VariableWrapper(object):
def __init__(self,
name,
shape=None,
dtype=None,
broadcastable=None,
is_shared=None,
tags=None,
ndim=None,
variable=None,
inits=None,
relative_network=None,
default_inits=None,
default_inits_hyperparameters=("inits",)):
self.name = name
self.shape_ = shape
self.dtype_ = dtype
self.broadcastable_ = broadcastable
self.is_shared_ = is_shared
self.tags_ = tags
self.ndim_ = ndim
self.variable_ = variable
if default_inits is not None:
# replace inits with calculated inits from network
assert inits is None
assert relative_network is not None
assert isinstance(default_inits, (list, tuple))
inits = list(toolz.concat(relative_network.find_hyperparameters(
default_inits_hyperparameters,
default_inits)))
self.inits = inits
# relative_network is provided so that variables can auto-compute
# their shape
self.relative_network = relative_network
self.validate()
def to_state(self, name):
return dict(
shape=self.shape_,
dtype=self.dtype_,
broadcastable=self.broadcastable_,
is_shared=self.is_shared_,
)
def validate(self):
shape = self.shape_
dtype = self.dtype_
broadcastable = self
|
.broadcastable_
is_shared = self.is_shared_
tags = self.tags_
ndim = self.ndim_
variable = self.variable_
if ndim is not
|
None and shape is not None:
assert len(shape) == ndim
if ndim is not None and variable is not None:
assert ndim == variable.ndim
if broadcastable is not None and variable is not None:
assert broadcastable == variable.broadcastable
if is_shared is not None and variable is not None:
assert is_shared == utils.is_shared_variable(self.variable)
if dtype is not None and variable is not None:
assert dtype == variable.dtype
if tags is not None:
self.verify_tags(set(tags))
if is_shared:
assert self.inits is not None, dict(
name=self.name,
msg=("if inits is None, then this shared variable cannot be "
"shared or loaded"),
)
def verify_tags(self, tags):
for tag in tags:
assert tag in VALID_TAGS
if self.is_shared:
# exactly one of parameter and state should be set
assert ("parameter" in tags) != ("state" in tags)
# the only valid tags for shared are the following:
assert len(tags - {"weight", "bias", "parameter", "state"}) == 0
else:
assert len({"weight", "bias", "parameter", "state"} & tags) == 0
@property
def is_shared(self):
if self.is_shared_ is None:
# if is_shared is not supplied, a variable must be supplied
assert self.variable_ is not None
self.is_shared_ = utils.is_shared_variable(self.variable)
return self.is_shared_
@property
def tags(self):
if self.tags_ is None:
self.tags_ = []
if not isinstance(self.tags_, set):
self.tags_ = set(self.tags_)
self.verify_tags(self.tags_)
return self.tags_
@property
def ndim(self):
if self.ndim_ is None:
if self.variable_ is not None:
self.ndim_ = self.variable_.ndim
elif self.shape_ is not None:
self.ndim_ = len(self.shape_)
else:
raise ValueError("ndim not defined")
return self.ndim_
@property
def dtype(self):
if self.dtype_ is None:
if self.variable_ is not None:
self.dtype_ = self.variable_.dtype
else:
self.dtype_ = theano.config.floatX
return self.dtype_
@property
def broadcastable(self):
if self.broadcastable_ is None:
if self.variable_ is not None:
self.broadcastable_ = self.variable_.broadcastable
else:
self.broadcastable_ = (False, ) * self.ndim
return self.broadcastable_
@property
def variable(self):
if self.variable_ is None:
if self.is_shared:
# find appropriate initialization scheme
inits = self.inits
if inits is None:
inits = []
for initialization in inits:
if initialization.predicate(self):
break
else:
# default to zero initialization if none work
initialization = ZeroInit()
# create the shared variable
variable = initialization.create_shared(self)
else:
variable = T.TensorType(self.dtype,
self.broadcastable)(self.name)
self.variable_ = variable
# for ease of debugging, add test values
# ---
# this must be done after self.variable_ is set to avoid a
# recursive loop when calling self.shape
if (not self.is_shared) and ENABLE_TEST_VALUE:
test_value = np.random.rand(*self.shape).astype(self.dtype)
variable.tag.test_value = test_value
# re-validate that initialization worked properly
self.validate()
return self.variable_
@property
def shape(self):
if self.shape_ is None:
# cannot derive shape for shared variable
assert not self.is_shared
# TODO
# can we call variable.get_value().shape
# we don't know if one of the intended dimensions is None,
# but it seems like a fair assumption that a shared variable
# won't change shape (maybe we can add a flag of whether or not
# shape doesn't change that defaults to True)
# FIXME calculate shape
# assert False
self.shape_ = tuple(self.variable_.shape.eval())
return self.shape_
def symbolic_shape(self):
"""
returns the shape of the variable with None's replaced by symbolic
variables
"""
shape = self.shape
var_shape = self.variable.shape
out_shape = []
for i, s in enumerate(shape):
if s is not None:
out_shape.append(s)
else:
out_shape.append(var_shape[i])
return tuple(out_shape)
@property
def value(self):
assert self.is_shared
return self.variable.get_value()
@value.setter
def value(self, new_value):
assert new_value.dtype == self.dtype
assert new_value.shape == self.shape
self.variable.set_value(new_value)
def __repr__(self):
return "{cls}(name={name})".format(cls=self.__class__.__name__,
name=self.name)
|
spcui/avocado-vt
|
selftests/unit/test_utils_params.py
|
Python
|
gpl-2.0
| 3,700 | 0.001622 |
#!/usr/bin/python
import unittest
import os
import sys
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(basedir, 'virttest')):
sys.path.append(basedir)
from virttest import utils_params
BASE_DICT = {
'image_boot': 'yes',
'image_boot_stg': 'no',
'image_chain': '',
'image_clone_command': 'cp --reflink=auto %s %s',
'image_format': 'qcow2',
'image_format_stg': 'qcow2',
'image_name': 'images/f18-64',
'image_name_stg': 'enospc',
'image_raw_device': 'no',
'image_remove_command': 'rm -rf %s',
'image_size': '10G',
'image_snapshot_stg': 'no',
'image_unbootable_pattern': 'Hard Disk.*not a bootable disk',
'image_verify_bootable': 'yes',
'images': 'image1 stg',
}
CORRECT_RESULT_MAPPING = {"image1": {'image_boot_stg': 'no',
'image_snapshot_stg': 'no',
'image_chain': '',
'image_unbootable_pattern': 'Hard Disk.*not a bootable disk',
'image_name': 'images/f18-64',
'image_remove_command': 'rm -rf %s',
'image_name_stg': 'enospc',
'image_clone_command': 'cp --reflink=auto %s %s',
'image_size': '10G', 'images': 'image1 stg',
'image_raw_device': 'no',
'image_format': 'qcow2',
'image_boot': 'yes',
'image_verify_bootable': 'yes',
'image_format_stg': 'qcow2'},
"stg": {'image_snapshot': 'no',
'image_boot_stg': 'no',
'image_snapshot_stg': 'no',
'image_chain': '',
'image_unbootable_pattern': 'Hard Disk.*not a bootable disk',
'image_name': 'enospc',
'image_remove_command': 'rm -rf %s',
'image_name_stg': 'enospc',
'image_clone_command': 'cp --reflink=auto %s %s',
'image_size': '10G',
'images': 'image1 stg',
'image_raw_device': 'no',
'image_format': 'qcow2',
'image_boot': 'no',
'image_verify_bootable': 'yes',
'image_format_stg': 'qcow2'}}
class TestParams(unittest.TestCase):
def setUp(self):
self.params = utils_params.Params(BASE_DICT)
def testObjects(self):
self.assertEquals(self.params.objects("images"), ['image1', 'stg'])
def testObjectsParams(self):
for key in CORRECT_RESULT_MAPPING.keys():
self.assertEquals(self.params.object_params(key),
CORRECT_RESULT_MAPPING[key
|
])
def testGetItemMissing(self):
try:
self.params['bogus']
raise ValueError("Did not get a ParamNotFound error when trying "
"to access a non-existing param")
# pylint: disable=E0712
except utils_params.ParamNotFound:
pass
def testGetItem(self):
self.assertEqual(self.params['image_size'], "10G")
i
|
f __name__ == "__main__":
unittest.main()
|
soerendip42/rdkit
|
rdkit/Chem/Subshape/testCombined.py
|
Python
|
bsd-3-clause
| 1,702 | 0.03349 |
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.PyMol import MolViewer
from rdkit.Chem.Subshape import SubshapeBuilder,SubshapeObjects,SubshapeAligner
from rdkit.six.moves import cPickle
import copy
m1 = Chem.MolFromMolFile('test_data/square1.mol')
m2 = Chem.MolFromMolFile('test_data/square2.mol')
b = SubshapeBuilder.SubshapeBuilder()
b.gridDims=(10.,10.,5)
b.gridSpacing=0.4
b.winRad=2.0
if 1:
print('m1:')
s1 = b.GenerateSubshapeShape(m1)
cPickle.dump(s1,file('test_data/square1.shp.pkl','wb+'))
print('m2:')
s2 = b.GenerateSubshapeShape(
|
m2)
cPickle.dump(s2,file('test_data/square2.shp.pkl','wb+'))
ns1 = b.CombineSubshapes(s1,s2)
b.GenerateSubshapeSkeleton(ns1)
cPickle.dump(ns1,file('test_data/combined.shp.pkl','wb+'))
else:
s1 =
|
cPickle.load(file('test_data/square1.shp.pkl','rb'))
s2 = cPickle.load(file('test_data/square2.shp.pkl','rb'))
#ns1 = cPickle.load(file('test_data/combined.shp.pkl','rb'))
ns1=cPickle.load(file('test_data/combined.shp.pkl','rb'))
v = MolViewer()
SubshapeObjects.DisplaySubshape(v,s1,'shape1')
SubshapeObjects.DisplaySubshape(v,ns1,'ns1')
#SubshapeObjects.DisplaySubshape(v,s2,'shape2')
a = SubshapeAligner.SubshapeAligner()
pruneStats={}
algs =a.GetSubshapeAlignments(None,ns1,m1,s1,b,pruneStats=pruneStats)
print(len(algs))
print(pruneStats)
import os,tempfile
from rdkit import Geometry
fName = tempfile.mktemp('.grd')
Geometry.WriteGridToFile(ns1.coarseGrid.grid,fName)
v.server.loadSurface(fName,'coarse','',2.5)
os.unlink(fName)
fName = tempfile.mktemp('.grd')
Geometry.WriteGridToFile(ns1.medGrid.grid,fName)
v.server.loadSurface(fName,'med','',2.5)
os.unlink(fName)
|
chitr/neutron
|
neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py
|
Python
|
apache-2.0
| 89,243 | 0.000179 |
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import hashlib
import signal
import sys
import time
import uuid
import functools
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
import six
from six import moves
from neutron.agent.common import ovs_lib
from neutron.agent.common import polling
from neutron.agent.common import utils
from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent.linux import ip_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import ipv6_utils as ipv6
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_dvr_neutron_agent
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
'common.config')
# A placeholder for dead vlans.
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1
UINT64_BITMASK = (1 << 64) - 1
class _mac_mydialect(netaddr.mac_unix):
word_fmt = '%.2x'
class DeviceListRetrievalError(exceptions.NeutronException):
message = _("Unable to retrieve port details for devices: %(devices)s ")
class LocalVLANMapping(object):
def __init__(self, vlan, network_type, physical_network, segmentation_id,
vif_ports=None):
if vif_ports is None:
vif_ports = {}
self.vlan = vlan
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
self.vif_ports = vif_ports
# set of tunnel ports on which packets should be flooded
self.tun_ofports = set()
def __str__(self):
return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
(self.vlan, self.netwo
|
rk_type, self.physical_network,
self.segmentation_id))
class OVSPluginApi(agent_rpc.PluginApi):
pass
def has_zero_prefixlen_address(ip_addresses):
return any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in ip_addresses)
class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2population_rpc.L2populationRpcCallBackTunnelMixin,
dvr_rpc.DVRAgentRpcCallbackMixin):
|
'''Implements OVS-based tunneling, VLANs and flat networks.
Two local bridges are created: an integration bridge (defaults to
'br-int') and a tunneling bridge (defaults to 'br-tun'). An
additional bridge is created for each physical network interface
used for VLANs and/or flat networks.
All VM VIFs are plugged into the integration bridge. VM VIFs on a
given virtual network share a common "local" VLAN (i.e. not
propagated externally). The VLAN id of this local VLAN is mapped
to the physical networking details realizing that virtual network.
For virtual networks realized as GRE tunnels, a Logical Switch
(LS) identifier is used to differentiate tenant traffic on
inter-HV tunnels. A mesh of tunnels is created to other
Hypervisors in the cloud. These tunnels originate and terminate on
the tunneling bridge of each hypervisor. Port patching is done to
connect local VLANs on the integration bridge to inter-hypervisor
tunnels on the tunnel bridge.
For each virtual network realized as a VLAN or flat network, a
veth or a pair of patch ports is used to connect the local VLAN on
the integration bridge with the physical network bridge, with flow
rules adding, modifying, or stripping VLAN tags as necessary.
'''
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
# 1.2 Support DVR (Distributed Virtual Router) RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def __init__(self, bridge_classes, integ_br, tun_br, local_ip,
bridge_mappings, polling_interval, tunnel_types=None,
veth_mtu=None, l2_population=False,
enable_distributed_routing=False,
minimize_polling=False,
ovsdb_monitor_respawn_interval=(
constants.DEFAULT_OVSDBMON_RESPAWN),
arp_responder=False,
prevent_arp_spoofing=True,
use_veth_interconnection=False,
quitting_rpc_timeout=None,
conf=None):
'''Constructor.
:param bridge_classes: a dict for bridge classes.
:param integ_br: name of the integration bridge.
:param tun_br: name of the tunnel bridge.
:param local_ip: local IP address of this hypervisor.
:param bridge_mappings: mappings from physical network name to bridge.
:param polling_interval: interval (secs) to poll DB.
:param tunnel_types: A list of tunnel types to enable support for in
the agent. If set, will automatically set enable_tunneling to
True.
:param veth_mtu: MTU size for veth interfaces.
:param l2_population: Optional, whether L2 population is turned on
:param minimize_polling: Optional, whether to minimize polling by
monitoring ovsdb for interface changes.
:param ovsdb_monitor_respawn_interval: Optional, when using polling
minimization, the number of seconds to wait before respawning
the ovsdb monitor.
:param arp_responder: Optional, enable local ARP responder if it is
supported.
:param prevent_arp_spoofing: Optional, enable suppression of any ARP
responses from ports that don't match an IP address that belongs
to the ports. Spoofing rules will not be added to ports that
have port security disabled.
:param use_veth_interconnection: use veths instead of patch ports to
interconnect the integration bridge to physical bridges.
:param quitting_rpc_timeout: timeout in seconds for rpc calls after
SIGTERM is received
:param conf: an instance of ConfigOpts
'''
super(OVSNeutronAgent, self).__init__()
self.conf = conf or cfg.CONF
self.ovs = ovs_lib.BaseOVS()
self.fullsync = True
# init bridge classes with configured datapath type.
self.br_int_cls, self.br_phys_cls, self.br_tun_cls = (
functools.partial(bridge_classes[b],
datapath_type=self.conf.OVS.datapath_type)
for b in ('br_int', 'br_phys', 'br_tun'))
self.use_veth_interconnection = use_veth_interconnection
self.veth_mtu = veth_mtu
self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG,
p_const.MAX_VLAN_TAG))
self.tunnel_ty
|
tanmaykm/edx-platform
|
openedx/core/djangoapps/oauth_dispatch/tests/constants.py
|
Python
|
agpl-3.0
| 97 | 0 |
"""
Constants for testing purposes
|
"""
DUMMY_REDIRECT_URL = u'https://example.com/edx/re
|
direct'
|
belokop-an/agenda-tools
|
code/htdocs/confAbstractBook.py
|
Python
|
gpl-2.0
| 233 | 0.017167 |
from MaKaC.webinterface.rh import conferenceDisplay
def index(req,**params):
return conferenceDisplay.RHAbstractBook(req).process(params)
def test(req,**params):
|
return conferenceDisplay.RHAbstractBook(req)
|
.process(params)
|
ahmad88me/PyGithub
|
github/GithubException.py
|
Python
|
lgpl-3.0
| 5,277 | 0.007391 |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Cameron White <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2014 Vincent Jacques <[email protected]> #
# Copyright 2016 Peter Buckley <[email protected]> #
# Copyright 2016 humbug <bah> #
# Copyright 2018 sfdye <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import json
class GithubException(Exception):
"""
Error handling in PyGithub is done with exceptions. This class is the base of all exceptions raised by PyGithub (but :class:`github.GithubException.BadAttributeException`).
Some other types of exceptions might be raised by underlying libraries, for example for network-related issues
|
.
"""
def __init__(self, status, data):
super().__init__()
self.__status =
|
status
self.__data = data
self.args = [status, data]
@property
def status(self):
"""
The status returned by the Github API
"""
return self.__status
@property
def data(self):
"""
The (decoded) data returned by the Github API
"""
return self.__data
def __str__(self):
return "{status} {data}".format(status=self.status, data=json.dumps(self.data))
class BadCredentialsException(GithubException):
"""
Exception raised in case of bad credentials (when Github API replies with a 401 or 403 HTML status)
"""
class UnknownObjectException(GithubException):
"""
Exception raised when a non-existing object is requested (when Github API replies with a 404 HTML status)
"""
class BadUserAgentException(GithubException):
"""
Exception raised when request is sent with a bad user agent header (when Github API replies with a 403 bad user agent HTML status)
"""
class RateLimitExceededException(GithubException):
"""
Exception raised when the rate limit is exceeded (when Github API replies with a 403 rate limit exceeded HTML status)
"""
class BadAttributeException(Exception):
"""
Exception raised when Github returns an attribute with the wrong type.
"""
def __init__(self, actualValue, expectedType, transformationException):
self.__actualValue = actualValue
self.__expectedType = expectedType
self.__transformationException = transformationException
@property
def actual_value(self):
"""
The value returned by Github
"""
return self.__actualValue
@property
def expected_type(self):
"""
The type PyGithub expected
"""
return self.__expectedType
@property
def transformation_exception(self):
"""
The exception raised when PyGithub tried to parse the value
"""
return self.__transformationException
class TwoFactorException(GithubException):
"""
Exception raised when Github requires a onetime password for two-factor authentication
"""
class IncompletableObject(GithubException):
"""
Exception raised when we can not request an object from Github because the data returned did not include a URL
"""
class MockException(Exception):
"""
Exception raised when there is a missing or invalid data in the mock values
"""
|
neuropoly/axonsegpy
|
axonsegpy/GUI.py
|
Python
|
mit
| 4,301 | 0.016043 |
import tkinter as tk
import sys
import json
import sys
from tkinter import filedialog
from tkinter import Spinbox
from tkinter import Canvas
from tkinter import RIGHT
from tkinter import ttk
from PIL import Image, ImageTk
from core.readAlgoFile import readAlgo
from core.readAlgoFile import getAlgoList
from core.ConfigParser import parse
from core.modifyConfigFile import addAlgorithme
class GUI(tk.Frame):
def __init__(self, root):
self.photo = None
self.filename=None
self.configFile=("./test.json")
tk.Frame.__init__(self, root)
self.parametre = {}
# define canvas
self.w = Canvas(root, width=200, height=200, borderwidth=3, background="black")
self.w.pack(side=RIGHT)
self.algo={}
# algorithms choice dropdown
lst1 = getAlgoList()
self.dropdownvalue = tk.StringVar()
self.dropdownvalue.set("Please Select")
drop = tk.OptionMenu(root,self.dropdownvalue, *lst1)
drop.pack()
#TODO : Create different frames for
self.dropdownvalue.trace("w", self.callback)
# options for buttons
button_opt = {'fill': 'both' , 'padx': 5, 'pady': 5}
# define buttons
self.selectbtn = tk.Button(self, text='Select Image', command=self.askopenfilename).pack(**button_opt)
self.runalgobtn = tk.Button(self, text='Run algo', command=self.runAlgorithm).pack(side=RIGHT, **button_opt)
self.previewbtn = tk.Button(self, text='Preview', command=self.previewalgorithm).pack(**button_opt)
# define options for opening or saving a file
self.file_opt = options = {}
options['defaultextension'] = '.PNG'
options['filetypes'] = [('all files', '.*'), ('PNG file', '.png'), ('TIFF file', '.tiff')]
options['initialdir'] = '.'
options['initialfile'] = ''
options['parent'] = root
options['title'] = 'Please select an image'
# This is only available on the Macintosh, and only when Navigation Services are installed.
#options['message'] = 'message'
# if you use the multiple file version of the module functions this option is set automatically.
#options['multiple'] = 1
def askopenfilename(self):
"""Returns an opened file in read mode.
This time the dialog just returns a filename and the file is opened by your own code.
"""
# get filename
self.filename = filedialog.askopenfilename(**self.file_opt)
# Code below should put the image in the canvas
if self.filename:
# TODO : get only the filename from the path
image = Image.open(0, self.filename
|
)
photo = ImageTk.PhotoImage(image)
#self.w.create_image(photo)
def popUpAlgo(self,algo,nameAlgo):
"""
:param algo: a list of algo
:return:
"""
button_opt = {'fill': 'both
|
', 'padx': 5, 'pady': 5}
popup=tk.Tk()
popup.wm_title("Select your parameters")
labelList=[]
entryList=[]
paramAlgo=algo[0]["params"]
keyAlgo=list(paramAlgo.keys())
nbrParam=len(paramAlgo)
for i in range(nbrParam):
labelList.append(tk.Label(popup,text=keyAlgo[i]))
entryList.append(tk.Entry(popup))
labelList[i].pack()
entryList[i].pack()
tk.Button(popup, text='Apply',command=lambda:self.appyAlgo(labelList,entryList,nameAlgo)).pack(**button_opt)
tk.Button(popup, text='Done', command=popup.destroy).pack(**button_opt)
popup.mainloop()
def previewalgorithm(self):
# Here, the algo should be run
# in future releases, it should take the canvas image as input
# and output it in the canvas
pass
def appyAlgo(self,labelList,entryList,nameAlgo):
"""
Loads a
:param labelList:
:param entryList:
:return:
"""
for i in range(len(labelList)):
self.parametre[labelList[i].cget("text")]=entryList[i].get()
self.algo["name"]=nameAlgo
self.algo["parametre"]=self.parametre
def runAlgorithm(self):
"""
Change configfile, launch algo
:return:
"""
addAlgorithme(self.configFile,"preprocessing",self.algo)
#parse(self.configFile)
def callback(self, *args):
"""
Callback for our dropdown
:param args:
:return:
"""
print(self.dropdownvalue.get())
self.popUpAlgo(readAlgo(self.dropdownvalue.get()),self.dropdownvalue.get())
if __name__=='__main__':
root = tk.Tk()
GUI(root).pack()
root.mainloop()
|
sveetch/recalbox-manager
|
project/manager_frontend/forms/bios.py
|
Python
|
mit
| 3,935 | 0.009911 |
# -*- coding: utf-8 -*-
"""
Thread forms
"""
import os, hashlib
from django.conf import settings
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.core.files.storage import FileSystemStorage
from project.manager_frontend.forms import CrispyFormMixin
from project.utils.imports import safe_import_module
BIOS_FS_STORAGE = FileSystemStorage(location=settings.RECALBOX_BIOS_PATH, base_url=settings.MEDIA_URL)
def hashfile(afile, hasher, blocksize=65536):
"""
Efficient way to generate checksum from a file, return hexdigest checksum
Use it like this:
import hashlib
hashfile(open(BIOS_FS_STORAGE.path(YOUR_FILEPATH), 'rb'), hashlib.md5())
Stealed from http://stackoverflow.com/a/3431835/4884485
"""
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.hexdigest()
class BiosDeleteForm(CrispyFormMixin, forms.Form):
"""
Form to delete many bios
"""
form_key = 'delete'
form_fieldname_trigger = 'delete_submit'
#crispy_form_helper_path = 'project.manager_frontend.forms.crispies.bios_delete_helper'
#crispy_form_helper_kwargs = {}
def __init__(self, *args, **kwargs):
self.bios_choices = kwargs.pop('bios_choices')
super(BiosDeleteForm, self).__init__(*args, **kwargs)
super(forms.Form, self).__init__(*args, **kwargs)
self.fields['bios_files'] = forms.MultipleChoiceField(choices=self.bios_choices, widget=forms.CheckboxSelectMultiple, required=False)
def save(self):
bios_map = dict(self.bios_choices)
deleted_bios = []
# Delete all selected bios files
for md5hash in self.cleaned_data["bios_files"]:
filename = bios_map.get(md5hash)
if BIOS_FS_STORAGE.exists(filename):
BIOS_FS_STORAGE.delete(filename)
deleted_bios.append(filename)
return deleted_bios
class BiosUploadForm(CrispyFormMixin, forms.Form):
"""
Bios upload form
"""
#crispy_form_helper_path = 'project.manager_frontend.forms.crispies.bios_helper'
#crispy_form_helper_kwargs = {}
form_key = 'upload'
form_fieldname_trigger = 'upload_submit'
bios = forms.FileField(label=_('Bios file'), required=True)
def __init__(self, *args, **kwargs):
self.manifest = kwargs.pop('bios_manifest')
super(BiosUploadForm, self).__init__(*args, **kwargs)
super(forms.Form, self).__init__(*args, **kwargs)
def clean_bios(self):
"""
Validate bios file from Recalbox Manifest
The bios file must have the right file name and the right md5 checksum
"""
bios = self.cleaned_data['bios']
if bios:
#simple_manifest = {filename: md5hash for (md5hash,filename,system_name,exists) in self.manifest}
simple_manifest = {values[0]: md5hash for md5hash,values in
|
self.manifest.items()}
name = os.path.basename(bios.name)
if name not in simple_manifest:
raise forms.ValidationError(_("Your file does not seem to be a supported Bios"))
else:
bios_chec
|
ksum = hashfile(bios, hashlib.md5())
if bios_checksum != simple_manifest[name]:
raise forms.ValidationError(_("Your file does not have a correct MD5 checksum"))
return bios
def save(self):
bios = self.cleaned_data["bios"]
# Remove the previous file with identical name if any
if BIOS_FS_STORAGE.exists(bios.name):
BIOS_FS_STORAGE.delete(bios.name)
# Save the new uploaded file
BIOS_FS_STORAGE.save(bios.name, bios)
return BIOS_FS_STORAGE.path(bios.name)
|
AugurProject/augur-core
|
tests/trading/test_tradingEscapeHatch.py
|
Python
|
gpl-3.0
| 2,511 | 0.007168 |
#!/usr/bin/env python
from ethereum.tools import tester
from ethereum.tools.tester import TransactionFailed
from pytest import raises
from utils import fix, longTo32Bytes
from constants import LONG, YES, NO
def test_escapeHatch(contractsFixture, cash, market):
controller = contractsFixture.contracts['Controller']
createOrder = contractsFixture.contracts['CreateOrder']
fillOrder = contractsFixture.contracts['FillOrder']
trade = contractsFixture.contracts['Trade']
tradingEscapeHatch = contractsFixture.contracts['TradingEscapeHatch']
yesShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(YES))
noShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(NO))
initialTester1ETH = contractsFixture.chain.head_state.get_balance(tester.a1)
initialTester2ETH = contractsFixture.chain.head_state.get_balance(tester.a2)
# create order with cash
orderID = createOrder.publicCreateOrder(contractsFixture.contracts['Constants'].ASK(), fix(1), 6000, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), "42", sender=tester.k1, value=fix('1', '4000'))
assert orderID
# fill order with cash using on-chain matcher
assert trade.publicFillBestOrder(LONG, market.address, YES, fix(1), 6000, sender=tester.k2, value=fix('1', '6000')) == 0
# assert starting values
assert cash.balanceOf(market.address) == fix('10000')
assert noShareToken.balanceOf(tester.a1) == fix(1)
assert yesShareToken.balanceOf(tester.a2) == fix(1)
with raises(TransactionFailed):
tradingEscapeHatch.claimSharesInUpdate(market.address)
# emergency stop and then have everyone liquidate their position
controller.emergencyStop()
curTester1Balance = contractsFixture.chain.head_state.get_balance(tester.a1)
assert tradingEscapeHatch.getFrozenShareValueInMarket(market.address, sender = tester.k1) == initialTester1ETH - curTester1Balance
assert tradingEscapeHatch.claimSharesInUpdate(market.address, sender = tester.k1)
assert tradingEscapeHatch.claimSharesInUpdate(market
|
.address, sender = tester.k2)
# assert final values (should be a zero sum game)
assert contractsFixture.chain.head_state.get_balance(tester.a1) == initialTester1ETH
assert contractsFixture.chain.head_state.get_balance(tester.a2
|
) == initialTester2ETH
assert cash.balanceOf(market.address) == 0
assert noShareToken.balanceOf(tester.a1) == 0
assert yesShareToken.balanceOf(tester.a2) == 0
|
alfiyansys/a-reconsidered-sign
|
algorithms/__init__.py
|
Python
|
gpl-3.0
| 18 | 0.055556 |
_
|
_all__ = ["core"]
| |
USGSDenverPychron/pychron
|
pychron/pyscripts/contexts.py
|
Python
|
apache-2.0
| 1,376 | 0 |
# ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
class CTXObject(object):
def update(self, ctx):
self.__dict__.update(**ctx)
class EXPObject(CTXObject):
pass
class CMDObject(CTXObject):
pass
class MeasurementCTXObject(object):
def create(self, yd):
for k in (
"baseline",
|
"multicollect",
"peakcenter",
"equilibration",
"whiff",
"peakhop",
):
try:
c = CTXObject()
c.update(yd[k])
setattr(self, k, c)
except KeyError:
pass
# ============= EOF =============================================
|
ncliam/serverpos
|
openerp/custom_modules/school_link/school.py
|
Python
|
agpl-3.0
| 39,903 | 0.00599 |
# -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Julius Network Solutions SARL <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
import pytz, datetime
from ast import literal_eval
from onesignal import OneSignal
USER_AUTH_KEY = 'NWQ4ZDNiMjUtYTdhNS00YTBhLTg2MTctYjlmOTM0OTdhZjBi'
YOUR_APP_ID = '3fcdf8f2-9523-4ca7-8489-fefb29cbecc4'
REST_API_KEY = 'MDdjN2JjZDctNWE2ZS00ZGNjLTlkNmEtNTRkYmUwYzBjZjc3'
class im_chat_session(osv.osv):
_inherit = 'im_chat.session'
def add_user(self, cr, uid, uuid, user_id, context=None):
""" add the given user to the given session """
sids = self.search(cr, uid, [('uuid', '=', uuid)], context=context, limit=1)
for session in self.browse(cr, uid, sids, context=context):
if user_id not in [u.id for u in session.user_ids]:
self.write(cr, uid, [session.id], {'user_ids': [(4, user_id)]}, context=context)
# notify the all the channel users and anonymous channel
notifications = []
for channel_user_id in session.user_ids:
info = self.session_info(cr, channel_user_id.id, [session.id], context=context)
notifications.append([(cr.dbname, 'im_chat.session', channel_user_id.id), info])
# Anonymous are not notified when a new user is added : cannot exec session_info as uid = None
info = self.session_info(cr, SUPERUSER_ID, [session.id], context=context)
notifications.append([session.uuid, info])
self.pool['bus.bus'].sendmany(cr, uid, notifications)
# send a message to the conversation
user = self.pool['res.users'].browse(cr, uid, user_id, context=context)
chat_names = self.pool['res.users'].get_chat_name(cr, uid, [user.id], context=context)
notify_msg = _('%s joined the conversation.') % (chat_names[user.id] or user.name)
self.pool["im_chat.message"].post(cr, uid, uid, session.uuid, "meta", notify_msg, context=context)
def users_infos(self, cr, uid, ids, context=None):
""" get the user infos for all the user in the session """
for session in self.pool["im_chat.session"].browse(cr, SUPERUSER_ID, ids, context=context):
users_infos = self.pool["res.users"].read(cr, SUPERUSER_ID, [u.id for u in session.user_ids], ['id','name', 'im_status'], context=context)
return users_infos
class email_template(osv.osv):
_inherit = "email.template"
def _get_default_gateway(self, cr, uid, context=None):
gateway_id = self.pool.get('sms.smsclient').search(cr, uid, [], context=context, limit=1)
return gateway_id and gateway_id[0] or None
_defaults = {
'sms_template': True,
'gateway_id': _get_default_gateway,
}
class res_user(osv.osv):
_inherit = 'res.users'
def _get_schools(self, cr, uid, context=None):
school_ids = []
res_user = self.pool.get('res.users')
res_partner = self.pool.get('res.partner')
user_id = res_user.browse(cr, SUPERUSER_ID, uid, context=context)
if user_id.partner_id and user_id.partner_id.mobile:
mobile = user_id.partner_id.mobile
partner_ids = res_partner.search(cr, SUPERUSER_ID, [('mobile', '=', mobile), ('customer', '=', True)],
context=context)
if (partner_ids):
for partner_id in partner_ids:
partner = res_partner.browse(cr, SUPERUSER_ID, partner_id, context=context)
if partner and partner.company_id and partner.company_id.school:
school_ids.append(partner.company_id.id)
return school_ids
def _get_all_children(self, cr, uid, context=None):
child_ids = []
res_user = self.pool.get('res.users')
res_partner = self.pool.get('res.partner')
user_id = res_user.browse(cr, SUPERUSER_ID, uid, context=context)
if user_id.partner_id and user_id.partner_id.mobile:
mobile = user_id.partner_id.mobile
partner_ids = res_partner.search(cr, SUPERUSER_ID, [('mobile', '=', mobile), ('customer', '=', True)],
context=context)
if (partner_ids):
for partner_id in partner_ids:
partner = res_partner.browse(cr, SUPERUSER_ID, partner_id, context=context)
for child_id in partner.children:
child_ids.append(child_id.id)
return child_ids
def get_relate_schools(self, cr, uid, ids, name, args, context=None):
res = {}
for user_id in self.browse(cr, SUPERUSER_ID, ids, context=context):
res[user_id.id] = self._get_schools(cr, user_id.id, context=context)
return res
def get_all_children(self, cr, uid, ids, name, args, context=None):
res = {}
for user_id in self.browse(cr, SUPERUSER_ID, ids, context=context):
res[user_id.id] = self._get_all_children(cr, user_id.id, context=context)
return res
def select_school(self, cr, uid, school=None, context=None):
if school:
company_id = self.pool.get('res.company').browse(cr, SUPERUSER_ID, school, context=context)
if company_id:
user_data = {
'company_id': company_id.id,
'company_ids': [(6, 0, [company_id.id])],
}
self.pool.get("res.users").write(cr, SUPERUSER_ID, uid, user_data, context=context)
else:
raise osv.except_osv(_('error!'), _("Invalid school selected"))
def get_chat_name(self, cr, uid, user_ids, context=None):
result = {}
for user_id in user_ids:
user = self.browse(cr, SUPERUSER_ID, user_id, context=context)
company_id = user.company_id.id
name = user.name
employee_ids = self.pool.get('hr.employee').search(cr, SUPERUSER_ID, [("user_id",'=', user_id)], context=context)
employee_id = employee_ids and employee_ids[0] or None
if not employee_id:
mobile = user.partner_id.mobile
if mobile:
# search parent in school
parent_ids = self.pool.get('res.partner').search(cr, SUPERUSER_ID, [("mobile", '=', mobile),
('customer',
|
'=', True),
('company_id','=', company_id)], context=context)
if not parent_ids or len(parent_ids) == 0:
# search parent not in school
parent_ids = self.pool.get('res.partner').search(cr, SUPERUSER_ID, [("mobile", '=', mobile),
|
('customer', '=', True)], context=context)
parent_id = parent_ids and parent_ids[0] or None
|
taget/libvirt
|
docs/index.py
|
Python
|
lgpl-2.1
| 37,181 | 0.007504 |
#!/usr/bin/python -u
#
# imports the API description and fills up a database with
# name relevance to modules, functions or web pages
#
# Operation needed:
# =================
#
# install mysqld, the python wrappers for mysql and libxml2, start mysqld
# - mysql-server
# - mysql
# - php-mysql
# - MySQL-python
# Change the root passwd of mysql:
# mysqladmin -u root password new_password
# Create the new database libvir
# mysqladmin -p create libvir
# Create a database user 'veillard' and give him password access
# change veillard and abcde with the right user name and passwd
# mysql -p
# password:
# mysql> GRANT ALL PRIVILEGES ON libvir TO veillard@localhost
# IDENTIFIED BY 'abcde' WITH GRANT OPTION;
# mysql> GRANT ALL PRIVILEGES ON libvir.* TO veillard@localhost
# IDENTIFIED BY 'abcde' WITH GRANT OPTION;
#
# As the user check the access:
# mysql -p libvir
# Enter password:
# Welcome to the MySQL monitor....
# mysql> use libvir
# Database changed
# mysql> quit
# Bye
#
# Then run the script in the doc subdir, it will create the symbols and
# word tables and populate them with information extracted from
# the libvirt-api.xml API description, and make then accessible read-only
# by nobody@loaclhost the user expected to be Apache's one
#
# On the Apache configuration, make sure you have php support enabled
#
import MySQLdb
import libxml2
import sys
import string
import os
#
# We are not interested in parsing errors here
#
def callback(ctx, str):
return
libxml2.registerErrorHandler(callback, None)
#
# The dictionary of tables required and the SQL command needed
# to create them
#
TABLES={
"symbols" : """CREATE TABLE symbols (
name varchar(255) BINARY NOT NULL,
module varchar(255) BINARY NOT NULL,
type varchar(25) NOT NULL,
descr varchar(255),
UNIQUE KEY name (name),
KEY module (module))""",
"words" : """CREATE TABLE words (
name varchar(50) BINARY NOT NULL,
symbol varchar(255) BINARY NOT NULL,
relevance int,
KEY name (name),
KEY symbol (symbol),
UNIQUE KEY ID (name, symbol))""",
"wordsHTML" : """CREATE TABLE wordsHTML (
name varchar(50) BINARY NOT NULL,
resource varchar(255) BINARY NOT NULL,
section varchar(255),
id varchar(50),
relevance int,
KEY name (name),
KEY resource (resource),
UNIQUE KEY ref (name, resource))""",
"wordsArchive" : """CREATE TABLE wordsArchive (
name varchar(50) BINARY NOT NULL,
ID int(11) NOT NULL,
relevance int,
KEY name (name),
UNIQUE KEY ref (name, ID))""",
"pages" : """CREATE TABLE pages (
resource varchar(255) BINARY NOT NULL,
title varchar(255) BINARY NOT NULL,
UNIQUE KEY name (resource))""",
"archives" : """CREATE TABLE archives (
ID int(11) NOT NULL auto_increment,
resource varchar(255) BINARY NOT NULL,
title varchar(255) BINARY NOT NULL,
UNIQUE KEY id (ID,resource(255)),
INDEX (ID),
INDEX (resource))""",
"Queries" : """CREATE TABLE Queries (
ID int(11) NOT NULL auto_increment,
Value varchar(50) NOT NULL,
Count int(11) NOT NULL,
UNIQUE KEY id (ID,Value(35)),
INDEX (ID))""",
"AllQueries" : """CREATE TABLE AllQueries (
ID int(11) NOT NULL auto_increment,
Value varchar(50) NOT NULL,
Count int(11) NOT NULL,
UNIQUE KEY id (ID,Value(35)),
INDEX (ID))""",
}
#
# The XML API description file to parse
#
API="libvirt-api.xml"
DB=None
#########################################################################
# #
# MySQL database interfaces #
# #
#########################################################################
def createTable(db, name):
global TABLES
if db is None:
return -1
if name is None:
return -1
c = db.cursor()
ret = c.execute("DROP TABLE IF EXISTS %s" % (name))
if ret == 1:
print "Removed table %s" % (name)
print "Creating table %s" % (name)
try:
ret = c.execute(TABLES[name])
except:
print "Failed to create table %s" % (name)
return -1
return ret
def checkTables(db, verbose = 1):
global TABLES
if db is None:
return -1
c = db.cursor()
nbtables = c.execute("show tables")
if verbose:
print "Found %d tables" % (nbtables)
tables = {}
i = 0
while i < nbtables:
l = c.fetchone()
name = l[0]
tables[name] = {}
i = i + 1
for table in TABLES.keys():
if not tables.has_key(table):
print "table %s missing" % (table)
createTable(db, table)
try:
ret = c.execute("SELECT count(*) from %s" % table)
row = c.fetchone()
if verbose:
print "Table %s contains %d records" % (table, row[0])
except:
print "Troubles with table %s : repairing" % (table)
ret = c.execute("repair table %s" % table)
print "repairing returned %d" % (ret)
ret = c.execute("SELECT count(*) from %s" % table)
row = c.fetchone()
print "Table %s contains %d records" % (table, row[0])
if verbose:
print "checkTables finished"
# make sure apache can access the tables read-only
try:
ret = c.execute("GRANT SELECT ON libvir.* TO nobody@localhost")
ret = c.execute("GRANT INSERT,SELECT,UPDATE ON libvir.Queries TO nobody@localhost")
except:
pass
return 0
def openMySQL(db="libvir", passwd=None, verbose = 1):
global DB
if passwd is None:
try:
passwd = os.environ["MySQL_PASS"]
except:
print "No password available, set environment MySQL_PASS"
sys.exit(1)
DB = MySQLdb.connect(passwd=passwd, db=db)
if DB is None:
return -1
ret = checkTables(DB, verbose)
return ret
def updateWord(name, symbol, relevance):
global DB
if DB is None:
openMySQL()
if DB is None:
return -1
if name is None:
return -1
if symbol is None:
return -1
c = DB.cursor()
try:
ret = c.execute(
"""INSERT INTO words (name, symbol, relevance) VALUES ('%s','%s', %d)""" %
(name, symbol, relevance))
except:
try:
ret = c.execute(
"""UPDATE words SET relevance = %d where name = '%s' and symbol = '%s'""" %
(relevance, name, symbol))
except:
print "Update word (%s, %s, %s) failed command" % (name, symbol, relevance)
print "UPDATE words SET relevance = %d where name = '%s' and symbol = '%s'" % (relevance, name, symbol)
print sys.exc_type, sys.exc_value
return -1
return ret
def updateSymbol(name, module, type, desc):
global DB
updateWord(name, name, 50)
if DB is None:
op
|
enMySQL()
if DB is None:
return -1
if name is None:
return -1
if module is None:
return -1
if type is None:
return -1
try:
desc = string.replace(desc, "'", " ")
l = string.split(desc, ".")
desc = l[0]
desc = desc[0:99]
except:
desc = ""
c = DB.cursor()
try:
|
ret = c.execute(
"""INSERT INTO symbols (name, module, type, descr) VALUES ('%s','%s', '%s', '%s')""" %
(name, module, type, desc))
except:
try:
ret = c.execute(
"""UPDATE symbols SET module='%s', type='%s', descr='%s' where name='%s'""" %
(module, type, desc, name))
except:
print "Update symbol (%s, %s, %s) failed command" % (name, module, type)
print """UPDA
|
jhprinz/openpathsampling
|
openpathsampling/tests/test_toy_dynamics.py
|
Python
|
lgpl-2.1
| 12,227 | 0.005561 |
'''
@author: David W.H. Swenson
'''
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from past.utils import old_div
from builtins import object
import os
from nose.tools import (assert_equal, assert_not_equal, assert_almost_equal)
from nose.plugins.skip import SkipTest
import openpathsampling as paths
import openpathsampling.engines.toy as toy
from .test_helpers import (true_func, assert_equal_array_array,
assert_items_equal)
import numpy as np
# =========================================================================
# This single test module includes all the tests for the toy_dynamics
# subpackage.
# =========================================================================
def setUp():
# set up globals
global gaussian, linear, outer, harmonic
gaussian = toy.Gaussian(6.0, [2.5, 40.0], [0.8, 0.5])
outer = toy.OuterWalls([1.12, 2.0], [0.2, -0.25])
linear = toy.LinearSlope([1.5, 0.75], 0.5)
harmonic = toy.HarmonicOscillator([1.5, 2.0], [0.5, 3.0], [0.25, 0.75])
global init_pos, init_vel, sys_mass
init_pos = np.array([0.7, 0.65])
init_vel = np.array([0.6, 0.5])
sys_mass = np.array([1.5, 1.5])
# === TESTS FOR TOY POTENTIAL ENERGY SURFACES =============================
class testHarmonicOscillator(object):
def setUp(self):
self.positions = init_pos
self.velocities = init_vel
self.mass = sys_mass
def test_V(self):
# k = m * omega^2 = [1.5, 1.5] * [0.5, 3.0]^2
# = [0.375, 13.5]
# V = 0.5*( 1.5*0.375*((0.7)-0.25)^2 + 2.0*13.5*((0.65)-0.75)^2)
# = 0.191953125
assert_almost_equal(harmonic.V(self), 0.191953125)
def test_dVdx(self):
# [1.5, 2.0] * [1.5, 1.5] * [0.5, 3.0]^2 * [(0.7)-0.25, (0.65)-0.75]
# = [1.5*1.5*0.5^2*((0.7)-0.25), 2.0*1.5*3.0^2*((0.65)-0.75)]
# = [0.253125, -2.7]
for (experiment, theory) in zip(harmonic.dVdx(self),
[0.253125, -2.7]):
assert_almost_equal(experiment, theory)
class testGaussian(object):
def setUp(self):
self.positions = init_pos
self.velocities = init_vel
self.mass = sys_mass
def test_V(self):
# 6.0*exp(-2.5*((0.7)-0.8)^2-40.0*((0.65)-0.5)^2) = 2.37918851445
assert_almost_equal(gaussian.V(self), 2.37918851445)
def test_dVdx(self):
# exp(-2.5*((0.7)-0.8)^2-40*((0.65)-0.5)^2)*(-30*(0.7)+24)
assert_almost_equal(gaussian.dVdx(self)[0], 1.18959425722)
# -480*((0.65)-0.5)*exp(-2.5*((0.7)-0.8)^2-40*((0.65)-0.5)^2)
assert_almost_equal(gaussian.dVdx(self)[1], -28.5502621734)
class testOuterWalls(object):
def setUp(self):
self.positions = init_pos
self.velocities = init_vel
self.mass = sys_mass
def test_V(self):
# 1.12*(0.7-0.2)^6+2.0*(0.65-(-0.25))^6 = 1.080382
assert_almost_equal(outer.V(self), 1.080382)
def test_dVdx(self):
# 6*1.12*(0.7-0.2)^5 = 0.21
assert_almost_equal(outer.dVdx(self)[0], 0.21)
# 6*2.0*(0.65-(-0.25))^5 = 7.08588
assert_almost_equal(outer.dVdx(self)[1], 7.08588)
class testLinearSlope(object):
def setUp(self):
self.positions = init_pos
self.velocities = init_vel
self.mass = sys_mass
def test_V(self):
assert_almost_equal(linear.V(self), 2.0375)
def test_dVdx(self):
assert_equal(linear.dVdx(self), [1.5, 0.75])
class testCombinations(object):
def setUp(self):
self.positions = init_pos
self.velocities = init_vel
self.mass = sys_mass
self.simpletest = gaussian + gaussian
self.fullertest = gaussian + outer - linear
def test_V(self):
assert_almost_equal(self.simpletest.V(self), 2*2.37918851445)
assert_almost_equal(self.fullertest.V(self),
2.37918851445 + 1.080382 - 2.0375)
def test_dVdx(self):
assert_almost_equal(self.simpletest.dVdx(self)[0], 2*1.18959425722)
assert_almost_equal(self.simpletest.dVdx(self)[1], 2*-28.5502621734)
assert_almost_equal(self.fullertest.dVdx(self)[0],
1.18959425722 + 0.21 - 1.5)
assert_almost_equal(self.fullertest.dVdx(self)[1],
-28.5502621734 + 7.08588 - 0.75)
def test_kinetic_energy(self):
assert_almost_equal(self.simpletest.kinetic_energy(self), 0.4575)
# === TESTS FOR TOY ENGINE OBJECT =========================================
class test_convert_fcn(object):
def test_convert_to_3Ndim(v):
raise SkipTest
assert_equal_array_array(toy.convert_to_3Ndim([1.0, 2.0]),
np.array([[1.0, 2.0, 0.0]]))
assert_equal_array_array(toy.convert_to_3Ndim([1.0, 2.0, 3.0]),
np.array([[1.0, 2.0, 3.0]]))
assert_equal_array_array(toy.convert_to_3Ndim([1.0, 2.0, 3.0, 4.0]),
np.array([[1.0, 2.0, 3.0], [4.0, 0.0, 0.0]]))
class testToyEngine(object):
def setUp(self):
pes = linear
integ = toy.LeapfrogVerletIntegrator(dt=0.002)
topology=toy.Topology(
n_spatial = 2,
masses = sys_mass,
pes = pes
)
options={
'integ' : integ,
'n_frames_max' : 5}
sim = toy.Engine(options=options,
topology=topology
)
template = toy.Snapshot(
coordinates=init_pos.copy(),
velocities=init_pos.copy(),
engine=sim
)
sim.positions = init_pos.copy()
sim.velocities = init_vel.copy()
sim.n_steps_per_frame = 10
self.sim = sim
def teardown(self):
if os.path.isfile('toy_tmp.nc'):
os.remove('toy_tmp.nc')
def test_sanity(self):
assert_items_equal(self.sim._mass, sys_mass)
assert_items_equal(self.sim.
|
_minv, [old_div(1
|
.0,m_i) for m_i in sys_mass])
assert_equal(self.sim.n_steps_per_frame, 10)
def test_snapshot_timestep(self):
assert_equal(self.sim.snapshot_timestep, 0.02)
def test_snapshot_get(self):
snapshot = self.sim.current_snapshot
assert_items_equal(snapshot.velocities[0],
self.sim.velocities)
assert_items_equal(snapshot.coordinates[0],
self.sim.positions)
def test_snapshot_set(self):
snap = toy.Snapshot(coordinates=np.array([[1,2,3]]),
velocities=np.array([[4,5,6]]))
self.sim.current_snapshot = snap
assert_items_equal(self.sim.positions, [1,2,3])
assert_items_equal(self.sim.velocities, [4,5,6])
def test_generate_next_frame(self):
# we test correctness by integrating forward, then backward
assert_items_equal(self.sim.positions, init_pos)
assert_items_equal(self.sim.velocities, init_vel)
snap = self.sim.generate_next_frame()
#assert_equal_array_array(snap.coordinates,
#np.array([init_pos.append(0.0)]))
self.sim.velocities = -self.sim.velocities
snap2 = self.sim.generate_next_frame()
np.testing.assert_allclose(snap2.coordinates[0], init_pos)
def test_generate(self):
self.sim.initialized = True
try:
traj = self.sim.generate(self.sim.current_snapshot, [true_func])
except paths.engines.EngineMaxLengthError as e:
traj = e.last_trajectory
assert_equal(len(traj), self.sim.n_frames_max)
else:
raise RuntimeError('Did not raise MaxLength Error')
def test_generate_n_frames(self):
self.sim.initialized = True
ens = paths.LengthEnsemble(4) # first snap plus n_frames
orig = self.sim.current_snapshot.copy()
traj1 = self.sim.generate(self.sim.current_snapshot, [ens.can_append])
self.sim.current_snapshot = orig
traj2 = [orig] + self.sim.generate_n_frames(3)
assert_equal(len(traj1), len(traj2))
|
soarlab/FPTuner
|
examples/primitives/doppler-1.py
|
Python
|
mit
| 477 | 0.020964 |
import tft_ir_api as IR
var_u = IR.RealVE("u", 0, (-100.0 - 0.0000001), (100.0 + 0.0000001))
var_v = IR.RealVE("v", 1, (20.0 - 0.000000001), (20000.0 + 0.000000001))
var_T = IR.RealVE("T", 2, (-30.0 - 0.000001), (50.0 + 0.000001))
t1 = IR.BE("+", 4, IR.FConst(331.4), IR.BE("*", 3, IR.FConst(0.6), var_T))
temp = IR.BE("+", 5, t1, var_u)
temp = IR.B
|
E("*", 8, temp, temp)
r = IR.BE("/", 9, IR.BE("*", 7, IR.UE("-", 6, t1), var_v), tem
|
p)
IR.TuneExpr(r)
|
hazelcast/hazelcast-python-client
|
hazelcast/protocol/codec/map_unlock_codec.py
|
Python
|
apache-2.0
| 1,021 | 0.002938 |
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x011300
_REQUEST_MESSAGE_TYPE = 70400
# hex: 0x011301
_RESPONSE_MESSAGE_TYPE = 70401
_REQUEST_THREAD_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_REFERENCE_ID_OFFSET = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_REFERENCE_ID_OFFSET + LONG_SIZE_IN_BYTES
def encode_request(name, key, thread_id, reference_id):
buf = create_initial_buffer
|
(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
FixSizedTypesCodec.encode_long(buf, _REQUEST_REFERENCE_ID_OFFSET, reference_id)
StringCodec.encode(buf, name)
DataCodec.encode(buf, key, True)
r
|
eturn OutboundMessage(buf, True)
|
r26zhao/django_blog
|
blog/migrations/0026_auto_20170629_1342.py
|
Python
|
mit
| 467 | 0.002179 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-29 05:42
from __future__ import unicode_literals
from dja
|
ngo.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0025_auto_20170626_0008'),
]
|
operations = [
migrations.AlterModelOptions(
name='category',
options={'ordering': ['-id'], 'verbose_name': '分类', 'verbose_name_plural': '分类'},
),
]
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/google/appengine/_internal/django/core/management/__init__.py
|
Python
|
bsd-3-clause
| 17,576 | 0.001707 |
import os
import sys
from optparse import OptionParser, NO_DEFAULT
import imp
import django
from google.appengine._internal.django.core.management.base import BaseCommand, CommandError, handle_default_options
from google.appengine._internal.django.utils.importlib import import_module
# For backwards compatibility: get_version() used to be in this module.
get_version = django.get_version
# A cache of loaded commands, so that call_command
# doesn't have to reload every time it's called.
_commands = None
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
Returns an empty list if no commands are defined.
"""
command_dir = os.path.join(management_dir, 'commands')
try:
return [f[:-3] for f in os.listdir(command_dir)
if not f.startswith('_') and f.endswith('.py')]
except OSError:
return []
def find_management_module(app_name):
"""
Determines the path to the management module for the given app_name,
without actually importing the application or the management module.
Raises ImportError if the management module cannot be found for any reason.
"""
parts = app_name.split('.')
parts.append('management')
parts.reverse()
part = parts.pop()
path = None
# When using manage.py, the project module is added to the path,
# loaded, then removed from the path. This means that
# testproject.testapp.models can be loaded in future, even if
# testproject isn't in the path. When looking for the management
# module, we need look for the case where the project name is part
# of the app_name but the project directory itself isn't on the path.
try:
f, path, descr = imp.find_module(part,path)
except ImportError,e:
if os.path.basename(os.getcwd()) != part:
raise e
while parts:
part = parts.pop()
f, path, descr = imp.find_module(part, path and [path] or None)
return path
def load_command_class(app_name, name):
"""
Given a command name and an application name, returns the Command
class instance. All errors raised by the import process
(ImportError, AttributeError) are allowed to propagate.
"""
module = import_module('%s.management.commands.%s' % (app_name, name))
return module.Command()
def get_commands():
"""
Returns a dictionary mapping command names to their callback applications.
This works by looking for a management.commands package in django.core, and
in each installed application -- if a commands package exists, all commands
in that package are registered.
Core commands are always included. If a settings module has been
specified, user-defined commands will also be included, the
startproject command will be disabled, and the startapp command
will be modified to use the directory in which the settings module appears.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated
|
module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
|
global _commands
if _commands is None:
_commands = dict([(name, 'django.core') for name in find_commands(__path__[0])])
# Find the installed apps
try:
from google.appengine._internal.django.conf import settings
apps = settings.INSTALLED_APPS
except (AttributeError, EnvironmentError, ImportError):
apps = []
# Find the project directory
try:
from google.appengine._internal.django.conf import settings
module = import_module(settings.SETTINGS_MODULE)
project_directory = setup_environ(module, settings.SETTINGS_MODULE)
except (AttributeError, EnvironmentError, ImportError, KeyError):
project_directory = None
# Find and load the management module for each installed app.
for app_name in apps:
try:
path = find_management_module(app_name)
_commands.update(dict([(name, app_name)
for name in find_commands(path)]))
except ImportError:
pass # No management module - ignore this app
if project_directory:
# Remove the "startproject" command from self.commands, because
# that's a django-admin.py command, not a manage.py command.
del _commands['startproject']
# Override the startapp command so that it always uses the
# project_directory, not the current working directory
# (which is default).
from google.appengine._internal.django.core.management.commands.startapp import ProjectCommand
_commands['startapp'] = ProjectCommand(project_directory)
return _commands
def call_command(name, *args, **options):
"""
Calls the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
Some examples:
call_command('syncdb')
call_command('shell', plain=True)
call_command('sqlall', 'myapp')
"""
# Load the command object.
try:
app_name = get_commands()[name]
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, name)
except KeyError:
raise CommandError("Unknown command: %r" % name)
# Grab out a list of defaults from the options. optparse does this for us
# when the script runs from the command line, but since call_command can
# be called programatically, we need to simulate the loading and handling
# of defaults (see #10080 for details).
defaults = dict([(o.dest, o.default)
for o in klass.option_list
if o.default is not NO_DEFAULT])
defaults.update(options)
return klass.execute(*args, **defaults)
class LaxOptionParser(OptionParser):
"""
An option parser that doesn't raise any errors on unknown options.
This is needed because the --settings and --pythonpath options affect
the commands (and thus the options) that are available to the user.
"""
def error(self, msg):
pass
def print_help(self):
"""Output nothing.
The lax options are included in the normal option parser, so under
normal usage, we don't need to print the lax options.
"""
pass
def print_lax_help(self):
"""Output the basic options available to every command.
This just redirects to the default print_help() behaviour.
"""
OptionParser.print_help(self)
def _process_args(self, largs, rargs, values):
"""
Overrides OptionParser._process_args to exclusively handle default
options and ignore args and other options.
This overrides the behavior of the super class, which stop parsing
at the first unrecognized option.
"""
while rargs:
arg = rargs[0]
try:
if arg[0:2] == "--" and len(arg) > 2:
# process a single long option (possibly with value(s))
# the superclass code pops the arg off rargs
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
# the superclass code pops the arg off rargs
self._process_short_opts(rargs, values)
else:
# it's either a non-default option or an arg
|
xstrengthofonex/code-live-tutorials
|
python_web_development/templating/router.py
|
Python
|
mit
| 1,680 | 0.002976 |
from webob import Request, Response
import re
def not_found(request, response):
response.status = 404
response.write("404 Not Found")
class Route(object):
default_methods = ['GET']
def __init__(self, path, handler, methods=None):
self.path = path
self.handler = handler
if methods is None:
self.methods = self.default_methods
else:
self.methods = methods
self.urlvars = {}
def match(self, request):
regex = re.compile(self.path)
match = regex.match(request.path)
if match and request.method in self.methods:
self.urlvars.update(match.groupdict())
request.urlvars = self.urlvars
return True
return False
class Router(object):
def __init__(self):
self.routes = []
def handle(self, url, handler, *args, **kwargs):
if not url.startswith("^"):
url = "^" + url
if not url.endswith("$"):
url += "$"
route = Route(path=url, handler=handler, *args, **kwargs)
self.routes.append(route)
def match(self, request):
for route in self.
|
routes:
if route.match(request):
return route.handler
return None
def dispatch(self, request):
handler = self.match(request)
return handler
def __call__(self, environ, start_response):
request = Request(environ)
response = Response()
handler = self.dispatch(request)
if handler:
handler()(request, response)
else:
not_found(request, response)
|
return response(environ, start_response)
|
stephenbalaban/keras
|
keras/layers/recurrent.py
|
Python
|
mit
| 29,818 | 0.001375 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from .. import activations, initializations
from ..utils.theano_utils import shared_scalar, shared_zeros, alloc_zeros_matrix
from ..layers.core import Layer, MaskedLayer
from six.moves import range
class Recurrent(MaskedLayer):
def get_output_mask(self, train=None):
if self.return_sequences:
return super(Recurrent, self).get_output_mask(train)
else:
return None
def get_padded_shuffled_mask(self, train, X, pad=0):
mask = self.get_input_mask(train)
if mask is None:
mask = T.ones_like(X.sum(axis=-1)) # is there a better way to do this without a sum?
# mask is (nb_samples, time)
mask = T.shape_padright(mask) # (nb_samples, time, 1)
mask = T.addbroadcast(mask, -1) # the new dimension (the '1') is made broadcastable
# see http://deeplearning.net/software/theano/library/tensor/basic.html#broadcasting-in-theano-vs-numpy
mask = mask.dimshuffle(1, 0, 2) # (time, nb_samples, 1)
if pad > 0:
# left-pad in time with 0
padding = alloc_zeros_matrix(pad, mask.shape[1], 1)
mask = T.concatenate([padding, mask], axis=0)
return mask.astype('int8')
@property
def output_shape(self):
input_shape = self.input_shape
if self.return_sequences:
return (input_shape[0], input_shape[1], self.output_dim)
else:
return (input_shape[0], self.output_dim)
class SimpleRNN(Recurrent):
'''
Fully connected RNN where output is to fed back to input.
Not a particularly useful model,
included for demonstration purposes
(demonstrates how to use theano.scan to build a basic RNN).
'''
def __init__(self, input_dim, output_dim,
init='glorot_uniform', inner_init='orthogonal', activation='sigmoid', weights=None,
|
truncate_gradient=-1, return_sequences=False):
super(SimpleRNN, self).__init__()
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
|
self.activation = activations.get(activation)
self.return_sequences = return_sequences
self.input = T.tensor3()
self.W = self.init((self.input_dim, self.output_dim))
self.U = self.inner_init((self.output_dim, self.output_dim))
self.b = shared_zeros((self.output_dim))
self.params = [self.W, self.U, self.b]
if weights is not None:
self.set_weights(weights)
def _step(self, x_t, mask_tm1, h_tm1, u):
'''
Variable names follow the conventions from:
http://deeplearning.net/software/theano/library/scan.html
'''
return self.activation(x_t + mask_tm1 * T.dot(h_tm1, u))
def get_output(self, train=False):
X = self.get_input(train) # shape: (nb_samples, time (padded with zeros), input_dim)
# new shape: (time, nb_samples, input_dim) -> because theano.scan iterates over main dimension
padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
X = X.dimshuffle((1, 0, 2))
x = T.dot(X, self.W) + self.b
# scan = theano symbolic loop.
# See: http://deeplearning.net/software/theano/library/scan.html
# Iterate over the first dimension of the x array (=time).
outputs, updates = theano.scan(
self._step, # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])
sequences=[x, dict(input=padded_mask, taps=[-1])], # tensors to iterate over, inputs to _step
# initialization of the output. Input to _step with default tap=-1.
outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
non_sequences=self.U, # static inputs to _step
truncate_gradient=self.truncate_gradient)
if self.return_sequences:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"init": self.init.__name__,
"inner_init": self.inner_init.__name__,
"activation": self.activation.__name__,
"truncate_gradient": self.truncate_gradient,
"return_sequences": self.return_sequences}
class SimpleDeepRNN(Recurrent):
'''
Fully connected RNN where the output of multiple timesteps
(up to "depth" steps in the past) is fed back to the input:
output = activation( W.x_t + b + inner_activation(U_1.h_tm1) + inner_activation(U_2.h_tm2) + ... )
This demonstrates how to build RNNs with arbitrary lookback.
Also (probably) not a super useful model.
'''
def __init__(self, input_dim, output_dim, depth=3,
init='glorot_uniform', inner_init='orthogonal',
activation='sigmoid', inner_activation='hard_sigmoid',
weights=None, truncate_gradient=-1, return_sequences=False):
super(SimpleDeepRNN, self).__init__()
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.depth = depth
self.return_sequences = return_sequences
self.input = T.tensor3()
self.W = self.init((self.input_dim, self.output_dim))
self.Us = [self.inner_init((self.output_dim, self.output_dim)) for _ in range(self.depth)]
self.b = shared_zeros((self.output_dim))
self.params = [self.W] + self.Us + [self.b]
if weights is not None:
self.set_weights(weights)
def _step(self, x_t, *args):
o = x_t
for i in range(self.depth):
mask_tmi = args[i]
h_tmi = args[i + self.depth]
U_tmi = args[i + 2*self.depth]
o += mask_tmi*self.inner_activation(T.dot(h_tmi, U_tmi))
return self.activation(o)
def get_output(self, train=False):
X = self.get_input(train)
padded_mask = self.get_padded_shuffled_mask(train, X, pad=self.depth)
X = X.dimshuffle((1, 0, 2))
x = T.dot(X, self.W) + self.b
if self.depth == 1:
initial = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
else:
initial = T.unbroadcast(T.unbroadcast(alloc_zeros_matrix(self.depth, X.shape[1], self.output_dim), 0), 2)
outputs, updates = theano.scan(
self._step,
sequences=[x, dict(
input=padded_mask,
taps=[(-i) for i in range(self.depth)]
)],
outputs_info=[dict(
initial=initial,
taps=[(-i-1) for i in range(self.depth)]
)],
non_sequences=self.Us,
truncate_gradient=self.truncate_gradient
)
if self.return_sequences:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"depth": self.depth,
"init": self.init.__name__,
"inner_init": self.inner_init.__name__,
"activation": self.activation.__name__,
"inner_activation": self.inner_activation.__name__,
"truncate_gradient": self.truncate_gradient,
"return_sequences": self.return_sequences}
class GRU(Recurrent):
'''
Gated Recurrent Unit - Cho et al. 2014
|
nivekkagicom/uncrustify
|
tests/test_uncrustify/utilities.py
|
Python
|
gpl-2.0
| 7,537 | 0.000133 |
# Logic for listing and running tests.
#
# * @author Ben Gardner October 2009
# * @author Guy Maurel October 2015
# * @author Matthew Woehlke June 2018
#
import argparse
import os
import subprocess
import sys
from .ansicolor import printc
from .config import config, all_tests, FAIL_ATTRS, PASS_ATTRS, SKIP_ATTRS
from .failure import (Failure, MismatchFailure, UnexpectedlyPassingFailure,
UnstableFailure)
from .test import FormatTest
# -----------------------------------------------------------------------------
def _add_common_arguments(parser):
parser.add_argument('-c', '--show-commands', action='store_true',
help='show commands')
parser.add_argument('-v', '--verbose', action='store_true',
help='show detailed test information')
parser.add_argument('-d', '--diff', action='store_true',
help='show diff on failure')
parser.add_argument('-x', '--xdiff', action='store_true',
help='show diff on expected failure')
parser.add_argument('-g', '--debug', action='store_true',
help='generate debug files (.log, .unc)')
parser.add_argument('-e', '--executable', type=str, required=True,
metavar='PATH',
help='uncrustify executable to test')
parser.add_argument('--git', type=str, default=config.git_exe,
metavar='PATH',
help='git executable to use to generate diffs')
parser.add_argument('--result-dir', type=str, default=os.getcwd(),
metavar='DIR',
help='location to which results will be written')
# -----------------------------------------------------------------------------
def add_test_arguments(parser):
_add_common_arguments(parser)
parser.add_argument("name", type=str, metavar='NAME')
parser.add_argument("--lang", type=str, required=True)
parser.add_argument("--input", type=str, required=True)
parser.add_argument("--config", type=str, required=True)
parser.add_argument("--expected", type=str, required=True)
parser.add_argument("--rerun-config", type=str, metavar='INPUT')
parser.add_argument("--rerun-expected", type=str, metavar='CONFIG')
parser.add_argument("--xfail", action='store_true')
# -----------------------------------------------------------------------------
def add_source_tests_arguments(parser):
_add_common_arguments(parser)
parser.add_argument('-p', '--show-all', action='store_true',
help='show passed/skipped tests')
# ---------------------------------------------
|
--------------------------------
def add_format_tests_arguments(parser):
_add_common_arguments(parser)
parser.add_argu
|
ment('-p', '--show-all', action='store_true',
help='show passed/skipped tests')
parser.add_argument('-r', '--select', metavar='CASE(S)', type=str,
help='select tests to be executed')
parser.add_argument('tests', metavar='TEST', type=str, nargs='*',
default=all_tests,
help='test(s) to run (default all)')
# Arguments for generating the CTest script; users should not use these
# directly
parser.add_argument("--write-ctest", type=str, help=argparse.SUPPRESS)
parser.add_argument("--cmake-config", type=str, help=argparse.SUPPRESS)
parser.add_argument("--python", type=str, help=argparse.SUPPRESS)
# -----------------------------------------------------------------------------
def parse_args(parser):
args = parser.parse_args()
if args.git is not None:
config.git_exe = args.git
config.uncrustify_exe = args.executable
if not os.path.exists(config.uncrustify_exe):
msg = 'Specified uncrustify executable {!r} does not exist'.format(
config.uncrustify_exe)
printc("FAILED: ", msg, **FAIL_ATTRS)
sys.exit(-1)
# Do a sanity check on the executable
try:
with open(os.devnull, 'w') as bitbucket:
subprocess.check_call([config.uncrustify_exe, '--help'],
stdout=bitbucket)
except Exception as exc:
msg = ('Specified uncrustify executable {!r} ' +
'does not appear to be usable: {!s}').format(
config.uncrustify_exe, exc)
printc("FAILED: ", msg, **FAIL_ATTRS)
sys.exit(-1)
return args
# -----------------------------------------------------------------------------
def run_tests(tests, args, selector=None):
pass_count = 0
fail_count = 0
mismatch_count = 0
unstable_count = 0
unexpectedly_passing_count = 0
for test in tests:
if selector is not None and not selector.test(test.test_name):
if args.show_all:
printc("SKIPPED: ", test.test_name, **SKIP_ATTRS)
continue
try:
test.run(args)
if args.show_all:
outcome = 'XFAILED' if test.test_xfail else 'PASSED'
printc('{}: '.format(outcome), test.test_name, **PASS_ATTRS)
pass_count += 1
except UnstableFailure:
unstable_count += 1
except MismatchFailure:
mismatch_count += 1
except UnexpectedlyPassingFailure:
unexpectedly_passing_count += 1
except Failure:
fail_count += 1
return {
'passing': pass_count,
'failing': fail_count,
'mismatch': mismatch_count,
'unstable': unstable_count,
'xpass': unexpectedly_passing_count
}
# -----------------------------------------------------------------------------
def report(counts):
total = sum(counts.values())
print('{passing} / {total} tests passed'.format(total=total, **counts))
if counts['failing'] > 0:
printc('{failing} tests failed to execute'.format(**counts),
**FAIL_ATTRS)
if counts['mismatch'] > 0:
printc(
'{mismatch} tests did not match the expected output'.format(
**counts),
**FAIL_ATTRS)
if counts['unstable'] > 0:
printc('{unstable} tests were unstable'.format(**counts),
**FAIL_ATTRS)
if counts['xpass'] > 0:
printc('{xpass} tests passed but were expected to fail'
.format(**counts), **FAIL_ATTRS)
# -----------------------------------------------------------------------------
def read_format_tests(filename, group):
tests = []
print("Processing " + filename)
with open(filename, 'rt') as f:
for line_number, line in enumerate(f, 1):
line = line.strip()
if not len(line):
continue
if line.startswith('#'):
continue
test = FormatTest()
test.build_from_declaration(line, group, line_number)
tests.append(test)
return tests
# -----------------------------------------------------------------------------
def fixup_ctest_path(path, config):
if config is None:
return path
dirname, basename = os.path.split(path)
if os.path.basename(dirname).lower() == config.lower():
dirname, junk = os.path.split(dirname)
return os.path.join(dirname, '${CTEST_CONFIGURATION_TYPE}', basename)
return path
|
AllTheWayDown/turgles
|
turgles/tests/test_buffers.py
|
Python
|
mit
| 11,668 | 0.000086 |
import itertools
from unittest import TestCase
from turgles.buffer import ChunkBuffer, ShapeBuffer, BufferManager
from turgles.memory import TURTLE_MODEL_DATA_SIZE, TURTLE_COLOR_DATA_SIZE
MODEL_ZEROS = [0] * TURTLE_MODEL_DATA_SIZE
MODEL_ONES = [1] * TURTLE_MODEL_DATA_SIZE
MODEL_TWOS = [2] * TURTLE_MODEL_DATA_SIZE
MODEL_THREES = [3] * TURTLE_MODEL_DATA_SIZE
COLOR_ZEROS = [0] * TURTLE_COLOR_DATA_SIZE
COLOR_ONES = [1] * TURTLE_COLOR_DATA_SIZE
COLOR_TWOS = [2] * TURTLE_COLOR_DATA_SIZE
COLOR_THREES = [3] * TURTLE_COLOR_DATA_SIZE
class ChunkBufferTestCase(TestCase):
def assert_turtle_data(self, buffer, index, data):
offset = index * TURTLE_MODEL_DATA_SIZE
slice = buffer.data[offset:offset + TURTLE_MODEL_DATA_SIZE]
self.assertEqual(list(slice), data)
def test_sized_correctly(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
self.assertEqual(len(buffer.data), 4 * TURTLE_MODEL_DATA_SIZE)
self.assertEqual(buffer.count, 0)
self.assertEqual(buffer.size, 4)
def test_new(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
data = buffer.new()
self.assertEqual(len(data), TURTLE_MODEL_DATA_SIZE)
self.assertEqual(list(data), MODEL_ZEROS)
self.assertEqual(buffer.count, 1)
def test_new_with_init(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
init = list(reversed(range(TURTLE_MODEL_DATA_SIZE)))
data = buffer.new(init)
self.assertEqual(len(data), TURTLE_MODEL_DATA_SIZE)
self.assertEqual(list(data), init)
def test_mutlple_new(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
buffer.new()
self.assertEqual(buffer.count, 1)
buffer.new()
self.assertEqual(buffer.count, 2)
def test_new_triggers_resize(self):
buffer = ChunkBuffer(2, TURTLE_MODEL_DATA_SIZE)
buffer.new()
buffer.new()
self.assertEqual(buffer.size, 2)
self.assertEqual(buffer.count, 2)
buffer.new()
self.assertEqual(buffer.size, 4)
self.assertEqual(buffer.count, 3)
def test_resize(self):
buffer = ChunkBuffer(2, TURTLE_MODEL_DATA_SIZE)
buffer.new(MODEL_ONES)
buffer.new(MODEL_TWOS)
buffer.resize(4)
self.assertEqual(buffer.size, 4)
self.assertEqual(len(buffer.data), 4 * TURTLE_MODEL_DATA_SIZE)
self.assert_turtle_data(buffer, 0, MODEL_ONES)
self.assert_turtle_data(buffer, 1, MODEL_TWOS)
self.assert_turtle_data(buffer, 2, MODEL_ZEROS)
self.assert_turtle_data(buffer, 3, MODEL_ZEROS)
def test_remove_end(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
buffer.new(MODEL_ONES)
buffer.new(MODEL_TWOS)
buffer.new(MODEL_THREES)
moved = buffer.remove(2)
self.assertEqual(buffer.count, 2)
self.assertIsNone(moved)
self.assert_turtle_data(buffer, 0, MODEL_ONES)
self.assert_turtle_data(buffer, 1, MODEL_TWOS)
self.assert_turtle_data(buffer, 2, MODEL_ZEROS)
def test_remove_start(self):
|
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
buffer.new(MODEL_ONES)
buffer.new(MODEL_TWOS)
buffer.new(MODEL_THREES)
moved = buffer.remove(0)
self.assertEqual(buffer.count, 2)
self.assertEqual(moved, 2)
self.assert_turtle_data(buffer, 0, MODEL_THREES)
self.assert_turtle_data(buffer, 1, MODEL_TWOS)
self.assert_turtle_data(buffer, 2, MODEL_ZEROS)
def
|
test_remove_middle(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
buffer.new(MODEL_ONES)
buffer.new(MODEL_TWOS)
buffer.new(MODEL_THREES)
moved = buffer.remove(1)
self.assertEqual(buffer.count, 2)
self.assertEqual(moved, 2)
self.assert_turtle_data(buffer, 0, MODEL_ONES)
self.assert_turtle_data(buffer, 1, MODEL_THREES)
self.assert_turtle_data(buffer, 2, MODEL_ZEROS)
def test_remove_then_add(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
buffer.new(MODEL_ONES)
buffer.new(MODEL_TWOS)
buffer.new(MODEL_THREES)
buffer.remove(2)
self.assertEqual(buffer.count, 2)
# check data was zeroed
self.assert_turtle_data(buffer, 2, MODEL_ZEROS)
buffer.new([4] * TURTLE_MODEL_DATA_SIZE)
self.assertEqual(buffer.count, 3)
# check reuses previously removed turtle's space
self.assert_turtle_data(buffer, 2, [4] * TURTLE_MODEL_DATA_SIZE)
def make_slices(self, size, array_size=20):
buffer = ChunkBuffer(array_size, TURTLE_MODEL_DATA_SIZE)
for i in range(array_size):
buffer.new([i+1] * TURTLE_MODEL_DATA_SIZE)
return buffer.slice(size)
def test_slice_size_multiple(self):
slices = self.make_slices(10, 20)
size, slice = next(slices)
self.assertEqual(size, 10)
self.assertEqual(
list(slice[0:TURTLE_MODEL_DATA_SIZE]),
[1] * TURTLE_MODEL_DATA_SIZE
)
size, slice = next(slices)
self.assertEqual(size, 10)
self.assertEqual(
list(slice[0:TURTLE_MODEL_DATA_SIZE]),
[11] * TURTLE_MODEL_DATA_SIZE
)
with self.assertRaises(StopIteration):
next(slices)
def test_slice_size_remainder(self):
slices = self.make_slices(15, 20)
size, slice = next(slices)
self.assertEqual(size, 15)
self.assertEqual(
list(slice[0:TURTLE_MODEL_DATA_SIZE]),
[1] * TURTLE_MODEL_DATA_SIZE
)
size, slice = next(slices)
self.assertEqual(size, 5)
self.assertEqual(
list(slice[0:TURTLE_MODEL_DATA_SIZE]),
[16] * TURTLE_MODEL_DATA_SIZE
)
with self.assertRaises(StopIteration):
next(slices)
def test_slice_size_only_one(self):
slices = self.make_slices(20, 10)
size, slice = next(slices)
self.assertEqual(size, 10)
self.assertEqual(
list(slice[0:TURTLE_MODEL_DATA_SIZE]),
[1] * TURTLE_MODEL_DATA_SIZE
)
with self.assertRaises(StopIteration):
next(slices)
class ShapeBufferTestCase(TestCase):
def assert_id_map(self, buffer, id, index):
self.assertIn(id, buffer.id_to_index)
self.assertIn(index, buffer.index_to_id)
self.assertEqual(buffer.id_to_index[id], index)
self.assertEqual(buffer.index_to_id[index], id)
def assert_turtle_data(self, buffer, id, index, model, color):
if id:
self.assert_id_map(buffer, id, index)
model_data = buffer.model.get(index)
color_data = buffer.color.get(index)
self.assertEqual(list(model_data), model)
self.assertEqual(list(color_data), color)
def test_new(self):
buffer = ShapeBuffer('shape', 4)
model, color = buffer.new(0)
self.assert_turtle_data(buffer, 0, 0, MODEL_ZEROS, COLOR_ZEROS)
self.assertEqual(buffer.count, 1)
def test_new_bad_id(self):
buffer = ShapeBuffer('shape', 4)
buffer.new(0)
with self.assertRaises(AssertionError):
buffer.new(0)
def test_new_with_init(self):
buffer = ShapeBuffer('shape', 4)
model, color = buffer.new(0, MODEL_ONES, COLOR_TWOS)
self.assert_turtle_data(buffer, 0, 0, MODEL_ONES, COLOR_TWOS)
def test_mutlple_new(self):
buffer = ShapeBuffer('shape', 4)
buffer.new(0)
self.assert_id_map(buffer, 0, 0)
self.assertEqual(buffer.count, 1)
buffer.new(1)
self.assert_id_map(buffer, 1, 1)
self.assertEqual(buffer.count, 2)
def test_remove_id_end(self):
buffer = ShapeBuffer('shape', 4)
buffer.new(0, MODEL_ONES, COLOR_ONES)
buffer.new(1, MODEL_TWOS, COLOR_TWOS)
buffer.new(2, MODEL_THREES, COLOR_THREES)
self.assert_turtle_data(buffer, 2, 2, MODEL_THREES, COLOR_THREES)
buffer.remove(2)
self.assertEqual(buffer.count, 2)
self.assert_turtle_data(buffer, 0, 0, MOD
|
kailIII/emaresa
|
trunk.pe/account_financial_report/wizard/__init__.py
|
Python
|
agpl-3.0
| 1,490 | 0.000671 |
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
###############Credits######################################################
# Coded by: Humberto Arocha [email protected]
# Angelica Barrios [email protected]
# Jordi Esteve <[email protected]>
# Planified by: Humberto Arocha
# Finance by: LUBCAN COL S.A.S http://www.lubcancol.com
# Audited by: Humberto Arocha [email protected]
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import wizard
|
pplu/botocore
|
tests/__init__.py
|
Python
|
apache-2.0
| 17,369 | 0.000058 |
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import mock
import time
import random
import shutil
import contextlib
import tempfile
import binascii
import platform
import select
import datetime
from io import BytesIO
from subprocess import Popen, PIPE
from dateutil.tz import tzlocal
import unittest
from nose.tools import assert_equal
import botocore.loaders
import botocore.session
from botocore.awsrequest import AWSResponse
from botocore.compat import (
parse_qs, six, urlparse, HAS_CRT
)
from botocore import utils
from botocore import credentials
from botocore.stub import Stubber
_LOADER = botocore.loaders.Loader()
def skip_unless_has_memory_collection(cls):
"""Class decorator to skip tests that require memory collection.
Any test that uses memory collection (such as the resource leak tests)
can decorate their class with skip_unless_has_memory_collection to
indicate that if the platform does not support memory collection
the tests should be skipped.
"""
if platform.system() not in ['Darwin', 'Linux']:
return unittest.skip('Memory tests only supported on mac/linux.')(cls)
return cls
def skip_if_windows(reason):
    """Decorator to skip tests that should not be run on windows.

    Example usage:

        @skip_if_windows("Not valid")
        def test_some_non_windows_stuff(self):
            self.assertEqual(...)
    """
    def decorator(func):
        on_posix = platform.system() in ('Darwin', 'Linux')
        return unittest.skipIf(not on_posix, reason)(func)
    return decorator
def requires_crt(reason=None):
    """Skip the decorated test unless the AWS CRT (awscrt) is installed."""
    # ``is None`` (not truthiness) so an explicit empty reason is honoured.
    if reason is None:
        reason = "Test requires awscrt to be installed"

    def decorator(func):
        return unittest.skipIf(not HAS_CRT, reason)(func)
    return decorator
def random_chars(num_chars):
    """Return *num_chars* random lowercase hex characters.

    Useful for creating resources with random names.  Note that an odd
    ``num_chars`` is rounded down to an even length (whole bytes).
    """
    num_bytes = int(num_chars / 2)
    return binascii.hexlify(os.urandom(num_bytes)).decode('ascii')
def create_session(**kwargs):
    """Create a botocore Session for tests.

    The shared module-level ``_LOADER`` is registered so every test reuses
    the same cached service models, and the credentials file is pointed at
    a nonexistent path so local AWS configuration cannot leak in.
    """
    session = botocore.session.Session(**kwargs)
    session.register_component('data_loader', _LOADER)
    session.set_config_variable('credentials_file', 'noexist/foo/botocore')
    return session
@contextlib.contextmanager
def temporary_file(mode):
    """Yield an open temp file that can be reopened and reread by name.

    ``tempfile.NamedTemporaryFile`` on windows creates a secure temp file
    that can't be read by other processes and can't be opened a second
    time; tests generally *want* to write the fixture and read it back.
    So create a throwaway directory and put an ordinary file inside it.
    """
    tmp_dir = tempfile.mkdtemp()
    name = 'tmpfile-%s-%s' % (int(time.time()), random.randint(1, 1000))
    path = os.path.join(tmp_dir, name)
    open(path, 'w').close()
    try:
        with open(path, mode) as f:
            yield f
    finally:
        shutil.rmtree(tmp_dir)
class BaseEnvVar(unittest.TestCase):
    """TestCase that isolates ``os.environ``.

    ``setUp`` patches the real environment with an empty dict exposed as
    ``self.environ``; tests mutate that freely.  ``tearDown`` restores
    the original environment automatically.
    """

    def setUp(self):
        # Automatically patches out os.environ for you
        # and gives you a self.environ attribute that simulates
        # the environment. Also will automatically restore state
        # for you in tearDown()
        self.environ = {}
        self.environ_patch = mock.patch('os.environ', self.environ)
        self.environ_patch.start()

    def tearDown(self):
        self.environ_patch.stop()
class BaseSessionTest(BaseEnvVar):
    """Base class used to provide credentials.

    This class can be used as a base class that want to use a real
    session class but want to be completely isolated from the
    external environment (including environment variables).

    This class will also set credential vars so you can make fake
    requests to services.
    """

    def setUp(self, **environ):
        super(BaseSessionTest, self).setUp()
        # Fake credentials plus a nonexistent config file keep any real
        # AWS configuration from leaking into the isolated session.
        self.environ['AWS_ACCESS_KEY_ID'] = 'access_key'
        self.environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
        self.environ['AWS_CONFIG_FILE'] = 'no-exist-foo'
        self.environ.update(environ)
        self.session = create_session()
        self.session.config_filename = 'no-exist-foo'
@skip_unless_has_memory_collection
class BaseClientDriverTest(unittest.TestCase):
    """Drives the cmd-runner subprocess and records its memory usage.

    Thin pass-throughs to a ``ClientDriver`` instance; see that class for
    the command protocol.
    """

    # Set True in subclasses whose commands need (fake) AWS credentials.
    INJECT_DUMMY_CREDS = False

    def setUp(self):
        self.driver = ClientDriver()
        env = None
        if self.INJECT_DUMMY_CREDS:
            env = {'AWS_ACCESS_KEY_ID': 'foo',
                   'AWS_SECRET_ACCESS_KEY': 'bar'}
        self.driver.start(env=env)

    def cmd(self, *args):
        self.driver.cmd(*args)

    def send_cmd(self, *args):
        self.driver.send_cmd(*args)

    def record_memory(self):
        self.driver.record_memory()

    @property
    def memory_samples(self):
        return self.driver.memory_samples

    def tearDown(self):
        self.driver.stop()
class ClientDriver(object):
CLIENT_SERVER = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'cmd-runner'
)
    def __init__(self):
        # Subprocess handle (set by start()) and recorded RSS samples (bytes).
        self._popen = None
        self.memory_samples = []
def _get_memory_with_ps(self, pid):
# It would be better to eventually switch to psutil,
# which should allow us to test on windows, but for now
# we'll just use ps and run on POSIX platforms.
command_list = ['ps', '-p', str(pid), '-o', 'rss']
p = Popen(command_list, stdout=PIPE)
stdout = p.communicate()[0]
if not p.returncode == 0:
raise RuntimeError("Could not retrieve memory")
else:
# Get the RSS from output that looks like this:
# RSS
# 4496
return int(stdout.splitlines()[1].split()[0]) * 1024
    def record_memory(self):
        """Sample the runner's current RSS and append it to memory_samples."""
        mem = self._get_memory_with_ps(self._popen.pid)
        self.memory_samples.append(mem)
    def start(self, env=None):
        """Start up the command runner process.

        ``env``, when given, fully replaces the child's environment
        (it is passed straight through to ``Popen``).
        """
        self._popen = Popen([sys.executable, self.CLIENT_SERVER],
                            stdout=PIPE, stdin=PIPE, env=env)
    def stop(self):
        """Shutdown the command runner process."""
        # 'exit' asks the runner to terminate itself; wait() reaps it.
        self.cmd('exit')
        self._popen.wait()
    def send_cmd(self, *cmd):
        """Send a command and return immediately.

        This is a lower level method than cmd().
        This method will instruct the cmd-runner process
        to execute a command, but this method will
        immediately return.  You will need to use
        ``is_cmd_finished()`` to check that the command
        is finished.

        This method is useful if you want to record attributes
        about the process while an operation is occurring.  For
        example, if you want to instruct the cmd-runner process
        to upload a 1GB file to S3 and you'd like to record
        the memory during the upload process, you can use
        send_cmd() instead of cmd().
        """
        # The runner reads newline-terminated UTF-8 commands from stdin.
        cmd_str = ' '.join(cmd) + '\n'
        cmd_bytes = cmd_str.encode('utf-8')
        self._popen.stdin.write(cmd_bytes)
        self._popen.stdin.flush()
def is_cmd_finished(self):
rlist = [self._popen.stdout.fileno()]
result = select.select(rlist, [], [], 0.01)
if result[0]:
return True
return False
def cmd(self, *cmd):
"""Send a command and block until it finishes.
This method will send a command to the cmd-runner process
to run. It will block until the cmd-r
|
simodalla/pygmount
|
pygmount/utils/utils.py
|
Python
|
bsd-3-clause
| 2,197 | 0 |
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals, absolute_import
import os
import platform
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
def get_sudo_username():
    """Return ``(was_sudo, username)``.

    ``was_sudo`` is True when running under sudo (``SUDO_USER`` present in
    the environment), in which case the invoking user's name is returned;
    otherwise False together with the current ``USER``.

    Return tuple.
    """
    sudo_user = os.environ.get('SUDO_USER')
    if sudo_user is not None:
        return True, sudo_user
    return False, os.environ['USER']
def get_home_dir():
    """Return the platform-specific parent directory of user homes.

    Raises:
        Exception: when the platform cannot be identified.

    Return string.
    """
    roots = {
        'linux': '/home/',
        'darwin': '/Users/',
        'windows': 'C:/Documents...',
    }
    system = platform.system().lower()
    if system in roots:
        return roots[system]
    raise Exception("Impossibile individuare il tipo di sistema")
def read_config(filename=None):
    """Parse an .ini share-definition file.

    Each section describes one share.  A ``hostname`` value of the form
    ``user[:password]@host`` is split (on the *last* ``@``) into separate
    ``username``, ``password`` and ``hostname`` entries; surrounding double
    quotes are stripped from the credentials.

    Keyword arguments:
    filename -- the path of config filename (default None)

    Return list of dicts, one per share.
    """
    if not os.path.exists(filename):
        raise IOError('Impossibile trovare il filename %s' % filename)
    config = ConfigParser()
    config.read(filename)
    shares = []
    for section in config.sections():
        share = {}
        for key, value in config.items(section):
            if key == 'hostname' and '@' in value:
                # Split on the last '@' so passwords may themselves
                # contain '@'.
                credentials, hostname = value.rsplit('@', 1)
                share[key] = hostname
                creds = [c.strip('"') for c in credentials.split(':', 1)]
                share['username'] = creds[0]
                if len(creds) > 1:
                    share['password'] = creds[1]
                continue
            share[key] = value
        shares.append(share)
    return shares
|
hydralabs/plasma
|
plasma/test/util.py
|
Python
|
mit
| 354 | 0 |
# Copyright The Plasma Project.
# See LICENSE.txt for details.
"""
Test helpers and classes
"""
import inspect
def dict_for_slots(obj):
    """Map every ``__slots__`` attribute of *obj* -- including slots
    declared on base classes -- to its current value."""
    slots = []
    for klass in inspect.getmro(obj.__class__):
        slots.extend(getattr(klass, '__slots__', ()))
    return {name: getattr(obj, name) for name in slots}
|
gloryofrobots/obin
|
arza/runtime/routine/routine.py
|
Python
|
gpl-2.0
| 2,553 | 0.001567 |
from arza.misc.platform import jit
from arza.types.space import isany
from arza.runtime import error
from arza.runtime.routine.code_routine import CodeRoutine
from arza.runtime.routine.native_routine import NativeRoutine
from arza.types import api, space
def complete_or_interrupt_native_routine(func):
    """Wrap a native routine so its result completes the routine, unless
    the result is an interrupt (completion is then deferred)."""
    def func_wrapper(process, routine):
        result = func(process, routine)
        assert isany(result)
        # An interrupt result means the routine will be completed later.
        if not space.isinterrupt(result) and not routine.is_closed():
            routine.complete(process, result)

    return func_wrapper
def complete_native_routine(func):
    """Wrap a native routine so its (non-void) result always completes it."""
    def func_wrapper(process, routine):
        result = func(process, routine)
        assert space.isany(result), (func, routine.args(), result)
        assert not space.isvoid(result), (func, routine.args(), result)
        if routine.is_closed():
            return
        routine.complete(process, result)

    return func_wrapper
def create_native_routine(stack, name, native, args, arity):
    # Thin factory, kept for API symmetry with the other create_* helpers.
    return NativeRoutine(stack, name, native, args, arity)
def create_callback_routine(stack, on_result, on_complete, function, args):
    """Build a CallbackRoutine; the import is local to avoid a circular
    module dependency."""
    from arza.runtime.routine.callback_routine import CallbackRoutine
    return CallbackRoutine(stack, on_result, on_complete, function, args)
def create_module_routine(name, stack, code, env):
    # space.newvoid() takes the place of the function object passed in
    # create_function_routine -- module bodies have no function.
    return jit.promote(CodeRoutine(space.newvoid(), stack, None, name, code, env))
def create_function_routine(stack, func, args, outer_env):
    """Create the routine that executes *func* with *args* in a fresh
    environment chained to *outer_env*."""
    code = func.bytecode
    scope = code.scope
    name = func.name
    env = create_function_environment(func, scope, args, outer_env)
    routine = jit.promote(CodeRoutine(func, stack, args, name, code, env))
    return routine
def create_function_environment(func, scope, args, outer_env):
    """Validate the argument count against *scope* and return a new env.

    A variadic scope reserves its final declared argument for the rest
    list, so only a lower bound is enforced in that case.
    """
    declared_args_count = scope.arg_count if not scope.is_variadic else scope.arg_count - 1
    args_count = api.length_i(args)
    if not scope.is_variadic:
        if args_count != declared_args_count:
            return error.throw_5(error.Errors.INVALID_ARG_COUNT_ERROR,
                                 space.newint(args_count), space.newstring(u"!="), space.newint(declared_args_count),
                                 func, args)
    if args_count < declared_args_count:
        # NOTE(review): this branch passes func.name where the branch above
        # passes func -- looks inconsistent; confirm which is intended.
        return error.throw_5(error.Errors.INVALID_ARG_COUNT_ERROR,
                             space.newint(args_count), space.newstring(u"<"), space.newint(declared_args_count),
                             func.name, args)
    env = space.newenv(func.name, scope, outer_env)
    return env
|
ercchy/coding-events
|
web/processors/user.py
|
Python
|
mit
| 2,185 | 0.026545 |
from django.contrib.auth.models import User
from django_countries import countries
def get_user(user_id):
    """Fetch the User with primary key *user_id*."""
    user = User.objects.get(id=user_id)
    return user
def get_user_profile(user_id):
    """Fetch the profile attached to the user with primary key *user_id*."""
    # Reuse get_user rather than duplicating the same queryset lookup.
    return get_user(user_id).profile
def get_ambassadors(country_code=None):
    """Profiles of all users in the ``ambassadors`` group, oldest first,
    optionally restricted to one country."""
    members = User.objects.filter(
        groups__name='ambassadors').order_by('date_joined')
    return [member.profile for member in members
            if not country_code or member.profile.country == country_code]
def get_main_ambassadors(country_code=None):
    """Profiles of users in the ``main`` group, oldest first, optionally
    restricted to one country."""
    members = User.objects.filter(
        groups__name='main').order_by('date_joined')
    return [member.profile for member in members
            if not country_code or member.profile.country == country_code]
def get_not_main_ambassadors(country_code=None):
    """Profiles of ``ambassadors`` who are not also in ``main``, oldest
    first, optionally restricted to one country."""
    members = (User.objects.filter(groups__name='ambassadors')
               .exclude(groups__name='main')
               .order_by('date_joined'))
    return [member.profile for member in members
            if not country_code or member.profile.country == country_code]
def get_ambassadors_for_countries():
    """Return [(code, name, ambassadors, main_ambassadors), ...] for every
    country, sorted by country code.

    The first two CUSTOM_COUNTRY_ENTRIES in ``countries`` are skipped.
    """
    # Removed an unused ``get_ambassadors()`` call that fetched the whole
    # ambassador list from the database and threw the result away.
    countries_ambassadors = []
    # list countries minus two CUSTOM_COUNTRY_ENTRIES
    for code, name in list(countries)[2:]:
        readable_name = unicode(name)
        main_ambassadors = get_main_ambassadors(code)
        found_ambassadors = get_not_main_ambassadors(code)
        countries_ambassadors.append(
            (code, readable_name, found_ambassadors, main_ambassadors))
    # Codes are unique, so sorting compares only the first tuple element.
    countries_ambassadors.sort()
    return countries_ambassadors
def get_ambassadors_for_country(country):
    """Queryset of ambassador Users whose profile country is *country*."""
    ambassadors = User.objects.filter(groups__name='ambassadors', userprofile__country=country)
    return ambassadors
def update_user_email(user_id, new_email):
    """Set the user's email, persisting only that column, and return the user."""
    user = User.objects.get(id=user_id)
    user.email = new_email
    user.save(update_fields=["email"])
    return user
|
deviantenigma/Savegame-backup-Utility
|
ignore_fortestingpurposesonly.py
|
Python
|
gpl-3.0
| 1,345 | 0.007435 |
import tkinter
import string
from ctypes import windll
import os

# --- Earlier scratch experiments, kept for reference -------------------------
'''
for d in string.ascii_uppercase:
    if os.path.exists("%s:\\" % (d)):
        print("Drive letter '%s' is in use." % (d))
'''
'''
def get_drives():
    drives = []
    bitmask = windll.kernel32.GetLogicalDrives()
    for letter in string.ascii_uppercase:
        if bitmask & 1:
            drives.append(letter)
        bitmask >>= 1
    return drives
print(get_drives())
'''
'''
testLambda = (lambda boolInput: False if boolInput else True)
print(testLambda(True))
print(testLambda(False))
'''

top = tkinter.Tk()

# Trying out a Listbox in preparation for migrating game selection away from
# a pile of checkboxes, which do not scale well.

def click_button(event):
    """Print every currently-selected listbox value (listbox bind target)."""
    w = event.widget
    for idx in w.curselection():
        print(w.get(idx))

Lb1 = tkinter.Listbox(top, selectmode='extended')
# Lb1.bind('<<ListboxSelect>>', click_button)
for language in ("Python", "Perl", "C", "PHP", "JSP", "Ruby"):
    Lb1.insert(0, language)

def btnclick():
    """Print the selected values as one comma-separated line."""
    values = [Lb1.get(x) for x in Lb1.curselection()]
    print(', '.join(values))

onlybutton = tkinter.Button(text='test', command=btnclick)
Lb1.pack()
onlybutton.pack()
top.mainloop()
|
Canas/kaftools
|
examples/klms_pretrained.py
|
Python
|
mit
| 1,192 | 0.004195 |
import matplotlib.pyplot as plt
import numpy as np

from kaftools.filters import KlmsFilter
from kaftools.kernels import GaussianKernel
from kaftools.utils.shortcuts import plot_series, plot_squared_error
from kaftools.sparsifiers import NoveltyCriterion

if __name__ == "__main__":
    # Load the pre-trained Lorentz data set.
    data = np.load('./data/pretrained_data_lorentz.npz')

    # Sparsification settings that worked per data set:
    #   lorentz: lr(1e-2), novelty(0.99919, 1.0)
    #   wind:    lr(1e-2), novelty(0.9934, 1.0)

    # Configure KLMS with the pre-trained dictionary and coefficients;
    # the dictionary is frozen so fit() only adapts the estimate.
    klms_params = {
        'kernel': GaussianKernel(sigma=float(data['sigma_k_post'])),
        'learning_rate': 1e-1,
        'delay': int(data['delay']),
        #'sparsifiers': [NoveltyCriterion(0.99919, 1.0)]
        'coefs': data['a_post'],
        'dict': data['s_post'].T,
        'freeze_dict': True
    }

    # np.seterr(all='raise')
    klms = KlmsFilter(data['y_prog'], data['y_prog'])
    klms.fit(**klms_params)

    print(len(klms.support_vectors))
    plot_series(data['y_prog'], klms.estimate, markersize=1, linewidth=1, figsize=(15, 3))
    plot_squared_error(klms.error_history)

    import matplotlib.pyplot as plt
    #plt.semilogy(np.array(klms.error_history)**2)
    #plt.show()
|
redhat-cip/numeter
|
web-app/numeter_webapp/core/tests/group_restriction.py
|
Python
|
agpl-3.0
| 1,907 | 0.003146 |
from django.test import TestCase
from core.models import User, Group, Host
class Access_Test(TestCase):
    """Exercise User.has_access() over hosts, users and groups, for both a
    superuser (pk=1) and a simple user (pk=2) from the fixtures."""

    fixtures = ['test_users.json', 'test_groups', 'test_hosts.json']

    def test_superuser_can_all(self):
        """A superuser may access any host, user or group."""
        admin = User.objects.get(pk=1)
        self.assertTrue(admin.has_access(Host.objects.get(pk=1)),
                        "Superuser can't access to hosts.")
        self.assertTrue(admin.has_access(User.objects.get(pk=2)),
                        "Superuser can't access to users.")
        self.assertTrue(admin.has_access(Group.objects.get(pk=1)),
                        "Superuser can't access to groups.")

    def test_access_to_own_group(self):
        """A simple user may access his own group's objects and himself."""
        user = User.objects.get(pk=2)
        self.assertTrue(user.has_access(Host.objects.get(pk=1)),
                        "Simple user can't access to his hosts.")
        self.assertTrue(user.has_access(user),
                        "Simple user can't access to himself.")
        self.assertTrue(user.has_access(Group.objects.get(pk=1)),
                        "Simple user can't access to his groups.")

    def test_access_to_other_group(self):
        """A simple user must not reach another group's objects."""
        user = User.objects.get(pk=2)
        self.assertFalse(user.has_access(Host.objects.get(pk=2)),
                         "Simple user can access to other group's hosts.")
        self.assertFalse(user.has_access(User.objects.get(pk=3)),
                         "Simple user can access to users.")
        self.assertFalse(user.has_access(Group.objects.get(pk=2)),
                         "Simple user can access to others groups.")
|
shubhamshuklaer/compiler_main
|
main_window_ui.py
|
Python
|
gpl-3.0
| 1,476 | 0.001355 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_window.ui'
#
# Created: Wed Jan 7 16:57:08 2015
# by: PyQt5 UI code generator 5.2.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """UI scaffold generated by pyuic5 from main_window.ui (PyQt5 5.2.1).

    NOTE: regenerate from the .ui file rather than editing by hand; manual
    changes are lost on the next generation run.
    """

    def setupUi(self, MainWindow):
        """Build the widget tree onto *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.test_btn = QtWidgets.QPushButton(self.centralwidget)
        self.test_btn.setGeometry(QtCore.QRect(170, 100, 98, 27))
        self.test_btn.setObjectName("test_btn")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 25))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply the translatable strings."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.test_btn.setText(_translate("MainWindow", "Test btn"))
|
apache/incubator-airflow
|
airflow/providers/google/cloud/example_dags/example_translate_speech.py
|
Python
|
apache-2.0
| 3,199 | 0.00125 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os

from airflow import models
from airflow.providers.google.cloud.operators.text_to_speech import CloudTextToSpeechSynthesizeOperator
from airflow.providers.google.cloud.operators.translate_speech import CloudTranslateSpeechOperator
from airflow.utils import dates

# Resolved from the environment so the example can run against any
# project/bucket combination.
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
BUCKET_NAME = os.environ.get("GCP_TRANSLATE_SPEECH_TEST_BUCKET", "INVALID BUCKET NAME")

# [START howto_operator_translate_speech_gcp_filename]
FILENAME = "gcp-speech-test-file"
# [END howto_operator_translate_speech_gcp_filename]

# [START howto_operator_text_to_speech_api_arguments]
INPUT = {"text": "Sample text for demo purposes"}
VOICE = {"language_code": "en-US", "ssml_gender": "FEMALE"}
AUDIO_CONFIG = {"audio_encoding": "LINEAR16"}
# [END howto_operator_text_to_speech_api_arguments]

# [START howto_operator_translate_speech_arguments]
CONFIG = {"encoding": "LINEAR16", "language_code": "en_US"}
AUDIO = {"uri": f"gs://{BUCKET_NAME}/{FILENAME}"}
TARGET_LANGUAGE = 'pl'
FORMAT = 'text'
MODEL = 'base'
SOURCE_LANGUAGE = None  # type: None
# [END howto_operator_translate_speech_arguments]

with models.DAG(
    "example_gcp_translate_speech",
    schedule_interval='@once',  # Override to match your needs
    start_date=dates.days_ago(1),
    tags=['example'],
) as dag:
    # Synthesizes INPUT into gs://BUCKET_NAME/FILENAME; the translate
    # tasks below read that same object via AUDIO.
    text_to_speech_synthesize_task = CloudTextToSpeechSynthesizeOperator(
        project_id=GCP_PROJECT_ID,
        input_data=INPUT,
        voice=VOICE,
        audio_config=AUDIO_CONFIG,
        target_bucket_name=BUCKET_NAME,
        target_filename=FILENAME,
        task_id="text_to_speech_synthesize_task",
    )

    # [START howto_operator_translate_speech]
    translate_speech_task = CloudTranslateSpeechOperator(
        project_id=GCP_PROJECT_ID,
        audio=AUDIO,
        config=CONFIG,
        target_language=TARGET_LANGUAGE,
        format_=FORMAT,
        source_language=SOURCE_LANGUAGE,
        model=MODEL,
        task_id='translate_speech_task',
    )

    # Same call, but without an explicit project_id.
    translate_speech_task2 = CloudTranslateSpeechOperator(
        audio=AUDIO,
        config=CONFIG,
        target_language=TARGET_LANGUAGE,
        format_=FORMAT,
        source_language=SOURCE_LANGUAGE,
        model=MODEL,
        task_id='translate_speech_task2',
    )
    # [END howto_operator_translate_speech]
    text_to_speech_synthesize_task >> translate_speech_task >> translate_speech_task2
|
pelikanchik/edx-platform
|
lms/envs/cms/preview_dev.py
|
Python
|
agpl-3.0
| 430 | 0 |
"""
Settings for the LMS that runs alongside the CMS on AWS
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .dev import *

# Override the modulestore to the draft engine (DraftModuleStore);
# DOC_STORE_CONFIG and modulestore_options come from the .dev star import.
MODULESTORE = {
    'default': {
        'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
        'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
        'OPTIONS': modulestore_options
    },
}
|
teemulehtinen/a-plus
|
external_services/migrations/0011_menuitem_menu_url.py
|
Python
|
gpl-3.0
| 1,720 | 0.003488 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from urllib.parse import urljoin, urlsplit
from django.db import migrations, models
def forwards(apps, schema_editor):
    """Strip scheme+netloc from MenuItem.menu_url, making it relative.

    Aborts (before modifying any row) when a menu_url points at a different
    domain than its service's url, since such a url cannot be expressed
    relative to the service.
    """
    MenuItem = apps.get_model('external_services', 'MenuItem')
    items = (MenuItem.objects.all()
             .exclude(service=None)
             .exclude(menu_url=None)
             .exclude(menu_url=''))

    conflicting = []
    for item in items:
        menu_netloc = urlsplit(item.menu_url).netloc
        if menu_netloc and menu_netloc != urlsplit(item.service.url).netloc:
            conflicting.append(item)
    if conflicting:
        print()
        msg = ['Database is in inconsistent state.']
        for item in conflicting:
            msg.append(" MenuItem(pk=%s): %s <> %s" % (item.pk, item.menu_url, item.service.url))
        msg.append("For above menuitems, domain in MenuItem.menu_url doesn't match domain in MenuItem.service.url.")
        msg.append("Database is in inconsistent state. Manual fixing is required.")
        raise RuntimeError('\n'.join(msg))

    for item in items:
        relative = urlsplit(item.menu_url)._replace(scheme='', netloc='').geturl()
        item.menu_url = relative
        item.save(update_fields=['menu_url'])
def backwards(apps, schema_editor):
    """Re-absolutize every menu_url against its service url (reverse)."""
    MenuItem = apps.get_model('external_services', 'MenuItem')
    # NOTE(review): unlike forwards(), this iterates items with
    # service=None too, which would fail on item.service.url -- confirm.
    for item in MenuItem.objects.all():
        item.menu_url = urljoin(item.service.url, item.menu_url)
        item.save(update_fields=['menu_url'])
class Migration(migrations.Migration):

    # Data migration runs outside a single wrapping transaction; each row
    # update in forwards/backwards commits on its own.
    atomic = False

    dependencies = [
        ('external_services', '0010_auto_20180918_1916'),
    ]

    operations = [
        migrations.RunPython(forwards, backwards),
    ]
|
simphony/simphony-remote
|
remoteappmanager/docker/async_docker_client.py
|
Python
|
bsd-3-clause
| 2,391 | 0 |
from concurrent.futures import ThreadPoolExecutor
import docker
import functools
# Common threaded executor for asynchronous jobs.
# Required for the AsyncDockerClient to operate.
_executor = ThreadPoolExecutor(1)
class AsyncDockerClient:
    """Asynchronous facade over docker-py.

    Every docker-py Client method is exposed under the same name but
    returns a future (submitted to the shared single-thread executor)
    instead of the result; yield the future under tornado.

    This class is thread safe.  Note that all instances use the same
    module-level executor.
    """

    def __init__(self, *args, **kwargs):
        """Create the wrapped synchronous client.

        All arguments are forwarded verbatim to ``docker.Client``.
        """
        self._sync_client = docker.Client(*args, **kwargs)

    def __getattr__(self, attr):
        """Resolve unknown attributes to async-wrapped client methods.

        The returned callable must be used with the ``yield`` keyword.
        """
        if not hasattr(self._sync_client, attr):
            raise AttributeError(
                "'{}' object has no attribute '{}'".format(
                    type(self).__name__,
                    attr
                )
            )
        return functools.partial(self._submit_to_executor, attr)

    # Private

    def _submit_to_executor(self, method, *args, **kwargs):
        """Schedule *method* on the shared executor.

        Parameters
        ----------
        method : string
            A string containing a callable method
        *args, **kwargs:
            Arguments to the invoked method

        Return
        ------
        A future from the ThreadPoolExecutor.
        """
        return _executor.submit(self._invoke, method, *args, **kwargs)

    def _invoke(self, method, *args, **kwargs):
        """Run the named synchronous client method (executor thread)."""
        return getattr(self._sync_client, method)(*args, **kwargs)
|
PyBossa/pybossa
|
alembic/versions/7927d63d556_n_answers_migration.py
|
Python
|
agpl-3.0
| 1,841 | 0.008691 |
"""n_answers migration
Revision ID: 7927d63d556
Revises: 1eb5febf4842
Create Date: 2014-08-08 14:02:36.738460
Delete the "n_answers" field from the "info" attribute/column as it is not used.
"""
# revision identifiers, used by Alembic.
revision = '7927d63d556'
down_revision = '1eb5febf4842'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column, select, bindparam
import json
def upgrade():
    """Drop the unused "n_answers" key from every task's JSON info column."""
    task = table('task',
                 column('id'),
                 column('info'))
    conn = op.get_bind()
    update_values = []
    for row in conn.execute(select([task.c.id, task.c.info])):
        info = json.loads(row.info)
        # NOTE(review): a falsy n_answers (0, null) is left in place by
        # this .get() check -- confirm that is intended.
        if info.get('n_answers'):
            del info['n_answers']
        # Every row is rewritten, changed or not (matches prior behaviour).
        update_values.append({'task_id': row.id, 'new_info': json.dumps(info)})
    if update_values:
        task_update = (task.update()
                       .where(task.c.id == bindparam('task_id'))
                       .values(info=bindparam('new_info')))
        conn.execute(task_update, update_values)
def downgrade():
    """Restore "n_answers" into each task's JSON info from the column."""
    task = table('task',
                 column('id'),
                 column('info'),
                 column('n_answers'))
    conn = op.get_bind()
    update_values = []
    for row in conn.execute(select([task.c.id, task.c.info, task.c.n_answers])):
        info = json.loads(row.info)
        info['n_answers'] = row.n_answers
        update_values.append({'task_id': row.id, 'new_info': json.dumps(info)})
    if update_values:
        task_update = (task.update()
                       .where(task.c.id == bindparam('task_id'))
                       .values(info=bindparam('new_info')))
        conn.execute(task_update, update_values)
|
GoogleCloudPlatform/covid-19-open-data
|
src/pipelines/epidemiology/de_covid_19_germany_gae.py
|
Python
|
apache-2.0
| 2,120 | 0.000943 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from typing import Dict
from numpy import unique
from pandas import DataFrame
from lib.data_source import DataSource
class Covid19GermanyDataSource(DataSource):
    """Parses the covid-19-germany GAE wide-format time series."""

    def parse_dataframes(
        self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        """Flatten per-region columns into one record per (date, region).

        NOTE(review): despite the Dict annotation, this indexes
        ``dataframes[0]`` -- upstream apparently keys by position; confirm.
        """
        data = dataframes[0].rename(columns={"time_iso8601": "date"})

        # Normalise the ISO-8601 timestamps to plain ISO dates.
        data["date"] = data["date"].apply(
            lambda x: datetime.datetime.fromisoformat(x).date().isoformat()
        )

        # Region codes are embedded in column names such as "DE-BY_cases"
        # (two-letter codes, per the [3:5] slice).
        regions = unique([col[3:5] for col in data.columns if col.startswith("DE-")])

        # Wide -> long: one record per (date, region).
        records = []
        for idx, row in data.iterrows():
            for region_code in regions:
                records.append(
                    {
                        "subregion1_code": region_code,
                        "total_confirmed": row["DE-%s_cases" % region_code],
                        "total_deceased": row["DE-%s_deaths" % region_code],
                        "date": row["date"],
                    }
                )
        data = DataFrame.from_records(records)

        # Ensure we only take one record per (date, region).
        data = data.groupby(["date", "subregion1_code"]).last().reset_index()

        # Output the results
        data["country_code"] = "DE"
        return data
|
eduNEXT/edx-platform
|
lms/djangoapps/course_goals/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 1,064 | 0.00282 |
from django.db import migrations, models
from django.conf import settings
from opaque_keys.edx.django.models import CourseKeyField
class Migration(migrations.Migration):
    """Initial schema for course goals: at most one goal per (user, course)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='CourseGoal',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('course_key', CourseKeyField(max_length=255, db_index=True)),
                ('goal_key', models.CharField(default='unsure', max_length=100, choices=[('certify', 'Earn a certificate.'), ('complete', 'Complete the course.'), ('explore', 'Explore the course.'), ('unsure', 'Not sure yet.')])),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
            ],
        ),
        # A user may have at most one goal per course.
        migrations.AlterUniqueTogether(
            name='coursegoal',
            unique_together={('user', 'course_key')},
        ),
    ]
|
DoubleNegativeVisualEffects/gaffer
|
python/GafferUITest/ProgressBarTest.py
|
Python
|
bsd-3-clause
| 2,664 | 0.034535 |
##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import GafferUI
import GafferUITest
class ProgressBarTest( GafferUITest.TestCase ) :

    def testConstructor( self ) :

        # Default construction: full 0-100 range, zero progress, "%p%" text.
        bar = GafferUI.ProgressBar()
        self.assertEqual( bar.getRange(), ( 0, 100 ) )
        self.assertEqual( bar.getProgress(), 0 )
        self.assertEqual( bar.getText(), "%p%" )

        # Positional arguments are progress, range and text respectively.
        bar = GafferUI.ProgressBar( 10, ( 5, 15 ), "doing something %p%" )
        self.assertEqual( bar.getRange(), ( 5, 15 ) )
        self.assertEqual( bar.getProgress(), 10 )
        self.assertEqual( bar.getText(), "doing something %p%" )

    def testAccessors( self ) :

        # Each setter must be reflected by its matching getter.
        bar = GafferUI.ProgressBar()

        bar.setRange( ( 0, 20 ) )
        self.assertEqual( bar.getRange(), ( 0, 20 ) )

        bar.setProgress( 10 )
        self.assertEqual( bar.getProgress(), 10 )

        bar.setText( "woteva" )
        self.assertEqual( bar.getText(), "woteva" )
if __name__ == "__main__":
unittest.main()
|
riverrun/aiourlstatus
|
tests/parse_test.py
|
Python
|
gpl-3.0
| 990 | 0.008081 |
import unittest
from aiourlstatus import app
class TestEmpty(unittest.TestCase):
    """find_sort_urls behaviour on input that contains no URLs."""

    def test_no_urls(self):
        # An empty document yields an empty URL list and a zero count.
        data = ''
        urls, len_urls = app.find_sort_urls(data)
        self.assertEqual(urls, [])
        self.assertEqual(len_urls, 0)
class TestTXT(unittest.TestCase):
    """find_sort_urls behaviour on a plain-text fixture file."""

    def test_parse_text(self):
        # The fixture contains a known set of URLs; they come back grouped
        # by host, and assertCountEqual ignores the outer-list ordering.
        with open('tests/retest.txt') as f:
            data = f.read()
        urls, len_urls = app.find_sort_urls(data)
        url_list = [['http://en.wikipedia.org/wiki/Body_image', 'http://en.wikipedia.org/wiki/Identity_formation',
                     'http://en.wikipedia.org/wiki/Self-confidence', 'http://en.wikipedia.org/wiki/Self-esteem'],
                    ['http://www.bbc.com/sport/0/'], ['http://www.haskell.org/'], ['http://lxer.com/'],
                    ['http://www.find-happiness.com/definition-of-happiness.html'],
                    ['http://www.wikihow.com/Elevate-Your-Self-Esteem']]
        self.assertCountEqual(urls, url_list)
|
addition-it-solutions/project-all
|
addons/account_followup/account_followup.py
|
Python
|
agpl-3.0
| 28,847 | 0.010122 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import fields, osv
from lxml import etree
from openerp.tools.translate import _
from openerp.exceptions import UserError
class followup(osv.osv):
    """Follow-up configuration: one record (a set of levels) per company."""
    _name = 'account_followup.followup'
    _description = 'Account Follow-up'
    _rec_name = 'name'
    _columns = {
        # Ordered list of reminder levels belonging to this follow-up.
        'followup_line': fields.one2many('account_followup.followup.line', 'followup_id', 'Follow-up', copy=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        # Display name mirrors the owning company's name.
        'name': fields.related('company_id', 'name', string = "Name", readonly=True, type="char"),
    }
    _defaults = {
        # Default to the current user's company.
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account_followup.followup', context=c),
    }
    _sql_constraints = [('company_uniq', 'unique(company_id)', 'Only one follow-up per company is allowed')]
class followup_line(osv.osv):
    """One reminder level of a follow-up: what to do N days after due date."""

    def _get_default_template(self, cr, uid, ids, context=None):
        # Resolve the e-mail template shipped with this module; returns False
        # if its XML data record was removed.
        try:
            return self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_followup', 'email_template_account_followup_default')[1]
        except ValueError:
            return False

    _name = 'account_followup.followup.line'
    _description = 'Follow-up Criteria'
    _columns = {
        'name': fields.char('Follow-Up Action', required=True),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of follow-up lines."),
        'delay': fields.integer('Due Days', help="The number of days after the due date of the invoice to wait before sending the reminder. Could be negative if you want to send a polite alert beforehand.", required=True),
        'followup_id': fields.many2one('account_followup.followup', 'Follow Ups', required=True, ondelete="cascade"),
        # %-format template rendered when printing/sending; see _check_description.
        'description': fields.text('Printed Message', translate=True),
        'send_email':fields.boolean('Send an Email', help="When processing, it will send an email"),
        'send_letter':fields.boolean('Send a Letter', help="When processing, it will print a letter"),
        'manual_action':fields.boolean('Manual Action', help="When processing, it will set the manual action to be taken for that customer. "),
        'manual_action_note':fields.text('Action To Do', placeholder="e.g. Give a phone call, check with others , ..."),
        'manual_action_responsible_id':fields.many2one('res.users', 'Assign a Responsible', ondelete='set null'),
        'email_template_id':fields.many2one('mail.template', 'Email Template', ondelete='set null'),
    }
    _order = 'delay'
    _sql_constraints = [('days_uniq', 'unique(followup_id, delay)', 'Days of the follow-up levels must be different')]
    _defaults = {
        'send_email': True,
        'send_letter': True,
        'manual_action':False,
        'description': """
Dear %(partner_name)s,
Exception made if there was a mistake of ours, it seems that the following amount stays unpaid. Please, take appropriate measures in order to carry out this payment in the next 8 days.
Would your payment have been carried out after this mail was sent, please ignore this message. Do not hesitate to contact our accounting department.
Best Regards,
""",
        'email_template_id': _get_default_template,
    }

    def _check_description(self, cr, uid, ids, context=None):
        # The description is a %-format template; reject values whose
        # placeholders would crash at rendering time.
        for line in self.browse(cr, uid, ids, context=context):
            if line.description:
                try:
                    line.description % {'partner_name': '', 'date':'', 'user_signature': '', 'company_name': ''}
                except:
                    return False
        return True

    _constraints = [
        (_check_description, 'Your description is invalid, use the right legend or %% if you want to use the percent character.', ['description']),
    ]
class account_move_line(osv.osv):
    """Extends journal items with follow-up level/date and a signed balance."""

    def _get_result(self, cr, uid, ids, name, arg, context=None):
        # Functional-field helper: signed amount (debit - credit) per line.
        res = {}
        for aml in self.browse(cr, uid, ids, context=context):
            res[aml.id] = aml.debit - aml.credit
        return res

    _inherit = 'account.move.line'
    _columns = {
        'followup_line_id': fields.many2one('account_followup.followup.line', 'Follow-up Level',
                                        ondelete='restrict'), #restrict deletion of the followup line
        'followup_date': fields.date('Latest Follow-up', select=True),
        'result':fields.function(_get_result, type='float', method=True,
                        string="Balance") #'balance' field is not the same
    }
class res_partner(osv.osv):
def fields_view_get(self, cr, uid, view_id=None, view_type=None, context=None, toolbar=False, submenu=False):
res = super(res_partner, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type,
|
context=context,
toolbar=toolbar, submenu=submenu)
context = context or {}
if view_type == 'form' and context.get('Followupfirst'):
doc = etree.XML(res['arch'], parser=None, base_url=None)
first_node = doc.xpath("//page[@name='followup_tab']")
root = first_node[0].getparent()
root.insert(0, first_node[0])
res['arch'] = etree.
|
tostring(doc, encoding="utf-8")
return res
def _get_latest(self, cr, uid, ids, names, arg, context=None, company_id=None):
res={}
if company_id == None:
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
else:
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
for partner in self.browse(cr, uid, ids, context=context):
amls = partner.unreconciled_aml_ids
latest_date = False
latest_level = False
latest_days = False
latest_level_without_lit = False
latest_days_without_lit = False
for aml in amls:
if (aml.company_id == company) and (aml.followup_line_id != False) and (not latest_days or latest_days < aml.followup_line_id.delay):
latest_days = aml.followup_line_id.delay
latest_level = aml.followup_line_id.id
if (aml.company_id == company) and (not latest_date or latest_date < aml.followup_date):
latest_date = aml.followup_date
if (aml.company_id == company) and (aml.blocked == False) and (aml.followup_line_id != False and
(not latest_days_without_lit or latest_days_without_lit < aml.followup_line_id.delay)):
latest_days_without_lit = aml.followup_line_id.delay
latest_level_without_lit = aml.followup_line_id.id
res[partner.id] = {'latest_followup_date': latest_date,
'latest_followup_level_id': latest_level,
'latest_followup_level_id_without_lit': latest_level_without_lit}
return res
@api.cr_uid_ids_context
def do_partner_manual_action(self, cr, uid, partner_ids, context=None):
#partner_ids -> re
|
sondree/Master-thesis
|
Python MOEA/utility.py
|
Python
|
gpl-3.0
| 592 | 0.015203 |
import os
import traceback,sys
CONFIG_FOLDER = "Configs"


def load_config(name):
    """Import and instantiate the Config class from Configs/<name>.py.

    Args:
        name: module name inside the Configs package (without extension).

    Returns:
        A Config instance annotated with its source ``filename`` and ``name``.

    Raises:
        ImportError: if the config module cannot be imported; the original
            traceback is printed to stdout first.
    """
    try:
        module = __import__("%s.%s" % (CONFIG_FOLDER, name), fromlist=["Config"])
    except ImportError:
        # Display error message
        traceback.print_exc(file=sys.stdout)
        # Bug fix: the original message referenced an undefined name
        # `listofimports`, raising NameError and hiding the real error.
        raise ImportError("Failed to import module {0} from folder {1}".format(name, CONFIG_FOLDER))
    conf = module.Config()
    conf.filename = os.path.join(CONFIG_FOLDER, "%s.py" % name)
    conf.name = name
    # print() call form works under both Python 2 and 3.
    print("Loading config %s. Loading completed" % name)
    return conf
|
jesseengel/magenta
|
magenta/models/gansynth/lib/train_util.py
|
Python
|
apache-2.0
| 18,904 | 0.006083 |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train a progressive GAN model.
See https://arxiv.org/abs/1710.10196 for details about the model.
See https://github.com/tkarras/progressive_growing_of_gans for the original
theano implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import logging
from magenta.models.gansynth.lib import networks
import numpy as np
import tensorflow as tf
tfgan = tf.contrib.gan
def make_train_sub_dir(stage_id, **kwargs):
  """Return the log directory used for training stage `stage_id`."""
  stage_dir_name = 'stage_{:05d}'.format(stage_id)
  return os.path.join(kwargs['train_root_dir'], stage_dir_name)
def make_resolution_schedule(**kwargs):
  """Returns an object of `ResolutionSchedule`.

  Args:
    **kwargs: A dictionary of
        'scale_mode': Scaling mode forwarded to the schedule.
        'start_height', 'start_width': Starting image resolution.
        'scale_base': Scale factor between successive resolutions.
        'num_resolutions': Number of progressive resolutions.
  """
  return networks.ResolutionSchedule(
      scale_mode=kwargs['scale_mode'],
      start_resolutions=(kwargs['start_height'], kwargs['start_width']),
      scale_base=kwargs['scale_base'],
      num_resolutions=kwargs['num_resolutions'])
def get_stage_ids(**kwargs):
  """Returns a list of stage ids still to be trained.

  Args:
    **kwargs: A dictionary of
        'train_root_dir': A string of root directory of training logs.
        'num_resolutions': An integer of number of progressive resolutions.
  """
  # Existing per-stage log directories indicate stages already started.
  train_sub_dirs = [
      sub_dir for sub_dir in tf.gfile.ListDirectory(kwargs['train_root_dir'])
      if sub_dir.startswith('stage_')
  ]
  # If fresh start, start with start_stage_id = 0
  # If has been trained for n = len(train_sub_dirs) stages, start with the last
  # stage, i.e. start_stage_id = n - 1.
  start_stage_id = max(0, len(train_sub_dirs) - 1)
  return range(start_stage_id, get_total_num_stages(**kwargs))
def get_total_num_stages(**kwargs):
  """Returns the total number of training stages (stable + transition)."""
  num_resolutions = kwargs['num_resolutions']
  # One stable stage per resolution plus one transition stage between each.
  return num_resolutions * 2 - 1
def get_batch_size(stage_id, **kwargs):
  """Returns the batch size to use for training stage `stage_id`.

  Each resolution has its own batch size. If the schedule is shorter than
  `num_resolutions`, it is left-padded with its first entry so that every
  resolution is covered.

  Args:
    stage_id: An integer of training stage index.
    **kwargs: A dictionary of
        'batch_size_schedule': A list of integer, one batch size per image
            resolution.
        'num_resolutions': An integer of number of progressive resolutions.

  Returns:
    An integer batch size for the `stage_id`.
  """
  schedule = list(kwargs['batch_size_schedule'])
  num_resolutions = kwargs['num_resolutions']
  missing = num_resolutions - len(schedule)
  if missing > 0:
    schedule = [schedule[0]] * missing + schedule
  # Stages 2k (stable) and 2k-1 (transition) share resolution index k.
  resolution_index = (stage_id + 1) // 2
  return int(schedule[resolution_index])
def get_stage_info(stage_id, **kwargs):
  """Returns information for a training stage.

  Args:
    stage_id: An integer of training stage index.
    **kwargs: A dictionary of
        'num_resolutions': An integer of number of progressive resolutions.
        'stable_stage_num_images': An integer of number of training images in
            the stable stage.
        'transition_stage_num_images': An integer of number of training images
            in the transition stage.
        'total_num_images': An integer of total number of training images.

  Returns:
    A tuple of integers. The first entry is the number of blocks. The second
    entry is the accumulated total number of training images when stage
    `stage_id` is finished.

  Raises:
    ValueError: If `stage_id` is not in [0, total number of stages).
  """
  # Total stage count inlined from get_total_num_stages.
  total_num_stages = 2 * kwargs['num_resolutions'] - 1
  if not 0 <= stage_id < total_num_stages:
    raise ValueError(
        '`stage_id` must be in [0, {0}), but instead was {1}'.format(
            total_num_stages, stage_id))

  # Even stage_id: stable training stage.
  # Odd stage_id: transition training stage.
  num_blocks = (stage_id + 1) // 2 + 1
  num_stable = stage_id // 2 + 1
  num_transition = (stage_id + 1) // 2
  num_images = (num_stable * kwargs['stable_stage_num_images'] +
                num_transition * kwargs['transition_stage_num_images'])

  # The final stage consumes whatever image budget remains.
  total_num_images = kwargs['total_num_images']
  if stage_id >= total_num_stages - 1:
    num_images = total_num_images
  return num_blocks, min(num_images, total_num_images)
def make_latent_vectors(num, **kwargs):
  """Returns a batch of `num` random latent vectors."""
  # Standard-normal draws of shape [num, latent_vector_size].
  return tf.random_normal([num, kwargs['latent_vector_size']], dtype=tf.float32)
def make_interpolated_latent_vectors(num_rows, num_columns, **kwargs):
  """Returns a batch of linearly interpolated latent vectors.

  Given two randomly generated latent vector za and zb, it can generate
  a row of `num_columns` interpolated latent vectors, i.e.
  [..., za + (zb - za) * i / (num_columns - 1), ...] where
  i = 0, 1, ..., `num_columns` - 1.

  This function produces `num_rows` such rows and returns a (flattened)
  batch of latent vectors with batch size `num_rows * num_columns`.

  Args:
    num_rows: An integer. Number of rows of interpolated latent vectors.
    num_columns: An integer. Number of interpolated latent vectors in each row.
    **kwargs: A dictionary of
        'latent_vector_size': An integer of latent vector size.

  Returns:
    A `Tensor` of shape `[num_rows * num_columns, latent_vector_size]`.
  """
  ans = []
  for _ in range(num_rows):
    # z[0] and z[1] are the two random endpoints of this row.
    z = tf.random_normal([2, kwargs['latent_vector_size']])
    # Interpolation weights 0, 1/(c-1), ..., 1 as a column vector.
    r = tf.reshape(
        tf.to_float(tf.range(num_columns)) / (num_columns - 1), [-1, 1])
    dz = z[1] - z[0]
    # Row i of this block is z[0] + dz * r_i.
    ans.append(z[0] + tf.stack([dz] * num_columns) * r)
  return tf.concat(ans, axis=0)
def define_loss(gan_model, **kwargs):
  """Defines progressive GAN losses.

  The generator and discriminator both use wasserstein loss. In addition,
  a small penalty term is added to the discriminator loss to prevent it getting
  too large.

  Args:
    gan_model: A `GANModel` namedtuple.
    **kwargs: A dictionary of
        'gradient_penalty_weight': A float of gradient norm target for
            wasserstein loss.
        'gradient_penalty_target': A float of gradient penalty weight for
            wasserstein loss.
        'real_score_penalty_weight': A float of additional penalty to keep
            the scores from drifting too far from zero.

  Returns:
    A `GANLoss` namedtuple.
  """
  gan_loss = tfgan.gan_loss(
      gan_model,
      generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
      discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
      gradient_penalty_weight=kwargs['gradient_penalty_weight'],
      gradient_penalty_target=kwargs['gradient_penalty_target'],
      gradient_penalty_epsilon=0.0)

  # Drift penalty: mean squared real-sample score, discouraging the
  # discriminator outputs from growing without bound.
  real_score_penalty = tf.reduce_mean(
      tf.square(gan_model.discriminator_real_outputs))
  tf.summary.scalar('real_score_penalty', real_score_penalty)

  return gan_loss._replace(
      discriminator_loss=(
          gan_loss.discriminator_loss +
          kwargs['real_score_penalty_weight'] * real_score_penalty))
def define_train_ops(gan_model, gan_loss, **kwargs):
"""Defines progressive GAN train ops.
Args:
gan_model: A `GANModel` namedtuple.
gan_loss: A `GANLoss` namedtuple.
**kwargs: A dictionary of
'adam_beta1': A float of Adam optimizer beta1.
'adam_beta2': A float of Adam optimizer beta2.
'generator_learning_rate': A float of generator learning rate
|
.
|
'discriminator_learning_rate': A float of disc
|
interactiveaudiolab/nussl
|
nussl/separation/primitive/melodia.py
|
Python
|
mit
| 15,856 | 0.006244 |
import numpy as np
from scipy.ndimage.filters import convolve
from scipy.ndimage import maximum_filter, gaussian_filter
from .. import MaskSeparationBase, SeparationException
from ..benchmark import HighLowPassFilter
from ... import AudioSignal
from ... import vamp_imported
import numpy as np
import scipy.signal
if vamp_imported:
import vamp
# function for generating the vocal chord impulse response
def rosenmodel(t1, t2, fs):
    """Rosenberg glottal-pulse impulse response for the vocal chords.

    This model for generating singing vowel sounds from sine tones comes
    from:
    https://simonl02.users.greyc.fr/files/Documents/Teaching/SignalProcessingLabs/lab3.pdf
    (Equation 2; original author: Fatemeh Pishdadian). Used by Melodia to
    synthesize the melody signal from which a mask is produced.

    Args:
        t1: rise time of the pulse, in seconds.
        t2: fall time of the pulse, in seconds.
        fs: sample rate in Hz.

    Returns:
        np.ndarray: the concatenated rising and falling pulse segments.
    """
    n_rise = np.floor(t1 * fs)
    n_fall = np.floor(t2 * fs)
    # Raised-cosine rise over samples 0..n_rise, cosine fall afterwards.
    rise_samples = np.arange(n_rise + 1)
    fall_samples = np.arange(n_rise, n_rise + n_fall + 1)
    rising = 0.5 * (1 - np.cos((np.pi * rise_samples) / n_rise))
    falling = np.cos(np.pi * (fall_samples - n_rise) / (2 * n_fall))
    return np.concatenate((rising, falling))
# function for computing the denominator coeffs of the vocal cavity filter transfer function
def oral_cavity_filt(pole_amps, pole_freqs, fs):
    """Denominator coefficients of the all-pole oral-cavity filter H(z).

    This model for generating singing vowel sounds from sine tones comes
    from:
    https://simonl02.users.greyc.fr/files/Documents/Teaching/SignalProcessingLabs/lab3.pdf
    ("Q. Write a function to synthesize filter H(z)"; original author:
    Fatemeh Pishdadian). Used by Melodia to synthesize the melody signal
    from which a mask is produced.

    Args:
        pole_amps: array of pole magnitudes, one per conjugate pair.
        pole_freqs: array of pole frequencies in Hz, one per conjugate pair.
        fs: sample rate in Hz.

    Returns:
        Denominator polynomial coefficients of H(z).
    """
    poles = pole_amps * np.exp(1j * 2 * np.pi * pole_freqs / fs)
    denom_coeffs = 1
    for pole in poles:
        # Each pole and its conjugate contribute one second-order section;
        # accumulate sections by polynomial multiplication (convolution).
        section = np.convolve(np.array([1, -pole]), np.array([1, -np.conj(pole)]))
        denom_coeffs = np.convolve(denom_coeffs, section)
    return denom_coeffs
def _apply_vowel_filter(impulse_train, fs, t1=0.0075, t2=.013,
                        pole_amps=None, pole_freqs=None):
    """Filter an impulse train so it sounds like a sung vowel.

    This model for generating singing vowel sounds from sine tones comes
    from:
    https://simonl02.users.greyc.fr/files/Documents/Teaching/SignalProcessingLabs/lab3.pdf
    (original author: Fatemeh Pishdadian). Used by Melodia to synthesize
    the melody signal from which a mask is produced.

    Args:
        impulse_train (np.ndarray): Numpy array with data to be filtered.
        fs (int): Sample rate of audio.
        t1 (float, optional): N1 (rise time) of the glottal pulse. Defaults to 0.0075.
        t2 (float, optional): N2 (fall time) of the glottal pulse. Defaults to .013.
        pole_amps (np.ndarray, optional): Pole amplitudes. Defaults to None,
            which maps to the "E" vowel formants.
        pole_freqs (np.ndarray, optional): Pole frequencies. Defaults to None,
            which maps to the "E" vowel formants.

    Returns:
        np.ndarray: Filtered impulse train approximating the desired vowel.
    """
    # Defaults correspond to the "E" vowel formants.
    if pole_amps is None:
        pole_amps = np.array([0.99,0.98,0.9,0.9])
    if pole_freqs is None:
        pole_freqs = np.array([800,1200,2800,3600])

    # Glottal excitation: convolve with the Rosenberg pulse.
    vchord_filt = rosenmodel(t1, t2, fs)
    vchord_out = np.convolve(impulse_train, vchord_filt)

    # All-pole oral-cavity resonances.
    denom_coeffs = oral_cavity_filt(pole_amps, pole_freqs, fs)
    oral_out = scipy.signal.lfilter(
        np.array([1]), denom_coeffs, vchord_out)

    # Lip radiation modelled as a first-order differentiator (1 - z^-1).
    lip_out = np.real(scipy.signal.lfilter(
        np.array([1,-1]), np.array([1]), oral_out))
    # Trim the convolution tail back to the input length.
    lip_out = lip_out[:impulse_train.shape[0]]
    return np.real(lip_out)
class Melodia(MaskSeparationBase):
"""
Implements melody extraction using Melodia [1].
This needs Melodia installed as a vamp plugin, as well as having vampy for
Python installed. Install Melodia via: https://www.upf.edu/web/mtg/melodia.
Note that Melodia can be used only for NON-COMMERCIAL use.
References:
[1] J. Salamon and E. Gómez, "Melody Extraction from Polyphonic Music Signals using
Pitch Contour Characteristics", IEEE Transactions on Audio, Speech and
Language Processing, 20(6):1759-1770, Aug. 2012.
Args:
input_audio_signal (AudioSignal object): The AudioSignal object that has the
audio data that Melodia will be run on.
high_pass_cutoff (optional, float): value (in Hz) for the high pass cutoff
filter.
minimum_frequency (optional, float): minimum frequency in Hertz (default 55.0)
maximum_frequency (optional, float): maximum frequency in Hertz (default 1760.0)
voicing_tolerance (optional, float): Greater values will result in more pitch contours
included in the final melody. Smaller values will result in less pitch
contours included in the final melody (default 0.2).
minimum_peak_salience (optional, float): a hack to avoid silence turning into junk
contours when analyzing monophonic recordings (e.g. solo voice with
no accompaniment). Generally you want to leave this untouched (default 0.0).
num_overtones (optional, int): Number of overtones to use when creating
melody mask.
apply_vowel_filter (optional, bool): Whether or not to apply a vowel filter
on the resynthesized melody signal when masking.
smooth_length (optional, int): Number of frames to smooth discontinuities in the
mask.
add_lower_octave (optional, fool): Use octave below fundamental frequency as well
to take care of octave errors in pitch tracking, since we only care about
the mask. Defaults to False.
mask_type (optional, str): Type of mask to use.
mask_threshold (optional, float): Threshold for mask to convert to binary.
"""
def __init__(self, input_audio_signal, high_pass_cutoff=100, minimum_frequency=55.0,
maximum_frequency=1760.0, voicing_tolerance=0.2, minimum_peak_salience=0.0,
compression=0.5, num_overtones=40, apply_vowel_filter=False, smooth_length=5,
add_lower_octave=False, mask_type='soft', mask_threshold=0.5):
# lazy load vamp to check if it exists
from ... import vamp_imported
melodia_installed = False
if vamp_imported:
melodia_installed = 'mtg-melodia:melodia' in vamp.list_plugins()
if not vamp_imported or not melodia_installed:
self._raise_vamp_melodia_error()
super().__init__(
input_audio_signal=input_audio_signal,
mask_type=mask_type,
mask_th
|
reshold=mask_threshold
)
self.high_pass_cutoff = high_pass_cutoff
self.minimum_frequency = float(minimum_frequency)
self.maximum_frequency = float(maximum_frequency)
self.voicing_toler
|
ance = float(voicing_tolerance)
self.minimum_peak_salience = float(minimum_peak_salience)
self.compression = compression
self.apply_vowel_filter = apply_vowel_filter
self.add_lower_octave = add_lower_octave
self.melody = None
self.melody_signal = None
self.timestamps = None
self.num_overtones = num_overtones
self.smooth_length = smooth_length
def _raise_vamp_melodia_error(self):
raise SeparationException(
'\n**~*
|
xklakoux/spock2kotlin
|
unroller.py
|
Python
|
mit
| 6,026 | 0.002489 |
import re
from enum import Enum
from context import ParsingContext
class UnrollerState(Enum):
    """Phases of scanning a Spock @Unroll test body around its where-table."""
    START = 0   # before the where: label
    NAMES = 1   # expecting the header row of table variable names
    VALUES = 2  # expecting data rows (or derived '=' assignments)
    END = 3     # past the where: block
class Unroller(object):
    """Expands Spock @Unroll tests.

    Each where-table row is either unrolled into its own Kotlin test, or —
    when the table has more than three rows and big unrolling is disabled —
    the whole test is converted into a JUnit @Parameterized class.
    """

    # When False, tables with more than three rows become parameterized
    # classes instead of being unrolled inline.
    should_unroll_big = False

    def __init__(self, unroll_big):
        self.brackets_stack = []
        self.names = []            # where-table column names
        self.values = []           # one list of cell values per table row
        self.recorded_lines = []   # test body lines (minus the where block)
        self.givens = []           # derived '=' assignments inside the where block
        self.state = UnrollerState.START
        self.all_lines = []        # every kept line, for the parameterized path
        self.was_parameterized = False
        Unroller.should_unroll_big = unroll_big

    def record(self, line):
        """Consume one test line; return True when the test body has ended."""
        self.all_lines.append(line)
        if self.state == UnrollerState.NAMES:
            # Single-variable form: "name << [v1, v2, ...]"
            if '<<' in line:
                self.names.append(line.split(' << ')[0].strip())
                vals = line.split(' << ')[-1].strip().lstrip('[').rstrip(']').split(',')
                self.values.extend([[val.strip()] for val in vals])
                self.all_lines.pop()
                self.state = UnrollerState.END
                return False
        if self.state == UnrollerState.NAMES:
            # Table form header row: "a | b | c"
            line = line.replace('||', '|')
            self.names = [var.strip() for var in line.split('|')]
            self.all_lines.pop()
            self.state = UnrollerState.VALUES
            return False
        elif self.state == UnrollerState.VALUES:
            if '|' in line:
                # A data row of the table.
                line = line.replace('||', '|')
                self.values.append([var.strip() for var in line.split('|')])
                self.all_lines.pop()
                return False
            elif '=' in line:
                # Derived value computed from the table columns.
                self.givens.append(line)
                return False
            else:
                self.state = UnrollerState.END
        if 'where:' in line:
            # Drop the blank line preceding "where:" and the label itself.
            if not re.search('\S', self.recorded_lines[-1]):
                self.recorded_lines.pop()
            if not re.search('\S', self.all_lines[-1]):
                self.all_lines.pop()
            self.state = UnrollerState.NAMES
            self.all_lines.pop()
            return False
        self.recorded_lines.append(line)
        if self.state == UnrollerState.START or self.state == UnrollerState.END:
            if '}' in line:
                return True
        return False

    def unroll_tests(self):
        """Return the expanded test lines for every row of the where-table."""
        if len(self.values) > 3 and not Unroller.should_unroll_big:
            self.was_parameterized = True
            return self.get_parameterized_template()
        self.givens = ['val ' + given for given in self.givens]
        new_tests = []
        for set_index, _set in enumerate(self.values):
            # Substitute #name placeholders in the test method name.
            method_name = self.recorded_lines[0]
            for name_index, name in enumerate(self.names):
                method_name = method_name.replace("#{}".format(name), _set[name_index].strip().strip('"'))
            method_name = self.replace_invalid_chars(method_name)
            new_tests.append(method_name)
            new_givens = []
            for given in self.givens:
                # Replace bare variable references (not attribute accesses)
                # with this row's concrete values.
                for name_index, name in enumerate(self.names):
                    given = re.sub(r'([^.]){}(\b)'.format(name), '\g<1>' + _set[name_index] + '\g<2>', given)
                new_tests.append(given)
            new_tests.extend(new_givens)
            for line in self.recorded_lines[1:]:
                for name_index, name in enumerate(self.names):
                    line = re.sub(r'([^.]){}(\b)'.format(name), '\g<1>' + _set[name_index] + '\g<2>', line)
                new_tests.append(line)
            new_tests.append('')
        return new_tests

    @staticmethod
    def replace_invalid_chars(method_name):
        """Make an unrolled method name legal for Kotlin backtick identifiers."""
        method_name = method_name.replace(',', '[coma]')
        method_name = method_name.replace('.', '[dot]')
        method_name = method_name.replace('[', '(')
        method_name = method_name.replace(']', ')')
        method_name = method_name.replace(':', '')
        method_name = method_name.replace('\\n', ' newline ')
        method_name = method_name.replace('\\', ' [slash] ')
        return method_name

    @staticmethod
    def parse(spock):
        """Stream `spock` lines, expanding each @Unroll block.

        Parameterized classes are appended after the remaining content.
        """
        state = ParsingContext.INDETERMINATE
        unroller = Unroller(Unroller.should_unroll_big)
        new_lines = []
        parameterized_lines = []
        for line in spock:
            if '@Unroll' in line:
                state = ParsingContext.UNROLLING
                continue
            if state == ParsingContext.UNROLLING:
                if unroller.record(line):
                    new_tests = unroller.unroll_tests()
                    state = ParsingContext.INDETERMINATE
                    if not unroller.was_parameterized:
                        new_lines.extend(new_tests)
                    else:
                        parameterized_lines.extend(new_tests)
                    unroller = Unroller(Unroller.should_unroll_big)
                continue
            new_lines.append(line)
        new_lines.extend(parameterized_lines)
        return new_lines

    def get_parameterized_template(self):
        """Emit a JUnit Parameterized runner class for a large where-table."""
        class_name = re.search('`(.*)`', self.all_lines[0]).group(1)
        coma_separator = ',\n'
        parameters = coma_separator.join(["private val " + name + ": " for name in self.names])
        data_whitespace = ' ' * 16
        values = coma_separator.join([data_whitespace + '{ arrayOf(' + ', '.join(vals) + ') }' for vals in self.values])
        pipe_whitespace = ' |\n' + ' ' * 48
        names = pipe_whitespace.join([name + ' {' + str(index) + '}' for index, name in enumerate(self.names)])
        parameterized_template = '''
@RunWith(Parameterized::class)
class `{}`({}) : Setup() {{
companion object {{
@JvmStatic
@Parameterized.Parameters(name = """{}
""")
fun data() = createData(
{}
)
}}
'''.format(class_name, parameters, names, values)
        self.all_lines.insert(0, parameterized_template)
        self.all_lines.append("}")
        return self.all_lines
|
eicher31/compassion-modules
|
logging_compassion/wizards/subscribe_logs.py
|
Python
|
agpl-3.0
| 789 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models
class SubscribeLogs(models.TransientModel):
    """Transient helper that activates the module's audit-log rules."""
    _name = 'auditlog.subscriber'

    @api.model
    def subscribe_rules(self):
        """Find every auditlog.rule shipped by logging_compassion (via its
        ir.model.data records) and subscribe it."""
        data_records = self.env['ir.model.data'].search(
            [('module', '=', 'logging_compassion'),
             ('model', '=', 'auditlog.rule')])
        rule_ids = data_records.mapped('res_id')
        self.env['auditlog.rule'].browse(rule_ids).subscribe()
|
pfnet/chainer
|
tests/chainer_tests/functions_tests/activation_tests/test_prelu.py
|
Python
|
mit
| 1,845 | 0 |
import numpy
import chainer
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
    'shape': [(4, 3, 2), (1,), (1, 2, 3, 4, 5, 6)],
    'Wdim': [0, 1, 3],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@chainer.testing.backend.inject_backend_tests(
    None,
    [
        # NumPy
        {},
        # CuPy
        {'use_cuda': True, 'cuda_device': 0},
        {'use_cuda': True, 'cuda_device': 1},
        # ChainerX
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
class TestPReLU(testing.FunctionTestCase):
    """Forward/backward tests for functions.prelu over shapes/dtypes/backends."""

    def setUp(self):
        # float16 needs looser tolerances for gradient checks.
        self.check_backward_options = {}
        if self.dtype == numpy.float16:
            self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-3})
        self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
        if self.dtype == numpy.float16:
            self.check_double_backward_options.update(
                {'atol': 5e-3, 'rtol': 5e-2})

    def generate_inputs(self):
        x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Keep values away from the kink at 0 where numeric grads are unstable.
        x[(-0.05 < x) & (x < 0.05)] = 0.5
        W = numpy.random.uniform(
            -1, 1, self.shape[1:1 + self.Wdim]).astype(self.dtype)
        return x, W

    def forward_expected(self, inputs):
        x, W = inputs
        y_expect = x.copy()
        # Multiply only the negative entries by W, broadcast over trailing axes.
        masked = numpy.ma.masked_greater_equal(y_expect, 0, copy=False)
        shape = (1,) + W.shape + (1,) * (x.ndim - W.ndim - 1)
        masked *= W.reshape(shape)
        return y_expect,

    def forward(self, inputs, device):
        x, W = inputs
        y = functions.prelu(x, W)
        return y,
testing.run_module(__name__, __file__)
|
django-oscar/django-oscar-paymentexpress
|
tests/facade_tests.py
|
Python
|
bsd-3-clause
| 7,334 | 0.001364 |
from django.test import TestCase
from mock import Mock, patch
from paymentexpress.facade import Facade
from paymentexpress.gateway import AUTH, PURCHASE
from paymentexpress.models import OrderTransaction
from tests import (XmlTestingMixin, CARD_VISA, SAMPLE_SUCCESSFUL_RESPONSE,
SAMPLE_DECLINED_RESPONSE, SAMPLE_ERROR_RESPONSE)
from oscar.apps.payment.utils import Bankcard
from oscar.apps.payment.exceptions import (UnableToTakePayment,
InvalidGatewayRequestError)
class MockedResponseTestCase(TestCase):
    """Base test case that can fabricate canned ``requests`` responses."""

    def create_mock_response(self, body, status_code=200):
        """Return a Mock shaped like a ``requests`` response object."""
        mock_response = Mock()
        mock_response.content = body
        mock_response.text = body
        mock_response.status_code = status_code
        return mock_response
class FacadeTests(TestCase, XmlTestingMixin):
    # Input-validation tests: the facade must reject bad amounts/arguments
    # before any gateway call is attempted.
    def setUp(self):
        self.facade = Facade()
    def test_zero_amount_raises_exception(self):
        card = Bankcard(card_number=CARD_VISA,
                        expiry_date='1015',
                        name="Frankie", cvv="123",
                        start_date="1010")
        with self.assertRaises(UnableToTakePayment):
            self.facade.authorise('1000', 0, card)
    def test_zero_amount_for_complete_raises_exception(self):
        with self.assertRaises(UnableToTakePayment):
            self.facade.complete('1000', 0, '1234')
    def test_zero_amount_for_purchase_raises_exception(self):
        with self.assertRaises(UnableToTakePayment):
            self.facade.purchase('1000', 0)
    def test_purchase_without_billing_id_or_card_raises_exception(self):
        with self.assertRaises(ValueError):
            self.facade.purchase('1000', 1.23)
    def test_zero_amount_for_refund_raises_exception(self):
        with self.assertRaises(UnableToTakePayment):
            self.facade.refund('1000', 0, '1234')
    def test_merchant_reference_format(self):
        # Expected shape: <order>_<TXNTYPE>_<n>_<4 digits>.
        merchant_ref = self.facade._get_merchant_reference('1000', AUTH)
        self.assertRegexpMatches(merchant_ref, r'^\d+_[A-Z]+_\d+_\d{4}$')
class FacadeSuccessfulResponseTests(MockedResponseTestCase):
    """Successful gateway responses must map to well-formed result dicts."""
    dps_txn_ref = '000000030884cdc6'
    dps_billing_id = '0000080023225598'

    def setUp(self):
        self.facade = Facade()
        self.card = Bankcard(card_number=CARD_VISA,
                             expiry_date='1015',
                             name="Frankie", cvv="123",
                             start_date="1010")

    def test_successful_call_returns_valid_dict(self):
        with patch('requests.post') as post:
            post.return_value = self.create_mock_response(
                SAMPLE_SUCCESSFUL_RESPONSE)
            auth_dict = self.facade.authorise('1000', 1, self.card)
            complete_dict = self.facade.complete('1000', 1.23,
                                                 self.dps_txn_ref)
            refund_dict = self.facade.refund('1000', 1.23, '000000030884cdc6')
            validate_dict = self.facade.validate(self.card)
            response_dicts = (auth_dict, complete_dict, refund_dict,
                              validate_dict)
            # Every operation should report the gateway txn and billing refs.
            for response_dict in response_dicts:
                self.assertEquals(self.dps_txn_ref,
                                  response_dict['txn_reference'])
                self.assertEquals(self.dps_billing_id,
                                  response_dict['partner_reference'])

    def test_purchase_with_billing_id_returns_valid_dict(self):
        with patch('requests.post') as post:
            post.return_value = self.create_mock_response(
                SAMPLE_SUCCESSFUL_RESPONSE)
            txn_ref = self.facade.purchase('1000', 1.23, 'abc123')
            self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])

    def test_purchase_with_bankcard_returns_valid_dict(self):
        with patch('requests.post') as post:
            post.return_value = self.create_mock_response(
                SAMPLE_SUCCESSFUL_RESPONSE)
            txn_ref = self.facade.purchase('1000', 1.23, None, self.card)
            self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])

    def test_successful_call_is_recorded(self):
        with patch('requests.post') as post:
            post.return_value = self.create_mock_response(
                SAMPLE_SUCCESSFUL_RESPONSE)
            self.facade.authorise('10001', 10.25, self.card)
            # An OrderTransaction row must be persisted for the auth.
            txn = OrderTransaction.objects.filter(order_number='10001')[0]
            self.assertEquals(AUTH, txn.txn_type)

    def test_empty_issue_date_is_allowed(self):
        with patch('requests.post') as post:
            post.return_value = self.create_mock_response(
                SAMPLE_SUCCESSFUL_RESPONSE)
            # No start_date supplied: the facade must still authorise.
            card = Bankcard(card_number=CARD_VISA,
                            expiry_date='1015',
                            name="Frankie", cvv="123")
            txn_ref = self.facade.authorise('1000', 1.23, card)
            self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])
class FacadeDeclinedResponseTests(MockedResponseTestCase):
    # A declined gateway response must surface as UnableToTakePayment for
    # every facade operation, and the failed attempt must still be recorded.
    def setUp(self):
        self.facade = Facade()
        self.card = Bankcard(card_number=CARD_VISA,
                             expiry_date='1015',
                             name="Frankie", cvv="123",
                             start_date="1010")
    def test_declined_call_raises_an_exception(self):
        with patch('requests.post') as post:
            post.return_value = self.create_mock_response(
                SAMPLE_DECLINED_RESPONSE)
            with self.assertRaises(UnableToTakePayment):
                self.facade.authorise('1000', 1, self.card)
            with self.assertRaises(UnableToTakePayment):
                self.facade.complete('1000', 1.23, '000000030884cdc6')
            with self.assertRaises(UnableToTakePayment):
                self.facade.purchase('1000', 1.23, 'abc123')
            with self.assertRaises(UnableToTakePayment):
                self.facade.purchase('1000', 1.23, None, self.card)
            with self.assertRaises(UnableToTakePayment):
                self.facade.refund('1000', 1.23, '000000030884cdc6')
            with self.assertRaises(UnableToTakePayment):
                self.facade.validate(self.card)
    def test_declined_call_is_recorded(self):
        with patch('requests.post') as post:
            post.return_value = self.create_mock_response(
                SAMPLE_DECLINED_RESPONSE)
            try:
                self.facade.purchase('1001', 10.24, None, self.card)
            except Exception:
                # The exception path is covered above; here we only check
                # that the transaction row was written anyway.
                pass
            txn = OrderTransaction.objects.filter(order_number='1001')[0]
            self.assertIsNotNone(txn)
            self.assertEquals(PURCHASE, txn.txn_type)
class FacadeErrorResponseTests(MockedResponseTestCase):
    """Gateway error responses surface as InvalidGatewayRequestError."""

    def setUp(self):
        self.facade = Facade()
        self.card = Bankcard(card_number=CARD_VISA,
                             expiry_date='1015',
                             name="Frankie", cvv="123",
                             start_date="1010")

    def test_error_response_raises_invalid_gateway_request_exception(self):
        error_response = self.create_mock_response(SAMPLE_ERROR_RESPONSE)
        with patch('requests.post', return_value=error_response):
            with self.assertRaises(InvalidGatewayRequestError):
                self.facade.purchase('1000', 10.24, None, self.card)
|
rackerlabs/heat-pyrax
|
pyrax/cloudcdn.py
|
Python
|
apache-2.0
| 7,833 | 0.000766 |
# -*- coding: utf-8 -*-
# Copyright (c)2013 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functools import wraps
import re
from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
class CloudCDNFlavor(BaseResource):
    """A CDN flavor resource; all behavior is inherited from BaseResource."""
class CloudCDNFlavorManager(BaseManager):
    """Manager for listing and fetching CDN flavors."""

    def list(self):
        """Return every available flavor as a CloudCDNFlavor."""
        resp, body = self.api.method_get("/%s" % self.uri_base)
        flavor_dicts = body[self.plural_response_key]
        return [CloudCDNFlavor(self, flavor_dict)
                for flavor_dict in flavor_dicts]

    def get(self, flavor_id):
        """Return the single flavor identified by `flavor_id`."""
        uri = "/%s/%s" % (self.uri_base, flavor_id)
        resp, body = self.api.method_get(uri)
        return CloudCDNFlavor(self, body)
class CloudCDNService(BaseResource):
    """A CDN service resource with convenience delegation to its manager."""

    def patch(self, changes):
        """Apply a JSON-patch style list of changes to this service."""
        self.manager.patch(self.id, changes)

    def delete(self):
        """Delete this service."""
        self.manager.delete(self)

    def delete_assets(self, url=None, all=False):
        """Purge cached assets: one URL, or everything when all=True."""
        self.manager.delete_assets(self.id, url, all)
class CloudCDNServiceManager(BaseManager):
    """Manager implementing create/patch/asset-purge/list for CDN services."""

    def create(self, name, flavor_id, domains, origins,
               restrictions=None, caching=None, log_delivery=False):
        """POST a new service.

        The service ID is not in the response body; it is taken from the
        trailing segment of the Location header.  Returns a CloudCDNService.
        """
        body = {"name": name,
                "flavor_id": flavor_id,
                "domains": domains,
                "origins": origins,
                "restrictions": restrictions or [],
                "caching": caching or [],
                "log_delivery": {"enabled": bool(log_delivery)}}
        resp, resp_body = self.api.method_post("/%s" % self.uri_base,
                                               body=body)
        body["id"] = resp.headers.get("location").split("/")[-1]
        return CloudCDNService(self, body)

    def patch(self, service_id, changes):
        """PATCH an existing service with a JSON-patch changes list."""
        resp, resp_body = self.api.method_patch(
            "/%s/%s" % (self.uri_base, service_id), body=changes)
        return None

    def delete_assets(self, service_id, url=None, all=False):
        """Purge cached assets for a service, by URL or all at once."""
        uri = "/%s/%s/assets" % (self.uri_base, service_id)
        queries = {}
        if all:
            queries["all"] = "true"
        if url is not None:
            queries["url"] = url
        qs = utils.dict_to_qs(queries)
        if qs:
            uri = "%s?%s" % (uri, qs)
        self.api.method_delete(uri)
        return None

    def list(self, limit=None, marker=None):
        """List services with optional pagination parameters."""
        uri = "/%s" % self.uri_base
        qs = utils.dict_to_qs(dict(limit=limit, marker=marker))
        if qs:
            uri = "%s?%s" % (uri, qs)
        return self._list(uri)
class CloudCDNClient(BaseClient):
    """
    This is the base client for creating and managing Cloud CDN.
    """
    def __init__(self, *args, **kwargs):
        super(CloudCDNClient, self).__init__(*args, **kwargs)
        self.name = "Cloud CDN"
    def _configure_manager(self):
        """
        Creates the Manager instances for CDN flavors and services.
        """
        self._flavor_manager = CloudCDNFlavorManager(self,
                uri_base="flavors", resource_class=CloudCDNFlavor,
                response_key=None, plural_response_key="flavors")
        self._services_manager = CloudCDNServiceManager(self,
                uri_base="services", resource_class=CloudCDNService,
                response_key=None, plural_response_key="services")
    def ping(self):
        """Ping the server.
        Returns None if successful; otherwise lets the API error propagate.
        """
        self.method_get("/ping")
    def list_flavors(self):
        """List CDN flavors."""
        return self._flavor_manager.list()
    def get_flavor(self, flavor_id):
        """Get one CDN flavor."""
        return self._flavor_manager.get(flavor_id)
    def list_services(self, limit=None, marker=None):
        """List CDN services."""
        return self._services_manager.list(limit=limit, marker=marker)
    def get_service(self, service_id):
        """Get one CDN service."""
        return self._services_manager.get(service_id)
    def create_service(self, name, flavor_id, domains, origins,
                       restrictions=None, caching=None, log_delivery=False):
        """Create a new CDN service.
        Arguments:
            name: The name of the service.
            flavor_id: The ID of the flavor to use for this service.
            domains: A list of dictionaries, each of which has a required
                     key "domain" and optional key "protocol" (the default
                     protocol is http).
            origins: A list of dictionaries, each of which has a required
                     key "origin" which is the URL or IP address to pull
                     origin content from. Optional keys include "port" to
                     use a port other than the default of 80, and "ssl"
                     to enable SSL, which is disabled by default.
            caching: An optional list of caching rules for the service.
        """
        return self._services_manager.create(name, flavor_id, domains,
                                             origins, restrictions, caching,
                                             log_delivery)
    def patch_service(self, service_id, changes):
        """Update a CDN service with a patch
        Arguments:
            service_id: The ID of the service to update.
            changes: A list of dictionaries containing the following keys:
                     op, path, and value. The "op" key can be any of the
                     following actions: add, replace, or remove. Path
                     is the path to update. A value must be specified for
                     add or replace ops, but can be omitted for remove.
        """
        self._services_manager.patch(service_id, changes)
    def delete_service(self, service):
        """Delete a CDN service."""
        self._services_manager.delete(service)
    def delete_assets(self, service_id, url=None, all=False):
        """Delete CDN assets
        Arguments:
            service_id: The ID of the service to delete from.
            url: The URL at which to delete assets
            all: When True, delete all assets associated with the service_id.
        You cannot specifiy both url and all.
        """
        self._services_manager.delete_assets(service_id, url, all)
    #################################################################
    # The following methods are defined in the generic client class,
    # but don't have meaning in cdn, as there is not a single
    # resource that defines this module.
    #################################################################
    def list(self, limit=None, marker=None):
        """Not applicable in Cloud CDN."""
        raise NotImplementedError
    def get(self, item):
        """Not applicable in Cloud CDN."""
        raise NotImplementedError
    def create(self, *args, **kwargs):
        """Not applicable in Cloud CDN."""
        raise NotImplementedError
    def delete(self, item):
        """Not applicable in Cloud CDN."""
        raise NotImplementedError
    def find(self, **kwargs):
        """Not applicable in Cloud CDN."""
        raise NotImplementedError
    def findall(self, **kwargs):
        """Not applicable in Cloud CDN."""
        raise NotImplementedError
    #################################################################
|
inonit/wagtail
|
wagtail/tests/settings.py
|
Python
|
bsd-3-clause
| 4,667 | 0.000429 |
import os
WAGTAIL_ROOT = os.path.dirname(__file__)
STATIC_ROOT = os.path.join(WAGTAIL_ROOT, 'test-static')
MEDIA_ROOT = os.path.join(WAGTAIL_ROOT, 'test-media')
MEDIA_URL = '/media/'
# Database connection is driven entirely by environment variables so the
# test suite can run against sqlite (the default) or a real backend.
DATABASES = {
    'default': {
        'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3'),
        'NAME': os.environ.get('DATABASE_NAME', 'wagtail'),
        'USER': os.environ.get('DATABASE_USER', None),
        'PASSWORD': os.environ.get('DATABASE_PASS', None),
        'HOST': os.environ.get('DATABASE_HOST', None),
        'TEST': {
            'NAME': os.environ.get('DATABASE_NAME', None),
        }
    }
}
SECRET_KEY = 'not needed'
ROOT_URLCONF = 'wagtail.tests.urls'
STATIC_URL = '/static/'
STATIC_ROOT = STATIC_ROOT
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
USE_TZ = True
# Two template engines are exercised by the test suite: classic Django
# templates and Jinja2 (with the wagtail extensions).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
                'wagtail.tests.context_processors.do_not_use_static_url',
                'wagtail.contrib.settings.context_processors.settings',
            ],
        },
    },
    {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'APP_DIRS': True,
        'OPTIONS': {
            'extensions': [
                'wagtail.wagtailcore.jinja2tags.core',
                'wagtail.wagtailadmin.jinja2tags.userbar',
                'wagtail.wagtailimages.jinja2tags.images',
            ],
        },
    },
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
INSTALLED_APPS = (
# Install wagtailredirects with its appconfig
# Theres nothing special about wagtailredirects, we just need to have one
# app which uses AppConfigs to test that hooks load properly
'wagtail.wagtailredirects.apps.WagtailRedirectsAppConfig',
'wagtail.tests.testapp',
'wagtail.tests.demosite',
'wagtail.tests.customuser',
'wagtail.tests.snippets',
'wagtail.tests.routablepage',
'wagtail.tests.search',
'wagtail.contrib.wagtailstyleguide',
'wagtail.contrib.wagtailsitemaps',
'wagtail.contrib.wagtailroutablepage',
'wagtail.contrib.wagtailfrontendcache',
'wagtail.contrib.wagtailapi',
'wagtail.contrib.wagtailsearchpromotions',
'wagtail.contrib.settings',
'wagtail.wagtailforms',
'wagtail.wagtailsearch',
'wagtail.wagtailembeds',
'wagtail.wagtailimages',
'wagtail.wagtailsites',
'wagtail.wagtailusers',
'wagtail.wagtailsnippets',
'wagtail.wagtaildocs',
'wagtail.wagtailadmin',
'wagtail.wagtailcore',
'taggit',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
# Using DatabaseCache to make sure that the cache is cleared between tests.
# This prevents false-positives in some wagtail core tests where we are
# changing the 'wagtail_root_paths' key which may cause future tests to fail.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'cache',
}
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher', # don't use the intentionally slow default password hasher
)
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.wagtailsearch.backends.db',
}
}
AUTH_USER_MODEL = 'customuser.CustomUser'
if 'ELASTICSEARCH_URL' in os.environ:
WAGTAILSEARCH_BACKENDS['elasticsearch'] = {
'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch',
'URLS': [os.environ['ELASTICSEARCH_URL']],
'TIMEOUT': 10,
'max_retries': 1,
'AUTO_UPDATE': False,
}
WAGTAIL_SITE_NAME = "Test Site"
|
syunkitada/fabkit-repo
|
fabscript/openstack/mongodb.py
|
Python
|
mit
| 163 | 0 |
# coding: utf-8
from fabkit import task
from fablib.mongodb import MongoDB
# Single shared MongoDB helper used by the fab tasks below.
mongodb = MongoDB()


@task
def setup():
    """Install and configure MongoDB on the target hosts.

    Returns a status dict ({'status': 1}) signalling success to fabkit.
    """
    mongodb.setup()
    return {'status': 1}
|
mcbor/adventofcode
|
2016/day07/day07-pt1.py
|
Python
|
mit
| 1,067 | 0.00656 |
#!/usr/bin/env python3
# Advent of Code 2016 - Day 7, Part One
import sys
import re
from itertools import islice
def window(seq, n=2):
    """Return a sliding window (of width n) over data from the iterable.

    s -> (s0, s1, ..., s[n-1]), (s1, s2, ..., sn), ...
    Yields nothing when the iterable is shorter than n.
    """
    it = iter(seq)
    result = tuple(islice(it, n))
    if len(result) == n:
        yield result
    for elem in it:
        result = result[1:] + (elem,)
        yield result


def has_abba(string):
    """True if `string` contains an ABBA: a four-char palindrome xyyx, x != y."""
    for s in window(string, 4):
        # s[:1:-1] is (s[3], s[2]) -- the last pair reversed.
        if s[:2] == s[:1:-1] and s[0] != s[1]:
            return True
    return False
def main(argv):
    """Count IPs in the puzzle file that support TLS and print the count.

    An IP supports TLS when any supernet sequence contains an ABBA and no
    hypernet (square-bracketed) sequence does.  Returns 0 on success,
    1 on bad usage.
    """
    if len(argv) < 2:
        print("Usage: day07-pt1.py puzzle.txt")
        return 1
    valid = 0
    with open(argv[1]) as f:
        for line in f:
            # Splitting on brackets puts supernets at even indices and
            # hypernets at odd indices.
            nets = re.split('[\[\]]', line.strip())
            if any(has_abba(s) for s in nets[::2]) \
                    and not any(has_abba(h) for h in nets[1::2]):
                valid += 1
    print(valid)
    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
wwfifi/uliweb
|
uliweb/utils/_compat.py
|
Python
|
bsd-2-clause
| 10,959 | 0.00365 |
"""
Compatible with py2 and py3, inspired by jinjin2, future, etc
common types & functions:
name 2.x 3.x
------------ ----------- -----------
unichr unichr chr
unicode unicode str
range xrange range
string_types (str, unicode) (str, )
pickle cPickle pickle
input raw_input input
StingIO, BytesIO
from io import StringIO, BytesIO
"""
import sys
# Interpreter flags used to select the right shims below.
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PYPY = hasattr(sys, 'pypy_translation_info')
# Identity function: used where py3 needs no wrapping (decorator no-ops).
_identity = lambda x:x
if not PY2:
    # Python 3: alias the py2 spellings onto their py3 equivalents.
    unichr = chr
    range = range
    string_types = (str,)
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
    import pickle
    def reraise(tp, value, tb=None):
        # Re-raise `value` while preserving the supplied traceback.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    implements_iterator = _identity
    python_2_unicode_compatible = _identity
    ifilter = filter
    imap = map
    izip = zip
    def u(s, encoding='utf8'):
        # Coerce any object to text (the `encoding` arg is unused on py3).
        if isinstance(s, str):
            return s
        else:
            return str(s)
    def b(s, encoding='utf8'):
        # Coerce any object to bytes, encoding text with `encoding`.
        if isinstance(s, bytes):
            return s
        else:
            if isinstance(s, str):
                return s.encode(encoding)
            else:
                return bytes(s)
    import builtins
    exec_ = getattr(builtins, "exec")
    # NOTE(review): `.next` here vs `.__next__` in the py2 branch looks
    # inverted relative to each platform's iterator protocol -- confirm.
    get_next = lambda x: x.next
    input = input
    open = open
else:
    # Python 2 equivalents of the same public names.
    unichr = unichr
    range = xrange
    string_types = (str, unicode)
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()
    import cPickle as pickle
    from io import BytesIO, StringIO
    # py2's three-arg raise is a syntax error on py3, so hide it in exec().
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
    from itertools import imap, izip, ifilter
    def implements_iterator(cls):
        # Give py2 iterators a .next() backed by the class's __next__.
        cls.next = cls.__next__
        del cls.__next__
        return cls
    def python_2_unicode_compatible(cls):
        # Derive the bytes __str__ from the class's unicode __str__.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls
    def u(s, encoding='utf8'):
        if isinstance(s, unicode):
            return s
        else:
            return unicode(str(s), encoding)
    def b(s, encoding='utf8'):
        if isinstance(s, unicode):
            return s.decode(encoding)
        else:
            return str(s)
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        # NOTE(review): upstream six nests `if locs is None` inside the
        # `if globs is None` branch; as written here `frame` may be unbound
        # when globs is given but locs is not -- verify against upstream.
        if globs is None:
            frame = sys._getframe(1)
            globs = frame.f_globals
        if locs is None:
            locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")
    get_next = lambda x: x.__next__
    input = raw_input
    from io import open
# Ensure the next() builtin exists (it is absent before py2.6).
try:
    next = next
except NameError:
    def next(it):
        return it.next()
def with_metaclass(meta, *bases):
    """Create a base class with metaclass `meta` without polluting the MRO."""
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instanciation that replaces
    # itself with the actual metaclass. Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            # this_bases is None only for the temporary bootstrap class.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
modules_mapping = {
'socketserver':'SocketServer',
'queue':'Queue',
'configparser':'ConfigParser',
'html.entities':'htmlentitydefs',
'html.parser':'HTMLParser',
'http.client':'httplib',
'http.server':['BaseHTTPServer', 'CGIHTTPServer', 'SimpleHTTPServer', 'CGIHTTPServer'],
# from BaseHTTPServer
# from CGIHTTPServer
# from SimpleHTTPServer
# from CGIHTTPServer
'http.cookies':'Cookie',
'http.cookiejar':'cookielib',
'urllib.parse':{'urlparse':['ParseResult', 'SplitResult',
'parse_qs', 'parse_qsl',
'urldefrag', 'urljoin',
'urlparse', 'urlsplit',
'urlunparse', 'urlunsplit'],
'urllib':['quote', 'quote_plus',
'unquote', 'unquote_plus',
'urlencode', 'splitquery']},
# from urlparse import (ParseResult, SplitResult, parse_qs, parse_qsl,
# urldefrag, urljoin, urlparse, urlsplit,
# urlunparse, urlunsplit)
# from urllib import (quote,
# quote_plus,
# unquote,
# unquote_plus,
# urlencode,
# splitquery)
'urllib.request':{'urllib':['pathname2url',
'url2pathname',
'getproxies',
'urlretrieve',
'urlcleanup',
'URLopener',
'FancyURLopener',
'proxy_bypass'],
'urllib2':['AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler', 'CacheFTPHandler',
'FileHandler', 'FTPHandler',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPErrorProcessor', 'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPRedirectHandler', 'HTTPSHandler',
'URLError', 'build_opener',
'install_opener', 'OpenerDirector',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler', 'Request',
'UnknownHandler', 'urlopen'],
'urlparse':['urldefrag','urljoin', 'urlparse',
'urlunparse', 'urlsplit', 'urlunsplit',
'parse_qs', 'parse_q']},
# from urllib import (pathname2url,
# url2pathname,
# getproxies,
# urlretrieve,
# urlcleanup,
# URLopener,
# FancyURLopener,
# proxy_bypass)
    # from urllib2 import (
    #     AbstractBasicAuthHandler,
    #     AbstractDigestAuthHandler,
    #     BaseHandler,
    #     CacheFTPHandler,
    #     FileHandler,
    #     FTPHandler,
# HTTPBasicAuthHandler,
# HTTPCookieProcessor,
# HTTPDefaultErrorHandler,
# HTTPDigestAuthHandler,
# HTTPErrorProcessor,
# HTTPHandler,
# HTTPPasswordMgr,
# HTTPPasswordMgrWithDefaultRealm,
# HTTPRedirectHandler,
# HTTPSHandler,
#
|
PascalSteger/gravimage
|
programs/gi_mc_errors.py
|
Python
|
gpl-2.0
| 2,025 | 0.011852 |
#!/usr/bin/env python3
## Estimates errors using Monte Carlo sampling
# Hamish Silverwood, GRAPPA, UvA, 23 February 2015
import numpy as np
import gl_helper as gh
import pdb
import pickle
import sys
import numpy.random as rand
import matplotlib.pyplot as plt
#TEST this will eventually go outside
def ErSamp_gauss_linear_w_z():
    """Draw one Gaussian-perturbed resampling of the simplenu z data.

    Each z value is perturbed with sigma proportional to z (5%).
    Returns the resampled values as a list.
    """
    fraction_err = 0.05
    datafile = '/home/hsilverw/LoDaM/darcoda/Data_Sets/simplenu/simplenu_sigz_raw_sdz_p05_sdvz_5.dat'
    raw = np.loadtxt(datafile)
    z_column = raw[:, 0]
    return [rand.normal(loc=z, scale=z * fraction_err) for z in z_column]
# Fixed flat test distribution, drawn once at import time.
z_data_flat_distro = rand.random(2000000)


def ErSamp_flat_distro_test():
    """Resample the flat test distribution with small Gaussian scatter.

    Returns |N(z, 0.001)| for each z in z_data_flat_distro, as a list.
    """
    fraction_err = 0.001
    return [abs(rand.normal(loc=z_val, scale=fraction_err))
            for z_val in z_data_flat_distro]
def mc_nu_error(sampled_z_func, number_mcs, binmin, binmax, bincenter):
    """Monte Carlo estimate of the per-bin standard deviation of nu.

    sampled_z_func -- callable returning one resampled vector of z points
    number_mcs     -- number of Monte Carlo resamplings to draw
    binmin, binmax -- bin edge arrays passed to gh.nu_sig_from_bins
    bincenter      -- bin centres (kept for interface compatibility; unused)
    Returns a numpy array of std(nu) per bin.
    """
    nu_vectors = []
    for jter in range(0, number_mcs):
        jter_z_data = sampled_z_func()
        # Only nu is needed; the other four return values are discarded.
        jter_nu, dummy, dummy, dummy, dummy = gh.nu_sig_from_bins(
            binmin, binmax, jter_z_data, np.ones(len(jter_z_data)))
        nu_vectors.append(jter_nu)
    # Standard deviation of nu across the MC realisations, per bin.
    # (Means/medians were computed and discarded before; removed as unused.)
    nu_vectors = np.array(nu_vectors)
    nu_stdevs = [np.std(nu_vectors[:, pter]) for pter in range(0, len(binmin))]
    return np.array(nu_stdevs)
if __name__=="__main__":
binmin, binmax, bincenter = gh.bin_r_linear(0.2, 0.8, 12)
nu_stdevs = mc_nu_error(ErSamp_flat_distro_test, 100, binmin, binmax, bincenter)
pdb.set_trace()
|
dzimine/mistral
|
mistral/api/controllers/v1/execution.py
|
Python
|
apache-2.0
| 4,285 | 0.000233 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pecan import rest
from pecan import abort
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from mistral import exceptions as ex
from mistral.api.controllers.v1 import task
from mistral.openstack.common import log as logging
from mistral.api.controllers import resource
from mistral.db import api as db_api
from mistral.engine import engine
LOG = logging.getLogger(__name__)
class Execution(resource.Resource):
    """Execution resource."""
    id = wtypes.text
    workbook_name = wtypes.text
    task = wtypes.text
    state = wtypes.text
    # Context is a JSON object, but WSME has no arbitrary-dict type, so it
    # travels as text and is converted to/from JSON manually.
    context = wtypes.text

    def to_dict(self):
        """Return the dict form with 'context' decoded from its JSON text."""
        d = super(Execution, self).to_dict()
        raw_context = d.get('context')
        if raw_context:
            d['context'] = json.loads(raw_context)
        return d

    @classmethod
    def from_dict(cls, d):
        """Build an Execution from `d`, JSON-encoding 'context' if present."""
        execution = cls()
        for attr_name, attr_value in d.items():
            if not hasattr(execution, attr_name):
                continue
            if attr_name == 'context' and attr_value:
                attr_value = json.dumps(attr_value)
            setattr(execution, attr_name, attr_value)
        return execution
class Executions(resource.Resource):
    """A collection of Execution resources."""
    # WSME collection attribute: serialized as a list of Execution objects.
    executions = [Execution]
class ExecutionsController(rest.RestController):
    """REST controller for workflow executions scoped to a workbook."""
    tasks = task.TasksController()

    @wsme_pecan.wsexpose(Execution, wtypes.text, wtypes.text)
    def get(self, workbook_name, id):
        """Return one execution, or 404 if it does not exist."""
        LOG.debug("Fetch execution [workbook_name=%s, id=%s]" %
                  (workbook_name, id))
        values = db_api.execution_get(workbook_name, id)
        if not values:
            abort(404)
        else:
            return Execution.from_dict(values)

    @wsme_pecan.wsexpose(Execution, wtypes.text, wtypes.text, body=Execution)
    def put(self, workbook_name, id, execution):
        """Update an existing execution and return its new state."""
        LOG.debug("Update execution [workbook_name=%s, id=%s, execution=%s]" %
                  (workbook_name, id, execution))
        values = db_api.execution_update(workbook_name,
                                         id,
                                         execution.to_dict())
        return Execution.from_dict(values)

    @wsme_pecan.wsexpose(Execution, wtypes.text, body=Execution,
                         status_code=201)
    def post(self, workbook_name, execution):
        """Start a new workflow execution; engine errors map to HTTP 400."""
        LOG.debug("Create execution [workbook_name=%s, execution=%s]" %
                  (workbook_name, execution))
        try:
            context = None
            if execution.context:
                context = json.loads(execution.context)
            values = engine.start_workflow_execution(execution.workbook_name,
                                                     execution.task,
                                                     context)
        except ex.MistralException as e:
            # TODO(nmakhotkin): we should use thing such as a decorator here
            abort(400, e.message)
        return Execution.from_dict(values)

    @wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204)
    def delete(self, workbook_name, id):
        """Delete an execution (204 on success)."""
        LOG.debug("Delete execution [workbook_name=%s, id=%s]" %
                  (workbook_name, id))
        db_api.execution_delete(workbook_name, id)

    @wsme_pecan.wsexpose(Executions, wtypes.text)
    def get_all(self, workbook_name):
        """List every execution belonging to the given workbook."""
        LOG.debug("Fetch executions [workbook_name=%s]" % workbook_name)
        executions = [Execution.from_dict(values)
                      for values in db_api.executions_get(workbook_name)]
        return Executions(executions=executions)
|
zephyrplugins/zephyr
|
zephyr.plugin.jython/jython2.5.2rc3/Lib/encodings/cp855.py
|
Python
|
epl-1.0
| 34,106 | 0.019615 |
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions delegate to the C-level charmap
    # helpers using this module's generated tables.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length); only the bytes are needed.
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # charmap_decode returns (text, length); only the text is needed.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Combines the stateless Codec with the stdlib stream machinery.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Combines the stateless Codec with the stdlib stream machinery.
    pass
### encodings module API
def getregentry():
    """Build the CodecInfo record that registers this module as 'cp855'."""
    codec = Codec()  # stateless, so one instance can serve both directions
    return codecs.CodecInfo(
        name='cp855',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00ef: 0x2116, # NUMERO SIGN
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00fd: 0x00a7, # SECTIO
|
samiunn/incubator-tinkerpop
|
gremlin-python/src/main/jython/tests/structure/test_graph.py
|
Python
|
apache-2.0
| 4,417 | 0.001132 |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com)'
import unittest
from unittest import TestCase
import six
from gremlin_python.statics import long
from gremlin_python.structure.graph import Edge
from gremlin_python.structure.graph import Property
from gremlin_python.structure.graph import Vertex
from gremlin_python.structure.graph import VertexProperty
from gremlin_python.structure.graph import Path
class TestGraph(TestCase):
    def test_graph_objects(self):
        """Exercise str(), labels, equality and hashing of the core graph
        element types: Vertex, Edge, VertexProperty and Property."""
        vertex = Vertex(1)
        assert "v[1]" == str(vertex)
        assert "vertex" == vertex.label
        assert "person" == Vertex(1, "person").label
        assert vertex == Vertex(1)
        #
        edge = Edge(2, Vertex(1), "said", Vertex("hello", "phrase"))
        assert "e[2][1-said->hello]" == str(edge)
        assert Vertex(1) == edge.outV
        assert Vertex("hello") == edge.inV
        assert "said" == edge.label
        assert "phrase" == edge.inV.label
        assert edge.inV != edge.outV
        #
        vertex_property = VertexProperty(long(24), "name", "marko")
        assert "vp[name->marko]" == str(vertex_property)
        assert "name" == vertex_property.label
        assert "name" == vertex_property.key
        assert "marko" == vertex_property.value
        assert long(24) == vertex_property.id
        assert isinstance(vertex_property.id, long)
        assert vertex_property == VertexProperty(long(24), "name", "marko")
        #
        property = Property("age", 29)
        assert "p[age->29]" == str(property)
        assert "age" == property.key
        assert 29 == property.value
        assert isinstance(property.value, int)
        assert property == Property("age", 29)
        if not six.PY3:
            # On Python 2, int and long values must not compare equal here.
            assert property != Property("age", long(29))
        #
        # Cross-type checks: objects of different types never compare equal,
        # same-type objects do, and __hash__ agrees with hash().
        for i in [vertex, edge, vertex_property, property]:
            for j in [vertex, edge, vertex_property, property]:
                if type(i) != type(j):
                    assert i != j
                else:
                    assert i == j
                assert i.__hash__() == hash(i)
def test_path(self):
path = Path([set(["a", "b"]), set(["c", "b"]), set([])], [1, Vertex(1), "hello"])
assert "[1, v[1], 'hello']" == str(path)
assert 1 == path["a"]
assert Vertex(1) == path["c"]
assert [1, Vertex(1)] == path["b"]
assert path[0] == 1
assert path[1] == Vertex(1)
assert path[2] == "hello"
assert 3 == len(path)
assert "hello" in path
assert "goodbye" not in path
assert Vertex(1) in path
assert Vertex(123) not in path
#
try:
temp = path[3]
raise Exception("Accessing beyond the list index should throw an index error")
except IndexError:
pass
#
try:
temp = path["zz"]
raise Exception("Accessing nothing should throw a key error")
except KeyError:
pass
#
try:
temp = path[1:2]
raise Excepti
|
on("Accessing using slices should throw a type error")
except TypeError:
pass
#
assert path == path
assert hash(path) == hash(path)
path2 = Path([set(["a", "b"]), set(["c", "b"]), set([])], [1, Vertex(1), "hello"])
assert path == path2
assert hash(path) == hash(path2)
assert path != Path([set(["a"]), set(["c", "b"]), set([])], [1, Vertex(1), "hello"])
assert path != Path([set(["a", "b"]), set(["c", "b"]),
|
set([])], [3, Vertex(1), "hello"])
if __name__ == '__main__':
unittest.main()
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/operations/_route_filters_operations.py
|
Python
|
mit
| 27,222 | 0.004629 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFiltersOperations(object):
"""RouteFiltersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the raw DELETE request for a route filter (single round-trip,
        no polling); ``begin_delete`` wraps this helper with an LRO poller."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map the well-known HTTP failures to specific Azure exception types;
        # callers may extend the mapping via kwargs['error_map'].
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202 = delete accepted, 204 = resource already absent; anything
        # else is raised through the error map.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            # Custom response hook supplied by the caller via 'cls'.
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'}  # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified route filter.
:param resource_group_name: The name of the resource group.
|
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not p
|
oll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
route_filter_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
"""Gets the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param expand: Expands referenced express route bgp peering resources.
:type expand: str
:keyword callable cls: A custom
|
jwren/intellij-community
|
python/testData/refactoring/inlineFunction/removingTypeComment/main.py
|
Python
|
apache-2.0
| 81 | 0.037037 |
def foo():
# type: () -> int
p
|
rint(42)
return 42
re
|
s = fo<caret>o()
|
nachtmaar/androlyze
|
androlyze/celery/CeleryConstants.py
|
Python
|
mit
| 1,276 | 0.003135 |
# encoding: utf-8
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
'''
Holds some constants/settings related to celery.
'''
from androlyze.settings import *
############################################################
#---Max retry wait time
############################################################
# maximum wait time for database open
CELERY_DATABASE_OPEN_RETRY_MAX_TIME = 32
# maximum wait time for import script error
CELERY_IMPORT_SCRIPTS_ERROR_RETRY_MAX_TIME = 0
# maximum wait time for database storage
C
|
ELERY_DATABASE_STORE_RETRY_MAX_TIME = 120
############################################################
#---Result backend constants
# used to get information from callback handlers
############################################################
CELERY_RESULT_BACKEND_KEY_RESULT = "result"
CELERY_RESULT_BACKEND_KEY_TRACEBACK = "traceback"
CELERY_RESULT_BACKEND_KEY_STATUS = "status"
|
############################################################
#---Other
############################################################
# value for retrying until success
CELERY_RETRY_INFINITE = None
def get_analyze_task_name():
    """Return the registered celery task name of ``AnalyzeTask``.

    NOTE(review): the import is function-local, presumably to defer it and
    avoid a circular import at module load time -- confirm before hoisting.
    """
    from androlyze.analyze.distributed.tasks.AnalyzeTask import AnalyzeTask
    return AnalyzeTask.name
|
manuelbua/gitver
|
gitver/commands.py
|
Python
|
apache-2.0
| 15,445 | 0.000129 |
#!/usr/bin/env python2
# coding=utf-8
"""
Defines gitver commands
"""
import re
import os
import sys
from string import Template
from termcolors import term, bold
from git import get_repo_info
from gitver.storage import KVStore
from sanity import check_gitignore
from defines import CFGDIR, PRJ_ROOT, CFGDIRNAME
from version import gitver_version, gitver_buildid
# file where to store NEXT strings <=> TAG user-defined mappings
NEXT_STORE_FILE = os.path.join(CFGDIR, ".next_store")
TPLDIR = os.path.join(CFGDIR, 'templates')
user_version_matcher = r"v{0,1}(?P<maj>\d+)\.(?P<min>\d+)\.(?P<patch>\d+)" \
r"(?:\.(?P<revision>\d+))?$"
#
# helpers
#
def template_path(name):
    """Return the absolute path of template file *name* inside TPLDIR."""
    full_path = os.path.join(TPLDIR, name)
    return full_path
def parse_templates(cfg, templates, repo, next_custom, preview):
    """
    Parse one or more templates, substitute placeholder variables with
    real values and write the result to the file specified in the template.

    If preview is True, then the output will be written to the stdout while
    informative messages will be output to the stderr.

    templates is a space-separated list of template file names; processing
    aborts (sys.exit) on the first missing template or substitution error.
    """
    for t in templates.split(' '):
        tpath = template_path(t)
        if not os.path.exists(tpath):
            term.err("Couldn't find the \"" + t + "\" template")
            sys.exit(1)

        with open(tpath, 'r') as fp:
            lines = fp.readlines()

        if len(lines) < 2:
            term.err("The template \"" + t + "\" is not valid, aborting.")
            return
        if not lines[0].startswith('#'):
            term.err("The template \"" + t + "\" doesn't define any valid "
                     "output, aborting.")
            return

        # The first line is a '#'-prefixed comment naming the output file.
        output = str(lines[0]).strip(' #\n')
        # resolve relative paths to the project's root
        if not os.path.isabs(output):
            output = os.path.join(PRJ_ROOT, output)
        outdir = os.path.dirname(output)
        if not os.path.exists(outdir):
            term.err("The template output directory \"" + outdir +
                     "\" doesn't exists.")

        term.info("Processing template \"" + bold(t) + "\" for " + output +
                  "...")

        lines = lines[1:]
        xformed = Template("".join(lines))
        vstring = build_version_string(cfg, repo, False, next_custom)
        args = build_format_args(cfg, repo, next_custom)
        keywords = {
            'CURRENT_VERSION': vstring,
            'MAJOR': args['maj'],
            'MINOR': args['min'],
            'PATCH': args['patch'],
            'REV': args['rev'],
            'REV_PREFIX': args['rev_prefix'],
            'BUILD_ID': args['build_id'],
            'FULL_BUILD_ID': args['build_id_full'],
            'COMMIT_COUNT': args['commit_count'],
            'COMMIT_COUNT_STR':
                str(args['commit_count']) if args['commit_count'] > 0 else '',
            'COMMIT_COUNT_PREFIX': args['commit_count_prefix'],
            'META_PR': args['meta_pr'],
            'META_PR_PREFIX': args['meta_pr_prefix']
        }

        try:
            res = xformed.substitute(keywords)
        except KeyError as e:
            term.err("Unknown key \"" + e.message + "\" found, aborting.")
            sys.exit(1)

        if not preview:
            try:
                # Context manager guarantees the handle is closed even if
                # the write itself fails (the original leaked fp here).
                with open(output, 'w') as fp:
                    fp.write(res)
            except IOError:
                term.err("Couldn't write file \"" + output + "\"")
                sys.exit(1)
        else:
            term.out(res)

        wrote_bytes = len(res) if preview else os.stat(output).st_size
        term.info("Done, " + str(wrote_bytes) + " bytes written.")
def parse_user_next_stable(user):
    """
    Parse the specified user-defined string containing the next stable version
    numbers and returns the discretized matches in a dictionary.

    Returns False when the string doesn't match the expected version format.
    """
    match = re.match(user_version_matcher, user)
    if match is None:
        return False
    data = match.groupdict()
    # Sanity guard kept from the original: a valid match always yields the
    # maj/min/patch(/revision) groups.
    if len(data) < 3:
        return False
    return data
def build_format_args(cfg, repo_info, next_custom=None):
|
"""
Builds the formatting arguments by processing the specified repository
information and returns them.
If a tag defines pre-release metadata, this will have the precedence
over any existing user-defined string.
"""
in_next = repo_info['count'] > 0
has_next_custom = next_custom is not None and len(next_custom) > 0
vmaj = repo_info
|
['maj']
vmin = repo_info['min']
vpatch = repo_info['patch']
vrev = repo_info['rev']
vcount = repo_info['count']
vpr = repo_info['pr']
vbuildid = repo_info['build-id']
has_pr = vpr is not None
has_rev = vrev is not None
# pre-release metadata in a tag has precedence over user-specified
# NEXT strings
if in_next and has_next_custom and not has_pr:
u = parse_user_next_stable(next_custom)
if not u:
term.err("Invalid custom NEXT version numbers detected!")
sys.exit(1)
vmaj = u['maj']
vmin = u['min']
vpatch = u['patch']
vrev = u['revision']
has_rev = vrev is not None
meta_pr = vpr if has_pr else \
cfg['default_meta_pr_in_next'] if in_next and has_next_custom else \
cfg['default_meta_pr_in_next_no_next'] if in_next else ''
args = {
'maj': vmaj,
'min': vmin,
'patch': vpatch,
'rev': vrev if has_rev else '',
'rev_prefix': '.' if has_rev else '',
'meta_pr': meta_pr,
'meta_pr_prefix': cfg['meta_pr_prefix'] if len(meta_pr) > 0 else '',
'commit_count': vcount if vcount > 0 else '',
'commit_count_prefix': cfg['commit_count_prefix'] if vcount > 0 else '',
'build_id': vbuildid,
'build_id_full': repo_info['full-build-id']
}
return args
def build_version_string(cfg, repo, promote=False, next_custom=None):
    """
    Build the final version string from the repository information.

    When *promote* is True, simulate the next tagged version: return the
    user-specified next version string if one is set, else an empty string.
    Otherwise format via cfg['format'] (or cfg['format_next'] when there are
    commits past the last tag).
    """
    has_next_custom = next_custom is not None and len(next_custom) > 0
    if promote:
        return next_custom if has_next_custom else ''
    in_next = repo['count'] > 0
    fmt = cfg['format_next'] if in_next else cfg['format']
    return fmt % build_format_args(cfg, repo, next_custom)
#
# commands
#
def cmd_version(cfg, args):
    """Print gitver's own version string, full build id and license."""
    version_str = 'n/a' if gitver_version is None else 'v' + gitver_version
    build_str = 'n/a' if gitver_buildid is None else gitver_buildid
    term.out("This is gitver " + bold(version_str))
    term.out("Full build ID is " + bold(build_str))
    from gitver import __license__
    term.out(__license__)
def cmd_init(cfg, args):
"""
Initializes the current repository by creating the gitver's configuration
directory and creating the default configuration file, if none is present.
Multiple executions of this command will regenerate the default
configuration file whenever it's not found.
"""
from config import create_default_configuration_file
i = 0
if not os.path.exists(CFGDIR):
i += 1
os.makedirs(CFGDIR)
if not os.path.exists(TPLDIR):
i += 1
os.makedirs(TPLDIR)
# try create the default configuration file
wrote_cfg = create_default_configuration_file()
if wrote_cfg:
term.out("gitver has been initialized and configured.")
else:
|
NiceCircuits/pcbLibraryManager
|
src/pcbLibraryManager/symbols/symbolsIC.py
|
Python
|
cc0-1.0
| 3,334 | 0.013197 |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 2 19:02:52 2015
@author: piotr at nicecircuits.com
"""
from libraryManager.symbol import symbol
from libraryManager.symbolPrimitive import *
from libraryManager.defaults import defaults
from libraryManager.common import *
class symbolIC(symbol):
"""
IC symbol generator
Generate symbol with pinsLeft, pinsRight pins on each side
pinsLeft, pinsRight: list of ["pin name", "pin number", type]
width: width of symbol rectangle
"""
def __init__(self, name, pinsLeft, pinsRight, width, refDes=defaults.icRefDes,\
showPinNames=True, showPinNumbers=True):
super().__init__(name, refDes, showPinNames, showPinNumbers)
# body
height = (max(len(pinsLeft), len(pinsRight))+1)*100
offset = 50 if (height % 200) > 0 else 0
if width % 200 >0:
width = (width//200+1) * 200
self.log.debug("IC symbol body: pins: %d, %d; height: %d, offset: %d" %\
(len(pinsLeft), len(pinsRight), height, offset))
self.primitives.append(symbolRectangle(0, position=[0,offset],\
dimensions=[width, height], filled=fillType.background))
# pins
pinLength = 200
pins = [pinsLeft, pinsRight]
for x in range(2):
y = hei
|
ght/2-100+offset
for p in pins[x]:
if p:
|
if isinstance(p[2],str):
p[2]=pinType.fromStr[p[2]]
self.pins.append(symbolPin(str(p[0]), str(p[1]), [(width/2+pinLength)*(1 if x>0 else -1),y],\
pinLength, p[2], rotation=180 if x>0 else 0))
y = y-100
self.nameObject.position=[0, height/2 + self.nameObject.height + offset]
self.valueObject.position=[0, -(height/2 + self.valueObject.height) + offset]
class symbolICquad(symbol):
    """
    Quad IC symbol generator
    Generate symbol with pins[] on each side
    pins: list of 0..4 lists of ["pin name", "pin number", type]
    top, right, bottom, left, clockwise
    size: size of symbol rectangle (auto if 0)
    """
    def __init__(self, name, pins, size=0, refDes=defaults.icRefDes,\
        showPinNames=True, showPinNumbers=True):
        super().__init__(name, refDes, showPinNames, showPinNumbers)
        # body
        # Auto-size from the top row's pin count when size is 0; the 50-unit
        # offset presumably keeps pins on a 100-unit grid when the body size
        # is not a multiple of 200 -- TODO confirm against the layout grid.
        if size==0:
            size = (len(pins[0])+5)*100
        offset = 50 if (size % 200) > 0 else 0
        self.log.debug("IC quad symbol body: pins: %d; size: %d, offset: %d" %\
            (sum([len(p) for p in pins]), size, offset))
        self.primitives.append(symbolRectangle(0, position=[offset,offset],\
            dimensions=[size, size], filled=fillType.background))
        # pins
        pinLength = 200
        # Walk the four sides clockwise; pin positions are generated along
        # the top edge and rotated into place by -90 degrees per side.
        for side in range(4):
            pos = [-len(pins[0])*50+50, size/2+pinLength]
            rot = -side*90
            for p in pins[side]:
                if p:
                    self.pins.append(symbolPin(p[0], p[1],
                        translatePoints(rotatePoints([pos],rot),[offset,offset])[0],\
                        pinLength, p[2], rotation=(rot-90)%360))
                # Advance one 100-unit pitch even for empty (skipped) slots.
                pos[0] = pos[0]+100
        # self.nameObject.position=[0, height/2 + self.nameObject.height + offset]
        # self.valueObject.position=[0, -(height/2 + self.valueObject.height) + offset]
|
phihes/sds-models
|
sdsModels/models.py
|
Python
|
mit
| 12,892 | 0.000853 |
import sklearn.cross_validation as cv
import sklearn.dummy as dummy
from sklearn.mixture import GMM
from sklearn.hmm import GMMHMM
from sklearn import linear_model, naive_bayes
import collections
import itertools
import pandas as pd
from testResults import TestResults
from counters import *
import utils as utils
class Model():
    """Abstract base for rating-prediction models.

    Subclasses implement _train() and set _name; this base supplies
    prediction/testing plumbing plus leave-one-out and k-fold cross
    validation over data grouped by a 'label' column.
    """
    __metaclass__ = ABCMeta
    # Defaults overwritten per-instance in __init__.
    params = {}
    # True when _train() returns an sklearn estimator whose class labels
    # start at 0 (predictions are shifted by +1 in _test).
    isSklearn = True
    def __init__(self, params, verbose=False):
        self.params = params
        self.verbose = verbose
    def printv(self, arg, title=None):
        """Print arg (optionally preceded by title) when verbose is on."""
        if self.verbose:
            if title is not None:
                print title
            print arg
    @property
    def name(self):
        # _name is provided by each concrete subclass.
        return self._name
    @abstractmethod
    def _train(self, data):
        """Returns a trained model."""
        pass
    def _test(self, model, testData, resultObj):
        """Compares predictions made by specified model against test data.
        Returns a TestResults object.
        """
        # restrict test data to principal component features
        features = self.params['features']
        test = np.array(testData[features])
        # predict a dialog sequence using test data
        # sklearn counts from 0 so add 1...
        if self.isSklearn:
            pred = [int(r) + 1 for r in list(model.predict(test))]
        else:
            pred = [int(r) for r in list(model.predict(test))]
        # extract true ratings from test data
        true = [int(rating) for rating in testData['rating'].values.tolist()]
        resultObj.compare(true, pred)
        return resultObj
    def loocv(self, data):
        """Leave-one-out cross validation using given data.
        Returns a TestResults objects, where results are averages from the
        cross validation steps.
        """
        mask = cv.LeaveOneLabelOut(data['label'].values)
        results = TestResults(self.name, verbose=self.verbose)
        for trainMask, testMask in mask:
            # training
            trainingData = data.loc[trainMask]
            self.printv(trainingData, "training data:")
            model = self._train(trainingData)
            # testing
            testData = data.loc[testMask]
            self.printv(testData, "test data:")
            # leave p labels out
            for label, testGroup in testData.groupby("label"):
                results = self._test(model, testGroup, results)
        return results
    def kfoldscv(self, data, folds):
        """K-folds cross validation using given data and number of folds.
        Returns a TestResults objects, where results are averages from the
        cross validation steps.
        """
        results = TestResults(self.name, verbose=self.verbose)
        # Fold over distinct labels, not rows, so each label's rows stay
        # together in one fold.
        labels = list(np.unique(data['label'].values))
        for tr, te in cv.KFold(len(labels), n_folds=folds):
            trainD = data[data['label'].isin([labels[i] for i in tr])]
            testD = data[data['label'].isin([labels[i] for i in te])]
            self.printv(trainD, "training data:")
            self.printv(testD, "test data:")
            model = self._train(trainD)
            for label, testGroup in testD.groupby("label"):
                results = self._test(model, testGroup, results)
        return results
    def setFeatures(self, features):
        """Replace the feature-column list used for training and testing."""
        self.params['features'] = features
class Dummy(Model):
    """Baseline model wrapping sklearn's DummyClassifier.

    params: 'strategy' (required), optional 'constant' (only meaningful for
    the 'constant' strategy) and 'features' naming the training columns.
    """
    _name = "dummy"

    def _train(self, data):
        """Fit a DummyClassifier on the configured feature columns against
        the 'rating' column and return it."""
        # Build the constructor kwargs once instead of duplicating the
        # DummyClassifier(...) call in two branches; membership test on the
        # dict directly rather than .keys().
        kwargs = {'strategy': self.params['strategy']}
        if 'constant' in self.params:
            kwargs['constant'] = self.params['constant']
        model = dummy.DummyClassifier(**kwargs)
        d = np.array(zip(*[data[f].values for f in self.params['features']]))
        y = np.array(data['rating'].values)
        model.fit(d, y)
        return model
class Gmm(Model):
    """A Gaussian mixture model.

    Parameters are the number of mixture components (num_mixc) and the
    covariance type (cov_type), e.g.::

        model = Gmm(params={'num_mixc': 3, 'cov_type': 'diag'})
    """
    _name = "GMM"

    def _train(self, data):
        """Fit sklearn's GMM on the configured feature columns."""
        p = self.params
        samples = np.array(zip(*[data[f].values for f in p['features']]))
        # High EM iteration count (as in the original) so repeated fits
        # converge to constant results.
        model = GMM(p['num_mixc'], p['cov_type'], n_iter=300)
        model.fit(samples)
        return model
class Gmmhmm(Model):
    """A hidden Markov model with Gaussian mixture emissions.

    Parameters are number of mixture components (num_mixc), covariance type
    (cov_type) and states (states). One Gaussian mixture model is created for
    each state. Example:
        model = Gmmhmm(params = {'num_mixc': 3,
                                 'cov_type': 'diag',
                                 'states': [1,2,3,4,5]})
    """
    _name = "GMM-HMM"

    def _train(self, data):
        """Trains a GMMHMM model, using the sklearn implementation and maximum-
        likelihood estimates as HMM parameters (Hmm.mle(...)).
        """
        # parameters
        features = self.params['features']
        num_mixc = self.params['num_mixc']
        cov_type = self.params['cov_type']
        states = self.params['states']
        # train one GMM for each state
        mixes = list()
        for state in states:
            # select data with current state label
            d = data[data.rating == state]
            # prepare data shape: one row per sample, one column per feature
            d = np.array(zip(*[d[f].values for f in features]))
            # init GMM
            gmm = GMM(num_mixc, cov_type)
            # train
            gmm.fit(d)
            mixes.append(gmm)
        # train HMM with init, trans, GMMs=mixes
        mle = Hmm.mle(MatrixCounterNoEmissions, data, states)
        model = GMMHMM(n_components=len(states), init_params='', gmms=mixes)
        model.transmat_ = mle.transition
        model.startprob_ = mle.initial
        return model
class Ols(Model):
    """Ordinary least squares regression."""
    _name = "OLS"
    isSklearn = True

    def _train(self, data):
        """Fit an sklearn LinearRegression on the configured feature columns."""
        feats = self.params['features']
        X = np.array(zip(*[data[f].values for f in feats]))
        y = np.array(data['rating'])
        # fit() returns the fitted estimator itself
        return linear_model.LinearRegression().fit(X, y)
class LogisticRegression(Model):
    """Logistic regression classifier with configurable class weighting."""
    _name = "Logit"
    isSklearn = True

    def _train(self, data):
        """Fit an sklearn LogisticRegression using the 'class_weight' param."""
        X = np.array(zip(*[data[f].values for f in self.params['features']]))
        y = np.array(data['rating'])
        # fit() returns the fitted estimator itself
        return linear_model.LogisticRegression(
            class_weight=self.params['class_weight']).fit(X, y)
class GaussianNaiveBayes(Model):
    """Gaussian naive Bayes classifier."""
    _name = "G-NB"
    isSklearn = True

    def _train(self, data):
        """Fit an sklearn GaussianNB on the configured feature columns."""
        X = np.array(zip(*[data[f].values for f in self.params['features']]))
        y = np.array(data['rating'])
        # fit() returns the fitted estimator itself
        return naive_bayes.GaussianNB().fit(X, y)
class MultinomialNaiveBayes(Model):
    """Multinomial naive Bayes with configurable smoothing and prior fitting."""
    _name = "M-NB"
    isSklearn = True

    def _train(self, data):
        """Fit an sklearn MultinomialNB using the 'alpha' and 'fit_prior' params."""
        X = np.array(zip(*[data[f].values for f in self.params['features']]))
        y = np.array(data['rating'])
        nb = naive_bayes.MultinomialNB(alpha=self.params['alpha'],
                                       fit_prior=self.params['fit_prior'])
        return nb.fit(X, y)
class Hmm(Model):
"""A hidden Markov model, using the Nltk implementation and maximum-
likelihood parameter estimates.
"""
_name = "HMM"
isSklearn = False
Parameters = collections.namedtuple(
'Parameters', 'initial transition emission emissionAlph')
class NltkWrapper():
def __init__(self, states
|
Polychart/builder
|
server/polychart/wsgi.py
|
Python
|
agpl-3.0
| 1,058 | 0 |
"""
WSGI config for polychart project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application

# Module-level WSGI callable picked up by WSGI servers.
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
mobiledayadmin/DenyHosts
|
scripts/restricted_from_invalid.py
|
Python
|
gpl-2.0
| 884 | 0.013575 |
#!/usr/bin/env python
# Python 2 maintenance script: print the most-attempted invalid usernames
# recorded in DenyHosts' users-invalid file.
import os, sys
def usage():
    # print a usage hint, then abort
    print "%s WORK_DIR [num_results]" % sys.argv[0]
    sys.exit(1)
try:
    work_dir = sys.argv[1]
except:
    print "you must specify your DenyHosts WORK_DIR"
    usage()
try:
    num = int(sys.argv[2])
except:
    # optional second argument; default: show the top 10 usernames
    num = 10
fname = os.path.join(work_dir, "users-invalid")
try:
    fp = open(fname, "r")
except:
    print fname, "does not exist"
    sys.exit(1)
d = {}
# bucket usernames by their attempt count: {attempts: [username, ...]}
# each line looks like "username:attempts:timestamp"; malformed lines are skipped
for line in fp:
    try:
        foo = line.split(":")

        username = foo[0]
        attempts = int(foo[1])
        # timestamp = foo[2].strip()
    except:
        continue
    l = d.get(attempts, [])
    l.append(username)
    d[attempts] = l
fp.close()
# walk attempt counts from highest to lowest, printing up to num names
keys = d.keys()
keys.sort()
keys.reverse()
i = 0
for key in keys:
    l = d.get(key)
    for username in l:
        i += 1
        print username
        if i >= num: break
    if i >= num: break
|
saisankargochhayat/algo_quest
|
leetcode/106. Construct Binary Tree from Inorder and Postorder Traversal/soln.py
|
Python
|
apache-2.0
| 978 | 0.00818 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# We use the inorder to find which elements are left and right of the curr element.
# And the post order to start with the first elemeent and then construct right and left trees.
class Solution:
    def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
        """Rebuild a binary tree from its inorder and postorder traversals.

        The last postorder element is always the current subtree's root; its
        inorder position splits the remaining values into left and right
        subtrees. Consumes *postorder* in place (right subtree first, since
        postorder is popped from the back).
        """
        # Map each value to its inorder index for O(1) root lookups.
        inorderMap = {v: k for k, v in enumerate(inorder)}

        def helper(inorderL: int, inorderR: int):
            # base case: empty inorder range -> no subtree
            if inorderL >= inorderR:
                return None
            nonlocal postorder
            curr = postorder.pop()
            root = TreeNode(curr)
            currPos = inorderMap[curr]
            # build right before left: postorder is consumed from the back
            root.right = helper(currPos + 1, inorderR)
            root.left = helper(inorderL, currPos)
            return root

        return helper(0, len(inorder))
|
moneta-project/moneta-2.0.1.0
|
contrib/devtools/update-translations.py
|
Python
|
mit
| 6,780 | 0.00649 |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'moneta_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
    """Abort with an error unless the working directory is the repo root."""
    if os.path.exists('.git'):
        return
    print('No .git directory found')
    print('Execute this script at the root of the repository', file=sys.stderr)
    exit(1)
def fetch_all_translations():
    """Pull every translation via the tx tool; abort on a non-zero exit."""
    exit_code = subprocess.call([TX, 'pull', '-f'])
    if exit_code:
        print('Error while fetching translations', file=sys.stderr)
        exit(1)
def find_format_specifiers(s):
    """Return the character following each '%' in *s*, in order.

    Raises IndexError when *s* ends with a bare '%' — callers rely on this
    to detect malformed translations.
    """
    found = []
    start = 0
    while True:
        idx = s.find('%', start)
        if idx < 0:
            return found
        found.append(s[idx + 1])
        start = idx + 2
def split_format_specifiers(specifiers):
    """Split specifiers into Qt-style numeric ones and strprintf-style others.

    Numeric (Qt) specifiers may appear in any order, so they come back as a
    set; the rest (strprintf) must stay in their original order (a list).
    """
    qt_digits = {'1', '2', '3', '4', '5', '6', '7', '8', '9'}
    numeric = [s for s in specifiers if s in qt_digits]
    other = [s for s in specifiers if s not in qt_digits]
    return set(numeric), other
def sanitize_string(s):
    """Collapse newlines to spaces so *s* prints on a single line."""
    return ' '.join(s.split('\n'))
def check_format_specifiers(source, translation, errors):
    """Verify *translation* uses the same format specifiers as *source*.

    Appends a human-readable message to *errors* and returns False on a
    parse failure or specifier mismatch; returns True when they agree.
    """
    source_f = split_format_specifiers(find_format_specifiers(source))
    # assert that no source messages contain both Qt and strprintf format specifiers
    # if this fails, go change the source as this is hacky and confusing!
    assert(not(source_f[0] and source_f[1]))
    try:
        translation_f = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        # a trailing bare '%' in the translation raises IndexError
        errors.append("Parse error in translation '%s'" % sanitize_string(translation))
        return False
    else:
        if source_f != translation_f:
            errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
            return False
    return True
def all_ts_files(suffix=''):
    """Yield (basename, path) for every locale .ts file except the source
    language. With *suffix*, match '.ts<suffix>' files and strip the suffix
    from the yielded basename."""
    wanted_ending = '.ts' + suffix
    for name in os.listdir(LOCALE_DIR):
        # process only language files, and do not process source language
        if not name.endswith(wanted_ending) or name == SOURCE_LANG + suffix:
            continue
        if suffix:
            name = name[0:-len(suffix)]
        yield name, os.path.join(LOCALE_DIR, name)
# Control bytes that are invalid in XML (tab included; \n and \r excluded).
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
    """Return *s* with all invalid control bytes stripped out."""
    return b''.join(FIX_RE.split(s))
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
    """Qt-compatible CDATA escaping: also encode quotes, unlike ElementTree."""
    text = _orig_escape_cdata(text)
    return text.replace("'", '&apos;').replace('"', '&quot;')
def postprocess_translations(reduce_diff_hacks=False):
    """Validate and clean every fetched .ts file in place.

    Renames each file to *.orig, strips invalid control characters, drops
    translations whose format specifiers don't match the source, removes
    location tags and unfinished messages, then rewrites the file. Returns
    True when any translation had to be discarded.
    """
    print('Checking and postprocessing...')
    if reduce_diff_hacks:
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata
    for (filename,filepath) in all_ts_files():
        os.rename(filepath, filepath+'.orig')
    have_errors = False
    for (filename,filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)
        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]
                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors)
                    for error in errors:
                        print('%s: %s' % (filename, error))
                    if not valid: # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True
                # Remove location tags
                for location in message.findall('location'):
                    message.remove(location)
                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)
        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors
if __name__ == '__main__':
    # Fetch all translations from transifex and post-process them in place.
    check_at_repository_root()
    fetch_all_translations()
    postprocess_translations()
|
Ahmad31/Web_Flask_Cassandra
|
flask/lib/python2.7/site-packages/pony/orm/tests/testutils.py
|
Python
|
apache-2.0
| 4,752 | 0.008207 |
from __future__ import absolute_import, print_function, division
from pony.py23compat import basestring
from functools import wraps
from contextlib import contextmanager
from pony.orm.core import Database
from pony.utils import import_module
def raises_exception(exc_class, msg=None):
    """Decorator for unittest-style test methods: the wrapped method must
    raise *exc_class*. With *msg* given, the exception's first argument has
    to equal it; an argument-less exception requires *msg* to be None."""
    def decorator(func):
        def wrapper(self, *args, **kwargs):
            try:
                func(self, *args, **kwargs)
                self.fail("expected exception %s wasn't raised" % exc_class.__name__)
            except exc_class as exc:
                if not exc.args:
                    self.assertEqual(msg, None)
                elif msg is not None:
                    self.assertEqual(
                        exc.args[0], msg,
                        "incorrect exception message. expected '%s', got '%s'"
                        % (msg, exc.args[0]))
        # preserve the test method's name for the unittest runner
        wrapper.__name__ = func.__name__
        return wrapper
    return decorator
@contextmanager
def raises_if(test, cond, exc_class, exc_msg=None):
    """Context manager: when *cond* is true the body must raise *exc_class*
    (optionally matching *exc_msg*); when *cond* is false it must not raise.

    In *exc_msg*, a leading/trailing '...' acts as a wildcard: both ends ->
    substring match, leading only -> suffix match, trailing only -> prefix
    match; a bare '...' matches anything.
    """
    try:
        yield
    except exc_class as e:
        test.assertTrue(cond)
        if exc_msg is None: pass
        elif exc_msg.startswith('...') and exc_msg != '...':
            if exc_msg.endswith('...'):
                test.assertIn(exc_msg[3:-3], str(e))
            else:
                test.assertTrue(str(e).endswith(exc_msg[3:]))
        elif exc_msg.endswith('...'):
            test.assertTrue(str(e).startswith(exc_msg[:-3]))
        else:
            test.assertEqual(str(e), exc_msg)
    else:
        # no exception: only acceptable when cond was false
        test.assertFalse(cond)
def flatten(x):
    """Recursively flatten nested iterables into one flat list; strings
    (basestring) count as atoms and are never iterated."""
    flat = []
    for item in x:
        if isinstance(item, basestring) or not hasattr(item, "__iter__"):
            flat.append(item)
        else:
            flat.extend(flatten(item))
    return flat
class TestConnection(object):
    """Stand-in DB connection: no-op commit/rollback, shared mock cursor."""

    def __init__(self, database):
        self.database = database
        # postgres connections get the autocommit flag set
        if database and database.provider_name == 'postgres':
            self.autocommit = True

    def commit(self):
        pass

    def rollback(self):
        pass

    def cursor(self):
        # every connection hands out the single module-level mock cursor
        return test_cursor
class TestCursor(object):
    """Mock DB-API cursor: accepts any SQL and always reports no rows."""

    def __init__(self):
        self.description = []
        self.rowcount = 0

    def execute(self, sql, args=None):
        pass

    def fetchone(self):
        return None

    def fetchmany(self, size):
        return []

    def fetchall(self):
        return []

# single cursor instance shared by every TestConnection
test_cursor = TestCursor()
class TestPool(object):
    """Mock connection pool that hands out TestConnection objects."""

    def __init__(self, database):
        self.database = database

    def connect(self):
        return TestConnection(self.database)

    def release(self, con):
        pass

    def drop(self, con):
        pass

    def disconnect(self):
        pass
class TestDatabase(Database):
    # override points for tests: force a specific provider and/or fake the
    # reported server version; `sql` records the last statement executed
    real_provider_name = None
    raw_server_version = None
    sql = None
    def bind(self, provider_name, *args, **kwargs):
        # Bind against the real provider module but disable the connection
        # check and swap in a mock pool, so no actual database is touched.
        if self.real_provider_name is not None:
            provider_name = self.real_provider_name
        self.provider_name = provider_name
        provider_module = import_module('pony.orm.dbproviders.' + provider_name)
        provider_cls = provider_module.provider_cls
        raw_server_version = self.raw_server_version
        if raw_server_version is None:
            # per-provider default versions used when the test didn't fake one
            if provider_name == 'sqlite': raw_server_version = '3.7.17'
            elif provider_name in ('postgres', 'pygresql'): raw_server_version = '9.2'
            elif provider_name == 'oracle': raw_server_version = '11.2.0.2.0'
            elif provider_name == 'mysql': raw_server_version = '5.6.11'
            else: assert False, provider_name  # pragma: no cover
        # normalize "X.Y" / "X.Y.Z" into a 3-tuple of ints
        t = [ int(component) for component in raw_server_version.split('.') ]
        if len(t) == 2: t.append(0)
        server_version = tuple(t)
        if provider_name in ('postgres', 'pygresql'):
            # postgres versions are encoded as a single integer, e.g. 9.2.0 -> 90200
            server_version = int('%d%02d%02d' % server_version)
        class TestProvider(provider_cls):
            def inspect_connection(provider, connection):
                pass

        TestProvider.server_version = server_version
        kwargs['pony_check_connection'] = False
        kwargs['pony_pool_mockup'] = TestPool(self)
        Database.bind(self, TestProvider, *args, **kwargs)
    def _execute(database, sql, globals, locals, frame_depth):
        # tests must never reach the raw execute path
        assert False  # pragma: no cover
    def _exec_sql(database, sql, arguments=None, returning_id=False):

        # record the statement/arguments for assertions instead of executing
        assert type(arguments) is not list and not returning_id
        database.sql = sql
        database.arguments = arguments
        return test_cursor
    def generate_mapping(database, filename=None, check_tables=True, create_tables=False):
        # tests never create real tables
        return Database.generate_mapping(database, filename, create_tables=False)
|
christi3k/zulip
|
zerver/tornado/descriptors.py
|
Python
|
apache-2.0
| 821 | 0.00609 |
from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Dict, Optional
if False:
import zerver.tornado.event_queue
# Registry mapping tornado handler ids to their client descriptors.
descriptors_by_handler_id = {}  # type: Dict[int, zerver.tornado.event_queue.ClientDescriptor]

def get_descriptor_by_handler_id(handler_id):
    # type: (int) -> zerver.tornado.event_queue.ClientDescriptor
    """Return the descriptor registered for *handler_id*, or None."""
    return descriptors_by_handler_id.get(handler_id)

def set_descriptor_by_handler_id(handler_id, client_descriptor):
    # type: (int, zerver.tornado.event_queue.ClientDescriptor) -> None
    """Register *client_descriptor* under *handler_id*."""
    descriptors_by_handler_id[handler_id] = client_descriptor

def clear_descriptor_by_handler_id(handler_id, client_descriptor):
    # type: (int, Optional[zerver.tornado.event_queue.ClientDescriptor]) -> None
    """Drop the registration for *handler_id* (KeyError if absent)."""
    del descriptors_by_handler_id[handler_id]
SINGROUP/pycp2k
|
pycp2k/classes/_basis4.py
|
Python
|
lgpl-3.0
| 328 | 0.006098 |
from pycp2k.inputsection import InputSection
class _basis4(InputSection):
    """CP2K &BASIS input section: a list of bare repeated default keywords."""

    def __init__(self):
        InputSection.__init__(self)
        self.Default_keyword = []
        self._name = "BASIS"
        self._repeated_default_keywords = {'Default_keyword': 'DEFAULT_KEYWORD'}
        self._attributes = ['Default_keyword']
WoLpH/python-progressbar
|
tests/test_stream.py
|
Python
|
bsd-3-clause
| 2,391 | 0 |
import io
import sys
import pytest
import progressbar
def test_nowrap():
    """wrap() with no arguments must leave sys.stdout/sys.stderr untouched."""
    # Make sure we definitely unwrap
    for i in range(5):
        progressbar.streams.unwrap(stderr=True, stdout=True)
    stdout = sys.stdout
    stderr = sys.stderr
    progressbar.streams.wrap()
    assert stdout == sys.stdout
    assert stderr == sys.stderr
    progressbar.streams.unwrap()
    assert stdout == sys.stdout
    assert stderr == sys.stderr
    # Make sure we definitely unwrap
    for i in range(5):
        progressbar.streams.unwrap(stderr=True, stdout=True)
def test_wrap():
    """Wrapping stdout/stderr replaces them; wrapping twice is a no-op."""
    # Make sure we definitely unwrap
    for i in range(5):
        progressbar.streams.unwrap(stderr=True, stdout=True)
    stdout = sys.stdout
    stderr = sys.stderr
    progressbar.streams.wrap(stderr=True, stdout=True)
    assert stdout != sys.stdout
    assert stderr != sys.stderr
    # Wrap again
    stdout = sys.stdout
    stderr = sys.stderr
    progressbar.streams.wrap(stderr=True, stdout=True)
    assert stdout == sys.stdout
    assert stderr == sys.stderr
    # Make sure we definitely unwrap
    for i in range(5):
        progressbar.streams.unwrap(stderr=True, stdout=True)
def test_excepthook():
    """excepthook() must accept a live exc_info; unwrapping must be idempotent."""
    progressbar.streams.wrap(stderr=True, stdout=True)
    try:
        raise RuntimeError()
    except RuntimeError:
        progressbar.streams.excepthook(*sys.exc_info())
    # calling unwrap_excepthook twice must be safe
    progressbar.streams.unwrap_excepthook()
    progressbar.streams.unwrap_excepthook()
def test_fd_as_io_stream():
    """A plain io.StringIO must be accepted as the bar's output fd."""
    stream = io.StringIO()
    with progressbar.ProgressBar(fd=stream) as pb:
        for i in range(101):
            pb.update(i)
    stream.close()
def test_no_newlines():
    """With line_breaks disabled, printing to the redirected streams between
    updates must not break the bar (ValueError from a closed stream is
    tolerated)."""
    kwargs = dict(
        redirect_stderr=True,
        redirect_stdout=True,
        line_breaks=False,
        is_terminal=True,
    )
    with progressbar.ProgressBar(**kwargs) as bar:
        for i in range(5):
            bar.update(i)
        for i in range(5, 10):
            try:
                print('\n\n', file=progressbar.streams.stdout)
                print('\n\n', file=progressbar.streams.stderr)
            except ValueError:
                pass
            bar.update(i)
@pytest.mark.parametrize('stream', [sys.__stdout__, sys.__stderr__])
def test_fd_as_standard_streams(stream):
    """The real stdout/stderr objects must be accepted as the bar's fd."""
    with progressbar.ProgressBar(fd=stream) as pb:
        for i in range(101):
            pb.update(i)
|
jenhantao/preRNA-seq_OligoDesigner
|
makeAllOligos.py
|
Python
|
bsd-2-clause
| 2,270 | 0.031718 |
'''
Designs oligos for a pre RNA-seq selection method
'''
### imports ###
import sys
import os
import numpy as np
def readFastaFile(fastaFilePath):
    """Parse a (multi-line) FASTA file.

    inputs: path to a fasta file
    outputs: a list of uppercased sequences and a parallel list of headers
    """
    sequences = []
    headers = []
    with open(fastaFilePath) as handle:
        lines = handle.readlines()
    current = ""
    for line in lines:
        if ">" in line:
            headers.append(line.replace(">", "").strip())
            # flush the sequence accumulated since the previous header
            if current != "":
                sequences.append(current.upper())
                current = ""
        else:
            current += line.strip()
    # flush the final sequence
    sequences.append(current.upper())
    return sequences, headers
def makeOligos(targetSequences, targetLength, outputPath):
    """Write one FASTA file per distinct k-mer of *targetLength* found in
    *targetSequences*.

    inputs: a list of sequences, length of k-mers, path to write output files
    outputs: one '<oligo>.fa' file per distinct oligo under *outputPath*
    """
    seenOligos = set()
    for sequence in targetSequences:
        # a sequence of length n holds n - targetLength + 1 k-mers; the
        # original range(len - targetLength) dropped the final window
        for start in range(len(sequence) - targetLength + 1):
            seenOligos.add(sequence[start:start + targetLength])
    # write one single-record fasta file per oligo
    for oligo in seenOligos:
        with open(outputPath + "/" + oligo + ".fa", "w") as outFile:
            outFile.write(">0\n")
            outFile.write(oligo + "\n")
if __name__ == "__main__":
    targetDirectoryPath = sys.argv[1]  # path to a directory containing fasta files giving the sequences we want the oligos to hybridize to
    targetLength = int(sys.argv[2])  # desired length of oligos
    outputPath = sys.argv[3]  # path to write output files
    # intialize lists
    allTargetSequences = []
    allTargetHeaders = []
    # read in sequences from every fasta file in the target directory
    print("reading target files")
    for targetFile in os.listdir(targetDirectoryPath):
        print(targetFile)
        targetSequences, targetHeaders = readFastaFile(targetDirectoryPath + "/" + targetFile)
        allTargetSequences += targetSequences
        allTargetHeaders += targetHeaders
    print("writing oligo fasta files")
    # bug fix: design oligos from the sequences accumulated across *all*
    # files, not just the last file processed by the loop above
    makeOligos(allTargetSequences, targetLength, outputPath)
|
saltstack/salt
|
tests/pytests/unit/engines/test_engines.py
|
Python
|
apache-2.0
| 595 | 0.003361 |
import pytest
|
import salt.engines
from tests.support.mock import MagicMock, patch
def test_engine_module_name():
    """The engine's name attribute must come from the ``name`` keyword."""
    engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
    assert engine.name == "foobar"
def test_engine_title_set():
    # run() should call appendproctitle with the engine's name; the missing
    # start function makes run() raise KeyError afterwards.
    engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
    with patch("salt.utils.process.appendproctitle", MagicMock()) as mm:
        with pytest.raises(KeyError):
            # The method does not exist so a KeyError will be raised.
            engine.run()
        mm.assert_called_with("foobar")
|
jcu-eresearch/climas-ng
|
webapp/climasng/data/datafinder.py
|
Python
|
apache-2.0
| 2,719 | 0.004046 |
import os
import re
import json
def createSpeciesJson(source_data_path):
    """Build species.json from modelled-species dirs under *source_data_path*.

    A directory whose path matches .../<taxon>/species/<species_name>/1km is
    recorded as one species entry; common names come from the bundled
    all_species.json when it can be read.
    """
    # here's a regex to test for species dirs:
    one_km_regex = re.compile(r'/(\w+)/species/(\w+)/1km$')
    # we'll get the species common name from here:
    common_names = {}
    cn_file = os.path.join(os.path.dirname(__file__), 'all_species.json')
    try:
        # try reading in the list of sci-to-common species names
        with open(cn_file) as f:
            common_names = json.load(f)
    except:
        # give up on common names if we can't read them
        common_names = {}
    #
    # okay, ready to check for modelled species
    #
    species_list = {}
    for dir, subdirs, files in os.walk(source_data_path):
        match = one_km_regex.search(dir)
        if match:
            taxon = match.group(1)
            sci_name = match.group(2).replace('_', ' ')
            species_list[sci_name] = {
                "commonNames": common_names.get(sci_name, [""]),
                "group": taxon
            }
            # if we found a species dir, we don't need to keep
            # os.walk()ing into its descendent dirs
            subdirs[:] = []
    # now save our species list
    json_path = os.path.join(os.path.dirname(__file__), 'species.json')
    with open(json_path, 'w') as json_file:
        json.dump(species_list, json_file, sort_keys = True, indent = 4)
def createBiodiversityJson(source_data_path):
    """Build biodiversity.json from taxon deciles dirs under *source_data_path*.

    A directory whose path matches .../<taxon-name>/biodiversity/deciles is
    recorded as one taxon/biodiversity entry.
    """
    deciles_pattern = re.compile(r'/(\w+)/biodiversity/deciles$')
    biodiv_list = {}
    for current_dir, child_dirs, _files in os.walk(source_data_path):
        found = deciles_pattern.search(current_dir)
        if not found:
            continue
        taxon = found.group(1)
        biodiv_list[taxon] = {
            "group": taxon
        }
        # no need to walk below a matched deciles dir
        child_dirs[:] = []
    # save the collected taxon records alongside this module
    json_path = os.path.join(os.path.dirname(__file__), 'biodiversity.json')
    with open(json_path, 'w') as json_file:
        json.dump(biodiv_list, json_file, sort_keys=True, indent=4)
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/coordinates/test_dms.py
|
Python
|
gpl-2.0
| 3,032 | 0.00033 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
#
import MDAnalysis as mda
import numpy as np
import pytest
from numpy.testing import assert_equal
from MDAnalysis.lib.mdamath import triclinic_vectors
from MDAnalysisTests.datafiles import (DMS)
class TestDMSReader(object):
    """Read-only checks for the DESRES DMS reader using the packaged DMS fixture."""

    @pytest.fixture()
    def universe(self):
        return mda.Universe(DMS)

    @pytest.fixture()
    def ts(self, universe):
        return universe.trajectory.ts

    def test_global_cell(self, ts):
        # the DMS fixture carries no periodic cell information
        assert ts.dimensions is None

    def test_velocities(self, ts):
        assert_equal(hasattr(ts, "_velocities"), False)

    def test_number_of_coords(self, universe):
        # Desired value taken from VMD
        # Info) Atoms: 3341
        assert_equal(len(universe.atoms), 3341)

    def test_coords_atom_0(self, universe):
        # Desired coordinates taken directly from the SQLite file. Check unit
        # conversion
        coords_0 = np.array([-11.0530004501343,
                             26.6800003051758,
                             12.7419996261597, ],
                            dtype=np.float32)
        assert_equal(universe.atoms[0].position, coords_0)

    def test_n_frames(self, universe):
        assert_equal(universe.trajectory.n_frames, 1,
                     "wrong number of frames in pdb")

    def test_time(self, universe):
        assert_equal(universe.trajectory.time, 0.0,
                     "wrong time of the frame")

    def test_frame(self, universe):
        assert_equal(universe.trajectory.frame, 0, "wrong frame number "
                     "(0-based, should be 0 for single frame readers)")

    def test_frame_index_0(self, universe):
        universe.trajectory[0]
        assert_equal(universe.trajectory.ts.frame, 0,
                     "frame number for frame index 0 should be 0")

    def test_frame_index_1_raises_IndexError(self, universe):
        # single-frame reader: any index past 0 must raise
        with pytest.raises(IndexError):
            universe.trajectory[1]
|
parrisha/raspi-visualizer
|
samples/WavGenerator.py
|
Python
|
mit
| 1,233 | 0.024331 |
#############
# ECE 612 Spring 2017
# Joe Parrish
#
# Use the same logic from SpectrumTester.py to generate multiple sine waves
# but write that output to a .wav file for file based testing of the project code
#############
import wave
import argparse
import numpy as np
def generate_sample_file(test_freqs, test_amps, chunk=4096, samplerate=44100):
filename = 'Sample'
x = np.arange(chunk)
y = np.zeros(chunk)
for test_freq,test_amp in zip(test_freqs,test_amps):
filename += '_' + str(test_freq) + 'Hz@' + str(test_amp)
y = np.add(y, np.sin(2 * np.pi * test_freq * x / samplerate) * test_amp)
filename += '.wav'
y = y.astype('i2')
wave_writer = wave.open(filename, mode='wb')
wave_writer.setnchannels(1)
wave_writer.setsampwidth(2)
wave_writer.setframerate(samplerate)
for
|
x in range(0,8):
wave_writer.writeframes(y)
if __name__ == '__main__':
    # CLI: --freqs and --amps are parallel lists of sine frequencies/amplitudes
    parser = argparse.ArgumentParser(description='Write a wave file containing Numpy generated sine waves')
    parser.add_argument('--freqs', nargs='+', type=int)
    parser.add_argument('--amps', nargs='+', type=int)

    args = parser.parse_args()
    generate_sample_file(args.freqs, args.amps)
|
Atothendrew/XcodeBuildIncrementer
|
XcodeBuildIncrementer/__init__.py
|
Python
|
mit
| 60 | 0 |
__author__ = 'Andrew Williamson <[email protected]>'
|
a25kk/dpf
|
src/dpf.sitetheme/dpf/sitetheme/__init__.py
|
Python
|
mit
| 217 | 0 |
# -*- coding: utf-8 -*-
"""Init and utils."""
from zope.i18nmessageid import MessageFactory

# Message factory bound to the 'dpf.sitetheme' i18n domain.
_ = MessageFactory('dpf.sitetheme')


def initialize(context):
    """Initializer called when used as a Zope 2 product."""
joaormatos/anaconda
|
mmfparser/data/chunkloaders/extdata.py
|
Python
|
gpl-3.0
| 1,201 | 0.004163 |
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
from mmfparser.bytereader import ByteReader
from mmfparser.loader import DataLoader
class ExtData(DataLoader):
    """External data chunk: a filename plus its raw byte payload."""
    # NOTE(review): `name` is declared here but read() stores into
    # `self.filename` — looks inconsistent; confirm which attribute callers use.
    name = None
    data = None
    def read(self, reader):
        # 'old' format carries a filename string followed by the raw payload
        if self.settings.get('old', False):
            self.filename = reader.readString()
            self.data = reader.read()
        else:
            # non-old files: verify the chunk holds only default/empty values
            reader.checkDefault(reader.readInt(), 0)
            reader.checkDefault(reader.read(), '')
    def write(self, reader):
        # always emits an empty new-format chunk
        reader.writeInt(0)
__all__ = ['ExtData']
|
GeoMatDigital/django-geomat
|
geomat/users/models.py
|
Python
|
bsd-3-clause
| 493 | 0 |
# -*- coding: utf-8 -*-
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _


@python_2_unicode_compatible
class User(AbstractUser):
    # First Name and Last Name do not cover name patterns
    # around the globe.
    name = models.CharField(_("Name of User"), blank=True, max_length=255)

    def __str__(self):
        return self.username
|
Spasley/python
|
generator/record.py
|
Python
|
apache-2.0
| 1,285 | 0.005447 |
__author__ = 'Volodya'
from model.recordfields import RecordFields
import random
import string
import os.path
import jsonpickle
import getopt
import sys

# Command line: -n <count of records to generate>, -f <output file path>
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ['number of records', 'file'])
except getopt.GetoptError as err:
    # bug fix: the getopt module has no usage() helper, so the original
    # error path crashed with AttributeError; report the error and exit
    print(err)
    sys.exit(2)
# defaults: 5 records written to data/records.json
n = 5
f = 'data/records.json'
for o, a in opts:
    if o == '-n':
        n = int(a)
    elif o == '-f':
        f = a
def random_string(prefix, maxlen):
    """Return *prefix* plus up to *maxlen*-1 random characters drawn from
    letters, digits and punctuation, with spaces weighted heavily."""
    alphabet = string.ascii_letters + string.digits + string.punctuation + " " * 10
    length = random.randrange(maxlen)
    return prefix + "".join(random.choice(alphabet) for _ in range(length))
# one blank record followed by n randomly generated ones
testdata = [RecordFields(firstname='', lastname='', middlename='',
                         nickname='', company='', address='')] + [
    RecordFields(firstname=random_string('firstname', 10),
                 lastname=random_string('lastname', 10),
                 middlename=random_string('middlename', 10),
                 nickname=random_string('nickname', 10),
                 company=random_string('company', 10),
                 # NOTE(review): address uses the 'middlename' prefix —
                 # looks like a copy-paste slip; confirm the intended prefix
                 address=random_string('middlename', 10))
    for i in range(n)
]

# write the records as pretty-printed JSON, resolved relative to the project root
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, 'w') as out:
    jsonpickle.set_encoder_options('json', indent=2)
    out.write(jsonpickle.encode(testdata))
|
luisen14/treatment-tracking-project
|
treatment_tracker/researchers/apps.py
|
Python
|
apache-2.0
| 97 | 0 |
from django.apps import AppConfig


class ResearchersConfig(AppConfig):
    """App configuration for the researchers Django app."""
    name = 'researchers'
|
encukou/freeipa
|
ipatests/test_ipaserver/test_install/test_installutils.py
|
Python
|
gpl-3.0
| 7,877 | 0.000127 |
#
# Copyright (C) 2017 FreeIPA Contributors. See COPYING for license
#
from __future__ import absolute_import
import binascii
import os
import psutil
import re
import subprocess
import textwrap
import pytest
from unittest.mock import patch, mock_open
from ipaplatform.paths import paths
from ipapython import ipautil
from ipapython.admintool import ScriptError
from ipaserver.install import installutils
from ipaserver.install import ipa_backup
from ipaserver.install import ipa_restore
GPG_GENKEY = textwrap.dedent("""
%echo Generating a standard key
Key-Type: RSA
Key-Length: 2048
Name-Real: IPA Backup
Name-Comment: IPA Backup
Name-Email: [email protected]
Expire-Date: 0
Passphrase: {passphrase}
%commit
%echo done
""")
@pytest.fixture
def gpgkey(request, tempdir):
    """Create a throw-away GPG key pair for backup/restore tests.

    Builds an isolated GNUPGHOME under *tempdir*, starts a dedicated
    gpg-agent, generates a passphrase-protected RSA key and pre-caches
    the passphrase in the agent so tests can use the key without any
    interactive prompt.  A finalizer restores the caller's GNUPGHOME
    and kills the agent.
    """
    passphrase = "Secret123"
    gnupghome = os.path.join(tempdir, "gnupg")
    os.makedirs(gnupghome, 0o700)
    # provide clean env for gpg test
    env = os.environ.copy()
    orig_gnupghome = env.get('GNUPGHOME')
    env['GNUPGHOME'] = gnupghome
    env['LC_ALL'] = 'C.UTF-8'
    env['LANGUAGE'] = 'C'
    devnull = open(os.devnull, 'w')
    # allow passing passphrases to agent
    with open(os.path.join(gnupghome, "gpg-agent.conf"), 'w') as f:
        f.write("verbose\n")
        f.write("allow-preset-passphrase\n")
    # daemonize agent (detach from the console and run in the background)
    subprocess.Popen(
        [paths.GPG_AGENT, '--batch', '--daemon'],
        env=env, stdout=devnull, stderr=devnull
    )
    def fin():
        # Restore the caller's GNUPGHOME (or drop ours) and stop the agent.
        if orig_gnupghome is not None:
            os.environ['GNUPGHOME'] = orig_gnupghome
        else:
            os.environ.pop('GNUPGHOME', None)
        subprocess.run(
            [paths.GPG_CONF, '--kill', 'all'],
            check=True,
            env=env,
        )
    request.addfinalizer(fin)
    # create public / private key pair
    keygen = os.path.join(gnupghome, 'keygen')
    with open(keygen, 'w') as f:
        f.write(GPG_GENKEY.format(passphrase=passphrase))
    subprocess.check_call(
        [paths.GPG2, '--batch', '--gen-key', keygen],
        env=env, stdout=devnull, stderr=devnull
    )
    # get keygrip of private key
    out = subprocess.check_output(
        [paths.GPG2, "--list-secret-keys", "--with-keygrip"],
        env=env, stderr=subprocess.STDOUT
    )
    mo = re.search("Keygrip = ([A-Z0-9]{32,})", out.decode('utf-8'))
    if mo is None:
        raise ValueError(out.decode('utf-8'))
    keygrip = mo.group(1)
    # unlock private key
    cmd = "PRESET_PASSPHRASE {} -1 {}".format(
        keygrip,
        binascii.hexlify(passphrase.encode('utf-8')).decode('utf-8')
    )
    subprocess.check_call(
        [paths.GPG_CONNECT_AGENT, cmd, "/bye"],
        env=env, stdout=devnull, stderr=devnull
    )
    # set env for the rest of the progress
    os.environ['GNUPGHOME'] = gnupghome
def test_gpg_encrypt(tempdir):
    """Symmetric encrypt/decrypt round-trip, plus a wrong-password failure."""
    plain_path = os.path.join(tempdir, "data.txt")
    cipher_path = os.path.join(tempdir, "data.gpg")
    output_path = os.path.join(tempdir, "data.out")
    password = 'Secret123'
    content = 'Dummy text\n'
    with open(plain_path, 'w') as handle:
        handle.write(content)
    installutils.encrypt_file(plain_path, cipher_path, password=password)
    assert os.path.isfile(cipher_path)
    installutils.decrypt_file(cipher_path, output_path, password=password)
    assert os.path.isfile(output_path)
    with open(output_path) as handle:
        assert handle.read() == content
    # decrypting with a wrong password must fail loudly
    with pytest.raises(ipautil.CalledProcessError):
        installutils.decrypt_file(cipher_path, output_path, password='invalid')
def test_gpg_asymmetric(tempdir, gpgkey):
    """Backup-style asymmetric encrypt/decrypt round-trip with the test key."""
    content = 'Dummy text\n'
    plain_path = os.path.join(tempdir, "asymmetric.txt")
    cipher_path = plain_path + ".gpg"
    with open(plain_path, 'w') as handle:
        handle.write(content)
    ipa_backup.encrypt_file(plain_path, remove_original=True)
    assert os.path.isfile(cipher_path)
    assert not os.path.exists(plain_path)
    ipa_restore.decrypt_file(tempdir, cipher_path)
    assert os.path.isfile(plain_path)
    with open(plain_path) as handle:
        assert handle.read() == content
@pytest.mark.parametrize(
    "platform, expected",
    [
        ("fedora", "fedora"),
        ("fedora_container", "fedora"),
        ("fedora_containers", "fedora_containers"),
        ("fedoracontainer", "fedoracontainer"),
        ("rhel", "rhel"),
        ("rhel_container", "rhel"),
    ]
)
def test_get_current_platform(monkeypatch, platform, expected):
    # Per the cases above, only an exact "_container" suffix is stripped
    # from the platform name; near-miss spellings pass through unchanged.
    monkeypatch.setattr(installutils.ipaplatform, "NAME", platform)
    assert installutils.get_current_platform() == expected
# The mock_exists in the following tests mocks that the cgroups
# files exist even in non-containers. The values are provided by
# mock_open_multi.
@patch('ipaserver.install.installutils.in_container')
@patch('os.path.exists')
def test_in_container_no_cgroup(mock_exists, mock_in_container):
    """Inside a container with no cgroup files, RAM detection must fail."""
    mock_exists.side_effect = [False, False]
    mock_in_container.return_value = True
    with pytest.raises(ScriptError):
        installutils.check_available_memory(False)
def mock_open_multi(*contents):
    """Build a mock for ``open`` that serves one file per call.

    Each element of *contents* becomes the data returned by reading the
    n-th opened file (for our purposes: first read is the limit, second
    the usage).

    Note: this overrides *all* opens, so if you use pdb you will need
    to extend the list by 2.
    """
    opener = mock_open()
    handles = []
    for data in contents:
        handles.append(mock_open(read_data=data).return_value)
    opener.side_effect = handles
    return opener
# Canned memory amounts in bytes, as strings (the mocked cgroup files are
# read as text).  Per mock_open_multi: first value read is the limit,
# second the current usage.
RAM_OK = str(1800 * 1000 * 1000)  # 1.8 GB limit
RAM_CA_USED = str(150 * 1000 * 1000)  # 150 MB already in use
RAM_MOSTLY_USED = str(1500 * 1000 * 1000)  # 1.5 GB already in use
RAM_NOT_OK = str(10 * 1000 * 1000)  # 10 MB: far too little
@patch('ipaserver.install.installutils.in_container')
@patch('builtins.open', mock_open_multi(RAM_NOT_OK, "0"))
@patch('os.path.exists')
def test_in_container_insufficient_ram(mock_exists, mock_in_container):
    """Container with a tiny memory limit and zero usage: install fails."""
    mock_exists.side_effect = [True, True]
    mock_in_container.return_value = True
    with pytest.raises(ScriptError):
        installutils.check_available_memory(True)
@patch('ipaserver.install.installutils.in_container')
@patch('builtins.open', mock_open_multi(RAM_OK, RAM_CA_USED))
@patch('os.path.exists')
def test_in_container_ram_ok_no_ca(mock_exists, mock_in_container):
    """Container with just enough free memory for a CA-less install."""
    mock_exists.side_effect = [True, True]
    mock_in_container.return_value = True
    # must not raise
    installutils.check_available_memory(False)
@patch('ipaserver.install.installutils.in_container')
@patch('builtins.open', mock_open_multi(RAM_OK, RAM_MOSTLY_USED))
@patch('os.path.exists')
def test_in_container_insufficient_ram_with_ca(mock_exists, mock_in_container):
    """Container that falls just short of the minimum needed with a CA."""
    mock_exists.side_effect = [True, True]
    mock_in_container.return_value = True
    with pytest.raises(ScriptError):
        installutils.check_available_memory(True)
@patch('ipaserver.install.installutils.in_container')
@patch('psutil.virtual_memory')
def test_not_container_insufficient_ram_with_ca(mock_psutil, mock_in_container):
    """Not a container and insufficient RAM: CA install must be refused."""
    mock_in_container.return_value = False
    # psutil's svmem stands in for the real virtual_memory() result
    fake_memory = psutil._pslinux.svmem
    fake_memory.available = int(RAM_NOT_OK)
    mock_psutil.return_value = fake_memory
    with pytest.raises(ScriptError):
        installutils.check_available_memory(True)
@patch('ipaserver.install.installutils.in_container')
@patch('psutil.virtual_memory')
def test_not_container_ram_ok(mock_psutil, mock_in_container):
    """Not a container and sufficient RAM: the check passes."""
    mock_in_container.return_value = False
    # psutil's svmem stands in for the real virtual_memory() result
    fake_memory = psutil._pslinux.svmem
    fake_memory.available = int(RAM_OK)
    mock_psutil.return_value = fake_memory
    # must not raise
    installutils.check_available_memory(True)
|
Jolopy/MizzyChan
|
mizzychan/app/crawl/rucrawler.py
|
Python
|
gpl-3.0
| 67,999 | 0.018721 |
import requests
from bs4 import BeautifulSoup
def getMemes():
    """Scrape meme post links from the first 10 pages of quickmeme.com.

    Returns a list of dicts with 'category', 'url' and 'title' keys.
    Entries with empty or whitespace-only titles are skipped.
    """
    base_url = 'http://www.quickmeme.com'
    memeLinks = []
    # One request per page instead of ten copy-pasted request/soup blocks.
    for page in range(1, 11):
        page_response = requests.get('%s/page/%d/' % (base_url, page))
        soup = BeautifulSoup(page_response.content, "lxml")
        for link in soup.find_all("a"):
            tempURL = link.get("href")
            if tempURL is None:
                continue
            if tempURL.startswith("/p/"):
                # Fix: the original assigned get_text().replace('\n','') and
                # then overwrote it with a fresh get_text().replace('\t',''),
                # leaving newlines in the title.  Chain both replacements.
                title = link.get_text().replace('\n', '').replace('\t', '')
                if title and not title.isspace():
                    memeLinks.append({
                        'category': 'Memes',
                        'url': base_url + tempURL,
                        'title': title,
                    })
    return memeLinks
def getPolitics():
nytimesURL="http://www.nytimes.com/pages/politics"
cnnURL="http://www.cnn.com/specials/politics/national-politics"
foxnewsURL="http://www.foxnews.com/politics"
#cbsnewsURL="http://www.cbsnews.com/politics"
abcnewsURL="http://www.abcnews.go.com/Politics/Election"
reutersnewsURL="http://www.reuters.com/politics"
bbcnewsURL="http://www.bbc.com/news/election/us2016"
#yahoonewsURL="https://www.yahoo.com/news/politics"
nbcnewsURL="http://www.nbcnews.com/politics/2016-election"
usatodaynewsURL="http://www.usatoday.com/section/global/elections-2016/"
#huffingtonpostnewsURL="http://www.huffingtonpost.com/section/politics"
timenewsURL="http://www.time.com/politics"
washingtonpostnewsURL="http://www.washingtonpost.com/politics/"
guardiannewsURL="https://www.theguardian.com/us-news/us-elections-2016"
#wsjnewsURL="http://www.wsj.com/news/politics"
#latimesnewsURL="http://www.latimes.com/politics"
nydailynewsURL="http://www.nydailynews.com/news/politics"
#chicagotribunenewsURL="http://www.chicagotribune.com/news/nationworld/politics/"
r_nytimes=requests.get(nytimesURL)
r_CNN=requests.get(cnnURL)
r_foxnews=requests.get(foxnewsURL)
#r_cbsnews=requests.get(cbsnewsURL)
r_abcnews=requests.get(abcnewsURL)
r_reutersnews=requests.get(reutersnewsURL)
r_bbcnews=requests.get(bbcnewsURL)
#r_yahoonews=requests.get(yahoonewsURL)
r_nbcnews=requests.get(nbcnewsURL)
r_usatodaynews=requests.get(usatodaynewsURL)
#r_huffingtonpostnews=requests.get(huffingtonpostnewsURL)
r_timenews=requests.get(timenewsURL)
r_washingtonpostnews=requests.get(washingtonpostnewsURL)
r_guardiannews=requests.get(guardiannewsURL)
#r_wsjnews=requests.get(wsjnewsURL)
#r_latimesnews=requests.get(latimesnewsURL)
r_nydailynews=requests.get(nydailynewsURL)
#r_chicagotribunenews=requests.get(chicagotribunenewsURL)
nytimesSoup=BeautifulSoup(r_nytimes.content, "lxml")
cnnSoup=BeautifulSoup(r_CNN.content, "lxml")
foxnewsSoup=BeautifulSoup(r_foxnews.content, "lxml")
#cbsnewsSoup=BeautifulSoup(r_cbsnews.content, "lxml")
abcnewsSoup=BeautifulSoup(r_abcnews.content, "lxml")
reutersnewsSoup=BeautifulSoup(r_reutersnews.content, "lxml")
bbcnewsSoup=BeautifulSoup(r_bbcnews.content, "lxml")
#yahoonewsSoup=BeautifulSoup(r_yahoonews.content, "lxml")
nbcnewsSoup=BeautifulSoup(r_nbcnews.content, "lxml")
usatodaynewsSoup=BeautifulSoup(r_usatodaynews.content, "lxml")
#huffingtonpostnewsSoup=BeautifulSoup(r_huffingtonpostnews.content, "lxml")
timenewsSoup=BeautifulSoup(r_timenews.content, "lxml")
washingtonpostnewsSoup=BeautifulSoup(r_washingtonpostnews.content, "lxml")
guardiannewsSoup=BeautifulSoup(r_guardiannews.content, "lxml")
#wsjnewsSoup=BeautifulSoup(r_wsjnews.content, "lxml")
#latimesnewsSoup=BeautifulSoup(r_latimesnews.content, "lxml")
nydailynewsSoup=BeautifulSoup(r_nydailynews.content, "lxml")
#chicagotribunenewsSoup=BeautifulSoup(r_chicagotribunenews.content, "lxml")
nytimesLinks=nytimesSoup.find_all("a")
cnnLinks=cnnSoup.find_all("a")
foxnewsLinks=foxnewsSoup.find_all("a")
#cbsnewsLinks=cbsnewsSoup.find_all("a")
abcnewsLinks=abcnewsSoup.find_all("a")
reutersnewsLinks=reutersnewsSoup.find_all("a")
bbcnewsLinks=bbcnewsSoup.find_all("a")
#yahoonewsLinks=yahoonewsSoup.find_all("a")
nbcnewsLinks=nbcnewsSoup.find_all("a")
usatodaynewsLinks=usatodaynewsSoup.find_all("a")
#huffingtonpostnewsLinks=huffingtonpostnewsSoup.find_all("a")
timenewsLinks=timenewsSoup.find_all("a")
washingtonpostnewsLinks=washingtonpostnewsSoup.find_all("a")
guardiannewsLinks=guardiannewsSoup.find_all("a")
#wsjnewsLinks=wsjnewsSoup.find_all("a")
#latimesnewsLinks=latimesnewsSoup.find_all("a")
nydailynewsLinks=nydailynewsSoup.find_all("a")
#chicagotribunenewsLinks=chicagotribunenewsSoup.find_all("a")
linkList=[nytimesLinks,cnnLinks,foxnewsLinks,abcnewsLinks,reutersnewsLinks,
bbcnewsLinks,nbcnewsLinks,usatodaynewsLinks,timenewsLinks,
washingtonpostnewsLinks,guardiannewsLinks,nydailynewsLinks]
politicsLinks=[]
politicsLinksFinal=[]
politicsLinksTitle=[]
politicsLinksTitleFinal=[]
pointer=0
listLength=len(linkList)
for i in range(listLength):
for link in linkList[pointer]:
tempURL=link.get("href")
if tempURL is None:
continue
if tempURL.startswith("http://www.nytimes.com/2016/"):
dictionary={}
dictionary['category']='Politics'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
politicsLinks.append(dictionary)
if tempURL.startswith("http://www.foxnews.com/politics/2016/10/"):
dictionary={}
dictionary['category']='Politics'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] an
|
thaim/ansible
|
lib/ansible/module_utils/network/checkpoint/checkpoint.py
|
Python
|
mit
| 19,463 | 0.003853 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import (absolute_import, division, print_function)
import time
from ansible.module_utils.connection import Connection
# Argument spec shared by object modules (hosts, rules, ...); individual
# modules extend it with their object-specific parameters.
checkpoint_argument_spec_for_objects = dict(
    auto_publish_session=dict(type='bool'),
    wait_for_task=dict(type='bool', default=True),
    state=dict(type='str', choices=['present', 'absent'], default='present'),
    version=dict(type='str')
)
# Facts modules only accept an API version override.
checkpoint_argument_spec_for_facts = dict(
    version=dict(type='str')
)
# Command modules may wait for the triggered task to finish.
checkpoint_argument_spec_for_commands = dict(
    wait_for_task=dict(type='bool', default=True),
    version=dict(type='str')
)
# Parameters that identify the target object in a delete call.
# NOTE(review): 'layer' appears twice — looks like a copy/paste slip;
# harmless for membership checks but worth cleaning up.
delete_params = ['name', 'uid', 'layer', 'exception-group-name', 'layer', 'rule-name']
# send the request to checkpoint
def send_request(connection, version, url, payload=None):
    """Forward an API call to the Check Point device over *connection*.

    Returns the ``(status_code, response)`` pair produced by the
    connection plugin.
    """
    endpoint = '/web_api/' + version + url
    status, body = connection.send_request(endpoint, payload)
    return status, body
# get the payload from the user parameters
def is_checkpoint_param(parameter):
    """Return True if *parameter* belongs in the Check Point API payload.

    Ansible-side control parameters (session publishing, task waiting,
    state and API version) are filtered out of the request.
    """
    # Membership test replaces the original chain of == comparisons.
    return parameter not in (
        'auto_publish_session',
        'state',
        'wait_for_task',
        'version',
    )
# build the payload from the parameters which has value (not None), and they are parameter of checkpoint API as well
def get_payload_from_parameters(params):
    """Convert Ansible params into a Check Point API payload.

    Drops falsy values and Ansible control parameters, turns ``snake_case``
    keys into ``kebab-case`` and recurses into dicts and lists of dicts.
    """
    payload = {}
    for name, value in params.items():
        if not value or not is_checkpoint_param(name):
            continue
        key = name.replace("_", "-")
        if isinstance(value, dict):
            payload[key] = get_payload_from_parameters(value)
        elif isinstance(value, list) and value and isinstance(value[0], dict):
            payload[key] = [get_payload_from_parameters(item) for item in value]
        else:
            payload[key] = value
    return payload
# wait for task
def wait_for_task(module, version, connection, task_id):
    """Poll 'show-task' until the task and all its sub-tasks complete.

    Polls every 2 seconds for at most 300 iterations (~10 minutes).
    Fails the module if any sub-task reports 'failed', if the status
    query itself keeps failing, or if the timeout is exceeded.
    """
    task_id_payload = {'task-id': task_id}
    task_complete = False
    current_iteration = 0
    max_num_iterations = 300
    # As long as there is a task in progress
    while not task_complete and current_iteration < max_num_iterations:
        current_iteration += 1
        # Check the status of the task; retry a failed query up to 5 times
        code, response = send_request(connection, version, 'show-task',
                                      task_id_payload)
        attempts_counter = 0
        while code != 200:
            if attempts_counter >= 5:
                response['message'] = (
                    "ERROR: Failed to handle asynchronous tasks as "
                    "synchronous, tasks result is undefined.\n"
                    + response['message'])
                module.fail_json(msg=response)
            attempts_counter += 1
            time.sleep(2)
            code, response = send_request(connection, version, 'show-task',
                                          task_id_payload)
        # Count the number of sub-tasks that are not in-progress
        completed_tasks = 0
        for task in response['tasks']:
            if task['status'] == 'failed':
                module.fail_json(msg='Task {0} with task id {1} failed. Look at the logs for more details'
                                     .format(task['task-name'], task['task-id']))
            if task['status'] == 'in progress':
                break
            completed_tasks += 1
        # Done only when every sub-task has completed
        if completed_tasks == len(response["tasks"]):
            task_complete = True
        else:
            time.sleep(2)  # wait for two seconds before polling again
    if not task_complete:
        module.fail_json(msg="ERROR: Timeout.\nTask-id: {0}.".format(task_id_payload['task-id']))
# handle publish command, and wait for it to end if the user asked so
def handle_publish(module, connection, version):
    """Publish the session if requested; optionally wait for completion."""
    if not module.params['auto_publish_session']:
        return
    code, response = send_request(connection, version, 'publish')
    if code != 200:
        module.fail_json(msg=response)
    if module.params['wait_for_task']:
        wait_for_task(module, version, connection, response['task-id'])
# handle a command
def api_command(module, command):
    """Run *command* against the device and wait for any resulting tasks.

    Returns ``{'changed': True, <command>: <response>}`` on success;
    fails the module on a non-200 response.
    """
    payload = get_payload_from_parameters(module.params)
    connection = Connection(module._socket_path)
    # honour an explicit API version by prefixing it to the URL
    if module.params.get('version'):
        version = 'v' + module.params['version'] + '/'
    else:
        version = ''
    code, response = send_request(connection, version, command, payload)
    result = {'changed': True}
    if code == 200:
        # commands may spawn one task or a list of tasks to wait on
        if module.params['wait_for_task']:
            if 'task-id' in response:
                wait_for_task(module, version, connection, response['task-id'])
            elif 'tasks' in response:
                for task_id in response['tasks']:
                    wait_for_task(module, version, connection, task_id)
        result[command] = response
    else:
        module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
    return result
# handle api call facts
def api_call_facts(module, api_call_object, api_call_object_plural_version):
    """Fetch facts for one object (by name/uid) or list all objects.

    Returns ``{<api_call_object>: <response>}``; fails the module on a
    non-200 response.
    """
    payload = get_payload_from_parameters(module.params)
    connection = Connection(module._socket_path)
    # if user insert a specific version, we add it to the url
    # (use .get() for consistency with api_command and to tolerate a
    # missing 'version' key)
    version = ('v' + module.params['version'] + '/') if module.params.get('version') else ''
    # if there is neither name nor uid, the API command will be in plural version (e.g. show-hosts instead of show-host)
    if payload.get("name") is None and payload.get("uid") is None:
        api_call_object = api_call_object_plural_version
    code, response = send_request(connection, version, 'show-' + api_call_object, payload)
    if code != 200:
        module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
    result = {api_call_object: response}
    return result
# handle api call
def api_call(module, api_call_object):
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
result = {'changed': False}
if module.check_mode:
return result
# if user insert a specific version, we add it to the url
version = ('v' + module.params['version'] + '/') if m
|
RocketRedNeck/PythonPlayground
|
pidSim.py
|
Python
|
mit
| 18,070 | 0.008356 |
# -*- coding: utf-8 -*-
"""
pidSim.py
A simulation of a vision control to steering PID loop accounting for communication and
processing latency and variation; demonstrates the impact of variation
to successful control when the control variable (CV) has direct influence on
the process variable (PV)
This allows students to experiment with how different elements in the scaling
of a control loop affect performance, this focusing efforts on successful
design.
The model consists of a PID processing software with an asynchronous alignment
with a camera frame which is also asynchronous to image processing software.
Communication latency and jitter are planned as well as image processing impacts.
A plot at the end shows a sample over some specified duration.
The initial conditions of the file represents a case that won't work well until
it is correct by improvements in the constants and image processing rates
Copyright (c) 2016 - RocketRedNeck.com RocketRedNeck.net
RocketRedNeck and MIT Licenses
RocketRedNeck hereby grants license for others to copy and modify this source code for
whatever purpose other's deem worthy as long as RocketRedNeck is given credit where
where credit is due and you leave RocketRedNeck out of it for all other nefarious purposes.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
****************************************************************************************************
"""
import matplotlib.pyplot as plot
import numpy as np
tmax_sec = 5.0
dt_sec = 0.001
ts_sec = np.arange(0.0, tmax_sec, 0.001)
nmax = ts_sec.__len__() # round(tmax_sec/dt_sec)
ns = range(0, nmax)
kp = 0.3 # Proportional gain
ki = 0.03 # Integral gain
kd = 0.0 # Derivative gain
kg = 1.0 # Plant (Process) gain
tau_sec = 0.1
sp = np.zeros(nmax) # Will initialize after first image processed
err = np.zeros(nmax)
intErr = np.zeros(nmax)
derrdt = np.zeros(nmax)
lastErr = 0.0
G = np.zeros(nmax) # Process output to be measured
exp = np.exp(-dt_sec/tau_sec)
# Model of the pid task via a java util.timer
# We add a random normal variation for task wakeup since the util.timer
# can only assure that the task wakes up no earlier than scheduled.
# Empirical measurement of the task latency is required for accurate
# modeling, but for now we can just assume about a 10% average
pidPeriod_sec = 0.02;
pidPeriod_index = round(pidPeriod_sec / dt_sec)
pidStart_index = 0 # "time" that PID computation started
pidDuration_sec = 0.001 # Time to complete PID calculation (models software latency)
pidDuration_index = round(pidDuration_sec / dt_sec)
pidEnd_index = pidStart_index + pidDuration_index # "time" that PID computation ended
pidMinJitter_sec = 0.000 # Minimum Random task jitter
pidMinJitter_index = round(pidMinJitter_sec / dt_sec)
pidMaxJitt
|
er_sec = 0.000 # Maximum Random task jitter
pidMaxJitter_index = round(pidMaxJitter_sec / dt_sec)
pidMeanJitter_index = round((pidMaxJitter_index + pidMinJitter_index)/2)
pidStdDevJitter_index = round((pidMaxJitter_index - pidMinJitter_index) / 3)
cvPid = np.zeros(nmax) # Initial value of cv coming from PID
|
calculation
# The first communication link is assumed to be a CAN bus
# The bus overhead is assumed to be a total fixed time
# not exceeding about 1 ms for up to four (4) messages going to four (4)
# separate motors (including any increases for bit stuffing); in other words
# we assume something like 100 bits per message all mastered from the same
# location on a 1 Mbps bus.
# The underlying software is assumed to be some queue processing task that
# wakes upon a posted message. A complete review of the path is needed to
# assess whether to task that actually receives the posted message awakens
# immediately (higher priority) or must time slice with all other concurrent
# tasks. If communication tasking is forced to wait for an available cycle
# it is possible that an indeterminate delay may occur at the post-to-wire
# boundary; also, the communication tasking must post all messages queued
# to the wire in close sequence otherwise the motors will be out of phase
# We can inject an estimate of communication jitter as a whole using a
# simple normal distribution
comm0Start_index = 0 # "time" that first communication bus starts
comm0Delay_sec = 0.001 # Time to complete communication (MUST BE LESS THAN PID PERIOD)
comm0Delay_index = round(comm0Delay_sec / dt_sec)
comm0End_index = comm0Start_index + comm0Delay_index
comm0MinJitter_sec = 0.000
comm0MinJitter_index = round(comm0MinJitter_sec / dt_sec)
comm0MaxJitter_sec = 0.000
comm0MaxJitter_index = round(comm0MaxJitter_sec / dt_sec)
comm0MeanJitter_index = round((comm0MaxJitter_index + comm0MinJitter_index)/2)
comm0StdDevJitter_index = round((comm0MaxJitter_index - comm0MinJitter_index) / 3)
cvComm0 = np.zeros(nmax) # cv value delayed for first communication bus
camOffset_sec = 0.0 # Offset to represent asynchronous camera start
camOffset_index = round(camOffset_sec / dt_sec)
camStart_index = camOffset_index # "time" that camera runs
camRate_Hz = 30 # Camera frame rate
camPeriod_sec = 1.0/camRate_Hz
camPeriod_index = round(camPeriod_sec / dt_sec)
camEnd_index = camStart_index + camPeriod_index
camImage_index = round((camStart_index + camEnd_index) / 2) # Time associated with center of image
pvCam = np.zeros(nmax) # process variable delayed for camera framing
# The second communication bus is polled by the imaging software
# The time that the imaging software starts is asynchronous to the
# other system components, and it will not execute again until the
# image processing completes (which itself has some variation)
comm1Start_index = 0 # "time" that second communication bus starts
comm1Delay_sec = 0.020 # Time to complete communication
comm1Delay_index = round(comm1Delay_sec / dt_sec)
comm1End_index = comm1Start_index + comm1Delay_index
comm1MinJitter_sec = 0.000
comm1MinJitter_index = round(comm1MinJitter_sec / dt_sec)
comm1MaxJitter_sec = 0.000
comm1MaxJitter_index = round(comm1MaxJitter_sec / dt_sec)
comm1MeanJitter_index = round((comm1MaxJitter_index + comm1MinJitter_index)/2)
comm1StdDevJitter_index = round((comm1MaxJitter_index - comm1MinJitter_index) / 3)
pvComm1 = np.zeros(nmax) # pv value delayed for second communication bus
# Image processing consists of a bounded, but variable process
# The content of the image and the operating environment will cause the
# associated software to vary; we will use emprical estimates for a current
# approach and will assume the variation has a normal distribution with a
# 3-sigma distribution between the upper and lower limits
pvImageStart_index = 0
pvImageMaxRate_Hz = 5.0
pvImageMinRate_Hz = 3.0
pvImageRateSigma = 3
pvImageMaxDuration_sec = 1.0 / pvImageMinRate_Hz
pvImageMinDuration_sec = 1.0 / pvImageMaxRate_Hz
pvImageMaxDuration_index = round(pvImageMaxDuration_sec / dt_sec)
pvImageMinDuration_index = round(pvImageMinDuration_sec / dt_sec)
pvImageMeanDuration_index = round((pvImageMinDuration_index + pvImageMaxDuration_index)/2)
pvImageStdDevDuration_index = round((pvImageMaxD
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.