id (int64, 0–300k) | label (string, 1–74 chars, ⌀) | text (string, 4k–8k chars) |
---|---|---|
1,500 |
test get path xdg yaml
|
"""
author: deadc0de6 (https://github.com/deadc0de6)
Copyright (c) 2017, deadc0de6
basic unittest for the options module
"""
# pylint: disable=R0903
# pylint: disable=W0231
# pylint: disable=W0212
import os
import unittest
from unittest.mock import patch
from dotdrop.options import Options, Logger
from dotdrop.exceptions import YamlException
class FakeOptions(Options):
"""fake Options class"""
def __init__(self, args):
"""init"""
self.args = args
self.log = Logger(debug=True)
def clean_setup():
"""clean stuff"""
if 'DOTDROP_CONFIG' in os.environ:
del os.environ['DOTDROP_CONFIG']
if 'XDG_CONFIG_HOME' in os.environ:
del os.environ['XDG_CONFIG_HOME']
def get_args(more):
"""return args dict"""
args = {
'--dry': False,
'--verbose': True,
'--cfg': '',
}
for k, val in more.items():
args[k] = val
return args
def side_effect(valid=''):
"""side effect for os.path.exists"""
def inner(filename):
print(f'checking if {filename} exists')
if filename == valid:
return True
return False
return inner
class TestOptions(unittest.TestCase):
"""test case"""
def test_get_path_from_cli(self):
"""from --cli"""
clean_setup()
expected = 'fakepath'
args = {}
args['--cfg'] = expected
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
def test_get_path_from_env(self):
"""from env"""
clean_setup()
expected = 'envpath'
os.environ['DOTDROP_CONFIG'] = expected
args = get_args({'--cfg': ''})
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_from_yaml(self, mock_exists):
"""from yaml"""
clean_setup()
mock_exists.return_value = True
expected = 'config.yaml'
args = get_args({'--cfg': ''})
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_from_toml(self, mock_exists):
"""from toml"""
clean_setup()
expected = 'config.toml'
args = get_args({'--cfg': ''})
mock_exists.side_effect = side_effect(valid=expected)
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def METHOD_NAME(self, mock_exists):
"""from xdg"""
clean_setup()
home = os.path.expanduser('~/.config')
expected = f'{home}/dotdrop/config.yaml'
mock_exists.side_effect = side_effect(valid=expected)
args = get_args({'--cfg': ''})
os.environ['XDG_CONFIG_HOME'] = home
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_xdg_toml(self, mock_exists):
"""from xdg toml"""
clean_setup()
home = os.path.expanduser('~/.config')
expected = f'{home}/dotdrop/config.toml'
mock_exists.side_effect = side_effect(valid=expected)
args = get_args({'--cfg': ''})
os.environ['XDG_CONFIG_HOME'] = home
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_fs_xdg_yaml(self, mock_exists):
"""from fs yaml"""
clean_setup()
home = os.path.expanduser('~/.config')
expected = f'{home}/dotdrop/config.yaml'
mock_exists.side_effect = side_effect(valid=expected)
args = get_args({'--cfg': ''})
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_fs_xdg_etc_yaml(self, mock_exists):
"""from fs xdg"""
clean_setup()
home = os.path.expanduser('/etc/xdg')
expected = f'{home}/dotdrop/config.yaml'
mock_exists.side_effect = side_effect(valid=expected)
args = get_args({'--cfg': ''})
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_fs_etc_dotdrop_yaml(self, mock_exists):
"""from fs etc"""
clean_setup()
home = os.path.expanduser('/etc')
expected = f'{home}/dotdrop/config.yaml'
mock_exists.side_effect = side_effect(valid=expected)
args = get_args({'--cfg': ''})
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_fs_etc_xdg_yaml(self, mock_exists):
"""from fs etc/xdg"""
clean_setup()
home = os.path.expanduser('/etc/xdg')
expected = f'{home}/dotdrop/config.yaml'
mock_exists.side_effect = side_effect(valid=expected)
args = get_args({'--cfg': ''})
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_fs_xdg_toml(self, mock_exists):
"""from fs toml"""
clean_setup()
home = os.path.expanduser('~/.config')
expected = f'{home}/dotdrop/config.toml'
mock_exists.side_effect = side_effect(valid=expected)
args = get_args({'--cfg': ''})
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_fs_xdg_etc_toml(self, mock_exists):
"""from fs xdg"""
clean_setup()
home = os.path.expanduser('/etc/xdg')
expected = f'{home}/dotdrop/config.toml'
mock_exists.side_effect = side_effect(valid=expected)
args = get_args({'--cfg': ''})
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_fs_etc_dotdrop_toml(self, mock_exists):
"""from fs etc"""
clean_setup()
home = os.path.expanduser('/etc')
expected = f'{home}/dotdrop/config.toml'
mock_exists.side_effect = side_effect(valid=expected)
args = get_args({'--cfg': ''})
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_fs_etc_xdg_toml(self, mock_exists):
"""from fs etc/xdg"""
clean_setup()
home = os.path.expanduser('/etc/xdg')
expected = f'{home}/dotdrop/config.toml'
mock_exists.side_effect = side_effect(valid=expected)
args = get_args({'--cfg': ''})
fake = FakeOptions(args)
self.assertEqual(fake._get_config_path(), expected)
@patch('os.path.exists')
def test_get_path_none(self, mock_exists):
"""path is none"""
clean_setup()
mock_exists.return_value = False
args = get_args({})
fake = FakeOptions(args)
self.assertEqual(None, fake._get_config_path())
@patch('os.path.exists')
def test_options_debug(self, mock_exists):
"""test debug"""
mock_exists.return_value = False
args = {
'--verbose': True,
'--dry': False,
'--cfg': 'path',
'--profile': 'profile',
}
with self.assertRaises(YamlException):
Options(args)
def main():
"""entry point"""
unittest.main()
if __name__ == '__main__':
main()
|
1,501 |
run test
|
#!/usr/bin/env python3
# Copyright (c) 2014-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs that retrieve information from the mempool."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet import MiniWallet
class RPCMempoolInfoTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def METHOD_NAME(self):
self.wallet = MiniWallet(self.nodes[0])
confirmed_utxo = self.wallet.get_utxo()
# Create a tree of unconfirmed transactions in the mempool:
# txA
# / \
# / \
# / \
# / \
# / \
# txB txC
# / \ / \
# / \ / \
# txD txE txF txG
# \ /
# \ /
# txH
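# gettxspendingprevout, exercised below, maps each requested outpoint to the
# mempool transaction that spends it (if any)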
def create_tx(**kwargs):
return self.wallet.send_self_transfer_multi(
from_node=self.nodes[0],
**kwargs,
)
txA = create_tx(utxos_to_spend=[confirmed_utxo], num_outputs=2)
txB = create_tx(utxos_to_spend=[txA["new_utxos"][0]], num_outputs=2)
txC = create_tx(utxos_to_spend=[txA["new_utxos"][1]], num_outputs=2)
txD = create_tx(utxos_to_spend=[txB["new_utxos"][0]], num_outputs=1)
txE = create_tx(utxos_to_spend=[txB["new_utxos"][1]], num_outputs=1)
txF = create_tx(utxos_to_spend=[txC["new_utxos"][0]], num_outputs=2)
txG = create_tx(utxos_to_spend=[txC["new_utxos"][1]], num_outputs=1)
txH = create_tx(utxos_to_spend=[txE["new_utxos"][0],txF["new_utxos"][0]], num_outputs=1)
txidA, txidB, txidC, txidD, txidE, txidF, txidG, txidH = [
tx["txid"] for tx in [txA, txB, txC, txD, txE, txF, txG, txH]
]
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 8)
for txid in [txidA, txidB, txidC, txidD, txidE, txidF, txidG, txidH]:
assert_equal(txid in mempool, True)
self.log.info("Find transactions spending outputs")
result = self.nodes[0].gettxspendingprevout([ {'txid' : confirmed_utxo['txid'], 'vout' : 0}, {'txid' : txidA, 'vout' : 1} ])
assert_equal(result, [ {'txid' : confirmed_utxo['txid'], 'vout' : 0, 'spendingtxid' : txidA}, {'txid' : txidA, 'vout' : 1, 'spendingtxid' : txidC} ])
self.log.info("Find transaction spending multiple outputs")
result = self.nodes[0].gettxspendingprevout([ {'txid' : txidE, 'vout' : 0}, {'txid' : txidF, 'vout' : 0} ])
assert_equal(result, [ {'txid' : txidE, 'vout' : 0, 'spendingtxid' : txidH}, {'txid' : txidF, 'vout' : 0, 'spendingtxid' : txidH} ])
self.log.info("Find no transaction when output is unspent")
result = self.nodes[0].gettxspendingprevout([ {'txid' : txidH, 'vout' : 0} ])
assert_equal(result, [ {'txid' : txidH, 'vout' : 0} ])
result = self.nodes[0].gettxspendingprevout([ {'txid' : txidA, 'vout' : 5} ])
assert_equal(result, [ {'txid' : txidA, 'vout' : 5} ])
self.log.info("Mixed spent and unspent outputs")
result = self.nodes[0].gettxspendingprevout([ {'txid' : txidB, 'vout' : 0}, {'txid' : txidG, 'vout' : 3} ])
assert_equal(result, [ {'txid' : txidB, 'vout' : 0, 'spendingtxid' : txidD}, {'txid' : txidG, 'vout' : 3} ])
self.log.info("Unknown input fields")
assert_raises_rpc_error(-3, "Unexpected key unknown", self.nodes[0].gettxspendingprevout, [{'txid' : txidC, 'vout' : 1, 'unknown' : 42}])
self.log.info("Invalid vout provided")
assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", self.nodes[0].gettxspendingprevout, [{'txid' : txidA, 'vout' : -1}])
self.log.info("Invalid txid provided")
assert_raises_rpc_error(-3, "JSON value of type number for field txid is not of expected type string", self.nodes[0].gettxspendingprevout, [{'txid' : 42, 'vout' : 0}])
self.log.info("Missing outputs")
assert_raises_rpc_error(-8, "Invalid parameter, outputs are missing", self.nodes[0].gettxspendingprevout, [])
self.log.info("Missing vout")
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].gettxspendingprevout, [{'txid' : txidA}])
self.log.info("Missing txid")
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].gettxspendingprevout, [{'vout' : 3}])
if __name__ == '__main__':
RPCMempoolInfoTest().main()
|
1,502 |
mean normed
|
#
# Annulus toy log pdf.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import numpy as np
import scipy
from . import ToyLogPDF
class AnnulusLogPDF(ToyLogPDF):
r"""
Toy distribution based on a d-dimensional distribution of the form
.. math::
f(x|r_0, \sigma) \propto e^{-(|x|-r_0)^2 / {2\sigma^2}}
where :math:`x` is a d-dimensional real, and :math:`|x|` is the Euclidean
norm.
This distribution is roughly a one-dimensional Gaussian distribution
centred on :math:`r_0`, smeared over the surface of a hypersphere of
the same radius. In two dimensions, the density looks like a circular
annulus.
Extends :class:`pints.LogPDF`.
Parameters
----------
dimensions : int
The dimensionality of the space.
r0 : float
The radius of the hypersphere and is approximately the mean normed
distance from the origin.
sigma : float
The width of the annulus; approximately the standard deviation
of normed distance.
"""
def __init__(self, dimensions=2, r0=10, sigma=1):
if dimensions < 1:
raise ValueError('Dimensions must not be less than 1.')
self._n_parameters = int(dimensions)
r0 = float(r0)
if r0 <= 0:
raise ValueError('r0 must be positive.')
self._r0 = r0
sigma = float(sigma)
if sigma <= 0:
raise ValueError('sigma must be positive.')
self._sigma = sigma
def __call__(self, x):
if not len(x) == self._n_parameters:
raise ValueError('x must be of same dimensions as density')
return scipy.stats.norm.logpdf(
np.linalg.norm(x), self._r0, self._sigma)
def distance(self, samples):
"""
Calculates a measure of normed distance of samples from exact mean and
covariance matrix assuming uniform prior with bounds given by
:meth:`suggested_bounds`.
See :meth:`ToyLogPDF.distance()`.
"""
# Check size of input
if not len(samples.shape) == 2:
raise ValueError('Given samples must be a 2-dimensional array.')
if samples.shape[1] != self.n_parameters():
raise ValueError(
'Given samples must have length ' +
str(self.n_parameters()))
# calculate normed distance
d = list(map(lambda x: np.linalg.norm(x), samples))
dist = (
np.abs(self.METHOD_NAME() - np.mean(d)) +
np.abs(self.var_normed() - np.var(d))
)
return dist
def evaluateS1(self, x):
""" See :meth:`LogPDF.evaluateS1()`.
"""
L = self.__call__(x)
r = self._r0
norm = np.linalg.norm(x)
sigma = self._sigma
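# gradient of -(|x| - r0)^2 / (2 sigma^2) with respect to x_i is
# -(|x| - r0) / sigma^2 * x_i / |x|, i.e. cons * x_i below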
cons = -(norm - r) / (norm * sigma**2)
dL = np.array([var * cons for var in x])
return L, dL
def mean(self):
"""
Returns the mean of this distribution.
"""
return np.zeros(self._n_parameters)
def METHOD_NAME(self):
"""
Returns the mean of the normed distance from the origin.
"""
return self.moment_normed(1)
def moment_normed(self, order):
"""
Returns a given moment of the normed distance from the origin.
"""
n = self._n_parameters
r = self._r0
a = order
s = self._sigma
g1 = scipy.special.gamma(0.5 * (n + a))
g2 = scipy.special.gamma(0.5 * (1 + n + a))
g3 = scipy.special.gamma(0.5 * n)
g4 = scipy.special.gamma(0.5 * (1 + n))
h1 = scipy.special.hyp1f1(0.5 * (n + a), 0.5, r**2 / (2 * s**2))
h2 = scipy.special.hyp1f1(0.5 * (1 + n + a), 1.5, r**2 / (2 * s**2))
h3 = scipy.special.hyp1f1(0.5 * (1 - n), 0.5, -r**2 / (2 * s**2))
h4 = scipy.special.hyp1f1(1 - 0.5 * n, 1.5, -r**2 / (2 * s**2))
m = 2**(2 - 0.5 * n + 0.5 * (-4 + n + a))
m *= np.exp(-r**2 / (2 * s**2)) * s**a
m *= (np.sqrt(2) * s * g1 * h1 + 2 * r * g2 * h2)
m /= (np.sqrt(2) * s * g3 * h3 + 2 * r * g4 * h4)
return m
def n_parameters(self):
return self._n_parameters
def r0(self):
"""
Returns ``r0``.
"""
return self._r0
def _reject_sample(self, n_samples):
"""
Generates non-negative independent samples.
"""
r = np.ones(n_samples) * -1
f = r < 0
while np.any(f):
r = np.random.normal(self._r0, self._sigma, size=np.sum(f))
f = r < 0
return r
def sample(self, n_samples):
""" See :meth:`ToyLogPDF.sample()`. """
n_samples = int(n_samples)
if n_samples < 1:
raise ValueError(
'Number of samples must be greater than or equal to 1.')
# First sample values of r
r = self._reject_sample(n_samples)
# sample directions uniformly and scale them so each point's norm equals the sampled r
X_norm = np.random.normal(size=(n_samples, self._n_parameters))
lambda_x = np.sqrt(np.sum(X_norm**2, axis=1))
x_unit = [r[i] * X_norm[i] / y for i, y in enumerate(lambda_x)]
return np.array(x_unit)
def sigma(self):
"""
Returns ``sigma``
"""
return self._sigma
def suggested_bounds(self):
""" See :meth:`ToyLogPDF.suggested_bounds()`. """
# in higher dimensions reduce volume as otherwise gets too wide
r0_magnitude = (self._r0 + self._sigma) * (
5**(1.0 / (self._n_parameters - 1.0))
)
bounds = np.tile([-r0_magnitude, r0_magnitude],
(self._n_parameters, 1))
return np.transpose(bounds).tolist()
def var_normed(self):
"""
Returns the variance of the normed distance from the origin.
"""
return self.moment_normed(2) - self.moment_normed(1)**2
|
1,503 |
dq
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import uuid
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateKey,
RSAPrivateNumbers,
RSAPublicNumbers,
generate_private_key,
rsa_crt_dmp1,
rsa_crt_dmq1,
rsa_crt_iqmp,
)
from ._internal import _bytes_to_int, _int_to_bytes
from .key import Key
from .algorithms import Ps256, Ps384, Ps512, Rsa1_5, RsaOaep, RsaOaep256, Rs256, Rs384, Rs512
from ... import JsonWebKey, KeyOperation
class RsaKey(Key): # pylint:disable=too-many-public-methods
PUBLIC_KEY_DEFAULT_OPS = [KeyOperation.encrypt, KeyOperation.wrap_key, KeyOperation.verify]
PRIVATE_KEY_DEFAULT_OPS = PUBLIC_KEY_DEFAULT_OPS + [
KeyOperation.decrypt,
KeyOperation.unwrap_key,
KeyOperation.sign,
]
_supported_encryption_algorithms = frozenset((Rsa1_5.name(), RsaOaep.name(), RsaOaep256.name()))
_supported_key_wrap_algorithms = frozenset((Rsa1_5.name(), RsaOaep.name(), RsaOaep256.name()))
_supported_signature_algorithms = frozenset(
(Ps256.name(), Ps384.name(), Ps512.name(), Rs256.name(), Rs384.name(), Rs512.name(),)
)
def __init__(self, kid=None):
super(RsaKey, self).__init__()
self._kid = kid
self.kty = None
self.key_ops = None
self._rsa_impl = None
@property
def n(self):
return _int_to_bytes(self._public_key_material().n)
@property
def e(self):
return _int_to_bytes(self._public_key_material().e)
@property
def p(self):
return _int_to_bytes(self._private_key_material().p) if self.is_private_key() else None
@property
def q(self):
return _int_to_bytes(self._private_key_material().q) if self.is_private_key() else None
@property
def b(self):
return _int_to_bytes(self._private_key_material().b) if self.is_private_key() else None
@property
def d(self):
return _int_to_bytes(self._private_key_material().d) if self.is_private_key() else None
@property
def METHOD_NAME(self):
return _int_to_bytes(self._private_key_material().dmq1) if self.is_private_key() else None
@property
def dp(self):
return _int_to_bytes(self._private_key_material().dmp1) if self.is_private_key() else None
@property
def qi(self):
return _int_to_bytes(self._private_key_material().iqmp) if self.is_private_key() else None
@property
def private_key(self):
return self._rsa_impl if self.is_private_key() else None
@property
def public_key(self):
return self._rsa_impl.public_key() if self.is_private_key() else self._rsa_impl
@staticmethod
def generate(kid=None, kty="RSA", size=2048, e=65537):
key = RsaKey()
key.kid = kid or str(uuid.uuid4())
key.kty = kty
key.key_ops = RsaKey.PRIVATE_KEY_DEFAULT_OPS
# pylint:disable=protected-access
key._rsa_impl = generate_private_key(public_exponent=e, key_size=size, backend=default_backend())
return key
@classmethod
def from_jwk(cls, jwk):
if jwk.kty not in ("RSA", "RSA-HSM"):
raise ValueError('The specified jwk must have a key type of "RSA" or "RSA-HSM"')
if not jwk.n or not jwk.e:
raise ValueError("Invalid RSA jwk, both n and e must be have values")
rsa_key = cls(kid=jwk.kid)
rsa_key.kty = jwk.kty
rsa_key.key_ops = jwk.key_ops
pub = RSAPublicNumbers(n=_bytes_to_int(jwk.n), e=_bytes_to_int(jwk.e))
# if the private key values are specified construct a private key
# only the secret primes and private exponent are needed as other fields can be calculated
if jwk.p and jwk.q and jwk.d:
# convert the values of p, q, and d from bytes to int
p = _bytes_to_int(jwk.p)
q = _bytes_to_int(jwk.q)
d = _bytes_to_int(jwk.d)
# convert or compute the remaining private key numbers
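# dmp1 = d mod (p-1), dmq1 = d mod (q-1) and iqmp = q^-1 mod p are the
# standard RSA CRT parameters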
dmp1 = _bytes_to_int(jwk.dp) if jwk.dp else rsa_crt_dmp1(private_exponent=d, p=p)
dmq1 = _bytes_to_int(jwk.METHOD_NAME) if jwk.METHOD_NAME else rsa_crt_dmq1(private_exponent=d, q=q)
iqmp = _bytes_to_int(jwk.qi) if jwk.qi else rsa_crt_iqmp(p=p, q=q)
# create the private key from the jwk key values
priv = RSAPrivateNumbers(p=p, q=q, d=d, dmp1=dmp1, dmq1=dmq1, iqmp=iqmp, public_numbers=pub)
key_impl = priv.private_key(default_backend())
# if the necessary private key values are not specified create the public key
else:
key_impl = pub.public_key(default_backend())
rsa_key._rsa_impl = key_impl # pylint:disable=protected-access
return rsa_key
def to_jwk(self, include_private=False):
jwk = JsonWebKey(
kid=self.kid,
kty=self.kty,
key_ops=self.key_ops if include_private else RsaKey.PUBLIC_KEY_DEFAULT_OPS,
n=self.n,
e=self.e,
)
if include_private:
jwk.q = self.q
jwk.p = self.p
jwk.d = self.d
jwk.METHOD_NAME = self.METHOD_NAME
jwk.dp = self.dp
jwk.qi = self.qi
return jwk
@property
def default_encryption_algorithm(self):
return RsaOaep.name()
@property
def default_key_wrap_algorithm(self):
return RsaOaep.name()
@property
def default_signature_algorithm(self):
return Rs256.name()
def encrypt(self, plain_text, **kwargs):
algorithm = self._get_algorithm("encrypt", **kwargs)
encryptor = algorithm.create_encryptor(self.public_key)
return encryptor.transform(plain_text)
def decrypt(self, cipher_text, **kwargs):
if not self.is_private_key():
raise NotImplementedError("The current RsaKey does not support decrypt")
algorithm = self._get_algorithm("decrypt", **kwargs)
decryptor = algorithm.create_decryptor(self.private_key)
return decryptor.transform(cipher_text)
def sign(self, digest, **kwargs):
if not self.is_private_key():
raise NotImplementedError("The current RsaKey does not support sign")
algorithm = self._get_algorithm("sign", **kwargs)
signer = algorithm.create_signature_transform(self.private_key)
return signer.sign(digest)
def verify(self, digest, signature, **kwargs):
algorithm = self._get_algorithm("verify", **kwargs)
signer = algorithm.create_signature_transform(self.public_key)
try:
# cryptography's verify methods return None, and raise when verification fails
signer.verify(digest, signature)
return True
except InvalidSignature:
return False
def wrap_key(self, key, **kwargs):
algorithm = self._get_algorithm("wrapKey", **kwargs)
encryptor = algorithm.create_encryptor(self.public_key)
return encryptor.transform(key)
def unwrap_key(self, encrypted_key, **kwargs):
if not self.is_private_key():
raise NotImplementedError("The current RsaKey does not support unwrap")
algorithm = self._get_algorithm("unwrapKey", **kwargs)
decryptor = algorithm.create_decryptor(self.private_key)
return decryptor.transform(encrypted_key)
def is_private_key(self):
return isinstance(self._rsa_impl, RSAPrivateKey)
def _public_key_material(self):
return self.public_key.public_numbers()
def _private_key_material(self):
return self.private_key.private_numbers() if self.private_key else None
|
1,504 |
run test
|
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
class LoggingTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def relative_log_path(self, name):
return os.path.join(self.nodes[0].chain_path, name)
def METHOD_NAME(self):
# test default log file name
default_log_path = self.relative_log_path("debug.log")
assert os.path.isfile(default_log_path)
# test alternative log file name in datadir
self.restart_node(0, ["-debuglogfile=foo.log"])
assert os.path.isfile(self.relative_log_path("foo.log"))
# test alternative log file name outside datadir
tempname = os.path.join(self.options.tmpdir, "foo.log")
self.restart_node(0, [f"-debuglogfile={tempname}"])
assert os.path.isfile(tempname)
# check that invalid log (relative) will cause error
invdir = self.relative_log_path("foo")
invalidname = os.path.join("foo", "foo.log")
self.stop_node(0)
exp_stderr = r"Error: Could not open debug log file \S+$"
self.nodes[0].assert_start_raises_init_error([f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX)
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (relative) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, [f"-debuglogfile={invalidname}"])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) will cause error
self.stop_node(0)
invdir = os.path.join(self.options.tmpdir, "foo")
invalidname = os.path.join(invdir, "foo.log")
self.nodes[0].assert_start_raises_init_error([f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX)
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, [f"-debuglogfile={invalidname}"])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that -nodebuglogfile disables logging
self.stop_node(0)
os.unlink(default_log_path)
assert not os.path.isfile(default_log_path)
self.start_node(0, ["-nodebuglogfile"])
assert not os.path.isfile(default_log_path)
# just sanity check no crash here
self.restart_node(0, [f"-debuglogfile={os.devnull}"])
self.log.info("Test -debug and -debugexclude raise when invalid values are passed")
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(
extra_args=["-debug=abc"],
expected_msg="Error: Unsupported logging category -debug=abc.",
match=ErrorMatch.FULL_REGEX,
)
self.nodes[0].assert_start_raises_init_error(
extra_args=["-debugexclude=abc"],
expected_msg="Error: Unsupported logging category -debugexclude=abc.",
match=ErrorMatch.FULL_REGEX,
)
self.log.info("Test -loglevel raises when invalid values are passed")
self.nodes[0].assert_start_raises_init_error(
extra_args=["-loglevel=abc"],
expected_msg="Error: Unsupported global logging level -loglevel=abc. Valid values: info, debug, trace.",
match=ErrorMatch.FULL_REGEX,
)
self.nodes[0].assert_start_raises_init_error(
extra_args=["-loglevel=net:abc"],
expected_msg="Error: Unsupported category-specific logging level -loglevel=net:abc.",
match=ErrorMatch.PARTIAL_REGEX,
)
self.nodes[0].assert_start_raises_init_error(
extra_args=["-loglevel=net:info:abc"],
expected_msg="Error: Unsupported category-specific logging level -loglevel=net:info:abc.",
match=ErrorMatch.PARTIAL_REGEX,
)
if __name__ == '__main__':
LoggingTest().main()
|
1,505 |
npc on target crit hit
|
import arcemu
from arcemu import Unit
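# each handler below receives the scripted unit, the event id and
# event-specific arguments; they are registered for creature entry 113
# at the bottom of this script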
def npc_onCombatStart( unit, event, target ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_YELL, arcemu.LANG_UNIVERSAL, "I am going to kill you " + target.getName() + "!" )
def npc_onCombatStop( unit, event, target ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "Okay, Okay, I yield." )
def npc_onDamageTaken( unit, event, attacker, amount ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "I am damaged for " + str( amount ) )
def npc_onCastSpell( unit, event, spellId ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "I am casting spell " + str( spellId ) )
def npc_onTargetParried( unit, event, target ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "I have parried the attack of " + target.getName() )
def npc_onTargetDodged( unit, event, target ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "I have dodged the attack of " + target.getName() )
def npc_onTargetBlocked( unit, event, target, amount ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "I have blocked " + str( amount ) + " of the attack of " + target.getName() )
def METHOD_NAME( unit, event, target, amount ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, target.getName() + " has criticall hit me for " + str( amount ) )
def npc_onParried( unit, event, target ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, target.getName() + " has parried my attack" )
def npc_onDodged( unit, event, target ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, target.getName() + " has dodged my attack" )
def npc_onBlocked( unit, event, target, amount ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, target.getName() + " has blocked " + str( amount ) + " of my attack" )
def npc_onCritHit( unit, event, target, amount ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, target.getName() + " has been critically hit for " + str( amount ) )
def npc_onHit( unit, event, target, amount ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "I have hit " + target.getName() + " for " + str( amount ) )
def npc_onDied( unit, event, killer ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, killer.getName() + " has killed me." )
def npc_onTargetDied( unit, event, target ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "Hahahaha. " + target.getName() + " you are not prepared!" )
def npc_onLoad( unit, event ):
print( "Loaded creature " + unit.getName() )
#unit.RegisterAIUpdateEvent( 5000 )
def npc_onAssistTargetDied( unit, event, assistTarget ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "Ayieee. " + assistTarget.getName() + " died! :(")
def npc_onFear( unit, event, feared, spellId ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "Ayieee. " + feared.getName() + " scared me with " + str( spellId ) )
def npc_onFlee( unit, event, flee ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "Ayieee. " + flee.getName() + " scared me!")
def npc_onCallForHelp( unit, event ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "Ayieee. I'm scared! I'll call for help!")
def npc_onDespawn( unit, event ):
print( unit.getName() + " is being despawned" )
def npc_onReachWP( unit, event, waypointId, forward ):
dir = ''
if forward:
dir = 'forward'
else:
dir = 'backwards'
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "I have reached waypoint " + str( waypointId ) + " while moving " + dir )
def npc_onLootTaken( unit, event, player, item ):
player.sendChatMessage( arcemu.CHAT_MSG_SAY, arcemu.LANG_UNIVERSAL, "I have taken item " + str( item ) + " from " + unit.getName() )
def npc_onAIUpdate( unit, event ):
print( "AIUpdate for " + unit.getName() )
def npc_onEmote( unit, event, player, emote ):
unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, "Emote " + str( emote ) + "from " + player.getName() )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_ENTER_COMBAT, npc_onCombatStart )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_LEAVE_COMBAT, npc_onCombatStop )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_HIT, npc_onHit )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_DAMAGE_TAKEN, npc_onDamageTaken )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_CAST_SPELL, npc_onCastSpell )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_TARGET_PARRIED, npc_onTargetParried )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_TARGET_DODGED, npc_onTargetDodged )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_TARGET_BLOCKED, npc_onTargetBlocked )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_TARGET_CRIT_HIT, METHOD_NAME )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_PARRY, npc_onParried )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_DODGED, npc_onDodged )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_BLOCKED, npc_onBlocked )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_CRIT_HIT, npc_onCritHit )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_DIED, npc_onDied )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_TARGET_DIED, npc_onTargetDied )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_LOAD, npc_onLoad )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_ASSIST_TARGET_DIED, npc_onAssistTargetDied )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_FEAR, npc_onFear )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_FLEE, npc_onFlee )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_CALL_FOR_HELP, npc_onCallForHelp )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_DESPAWN, npc_onDespawn )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_REACH_WP, npc_onReachWP )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_LOOT_TAKEN, npc_onLootTaken )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_AIUPDATE, npc_onAIUpdate )
arcemu.RegisterUnitEvent( 113, arcemu.CREATURE_EVENT_ON_EMOTE, npc_onEmote )
|
1,506 |
test extract daily
|
from pathlib import Path
import pandas as pd
from antarest.study.storage.rawstudy.model.filesystem.matrix.date_serializer import (
AnnualMatrixSerializer,
DailyMatrixSerializer,
HourlyMatrixSerializer,
MonthlyMatrixSerializer,
WeeklyMatrixSerializer,
rename_unnamed,
)
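# each serializer splits an Antares output matrix into its date/index columns
# (extract_date) and rebuilds them from an index (build_date); the tests below
# cover the hourly, daily, weekly, monthly and annual layouts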
def test_extract_hourly(tmp_path: Path):
file = tmp_path / "matrix-daily.txt"
content = """
DE hourly 01_solar 02_wind_on
MWh MWh
index day month hour
1 1 JAN 00:00 0 0
2 1 JAN 01:00 100 0
"""
file.write_text(content)
df = pd.read_csv(file, header=[0, 1, 2], sep="\t")
df.fillna("", inplace=True)
serializer = HourlyMatrixSerializer(area="de")
date, body = serializer.extract_date(df)
pd.testing.assert_index_equal(date, pd.Index(["01/01 00:00", "01/01 01:00"]))
rename_unnamed(body)
expected = pd.DataFrame(
data={
("01_solar", "MWh", ""): [0, 100],
("02_wind_on", "MWh", ""): [0, 0],
}
)
pd.testing.assert_frame_equal(body, expected)
def test_build_hourly(tmp_path: Path):
exp = pd.DataFrame(
{
0: ["DE", "", "", "", ""],
1: ["hourly", "", "index", 1, 2],
2: ["", "", "day", "01", "01"],
3: ["", "", "month", "JAN", "JAN"],
4: ["", "", "hour", "00:00", "01:00"],
}
)
index = pd.Index(["01/01 00:00", "01/01 01:00"])
serializer = HourlyMatrixSerializer(area="de")
res = serializer.build_date(index)
assert exp.values.tolist() == res.values.tolist()
def METHOD_NAME(tmp_path: Path):
file = tmp_path / "matrix-daily.txt"
content = """
DE daily 01_solar 02_wind_on
MWh MWh
index day month EXP EXP
1 01 JAN 27000 600
2 02 JAN 48000 34400
"""
file.write_text(content)
df = pd.read_csv(file, header=[0, 1, 2], sep="\t")
serializer = DailyMatrixSerializer(area="de")
date, body = serializer.extract_date(df)
pd.testing.assert_index_equal(date, pd.Index(["01/01", "01/02"]))
pd.testing.assert_frame_equal(
body,
pd.DataFrame(
data={
("01_solar", "MWh", "EXP"): [27000, 48000],
("02_wind_on", "MWh", "EXP"): [600, 34400],
}
),
)
def test_build_daily(tmp_path: Path):
exp = pd.DataFrame(
{
0: ["DE", "", "", "", ""],
1: ["daily", "", "index", 1, 2],
2: ["", "", "day", "01", "02"],
3: ["", "", "month", "JAN", "JAN"],
}
)
index = pd.Index(["01/01", "01/02"])
serializer = DailyMatrixSerializer(area="de")
res = serializer.build_date(index)
assert exp.values.tolist() == res.values.tolist()
def test_extract_weekly(tmp_path: Path):
file = tmp_path / "matrix-daily.txt"
content = """
DE weekly 01_solar 02_wind_on
MWh MWh
week
1 315000 275000
"""
file.write_text(content)
df = pd.read_csv(file, header=[0, 1, 2], sep="\t")
df.fillna("", inplace=True)
serializer = WeeklyMatrixSerializer(area="de")
date, body = serializer.extract_date(df)
pd.testing.assert_index_equal(date, pd.Index([1], name="weekly"))
rename_unnamed(body)
pd.testing.assert_frame_equal(
body,
pd.DataFrame(
data={
("01_solar", "MWh", ""): [315000],
("02_wind_on", "MWh", ""): [275000],
}
),
)
def test_build_weekly(tmp_path: Path):
exp = pd.DataFrame(
{
0: ["DE", "", "", "", ""],
1: ["weekly", "", "week", "1", "2"],
}
)
index = pd.Index(["1", "2"])
serializer = WeeklyMatrixSerializer(area="de")
res = serializer.build_date(index)
assert exp.values.tolist() == res.values.tolist()
def test_extract_monthly(tmp_path: Path):
file = tmp_path / "matrix-monthly.txt"
content = """
DE monthly 01_solar 02_wind_on
MWh MWh
index month EXP EXP
1 JAN 315000 275000
"""
file.write_text(content)
df = pd.read_csv(file, header=[0, 1, 2], sep="\t")
serializer = MonthlyMatrixSerializer(area="de")
date, body = serializer.extract_date(df)
pd.testing.assert_index_equal(date, pd.Index(["01"], name="month"))
rename_unnamed(body)
pd.testing.assert_frame_equal(
body,
pd.DataFrame(
data={
("01_solar", "MWh", "EXP"): [315000],
("02_wind_on", "MWh", "EXP"): [275000],
}
),
)
def test_build_monthly(tmp_path: Path):
exp = pd.DataFrame(
{
0: ["DE", "", "", "", ""],
1: ["monthly", "", "index", 1, 2],
2: ["", "", "month", "MAR", "MAY"],
}
)
index = pd.Index(["03", "05"])
serializer = MonthlyMatrixSerializer(area="de")
res = serializer.build_date(index)
assert exp.values.tolist() == res.values.tolist()
def test_extract_annual(tmp_path: Path):
file = tmp_path / "matrix-daily.txt"
content = """
DE annual 01_solar 02_wind_on
MWh MWh
Annual 315000 275000
"""
file.write_text(content)
df = pd.read_csv(file, header=[0, 1, 2], sep="\t")
df.fillna("", inplace=True)
serializer = AnnualMatrixSerializer(area="de")
date, body = serializer.extract_date(df)
pd.testing.assert_index_equal(date, pd.Index(["Annual"], name="annual"))
rename_unnamed(body)
pd.testing.assert_frame_equal(
body,
pd.DataFrame(
data={
("01_solar", "MWh", ""): [315000],
("02_wind_on", "MWh", ""): [275000],
}
),
)
def test_build_annual():
exp = pd.DataFrame(
{
0: ["DE", "", "", ""],
1: ["annual", "", "", "Annual"],
}
)
serializer = AnnualMatrixSerializer(area="de")
res = serializer.build_date(None)
assert exp.values.tolist() == res.values.tolist()
|
1,507 |
run
|
# -*- coding: utf-8 -*-
#Copyright (C) Fiz Vazquez [email protected]
# Modified by dgranda
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import logging
import os
from lxml import etree
from pytrainer.lib.xmlUtils import XMLParser
from pytrainer.gui.dialogs import fileChooserDialog, guiFlush
from pytrainer.core.activity import Activity
from sqlalchemy.orm import exc
class garminTCXv2():
def __init__(self, parent = None, validate=False):
self.parent = parent
self.pytrainer_main = parent.pytrainer_main
self.tmpdir = self.pytrainer_main.profile.tmpdir
self.data_path = os.path.dirname(__file__)
self.validate = validate
self.sport = self.getConfValue("Force_sport_to")
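# optional override: when 'Force_sport_to' is set in the plugin configuration,
# it replaces the sport read from each TCX activity (see getSport below)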
def getConfValue(self, confVar):
info = XMLParser(self.data_path+"/conf.xml")
code = info.getValue("pytrainer-plugin","plugincode")
plugindir = self.pytrainer_main.profile.plugindir
if not os.path.isfile(plugindir+"/"+code+"/conf.xml"):
value = None
else:
info = XMLParser(plugindir+"/"+code+"/conf.xml")
value = info.getValue("pytrainer-plugin",confVar)
return value
def METHOD_NAME(self):
logging.debug(">>")
# able to select multiple files....
selectedFiles = fileChooserDialog(title="Choose a TCX file (or files) to import", multiple=True).getFiles()
guiFlush()
importfiles = []
if not selectedFiles: #Nothing selected
return importfiles
for filename in selectedFiles: #Multiple files
if self.valid_input_file(filename): #TODO could consolidate tree generation here
tree = etree.ElementTree(file=filename)
#Possibly multiple entries in file
activities = self.getActivities(tree)
for activity in activities:
if not self.inDatabase(activity):
sport = self.getSport(activity)
gpxfile = "%s/garmin-tcxv2-%d.gpx" % (self.tmpdir, len(importfiles))
self.createGPXfile(gpxfile, activity)
importfiles.append((gpxfile, sport))
else:
logging.debug("File:%s activity %d already in database. Skipping import." % (filename, activities.index(activity)) )
else:
logging.info("File %s failed validation" % (filename))
logging.debug("<<")
return importfiles
def valid_input_file(self, filename):
""" Function to validate input file if requested"""
if not self.validate: #not asked to validate
logging.debug("Not validating %s" % (filename) )
return True
else:
xslfile = os.path.realpath(self.pytrainer_main.data_path)+ "/schemas/GarminTrainingCenterDatabase_v2.xsd"
from pytrainer.lib.xmlValidation import xmlValidator
validator = xmlValidator()
return validator.validateXSL(filename, xslfile)
def getActivities(self, tree):
'''Function to return all activities in Garmin training center version 2 file
'''
root = tree.getroot()
activities = root.findall(".//{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}Activity")
return activities
def inDatabase(self, activity):
#comparing date and start time (sport may have been changed in DB after import)
time = self.detailsFromTCX(activity)
try:
self.pytrainer_main.ddbb.session.query(Activity).filter(Activity.date_time_utc == time).one()
return True
except exc.NoResultFound:
return False
def getSport(self, activity):
#return sport from file or override if present
if self.sport:
return self.sport
#sportElement = activity.find(".//{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}Activity")
try:
sport = activity.get("Sport")
except:
sport = "import"
return sport
def detailsFromTCX(self, activity):
timeElement = activity.find(".//{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}Id")
if timeElement is None:
return None
else:
return timeElement.text
def createGPXfile(self, gpxfile, activity):
""" Function to transform a Garmin Training Center v2 Track to a valid GPX+ file
"""
xslt_doc = etree.parse(self.data_path+"/translate.xsl")
transform = etree.XSLT(xslt_doc)
#xml_doc = etree.parse(filename)
xml_doc = activity
result_tree = transform(xml_doc)
result_tree.write(gpxfile, xml_declaration=True, encoding='UTF-8')
|
1,508 |
comp avg seg dur
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os.path as op
import re
from tabulate import tabulate
from collections import Counter
def comp_purity(p_xy, axis):
max_p = p_xy.max(axis=axis)
marg_p = p_xy.sum(axis=axis)
indv_pur = max_p / marg_p
aggr_pur = max_p.sum()
return indv_pur, aggr_pur
def comp_entropy(p):
return (-p * np.log(p + 1e-8)).sum()
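# normalized mutual information between reference and hypothesis labels:
# MI = sum_xy p(x,y) * log(p(x,y) / (p(x) * p(y))), reported both raw and
# normalized by each marginal entropy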
def comp_norm_mutual_info(p_xy):
p_x = p_xy.sum(axis=1, keepdims=True)
p_y = p_xy.sum(axis=0, keepdims=True)
pmi = np.log(p_xy / np.matmul(p_x, p_y) + 1e-8)
mi = (p_xy * pmi).sum()
h_x = comp_entropy(p_x)
h_y = comp_entropy(p_y)
return mi, mi / h_x, mi / h_y, h_x, h_y
def pad(labs, n):
if n == 0:
return np.array(labs)
return np.concatenate([[labs[0]] * n, labs, [labs[-1]] * n])
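# average segment duration in frames: count label-change boundaries per
# utterance and divide total frames by total segments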
def METHOD_NAME(labs_list):
n_frms = 0
n_segs = 0
for labs in labs_list:
labs = np.array(labs)
edges = np.zeros(len(labs)).astype(bool)
edges[0] = True
edges[1:] = labs[1:] != labs[:-1]
n_frms += len(edges)
n_segs += edges.astype(int).sum()
return n_frms / n_segs
def comp_joint_prob(uid2refs, uid2hyps):
"""
Args:
pad: padding for spliced-feature derived labels
"""
cnts = Counter()
skipped = []
abs_frmdiff = 0
for uid in uid2refs:
if uid not in uid2hyps:
skipped.append(uid)
continue
refs = uid2refs[uid]
hyps = uid2hyps[uid]
abs_frmdiff += abs(len(refs) - len(hyps))
min_len = min(len(refs), len(hyps))
refs = refs[:min_len]
hyps = hyps[:min_len]
cnts.update(zip(refs, hyps))
tot = sum(cnts.values())
ref_set = sorted({ref for ref, _ in cnts.keys()})
hyp_set = sorted({hyp for _, hyp in cnts.keys()})
ref2pid = dict(zip(ref_set, range(len(ref_set))))
hyp2lid = dict(zip(hyp_set, range(len(hyp_set))))
# print(hyp_set)
p_xy = np.zeros((len(ref2pid), len(hyp2lid)), dtype=float)
for (ref, hyp), cnt in cnts.items():
p_xy[ref2pid[ref], hyp2lid[hyp]] = cnt
p_xy /= p_xy.sum()
return p_xy, ref2pid, hyp2lid, tot, abs_frmdiff, skipped
def read_phn(tsv_path, rm_stress=True):
uid2phns = {}
with open(tsv_path) as f:
for line in f:
uid, phns = line.rstrip().split("\t")
phns = phns.split(",")
if rm_stress:
phns = [re.sub("[0-9]", "", phn) for phn in phns]
uid2phns[uid] = phns
return uid2phns
def read_lab(tsv_path, lab_path, pad_len=0, upsample=1):
"""
tsv is needed to retrieve the uids for the labels
"""
with open(tsv_path) as f:
f.readline()
uids = [op.splitext(op.basename(line.rstrip().split()[0]))[0] for line in f]
with open(lab_path) as f:
labs_list = [pad(line.rstrip().split(), pad_len).repeat(upsample) for line in f]
assert len(uids) == len(labs_list)
return dict(zip(uids, labs_list))
def main_lab_lab(
tsv_dir,
lab_dir,
lab_name,
lab_sets,
ref_dir,
ref_name,
pad_len=0,
upsample=1,
verbose=False,
):
# assume tsv_dir is the same for both the reference and the hypotheses
tsv_dir = lab_dir if tsv_dir is None else tsv_dir
uid2refs = {}
for s in lab_sets:
uid2refs.update(read_lab(f"{tsv_dir}/{s}.tsv", f"{ref_dir}/{s}.{ref_name}"))
uid2hyps = {}
for s in lab_sets:
uid2hyps.update(
read_lab(
f"{tsv_dir}/{s}.tsv", f"{lab_dir}/{s}.{lab_name}", pad_len, upsample
)
)
_main(uid2refs, uid2hyps, verbose)
def main_phn_lab(
tsv_dir,
lab_dir,
lab_name,
lab_sets,
phn_dir,
phn_sets,
pad_len=0,
upsample=1,
verbose=False,
):
uid2refs = {}
for s in phn_sets:
uid2refs.update(read_phn(f"{phn_dir}/{s}.tsv"))
uid2hyps = {}
tsv_dir = lab_dir if tsv_dir is None else tsv_dir
for s in lab_sets:
uid2hyps.update(
read_lab(
f"{tsv_dir}/{s}.tsv", f"{lab_dir}/{s}.{lab_name}", pad_len, upsample
)
)
_main(uid2refs, uid2hyps, verbose)
def _main(uid2refs, uid2hyps, verbose):
(p_xy, ref2pid, hyp2lid, tot, frmdiff, skipped) = comp_joint_prob(
uid2refs, uid2hyps
)
ref_pur_by_hyp, ref_pur = comp_purity(p_xy, axis=0)
hyp_pur_by_ref, hyp_pur = comp_purity(p_xy, axis=1)
(mi, mi_norm_by_ref, mi_norm_by_hyp, h_ref, h_hyp) = comp_norm_mutual_info(p_xy)
outputs = {
"ref pur": ref_pur,
"hyp pur": hyp_pur,
"H(ref)": h_ref,
"H(hyp)": h_hyp,
"MI": mi,
"MI/H(ref)": mi_norm_by_ref,
"ref segL": METHOD_NAME(uid2refs.values()),
"hyp segL": METHOD_NAME(uid2hyps.values()),
"p_xy shape": p_xy.shape,
"frm tot": tot,
"frm diff": frmdiff,
"utt tot": len(uid2refs),
"utt miss": len(skipped),
}
print(tabulate([outputs.values()], outputs.keys(), floatfmt=".4f"))
if __name__ == "__main__":
"""
compute quality of labels with respect to phone labels, or another label set if provided
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("lab_dir")
parser.add_argument("lab_name")
parser.add_argument("--lab_sets", default=["valid"], type=str, nargs="+")
parser.add_argument(
"--phn_dir",
default="/checkpoint/wnhsu/data/librispeech/960h/fa/raw_phn/phone_frame_align_v1",
)
parser.add_argument(
"--phn_sets", default=["dev-clean", "dev-other"], type=str, nargs="+"
)
parser.add_argument("--pad_len", default=0, type=int, help="padding for hypotheses")
parser.add_argument(
"--upsample", default=1, type=int, help="upsample factor for hypotheses"
)
parser.add_argument("--ref_lab_dir", default="")
parser.add_argument("--ref_lab_name", default="")
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args()
if args.ref_lab_dir and args.ref_lab_name:
main_lab_lab(
args.tsv_dir,
args.lab_dir,
args.lab_name,
args.lab_sets,
args.ref_lab_dir,
args.ref_lab_name,
args.pad_len,
args.upsample,
args.verbose,
)
else:
main_phn_lab(
args.tsv_dir,
args.lab_dir,
args.lab_name,
args.lab_sets,
args.phn_dir,
args.phn_sets,
args.pad_len,
args.upsample,
args.verbose,
)
|
1,509 |
test quotes
|
from io import BytesIO
from translate.convert import csv2po, test_convert
from translate.storage import csvl10n, po
from translate.storage.test_base import first_translatable, headerless_len
def test_replacestrings():
"""Test the _replacestring function"""
assert (
csv2po.replacestrings("Test one two three", ("one", "een"), ("two", "twee"))
== "Test een twee three"
)
class TestCSV2PO:
@staticmethod
def csv2po(csvsource, template=None):
"""helper that converts csv source to po source without requiring files"""
inputfile = BytesIO(csvsource.encode())
inputcsv = csvl10n.csvfile(inputfile)
if template:
templatefile = BytesIO(template.encode())
inputpot = po.pofile(templatefile)
else:
inputpot = None
convertor = csv2po.csv2po(templatepo=inputpot)
return convertor.convertstore(inputcsv)
@staticmethod
def singleelement(storage):
"""checks that the pofile contains a single non-header element, and returns it"""
print(bytes(storage))
assert headerless_len(storage.units) == 1
return first_translatable(storage)
def test_simpleentity(self):
"""checks that a simple csv entry definition converts properly to a po entry"""
csvheader = "location,source,target\n"
csvsource = "intl.charset.default,ISO-8859-1,UTF-16"
# Headerless
pofile = self.csv2po(csvsource)
pounit = self.singleelement(pofile)
# With header
pofile = self.csv2po(csvheader + csvsource)
pounit = self.singleelement(pofile)
assert pounit.getlocations() == ["intl.charset.default"]
assert pounit.source == "ISO-8859-1"
assert pounit.target == "UTF-16"
def test_simpleentity_with_template(self):
"""checks that a simple csv entry definition converts properly to a po entry"""
csvsource = """location,original,translation
intl.charset.default,ISO-8859-1,UTF-16"""
potsource = """#: intl.charset.default
msgid "ISO-8859-1"
msgstr ""
"""
pofile = self.csv2po(csvsource, potsource)
pounit = self.singleelement(pofile)
assert pounit.getlocations() == ["intl.charset.default"]
assert pounit.source == "ISO-8859-1"
assert pounit.target == "UTF-16"
def test_newlines(self):
"""tests multiline po entries"""
minicsv = r""""Random comment
with continuation","Original text","Langdradige teks
wat lank aanhou"
"""
pofile = self.csv2po(minicsv)
unit = self.singleelement(pofile)
assert unit.getlocations() == ["Random comment\nwith continuation"]
assert unit.source == "Original text"
print(unit.target)
assert unit.target == "Langdradige teks\nwat lank aanhou"
def test_tabs(self):
"""Test the escaping of tabs"""
minicsv = ',"First column\tSecond column","Twee kolomme gesky met \t"'
pofile = self.csv2po(minicsv)
unit = self.singleelement(pofile)
print(unit.source)
assert unit.source == "First column\tSecond column"
assert (
not pofile.findunit("First column\tSecond column").target
== "Twee kolomme gesky met \\t"
)
def METHOD_NAME(self):
"""Test the escaping of quotes (and slash)"""
minicsv = r''',"Hello ""Everyone""","Good day ""All"""
,"Use \"".","Gebruik \""."'''
print(minicsv)
csvfile = csvl10n.csvfile(BytesIO(minicsv.encode()))
print(bytes(csvfile))
pofile = self.csv2po(minicsv)
unit = first_translatable(pofile)
assert unit.source == 'Hello "Everyone"'
assert pofile.findunit('Hello "Everyone"').target == 'Good day "All"'
print(bytes(pofile))
for unit in pofile.units:
print(unit.source)
print(unit.target)
print()
# assert pofile.findunit('Use \\".').target == 'Gebruik \\".'
def test_empties(self):
"""Tests that things keep working with empty entries"""
minicsv = ",SomeSource,"
pofile = self.csv2po(minicsv)
assert pofile.findunit("SomeSource") is not None
assert pofile.findunit("SomeSource").target == ""
assert headerless_len(pofile.units) == 1
def test_kdecomment(self):
"""checks that we can merge into KDE comment entries"""
csvsource = """location,source,target
simple.c,Source,Target"""
potsource = r"""#: simple.c
msgid "_: KDE comment\n"
"Source"
msgstr ""
"""
pofile = self.csv2po(csvsource, potsource)
pounit = self.singleelement(pofile)
assert pounit._extract_msgidcomments() == "KDE comment"
assert pounit.source == "Source"
assert pounit.target == "Target"
def test_escaped_newlines(self):
"""Tests that things keep working with escaped newlines"""
minicsv = '"source","target"\r\n"yellow pencil","żółty\\nołówek"'
pofile = self.csv2po(minicsv)
assert pofile.findunit("yellow pencil") is not None
assert pofile.findunit("yellow pencil").target == "żółty\\nołówek"
assert headerless_len(pofile.units) == 1
class TestCSV2POCommand(test_convert.TestConvertCommand, TestCSV2PO):
"""Tests running actual csv2po commands on files"""
convertmodule = csv2po
expected_options = [
"-t TEMPLATE, --template=TEMPLATE",
"-P, --pot",
"--charset=CHARSET",
"--columnorder=COLUMNORDER",
"--duplicates=DUPLICATESTYLE",
]
def test_columnorder(self):
csvcontent = '"Target","Same"\n'
self.create_testfile("test.csv", csvcontent)
self.run_command("test.csv", "test.po")
# Strip PO file header
content = self.open_testfile("test.po", "r").read().split("\n\n")[1]
assert (
content
== """#: Target
msgid "Same"
msgstr ""
"""
)
self.run_command("test.csv", "test.po", columnorder="target,source")
content = self.open_testfile("test.po", "r").read().split("\n\n")[1]
assert (
content
== """msgid "Same"
msgstr "Target"
"""
)
|
1,510 |
set done
|
import datetime
import collections
from uuid import uuid4
class Job:
"""Job related to specific host name.
Data must contain everything needed to finish the job.
"""
# Remove done jobs each n days to clear memory
keep_in_memory_days = 3
def __init__(self, host_name, data, job_id=None, created_time=None):
if job_id is None:
job_id = str(uuid4())
self._id = job_id
if created_time is None:
created_time = datetime.datetime.now()
self._created_time = created_time
self._started_time = None
self._done_time = None
self.host_name = host_name
self.data = data
self._result_data = None
self._started = False
self._done = False
self._errored = False
self._message = None
self._deleted = False
self._worker = None
def keep_in_memory(self):
if self._done_time is None:
return True
now = datetime.datetime.now()
delta = now - self._done_time
return delta.days < self.keep_in_memory_days
@property
def id(self):
return self._id
@property
def done(self):
return self._done
def reset(self):
self._started = False
self._started_time = None
self._done = False
self._done_time = None
self._errored = False
self._message = None
self._worker = None
@property
def started(self):
return self._started
@property
def deleted(self):
return self._deleted
def set_deleted(self):
self._deleted = True
self.set_worker(None)
def set_worker(self, worker):
if worker is self._worker:
return
if self._worker is not None:
self._worker.set_current_job(None)
self._worker = worker
if worker is not None:
worker.set_current_job(self)
def set_started(self):
self._started_time = datetime.datetime.now()
self._started = True
def METHOD_NAME(self, success=True, message=None, data=None):
self._done = True
self._done_time = datetime.datetime.now()
self._errored = not success
self._message = message
self._result_data = data
if self._worker is not None:
self._worker.set_current_job(None)
def status(self):
worker_id = None
if self._worker is not None:
worker_id = self._worker.id
output = {
"id": self.id,
"worker_id": worker_id,
"done": self._done
}
output["message"] = self._message or None
state = "waiting"
if self._deleted:
state = "deleted"
elif self._errored:
state = "error"
elif self._done:
state = "done"
elif self._started:
state = "started"
output["result"] = self._result_data
output["state"] = state
return output
class JobQueue:
"""Queue holds jobs that should be done and workers that can do them.
Also assigns jobs to workers.
"""
old_jobs_check_minutes_interval = 30
def __init__(self):
self._last_old_jobs_check = datetime.datetime.now()
self._jobs_by_id = {}
self._job_queue_by_host_name = collections.defaultdict(
collections.deque
)
self._workers_by_id = {}
self._workers_by_host_name = collections.defaultdict(list)
def workers(self):
"""All currently registered workers."""
return self._workers_by_id.values()
def add_worker(self, worker):
host_name = worker.host_name
print("Added new worker for \"{}\"".format(host_name))
self._workers_by_id[worker.id] = worker
self._workers_by_host_name[host_name].append(worker)
def get_worker(self, worker_id):
return self._workers_by_id.get(worker_id)
def remove_worker(self, worker):
# Look if worker had assigned job to do
job = worker.current_job
if job is not None and not job.done:
# Reset job
job.set_worker(None)
job.reset()
# Add job back to queue
self._job_queue_by_host_name[job.host_name].appendleft(job)
# Remove worker from registered workers
self._workers_by_id.pop(worker.id, None)
host_name = worker.host_name
if worker in self._workers_by_host_name[host_name]:
self._workers_by_host_name[host_name].remove(worker)
print("Removed worker for \"{}\"".format(host_name))
def assign_jobs(self):
"""Try to assign job for each idle worker.
Error all jobs without needed worker.
"""
available_host_names = set()
for worker in self._workers_by_id.values():
host_name = worker.host_name
available_host_names.add(host_name)
if worker.is_idle():
jobs = self._job_queue_by_host_name[host_name]
while jobs:
job = jobs.popleft()
if not job.deleted:
worker.set_current_job(job)
break
for host_name in tuple(self._job_queue_by_host_name.keys()):
if host_name in available_host_names:
continue
jobs_deque = self._job_queue_by_host_name[host_name]
message = ("Not available workers for \"{}\"").format(host_name)
while jobs_deque:
job = jobs_deque.popleft()
if not job.deleted:
job.METHOD_NAME(False, message)
self._remove_old_jobs()
def get_jobs(self):
return self._jobs_by_id.values()
def get_job(self, job_id):
"""Job by it's id."""
return self._jobs_by_id.get(job_id)
def create_job(self, host_name, job_data):
"""Create new job from passed data and add it to queue."""
job = Job(host_name, job_data)
self._jobs_by_id[job.id] = job
self._job_queue_by_host_name[host_name].append(job)
return job
def _remove_old_jobs(self):
"""Once in specific time look if should remove old finished jobs."""
delta = datetime.datetime.now() - self._last_old_jobs_check
if delta.seconds < self.old_jobs_check_minutes_interval:
return
for job_id in tuple(self._jobs_by_id.keys()):
job = self._jobs_by_id[job_id]
if not job.keep_in_memory():
self._jobs_by_id.pop(job_id)
def remove_job(self, job_id):
"""Delete job and eventually stop it."""
job = self._jobs_by_id.get(job_id)
if job is None:
return
job.set_deleted()
self._jobs_by_id.pop(job.id)
def get_job_status(self, job_id):
"""Job's status based on id."""
job = self._jobs_by_id.get(job_id)
if job is None:
return {}
return job.status()
|
1,511 |
get objects
|
"""Sync LDAP Users and groups into authentik"""
from typing import Any, Generator
from django.conf import settings
from django.db.models.base import Model
from django.db.models.query import QuerySet
from ldap3 import DEREF_ALWAYS, SUBTREE, Connection
from structlog.stdlib import BoundLogger, get_logger
from authentik.core.exceptions import PropertyMappingExpressionException
from authentik.events.models import Event, EventAction
from authentik.lib.config import CONFIG
from authentik.lib.merge import MERGE_LIST_UNIQUE
from authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME
from authentik.sources.ldap.models import LDAPPropertyMapping, LDAPSource
LDAP_UNIQUENESS = "ldap_uniq"
class BaseLDAPSynchronizer:
"""Sync LDAP Users and groups into authentik"""
_source: LDAPSource
_logger: BoundLogger
_connection: Connection
_messages: list[str]
def __init__(self, source: LDAPSource):
self._source = source
self._connection = source.connection()
self._messages = []
self._logger = get_logger().bind(source=source, syncer=self.__class__.__name__)
@staticmethod
def name() -> str:
"""UI name for the type of object this class synchronizes"""
raise NotImplementedError
def sync_full(self):
"""Run full sync, this function should only be used in tests"""
if not settings.TEST: # noqa
raise RuntimeError(
f"{self.__class__.__name__}.sync_full() should only be used in tests"
)
for page in self.METHOD_NAME():
self.sync(page)
def sync(self, page_data: list) -> int:
"""Sync function, implemented in subclass"""
raise NotImplementedError()
@property
def messages(self) -> list[str]:
"""Get all UI messages"""
return self._messages
@property
def base_dn_users(self) -> str:
"""Shortcut to get full base_dn for user lookups"""
if self._source.additional_user_dn:
return f"{self._source.additional_user_dn},{self._source.base_dn}"
return self._source.base_dn
@property
def base_dn_groups(self) -> str:
"""Shortcut to get full base_dn for group lookups"""
if self._source.additional_group_dn:
return f"{self._source.additional_group_dn},{self._source.base_dn}"
return self._source.base_dn
def message(self, *args, **kwargs):
"""Add message that is later added to the System Task and shown to the user"""
formatted_message = " ".join(args)
if "dn" in kwargs:
formatted_message += f"; DN: {kwargs['dn']}"
self._messages.append(formatted_message)
self._logger.warning(*args, **kwargs)
def METHOD_NAME(self, **kwargs) -> Generator:
"""Get objects from LDAP, implemented in subclass"""
raise NotImplementedError()
# pylint: disable=too-many-arguments
def search_paginator(
self,
search_base,
search_filter,
search_scope=SUBTREE,
dereference_aliases=DEREF_ALWAYS,
attributes=None,
size_limit=0,
time_limit=0,
types_only=False,
get_operational_attributes=False,
controls=None,
paged_size=CONFIG.get_int("ldap.page_size", 50),
paged_criticality=False,
):
"""Search in pages, returns each page"""
cookie = True
while cookie:
self._connection.search(
search_base,
search_filter,
search_scope,
dereference_aliases,
attributes,
size_limit,
time_limit,
types_only,
get_operational_attributes,
controls,
paged_size,
paged_criticality,
None if cookie is True else cookie,
)
try:
cookie = self._connection.result["controls"]["1.2.840.113556.1.4.319"]["value"][
"cookie"
]
except KeyError:
cookie = None
yield self._connection.response
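    # Usage sketch (filter and attributes are illustrative, not taken from
    # this file):
    #     for page in self.search_paginator(self.base_dn_users,
    #                                       "(objectClass=person)",
    #                                       attributes=["cn", "mail"]):
    #         self.sync(page)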
def _flatten(self, value: Any) -> Any:
"""Flatten `value` if its a list"""
if isinstance(value, list):
if len(value) < 1:
return None
return value[0]
return value
def build_user_properties(self, user_dn: str, **kwargs) -> dict[str, Any]:
"""Build attributes for User object based on property mappings."""
props = self._build_object_properties(user_dn, self._source.property_mappings, **kwargs)
props["path"] = self._source.get_user_path()
return props
def build_group_properties(self, group_dn: str, **kwargs) -> dict[str, Any]:
"""Build attributes for Group object based on property mappings."""
return self._build_object_properties(
group_dn, self._source.property_mappings_group, **kwargs
)
def _build_object_properties(
self, object_dn: str, mappings: QuerySet, **kwargs
) -> dict[str, dict[Any, Any]]:
properties = {"attributes": {}}
for mapping in mappings.all().select_subclasses():
if not isinstance(mapping, LDAPPropertyMapping):
continue
mapping: LDAPPropertyMapping
try:
value = mapping.evaluate(user=None, request=None, ldap=kwargs, dn=object_dn)
if value is None:
continue
                if isinstance(value, bytes):
continue
object_field = mapping.object_field
if object_field.startswith("attributes."):
                    # Because returning a list might be desired, we can't
                    # rely on self._flatten here. Instead, just save the result as-is.
properties["attributes"][object_field.replace("attributes.", "")] = value
else:
properties[object_field] = self._flatten(value)
except PropertyMappingExpressionException as exc:
Event.new(
EventAction.CONFIGURATION_ERROR,
message=f"Failed to evaluate property-mapping: '{mapping.name}'",
source=self._source,
mapping=mapping,
).save()
self._logger.warning("Mapping failed to evaluate", exc=exc, mapping=mapping)
continue
if self._source.object_uniqueness_field in kwargs:
properties["attributes"][LDAP_UNIQUENESS] = self._flatten(
kwargs.get(self._source.object_uniqueness_field)
)
properties["attributes"][LDAP_DISTINGUISHED_NAME] = object_dn
return properties
def update_or_create_attributes(
self,
obj: type[Model],
query: dict[str, Any],
data: dict[str, Any],
) -> tuple[Model, bool]:
"""Same as django's update_or_create but correctly update attributes by merging dicts"""
instance = obj.objects.filter(**query).first()
if not instance:
return (obj.objects.create(**data), True)
for key, value in data.items():
if key == "attributes":
continue
setattr(instance, key, value)
final_attributes = {}
MERGE_LIST_UNIQUE.merge(final_attributes, instance.attributes)
MERGE_LIST_UNIQUE.merge(final_attributes, data.get("attributes", {}))
instance.attributes = final_attributes
instance.save()
return (instance, False)
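    # Note on the merge above: instance.attributes and the incoming
    # data["attributes"] are combined key by key via MERGE_LIST_UNIQUE
    # (see authentik.lib.merge), so existing attribute keys survive an update
    # instead of being replaced wholesale.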
|
1,512 |
config file
|
# This file is part of the MapProxy project.
# Copyright (C) 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import shutil
import sqlite3
from io import BytesIO
import pytest
from mapproxy.request.wms import WMS111MapRequest
from mapproxy.test.http import MockServ
from mapproxy.test.image import is_png, create_tmp_image
from mapproxy.test.system import SysTest
from mapproxy.cache.geopackage import GeopackageCache
from mapproxy.grid import TileGrid
@pytest.fixture(scope="module")
def METHOD_NAME():
return "cache_geopackage.yaml"
@pytest.fixture(scope="class")
def fixture_gpkg(base_dir):
shutil.copy(
os.path.join(os.path.dirname(__file__), "fixture", "cache.gpkg"),
base_dir.strpath,
)
@pytest.mark.usefixtures("fixture_gpkg")
class TestGeopackageCache(SysTest):
def setup(self):
self.common_map_req = WMS111MapRequest(
url="/service?",
param=dict(
service="WMS",
version="1.1.1",
bbox="-180,-80,0,0",
width="200",
height="200",
layers="gpkg",
srs="EPSG:4326",
format="image/png",
styles="",
request="GetMap",
),
)
def test_get_map_cached(self, app):
resp = app.get(self.common_map_req)
assert resp.content_type == "image/png"
data = BytesIO(resp.body)
assert is_png(data)
def test_get_map_uncached(self, app, base_dir):
assert base_dir.join("cache.gpkg").check()
# already created on startup
self.common_map_req.params.bbox = "-180,0,0,80"
serv = MockServ(port=42423)
serv.expects("/tiles/01/000/000/000/000/000/001.png")
serv.returns(create_tmp_image((256, 256)))
with serv:
resp = app.get(self.common_map_req)
assert resp.content_type == "image/png"
data = BytesIO(resp.body)
assert is_png(data)
# now cached
resp = app.get(self.common_map_req)
assert resp.content_type == "image/png"
data = BytesIO(resp.body)
assert is_png(data)
def test_bad_config_geopackage_no_gpkg_contents(self, app, base_dir):
gpkg_file = base_dir.join("cache.gpkg").strpath
table_name = "no_gpkg_contents"
with sqlite3.connect(gpkg_file) as db:
cur = db.execute(
"""SELECT name FROM sqlite_master WHERE type='table' AND name=?""",
(table_name,),
)
content = cur.fetchone()
assert content[0] == table_name
with sqlite3.connect(gpkg_file) as db:
cur = db.execute(
"""SELECT table_name FROM gpkg_contents WHERE table_name=?""",
(table_name,),
)
content = cur.fetchone()
assert not content
GeopackageCache(gpkg_file, TileGrid(srs=4326), table_name=table_name)
with sqlite3.connect(gpkg_file) as db:
cur = db.execute(
"""SELECT table_name FROM gpkg_contents WHERE table_name=?""",
(table_name,),
)
content = cur.fetchone()
assert content[0] == table_name
def test_bad_config_geopackage_no_spatial_ref_sys(self, base_dir):
gpkg_file = base_dir.join("cache.gpkg").strpath
organization_coordsys_id = 3785
table_name = "no_gpkg_spatial_ref_sys"
with sqlite3.connect(gpkg_file) as db:
cur = db.execute(
"""SELECT organization_coordsys_id FROM gpkg_spatial_ref_sys WHERE organization_coordsys_id=?""",
(organization_coordsys_id,),
)
content = cur.fetchone()
assert not content
GeopackageCache(gpkg_file, TileGrid(srs=3785), table_name=table_name)
with sqlite3.connect(gpkg_file) as db:
cur = db.execute(
"""SELECT organization_coordsys_id FROM gpkg_spatial_ref_sys WHERE organization_coordsys_id=?""",
(organization_coordsys_id,),
)
content = cur.fetchone()
assert content[0] == organization_coordsys_id
|
1,513 |
type
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetMigrateProjectResult',
'AwaitableGetMigrateProjectResult',
'get_migrate_project',
'get_migrate_project_output',
]
@pulumi.output_type
class GetMigrateProjectResult:
"""
Migrate Project REST Resource.
"""
def __init__(__self__, e_tag=None, id=None, location=None, name=None, properties=None, tags=None, METHOD_NAME=None):
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
"""
Gets or sets the eTag for concurrency control.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def id(self) -> str:
"""
Gets the relative URL to get this migrate project.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Gets or sets the Azure location in which migrate project is created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Gets the name of the migrate project.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.MigrateProjectPropertiesResponse':
"""
Gets or sets the nested properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional['outputs.MigrateProjectResponseTags']:
"""
Gets or sets the tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Handled by resource provider. Type = Microsoft.Migrate/MigrateProject.
"""
return pulumi.get(self, "type")
class AwaitableGetMigrateProjectResult(GetMigrateProjectResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetMigrateProjectResult(
e_tag=self.e_tag,
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
METHOD_NAME=self.METHOD_NAME)
def get_migrate_project(migrate_project_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMigrateProjectResult:
"""
Migrate Project REST Resource.
:param str migrate_project_name: Name of the Azure Migrate project.
:param str resource_group_name: Name of the Azure Resource Group that migrate project is part of.
"""
__args__ = dict()
__args__['migrateProjectName'] = migrate_project_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:migrate/v20180901preview:getMigrateProject', __args__, opts=opts, typ=GetMigrateProjectResult).value
return AwaitableGetMigrateProjectResult(
e_tag=pulumi.get(__ret__, 'e_tag'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
tags=pulumi.get(__ret__, 'tags'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_migrate_project)
def get_migrate_project_output(migrate_project_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMigrateProjectResult]:
"""
Migrate Project REST Resource.
:param str migrate_project_name: Name of the Azure Migrate project.
:param str resource_group_name: Name of the Azure Resource Group that migrate project is part of.
"""
...
|
1,514 |
test set control vars with constants
|
import numpy as np
from pysd.py_backend.components import Time, Components
from pysd import read_vensim
class TestComponents():
def test_load_components(self, _root):
test_model = _root.joinpath("test-models/samples/teacup/teacup.mdl")
test_model_py = _root.joinpath("test-models/samples/teacup/teacup.py")
read_vensim(test_model)
# set function for testing
executed = []
def set_component(input_dict):
executed.append(("SET", input_dict))
# create object
components = Components(test_model_py, set_component)
# main attributes of the class
assert hasattr(components, "_components")
assert hasattr(components, "_set_components")
# check getting elements
assert components.room_temperature() == 70
# check setting elements
components.room_temperature = 5
assert ("SET", {"room_temperature": 5}) in executed
def temperature():
return 34
components.teacup_temperature = temperature
assert ("SET", {"teacup_temperature": temperature}) in executed
class TestTime():
def test_set_control_vars(self):
time = Time()
def initial_time():
return 2
def final_time():
return 23
def time_step():
return 0.25
def saveper():
return 0.75
time.set_control_vars(
initial_time=initial_time, final_time=final_time,
saveper=saveper, time_step=time_step)
assert time() == 2
assert time.initial_time() == 2
assert time.final_time() == 23
assert time.time_step() == 0.25
assert time.saveper() == 0.75
time.update(10)
assert time() == 10
assert time.initial_time() == 2
assert time.final_time() == 23
assert time.time_step() == 0.25
assert time.saveper() == 0.75
time.reset()
assert time() == 2
assert time.initial_time() == 2
assert time.final_time() == 23
assert time.time_step() == 0.25
assert time.saveper() == 0.75
time.set_control_vars(
saveper=lambda: 2, time_step=lambda: 1)
assert time() == 2
assert time.initial_time() == 2
assert time.final_time() == 23
assert time.time_step() == 1
assert time.saveper() == 2
def METHOD_NAME(self):
time = Time()
time.set_control_vars(
initial_time=2, final_time=23, saveper=0.75, time_step=0.25)
assert time() == 2
assert time.initial_time() == 2
assert time.final_time() == 23
assert time.time_step() == 0.25
assert time.saveper() == 0.75
time.set_control_vars(
initial_time=6)
assert time() == 6
assert time.initial_time() == 6
assert time.final_time() == 23
assert time.time_step() == 0.25
assert time.saveper() == 0.75
time.set_control_vars(
final_time=50, saveper=4, time_step=1)
assert time() == 6
assert time.initial_time() == 6
assert time.final_time() == 50
assert time.time_step() == 1
assert time.saveper() == 4
def test_in_bounds(self):
time = Time()
time.set_control_vars(
initial_time=2, final_time=23, saveper=0.75, time_step=0.25)
assert time.in_bounds()
time.update(21)
assert time.in_bounds()
time.update(23)
assert not time.in_bounds()
time.update(24)
assert not time.in_bounds()
my_time = {"final_time": 30}
def final_time():
return my_time["final_time"]
time.set_control_vars(
initial_time=2, final_time=final_time,
saveper=0.75, time_step=0.25)
        # dynamic final_time
assert time.in_bounds()
time.update(23)
assert time.in_bounds()
my_time["final_time"] = 20
assert not time.in_bounds()
my_time["final_time"] = 50
assert time.in_bounds()
def test_in_return_saveperper(self):
time = Time()
time.set_control_vars(
initial_time=2, final_time=100, saveper=0.75, time_step=0.25)
assert time.in_return()
time.update(2.25)
assert not time.in_return()
time.update(2.75)
assert time.in_return()
time.update(77)
assert time.in_return()
        # dynamic initial_time
my_time = {"initial_time": 2}
def initial_time():
return my_time["initial_time"]
time.set_control_vars(
initial_time=initial_time, final_time=100,
saveper=0.75, time_step=0.25)
assert time.in_return()
time.update(2.25)
assert not time.in_return()
time.update(2.75)
assert time.in_return()
time.update(77)
assert time.in_return()
        # changing the initial_time var during a run must not affect saving times
my_time["initial_time"] = 2.25
time.reset()
assert time.initial_time() == 2.25
assert time.in_return()
time.update(2.25)
assert not time.in_return()
time.update(2.75)
assert time.in_return()
time.update(77)
assert time.in_return()
        # dynamic saveper
my_time["saveper"] = 0.75
def saveper():
return my_time["saveper"]
time.set_control_vars(
initial_time=2, final_time=100, saveper=saveper, time_step=0.25)
assert time.in_return()
time.update(2.25)
assert not time.in_return()
time.update(2.75)
assert time.in_return()
time.update(3)
assert not time.in_return()
my_time["saveper"] = 1
time.reset()
assert time.in_return()
time.update(2.25)
assert not time.in_return()
time.update(2.75)
assert not time.in_return()
time.update(3)
assert time.in_return()
def test_in_return_timestamps(self):
time = Time()
time.set_control_vars(
initial_time=2, final_time=100, saveper=1, time_step=0.25)
assert time.in_return()
time.update(4)
assert time.in_return()
time.update(10)
assert time.in_return()
time.update(12)
assert time.in_return()
time.update(37)
assert time.in_return()
time.reset()
time.add_return_timestamps([2, 10, 37])
assert time.in_return()
time.update(4)
assert not time.in_return()
time.update(10)
assert time.in_return()
time.update(12)
assert not time.in_return()
time.update(37)
assert time.in_return()
time.reset()
time.add_return_timestamps(np.array([4, 12]))
assert not time.in_return()
time.update(4)
assert time.in_return()
time.update(10)
assert not time.in_return()
time.update(12)
assert time.in_return()
time.update(37)
assert not time.in_return()
time.reset()
time.add_return_timestamps(37)
assert not time.in_return()
time.update(4)
assert not time.in_return()
time.update(10)
assert not time.in_return()
time.update(12)
assert not time.in_return()
time.update(37)
assert time.in_return()
time.reset()
time.add_return_timestamps(None)
assert time.in_return()
time.update(4)
assert time.in_return()
time.update(10)
assert time.in_return()
time.update(12)
assert time.in_return()
time.update(37)
assert time.in_return()
|
1,515 |
cmd add header ext stdio
|
#!/usr/bin/env python3
#
# Manipulations with qcow2 image
#
# Copyright (C) 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
from qcow2_format import (
QcowHeader,
QcowHeaderExtension
)
is_json = False
def cmd_dump_header(fd):
h = QcowHeader(fd)
h.dump(is_json)
print()
h.dump_extensions(is_json)
def cmd_dump_header_exts(fd):
h = QcowHeader(fd)
h.dump_extensions(is_json)
def cmd_set_header(fd, name, value):
try:
value = int(value, 0)
except ValueError:
print("'%s' is not a valid number" % value)
sys.exit(1)
fields = (field[2] for field in QcowHeader.fields)
if name not in fields:
print("'%s' is not a known header field" % name)
sys.exit(1)
h = QcowHeader(fd)
h.__dict__[name] = value
h.update(fd)
def cmd_add_header_ext(fd, magic, data):
try:
magic = int(magic, 0)
except ValueError:
print("'%s' is not a valid magic number" % magic)
sys.exit(1)
h = QcowHeader(fd)
h.extensions.append(QcowHeaderExtension.create(magic,
data.encode('ascii')))
h.update(fd)
def METHOD_NAME(fd, magic):
data = sys.stdin.read()
cmd_add_header_ext(fd, magic, data)
def cmd_del_header_ext(fd, magic):
try:
magic = int(magic, 0)
except ValueError:
print("'%s' is not a valid magic number" % magic)
sys.exit(1)
h = QcowHeader(fd)
found = False
for ex in h.extensions:
if ex.magic == magic:
found = True
h.extensions.remove(ex)
if not found:
print("No such header extension")
return
h.update(fd)
def cmd_set_feature_bit(fd, group, bit):
try:
bit = int(bit, 0)
if bit < 0 or bit >= 64:
raise ValueError
except ValueError:
print("'%s' is not a valid bit number in range [0, 64)" % bit)
sys.exit(1)
h = QcowHeader(fd)
if group == 'incompatible':
h.incompatible_features |= 1 << bit
elif group == 'compatible':
h.compatible_features |= 1 << bit
elif group == 'autoclear':
h.autoclear_features |= 1 << bit
else:
print("'%s' is not a valid group, try "
"'incompatible', 'compatible', or 'autoclear'" % group)
sys.exit(1)
h.update(fd)
cmds = [
['dump-header', cmd_dump_header, 0,
'Dump image header and header extensions'],
['dump-header-exts', cmd_dump_header_exts, 0,
'Dump image header extensions'],
['set-header', cmd_set_header, 2, 'Set a field in the header'],
['add-header-ext', cmd_add_header_ext, 2, 'Add a header extension'],
['add-header-ext-stdio', METHOD_NAME, 1,
'Add a header extension, data from stdin'],
['del-header-ext', cmd_del_header_ext, 1, 'Delete a header extension'],
['set-feature-bit', cmd_set_feature_bit, 2, 'Set a feature bit'],
]
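# Example invocation (image path and magic value are hypothetical):
#     qcow2.py test.qcow2 add-header-ext-stdio 0x12345678 < extension-data.bin
#     qcow2.py test.qcow2 dump-header -j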
def main(filename, cmd, args):
fd = open(filename, "r+b")
try:
for name, handler, num_args, desc in cmds:
if name != cmd:
continue
elif len(args) != num_args:
usage()
return
else:
handler(fd, *args)
return
print("Unknown command '%s'" % cmd)
finally:
fd.close()
def usage():
print("Usage: %s <file> <cmd> [<arg>, ...] [<key>, ...]" % sys.argv[0])
print("")
print("Supported commands:")
for name, handler, num_args, desc in cmds:
print(" %-20s - %s" % (name, desc))
print("")
print("Supported keys:")
print(" %-20s - %s" % ('-j', 'Dump in JSON format'))
if __name__ == '__main__':
if len(sys.argv) < 3:
usage()
sys.exit(1)
is_json = '-j' in sys.argv
if is_json:
sys.argv.remove('-j')
main(sys.argv[1], sys.argv[2], sys.argv[3:])
|
1,516 |
next
|
"""Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def METHOD_NAME(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
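# Typical use (sketch; `path` is a placeholder): wrap a response body so a
# server can stream it in fixed-size chunks, e.g.
#     return FileWrapper(open(path, 'rb'), blksize=8192)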
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
def application_uri(environ):
"""Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
url = environ['wsgi.url_scheme']+'://'
from urllib import quote
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += quote(environ.get('SCRIPT_NAME') or '/')
return url
def request_uri(environ, include_query=1):
"""Return the full request URI, optionally including the query string"""
url = application_uri(environ)
from urllib import quote
path_info = quote(environ.get('PATH_INFO',''),safe='/;=,')
if not environ.get('SCRIPT_NAME'):
url += path_info[1:]
else:
url += path_info
if include_query and environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
return url
def shift_path_info(environ):
"""Shift a name from PATH_INFO to SCRIPT_NAME, returning it
If there are no remaining path segments in PATH_INFO, return None.
Note: 'environ' is modified in-place; use a copy if you need to keep
the original PATH_INFO or SCRIPT_NAME.
Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
'/' to SCRIPT_NAME, even though empty path segments are normally ignored,
and SCRIPT_NAME doesn't normally end in a '/'. This is intentional
behavior, to ensure that an application can tell the difference between
'/x' and '/x/' when traversing to objects.
"""
path_info = environ.get('PATH_INFO','')
if not path_info:
return None
path_parts = path_info.split('/')
path_parts[1:-1] = [p for p in path_parts[1:-1] if p and p != '.']
name = path_parts[1]
del path_parts[1]
script_name = environ.get('SCRIPT_NAME','')
script_name = posixpath.normpath(script_name+'/'+name)
if script_name.endswith('/'):
script_name = script_name[:-1]
if not name and not script_name.endswith('/'):
script_name += '/'
environ['SCRIPT_NAME'] = script_name
environ['PATH_INFO'] = '/'.join(path_parts)
# Special case: '/.' on PATH_INFO doesn't get stripped,
# because we don't strip the last element of PATH_INFO
# if there's only one path part left. Instead of fixing this
# above, we fix it here so that PATH_INFO gets normalized to
# an empty string in the environ.
if name=='.':
name = None
return name
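# Example (sketch): with environ = {'SCRIPT_NAME': '', 'PATH_INFO': '/a/b'},
# shift_path_info(environ) returns 'a' and leaves SCRIPT_NAME == '/a' and
# PATH_INFO == '/b'.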
def setup_testing_defaults(environ):
"""Update 'environ' with trivial defaults for testing purposes
This adds various parameters required for WSGI, including HTTP_HOST,
SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
and all of the wsgi.* variables. It only supplies default values,
and does not replace any existing settings for these variables.
This routine is intended to make it easier for unit tests of WSGI
servers and applications to set up dummy environments. It should *not*
be used by actual WSGI servers or applications, since the data is fake!
"""
environ.setdefault('SERVER_NAME','127.0.0.1')
environ.setdefault('SERVER_PROTOCOL','HTTP/1.0')
environ.setdefault('HTTP_HOST',environ['SERVER_NAME'])
environ.setdefault('REQUEST_METHOD','GET')
if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
environ.setdefault('SCRIPT_NAME','')
environ.setdefault('PATH_INFO','/')
environ.setdefault('wsgi.version', (1,0))
environ.setdefault('wsgi.run_once', 0)
environ.setdefault('wsgi.multithread', 0)
environ.setdefault('wsgi.multiprocess', 0)
from StringIO import StringIO
environ.setdefault('wsgi.input', StringIO(""))
environ.setdefault('wsgi.errors', StringIO())
environ.setdefault('wsgi.url_scheme',guess_scheme(environ))
if environ['wsgi.url_scheme']=='http':
environ.setdefault('SERVER_PORT', '80')
elif environ['wsgi.url_scheme']=='https':
environ.setdefault('SERVER_PORT', '443')
_hoppish = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}.__contains__
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return _hoppish(header_name.lower())
|
1,517 |
test bubble
|
# -*- coding: utf-8 -*-
from pytest import raises
from discopy.cat import *
from discopy.utils import AxiomError
def test_main():
x, y, z = Ob('x'), Ob('y'), Ob('z')
f, g, h = Box('f', x, y), Box('g', y, z), Box('h', z, x)
assert Id(x) >> f == f == f >> Id(y)
assert (f >> g).dom == f.dom and (f >> g).cod == g.cod
assert f >> g >> h == f >> (g >> h)
F = Functor(ob={x: y, y: z, z: x}, ar={f: g, g: h})
assert F(Id(x)) == Id(F(x))
assert F(f >> g) == F(f) >> F(g)
def test_Ob():
assert Ob('x') == Ob('x') and Ob('x') != Ob('y')
def test_Ob_init():
assert (Ob('x'), Ob('Alice')) == (Ob('x'), Ob('Alice'))
def test_Ob_name():
assert Ob('x').name == 'x'
def test_Ob_repr():
assert repr(Ob('x')) == "cat.Ob('x')"
def test_Ob_str():
assert str(Ob('x')) == 'x'
def test_Ob_eq():
x, x1, y = Ob('x'), Ob('x'), Ob('y')
assert x == x1 and x != y and x != 'x'
assert 'x' != Ob('x')
def test_Ob_hash():
assert {Ob('x'): 42}[Ob('x')] == 42
def test_Arrow():
x, y, z, w = Ob('x'), Ob('y'), Ob('z'), Ob('w')
f, g, h = Box('f', x, y), Box('g', y, z), Box('h', z, w)
assert f >> g >> h == Arrow((f, g, h), x, w)
def test_Arrow_init():
with raises(TypeError) as err:
Arrow((), 1, Ob('x'))
with raises(TypeError) as err:
Arrow((), Ob('x'), 1)
with raises(TypeError) as err:
Arrow((Ob('x'), ), Ob('x'), Ob('x'))
def test_Arrow_len():
assert len(Arrow((), Ob('x'), Ob('x'))) == 0
def test_Arrow_getitem():
f, g = Box('f', Ob('x'), Ob('y')), Box('g', Ob('y'), Ob('z'))
arrow = f >> g >> g.dagger() >> f.dagger()\
>> f >> g >> g.dagger() >> f.dagger()
with raises(TypeError):
arrow["Alice"]
with raises(IndexError):
arrow[9]
with raises(IndexError):
arrow[::-2]
assert arrow[:] == arrow
assert arrow[::-1] == arrow.dagger()
assert arrow[:0] == arrow[:-8] == arrow[-9:-9] == Id(arrow.dom)
for depth, box in enumerate(arrow):
assert arrow[depth] == box
assert arrow[-depth] == arrow.inside[-depth]
assert arrow[depth:depth] == Id(box.dom)
assert arrow[depth:] == Id(box.dom).then(*arrow.inside[depth:])
assert arrow[:depth] == Id(arrow.dom).then(
*arrow.inside[:depth])
assert arrow[depth: depth + 2] == Id(box.dom).then(
*arrow.inside[depth: depth + 2])
def test_Arrow_repr():
assert repr(Arrow((), Ob('x'), Ob('x'))) == "cat.Arrow.id(cat.Ob('x'))"
inside = (Box('f', Ob('x'), Ob('y')), Box('g', Ob('y'), Ob('z')))
assert repr(Arrow(inside, Ob('x'), Ob('z')))\
== "cat.Arrow(inside=(cat.Box('f', cat.Ob('x'), cat.Ob('y')), "\
"cat.Box('g', cat.Ob('y'), cat.Ob('z'))), dom=cat.Ob('x'), "\
"cod=cat.Ob('z'))"
def test_Arrow_str():
x, y, z = Ob('x'), Ob('y'), Ob('z')
f, g = Box('f', x, y), Box('g', y, z)
assert str(Arrow((), x, x) == "Id(x)")
assert str(Arrow((f, ), x, y) == "f")
assert str(Arrow((f, g), x, z)) == "f >> g"
def test_Arrow_eq():
x, y, z = Ob('x'), Ob('y'), Ob('z')
f, g = Box('f', x, y), Box('g', y, z)
assert f >> g == Arrow((f, g), x, z)
def test_Arrow_hash():
assert {Id(Ob('x')): 42}[Id(Ob('x'))] == 42
def test_Arrow_then():
x, y, z = Ob('x'), Ob('y'), Ob('z')
f, g = Box('f', x, y), Box('g', y, z)
assert f.then(g) == f >> g == g << f
with raises(TypeError) as err:
f >> x
def test_Arrow_dagger():
x, y, z = Ob('x'), Ob('y'), Ob('z')
f, g = Box('f', x, y), Box('g', y, z)
h = Arrow((f, g), x, z)
assert h.dagger() == g.dagger() >> f.dagger()
assert h.dagger().dagger() == h
def test_Id_init():
idx = Id(Ob('x'))
assert idx >> idx == idx
assert idx.dagger() == idx
def test_Id_repr():
assert repr(Id(Ob('x'))) == "cat.Arrow.id(cat.Ob('x'))"
def test_Id_str():
x = Ob('x')
assert str(Id(x)) == "Id(x)"
def test_AxiomError():
x, y, z = Ob('x'), Ob('y'), Ob('z')
f, g = Box('f', x, y), Box('g', y, z)
with raises(AxiomError) as err:
Arrow((g, ), x, y)
with raises(AxiomError) as err:
Arrow((f, ), x, z)
with raises(AxiomError) as err:
g >> f
def test_Box():
f = Box('f', Ob('x'), Ob('y'), data=[42, {0: 1}, lambda x: x])
assert f >> Id(Ob('y')) == f == Id(Ob('x')) >> f
def test_Box_dagger():
f = Box('f', Ob('x'), Ob('y'), data=[42, {0: 1}])
assert f.dom == f.dagger().cod and f.cod == f.dagger().dom
assert f == f.dagger().dagger()
def test_Box_repr():
f = Box('f', Ob('x'), Ob('y'), data=42)
assert repr(f) == "cat.Box('f', cat.Ob('x'), cat.Ob('y'), data=42)"
assert repr(f.dagger())\
== "cat.Box('f', cat.Ob('x'), cat.Ob('y'), data=42).dagger()"
def test_Box_str():
f = Box('f', Ob('x'), Ob('y'), data=42)
assert str(f) == "f"
assert str(f.dagger()) == "f[::-1]"
def test_Box_hash():
assert {Box('f', Ob('x'), Ob('y')): 42}[Box('f', Ob('x'), Ob('y'))] == 42
def test_Box_eq():
f = Box('f', Ob('x'), Ob('y'), data=[42, {0: 1}])
assert f == Arrow((f, ), Ob('x'), Ob('y')) and f != Ob('x')
def test_Functor():
x, y, z = Ob('x'), Ob('y'), Ob('z')
f, g = Box('f', x, y), Box('g', y, z)
F = Functor({x: y, y: x, z: z}, {f: f.dagger(), g: f >> g})
assert F((f >> g).dagger()) == F(f >> g).dagger()
assert F(Id(Ob('x'))) == Id(Ob('y'))
def test_Functor_eq():
x, y = Ob('x'), Ob('y')
assert Functor({x: y, y: x}, {}) == Functor({y: x, x: y}, {})
def test_Functor_repr():
assert repr(Functor({}, {})) == "cat.Functor(ob={}, ar={})"
def test_Functor_call():
x, y, z = Ob('x'), Ob('y'), Ob('z')
f, g = Box('f', x, y), Box('g', y, z)
F = Functor({x: y, y: x, z: z}, {f: f.dagger(), g: f >> g})
with raises(TypeError) as err:
F(F)
assert F(x) == y
assert F(f) == f.dagger()
assert F(f.dagger()) == f
assert F(g) == f >> g
assert F(f >> g) == f.dagger() >> f >> g
def test_total_ordering():
x, y, z = Ob('x'), Ob('y'), Ob('z')
assert sorted([z, y, x]) == [x, y, z]
f, g = Box('f', x, y), Box('g', y, z)
assert f < g
def METHOD_NAME():
f = Box('f', Ob('x'), Ob('y'))
assert repr((f).bubble())\
== "cat.Bubble(cat.Box('f', cat.Ob('x'), cat.Ob('y')))"
assert str(f.bubble()) == "(f).bubble()"
def test_Box_call():
f = Box('f', Ob('x'), Ob('y'))
with raises(TypeError):
f(42)
def test_from_tree():
f = Box('f', Ob('x'), Ob('y'), data=[42, {0: 1}])
d = (f >> f[::-1].bubble()) + Id(Ob('x'))
assert from_tree(d.to_tree()) == d
def test_sum_lambdify():
from sympy.abc import phi
f = Box('f', Ob('x'), Ob('y'), data=[phi])
g = Box('g', Ob('x'), Ob('y'), data=[phi])
assert (f + g).free_symbols == {phi}
assert (f + g).lambdify(phi)(1) == f.lambdify(phi)(1) + g.lambdify(phi)(1)
empty_sum = Sum((), Ob('x'), Ob('y'))
assert empty_sum.lambdify(phi)(123) == empty_sum
assert empty_sum.subs(phi, 0) == empty_sum
def test_Sum():
x, y, z = map(Ob, "xyz")
f = Box('f', x, y)
g = Box('g', y, z)
with raises(ValueError):
Sum(())
with raises(AxiomError):
Sum((), x, y) + Sum((), y, z)
with raises(AxiomError):
Sum((f, ), y, z)
with raises(AxiomError):
Sum((f, g))
assert hash(Sum((), x, y)) == hash(Sum((), x, y))
assert repr(Sum((), x, y))\
== "cat.Sum(terms=(), dom=cat.Ob('x'), cod=cat.Ob('y'))"
assert list(Sum((f, ), x, y)) == [f]
assert len(Sum((), x, y)) == 0
assert Sum((), x, x).then(f, g) == Sum((), x, z)
assert Sum((), x, y).dagger() == Sum((), y, x)
|
1,518 |
tear down
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from tests import TestCase, run_gtk_loop
from quodlibet.util import connect_obj
from quodlibet.library import SongLibrarian, SongFileLibrary
from quodlibet.library.base import Library
from quodlibet.library.librarians import Librarian
from tests.test_library_libraries import Fake, Frange, FakeSongFile, FSrange
class TLibrarian(TestCase):
Fake = Fake
Frange = staticmethod(Frange)
Librarian = Librarian
Library = Library
def setUp(self):
self.librarian = self.Librarian()
self.Library.librarian = self.librarian
self.lib1 = self.Library("One")
self.lib2 = self.Library("Two")
self.added_1 = []
self.changed_1 = []
self.removed_1 = []
self.added_2 = []
self.changed_2 = []
self.removed_2 = []
self.added = []
self.changed = []
self.removed = []
connect_obj(self.lib1, 'added', list.extend, self.added_1)
connect_obj(self.lib1, 'changed', list.extend, self.changed_1)
connect_obj(self.lib1, 'removed', list.extend, self.removed_1)
connect_obj(self.lib2, 'added', list.extend, self.added_2)
connect_obj(self.lib2, 'changed', list.extend, self.changed_2)
connect_obj(self.lib2, 'removed', list.extend, self.removed_2)
connect_obj(self.librarian, 'added', list.extend, self.added)
connect_obj(self.librarian, 'changed', list.extend, self.changed)
connect_obj(self.librarian, 'removed', list.extend, self.removed)
def test_libraries(self):
self.failUnlessEqual(len(self.librarian.libraries), 2)
self.failUnless(self.lib1 in self.librarian.libraries.values())
self.failUnless(self.lib2 in self.librarian.libraries.values())
def test_register_at_instantiation(self):
try:
lib = self.Library("Three")
self.failUnlessEqual(len(self.librarian.libraries), 3)
finally:
lib.destroy()
def test_register_later(self):
try:
lib = self.Library()
self.failUnlessEqual(len(self.librarian.libraries), 2)
self.librarian.register(lib, "Three")
self.failUnlessEqual(len(self.librarian.libraries), 3)
finally:
lib.destroy()
def test_register_exists(self):
self.failUnlessRaises(ValueError, self.Library, "Two")
def test_unregister(self):
self.lib2.destroy()
self.failUnlessEqual(len(self.librarian.libraries), 1)
self.failUnless(self.lib1 in self.librarian.libraries.values())
self.failIf(self.lib2 in self.librarian.libraries.values())
self.lib1.destroy()
self.failUnlessEqual(len(self.librarian.libraries), 0)
def test_added(self):
self.lib1.add(self.Frange(12))
self.lib2.add(self.Frange(12, 24))
self.failUnlessEqual(sorted(self.added), self.Frange(24))
def test_removed(self):
self.lib1.add(self.Frange(12))
self.lib2.add(self.Frange(12, 24))
self.lib1.remove([self.Fake(9)])
self.lib2.remove([self.Fake(16)])
self.failUnlessEqual(self.removed, [self.Fake(9), self.Fake(16)])
def test_changed(self):
self.lib1.add(self.Frange(12))
self.lib2.add(self.Frange(12, 24))
self.librarian.changed(self.Frange(6, 18))
run_gtk_loop()
self.failUnlessEqual(sorted(self.changed), self.Frange(6, 18))
self.failUnlessEqual(self.changed_1, self.Frange(6, 12))
self.failUnlessEqual(self.changed_2, self.Frange(12, 18))
def test___getitem__(self):
self.lib1.add(self.Frange(12))
self.lib2.add(self.Frange(12, 24))
self.failUnlessEqual(self.librarian[10], 10)
new = self.Fake(100)
new.key = 200
self.lib2.add([new])
self.failUnlessEqual(self.librarian[200], new)
def test___getitem___not_present(self):
self.lib1.add(self.Frange(12))
self.lib2.add(self.Frange(12, 24))
self.lib2.remove([self.Fake(16)])
self.failUnlessRaises(KeyError, self.librarian.__getitem__, 16)
self.failUnlessRaises(KeyError, self.librarian.__getitem__, 99)
def test___contains__(self):
self.lib1.add(self.Frange(12))
self.lib2.add(self.Frange(12, 24))
new = self.Fake(100)
new.key = 200
self.lib1.add([new])
for value in [1, 2, 15, 22, 200, new]:
self.failUnless(value in self.librarian, "didn't find %d" % value)
for value in [-1, 25, 50, 100]:
self.failIf(value in self.librarian, "found %d" % value)
def METHOD_NAME(self):
self.Library.librarian = None
self.lib1.destroy()
self.lib2.destroy()
self.librarian.destroy()
class TSongLibrarian(TLibrarian):
Fake = FakeSongFile
Frange = staticmethod(FSrange)
Library = SongFileLibrary
Librarian = SongLibrarian
def test_tag_values(self):
self.lib1.add(self.Frange(0, 30, 2))
self.lib2.add(self.Frange(1, 30, 2))
        del self.added[:]
self.failUnlessEqual(
sorted(self.librarian.tag_values(20)), list(range(20)))
self.failUnlessEqual(sorted(self.librarian.tag_values(0)), [])
self.failIf(self.changed or self.added or self.removed)
def test_rename(self):
new = self.Fake(10)
new.key = 30
self.lib1.add([new])
self.lib2.add([new])
self.librarian.rename(new, 20)
run_gtk_loop()
self.failUnlessEqual(new.key, 20)
self.failUnless(new in self.lib1)
self.failUnless(new in self.lib2)
self.failUnless(new.key in self.lib1)
self.failUnless(new.key in self.lib2)
self.failUnlessEqual(self.changed_1, [new])
self.failUnlessEqual(self.changed_2, [new])
self.failUnless(new in self.changed)
def test_rename_changed(self):
new = self.Fake(10)
self.lib1.add([new])
changed = set()
self.librarian.rename(new, 20, changed=changed)
self.assertEqual(len(changed), 1)
self.assertTrue(new in changed)
def test_reload(self):
new = self.Fake(10)
self.lib1.add([new])
changed = set()
removed = set()
self.librarian.reload(new, changed=changed, removed=removed)
self.assertTrue(new in changed)
self.assertFalse(removed)
|
1,519 |
reverse bytes
|
from migen import *
from migen.genlib.misc import timeline
from misoc.interconnect import wishbone
from misoc.interconnect.csr import AutoCSR, CSRStorage, CSRStatus
_FAST_READ = 0x0b
_DIOFR = 0xbb
_QIOFR = 0xeb
def _format_cmd(cmd, spi_width):
"""
`cmd` is the read instruction. Since everything is transmitted on all
dq lines (cmd, adr and data), extend/interleave cmd to full pads.dq
width even if dq1-dq3 are don't care during the command phase:
For example, for N25Q128, 0xeb is the quad i/o fast read, and
extended to 4 bits (dq1,dq2,dq3 high) is: 0xfffefeff
"""
c = 2**(8*spi_width)-1
for b in range(8):
if not (cmd>>b)%2:
c &= ~(1<<(b*spi_width))
return c
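# Worked example (matches the docstring above): _format_cmd(0xeb, 4) starts
# from 0xffffffff and clears bit b*4 for every zero bit b of 0xeb (bits 2 and
# 4), i.e. result bits 8 and 16, giving 0xfffefeff.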
def METHOD_NAME(s):
n = (len(s) + 7)//8
return Cat(*[s[i*8:min((i + 1)*8, len(s))]
for i in reversed(range(n))])
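# For a 32-bit signal this is Cat(s[24:32], s[16:24], s[8:16], s[0:8]), i.e.
# the byte order is swapped; the little-endian read path below relies on it.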
class SpiFlash(Module, AutoCSR):
def __init__(self, pads, dummy=15, div=2, with_bitbang=True, endianness="big", dw=32):
"""
Simple SPI flash, e.g. N25Q128 on the LX9 Microboard.
Supports multi-bit pseudo-parallel reads (aka Dual or Quad I/O Fast
Read). Only supports mode0 (cpol=0, cpha=0).
Optionally supports software bitbanging (for write, erase, or other commands).
"""
adr_width = 32-log2_int(dw//8)
self.bus = bus = wishbone.Interface(data_width=dw, adr_width=adr_width)
spi_width = len(pads.dq)
if with_bitbang:
self.bitbang = CSRStorage(4)
self.miso = CSRStatus()
self.bitbang_en = CSRStorage()
###
cs_n = Signal(reset=1)
clk = Signal()
dq_oe = Signal()
read_cmd_params = {
4: (_format_cmd(_QIOFR, 4), 4*8),
2: (_format_cmd(_DIOFR, 2), 2*8),
1: (_format_cmd(_FAST_READ, 1), 1*8)
}
read_cmd, cmd_width = read_cmd_params[spi_width]
addr_width = 24
pads.cs_n.reset = 1
dq = TSTriple(spi_width)
self.specials.dq = dq.get_tristate(pads.dq)
sr = Signal(max(cmd_width, addr_width, dw))
if endianness == "big":
self.comb += bus.dat_r.eq(sr)
else:
self.comb += bus.dat_r.eq(METHOD_NAME(sr))
hw_read_logic = [
pads.clk.eq(clk),
pads.cs_n.eq(cs_n),
dq.o.eq(sr[-spi_width:]),
dq.oe.eq(dq_oe)
]
if with_bitbang:
bitbang_logic = [
pads.clk.eq(self.bitbang.storage[1]),
pads.cs_n.eq(self.bitbang.storage[2]),
If(self.bitbang.storage[3],
dq.oe.eq(0)
).Else(
dq.oe.eq(1)
),
If(self.bitbang.storage[1],
self.miso.status.eq(dq.i[1])
)
]
if spi_width > 1:
bitbang_logic += [
dq.o.eq(Cat(self.bitbang.storage[0], Replicate(1, spi_width-1)))
]
else:
bitbang_logic += [
dq.o.eq(self.bitbang.storage[0])
]
self.comb += \
If(self.bitbang_en.storage,
bitbang_logic
).Else(
hw_read_logic
)
else:
self.comb += hw_read_logic
if div < 2:
raise ValueError("Unsupported value \'{}\' for div parameter for SpiFlash core".format(div))
else:
i = Signal(max=div)
dqi = Signal(spi_width)
self.sync += [
If(i == div//2 - 1,
clk.eq(1),
dqi.eq(dq.i),
),
If(i == div - 1,
i.eq(0),
clk.eq(0),
sr.eq(Cat(dqi, sr[:-spi_width]))
).Else(
i.eq(i + 1),
),
]
# spi is byte-addressed, prefix by zeros
z = Replicate(0, log2_int(dw//8))
seq = [
(cmd_width//spi_width*div,
[dq_oe.eq(1), cs_n.eq(0), sr[-cmd_width:].eq(read_cmd)]),
(addr_width//spi_width*div,
[sr[-addr_width:].eq(Cat(z, bus.adr))]),
((dummy + dw//spi_width)*div,
[dq_oe.eq(0)]),
(1,
[bus.ack.eq(1), cs_n.eq(1)]),
(div, # tSHSL!
[bus.ack.eq(0)]),
(0,
[]),
]
# accumulate timeline deltas
t, tseq = 0, []
for dt, a in seq:
tseq.append((t, a))
t += dt
self.sync += timeline(bus.cyc & bus.stb & (i == div - 1), tseq)
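# Instantiation sketch (pad request name and parameter values are assumptions,
# not taken from this file):
#     self.submodules.spiflash = SpiFlash(platform.request("spiflash"),
#                                         dummy=8, div=4)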
|
1,520 |
bond sort
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pytest
from numpy.testing import assert_equal
import numpy as np
import MDAnalysis as mda
from MDAnalysis.topology import guessers
from MDAnalysis.core.topologyattrs import Angles
from MDAnalysisTests import make_Universe
from MDAnalysisTests.core.test_fragments import make_starshape
import MDAnalysis.tests.datafiles as datafiles
from MDAnalysisTests.util import import_not_available
try:
from rdkit import Chem
from rdkit.Chem.rdPartialCharges import ComputeGasteigerCharges
except ImportError:
pass
requires_rdkit = pytest.mark.skipif(import_not_available("rdkit"),
reason="requires RDKit")
class TestGuessMasses(object):
def test_guess_masses(self):
out = guessers.guess_masses(['C', 'C', 'H'])
assert isinstance(out, np.ndarray)
assert_equal(out, np.array([12.011, 12.011, 1.008]))
def test_guess_masses_warn(self):
with pytest.warns(UserWarning):
guessers.guess_masses(['X'])
def test_guess_masses_miss(self):
out = guessers.guess_masses(['X', 'Z'])
assert_equal(out, np.array([0.0, 0.0]))
@pytest.mark.parametrize('element, value', (('H', 1.008), ('XYZ', 0.0), ))
def test_get_atom_mass(self, element, value):
assert guessers.get_atom_mass(element) == value
def test_guess_atom_mass(self):
assert guessers.guess_atom_mass('1H') == 1.008
class TestGuessTypes(object):
# guess_types
# guess_atom_type
# guess_atom_element
def test_guess_types(self):
out = guessers.guess_types(['MG2+', 'C12'])
assert isinstance(out, np.ndarray)
assert_equal(out, np.array(['MG', 'C'], dtype=object))
def test_guess_atom_element(self):
assert guessers.guess_atom_element('MG2+') == 'MG'
def test_guess_atom_element_empty(self):
assert guessers.guess_atom_element('') == ''
def test_guess_atom_element_singledigit(self):
assert guessers.guess_atom_element('1') == '1'
def test_guess_atom_element_1H(self):
assert guessers.guess_atom_element('1H') == 'H'
assert guessers.guess_atom_element('2H') == 'H'
@pytest.mark.parametrize('name, element', (
('AO5*', 'O'),
('F-', 'F'),
('HB1', 'H'),
('OC2', 'O'),
('1he2', 'H'),
('3hg2', 'H'),
('OH-', 'O'),
('HO', 'H'),
('he', 'H'),
('zn', 'ZN'),
('Ca2+', 'CA'),
('CA', 'C'),
('N0A', 'N'),
('C0U', 'C'),
('C0S', 'C'),
('Na+', 'NA'),
('Cu2+', 'CU')
))
def test_guess_element_from_name(self, name, element):
assert guessers.guess_atom_element(name) == element
def test_guess_charge():
# this always returns 0.0
assert guessers.guess_atom_charge('this') == 0.0
def test_guess_bonds_Error():
u = make_Universe(trajectory=True)
with pytest.raises(ValueError):
guessers.guess_bonds(u.atoms[:4], u.atoms.positions[:5])
def test_guess_impropers():
u = make_starshape()
ag = u.atoms[:5]
u.add_TopologyAttr(Angles(guessers.guess_angles(ag.bonds)))
vals = guessers.guess_improper_dihedrals(ag.angles)
assert_equal(len(vals), 12)
def METHOD_NAME(arr):
# sort from low to high, also within a tuple
# e.g. ([5, 4], [0, 1], [0, 3]) -> ([0, 1], [0, 3], [4, 5])
out = []
for (i, j) in arr:
if i > j:
i, j = j, i
out.append((i, j))
return sorted(out)
def test_guess_bonds_water():
u = mda.Universe(datafiles.two_water_gro)
bonds = METHOD_NAME(guessers.guess_bonds(u.atoms, u.atoms.positions, u.dimensions))
assert_equal(bonds, ((0, 1),
(0, 2),
(3, 4),
(3, 5)))
def test_guess_bonds_adk():
u = mda.Universe(datafiles.PSF, datafiles.DCD)
u.atoms.types = guessers.guess_types(u.atoms.names)
bonds = METHOD_NAME(guessers.guess_bonds(u.atoms, u.atoms.positions))
assert_equal(np.sort(u.bonds.indices, axis=0),
np.sort(bonds, axis=0))
def test_guess_bonds_peptide():
u = mda.Universe(datafiles.PSF_NAMD, datafiles.PDB_NAMD)
u.atoms.types = guessers.guess_types(u.atoms.names)
bonds = METHOD_NAME(guessers.guess_bonds(u.atoms, u.atoms.positions))
assert_equal(np.sort(u.bonds.indices, axis=0),
np.sort(bonds, axis=0))
@pytest.mark.parametrize("smi", [
"c1ccccc1",
"C1=CC=CC=C1",
"CCO",
"c1ccccc1Cc1ccccc1",
"CN1C=NC2=C1C(=O)N(C(=O)N2C)C",
])
@requires_rdkit
def test_guess_aromaticities(smi):
mol = Chem.MolFromSmiles(smi)
mol = Chem.AddHs(mol)
expected = np.array([atom.GetIsAromatic() for atom in mol.GetAtoms()])
u = mda.Universe(mol)
values = guessers.guess_aromaticities(u.atoms)
assert_equal(values, expected)
@pytest.mark.parametrize("smi", [
"c1ccccc1",
"C1=CC=CC=C1",
"CCO",
"c1ccccc1Cc1ccccc1",
"CN1C=NC2=C1C(=O)N(C(=O)N2C)C",
])
@requires_rdkit
def test_guess_gasteiger_charges(smi):
mol = Chem.MolFromSmiles(smi)
mol = Chem.AddHs(mol)
ComputeGasteigerCharges(mol, throwOnParamFailure=True)
expected = np.array([atom.GetDoubleProp("_GasteigerCharge")
for atom in mol.GetAtoms()], dtype=np.float32)
u = mda.Universe(mol)
values = guessers.guess_gasteiger_charges(u.atoms)
assert_equal(values, expected)
|
1,521 |
async generator
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Iterator,
Optional,
Sequence,
Tuple,
)
from google.cloud.dialogflowcx_v3.types import transition_route_group
class ListTransitionRouteGroupsPager:
"""A pager for iterating through ``list_transition_route_groups`` requests.
This class thinly wraps an initial
:class:`google.cloud.dialogflowcx_v3.types.ListTransitionRouteGroupsResponse` object, and
provides an ``__iter__`` method to iterate through its
``transition_route_groups`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListTransitionRouteGroups`` requests and continue to iterate
through the ``transition_route_groups`` field on the
corresponding responses.
All the usual :class:`google.cloud.dialogflowcx_v3.types.ListTransitionRouteGroupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., transition_route_group.ListTransitionRouteGroupsResponse],
request: transition_route_group.ListTransitionRouteGroupsRequest,
response: transition_route_group.ListTransitionRouteGroupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.dialogflowcx_v3.types.ListTransitionRouteGroupsRequest):
The initial request object.
response (google.cloud.dialogflowcx_v3.types.ListTransitionRouteGroupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = transition_route_group.ListTransitionRouteGroupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(
self,
) -> Iterator[transition_route_group.ListTransitionRouteGroupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[transition_route_group.TransitionRouteGroup]:
for page in self.pages:
yield from page.transition_route_groups
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListTransitionRouteGroupsAsyncPager:
"""A pager for iterating through ``list_transition_route_groups`` requests.
This class thinly wraps an initial
:class:`google.cloud.dialogflowcx_v3.types.ListTransitionRouteGroupsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``transition_route_groups`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListTransitionRouteGroups`` requests and continue to iterate
through the ``transition_route_groups`` field on the
corresponding responses.
All the usual :class:`google.cloud.dialogflowcx_v3.types.ListTransitionRouteGroupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[
..., Awaitable[transition_route_group.ListTransitionRouteGroupsResponse]
],
request: transition_route_group.ListTransitionRouteGroupsRequest,
response: transition_route_group.ListTransitionRouteGroupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.dialogflowcx_v3.types.ListTransitionRouteGroupsRequest):
The initial request object.
response (google.cloud.dialogflowcx_v3.types.ListTransitionRouteGroupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = transition_route_group.ListTransitionRouteGroupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(
self,
) -> AsyncIterator[transition_route_group.ListTransitionRouteGroupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[transition_route_group.TransitionRouteGroup]:
async def METHOD_NAME():
async for page in self.pages:
for response in page.transition_route_groups:
yield response
return METHOD_NAME()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
1,522 |
test cloudwatch log group with kms key
|
from unittest import mock
from boto3 import client, session
from moto import mock_logs
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
from prowler.providers.common.models import Audit_Metadata
AWS_REGION = "us-east-1"
AWS_ACCOUNT_NUMBER = "123456789012"
class Test_cloudwatch_log_group_kms_encryption_enabled:
def set_mocked_audit_info(self):
audit_info = AWS_Audit_Info(
session_config=None,
original_session=None,
audit_session=session.Session(
profile_name=None,
botocore_session=None,
),
audited_account=AWS_ACCOUNT_NUMBER,
audited_account_arn=f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root",
audited_user_id=None,
audited_partition="aws",
audited_identity_arn=None,
profile=None,
profile_region=None,
credentials=None,
assumed_role_info=None,
audited_regions=["us-east-1", "eu-west-1"],
organizations_metadata=None,
audit_resources=None,
mfa_enabled=False,
audit_metadata=Audit_Metadata(
services_scanned=0,
expected_checks=[],
completed_checks=0,
audit_progress=0,
),
)
return audit_info
def test_cloudwatch_no_log_groups(self):
from prowler.providers.aws.services.cloudwatch.cloudwatch_service import Logs
current_audit_info = self.set_mocked_audit_info()
from prowler.providers.common.models import Audit_Metadata
current_audit_info.audit_metadata = Audit_Metadata(
services_scanned=0,
# We need to set this check to call __describe_log_groups__
expected_checks=["cloudwatch_log_group_no_secrets_in_logs"],
completed_checks=0,
audit_progress=0,
)
with mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=current_audit_info,
), mock.patch(
"prowler.providers.aws.services.cloudwatch.cloudwatch_log_group_kms_encryption_enabled.cloudwatch_log_group_kms_encryption_enabled.logs_client",
new=Logs(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.cloudwatch.cloudwatch_log_group_kms_encryption_enabled.cloudwatch_log_group_kms_encryption_enabled import (
cloudwatch_log_group_kms_encryption_enabled,
)
check = cloudwatch_log_group_kms_encryption_enabled()
result = check.execute()
assert len(result) == 0
@mock_logs
def test_cloudwatch_log_group_without_kms_key(self):
# Generate Logs Client
logs_client = client("logs", region_name=AWS_REGION)
# Request Logs group
logs_client.create_log_group(
logGroupName="test",
)
from prowler.providers.aws.services.cloudwatch.cloudwatch_service import Logs
current_audit_info = self.set_mocked_audit_info()
from prowler.providers.common.models import Audit_Metadata
current_audit_info.audit_metadata = Audit_Metadata(
services_scanned=0,
# We need to set this check to call __describe_log_groups__
expected_checks=["cloudwatch_log_group_no_secrets_in_logs"],
completed_checks=0,
audit_progress=0,
)
with mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=current_audit_info,
), mock.patch(
"prowler.providers.aws.services.cloudwatch.cloudwatch_log_group_kms_encryption_enabled.cloudwatch_log_group_kms_encryption_enabled.logs_client",
new=Logs(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.cloudwatch.cloudwatch_log_group_kms_encryption_enabled.cloudwatch_log_group_kms_encryption_enabled import (
cloudwatch_log_group_kms_encryption_enabled,
)
check = cloudwatch_log_group_kms_encryption_enabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "Log Group test does not have AWS KMS keys associated."
)
assert result[0].resource_id == "test"
@mock_logs
def METHOD_NAME(self):
# Generate Logs Client
logs_client = client("logs", region_name=AWS_REGION)
# Request Logs group
logs_client.create_log_group(logGroupName="test", kmsKeyId="test_kms_id")
from prowler.providers.aws.services.cloudwatch.cloudwatch_service import Logs
current_audit_info = self.set_mocked_audit_info()
from prowler.providers.common.models import Audit_Metadata
current_audit_info.audit_metadata = Audit_Metadata(
services_scanned=0,
# We need to set this check to call __describe_log_groups__
expected_checks=["cloudwatch_log_group_no_secrets_in_logs"],
completed_checks=0,
audit_progress=0,
)
with mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=current_audit_info,
), mock.patch(
"prowler.providers.aws.services.cloudwatch.cloudwatch_log_group_kms_encryption_enabled.cloudwatch_log_group_kms_encryption_enabled.logs_client",
new=Logs(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.cloudwatch.cloudwatch_log_group_kms_encryption_enabled.cloudwatch_log_group_kms_encryption_enabled import (
cloudwatch_log_group_kms_encryption_enabled,
)
check = cloudwatch_log_group_kms_encryption_enabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Log Group test does have AWS KMS key test_kms_id associated."
)
assert result[0].resource_id == "test"
|
1,523 |
copy to
|
from typing import ContextManager
import os
import io
import shutil
import urllib
import urllib.request
from urllib.parse import urlparse
import requests
from datetime import datetime
from functools import partial
from tqdm.auto import tqdm
from rastervision.pipeline.file_system import (FileSystem, NotReadableError,
NotWritableError)
def get_file_obj(uri: str, with_progress: bool = True) -> ContextManager:
"""Returns a context manager for a file-like object that supports buffered
reads. If with_progress is True, wraps the read() method of the object in
a function that updates a tqdm progress bar.
Usage:
.. code-block:: python
with get_file_obj(uri) as f:
...
Adapted from https://stackoverflow.com/a/63831344/5908685.
"""
r = requests.get(uri, stream=True, allow_redirects=True)
if r.status_code != 200:
r.raise_for_status() # only raises for 4xx/5xx, so fail explicitly on any other non-200
raise RuntimeError(
f'Request to {uri} returned status code {r.status_code}')
file_obj = r.raw
# Decompress if needed
file_obj.read = partial(file_obj.read, decode_content=True)
if not with_progress:
return file_obj
file_size = int(r.headers.get('Content-Length', 0))
desc = '(Unknown total file size)' if file_size == 0 else ''
# put a wrapper around file_obj's read() method that updates the
# progress bar
file_obj_wrapped = tqdm.wrapattr(
file_obj,
'read',
total=file_size,
desc=desc,
bytes=True,
mininterval=0.5,
delay=5)
return file_obj_wrapped
class HttpFileSystem(FileSystem):
"""A FileSystem for downloading files over HTTP."""
@staticmethod
def matches_uri(uri: str, mode: str) -> bool:
parsed_uri = urlparse(uri)
return parsed_uri.scheme in ['http', 'https']
@staticmethod
def file_exists(uri: str, include_dir: bool = True) -> bool:
try:
response = urllib.request.urlopen(uri)
return response.getcode() == 200
except urllib.error.URLError:
return False
@staticmethod
def read_str(uri: str) -> str:
return HttpFileSystem.read_bytes(uri).decode('utf8')
@staticmethod
def read_bytes(uri: str) -> bytes:
with get_file_obj(uri) as in_file, io.BytesIO() as write_buffer:
shutil.copyfileobj(in_file, write_buffer)
return write_buffer.getvalue()
@staticmethod
def write_str(uri: str, data: str) -> None:
raise NotWritableError('Could not write {}'.format(uri))
@staticmethod
def write_bytes(uri: str, data: bytes) -> None:
raise NotWritableError('Could not write {}'.format(uri))
@staticmethod
def sync_to_dir(src_dir: str, dst_dir_uri: str,
delete: bool = False) -> None:
raise NotWritableError('Could not write {}'.format(dst_dir_uri))
@staticmethod
def sync_from_dir(src_dir_uri: str, dst_dir: str,
delete: bool = False) -> None:
raise NotReadableError(
'Cannot read directory from HTTP {}'.format(src_dir_uri))
@staticmethod
def METHOD_NAME(src_path: str, dst_uri: str) -> None:
raise NotWritableError('Could not write {}'.format(dst_uri))
@staticmethod
def copy_from(src_uri: str, dst_path: str) -> None:
with get_file_obj(src_uri) as in_file, open(dst_path,
'wb') as out_file:
shutil.copyfileobj(in_file, out_file)
@staticmethod
def local_path(uri: str, download_dir: str) -> None:
parsed_uri = urlparse(uri)
path = os.path.join(download_dir, 'http', parsed_uri.netloc,
parsed_uri.path[1:])
# This function is expected to return something that is file path-like
# (as opposed to directory-like),
# so if the path ends with / we strip it off. This was motivated by
# a URI that was a zxy tile schema that doesn't end in .png which is
# parsed by urlparse into a path that ends in a /.
if path.endswith('/'):
path = path[:-1]
return path
@staticmethod
def last_modified(uri: str) -> datetime:
return None
@staticmethod
def list_paths(uri, suffix=None):
raise NotImplementedError()
|
1,524 |
zero points
|
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from .uniform import UniformObserver
from paddle.quantization.factory import ObserverFactory
class EMDObserver(ObserverFactory):
r"""
It collects maximum absolute values of target tensor.
Args:
bit_length(int, optional): Number of bits to represent an quantized integer in binary.
dtype(str, optional): The data type of input tensor.
name (str, optional): This parameter is used by developers to print debugging information. \
For details, please refer to :ref:`api_guide_Name`. Default is None.
Examples:
.. code-block:: python
from paddle.quantization import QuantConfig
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.99)
q_config = QuantConfig(activation=quanter, weight=quanter)
"""
def __init__(self, quant_bits=8):
super(EMDObserver, self).__init__(quant_bits=quant_bits)
def _get_class(self):
return EMDObserverLayer
class EMDObserverLayer(UniformObserver):
def __init__(self, layer, quant_bits=8):
super(EMDObserverLayer, self).__init__(quant_bits=quant_bits)
self._quant_bits = quant_bits
self._calibration_loss = float('inf')
self.qmin, self.qmax = self.qmin_qmax
def forward(self, inputs):
""" Calculate forward pass.
"""
self._scale = None
self._zero_point = None
self._min = None
self._max = None
self._emd_min, self._emd_max = self.cal_min_max(inputs)
return inputs
def cal_min_max(self, inputs):
abs_max_value = float(paddle.max(paddle.flatten(inputs)))
abs_max_value = 1e-8 if abs_max_value == 0.0 else abs_max_value
s = 0.3
scale_emd = abs_max_value
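# Added note: the loop below grid-searches candidate scales s * abs_max_value
# for s = 0.30, 0.32, ..., 1.0 and keeps the scale whose fake-quantized tensor
# is closest to the input in mean and std (the EMD-style calibration loss).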
while s <= 1.0:
scale = s * abs_max_value
s += 0.02
bins = 2**(self._quant_bits - 1) - 1
quant_var = paddle.clip(
paddle.round(inputs / scale * self.qmax), -self.qmax - 1,
self.qmax)
quant_dequant_var = quant_var / self.qmax * scale
emd_loss = paddle.abs(
paddle.mean(inputs) - paddle.mean(quant_dequant_var)
) + paddle.abs(paddle.std(inputs) - paddle.std(quant_dequant_var))
emd_loss = float(emd_loss)
if emd_loss <= self._calibration_loss:
self._calibration_loss = emd_loss
scale_emd = scale
return 0, scale_emd
def cal_thresholds(self):
""" Compute thresholds for MAX function.
"""
if self._scale is not None:
self._zero_point = 0
return
self._min, self._max = self._emd_min, self._emd_max
self._scale, self._zero_point = self.cal_scales_zero_points()
def min_value(self) -> float:
return self._min
def max_value(self) -> float:
return self._max
def bit_length(self):
""" Return the bit length of quantized data.
"""
return self._quant_bits
def quant_axis(self):
""" Return quantization axis.
"""
return -1
def scales(self):
""" Return output scales.
"""
if self._scale is None:
self.cal_thresholds()
return self._scale
def METHOD_NAME(self):
""" Return output zero points.
"""
if self._zero_point is None:
self.cal_thresholds()
return self._zero_point
|
1,525 |
get path
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq import utils
from fairseq.data import (
ConcatSentencesDataset,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
TruncateDataset,
data_utils,
)
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("sentence_ranking")
class SentenceRankingTask(LegacyFairseqTask):
"""
Ranking task on multiple sentences.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", metavar="FILE", help="file prefix for data")
parser.add_argument(
"--num-classes", type=int, help="number of sentences to be ranked"
)
parser.add_argument(
"--init-token",
type=int,
help="add token at the beginning of each batch item",
)
parser.add_argument(
"--separator-token", type=int, help="add separator token between inputs"
)
parser.add_argument("--no-shuffle", action="store_true")
parser.add_argument(
"--shorten-method",
default="none",
choices=["none", "truncate", "random_crop"],
help="if not none, shorten sequences that exceed --tokens-per-sample",
)
parser.add_argument(
"--shorten-data-split-list",
default="",
help="comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)',
)
parser.add_argument(
"--max-option-length", type=int, help="max length for each option"
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
@classmethod
def load_dictionary(cls, args, filename, source=True):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert (
args.criterion == "sentence_ranking"
), "Must set --criterion=sentence_ranking"
# load data dictionary
data_dict = cls.load_dictionary(
args,
os.path.join(args.data, "input0", "dict.txt"),
source=True,
)
logger.info("[input] dictionary: {} types".format(len(data_dict)))
return SentenceRankingTask(args, data_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
def METHOD_NAME(type, split):
return os.path.join(self.args.data, type, split)
def make_dataset(type, dictionary):
split_path = METHOD_NAME(type, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
return dataset
input0 = make_dataset("input0", self.source_dictionary)
input_options = [
make_dataset("input{idx}".format(idx=idx + 1), self.source_dictionary)
for idx in range(self.args.num_classes)
]
if self.args.separator_token is not None:
input0 = PrependTokenDataset(input0, self.args.separator_token)
src_tokens = []
for input_option in input_options:
if self.args.init_token is not None:
input_option = PrependTokenDataset(input_option, self.args.init_token)
if self.args.max_option_length is not None:
input_option = TruncateDataset(
input_option, self.args.max_option_length
)
src_token = ConcatSentencesDataset(input_option, input0)
src_token = maybe_shorten_dataset(
src_token,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.max_positions,
self.args.seed,
)
src_tokens.append(src_token)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(src_tokens[0]))
dataset = {
"id": IdDataset(),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens[0], reduce=True),
}
for src_token_idx in range(len(src_tokens)):
dataset.update(
{
"net_input{idx}".format(idx=src_token_idx + 1): {
"src_tokens": RightPadDataset(
src_tokens[src_token_idx],
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": NumelDataset(
src_tokens[src_token_idx], reduce=False
),
}
}
)
label_path = "{}.label".format(METHOD_NAME("label", split))
if os.path.exists(label_path):
with open(label_path) as h:
dataset.update(
target=RawLabelDataset([int(x.strip()) for x in h.readlines()])
)
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
if self.args.no_shuffle:
dataset = nested_dataset
else:
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
getattr(args, "ranking_head_name", "sentence_classification_head"),
num_classes=1,
)
return model
def max_positions(self):
return self.args.max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
|
1,526 |
run gpu benchmark
|
# Released under the MIT License. See LICENSE for details.
#
"""Benchmark/Stress-Test related functionality."""
from __future__ import annotations
import random
from typing import TYPE_CHECKING
import babase
import bascenev1
if TYPE_CHECKING:
from typing import Any, Sequence
def run_cpu_benchmark() -> None:
"""Run a cpu benchmark."""
# pylint: disable=cyclic-import
from bascenev1lib import tutorial
class BenchmarkSession(bascenev1.Session):
"""Session type for cpu benchmark."""
def __init__(self) -> None:
# print('FIXME: BENCHMARK SESSION WOULD CALC DEPS.')
depsets: Sequence[bascenev1.DependencySet] = []
super().__init__(depsets)
# Store old graphics settings.
self._old_quality = babase.app.config.resolve('Graphics Quality')
cfg = babase.app.config
cfg['Graphics Quality'] = 'Low'
cfg.apply()
self.benchmark_type = 'cpu'
self.setactivity(bascenev1.newactivity(tutorial.TutorialActivity))
def __del__(self) -> None:
# When we're torn down, restore old graphics settings.
cfg = babase.app.config
cfg['Graphics Quality'] = self._old_quality
cfg.apply()
def on_player_request(self, player: bascenev1.SessionPlayer) -> bool:
return False
bascenev1.new_host_session(BenchmarkSession, benchmark_type='cpu')
def run_stress_test(
playlist_type: str = 'Random',
playlist_name: str = '__default__',
player_count: int = 8,
round_duration: int = 30,
) -> None:
"""Run a stress test."""
from babase import modutils
babase.screenmessage(
"Beginning stress test.. use 'End Test' to stop testing.",
color=(1, 1, 0),
)
with babase.ContextRef.empty():
start_stress_test(
{
'playlist_type': playlist_type,
'playlist_name': playlist_name,
'player_count': player_count,
'round_duration': round_duration,
}
)
babase.apptimer(
7.0,
babase.Call(
babase.screenmessage,
(
'stats will be written to '
+ modutils.get_human_readable_user_scripts_path()
+ '/stress_test_stats.csv'
),
),
)
def stop_stress_test() -> None:
"""End a running stress test."""
babase.set_stress_testing(False, 0)
assert babase.app.classic is not None
try:
if babase.app.classic.stress_test_reset_timer is not None:
babase.screenmessage('Ending stress test...', color=(1, 1, 0))
except Exception:
pass
babase.app.classic.stress_test_reset_timer = None
def start_stress_test(args: dict[str, Any]) -> None:
"""(internal)"""
from bascenev1 import DualTeamSession, FreeForAllSession
assert babase.app.classic is not None
appconfig = babase.app.config
playlist_type = args['playlist_type']
if playlist_type == 'Random':
if random.random() < 0.5:
playlist_type = 'Teams'
else:
playlist_type = 'Free-For-All'
babase.screenmessage(
'Running Stress Test (listType="'
+ playlist_type
+ '", listName="'
+ args['playlist_name']
+ '")...'
)
if playlist_type == 'Teams':
appconfig['Team Tournament Playlist Selection'] = args['playlist_name']
appconfig['Team Tournament Playlist Randomize'] = 1
babase.apptimer(
1.0,
babase.Call(
babase.pushcall,
babase.Call(bascenev1.new_host_session, DualTeamSession),
),
)
else:
appconfig['Free-for-All Playlist Selection'] = args['playlist_name']
appconfig['Free-for-All Playlist Randomize'] = 1
babase.apptimer(
1.0,
babase.Call(
babase.pushcall,
babase.Call(bascenev1.new_host_session, FreeForAllSession),
),
)
babase.set_stress_testing(True, args['player_count'])
babase.app.classic.stress_test_reset_timer = babase.AppTimer(
args['round_duration'], babase.Call(_reset_stress_test, args)
)
def _reset_stress_test(args: dict[str, Any]) -> None:
babase.set_stress_testing(False, args['player_count'])
babase.screenmessage('Resetting stress test...')
session = bascenev1.get_foreground_host_session()
assert session is not None
session.end()
babase.apptimer(1.0, babase.Call(start_stress_test, args))
def METHOD_NAME() -> None:
"""Kick off a benchmark to test gpu speeds."""
# FIXME: Not wired up yet.
babase.screenmessage('Not wired up yet.', color=(1, 0, 0))
def run_media_reload_benchmark() -> None:
"""Kick off a benchmark to test media reloading speeds."""
babase.reload_media()
babase.show_progress_bar()
def delay_add(start_time: float) -> None:
def doit(start_time_2: float) -> None:
babase.screenmessage(
babase.app.lang.get_resource(
'debugWindow.totalReloadTimeText'
).replace('${TIME}', str(babase.apptime() - start_time_2))
)
babase.print_load_info()
if babase.app.config.resolve('Texture Quality') != 'High':
babase.screenmessage(
babase.app.lang.get_resource(
'debugWindow.reloadBenchmarkBestResultsText'
),
color=(1, 1, 0),
)
babase.add_clean_frame_callback(babase.Call(doit, start_time))
# The reload starts (should add a completion callback to the
# reload func to fix this).
babase.apptimer(0.05, babase.Call(delay_add, babase.apptime()))
|
1,527 |
get raw point
|
import abc
import numpy
import pyresample
from scipy.interpolate import interp1d
import data.geo as geo
class Model(metaclass=abc.ABCMeta):
"""Abstract base class for models."""
def __init__(self, nc_data):
self.nc_data = nc_data
@property
@abc.abstractmethod
def depths(self) -> numpy.ndarray:
pass
def get_path(
self,
path,
depth,
variable,
starttime,
endtime=None,
numpoints=100,
times=None,
return_depth=False,
tile_time=True,
):
if times is None and endtime is not None:
time_slice = self.nc_data.make_time_slice(starttime, endtime)
times = self.nc_data.timestamps[time_slice]
distances, t, lat, lon, bearings = geo.path_to_points(
path, numpoints, times=times
)
if tile_time:
times = t
if return_depth:
result, dep = self.get_point(
lat, lon, depth, variable, starttime, endtime, return_depth=return_depth
)
return numpy.array([lat, lon]), distances, times, result, dep
else:
result = self.get_point(
lat, lon, depth, variable, starttime, endtime, return_depth=return_depth
)
return numpy.array([lat, lon]), distances, times, result
@abc.abstractmethod
def get_point(
self,
latitude,
longitude,
depth,
variable,
starttime,
endtime=None,
return_depth=False,
):
pass
@abc.abstractmethod
def get_profile(self, latitude, longitude, variable, starttime, endtime=None):
pass
@abc.abstractmethod
def METHOD_NAME(self, latitude, longitude, depth, time, variable):
pass
def _make_resample_data(self, lat_in, lon_in, lat_out, lon_out, data):
"""
Note: `data` must be of shape (time, lat, lon) OR (time, depth, lat, lon).
"""
if len(data.shape) == 4:
origshape = data.shape
# collapse time and depth axes (positions 0 and 1)
data = data.reshape(
(
origshape[0] * origshape[1], # combine time + depth
origshape[2], # lat
origshape[3], # lon
)
)
if len(data.shape) == 3:
# move lat and lon axes (normally the last two axes)
# into the first and second axis positions.
# Before: (..., lat, lon)
# After: (lat, lon, ...)
data = numpy.rollaxis(data, 0, 3)
data = numpy.ma.masked_invalid(data[:])
lon_in, lat_in = pyresample.utils.check_and_wrap(lon_in, lat_in)
masked_lon_in = numpy.ma.array(lon_in)
masked_lat_in = numpy.ma.array(lat_in)
output_def = pyresample.geometry.SwathDefinition(
lons=numpy.ma.array(lon_out), lats=numpy.ma.array(lat_out)
)
return data, masked_lat_in, masked_lon_in, output_def
def get_area(
self,
area,
depth,
time,
variable,
interp,
radius,
neighbours,
return_depth=False,
):
try:
latitude = area[0, :].ravel() # do we really need this slicing `:` BS?
longitude = area[1, :].ravel()
except IndexError:
latitude = area[0].ravel()
longitude = area[1].ravel()
self.nc_data.interp = interp
self.nc_data.radius = radius
self.nc_data.neighbours = neighbours
if return_depth:
a, d = self.get_point(
latitude, longitude, depth, variable, time, return_depth=return_depth
)
return numpy.reshape(a, area.shape[1:]), numpy.reshape(d, area.shape[1:])
a = self.get_point(
latitude, longitude, depth, variable, time, return_depth=return_depth
)
return numpy.reshape(a, area.shape[1:])
def get_path_profile(self, path, variable, starttime, endtime=None, numpoints=100):
distances, times, lat, lon, bearings = geo.path_to_points(path, numpoints)
result, depth = self.get_profile(lat, lon, variable, starttime, endtime=endtime)
return numpy.array([lat, lon]), distances, result.transpose(), depth
def get_profile_depths(self, latitude, longitude, time, variable, depths):
profile, orig_dep = self.get_profile(latitude, longitude, variable, time)
if not hasattr(latitude, "__len__"):
latitude = [latitude]
profile = [profile]
orig_dep = [orig_dep]
depths = numpy.array(depths)
if len(depths.shape) == 1:
depths = numpy.tile(depths, (len(latitude), 1))
output = []
for i in range(0, len(latitude)):
f = interp1d(
orig_dep[i],
profile[i],
assume_sorted=True,
bounds_error=False,
)
output.append(f(depths[i]))
return numpy.ma.masked_invalid(numpy.squeeze(output))
def get_timeseries_point(
self,
latitude,
longitude,
depth,
starttime,
endtime,
variable,
return_depth=False,
):
return self.get_point(
latitude,
longitude,
depth,
variable,
starttime,
endtime,
return_depth=return_depth,
)
def get_timeseries_profile(self, latitude, longitude, starttime, endtime, variable):
return self.get_profile(latitude, longitude, variable, starttime, endtime)
|
1,528 |
render
|
import typing as t
from ast import literal_eval
from ast import parse
from itertools import chain
from itertools import islice
from types import GeneratorType
from . import nodes
from .compiler import CodeGenerator
from .compiler import Frame
from .compiler import has_safe_repr
from .environment import Environment
from .environment import Template
def native_concat(values: t.Iterable[t.Any]) -> t.Optional[t.Any]:
"""Return a native Python type from the list of compiled nodes. If
the result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise,
the string is returned.
:param values: Iterable of outputs to concatenate.
"""
head = list(islice(values, 2))
if not head:
return None
if len(head) == 1:
raw = head[0]
if not isinstance(raw, str):
return raw
else:
if isinstance(values, GeneratorType):
values = chain(head, values)
raw = "".join([str(v) for v in values])
try:
return literal_eval(
# In Python 3.10+ ast.literal_eval removes leading spaces/tabs
# from the given string. For backwards compatibility we need to
# parse the string ourselves without removing leading spaces/tabs.
parse(raw, mode="eval")
)
except (ValueError, SyntaxError, MemoryError):
return raw
class NativeCodeGenerator(CodeGenerator):
"""A code generator which renders Python types by not adding
``str()`` around output nodes.
"""
@staticmethod
def _default_finalize(value: t.Any) -> t.Any:
return value
def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
return repr("".join([str(v) for v in group]))
def _output_child_to_const(
self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
) -> t.Any:
const = node.as_const(frame.eval_ctx)
if not has_safe_repr(const):
raise nodes.Impossible()
if isinstance(node, nodes.TemplateData):
return const
return finalize.const(const) # type: ignore
def _output_child_pre(
self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
) -> None:
if finalize.src is not None:
self.write(finalize.src)
def _output_child_post(
self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
) -> None:
if finalize.src is not None:
self.write(")")
class NativeEnvironment(Environment):
"""An environment that renders templates to native Python types."""
code_generator_class = NativeCodeGenerator
concat = staticmethod(native_concat) # type: ignore
class NativeTemplate(Template):
environment_class = NativeEnvironment
def METHOD_NAME(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Render the template to produce a native Python type. If the
result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed
with :func:`ast.literal_eval`, the parsed value is returned.
Otherwise, the string is returned.
"""
ctx = self.new_context(dict(*args, **kwargs))
try:
return self.environment_class.concat( # type: ignore
self.root_render_func(ctx)
)
except Exception:
return self.environment.handle_exception()
async def render_async(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
if not self.environment.is_async:
raise RuntimeError(
"The environment was not created with async mode enabled."
)
ctx = self.new_context(dict(*args, **kwargs))
try:
return self.environment_class.concat( # type: ignore
[n async for n in self.root_render_func(ctx)] # type: ignore
)
except Exception:
return self.environment.handle_exception()
NativeEnvironment.template_class = NativeTemplate
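# Added usage sketch (hedged, illustrative only): with a NativeEnvironment the
# rendered result keeps its Python type instead of being stringified.
#     env = NativeEnvironment()
#     assert env.from_string("{{ x + y }}").render(x=1, y=2) == 3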
|
1,529 |
test weight stream
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from tvm.relay.backend.contrib.ethosu.tir.scheduler import (
OperatorCompute,
copy_constants,
)
from tvm.relay.testing import run_opt_pass
from tvm.script import tir as T
from .infra import make_ethosu_conv2d
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(input_placeholder_3: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_write_1: T.Buffer((1, 16, 16, 8), "int8")) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer_1 = T.Buffer([384], "uint8")
placeholder_3 = T.Buffer([8192], dtype="int8", data=input_placeholder_3.data)
ethosu_write_1 = T.Buffer([2048], dtype="int8", data=input_ethosu_write_1.data)
# body
placeholder_global_data = T.allocate([384], "uint8", "global", annotations={"disable_lower_builtin": True})
placeholder_global = T.Buffer([384], "uint8", data=placeholder_global_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_1[0], 384, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 8, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 304, T.int8(-1), T.int8(-1), 12, placeholder_global[304], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_copy():
def _get_func():
data = relay.var("data", shape=(1, 16, 16, 32), dtype="int8")
conv = make_ethosu_conv2d(
data,
32,
8,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
mod, _ = _lower_to_tir(func, cascader=copy_constants())
script = mod.script()
test_mod = tvm.script.from_source(script)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
# fmt: off
@tvm.script.ir_module
class WeightStream:
@T.prim_func
def main(input_placeholder_5: T.Buffer((1, 16, 16, 32), "int8"), input_ethosu_write_1: T.Buffer((1, 16, 16, 16), "int8")) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.Buffer([528], "uint8")
buffer_2 = T.Buffer([336], "uint8")
placeholder_5 = T.Buffer([8192], dtype="int8", data=input_placeholder_5.data)
ethosu_write_1 = T.Buffer([4096], dtype="int8", data=input_ethosu_write_1.data)
# body
placeholder_d_global_data = T.allocate([528], "uint8", "global", annotations={"disable_lower_builtin": True})
placeholder_d_global = T.Buffer([528], "uint8", data=placeholder_d_global_data)
placeholder_d_global_1_data = T.allocate([336], "uint8", "global", annotations={"disable_lower_builtin": True})
placeholder_d_global_1 = T.Buffer([336], "uint8", data=placeholder_d_global_1_data)
T.evaluate(T.call_extern("ethosu_copy", buffer[0], 528, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_2[0], 336, placeholder_d_global_1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder_5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 10, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, placeholder_d_global[0], 416, T.int8(-1), T.int8(-1), 12, placeholder_d_global[416], 112, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder_5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 6, 16, 0, 16, ethosu_write_1[10], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, placeholder_d_global_1[0], 272, T.int8(-1), T.int8(-1), 12, placeholder_d_global_1[272], 64, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
def METHOD_NAME():
def _cascader(cached_func, const_dict, sch):
weight = cached_func.inputs[1]
scale_bias = cached_func.inputs[2]
out = cached_func.outputs[0]
conv_compute = OperatorCompute.from_output(out)
co = conv_compute.split(sch, 3, 10)
cache_weight = sch.cache_read(weight, "global", [conv_compute.op])
cache_scale_bias = sch.cache_read(scale_bias, "global", [conv_compute.op])
sch[cache_weight].compute_at(sch[out], co)
sch[cache_scale_bias].compute_at(sch[out], co)
def _get_func():
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype="int8")
conv = make_ethosu_conv2d(
ifm,
32,
16,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
mod, _ = _lower_to_tir(func, cascader=_cascader)
script = mod.script()
test_mod = tvm.script.from_source(script)
reference_mod = WeightStream
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
if __name__ == "__main__":
tvm.testing.main()
|
1,530 |
echo
|
import os
import base64
import click
import appdirs
import ipaddress
import typing
import uuid
from colorama import init, Fore, Style
from vantage6.common.globals import STRING_ENCODING
# init colorstuff
init()
def logger_name(special__name__: str):
"""
Return the name of the logger.
Parameters
----------
special__name__: str
The __name__ variable of a module.
Returns
-------
str
The name of the logger.
"""
log_name = special__name__.split('.')[-1]
if len(log_name) > 14:
log_name = log_name[:11] + ".."
return log_name
class WhoAmI(typing.NamedTuple):
"""
Data-class to store Authenticatable information in.
Attributes
----------
type_: str
The type of the authenticatable (user or node).
id_: int
The id of the authenticatable.
name: str
The name of the authenticatable.
organization_name: str
The name of the organization of the authenticatable.
organization_id: int
The id of the organization of the authenticatable.
"""
type_: str
id_: int
name: str
organization_name: str
organization_id: int
def __repr__(self) -> str:
return (f"<WhoAmI "
f"name={self.name}, "
f"type={self.type_}, "
f"organization={self.organization_name}, "
f"(id={self.organization_id})"
">")
class Singleton(type):
"""
Singleton metaclass. It allows us to create just a single instance of a
class to which it is the metaclass.
"""
_instances = {}
def __call__(cls, *args, **kwargs) -> object:
"""
When the class is called, return an instance of the class. If the
instance already exists, return that instance.
"""
if cls not in cls._instances:
instance = super(Singleton, cls).__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
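# Added usage sketch (hedged, illustrative only): any class using this
# metaclass yields one shared instance.
#     class Settings(metaclass=Singleton):
#         pass
#     assert Settings() is Settings()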
def bytes_to_base64s(bytes_: bytes) -> str:
"""
Convert bytes into base64 encoded string.
Parameters
----------
bytes_: bytes
The bytes to convert.
Returns
-------
str
The base64 encoded string.
"""
return base64.b64encode(bytes_).decode(STRING_ENCODING)
def base64s_to_bytes(bytes_string: str) -> bytes:
"""
Convert base64 encoded string to bytes.
Parameters
----------
bytes_string: str
The base64 encoded string.
Returns
-------
bytes
The encoded string converted to bytes.
"""
return base64.b64decode(bytes_string.encode(STRING_ENCODING))
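# Added round-trip sketch (hedged): base64s_to_bytes(bytes_to_base64s(b"abc"))
# returns b"abc" again.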
#
# CLI prints
#
def METHOD_NAME(msg: str, level: str = "info") -> None:
"""
Print a message to the CLI.
Parameters
----------
msg: str
The message to print.
level: str
The level of the message. Can be one of: "error", "warn", "info",
"debug".
"""
type_ = {
"error": f"[{Fore.RED}error{Style.RESET_ALL}]",
"warn": f"[{Fore.YELLOW}warn {Style.RESET_ALL}]",
"info": f"[{Fore.GREEN}info {Style.RESET_ALL}]",
"debug": f"[{Fore.CYAN}debug{Style.RESET_ALL}]",
}.get(level)
click.METHOD_NAME(f"{type_:16} - {msg}")
def info(msg: str) -> None:
"""
Print an info message to the CLI.
Parameters
----------
msg: str
The message to print.
"""
METHOD_NAME(msg, "info")
def warning(msg: str) -> None:
"""
Print a warning message to the CLI.
Parameters
----------
msg: str
The message to print.
"""
METHOD_NAME(msg, "warn")
def error(msg: str) -> None:
"""
Print an error message to the CLI.
Parameters
----------
msg: str
The message to print.
"""
METHOD_NAME(msg, "error")
def debug(msg: str) -> None:
"""
Print a debug message to the CLI.
Parameters
----------
msg: str
The message to print.
"""
METHOD_NAME(msg, "debug")
class ClickLogger:
""""Logs output to the click interface."""
@staticmethod
def info(msg: str) -> None:
"""
Print an info message to the click interface.
Parameters
----------
msg: str
The message to print.
"""
info(msg)
@staticmethod
def warn(msg: str) -> None:
"""
Print a warning message to the click interface.
Parameters
----------
msg: str
The message to print.
"""
warning(msg)
@staticmethod
def error(msg: str) -> None:
"""
Print an error message to the click interface.
Parameters
----------
msg: str
The message to print.
"""
error(msg)
@staticmethod
def debug(msg: str) -> None:
"""
Print a debug message to the click interface.
Parameters
----------
msg: str
The message to print.
"""
debug(msg)
def check_config_writeable(system_folders: bool = False) -> bool:
"""
Check if the user has write permissions to create the configuration file.
Parameters
----------
system_folders: bool
Whether to check the system folders or the user folders.
Returns
-------
bool
Whether the user has write permissions to create the configuration
file or not.
"""
dirs = appdirs.AppDirs()
if system_folders:
dirs_to_check = [
dirs.site_config_dir
]
else:
dirs_to_check = [
dirs.user_config_dir
]
w_ok = True
for dir_ in dirs_to_check:
if not os.path.isdir(dir_):
warning(f"Target directory '{dir_}' for configuration file does "
"not exist.")
w_ok = False
elif not os.access(dir_, os.W_OK):
warning(f"No write permissions at '{dir_}'.")
w_ok = False
return w_ok
def is_ip_address(ip: str) -> bool:
"""
Test if input IP address is a valid IP address
Parameters
----------
ip: str
IP address to validate
Returns
-------
bool: whether or not IP address is valid
"""
try:
_ = ipaddress.ip_address(ip)
return True
except Exception:
return False
def get_database_config(databases: list, label: str) -> dict | None:
"""Get database configuration from config file
Parameters
----------
databases: list[dict]
List of database configurations
label: str
Label of database configuration to retrieve
Returns
-------
Dict | None
Database configuration, or None if not found
Notes
-----
The ``databases`` configuration can be in two formats. The new format
allows for the specification of the database type. The structures of the
two formats are as follows:
1. Old format:
{
"database_label": "database_uri",
...
}
2. New format:
[
{
"label": "database_label",
"uri": "database_uri",
"db_type": "database_type"
}
]
"""
# FIXME The old format should be removed in v4+.
old_format = isinstance(databases, dict)
if old_format:
return {
"label": label,
"uri": databases[label],
"type": None
}
else:
for database in databases:
if database["label"] == label:
return database
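# Added example (hedged; label and URI are hypothetical): with the new list
# format,
#     get_database_config([{"label": "default", "uri": "sqlite:///db.sqlite",
#                           "type": "sql"}], "default")
# returns that dict unchanged, while the old {"default": "sqlite:///db.sqlite"}
# format yields {"label": "default", "uri": "sqlite:///db.sqlite", "type": None}.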
def generate_apikey() -> str:
"""Creates random api_key using uuid.
Returns
-------
str
api_key
"""
return str(uuid.uuid4())
|
1,531 |
commit
|
# -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE.
#
# SENAITE.CORE is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2018-2023 by it's authors.
# Some rights reserved, see README and LICENSE.
from Acquisition import aq_base
from bika.lims import api
from bika.lims import logger
from Products.CMFCore.interfaces import IPortalCatalogQueueProcessor
from senaite.core.catalog import AUDITLOG_CATALOG
from senaite.core.interfaces import IMultiCatalogBehavior
from zope.interface import implementer
REQUIRED_CATALOGS = [
AUDITLOG_CATALOG,
]
@implementer(IPortalCatalogQueueProcessor)
class CatalogMultiplexProcessor(object):
"""A catalog multiplex processor
"""
def is_global_auditlog_enabled(self):
"""Check if the global auditlogging is enabled
"""
setup = api.get_senaite_setup()
# might happen during installation
if not setup:
return False
return setup.getEnableGlobalAuditlog()
def get_catalogs_for(self, obj):
catalogs = getattr(obj, "_catalogs", [])
for rc in REQUIRED_CATALOGS:
if rc in catalogs:
continue
catalogs.append(rc)
# remove auditlog catalog if disabled
if not self.is_global_auditlog_enabled():
catalogs = filter(lambda cid: cid != AUDITLOG_CATALOG, catalogs)
return map(api.get_tool, catalogs)
def supports_multi_catalogs(self, obj):
"""Check if the Multi Catalog Behavior is enabled
"""
if IMultiCatalogBehavior(obj, None) is None:
return False
if api.is_temporary(obj):
return False
return True
def index(self, obj, attributes=None):
if not self.supports_multi_catalogs(obj):
return
catalogs = self.get_catalogs_for(obj)
url = api.get_path(obj)
for catalog in catalogs:
logger.info(
"CatalogMultiplexProcessor::indexObject:catalog={} url={}"
.format(catalog.id, url))
catalog._indexObject(obj)
def reindex(self, obj, attributes=None, update_metadata=1):
if attributes is None:
attributes = []
if not self.supports_multi_catalogs(obj):
return
catalogs = self.get_catalogs_for(obj)
url = api.get_path(obj)
for catalog in catalogs:
logger.info(
"CatalogMultiplexProcessor::reindexObject:catalog={} url={}"
.format(catalog.id, url))
# Intersection of the catalogs indexes and the incoming attributes
indexes = list(set(catalog.indexes()).intersection(attributes))
catalog._reindexObject(
obj, idxs=indexes, update_metadata=update_metadata)
def unindex(self, obj):
wrapped_obj = obj
if aq_base(obj).__class__.__name__ == "PathWrapper":
# Could be a PathWrapper object from collective.indexing.
obj = obj.context
if not self.supports_multi_catalogs(obj):
return
catalogs = self.get_catalogs_for(obj)
# get the old path from the wrapped object
url = api.get_path(wrapped_obj)
for catalog in catalogs:
if catalog._catalog.uids.get(url, None) is not None:
logger.info(
"CatalogMultiplexProcessor::unindex:catalog={} url={}"
.format(catalog.id, url))
catalog._unindexObject(wrapped_obj)
def begin(self):
pass
def METHOD_NAME(self):
pass
def abort(self):
pass
|
1,532 |
test defer makes the correct request
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the domain taskqueue services."""
from __future__ import annotations
import datetime
from core import feconf
from core import utils
from core.domain import taskqueue_services
from core.platform import models
from core.tests import test_utils
from typing import Dict, Optional, Set
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import platform_taskqueue_services
platform_taskqueue_services = models.Registry.import_taskqueue_services()
class TaskqueueDomainServicesUnitTests(test_utils.TestBase):
"""Tests for domain taskqueue services."""
def test_exception_raised_when_deferred_payload_is_not_serializable(
self
) -> None:
class NonSerializableArgs:
"""Object that is not JSON serializable."""
def __init__(self) -> None:
self.x = 1
self.y = 2
arg1 = NonSerializableArgs()
serialization_exception = self.assertRaisesRegex(
ValueError,
'The args or kwargs passed to the deferred call with '
'function_identifier, %s, are not json serializable.' %
taskqueue_services.FUNCTION_ID_UPDATE_STATS)
with serialization_exception:
taskqueue_services.defer(
taskqueue_services.FUNCTION_ID_UPDATE_STATS,
taskqueue_services.QUEUE_NAME_DEFAULT, arg1)
def test_exception_raised_when_email_task_params_is_not_serializable(
self
) -> None:
params: Dict[str, Set[str]] = {
'param1': set()
}
serialization_exception = self.assertRaisesRegex(
ValueError,
'The params added to the email task call cannot be json serialized')
with serialization_exception:
taskqueue_services.enqueue_task(
feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS,
params,
0)
def METHOD_NAME(self) -> None:
correct_fn_identifier = '/task/deferredtaskshandler'
correct_args = (1, 2, 3)
correct_kwargs = {'a': 'b', 'c': 'd'}
expected_queue_name = taskqueue_services.QUEUE_NAME_EMAILS
expected_url = feconf.TASK_URL_DEFERRED
expected_payload = {
'fn_identifier': correct_fn_identifier,
'args': correct_args,
'kwargs': correct_kwargs
}
create_http_task_swap = self.swap_with_checks(
platform_taskqueue_services,
'create_http_task',
lambda queue_name, url, payload=None, scheduled_for=None: None,
expected_kwargs=[{
'queue_name': expected_queue_name,
'url': expected_url,
'payload': expected_payload
}]
)
with create_http_task_swap:
taskqueue_services.defer(
correct_fn_identifier,
taskqueue_services.QUEUE_NAME_EMAILS,
*correct_args, **correct_kwargs
)
def test_enqueue_task_makes_the_correct_request(self) -> None:
correct_payload = {
'user_id': '1'
}
correct_url = feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS
correct_queue_name = taskqueue_services.QUEUE_NAME_EMAILS
def mock_create_http_task(
queue_name: str,
url: str,
payload: Optional[Dict[str, str]] = None,
scheduled_for: Optional[datetime.datetime] = None,
task_name: Optional[str] = None
) -> None:
self.assertEqual(queue_name, correct_queue_name)
self.assertEqual(url, correct_url)
self.assertEqual(payload, correct_payload)
self.assertIsNotNone(scheduled_for)
self.assertIsNone(task_name)
swap_create_http_task = self.swap(
platform_taskqueue_services, 'create_http_task',
mock_create_http_task)
with swap_create_http_task:
taskqueue_services.enqueue_task(
correct_url, correct_payload, 0)
def test_that_queue_names_are_in_sync_with_queue_yaml_file(self) -> None:
"""Checks that all of the queues that are instantiated in the queue.yaml
file has a corresponding QUEUE_NAME_* constant instantiated in
taskqueue_services.
"""
queue_name_dict = {}
# Parse the queue.yaml file for the correct queue names.
with utils.open_file('queue.yaml', 'r') as f:
lines = f.readlines()
for line in lines:
if 'name' in line:
queue_name = line.split(':')[1]
queue_name_dict[queue_name.strip()] = False
# Get all attributes of taskqueue_services using the dir function.
attributes = dir(taskqueue_services)
# Check if the queue names in the queue.yaml file exist as a queue
# name in taskqueue_services.
for attribute in attributes:
if attribute.startswith('QUEUE_NAME_'):
queue_name_dict[getattr(taskqueue_services, attribute)] = True
for queue_name, in_taskqueue_services in queue_name_dict.items():
self.assertTrue(in_taskqueue_services)
|
1,533 |
parse opt
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import yaml
import re
from paddlex.ppdet.core.workspace import get_registered_modules, dump_value
__all__ = ['ColorTTY', 'ArgsParser']
class ColorTTY(object):
def __init__(self):
super(ColorTTY, self).__init__()
self.colors = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan']
def __getattr__(self, attr):
if attr in self.colors:
color = self.colors.index(attr) + 31
def color_message(message):
return "[{}m{}[0m".format(color, message)
setattr(self, attr, color_message)
return color_message
def bold(self, message):
return self.with_code('01', message)
def with_code(self, code, message):
return "[{}m{}[0m".format(code, message)
class ArgsParser(ArgumentParser):
def __init__(self):
super(ArgsParser, self).__init__(
formatter_class=RawDescriptionHelpFormatter)
self.add_argument("-c", "--config", help="configuration file to use")
self.add_argument(
"-o", "--opt", nargs='*', help="set configuration options")
def parse_args(self, argv=None):
args = super(ArgsParser, self).parse_args(argv)
assert args.config is not None, \
"Please specify --config=configure_file_path."
args.opt = self.METHOD_NAME(args.opt)
return args
def METHOD_NAME(self, opts):
config = {}
if not opts:
return config
for s in opts:
s = s.strip()
k, v = s.split('=', 1)
if '.' not in k:
config[k] = yaml.load(v, Loader=yaml.Loader)
else:
keys = k.split('.')
if keys[0] not in config:
config[keys[0]] = {}
cur = config[keys[0]]
for idx, key in enumerate(keys[1:]):
if idx == len(keys) - 2:
cur[key] = yaml.load(v, Loader=yaml.Loader)
else:
cur[key] = {}
cur = cur[key]
return config
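# Added example (hedged; option names are hypothetical): with the dotted-key
# handling above, parsing ["use_gpu=false", "TrainReader.batch_size=8"] is
# expected to yield {"use_gpu": False, "TrainReader": {"batch_size": 8}}.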
def merge_args(config, args, exclude_args=['config', 'opt', 'slim_config']):
for k, v in vars(args).items():
if k not in exclude_args:
config[k] = v
return config
def print_total_cfg(config):
modules = get_registered_modules()
color_tty = ColorTTY()
green = '___{}___'.format(color_tty.colors.index('green') + 31)
styled = {}
for key in config.keys():
if not config[key]: # empty schema
continue
if key not in modules and not hasattr(config[key], '__dict__'):
styled[key] = config[key]
continue
elif key in modules:
module = modules[key]
else:
type_name = type(config[key]).__name__
if type_name in modules:
module = modules[type_name].copy()
module.update({
k: v
for k, v in config[key].__dict__.items()
if k in module.schema
})
key += " ({})".format(type_name)
default = module.find_default_keys()
missing = module.find_missing_keys()
mismatch = module.find_mismatch_keys()
extra = module.find_extra_keys()
dep_missing = []
for dep in module.inject:
if isinstance(module[dep], str) and module[dep] != '<value>':
if module[dep] not in modules: # not a valid module
dep_missing.append(dep)
else:
dep_mod = modules[module[dep]]
# empty dict but mandatory
if not dep_mod and dep_mod.mandatory():
dep_missing.append(dep)
override = list(
set(module.keys()) - set(default) - set(extra) - set(dep_missing))
replacement = {}
for name in set(override + default + extra + mismatch + missing):
new_name = name
if name in missing:
value = "<missing>"
else:
value = module[name]
if name in extra:
value = dump_value(value) + " <extraneous>"
elif name in mismatch:
value = dump_value(value) + " <type mismatch>"
elif name in dep_missing:
value = dump_value(value) + " <module config missing>"
elif name in override and value != '<missing>':
mark = green
new_name = mark + name
replacement[new_name] = value
styled[key] = replacement
buffer = yaml.dump(styled, default_flow_style=False, default_style='')
buffer = (re.sub(r"<missing>", r"[31m<missing>[0m", buffer))
buffer = (re.sub(r"<extraneous>", r"[33m<extraneous>[0m", buffer))
buffer = (re.sub(r"<type mismatch>", r"[31m<type mismatch>[0m", buffer))
buffer = (re.sub(r"<module config missing>",
r"[31m<module config missing>[0m", buffer))
buffer = re.sub(r"___(\d+)___(.*?):", r"[\1m\2[0m:", buffer)
print(buffer)
|
1,534 |
rotate local
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional
import magnum as mn
import numpy as np
from habitat_sim.agent.controls.controls import ActuationSpec, SceneNodeControl
from habitat_sim.geo import FRONT
from habitat_sim.registry import registry
from habitat_sim.scene import SceneNode
__all__: List[str] = []
_X_AXIS = 0
_Y_AXIS = 1
_Z_AXIS = 2
_rotate_local_fns = [
SceneNode.rotate_x_local,
SceneNode.rotate_y_local,
SceneNode.rotate_z_local,
]
def _move_along(scene_node: SceneNode, distance: float, axis: int) -> None:
ax = scene_node.transformation[axis].xyz
scene_node.translate_local(ax * distance)
def METHOD_NAME(
scene_node: SceneNode, theta: float, axis: int, constraint: Optional[float] = None
) -> None:
if constraint is not None:
rotation = scene_node.rotation
if (
abs(float(rotation.angle())) > 0
and 1.0 - abs(rotation.axis().normalized()[axis]) > 1e-3
):
raise RuntimeError(
"Constrained look only works for a singular look action type"
)
look_vector = rotation.transform_vector(FRONT)
if axis == 0:
look_angle = mn.Rad(np.arctan2(look_vector[1], -look_vector[2]))
elif axis == 1:
look_angle = -mn.Rad(np.arctan2(look_vector[0], -look_vector[2]))
new_angle = look_angle + mn.Deg(theta)
constraint = mn.Deg(constraint)
if new_angle > constraint:
theta = constraint - look_angle
elif new_angle < -constraint:
theta = -constraint - look_angle
_rotate_local_fns[axis](scene_node, mn.Deg(theta))
scene_node.rotation = scene_node.rotation.normalized()
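# Added note (hedged): when `constraint` (in degrees) is given, the helper
# above recovers the current look angle of the rotated FRONT vector about the
# requested axis and clamps `theta` so the new angle stays inside
# [-constraint, +constraint] before applying the local rotation.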
@registry.register_move_fn(body_action=True)
class MoveBackward(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
_move_along(scene_node, actuation_spec.amount, _Z_AXIS)
@registry.register_move_fn(body_action=True)
class MoveForward(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
_move_along(scene_node, -actuation_spec.amount, _Z_AXIS)
@registry.register_move_fn(body_action=True)
class MoveRight(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
_move_along(scene_node, actuation_spec.amount, _X_AXIS)
@registry.register_move_fn(body_action=True)
class MoveLeft(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
_move_along(scene_node, -actuation_spec.amount, _X_AXIS)
@registry.register_move_fn(body_action=False)
class MoveUp(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
_move_along(scene_node, actuation_spec.amount, _Y_AXIS)
@registry.register_move_fn(body_action=False)
class MoveDown(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
_move_along(scene_node, -actuation_spec.amount, _Y_AXIS)
@registry.register_move_fn(body_action=False)
class LookLeft(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
METHOD_NAME(
scene_node, actuation_spec.amount, _Y_AXIS, actuation_spec.constraint
)
@registry.register_move_fn(body_action=False)
class LookRight(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
METHOD_NAME(
scene_node, -actuation_spec.amount, _Y_AXIS, actuation_spec.constraint
)
registry.register_move_fn(LookLeft, name="turn_left", body_action=True)
registry.register_move_fn(LookRight, name="turn_right", body_action=True)
@registry.register_move_fn(body_action=False)
class LookUp(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
METHOD_NAME(
scene_node, actuation_spec.amount, _X_AXIS, actuation_spec.constraint
)
@registry.register_move_fn(body_action=False)
class LookDown(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
METHOD_NAME(
scene_node, -actuation_spec.amount, _X_AXIS, actuation_spec.constraint
)
@registry.register_move_fn(body_action=False)
class RotateSensorClockwise(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
METHOD_NAME(
scene_node, actuation_spec.amount, _Z_AXIS, actuation_spec.constraint
)
@registry.register_move_fn(body_action=False)
class RotateSensorAntiClockwise(SceneNodeControl):
def __call__(self, scene_node: SceneNode, actuation_spec: ActuationSpec) -> None:
METHOD_NAME(
scene_node, -actuation_spec.amount, _Z_AXIS, actuation_spec.constraint
)
|
1,535 |
read conm2
|
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
import h5py
from ..h5_utils import read_basic_element
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
def METHOD_NAME(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'G', 'CID', 'M', 'X1', 'X2', 'X3', 'I1', 'I2', 'I3', 'DOMAIN_ID')
EID = group['EID']
NID = group['G']
CID = group['CID']
MASS = group['M']
X1 = group['X1']
X2 = group['X2']
X3 = group['X3']
I1 = group['I1']
I2 = group['I2']
I3 = group['I3']
X = np.stack([X1, X2, X3], axis=1)
DOMAIN_ID = group['DOMAIN_ID']
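    # CONM2 stores the inertia terms as I1=(I11,), I2=(I21, I22), I3=(I31, I32, I33);
    # the loop below flattens them into the 6-term lower-triangular list expected by add_conm2.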
for eid, nid, cid, mass, x, i1, i2, i3 in zip(EID, NID, CID, MASS, X, I1, I2, I3):
i11 = i1
i21, i22 = i2
i31, i32, i33 = i3
i = [i11, i21, i22, i31, i32, i33]
obj = geom_model.add_conm2(eid, nid, mass, cid=cid, X=x, I=i, comment='')
obj.validate()
def read_celas1(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'PID', 'G1', 'G2', 'C1', 'C2', 'DOMAIN_ID')
EID = group['EID']
PID = group['PID']
G1 = group['G1']
G2 = group['G2']
C1 = group['C1']
C2 = group['C2']
NIDS = np.stack([G1, G2], axis=1)
assert NIDS.shape[1] == 2, NIDS.shape
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids, c1, c2 in zip(EID, PID, NIDS, C1, C2):
obj = geom_model.add_celas1(eid, pid, nids, c1=c1, c2=c2, comment='')
obj.validate()
def read_celas2(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'K', 'G1', 'G2', 'C1', 'C2', 'DOMAIN_ID')
EID = group['EID']
K = group['K']
G1 = group['G1']
G2 = group['G2']
C1 = group['C1']
C2 = group['C2']
NIDS = np.stack([G1, G2], axis=1)
assert NIDS.shape[1] == 2, NIDS.shape
DOMAIN_ID = group['DOMAIN_ID']
for eid, k, nids, c1, c2 in zip(EID, K, NIDS, C1, C2):
obj = geom_model.add_celas2(eid, k, nids, c1=c1, c2=c2, comment='')
obj.validate()
def read_celas3(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
EID = group['EID']
PID = group['PID']
G1 = group['S1']
G2 = group['S2']
NIDS = np.stack([G1, G2], axis=1)
assert NIDS.shape[1] == 2, NIDS.shape
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids in zip(EID, PID, NIDS):
obj = geom_model.add_celas3(eid, pid, nids, comment='')
obj.validate()
def read_celas4(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
EID = group['EID']
K = group['K']
G1 = group['S1']
G2 = group['S2']
NIDS = np.stack([G1, G2], axis=1)
assert NIDS.shape[1] == 2, NIDS.shape
DOMAIN_ID = group['DOMAIN_ID']
for eid, k, nids in zip(EID, K, NIDS):
obj = geom_model.add_celas4(eid, k, nids)
obj.validate()
def read_cdamp1(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'PID', 'G1', 'G2', 'C1', 'C2', 'DOMAIN_ID')
EID = group['EID']
PID = group['PID']
G1 = group['G1']
G2 = group['G2']
C1 = group['C1']
C2 = group['C2']
NIDS = np.stack([G1, G2], axis=1)
assert NIDS.shape[1] == 2, NIDS.shape
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids, c1, c2 in zip(EID, PID, NIDS, C1, C2):
obj = geom_model.add_cdamp1(eid, pid, nids, c1=c1, c2=c2, comment='')
obj.validate()
def read_cdamp2(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'B', 'G1', 'G2', 'C1', 'C2', 'DOMAIN_ID')
EID = group['EID']
B = group['B']
G1 = group['G1']
G2 = group['G2']
C1 = group['C1']
C2 = group['C2']
NIDS = np.stack([G1, G2], axis=1)
assert NIDS.shape[1] == 2, NIDS.shape
DOMAIN_ID = group['DOMAIN_ID']
for eid, b, nids, c1, c2 in zip(EID, B, NIDS, C1, C2):
obj = geom_model.add_cdamp2(eid, b, nids, c1=c1, c2=c2, comment='')
obj.validate()
def read_cdamp3(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
EID = group['EID']
PID = group['PID']
G1 = group['S1']
G2 = group['S2']
NIDS = np.stack([G1, G2], axis=1)
assert NIDS.shape[1] == 2, NIDS.shape
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids in zip(EID, PID, NIDS):
obj = geom_model.add_cdamp3(eid, pid, nids, comment='')
obj.validate()
def read_cdamp4(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
EID = group['EID']
B = group['B']
G1 = group['S1']
G2 = group['S2']
NIDS = np.stack([G1, G2], axis=1)
assert NIDS.shape[1] == 2, NIDS.shape
DOMAIN_ID = group['DOMAIN_ID']
for eid, b, nids in zip(EID, B, NIDS):
obj = geom_model.add_cdamp4(eid, b, nids)
obj.validate()
def read_cvisc(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'PID', 'G', 'DOMAIN_ID')
read_basic_element(group, geom_model, geom_model.add_cvisc)
def read_cbush(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
    raise NotImplementedError('CBUSH reading is not implemented')
def read_cbush1d(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
    raise NotImplementedError('CBUSH1D reading is not implemented')
def read_cbush2d(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
    raise NotImplementedError('CBUSH2D reading is not implemented')
|
1,536 |
handler
|
# -*- coding: utf-8 -*-
import threading
import socket
import time
import pytest
import requests
from tests.testserver.server import Server
class TestTestServer:
def test_basic(self):
"""messages are sent and received properly"""
question = b"success?"
answer = b"yeah, success"
def METHOD_NAME(sock):
text = sock.recv(1000)
assert text == question
sock.sendall(answer)
with Server(METHOD_NAME) as (host, port):
sock = socket.socket()
sock.connect((host, port))
sock.sendall(question)
text = sock.recv(1000)
assert text == answer
sock.close()
def test_server_closes(self):
"""the server closes when leaving the context manager"""
with Server.basic_response_server() as (host, port):
sock = socket.socket()
sock.connect((host, port))
sock.close()
with pytest.raises(socket.error):
new_sock = socket.socket()
new_sock.connect((host, port))
def test_text_response(self):
"""the text_response_server sends the given text"""
server = Server.text_response_server(
"HTTP/1.1 200 OK\r\n" +
"Content-Length: 6\r\n" +
"\r\nroflol"
)
with server as (host, port):
r = requests.get('http://{}:{}'.format(host, port))
assert r.status_code == 200
assert r.text == u'roflol'
assert r.headers['Content-Length'] == '6'
def test_basic_response(self):
"""the basic response server returns an empty http response"""
with Server.basic_response_server() as (host, port):
r = requests.get('http://{}:{}'.format(host, port))
assert r.status_code == 200
assert r.text == u''
assert r.headers['Content-Length'] == '0'
def test_basic_waiting_server(self):
"""the server waits for the block_server event to be set before closing"""
block_server = threading.Event()
with Server.basic_response_server(wait_to_close_event=block_server) as (host, port):
sock = socket.socket()
sock.connect((host, port))
sock.sendall(b'send something')
time.sleep(2.5)
sock.sendall(b'still alive')
block_server.set() # release server block
def test_multiple_requests(self):
"""multiple requests can be served"""
requests_to_handle = 5
server = Server.basic_response_server(requests_to_handle=requests_to_handle)
with server as (host, port):
server_url = 'http://{}:{}'.format(host, port)
for _ in range(requests_to_handle):
r = requests.get(server_url)
assert r.status_code == 200
# the (n+1)th request fails
with pytest.raises(requests.exceptions.ConnectionError):
r = requests.get(server_url)
@pytest.mark.skip(reason="this fails non-deterministically under pytest-xdist")
def test_request_recovery(self):
"""can check the requests content"""
# TODO: figure out why this sometimes fails when using pytest-xdist.
server = Server.basic_response_server(requests_to_handle=2)
first_request = b'put your hands up in the air'
second_request = b'put your hand down in the floor'
with server as address:
sock1 = socket.socket()
sock2 = socket.socket()
sock1.connect(address)
sock1.sendall(first_request)
sock1.close()
sock2.connect(address)
sock2.sendall(second_request)
sock2.close()
assert server.handler_results[0] == first_request
assert server.handler_results[1] == second_request
def test_requests_after_timeout_are_not_received(self):
"""the basic response handler times out when receiving requests"""
server = Server.basic_response_server(request_timeout=1)
with server as address:
sock = socket.socket()
sock.connect(address)
time.sleep(1.5)
sock.sendall(b'hehehe, not received')
sock.close()
assert server.handler_results[0] == b''
def test_request_recovery_with_bigger_timeout(self):
"""a biggest timeout can be specified"""
server = Server.basic_response_server(request_timeout=3)
data = b'bananadine'
with server as address:
sock = socket.socket()
sock.connect(address)
time.sleep(1.5)
sock.sendall(data)
sock.close()
assert server.handler_results[0] == data
def test_server_finishes_on_error(self):
"""the server thread exits even if an exception exits the context manager"""
server = Server.basic_response_server()
with pytest.raises(Exception):
with server:
raise Exception()
assert len(server.handler_results) == 0
# if the server thread fails to finish, the test suite will hang
# and get killed by the jenkins timeout.
def test_server_finishes_when_no_connections(self):
"""the server thread exits even if there are no connections"""
server = Server.basic_response_server()
with server:
pass
assert len(server.handler_results) == 0
# if the server thread fails to finish, the test suite will hang
# and get killed by the jenkins timeout.
|
1,537 |
domain return ok
|
import sys
from _typeshed import StrPath
from collections.abc import Iterable, Iterator, Sequence
from http.client import HTTPResponse
from re import Pattern
from typing import ClassVar, TypeVar, overload
from urllib.request import Request
__all__ = [
"Cookie",
"CookieJar",
"CookiePolicy",
"DefaultCookiePolicy",
"FileCookieJar",
"LWPCookieJar",
"LoadError",
"MozillaCookieJar",
]
_T = TypeVar("_T")
class LoadError(OSError): ...
class CookieJar(Iterable[Cookie]):
non_word_re: ClassVar[Pattern[str]] # undocumented
quote_re: ClassVar[Pattern[str]] # undocumented
strict_domain_re: ClassVar[Pattern[str]] # undocumented
domain_re: ClassVar[Pattern[str]] # undocumented
dots_re: ClassVar[Pattern[str]] # undocumented
magic_re: ClassVar[Pattern[str]] # undocumented
def __init__(self, policy: CookiePolicy | None = None) -> None: ...
def add_cookie_header(self, request: Request) -> None: ...
def extract_cookies(self, response: HTTPResponse, request: Request) -> None: ...
def set_policy(self, policy: CookiePolicy) -> None: ...
def make_cookies(self, response: HTTPResponse, request: Request) -> Sequence[Cookie]: ...
def set_cookie(self, cookie: Cookie) -> None: ...
def set_cookie_if_ok(self, cookie: Cookie, request: Request) -> None: ...
def clear(self, domain: str | None = None, path: str | None = None, name: str | None = None) -> None: ...
def clear_session_cookies(self) -> None: ...
def clear_expired_cookies(self) -> None: ... # undocumented
def __iter__(self) -> Iterator[Cookie]: ...
def __len__(self) -> int: ...
class FileCookieJar(CookieJar):
filename: str
delayload: bool
if sys.version_info >= (3, 8):
def __init__(
self, filename: StrPath | None = None, delayload: bool = False, policy: CookiePolicy | None = None
) -> None: ...
else:
def __init__(self, filename: str | None = None, delayload: bool = False, policy: CookiePolicy | None = None) -> None: ...
def save(self, filename: str | None = None, ignore_discard: bool = False, ignore_expires: bool = False) -> None: ...
def load(self, filename: str | None = None, ignore_discard: bool = False, ignore_expires: bool = False) -> None: ...
def revert(self, filename: str | None = None, ignore_discard: bool = False, ignore_expires: bool = False) -> None: ...
class MozillaCookieJar(FileCookieJar):
if sys.version_info < (3, 10):
header: ClassVar[str] # undocumented
class LWPCookieJar(FileCookieJar):
def as_lwp_str(self, ignore_discard: bool = True, ignore_expires: bool = True) -> str: ... # undocumented
class CookiePolicy:
netscape: bool
rfc2965: bool
hide_cookie2: bool
def set_ok(self, cookie: Cookie, request: Request) -> bool: ...
def return_ok(self, cookie: Cookie, request: Request) -> bool: ...
def METHOD_NAME(self, domain: str, request: Request) -> bool: ...
def path_return_ok(self, path: str, request: Request) -> bool: ...
class DefaultCookiePolicy(CookiePolicy):
rfc2109_as_netscape: bool
strict_domain: bool
strict_rfc2965_unverifiable: bool
strict_ns_unverifiable: bool
strict_ns_domain: int
strict_ns_set_initial_dollar: bool
strict_ns_set_path: bool
DomainStrictNoDots: ClassVar[int]
DomainStrictNonDomain: ClassVar[int]
DomainRFC2965Match: ClassVar[int]
DomainLiberal: ClassVar[int]
DomainStrict: ClassVar[int]
if sys.version_info >= (3, 8):
def __init__(
self,
blocked_domains: Sequence[str] | None = None,
allowed_domains: Sequence[str] | None = None,
netscape: bool = True,
rfc2965: bool = False,
rfc2109_as_netscape: bool | None = None,
hide_cookie2: bool = False,
strict_domain: bool = False,
strict_rfc2965_unverifiable: bool = True,
strict_ns_unverifiable: bool = False,
strict_ns_domain: int = 0,
strict_ns_set_initial_dollar: bool = False,
strict_ns_set_path: bool = False,
secure_protocols: Sequence[str] = ("https", "wss"),
) -> None: ...
else:
def __init__(
self,
blocked_domains: Sequence[str] | None = None,
allowed_domains: Sequence[str] | None = None,
netscape: bool = True,
rfc2965: bool = False,
rfc2109_as_netscape: bool | None = None,
hide_cookie2: bool = False,
strict_domain: bool = False,
strict_rfc2965_unverifiable: bool = True,
strict_ns_unverifiable: bool = False,
strict_ns_domain: int = 0,
strict_ns_set_initial_dollar: bool = False,
strict_ns_set_path: bool = False,
) -> None: ...
def blocked_domains(self) -> tuple[str, ...]: ...
def set_blocked_domains(self, blocked_domains: Sequence[str]) -> None: ...
def is_blocked(self, domain: str) -> bool: ...
def allowed_domains(self) -> tuple[str, ...] | None: ...
def set_allowed_domains(self, allowed_domains: Sequence[str] | None) -> None: ...
def is_not_allowed(self, domain: str) -> bool: ...
def set_ok_version(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def set_ok_verifiability(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def set_ok_name(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def set_ok_path(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def set_ok_domain(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def set_ok_port(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def return_ok_version(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def return_ok_verifiability(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def return_ok_secure(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def return_ok_expires(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def return_ok_port(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
def return_ok_domain(self, cookie: Cookie, request: Request) -> bool: ... # undocumented
class Cookie:
version: int | None
name: str
value: str | None
port: str | None
path: str
path_specified: bool
secure: bool
expires: int | None
discard: bool
comment: str | None
comment_url: str | None
rfc2109: bool
port_specified: bool
domain: str # undocumented
domain_specified: bool
domain_initial_dot: bool
def __init__(
self,
version: int | None,
name: str,
value: str | None, # undocumented
port: str | None,
port_specified: bool,
domain: str,
domain_specified: bool,
domain_initial_dot: bool,
path: str,
path_specified: bool,
secure: bool,
expires: int | None,
discard: bool,
comment: str | None,
comment_url: str | None,
rest: dict[str, str],
rfc2109: bool = False,
) -> None: ...
def has_nonstandard_attr(self, name: str) -> bool: ...
@overload
def get_nonstandard_attr(self, name: str) -> str | None: ...
@overload
def get_nonstandard_attr(self, name: str, default: _T) -> str | _T: ...
def set_nonstandard_attr(self, name: str, value: str) -> None: ...
def is_expired(self, now: int | None = None) -> bool: ...
|
1,538 |
open tle
|
"""
Satellite TLE (two-line element) Loading
----------------------------------------
"""
from __future__ import absolute_import, print_function
import datetime
import re
import os
from functools import cmp_to_key
import ephem
TLE_ENTRY_RE = (
r"^([1])(\s+)([%(NUMBER)s%(CLASSIFICATION)s]+)(\s+)"
r"([%(INTL_DESIGNATOR)s]+)(\s+)%(YYDDD)s(\.)(\d+)(\s+)"
r"([\s\-]+)(\.)(\d+)(\s+)(\d+)([\-\+])(\d+)(\s+)(\d+)"
r"([\-\+])(\d+)(\s+)(\d+)(\s+)(\d+)(\s)^([2])(.+)$"
)
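# Template regex for one TLE entry; the %(NUMBER)s, %(CLASSIFICATION)s, %(INTL_DESIGNATOR)s
# and %(YYDDD)s fields are substituted per satellite and per candidate epoch before the
# archive text is searched.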
def load_tle(acquisition, data_root, date_radius=45):
"""
Loads satellite TLE (two-line element) for the given date and time.
Arguments:
centre_datetime: datetime.datetime instance
data_root: root directory for TLE archive files
date_radius: date radius for TLE search (days)
Returns:
ephem EarthSatellite instance
"""
return load_tle_from_archive(
acquisition, data_root, date_radius
) or load_tle_from_files(acquisition, data_root, date_radius)
def load_tle_from_archive(acquisition, data_root, day_radius=45):
"""Loads TLE (two-line element) for the satellite, date and time from
an archive file.
Arguments:
centre_datetime: datetime.datetime instance
data_root: directory containing TLE archive files
date_radius: date radius for TLE search (days)
Returns:
ephem EarthSatellite instance
"""
center_datetime = acquisition.acquisition_datetime
offsets = sorted(
range(-day_radius, day_radius), key=cmp_to_key(lambda x, y: abs(x) - abs(y))
)
tds = [datetime.timedelta(days=d) for d in offsets]
yyddd_list = [(center_datetime + d).strftime("%02y%03j") for d in tds]
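    # Candidate epoch day strings, ordered so the dates closest to the acquisition are tried first.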
name = acquisition.platform_id.replace("_", "").upper()
tle_archive_path = os.path.join(
data_root, name, "TLE", "%s_ARCHIVE.txt" % acquisition.tag
)
text = ""
try:
with open(tle_archive_path, "r") as fd:
text = fd.read()
except IOError:
# no TLE archive file exists
return None
re_params = {
"NUMBER": acquisition.norad_id,
"CLASSIFICATION": acquisition.classification_type,
"INTL_DESIGNATOR": acquisition.international_designator,
}
for yyddd in yyddd_list:
re_params["YYDDD"] = yyddd
match = re.search(TLE_ENTRY_RE % re_params, text, re.MULTILINE)
if match:
# Reconstitute TLE entry from regex match groups.
tle_text = "".join(match.groups()[0:6]) + yyddd + "".join(match.groups()[6:])
lines = tle_text.split("\n")
return ephem.readtle(acquisition.platform_id, lines[0], lines[1])
return None
def load_tle_from_files(acquisition, data_root, day_range=45):
"""Load a TLE file for the specified datetime.
Arguments:
center_datetime: scene center datetime (datetime instance)
data_root: ephemeris data root directory
Returns:
ephem EarthSatellite instance
"""
name = acquisition.platform_id.replace("-", "").upper()
def METHOD_NAME(tle_path, center_datetime):
"""Open the TLE file and read."""
with open(tle_path, "r") as fd:
tle_text = fd.readlines()
if acquisition.tag == "LS5":
tle1, tle2 = tle_text[7:9]
if acquisition.tag == "LS7":
tle1, tle2 = tle_text[1:3]
return ephem.readtle(acquisition.platform_id, tle1, tle2)
center_datetime = acquisition.acquisition_datetime
scene_doy = center_datetime.strftime("%j") # Note format: '%03d'
tle_dir = os.path.join(
data_root, name, "TLE", "%s_YEAR" % acquisition.tag, "%4d" % center_datetime.year
)
tle_file = acquisition.tle_format % (center_datetime.year, scene_doy)
tle_path = os.path.join(tle_dir, tle_file)
if os.path.exists(tle_path):
try:
return METHOD_NAME(tle_path, center_datetime)
except IOError:
pass
for d in range(1, day_range):
ddelta = datetime.timedelta(days=d)
for s in [-1, 1]:
dt = center_datetime + (ddelta * s)
tle_dir = os.path.join(
data_root, name, "TLE", "%s_YEAR" % acquisition.tag, "%4d" % dt.year
)
tle_file = acquisition.tle_format % (dt.year, dt.strftime("%j"))
tle_path = os.path.join(tle_dir, tle_file)
if os.path.exists(tle_path):
try:
return METHOD_NAME(tle_path, center_datetime)
except IOError:
pass
return None
|
1,539 |
check error msg
|
import random
import numpy as np
from numba.tests.support import TestCase, captured_stdout
from numba import njit, literally
from numba.core import types
from numba.cpython.unsafe.tuple import tuple_setitem, build_full_slice_tuple
from numba.np.unsafe.ndarray import to_fixed_tuple, empty_inferred
from numba.core.unsafe.bytes import memcpy_region
from numba.core.unsafe.refcount import dump_refcount
from numba.cpython.unsafe.numbers import trailing_zeros, leading_zeros
from numba.core.errors import TypingError
class TestTupleIntrinsic(TestCase):
"""Tests for numba.unsafe.tuple
"""
def test_tuple_setitem(self):
@njit
def foo(tup, idxs, vals):
out_tup = tup
for i, v in zip(idxs, vals):
out_tup = tuple_setitem(out_tup, i, v)
return tup, out_tup
random.seed(123)
for _ in range(20):
# Random data
n = random.randint(1, 10)
tup = tuple([random.randint(0, n) for i in range(n)])
vals = tuple([random.randint(10, 20) for i in range(n)])
idxs = list(range(len(vals)))
random.shuffle(idxs)
idxs = tuple(idxs)
# Expect
expect_tup = tuple(tup)
expect_out = np.asarray(expect_tup)
expect_out[np.asarray(idxs)] = vals
# Got
got_tup, got_out = foo(tup, idxs, vals)
# Check
self.assertEqual(got_tup, expect_tup)
self.assertEqual(got_out, tuple(expect_out))
def test_slice_tuple(self):
@njit
def full_slice_array(a, n):
# Since numba slices can't be boxed at the moment
return a[build_full_slice_tuple(literally(n))]
for n in range(1, 3):
a = np.random.random(np.arange(n) + 1)
for i in range(1, n + 1):
np.testing.assert_array_equal(a, full_slice_array(a, i))
with self.assertRaises(TypingError):
# numpy would throw an IndexError here
full_slice_array(a, n + 1)
class TestNdarrayIntrinsic(TestCase):
"""Tests for numba.unsafe.ndarray
"""
def test_to_fixed_tuple(self):
const = 3
@njit
def foo(array):
a = to_fixed_tuple(array, length=1)
b = to_fixed_tuple(array, 2)
c = to_fixed_tuple(array, const)
d = to_fixed_tuple(array, 0)
return a, b, c, d
np.random.seed(123)
for _ in range(10):
# Random data
arr = np.random.random(3)
# Run
a, b, c, d = foo(arr)
# Check
self.assertEqual(a, tuple(arr[:1]))
self.assertEqual(b, tuple(arr[:2]))
self.assertEqual(c, tuple(arr[:3]))
self.assertEqual(d, ())
# Check error with ndim!=1
with self.assertRaises(TypingError) as raises:
foo(np.random.random((1, 2)))
self.assertIn("Not supported on array.ndim=2",
str(raises.exception))
# Check error with non-constant length
@njit
def tuple_with_length(array, length):
return to_fixed_tuple(array, length)
with self.assertRaises(TypingError) as raises:
tuple_with_length(np.random.random(3), 1)
expectmsg = "*length* argument must be a constant"
self.assertIn(expectmsg, str(raises.exception))
def test_issue_3586_variant1(self):
@njit
def func():
S = empty_inferred((10,))
a = 1.1
for i in range(len(S)):
S[i] = a + 2
return S
got = func()
expect = np.asarray([3.1] * 10)
np.testing.assert_array_equal(got, expect)
def test_issue_3586_variant2(self):
@njit
def func():
S = empty_inferred((10,))
a = 1.1
for i in range(S.size):
S[i] = a + 2
return S
got = func()
expect = np.asarray([3.1] * 10)
np.testing.assert_array_equal(got, expect)
class TestBytesIntrinsic(TestCase):
"""Tests for numba.unsafe.bytes
"""
def test_memcpy_region(self):
@njit
def foo(dst, dst_index, src, src_index, nbytes):
            # the last arg assumes 1-byte alignment
memcpy_region(dst.ctypes.data, dst_index,
src.ctypes.data, src_index, nbytes, 1)
d = np.zeros(10, dtype=np.int8)
s = np.arange(10, dtype=np.int8)
# copy s[1:6] to d[4:9]
foo(d, 4, s, 1, 5)
expected = [0, 0, 0, 0, 1, 2, 3, 4, 5, 0]
np.testing.assert_array_equal(d, expected)
class TestRefCount(TestCase):
def test_dump_refcount(self):
@njit
def use_dump_refcount():
a = np.ones(10)
b = (a, a)
dump_refcount(a)
dump_refcount(b)
# Capture output to sys.stdout
with captured_stdout() as stream:
use_dump_refcount()
output = stream.getvalue()
# Check that it printed
pat = "dump refct of {}"
aryty = types.float64[::1]
tupty = types.Tuple.from_types([aryty] * 2)
self.assertIn(pat.format(aryty), output)
self.assertIn(pat.format(tupty), output)
class TestZeroCounts(TestCase):
def test_zero_count(self):
lz = njit(lambda x: leading_zeros(x))
tz = njit(lambda x: trailing_zeros(x))
evens = [2, 42, 126, 128]
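        # Even values have at least one trailing zero bit, while value + 1 is odd and must have none.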
for T in types.unsigned_domain:
self.assertTrue(tz(T(0)) == lz(T(0)) == T.bitwidth)
for i in range(T.bitwidth):
val = T(2 ** i)
self.assertEqual(lz(val) + tz(val) + 1, T.bitwidth)
for n in evens:
self.assertGreater(tz(T(n)), 0)
self.assertEqual(tz(T(n + 1)), 0)
for T in types.signed_domain:
self.assertTrue(tz(T(0)) == lz(T(0)) == T.bitwidth)
for i in range(T.bitwidth - 1):
val = T(2 ** i)
self.assertEqual(lz(val) + tz(val) + 1, T.bitwidth)
self.assertEqual(lz(-val), 0)
self.assertEqual(tz(val), tz(-val))
for n in evens:
self.assertGreater(tz(T(n)), 0)
self.assertEqual(tz(T(n + 1)), 0)
def METHOD_NAME(self, func):
cfunc = njit(lambda *x: func(*x))
func_name = func._name
unsupported_types = filter(
lambda x: not isinstance(x, types.Integer), types.number_domain
)
for typ in sorted(unsupported_types, key=str):
with self.assertRaises(TypingError) as e:
cfunc(typ(2))
self.assertIn(
"{} is only defined for integers, but value passed was '{}'."
.format(func_name, typ),
str(e.exception),
)
# Testing w/ too many/few arguments
def check(args, string):
with self.assertRaises((TypingError, TypeError)) as e:
cfunc(*args)
self.assertIn(
"{}() ".format(func_name),
str(e.exception)
)
check((1, 2), "takes 2 positional arguments but 3 were given")
check((), "missing 1 required positional argument")
def test_trailing_zeros_error(self):
self.METHOD_NAME(trailing_zeros)
def test_leading_zeros_error(self):
self.METHOD_NAME(leading_zeros)
|
1,540 |
ip found
|
import ipaddress
import logging
import random
import socket
import re
import struct
from abc import ABCMeta, abstractmethod
from typing import Iterable, List, Tuple
logger = logging.getLogger(__name__)
class InvalidNetworkRangeError(Exception):
"""Raise when invalid network range is provided"""
class NetworkRange(object, metaclass=ABCMeta):
DOMAIN_LABEL_PATTERN = re.compile(r"(?!-)[a-z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
TLD_PATTERN = re.compile(r"[0-9]+$")
def __init__(self, shuffle=True):
self._shuffle = shuffle
def get_range(self):
"""
:return: Returns a sequence of IPs in an internal format (might be numbers)
"""
return self._get_range()
def __iter__(self):
"""
Iterator of ip addresses (strings) from the current range.
Use get_range if you want it in one go.
:return:
"""
base_range = self.get_range()
if self._shuffle:
random.shuffle(base_range) # noqa: DUO102
for x in base_range:
yield self._number_to_ip(x)
@abstractmethod
def is_in_range(self, ip_address):
raise NotImplementedError()
@abstractmethod
def _get_range(self):
raise NotImplementedError()
@staticmethod
def get_range_obj(address_str):
if not address_str: # Empty string
return None
address_str = address_str.strip()
if address_str.endswith("/32"):
address_str = address_str[:-3]
if NetworkRange.check_if_hostname(address_str):
return SingleIpRange(ip_address=address_str)
if NetworkRange.check_if_range(address_str):
return IpRange(ip_range=address_str)
if "/" in address_str:
return CidrRange(cidr_range=address_str)
return SingleIpRange(ip_address=address_str)
@staticmethod
def filter_invalid_ranges(ranges: Iterable[str], error_msg: str) -> List[str]:
valid_ranges = []
for target_range in ranges:
try:
NetworkRange.validate_range(target_range)
except InvalidNetworkRangeError as e:
logger.error(f"{error_msg} {e}")
continue
valid_ranges.append(target_range)
return valid_ranges
@staticmethod
def validate_range(address_str: str):
try:
NetworkRange.get_range_obj(address_str)
except (ValueError, OSError) as e:
raise InvalidNetworkRangeError(e)
@staticmethod
def check_if_hostname(hostname: str):
if len(hostname) > 253 or hostname[-1] == ".":
return False
labels = hostname.split(".")
# the TLD must be not all-numeric
if NetworkRange.TLD_PATTERN.match(labels[-1]):
return False
return all([NetworkRange.DOMAIN_LABEL_PATTERN.match(label) for label in labels])
@staticmethod
def check_if_range(address_str: str):
if -1 != address_str.find("-"):
try:
NetworkRange._range_to_ips(address_str)
except ValueError:
return False
return True
return False
@staticmethod
def _range_to_ips(ip_range: str) -> Tuple[str, str]:
ips = ip_range.split("-")
ips = [ip.strip() for ip in ips]
ips = sorted(ips, key=lambda ip: socket.inet_aton(ip))
return ips[0], ips[1]
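    # The two helpers below convert dotted-quad strings to/from unsigned 32-bit big-endian
    # integers ('>L'), which lets IP ranges be generated and iterated numerically.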
@staticmethod
def _ip_to_number(address):
return struct.unpack(">L", socket.inet_aton(str(address)))[0]
@staticmethod
def _number_to_ip(num):
return socket.inet_ntoa(struct.pack(">L", num))
class CidrRange(NetworkRange):
def __init__(self, cidr_range, shuffle=True):
super(CidrRange, self).__init__(shuffle=shuffle)
self._cidr_range = cidr_range.strip()
self._ip_network = ipaddress.ip_network(str(self._cidr_range), strict=False)
def __repr__(self):
return "<CidrRange %s>" % (self._cidr_range,)
def is_in_range(self, ip_address):
return ipaddress.ip_address(ip_address) in self._ip_network
def _get_range(self):
return [
CidrRange._ip_to_number(str(x))
for x in self._ip_network
if x != self._ip_network.broadcast_address
]
class IpRange(NetworkRange):
def __init__(self, ip_range=None, lower_end_ip=None, higher_end_ip=None, shuffle=True):
super(IpRange, self).__init__(shuffle=shuffle)
if ip_range is not None:
self._lower_end_ip, self._higher_end_ip = IpRange._range_to_ips(ip_range)
elif (lower_end_ip is not None) and (higher_end_ip is not None):
self._lower_end_ip = lower_end_ip.strip()
self._higher_end_ip = higher_end_ip.strip()
else:
raise ValueError("Illegal IP range: %s" % ip_range)
self._lower_end_ip_num = self._ip_to_number(self._lower_end_ip)
self._higher_end_ip_num = self._ip_to_number(self._higher_end_ip)
if self._higher_end_ip_num < self._lower_end_ip_num:
raise ValueError(
"Higher end IP %s is smaller than lower end IP %s"
% (self._lower_end_ip, self._higher_end_ip)
)
def __repr__(self):
return "<IpRange %s-%s>" % (self._lower_end_ip, self._higher_end_ip)
def is_in_range(self, ip_address):
return self._lower_end_ip_num <= self._ip_to_number(ip_address) <= self._higher_end_ip_num
def _get_range(self):
return list(range(self._lower_end_ip_num, self._higher_end_ip_num + 1))
class SingleIpRange(NetworkRange):
def __init__(self, ip_address, shuffle=True):
super(SingleIpRange, self).__init__(shuffle=shuffle)
self._ip_address, self.domain_name = self.string_to_host(ip_address)
def __repr__(self):
return "<SingleIpRange %s>" % (self._ip_address,)
def __iter__(self):
"""
        We have to check if we have an IP to return, because the user could have entered an
        invalid domain name for which no IP was found
:return: IP if there is one
"""
if self.METHOD_NAME():
yield self._number_to_ip(self.get_range()[0])
def is_in_range(self, ip_address):
return self._ip_address == str(ip_address)
def _get_range(self):
return [SingleIpRange._ip_to_number(self._ip_address)]
def METHOD_NAME(self):
"""
        Checks if we could translate the domain name entered into an IP address
        :return: True if DNS resolved the domain name and False otherwise
"""
return self._ip_address
@staticmethod
def string_to_host(string_):
"""
        Converts the string that the user entered in "Scan IP/subnet list" to a tuple of domain name
and ip
:param string_: String that was entered in "Scan IP/subnet list"
:return: A tuple in format (IP, domain_name). Eg. (192.168.55.1, www.google.com)
"""
# The most common use case is to enter ip/range into "Scan IP/subnet list"
domain_name = None
if " " in string_:
raise ValueError(f'"{string_}" is not a valid IP address or domain name.')
# Try casting user's input as IP
try:
ip = ipaddress.ip_address(string_).exploded
except ValueError:
# Exception means that it's a domain name
try:
ip = socket.gethostbyname(string_)
domain_name = string_
except socket.error:
raise ValueError(
"Your specified host: {} is not found as a domain name and"
" it's not an IP address".format(string_)
)
# If a string_ was entered instead of IP we presume that it was domain name and translate it
return ip, domain_name
|
1,541 |
test create arr
|
# pylint: disable=missing-function-docstring, missing-module-docstring, reimported
import numpy as np
from pyccel.epyccel import epyccel
def test_single_return_var_assign(language):
def single_return_var_assign():
y = 3
return y
epyc_single_return_var_assign = epyccel(single_return_var_assign, language=language)
assert (epyc_single_return_var_assign() == single_return_var_assign())
def test_assign_vars_return(language):
def assign_vars_return(a : 'int', b : 'int'):
c = a+b
d = a-b
return c+d
epyc_assign_vars_return = epyccel(assign_vars_return, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_assign_vars_return(3, 4) == assign_vars_return(3, 4))
def test_sum_in_single_return(language):
def sum_in_single_return(a : 'int', b : 'int'):
c = a + b
return c
epyc_sum_in_single_return = epyccel(sum_in_single_return, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_sum_in_single_return(7, 2) == sum_in_single_return(7, 2))
def test_return_expr(language):
def return_expr(x : 'int', y : 'int'):
return x + y
epyc_return_expr = epyccel(return_expr, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_return_expr(7, 2) == return_expr(7, 2))
def test_return_single_var(language):
def return_single_var(x : 'int'):
return x
epyc_return_single_var = epyccel(return_single_var, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_return_single_var(7) == return_single_var(7))
def test_return_scalare(language):
def return_scalare():
return 5
epyc_return_scalare = epyccel(return_scalare, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_return_scalare() == return_scalare())
def test_multi_return_scalare(language):
def multi_return_scalare():
return 5, 7
epyc_multi_return_scalare = epyccel(multi_return_scalare, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_multi_return_scalare() == multi_return_scalare())
def test_multi_return_vars(language):
def multi_return_vars(a : 'int', b : 'int'):
return a, b
epyc_multi_return_vars = epyccel(multi_return_vars, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_multi_return_vars(7, 2) == multi_return_vars(7, 2))
def test_multi_return_vars_expr(language):
def multi_return_vars_expr(a : 'int', b : 'int'):
return (a-b), (a+b)
epyc_multi_return_vars_expr = epyccel(multi_return_vars_expr, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_multi_return_vars_expr(7, 2) == multi_return_vars_expr(7, 2))
def test_scalare_multi_return_stmts(language):
def scalare_multi_return_stmts(a : 'int'):
a = 7
if a:
return 1
else:
return 2
a = 4
return a
epyc_scalare_multi_return_stmts = epyccel(scalare_multi_return_stmts, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_scalare_multi_return_stmts(7) == scalare_multi_return_stmts(7))
def METHOD_NAME(language):
def create_arr(i : int):
import numpy as np
_ = np.ones(i)
return True
epyc_create_arr = epyccel(create_arr, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_create_arr(7) == create_arr(7))
def test_return_arr_element(language):
def return_arr_element(i : int):
import numpy as np
a = np.ones(i)
return a[0]
epyc_return_arr_element = epyccel(return_arr_element, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_return_arr_element(7) == return_arr_element(7))
def test_create_multi_arrs(language):
def create_multi_arrs(i : int):
import numpy as np
_ = np.ones(i)
_ = np.zeros(i)
_ = np.zeros(i)
return True
epyc_create_multi_arrs = epyccel(create_multi_arrs, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_create_multi_arrs(7) == create_multi_arrs(7))
def test_expr_arrs_elements(language):
def expr_arrs_elements(i : int):
import numpy as np
a = np.ones(i)
b = np.zeros(i)
return a[i - 1]+b[i - 1]
epyc_expr_arrs_elements = epyccel(expr_arrs_elements, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_expr_arrs_elements(7) == expr_arrs_elements(7))
def test_complex_expr(language):
def complex_expr(i : int):
import numpy as np
a = np.ones(i)
return ((4 + 5)/(6 - 3) * a[0])%(9 - a[1])
epyc_complex_expr = epyccel(complex_expr, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_complex_expr(7) == complex_expr(7))
def test_multi_allocs(language):
def multi_allocs(i :int):
import numpy as np
a = np.ones(i)
b = np.ones(i)
c = np.ones(i)
d = np.ones(i)
e = np.ones(i)
return ((4 + 5)/(d[0] + e[2]) * c[0])%(b[2] + a[1]) - 4
epyc_multi_allocs = epyccel(multi_allocs, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_multi_allocs(7) == multi_allocs(7))
def test_return_nothing(language):
def divide_by(a : 'float[:]', b : 'float'):
if abs(b)<0.1:
return
for i,ai in enumerate(a):
a[i] = ai/b
epyc_divide_by = epyccel(divide_by, language=language)
x = np.ones(5)
x_copy = x.copy()
b = 0.01
divide_by(x,b)
epyc_divide_by(x_copy,b)
assert np.allclose(x, x_copy, rtol=1e-13, atol=1e-14)
b = 4.0
divide_by(x,b)
epyc_divide_by(x_copy,b)
assert np.allclose(x, x_copy, rtol=1e-13, atol=1e-14)
def test_return_None(language):
def divide_by(a : 'float[:]', b : 'float'): # pylint: disable=inconsistent-return-statements
if abs(b)<0.1:
return None
for i,ai in enumerate(a):
a[i] = ai/b
epyc_divide_by = epyccel(divide_by, language=language)
x = np.ones(5)
x_copy = x.copy()
b = 0.01
divide_by(x,b)
epyc_divide_by(x_copy,b)
assert np.allclose(x, x_copy, rtol=1e-13, atol=1e-14)
b = 4.0
divide_by(x,b)
epyc_divide_by(x_copy,b)
assert np.allclose(x, x_copy, rtol=1e-13, atol=1e-14)
def test_arg_arr_element_op(language):
def return_mult_arr_arg_element(i: 'int', arg:'float[:]'):
import numpy as np
a = np.ones(i)
return a[0] * arg[0]
def return_add_arr_arg_element(i: 'int', arg:'float[:]'):
import numpy as np
a = np.ones(i)
return a[0] + arg[0]
def return_op_arr_arg_element(i: 'int', arg:'float[:]'):
import numpy as np
a = np.ones(i)
return ((a[2] + arg[0]) * arg[2] - 2) / 4 * 2
arr = np.array([1,2,3,4], dtype=float)
epyc_return_mult_arr_arg_element = epyccel(return_mult_arr_arg_element, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_return_mult_arr_arg_element(7, arr) == return_mult_arr_arg_element(7, arr))
epyc_return_add_arr_arg_element = epyccel(return_add_arr_arg_element, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_return_add_arr_arg_element(7, arr) == return_add_arr_arg_element(7, arr))
epyc_return_op_arr_arg_element = epyccel(return_op_arr_arg_element, language=language, fflags="-Werror -Wunused-variable")
assert (epyc_return_op_arr_arg_element(7, arr) == return_op_arr_arg_element(7, arr))
|
1,542 |
check ping
|
import time
import re
import random
from virttest import error_context
from virttest import utils_test
from virttest import utils_net
from virttest import utils_misc
@error_context.context_aware
def run(test, params, env):
"""
Test failover by team driver
1) Boot a vm with 4 nics.
2) inside guest, configure the team driver.
3) inside guest, ping host
4) inside guest, repeated down the slaves one by one.
5) check ping_result.
:param test: Kvm test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
def team_port_add(ifnames, team_if):
"""Team0 add ports and return the ip link result for debuging"""
for port in ifnames:
session_serial.cmd_output_safe(params["clearip_cmd"] % port)
session_serial.cmd_output_safe(params["setdown_cmd"] % port)
session_serial.cmd_output_safe(params["addport_cmd"] % port)
output_teamnl = session_serial.cmd_output_safe(params["portchk_cmd"])
ports = re.findall(r"%s" % params["ptn_teamnl"], output_teamnl)
for port in ifnames:
if port not in ports:
test.fail("Add %s to %s failed." % (port, team_if))
session_serial.cmd_output_safe(params["killdhclient_cmd"])
output = session_serial.cmd_output_safe(params["getip_cmd"],
timeout=300)
team_ip = re.search(r"%s" % params["ptn_ipv4"], output).group()
if not team_ip:
test.fail("Failed to get ip address of %s" % team_if)
return ports, team_ip
def failover(ifnames, timeout):
"""func for failover"""
time.sleep(3)
starttime = time.time()
while True:
pid_ping = session_serial.cmd_output_safe("pidof ping")
pid = re.findall(r"(\d+)", pid_ping)
if not pid:
break
# if ping finished, will break the loop.
for port in ifnames:
session_serial.cmd_output_safe(params["setdown_cmd"] % port)
time.sleep(random.randint(5, 30))
session_serial.cmd_output_safe(params["setup_cmd"] % port)
endtime = time.time()
timegap = endtime - starttime
if timegap > timeout:
break
def METHOD_NAME(status, output):
""" ratio <5% is acceptance."""
if status != 0:
test.fail("Ping failed, staus:%s, output:%s" % (status, output))
# if status != 0 the ping process seams hit issue.
ratio = utils_test.get_loss_ratio(output)
if ratio == -1:
test.fail("The ratio is %s, and status is %s, "
"output is %s" % (ratio, status, output))
elif ratio > int(params["failed_ratio"]):
test.fail("The loss raito is %s, test failed" % ratio)
test.log.info("ping pass with loss raito:%s, that less than %s",
ratio, params["failed_ratio"])
def team_if_exist():
""" judge if team is alive well."""
team_exists_cmd = params.get("team_if_exists_cmd")
return session_serial.cmd_status(team_exists_cmd, safe=True) == 0
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 1200))
session_serial = vm.wait_for_serial_login(timeout=timeout)
ifnames = [utils_net.get_linux_ifname(session_serial,
vm.get_mac_address(vlan))
for vlan, nic in enumerate(vm.virtnet)]
session_serial.cmd_output_safe(params["nm_stop_cmd"])
team_if = params.get("team_if")
# initial
error_context.context("Step1: Configure the team environment",
test.log.info)
# steps of building the teaming environment starts
modprobe_cmd = "modprobe team"
session_serial.cmd_output_safe(modprobe_cmd)
session_serial.cmd_output_safe(params["createteam_cmd"])
    # this cmd is to create the team0 and corresponding userspace daemon
if not team_if_exist():
test.fail("Interface %s is not created." % team_if)
# check if team0 is created successfully
ports, team_ip = team_port_add(ifnames, team_if)
test.log.debug("The list of the ports that added to %s : %s",
team_if, ports)
test.log.debug("The ip address of %s : %s", team_if, team_ip)
output = session_serial.cmd_output_safe(params["team_debug_cmd"])
test.log.debug("team interface configuration: %s", output)
route_cmd = session_serial.cmd_output_safe(params["route_cmd"])
test.log.debug("The route table of guest: %s", route_cmd)
# this is not this case checkpoint, just to check if route works fine
# steps of building finished
try:
error_context.context("Login in guest via ssh", test.log.info)
# steps of testing this case starts
session = vm.wait_for_login(timeout=timeout)
dest = utils_net.get_ip_address_by_interface(params["netdst"])
count = params.get("count")
timeout = float(count) * 2
error_context.context("Step2: Check if guest can ping out:",
test.log.info)
status, output = utils_test.ping(dest=dest, count=10,
interface=team_if,
timeout=30,
session=session)
METHOD_NAME(status, output)
# small ping check if the team0 works w/o failover
error_context.context("Step3: Start failover testing until "
"ping finished", test.log.info)
failover_thread = utils_misc.InterruptedThread(failover,
(ifnames, timeout))
failover_thread.start()
# start failover loop until ping finished
error_context.context("Step4: Start ping host for %s counts"
% count, test.log.info)
if failover_thread.is_alive():
status, output = utils_test.ping(dest=dest, count=count,
interface=team_if,
timeout=float(count) * 1.5,
session=session)
error_context.context("Step5: Check if ping succeeded",
test.log.info)
METHOD_NAME(status, output)
else:
test.error("The failover thread is not alive")
time.sleep(3)
try:
timeout = timeout * 1.5
failover_thread.join(timeout)
except Exception:
test.error("Failed to join the failover thread")
# finish the main steps and check the result
session_serial.cmd_output_safe(params["killteam_cmd"])
if team_if_exist():
test.fail("Remove %s failed" % team_if)
test.log.info("%s removed", team_if)
# remove the team0 and the daemon, check if succeed
finally:
if session:
session.close()
|
1,543 |
convert monster to win10 json
|
import json
import random
from herders.models import MonsterInstance, BuildingInstance
from .rune_optimizer_mapping import *
def export_win10(summoner):
# Fake storage building
storage_bldg_id = 1234567890
buildings = [
{
'building_master_id': 25,
'building_id': storage_bldg_id,
}
]
# Build the unit list
unit_list = []
for m in MonsterInstance.objects.filter(owner=summoner):
unit_list.append(METHOD_NAME(m))
# Build the rune list
runes = []
for r in RuneInstance.objects.filter(owner=summoner, assigned_to=None):
runes.append(_convert_rune_to_win10_json(r))
# Build the rune craft list
rune_craft_item_list = []
for c in RuneCraftInstance.objects.filter(owner=summoner):
rune_craft_item_list.append(_convert_rune_craft_to_win10_json(c))
# Build the artifact list
artifact_list = []
for a in ArtifactInstance.objects.filter(owner=summoner):
artifact_list.append(_convert_artifact_to_win10_json(a))
# Build the artifact craft list
artifact_craft_list = []
for c in ArtifactCraftInstance.objects.filter(owner=summoner):
artifact_craft_list.append(_convert_artifact_craft_to_win10_json(c))
# Build the decoration list
deco_list = []
for d in BuildingInstance.objects.filter(owner=summoner):
deco_list.append(_convert_deco_to_win10(d))
return json.dumps({
'building_list': buildings,
'unit_list': unit_list,
'runes': runes,
'rune_craft_item_list': rune_craft_item_list,
'artifacts': artifact_list,
'artifact_crafts': artifact_craft_list,
'deco_list': deco_list,
'wizard_id': summoner.com2us_id if summoner.com2us_id else 0,
})
def _convert_rune_to_win10_json(rune):
exported_rune = {
'occupied_type': 1,
'occupied_id': rune.assigned_to.com2us_id if rune.assigned_to else 0,
'sell_value': rune.value,
'pri_eff': [rune_stat_type_map[rune.main_stat], rune.main_stat_value],
'prefix_eff': [rune_stat_type_map[rune.innate_stat], rune.innate_stat_value] if rune.innate_stat else [0, 0],
'slot_no': rune.slot,
'rank': 0,
'sec_eff': [],
'upgrade_curr': rune.level,
'class': rune.stars,
'set_id': rune_set_map[rune.type],
'upgrade_limit': 15,
'rune_id': rune.com2us_id if rune.com2us_id else random.randint(1, 999999999),
'extra': quality_map.get(rune.original_quality, 0),
}
if rune.ancient:
exported_rune['class'] += 10
exported_rune['extra'] += 10
for substat, value, enchanted, grind_value in zip(rune.substats, rune.substat_values, rune.substats_enchanted, rune.substats_grind_value):
exported_rune['sec_eff'].append([
rune_stat_type_map[substat],
value,
1 if enchanted else 0,
grind_value,
])
return exported_rune
def METHOD_NAME(monster):
mon_json = {
'unit_id': monster.com2us_id if monster.com2us_id else random.randint(1, 999999999),
'unit_master_id': monster.monster.com2us_id,
'building_id': 1234567890 if monster.in_storage else 0,
'island_id': 0,
'homunculus': 1 if monster.monster.homunculus else 0,
'attribute': element_map[monster.monster.element],
'unit_level': monster.level,
'class': monster.stars,
'con': monster.base_hp / 15,
'def': monster.base_defense,
'atk': monster.base_attack,
'spd': monster.base_speed,
'critical_rate': monster.base_crit_rate,
'critical_damage': monster.base_crit_damage,
'accuracy': monster.base_accuracy,
'resist': monster.base_resistance,
'skills': [],
'runes': [],
'artifacts': [],
}
# Fill in skills
skill_levels = [
monster.skill_1_level,
monster.skill_2_level,
monster.skill_3_level,
monster.skill_4_level,
]
for idx, skill in enumerate(monster.monster.skills.all().order_by('slot')):
mon_json['skills'].append([
skill.com2us_id,
skill_levels[idx]
])
# Fill in runes and artifacts
for rune in monster.runes.all():
mon_json['runes'].append(_convert_rune_to_win10_json(rune))
for artifact in monster.artifacts.all():
mon_json['artifacts'].append(_convert_artifact_to_win10_json(artifact))
return mon_json
def _convert_rune_craft_to_win10_json(craft):
quality = quality_map[craft.quality]
stat = rune_stat_type_map[craft.stat]
rune_set = rune_set_map.get(craft.rune, 99)
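    # craft_type_id packs set, stat and quality into one integer: <set><stat:2 digits><quality:2 digits>.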
return {
'craft_type_id': int('{:d}{:02d}{:02d}'.format(rune_set, stat, quality)),
'craft_type': craft_type_map[craft.type],
'craft_item_id': craft.com2us_id if craft.com2us_id else random.randint(1, 999999999),
'amount': craft.quantity,
}
def _convert_artifact_to_win10_json(artifact):
effects_data = zip(
[artifact_effect_map[eff] for eff in artifact.effects],
artifact.effects,
artifact.effects_value,
artifact.effects_upgrade_count,
[0] * len(artifact.effects),
artifact.effects_reroll_count,
)
sec_eff = []
for effect in effects_data:
sec_eff.append(list(effect))
return {
'rid': artifact.com2us_id,
'occupied_id': artifact.assigned_to.com2us_id if artifact.assigned_to else 0,
'slot': 0,
'type': artifact_type_map[artifact.slot],
'attribute': element_map[artifact.element] if artifact.element else 0,
'unit_style': archetype_map[artifact.archetype] if artifact.archetype else 0,
'natural_rank': quality_map[artifact.original_quality],
'rank': quality_map[artifact.quality],
'level': artifact.level,
'pri_effect': [
artifact_main_stat_map[artifact.main_stat],
artifact.main_stat_value,
artifact.level,
0,
0,
],
'sec_effects': sec_eff,
}
def _convert_artifact_craft_to_win10_json(craft):
craft_type = artifact_type_map[craft.slot]
element = element_map[craft.element] if craft.element else 0
archetype = archetype_map[craft.archetype] if craft.archetype else 0
quality = quality_map[craft.quality]
effect = artifact_effect_map[craft.effect]
return {
'master_id': int(f'1{craft_type:02d}{element:02d}{archetype:02d}{quality:02d}{effect:03d}'),
'type': craft_type,
'quantity': craft.quantity,
}
def _convert_deco_to_win10(decoration):
return {
'master_id': decoration.building.com2us_id,
'level': decoration.level,
}
|
1,544 |
async io
|
# -*- coding: utf-8 -*-
##########################################################################
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
import asyncio
import sys
import aerospike
import time
from aerospike_helpers.awaitable import io
from optparse import OptionParser
##########################################################################
# Options Parsing
##########################################################################
usage = "usage: %prog [options]"
optparser = OptionParser(usage=usage, add_help_option=False)
optparser.add_option(
"--help", dest="help", action="store_true",
help="Displays this message.")
optparser.add_option(
"-U", "--username", dest="username", type="string", default="ram", metavar="<USERNAME>",
help="Username to connect to database.")
optparser.add_option(
"-P", "--password", dest="password", type="string", default="ram", metavar="<PASSWORD>",
help="Password to connect to database.")
optparser.add_option(
"-h", "--host", dest="host", type="string", default="as-s1.as-network.com", metavar="<ADDRESS>",
help="Address of Aerospike server.")
optparser.add_option(
"-p", "--port", dest="port", type="int", default=3000, metavar="<PORT>",
help="Port of the Aerospike server.")
optparser.add_option(
"--timeout", dest="timeout", type="int", default=1000, metavar="<MS>",
help="Client timeout")
optparser.add_option(
"-n", "--namespace", dest="namespace", type="string", default="test", metavar="<NS>",
help="Namespace of database.")
optparser.add_option(
"-s", "--set", dest="set", type="string", default="demo", metavar="<SET>",
help="Set to use within namespace of database.")
optparser.add_option(
"-c", "--test_count", dest="test_count", type="int", default=128, metavar="<TEST_COUNT>",
help="Number of async IO to spawn.")
(options, args) = optparser.parse_args()
if options.help:
optparser.print_help()
print()
sys.exit(1)
##########################################################################
# Client Configuration
##########################################################################
config = {
'hosts': [(options.host, options.port)],
'policies': {
'total_timeout': options.timeout
}
}
##########################################################################
# Application
##########################################################################
exitCode = 0
try:
# ----------------------------------------------------------------------------
# Connect to Cluster
# ----------------------------------------------------------------------------
print(f"Connecting to {options.host}:{options.port} with {options.username}:{options.password}")
client = aerospike.client(config).connect(
options.username, options.password)
# ----------------------------------------------------------------------------
# Perform Operation
# ----------------------------------------------------------------------------
try:
io_results = {}
test_count = options.test_count
namespace = options.namespace if options.namespace and options.namespace != 'None' else None
set = options.set if options.set and options.set != 'None' else None
policy = {
'total_timeout': options.timeout
}
meta = None
print(f"IO async test count:{test_count}")
async def METHOD_NAME(namespace, set, i):
futures = []
key = (namespace, \
set, \
str(i), \
client.get_key_digest(namespace, set, str(i)))
record = {
'i': i,
'f': 3.1415,
's': 'abc',
'u': '안녕하세요',
#'b': bytearray(['d','e','f']),
'l': [i, 'abc', 'வணக்கம்', ['x', 'y', 'z'], {'x': 1, 'y': 2, 'z': 3}],
'm': {'i': i, 's': 'abc', 'u': 'ஊத்தாப்பம்', 'l': ['x', 'y', 'z'], 'd': {'x': 1, 'y': 2, 'z': 3}}
}
context = {'state': 0, 'result': {}}
io_results[key[2]] = context
result = None
try:
result = await io.put(client, key, record, meta, policy)
except Exception as eargs:
print(f"error: {eargs.code}, {eargs.msg}, {eargs.file}, {eargs.line}")
pass
io_results[key[2]]['result'] = result
async def main():
func_list = []
for i in range(test_count):
func_list.append(METHOD_NAME(namespace, set, i))
await asyncio.gather(*func_list)
asyncio.get_event_loop().run_until_complete(main())
print(f"put_async completed with returning {len(io_results)} records")
#print(io_results)
except Exception as e:
print("error: {0}".format(e), file=sys.stderr)
        exitCode = 1
# ----------------------------------------------------------------------------
# Close Connection to Cluster
# ----------------------------------------------------------------------------
client.close()
except Exception as eargs:
print("error: {0}".format(eargs), file=sys.stderr)
exitCode = 3
##########################################################################
# Exit
##########################################################################
sys.exit(exitCode)
|
1,545 |
write generic log event
|
import csv
from datetime import datetime, timedelta
from itertools import chain
from django.contrib.auth.models import User
from django.db.models import ForeignKey, Min
import attr
from dimagi.utils.parsing import string_to_datetime
from corehq.apps.users.models import Invitation, WebUser
from corehq.util.models import ForeignValue
from ..models import AccessAudit, NavigationEventAudit
def navigation_events_by_user(user, start_date=None, end_date=None):
where = get_date_range_where(start_date, end_date)
query = NavigationEventAudit.objects.filter(user=user, **where)
return AuditWindowQuery(query)
def write_log_events(writer, user, domain=None, override_user=None, start_date=None, end_date=None):
start_date = string_to_datetime(start_date).replace(tzinfo=None) if start_date else None
end_date = string_to_datetime(end_date).replace(tzinfo=None) if end_date else None
for event in navigation_events_by_user(user, start_date, end_date):
if not domain or domain == event.domain:
write_log_event(writer, event, override_user)
def write_log_event(writer, event, override_user=None):
if override_user:
event.user = override_user
writer.writerow([
event.event_date,
event.user,
event.domain,
event.ip_address,
event.request_method,
event.request_path
])
def get_users_for_domain(domain):
users = {u.username for u in WebUser.by_domain(domain)}
super_users = {u['username'] for u in User.objects.filter(is_superuser=True).values('username')}
users_who_accepted_invitations = set(Invitation.objects.filter(
is_accepted=True,
domain=domain).values_list('email', flat=True)
)
removed_users = users_who_accepted_invitations - users
super_users = super_users - users
return users, removed_users, super_users
def get_all_log_events(start_date=None, end_date=None):
where = get_date_range_where(start_date, end_date)
return chain(
AuditWindowQuery(AccessAudit.objects.filter(**where)),
AuditWindowQuery(NavigationEventAudit.objects.filter(**where)),
)
def get_domain_first_access_times(domains, start_date=None, end_date=None):
"""Query NavigationEventAudit events for _first event matching any of
`domains` within each authenticated session_.
NOTE: This function does _not_ query couch.
NOTE: This function may return multiple "access events" from the same
session (if multiple `domains` were accessed in the same session).
Resulting SQL query:
```sql
SELECT
"user",
domain,
MIN(event_date) AS access_time
FROM auditcare_navigationeventaudit
WHERE (
domain IN ( {domains} )
AND event_date > {start_date}
AND event_date <= {end_date}
AND "user" IS NOT NULL
AND session_key IS NOT NULL
)
GROUP BY ("user", domain, session_key)
ORDER BY access_time ASC;
```
"""
where = get_date_range_where(start_date, end_date)
where["domain__in"] = domains
where["user__isnull"] = False
where["session_key__isnull"] = False
return (NavigationEventAudit.objects
.values("user", "domain", "session_key") # GROUP BY fields
.annotate(access_time=Min("event_date"))
.values("user", "domain", "access_time") # SELECT fields
.filter(**where)
.order_by("access_time")
.iterator())
def METHOD_NAME(writer, event):
action = ''
resource = ''
if event.doc_type == 'NavigationEventAudit':
action = event.request_method
resource = event.request_path
else:
assert event.doc_type == 'AccessAudit'
action = event.access_type
resource = event.path
writer.writerow([
event.event_date,
event.doc_type,
event.user,
event.domain,
event.ip_address,
action,
resource,
event.description,
])
def write_export_from_all_log_events(file_obj, start, end):
writer = csv.writer(file_obj)
writer.writerow(['Date', 'Type', 'User', 'Domain', 'IP Address', 'Action', 'Resource', 'Description'])
for event in get_all_log_events(start, end):
METHOD_NAME(writer, event)
def get_date_range_where(start_date, end_date):
"""Get ORM filter kwargs for inclusive event_date range"""
where = {}
if start_date:
where["event_date__gt"] = start_date.date()
if end_date:
where["event_date__lt"] = end_date.date() + timedelta(days=1)
return where
@attr.s(cmp=False)
class AuditWindowQuery:
query = attr.ib()
window_size = attr.ib(default=10000)
def __iter__(self):
"""Windowed query generator using WHERE/LIMIT
Adapted from https://github.com/sqlalchemy/sqlalchemy/wiki/WindowedRangeQuery
"""
query = self.query
last_date = None
last_ids = set()
while True:
qry = query
if last_date is not None:
qry = query.filter(event_date__gte=last_date).exclude(id__in=last_ids)
rec = None
for rec in qry.order_by("event_date")[:self.window_size]:
yield NoForeignQuery(rec)
if rec.event_date != last_date:
last_date = rec.event_date
last_ids = {rec.id}
else:
last_ids.add(rec.id)
if rec is None:
break
def get_foreign_names(model):
names = {f.name for f in model._meta.fields if isinstance(f, ForeignKey)}
names.update(ForeignValue.get_names(model))
return names
@attr.s
class NoForeignQuery:
"""Raise an error if a foreign key field is accessed
This is a hack to prevent downstream code from accessing related
objects, inadvertently triggering many extra queries.
See also: https://stackoverflow.com/questions/66496443
If a need arises for downstream code to access related fields,
`navigation_events_by_user` should be updated to use
`query.select_related` and/or `query.prefetch_related`, and this
class should be refactored accordingly.
"""
_obj = attr.ib()
def __attrs_post_init__(self):
self._fks = get_foreign_names(type(self._obj))
def __getattr__(self, name):
if name in self._fks:
raise ForeignKeyAccessError(name)
return getattr(self._obj, name)
class ForeignKeyAccessError(AttributeError):
pass
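# A minimal usage sketch (the output path and date strings below are illustrative only; this
# module needs a configured Django/CommCare HQ environment, so the call is shown commented out):
#
#     with open("audit_export.csv", "w") as csv_file:
#         start = string_to_datetime("2023-01-01").replace(tzinfo=None)
#         end = string_to_datetime("2023-02-01").replace(tzinfo=None)
#         write_export_from_all_log_events(csv_file, start, end)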
|
1,546 |
get minimum across quotas
|
"""
Rate limiter and quota system.
Framework-independent rate limiting mechanism that provides:
* IP address and token-based accounting
* customizable quotas based on IP address and token
* rate limiting based on resource usage (time spent on API calls)
* bucketing based on day, week, month
* statistics
* metrics
* fast in-memory storage
Also provides a connector for Flask
"""
import time
import ipaddress
from typing import Dict, List, Optional, Tuple, Union
IpAddress = Union[ipaddress.IPv4Address, ipaddress.IPv6Address]
IpAddrBucket = Dict[IpAddress, float]
IpAddrBuckets = Tuple[IpAddrBucket, IpAddrBucket, IpAddrBucket]
TokenBucket = Dict[str, float]
TokenBuckets = Tuple[TokenBucket, TokenBucket, TokenBucket]
class Limiter:
def __init__(
self,
limits: dict,
token_check_callback=None,
ipaddr_methods=["X-Real-Ip", "socket"],
        whitelisted_ipaddrs: Optional[List[str]] = None,
):
# Bucket sequence: month, week, day
self._ipaddr_limits = [
limits.get(l, None)
for l in ("ipaddr_per_month", "ipaddr_per_week", "ipaddr_per_day")
]
self._token_limits = [
limits.get(l, None)
for l in ("token_per_month", "token_per_week", "token_per_day")
]
self._ipaddr_buckets = ({}, {}, {}) # type: IpAddrBuckets
self._token_buckets = ({}, {}, {}) # type: TokenBuckets
self._token_check_callback = token_check_callback
self._ipaddr_extraction_methods = ipaddr_methods
self._last_quota_update_time = time.monotonic()
self._whitelisted_ipaddrs = set()
for ipa in whitelisted_ipaddrs or []:
self._whitelisted_ipaddrs.add(ipaddress.ip_address(ipa))
self.increment_quota_counters(1)
self.refresh_quota_counters_if_needed()
def increment_quota_counters(self, tdelta: int):
"""Delta: time from previous run in seconds"""
if tdelta <= 0:
return
        iterable = (
            (30 * 24, self._ipaddr_limits[0], self._ipaddr_buckets[0]),
            (7 * 24, self._ipaddr_limits[1], self._ipaddr_buckets[1]),
            (1 * 24, self._ipaddr_limits[2], self._ipaddr_buckets[2]),
            (30 * 24, self._token_limits[0], self._token_buckets[0]),
            (7 * 24, self._token_limits[1], self._token_buckets[1]),
            (1 * 24, self._token_limits[2], self._token_buckets[2]),
        )
for hours, limit, bucket in iterable:
vdelta = limit / hours / 3600 * tdelta
to_delete = []
for k, v in bucket.items():
v += vdelta
if v >= limit:
to_delete.append(k)
else:
bucket[k] = v
for k in to_delete:
del bucket[k]
def refresh_quota_counters_if_needed(self):
t = time.monotonic()
delta = t - self._last_quota_update_time
if delta > 3600:
self.increment_quota_counters(delta)
self._last_quota_update_time = t
def consume_quota(self, elapsed: float, ipaddr: Optional[IpAddress]=None, token=None) -> None:
"""Consume quota in seconds
"""
assert ipaddr or token
if ipaddr:
assert isinstance(ipaddr, ipaddress.IPv4Address)
for n, limit in enumerate(self._ipaddr_limits):
b = self._ipaddr_buckets[n]
b[ipaddr] = b.get(ipaddr, limit) - elapsed
else:
raise NotImplementedError()
def METHOD_NAME(self, ipaddr=None, token=None) -> float:
assert ipaddr or token
if ipaddr:
iterable = zip(self._ipaddr_limits, self._ipaddr_buckets)
return min(bucket.get(ipaddr, limit) for limit, bucket in iterable)
else:
raise NotImplementedError()
def is_quota_available(self, ipaddr=None, token=None) -> bool:
"""Check if all quota buckets for an ipaddr/token are > 0
"""
# return False if any bucket reached 0
for bucket in self._ipaddr_buckets:
if ipaddr in bucket:
if bucket[ipaddr] <= 0:
return False
return True
def is_ipaddr_whitelisted(self, ipaddr: IpAddress) -> bool:
return ipaddr in self._whitelisted_ipaddrs
def get_lowest_daily_quotas_summary(self, n=20) -> List[Tuple[int, float]]:
"""Returns a summary of daily quotas with the lowest values
"""
li = sorted((val, ipa) for ipa, val in self._ipaddr_buckets[2].items())
li = li[:n]
return [(int(ipa.packed[0]), val) for val, ipa in li]
# # Flask-specific code # #
from flask import request, current_app
import flask
class FlaskLimiter:
def _get_client_ipaddr(self) -> IpAddress:
# https://github.com/alisaifee/flask-limiter/issues/41
for m in self._limiter._ipaddr_extraction_methods:
if m == "X-Forwarded-For":
raise NotImplementedError("X-Forwarded-For ")
elif m == "X-Real-Ip":
ipaddr = request.headers.get("X-Real-Ip", None)
if ipaddr:
return ipaddress.ip_address(ipaddr)
elif m == "socket":
return ipaddress.ip_address(request.remote_addr)
else:
raise NotImplementedError(f"IP address method {m} is unknown")
methods = ",".join(self._limiter._ipaddr_extraction_methods)
raise Exception(f"Unable to detect IP address using {methods}")
def _check_limits_callback(self):
"""Check rate limits before processing a request
Refresh quota counters when needed
"""
self._limiter.refresh_quota_counters_if_needed()
ipaddr = self._get_client_ipaddr()
# token = request.headers.get("Token", None)
# if token:
# check token validity
if not self._limiter.is_quota_available(ipaddr=ipaddr):
flask.abort(429)
self._request_start_time = time.monotonic()
log = current_app.logger
log.error("_check_limits_callback called")
def _after_request_callback(self, response):
"""Consume quota and injects HTTP headers when responding to a request
"""
log = current_app.logger
try:
assert response
tdelta = time.monotonic() - self._request_start_time
ipaddr = self._get_client_ipaddr()
if not self._limiter.is_ipaddr_whitelisted(ipaddr):
self._limiter.consume_quota(tdelta, ipaddr=ipaddr)
q = self._limiter.METHOD_NAME(ipaddr=ipaddr)
response.headers.add("X-RateLimit-Remaining", q)
except Exception as e:
log.error(str(e), exc_info=True)
finally:
return response
def __init__(
self,
app,
limits: dict,
token_check_callback=None,
ipaddr_methods=["X-Real-Ip", "socket"],
whitelisted_ipaddrs=None,
):
"""
"""
self._limiter = Limiter(
limits,
token_check_callback=token_check_callback,
ipaddr_methods=ipaddr_methods,
whitelisted_ipaddrs=whitelisted_ipaddrs,
)
if app.extensions.get("limiter"):
raise Exception("The Flask app already has an extension named 'limiter'")
app.before_request(self._check_limits_callback)
app.after_request(self._after_request_callback)
app.extensions["limiter"] = self
def get_lowest_daily_quotas_summary(self, n=20) -> List[Tuple[int, float]]:
return self._limiter.get_lowest_daily_quotas_summary(n)
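# A minimal usage sketch of the core Limiter (quota values, in seconds of API time, are
# illustrative only; running this file directly also requires Flask because of the
# module-level imports above):
if __name__ == "__main__":
    demo_limiter = Limiter(
        limits={
            "ipaddr_per_month": 40000,
            "ipaddr_per_week": 10000,
            "ipaddr_per_day": 2000,
            "token_per_month": 80000,
            "token_per_week": 20000,
            "token_per_day": 4000,
        },
        whitelisted_ipaddrs=["127.0.0.1"],
    )
    demo_ipaddr = ipaddress.ip_address("10.0.0.1")
    # consume 1.5 seconds of quota, then inspect the remaining budget
    demo_limiter.consume_quota(1.5, ipaddr=demo_ipaddr)
    print(demo_limiter.is_quota_available(ipaddr=demo_ipaddr))
    print(demo_limiter.METHOD_NAME(ipaddr=demo_ipaddr))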
|
1,547 |
test derived without base attrs
|
# stdlib
from time import time
from typing import Callable
from typing import Optional
# third party
from pydantic import BaseModel
# syft absolute
import syft as sy
from syft.serde.serializable import serializable
def get_fqn_for_class(cls):
return f"{cls.__module__}.{cls.__name__}"
# ------------------------------ Simple classes ------------------------------
class AbstractBase:
uid: str
@serializable(attrs=["uid", "value"])
class Base(AbstractBase):
"""Serialize: uid, value"""
value: int
def __init__(self, uid: str, value: int):
self.uid = uid
self.value = value
@serializable(attrs=["status"])
class Derived(Base):
"""Serialize: uid, value, status"""
status: int
def __init__(self, uid: str, value: int, status: int) -> None:
super().__init__(uid, value)
self.status = status
@serializable(attrs=["status"], without=["uid"])
class DerivedWithoutAttrs(Base):
"""Serialize: value, status"""
status: int
def __init__(self, uid: str, value: int, status: int) -> None:
super().__init__(uid, value)
self.status = status
@serializable(attrs=["status"], inherit=False)
class DerivedNoInherit(Base):
"""Serialize: status"""
status: int
def __init__(self, uid: str, value: int, status: int) -> None:
super().__init__(uid, value)
self.status = status
@serializable(attrs=["uid", "value"], inheritable=False)
class BaseAttrsNonInheritable(AbstractBase):
"""Serialize: uid, value (Derived cannot inherit base attrs)"""
value: Optional[int]
def __init__(self, uid: str = None, value: int = None):
self.uid = uid
self.value = value
@serializable(attrs=["status"])
class DerivedWithoutBaseAttrs(BaseAttrsNonInheritable):
"""Serialize: status (Dervied cannot inherit base attrs)"""
status: int
def __init__(self, uid: str, value: int, status: int):
super().__init__(uid, value)
self.uid = uid
self.value = value
self.status = status
def test_base_attrs():
data = Base(uid=str(time()), value=2)
ser = sy.serialize(data, to_bytes=True)
de = sy.deserialize(ser, from_bytes=True)
assert "uid" in data.__syft_serializable__
assert "value" in data.__syft_serializable__
assert (data.uid, data.value) == (
de.uid,
de.value,
), "Deserialized values do not match"
def test_base_non_inheritable_attrs():
data = BaseAttrsNonInheritable(uid=str(time()), value=2)
ser = sy.serialize(data, to_bytes=True)
sy.deserialize(ser, from_bytes=True)
assert "__syft_serializable__" not in data.__dict__
def test_derived():
data = Derived(uid=str(time()), value=2, status=1)
ser = sy.serialize(data, to_bytes=True)
de = sy.deserialize(ser, from_bytes=True)
assert "uid" in data.__syft_serializable__
assert "value" in data.__syft_serializable__
assert (data.uid, data.value, data.status) == (
de.uid,
de.value,
de.status,
), "Deserialized values do not match"
def test_derived_without_attrs():
data = DerivedWithoutAttrs(uid=str(time()), value=2, status=1)
ser = sy.serialize(data, to_bytes=True)
sy.deserialize(ser, from_bytes=True)
assert "uid" not in data.__syft_serializable__
assert "value" in data.__syft_serializable__
assert "status" in data.__syft_serializable__
def test_derived_without_inherit():
data = DerivedNoInherit(uid=str(time()), value=2, status=1)
ser = sy.serialize(data, to_bytes=True)
de = sy.deserialize(ser, from_bytes=True)
assert "uid" not in data.__syft_serializable__
assert "value" not in data.__syft_serializable__
assert de.status == data.status
def METHOD_NAME():
data = DerivedWithoutBaseAttrs(uid=str(time()), value=2, status=1)
ser = sy.serialize(data, to_bytes=True)
de = sy.deserialize(ser, from_bytes=True)
assert "uid" not in data.__syft_serializable__
assert "value" not in data.__syft_serializable__
assert "status" in data.__syft_serializable__
assert de.status == data.status
# ------------------------------ Pydantic classes ------------------------------
@serializable()
class PydBase(BaseModel):
"""Serialize: uid, value, flag"""
uid: Optional[str] = None
value: Optional[int] = None
flag: Optional[bool] = None
@serializable()
class PydDerived(PydBase):
"""Serialize: uid, value, flag, source, target"""
source: str
target: str
@serializable(without=["uid"])
class PydDerivedWithoutAttr(PydBase):
"""
Serialize: value, flag, source, target
`without=` will only work with Optional attributes due to pydantic's validation
"""
source: str
target: str
@serializable(without=["uid", "flag", "config"])
class PydDerivedWithoutAttrs(PydBase):
"""
Serialize: value, source, target
`without=` will only work with Optional attributes due to pydantic's validation
"""
source: str
target: str
config: Optional[dict] = None
@serializable(attrs=["source", "target"])
class PydDerivedOnly(PydBase):
"""
Serialize: source, target
"""
source: str
target: str
callback: Optional[Callable] = lambda: None # noqa: E731
def test_pydantic():
data = PydBase(uid=str(time()), value=2, flag=True)
ser = sy.serialize(data, to_bytes=True)
de = sy.deserialize(ser, from_bytes=True)
assert (data.uid, data.value, data.flag) == (de.uid, de.value, de.flag)
def test_pydantic_derived():
data = PydDerived(
uid=str(time()),
value=2,
source="source_path",
target="target_path",
)
ser = sy.serialize(data, to_bytes=True)
de = sy.deserialize(ser, from_bytes=True)
assert (data.uid, data.value, data.flag, data.source, data.target) == (
de.uid,
de.value,
de.flag,
de.source,
de.target,
)
def test_pydantic_derived_without_attr():
data = PydDerivedWithoutAttr(
uid=str(time()),
value=2,
source="source_path",
target="target_path",
)
ser = sy.serialize(data, to_bytes=True)
de = sy.deserialize(ser, from_bytes=True)
assert data.uid is not None
assert de.uid is None
assert (data.value, data.flag, data.source, data.target) == (
de.value,
de.flag,
de.source,
de.target,
)
def test_pydantic_derived_without_attrs():
data = PydDerivedWithoutAttrs(
uid=str(time()),
value=2,
source="source_path",
target="target_path",
)
ser = sy.serialize(data, to_bytes=True)
de = sy.deserialize(ser, from_bytes=True)
assert (data.uid, data.flag, data.config) != (None, None, None)
assert (de.uid, de.flag, de.config) == (None, None, None)
assert (data.value, data.flag, data.source, data.target) == (
de.value,
de.flag,
de.source,
de.target,
)
def test_pydantic_derived_only():
data = PydDerivedOnly(
uid=str(time()),
value=2,
flag=True,
source="source_path",
target="target_path",
)
ser = sy.serialize(data, to_bytes=True)
de = sy.deserialize(ser, from_bytes=True)
assert (data.uid, data.value, data.flag) != (de.uid, de.value, de.flag)
assert (de.uid, de.value, de.flag) == (None, None, None)
assert (data.source, data.target) == (de.source, de.target)
|
1,548 |
create dagster daemon cli
|
import os
import sys
from typing import Optional
import click
from dagster import __version__ as dagster_version
from dagster._cli.utils import get_instance_for_cli
from dagster._cli.workspace.cli_target import (
ClickArgMapping,
ClickArgValue,
get_workspace_load_target,
workspace_target_argument,
)
from dagster._core.instance import DagsterInstance, InstanceRef
from dagster._core.telemetry import telemetry_wrapper
from dagster._daemon.controller import (
DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
DagsterDaemonController as DagsterDaemonController,
all_daemons_live,
daemon_controller_from_instance,
debug_daemon_heartbeats,
get_daemon_statuses,
)
from dagster._daemon.daemon import get_telemetry_daemon_session_id
from dagster._serdes import deserialize_value
from dagster._utils.interrupts import capture_interrupts
def _get_heartbeat_tolerance():
tolerance = os.getenv(
"DAGSTER_DAEMON_HEARTBEAT_TOLERANCE",
)
return int(tolerance) if tolerance else DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS
@click.command(
name="run",
help="Run any daemons configured on the DagsterInstance.",
)
@click.option(
"--code-server-log-level",
help="Set the log level for any code servers spun up by the daemon.",
show_default=True,
default="warning",
type=click.Choice(["critical", "error", "warning", "info", "debug"], case_sensitive=False),
)
@click.option(
"--log-level",
help="Set the log level for any code servers spun up by the daemon.",
show_default=True,
default="info",
type=click.Choice(["critical", "error", "warning", "info", "debug"], case_sensitive=False),
)
@click.option(
"--instance-ref",
type=click.STRING,
required=False,
hidden=True,
)
@workspace_target_argument
def run_command(
code_server_log_level: str,
log_level: str,
instance_ref: Optional[str],
**kwargs: ClickArgValue,
) -> None:
try:
with capture_interrupts():
with get_instance_for_cli(
instance_ref=deserialize_value(instance_ref, InstanceRef) if instance_ref else None
) as instance:
_daemon_run_command(instance, log_level, code_server_log_level, kwargs)
except KeyboardInterrupt:
return # Exit cleanly on interrupt
@telemetry_wrapper(metadata={"DAEMON_SESSION_ID": get_telemetry_daemon_session_id()})
def _daemon_run_command(
instance: DagsterInstance, log_level: str, code_server_log_level: str, kwargs: ClickArgMapping
) -> None:
workspace_load_target = get_workspace_load_target(kwargs)
with daemon_controller_from_instance(
instance,
workspace_load_target=workspace_load_target,
heartbeat_tolerance_seconds=_get_heartbeat_tolerance(),
log_level=log_level,
code_server_log_level=code_server_log_level,
) as controller:
controller.check_daemon_loop()
@click.command(
name="liveness-check",
help="Check for recent heartbeats from the daemon.",
)
def liveness_check_command() -> None:
with get_instance_for_cli() as instance:
if all_daemons_live(instance, heartbeat_tolerance_seconds=_get_heartbeat_tolerance()):
click.echo("Daemon live")
else:
click.echo("Daemon(s) not running")
sys.exit(1)
@click.command(
name="wipe",
help="Wipe all heartbeats from storage.",
)
def wipe_command() -> None:
with get_instance_for_cli() as instance:
instance.wipe_daemon_heartbeats()
click.echo("Daemon heartbeats wiped")
@click.command(
name="heartbeat",
help="Read and write a heartbeat",
)
def debug_heartbeat_command() -> None:
with get_instance_for_cli() as instance:
debug_daemon_heartbeats(instance)
@click.command(
name="heartbeat-dump",
help="Log all heartbeat statuses",
)
def debug_heartbeat_dump_command() -> None:
with get_instance_for_cli() as instance:
for daemon_status in get_daemon_statuses(instance, instance.get_required_daemon_types()):
click.echo(daemon_status)
@click.group(
commands={"heartbeat": debug_heartbeat_command, "heartbeat-dump": debug_heartbeat_dump_command}
)
def debug_group() -> None:
"""Daemon debugging utils."""
def METHOD_NAME() -> click.Group:
commands = {
"run": run_command,
"liveness-check": liveness_check_command,
"wipe": wipe_command,
"debug": debug_group,
}
@click.group(commands=commands)
@click.version_option(version=dagster_version)
def group():
"""CLI tools for working with the dagster daemon process."""
return group
cli = METHOD_NAME()
def main() -> None:
cli(obj={})
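# Typical invocations through the console script that wraps ``main`` (the executable name
# ``dagster-daemon`` is how Dagster exposes this CLI; flags shown are the ones defined above):
#
#     dagster-daemon run --log-level debug
#     dagster-daemon liveness-check
#     dagster-daemon wipe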
|
1,549 |
upsample
|
import tensorflow as tf
def conv_pass(
fmaps_in,
kernel_size,
num_fmaps,
num_repetitions,
activation="relu",
name="conv_pass",
):
"""Create a convolution pass::
f_in --> f_1 --> ... --> f_n
where each ``-->`` is a convolution followed by a (non-linear) activation
    function, and ``n`` equals ``num_repetitions``. Each convolution will decrease the
size of the feature maps by ``kernel_size-1``.
Args:
        fmaps_in:
The input tensor of shape ``(batch_size, channels, depth, height,
width)`` or ``(batch_size, channels, height, width)``.
kernel_size:
Size of the kernel. Forwarded to the tensorflow convolution layer.
num_fmaps:
The number of feature maps to produce with each convolution.
num_repetitions:
How many convolutions to apply.
activation:
Which activation to use after a convolution. Accepts the name of any
tensorflow activation function (e.g., ``relu`` for ``tf.nn.relu``).
"""
fmaps = fmaps_in
if activation is not None:
activation = getattr(tf.nn, activation)
conv_layer = getattr(
tf.layers, {2: "conv2d", 3: "conv3d"}[fmaps_in.get_shape().ndims - 2]
)
for i in range(num_repetitions):
fmaps = conv_layer(
inputs=fmaps,
filters=num_fmaps,
kernel_size=kernel_size,
padding="valid",
data_format="channels_first",
activation=activation,
name=name + "_%i" % i,
)
return fmaps
def downsample(fmaps_in, factors, name="down"):
pooling_layer = getattr(
tf.layers,
{2: "max_pooling2d", 3: "max_pooling3d"}[fmaps_in.get_shape().ndims - 2],
)
fmaps = pooling_layer(
fmaps_in,
pool_size=factors,
strides=factors,
padding="valid",
data_format="channels_first",
name=name,
)
return fmaps
def METHOD_NAME(fmaps_in, factors, num_fmaps, activation="relu", name="up"):
if activation is not None:
activation = getattr(tf.nn, activation)
conv_trans_layer = getattr(
tf.layers,
{2: "conv2d_transpose", 3: "conv3d_transpose"}[fmaps_in.get_shape().ndims - 2],
)
fmaps = conv_trans_layer(
fmaps_in,
filters=num_fmaps,
kernel_size=factors,
strides=factors,
padding="valid",
data_format="channels_first",
activation=activation,
name=name,
)
return fmaps
def crop_spatial(fmaps_in, shape):
"""Crop only the spacial dimensions to match shape.
Args:
fmaps_in:
The input tensor.
shape:
A list (not a tensor) with the requested shape [_, _, z, y, x] or
[_, _, y, x].
"""
in_shape = fmaps_in.get_shape().as_list()
offset = [0, 0] + [(in_shape[i] - shape[i]) // 2 for i in range(2, len(shape))]
size = in_shape[0:2] + shape[2:]
fmaps = tf.slice(fmaps_in, offset, size)
return fmaps
def unet(
fmaps_in, num_fmaps, fmap_inc_factor, downsample_factors, activation="relu", layer=0
):
"""Create a 2D or 3D U-Net::
f_in --> f_left --------------------------->> f_right--> f_out
| ^
v |
g_in --> g_left ------->> g_right --> g_out
| ^
v |
...
where each ``-->`` is a convolution pass (see ``conv_pass``), each `-->>` a
crop, and down and up arrows are max-pooling and transposed convolutions,
respectively.
The U-Net expects tensors to have shape ``(batch=1, channels, depth, height,
width)`` for 3D or ``(batch=1, channels, height, width)`` for 2D.
This U-Net performs only "valid" convolutions, i.e., sizes of the feature
maps decrease after each convolution.
Args:
fmaps_in:
The input tensor.
num_fmaps:
The number of feature maps in the first layer. This is also the
number of output feature maps.
fmap_inc_factor:
By how much to multiply the number of feature maps between layers.
If layer 0 has ``k`` feature maps, layer ``l`` will have
``k*fmap_inc_factor**l``.
downsample_factors:
List of lists ``[z, y, x]`` or ``[y, x]`` to use to down- and
up-sample the feature maps between layers.
activation:
Which activation to use after a convolution. Accepts the name of any
tensorflow activation function (e.g., ``relu`` for ``tf.nn.relu``).
layer:
Used internally to build the U-Net recursively.
"""
prefix = " " * layer
print(prefix + "Creating U-Net layer %i" % layer)
print(prefix + "f_in: " + str(fmaps_in.shape))
# convolve
f_left = conv_pass(
fmaps_in,
kernel_size=3,
num_fmaps=num_fmaps,
num_repetitions=2,
activation=activation,
name="unet_layer_%i_left" % layer,
)
# last layer does not recurse
bottom_layer = layer == len(downsample_factors)
if bottom_layer:
print(prefix + "bottom layer")
print(prefix + "f_out: " + str(f_left.shape))
return f_left
# downsample
g_in = downsample(
f_left, downsample_factors[layer], "unet_down_%i_to_%i" % (layer, layer + 1)
)
# recursive U-net
g_out = unet(
g_in,
num_fmaps=num_fmaps * fmap_inc_factor,
fmap_inc_factor=fmap_inc_factor,
downsample_factors=downsample_factors,
activation=activation,
layer=layer + 1,
)
print(prefix + "g_out: " + str(g_out.shape))
# upsample
g_out_upsampled = METHOD_NAME(
g_out,
downsample_factors[layer],
num_fmaps,
activation=activation,
name="unet_up_%i_to_%i" % (layer + 1, layer),
)
print(prefix + "g_out_upsampled: " + str(g_out_upsampled.shape))
# copy-crop
f_left_cropped = crop_spatial(f_left, g_out_upsampled.get_shape().as_list())
print(prefix + "f_left_cropped: " + str(f_left_cropped.shape))
# concatenate along channel dimension
f_right = tf.concat([f_left_cropped, g_out_upsampled], 1)
print(prefix + "f_right: " + str(f_right.shape))
# convolve
f_out = conv_pass(
f_right,
kernel_size=3,
num_fmaps=num_fmaps,
num_repetitions=2,
name="unet_layer_%i_right" % layer,
)
print(prefix + "f_out: " + str(f_out.shape))
return f_out
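# A minimal graph-building sketch (assumes TF1-style graph mode, since this module uses
# ``tf.layers``; the input shape and downsample factors below are illustrative only):
if __name__ == "__main__":
    raw = tf.placeholder(tf.float32, shape=(1, 1, 196, 196, 196))
    out = unet(raw, num_fmaps=12, fmap_inc_factor=3, downsample_factors=[[2, 2, 2], [2, 2, 2]])
    print("network output:", out.shape)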
|
1,550 |
list
|
from django.conf import settings
from django.db.models.query_utils import Q
from django_filters import rest_framework as filters
from djqscsv import render_to_csv_response
from dry_rest_permissions.generics import DRYPermissions
from rest_framework import filters as rest_framework_filters
from rest_framework import mixins
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import GenericViewSet
from care.facility.api.serializers.resources import (
ResourceRequestCommentSerializer,
ResourceRequestSerializer,
)
from care.facility.models import (
RESOURCE_CATEGORY_CHOICES,
RESOURCE_STATUS_CHOICES,
ResourceRequest,
ResourceRequestComment,
User,
)
from care.facility.models.resources import RESOURCE_SUB_CATEGORY_CHOICES
from care.utils.cache.cache_allowed_facilities import get_accessible_facilities
from care.utils.filters.choicefilter import CareChoiceFilter
def inverse_choices(choices):
output = {}
for choice in choices:
output[choice[1]] = choice[0]
return output
inverse_resource_status = inverse_choices(RESOURCE_STATUS_CHOICES)
inverse_category = inverse_choices(RESOURCE_CATEGORY_CHOICES)
inverse_sub_category = inverse_choices(RESOURCE_SUB_CATEGORY_CHOICES)
def get_request_queryset(request, queryset):
if request.user.is_superuser:
pass
else:
if request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]:
q_objects = Q(origin_facility__state=request.user.state)
q_objects |= Q(approving_facility__state=request.user.state)
q_objects |= Q(assigned_facility__state=request.user.state)
return queryset.filter(q_objects)
elif request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
q_objects = Q(origin_facility__district=request.user.district)
q_objects |= Q(approving_facility__district=request.user.district)
q_objects |= Q(assigned_facility__district=request.user.district)
return queryset.filter(q_objects)
facility_ids = get_accessible_facilities(request.user)
q_objects = Q(origin_facility__id__in=facility_ids)
q_objects |= Q(approving_facility__id__in=facility_ids)
q_objects |= Q(assigned_facility__id__in=facility_ids)
queryset = queryset.filter(q_objects)
return queryset
class ResourceFilterSet(filters.FilterSet):
status = CareChoiceFilter(choice_dict=inverse_resource_status)
category = CareChoiceFilter(choice_dict=inverse_category)
sub_category = CareChoiceFilter(choice_dict=inverse_sub_category)
facility = filters.UUIDFilter(field_name="facility__external_id")
origin_facility = filters.UUIDFilter(field_name="origin_facility__external_id")
approving_facility = filters.UUIDFilter(
field_name="approving_facility__external_id"
)
assigned_facility = filters.UUIDFilter(field_name="assigned_facility__external_id")
created_date = filters.DateFromToRangeFilter(field_name="created_date")
modified_date = filters.DateFromToRangeFilter(field_name="modified_date")
assigned_to = filters.NumberFilter(field_name="assigned_to__id")
created_by = filters.NumberFilter(field_name="created_by__id")
last_edited_by = filters.NumberFilter(field_name="last_edited_by__id")
priority = filters.NumberFilter(field_name="priority")
emergency = filters.BooleanFilter(field_name="emergency")
class ResourceRequestViewSet(
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
GenericViewSet,
):
serializer_class = ResourceRequestSerializer
lookup_field = "external_id"
queryset = ResourceRequest.objects.all().select_related(
"origin_facility",
"origin_facility__ward",
"origin_facility__local_body",
"origin_facility__district",
"origin_facility__state",
"approving_facility",
"approving_facility__ward",
"approving_facility__local_body",
"approving_facility__district",
"approving_facility__state",
"assigned_facility",
"assigned_facility__ward",
"assigned_facility__local_body",
"assigned_facility__district",
"assigned_facility__state",
"assigned_to",
"created_by",
"last_edited_by",
)
ordering_fields = [
"id",
"created_date",
"modified_date",
"emergency",
"priority",
]
permission_classes = (IsAuthenticated, DRYPermissions)
filter_backends = (
filters.DjangoFilterBackend,
rest_framework_filters.OrderingFilter,
)
filterset_class = ResourceFilterSet
def get_queryset(self):
return get_request_queryset(self.request, self.queryset)
def METHOD_NAME(self, request, *args, **kwargs):
if settings.CSV_REQUEST_PARAMETER in request.GET:
queryset = self.filter_queryset(self.get_queryset()).values(
*ResourceRequest.CSV_MAPPING.keys()
)
return render_to_csv_response(
queryset,
field_header_map=ResourceRequest.CSV_MAPPING,
field_serializer_map=ResourceRequest.CSV_MAKE_PRETTY,
)
return super().METHOD_NAME(request, *args, **kwargs)
class ResourceRequestCommentViewSet(
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
GenericViewSet,
):
serializer_class = ResourceRequestCommentSerializer
lookup_field = "external_id"
queryset = ResourceRequestComment.objects.all().order_by("-created_date")
permission_classes = (IsAuthenticated,)
def get_queryset(self):
queryset = self.queryset.filter(
request__external_id=self.kwargs.get("resource_external_id")
)
if self.request.user.is_superuser:
pass
else:
if self.request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]:
q_objects = Q(request__origin_facility__state=self.request.user.state)
q_objects |= Q(
request__approving_facility__state=self.request.user.state
)
q_objects |= Q(
request__assigned_facility__state=self.request.user.state
)
return queryset.filter(q_objects)
elif self.request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
q_objects = Q(
request__origin_facility__district=self.request.user.district
)
q_objects |= Q(
request__approving_facility__district=self.request.user.district
)
q_objects |= Q(
request__assigned_facility__district=self.request.user.district
)
return queryset.filter(q_objects)
facility_ids = get_accessible_facilities(self.request.user)
q_objects = Q(request__origin_facility__id__in=facility_ids)
q_objects |= Q(request__approving_facility__id__in=facility_ids)
q_objects |= Q(request__assigned_facility__id__in=facility_ids)
queryset = queryset.filter(q_objects)
return queryset
def get_request(self):
queryset = get_request_queryset(self.request, ResourceRequest.objects.all())
queryset = queryset.filter(external_id=self.kwargs.get("resource_external_id"))
return get_object_or_404(queryset)
def perform_create(self, serializer):
serializer.save(request=self.get_request())
|
1,551 |
insights segments
|
from warnings import warn
from twilio.rest.flex_api.FlexApiBase import FlexApiBase
from twilio.rest.flex_api.v1.assessments import AssessmentsList
from twilio.rest.flex_api.v1.channel import ChannelList
from twilio.rest.flex_api.v1.configuration import ConfigurationList
from twilio.rest.flex_api.v1.flex_flow import FlexFlowList
from twilio.rest.flex_api.v1.insights_assessments_comment import (
InsightsAssessmentsCommentList,
)
from twilio.rest.flex_api.v1.insights_conversations import InsightsConversationsList
from twilio.rest.flex_api.v1.insights_questionnaires import InsightsQuestionnairesList
from twilio.rest.flex_api.v1.insights_questionnaires_category import (
InsightsQuestionnairesCategoryList,
)
from twilio.rest.flex_api.v1.insights_questionnaires_question import (
InsightsQuestionnairesQuestionList,
)
from twilio.rest.flex_api.v1.METHOD_NAME import InsightsSegmentsList
from twilio.rest.flex_api.v1.insights_session import InsightsSessionList
from twilio.rest.flex_api.v1.insights_settings_answer_sets import (
InsightsSettingsAnswerSetsList,
)
from twilio.rest.flex_api.v1.insights_settings_comment import (
InsightsSettingsCommentList,
)
from twilio.rest.flex_api.v1.insights_user_roles import InsightsUserRolesList
from twilio.rest.flex_api.v1.interaction import InteractionList
from twilio.rest.flex_api.v1.web_channel import WebChannelList
from twilio.rest.flex_api.v2.web_channels import WebChannelsList
class FlexApi(FlexApiBase):
@property
def assessments(self) -> AssessmentsList:
warn(
"assessments is deprecated. Use v1.assessments instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.assessments
@property
def channel(self) -> ChannelList:
warn(
"channel is deprecated. Use v1.channel instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.channel
@property
def configuration(self) -> ConfigurationList:
warn(
"configuration is deprecated. Use v1.configuration instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.configuration
@property
def flex_flow(self) -> FlexFlowList:
warn(
"flex_flow is deprecated. Use v1.flex_flow instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.flex_flow
@property
def insights_assessments_comment(self) -> InsightsAssessmentsCommentList:
warn(
"insights_assessments_comment is deprecated. Use v1.insights_assessments_comment instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.insights_assessments_comment
@property
def insights_conversations(self) -> InsightsConversationsList:
warn(
"insights_conversations is deprecated. Use v1.insights_conversations instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.insights_conversations
@property
def insights_questionnaires(self) -> InsightsQuestionnairesList:
warn(
"insights_questionnaires is deprecated. Use v1.insights_questionnaires instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.insights_questionnaires
@property
def insights_questionnaires_category(self) -> InsightsQuestionnairesCategoryList:
warn(
"insights_questionnaires_category is deprecated. Use v1.insights_questionnaires_category instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.insights_questionnaires_category
@property
def insights_questionnaires_question(self) -> InsightsQuestionnairesQuestionList:
warn(
"insights_questionnaires_question is deprecated. Use v1.insights_questionnaires_question instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.insights_questionnaires_question
@property
def METHOD_NAME(self) -> InsightsSegmentsList:
warn(
"insights_segments is deprecated. Use v1.insights_segments instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.METHOD_NAME
@property
def insights_session(self) -> InsightsSessionList:
warn(
"insights_session is deprecated. Use v1.insights_session instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.insights_session
@property
def insights_settings_answer_sets(self) -> InsightsSettingsAnswerSetsList:
warn(
"insights_settings_answer_sets is deprecated. Use v1.insights_settings_answer_sets instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.insights_settings_answer_sets
@property
def insights_settings_comment(self) -> InsightsSettingsCommentList:
warn(
"insights_settings_comment is deprecated. Use v1.insights_settings_comment instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.insights_settings_comment
@property
def insights_user_roles(self) -> InsightsUserRolesList:
warn(
"insights_user_roles is deprecated. Use v1.insights_user_roles instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.insights_user_roles
@property
def interaction(self) -> InteractionList:
warn(
"interaction is deprecated. Use v1.interaction instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.interaction
@property
def web_channel(self) -> WebChannelList:
warn(
"web_channel is deprecated. Use v1.web_channel instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v1.web_channel
@property
def web_channels(self) -> WebChannelsList:
warn(
"web_channels is deprecated. Use v2.web_channels instead.",
DeprecationWarning,
stacklevel=2,
)
return self.v2.web_channels
|
1,552 |
get information
|
# ***************************************************************************
# * Copyright (c) 2021 Bernd Hahnebach <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import Fem
import ObjectsFem
from . import manager
from .manager import get_meshname
from .manager import init_doc
def METHOD_NAME():
return {
"name": "NonGui Tutorial 01 - Eigenvalue of elastic beam",
"meshtype": "solid",
"meshelement": "Tet10",
"constraints": [],
"solvers": ["calculix", "ccxtools", "elmer"],
"material": "solid",
"equations": ["elasticity"] # "frequency", but list not allowed here
}
def get_explanation(header=""):
return header + """
To run the example from Python console use:
from femexamples.elmer_nonguitutorial01_eigenvalue_of_elastic_beam import setup
setup()
See forum topic post:
https://forum.freecad.org/viewtopic.php?t=56590
"""
def setup(doc=None, solvertype="elmer"):
# init FreeCAD document
if doc is None:
doc = init_doc()
# explanation object
# just keep the following line and change text string in get_explanation method
manager.add_explanation_obj(doc, get_explanation(manager.get_header(METHOD_NAME())))
# geometric object
geom_obj = doc.addObject("Part::Box", "Box")
geom_obj.Length = 1000
geom_obj.Width = 200
geom_obj.Height = 100
doc.recompute()
if FreeCAD.GuiUp:
geom_obj.ViewObject.Document.activeView().viewAxonometric()
geom_obj.ViewObject.Document.activeView().fitAll()
# analysis
analysis = ObjectsFem.makeAnalysis(doc, "Analysis")
# solver
if solvertype == "calculix":
solver_obj = ObjectsFem.makeSolverCalculix(doc, "SolverCalculiX")
elif solvertype == "ccxtools":
solver_obj = ObjectsFem.makeSolverCalculixCcxTools(doc, "CalculiXccxTools")
solver_obj.WorkingDir = u""
elif solvertype == "elmer":
solver_obj = ObjectsFem.makeSolverElmer(doc, "SolverElmer")
eq_obj = ObjectsFem.makeEquationElasticity(doc, solver_obj)
eq_obj.EigenAnalysis = True
eq_obj.CalculateStresses = True
eq_obj.DisplaceMesh = False
else:
FreeCAD.Console.PrintWarning(
"Unknown or unsupported solver type: {}. "
"No solver object was created.\n".format(solvertype)
)
if solvertype == "calculix" or solvertype == "ccxtools":
solver_obj.AnalysisType = "frequency"
solver_obj.GeometricalNonlinearity = "linear"
solver_obj.ThermoMechSteadyState = False
solver_obj.MatrixSolverType = "default"
solver_obj.IterationsControlParameterTimeUse = False
solver_obj.EigenmodesCount = 5
solver_obj.EigenmodeHighLimit = 1000000.0
solver_obj.EigenmodeLowLimit = 0.01
analysis.addObject(solver_obj)
# material
material_obj = ObjectsFem.makeMaterialSolid(doc, "MechanicalMaterial")
mat = material_obj.Material
mat["Name"] = "Steel-Generic"
mat["YoungsModulus"] = "100 GPa"
mat["PoissonRatio"] = "0.30"
mat["Density"] = "2330 kg/m^3"
material_obj.Material = mat
analysis.addObject(material_obj)
# constraint fixed
con_fixed = ObjectsFem.makeConstraintFixed(doc, "ConstraintFixed")
con_fixed.References = [
(geom_obj, "Face1"),
(geom_obj, "Face2")
]
analysis.addObject(con_fixed)
# mesh
from .meshes.mesh_eigenvalue_of_elastic_beam_tetra10 import create_nodes
from .meshes.mesh_eigenvalue_of_elastic_beam_tetra10 import create_elements
fem_mesh = Fem.FemMesh()
control = create_nodes(fem_mesh)
if not control:
FreeCAD.Console.PrintError("Error on creating nodes.\n")
control = create_elements(fem_mesh)
if not control:
FreeCAD.Console.PrintError("Error on creating elements.\n")
femmesh_obj = analysis.addObject(ObjectsFem.makeMeshGmsh(doc, get_meshname()))[0]
femmesh_obj.FemMesh = fem_mesh
femmesh_obj.Part = geom_obj
femmesh_obj.SecondOrderLinear = False
femmesh_obj.CharacteristicLengthMax = "40.80 mm"
doc.recompute()
return doc
|
1,553 |
get config
|
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer
from keras_core.utils import backend_utils
@keras_core_export("keras_core.layers.CategoryEncoding")
class CategoryEncoding(TFDataLayer):
"""A preprocessing layer which encodes integer features.
This layer provides options for condensing data into a categorical encoding
when the total number of tokens are known in advance. It accepts integer
values as inputs, and it outputs a dense or sparse representation of those
inputs. For integer inputs where the total number of tokens is not known,
use `keras_core.layers.IntegerLookup` instead.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Examples:
**One-hot encoding data**
>>> layer = keras_core.layers.CategoryEncoding(
... num_tokens=4, output_mode="one_hot")
>>> layer([3, 2, 0, 1])
array([[0., 0., 0., 1.],
[0., 0., 1., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]]>
**Multi-hot encoding data**
>>> layer = keras_core.layers.CategoryEncoding(
... num_tokens=4, output_mode="multi_hot")
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
array([[1., 1., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 1.]]>
**Using weighted inputs in `"count"` mode**
>>> layer = keras_core.layers.CategoryEncoding(
... num_tokens=4, output_mode="count")
>>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)
array([[0.1, 0.2, 0. , 0. ],
[0.2, 0. , 0. , 0. ],
[0. , 0.2, 0.3, 0. ],
[0. , 0.2, 0. , 0.4]]>
Args:
num_tokens: The total number of tokens the layer should support. All
            inputs to the layer must be integers in the range `0 <= value <
num_tokens`, or an error will be thrown.
output_mode: Specification for the output of the layer.
Values can be `"one_hot"`, `"multi_hot"` or `"count"`,
configuring the layer as follows:
- `"one_hot"`: Encodes each individual element in the input
into an array of `num_tokens` size, containing a 1 at the
element index. If the last dimension is size 1, will encode
on that dimension. If the last dimension is not size 1,
will append a new dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single
array of `num_tokens` size, containing a 1 for each
vocabulary term present in the sample. Treats the last
dimension as the sample dimension, if input shape is
`(..., sample_length)`, output shape will be
`(..., num_tokens)`.
- `"count"`: Like `"multi_hot"`, but the int array contains a
count of the number of times the token at that index
appeared in the sample.
For all output modes, currently only output up to rank 2 is
supported.
Defaults to `"multi_hot"`.
Call arguments:
inputs: A 1D or 2D tensor of integer inputs.
count_weights: A tensor in the same shape as `inputs` indicating the
weight for each sample value when summing up in `count` mode.
Not used in `"multi_hot"` or `"one_hot"` modes.
"""
def __init__(self, num_tokens=None, output_mode="multi_hot", **kwargs):
super().__init__(**kwargs)
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = "multi_hot"
# 'output_mode' must be one of ("count", "one_hot", "multi_hot")
if output_mode not in ("count", "one_hot", "multi_hot"):
raise ValueError(f"Unknown arg for output_mode: {output_mode}")
if num_tokens is None:
raise ValueError(
"num_tokens must be set to use this layer. If the "
"number of tokens is not known beforehand, use the "
"IntegerLookup layer instead."
)
if num_tokens < 1:
raise ValueError(
f"`num_tokens` must be >= 1. Received: num_tokens={num_tokens}."
)
self.num_tokens = num_tokens
self.output_mode = output_mode
self._allow_non_tensor_positional_args = True
self._convert_input_args = False
def _count(self, inputs, axis=-1):
reduction_axis = 1 if len(inputs.shape) > 1 else 0
outputs = self.backend.numpy.sum(
self.backend.nn.one_hot(
inputs, self.num_tokens, axis=axis, dtype=self.dtype
),
axis=reduction_axis,
)
return outputs
def _encode(self, inputs):
if self.output_mode == "multi_hot":
outputs = self.backend.nn.multi_hot(
inputs, self.num_tokens, dtype=self.dtype
)
elif self.output_mode == "one_hot":
outputs = self.backend.nn.one_hot(
inputs, self.num_tokens, dtype=self.dtype
)
elif self.output_mode == "count":
outputs = self._count(inputs)
return outputs
def compute_output_shape(self, input_shape):
return tuple(input_shape + (self.num_tokens,))
def METHOD_NAME(self):
config = {
"num_tokens": self.num_tokens,
"output_mode": self.output_mode,
}
base_config = super().METHOD_NAME()
return {**base_config, **config}
def call(self, inputs):
outputs = self._encode(inputs)
if (
self.backend._backend != "tensorflow"
and not backend_utils.in_tf_graph()
):
outputs = backend.convert_to_tensor(outputs)
return outputs
|
1,554 |
output logical id
|
"""
Date type classes for companion stacks
"""
import posixpath
import re
from typing import Optional
from samcli.lib.utils.hash import str_checksum
class CompanionStack:
"""
Abstraction class for the companion stack
Companion stack name will be generated by this class.
"""
_parent_stack_name: str
_escaped_parent_stack_name: str
_parent_stack_hash: str
_stack_name: str
def __init__(self, parent_stack_name: str) -> None:
self._parent_stack_name = parent_stack_name
self._escaped_parent_stack_name = re.sub(r"[^a-z0-9]", "", self._parent_stack_name.lower())
self._parent_stack_hash = str_checksum(self._parent_stack_name)
        # There is a maximum length of 128 characters for the stack name.
# Using MD5 to avoid collision after truncating
# 104 + 1 + 8 + 15 = 128 max char
self._stack_name = f"{self._parent_stack_name[:104]}-{self._parent_stack_hash[:8]}-CompanionStack"
@property
def parent_stack_name(self) -> str:
"""
Parent stack name
"""
return self._parent_stack_name
@property
def escaped_parent_stack_name(self) -> str:
"""
Parent stack name with only alphanumeric characters
"""
return self._escaped_parent_stack_name
@property
def parent_stack_hash(self) -> str:
"""
MD5 hash of parent stack name
"""
return self._parent_stack_hash
@property
def stack_name(self) -> str:
"""
Companion stack stack name
"""
return self._stack_name
class ECRRepo:
"""
Abstraction class for ECR repos in companion stacks
Logical ID, Physical ID, and Repo URI will be generated with this class.
"""
_function_full_path: Optional[str]
_escaped_function_logical_id: Optional[str]
_function_md5: Optional[str]
_companion_stack: Optional[CompanionStack]
_logical_id: Optional[str]
_physical_id: Optional[str]
_output_logical_id: Optional[str]
def __init__(
self,
companion_stack: Optional[CompanionStack] = None,
function_full_path: Optional[str] = None,
logical_id: Optional[str] = None,
physical_id: Optional[str] = None,
METHOD_NAME: Optional[str] = None,
):
"""
Must be specified either with
companion_stack and function_full_path
or
logical_id, physical_id, and output_logical_id
"""
self._function_full_path = (
function_full_path.replace(posixpath.sep, "") if function_full_path else function_full_path
)
self._escaped_function_logical_id = (
re.sub(r"[^a-z0-9]", "", self._function_full_path.lower()) if self._function_full_path is not None else None
)
self._function_md5 = str_checksum(self._function_full_path) if self._function_full_path is not None else None
self._companion_stack = companion_stack
self._logical_id = logical_id
self._physical_id = physical_id
self._output_logical_id = METHOD_NAME
@property
def logical_id(self) -> Optional[str]:
if self._logical_id is None and self._function_full_path and self._function_md5:
# MD5 is used to avoid two having the same escaped name with different Lambda Functions
# For example: Helloworld and HELLO-WORLD
# 52 + 8 + 4 = 64 max char
self._logical_id = self._function_full_path[:52] + self._function_md5[:8] + "Repo"
return self._logical_id
@property
def physical_id(self) -> Optional[str]:
if (
self._physical_id is None
and self._companion_stack
and self._function_md5
and self._escaped_function_logical_id
):
# The physical ID is constructed with escaped_stack_name + stack_md5[:8] as prefix/path and
# followed by escaped_lambda_logical_id + function_md5[:8] + "repo" to show
# the linkage between the function and the repo
# 128 + 8 + 1 + 64 + 8 + 4 = 213 max char
self._physical_id = (
self._companion_stack.escaped_parent_stack_name
+ self._companion_stack.parent_stack_hash[:8]
+ "/"
+ self._escaped_function_logical_id
+ self._function_md5[:8]
+ "repo"
)
return self._physical_id
@property
def METHOD_NAME(self) -> Optional[str]:
if self._output_logical_id is None and self._function_full_path and self._function_md5:
self._output_logical_id = self._function_full_path[:52] + self._function_md5[:8] + "Out"
return self._output_logical_id
@staticmethod
def get_domain(region: str) -> str:
# https://docs.amazonaws.cn/en_us/aws/latest/userguide/endpoints-Beijing.html
if region.startswith("cn-"):
return "amazonaws.com.cn"
return "amazonaws.com"
def get_repo_uri(self, account_id, region) -> str:
domain = ECRRepo.get_domain(region)
return f"{account_id}.dkr.ecr.{region}.{domain}/{self.physical_id}"
|
1,555 |
make message
|
# Based on https://github.com/ziglang/zig-pypi/blob/de14cf728fa35c014821f62a4fa9abd9f4bb560e/make_wheels.py
# MIT
from __future__ import annotations
import os
import sys
from email.message import EmailMessage
from pathlib import Path
from typing import Sequence
from zipfile import ZIP_DEFLATED, ZipInfo
from wheel.wheelfile import WheelFile
def METHOD_NAME(headers, payload=None):
msg = EmailMessage()
for name, value in headers.items():
if name == "_dependencies":
for dep in value:
if isinstance(dep, ExtraRequires):
msg["Provides-Extra"] = dep.name
for inner_dep in dep.deps:
msg["Requires-Dist"] = f"{inner_dep}; extra == '{dep.name}'"
else:
msg["Requires-Dist"] = dep
elif isinstance(value, list):
for value_part in value:
msg[name] = value_part
else:
msg[name] = value
if payload:
msg.set_payload(payload)
return msg
def write_wheel_file(filename, contents):
with WheelFile(filename, "w") as wheel:
for member_info, member_source in contents.items():
if not isinstance(member_info, ZipInfo):
member_info = ZipInfo(member_info)
member_info.external_attr = 0o644 << 16
member_info.file_size = len(member_source)
member_info.compress_type = ZIP_DEFLATED
wheel.writestr(member_info, bytes(member_source))
return filename
def write_wheel(
wheel_path,
*,
name,
version,
tag,
metadata,
description,
contents,
entrypoints: list[str] | None = None,
top_level: list[str] | None = None,
):
dist_info = f"{name}-{version}.dist-info"
extra = {}
if entrypoints:
entrypoints_joined = "\n".join(entrypoints)
text = f"[console_scripts]\n{entrypoints_joined}"
file = f"{dist_info}/entry_points.txt"
extra[file] = text.encode("utf8")
if top_level:
top_level_joined = "\n".join(top_level) + "\n"
file = f"{dist_info}/top_level.txt"
extra[file] = top_level_joined.encode("utf8")
return write_wheel_file(
wheel_path,
{
**contents,
**extra,
f"{dist_info}/METADATA": METHOD_NAME(
{
"Metadata-Version": "2.1",
"Name": name,
"Version": version,
**metadata,
},
description,
),
f"{dist_info}/WHEEL": METHOD_NAME(
{
"Wheel-Version": "1.0",
"Generator": "anki write_wheel.py",
"Root-Is-Purelib": "false",
"Tag": tag,
}
),
},
)
def merge_sources(contents, root, exclude):
root = Path(root)
for path in root.glob("**/*"):
if path.is_dir() or exclude(path):
continue
path_str = str(path.relative_to(root.parent))
if path_str.endswith(".pyc"):
continue
contents[path_str] = path.read_bytes()
def split_wheel_path(path: str):
path2 = Path(path)
components = path2.stem.split("-", maxsplit=2)
return components
class ExtraRequires:
def __init__(self, name, deps):
self.name = name
self.deps = deps
src_root = sys.argv[1]
generated_root = sys.argv[2]
wheel_path = sys.argv[3]
name, version, tag = split_wheel_path(wheel_path)
def exclude_aqt(path: Path) -> bool:
if path.suffix in [".ui", ".scss", ".map", ".ts"]:
return True
if path.name.startswith("tsconfig"):
return True
if "/aqt/data" in str(path):
return True
return False
def exclude_nothing(path: Path) -> bool:
return False
def extract_requirements(path: Path) -> list[str]:
return path.read_text().splitlines()
if name == "aqt":
exclude = exclude_aqt
else:
exclude = exclude_nothing
contents: dict[str, str] = {}
merge_sources(contents, src_root, exclude)
merge_sources(contents, generated_root, exclude)
all_requires: Sequence[str | ExtraRequires]
if name == "anki":
all_requires = extract_requirements(Path("python/requirements.anki.in"))
entrypoints = None
top_level = None
else:
all_requires = extract_requirements(Path("python/requirements.aqt.in")) + [
"anki==" + version,
ExtraRequires(
"qt5",
[
"pyqt5>=5.14",
"pyqtwebengine",
],
),
ExtraRequires(
"qt6",
[
"pyqt6>=6.2",
"pyqt6-webengine>=6.2",
],
),
]
entrypoints = ["anki = aqt:run"]
top_level = ["aqt", "_aqt"]
# reproducible builds
os.environ["SOURCE_DATE_EPOCH"] = "0"
write_wheel(
wheel_path,
name=name,
version=version,
tag=tag,
metadata={
"License": "AGPL-3",
"Classifier": [
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
],
"Requires-Python": ">=3.9",
"_dependencies": all_requires,
},
description="Please see https://apps.ankiweb.net\n\n",
contents=contents,
entrypoints=entrypoints,
top_level=top_level,
)
|
1,556 |
get app engine service iam policy output
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetAppEngineServiceIamPolicyResult',
'AwaitableGetAppEngineServiceIamPolicyResult',
'get_app_engine_service_iam_policy',
'get_app_engine_service_iam_policy_output',
]
@pulumi.output_type
class GetAppEngineServiceIamPolicyResult:
"""
A collection of values returned by getAppEngineServiceIamPolicy.
"""
def __init__(__self__, app_id=None, etag=None, id=None, policy_data=None, project=None, service=None):
if app_id and not isinstance(app_id, str):
raise TypeError("Expected argument 'app_id' to be a str")
pulumi.set(__self__, "app_id", app_id)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if policy_data and not isinstance(policy_data, str):
raise TypeError("Expected argument 'policy_data' to be a str")
pulumi.set(__self__, "policy_data", policy_data)
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
pulumi.set(__self__, "project", project)
if service and not isinstance(service, str):
raise TypeError("Expected argument 'service' to be a str")
pulumi.set(__self__, "service", service)
@property
@pulumi.getter(name="appId")
def app_id(self) -> str:
return pulumi.get(self, "app_id")
@property
@pulumi.getter
def etag(self) -> str:
"""
(Computed) The etag of the IAM policy.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> str:
"""
(Required only by `iap.AppEngineServiceIamPolicy`) The policy data generated by
a `organizations_get_iam_policy` data source.
"""
return pulumi.get(self, "policy_data")
@property
@pulumi.getter
def project(self) -> str:
return pulumi.get(self, "project")
@property
@pulumi.getter
def service(self) -> str:
return pulumi.get(self, "service")
class AwaitableGetAppEngineServiceIamPolicyResult(GetAppEngineServiceIamPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAppEngineServiceIamPolicyResult(
app_id=self.app_id,
etag=self.etag,
id=self.id,
policy_data=self.policy_data,
project=self.project,
service=self.service)
def get_app_engine_service_iam_policy(app_id: Optional[str] = None,
project: Optional[str] = None,
service: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAppEngineServiceIamPolicyResult:
"""
Retrieves the current IAM policy data for appengineservice
## example
```python
import pulumi
import pulumi_gcp as gcp
policy = gcp.iap.get_app_engine_service_iam_policy(app_id=google_app_engine_standard_app_version["version"]["project"],
project=google_app_engine_standard_app_version["version"]["project"],
service=google_app_engine_standard_app_version["version"]["service"])
```
:param str app_id: Id of the App Engine application. Used to find the parent resource to bind the IAM policy to
:param str project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
:param str service: Service id of the App Engine application Used to find the parent resource to bind the IAM policy to
"""
__args__ = dict()
__args__['appId'] = app_id
__args__['project'] = project
__args__['service'] = service
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('gcp:iap/getAppEngineServiceIamPolicy:getAppEngineServiceIamPolicy', __args__, opts=opts, typ=GetAppEngineServiceIamPolicyResult).value
return AwaitableGetAppEngineServiceIamPolicyResult(
app_id=pulumi.get(__ret__, 'app_id'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
policy_data=pulumi.get(__ret__, 'policy_data'),
project=pulumi.get(__ret__, 'project'),
service=pulumi.get(__ret__, 'service'))
@_utilities.lift_output_func(get_app_engine_service_iam_policy)
def METHOD_NAME(app_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
service: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAppEngineServiceIamPolicyResult]:
"""
Retrieves the current IAM policy data for appengineservice
## example
```python
import pulumi
import pulumi_gcp as gcp
policy = gcp.iap.get_app_engine_service_iam_policy(app_id=google_app_engine_standard_app_version["version"]["project"],
project=google_app_engine_standard_app_version["version"]["project"],
service=google_app_engine_standard_app_version["version"]["service"])
```
:param str app_id: Id of the App Engine application. Used to find the parent resource to bind the IAM policy to
:param str project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
:param str service: Service id of the App Engine application Used to find the parent resource to bind the IAM policy to
"""
...
|
1,557 |
test nonstationary mutator
|
import json
from pathlib import Path
import pytest
from nni.common.framework import get_default_framework, set_default_framework
from nni.nas.space import StationaryMutator, Mutator, MutationSampler, GraphModelSpace, ModelStatus, MutatorSequence
from nni.nas.space.mutator import _RandomSampler
from nni.nas.space.graph_op import Operation
@pytest.fixture(autouse=True, scope='module')
def default_framework():
original_framework = get_default_framework()
set_default_framework('tensorflow')
yield
set_default_framework(original_framework)
@pytest.fixture(autouse=True)
def max_pool():
yield Operation.new('MaxPool2D', {'pool_size': 2})
@pytest.fixture(autouse=True)
def avg_pool():
yield Operation.new('AveragePooling2D', {'pool_size': 2})
@pytest.fixture(autouse=True)
def global_pool():
yield Operation.new('GlobalAveragePooling2D')
class DebugSampler(MutationSampler):
def __init__(self):
self.iteration = 0
def choice(self, candidates, mutator, model, index):
idx = (self.iteration + index) % len(candidates)
return candidates[idx]
def mutation_start(self, mutator, model):
self.iteration += 1
class DebugMutator(Mutator):
def __init__(self, ops, label):
super().__init__(label=label)
self.ops = ops
def mutate(self, model):
pool1 = model.graphs['stem'].get_node_by_name('pool1')
op = self.choice(self.ops)
pool1.update_operation(op)
pool2 = model.graphs['stem'].get_node_by_name('pool2')
if op == self.ops[0]:
pool2.update_operation(self.ops[0])
else:
pool2.update_operation(self.choice(self.ops))
class StationaryDebugMutator(StationaryMutator):
def __init__(self, ops, label):
super().__init__(label=label)
self.ops = ops
def mutate(self, model):
pool1 = model.graphs['stem'].get_node_by_name('pool1')
pool1.update_operation(self.choice(self.ops))
pool2 = model.graphs['stem'].get_node_by_name('pool2')
pool2.update_operation(self.choice(self.ops))
@pytest.fixture
def mutator(max_pool, avg_pool, global_pool):
sampler = DebugSampler()
mutator = StationaryDebugMutator(ops=[max_pool, avg_pool, global_pool], label='debug')
mutator.bind_sampler(sampler)
sampler.iteration = 0
return mutator
@pytest.fixture
def mutator1(max_pool, avg_pool, global_pool):
sampler = DebugSampler()
mutator = DebugMutator(ops=[max_pool, avg_pool, global_pool], label='debug')
mutator.bind_sampler(sampler)
sampler.iteration = 0
return mutator
@pytest.fixture
def model0():
json_path = Path(__file__).parent / 'mnist_tensorflow.json'
ir = json.load(json_path.open())
return GraphModelSpace._load(_internal=True, **ir)
def test_dry_run(model0, mutator, max_pool, avg_pool, global_pool):
assert model0.status == ModelStatus.Initialized
candidates, model1 = mutator.dry_run(model0)
assert model0.status == ModelStatus.Initialized
assert model1.status == ModelStatus.Mutating
assert len(candidates) == 2
assert candidates['debug/0'].values == [max_pool, avg_pool, global_pool]
assert candidates['debug/1'].values == [max_pool, avg_pool, global_pool]
def test_mutation(model0, mutator, max_pool, avg_pool, global_pool):
model1 = mutator.apply(model0)
assert _get_pools(model1) == (avg_pool, global_pool)
model2 = mutator.apply(model1)
assert _get_pools(model2) == (global_pool, max_pool)
assert len(model2.history) == 2
assert model2.history[0].from_ == model0
assert model2.history[0].to == model1
assert model2.history[1].from_ == model1
assert model2.history[1].to == model2
assert model2.history[0].mutator == mutator
assert model2.history[1].mutator == mutator
assert _get_pools(model0) == (max_pool, max_pool)
assert _get_pools(model1) == (avg_pool, global_pool)
def test_mutator_sequence(model0, mutator, max_pool, avg_pool):
mutators = MutatorSequence([mutator])
with pytest.raises(AssertionError, match='bound to a model'):
mutators.simplify()
with mutators.bind_model(model0):
assert list(mutators.simplify().keys()) == ['debug/0', 'debug/1']
with mutators.bind_model(model0):
model1 = mutators.freeze({'debug/0': avg_pool, 'debug/1': max_pool})
assert model1.status == ModelStatus.Mutating
assert len(model1.history) == 1
assert _get_pools(model1) == (avg_pool, max_pool)
def test_simplify_and_random(model0, mutator, max_pool, avg_pool, global_pool):
model0.mutators = MutatorSequence([mutator])
assert list(model0.simplify().keys()) == ['debug/0', 'debug/1']
mutator.sampler = None
model1 = model0.random()
assert model1.status == ModelStatus.Frozen
assert list(model1.sample.keys()) == ['debug/0', 'debug/1']
assert model1.sample['debug/0'] in [max_pool, avg_pool, global_pool]
assert model1.sample['debug/1'] in [max_pool, avg_pool, global_pool]
def METHOD_NAME(model0, mutator1, max_pool, avg_pool, global_pool):
model = model0
for _ in range(10):
model = mutator1.apply(model)
pools = _get_pools(model)
if pools[0] == max_pool:
assert pools[1] == max_pool
else:
assert pools[0] in [avg_pool, global_pool]
assert pools[1] in [max_pool, avg_pool, global_pool]
def test_nonstationary_mutator_simplify(model0, mutator1, max_pool, avg_pool, global_pool):
model0.mutators = MutatorSequence([mutator1])
assert model0.simplify() == {'debug': mutator1}
mutator1.sampler = None
model1 = model0.random()
assert model1.status == ModelStatus.Frozen
assert isinstance(model1.sample['debug'], _RandomSampler)
pools = _get_pools(model1)
assert pools[0] in [max_pool, avg_pool, global_pool]
assert pools[1] in [max_pool, avg_pool, global_pool]
def _get_pools(model):
pool1 = model.graphs['stem'].get_node_by_name('pool1').operation
pool2 = model.graphs['stem'].get_node_by_name('pool2').operation
return pool1, pool2
|
1,558 |
test 3 tensorflow mnist targeted
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module tests the Pixel Attack.
The Pixel Attack is a generalisation of One Pixel Attack.
| One Pixel Attack Paper link:
https://ieeexplore.ieee.org/abstract/document/8601309/citations#citations
(arXiv link: https://arxiv.org/pdf/1710.08864.pdf)
| Pixel Attack Paper link:
https://arxiv.org/abs/1906.06026
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import numpy as np
from art.attacks.evasion.pixel_threshold import PixelAttack
from art.estimators.estimator import BaseEstimator, NeuralNetworkMixin
from art.estimators.classification.classifier import ClassifierMixin
from tests.utils import TestBase
from tests.utils import get_image_classifier_tf, get_image_classifier_pt # , get_image_classifier_kr
from tests.attacks.utils import backend_test_classifier_type_check_fail
logger = logging.getLogger(__name__)
class TestPixelAttack(TestBase):
"""
A unittest class for testing the Pixel Attack.
This module tests the Pixel Attack.
The Pixel Attack is a generalisation of One Pixel Attack.
| One Pixel Attack Paper link:
https://ieeexplore.ieee.org/abstract/document/8601309/citations#citations
(arXiv link: https://arxiv.org/pdf/1710.08864.pdf)
| Pixel Attack Paper link:
https://arxiv.org/abs/1906.06026
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.n_test = 2
cls.x_test_mnist = cls.x_test_mnist[0 : cls.n_test]
cls.y_test_mnist = cls.y_test_mnist[0 : cls.n_test]
# def test_6_keras_mnist(self):
# """
# Test with the KerasClassifier. (Untargeted Attack)
# :return:
# """
#
# classifier = get_image_classifier_kr()
# self._test_attack(classifier, self.x_test_mnist, self.y_test_mnist, False)
# def test_2_tensorflow_mnist(self):
# """
# Test with the TensorFlowClassifier. (Untargeted Attack)
# :return:
# """
# classifier, sess = get_image_classifier_tf()
# self._test_attack(classifier, self.x_test_mnist, self.y_test_mnist, False)
def test_4_pytorch_mnist(self):
"""
Test with the PyTorchClassifier. (Untargeted Attack)
:return:
"""
x_test = np.reshape(self.x_test_mnist, (self.x_test_mnist.shape[0], 1, 28, 28)).astype(np.float32)
classifier = get_image_classifier_pt()
self._test_attack(classifier, x_test, self.y_test_mnist, False)
def test_8_pytorch_mnist_single_sample(self):
"""
Test with the PyTorchClassifier on a single sample. (Untargeted Attack)
:return:
"""
x_test = np.reshape(self.x_test_mnist[1], (1, 1, 28, 28)).astype(np.float32)
classifier = get_image_classifier_pt()
self._test_attack(classifier, x_test, self.y_test_mnist[[1]], False)
# def test_7_keras_mnist_targeted(self):
# """
# Test with the KerasClassifier. (Targeted Attack)
# :return:
# """
# classifier = get_image_classifier_kr()
# self._test_attack(classifier, self.x_test_mnist, self.y_test_mnist, True)
def METHOD_NAME(self):
"""
Test with the TensorFlowClassifier. (Targeted Attack)
:return:
"""
classifier, sess = get_image_classifier_tf()
self._test_attack(classifier, self.x_test_mnist, self.y_test_mnist, True)
# def test_5_pytorch_mnist_targeted(self):
# """
# Test with the PyTorchClassifier. (Targeted Attack)
# :return:
# """
# x_test = np.reshape(self.x_test_mnist, (self.x_test_mnist.shape[0], 1, 28, 28)).astype(np.float32)
# classifier = get_image_classifier_pt()
# self._test_attack(classifier, x_test, self.y_test_mnist, True)
def _test_attack(self, classifier, x_test, y_test, targeted):
"""
Test with the Pixel Attack
:return:
"""
x_test_original = x_test.copy()
if targeted:
# Generate random target classes
class_y_test = np.argmax(y_test, axis=1)
nb_classes = np.unique(class_y_test).shape[0]
targets = np.random.randint(nb_classes, size=self.n_test)
for i in range(self.n_test):
if class_y_test[i] == targets[i]:
targets[i] -= 1
else:
targets = y_test
for th in [None, 128]:
for es in [0, 1]:
df = PixelAttack(classifier, th=th, es=es, max_iter=20, targeted=targeted, verbose=False)
x_test_adv = df.generate(x_test_original, targets)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, x_test, x_test_adv)
self.assertFalse((0.0 == x_test_adv).all())
# Check that x_test has not been modified by attack and classifier
self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)
def test_check_params(self):
ptc = get_image_classifier_pt(from_logits=True)
with self.assertRaises(ValueError):
_ = PixelAttack(ptc, th=-1)
with self.assertRaises(ValueError):
_ = PixelAttack(ptc, es=1.0)
with self.assertRaises(ValueError):
_ = PixelAttack(ptc, targeted="true")
with self.assertRaises(ValueError):
_ = PixelAttack(ptc, verbose="true")
with self.assertRaises(ValueError):
ptc._clip_values = None
_ = PixelAttack(ptc)
def test_1_classifier_type_check_fail(self):
backend_test_classifier_type_check_fail(PixelAttack, [BaseEstimator, NeuralNetworkMixin, ClassifierMixin])
if __name__ == "__main__":
unittest.main()
|
1,559 |
reddy chatterji register
|
import logging
import numpy as np
from skimage.filters import difference_of_gaussians, window
from skimage.transform import rotate, warp_polar
from aspire.numeric import fft
from aspire.utils.coor_trans import grid_2d
logger = logging.getLogger(__name__)
__cache = dict()
def _phase_cross_correlation(img0, img1):
"""
# Adapted from skimage.registration.phase_cross_correlation
:param img0: Fixed image.
:param img1: Translated image.
:returns: (cross-correlation magnitudes (2D array), shifts)
"""
# Cache img0 transform, this saves n_classes*(n_nbor-1) transforms
# Note we use the `id` because ndarray are unhashable
key = id(img0)
if key not in __cache:
__cache[key] = fft.fft2(img0)
src_f = __cache[key]
target_f = fft.fft2(img1)
# Whole-pixel shifts - Compute cross-correlation by an IFFT
shape = src_f.shape
image_product = src_f * target_f.conj()
cross_correlation = fft.ifft2(image_product)
# Locate maximum
maxima = np.unravel_index(
np.argmax(np.abs(cross_correlation)), cross_correlation.shape
)
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.array(maxima, dtype=np.float64)
shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]
return np.abs(cross_correlation), shifts
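# Minimal usage sketch (hypothetical arrays): for two images of equal shape, the second
# return value is the (row, col) shift that re-aligns img1 with img0, wrapped around the
# array midpoint so it can be negative.
#
#   ref = np.random.rand(64, 64)
#   moved = np.roll(ref, (3, -5), axis=(0, 1))
#   _, shift = _phase_cross_correlation(ref, moved)
#   # for this circular shift, `shift` comes back as [-3., 5.]; rolling `moved` by these
#   # amounts (as done in the registration loop below) restores the alignment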
def METHOD_NAME(
images, reflection, mask=None, do_cross_corr_translations=True, dtype=None
):
"""
Compute the Reddy Chatterji method registering images[1:] to images[0].
    This differs from papers and published scikit implementations by
    computing the fixed base image[0] pipeline once and then reusing it.
:param images: Image data (m_img, L, L)
:param reflection: Image reflections (m_img,)
:param mask: Support of image. Defaults to disk with radius images.shape[-1]//2.
    :param do_cross_corr_translations: Solve translations by using the cross correlation (log polar) method.
:param dtype: Specify dtype. Defaults to infer from images.dtype
:returns: (rotations, shifts, correlations) corresponding to `images`
"""
if mask is None:
L = images.shape[-1]
mask = grid_2d(L, normalized=False)["r"] < L // 2
if dtype is None:
dtype = images.dtype
# Result arrays
M = len(images)
rotations = np.zeros(M, dtype=dtype)
correlations = np.full(M, -np.inf, dtype=dtype)
shifts = np.zeros((M, 2), dtype=int)
# De-Mean
images = images - images.mean(axis=(-1, -2))[:, np.newaxis, np.newaxis]
# Precompute fixed_img data used repeatedly in the loop below.
fixed_img = images[0]
# Difference of Gaussians (Band Filter)
fixed_img_dog = difference_of_gaussians(fixed_img, 1, 4)
# Window Images (Fix spectral boundary)
wfixed_img = fixed_img_dog * window("hann", fixed_img.shape)
# Transform image to Fourier space
fixed_img_fs = np.abs(fft.fftshift(fft.fft2(wfixed_img))) ** 2
# Compute Log Polar Transform
radius = fixed_img_fs.shape[0] // 8 # Low Pass
warped_fixed_img_fs = warp_polar(
fixed_img_fs,
radius=radius,
output_shape=fixed_img_fs.shape,
scaling="log",
)
# Only use half of FFT, because it's symmetrical
warped_fixed_img_fs = warped_fixed_img_fs[: fixed_img_fs.shape[0] // 2, :]
# Now prepare for rotating original images,
# and searching for translations.
# We start back at the raw fixed_img.
twfixed_img = fixed_img * window("hann", fixed_img.shape)
# Register image `m` against images[0]
for m in range(1, len(images)):
# Get the image to register
regis_img = images[m]
# Reflect images when necessary
if reflection[m]:
regis_img = np.flipud(regis_img)
# Difference of Gaussians (Band Filter)
regis_img_dog = difference_of_gaussians(regis_img, 1, 4)
# Window Images (Fix spectral boundary)
wregis_img = regis_img_dog * window("hann", regis_img.shape)
# Transform image to Fourier space
regis_img_fs = np.abs(fft.fftshift(fft.fft2(wregis_img))) ** 2
# Compute Log Polar Transform
warped_regis_img_fs = warp_polar(
regis_img_fs,
radius=radius, # Low Pass
output_shape=fixed_img_fs.shape,
scaling="log",
)
# Only use half of FFT, because it's symmetrical
warped_regis_img_fs = warped_regis_img_fs[: fixed_img_fs.shape[0] // 2, :]
# Compute the Cross_Correlation to estimate rotation
        # Note that _phase_cross_correlation uses the magnitudes (abs()),
        # i.e. it is using both frequency and phase information.
cross_correlation, _ = _phase_cross_correlation(
warped_fixed_img_fs, warped_regis_img_fs
)
# Rotating Cartesian space translates the angular log polar component.
# Scaling Cartesian space translates the radial log polar component.
        # In common image registration problems, both components are used
# to simultaneously estimate scaling and rotation.
# Since we are not currently concerned with scaling transformation,
# disregard the second axis of the `cross_correlation` returned by
# `_phase_cross_correlation`.
cross_correlation_score = cross_correlation[:, 0].ravel()
# Recover the angle from index representing maximal cross_correlation
recovered_angle_degrees = (360 / regis_img_fs.shape[0]) * np.argmax(
cross_correlation_score
)
# The recovered angle represents an estimate of the rotation from reference to image[m].
# The registration angle for image[m],
# the angle to apply to image[m] to register with reference,
# would be the negation of this,
r = -1 * recovered_angle_degrees
# For now, try the hack below, attempting two cases ...
# Some papers mention running entire algos /twice/,
# when admitting reflections, so this hack is not
# the worst you could do :).
# Hack
regis_img_estimated = rotate(regis_img, r)
regis_img_rotated_p180 = rotate(regis_img, r + 180)
da = np.dot(fixed_img[mask], regis_img_estimated[mask])
db = np.dot(fixed_img[mask], regis_img_rotated_p180[mask])
if db > da:
regis_img_estimated = regis_img_rotated_p180
r += 180
# Assign estimated rotations results
rotations[m] = r * np.pi / 180 # Convert to radians
if do_cross_corr_translations:
# Prepare for searching over translations using cross-correlation with the rotated image.
twregis_img = regis_img_estimated * window("hann", regis_img.shape)
cross_correlation, shift = _phase_cross_correlation(
twfixed_img, twregis_img
)
# Compute the shifts as integer number of pixels,
shift_x, shift_y = int(shift[1]), int(shift[0])
# then apply the shifts
regis_img_estimated = np.roll(regis_img_estimated, shift_y, axis=0)
regis_img_estimated = np.roll(regis_img_estimated, shift_x, axis=1)
# Assign estimated shift to results
shifts[m] = shift[::-1].astype(int)
else:
shift = None # For logger line
# Estimated `corr` metric
corr = np.dot(fixed_img[mask], regis_img_estimated[mask])
correlations[m] = corr
# Cleanup some cached stuff for this class
__cache.pop(id(warped_fixed_img_fs), None)
__cache.pop(id(twfixed_img), None)
return rotations, shifts, correlations
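# Usage sketch (hypothetical data): register a small stack against its first image.
#
#   imgs = np.random.rand(4, 64, 64)
#   refl = np.zeros(4, dtype=bool)
#   rots, shifts, corrs = METHOD_NAME(imgs, refl)
#
# Entry 0 is the reference: rotations are returned in radians, shifts as integer (x, y)
# pixel offsets, and correlations as the masked dot product against images[0].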
|
1,560 |
db resource
|
# ruff: isort: skip_file
from dagster import ResourceDefinition, graph, job
# start_resource_example
from dagster import resource
class ExternalCerealFetcher:
def fetch_new_cereals(self, start_ts, end_ts):
pass
@resource
def cereal_fetcher(init_context):
return ExternalCerealFetcher()
# end_resource_example
# start_op_with_resources_example
from dagster import op
CREATE_TABLE_1_QUERY = "create table_1 as select * from table_0"
@op(required_resource_keys={"database"})
def op_requires_resources(context):
context.resources.database.execute_query(CREATE_TABLE_1_QUERY)
# end_op_with_resources_example
# start_resource_testing
from dagster import resource
@resource
def my_resource(_):
return "foo"
def test_my_resource():
assert my_resource(None) == "foo"
# end_resource_testing
# start_resource_testing_with_context
from dagster import build_init_resource_context, resource
@resource(required_resource_keys={"foo"}, config_schema={"bar": str})
def my_resource_requires_context(init_context):
return init_context.resources.foo, init_context.resource_config["bar"]
def test_my_resource_with_context():
init_context = build_init_resource_context(
resources={"foo": "foo_str"}, config={"bar": "bar_str"}
)
assert my_resource_requires_context(init_context) == ("foo_str", "bar_str")
# end_resource_testing_with_context
# start_test_cm_resource
from contextlib import contextmanager
from dagster import resource
@resource
@contextmanager
def my_cm_resource(_):
yield "foo"
def test_cm_resource():
with my_cm_resource(None) as initialized_resource:
assert initialized_resource == "foo"
# end_test_cm_resource
database_resource = ResourceDefinition.mock_resource()
database_resource_a = ResourceDefinition.mock_resource()
database_resource_b = ResourceDefinition.mock_resource()
# start_job_example
from dagster import job
@job(resource_defs={"database": database_resource})
def do_database_stuff_job():
op_requires_resources()
# end_job_example
# start_graph_example
from dagster import graph
@graph
def do_database_stuff():
op_requires_resources()
do_database_stuff_prod = do_database_stuff.to_job(
resource_defs={"database": database_resource_a}
)
do_database_stuff_dev = do_database_stuff.to_job(
resource_defs={"database": database_resource_b}
)
# end_graph_example
class Client:
def __init__(self, _user, _password):
pass
# start_resource_dep_example
from dagster import resource
@resource
def credentials():
return ("bad_username", "easy_password")
@resource(required_resource_keys={"credentials"})
def client(init_context):
username, password = init_context.resources.credentials
return Client(username, password)
# end_resource_dep_example
# start_resource_dep_op
from dagster import graph, op
@op(required_resource_keys={"client"})
def get_client(context):
return context.resources.client
# end_resource_dep_op
# start_resource_dep_job
@job(resource_defs={"credentials": credentials, "client": client})
def connect():
get_client()
# end_resource_dep_job
# start_resource_config
class DatabaseConnection:
def __init__(self, connection: str):
self.connection = connection
@resource(config_schema={"connection": str})
def METHOD_NAME(init_context):
connection = init_context.resource_config["connection"]
return DatabaseConnection(connection)
# end_resource_config
def get_db_connection():
return "foo"
def cleanup_db_connection(_db_conn):
pass
# start_cm_resource
from contextlib import contextmanager
@resource
@contextmanager
def db_connection():
try:
db_conn = get_db_connection()
yield db_conn
finally:
cleanup_db_connection(db_conn)
# end_cm_resource
# start_cm_resource_op
@op(required_resource_keys={"db_connection"})
def use_db_connection(context):
db_conn = context.resources.db_connection
...
# end_cm_resource_op
@job
def the_job():
...
def get_the_db_connection(_):
...
# start_build_resources_example
from dagster import resource, build_resources
@resource
def the_credentials():
...
@resource(required_resource_keys={"credentials"})
def the_db_connection(init_context):
get_the_db_connection(init_context.resources.credentials)
def uses_db_connection():
with build_resources(
{"db_connection": the_db_connection, "credentials": the_credentials}
) as resources:
conn = resources.db_connection
...
# end_build_resources_example
def do_something_with_resource(_):
pass
# start_asset_use_resource
from dagster import asset
@asset(required_resource_keys={"foo"})
def asset_requires_resource(context):
do_something_with_resource(context.resources.foo)
# end_asset_use_resource
@resource
def foo_resource():
...
# start_asset_provide_resource
from dagster import Definitions
defs = Definitions(
assets=[asset_requires_resource],
resources={"foo": foo_resource},
)
# end_asset_provide_resource
# start_asset_provide_resource_using_repository
from dagster import repository, with_resources
@repository
def repo():
return [
*with_resources(
definitions=[asset_requires_resource],
resource_defs={"foo": foo_resource},
)
]
# end_asset_provide_resource_using_repository
|
1,561 |
test q data stream protocol pack unpack
|
import asyncio
import json
import struct
from contextlib import asynccontextmanager, closing
from socket import socketpair
import pytest
from hypothesis import example, given, settings
from hypothesis import strategies as st
from server.protocol import (
DisconnectedError,
QDataStreamProtocol,
SimpleJsonProtocol
)
@pytest.fixture(scope="session")
def qstream_protocol_context():
@asynccontextmanager
async def make_protocol():
rsock, wsock = socketpair()
with closing(wsock):
reader, writer = await asyncio.open_connection(sock=rsock)
proto = QDataStreamProtocol(reader, writer)
yield proto
await proto.close()
return make_protocol
@pytest.fixture
def socket_pair():
"""A pair of connected sockets."""
rsock, wsock = socketpair()
with closing(wsock):
yield rsock, wsock
@pytest.fixture
async def reader_writer(socket_pair):
"""A connected StreamReader, StreamWriter pair"""
rsock, _ = socket_pair
# Socket closed by socket_pair fixture
return await asyncio.open_connection(sock=rsock)
@pytest.fixture
def reader(reader_writer):
reader, _ = reader_writer
return reader
@pytest.fixture
def writer(reader_writer):
_, writer = reader_writer
return writer
@pytest.fixture
async def qstream_protocol(reader, writer):
proto = QDataStreamProtocol(reader, writer)
yield proto
await proto.close()
@pytest.fixture(params=(QDataStreamProtocol, SimpleJsonProtocol))
async def protocol(request, reader, writer):
proto = request.param(reader, writer)
yield proto
await proto.close()
@pytest.fixture
async def unix_srv():
async def do_nothing(client_reader, client_writer):
with closing(client_writer):
await client_reader.read()
srv = await asyncio.start_unix_server(do_nothing, "/tmp/test.sock")
with closing(srv):
yield srv
await srv.wait_closed()
@pytest.fixture
async def unix_protocol(unix_srv):
(reader, writer) = await asyncio.open_unix_connection("/tmp/test.sock")
proto = QDataStreamProtocol(reader, writer)
yield proto
await proto.close()
def st_messages():
"""Strategy for generating internal message dictionaries"""
return st.dictionaries(
keys=st.text(),
values=st.one_of(
st.integers(),
st.text(),
st.lists(st.one_of(st.integers(), st.text()))
)
)
async def test_types():
with pytest.raises(NotImplementedError):
QDataStreamProtocol.pack_message({"Not": ["a", "string"]})
async def test_QDataStreamProtocol_recv_small_message(qstream_protocol, reader):
data = QDataStreamProtocol.pack_block(b"".join([
QDataStreamProtocol.pack_qstring('{"some_header": true}'),
QDataStreamProtocol.pack_qstring("Goodbye")
]))
reader.feed_data(data)
message = await qstream_protocol.read_message()
assert message == {"some_header": True, "legacy": ["Goodbye"]}
async def test_QDataStreamProtocol_recv_malformed_message(qstream_protocol, reader):
reader.feed_data(b"\0")
reader.feed_eof()
with pytest.raises(asyncio.IncompleteReadError):
await qstream_protocol.read_message()
async def test_QDataStreamProtocol_recv_large_array(qstream_protocol, reader):
reader.feed_data(QDataStreamProtocol.pack_block(b"".join(
[QDataStreamProtocol.pack_qstring('{"some_header": true}')] +
[QDataStreamProtocol.pack_qstring(str(i)) for i in range(1520)])))
reader.feed_eof()
message = await qstream_protocol.read_message()
assert message == {"some_header": True, "legacy": [str(i) for i in range(1520)]}
async def test_QDataStreamProtocol_unpacks_evil_qstring(qstream_protocol, reader):
reader.feed_data(struct.pack("!I", 64))
reader.feed_data(b'\x00\x00\x004\x00{\x00"\x00c\x00o\x00m\x00m\x00a\x00n\x00d\x00"\x00:\x00 \x00"\x00a\x00s\x00k\x00_\x00s\x00e\x00s\x00s\x00i\x00o\x00n\x00"\x00}\xff\xff\xff\xff\xff\xff\xff\xff')
reader.feed_eof()
message = await qstream_protocol.read_message()
assert message == {"command": "ask_session"}
@given(message=st_messages())
@example(message={
"Some": "crazy",
"Message": ["message", 10],
"with": 1000
})
@settings(max_examples=300)
async def METHOD_NAME(
qstream_protocol_context,
message
):
async with qstream_protocol_context() as protocol:
protocol.reader.feed_data(
QDataStreamProtocol.pack_message(json.dumps(message))
)
assert message == await protocol.read_message()
@given(message=st_messages())
@example(message={
"Some": "crazy",
"Message": ["message", 10],
"with": 1000
})
async def test_QDataStreamProtocol_deterministic(message):
assert (
QDataStreamProtocol.encode_message(message) ==
QDataStreamProtocol.encode_message(message) ==
QDataStreamProtocol.encode_message(message)
)
async def test_QDataStreamProtocol_encode_ping_pong():
assert QDataStreamProtocol.encode_message({"command": "ping"}) == \
b"\x00\x00\x00\x0c\x00\x00\x00\x08\x00P\x00I\x00N\x00G"
assert QDataStreamProtocol.encode_message({"command": "pong"}) == \
b"\x00\x00\x00\x0c\x00\x00\x00\x08\x00P\x00O\x00N\x00G"
async def test_send_message_simultaneous_writes(unix_protocol):
msg = {
"command": "test",
"data": "*" * (4096*4)
}
# If drain calls are not synchronized, then this will raise an
# AssertionError from within asyncio
await asyncio.gather(*(unix_protocol.send_message(msg) for i in range(20)))
async def test_send_messages_simultaneous_writes(unix_protocol):
msg = {
"command": "test",
"data": "*" * (4096*4)
}
# If drain calls are not synchronized, then this will raise an
# AssertionError from within asyncio
await asyncio.gather(*(
unix_protocol.send_messages((msg, msg)) for i in range(20))
)
async def test_send_raw_simultaneous_writes(unix_protocol):
msg = b"*" * (4096*4)
# If drain calls are not synchronized, then this will raise an
# AssertionError from within asyncio
await asyncio.gather(*(unix_protocol.send_raw(msg) for i in range(20)))
async def test_send_connected_attribute(unix_protocol, unix_srv):
unix_protocol.reader.set_exception(
RuntimeError("Unit test triggered exception")
)
with pytest.raises(DisconnectedError):
await unix_protocol.send_message({"Hello": "World"})
assert unix_protocol.is_connected() is False
async def test_send_when_disconnected(protocol):
await protocol.close()
assert protocol.is_connected() is False
with pytest.raises(DisconnectedError):
await protocol.send_message({"some": "message"})
with pytest.raises(DisconnectedError):
await protocol.send_messages([
{"some": "message"},
{"some": "other message"}
])
async def test_read_when_disconnected(protocol):
await protocol.close()
assert protocol.is_connected() is False
with pytest.raises(DisconnectedError):
await protocol.read_message()
|
1,562 |
guide
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
r"""
Example: Toy Mixture Model with Discrete Enumeration
====================================================
A toy mixture model to provide a simple example for implementing discrete enumeration::
(A) -> [B] -> (C)
``A`` is an observed Bernoulli variable with Beta prior. ``B`` is a hidden variable which
is a mixture of two Bernoulli distributions (with Beta priors), chosen by ``A`` being true or false.
``C`` is observed, and like ``B``, is a mixture of two Bernoulli distributions (with Beta priors),
chosen by ``B`` being true or false. There is a plate over the three variables for ``num_obs``
independent observations of data.
Because ``B`` is hidden and discrete we wish to marginalize it out of the model. This is done by:
1. marking the model with ``@config_enumerate``
2. marking the ``B`` sample site in the model with ``infer={"enumerate": "parallel"}``
3. passing ``SVI`` the ``TraceEnum_ELBO`` loss function
"""
import argparse
import matplotlib.pyplot as plt
from jax import random
import jax.numpy as jnp
import optax
import numpyro
from numpyro import handlers
from numpyro.contrib.funsor import config_enumerate
import numpyro.distributions as dist
from numpyro.distributions import constraints
from numpyro.infer import SVI, TraceEnum_ELBO
from numpyro.ops.indexing import Vindex
def main(args):
num_obs = args.num_obs
num_steps = args.num_steps
prior, CPDs, data = handlers.seed(generate_data, random.PRNGKey(0))(num_obs)
posterior_params = train(prior, data, num_steps, num_obs)
evaluate(CPDs, posterior_params)
def generate_data(num_obs):
# domain = [False, True]
prior = {
"A": jnp.array([1.0, 10.0]),
"B": jnp.array([[10.0, 1.0], [1.0, 10.0]]),
"C": jnp.array([[10.0, 1.0], [1.0, 10.0]]),
}
CPDs = {
"p_A": numpyro.sample("p_A", dist.Beta(prior["A"][0], prior["A"][1])),
"p_B": numpyro.sample("p_B", dist.Beta(prior["B"][:, 0], prior["B"][:, 1])),
"p_C": numpyro.sample("p_C", dist.Beta(prior["C"][:, 0], prior["C"][:, 1])),
}
data = {"A": numpyro.sample("A", dist.Bernoulli(jnp.ones(num_obs) * CPDs["p_A"]))}
data["B"] = numpyro.sample("B", dist.Bernoulli(CPDs["p_B"][data["A"]]))
data["C"] = numpyro.sample("C", dist.Bernoulli(CPDs["p_C"][data["B"]]))
return prior, CPDs, data
@config_enumerate
def model(prior, obs, num_obs):
p_A = numpyro.sample("p_A", dist.Beta(1, 1))
p_B = numpyro.sample("p_B", dist.Beta(jnp.ones(2), jnp.ones(2)).to_event(1))
p_C = numpyro.sample("p_C", dist.Beta(jnp.ones(2), jnp.ones(2)).to_event(1))
with numpyro.plate("data_plate", num_obs):
A = numpyro.sample("A", dist.Bernoulli(p_A), obs=obs["A"])
# Vindex used to ensure proper indexing into the enumerated sample sites
B = numpyro.sample(
"B",
dist.Bernoulli(Vindex(p_B)[A]),
infer={"enumerate": "parallel"},
)
numpyro.sample("C", dist.Bernoulli(Vindex(p_C)[B]), obs=obs["C"])
def METHOD_NAME(prior, obs, num_obs):
a = numpyro.param("a", prior["A"], constraint=constraints.positive)
numpyro.sample("p_A", dist.Beta(a[0], a[1]))
b = numpyro.param("b", prior["B"], constraint=constraints.positive)
numpyro.sample("p_B", dist.Beta(b[:, 0], b[:, 1]).to_event(1))
c = numpyro.param("c", prior["C"], constraint=constraints.positive)
numpyro.sample("p_C", dist.Beta(c[:, 0], c[:, 1]).to_event(1))
def train(prior, data, num_steps, num_obs):
elbo = TraceEnum_ELBO()
svi = SVI(model, METHOD_NAME, optax.adam(learning_rate=0.01), loss=elbo)
svi_result = svi.run(random.PRNGKey(0), num_steps, prior, data, num_obs)
plt.figure()
plt.plot(svi_result.losses)
plt.show()
posterior_params = svi_result.params.copy()
posterior_params["a"] = posterior_params["a"][
None, :
] # reshape to same as other variables
return posterior_params
def evaluate(CPDs, posterior_params):
true_p_A, pred_p_A = get_true_pred_CPDs(CPDs["p_A"], posterior_params["a"])
true_p_B, pred_p_B = get_true_pred_CPDs(CPDs["p_B"], posterior_params["b"])
true_p_C, pred_p_C = get_true_pred_CPDs(CPDs["p_C"], posterior_params["c"])
print("\np_A = True")
print("actual: ", true_p_A)
print("predicted:", pred_p_A)
print("\np_B = True | A = False/True")
print("actual: ", true_p_B)
print("predicted:", pred_p_B)
print("\np_C = True | B = False/True")
print("actual: ", true_p_C)
print("predicted:", pred_p_C)
def get_true_pred_CPDs(CPD, posterior_param):
true_p = CPD
pred_p = posterior_param[:, 0] / jnp.sum(posterior_param, axis=1)
return true_p, pred_p
if __name__ == "__main__":
assert numpyro.__version__.startswith("0.13.0")
parser = argparse.ArgumentParser(description="Toy mixture model")
parser.add_argument("-n", "--num-steps", default=4000, type=int)
parser.add_argument("-o", "--num-obs", default=10000, type=int)
args = parser.parse_args()
main(args)
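# Example invocation (the module filename is assumed; any name works):
#
#   python toy_mixture_model_discrete_enumeration.py --num-steps 4000 --num-obs 10000
#
# This runs SVI with TraceEnum_ELBO, plots the loss curve, and prints the true versus
# predicted conditional probabilities for p_A, p_B and p_C.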
|
1,563 |
get input output texts
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from paddlenlp.transformers.xlm.tokenizer import XLMTokenizer
from tests.testing_utils import slow
from ..test_tokenizer_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = XLMTokenizer
test_fast_tokenizer = False
test_offsets = False
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
self.vocab_file = os.path.join(self.tmpdirname, XLMTokenizer.resource_files_names["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, XLMTokenizer.resource_files_names["merges_file"])
with open(self.vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.merges_file, "w") as fp:
fp.write("\n".join(merges))
def METHOD_NAME(self, tokenizer):
input_text = "lower newer"
output_text = "lower newer"
return input_text, output_text
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "l")
self.assertEqual(vocab_keys[-1], "<special9>")
self.assertEqual(len(vocab_keys), 34)
def test_add_tokens_tokenizer(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer.get_vocab())
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer.get_vocab())
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode(
"aaaaa bbbbbb low cccccccccdddddddd l", return_token_type_ids=None, add_special_tokens=False
)["input_ids"]
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer.get_vocab())
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l",
return_token_type_ids=None,
add_special_tokens=False,
)["input_ids"]
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
def test_consecutive_unk_string(self):
pass
def test_add_tokens(self):
tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
vocab_size = len(tokenizer)
self.assertEqual(tokenizer.add_tokens(""), 0)
self.assertEqual(tokenizer.add_tokens("testoken"), 1)
self.assertEqual(tokenizer.add_tokens(["testoken1", "testtoken2"]), 2)
self.assertEqual(len(tokenizer.get_vocab()), vocab_size + 3)
def test_full_tokenizer(self):
"""Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_2 + [1]
|
1,564 |
is program valid
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
import numpy as np
from functools import partial
from test_elementwise_util import check_broadcast
class TestBitwiseOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
host_places = [
Place(TargetType.Host, PrecisionType.Any, DataLayoutType.NCHW)
]
self.enable_testing_on_place(places=host_places)
def sample_program_configs(self, draw):
input_type = draw(st.sampled_from(["int32", "int64"]))
input_data_x_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=10), min_size=1, max_size=4))
input_data_y_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=10), min_size=1, max_size=4))
        axis = -1 # paddle 2.4.2 only supports -1
input_data_x_shape = draw(st.sampled_from([input_data_x_shape, []]))
input_data_y_shape = draw(st.sampled_from([input_data_y_shape, []]))
assume(
check_broadcast(input_data_x_shape, input_data_y_shape, axis) ==
True)
if axis < 0:
axis = abs(len(input_data_x_shape) - len(
input_data_y_shape)) + axis + 1
def generate_input(*args, **kwargs):
if kwargs["type"] == "bool":
return np.random.choice([True, False],
kwargs["shape"]).astype(bool)
if kwargs["type"] == "uint8":
return np.random.normal(0, 255,
kwargs["shape"]).astype(np.uint8)
if kwargs["type"] == "int8":
return np.random.normal(-128, 127,
kwargs["shape"]).astype(np.int8)
if kwargs["type"] == "int16":
return np.random.normal(-32768, 32767,
kwargs["shape"]).astype(np.int16)
if kwargs["type"] == "int32":
return np.random.normal(-2147483648, 2147483647,
kwargs["shape"]).astype(np.int32)
if kwargs["type"] == "int64":
return np.random.normal(-9223372036854775808,
9223372036854775807,
kwargs["shape"]).astype(np.int64)
bitwise_op = OpConfig(
type="bitwise_xor",
inputs={
"X": ["input_data"],
"Y": ["input_data1"],
},
outputs={"Out": ["output_data"]},
attrs={"axis": axis})
if (input_type == "int32"):
bitwise_op.outputs_dtype = {"output_data": np.int32}
elif (input_type == "int64"):
bitwise_op.outputs_dtype = {"output_data": np.int64}
program_config = ProgramConfig(
ops=[bitwise_op],
weights={},
inputs={
"input_data": TensorConfig(data_gen=partial(
generate_input, type=input_type,
shape=input_data_x_shape)),
"input_data1": TensorConfig(data_gen=partial(
generate_input, type=input_type,
shape=input_data_y_shape)),
},
outputs=["output_data"])
return program_config
def METHOD_NAME(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["bitwise_xor"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=500)
if __name__ == "__main__":
unittest.main(argv=[''])
|
1,565 |
synchronize
|
"""This module contains all OpenCL specific kernel_tuner functions"""
from __future__ import print_function
import time
import numpy as np
from kernel_tuner.backends.backend import GPUBackend
from kernel_tuner.observers.opencl import OpenCLObserver
# embedded in try block to be able to generate documentation
try:
import pyopencl as cl
except ImportError:
cl = None
class OpenCLFunctions(GPUBackend):
"""Class that groups the OpenCL functions on maintains some state about the device"""
def __init__(
self, device=0, platform=0, iterations=7, compiler_options=None, observers=None
):
"""Creates OpenCL device context and reads device properties
:param device: The ID of the OpenCL device to use for benchmarking
:type device: int
:param iterations: The number of iterations to run the kernel during benchmarking, 7 by default.
:type iterations: int
"""
if not cl:
raise ImportError(
"Error: pyopencl not installed, please install e.g. using 'pip install pyopencl'."
)
self.iterations = iterations
# setup context and queue
platforms = cl.get_platforms()
self.ctx = cl.Context(devices=[platforms[platform].get_devices()[device]])
self.queue = cl.CommandQueue(
self.ctx, properties=cl.command_queue_properties.PROFILING_ENABLE
)
self.mf = cl.mem_flags
# inspect device properties
self.max_threads = self.ctx.devices[0].get_info(
cl.device_info.MAX_WORK_GROUP_SIZE
)
self.compiler_options = compiler_options or []
# observer stuff
self.observers = observers or []
self.observers.append(OpenCLObserver(self))
self.event = None
for obs in self.observers:
obs.register_device(self)
# collect environment information
dev = self.ctx.devices[0]
env = dict()
env["platform_name"] = dev.platform.name
env["platform_version"] = dev.platform.version
env["device_name"] = dev.name
env["device_version"] = dev.version
env["opencl_c_version"] = dev.opencl_c_version
env["driver_version"] = dev.driver_version
env["iterations"] = self.iterations
env["compiler_options"] = compiler_options
self.env = env
self.name = dev.name
def ready_argument_list(self, arguments):
"""ready argument list to be passed to the kernel, allocates gpu mem
:param arguments: List of arguments to be passed to the kernel.
The order should match the argument list on the OpenCL kernel.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to an OpenCL kernel.
:rtype: list( pyopencl.Buffer, numpy.int32, ... )
"""
gpu_args = []
for arg in arguments:
# if arg i is a numpy array copy to device
if isinstance(arg, np.ndarray):
gpu_args.append(
cl.Buffer(
self.ctx,
self.mf.READ_WRITE | self.mf.COPY_HOST_PTR,
hostbuf=arg,
)
)
# if not an array, just pass argument along
else:
gpu_args.append(arg)
return gpu_args
def compile(self, kernel_instance):
"""call the OpenCL compiler to compile the kernel, return the device function
        :param kernel_instance: The kernel instance to compile; its `kernel_string`
            attribute holds the OpenCL source and its `name` attribute is the kernel
            function looked up after compilation.
        :type kernel_instance: KernelInstance
:returns: An OpenCL kernel that can be called directly.
:rtype: pyopencl.Kernel
"""
prg = cl.Program(self.ctx, kernel_instance.kernel_string).build(
options=self.compiler_options
)
func = getattr(prg, kernel_instance.name)
return func
def start_event(self):
"""Records the event that marks the start of a measurement
In OpenCL the event is created when the kernel is launched"""
pass
def stop_event(self):
"""Records the event that marks the end of a measurement
In OpenCL the event is created when the kernel is launched"""
pass
def kernel_finished(self):
"""Returns True if the kernel has finished, False otherwise"""
return self.event.get_info(cl.event_info.COMMAND_EXECUTION_STATUS) == 0
def METHOD_NAME(self):
"""Halts execution until device has finished its tasks"""
self.queue.finish()
def run_kernel(self, func, gpu_args, threads, grid):
"""runs the OpenCL kernel passed as 'func'
:param func: An OpenCL Kernel
:type func: pyopencl.Kernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)
:param threads: A tuple listing the number of work items in each dimension of
the work group.
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of work groups in each dimension
of the NDRange.
:type grid: tuple(int, int)
"""
global_size = (grid[0] * threads[0], grid[1] * threads[1], grid[2] * threads[2])
local_size = threads
self.event = func(self.queue, global_size, local_size, *gpu_args)
def memset(self, buffer, value, size):
"""set the memory in allocation to the value in value
:param allocation: An OpenCL Buffer to fill
:type allocation: pyopencl.Buffer
:param value: The value to set the memory to
:type value: a single 32-bit int
:param size: The size of to the allocation unit in bytes
:type size: int
"""
if isinstance(buffer, cl.Buffer):
try:
cl.enqueue_fill_buffer(self.queue, buffer, np.uint32(value), 0, size)
except AttributeError:
src = np.zeros(size, dtype="uint8") + np.uint8(value)
cl.enqueue_copy(self.queue, buffer, src)
def memcpy_dtoh(self, dest, src):
"""perform a device to host memory copy
:param dest: A numpy array in host memory to store the data
:type dest: numpy.ndarray
:param src: An OpenCL Buffer to copy data from
:type src: pyopencl.Buffer
"""
if isinstance(src, cl.Buffer):
cl.enqueue_copy(self.queue, dest, src)
def memcpy_htod(self, dest, src):
"""perform a host to device memory copy
        :param dest: An OpenCL Buffer to copy data to
        :type dest: pyopencl.Buffer
        :param src: A numpy array in host memory containing the data to copy
        :type src: numpy.ndarray
"""
if isinstance(dest, cl.Buffer):
cl.enqueue_copy(self.queue, dest, src)
def copy_constant_memory_args(self, cmem_args):
raise NotImplementedError("PyOpenCL backend does not support constant memory")
def copy_shared_memory_args(self, smem_args):
raise NotImplementedError("PyOpenCL backend does not support shared memory")
def copy_texture_memory_args(self, texmem_args):
raise NotImplementedError("PyOpenCL backend does not support texture memory")
units = {"time": "ms"}
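# Usage sketch (illustrative only; the kernel instance and sizes below are hypothetical):
#
#   dev = OpenCLFunctions(device=0, platform=0, iterations=7)
#   gpu_args = dev.ready_argument_list([np.zeros(1024, dtype=np.float32), np.int32(1024)])
#   func = dev.compile(kernel_instance)   # a kernel_tuner KernelInstance
#   dev.run_kernel(func, gpu_args, threads=(256, 1, 1), grid=(4, 1, 1))
#   dev.METHOD_NAME()                     # block until the command queue is drained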
|
1,566 |
test gridcur serialize
|
"""
tests for code that reads teh old gridcur format
"""
from pathlib import Path
from datetime import datetime
import numpy as np
import pytest
from gnome.environment.environment_objects import (GridCurrent,
FileGridCurrent,
)
import gnome.scripting as gs
test_data_dir = Path(__file__).parent / "sample_data"
test_output_dir = Path(__file__).parent / "sample_output"
def make_gridcur(filename, location="cells"):
"""
this makes a gridcur file, with a quarter circle of currents
Data in the cells
(off the coast of Alabama)
"""
lat = np.linspace(29.0, 30.0, 11)
lon = np.linspace(-88.0, -86.0, 21)
times = [datetime(2020, 7, 14, 12, 0),
datetime(2020, 7, 14, 18, 0),
datetime(2020, 7, 15, 0, 0),
]
vel = 0.5
units = "m/s"
data_type = "currents"
data_u = []
data_v = []
for i in range(len(times)):
v = vel + (i * 0.2)
if location == "cells":
U = np.zeros((len(lon) - 1, len(lat) - 1), dtype=np.float32)
elif location == "nodes":
U = np.zeros((len(lon), len(lat)), dtype=np.float32)
else:
raise ValueError('location must be "cells" or "nodes"')
V = U.copy()
for row in range(U.shape[0]):
for col in range(U.shape[1]):
theta = np.arctan2(row, col)
U[row, col] = v * np.cos(theta)
V[row, col] = v * np.sin(theta)
data_u.append(U)
data_v.append(V)
GridcurCurrent.write_gridcur(filename, data_type, units, times, lon, lat, data_u, data_v)
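# Note: make_gridcur relies on a GridcurCurrent helper (assumed to be provided by
# gnome's gridcur support; it is not imported in this module) and is only used to
# regenerate the sample files referenced below.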
# create a test gridcur file:
CELL_EXAMPLE = test_data_dir / "example_gridcur_on_cells.cur"
# make_gridcur(CELL_EXAMPLE)
NODE_EXAMPLE = test_data_dir / "example_gridcur_on_nodes.cur"
# make_gridcur(NODE_EXAMPLE, "nodes")
def test_nonexistant_filename():
with pytest.raises(ValueError):
current = FileGridCurrent("non_existant_filename.cur")
def test_nonexistant_filename_nc():
with pytest.raises(ValueError):
current = FileGridCurrent("non_existant_filename.nc")
def test_gridcur_in_model():
current = FileGridCurrent(test_data_dir / NODE_EXAMPLE)
mover = gs.CurrentMover(current=current)
start_time = "2020-07-14T12:00"
model = gs.Model(time_step=gs.hours(1),
start_time=start_time,
duration=gs.hours(12),
uncertain=False)
model.movers += mover
spill = gs.grid_spill(bounds=((-88.0, 29.0),
(-86.0, 30.0),
),
resolution=20,
release_time=start_time,
)
model.spills += spill
renderer = gs.Renderer(output_dir=test_output_dir,
image_size=(800, 600),
viewport=((-88.0, 29.0),
(-86.0, 30.0),
),
)
model.outputters += renderer
model.full_run()
def METHOD_NAME():
"""
Can we persist one of these? and remake it from the persisted
location?
"""
filename = str(test_data_dir / NODE_EXAMPLE)
current = FileGridCurrent(filename,
extrapolation_is_allowed=True,
)
print("About to serialize")
print(f"{current.filename}")
serial = current.serialize()
print(f"{serial}")
current2 = FileGridCurrent.deserialize(serial)
# really should test this better, but at least it didn't barf
assert current2.extrapolation_is_allowed
assert current2.filename == filename
@pytest.mark.skip
def test_netcdf_file():
testfile = str(test_data_dir / 'tri_ring.nc')
# create a GridCurrent
current = FileGridCurrent(filename=testfile,
extrapolation_is_allowed=True,
)
assert type(current) == FileGridCurrent
assert isinstance(current, GridCurrent)
# really should test more, but what?
assert current.extrapolation_is_allowed
assert current.units == "m/s"
assert len(current.variables) == 3
@pytest.mark.skip
def test_netcdf_in_model():
"""
the current object works with a model, and produces
something in the rendered output
correct? who knows, but it's running!
"""
# Single timestep, so time doesn't matter.
current = FileGridCurrent(str(test_data_dir / 'tri_ring.nc'))
mover = gs.CurrentMover(current=current)
start_time = "2020-07-14T12:00"
model = gs.Model(time_step=gs.hours(1),
start_time=start_time,
duration=gs.hours(12),
uncertain=False)
model.movers += mover
# From the nodes of the netcdf file
# In [8]: lat[:].min()
# Out[8]: -0.9961946980917455
# In [9]: lat[:].max()
# Out[9]: 0.9961946980917455
# In [10]: lon[:].min()
# Out[10]: -0.9961946980917455
# In [11]: lon[:].max()
# Out[11]: 0.9961946980917455
spill = gs.grid_spill(bounds=((-0.996, -0.996),
(0.996, 0.996),
),
resolution=20,
release_time=start_time,
)
model.spills += spill
renderer = gs.Renderer(output_dir=test_output_dir / "netcdf",
image_size=(800, 600),
viewport=((-0.996, -0.996),
(0.996, 0.996),
),
)
model.outputters += renderer
model.full_run()
|
1,567 |
sanitize id
|
#!/usr/bin/env python3
# License: GPLv3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
import re
from base64 import standard_b64decode
from collections import OrderedDict
from itertools import count
from typing import Callable, Dict, Optional
from .constants import is_macos, logo_png_file
from .fast_data_types import get_boss
from .types import run_once
from .utils import get_custom_window_icon, log_error
NotifyImplementation = Callable[[str, str, str], None]
if is_macos:
from .fast_data_types import cocoa_send_notification
def notify(
title: str,
body: str,
timeout: int = 5000,
application: str = 'kitty',
icon: bool = True,
identifier: Optional[str] = None,
subtitle: Optional[str] = None,
) -> None:
cocoa_send_notification(identifier, title, body, subtitle)
else:
from .fast_data_types import dbus_send_notification
alloc_map: Dict[int, str] = {}
identifier_map: Dict[str, int] = {}
def dbus_notification_created(alloc_id: int, notification_id: int) -> None:
identifier = alloc_map.pop(alloc_id, None)
if identifier is not None:
identifier_map[identifier] = notification_id
def dbus_notification_activated(notification_id: int, action: str) -> None:
rmap = {v: k for k, v in identifier_map.items()}
identifier = rmap.get(notification_id)
if identifier is not None:
notification_activated(identifier)
def notify(
title: str,
body: str,
timeout: int = -1,
application: str = 'kitty',
icon: bool = True,
identifier: Optional[str] = None,
subtitle: Optional[str] = None,
) -> None:
icf = ''
if icon is True:
icf = get_custom_window_icon()[1] or logo_png_file
alloc_id = dbus_send_notification(application, icf, title, body, 'Click to see changes', timeout)
if alloc_id and identifier is not None:
alloc_map[alloc_id] = identifier
def notify_implementation(title: str, body: str, identifier: str) -> None:
notify(title, body, identifier=identifier)
class NotificationCommand:
done: bool = True
identifier: str = '0'
title: str = ''
body: str = ''
actions: str = ''
def __repr__(self) -> str:
return f'NotificationCommand(identifier={self.identifier!r}, title={self.title!r}, body={self.body!r}, actions={self.actions!r}, done={self.done!r})'
def parse_osc_9(raw: str) -> NotificationCommand:
ans = NotificationCommand()
ans.title = raw
return ans
def parse_osc_777(raw: str) -> NotificationCommand:
parts = raw.split(';', 1)
ans = NotificationCommand()
ans.title = parts[0]
if len(parts) > 1:
ans.body = parts[1]
return ans
@run_once
def sanitize_identifier_pat() -> 're.Pattern[str]':
return re.compile(r'[^a-zA-Z0-9-_+.]+')
def METHOD_NAME(v: str) -> str:
return sanitize_identifier_pat().sub('', v)
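# Illustrative example: an identifier such as 'build #42!' is reduced to 'build42',
# because every run of characters outside [a-zA-Z0-9-_+.] is stripped.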
def parse_osc_99(raw: str) -> NotificationCommand:
cmd = NotificationCommand()
metadata, payload = raw.partition(';')[::2]
payload_is_encoded = False
payload_type = 'title'
if metadata:
for part in metadata.split(':'):
try:
k, v = part.split('=', 1)
except Exception:
log_error('Malformed OSC 99: metadata is not key=value pairs')
return cmd
if k == 'p':
payload_type = v
elif k == 'i':
cmd.identifier = METHOD_NAME(v)
elif k == 'e':
payload_is_encoded = v == '1'
elif k == 'd':
cmd.done = v != '0'
elif k == 'a':
cmd.actions += f',{v}'
if payload_type not in ('body', 'title'):
log_error(f'Malformed OSC 99: unknown payload type: {payload_type}')
return NotificationCommand()
if payload_is_encoded:
try:
payload = standard_b64decode(payload).decode('utf-8')
except Exception:
log_error('Malformed OSC 99: payload is not base64 encoded UTF-8 text')
return NotificationCommand()
if payload_type == 'title':
cmd.title = payload
else:
cmd.body = payload
return cmd
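# Illustrative payload for the grammar parsed above: "i=deploy:d=0:p=body;building"
# sets identifier='deploy', marks the command as not yet done, and stores "building"
# as a body chunk to be merged with later OSC 99 escapes.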
def limit_size(x: str) -> str:
if len(x) > 1024:
x = x[:1024]
return x
def merge_osc_99(prev: NotificationCommand, cmd: NotificationCommand) -> NotificationCommand:
if prev.done or prev.identifier != cmd.identifier:
return cmd
cmd.actions = limit_size(f'{prev.actions},{cmd.actions}')
cmd.title = limit_size(prev.title + cmd.title)
cmd.body = limit_size(prev.body + cmd.body)
return cmd
identifier_registry: "OrderedDict[str, RegisteredNotification]" = OrderedDict()
id_counter = count()
class RegisteredNotification:
identifier: str
window_id: int
focus: bool = True
report: bool = False
def __init__(self, cmd: NotificationCommand, window_id: int):
self.window_id = window_id
for x in cmd.actions.strip(',').split(','):
val = not x.startswith('-')
x = x.lstrip('+-')
if x == 'focus':
self.focus = val
elif x == 'report':
self.report = val
self.identifier = cmd.identifier
def register_identifier(identifier: str, cmd: NotificationCommand, window_id: int) -> None:
identifier_registry[identifier] = RegisteredNotification(cmd, window_id)
if len(identifier_registry) > 100:
identifier_registry.popitem(False)
def notification_activated(identifier: str, activated_implementation: Optional[Callable[[str, int, bool, bool], None]] = None) -> None:
if identifier == 'new-version':
from .update_check import notification_activated as do
do()
elif identifier.startswith('test-notify-'):
log_error(f'Test notification {identifier} activated')
else:
r = identifier_registry.pop(identifier, None)
if r is not None and (r.focus or r.report):
if activated_implementation is None:
get_boss().notification_activated(r.identifier, r.window_id, r.focus, r.report)
else:
activated_implementation(r.identifier, r.window_id, r.focus, r.report)
def reset_registry() -> None:
global id_counter
identifier_registry.clear()
id_counter = count()
def notify_with_command(cmd: NotificationCommand, window_id: int, notify_implementation: NotifyImplementation = notify_implementation) -> None:
title = cmd.title or cmd.body
body = cmd.body if cmd.title else ''
if title:
identifier = f'i{next(id_counter)}'
notify_implementation(title, body, identifier)
register_identifier(identifier, cmd, window_id)
def handle_notification_cmd(
osc_code: int,
raw_data: str,
window_id: int,
prev_cmd: NotificationCommand,
notify_implementation: NotifyImplementation = notify_implementation
) -> Optional[NotificationCommand]:
if osc_code == 99:
cmd = merge_osc_99(prev_cmd, parse_osc_99(raw_data))
if cmd.done:
notify_with_command(cmd, window_id, notify_implementation)
cmd = NotificationCommand()
return cmd
if osc_code == 9:
cmd = parse_osc_9(raw_data)
notify_with_command(cmd, window_id, notify_implementation)
return cmd
if osc_code == 777:
cmd = parse_osc_777(raw_data)
notify_with_command(cmd, window_id, notify_implementation)
return cmd
return None
|
1,568 |
fn matches
|
"""
pygments.formatters
~~~~~~~~~~~~~~~~~~~
Pygments formatters.
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.formatters._mapping import FORMATTERS
from pygments.plugin import find_plugin_formatters
from pygments.util import ClassNotFound
__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
_formatter_cache = {} # classes by name
_pattern_cache = {}
def METHOD_NAME(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
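# Illustrative: METHOD_NAME('report.html', '*.html') returns a (truthy) match object,
# and the compiled pattern for '*.html' is cached for subsequent calls.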
def _load_formatters(module_name):
"""Load a formatter (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for formatter_name in mod.__all__:
cls = getattr(mod, formatter_name)
_formatter_cache[cls.name] = cls
def get_all_formatters():
"""Return a generator for all formatter classes."""
# NB: this returns formatter classes, not info like get_all_lexers().
for info in FORMATTERS.values():
if info[1] not in _formatter_cache:
_load_formatters(info[0])
yield _formatter_cache[info[1]]
for _, formatter in find_plugin_formatters():
yield formatter
def find_formatter_class(alias):
"""Lookup a formatter by alias.
Returns None if not found.
"""
for module_name, name, aliases, _, _ in FORMATTERS.values():
if alias in aliases:
if name not in _formatter_cache:
_load_formatters(module_name)
return _formatter_cache[name]
for _, cls in find_plugin_formatters():
if alias in cls.aliases:
return cls
def get_formatter_by_name(_alias, **options):
"""
Return an instance of a :class:`.Formatter` subclass that has `alias` in its
aliases list. The formatter is given the `options` at its instantiation.
Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that
alias is found.
"""
cls = find_formatter_class(_alias)
if cls is None:
raise ClassNotFound("no formatter found for name %r" % _alias)
return cls(**options)
def load_formatter_from_file(filename, formattername="CustomFormatter", **options):
"""
Return a `Formatter` subclass instance loaded from the provided file, relative
to the current directory.
The file is expected to contain a Formatter class named ``formattername``
(by default, CustomFormatter). Users should be very careful with the input, because
this method is equivalent to running ``eval()`` on the input file. The formatter is
given the `options` at its instantiation.
:exc:`pygments.util.ClassNotFound` is raised if there are any errors loading
the formatter.
.. versionadded:: 2.2
"""
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
with open(filename, 'rb') as f:
exec(f.read(), custom_namespace)
# Retrieve the class `formattername` from that namespace
if formattername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
(formattername, filename))
formatter_class = custom_namespace[formattername]
# And finally instantiate it with the options
return formatter_class(**options)
except OSError as err:
raise ClassNotFound('cannot read %s: %s' % (filename, err))
except ClassNotFound:
raise
except Exception as err:
raise ClassNotFound('error when loading custom formatter: %s' % err)
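# Illustrative usage (file and class names are hypothetical):
#   fmt = load_formatter_from_file('my_formatter.py', 'MyFormatter', linenos=True)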
def get_formatter_for_filename(fn, **options):
"""
Return a :class:`.Formatter` subclass instance that has a filename pattern
matching `fn`. The formatter is given the `options` at its instantiation.
Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename
is found.
"""
fn = basename(fn)
for modname, name, _, filenames, _ in FORMATTERS.values():
for filename in filenames:
if METHOD_NAME(fn, filename):
if name not in _formatter_cache:
_load_formatters(modname)
return _formatter_cache[name](**options)
for cls in find_plugin_formatters():
for filename in cls.filenames:
if METHOD_NAME(fn, filename):
return cls(**options)
raise ClassNotFound("no formatter found for file name %r" % fn)
class _automodule(types.ModuleType):
"""Automatically import formatters."""
def __getattr__(self, name):
info = FORMATTERS.get(name)
if info:
_load_formatters(info[0])
cls = _formatter_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
|
1,569 |
close
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import AppConfigurationManagementClientConfiguration
from .operations import (
ConfigurationStoresOperations,
KeyValuesOperations,
Operations,
PrivateEndpointConnectionsOperations,
PrivateLinkResourcesOperations,
ReplicasOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AppConfigurationManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""AppConfigurationManagementClient.
:ivar configuration_stores: ConfigurationStoresOperations operations
:vartype configuration_stores:
azure.mgmt.appconfiguration.v2022_03_01_preview.aio.operations.ConfigurationStoresOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.appconfiguration.v2022_03_01_preview.aio.operations.Operations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections:
azure.mgmt.appconfiguration.v2022_03_01_preview.aio.operations.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources:
azure.mgmt.appconfiguration.v2022_03_01_preview.aio.operations.PrivateLinkResourcesOperations
:ivar key_values: KeyValuesOperations operations
:vartype key_values:
azure.mgmt.appconfiguration.v2022_03_01_preview.aio.operations.KeyValuesOperations
:ivar replicas: ReplicasOperations operations
:vartype replicas:
azure.mgmt.appconfiguration.v2022_03_01_preview.aio.operations.ReplicasOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Microsoft Azure subscription ID. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2022-03-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = AppConfigurationManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.configuration_stores = ConfigurationStoresOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.key_values = KeyValuesOperations(self._client, self._config, self._serialize, self._deserialize)
self.replicas = ReplicasOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def METHOD_NAME(self) -> None:
await self._client.METHOD_NAME()
async def __aenter__(self) -> "AppConfigurationManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
|
1,570 |
update access token
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from typing import Any, List, Mapping, Optional, Tuple
import pendulum
import requests
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http.requests_native_auth import TokenAuthenticator
from .streams import API_VERSION, ContentMetadata, Dashboards, LookerException, LookerStream, QueryHistory, RunLooks, SwaggerParser
class CustomTokenAuthenticator(TokenAuthenticator):
def __init__(self, domain: str, client_id: str, client_secret: str):
self._domain, self._client_id, self._client_secret = domain, client_id, client_secret
super().__init__(None)
self._access_token = None
self._token_expiry_date = pendulum.now()
def METHOD_NAME(self) -> Optional[str]:
headers = {"Content-Type": "application/x-www-form-urlencoded"}
url = f"https://{self._domain}/api/{API_VERSION}/login"
try:
resp = requests.post(url=url, headers=headers, data=f"client_id={self._client_id}&client_secret={self._client_secret}")
if resp.status_code != 200:
return "Unable to connect to the Looker API. Please check your credentials."
except ConnectionError as error:
return str(error)
data = resp.json()
self._access_token = data["access_token"]
self._token_expiry_date = pendulum.now().add(seconds=data["expires_in"])
return None
def get_auth_header(self) -> Mapping[str, Any]:
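# Refresh lazily: re-authenticate only when the cached token has expired, so
# callers never have to hit the login endpoint themselves.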
if self._token_expiry_date < pendulum.now():
err = self.METHOD_NAME()
if err:
raise LookerException(f"auth error: {err}")
return {"Authorization": f"token {self._access_token}"}
class SourceLooker(AbstractSource):
"""
Source Looker fetches data from the Looker API.
"""
def get_authenticator(self, config: Mapping[str, Any]) -> CustomTokenAuthenticator:
return CustomTokenAuthenticator(domain=config["domain"], client_id=config["client_id"], client_secret=config["client_secret"])
def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Any]:
authenticator = self.get_authenticator(config)
err = authenticator.METHOD_NAME()
if err:
AirbyteLogger().error(f"auth error: {err}")
return False, err
return True, None
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
base_args = {
"authenticator": self.get_authenticator(config),
"domain": config["domain"],
}
args = dict(swagger_parser=SwaggerParser(domain=config["domain"]), **base_args)
streams = [
LookerStream("color_collections", **args),
LookerStream("connections", **args),
ContentMetadata("content_metadata", **args),
ContentMetadata("content_metadata_access", **args),
Dashboards("dashboards", **args),
LookerStream("dashboard_elements", **args),
LookerStream("dashboard_filters", **args),
LookerStream("dashboard_layout_components", **args),
LookerStream("dashboard_layouts", **args),
LookerStream("datagroups", **args),
LookerStream("folders", **args),
LookerStream("folder_ancestors", **args),
LookerStream("git_branches", **args),
LookerStream("groups", **args),
LookerStream("homepage_items", **args),
LookerStream("homepage_sections", **args),
LookerStream("homepages", **args),
LookerStream("integration_hubs", **args),
LookerStream("integrations", **args),
LookerStream("legacy_features", **args),
Dashboards("lookml_dashboards", **args),
LookerStream("lookml_models", **args),
LookerStream("looks", **args),
LookerStream("model_sets", **args),
LookerStream("permission_sets", **args),
LookerStream("permissions", **args),
LookerStream("primary_homepage_sections", **args),
LookerStream("projects", **args),
LookerStream("project_files", **args),
QueryHistory(**base_args),
LookerStream("roles", **args),
LookerStream("role_groups", **args),
RunLooks(run_look_ids=config["run_look_ids"], **args) if config.get("run_look_ids") else None,
LookerStream("scheduled_plans", request_params={"all_users": "true"}, **args),
LookerStream("spaces", **args),
LookerStream("space_ancestors", **args),
LookerStream("user_attributes", **args),
LookerStream("user_attribute_group_values", **args),
LookerStream("user_attribute_values", request_params={"all_values": "true", "include_unset": "true"}, **args),
LookerStream("user_login_lockouts", **args),
LookerStream("user_sessions", **args),
LookerStream("users", **args),
LookerStream("versions", **args),
LookerStream("workspaces", **args),
]
# stream RunLooks is dynamic and will be added if run_look_ids is not empty
# but we need to preserve the streams' order
return [stream for stream in streams if stream]
|
1,571 |
fetch
|
import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS
from bs4 import BeautifulSoup
import re
TITLE = "Abfallwirtschaftsbetrieb LK Mainz-Bingen"
DESCRIPTION = "Source for Abfallwirtschaftsbetrieb LK Mainz-Bingen."
URL = "https://www.awb-mainz-bingen.de/"
TEST_CASES = {
"Stadt Ingelheim Ingelheim Süd Albert-Schweitzer-Straße": {
"bezirk": "Stadt Ingelheim",
"ort": "Ingelheim Süd",
"strasse": "Albert-Schweitzer-Straße"
},
"Verbandsgemeinde Rhein-Selz, Mommenheim": {
"bezirk": "Verbandsgemeinde Rhein-Selz",
"ort": "mOmMenHeiM",
},
"Stadt Bingen, Bingen-Stadt, Martinstraße (Haus-Nr.: 5 - 11, 10 - 18)": {
"bezirk": "Stadt Bingen",
"ort": "Bingen-Stadt",
"strasse": "Martinstraße (Haus-Nr.: 5 - 11, 10 - 18)"
},
}
ICON_MAP = {
"Restmüll": "mdi:trash-can",
"Glass": "mdi:bottle-soda",
"Biomüll": "mdi:leaf",
"Papier": "mdi:package-variant",
"Gelbe/r Tonne / Sack": "mdi:recycle",
"Problemmüll": "mdi:toxic",
}
API_URL = "https://abfallkalender.awb-mainz-bingen.de/"
class Source:
def __init__(self, bezirk: str, ort: str, strasse: str = None):
self._bezirk: str = bezirk
self._ort: str = ort
self._strasse: str = strasse
self._ics = ICS()
def METHOD_NAME(self):
session = requests.Session()
# Get bezirk id from main page
r = session.get(API_URL)
r.raise_for_status()
soup = BeautifulSoup(r.text, "html.parser")
bezirk = soup.find("select", {"id": "Abfuhrbezirk"}).find(
"option", text=re.compile(re.escape(self._bezirk), re.IGNORECASE))
if not bezirk:
found = [i.text for i in soup.find_all("option")][1:]
raise Exception(
f"No matching bezirk found search for: {self._bezirk} found: {str(found)}")
bezirk_id = bezirk.get("value")
# set arguments to imitate the xajax call
xjxargs_string = "<xjxobj>"
for key, value in {
"Abfuhrbezirk": "{bezirk_id}",
"Ortschaft": "{ort_id}",
"Strasse": "{strasse_id}",
}.items():
xjxargs_string += "<e><k>"+key+"</k><v>S"+value+"</v></e>"
xjxargs_string += "</xjxobj>"
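# The resulting template (before .format) is the concatenation
# "<xjxobj><e><k>Abfuhrbezirk</k><v>S{bezirk_id}</v></e>..." for all three keys,
# mimicking the xajax payload the site expects.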
args = {
"xjxfun": "show_ortsteil_dropdown",
"xjxargs[]": xjxargs_string.format(bezirk_id=bezirk_id, ort_id=0, strasse_id=0),
}
# send request to get the dropdown used to look up the ort id
r = session.post(API_URL, data=args)
r.raise_for_status()
soup = BeautifulSoup(r.text, "xml")
teilorte_div = soup.find("cmd", {"id": "divTeilort"})
if not teilorte_div:
raise Exception("invalid response from server", soup)
teilorte = BeautifulSoup(teilorte_div.text.replace(
"<![CDATA[", "").replace("]]>", ""), "html.parser")
ort = teilorte.find("option", text=re.compile(
re.escape(self._ort), re.IGNORECASE))
if not ort:
raise Exception(
f"No matching ort found. Searched for: {self._ort}. Found {str([i.text for i in teilorte.find_all('option')][1:])})")
ort_id = ort.get("value")
args = {
"xjxfun": "show_strasse_dropdown_or_abfuhrtermine",
"xjxargs[]": xjxargs_string.format(bezirk_id=bezirk_id, ort_id=ort_id, strasse_id=0),
}
r = session.post(API_URL, data=args)
r.raise_for_status()
soup = BeautifulSoup(r.text, "xml")
div_strasse = soup.find("cmd", {"id": "divStrasse"})
if not div_strasse:
raise Exception("invalid response from server")
strassen_soup = BeautifulSoup(div_strasse.text.replace(
"<![CDATA[", "").replace("]]>", ""), "html.parser")
# If strasse is needed
if strassen_soup.find("option"):
if not self._strasse:
raise Exception("Street needed but not provided")
# get strasse id
strasse_id = strassen_soup.find("option", text=re.compile(
re.escape(self._strasse), re.IGNORECASE))
if not strasse_id:
found = [i.text for i in strassen_soup.find_all("option")][1:]
raise Exception(
f"Street wanted but no matching street found. Searched for: {self._strasse}. Found {str(found)})")
strasse_id = strasse_id.get("value")
xjxargs = {"bezirk_id": bezirk_id,
"ort_id": ort_id, "strasse_id": strasse_id}
args = {
"xjxfun": "show_abfuhrtermine",
"xjxargs[]": xjxargs_string.format(**xjxargs),
}
# get main calendar
r = session.post(API_URL, data=args)
r.raise_for_status()
soup = BeautifulSoup(r.text, "xml")
cal_wrapper = soup.find("cmd", {"id": "divKalenderWrapper"})
if not cal_wrapper:
raise Exception("No calendar found", r.text)
cal_soup = BeautifulSoup(cal_wrapper.text.replace(
"<![CDATA[", "").replace("]]>", ""), "html.parser")
# get ical file url
ical_path = cal_soup.find(
"a", {"href": re.compile("ical")}).get("href")
# get ical file
r = requests.get(API_URL+ical_path)
r.raise_for_status()
r.encoding = "utf-8"
# remove DURATION because the returned icalendar has invalid DURATION syntax
ical_string = re.sub(r'^DURATION.*\n?', "", r.text, flags=re.MULTILINE)
dates = self._ics.convert(ical_string)
entries = []
for d in dates:
bin_type = d[1].split(" am ")[0].replace(
"Abfuhrtermin", "").strip()
entries.append(Collection(d[0], bin_type, ICON_MAP.get(bin_type)))
return entries
|
1,572 |
root cmakelists dir
|
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack.package import *
class RocmDebugAgent(CMakePackage):
"""Radeon Open Compute (ROCm) debug agent"""
homepage = "https://github.com/ROCm-Developer-Tools/rocr_debug_agent"
git = "https://github.com/ROCm-Developer-Tools/rocr_debug_agent.git"
url = "https://github.com/ROCm-Developer-Tools/rocr_debug_agent/archive/rocm-5.5.0.tar.gz"
tags = ["rocm"]
maintainers("srekolam", "renjithravindrankannath")
libraries = ["librocm-debug-agent"]
version("5.5.1", sha256="1bb66734f11bb57df6efa507f0217651446653bf28b3ca36acfcf94511a7c2bc")
version("5.5.0", sha256="4f2431a395a77a06dc417ed1e9188731b031a0c680e62c6eee19d60965317f5a")
version("5.4.3", sha256="b2c9ac198ea3cbf35e7e80f57c5d81c461de78b821d07b637ea4037a65cdf49f")
version("5.4.0", sha256="94bef73ea0a6d385dab2292ee591ca1dc268a5585cf9f1b5092a1530949f575e")
version("5.3.3", sha256="7170312d08e91334ee03586aa1f23d67f33d9ec0df25a5556cbfa3f210b15b06")
version("5.3.0", sha256="8dfb6aa442ce136207c0c089321c8099042395977b4a488e4ca219661df0cd78")
version("5.2.3", sha256="5d31372e2980738271ae26b92dcc402c387cdf5f23710ce6feeb2bd303ff7ea0")
version("5.2.1", sha256="a60c224c546a25dafcff1e50ce3a1605e152efdb36624a672ddb5812cd34773e")
version("5.2.0", sha256="f8e8d5ad691033d0c0f1850d69f35c98ba9722ab4adc66c4251f22257f56f0a2")
version("5.1.3", sha256="ef26130829f3348d503669467ab1ea39fb67d943d88d64e7ac04b9617ec6067d")
version("5.1.0", sha256="e0ceeef575d8645385bc6e4c9c3accaa192a93c42d83545cf5626c848f59806b")
version(
"5.0.2",
sha256="4ec3cdedc4ba774d05c3dc972186b3181b3aa823af08f3843238961d5ef90e57",
deprecated=True,
)
version(
"5.0.0",
sha256="fb8ebe136bfa815116453bdcb4afb9617ab488f54501434c72eed9706857be3f",
deprecated=True,
)
version(
"4.5.2",
sha256="85c7f19485defd9a58716fffdd1a0e065ed7f779c3f124467fca18755bc634a6",
deprecated=True,
)
version(
"4.5.0",
sha256="6486b1a8515da4711d3c85f8e41886f8fe6ba37ca2c63664f00c811f6296ac20",
deprecated=True,
)
version(
"4.3.1",
sha256="7bee6be6c29883f03f47a8944c0d50b7cf43a6b5eeed734602f521c3c40a18d0",
deprecated=True,
)
version(
"4.3.0",
sha256="0cdee5792b808e03b839070da0d1b08dc4078a7d1fc295f0c99c6a5ae7d636a6",
deprecated=True,
)
version(
"4.2.0",
sha256="ce02a5b752291882daa0a2befa23944e59087ce9fe65a91061476c3c399e4a0c",
deprecated=True,
)
version(
"4.1.0",
sha256="b1ae874887e5ee037070f1dd46b145ad02ec9fd8a724c6b6ae194b534f01acdb",
deprecated=True,
)
version(
"4.0.0",
sha256="a9e64834d56a9221c242e71aa110c2cef0087aa8f86f50428dd618e5e623cc3c",
deprecated=True,
)
version(
"3.10.0",
sha256="675b8d3cc4aecc4428a93553abf664bbe6a2cb153f1f480e6cadeeb4d24ef4b1",
deprecated=True,
)
version(
"3.9.0",
sha256="3e56bf8b2b53d9102e8709b6259deea52257dc6210df16996b71a7d677952b1b",
deprecated=True,
)
version(
"3.8.0",
sha256="55243331ac4b0d90e88882eb29fd06fad354e278f8a34ac7f0680b2c895ca2ac",
deprecated=True,
)
version(
"3.7.0",
sha256="d0f442a2b224a734b0080c906f0fc3066a698e5cde9ff97ffeb485b36d2caba1",
deprecated=True,
)
version(
"3.5.0",
sha256="203ccb18d2ac508aae40bf364923f67375a08798b20057e574a0c5be8039f133",
deprecated=True,
)
def url_for_version(self, version):
url = "https://github.com/ROCm-Developer-Tools/rocr_debug_agent/archive/"
if version <= Version("3.7.0"):
url += "roc-{0}.tar.gz".format(version)
else:
url += "rocm-{0}.tar.gz".format(version)
return url
depends_on("cmake@3:", type="build")
depends_on("elfutils@:0.168", type="link")
for ver in [
"3.5.0",
"3.7.0",
"3.8.0",
"3.9.0",
"3.10.0",
"4.0.0",
"4.1.0",
"4.2.0",
"4.3.0",
"4.3.1",
"4.5.0",
"4.5.2",
"5.0.0",
"5.0.2",
"5.1.0",
"5.1.3",
"5.2.0",
"5.2.1",
"5.2.3",
"5.3.0",
"5.3.3",
"5.4.0",
"5.4.3",
"5.5.0",
"5.5.1",
]:
depends_on("hsa-rocr-dev@" + ver, when="@" + ver)
depends_on("hsakmt-roct@" + ver, when="@" + ver)
for ver in [
"3.7.0",
"3.8.0",
"3.9.0",
"3.10.0",
"4.0.0",
"4.1.0",
"4.2.0",
"4.3.0",
"4.3.1",
"4.5.0",
"4.5.2",
"5.0.0",
"5.0.2",
"5.1.0",
"5.1.3",
"5.2.0",
"5.2.1",
"5.2.3",
"5.3.0",
"5.3.3",
"5.4.0",
"5.4.3",
"5.5.0",
"5.5.1",
]:
depends_on("rocm-dbgapi@" + ver, when="@" + ver)
depends_on("hip@" + ver, when="@" + ver)
for ver in ["5.5.0", "5.5.1"]:
depends_on("rocm-core@" + ver, when="@" + ver)
# https://github.com/ROCm-Developer-Tools/rocr_debug_agent/pull/4
patch("0001-Drop-overly-strict-Werror-flag.patch", when="@3.7.0:")
patch("0002-add-hip-architecture.patch", when="@3.9.0:")
@classmethod
def determine_version(cls, lib):
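# Illustrative (hypothetical file name): "librocm-debug-agent.so.2.0.50501" matches
# the pattern below and is reported as version "5.5.1" (groups "5", "05", "01").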
match = re.search(r"lib\S*\.so\.\d+\.\d+\.(\d)(\d\d)(\d\d)", lib)
if match:
ver = "{0}.{1}.{2}".format(
int(match.group(1)), int(match.group(2)), int(match.group(3))
)
else:
ver = None
return ver
@property
def METHOD_NAME(self):
if self.spec.satisfies("@3.5.0"):
return "src"
else:
return self.stage.source_path
def cmake_args(self):
spec = self.spec
args = []
if spec.satisfies("@3.5.0"):
args.append(
"-DCMAKE_PREFIX_PATH={0}/include/hsa;{1}/include,".format(
spec["hsa-rocr-dev"].prefix, spec["hsakmt-roct"].prefix
)
)
if spec.satisfies("@3.7.0:5.1"):
args.append(self.define("CMAKE_MODULE_PATH", spec["hip"].prefix.cmake))
elif spec.satisfies("@5.2.0:"):
args.append(self.define("CMAKE_MODULE_PATH", spec["hip"].prefix.lib.cmake.hip))
return args
|
1,573 |
copy
|
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
import pickle
# Local imports
from . import token, tokenize
class Grammar(object):
"""Pgen parsing tables tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
states, each state is a list of arcs, and each
arc is a (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self):
self.symbol2number = {}
self.number2symbol = {}
self.states = []
self.dfas = {}
self.labels = [(0, "EMPTY")]
self.keywords = {}
self.tokens = {}
self.symbol2label = {}
self.start = 256
def dump(self, filename):
"""Dump the grammar tables to a pickle file."""
f = open(filename, "wb")
pickle.dump(self.__dict__, f, 2)
f.close()
def load(self, filename):
"""Load the grammar tables from a pickle file."""
f = open(filename, "rb")
d = pickle.load(f)
f.close()
self.__dict__.update(d)
def METHOD_NAME(self):
"""
Copy the grammar.
"""
new = self.__class__()
for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
"tokens", "symbol2label"):
setattr(new, dict_attr, getattr(self, dict_attr).METHOD_NAME())
new.labels = self.labels[:]
new.states = self.states[:]
new.start = self.start
return new
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print "s2n"
pprint(self.symbol2number)
print "n2s"
pprint(self.number2symbol)
print "states"
pprint(self.states)
print "dfas"
pprint(self.dfas)
print "labels"
pprint(self.labels)
print "start", self.start
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name)
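# Illustrative: opmap['+='] now maps to token.PLUSEQUAL, the code the parser uses in
# place of the generic OP token reported by tokenize.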
|
1,574 |
main
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plot all mean/std per point for a subject or population json file from
tractometry-flow.
WARNING: For a population, the displayed STDs only show the variation
of the means. They do not account for intra-subject STDs.
"""
import argparse
import itertools
import json
import os
import numpy as np
from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist,
assert_output_dirs_exist_and_empty)
from scilpy.utils.metrics_tools import plot_metrics_stats
def _build_arg_parser():
p = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
p.add_argument('in_json',
help='JSON file containing the mean/std per point. For '
'example, can be created using '
'scil_compute_metrics_along_streamline.')
p.add_argument('out_dir',
help='Output directory.')
p.add_argument('--stats_over_population', action='store_true',
help='If set, consider the input stats to be over an '
'entire population and not subject-based.')
p.add_argument('--nb_pts', type=int,
help='Force the number of divisions for the bundles.\n'
'Avoid unequal plots across datasets, replace missing '
'data with zeros.')
p.add_argument('--display_means', action='store_true',
help='Display each subject\'s mean as a semi-transparent line.\n'
'Poor results when the number of subjects is high.')
p1 = p.add_mutually_exclusive_group()
p1.add_argument('--fill_color',
help='Hexadecimal RGB color filling the region between '
'mean +/- std. The hexadecimal RGB color should be '
'formatted as 0xRRGGBB.')
p1.add_argument('--dict_colors',
help='Dictionary mapping basename to color. '
'Same convention as --fill_color.')
add_overwrite_arg(p)
return p
def METHOD_NAME():
parser = _build_arg_parser()
args = parser.parse_args()
assert_inputs_exist(parser, args.in_json)
assert_output_dirs_exist_and_empty(parser, args, args.out_dir,
create_dir=True)
if args.fill_color and len(args.fill_color) != 8:
parser.error('Hexadecimal RGB color should be formatted as 0xRRGGBB')
with open(args.in_json, 'r+') as f:
if args.stats_over_population:
mean_std_per_point = json.load(f)
else:
mean_std_per_point = list(json.load(f).values())[0]
for bundle_name, bundle_stats in mean_std_per_point.items():
for metric, metric_stats in bundle_stats.items():
nb_points = args.nb_pts if args.nb_pts is not None \
else len(metric_stats)
num_digits_labels = len(list(metric_stats.keys())[0])
means = []
stds = []
for label_int in range(1, nb_points+1):
label = str(label_int).zfill(num_digits_labels)
mean = metric_stats.get(label, {'mean': 0})['mean']
std = metric_stats.get(label, {'std': 0})['std']
if not isinstance(mean, list):
mean = [mean]
std = [std]
means += [mean]
stds += [std]
color = None
if args.dict_colors:
with open(args.dict_colors, 'r') as data:
dict_colors = json.load(data)
# Supports variation from rbx-flow
for key in dict_colors.keys():
if key in bundle_name:
color = dict_colors[key]
elif args.fill_color is not None:
color = args.fill_color
if color is None:
color = '0x000000'
# Robustify for missing data
means = np.array(list(itertools.zip_longest(*means,
fillvalue=np.nan))).T
stds = np.array(list(itertools.zip_longest(*stds,
fillvalue=np.nan))).T
for i in range(len(means)):
_nan = np.isnan(means[i, :])
if np.count_nonzero(_nan) > 0:
if np.count_nonzero(_nan) < len(means[i, :]):
means[i, _nan] = np.average(means[i, ~_nan])
stds[i, _nan] = np.average(stds[i, ~_nan])
else:
means[i, _nan] = -1
stds[i, _nan] = -1
if not args.stats_over_population:
means = np.squeeze(means)
stds = np.squeeze(stds)
fig = plot_metrics_stats(means, stds,
title=bundle_name,
xlabel='Location along the streamline',
ylabel=metric,
fill_color=(color.replace("0x", "#")),
display_means=args.display_means)
fig.savefig(os.path.join(args.out_dir,
'{}_{}.png'.format(bundle_name,
metric)),
bbox_inches='tight')
if __name__ == '__main__':
METHOD_NAME()
|
1,575 |
test build with debug
|
"""
Tests CQGI functionality
Currently, this includes:
Parsing a script, and detecting its available variables
Altering the values at runtime
defining a build_object function to return results
"""
from cadquery import cqgi
from tests import BaseTest
import textwrap
TESTSCRIPT = textwrap.dedent(
"""
height=2.0
width=3.0
(a,b) = (1.0,1.0)
foo="bar"
result = "%s|%s|%s|%s" % ( str(height) , str(width) , foo , str(a) )
show_object(result)
"""
)
TEST_DEBUG_SCRIPT = textwrap.dedent(
"""
height=2.0
width=3.0
(a,b) = (1.0,1.0)
foo="bar"
debug(foo, { "color": 'yellow' } )
result = "%s|%s|%s|%s" % ( str(height) , str(width) , foo , str(a) )
show_object(result)
debug(height )
"""
)
class TestCQGI(BaseTest):
def test_parser(self):
model = cqgi.CQModel(TESTSCRIPT)
metadata = model.metadata
self.assertEqual(
set(metadata.parameters.keys()), {"height", "width", "a", "b", "foo"}
)
def METHOD_NAME(self):
model = cqgi.CQModel(TEST_DEBUG_SCRIPT)
result = model.build()
debugItems = result.debugObjects
self.assertTrue(len(debugItems) == 2)
self.assertTrue(debugItems[0].shape == "bar")
self.assertTrue(debugItems[0].options == {"color": "yellow"})
self.assertTrue(debugItems[1].shape == 2.0)
self.assertTrue(debugItems[1].options == {})
def test_build_with_empty_params(self):
model = cqgi.CQModel(TESTSCRIPT)
result = model.build()
self.assertTrue(result.success)
self.assertTrue(len(result.results) == 1)
self.assertTrue(result.results[0].shape == "2.0|3.0|bar|1.0")
def test_build_with_different_params(self):
model = cqgi.CQModel(TESTSCRIPT)
result = model.build({"height": 3.0})
self.assertTrue(result.results[0].shape == "3.0|3.0|bar|1.0")
def test_describe_parameters(self):
script = textwrap.dedent(
"""
a = 2.0
describe_parameter(a,'FirstLetter')
"""
)
model = cqgi.CQModel(script)
a_param = model.metadata.parameters["a"]
self.assertTrue(a_param.default_value == 2.0)
self.assertTrue(a_param.desc == "FirstLetter")
self.assertTrue(a_param.varType == cqgi.NumberParameterType)
def test_describe_parameter_invalid_doesnt_fail_script(self):
script = textwrap.dedent(
"""
a = 2.0
describe_parameter(a, 2 - 1 )
"""
)
model = cqgi.CQModel(script)
a_param = model.metadata.parameters["a"]
self.assertTrue(a_param.name == "a")
def test_build_with_exception(self):
badscript = textwrap.dedent(
"""
raise ValueError("ERROR")
"""
)
model = cqgi.CQModel(badscript)
result = model.build({})
self.assertFalse(result.success)
self.assertIsNotNone(result.exception)
self.assertTrue(result.exception.args[0] == "ERROR")
def test_that_invalid_syntax_in_script_fails_immediately(self):
badscript = textwrap.dedent(
"""
this doesn't even compile
"""
)
exception = None
try:
cqgi.CQModel(badscript)
except Exception as e:
exception = e
self.assertIsInstance(exception, SyntaxError)
def test_that_two_results_are_returned(self):
script = textwrap.dedent(
"""
h = 1
show_object(h)
h = 2
show_object(h)
"""
)
model = cqgi.CQModel(script)
result = model.build({})
self.assertEqual(2, len(result.results))
self.assertEqual(1, result.results[0].shape)
self.assertEqual(2, result.results[1].shape)
def test_that_assinging_number_to_string_works(self):
script = textwrap.dedent(
"""
h = "this is a string"
show_object(h)
"""
)
result = cqgi.parse(script).build({"h": 33.33})
self.assertEqual(result.results[0].shape, "33.33")
def test_that_assigning_string_to_number_fails(self):
script = textwrap.dedent(
"""
h = 20.0
show_object(h)
"""
)
result = cqgi.parse(script).build({"h": "a string"})
self.assertTrue(isinstance(result.exception, cqgi.InvalidParameterError))
def test_that_assigning_unknown_var_fails(self):
script = textwrap.dedent(
"""
h = 20.0
show_object(h)
"""
)
result = cqgi.parse(script).build({"w": "var is not there"})
self.assertTrue(isinstance(result.exception, cqgi.InvalidParameterError))
def test_that_cq_objects_are_visible(self):
script = textwrap.dedent(
"""
r = cadquery.Workplane('XY').box(1,2,3)
show_object(r)
"""
)
result = cqgi.parse(script).build()
self.assertTrue(result.success)
self.assertIsNotNone(result.first_result)
def test_that_options_can_be_passed(self):
script = textwrap.dedent(
"""
r = cadquery.Workplane('XY').box(1,2,3)
show_object(r, options={"rgba":(128, 255, 128, 0.0)})
"""
)
result = cqgi.parse(script).build()
self.assertTrue(result.success)
self.assertIsNotNone(result.first_result.options)
def test_setting_boolean_variable(self):
script = textwrap.dedent(
"""
h = True
show_object( "*%s*" % str(h) )
"""
)
result = cqgi.parse(script).build({"h": False})
self.assertTrue(result.success)
self.assertEqual(result.first_result.shape, "*False*")
def test_that_only_top_level_vars_are_detected(self):
script = textwrap.dedent(
"""
h = 1.0
w = 2.0
def do_stuff():
x = 1
y = 2
show_object( "result" )
"""
)
model = cqgi.parse(script)
self.assertEqual(2, len(model.metadata.parameters))
|
1,576 |
dump flags
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
CISD analytical nuclear gradients
'''
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.ci import cisd
from pyscf.grad import rhf as rhf_grad
from pyscf.grad import ccsd as ccsd_grad
def grad_elec(cigrad, civec=None, eris=None, atmlst=None, verbose=logger.INFO):
myci = cigrad.base
if civec is None: civec = myci.ci
assert (not isinstance(civec, (list, tuple)))
nocc = myci.nocc
nmo = myci.nmo
d1 = cisd._gamma1_intermediates(myci, civec, nmo, nocc)
fd2intermediate = lib.H5TmpFile()
d2 = cisd._gamma2_outcore(myci, civec, nmo, nocc, fd2intermediate, True)
t1 = t2 = l1 = l2 = civec
return ccsd_grad.grad_elec(cigrad, t1, t2, l1, l2, eris, atmlst, d1, d2, verbose)
def as_scanner(grad_ci, state=0):
'''Generating a nuclear gradients scanner/solver (for geometry optimizer).
The returned solver is a function. This function requires one argument
"mol" as input and returns total CISD energy.
The solver will automatically use the results of last calculation as the
initial guess of the new calculation. All parameters assigned in the
CISD and the underlying SCF objects (conv_tol, max_memory etc) are
automatically applied in the solver.
Note scanner has side effects. It may change many underlying objects
(_scf, with_df, with_x2c, ...) during calculation.
Examples::
>>> from pyscf import gto, scf, ci
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
>>> ci_scanner = ci.CISD(scf.RHF(mol)).nuc_grad_method().as_scanner()
>>> e_tot, grad = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
>>> e_tot, grad = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
'''
from pyscf import gto
if isinstance(grad_ci, lib.GradScanner):
return grad_ci
logger.info(grad_ci, 'Create scanner for %s', grad_ci.__class__)
class CISD_GradScanner(grad_ci.__class__, lib.GradScanner):
def __init__(self, g):
lib.GradScanner.__init__(self, g)
def __call__(self, mol_or_geom, state=state, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
self.reset(mol)
ci_scanner = self.base
if ci_scanner.nroots > 1 and state >= ci_scanner.nroots:
raise ValueError('State ID greater than the number of CISD roots')
mf_scanner = ci_scanner._scf
mf_scanner(mol)
ci_scanner.mo_coeff = mf_scanner.mo_coeff
ci_scanner.mo_occ = mf_scanner.mo_occ
if getattr(ci_scanner.ci, 'size', 0) != ci_scanner.vector_size():
ci_scanner.ci = None
eris = ci_scanner.ao2mo(ci_scanner.mo_coeff)
ci_scanner.kernel(ci0=ci_scanner.ci, eris=eris)
# TODO: Check root flip
if ci_scanner.nroots > 1:
e_tot = ci_scanner.e_tot[state]
civec = ci_scanner.ci[state]
else:
e_tot = ci_scanner.e_tot
civec = ci_scanner.ci
de = self.kernel(civec, eris=eris, **kwargs)
return e_tot, de
@property
def converged(self):
ci_scanner = self.base
if ci_scanner.nroots > 1:
ci_conv = ci_scanner.converged[state]
else:
ci_conv = ci_scanner.converged
return all((ci_scanner._scf.converged, ci_conv))
# cache eris object in the CISD base class. eris object is used many times
# when calculating gradients
g_ao2mo = grad_ci.base.__class__.ao2mo
def _save_eris(self, *args, **kwargs):
self._eris = g_ao2mo(self, *args, **kwargs)
return self._eris
grad_ci.base.__class__.ao2mo = _save_eris
return CISD_GradScanner(grad_ci)
class Gradients(rhf_grad.GradientsMixin):
def __init__(self, myci):
self.state = 0  # the state for which the gradients are to be computed.
rhf_grad.GradientsMixin.__init__(self, myci)
def METHOD_NAME(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('\n')
if not self.base.converged:
log.warn('Ground state %s not converged',
self.base.__class__.__name__)
log.info('******** %s for %s ********',
self.__class__, self.base.__class__)
if self.state != 0 and self.base.nroots > 1:
log.info('State ID = %d', self.state)
return self
grad_elec = grad_elec
def kernel(self, civec=None, eris=None, atmlst=None, state=None,
verbose=None):
log = logger.new_logger(self, verbose)
myci = self.base
if civec is None: civec = myci.ci
if civec is None: civec = myci.kernel(eris=eris)
if (isinstance(civec, (list, tuple)) or
(isinstance(civec, numpy.ndarray) and civec.ndim > 1)):
if state is None:
state = self.state
else:
self.state = state
civec = civec[state]
logger.info(self, 'Multiple roots are found in CISD solver. '
'Nuclear gradients of root %d are computed.', state)
if atmlst is None:
atmlst = self.atmlst
else:
self.atmlst = atmlst
if self.verbose >= logger.WARN:
self.check_sanity()
if self.verbose >= logger.INFO:
self.METHOD_NAME()
de = self.grad_elec(civec, eris, atmlst, verbose=log)
self.de = de + self.grad_nuc(atmlst=atmlst)
if self.mol.symmetry:
self.de = self.symmetrize(self.de, atmlst)
self._finalize()
return self.de
# Calling the underlying SCF nuclear gradients because it may be modified
# by external modules (e.g. QM/MM, solvent)
def grad_nuc(self, mol=None, atmlst=None):
mf_grad = self.base._scf.nuc_grad_method()
return mf_grad.grad_nuc(mol, atmlst)
def _finalize(self):
if self.verbose >= logger.NOTE:
logger.note(self, '--------- %s gradients for state %d ----------',
self.base.__class__.__name__, self.state)
self._write(self.mol, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
as_scanner = as_scanner
Grad = Gradients
cisd.CISD.Gradients = lib.class_as_method(Gradients)
|
1,577 |
get source model
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Delegate for display of QComponents in Library tab
"""
import importlib
import inspect
import os
from PySide2.QtCore import QAbstractItemModel, QAbstractProxyModel, QModelIndex, Signal
from PySide2.QtGui import QPainter
from PySide2.QtWidgets import QItemDelegate, QStyle, QStyleOptionViewItem, QWidget
from qiskit_metal._gui.widgets.qlibrary_display.file_model_qlibrary import QFileSystemLibraryModel
from qiskit_metal.toolbox_metal.exceptions import QLibraryGUIException
class LibraryDelegate(QItemDelegate):
"""
Delegate for QLibrary view
Requires LibraryModel
"""
tool_tip_signal = Signal(str)
def __init__(self, parent: QWidget = None):
"""
Initializer for LibraryDelegate
Args:
parent(QWidget): parent
"""
super().__init__(parent)
# The Delegate may belong to a view using a ProxyModel but even so
# the source model for that Proxy Model(s) should be a QFileSystemLibraryModel
self.source_model_type = QFileSystemLibraryModel
def METHOD_NAME(self, model: QAbstractItemModel, source_type: type): # pylint: disable=R0201, no-self-use
"""
The Delegate may belong to a view using a ProxyModel. However,
the source model for that Proxy Model(s) should be a QFileSystemLibraryModel
and is returned by this function
Args:
model(QAbstractItemModel): Current model
source_type(type): Expected source model type
Returns:
QFileSystemLibraryModel: Source model
Raises:
QLibraryGUIException: If unable to find the source model for the given model
"""
while True:
# https://stackoverflow.com/questions/50478661/python-isinstance-not-working-as-id-expect
if model.__class__.__name__ == source_type.__name__:
return model
if isinstance(model, QAbstractProxyModel):
model = model.sourceModel()
else:
raise QLibraryGUIException(
f"Unable to find source model: "
f"\n Expected Type is:"
f"\n{source_type}"
f"\n First non-proxy model type found is"
f"\n{type(model)} for"
f"\n{model}")
def paint(self, painter: QPainter, option: QStyleOptionViewItem,
index: QModelIndex):
"""
Paints the Metal GUI QLibrary.
If hovering over a file with a tooltip, emits the tooltip signal
Args:
painter (QPainter): Current painter
option (QStyleOptionViewItem): Current option
index (QModelIndex): Current index of related model
Emits:
tool_tip_signal(str): The TOOLTIP for the QComponent being hovered over by the mouse
"""
self.emit_tool_tip(option, index)
QItemDelegate.paint(self, painter, option, index)
def emit_tool_tip(self, option: QStyleOptionViewItem, index: QModelIndex):
"""
Args:
option (QStyleOptionViewItem): Contains current style flags
index (QModelIndex): Index being moused over
Emits:
tool_tip_signal(str): The TOOLTIP for the QComponent of the index
"""
if option.state & QStyle.State_MouseOver: # if option.state == QStyle.State_MouseOver: Qt.WA_Hover
source_model = self.METHOD_NAME(index.model(),
self.source_model_type)
model = index.model()
full_path = source_model.filePath(model.mapToSource(index))
            try:
                current_class = self.get_class_from_abs_file_path(full_path)
                information = current_class.TOOLTIP
            except Exception:  # failing to load the class just means no tooltip
information = ""
self.tool_tip_signal.emit(information)
def get_class_from_abs_file_path(self, abs_file_path):
"""
Gets the corresponding class object for the absolute file path to the file containing that
class definition
Args:
abs_file_path (str): absolute file path to the file containing the QComponent class definition
getting class from absolute file path -
https://stackoverflow.com/questions/452969/does-python-have-an-equivalent-to-java-class-forname
"""
qis_abs_path = abs_file_path[abs_file_path.
index(__name__.split('.')[0]):]
# Windows users' qis_abs_path may use os.sep or '/' due to PySide's
# handling of file names
qis_mod_path = qis_abs_path.replace(os.sep, '.')[:-len('.py')]
qis_mod_path = qis_mod_path.replace(
"/", '.') # users cannot use '/' in filename
mymodule = importlib.import_module(qis_mod_path)
members = inspect.getmembers(mymodule, inspect.isclass)
class_owner = qis_mod_path.split('.')[-1]
for memtup in members:
if len(memtup) > 1:
if str(memtup[1].__module__).endswith(class_owner):
return memtup[1]
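# Illustrative walk-through (comment only; the path below is hypothetical): an
# absolute path such as
#     /home/user/qiskit_metal/qlibrary/qubits/transmon_pocket.py
# is first trimmed to start at the package root ('qiskit_metal/...'), then
# converted to the dotted module path 'qiskit_metal.qlibrary.qubits.transmon_pocket';
# importlib imports that module, and the class whose __module__ ends with
# 'transmon_pocket' is returned.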
|
1,578 |
is unlisted addons reviewer
|
from olympia import amo
def match_rules(rules, app, action):
"""
This will match rules found in Group.
"""
for rule in rules.split(','):
rule_app, rule_action = rule.split(':')
if rule_app == '*' or rule_app == app:
if rule_action == '*' or rule_action == action or action == '%':
return True
return False
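# Illustrative sketch (comment only, not part of the original module): how the
# 'app:action' rule strings above behave; the rule strings are made-up examples.
#
#     match_rules('Addons:Edit,ReviewerTools:*', 'Addons', 'Edit')        # True, exact match
#     match_rules('Addons:Edit,ReviewerTools:*', 'ReviewerTools', 'View') # True, '*' action
#     match_rules('Addons:Edit', 'Stats', 'View')                         # False, no rule matches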
def action_allowed_for(user, permission):
"""
Determines if the user has permission to do a certain action.
`permission` is a tuple constant in constants.permissions.
    Note: relies on user.groups_list, which is cached on the user instance the
first time it's accessed.
"""
if user is None or not user.is_authenticated:
return False
assert permission in amo.permissions.PERMISSIONS_LIST # constants only.
return any(
match_rules(group.rules, permission.app, permission.action)
for group in user.groups_list
)
def experiments_submission_allowed(user, parsed_addon_data):
"""Experiments can only be submitted by the people with the right
permission.
See bug 1220097.
"""
return not parsed_addon_data.get('is_experiment', False) or action_allowed_for(
user, amo.permissions.EXPERIMENTS_SUBMIT
)
def langpack_submission_allowed(user, parsed_addon_data):
"""Language packs can only be submitted by people with the right
permission.
See https://github.com/mozilla/addons-server/issues/11788 and
https://github.com/mozilla/addons-server/issues/11793
"""
return not parsed_addon_data.get('type') == amo.ADDON_LPAPP or action_allowed_for(
user, amo.permissions.LANGPACK_SUBMIT
)
def reserved_guid_addon_submission_allowed(user, parsed_addon_data):
"""Add-ons with a guid ending with reserved suffixes can only be submitted
by people with the right permission.
"""
guid = parsed_addon_data.get('guid') or ''
return not guid.lower().endswith(amo.RESERVED_ADDON_GUIDS) or action_allowed_for(
user, amo.permissions.SYSTEM_ADDON_SUBMIT
)
def mozilla_signed_extension_submission_allowed(user, parsed_addon_data):
"""Add-ons already signed with mozilla internal certificate can only be
submitted by people with the right permission.
"""
return not parsed_addon_data.get(
'is_mozilla_signed_extension'
) or action_allowed_for(user, amo.permissions.SYSTEM_ADDON_SUBMIT)
def check_addon_ownership(
user,
addon,
allow_developer=False,
allow_addons_edit_permission=True,
allow_mozilla_disabled_addon=False,
):
"""
Check that user is the owner of the add-on.
Will always return False for deleted add-ons.
By default, this function will:
- return False for mozilla disabled add-ons. Can be bypassed with
allow_mozilla_disabled_addon=True.
- return False if the author is just a developer and not an owner. Can be
bypassed with allow_developer=True.
    - return False for non-authors. Can be bypassed with
      allow_addons_edit_permission=True if the user has the Addons:Edit
      permission. This check takes precedence over all the others.
"""
if not user.is_authenticated:
return False
# Deleted addons can't be edited at all.
if addon.is_deleted:
return False
# Users with 'Addons:Edit' can do anything.
if allow_addons_edit_permission and action_allowed_for(
user, amo.permissions.ADDONS_EDIT
):
return True
# Only admins can edit admin-disabled addons.
if addon.status == amo.STATUS_DISABLED and not allow_mozilla_disabled_addon:
return False
# Addon owners can do everything else.
roles = (amo.AUTHOR_ROLE_OWNER,)
if allow_developer:
roles += (amo.AUTHOR_ROLE_DEV,)
return addon.addonuser_set.filter(user=user, role__in=roles).exists()
def is_listed_addons_reviewer(user, allow_content_reviewers=True):
permissions = [
amo.permissions.ADDONS_REVIEW,
amo.permissions.ADDONS_RECOMMENDED_REVIEW,
]
if allow_content_reviewers:
permissions.append(amo.permissions.ADDONS_CONTENT_REVIEW)
allow_access = any(action_allowed_for(user, perm) for perm in permissions)
return allow_access
def is_listed_addons_viewer_or_reviewer(user, allow_content_reviewers=True):
return action_allowed_for(
user, amo.permissions.REVIEWER_TOOLS_VIEW
) or is_listed_addons_reviewer(user, allow_content_reviewers)
def METHOD_NAME(user):
return action_allowed_for(user, amo.permissions.ADDONS_REVIEW_UNLISTED)
def is_unlisted_addons_viewer_or_reviewer(user):
return action_allowed_for(
user, amo.permissions.REVIEWER_TOOLS_UNLISTED_VIEW
) or METHOD_NAME(user)
def is_static_theme_reviewer(user):
return action_allowed_for(user, amo.permissions.STATIC_THEMES_REVIEW)
def is_reviewer(user, addon, allow_content_reviewers=True):
"""Return True if the user is an addons reviewer, or a theme reviewer
and the addon is a theme.
If allow_content_reviewers is passed and False (defaults to True), then
having content review permission is not enough to be considered an addons
reviewer.
"""
if addon.type == amo.ADDON_STATICTHEME:
return is_static_theme_reviewer(user)
return is_listed_addons_reviewer(
user, allow_content_reviewers=allow_content_reviewers
)
def is_user_any_kind_of_reviewer(user, allow_viewers=False):
"""More lax version of is_reviewer: does not check what kind of reviewer
the user is, and accepts unlisted reviewers, post reviewers, content
reviewers. If allow_viewers is passed and truthy, also allows users with
just reviewer tools view access.
Don't use on anything that would alter add-on data.
any_reviewer_required() decorator and AllowAnyKindOfReviewer DRF permission
use this function behind the scenes to guard views that don't change the
add-on but still need to be restricted to reviewers only.
"""
permissions = [
amo.permissions.ADDONS_REVIEW,
amo.permissions.ADDONS_REVIEW_UNLISTED,
amo.permissions.ADDONS_CONTENT_REVIEW,
amo.permissions.ADDONS_RECOMMENDED_REVIEW,
amo.permissions.STATIC_THEMES_REVIEW,
]
if allow_viewers:
permissions.extend(
[
amo.permissions.REVIEWER_TOOLS_VIEW,
amo.permissions.REVIEWER_TOOLS_UNLISTED_VIEW,
]
)
allow_access = any(action_allowed_for(user, perm) for perm in permissions)
return allow_access
def author_or_unlisted_viewer_or_reviewer(user, addon):
return is_unlisted_addons_viewer_or_reviewer(user) or check_addon_ownership(
user,
addon,
allow_addons_edit_permission=False,
allow_developer=True,
)
|
1,579 |
tags
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetSecurityGroupsResult',
'AwaitableGetSecurityGroupsResult',
'get_security_groups',
'get_security_groups_output',
]
@pulumi.output_type
class GetSecurityGroupsResult:
"""
A collection of values returned by getSecurityGroups.
"""
def __init__(__self__, arns=None, filters=None, id=None, ids=None, METHOD_NAME=None, vpc_ids=None):
if arns and not isinstance(arns, list):
raise TypeError("Expected argument 'arns' to be a list")
pulumi.set(__self__, "arns", arns)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ids and not isinstance(ids, list):
raise TypeError("Expected argument 'ids' to be a list")
pulumi.set(__self__, "ids", ids)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
if vpc_ids and not isinstance(vpc_ids, list):
raise TypeError("Expected argument 'vpc_ids' to be a list")
pulumi.set(__self__, "vpc_ids", vpc_ids)
@property
@pulumi.getter
def arns(self) -> Sequence[str]:
"""
ARNs of the matched security groups.
"""
return pulumi.get(self, "arns")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetSecurityGroupsFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def ids(self) -> Sequence[str]:
"""
        IDs of the matched security groups.
"""
return pulumi.get(self, "ids")
@property
@pulumi.getter
def METHOD_NAME(self) -> Mapping[str, str]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="vpcIds")
def vpc_ids(self) -> Sequence[str]:
"""
VPC IDs of the matched security groups. The data source's tag or filter *will span VPCs* unless the `vpc-id` filter is also used.
"""
return pulumi.get(self, "vpc_ids")
class AwaitableGetSecurityGroupsResult(GetSecurityGroupsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSecurityGroupsResult(
arns=self.arns,
filters=self.filters,
id=self.id,
ids=self.ids,
METHOD_NAME=self.METHOD_NAME,
vpc_ids=self.vpc_ids)
def get_security_groups(filters: Optional[Sequence[pulumi.InputType['GetSecurityGroupsFilterArgs']]] = None,
METHOD_NAME: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecurityGroupsResult:
"""
Use this data source to get IDs and VPC membership of Security Groups that are created outside this provider.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test = aws.ec2.get_security_groups(tags={
"Application": "k8s",
"Environment": "dev",
})
```
```python
import pulumi
import pulumi_aws as aws
test = aws.ec2.get_security_groups(filters=[
aws.ec2.GetSecurityGroupsFilterArgs(
name="group-name",
values=["*nodes*"],
),
aws.ec2.GetSecurityGroupsFilterArgs(
name="vpc-id",
values=[var["vpc_id"]],
),
])
```
:param Sequence[pulumi.InputType['GetSecurityGroupsFilterArgs']] filters: One or more name/value pairs to use as filters. There are several valid keys, for a full reference, check out [describe-security-groups in the AWS CLI reference][1].
:param Mapping[str, str] tags: Map of tags, each pair of which must exactly match for desired security groups.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['tags'] = METHOD_NAME
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:ec2/getSecurityGroups:getSecurityGroups', __args__, opts=opts, typ=GetSecurityGroupsResult).value
return AwaitableGetSecurityGroupsResult(
arns=pulumi.get(__ret__, 'arns'),
filters=pulumi.get(__ret__, 'filters'),
id=pulumi.get(__ret__, 'id'),
ids=pulumi.get(__ret__, 'ids'),
METHOD_NAME=pulumi.get(__ret__, 'tags'),
vpc_ids=pulumi.get(__ret__, 'vpc_ids'))
@_utilities.lift_output_func(get_security_groups)
def get_security_groups_output(filters: Optional[pulumi.Input[Optional[Sequence[pulumi.InputType['GetSecurityGroupsFilterArgs']]]]] = None,
METHOD_NAME: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSecurityGroupsResult]:
"""
Use this data source to get IDs and VPC membership of Security Groups that are created outside this provider.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test = aws.ec2.get_security_groups(tags={
"Application": "k8s",
"Environment": "dev",
})
```
```python
import pulumi
import pulumi_aws as aws
test = aws.ec2.get_security_groups(filters=[
aws.ec2.GetSecurityGroupsFilterArgs(
name="group-name",
values=["*nodes*"],
),
aws.ec2.GetSecurityGroupsFilterArgs(
name="vpc-id",
values=[var["vpc_id"]],
),
])
```
:param Sequence[pulumi.InputType['GetSecurityGroupsFilterArgs']] filters: One or more name/value pairs to use as filters. There are several valid keys, for a full reference, check out [describe-security-groups in the AWS CLI reference][1].
:param Mapping[str, str] tags: Map of tags, each pair of which must exactly match for desired security groups.
"""
...
|
1,580 |
test unknown
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) typedef int GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
from unittest.mock import Mock
import twisted.internet
from twisted.trial import unittest
from autobahn.twisted import choosereactor
class ChooseReactorTests(unittest.TestCase):
def patch_reactor(self, name, new_reactor):
"""
Patch ``name`` so that Twisted will grab a fake reactor instead of
a real one.
"""
if hasattr(twisted.internet, name):
self.patch(twisted.internet, name, new_reactor)
else:
            def _cleanup():
                delattr(twisted.internet, name)
            self.addCleanup(_cleanup)
            setattr(twisted.internet, name, new_reactor)
def patch_modules(self):
"""
Patch ``sys.modules`` so that Twisted believes there is no
installed reactor.
"""
old_modules = dict(sys.modules)
new_modules = dict(sys.modules)
del new_modules["twisted.internet.reactor"]
def _cleanup():
sys.modules = old_modules
self.addCleanup(_cleanup)
sys.modules = new_modules
def METHOD_NAME(self):
"""
``install_optimal_reactor`` will use the default reactor if it is
unable to detect the platform it is running on.
"""
reactor_mock = Mock()
self.patch_reactor("selectreactor", reactor_mock)
self.patch(sys, "platform", "unknown")
# Emulate that a reactor has not been installed
self.patch_modules()
choosereactor.install_optimal_reactor()
reactor_mock.install.assert_called_once_with()
def test_mac(self):
"""
``install_optimal_reactor`` will install KQueueReactor on
Darwin (OS X).
"""
reactor_mock = Mock()
self.patch_reactor("kqreactor", reactor_mock)
self.patch(sys, "platform", "darwin")
# Emulate that a reactor has not been installed
self.patch_modules()
choosereactor.install_optimal_reactor()
reactor_mock.install.assert_called_once_with()
def test_win(self):
"""
``install_optimal_reactor`` will install IOCPReactor on Windows.
"""
if sys.platform != 'win32':
raise unittest.SkipTest('unit test requires Windows')
reactor_mock = Mock()
self.patch_reactor("iocpreactor", reactor_mock)
self.patch(sys, "platform", "win32")
# Emulate that a reactor has not been installed
self.patch_modules()
choosereactor.install_optimal_reactor()
reactor_mock.install.assert_called_once_with()
def test_bsd(self):
"""
``install_optimal_reactor`` will install KQueueReactor on BSD.
"""
reactor_mock = Mock()
self.patch_reactor("kqreactor", reactor_mock)
self.patch(sys, "platform", "freebsd11")
# Emulate that a reactor has not been installed
self.patch_modules()
choosereactor.install_optimal_reactor()
reactor_mock.install.assert_called_once_with()
def test_linux(self):
"""
``install_optimal_reactor`` will install EPollReactor on Linux.
"""
reactor_mock = Mock()
self.patch_reactor("epollreactor", reactor_mock)
self.patch(sys, "platform", "linux")
# Emulate that a reactor has not been installed
self.patch_modules()
choosereactor.install_optimal_reactor()
reactor_mock.install.assert_called_once_with()
|
1,581 |
test and condition and short circuit
|
from unittest.mock import Mock
import pytest
from nucypher.policy.conditions.base import AccessControlCondition
from nucypher.policy.conditions.lingo import AndCompoundCondition, OrCompoundCondition
@pytest.fixture(scope="function")
def mock_conditions():
condition_1 = Mock(spec=AccessControlCondition)
condition_1.verify.return_value = (True, 1)
condition_1.to_dict.return_value = {
"value": 1
} # needed for "id" value calc for CompoundAccessControlCondition
condition_2 = Mock(spec=AccessControlCondition)
condition_2.verify.return_value = (True, 2)
condition_2.to_dict.return_value = {"value": 2}
condition_3 = Mock(spec=AccessControlCondition)
condition_3.verify.return_value = (True, 3)
condition_3.to_dict.return_value = {"value": 3}
condition_4 = Mock(spec=AccessControlCondition)
condition_4.verify.return_value = (True, 4)
condition_4.to_dict.return_value = {"value": 4}
return condition_1, condition_2, condition_3, condition_4
def METHOD_NAME(mock_conditions):
condition_1, condition_2, condition_3, condition_4 = mock_conditions
and_condition = AndCompoundCondition(
operands=[
condition_1,
condition_2,
condition_3,
condition_4,
]
)
# ensure that all conditions evaluated when all return True
result, value = and_condition.verify()
assert result is True
assert len(value) == 4, "all conditions evaluated"
assert value == [1, 2, 3, 4]
# ensure that short circuit happens when 1st condition is false
condition_1.verify.return_value = (False, 1)
result, value = and_condition.verify()
assert result is False
assert len(value) == 1, "only one condition evaluated"
assert value == [1]
# short circuit occurs for 3rd entry
condition_1.verify.return_value = (True, 1)
condition_3.verify.return_value = (False, 3)
result, value = and_condition.verify()
assert result is False
assert len(value) == 3, "3-of-4 conditions evaluated"
assert value == [1, 2, 3]
def test_or_condition_and_short_circuit(mock_conditions):
condition_1, condition_2, condition_3, condition_4 = mock_conditions
or_condition = OrCompoundCondition(
operands=[
condition_1,
condition_2,
condition_3,
condition_4,
]
)
# ensure that only first condition evaluated when first is True
condition_1.verify.return_value = (True, 1) # short circuit here
result, value = or_condition.verify()
assert result is True
assert len(value) == 1, "only first condition needs to be evaluated"
assert value == [1]
# ensure first True condition is returned
condition_1.verify.return_value = (False, 1)
condition_2.verify.return_value = (False, 2)
condition_3.verify.return_value = (True, 3) # short circuit here
result, value = or_condition.verify()
assert result is True
assert len(value) == 3, "third condition causes short circuit"
assert value == [1, 2, 3]
# no short circuit occurs when all are False
condition_1.verify.return_value = (False, 1)
condition_2.verify.return_value = (False, 2)
condition_3.verify.return_value = (False, 3)
condition_4.verify.return_value = (False, 4)
result, value = or_condition.verify()
assert result is False
assert len(value) == 4, "all conditions evaluated"
assert value == [1, 2, 3, 4]
def test_compound_condition(mock_conditions):
condition_1, condition_2, condition_3, condition_4 = mock_conditions
compound_condition = AndCompoundCondition(
operands=[
OrCompoundCondition(
operands=[
condition_1,
condition_2,
condition_3,
]
),
condition_4,
]
)
# all conditions are True
result, value = compound_condition.verify()
assert result is True
assert len(value) == 2, "or_condition and condition_4"
assert value == [[1], 4]
# or condition is False
condition_1.verify.return_value = (False, 1)
condition_2.verify.return_value = (False, 2)
condition_3.verify.return_value = (False, 3)
result, value = compound_condition.verify()
assert result is False
assert len(value) == 1, "or_condition"
assert value == [
[1, 2, 3]
] # or-condition does not short circuit, but and-condition is short-circuited because or-condition is False
# or condition is True but condition 4 is False
condition_1.verify.return_value = (True, 1)
condition_4.verify.return_value = (False, 4)
result, value = compound_condition.verify()
assert result is False
assert len(value) == 2, "or_condition and condition_4"
assert value == [
[1],
4,
] # or-condition short-circuited because condition_1 was True
# condition_4 is now true
condition_4.verify.return_value = (True, 4)
result, value = compound_condition.verify()
assert result is True
assert len(value) == 2, "or_condition and condition_4"
assert value == [
[1],
4,
] # or-condition short-circuited because condition_1 was True
def test_nested_compound_condition(mock_conditions):
condition_1, condition_2, condition_3, condition_4 = mock_conditions
nested_compound_condition = AndCompoundCondition(
operands=[
OrCompoundCondition(
operands=[
condition_1,
AndCompoundCondition(
operands=[
condition_2,
condition_3,
]
),
]
),
condition_4,
]
)
# all conditions are True
result, value = nested_compound_condition.verify()
assert result is True
assert len(value) == 2, "or_condition and condition_4"
assert value == [[1], 4] # or short-circuited since condition_1 is True
# set condition_1 to False so nested and-condition must be evaluated
condition_1.verify.return_value = (False, 1)
result, value = nested_compound_condition.verify()
assert result is True
assert len(value) == 2, "or_condition and condition_4"
assert value == [
[1, [2, 3]],
4,
] # nested and-condition was evaluated and evaluated to True
# set condition_4 to False so that overall result flips to False
condition_4.verify.return_value = (False, 4)
result, value = nested_compound_condition.verify()
assert result is False
assert len(value) == 2, "or_condition and condition_4"
assert value == [[1, [2, 3]], 4]
|
1,582 |
test single event logdir
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorboard.uploader.logdir_loader."""
import os.path
import shutil
from tensorboard.uploader import logdir_loader
from tensorboard import test as tb_test
from tensorboard.backend.event_processing import directory_loader
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.backend.event_processing import io_wrapper
from tensorboard.util import test_util
class LogdirLoaderTest(tb_test.TestCase):
def _create_logdir_loader(self, logdir):
def directory_loader_factory(path):
return directory_loader.DirectoryLoader(
path,
event_file_loader.TimestampedEventFileLoader,
path_filter=io_wrapper.IsTensorFlowEventsFile,
)
return logdir_loader.LogdirLoader(logdir, directory_loader_factory)
def _extract_tags(self, event_generator):
"""Converts a generator of tf.Events into a list of event tags."""
return [
event.summary.value[0].tag
for event in event_generator
if not event.file_version
]
def _extract_run_to_tags(self, run_to_events):
"""Returns run-to-tags dict from run-to-event-generator dict."""
run_to_tags = {}
for run_name, event_generator in run_to_events.items():
# There should be no duplicate runs.
self.assertNotIn(run_name, run_to_tags)
run_to_tags[run_name] = self._extract_tags(event_generator)
return run_to_tags
def test_empty_logdir(self):
logdir = self.get_temp_dir()
loader = self._create_logdir_loader(logdir)
# Default state is empty.
self.assertEmpty(list(loader.get_run_events()))
loader.synchronize_runs()
# Still empty, since there's no data.
self.assertEmpty(list(loader.get_run_events()))
def METHOD_NAME(self):
logdir = self.get_temp_dir()
with test_util.FileWriter(logdir) as writer:
writer.add_test_summary("foo")
loader = self._create_logdir_loader(logdir)
loader.synchronize_runs()
self.assertEqual(
self._extract_run_to_tags(loader.get_run_events()), {".": ["foo"]}
)
# A second load should indicate no new data for the run.
self.assertEqual(
self._extract_run_to_tags(loader.get_run_events()), {".": []}
)
def test_multiple_writes_to_logdir(self):
logdir = self.get_temp_dir()
with test_util.FileWriter(os.path.join(logdir, "a")) as writer:
writer.add_test_summary("tag_a")
with test_util.FileWriter(os.path.join(logdir, "b")) as writer:
writer.add_test_summary("tag_b")
with test_util.FileWriter(os.path.join(logdir, "b", "x")) as writer:
writer.add_test_summary("tag_b_x")
writer_c = test_util.FileWriter(os.path.join(logdir, "c"))
writer_c.add_test_summary("tag_c")
writer_c.flush()
loader = self._create_logdir_loader(logdir)
loader.synchronize_runs()
self.assertEqual(
self._extract_run_to_tags(loader.get_run_events()),
{
"a": ["tag_a"],
"b": ["tag_b"],
"b/x": ["tag_b_x"],
"c": ["tag_c"],
},
)
# A second load should indicate no new data.
self.assertEqual(
self._extract_run_to_tags(loader.get_run_events()),
{"a": [], "b": [], "b/x": [], "c": []},
)
# Write some new data to both new and pre-existing event files.
with test_util.FileWriter(
os.path.join(logdir, "a"), filename_suffix=".other"
) as writer:
writer.add_test_summary("tag_a_2")
writer.add_test_summary("tag_a_3")
writer.add_test_summary("tag_a_4")
with test_util.FileWriter(
os.path.join(logdir, "b", "x"), filename_suffix=".other"
) as writer:
writer.add_test_summary("tag_b_x_2")
with writer_c as writer:
writer.add_test_summary("tag_c_2")
# New data should appear on the next load.
self.assertEqual(
self._extract_run_to_tags(loader.get_run_events()),
{
"a": ["tag_a_2", "tag_a_3", "tag_a_4"],
"b": [],
"b/x": ["tag_b_x_2"],
"c": ["tag_c_2"],
},
)
def test_directory_deletion(self):
logdir = self.get_temp_dir()
with test_util.FileWriter(os.path.join(logdir, "a")) as writer:
writer.add_test_summary("tag_a")
with test_util.FileWriter(os.path.join(logdir, "b")) as writer:
writer.add_test_summary("tag_b")
with test_util.FileWriter(os.path.join(logdir, "c")) as writer:
writer.add_test_summary("tag_c")
loader = self._create_logdir_loader(logdir)
loader.synchronize_runs()
self.assertEqual(list(loader.get_run_events().keys()), ["a", "b", "c"])
shutil.rmtree(os.path.join(logdir, "b"))
loader.synchronize_runs()
self.assertEqual(list(loader.get_run_events().keys()), ["a", "c"])
shutil.rmtree(logdir)
loader.synchronize_runs()
self.assertEmpty(loader.get_run_events())
def test_directory_deletion_during_event_loading(self):
logdir = self.get_temp_dir()
with test_util.FileWriter(logdir) as writer:
writer.add_test_summary("foo")
loader = self._create_logdir_loader(logdir)
loader.synchronize_runs()
self.assertEqual(
self._extract_run_to_tags(loader.get_run_events()), {".": ["foo"]}
)
shutil.rmtree(logdir)
runs_to_events = loader.get_run_events()
self.assertEqual(list(runs_to_events.keys()), ["."])
events = runs_to_events["."]
self.assertEqual(self._extract_tags(events), [])
if __name__ == "__main__":
tb_test.main()
|
1,583 |
tags
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetCertificateResult',
'AwaitableGetCertificateResult',
'get_certificate',
'get_certificate_output',
]
@pulumi.output_type
class GetCertificateResult:
"""
Certificate used for Custom Domain bindings of Container Apps in a Managed Environment
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, system_data=None, METHOD_NAME=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.CertificateResponseProperties':
"""
Certificate resource specific properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetCertificateResult(GetCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
system_data=self.system_data,
METHOD_NAME=self.METHOD_NAME,
type=self.type)
def get_certificate(certificate_name: Optional[str] = None,
environment_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateResult:
"""
Certificate used for Custom Domain bindings of Container Apps in a Managed Environment
:param str certificate_name: Name of the Certificate.
:param str environment_name: Name of the Managed Environment.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['certificateName'] = certificate_name
__args__['environmentName'] = environment_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:app/v20230502preview:getCertificate', __args__, opts=opts, typ=GetCertificateResult).value
return AwaitableGetCertificateResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
METHOD_NAME=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_certificate)
def get_certificate_output(certificate_name: Optional[pulumi.Input[str]] = None,
environment_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCertificateResult]:
"""
Certificate used for Custom Domain bindings of Container Apps in a Managed Environment
:param str certificate_name: Name of the Certificate.
:param str environment_name: Name of the Managed Environment.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
|
1,584 |
test custom eval
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.scripts.display_data import display_data as display, setup_args
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.message import Message
from parlai.core.metrics import F1Metric, AverageMetric
from parlai.core.teachers import create_task_agent_from_taskname
from parlai.core.worlds import create_task
from parlai.tasks.wizard_of_wikipedia.agents import TOKEN_KNOWLEDGE
import unittest
import itertools
import parlai.utils.testing as testing_utils
def product_dict(dictionary):
keys = dictionary.keys()
vals = dictionary.values()
for instance in itertools.product(*vals):
yield dict(zip(keys, instance))
class TestWoW(unittest.TestCase):
"""
Basic tests on the train_model.py example.
"""
@unittest.skip
def test_output(self):
dts = ['train', 'valid', 'test']
main_task = 'wizard_of_wikipedia'
variants = [
'WizardOfWikipediaTeacher',
'WizardDialogKnowledgeTeacher',
'BasicdialogTeacher',
'DocreaderTeacher',
'GeneratorTeacher',
]
variant_args = {
'WizardOfWikipediaTeacher': {},
'WizardDialogKnowledgeTeacher': {
'label_type': ['response', 'chosen_sent'],
'include_knowledge': [False, True],
'include_checked_sentence': [False, True],
},
'BasicdialogTeacher': {'wizard_dialog': [False, True]},
'DocreaderTeacher': {
'teacher_type': [
'docs',
'docs_sentence',
'more_docs',
'more_docs_sentence',
'span',
]
},
'GeneratorTeacher': {
'only_checked_knowledge': [False, True],
'ignorant_dropout': [0, 0.5, 1],
},
}
splits = ['random_split', 'topic_split']
for datatype in dts:
for task_var in variants:
for split in splits:
task_name = '{}:{}:{}'.format(main_task, task_var, split)
opt_defaults = {'task': task_name, 'datatype': datatype}
task_args = variant_args[task_var]
if len(task_args) == 0:
print('Testing {} with args {}'.format(task_name, opt_defaults))
self._run_display_test(opt_defaults)
else:
for combo in product_dict(task_args):
args = {**opt_defaults, **combo}
print('Testing {} with args {}'.format(task_name, args))
self._run_display_test(args)
def _run_display_test(self, kwargs):
with testing_utils.capture_output() as stdout:
parser = setup_args()
parser.set_defaults(**kwargs)
opt = parser.parse_args([])
agent = RepeatLabelAgent(opt)
world = create_task(opt, agent)
display(opt)
str_output = stdout.getvalue()
self.assertTrue(
'loaded {} episodes with a total of {} examples'.format(
world.num_episodes(), world.num_examples()
)
in str_output,
'Wizard of Wikipedia failed with following args: {}'.format(opt)
+ str_output,
)
def METHOD_NAME(self):
"""
Test whether custom evaluation works.
"""
parser = setup_args()
opt = parser.parse_args(
[
'--task',
'wizard_of_wikipedia',
'--datatype',
'valid',
'--label-type',
'chosen_sent',
]
)
teacher = create_task_agent_from_taskname(opt)[0]
title = 'Gardening'
cands = list('four')
text = "Gardening\nI like Gardening, even when I've only been doing it for a short time."
response = 'I live on a farm, we garden all year long, it is very relaxing.'
checked_sent = (
'Gardening is considered by many people to be a relaxing activity.'
)
checked_sent_label = f'{title}{TOKEN_KNOWLEDGE}{checked_sent}'
retrieval_metric_keys = ['passage_r@1', 'passage_r@5', 'title_r@1', 'title_r@5']
chosen_sent_teacher_action = Message(
{
'text': text,
'labels': [checked_sent_label],
'title': [title],
'checked_sentence': [checked_sent],
}
)
correct_chosen_sent_response = Message(
{
'text': checked_sent_label,
'title_candidates': [title] + cands,
'text_candidates': [checked_sent_label] + cands,
}
)
top5_chosen_sent_response = Message(
{
'text': f'hello{TOKEN_KNOWLEDGE}goodbye',
'title_candidates': cands + [title],
'text_candidates': cands + [checked_sent_label],
}
)
incorrect_chosen_sent_response = Message(
{
'text': f'hello{TOKEN_KNOWLEDGE}goodbye',
'title_candidates': cands,
'text_candidates': cands,
}
)
response_teacher_action = Message(
{'text': text, 'labels': [response], 'checked_sentence': checked_sent}
)
high_f1_response = Message({'text': checked_sent})
low_f1_response = Message({'text': 'incorrect'})
# 1) Test with correct top sentence
teacher.reset_metrics()
teacher.custom_evaluation(
chosen_sent_teacher_action,
[checked_sent_label],
correct_chosen_sent_response,
)
report = teacher.report()
for k in retrieval_metric_keys:
assert k in report
assert report[k] == AverageMetric(1)
# 2) Test with top sentence in top 5
teacher.reset_metrics()
teacher.custom_evaluation(
chosen_sent_teacher_action, [checked_sent_label], top5_chosen_sent_response
)
report = teacher.report()
for k in retrieval_metric_keys:
assert k in report
            assert report[k] == (AverageMetric(1) if '5' in k else AverageMetric(0))
# 3) Test with no top sentences
teacher.reset_metrics()
teacher.custom_evaluation(
chosen_sent_teacher_action,
[checked_sent_label],
incorrect_chosen_sent_response,
)
report = teacher.report()
for k in retrieval_metric_keys:
assert k in report
assert report[k] == AverageMetric(0)
# 4) Test knowledge f1 with high f1
teacher.label_type = 'response'
teacher.reset_metrics()
teacher.custom_evaluation(response_teacher_action, [response], high_f1_response)
report = teacher.report()
assert 'knowledge_f1' in report
assert report['knowledge_f1'] == F1Metric(1)
# 5) Test knowledge f1 with low f1
teacher.reset_metrics()
teacher.custom_evaluation(response_teacher_action, [response], low_f1_response)
report = teacher.report()
assert 'knowledge_f1' in report
assert report['knowledge_f1'] == F1Metric(0)
if __name__ == '__main__':
unittest.main()
|
1,585 |
matrix for control state
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This module contains utility functions for circuits.
"""
import numpy
from qiskit.exceptions import QiskitError
from qiskit.circuit.exceptions import CircuitError
from .parametervector import ParameterVectorElement
def sort_parameters(parameters):
"""Sort an iterable of :class:`.Parameter` instances into a canonical order, respecting the
ordering relationships between elements of :class:`.ParameterVector`\\ s."""
def key(parameter):
if isinstance(parameter, ParameterVectorElement):
return (parameter.vector.name, parameter.index)
return (parameter.name,)
return sorted(parameters, key=key)
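# Brief sketch of the resulting order (parameter names are made up): plain
# Parameters sort by name, while ParameterVector elements sort by
# (vector name, integer index), so theta[2] sorts before theta[10] even though
# a purely lexicographic sort of the names would put 'theta[10]' first.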
def _compute_control_matrix(base_mat, num_ctrl_qubits, ctrl_state=None):
r"""
Compute the controlled version of the input matrix with qiskit ordering.
This function computes the controlled unitary with :math:`n` control qubits
and :math:`m` target qubits,
.. math::
V_n^j(U_{2^m}) = (U_{2^m} \otimes |j\rangle\!\langle j|) +
(I_{2^m} \otimes (I_{2^n} - |j\rangle\!\langle j|)).
where :math:`|j\rangle \in \mathcal{H}^{2^n}` is the control state.
Args:
base_mat (ndarray): unitary to be controlled
num_ctrl_qubits (int): number of controls for new unitary
ctrl_state (int or str or None): The control state in decimal or as
a bitstring (e.g. '111'). If None, use 2**num_ctrl_qubits-1.
Returns:
ndarray: controlled version of base matrix.
Raises:
QiskitError: unrecognized mode or invalid ctrl_state
"""
num_target = int(numpy.log2(base_mat.shape[0]))
ctrl_dim = 2**num_ctrl_qubits
ctrl_grnd = numpy.repeat([[1], [0]], [1, ctrl_dim - 1])
if ctrl_state is None:
ctrl_state = ctrl_dim - 1
elif isinstance(ctrl_state, str):
ctrl_state = int(ctrl_state, 2)
if isinstance(ctrl_state, int):
if not 0 <= ctrl_state < ctrl_dim:
raise QiskitError("Invalid control state value specified.")
else:
raise QiskitError("Invalid control state type specified.")
ctrl_proj = numpy.diag(numpy.roll(ctrl_grnd, ctrl_state))
full_mat = numpy.kron(numpy.eye(2**num_target), numpy.eye(ctrl_dim) - ctrl_proj) + numpy.kron(
base_mat, ctrl_proj
)
return full_mat
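# Worked example (comment only, added for illustration): with one control qubit
# and the default ctrl_state, an X gate becomes the CNOT matrix in Qiskit's
# little-endian ordering, where the control is qubit 0:
#
#     x = numpy.array([[0, 1], [1, 0]])
#     _compute_control_matrix(x, 1)
#     # array([[1., 0., 0., 0.],
#     #        [0., 0., 0., 1.],
#     #        [0., 0., 1., 0.],
#     #        [0., 1., 0., 0.]])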
def _ctrl_state_to_int(ctrl_state, num_ctrl_qubits):
"""Convert ctrl_state to int.
Args:
ctrl_state (None, str, int): ctrl_state. If None, set to 2**num_ctrl_qubits-1.
If str, convert to int. If int, pass.
num_ctrl_qubits (int): The number of control qubits.
Return:
int: ctrl_state
Raises:
CircuitError: invalid ctrl_state
"""
ctrl_state_std = None
if isinstance(ctrl_state, str):
try:
assert len(ctrl_state) == num_ctrl_qubits
ctrl_state = int(ctrl_state, 2)
except ValueError as ex:
raise CircuitError("invalid control bit string: " + ctrl_state) from ex
except AssertionError as ex:
raise CircuitError("invalid control bit string: length != num_ctrl_qubits") from ex
if isinstance(ctrl_state, int):
if 0 <= ctrl_state < 2**num_ctrl_qubits:
ctrl_state_std = ctrl_state
else:
raise CircuitError("invalid control state specification")
elif ctrl_state is None:
ctrl_state_std = 2**num_ctrl_qubits - 1
else:
raise CircuitError(f"invalid control state specification: {repr(ctrl_state)}")
return ctrl_state_std
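# Quick sketch of the accepted forms (illustrative values): for num_ctrl_qubits=2,
# _ctrl_state_to_int(None, 2) -> 3, _ctrl_state_to_int('10', 2) -> 2, and
# _ctrl_state_to_int(2, 2) -> 2; values outside [0, 2**num_ctrl_qubits - 1] or
# bitstrings of the wrong length raise CircuitError.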
def with_gate_array(base_array):
"""Class decorator that adds an ``__array__`` method to a :class:`.Gate` instance that returns a
singleton nonwritable view onto the complex matrix described by ``base_array``."""
nonwritable = numpy.array(base_array, dtype=numpy.complex128)
nonwritable.setflags(write=False)
def __array__(_self, dtype=None):
return numpy.asarray(nonwritable, dtype=dtype)
def decorator(cls):
if hasattr(cls, "__array__"):
raise RuntimeError("Refusing to decorate a class that already has '__array__' defined.")
cls.__array__ = __array__
return cls
return decorator
def with_controlled_gate_array(base_array, num_ctrl_qubits, cached_states=None):
"""Class decorator that adds an ``__array__`` method to a :class:`.ControlledGate` instance that
returns singleton nonwritable views onto a relevant precomputed complex matrix for the given
control state.
If ``cached_states`` is not given, then all possible control states are precomputed. If it is
given, it should be an iterable of integers, and only these control states will be cached."""
base = numpy.asarray(base_array, dtype=numpy.complex128)
def METHOD_NAME(state):
out = numpy.asarray(
_compute_control_matrix(base, num_ctrl_qubits, state),
dtype=numpy.complex128,
)
out.setflags(write=False)
return out
if cached_states is None:
nonwritables = [METHOD_NAME(state) for state in range(2**num_ctrl_qubits)]
def __array__(self, dtype=None):
return numpy.asarray(nonwritables[self.ctrl_state], dtype=dtype)
else:
nonwritables = {state: METHOD_NAME(state) for state in cached_states}
def __array__(self, dtype=None):
if (out := nonwritables.get(self.ctrl_state)) is not None:
return numpy.asarray(out, dtype=dtype)
return numpy.asarray(
_compute_control_matrix(base, num_ctrl_qubits, self.ctrl_state), dtype=dtype
)
def decorator(cls):
if hasattr(cls, "__array__"):
raise RuntimeError("Refusing to decorate a class that already has '__array__' defined.")
cls.__array__ = __array__
return cls
return decorator
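# Hypothetical usage sketch (the names below are indicative, not definitions from
# this file): decorating a controlled-gate class caches its controlled matrices so
# that numpy.asarray(gate) returns a read-only view for the gate's ctrl_state, e.g.
#
#     @with_controlled_gate_array(_X_ARRAY, num_ctrl_qubits=1)
#     class CXGate(ControlledGate):
#         ...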
|
1,586 |
get dbtests
|
"""
Run time data storage and retrieval.
"""
import time
import datetime
import json
import socket
import os.path
import shutil
from processor.helper.config.config_utils import config_value, framework_currentdata, get_cache_data, \
set_cache_data, TESTS, DBTESTS, DBVALUES, SNAPSHOT
from processor.helper.json.json_utils import json_from_file, save_json_to_file
from processor.logging.log_handler import getlogger, FWLOGFILENAME
from processor.helper.file.file_utils import remove_file, exists_dir, mkdir_path
exclude_list = ['token', 'clientSecret', 'vaulttoken', 'exclusion', 'apitoken', 'gittoken', 'outputpath']
def METHOD_NAME():
currdata = get_currentdata()
if DBTESTS in currdata:
dbtests = currdata[DBTESTS]
else:
nodb = config_value(TESTS, DBTESTS)
if nodb and nodb.upper() in DBVALUES:
dbtests = DBVALUES.index(nodb.upper())
else:
dbtests = DBVALUES.index(SNAPSHOT)
put_in_currentdata(DBTESTS, dbtests)
return dbtests
def add_to_exclude_list(key):
if key not in exclude_list:
exclude_list.append(key)
def init_currentdata():
""" Initialises data structure to store runtime data. """
started = int(time.time() * 1000)
runctx = framework_currentdata()
run_dir = os.path.dirname(runctx)
if not exists_dir(run_dir):
mkdir_path(run_dir)
run_data = {
'start': started,
'end': started,
'remote': False,
'errors': [],
'host': socket.gethostname(),
'timestamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
save_currentdata(run_data)
def put_in_currentdata(key, value):
"""Adds a value in the current run data"""
if key:
curr_data = get_currentdata()
if key in curr_data:
val = curr_data[key]
if isinstance(val, list):
val.append(value)
else:
curr_data[key] = value
else:
curr_data[key] = value
save_currentdata(curr_data)
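# Small sketch of the append-vs-overwrite behaviour (keys and values are made up):
#     put_in_currentdata('warning', 'first')   # key absent -> stored as 'first'
#     put_in_currentdata('warning', 'second')  # existing value is not a list -> replaced
#     put_in_currentdata('repos', [])          # store a list once...
#     put_in_currentdata('repos', 'repo1')     # ...and later values are appended to it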
def delete_from_currentdata(key):
"""Remove a key from the current run data"""
if key:
currdata = get_currentdata()
if key in currdata:
del currdata[key]
save_currentdata(currdata)
def get_from_currentdata(key):
""" Get the data for this key from the rundata"""
data = None
currdata = get_currentdata()
if key and key in currdata:
data = currdata[key]
return data
def get_currentdata():
"""Get the current run data, if present else empty json object"""
runctx = framework_currentdata()
curr_data = json_from_file(runctx)
if not curr_data:
curr_data = {}
return curr_data
def put_in_cachedata(key, value):
"""Adds a value in the cache data"""
if key:
curr_data = get_cache_data()
if key in curr_data:
val = curr_data[key]
if isinstance(val, list):
val.append(value)
else:
curr_data[key] = value
else:
curr_data[key] = value
set_cache_data(curr_data)
def get_from_cachedata(key):
""" Get the data for this key from the cachedata"""
data = None
currdata = get_cache_data()
if key and key in currdata:
data = currdata[key]
return data
def save_currentdata(curr_data):
"""Save the key value rundata for further access, if None store it empty."""
if not curr_data:
curr_data = {}
runctx = framework_currentdata()
save_json_to_file(curr_data, runctx)
def delete_currentdata():
"""Delete the rundata file when exiting of the script."""
logger = getlogger()
# singletest = get_from_currentdata(SINGLETEST)
# if singletest:
# container = get_from_currentdata('container')
# cdir = get_container_dir(container)
# shutil.rmtree('%s/snapshots' % cdir)
cleaning_repos = get_from_currentdata("CLEANING_REPOS")
if cleaning_repos:
for repo in cleaning_repos:
if repo and os.path.exists(repo):
shutil.rmtree(repo)
delete_from_currentdata("CLEANING_REPOS")
logger.info("END: Completed the run and cleaning up.")
runctx = get_currentdata()
runctx['end'] = int(time.time() * 1000)
runctx['log'] = FWLOGFILENAME
if 'start' in runctx:
runctx['duration'] = '%d seconds' % int((runctx['end'] - runctx['start'])/1000)
runctx['start'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(runctx['start']/1000))
runctx['end'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(runctx['end']/1000))
newdata = {}
for k,v in runctx.items():
if k not in exclude_list:
newdata[k] = v
logger.critical("\033[92m Run Stats: %s\033[00m" % json.dumps(newdata, indent=2))
if runctx['remote']:
from processor.helper.utils.compliance_utils import upload_compliance_results
logger.info("Uploading data....")
upload_compliance_results(runctx['container'], runctx['outputpath'], runctx['env'], runctx['company'], runctx['apitoken'])
run_file = framework_currentdata()
remove_file(run_file)
|
1,587 |
postprocess predictions
|
import os
import argparse
import collections
import numpy as np
import torch
def process_files(args):
all_predictions = collections.OrderedDict()
all_labels = collections.OrderedDict()
all_uid = collections.OrderedDict()
for path in args.paths:
path = os.path.join(path, args.prediction_name)
try:
data = torch.load(path)
for dataset in data:
name, d = dataset
predictions, labels, uid = d
if name not in all_predictions:
all_predictions[name] = np.array(predictions)
if args.labels is None:
args.labels = [i for i in range(all_predictions[name].shape[1])]
if args.eval:
all_labels[name] = np.array(labels)
all_uid[name] = np.array(uid)
else:
all_predictions[name] += np.array(predictions)
assert np.allclose(all_uid[name], np.array(uid))
except Exception as e:
print(e)
continue
return all_predictions, all_labels, all_uid
def get_threshold(all_predictions, all_labels, one_threshold=False):
if one_threshold:
        all_predictions = {'combined': np.concatenate(list(all_predictions.values()))}
        all_labels = {'combined': np.concatenate(list(all_labels.values()))}
out_thresh = []
for dataset in all_predictions:
preds = all_predictions[dataset]
labels = all_labels[dataset]
out_thresh.append(calc_threshold(preds, labels))
return out_thresh
def calc_threshold(p, l):
trials = [(i) * (1. / 100.) for i in range(100)]
best_acc = float('-inf')
best_thresh = 0
for t in trials:
acc = ((apply_threshold(p, t).argmax(-1) == l).astype(float)).mean()
if acc > best_acc:
best_acc = acc
best_thresh = t
return best_thresh
def apply_threshold(preds, t):
assert (np.allclose(preds.sum(-1), np.ones(preds.shape[0])))
prob = preds[:, -1]
thresholded = (prob >= t).astype(int)
preds = np.zeros_like(preds)
preds[np.arange(len(thresholded)), thresholded.reshape(-1)] = 1
return preds
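# Worked sketch (illustrative values, not from any checkpoint): apply_threshold
# reads the last column as the positive-class probability and one-hot encodes
# the thresholded decision:
#
#     preds = np.array([[0.7, 0.3], [0.2, 0.8]])
#     apply_threshold(preds, 0.5)
#     # array([[1., 0.],
#     #        [0., 1.]])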
def threshold_predictions(all_predictions, threshold):
if len(threshold) != len(all_predictions):
threshold = [threshold[-1]] * (len(all_predictions) - len(threshold))
for i, dataset in enumerate(all_predictions):
thresh = threshold[i]
preds = all_predictions[dataset]
all_predictions[dataset] = apply_threshold(preds, thresh)
return all_predictions
def METHOD_NAME(all_predictions, all_labels, args):
for d in all_predictions:
all_predictions[d] = all_predictions[d] / len(args.paths)
if args.calc_threshold:
args.threshold = get_threshold(all_predictions, all_labels, args.one_threshold)
print('threshold', args.threshold)
if args.threshold is not None:
all_predictions = threshold_predictions(all_predictions, args.threshold)
return all_predictions, all_labels
def write_predictions(all_predictions, all_labels, all_uid, args):
all_correct = 0
count = 0
for dataset in all_predictions:
preds = all_predictions[dataset]
preds = np.argmax(preds, -1)
if args.eval:
correct = (preds == all_labels[dataset]).sum()
num = len(all_labels[dataset])
accuracy = correct / num
count += num
all_correct += correct
accuracy = (preds == all_labels[dataset]).mean()
print(accuracy)
if not os.path.exists(os.path.join(args.outdir, dataset)):
os.makedirs(os.path.join(args.outdir, dataset))
outpath = os.path.join(
args.outdir, dataset, os.path.splitext(
args.prediction_name)[0] + '.tsv')
with open(outpath, 'w') as f:
f.write('id\tlabel\n')
f.write('\n'.join(str(uid) + '\t' + str(args.labels[p])
for uid, p in zip(all_uid[dataset], preds.tolist())))
if args.eval:
print(all_correct / count)
def ensemble_predictions(args):
all_predictions, all_labels, all_uid = process_files(args)
all_predictions, all_labels = METHOD_NAME(all_predictions, all_labels, args)
write_predictions(all_predictions, all_labels, all_uid, args)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--paths', required=True, nargs='+',
help='paths to checkpoint directories used in ensemble')
parser.add_argument('--eval', action='store_true',
help='compute accuracy metrics against labels (dev set)')
parser.add_argument('--outdir',
help='directory to place ensembled predictions in')
parser.add_argument('--prediction-name', default='test_predictions.pt',
help='name of predictions in checkpoint directories')
parser.add_argument('--calc-threshold', action='store_true',
help='calculate threshold classification')
parser.add_argument('--one-threshold', action='store_true',
                        help='use one threshold for all subdatasets')
parser.add_argument('--threshold', nargs='+', default=None, type=float,
help='user supplied threshold for classification')
parser.add_argument('--labels', nargs='+', default=None,
help='whitespace separated list of label names')
args = parser.parse_args()
ensemble_predictions(args)
if __name__ == '__main__':
main()
|
1,588 |
test verify ld proofs not verified unsigned
|
from asynctest import TestCase
from aries_cloudagent.wallet.key_type import ED25519
from ..ed25519_signature_2020 import Ed25519Signature2020
from ...crypto.wallet_key_pair import WalletKeyPair
from ...ld_proofs import sign, verify
from ...purposes.assertion_proof_purpose import AssertionProofPurpose
from ....tests.data import (
TEST_LD_DOCUMENT,
TEST_LD_DOCUMENT_SIGNED_ED25519_2020,
TEST_LD_DOCUMENT_BAD_SIGNED_ED25519_2020,
TEST_VC_DOCUMENT,
TEST_VC_DOCUMENT_SIGNED_ED25519_2020,
)
from ....tests.document_loader import custom_document_loader
from .....core.in_memory import InMemoryProfile
from .....did.did_key import DIDKey
from .....wallet.in_memory import InMemoryWallet
class TestEd25519Signature2020(TestCase):
test_seed = "testseed000000000000000000000001"
async def setUp(self):
self.profile = InMemoryProfile.test_profile()
self.wallet = InMemoryWallet(self.profile)
self.key = await self.wallet.create_signing_key(
key_type=ED25519, seed=self.test_seed
)
self.verification_method = DIDKey.from_public_key_b58(
self.key.verkey, ED25519
).key_id
self.sign_key_pair = WalletKeyPair(
wallet=self.wallet,
key_type=ED25519,
public_key_base58=self.key.verkey,
)
self.verify_key_pair = WalletKeyPair(wallet=self.wallet, key_type=ED25519)
async def test_sign_ld_proofs(self):
signed = await sign(
document=TEST_LD_DOCUMENT,
suite=Ed25519Signature2020(
key_pair=self.sign_key_pair,
verification_method=self.verification_method,
),
document_loader=custom_document_loader,
purpose=AssertionProofPurpose(),
)
assert signed
async def test_verify_ld_proofs(self):
result = await verify(
document=TEST_LD_DOCUMENT_SIGNED_ED25519_2020,
suites=[Ed25519Signature2020(key_pair=self.verify_key_pair)],
document_loader=custom_document_loader,
purpose=AssertionProofPurpose(),
)
assert result
assert result.verified
async def test_verify_ld_proofs_not_verified_bad_signature(self):
result = await verify(
document=TEST_LD_DOCUMENT_BAD_SIGNED_ED25519_2020,
suites=[Ed25519Signature2020(key_pair=self.verify_key_pair)],
document_loader=custom_document_loader,
purpose=AssertionProofPurpose(),
)
assert result
assert not result.verified
async def METHOD_NAME(self):
MODIFIED_DOCUMENT = {
**TEST_LD_DOCUMENT_SIGNED_ED25519_2020,
"unsigned_claim": "oops",
}
result = await verify(
document=MODIFIED_DOCUMENT,
suites=[Ed25519Signature2020(key_pair=self.verify_key_pair)],
document_loader=custom_document_loader,
purpose=AssertionProofPurpose(),
)
assert result
assert not result.verified
async def test_verify_ld_proofs_not_verified_changed_statement(self):
MODIFIED_DOCUMENT = {
**TEST_LD_DOCUMENT_SIGNED_ED25519_2020,
"email": "[email protected]",
}
result = await verify(
document=MODIFIED_DOCUMENT,
suites=[Ed25519Signature2020(key_pair=self.verify_key_pair)],
document_loader=custom_document_loader,
purpose=AssertionProofPurpose(),
)
assert result
assert not result.verified
async def test_sign_vc(self):
signed = await sign(
document=TEST_VC_DOCUMENT,
suite=Ed25519Signature2020(
key_pair=self.sign_key_pair,
verification_method=self.verification_method,
),
document_loader=custom_document_loader,
purpose=AssertionProofPurpose(),
)
assert signed
async def test_verify_vc(self):
result = await verify(
document=TEST_VC_DOCUMENT_SIGNED_ED25519_2020,
suites=[Ed25519Signature2020(key_pair=self.verify_key_pair)],
document_loader=custom_document_loader,
purpose=AssertionProofPurpose(),
)
assert result
assert result.verified
|
1,589 |
find node
|
"""
Functions that communicate with OBS API
and work with related XML data.
"""
import xml.sax.saxutils
from xml.etree import ElementTree as ET
def get(apiurl, path, query=None):
"""
Send a GET request to OBS.
:param apiurl: OBS apiurl.
:type apiurl: str
:param path: URL path segments.
:type path: list(str)
:param query: URL query values.
:type query: dict(str, str)
:returns: Parsed XML root.
:rtype: xml.etree.ElementTree.Element
"""
from .. import connection as osc_connection
from .. import core as osc_core
assert apiurl
assert path
if not isinstance(path, (list, tuple)):
raise TypeError("Argument `path` expects a list of strings")
url = osc_core.makeurl(apiurl, path, query)
with osc_connection.http_GET(url) as f:
root = ET.parse(f).getroot()
return root
def post(apiurl, path, query=None):
"""
Send a POST request to OBS.
:param apiurl: OBS apiurl.
:type apiurl: str
:param path: URL path segments.
:type path: list(str)
:param query: URL query values.
:type query: dict(str, str)
:returns: Parsed XML root.
:rtype: xml.etree.ElementTree.Element
"""
from .. import connection as osc_connection
from .. import core as osc_core
assert apiurl
assert path
if not isinstance(path, (list, tuple)):
raise TypeError("Argument `path` expects a list of strings")
url = osc_core.makeurl(apiurl, path, query)
with osc_connection.http_POST(url) as f:
root = ET.parse(f).getroot()
return root
def put(apiurl, path, query=None, data=None):
"""
Send a PUT request to OBS.
:param apiurl: OBS apiurl.
:type apiurl: str
:param path: URL path segments.
:type path: list(str)
:param query: URL query values.
:type query: dict(str, str)
:returns: Parsed XML root.
:rtype: xml.etree.ElementTree.Element
"""
from osc import connection as osc_connection
from osc import core as osc_core
assert apiurl
assert path
if not isinstance(path, (list, tuple)):
raise TypeError("Argument `path` expects a list of strings")
url = osc_core.makeurl(apiurl, path, query)
with osc_connection.http_PUT(url, data=data) as f:
root = osc_core.ET.parse(f).getroot()
return root
def _to_xpath(*args):
"""
Convert strings and dictionaries to xpath:
string gets translated to a node name
dictionary gets translated to [@key='value'] predicate
All values are properly escaped.
Examples:
args: ["directory", "entry", {"name": "osc"}]
result: "directory/entry[@name='osc']"
args: ["attributes", "attribute", {"namespace": "OBS", "name": "BranchSkipRepositories"}, "value"]
result: "attributes/attribute[@namespace='OBS'][@name='BranchSkipRepositories']/value"
"""
xpath = ""
for arg in args:
if isinstance(arg, str):
arg = xml.sax.saxutils.escape(arg)
xpath += f"/{arg}"
elif isinstance(arg, dict):
for key, value in arg.items():
key = xml.sax.saxutils.escape(key)
value = xml.sax.saxutils.escape(value)
xpath += f"[@{key}='{value}']"
else:
raise TypeError(f"Argument '{arg}' has invalid type '{type(arg).__name__}'. Expected types: str, dict")
# strip the leading slash because we're making a relative search
xpath = xpath.lstrip("/")
return xpath
def find_nodes(root, root_name, *args):
"""
    Find all nodes matching the simplified xpath built from `args`.
Also, verify that the root tag matches the `root_name`.
:param root: Root node.
:type root: xml.etree.ElementTree.Element
:param root_name: Expected (tag) name of the root node.
:type root_name: str
:param *args: Simplified xpath notation: strings are node names, dictionaries translate to [@key='value'] predicates.
:type *args: list[str, dict]
:returns: List of nodes that match xpath based on the given `args`.
:rtype: list(xml.etree.ElementTree.Element)
"""
assert root.tag == root_name
return root.findall(_to_xpath(*args))
def METHOD_NAME(root, root_name, *args):
"""
    Find a single node matching the simplified xpath built from `args`.
    If `args` is empty, the root node is returned.
Also, verify that the root tag matches the `root_name`.
:param root: Root node.
:type root: xml.etree.ElementTree.Element
:param root_name: Expected (tag) name of the root node.
:type root_name: str
:param *args: Simplified xpath notation: strings are node names, dictionaries translate to [@key='value'] predicates.
:type *args: list[str, dict]
:returns: The node that matches xpath based on the given `args`
or the root node if `args` are not specified.
:rtype: xml.etree.ElementTree.Element
"""
assert root.tag == root_name
if not args:
# only verify the root tag
return root
return root.find(_to_xpath(*args))
def group_child_nodes(node):
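    # Stable-sort the node's children in place so that elements sharing a tag become
    # adjacent, while preserving the original relative order within each tag group.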
nodes = node[:]
result = []
while nodes:
# look at the tag of the first node
tag = nodes[0].tag
# collect all nodes with the same tag and append them to the result
# then repeat the step for the next tag(s)
matches = []
others = []
for i in nodes:
if i.tag == tag:
matches.append(i)
else:
others.append(i)
result += matches
nodes = others
node[:] = result
def write_xml_node_to_file(node, path, indent=True):
"""
Write a XML node to a file.
:param node: Node to write.
:type node: xml.etree.ElementTree.Element
:param path: Path to a file that will be written to.
:type path: str
:param indent: Whether to indent (pretty-print) the written XML.
:type indent: bool
"""
if indent:
xml_indent(node)
ET.ElementTree(node).write(path)
def xml_escape(string):
"""
Escape the string so it's safe to use in XML and xpath.
"""
    entities = {
        "\"": "&quot;",
        "'": "&apos;",
    }
if isinstance(string, bytes):
return xml.sax.saxutils.escape(string.decode("utf-8"), entities=entities).encode("utf-8")
return xml.sax.saxutils.escape(string, entities=entities)
def xml_unescape(string):
"""
Decode XML entities in the string.
"""
    entities = {
        "&quot;": "\"",
        "&apos;": "'",
    }
if isinstance(string, bytes):
return xml.sax.saxutils.unescape(string.decode("utf-8"), entities=entities).encode("utf-8")
return xml.sax.saxutils.unescape(string, entities=entities)
def xml_indent(root):
"""
Indent XML so it looks pretty after printing or saving to file.
"""
if hasattr(ET, "indent"):
# ElementTree supports indent() in Python 3.9 and newer
ET.indent(root)
else:
from .. import core as osc_core
osc_core.xmlindent(root)
|
1,590 |
widgets
|
'''
conv_star.py
Copyright (C) 2020, 2021, 2022 Phillip A Carter
Copyright (C) 2020, 2021, 2022 Gregory D Carl
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from PyQt5.QtCore import Qt, QCoreApplication
from PyQt5.QtWidgets import QLabel, QMessageBox
from importlib import reload
from plasmac import star as STAR
_translate = QCoreApplication.translate
def preview(P, W, Conv):
if P.dialogError:
return
if not W.xsEntry.text():
W.xsEntry.setText('{:0.3f}'.format(P.xOrigin))
if not W.ysEntry.text():
W.ysEntry.setText('{:0.3f}'.format(P.yOrigin))
origin = W.centLeft.text() == 'CENTER'
error = STAR.preview(Conv, P.fTmp, P.fNgc, P.fNgcBkp, \
int(W.conv_material.currentText().split(':')[0]), \
W.conv_material.currentText().split(':')[1].strip(), \
P.preAmble, P.postAmble, \
W.liEntry.text(), W.loEntry.text(), \
origin, W.xsEntry.text(), W.ysEntry.text(), \
W.kerf_width.value(), P.intExt, \
W.pEntry.text(), W.odEntry.text(), W.idEntry.text(), W.aEntry.text())
if error:
P.dialogError = True
P.dialog_show_ok(QMessageBox.Warning, _translate('Conversational', 'Star Error'), error)
else:
W.conv_preview.load(P.fNgc)
W.conv_preview.set_current_view()
W.add.setEnabled(True)
W.undo.setEnabled(True)
Conv.conv_preview_button(P, W, True)
def auto_preview(P, W, Conv, button=False):
if button == 'intext':
if not W.intExt.isChecked():
return
Conv.conv_auto_preview_button(P, W, button)
elif button == 'center':
if not W.centLeft.isChecked():
return
Conv.conv_auto_preview_button(P, W, button)
if W.main_tab_widget.currentIndex() == 1 and \
W.pEntry.text() and W.odEntry.text() and W.idEntry.text():
preview(P, W, Conv)
def entry_changed(P, W, Conv, widget):
Conv.conv_entry_changed(P, W, widget)
def METHOD_NAME(P, W, Conv):
if P.developmentPin.get():
reload(STAR)
W.lDesc.setText(_translate('Conversational', 'CREATING STAR'))
W.iLabel.setPixmap(P.conv_star_l)
#alignment and size
rightAlign = ['ctLabel', 'spLabel', 'xsLabel', 'xsEntry', 'ysLabel', \
'ysEntry', 'liLabel', 'liEntry', 'loLabel', 'loEntry', \
'pLabel', 'pEntry', 'odLabel', 'odEntry', 'idLabel', \
'idEntry', 'aLabel', 'aEntry']
centerAlign = ['lDesc']
rButton = ['intExt', 'centLeft']
pButton = ['preview', 'add', 'undo']
for widget in rightAlign:
W[widget].setAlignment(Qt.AlignRight | Qt.AlignVCenter)
W[widget].setFixedWidth(80)
W[widget].setFixedHeight(24)
for widget in centerAlign:
W[widget].setAlignment(Qt.AlignCenter | Qt.AlignBottom)
W[widget].setFixedWidth(240)
W[widget].setFixedHeight(24)
for widget in rButton:
W[widget].setFixedWidth(80)
W[widget].setFixedHeight(24)
for widget in pButton:
W[widget].setFixedWidth(80)
W[widget].setFixedHeight(24)
#connections
W.conv_material.currentTextChanged.connect(lambda:auto_preview(P, W, Conv))
W.intExt.toggled.connect(lambda:auto_preview(P, W, Conv, 'intext'))
W.centLeft.toggled.connect(lambda:auto_preview(P, W, Conv, 'center'))
W.preview.pressed.connect(lambda:preview(P, W, Conv))
W.add.pressed.connect(lambda:Conv.conv_add_shape_to_file(P, W))
W.undo.pressed.connect(lambda:Conv.conv_undo_shape(P, W))
entries = ['xsEntry', 'ysEntry', 'liEntry', 'loEntry', \
'pEntry', 'odEntry', 'idEntry', 'aEntry']
for entry in entries:
W[entry].textChanged.connect(lambda:entry_changed(P, W, Conv, W.sender()))
W[entry].returnPressed.connect(lambda:preview(P, W, Conv))
#add to layout
if P.landscape:
W.entries.addWidget(W.ctLabel, 0, 0)
W.entries.addWidget(W.intExt, 0, 1)
W.entries.addWidget(W.spLabel, 1, 0)
W.entries.addWidget(W.centLeft, 1, 1)
W.entries.addWidget(W.xsLabel, 2, 0)
W.entries.addWidget(W.xsEntry, 2, 1)
W.entries.addWidget(W.ysLabel, 3, 0)
W.entries.addWidget(W.ysEntry, 3, 1)
W.entries.addWidget(W.liLabel, 4, 0)
W.entries.addWidget(W.liEntry, 4, 1)
W.entries.addWidget(W.loLabel, 5, 0)
W.entries.addWidget(W.loEntry, 5, 1)
W.entries.addWidget(W.pLabel, 6, 0)
W.entries.addWidget(W.pEntry, 6, 1)
W.entries.addWidget(W.odLabel, 7, 0)
W.entries.addWidget(W.odEntry, 7, 1)
W.entries.addWidget(W.idLabel, 8, 0)
W.entries.addWidget(W.idEntry, 8, 1)
W.entries.addWidget(W.aLabel, 9, 0)
W.entries.addWidget(W.aEntry, 9, 1)
for r in [10,11]:
W['s{}'.format(r)] = QLabel('')
W['s{}'.format(r)].setFixedHeight(24)
W.entries.addWidget(W['s{}'.format(r)], r, 0)
W.entries.addWidget(W.preview, 12, 0)
W.entries.addWidget(W.add, 12, 2)
W.entries.addWidget(W.undo, 12, 4)
W.entries.addWidget(W.lDesc, 13 , 1, 1, 3)
W.entries.addWidget(W.iLabel, 0 , 2, 7, 3)
else:
W.entries.addWidget(W.conv_material, 0, 0, 1, 5)
W.entries.addWidget(W.ctLabel, 1, 0)
W.entries.addWidget(W.intExt, 1, 1)
W.entries.addWidget(W.spLabel, 2, 0)
W.entries.addWidget(W.centLeft, 2, 1)
W.entries.addWidget(W.xsLabel, 3, 0)
W.entries.addWidget(W.xsEntry, 3, 1)
W.entries.addWidget(W.ysLabel, 3, 2)
W.entries.addWidget(W.ysEntry, 3, 3)
W.entries.addWidget(W.liLabel, 4, 0)
W.entries.addWidget(W.liEntry, 4, 1)
W.entries.addWidget(W.loLabel, 4, 2)
W.entries.addWidget(W.loEntry, 4, 3)
W.entries.addWidget(W.pLabel, 5, 0)
W.entries.addWidget(W.pEntry, 5, 1)
W.entries.addWidget(W.odLabel, 6, 0)
W.entries.addWidget(W.odEntry, 6, 1)
W.entries.addWidget(W.idLabel, 6, 2)
W.entries.addWidget(W.idEntry, 6, 3)
W.entries.addWidget(W.aLabel, 7, 0)
W.entries.addWidget(W.aEntry, 7, 1)
W.s8 = QLabel('')
W.s8.setFixedHeight(24)
W.entries.addWidget(W.s8, 8, 0)
W.entries.addWidget(W.preview, 9, 0)
W.entries.addWidget(W.add, 9, 2)
W.entries.addWidget(W.undo, 9, 4)
W.entries.addWidget(W.lDesc, 10 , 1, 1, 3)
W.entries.addWidget(W.iLabel, 0 , 5, 7, 3)
W.pEntry.setFocus()
P.convSettingsChanged = False
|
1,591 |
get default category name
|
import re
from typing import Any, Callable, Dict, List, Optional
import sqlalchemy as sa
from szurubooru import config, db, errors, model, rest
from szurubooru.func import cache, serialization, util
DEFAULT_CATEGORY_NAME_CACHE_KEY = "default-tag-category"
class TagCategoryNotFoundError(errors.NotFoundError):
pass
class TagCategoryAlreadyExistsError(errors.ValidationError):
pass
class TagCategoryIsInUseError(errors.ValidationError):
pass
class InvalidTagCategoryNameError(errors.ValidationError):
pass
class InvalidTagCategoryColorError(errors.ValidationError):
pass
def _verify_name_validity(name: str) -> None:
name_regex = config.config["tag_category_name_regex"]
if not re.match(name_regex, name):
raise InvalidTagCategoryNameError(
"Name must satisfy regex %r." % name_regex
)
class TagCategorySerializer(serialization.BaseSerializer):
def __init__(self, category: model.TagCategory) -> None:
self.category = category
def _serializers(self) -> Dict[str, Callable[[], Any]]:
return {
"name": self.serialize_name,
"version": self.serialize_version,
"color": self.serialize_color,
"usages": self.serialize_usages,
"default": self.serialize_default,
"order": self.serialize_order,
}
def serialize_name(self) -> Any:
return self.category.name
def serialize_version(self) -> Any:
return self.category.version
def serialize_color(self) -> Any:
return self.category.color
def serialize_usages(self) -> Any:
return self.category.tag_count
def serialize_default(self) -> Any:
return self.category.default
def serialize_order(self) -> Any:
return self.category.order
def serialize_category(
category: Optional[model.TagCategory], options: List[str] = []
) -> Optional[rest.Response]:
if not category:
return None
return TagCategorySerializer(category).serialize(options)
def create_category(name: str, color: str, order: int) -> model.TagCategory:
category = model.TagCategory()
update_category_name(category, name)
update_category_color(category, color)
update_category_order(category, order)
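    # The first category ever created automatically becomes the default one.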
if not get_all_categories():
category.default = True
return category
def update_category_name(category: model.TagCategory, name: str) -> None:
assert category
if not name:
raise InvalidTagCategoryNameError("Name cannot be empty.")
expr = sa.func.lower(model.TagCategory.name) == name.lower()
if category.tag_category_id:
expr = expr & (
model.TagCategory.tag_category_id != category.tag_category_id
)
already_exists = (
db.session.query(model.TagCategory).filter(expr).count() > 0
)
if already_exists:
raise TagCategoryAlreadyExistsError(
"A category with this name already exists."
)
if util.value_exceeds_column_size(name, model.TagCategory.name):
raise InvalidTagCategoryNameError("Name is too long.")
_verify_name_validity(name)
category.name = name
cache.remove(DEFAULT_CATEGORY_NAME_CACHE_KEY)
def update_category_color(category: model.TagCategory, color: str) -> None:
assert category
if not color:
raise InvalidTagCategoryColorError("Color cannot be empty.")
if not re.match(r"^#?[0-9a-z]+$", color):
raise InvalidTagCategoryColorError("Invalid color.")
if util.value_exceeds_column_size(color, model.TagCategory.color):
raise InvalidTagCategoryColorError("Color is too long.")
category.color = color
def update_category_order(category: model.TagCategory, order: int) -> None:
assert category
category.order = order
def try_get_category_by_name(
name: str, lock: bool = False
) -> Optional[model.TagCategory]:
query = db.session.query(model.TagCategory).filter(
sa.func.lower(model.TagCategory.name) == name.lower()
)
if lock:
query = query.with_for_update()
return query.one_or_none()
def get_category_by_name(name: str, lock: bool = False) -> model.TagCategory:
category = try_get_category_by_name(name, lock)
if not category:
raise TagCategoryNotFoundError("Tag category %r not found." % name)
return category
def get_all_category_names() -> List[str]:
return [cat.name for cat in get_all_categories()]
def get_all_categories() -> List[model.TagCategory]:
return (
db.session.query(model.TagCategory)
.order_by(model.TagCategory.order.asc(), model.TagCategory.name.asc())
.all()
)
def try_get_default_category(
lock: bool = False,
) -> Optional[model.TagCategory]:
query = db.session.query(model.TagCategory).filter(
model.TagCategory.default
)
if lock:
query = query.with_for_update()
category = query.first()
# if for some reason (e.g. as a result of migration) there's no default
# category, get the first record available.
if not category:
query = db.session.query(model.TagCategory).order_by(
model.TagCategory.tag_category_id.asc()
)
if lock:
query = query.with_for_update()
category = query.first()
return category
def get_default_category(lock: bool = False) -> model.TagCategory:
category = try_get_default_category(lock)
if not category:
raise TagCategoryNotFoundError("No tag category created yet.")
return category
def METHOD_NAME() -> str:
if cache.has(DEFAULT_CATEGORY_NAME_CACHE_KEY):
return cache.get(DEFAULT_CATEGORY_NAME_CACHE_KEY)
default_category = get_default_category()
default_category_name = default_category.name
cache.put(DEFAULT_CATEGORY_NAME_CACHE_KEY, default_category_name)
return default_category_name
def set_default_category(category: model.TagCategory) -> None:
assert category
old_category = try_get_default_category(lock=True)
if old_category:
db.session.refresh(old_category)
old_category.default = False
db.session.refresh(category)
category.default = True
cache.remove(DEFAULT_CATEGORY_NAME_CACHE_KEY)
def delete_category(category: model.TagCategory) -> None:
assert category
if len(get_all_category_names()) == 1:
raise TagCategoryIsInUseError("Cannot delete the last category.")
if (category.tag_count or 0) > 0:
        raise TagCategoryIsInUseError(
            "Tag category has some usages and cannot be deleted. "
            + "Please remove this category from relevant tags first."
        )
db.session.delete(category)
|
1,592 |
inner test
|
#!/usr/bin/env python3
# Test remapping of topic name for incoming message
from mosq_test_helper import *
def write_config(filename, port1, port2, protocol_version):
with open(filename, 'w') as f:
f.write("port %d\n" % (port2))
f.write("allow_anonymous true\n")
f.write("\n")
f.write("connection bridge_sample\n")
f.write("address 127.0.0.1:%d\n" % (port1))
f.write("bridge_attempt_unsubscribe false\n")
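        # Bridge remapping syntax: "topic <pattern> in <qos> <local_prefix> <remote_prefix>".
        # For incoming ("in") messages the broker subscribes to <remote_prefix><pattern> on the
        # remote side and republishes matching messages locally as <local_prefix><pattern>.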
f.write("topic # in 0 local/topic/ remote/topic/\n")
f.write("topic prefix/# in 0 local2/topic/ remote2/topic/\n")
f.write("topic +/value in 0 local3/topic/ remote3/topic/\n")
f.write("topic ic/+ in 0 local4/top remote4/tip\n")
f.write("topic clients/total in 0 test/mosquitto/org $SYS/broker/\n")
f.write("notifications false\n")
f.write("restart_timeout 5\n")
f.write("bridge_protocol_version %s\n" % (protocol_version))
connect_packet = None
connack_packet = None
def METHOD_NAME(bridge, sock, proto_ver):
global connect_packet, connack_packet
if not mosq_test.expect_packet(bridge, "connect", connect_packet):
return 1
bridge.send(connack_packet)
if proto_ver == 5:
opts = mqtt5_opts.MQTT_SUB_OPT_NO_LOCAL | mqtt5_opts.MQTT_SUB_OPT_RETAIN_AS_PUBLISHED
else:
opts = 0
mid = 0
patterns = [
"remote/topic/#",
"remote2/topic/prefix/#",
"remote3/topic/+/value",
"remote4/tipic/+",
"$SYS/broker/clients/total",
]
    for pattern in patterns:
mid += 1
subscribe_packet = mosq_test.gen_subscribe(mid, pattern, 0 | opts, proto_ver=proto_ver)
suback_packet = mosq_test.gen_suback(mid, 0, proto_ver=proto_ver)
if not mosq_test.expect_packet(bridge, "subscribe", subscribe_packet):
return 1
bridge.send(suback_packet)
mid += 1
subscribe_packet = mosq_test.gen_subscribe(mid, "#", 0 | opts, proto_ver=proto_ver)
suback_packet = mosq_test.gen_suback(mid, 0, proto_ver=proto_ver)
sock.send(subscribe_packet)
if not mosq_test.expect_packet(sock, "suback", suback_packet):
return 1
cases = [
('local/topic/something', 'remote/topic/something'),
('local/topic/some/t/h/i/n/g', 'remote/topic/some/t/h/i/n/g'),
('local/topic/value', 'remote/topic/value'),
# Don't work, #40 must be fixed before
# ('local/topic', 'remote/topic'),
('local2/topic/prefix/something', 'remote2/topic/prefix/something'),
('local3/topic/something/value', 'remote3/topic/something/value'),
('local4/topic/something', 'remote4/tipic/something'),
('test/mosquitto/orgclients/total', '$SYS/broker/clients/total'),
]
for (local_topic, remote_topic) in cases:
mid += 1
remote_publish_packet = mosq_test.gen_publish(
remote_topic, qos=0, mid=mid, payload='', proto_ver=proto_ver)
local_publish_packet = mosq_test.gen_publish(
local_topic, qos=0, mid=mid, payload='', proto_ver=proto_ver)
bridge.send(remote_publish_packet)
match = mosq_test.expect_packet(sock, "publish", local_publish_packet)
if not match:
print("Fail on cases local_topic=%r, remote_topic=%r" % (
local_topic, remote_topic,
))
return 1
return 0
def do_test(proto_ver):
global connect_packet, connack_packet
if proto_ver == 4:
bridge_protocol = "mqttv311"
proto_ver_connect = 128+4
else:
bridge_protocol = "mqttv50"
proto_ver_connect = 5
(port1, port2) = mosq_test.get_port(2)
conf_file = os.path.basename(__file__).replace('.py', '.conf')
write_config(conf_file, port1, port2, bridge_protocol)
rc = 1
keepalive = 60
client_id = socket.gethostname()+".bridge_sample"
connect_packet = mosq_test.gen_connect(client_id, keepalive=keepalive, clean_session=False, proto_ver=proto_ver_connect)
connack_packet = mosq_test.gen_connack(rc=0, proto_ver=proto_ver)
client_connect_packet = mosq_test.gen_connect("pub-test", keepalive=keepalive, proto_ver=proto_ver)
client_connack_packet = mosq_test.gen_connack(rc=0, proto_ver=proto_ver)
ssock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ssock.settimeout(4)
ssock.bind(('', port1))
ssock.listen(5)
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port2, use_conf=True)
try:
(bridge, address) = ssock.accept()
bridge.settimeout(2)
sock = mosq_test.do_client_connect(
client_connect_packet, client_connack_packet,
port=port2,
)
rc = METHOD_NAME(bridge, sock, proto_ver)
sock.close()
bridge.close()
except mosq_test.TestError:
pass
finally:
os.remove(conf_file)
try:
bridge.close()
except NameError:
pass
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
ssock.close()
if rc:
print(stde.decode('utf-8'))
exit(rc)
do_test(proto_ver=4)
do_test(proto_ver=5)
exit(0)
|
1,593 |
register
|
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
from bpy.props import *
class IDPropMixin:
"""
So, here's the rub.
In Blender 2.79, we finally get the ability to use native Blender ID Datablock properties in Python.
This is great! It will allow us to specify other objects (Blender Objects, Materials, Textures) in
our plugin as pointer properties. Further, we can even specify a poll method to create a 'search list'
of valid options.
Naturally, there are some cons. The con here is that we've been storing object NAMES in string properties
for several releases now. Therefore, the purpose of this class is simple... It is a mixin to be
used for silently upgrading these object name properties to ID Properties. You will need to override
the _idprop_mapping and _idprop_sources methods in your class. The mixin will handle upgrading
the properties when a derived class is touched.
Unfortunately, it is not possible to easily batch convert everything on load or save, due to issues
in the way Blender's Python API functions. Long story short: PropertyGroups do not execute __new__
or __init__. Furthermore, Blender's UI does not appreciate having ID Datablocks return from
__getattribute__. To make matters worse, all properties are locked in a read-only state during
the UI draw stage.
"""
def __getattribute__(self, attr):
_getattribute = super().__getattribute__
# Let's make sure no one is trying to access an old version...
if attr in _getattribute("_idprop_mapping")().values():
raise AttributeError("'{}' has been deprecated... Please use the ID Property".format(attr))
# I have some bad news for you... Unfortunately, this might have been called
# during Blender's draw() context. Blender locks all properties during the draw loop.
# HOWEVER!!! There is a solution. Upon inspection of the Blender source code, however, it
# appears this restriction is temporarily suppressed during property getters... So let's get
# a property that executes a getter :D
# ...
# ...
# But why not simply proxy requests here, you ask? Ah, young grasshopper... This is the
# fifth time I have (re-)written this code. Trust me when I say, 'tis a boondoggle.
assert _getattribute("idprops_upgraded")
# Must be something regular. Just super it.
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
idprops = super().__getattribute__("_idprop_mapping")()
# Disallow any attempts to set the old string property
if attr in idprops.values():
raise AttributeError("'{}' has been deprecated... Please use the ID Property".format(attr))
# Inappropriate touching?
super().__getattribute__("_try_upgrade_idprops")()
# Now, pass along our update
super().__setattr__(attr, value)
@classmethod
def METHOD_NAME(cls):
if hasattr(super(), "register"):
super().METHOD_NAME()
cls.idprops_upgraded = BoolProperty(name="INTERNAL: ID Property Upgrader HACK",
description="HAAAX *throws CRT monitor*",
get=cls._try_upgrade_idprops,
options={"HIDDEN"})
cls.idprops_upgraded_value = BoolProperty(name="INTERNAL: ID Property Upgrade Status",
description="Have old StringProperties been upgraded to ID Datablock Properties?",
default=False,
options={"HIDDEN"})
for str_prop in cls._idprop_mapping().values():
setattr(cls, str_prop, StringProperty(description="deprecated"))
def _try_upgrade_idprops(self):
_getattribute = super().__getattribute__
if not _getattribute("idprops_upgraded_value"):
idprop_map = _getattribute("_idprop_mapping")()
strprop_src = _getattribute("_idprop_sources")()
for idprop_name, strprop_name in idprop_map.items():
if not super().is_property_set(strprop_name):
continue
strprop_value = _getattribute(strprop_name)
idprop_value = strprop_src[strprop_name].get(strprop_value, None)
super().__setattr__(idprop_name, idprop_value)
super().property_unset(strprop_name)
super().__setattr__("idprops_upgraded_value", True)
# you should feel like this now... https://youtu.be/1JBSs6MQJeI?t=33s
return True
class IDPropObjectMixin(IDPropMixin):
"""Like IDPropMixin, but with the assumption that all IDs can be found in bpy.data.objects"""
def _idprop_sources(self):
# NOTE: bad problems result when using super() here, so we'll manually reference object
cls = object.__getattribute__(self, "__class__")
idprops = cls._idprop_mapping()
return { i: bpy.data.objects for i in idprops.values() }
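# A minimal usage sketch (hypothetical names, not part of Korman): a PropertyGroup that
# silently upgrades an old "target_object_name" StringProperty to a "target_object" pointer.
#
#     class PlasmaExampleModifier(IDPropObjectMixin, bpy.types.PropertyGroup):
#         target_object = PointerProperty(name="Target", type=bpy.types.Object)
#
#         @classmethod
#         def _idprop_mapping(cls):
#             # ID property name -> deprecated string property name
#             return {"target_object": "target_object_name"}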
def poll_animated_objects(self, value):
return value.plasma_object.has_animation_data
def poll_camera_objects(self, value):
return value.type == "CAMERA"
def poll_drawable_objects(self, value):
return value.type == "MESH" and any(value.data.materials)
def poll_empty_objects(self, value):
return value.type == "EMPTY"
def poll_mesh_objects(self, value):
return value.type == "MESH"
def poll_softvolume_objects(self, value):
return value.plasma_modifiers.softvolume.enabled
def poll_subworld_objects(self, value):
return value.plasma_modifiers.subworld_def.enabled
def poll_visregion_objects(self, value):
return value.plasma_modifiers.visregion.enabled
def poll_envmap_textures(self, value):
return isinstance(value, bpy.types.EnvironmentMapTexture)
@bpy.app.handlers.persistent
def _upgrade_node_trees(dummy):
"""
Logic node haxxor incoming!
Logic nodes appear to have issues with silently updating themselves. I expect that Blender is
doing something strange in the UI code that causes our metaprogramming tricks to be bypassed.
Therefore, we will loop through all Plasma node trees and forcibly update them on blend load.
"""
for tree in bpy.data.node_groups:
if tree.bl_idname != "PlasmaNodeTree":
continue
for node in tree.nodes:
if isinstance(node, IDPropMixin):
assert node._try_upgrade_idprops()
bpy.app.handlers.load_post.append(_upgrade_node_trees)
|
1,594 |
seed life
|
import gc
import time
import random
from stellar import StellarUnicorn
from picographics import PicoGraphics, DISPLAY_STELLAR_UNICORN, PEN_P8
from ulab import numpy
"""
A randomly-seeded game-of-life cellular automata effect.
Experiment with the values below to change the effect.
Press "A" to manually re-seed.
"""
# MAXIMUM OVERKILL
# machine.freq(250_000_000)
INITIAL_LIFE = 128 # Number of live cells to seed
GENERATION_TIME_MS = 100 # MS between generations
SMOOTHED = True # Enable for a more organic if somewhat unsettling feel
STALEMATE_DEPTH = 5 # How many generations of changes must match before reset
DECAY = 0.90 # Rate at which smoothing effect decays, higher number = more persistent, 1.0 = no decay
TENACITY = 32 # Rate at which smoothing effect increases
su = StellarUnicorn()
su.set_brightness(0.5)
graphics = PicoGraphics(DISPLAY_STELLAR_UNICORN, pen_type=PEN_P8)
changed_cells = []
for c in range(256):
graphics.create_pen(c // 2, 0, c)
def update():
global last_gen, changed_cells
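    # Persistence buffer for the smoothed mode: live cells pump energy into `duration`
    # every frame, and the whole buffer decays geometrically, leaving fading trails.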
if SMOOTHED:
duration[:] += life * TENACITY
duration[:] *= DECAY
if time.ticks_ms() - last_gen < GENERATION_TIME_MS:
return
last_gen = time.ticks_ms()
# Rollin' rollin' rollin.
_N = numpy.roll(life, -1, axis=0)
_NW = numpy.roll(_N, -1, axis=1)
_NE = numpy.roll(_N, 1, axis=1)
_S = numpy.roll(life, 1, axis=0)
_SW = numpy.roll(_S, -1, axis=1)
_SE = numpy.roll(_S, 1, axis=1)
_W = numpy.roll(life, -1, axis=1)
_E = numpy.roll(life, 1, axis=1)
# Compute the total neighbours for each cell
neighbours[:] = _N + _NW + _NE + _S + _SW + _SE + _W + _E
next_generation[:] = life[:]
# Any cells with exactly three neighbours should always stay alive
next_generation[:] += neighbours[:] == 3
# Any alive cells with less than two neighbours should die
next_generation[:] -= (neighbours[:] < 2) * life
# Any alive cells with more than three neighbours should die
next_generation[:] -= (neighbours[:] > 3) * life
next_generation[:] = numpy.clip(next_generation, 0, 1)
changed_cells.append(numpy.sum(life != next_generation))
changed_cells = changed_cells[-STALEMATE_DEPTH:]
life[:] = next_generation
if changed_cells.count(changed_cells[0]) == STALEMATE_DEPTH:
METHOD_NAME(INITIAL_LIFE // 2)
def draw():
# Copy the effect to the framebuffer
if SMOOTHED:
memoryview(graphics)[:] = numpy.ndarray(numpy.clip(duration, 0, 255), dtype=numpy.uint8).tobytes()
else:
memoryview(graphics)[:] = numpy.ndarray(life * 255, dtype=numpy.uint8).tobytes()
su.update(graphics)
def METHOD_NAME(amount=INITIAL_LIFE):
for _ in range(amount):
x = random.randint(0, width - 1)
y = random.randint(0, height - 1)
life[y][x] = int(True) # Avoid: TypeError: 'bool' object isn't iterable
width = StellarUnicorn.WIDTH
height = StellarUnicorn.HEIGHT
life = numpy.zeros((height, width), dtype=numpy.bool)
next_generation = numpy.zeros((height, width), dtype=numpy.bool)
neighbours = numpy.zeros((height, width), dtype=numpy.uint8)
duration = numpy.zeros((height, width))
last_gen = time.ticks_ms()
t_count = 0
t_total = 0
METHOD_NAME()
while True:
if su.is_pressed(StellarUnicorn.SWITCH_BRIGHTNESS_UP):
su.adjust_brightness(+0.01)
if su.is_pressed(StellarUnicorn.SWITCH_BRIGHTNESS_DOWN):
su.adjust_brightness(-0.01)
if su.is_pressed(StellarUnicorn.SWITCH_A):
life[:] = int(False)
if su.is_pressed(StellarUnicorn.SWITCH_B):
SMOOTHED = not SMOOTHED
tstart = time.ticks_ms()
gc.collect()
update()
draw()
tfinish = time.ticks_ms()
total = tfinish - tstart
t_total += total
t_count += 1
if t_count == 60:
per_frame_avg = t_total / t_count
print(f"60 frames in {t_total}ms, avg {per_frame_avg:.02f}ms per frame, {1000/per_frame_avg:.02f} FPS")
t_count = 0
t_total = 0
# pause for a moment (important or the USB serial device will fail)
# try to pace at 60fps or 30fps
if total > 1000 / 30:
time.sleep(0.0001)
elif total > 1000 / 60:
t = 1000 / 30 - total
time.sleep(t / 1000)
else:
t = 1000 / 60 - total
time.sleep(t / 1000)
|
1,595 |
tear down
|
from datetime import date
from django.shortcuts import reverse
from django.test import TestCase
from autoemails.forms import GenericEmailScheduleForm
from autoemails.models import EmailTemplate, RQJob, Trigger
from autoemails.tests.base import FakeRedisTestCaseMixin
import autoemails.views
from workshops.models import Event, Language, Organization, WorkshopRequest
from workshops.tests.base import SuperuserMixin
class TestGenericScheduleEmail(FakeRedisTestCaseMixin, SuperuserMixin, TestCase):
def setUp(self):
super().setUp()
self._setUpSuperuser()
# save scheduler and connection data
self._saved_scheduler = autoemails.views.scheduler
self._saved_redis_connection = autoemails.views.redis_connection
# overwrite them
autoemails.views.scheduler = self.scheduler
autoemails.views.redis_connection = self.connection
def METHOD_NAME(self):
super().METHOD_NAME()
autoemails.views.scheduler = self._saved_scheduler
autoemails.views.redis_connection = self._saved_redis_connection
def _setUpTemplateTrigger(self):
self.template_slug = "test-template-slug"
self.template = EmailTemplate.objects.create(
slug=self.template_slug,
subject="Test Email",
to_header="{{ recipient }}",
from_header="[email protected]",
body_template="# Hello there",
)
self.trigger = Trigger.objects.create(
action="workshop-request-response1", template=self.template, active=True
)
def _setUpWorkshopRequest(self, create_event=False):
kwargs = dict(
state="p",
personal="Harry",
family="Potter",
email="[email protected]",
institution_other_name="Hogwarts",
location="Scotland",
country="GB",
preferred_dates=None,
other_preferred_dates="soon",
language=Language.objects.get(name="English"),
audience_description="Students of Hogwarts",
administrative_fee="nonprofit",
scholarship_circumstances="",
travel_expences_management="booked",
travel_expences_management_other="",
institution_restrictions="no_restrictions",
institution_restrictions_other="",
carpentries_info_source_other="",
user_notes="",
)
if create_event:
self.event = Event.objects.create(
slug="event1",
start=date(2020, 10, 31),
end=date(2020, 11, 1),
host=Organization.objects.first(),
)
self.wr = WorkshopRequest.objects.create(event=self.event, **kwargs)
else:
self.wr = WorkshopRequest.objects.create(**kwargs)
def _formData(self):
return {
"slug": "test1",
"subject": "test2",
"to_header": "test3",
"from_header": "test4",
"cc_header": "test5",
"bcc_header": "test6",
"reply_to_header": "test7",
"body_template": "# test",
}
def test_request_method(self):
methods = ["OPTIONS", "HEAD", "TRACE", "GET", "PUT", "PATCH", "DELETE"]
url = reverse("autoemails:email_response", args=[1])
for method in methods:
with self.subTest(method=method):
response = self.client.generic(method, path=url)
self.assertEqual(response.status_code, 405)
response = self.client.generic("POST", path=url)
self.assertEqual(response.status_code, 302) # redirect to log in
def test_authorized(self):
url = reverse("autoemails:email_response", args=[1])
self.client.force_login(self.admin)
response = self.client.post(url)
self.assertEqual(response.status_code, 404)
def test_expected_objects_present_in_db(self):
# required: a template, a trigger, and a workshop request
self._setUpTemplateTrigger()
self._setUpWorkshopRequest()
url = reverse("autoemails:email_response", args=[self.wr.pk])
data = {"slug": self.template_slug}
self.client.force_login(self.admin)
response = self.client.post(url, data)
# redirects to workshop request details
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, self.wr.get_absolute_url())
def test_valid_form(self):
self._setUpTemplateTrigger()
data = self._formData()
form = GenericEmailScheduleForm(data, instance=self.template)
self.assertEqual(form.is_valid(), True)
def test_job_scheduled(self):
self._setUpTemplateTrigger()
self._setUpWorkshopRequest(create_event=False)
data = self._formData()
data.update({"slug": self.template_slug, "next": "/dashboard"})
url = reverse("autoemails:email_response", args=[self.wr.pk])
# no jobs
self.assertEqual(self.scheduler.count(), 0)
# no rqjobs
self.assertFalse(RQJob.objects.all())
self.client.force_login(self.admin)
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200) # after redirect
self.assertContains(
response,
"New email (Response to Workshop Request 1) was scheduled",
)
# 1 new job
self.assertEqual(self.scheduler.count(), 1)
job = next(self.scheduler.get_jobs())
# 1 new rqjob
self.assertEqual(RQJob.objects.count(), 1)
rqjob = RQJob.objects.first()
# ensure it's the same job
self.assertEqual(job.get_id(), rqjob.job_id)
|
1,596 |
resolve from mibs
|
# (C) Datadog, Inc. 2010-2019
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from collections import defaultdict
from typing import DefaultDict, Dict, List, Optional, Tuple # noqa: F401
from .models import OID # noqa: F401
from .pysnmp_types import MibViewController # noqa: F401
from .types import OIDMatch
class OIDTreeNode(object):
__slots__ = ('name', 'children')
def __init__(self):
# type: () -> None
self.name = None # type: Optional[str]
self.children = defaultdict(OIDTreeNode) # type: DefaultDict[int, OIDTreeNode]
class OIDTrie(object):
"""A trie implementation to store OIDs and efficiently match prefixes.
We use it to do basic MIB-like resolution.
"""
def __init__(self):
# type: () -> None
self._root = OIDTreeNode()
def set(self, parts, name):
# type: (Tuple[int, ...], str) -> None
node = self._root
for part in parts:
node = node.children[part]
node.name = name
def match(self, parts):
# type: (Tuple[int, ...]) -> Tuple[Tuple[int, ...], Optional[str]]
node = self._root
matched = []
name = None
for part in parts:
child = node.children.get(part)
if child is None:
break
node = child
matched.append(part)
if node.name is not None:
name = node.name
return tuple(matched), name
class OIDResolver(object):
"""
Helper for performing resolution of OIDs when tagging table metrics.
Here's a summary of where this resolver can intervene:
```yaml
metrics:
- MIB: ...
table: ...
symbols:
- # XXX(1) Direct OID metric resolution.
- OID: 1.3.6.1.2.1.4.31.1.1.4
name: ipSystemStatsHCInReceives
metric_tags:
- # XXX(2) Column-based tag resolution.
tag: battery_index
column:
OID: 1.3.6.1.4.1.232.6.2.17.2.1.2
name: cpqHeSysBatteryIndex
- # XXX(3) Index-based tag resolution.
tag: ipversion
index: 1
mapping:
0: unknown
1: ipv4
2: ipv6
```
"""
def __init__(self, mib_view_controller, enforce_constraints):
# type: (MibViewController, bool) -> None
self._mib_view_controller = mib_view_controller
self._resolver = OIDTrie()
self._index_resolvers = defaultdict(dict) # type: DefaultDict[str, Dict[int, Dict[int, str]]]
self._enforce_constraints = enforce_constraints
def register(self, oid, name):
# type: (OID, str) -> None
"""Register a translation from a name to an OID.
Corresponds to XXX(1) and XXX(2) in the summary listing.
"""
self._resolver.set(oid.as_tuple(), name)
def register_index(self, tag, index, mapping):
# type: (str, int, Dict[int, str]) -> None
"""Register a mapping for index-based tag translation.
Corresponds to XXX(3) in the summary listing.
"""
self._index_resolvers[tag][index] = mapping
def METHOD_NAME(self, oid):
# type: (OID) -> OIDMatch
if not self._enforce_constraints:
# if enforce_constraints is false, then MIB resolution has not been done yet
# so we need to do it manually. We have to specify the mibs that we will need
# to resolve the name.
oid.resolve(self._mib_view_controller)
mib_symbol = oid.get_mib_symbol()
return OIDMatch(name=mib_symbol.symbol, indexes=mib_symbol.prefix)
def _resolve_tag_index(self, tail, name):
# type: (Tuple[int, ...], str) -> Tuple[str, ...]
mappings_by_index = self._index_resolvers.get(name)
if mappings_by_index is None:
# No mapping -> use the OID parts themselves as tag values.
return tuple(str(part) for part in tail)
tags = [] # type: List[str]
for index, part in enumerate(tail, 1):
if index in mappings_by_index:
# Default: use mapping to compute tag from index.
mapping = mappings_by_index[index]
tag = mapping[part]
tags.append(tag)
else:
# Fallback: use the OID part itself as a tag value.
tags.append(str(part))
return tuple(tags)
def resolve_oid(self, oid):
# type: (OID) -> OIDMatch
"""Resolve an OID to a name and its indexes.
This will perform either:
1. MIB-based resolution, if `oid` doesn't match any registered OID.
2. Manual resolution, if `oid` matched. In this case, indexes are resolved using any registered mappings.
Returns
-------
name: the name of the metric associated to `oid`.
tag_index: a sequence of tag values. k-th item in the sequence corresponds to the k-th entry in `metric_tags`.
"""
parts = oid.as_tuple()
prefix, name = self._resolver.match(parts)
if name is None:
return self.METHOD_NAME(oid)
# Example: parts: (1, 3, 6, 1, 2, 1, 1), prefix: (1, 3, 6, 1) -> tail: (2, 1, 1)
tail = parts[len(prefix) :]
tag_index = self._resolve_tag_index(tail, name=name)
return OIDMatch(name=name, indexes=tag_index)
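# A rough usage sketch (the OIDs and names below are illustrative, not from this module's callers):
#   resolver = OIDResolver(mib_view_controller, enforce_constraints=True)
#   resolver.register(OID('1.3.6.1.2.1.4.31.1.1.4'), 'ipSystemStatsHCInReceives')
#   match = resolver.resolve_oid(OID('1.3.6.1.2.1.4.31.1.1.4.1'))
#   # -> OIDMatch(name='ipSystemStatsHCInReceives', indexes=('1',)): the unmatched tail becomes
#   #    the tag index values unless a mapping was registered via register_index().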
|
1,597 |
test conversion between intf
|
"""
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import scipy.sparse as sp
import cvxpy.interface as intf
from cvxpy.tests.base_test import BaseTest
class TestInterfaces(BaseTest):
""" Unit tests for matrix interfaces. """
def setUp(self) -> None:
pass
def sign_for_intf(self, interface) -> None:
"""Test sign for a given interface.
"""
mat = interface.const_to_matrix([[1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(intf.sign(mat), (True, False)) # Positive.
self.assertEqual(intf.sign(-mat), (False, True)) # Negative.
self.assertEqual(intf.sign(0*mat), (True, True)) # Zero.
mat = interface.const_to_matrix([[-1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(intf.sign(mat), (False, False)) # Unknown.
# Test numpy ndarray interface.
def test_ndarray(self) -> None:
interface = intf.get_matrix_interface(np.ndarray)
# const_to_matrix
mat = interface.const_to_matrix([1, 2, 3])
self.assertEqual(interface.shape(mat), (3,))
mat = interface.const_to_matrix([1, 2])
self.assertEqual(interface.shape(mat), (2,))
mat = interface.scalar_matrix(2, (4, 3))
self.assertEqual(interface.shape(mat), (4, 3))
self.assertEqual(interface.index(mat, (1, 2)), 2)
# reshape
mat = interface.const_to_matrix([[1, 2, 3], [3, 4, 5]])
mat = interface.reshape(mat, (6, 1))
self.assertEqual(interface.index(mat, (4, 0)), 4)
mat = interface.const_to_matrix(1, convert_scalars=True)
self.assertEqual(type(interface.reshape(mat, (1, 1))), type(mat))
# index
mat = interface.const_to_matrix([[1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(interface.index(mat, (0, 1)), 3)
mat = interface.index(mat, (slice(1, 4, 2), slice(0, 2, None)))
self.assertEqual(list(mat.flatten('C')), [2, 4, 4, 6])
# Scalars and matrices.
scalar = interface.const_to_matrix(2)
mat = interface.const_to_matrix([1, 2, 3])
self.assertTrue((
scalar*mat == interface.const_to_matrix([2, 4, 6])).all())
self.assertTrue((
scalar - mat == interface.const_to_matrix([1, 0, -1])).all())
# Sign
self.sign_for_intf(interface)
# shape.
self.assertEqual(interface.shape(np.array([1, 2, 3])), (3,))
# Test numpy matrix interface.
def test_numpy_matrix(self) -> None:
interface = intf.get_matrix_interface(np.matrix)
# const_to_matrix
mat = interface.const_to_matrix([1, 2, 3])
self.assertEqual(interface.shape(mat), (3, 1))
mat = interface.const_to_matrix([[1], [2], [3]])
self.assertEqual(mat[0, 0], 1)
mat = interface.scalar_matrix(2, (4, 3))
self.assertEqual(interface.shape(mat), (4, 3))
self.assertEqual(interface.index(mat, (1, 2)), 2)
# reshape
mat = interface.const_to_matrix([[1, 2, 3], [3, 4, 5]])
mat = interface.reshape(mat, (6, 1))
self.assertEqual(interface.index(mat, (4, 0)), 4)
mat = interface.const_to_matrix(1, convert_scalars=True)
self.assertEqual(type(interface.reshape(mat, (1, 1))), type(mat))
# index
mat = interface.const_to_matrix([[1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(interface.index(mat, (0, 1)), 3)
mat = interface.index(mat, (slice(1, 4, 2), slice(0, 2, None)))
self.assertFalse((mat - np.array([[2, 4], [4, 6]])).any())
# Sign
self.sign_for_intf(interface)
def test_scipy_sparse(self) -> None:
"""Test cvxopt sparse interface.
"""
interface = intf.get_matrix_interface(sp.csc_matrix)
# const_to_matrix
mat = interface.const_to_matrix([1, 2, 3])
self.assertEqual(interface.shape(mat), (3, 1))
# C = cvxopt.spmatrix([1, 1, 1, 1, 1], [0, 1, 2, 0, 0, ], [0, 0, 0, 1, 2])
# mat = interface.const_to_matrix(C)
# self.assertEqual(interface.shape(mat), (3, 3))
# identity
mat = interface.identity(4)
cmp_mat = interface.const_to_matrix(np.eye(4))
self.assertEqual(interface.shape(mat), interface.shape(cmp_mat))
self.assertEqual((mat - cmp_mat).nnz, 0)
# scalar_matrix
mat = interface.scalar_matrix(2, (4, 3))
self.assertEqual(interface.shape(mat), (4, 3))
self.assertEqual(interface.index(mat, (1, 2)), 2)
# reshape
mat = interface.const_to_matrix([[1, 2, 3], [3, 4, 5]])
mat = interface.reshape(mat, (6, 1))
self.assertEqual(interface.index(mat, (4, 0)), 4)
# Test scalars.
scalar = interface.scalar_matrix(1, (1, 1))
self.assertEqual(type(scalar), np.ndarray)
scalar = interface.scalar_matrix(1, (1, 3))
self.assertEqual(scalar.shape, (1, 3))
# index
mat = interface.const_to_matrix([[1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(interface.index(mat, (0, 1)), 3)
mat = interface.index(mat, (slice(1, 4, 2), slice(0, 2, None)))
self.assertFalse((mat - np.array([[2, 4], [4, 6]])).any())
# scalar value
mat = sp.eye(1)
self.assertEqual(intf.scalar_value(mat), 1.0)
# Sign
self.sign_for_intf(interface)
# Complex
# define sparse matrix [[0, 1j],[-1j,0]]
row = np.array([0, 1])
col = np.array([1, 0])
data = np.array([1j, -1j])
A = sp.csr_matrix((data, (row, col)), shape=(2, 2))
mat = interface.const_to_matrix(A)
self.assertEqual(mat[0, 1], 1j)
self.assertEqual(mat[1, 0], -1j)
def METHOD_NAME(self) -> None:
"""Test conversion between every pair of interfaces.
"""
interfaces = [intf.get_matrix_interface(np.ndarray),
intf.get_matrix_interface(np.matrix),
intf.get_matrix_interface(sp.csc_matrix)]
cmp_mat = [[1, 2, 3, 4], [3, 4, 5, 6], [-1, 0, 2, 4]]
for i in range(len(interfaces)):
for j in range(i+1, len(interfaces)):
intf1 = interfaces[i]
mat1 = intf1.const_to_matrix(cmp_mat)
intf2 = interfaces[j]
mat2 = intf2.const_to_matrix(cmp_mat)
for col in range(len(cmp_mat)):
for row in range(len(cmp_mat[0])):
key = (slice(row, row+1, None),
slice(col, col+1, None))
self.assertEqual(intf1.index(mat1, key),
intf2.index(mat2, key))
# Convert between the interfaces.
self.assertEqual(cmp_mat[col][row],
intf1.index(intf1.const_to_matrix(mat2), key))
self.assertEqual(intf2.index(intf2.const_to_matrix(mat1), key),
cmp_mat[col][row])
|
1,598 |
id
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetEndpointResult',
'AwaitableGetEndpointResult',
'get_endpoint',
'get_endpoint_output',
]
@pulumi.output_type
class GetEndpointResult:
"""
A collection of values returned by getEndpoint.
"""
def __init__(__self__, endpoint_address=None, endpoint_type=None, METHOD_NAME=None):
if endpoint_address and not isinstance(endpoint_address, str):
raise TypeError("Expected argument 'endpoint_address' to be a str")
pulumi.set(__self__, "endpoint_address", endpoint_address)
if endpoint_type and not isinstance(endpoint_type, str):
raise TypeError("Expected argument 'endpoint_type' to be a str")
pulumi.set(__self__, "endpoint_type", endpoint_type)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
@property
@pulumi.getter(name="endpointAddress")
def endpoint_address(self) -> str:
"""
Endpoint based on `endpoint_type`:
* No `endpoint_type`: Either `iot:Data` or `iot:Data-ATS` [depending on region](https://aws.amazon.com/blogs/iot/aws-iot-core-ats-endpoints/)
* `iot:CredentialsProvider`: `IDENTIFIER.credentials.iot.REGION.amazonaws.com`
* `iot:Data`: `IDENTIFIER.iot.REGION.amazonaws.com`
* `iot:Data-ATS`: `IDENTIFIER-ats.iot.REGION.amazonaws.com`
* `iot:Jobs`: `IDENTIFIER.jobs.iot.REGION.amazonaws.com`
"""
return pulumi.get(self, "endpoint_address")
@property
@pulumi.getter(name="endpointType")
def endpoint_type(self) -> Optional[str]:
return pulumi.get(self, "endpoint_type")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
class AwaitableGetEndpointResult(GetEndpointResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEndpointResult(
endpoint_address=self.endpoint_address,
endpoint_type=self.endpoint_type,
METHOD_NAME=self.METHOD_NAME)
def get_endpoint(endpoint_type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEndpointResult:
"""
Returns a unique endpoint specific to the AWS account making the call.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
import pulumi_kubernetes as kubernetes
example = aws.iot.get_endpoint()
agent = kubernetes.index.Kubernetes_pod("agent",
metadata=[{
name: my-device,
}],
spec=[{
container: [{
image: gcr.io/my-project/image-name,
name: image-name,
env: [{
name: IOT_ENDPOINT,
value: example.endpoint_address,
}],
}],
}])
```
:param str endpoint_type: Endpoint type. Valid values: `iot:CredentialProvider`, `iot:Data`, `iot:Data-ATS`, `iot:Jobs`.
"""
__args__ = dict()
__args__['endpointType'] = endpoint_type
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:iot/getEndpoint:getEndpoint', __args__, opts=opts, typ=GetEndpointResult).value
return AwaitableGetEndpointResult(
endpoint_address=pulumi.get(__ret__, 'endpoint_address'),
endpoint_type=pulumi.get(__ret__, 'endpoint_type'),
METHOD_NAME=pulumi.get(__ret__, 'id'))
@_utilities.lift_output_func(get_endpoint)
def get_endpoint_output(endpoint_type: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetEndpointResult]:
"""
Returns a unique endpoint specific to the AWS account making the call.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
import pulumi_kubernetes as kubernetes
example = aws.iot.get_endpoint()
agent = kubernetes.index.Kubernetes_pod("agent",
metadata=[{
name: my-device,
}],
spec=[{
container: [{
image: gcr.io/my-project/image-name,
name: image-name,
env: [{
name: IOT_ENDPOINT,
value: example.endpoint_address,
}],
}],
}])
```
:param str endpoint_type: Endpoint type. Valid values: `iot:CredentialProvider`, `iot:Data`, `iot:Data-ATS`, `iot:Jobs`.
"""
...
|
1,599 |
open connection
|
from contextlib import contextmanager, suppress
from typing import Any, AnyStr, ClassVar, Iterator, Optional, Sequence, List
import snowflake.connector as snowflake_lib
from dlt.common.destination import DestinationCapabilitiesContext
from dlt.destinations.exceptions import DatabaseTerminalException, DatabaseTransientException, DatabaseUndefinedRelation
from dlt.destinations.sql_client import DBApiCursorImpl, SqlClientBase, raise_database_error, raise_open_connection_error
from dlt.destinations.typing import DBApi, DBApiCursor, DBTransaction, DataFrame
from dlt.destinations.snowflake.configuration import SnowflakeCredentials
from dlt.destinations.snowflake import capabilities
class SnowflakeCursorImpl(DBApiCursorImpl):
native_cursor: snowflake_lib.cursor.SnowflakeCursor # type: ignore[assignment]
def df(self, chunk_size: int = None, **kwargs: Any) -> Optional[DataFrame]:
if chunk_size is None:
return self.native_cursor.fetch_pandas_all(**kwargs)
return super().df(chunk_size=chunk_size, **kwargs)
class SnowflakeSqlClient(SqlClientBase[snowflake_lib.SnowflakeConnection], DBTransaction):
dbapi: ClassVar[DBApi] = snowflake_lib
capabilities: ClassVar[DestinationCapabilitiesContext] = capabilities()
def __init__(self, dataset_name: str, credentials: SnowflakeCredentials) -> None:
super().__init__(credentials.database, dataset_name)
self._conn: snowflake_lib.SnowflakeConnection = None
self.credentials = credentials
def METHOD_NAME(self) -> snowflake_lib.SnowflakeConnection:
conn_params = self.credentials.to_connector_params()
        # Set the timezone to UTC so that, when loading from file formats that carry no
        # timezone information, we get the UTC timestamps dlt expects.
if "timezone" not in conn_params:
conn_params["timezone"] = "UTC"
self._conn = snowflake_lib.connect(
schema=self.fully_qualified_dataset_name(),
**conn_params
)
return self._conn
@raise_open_connection_error
def close_connection(self) -> None:
if self._conn:
self._conn.close()
self._conn = None
@contextmanager
def begin_transaction(self) -> Iterator[DBTransaction]:
try:
self._conn.autocommit(False)
yield self
self.commit_transaction()
except Exception:
self.rollback_transaction()
raise
@raise_database_error
def commit_transaction(self) -> None:
self._conn.commit()
self._conn.autocommit(True)
@raise_database_error
def rollback_transaction(self) -> None:
self._conn.rollback()
self._conn.autocommit(True)
@property
def native_connection(self) -> "snowflake_lib.SnowflakeConnection":
return self._conn
def drop_tables(self, *tables: str) -> None:
        # Tables are dropped with `IF EXISTS`, but Snowflake raises an error when the schema doesn't exist.
        # Multi-statement execution is safe and the error can be ignored since all tables are in the same schema.
with suppress(DatabaseUndefinedRelation):
super().drop_tables(*tables)
def execute_sql(self, sql: AnyStr, *args: Any, **kwargs: Any) -> Optional[Sequence[Sequence[Any]]]:
with self.execute_query(sql, *args, **kwargs) as curr:
if curr.description is None:
return None
else:
f = curr.fetchall()
return f
@contextmanager
@raise_database_error
def execute_query(self, query: AnyStr, *args: Any, **kwargs: Any) -> Iterator[DBApiCursor]:
curr: DBApiCursor = None
db_args = args if args else kwargs if kwargs else None
with self._conn.cursor() as curr: # type: ignore[assignment]
try:
curr.execute(query, db_args, num_statements=0)
yield SnowflakeCursorImpl(curr) # type: ignore[abstract]
except snowflake_lib.Error as outer:
try:
self._reset_connection()
except snowflake_lib.Error:
self.close_connection()
self.METHOD_NAME()
raise outer
def fully_qualified_dataset_name(self, escape: bool = True) -> str:
# Always escape for uppercase
if escape:
return self.capabilities.escape_identifier(self.dataset_name)
return self.dataset_name.upper()
def _reset_connection(self) -> None:
self._conn.rollback()
self._conn.autocommit(True)
@classmethod
def _make_database_exception(cls, ex: Exception) -> Exception:
if isinstance(ex, snowflake_lib.errors.ProgrammingError):
if ex.sqlstate == 'P0000' and ex.errno == 100132:
# Error in a multi statement execution. These don't show the original error codes
msg = str(ex)
if "NULL result in a non-nullable column" in msg:
return DatabaseTerminalException(ex)
elif "does not exist or not authorized" in msg: # E.g. schema not found
return DatabaseUndefinedRelation(ex)
else:
return DatabaseTransientException(ex)
if ex.sqlstate in {'42S02', '02000'}:
return DatabaseUndefinedRelation(ex)
elif ex.sqlstate == '22023': # Adding non-nullable no-default column
return DatabaseTerminalException(ex)
elif ex.sqlstate == '42000' and ex.errno == 904: # Invalid identifier
return DatabaseTerminalException(ex)
elif ex.sqlstate == "22000":
return DatabaseTerminalException(ex)
else:
return DatabaseTransientException(ex)
elif isinstance(ex, snowflake_lib.errors.IntegrityError):
raise DatabaseTerminalException(ex)
elif isinstance(ex, snowflake_lib.errors.DatabaseError):
term = cls._maybe_make_terminal_exception_from_data_error(ex)
if term:
return term
else:
return DatabaseTransientException(ex)
elif isinstance(ex, TypeError):
# snowflake raises TypeError on malformed query parameters
return DatabaseTransientException(snowflake_lib.errors.ProgrammingError(str(ex)))
elif cls.is_dbapi_exception(ex):
return DatabaseTransientException(ex)
else:
return ex
@staticmethod
def _maybe_make_terminal_exception_from_data_error(snowflake_ex: snowflake_lib.DatabaseError) -> Optional[Exception]:
return None
@staticmethod
def is_dbapi_exception(ex: Exception) -> bool:
return isinstance(ex, snowflake_lib.DatabaseError)
|