id (int64, 0 to 300k) | label (stringlengths 1 to 74, ⌀) | text (stringlengths 4k to 8k) |
---|---|---|
3,300 | volume |
"""
Domain class definition.
"""
import logging
import numpy as np
from collections import OrderedDict
from ..tools.array import prod
from ..tools.cache import CachedMethod, CachedClass, CachedAttribute
from ..tools.general import unify_attributes, unify, OrderedSet
from .coords import Coordinate, CartesianCoordinates
logger = logging.getLogger(__name__.split('.')[-1])
class Domain(metaclass=CachedClass):
"""
The direct product of a set of bases.
Parameters
----------
dist : Distributor object
Distributor for an operand/field.
bases : collection of Basis objects
Bases comprising the direct product domain.
"""
@classmethod
def _preprocess_args(cls, dist, bases):
# Drop None bases
bases = [b for b in bases if b is not None]
# Drop duplicate bases
bases = tuple(OrderedSet(bases))
# Make sure coordsystems don't overlap
cs = [b.coordsystem for b in bases]
if len(set(cs)) < len(cs):
raise ValueError("Overlapping bases specified.")
# Sort by first axis
key = lambda basis: basis.first_axis
bases = tuple(sorted(bases, key=key))
return (dist, bases), {}
def __init__(self, dist, bases):
self.dist = dist
self.bases = bases # Preprocessed to remove Nones and duplicates
self.dim = sum(basis.dim for basis in self.bases)
@CachedAttribute
def METHOD_NAME(self):
return prod([basis.METHOD_NAME for basis in self.bases])
@CachedAttribute
def bases_by_axis(self):
bases_by_axis = OrderedDict()
for basis in self.bases:
for axis in range(basis.first_axis, basis.first_axis+basis.dim):
bases_by_axis[axis] = basis
return bases_by_axis
@CachedAttribute
def full_bases(self):
full_bases = [None for i in range(self.dist.dim)]
for basis in self.bases:
for axis in range(basis.first_axis, basis.first_axis+basis.dim):
full_bases[axis] = basis
return tuple(full_bases)
@CachedAttribute
def bases_by_coord(self):
bases_by_coord = OrderedDict()
for coord in self.dist.coords:
if type(coord.cs) in [type(None), CartesianCoordinates]:
bases_by_coord[coord] = None
else:
bases_by_coord[coord.cs] = None
for basis in self.bases:
bases_by_coord[basis.coords] = basis
#bases_by_coord[basis.coordsystem] = basis
return bases_by_coord
@CachedAttribute
def dealias(self):
dealias = [1] * self.dist.dim
for basis in self.bases:
for subaxis in range(basis.dim):
dealias[basis.first_axis+subaxis] = basis.dealias[subaxis]
return tuple(dealias)
def substitute_basis(self, old_basis, new_basis):
new_bases = list(self.bases)
if old_basis in new_bases:
new_bases.remove(old_basis)
new_bases.append(new_basis)
return Domain(self.dist, new_bases)
def get_basis(self, coords):
if isinstance(coords, int):
axis = coords
else:
axis = coords.axis
return self.full_bases[axis]
def get_basis_subaxis(self, coord):
axis = coord.axis
for basis in self.bases:
if (axis >= basis.axis) and (axis <= basis.axis + basis.dim):
return axis - basis.axis
def get_coord(self, name):
for basis in self.bases:
# This is hacky...
if isinstance(basis.coords, Coordinate):
if name == basis.coords.name:
return basis.coords
else:
for basis_coord in basis.coords.coords:
if name == basis_coord.name:
return basis_coord
raise ValueError("Coordinate name not in domain")
def enumerate_unique_bases(self):
axes = []
unique_bases = []
for axis, basis in enumerate(self.full_bases):
if (basis is None) or (basis not in unique_bases):
axes.append(axis)
unique_bases.append(basis)
return zip(axes, unique_bases)
@CachedAttribute
def constant(self):
"""Tuple of constant flags."""
const = np.ones(self.dist.dim, dtype=bool)
for basis in self.bases:
for subaxis in range(basis.dim):
const[basis.axis+subaxis] = basis.constant[subaxis]
return tuple(const)
@CachedAttribute
def nonconstant(self):
return tuple(~c for c in self.constant)
@CachedAttribute
def mode_dependence(self):
"""Tuple of dependence flags."""
dep = np.zeros(self.dist.dim, dtype=bool)
for basis in self.bases:
for subaxis in range(basis.dim):
dep[basis.axis+subaxis] = basis.subaxis_dependence[subaxis]
return tuple(dep)
@CachedAttribute
def dim(self):
return sum(self.nonconstant)
@CachedAttribute
def coeff_shape(self):
"""Compute coefficient shape."""
scales = tuple(1 for i in range(self.dist.dim))
return self.global_shape(layout=self.dist.coeff_layout, scales=scales)
def grid_shape(self, scales):
"""Compute grid shape."""
# Remedy scales before calling cached method
scales = self.dist.remedy_scales(scales)
return self._grid_shape(scales)
def global_shape(self, layout, scales):
shape = np.ones(self.dist.dim, dtype=int)
for basis in self.bases:
basis_axes = slice(basis.first_axis, basis.last_axis+1)
shape[basis_axes] = basis.global_shape(layout.grid_space[basis_axes], scales[basis_axes])
return tuple(shape)
@CachedMethod
def chunk_shape(self, layout):
"""Compute chunk shape."""
shape = np.ones(self.dist.dim, dtype=int)
for basis in self.bases:
basis_axes = slice(basis.first_axis, basis.last_axis+1)
shape[basis_axes] = basis.chunk_shape(layout.grid_space[basis_axes])
return tuple(shape)
def group_shape(self, layout):
"""Compute group shape."""
group_shape = np.ones(self.dist.dim, dtype=int)
for basis in self.bases:
basis_axes = slice(basis.first_axis, basis.last_axis+1)
group_shape[basis_axes] = basis.group_shape
group_shape[layout.grid_space] = 1
return group_shape
@CachedMethod
def _grid_shape(self, scales):
"""Cached grid shape computation."""
shape = np.ones(self.dist.dim, dtype=int)
for basis in self.bases:
subscales = scales[basis.axis:basis.axis+basis.dim]
subshape = basis.grid_shape(subscales)
shape[basis.axis:basis.axis+basis.dim] = subshape
return tuple(shape)
# def expand_bases(self, bases):
# exp_bases = [None] * self.domain.dim
# for basis in bases:
# if basis is not None:
# if exp_bases[basis.space.axis] is not None:
# raise ValueError("Degenerate bases.")
# exp_bases[basis.space.axis] = basis
# return tuple(exp_bases)
# def __contains__(self, item):
# if isinstance(item, Subdomain):
# for axis in range(self.domain.dim):
# if item.spaces[axis] not in {None, self.spaces[axis]}:
# return False
# return True
# else:
# space = self.domain.get_space_object(item)
# return (space in self.spaces)
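# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how _preprocess_args
# normalizes a bases collection. The _StubCS/_StubBasis classes are toy
# stand-ins for real CoordinateSystem/Basis objects, introduced only for this
# illustration; the block only runs if the module is executed directly.
if __name__ == "__main__":
    class _StubCS:
        pass
    class _StubBasis:
        def __init__(self, first_axis, dim):
            self.first_axis = first_axis
            self.dim = dim
            self.coordsystem = _StubCS()
    bx = _StubBasis(first_axis=0, dim=1)
    bz = _StubBasis(first_axis=2, dim=1)
    # None entries and duplicates are dropped; bases are sorted by first_axis.
    (_, bases), _ = Domain._preprocess_args(None, [bz, None, bx, bz])
    assert bases == (bx, bz)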
|
3,301 | test ksize2x2 stride1x1 rate1x1 valid |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ExtractImagePatches op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ExtractImagePatches(xla_test.XLATestCase):
"""Functional tests for ExtractImagePatches op."""
def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
"""Tests input-output pairs for the ExtractImagePatches op.
Args:
image: Input tensor with shape: [batch, in_rows, in_cols, depth].
ksizes: Patch size specified as: [ksize_rows, ksize_cols].
strides: Output strides, specified as [stride_rows, stride_cols].
rates: Atrous rates, specified as [rate_rows, rate_cols].
padding: Padding type.
patches: Expected output.
"""
ksizes = [1] + ksizes + [1]
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.session():
image_placeholder = array_ops.placeholder(dtypes.float32)
with self.test_scope():
out_tensor = array_ops.extract_image_patches(
image_placeholder,
ksizes=ksizes,
strides=strides,
rates=rates,
padding=padding,
name="im2col")
feed_dict = {image_placeholder: image}
self.assertAllClose(patches, out_tensor.eval(feed_dict=feed_dict))
def testKsize1x1Stride1x1Rate1x1(self):
"""Verifies that for 1x1 kernel the output equals the input."""
# [2, 3, 4, 5]
image = np.reshape(range(120), [2, 3, 4, 5])
# [2, 3, 4, 5]
patches = np.reshape(range(120), [2, 3, 4, 5])
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[1, 1],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize1x1Stride2x3Rate1x1(self):
"""Test for 1x1 kernel and strides."""
# [2, 4, 5, 3]
image = np.reshape(range(120), [2, 4, 5, 3])
# [2, 2, 2, 3]
patches = image[:, ::2, ::3, :]
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[2, 3],
rates=[1, 1],
padding=padding,
patches=patches)
def METHOD_NAME(self):
"""Test for 2x2 kernel with VALID padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 1, 1, 4]
patches = [[[[1, 2, 3, 4]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
patches=patches)
def testKsize2x2Stride1x1Rate1x1Same(self):
"""Test for 2x2 kernel with SAME padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 2, 2, 4]
patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
patches=patches)
def testKsize2x2Stride1x1Rate2x2Valid(self):
"""Test for 2x2 kernel with 2x2 dilation."""
# [1, 4, 4, 1]
image = np.arange(16).reshape(1, 4, 4, 1).astype(np.float32)
# [1, 2, 2, 4]
patches = [[[[0, 2, 8, 10], [1, 3, 9, 11]],
[[4, 6, 12, 14], [5, 7, 13, 15]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[2, 2],
padding="VALID",
patches=patches)
def testKsize2x2Stride1x1Rate1x1ValidDepth2(self):
"""Test for 2x2 kernel with VALID padding."""
# [1, 2, 2, 2]
image = [[[[1, 5], [2, 6]], [[3, 7], [4, 8]]]]
# [1, 1, 1, 8]
patches = [[[[1, 5, 2, 6, 3, 7, 4, 8]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
patches=patches)
if __name__ == "__main__":
test.main()
|
3,302 | test gmm wrong descriptor format 4 |
import pytest
import numpy as np
pytest.importorskip('sklearn')
from skimage.feature._fisher_vector import ( # noqa: E402
learn_gmm, fisher_vector, FisherVectorException,
DescriptorException
)
def test_gmm_wrong_descriptor_format_1():
"""Test that DescriptorException is raised when wrong type for descriptions
is passed.
"""
with pytest.raises(DescriptorException):
learn_gmm('completely wrong test', n_modes=1)
def test_gmm_wrong_descriptor_format_2():
"""Test that DescriptorException is raised when descriptors are of
different dimensionality.
"""
with pytest.raises(DescriptorException):
learn_gmm([np.zeros((5, 11)), np.zeros((4, 10))], n_modes=1)
def test_gmm_wrong_descriptor_format_3():
"""Test that DescriptorException is raised when not all descriptors are of
rank 2.
"""
with pytest.raises(DescriptorException):
learn_gmm([np.zeros((5, 10)), np.zeros((4, 10, 1))], n_modes=1)
def METHOD_NAME():
"""Test that DescriptorException is raised when elements of descriptor list
are of the incorrect type (i.e. not a NumPy ndarray).
"""
with pytest.raises(DescriptorException):
learn_gmm([[1, 2, 3], [1, 2, 3]], n_modes=1)
def test_gmm_wrong_num_modes_format_1():
"""Test that FisherVectorException is raised when incorrect type for
n_modes is passed into the learn_gmm function.
"""
with pytest.raises(FisherVectorException):
learn_gmm([np.zeros((5, 10)), np.zeros((4, 10))], n_modes='not_valid')
def test_gmm_wrong_num_modes_format_2():
"""Test that FisherVectorException is raised when a number that is not a
positive integer is passed into the n_modes argument of learn_gmm.
"""
with pytest.raises(FisherVectorException):
learn_gmm([np.zeros((5, 10)), np.zeros((4, 10))], n_modes=-1)
def test_gmm_wrong_covariance_type():
"""Test that FisherVectorException is raised when wrong covariance type is
passed in as a keyword argument.
"""
with pytest.raises(FisherVectorException):
learn_gmm(
np.random.random((10, 10)), n_modes=2,
gm_args={'covariance_type': 'full'}
)
def test_gmm_correct_covariance_type():
"""Test that GMM estimation is successful when the correct covariance type
is passed in as a keyword argument.
"""
gmm = learn_gmm(
np.random.random((10, 10)), n_modes=2,
gm_args={'covariance_type': 'diag'}
)
assert gmm.means_ is not None
assert gmm.covariances_ is not None
assert gmm.weights_ is not None
def test_gmm_e2e():
"""
Test the GMM estimation. Since this is essentially a wrapper for the
scikit-learn GaussianMixture class, the testing of the actual inner
workings of the GMM estimation is left to scikit-learn and its
dependencies.
We instead simply assert that the estimation was successful based on the
fact that the GMM object will have associated mixture weights, means, and
variances after estimation is successful/complete.
"""
gmm = learn_gmm(np.random.random((100, 64)), n_modes=5)
assert gmm.means_ is not None
assert gmm.covariances_ is not None
assert gmm.weights_ is not None
def test_fv_wrong_descriptor_types():
"""
Test that DescriptorException is raised when the incorrect type for the
descriptors is passed into the fisher_vector function.
"""
try:
from sklearn.mixture import GaussianMixture
except ImportError:
print(
'scikit-learn is not installed. Please ensure it is installed in '
'order to use the Fisher vector functionality.'
)
with pytest.raises(DescriptorException):
fisher_vector([[1, 2, 3, 4]], GaussianMixture())
def test_fv_wrong_gmm_type():
"""
Test that FisherVectorException is raised when a GMM not of type
sklearn.mixture.GaussianMixture is passed into the fisher_vector
function.
"""
class MyDifferentGaussianMixture:
pass
with pytest.raises(FisherVectorException):
fisher_vector(np.zeros((10, 10)), MyDifferentGaussianMixture())
def test_fv_e2e():
"""
Test the Fisher vector computation given a GMM returned from the learn_gmm
function. We simply assert that the dimensionality of the resulting Fisher
vector is correct.
The dimensionality of a Fisher vector is given by 2KD + K, where K is the
number of Gaussians specified in the associated GMM, and D is the
dimensionality of the descriptors used to estimate the GMM.
"""
dim = 128
num_modes = 8
expected_dim = 2 * num_modes * dim + num_modes
descriptors = [
np.random.random((np.random.randint(5, 30), dim))
for _ in range(10)
]
gmm = learn_gmm(descriptors, n_modes=num_modes)
fisher_vec = fisher_vector(descriptors[0], gmm)
assert len(fisher_vec) == expected_dim
def test_fv_e2e_improved():
"""
Test the improved Fisher vector computation given a GMM returned from the
learn_gmm function. We simply assert that the dimensionality of the
resulting Fisher vector is correct.
The dimensionality of a Fisher vector is given by 2KD + K, where K is the
number of Gaussians specified in the associated GMM, and D is the
dimensionality of the descriptors used to estimate the GMM.
"""
dim = 128
num_modes = 8
expected_dim = 2 * num_modes * dim + num_modes
descriptors = [
np.random.random((np.random.randint(5, 30), dim))
for _ in range(10)
]
gmm = learn_gmm(descriptors, n_modes=num_modes)
fisher_vec = fisher_vector(descriptors[0], gmm, improved=True)
assert len(fisher_vec) == expected_dim
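def test_fv_usage_sketch():
    """Hedged usage sketch (an added illustration, not an original test): feed
    Fisher vectors into a downstream classifier. The LinearSVC pipeline and the
    toy shapes/labels here are assumptions about a typical use, not part of the
    Fisher vector API itself.
    """
    from sklearn.svm import LinearSVC
    dim, num_modes = 16, 4
    descriptors = [np.random.random((20, dim)) for _ in range(6)]
    gmm = learn_gmm(descriptors, n_modes=num_modes)
    # One Fisher vector (length 2*K*D + K = 132) per descriptor set.
    features = np.array([fisher_vector(d, gmm) for d in descriptors])
    labels = np.array([0, 0, 0, 1, 1, 1])
    clf = LinearSVC().fit(features, labels)
    assert clf.predict(features[:1]).shape == (1,)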
|
3,303 | add regs |
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2023 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import math
from openram.tech import spice
class rom_verilog:
"""
Create a behavioral Verilog file for simulation.
This is inherited by the rom_base class.
"""
def __init__(self):
pass
def verilog_write(self, verilog_name):
""" Write a behavioral Verilog model. """
self.vf = open(verilog_name, "w")
self.vf.write("// OpenROM ROM model\n")
#basic info
self.vf.write("// Words: {0}\n".format(self.num_words))
self.vf.write("// Word size: {0}\n".format(self.word_size))
self.vf.write("// Word per Row: {0}\n".format(self.words_per_row))
self.vf.write("// Data Type: {0}\n".format(self.data_type))
self.vf.write("// Data File: {0}\n".format(self.rom_data))
self.vf.write("\n")
try:
self.vdd_name = spice["power"]
except KeyError:
self.vdd_name = "vdd"
try:
self.gnd_name = spice["ground"]
except KeyError:
self.gnd_name = "gnd"
#add multiple banks later
self.vf.write("module {0}(\n".format(self.name))
self.vf.write("`ifdef USE_POWER_PINS\n")
self.vf.write(" {},\n".format(self.vdd_name))
self.vf.write(" {},\n".format(self.gnd_name))
self.vf.write("`endif\n")
for port in self.all_ports:
if port in self.read_ports:
self.vf.write("// Port {0}: R\n".format(port))
self.vf.write(" clk{0},csb{0},addr{0},dout{0}".format(port))
# Continue for every port on a new line
if port != self.all_ports[-1]:
self.vf.write(",\n")
self.vf.write("\n );\n\n")
self.vf.write(" parameter DATA_WIDTH = {0} ;\n".format(self.word_size))
self.vf.write(" parameter ADDR_WIDTH = {0} ;\n".format(math.ceil(math.log(self.num_words,2))))
self.vf.write(" parameter ROM_DEPTH = 1 << ADDR_WIDTH;\n")
self.vf.write(" // FIXME: This delay is arbitrary.\n")
self.vf.write(" parameter DELAY = 3 ;\n")
self.vf.write(" parameter VERBOSE = 1 ; //Set to 0 to only display warnings\n")
self.vf.write(" parameter T_HOLD = 1 ; //Delay to hold dout value after posedge. Value is arbitrary\n")
self.vf.write("\n")
self.vf.write("`ifdef USE_POWER_PINS\n")
self.vf.write(" inout {};\n".format(self.vdd_name))
self.vf.write(" inout {};\n".format(self.gnd_name))
self.vf.write("`endif\n")
for port in self.all_ports:
self.add_inputs_outputs(port)
self.vf.write("\n")
# This is the memory array itself
self.vf.write(" reg [DATA_WIDTH-1:0] mem [0:ROM_DEPTH-1];\n\n")
#write memory init here
self.vf.write(f" initial begin\n")
if self.data_type == "bin":
self.vf.write(f" $readmemb(\"{self.rom_data}\",mem,0,ROM_DEPTH-1);\n")
elif self.data_type == "hex":
self.vf.write(f" $readmemh(\"{self.rom_data}\",mem,0, ROM_DEPTH-1);\n")
else:
raise ValueError(f"Data type: {self.data_type} is not supported!")
self.vf.write(f" end\n\n")
for port in self.all_ports:
self.register_inputs(port)
for port in self.all_ports:
if port in self.read_ports:
self.add_read_block(port)
self.vf.write("\n")
self.vf.write("endmodule\n")
self.vf.close()
def register_inputs(self, port):
"""
Register the control signal, address and data inputs.
"""
self.METHOD_NAME(port)
self.add_flops(port)
def METHOD_NAME(self, port):
"""
Create the input regs for the given port.
"""
self.vf.write(" reg csb{0}_reg;\n".format(port))
self.vf.write(" reg [ADDR_WIDTH-1:0] addr{0}_reg;\n".format(port))
if port in self.read_ports:
self.vf.write(" reg [DATA_WIDTH-1:0] dout{0};\n".format(port))
def add_flops(self, port):
"""
Add the flop behavior logic for a port.
"""
self.vf.write("\n")
self.vf.write(" // All inputs are registers\n")
self.vf.write(" always @(posedge clk{0})\n".format(port))
self.vf.write(" begin\n")
self.vf.write(" csb{0}_reg = csb{0};\n".format(port))
self.vf.write(" addr{0}_reg = addr{0};\n".format(port))
if port in self.read_ports:
self.add_write_read_checks(port)
if port in self.read_ports:
self.vf.write(" #(T_HOLD) dout{0} = {1}'bx;\n".format(port, self.word_size))
self.vf.write(" if ( !csb{0}_reg && VERBOSE ) \n".format(port))
self.vf.write(" $display($time,\" Reading %m addr{0}=%b dout{0}=%b\",addr{0}_reg,mem[addr{0}_reg]);\n".format(port))
self.vf.write(" end\n\n")
def add_inputs_outputs(self, port):
"""
Add the module input and output declaration for a port.
"""
self.vf.write(" input clk{0}; // clock\n".format(port))
self.vf.write(" input csb{0}; // active low chip select\n".format(port))
self.vf.write(" input [ADDR_WIDTH-1:0] addr{0};\n".format(port))
if port in self.read_ports:
self.vf.write(" output [DATA_WIDTH-1:0] dout{0};\n".format(port))
def add_write_block(self, port):
"""
ROM does not take writes, so this function does nothing.
"""
self.vf.write("\n")
def add_read_block(self, port):
"""
Add a read port block.
"""
self.vf.write("\n")
self.vf.write(" // Memory Read Block Port {0}\n".format(port))
self.vf.write(" // Read Operation : When web{0} = 1, csb{0} = 0\n".format(port))
self.vf.write(" always @ (negedge clk{0})\n".format(port))
self.vf.write(" begin : MEM_READ{0}\n".format(port))
self.vf.write(" if (!csb{0}_reg)\n".format(port))
self.vf.write(" dout{0} <= #(DELAY) mem[addr{0}_reg];\n".format(port))
self.vf.write(" end\n")
def add_write_read_checks(self, rport):
"""
Since ROMs don't have write ports, this does nothing.
"""
pass
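# Hedged illustration (comment only, not executed): for read port 0, the
# register_inputs() call above writes roughly the following Verilog into the
# output file (whitespace approximate, <word_size> substituted at generation):
#
#     reg csb0_reg;
#     reg [ADDR_WIDTH-1:0] addr0_reg;
#     reg [DATA_WIDTH-1:0] dout0;
#
#     // All inputs are registers
#     always @(posedge clk0)
#     begin
#         csb0_reg = csb0;
#         addr0_reg = addr0;
#         #(T_HOLD) dout0 = <word_size>'bx;
#         if ( !csb0_reg && VERBOSE )
#             $display($time," Reading %m addr0=%b dout0=%b",addr0_reg,mem[addr0_reg]);
#     end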
|
3,304 | wrapper |
#!/usr/bin/env python3
# copyright (c) 2020 Bowen Ding
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import io
import os
from base64 import b64decode
from pathlib import Path
from typing import BinaryIO
from cryptography.exceptions import InvalidTag
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
backend = default_backend()
MAGIC_STRING = b"_e_n_c_r_y_p_t_e_d_"
def kdf_gen_key(password: str, salt: str, iters: int) -> bytes:
password = password.encode("utf-8")
salt = salt.encode("utf-8")
kdf = PBKDF2HMAC(
algorithm=hashes.SHA1(), length=16, salt=salt, iterations=iters, backend=backend
)
key = kdf.derive(password)
return key
def dispatchargs(decorated):
def METHOD_NAME(args):
if args.key is not None:
key = b64decode(args.key.encode("utf-8"))
elif args.password is not None:
key = kdf_gen_key(args.password, args.kdf_salt, args.kdf_iters)
else:
raise ValueError("Neither password nor key is provided")
tag = args.gcm_tag.encode("utf-8")
outputBuffer = io.BytesIO()
with Path(args.infile).open("rb") as in_:
decorated(key, in_, outputBuffer, tag)
with Path(args.outfile).open("wb") as out:
out.write(outputBuffer.getbuffer())
return METHOD_NAME
def encrypt(key: bytes, infile: BinaryIO, outfile: BinaryIO, tag: bytes):
if len(key) != 128 / 8:
raise ValueError("Unsupported key length: %d" % len(key))
aesgcm = AESGCM(key)
iv = os.urandom(12)
plaintext = infile
ciphertext = outfile
rawbytes = plaintext.read()
encrypted = aesgcm.encrypt(iv, rawbytes, tag)
ciphertext.write(MAGIC_STRING)
ciphertext.write(iv)
ciphertext.write(encrypted)
def decrypt(key: bytes, infile: BinaryIO, outfile: BinaryIO, tag: bytes):
if len(key) != 128 / 8:
raise ValueError("Unsupported key length: %d" % len(key))
aesgcm = AESGCM(key)
ciphertext = infile
plaintext = outfile
magicstring = ciphertext.read(len(MAGIC_STRING))
if magicstring != MAGIC_STRING:
raise ValueError("Data is not encrypted")
iv = ciphertext.read(12)
rawbytes = ciphertext.read()
try:
decrypted = aesgcm.decrypt(iv, rawbytes, tag)
except InvalidTag:
raise ValueError("Incorrect tag, iv, or corrupted ciphertext")
plaintext.write(decrypted)
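def _roundtrip_example() -> bytes:
    """Hedged in-memory usage sketch (illustrative, separate from the CLI below):
    derive a key with PBKDF2 and round-trip a buffer through encrypt/decrypt.
    The password, salt, iteration count, and tag values are arbitrary examples.
    Note that the `tag` argument is handed to AES-GCM as associated data.
    """
    key = kdf_gen_key("example password", "example salt", 10000)
    tag = b"example-aad"
    source, encrypted, recovered = io.BytesIO(b"hello"), io.BytesIO(), io.BytesIO()
    encrypt(key, source, encrypted, tag)
    encrypted.seek(0)
    decrypt(key, encrypted, recovered, tag)
    return recovered.getvalue()  # b"hello"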
if __name__ == "__main__":
import argparse as ap
parser = ap.ArgumentParser(description="Encrypt or decrypt using AES-128-GCM")
parser.add_argument("-k", "--key", help="Base64-encoded key")
parser.add_argument("-p", "--password", help="Password in plaintext")
parser.add_argument(
"--kdf-salt", help="PBKDF2 salt", default="saltysaltsweetysweet"
)
parser.add_argument(
"--kdf-iters", type=int, help="PBKDF2 iterations", default=10000
)
parser.add_argument("--gcm-tag", help="AES-GCM tag", default="AuTheNTiCatIoNtAG")
parser.add_argument("-i", "--infile", help="Input file")
parser.add_argument("-o", "--outfile", help="Output file")
subparsers = parser.add_subparsers(title="commands", dest="action")
parser_enc = subparsers.add_parser("enc", help="Encrypt")
parser_enc.set_defaults(execute=dispatchargs(encrypt))
parser_dec = subparsers.add_parser("dec", help="Decrypt")
parser_dec.set_defaults(execute=dispatchargs(decrypt))
args = parser.parse_args()
args.execute(args)
|
3,305 | get form kwargs |
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: Alexander Schwartz
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import logging
from datetime import timedelta
from django.conf import settings
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django.views.generic import FormView, TemplateView
from pretix.base.models import CachedFile
from pretix.base.services.orderimport import import_orders, parse_csv
from pretix.base.views.tasks import AsyncAction
from pretix.control.forms.orderimport import ProcessForm
from pretix.control.permissions import EventPermissionRequiredMixin
logger = logging.getLogger(__name__)
class ImportView(EventPermissionRequiredMixin, TemplateView):
template_name = 'pretixcontrol/orders/import_start.html'
permission = 'can_change_orders'
def post(self, request, *args, **kwargs):
if 'file' not in request.FILES:
return redirect(reverse('control:event.orders.import', kwargs={
'event': request.event.slug,
'organizer': request.organizer.slug,
}))
if not request.FILES['file'].name.lower().endswith('.csv'):
messages.error(request, _('Please only upload CSV files.'))
return redirect(reverse('control:event.orders.import', kwargs={
'event': request.event.slug,
'organizer': request.organizer.slug,
}))
if request.FILES['file'].size > settings.FILE_UPLOAD_MAX_SIZE_OTHER:
messages.error(request, _('Please do not upload files larger than 10 MB.'))
return redirect(reverse('control:event.orders.import', kwargs={
'event': request.event.slug,
'organizer': request.organizer.slug,
}))
cf = CachedFile.objects.create(
expires=now() + timedelta(days=1),
date=now(),
filename='import.csv',
type='text/csv',
)
cf.file.save('import.csv', request.FILES['file'])
return redirect(reverse('control:event.orders.import.process', kwargs={
'event': request.event.slug,
'organizer': request.organizer.slug,
'file': cf.id
}))
class ProcessView(EventPermissionRequiredMixin, AsyncAction, FormView):
permission = 'can_change_orders'
template_name = 'pretixcontrol/orders/import_process.html'
form_class = ProcessForm
task = import_orders
known_errortypes = ['DataImportError']
def METHOD_NAME(self):
k = super().METHOD_NAME()
k.update({
'event': self.request.event,
'initial': self.request.event.settings.order_import_settings,
'headers': self.parsed.fieldnames
})
return k
def form_valid(self, form):
self.request.event.settings.order_import_settings = form.cleaned_data
return self.do(
self.request.event.pk, self.file.id, form.cleaned_data, self.request.LANGUAGE_CODE,
self.request.user.pk
)
@cached_property
def file(self):
return get_object_or_404(CachedFile, pk=self.kwargs.get("file"), filename="import.csv")
@cached_property
def parsed(self):
try:
return parse_csv(self.file.file, 1024 * 1024)
except UnicodeDecodeError:
messages.warning(
self.request,
_(
"We could not identify the character encoding of the CSV file. "
"Some characters were replaced with a placeholder."
)
)
return parse_csv(self.file.file, 1024 * 1024, "replace")
def get(self, request, *args, **kwargs):
if 'async_id' in request.GET and settings.HAS_CELERY:
return self.get_result(request)
return FormView.get(self, request, *args, **kwargs)
def get_success_message(self, value):
return _('The import was successful.')
def get_success_url(self, value):
return reverse('control:event.orders', kwargs={
'event': self.request.event.slug,
'organizer': self.request.organizer.slug,
})
def dispatch(self, request, *args, **kwargs):
if 'async_id' in request.GET and settings.HAS_CELERY:
return self.get_result(request)
if not self.parsed:
messages.error(request, _('We\'ve been unable to parse the uploaded file as a CSV file.'))
return redirect(reverse('control:event.orders.import', kwargs={
'event': request.event.slug,
'organizer': request.organizer.slug,
}))
return super().dispatch(request, *args, **kwargs)
def get_error_url(self):
return reverse('control:event.orders.import.process', kwargs={
'event': self.request.event.slug,
'organizer': self.request.organizer.slug,
'file': self.file.id
})
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['file'] = self.file
ctx['parsed'] = self.parsed
ctx['sample_rows'] = list(self.parsed)[:3]
return ctx
|
3,306 | test positive list installable updates |
"""Test for Content Access (Golden Ticket) CLI
:Requirement: Content Access
:CaseLevel: Acceptance
:CaseComponent: Hosts-Content
:CaseAutomation: Automated
:team: Phoenix-subscriptions
:TestType: Functional
:Upstream: No
"""
import time
import pytest
from nailgun import entities
from robottelo.cli.host import Host
from robottelo.cli.package import Package
from robottelo.config import settings
from robottelo.constants import REAL_0_ERRATA_ID
from robottelo.constants import REAL_RHEL7_0_2_PACKAGE_FILENAME
from robottelo.constants import REAL_RHEL7_0_2_PACKAGE_NAME
from robottelo.constants import REPOS
pytestmark = [
pytest.mark.skipif(
(not settings.robottelo.REPOS_HOSTING_URL), reason='Missing repos_hosting_url'
),
pytest.mark.run_in_one_thread,
]
@pytest.fixture(scope='module')
def module_lce(module_sca_manifest_org):
return entities.LifecycleEnvironment(organization=module_sca_manifest_org).create()
@pytest.fixture(scope="module")
def rh_repo_cv(module_sca_manifest_org, rh_repo_gt_manifest, module_lce):
rh_repo_cv = entities.ContentView(organization=module_sca_manifest_org).create()
# Add CV to AK
rh_repo_cv.repository = [rh_repo_gt_manifest]
rh_repo_cv.update(['repository'])
rh_repo_cv.publish()
rh_repo_cv = rh_repo_cv.read()
# promote the last version published into the module lce
rh_repo_cv.version[-1].promote(data={'environment_ids': module_lce.id, 'force': False})
return rh_repo_cv
@pytest.fixture(scope="module")
def module_ak(rh_repo_cv, module_sca_manifest_org, module_lce):
module_ak = entities.ActivationKey(
content_view=rh_repo_cv,
environment=module_lce,
organization=module_sca_manifest_org,
).create()
# Ensure tools repo is enabled in the activation key
module_ak.content_override(
data={'content_overrides': [{'content_label': REPOS['rhst7']['id'], 'value': '1'}]}
)
return module_ak
@pytest.fixture(scope="module")
def vm(
rh_repo_gt_manifest,
module_sca_manifest_org,
module_ak,
rhel7_contenthost_module,
module_target_sat,
):
# python-psutil obsoleted by python2-psutil, install older python2-psutil for errata test.
rhel7_contenthost_module.run(
'rpm -Uvh https://download.fedoraproject.org/pub/epel/7/x86_64/Packages/p/'
'python2-psutil-5.6.7-1.el7.x86_64.rpm'
)
rhel7_contenthost_module.install_katello_ca(module_target_sat)
rhel7_contenthost_module.register_contenthost(module_sca_manifest_org.label, module_ak.name)
host = entities.Host().search(query={'search': f'name={rhel7_contenthost_module.hostname}'})
host_id = host[0].id
host_content = entities.Host(id=host_id).read_json()
assert host_content["subscription_status"] == 5
rhel7_contenthost_module.install_katello_host_tools()
return rhel7_contenthost_module
@pytest.mark.tier2
@pytest.mark.pit_client
@pytest.mark.pit_server
def METHOD_NAME(vm):
"""Ensure packages applicability is functioning properly.
:id: 4feb692c-165b-4f96-bb97-c8447bd2cf6e
:steps:
1. Setup a content host with registration to unrestricted org
2. Install a package that has updates
3. Run `hammer package list` specifying option
packages-restrict-applicable="true".
:expectedresults:
1. Update package is available independent of subscription because
Golden Ticket is enabled.
:BZ: 1344049, 1498158
:parametrized: yes
:CaseImportance: Critical
"""
for _ in range(30):
applicable_packages = Package.list(
{
'host': vm.hostname,
'packages-restrict-applicable': 'true',
'search': f'name={REAL_RHEL7_0_2_PACKAGE_NAME}',
}
)
if applicable_packages:
break
time.sleep(10)
assert len(applicable_packages) > 0
assert REAL_RHEL7_0_2_PACKAGE_FILENAME in [
package['filename'] for package in applicable_packages
]
@pytest.mark.tier2
@pytest.mark.upgrade
@pytest.mark.pit_client
@pytest.mark.pit_server
def test_positive_erratum_installable(vm):
"""Ensure erratum applicability is showing properly, without attaching
any subscription.
:id: e8dc52b9-884b-40d7-9244-680b5a736cf7
:steps:
1. register a host to unrestricted org with Library
2. install a package, that will need errata to be applied
3. list the host applicable errata with searching the required
errata id
:expectedresults: errata is listed successfully and is installable
:BZ: 1344049, 1498158
:parametrized: yes
:CaseImportance: Critical
"""
# check that package errata is applicable
for _ in range(30):
erratum = Host.errata_list({'host': vm.hostname, 'search': f'id = {REAL_0_ERRATA_ID}'})
if erratum:
break
time.sleep(10)
assert len(erratum) == 1
assert erratum[0]['installable'] == 'true'
@pytest.mark.tier2
def test_negative_rct_not_shows_golden_ticket_enabled(
target_sat, function_org, function_entitlement_manifest
):
"""Assert restricted manifest has no Golden Ticket enabled .
:id: 754c1be7-468e-4795-bcf9-258a38f3418b
:steps:
1. Run `rct cat-manifest /tmp/restricted_manifest.zip`.
:expectedresults:
1. Assert `Content Access Mode: Simple Content Access` is not present.
:CaseImportance: High
"""
# upload organization manifest with org environment access disabled
org = function_org
manifest = function_entitlement_manifest
target_sat.upload_manifest(org.id, manifest, interface='CLI')
result = target_sat.execute(f'rct cat-manifest {manifest.name}')
assert result.status == 0
assert 'Content Access Mode: Simple Content Access' not in result.stdout
@pytest.mark.tier2
@pytest.mark.upgrade
def test_positive_rct_shows_golden_ticket_enabled(module_sca_manifest, target_sat):
"""Assert unrestricted manifest has Golden Ticket enabled .
:id: 0c6e2f88-1a86-4417-9248-d7bd20584197
:steps:
1. Run `rct cat-manifest /tmp/unrestricted_manifest.zip`.
:expectedresults:
1. Assert `Content Access Mode: Simple Content Access` is present.
:CaseImportance: Medium
"""
with module_sca_manifest as manifest:
target_sat.put(f'{manifest.path}', f'{manifest.name}')
result = target_sat.execute(f'rct cat-manifest {module_sca_manifest.name}')
assert result.status == 0
assert 'Content Access Mode: Simple Content Access' in result.stdout
@pytest.mark.tier3
def test_negative_unregister_and_pull_content(vm):
"""Attempt to retrieve content after host has been unregistered from Satellite
:id: de0d0d91-b1e1-4f0e-8a41-c27df4d6b6fd
:expectedresults: Host can no longer retrieve content from satellite
:CaseLevel: System
:parametrized: yes
:CaseImportance: Critical
"""
result = vm.run('subscription-manager unregister')
assert result.status == 0
# Try installing any package from available repos on vm
result = vm.run('yum install -y katello-agent')
assert result.status != 0
|
3,307 | test custom routing prefix |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestMcrouterRoutingPrefixAscii(McrouterTestCase):
config = './mcrouter/test/routing_prefix_test_ascii.json'
extra_args = []
def setUp(self):
# The order here must correspond to the order of hosts in the .json
self.allhosts = []
for _ in range(0, 4):
self.allhosts.append(self.add_server(Memcached()))
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_routing_prefix(self):
mcr = self.get_mcrouter()
nclusters = len(self.allhosts)
# first try setting a key to the local cluster
mcr.set("testkeylocal", "testvalue")
self.assertEqual(self.allhosts[0].get("testkeylocal"), "testvalue")
for i in range(1, nclusters):
self.assertIsNone(self.allhosts[i].get("testkeylocal"))
mcr.set("/*/*/testkey-routing", "testvalue")
# /*/*/ is all-fastest, and some requests might complete asynchronously.
# As a workaround, just wait
time.sleep(1)
local = self.allhosts[0].get("testkey-routing", True)
self.assertEqual(local["value"], "testvalue")
# make sure the key got set as "/*/*/key"
for i in range(1, nclusters):
local = self.allhosts[i].get("/*/*/testkey-routing", True)
self.assertEqual(local["value"], "testvalue")
class TestMcrouterRoutingPrefixCaret(TestMcrouterRoutingPrefixAscii):
config = './mcrouter/test/routing_prefix_test_caret.json'
class TestMcrouterRoutingPrefixOldNaming(TestMcrouterRoutingPrefixAscii):
config = './mcrouter/test/routing_prefix_test_old_naming.json'
class TestMcrouterRoutingPrefixSimpleRoutes(TestMcrouterRoutingPrefixAscii):
config = './mcrouter/test/routing_prefix_test_simple_routes.json'
class TestFallbackRouting(McrouterTestCase):
config = './mcrouter/test/routing_prefix_test_fallback_route.json'
extra_args = []
def setUp(self):
self.aa = self.add_server(Memcached())
self.ab = self.add_server(Memcached())
self.ba = self.add_server(Memcached())
self.bb = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_fallback_routing(self):
mcr = self.get_mcrouter()
key = "/*/*/key"
orig_value = "orig"
mcr.set(key, orig_value)
time.sleep(1)
self.assertEqual(self.aa.get('key'), orig_value)
self.assertEqual(self.ab.get('key'), orig_value)
self.assertEqual(self.ba.get('key'), orig_value)
self.assertEqual(self.bb.get('key'), orig_value)
key = "/a/foobar/key"
value1 = "value1"
mcr.set(key, value1)
time.sleep(1)
self.assertEqual(self.ab.get('key'), orig_value)
self.assertEqual(self.ba.get('key'), orig_value)
self.assertEqual(self.bb.get('key'), orig_value)
self.assertEqual(self.aa.get('key'), value1)
class TestCustomRoutingPrefixes(McrouterTestCase):
config = './mcrouter/test/routing_prefix_test_custom.json'
extra_args = []
def setUp(self):
self.aa = self.add_server(Memcached())
self.ab = self.add_server(Memcached())
self.ba = self.add_server(Memcached())
self.bb = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def METHOD_NAME(self):
mcr = self.get_mcrouter()
key = "/*/a/key"
value = "value"
mcr.set(key, value)
time.sleep(1)
self.assertEqual(self.aa.get('key'), value)
self.assertEqual(self.ba.get('key'), value)
key = "/b*/*/key"
value = "value2"
mcr.set(key, value)
time.sleep(1)
self.assertEqual(self.ba.get('key'), value)
self.assertEqual(self.bb.get('key'), value)
key = "/b/*b*/key"
value = "value3"
mcr.set(key, value)
self.assertEqual(self.bb.get('key'), value)
|
3,308 | set ttl state |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: dynamodb_ttl
version_added: 1.0.0
short_description: Set TTL for a given DynamoDB table
description:
- Sets the TTL for a given DynamoDB table.
options:
state:
description:
- State to set DynamoDB table to.
choices: ['enable', 'disable']
required: false
type: str
table_name:
description:
- Name of the DynamoDB table to work on.
required: true
type: str
attribute_name:
description:
- The name of the Time To Live attribute used to store the expiration time for items in the table.
- This appears to be required by the API even when disabling TTL.
required: true
type: str
author:
- Ted Timmons (@tedder)
extends_documentation_fragment:
- amazon.aws.common.modules
- amazon.aws.region.modules
- amazon.aws.boto3
"""
EXAMPLES = r"""
- name: enable TTL on my cowfacts table
community.aws.dynamodb_ttl:
state: enable
table_name: cowfacts
attribute_name: cow_deleted_date
- name: disable TTL on my cowfacts table
community.aws.dynamodb_ttl:
state: disable
table_name: cowfacts
attribute_name: cow_deleted_date
"""
RETURN = r"""
current_status:
description: current or new TTL specification.
type: dict
returned: always
sample:
- { "AttributeName": "deploy_timestamp", "TimeToLiveStatus": "ENABLED" }
- { "AttributeName": "deploy_timestamp", "Enabled": true }
"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_current_ttl_state(c, table_name):
"""Fetch the state dict for a table."""
current_state = c.describe_time_to_live(TableName=table_name)
return current_state.get("TimeToLiveDescription")
def does_state_need_changing(attribute_name, desired_state, current_spec):
"""Run checks to see if the table needs to be modified. Basically a dirty check."""
if not current_spec:
# we don't have an entry (or a table?)
return True
if desired_state.lower() == "enable" and current_spec.get("TimeToLiveStatus") not in ["ENABLING", "ENABLED"]:
return True
if desired_state.lower() == "disable" and current_spec.get("TimeToLiveStatus") not in ["DISABLING", "DISABLED"]:
return True
if attribute_name != current_spec.get("AttributeName"):
return True
return False
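def _ttl_dirty_check_examples():
    """Hedged illustration (not used by the module): how does_state_need_changing
    behaves on a sample DescribeTimeToLive description. Attribute names are made up.
    """
    spec = {"TimeToLiveStatus": "ENABLED", "AttributeName": "expires_at"}
    assert does_state_need_changing("expires_at", "enable", spec) is False  # already enabled
    assert does_state_need_changing("expires_at", "disable", spec) is True  # needs disabling
    assert does_state_need_changing("other_attr", "enable", spec) is True  # attribute differs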
def METHOD_NAME(c, table_name, state, attribute_name):
"""Set our specification. Returns the update_time_to_live specification dict,
which is different than the describe_* call."""
is_enabled = False
if state.lower() == "enable":
is_enabled = True
ret = c.update_time_to_live(
TableName=table_name,
TimeToLiveSpecification={
"Enabled": is_enabled,
"AttributeName": attribute_name,
},
)
return ret.get("TimeToLiveSpecification")
def main():
argument_spec = dict(
state=dict(choices=["enable", "disable"]),
table_name=dict(required=True),
attribute_name=dict(required=True),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
)
try:
dbclient = module.client("dynamodb")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to connect to AWS")
result = {"changed": False}
state = module.params["state"]
# wrap all our calls to catch the standard exceptions. We don't pass `module` in to the
# methods so it's easier to do here.
try:
current_state = get_current_ttl_state(dbclient, module.params["table_name"])
if does_state_need_changing(module.params["attribute_name"], module.params["state"], current_state):
# changes needed
new_state = METHOD_NAME(
dbclient, module.params["table_name"], module.params["state"], module.params["attribute_name"]
)
result["current_status"] = new_state
result["changed"] = True
else:
# no changes needed
result["current_status"] = current_state
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e, msg="Failed to get or update ttl state")
except botocore.exceptions.ParamValidationError as e:
module.fail_json_aws(e, msg="Failed due to invalid parameters")
except ValueError as e:
module.fail_json_aws(e, msg="Failed")
module.exit_json(**result)
if __name__ == "__main__":
main()
|
3,309 |
set pids
|
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import glob
import logging
import os
import shutil
from lib.api.process import Process
from lib.common.exceptions import CuckooPackageError
log = logging.getLogger(__name__)
class Package:
"""Base abstract analysis package."""
PATHS = []
def __init__(self, options=None, config=None):
"""@param options: options dict."""
if options is None:
options = {}
self.config = config
self.options = options
self.pids = []
# Fetch the current working directory, defaults to $TEMP.
def METHOD_NAME(self, pids):
"""Update list of monitored PIDs in the package context.
@param pids: list of pids.
"""
self.pids = pids
def start(self):
"""Run analysis package.
@raise NotImplementedError: this method is abstract.
"""
raise NotImplementedError
def check(self):
"""Check."""
return True
def enum_paths(self):
"""Enumerate available paths."""
for path in self.PATHS:
basedir = path[0]
sys32 = len(path) > 1 and path[1].lower() == "system32"
if basedir == "SystemRoot":
if not sys32 or "PE32+" not in self.config.file_type:
yield os.path.join(os.getenv("SystemRoot"), *path[1:])
yield os.path.join(os.getenv("SystemRoot"), "sysnative", *path[2:])
elif basedir == "ProgramFiles":
if os.getenv("ProgramFiles(x86)"):
yield os.path.join(os.getenv("ProgramFiles(x86)"), *path[1:])
yield os.path.join(os.getenv("ProgramFiles").replace(" (x86)", ""), *path[1:])
elif basedir == "HomeDrive":
# os.path.join() does not work well when giving just C:
# instead of C:\\, so we manually add the backslash.
homedrive = f"{os.getenv('HomeDrive')}\\"
yield os.path.join(homedrive, *path[1:])
else:
yield os.path.join(*path)
def get_path(self, application):
"""Search for the application in all available paths.
@param application: application executable name
@return: executable path
"""
for path in self.enum_paths():
if application in path and os.path.isfile(path):
return path
raise CuckooPackageError(f"Unable to find any {application} executable")
def get_path_glob(self, application):
"""Search for the application in all available paths with glob support.
@param application: application executable name
@return: executable path
"""
for path in self.enum_paths():
for path in glob.iglob(path):
if os.path.isfile(path) and (not application or application.lower() in path.lower()):
return path
raise CuckooPackageError(f"Unable to find any {application} executable")
def get_path_app_in_path(self, application):
"""Search for the application in all available paths.
@param application: application executable name
@return: executable path
"""
for path in self.enum_paths():
if os.path.isfile(path) and (not application or application.lower() in path.lower()):
return path
raise CuckooPackageError(f"Unable to find any {application} executable")
def execute(self, path, args, interest):
"""Starts an executable for analysis.
@param path: executable path
@param args: executable arguments
@param interest: file of interest, passed to the cuckoomon config
@return: process pid
"""
free = self.options.get("free", False)
suspended = not free
kernel_analysis = bool(self.options.get("kernel_analysis", False))
p = Process(options=self.options, config=self.config)
if not p.execute(path=path, args=args, suspended=suspended, kernel_analysis=kernel_analysis):
raise CuckooPackageError("Unable to execute the initial process, analysis aborted")
if free:
return None
if not kernel_analysis:
p.inject(interest)
p.resume()
p.close()
return p.pid
def package_files(self):
"""A list of files to upload to host.
The list should be a list of tuples (<path on guest>, <name of file in package_files folder>).
(package_files is a folder that will be created in analysis folder).
"""
return []
def finish(self):
"""Finish run.
If configured, upload memory dumps of
all running processes.
"""
return True
def move_curdir(self, filepath):
"""Move a file to the current working directory so it can be executed
from there.
@param filepath: the file to be moved
@return: the new filepath
"""
if "curdir" in self.options:
self.curdir = os.path.expandvars(self.options["curdir"])
else:
self.curdir = os.getenv("TEMP")
newpath = os.path.join(self.curdir, os.path.basename(filepath))
shutil.move(filepath, newpath)
return newpath
class Auxiliary:
# Setting all Auxiliary to have a default priority of 0
start_priority = 0
stop_priority = 0
def __init__(self, options=None, config=None):
"""@param options: options dict."""
if options is None:
options = {}
self.options = options
self.config = config
def add_pid(self, pid):
pass
def del_pid(self, pid):
pass
|
3,310 |
should exit
|
import logging
import os
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.runtimes.servers import BaseServer
from jina._docarray import docarray_v2
class WebSocketServer(BaseServer):
"""WebSocket Server implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables, otherwise, it will unset
these proxy variables before start. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
self.logger.debug(f'Setting up Websocket server')
if docarray_v2:
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
if isinstance(self._request_handler, GatewayRequestHandler):
await self._request_handler.streamer._get_endpoints_input_output_models(is_cancel=self.is_cancel)
self._request_handler.streamer._validate_flow_docarray_compatibility()
self.app = self._request_handler._websocket_fastapi_default_app(tracing=self.tracing, tracer_provider=self.tracer_provider)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.METHOD_NAME:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
self.logger.debug(f'UviServer server setup')
await self.server.setup()
self.logger.debug(f'Websocket server setup successful')
@property
def _should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self.server.METHOD_NAME
@property
def METHOD_NAME(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self._should_exit
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.logger.debug(f'Shutting down server')
await super().shutdown()
self.server.METHOD_NAME = True
await self.server.shutdown()
self.logger.debug(f'Server shutdown finished')
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
3,311 |
test create relation
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import unittest
from unittest.mock import ANY
from databuilder.models.dashboard.dashboard_query import DashboardQuery
from databuilder.models.graph_serializable import (
NODE_KEY, NODE_LABEL, RELATION_END_KEY, RELATION_END_LABEL, RELATION_REVERSE_TYPE, RELATION_START_KEY,
RELATION_START_LABEL, RELATION_TYPE,
)
from databuilder.serializers import (
atlas_serializer, mysql_serializer, neo4_serializer, neptune_serializer,
)
from databuilder.serializers.neptune_serializer import (
METADATA_KEY_PROPERTY_NAME_BULK_LOADER_FORMAT, NEPTUNE_CREATION_TYPE_JOB,
NEPTUNE_CREATION_TYPE_NODE_PROPERTY_NAME_BULK_LOADER_FORMAT,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT, NEPTUNE_HEADER_ID, NEPTUNE_HEADER_LABEL,
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT, NEPTUNE_RELATIONSHIP_HEADER_FROM,
NEPTUNE_RELATIONSHIP_HEADER_TO,
)
class TestDashboardQuery(unittest.TestCase):
def setUp(self) -> None:
self.dashboard_query = DashboardQuery(
dashboard_group_id='dg_id',
dashboard_id='d_id',
query_id='q_id',
query_name='q_name',
url='http://foo.bar/query/baz',
query_text='SELECT * FROM foo.bar'
)
def test_create_nodes(self) -> None:
actual = self.dashboard_query.create_next_node()
actual_serialized = neo4_serializer.serialize_node(actual)
expected = {
'url': 'http://foo.bar/query/baz',
'name': 'q_name',
'id': 'q_id',
'query_text': 'SELECT * FROM foo.bar',
NODE_KEY: '_dashboard://gold.dg_id/d_id/query/q_id',
NODE_LABEL: DashboardQuery.DASHBOARD_QUERY_LABEL
}
self.assertEqual(expected, actual_serialized)
def test_create_nodes_neptune(self) -> None:
actual = self.dashboard_query.create_next_node()
actual_serialized = neptune_serializer.convert_node(actual)
neptune_expected = {
NEPTUNE_HEADER_ID: 'Query:_dashboard://gold.dg_id/d_id/query/q_id',
METADATA_KEY_PROPERTY_NAME_BULK_LOADER_FORMAT: '_dashboard://gold.dg_id/d_id/query/q_id',
NEPTUNE_HEADER_LABEL: DashboardQuery.DASHBOARD_QUERY_LABEL,
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_NODE_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB,
'id:String(single)': 'q_id',
'query_text:String(single)': 'SELECT * FROM foo.bar',
'name:String(single)': 'q_name',
'url:String(single)': 'http://foo.bar/query/baz'
}
self.assertEqual(neptune_expected, actual_serialized)
def METHOD_NAME(self) -> None:
actual = self.dashboard_query.create_next_relation()
actual_serialized = neo4_serializer.serialize_relationship(actual)
expected = {
RELATION_END_KEY: '_dashboard://gold.dg_id/d_id/query/q_id',
RELATION_START_LABEL: 'Dashboard',
RELATION_END_LABEL: DashboardQuery.DASHBOARD_QUERY_LABEL,
RELATION_START_KEY: '_dashboard://gold.dg_id/d_id',
RELATION_TYPE: 'HAS_QUERY',
RELATION_REVERSE_TYPE: 'QUERY_OF'
}
self.assertEqual(expected, actual_serialized)
def test_create_relation_neptune(self) -> None:
actual = self.dashboard_query.create_next_relation()
actual_serialized = neptune_serializer.convert_relationship(actual)
neptune_forward_expected = {
NEPTUNE_HEADER_ID: "{label}:{from_vertex_id}_{to_vertex_id}".format(
from_vertex_id='Dashboard:_dashboard://gold.dg_id/d_id',
to_vertex_id='Query:_dashboard://gold.dg_id/d_id/query/q_id',
label='HAS_QUERY'
),
METADATA_KEY_PROPERTY_NAME_BULK_LOADER_FORMAT: "{label}:{from_vertex_id}_{to_vertex_id}".format(
from_vertex_id='Dashboard:_dashboard://gold.dg_id/d_id',
to_vertex_id='Query:_dashboard://gold.dg_id/d_id/query/q_id',
label='HAS_QUERY'
),
NEPTUNE_RELATIONSHIP_HEADER_FROM: 'Dashboard:_dashboard://gold.dg_id/d_id',
NEPTUNE_RELATIONSHIP_HEADER_TO: 'Query:_dashboard://gold.dg_id/d_id/query/q_id',
NEPTUNE_HEADER_LABEL: 'HAS_QUERY',
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
}
neptune_reversed_expected = {
NEPTUNE_HEADER_ID: "{label}:{from_vertex_id}_{to_vertex_id}".format(
from_vertex_id='Query:_dashboard://gold.dg_id/d_id/query/q_id',
to_vertex_id='Dashboard:_dashboard://gold.dg_id/d_id',
label='QUERY_OF'
),
METADATA_KEY_PROPERTY_NAME_BULK_LOADER_FORMAT: "{label}:{from_vertex_id}_{to_vertex_id}".format(
from_vertex_id='Query:_dashboard://gold.dg_id/d_id/query/q_id',
to_vertex_id='Dashboard:_dashboard://gold.dg_id/d_id',
label='QUERY_OF'
),
NEPTUNE_RELATIONSHIP_HEADER_FROM: 'Query:_dashboard://gold.dg_id/d_id/query/q_id',
NEPTUNE_RELATIONSHIP_HEADER_TO: 'Dashboard:_dashboard://gold.dg_id/d_id',
NEPTUNE_HEADER_LABEL: 'QUERY_OF',
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
}
assert actual is not None
self.assertDictEqual(actual_serialized[0], neptune_forward_expected)
self.assertDictEqual(actual_serialized[1], neptune_reversed_expected)
def test_create_records(self) -> None:
actual = self.dashboard_query.create_next_record()
actual_serialized = mysql_serializer.serialize_record(actual)
expected = {
'rk': '_dashboard://gold.dg_id/d_id/query/q_id',
'name': 'q_name',
'id': 'q_id',
'dashboard_rk': '_dashboard://gold.dg_id/d_id',
'url': 'http://foo.bar/query/baz',
'query_text': 'SELECT * FROM foo.bar'
}
assert actual is not None
self.assertDictEqual(expected, actual_serialized)
self.assertIsNone(self.dashboard_query.create_next_record())
def test_create_next_atlas_entity(self) -> None:
actual = self.dashboard_query.create_next_atlas_entity()
actual_serialized = atlas_serializer.serialize_entity(actual)
expected = {
"typeName": "DashboardQuery",
"operation": "CREATE",
"relationships": "dashboard#Dashboard#_dashboard://gold.dg_id/d_id",
"qualifiedName": "_dashboard://gold.dg_id/d_id/query/q_id",
"name": "q_name",
"id": "q_id",
"url": "http://foo.bar/query/baz",
"queryText": "SELECT * FROM foo.bar"
}
assert actual is not None
self.assertDictEqual(expected, actual_serialized)
self.assertIsNone(self.dashboard_query.create_next_atlas_entity())
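# Hedged addition (not in the original module): a conventional entry point so this
# test file can also be run directly with `python <file>`; unittest discovery picks
# up the TestCase either way.
if __name__ == '__main__':
    unittest.main()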
|
3,312 |
decorator
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import next
from builtins import object
import logging
import sys
from django.utils.translation import gettext as _
from desktop.lib.i18n import force_unicode
from beeswax import data_export
from librdbms.server import dbms
from notebook.connectors.base import Api, QueryError, QueryExpired, _get_snippet_name
LOG = logging.getLogger()
def query_error_handler(func):
def METHOD_NAME(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
message = force_unicode(e)
if 'Invalid query handle' in message or 'Invalid OperationHandle' in message:
raise QueryExpired(e)
else:
if sys.version_info[0] > 2:
raise QueryError(message).with_traceback(sys.exc_info()[2])
else:
raise QueryError, message, sys.exc_info()[2]
return METHOD_NAME
class RdbmsApi(Api):
def _execute(self, notebook, snippet):
query_server = self._get_query_server()
db = dbms.get(self.user, query_server)
db.use(snippet['database']) # TODO: only do the use on the first statement in a multi query
table = db.execute_statement(snippet['statement']) # TODO: execute statement stub in Rdbms
return table
@query_error_handler
def execute(self, notebook, snippet):
table = self._execute(notebook, snippet)
data = list(table.rows())
has_result_set = data is not None
return {
'sync': True,
'has_result_set': has_result_set,
'modified_row_count': 0,
'result': {
'has_more': False,
'data': data if has_result_set else [],
'meta': [{
'name': col['name'] if type(col) is dict else col,
'type': col.get('type', '') if type(col) is dict else '',
'comment': ''
} for col in table.columns_description] if has_result_set else [],
'type': 'table'
}
}
@query_error_handler
def check_status(self, notebook, snippet):
return {'status': 'expired'}
@query_error_handler
def fetch_result(self, notebook, snippet, rows, start_over):
return {
'has_more': False,
'data': [],
'meta': [],
'type': 'table'
}
@query_error_handler
def fetch_result_metadata(self):
pass
@query_error_handler
def cancel(self, notebook, snippet):
return {'status': 0}
@query_error_handler
def get_log(self, notebook, snippet, startFrom=None, size=None):
return 'No logs'
@query_error_handler
def close_statement(self, notebook, snippet):
return {'status': -1}
@query_error_handler
def autocomplete(self, snippet, database=None, table=None, column=None, nested=None, operation=None):
query_server = self._get_query_server()
db = dbms.get(self.user, query_server)
assist = Assist(db)
response = {'status': -1}
if database is None:
response['databases'] = assist.get_databases()
elif table is None:
tables_meta = []
for t in assist.get_tables(database):
tables_meta.append({'name': t, 'type': 'Table', 'comment': ''})
response['tables_meta'] = tables_meta
elif column is None:
columns = assist.get_columns(database, table)
response['columns'] = [col['name'] for col in columns]
response['extended_columns'] = columns
else:
columns = assist.get_columns(database, table)
response['name'] = next((col['name'] for col in columns if column == col['name']), '')
response['type'] = next((col['type'] for col in columns if column == col['name']), '')
response['status'] = 0
return response
@query_error_handler
def get_sample_data(self, snippet, database=None, table=None, column=None, is_async=False, operation=None):
query_server = self._get_query_server()
db = dbms.get(self.user, query_server)
assist = Assist(db)
response = {'status': -1, 'result': {}}
sample_data = assist.get_sample_data(database, table, column)
if sample_data:
response['status'] = 0
response['headers'] = sample_data.columns
response['full_headers'] = sample_data.columns_description
response['rows'] = list(sample_data.rows())
else:
response['message'] = _('Failed to get sample data.')
return response
@query_error_handler
def get_browse_query(self, snippet, database, table, partition_spec=None):
return "SELECT * FROM `%s`.`%s` LIMIT 1000" % (database, table)
@query_error_handler
def explain(self, notebook, snippet):
query_server = self._get_query_server()
db = dbms.get(self.user, query_server)
db.use(snippet['database'])
result = db.explain(snippet['statement'])
rows = list(result.rows())
cols = result.cols()
# Prettify output
explanation = ""
cols_pretty = [(col + ": ") for col in cols]
col_width = max(len(col) for col in cols_pretty)
for index, col in enumerate(cols_pretty):
lines = []
for row in rows:
lines += str(row[index]).split("\n")
explanation += col.ljust(col_width) + lines[0] + "\n"
for line in lines[1:]:
explanation += (" " * col_width) + line + "\n"
return {
'status': 0,
'explanation': explanation,
'statement': snippet['statement'],
}
def _get_query_server(self):
if self.query_server:
return self.query_server
else:
return dbms.get_query_server_config(server=self.interpreter)
class Assist(object):
def __init__(self, db):
self.db = db
def get_databases(self):
return self.db.get_databases()
def get_tables(self, database, table_names=[]):
self.db.use(database)
return self.db.get_tables(database, table_names)
def get_columns(self, database, table):
return self.db.get_columns(database, table, names_only=False)
def get_sample_data(self, database, table, column=None):
return self.db.get_sample_data(database, table, column)
class FixedResult(object):
def __init__(self, result):
self.result = result
self.has_more = False
def fetch(self, handle=None, start_over=None, rows=None):
return self.result
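# Hedged usage sketch (illustrative, not part of the original module): query_error_handler
# is meant to wrap any API method so that exceptions mentioning an invalid handle surface
# as QueryExpired and everything else as QueryError.
@query_error_handler
def _example_expired_call():
    raise Exception('Invalid query handle: handle-42')
# Calling _example_expired_call() would raise QueryExpired instead of the bare Exception.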
|
3,313 |
test var collection contains
|
import pytest
from pyaerocom import const
from pyaerocom.data import resources
from pyaerocom.exceptions import VariableDefinitionError
from pyaerocom.varcollection import VarCollection
from pyaerocom.variable import Variable
def test_VARS_is_VarCollection():
assert isinstance(const.VARS, VarCollection)
@pytest.fixture()
def collection() -> VarCollection:
with resources.path("pyaerocom.data", "variables.ini") as path:
assert path.exists()
return VarCollection(str(path))
def test_invalid_entries(collection: VarCollection):
invalid = ("_" in var for var in collection.all_vars)
assert sum(invalid) == 0
@pytest.mark.parametrize(
"var_ini,exception,error",
[
pytest.param(None, ValueError, "Invalid input for var_ini, need str", id="ValueError"),
pytest.param(
"/bla/blub", FileNotFoundError, "File /bla/blub does not exist", id="FileNotFoundError"
),
],
)
def test_VarCollection___init___error(var_ini, exception, error: str):
with pytest.raises(exception) as e:
VarCollection(var_ini)
assert str(e.value) == error
def test_VarCollection_add_var(collection: VarCollection):
var = Variable(var_name="concpm10gt1", units="ug m-3")
collection.add_var(var)
assert var.var_name in collection.all_vars
def test_VarCollection_add_var_error(collection: VarCollection):
var = Variable(var_name="concpm10", units="ug m-3")
with pytest.raises(VariableDefinitionError) as e:
collection.add_var(var)
assert str(e.value) == f"variable with name {var.var_name} is already defined"
def test_VarCollection_delete_var(collection: VarCollection):
var = Variable(var_name="concpm10", units="ug m-3")
collection.delete_variable(var.var_name)
assert var.var_name not in collection.all_vars
def test_VarCollection_delete_var_error(collection: VarCollection):
var = Variable(var_name="concpm10gt1", units="ug m-3")
with pytest.raises(VariableDefinitionError) as e:
collection.delete_variable(var.var_name)
assert str(e.value) == f"No such variable {var.var_name} in VarCollection"
@pytest.mark.parametrize("var_name", ["blablub42", "od550aer"])
def test_VarCollection_get_var(collection: VarCollection, var_name: str):
collection.add_var(Variable(var_name="blablub42"))
assert isinstance(collection.get_var(var_name), Variable)
def test_VarCollection_get_var_error(collection: VarCollection):
var_name = "bla"
with pytest.raises(VariableDefinitionError) as e:
collection.get_var(var_name)
assert str(e.value) == f"Error (VarCollection): input variable {var_name} is not supported"
@pytest.mark.parametrize(
"search_pattern,num",
[
("*blaaaaaaa*", 0),
("dep*", 0),
("od*", 26),
("conc*", 82),
],
)
def test_VarCollection_find(collection: VarCollection, search_pattern: str, num: int):
result = collection.find(search_pattern)
assert len(result) == num
def test_VarCollection_delete_var_MULTIDEF(collection: VarCollection):
var_name = "concpm10"
collection.all_vars.append(var_name)
with pytest.raises(VariableDefinitionError) as e:
collection.delete_variable(var_name)
assert f"found multiple matches for variable {var_name} in VarCollection" in str(e.value)
def test_VarCollection___dir__(collection: VarCollection):
result = dir(collection)
all_vars = collection.all_vars
assert len(result) == len(all_vars)
assert result == sorted(all_vars)
@pytest.mark.parametrize("var_name,found", [("blablub", False), ("od550aer", True)])
def METHOD_NAME(collection: VarCollection, var_name: str, found: bool):
assert (var_name in collection) == found
def test_VarCollection___len__(collection: VarCollection):
assert len(collection) > 0
def test_VarCollection___getitem__(collection: VarCollection):
assert isinstance(collection["od550aer"], Variable)
def test_VarCollection___getitem___error(collection: VarCollection):
var_name = "blaaaa"
assert var_name not in collection
with pytest.raises(VariableDefinitionError) as e:
collection[var_name]
assert str(e.value).endswith(f"input variable {var_name} is not supported")
def test_VarCollection___repr__(collection: VarCollection):
assert repr(collection).startswith("VarCollection")
def test_VarCollection___str__(collection: VarCollection):
assert str(collection).startswith("VarCollection")
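# Hedged sketch (added for illustration): the membership protocol exercised by the
# parametrized test above, written as direct usage; assumes the bundled variables.ini
# defines od550aer, as the other tests here already do.
def _example_membership(collection: VarCollection) -> None:
    assert "od550aer" in collection  # delegates to VarCollection.__contains__
    assert "definitely_not_a_variable" not in collection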
|
3,314 |
update unindexed threads
|
"""
4Chan board scraper - indexes threads and queues them for scraping
"""
import json
from backend.lib.scraper import BasicJSONScraper
from common.lib.exceptions import JobAlreadyExistsException
class BoardScraper4chan(BasicJSONScraper):
"""
Scrape 4chan boards
The threads found aren't saved themselves, but new jobs are created to scrape the
individual threads so post data can be saved
"""
type = "fourchan-board"
max_workers = 2 # should probably be equivalent to the amount of boards to scrape
required_fields = ["no", "last_modified"]
position = 0
def process(self, data):
"""
Process scraped board data
For each thread, a record is inserted into the database if it does not exist yet
:param dict data: The board data, parsed JSON data
"""
self.datasource = self.type.split("-")[0]
new_threads = 0
if not data:
self.log.error("No thread data from board scrape of %s/%s/" % (self.datasource, self.job.data["remote_id"]))
return False
index_thread_ids = []
for page in data:
if page.get('threads') is None:
self.log.error(
"No thread data from board scrape of %s/%s/" % (self.datasource, self.job.data["remote_id"]))
return False
for thread in page["threads"]:
self.position += 1
new_threads += self.save_thread(thread)
index_thread_ids.append(thread["id"] if "id" in thread else thread["no"])
self.log.info("Board scrape for %s/%s/ yielded %i new threads" % (self.datasource, self.job.data["remote_id"], new_threads))
# Also update threads that were not yet seen as not archived or closed, but were also not in the index.
# These were either archived or deleted by moderators.
self.METHOD_NAME(index_thread_ids)
def save_thread(self, thread):
"""
Save thread
:param dict thread: Thread data
:return int: Number of new threads created (so 0 or 1)
"""
# 8kun for some reason doesn't always include last_modified
# in that case the timestamp will be 0
if self.datasource == "8kun" and "last_modified" in self.required_fields:
self.required_fields.remove("last_modified")
# check if we have everything we need
missing = set(self.required_fields) - set(thread.keys())
if missing != set():
self.log.warning("Missing fields %s in scraped thread from %s/%s/, ignoring: got %s" % (repr(missing), self.datasource, self.job.data["remote_id"], repr(thread)))
return False
board_id = self.job.data["remote_id"].split("/").pop()
thread_id = thread["id"] if "id" in thread else thread["no"]
thread_data = {
"id": thread_id,
"board": board_id,
"index_positions": ""
}
# schedule a job for scraping the thread's posts
try:
jobtype = self.type.replace("-board", "-thread")
self.queue.add_job(jobtype=jobtype, remote_id=thread["no"], details={"board": board_id})
except JobAlreadyExistsException:
# this might happen if the workers can't keep up with the queue
pass
# add database record for thread, if none exists yet
# 8chan supports cyclical threads which have an ID that is *not* the first post's. The
# following line accounts for this.
thread_row = self.db.fetchone("SELECT * FROM threads_" + self.prefix + " WHERE id = %s AND board = %s", (str(thread_id), board_id))
new_thread = 0
if not thread_row:
new_thread += 1
self.db.insert("threads_" + self.prefix, thread_data)
replacements = [self.init_time, thread.get("last_modified", 0)]
if "fourchan" in self.type:
# update timestamps and position, but only for 4chan
# other chans have different strategies and often have "infinite"
# threads which would rapidly bloat the database with an infinite
# stream of thread positions
position_update = str(self.init_time) + ":" + str(self.position) + ","
positions_bit = ", index_positions = CONCAT(index_positions, %s)"
replacements.append(position_update)
else:
positions_bit = ""
replacements.extend([str(thread_id), board_id])
self.db.execute("UPDATE threads_" + self.prefix + " SET timestamp_scraped = %s, timestamp_modified = %s" + positions_bit + " WHERE id = %s AND board = %s",
replacements)
return new_thread
def METHOD_NAME(self, index_thread_ids):
"""
Add a job for threads that aren't in the index, but are also still marked as active
(i.e. `timestamp_deleted` or `timestamp_archived` is still 0).
:param list index_thread_ids: List of IDs of threads that were already in the index.
"""
board_id = self.job.data["remote_id"].split("/").pop()
# We're checking threads that
# 1) are not in the index
# 2) are not more than an hour old; we already covered older ones in the regular scrape,
# and if not, it's likely that 4CAT wasn't running at the time, so we can't verify
# whether the thread is archived or deleted.
# 3) have 0 as a value for both `timestamp_deleted` and `timestamp_archived`
unindexed_threads = self.db.fetchall("SELECT id FROM threads_" + self.prefix + " WHERE board = %s AND timestamp_deleted = 0 AND timestamp_archived = 0 AND timestamp_modified > (EXTRACT(epoch FROM NOW()) - 3600) AND id NOT IN %s",
(board_id, tuple(index_thread_ids)))
if unindexed_threads:
to_check = 0
for thread in unindexed_threads:
# Schedule a job for scraping the thread's posts,
# which also updates its deleted/archived status
try:
# Add a new thread job if it isn't in the jobs table anymore
jobtype = self.type.replace("-board", "-thread")
query = "SELECT remote_id FROM jobs WHERE remote_id = '%s' AND details = '%s';" % (str(thread["id"]), json.dumps({"board": board_id}))
remote_id = self.db.fetchone(query)
if not remote_id:
self.queue.add_job(jobtype=jobtype, remote_id=str(thread["id"]), details={"board": board_id})
to_check += 1
except JobAlreadyExistsException:
# this might happen if the workers can't keep up with the queue
pass
if to_check:
self.log.info("Board scrape for %s/%s/ yielded %s threads that disappeared from the index, updating their status" % (self.datasource, self.job.data["remote_id"], to_check))
def get_url(self):
"""
Get URL to scrape for the current job
:return string: URL to scrape
"""
board_id = self.job.data["remote_id"].split("/").pop()
return "http://a.4cdn.org/%s/threads.json" % board_i
|
3,315 |
data to dict
|
"""
Parse the Audio SNIPS corpus
Authors:
* Heng-Jui Chang 2022
"""
import logging
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List
from tqdm import trange
from .base import Corpus
__all__ = [
"SNIPS",
]
class SNIPS(Corpus):
def __init__(
self,
dataset_root: str,
train_speakers: List[str],
valid_speakers: List[str],
test_speakers: List[str],
) -> None:
self.dataset_root = Path(dataset_root)
self.train_speakers = train_speakers
self.valid_speakers = valid_speakers
self.test_speakers = test_speakers
self.data_dict = self._collect_data(
self.dataset_root, train_speakers, valid_speakers, test_speakers
)
self.train = self.METHOD_NAME(self.data_dict, ["train"])
self.valid = self.METHOD_NAME(self.data_dict, ["valid"])
self.test = self.METHOD_NAME(self.data_dict, ["test"])
self._data = OrderedDict()
self._data.update(self.train)
self._data.update(self.valid)
self._data.update(self.test)
@property
def all_data(self):
return self._data
@property
def data_split_ids(self):
return (
list(self.train.keys()),
list(self.valid.keys()),
list(self.test.keys()),
)
@staticmethod
def _collect_data(
dataset_root: str,
train_speakers: List[str],
valid_speakers: List[str],
test_speakers: List[str],
) -> Dict[str, Dict[str, Any]]:
# Load transcription
transcripts_file = open(dataset_root / "all.iob.snips.txt").readlines()
transcripts = {}
for line in transcripts_file:
line = line.strip().split(" ")
index = line[0] # {speaker}-snips-{split}-{index}
sent = " ".join(line[1:])
transcripts[index] = sent
# List wave files
data_dict = {}
for split, speaker_list in [
("train", train_speakers),
("valid", valid_speakers),
("test", test_speakers),
]:
wav_list = list((dataset_root / split).rglob("*.wav"))
new_wav_list, name_list, spkr_list = [], [], []
uf = 0
for i in trange(len(wav_list), desc="checking files"):
uid = wav_list[i].stem
if uid in transcripts:
spkr = uid.split("-")[0]
if spkr in speaker_list:
new_wav_list.append(str(wav_list[i]))
name_list.append(uid)
spkr_list.append(spkr)
else:
logging.info("%s not found in text file", wav_list[i])
uf += 1
logging.info("%d wav file with label not found in text file!" % uf)
wav_list = new_wav_list
logging.info(
f"loaded audio from {len(speaker_list)} speakers {str(speaker_list)} with {len(wav_list)} examples."
)
assert len(wav_list) > 0, "No data found @ {}".format(dataset_root / split)
text_list = [transcripts[name] for name in name_list]
wav_list, name_list, text_list, spkr_list = zip(
*[
(wav, name, text, spkr)
for (wav, name, text, spkr) in sorted(
zip(wav_list, name_list, text_list, spkr_list),
key=lambda x: x[1],
)
]
)
data_dict[split] = {
"name_list": name_list,
"wav_list": wav_list,
"text_list": text_list,
"spkr_list": spkr_list,
}
return data_dict
@staticmethod
def METHOD_NAME(
data_dict: Dict[str, Dict[str, List[Any]]], splits: List[str]
) -> dict:
data = dict(
{
name: {
"wav_path": data_dict[split]["wav_list"][i],
"transcription": " ".join(
data_dict[split]["text_list"][i]
.split("\t")[0]
.strip()
.split(" ")[1:-1]
),
"iob": " ".join(
data_dict[split]["text_list"][i]
.split("\t")[1]
.strip()
.split(" ")[1:-1]
),
"intent": data_dict[split]["text_list"][i]
.split("\t")[1]
.strip()
.split(" ")[-1],
"speaker": data_dict[split]["spkr_list"][i],
"corpus_split": split,
}
for split in splits
for i, name in enumerate(data_dict[split]["name_list"])
}
)
return data
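# Hedged sketch of the transcript layout assumed by METHOD_NAME above: each text entry
# pairs the word sequence with its IOB tags plus a trailing intent label, separated by a
# tab, and the first/last tokens of each part are BOS/EOS markers that get dropped.
def _example_parse(text: str) -> Dict[str, str]:
    words_part, tags_part = text.split("\t")
    return {
        "transcription": " ".join(words_part.strip().split(" ")[1:-1]),
        "iob": " ".join(tags_part.strip().split(" ")[1:-1]),
        "intent": tags_part.strip().split(" ")[-1],
    }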
|
3,316 |
flatten
|
from urllib.parse import parse_qs, unquote, urlparse
from braceexpand import braceexpand
import requests
# https://github.com/mozilla/bedrock/blob/master/tests/redirects/base.py
def get_abs_url(url, base_url):
if url.startswith("/"):
# urljoin messes with query strings too much
return "".join([base_url, url])
return url
# https://github.com/mozilla/bedrock/blob/master/tests/redirects/base.py
def url_test(
url,
location=None,
status_code=requests.codes.moved_permanently,
req_headers=None,
req_kwargs=None,
resp_headers=None,
query=None,
follow_redirects=False,
final_status_code=requests.codes.ok,
):
r"""
Function for producing a config dict for the redirect test.
You can use simple bash style brace expansion in the `url` and `location`
values. If you need the `location` to change with the `url` changes you must
use the same number of expansions or the `location` will be treated as non-expandable.
If you use brace expansion this function will return a list of dicts instead of a dict.
You must use the `flatten` function provided to prepare your test fixture if you do this.
If you combine brace expansion with a compiled regular expression pattern you must
escape any backslashes as this is the escape character for brace expansion.
example:
url_test('/about/drivers{/,.html}', 'https://wiki.mozilla.org/Firefox/Drivers'),
url_test('/projects/index.{de,fr,hr,sq}.html', '/{de,fr,hr,sq}/firefox/products/'),
url_test('/firefox/notes/', re.compile(r'\/firefox\/[\d\.]+\/releasenotes\/')),
url_test('/firefox/android/{,beta/}notes/', re.compile(r'\\/firefox\\/android\\/[\\d\\.]+{,beta}\\/releasenotes\\/')),
:param url: The URL in question (absolute or relative).
:param location: If a redirect, either the expected value or a compiled regular expression to match the "Location" header.
:param status_code: Expected status code from the request.
:param req_headers: Extra headers to send with the request.
:param req_kwargs: Extra arguments to pass to requests.get()
:param resp_headers: Dict of headers expected in the response.
:param query: Dict of expected query params in `location` URL.
:param follow_redirects: Boolean indicating whether redirects should be followed.
:param final_status_code: Expected status code after following any redirects.
:return: dict or list of dicts
"""
test_data = {
"url": url,
"location": location,
"status_code": status_code,
"req_headers": req_headers,
"req_kwargs": req_kwargs,
"resp_headers": resp_headers,
"query": query,
"follow_redirects": follow_redirects,
"final_status_code": final_status_code,
}
expanded_urls = list(braceexpand(url))
num_urls = len(expanded_urls)
if num_urls == 1:
return test_data
new_urls = []
if location:
expanded_locations = list(braceexpand(test_data["location"]))
num_locations = len(expanded_locations)
for i, expanded_url in enumerate(expanded_urls):
data = test_data.copy()
data["url"] = expanded_url
if location and num_urls == num_locations:
data["location"] = expanded_locations[i]
new_urls.append(data)
return new_urls
def assert_valid_url(
url,
location=None,
status_code=requests.codes.moved_permanently,
req_headers=None,
req_kwargs=None,
resp_headers=None,
query=None,
base_url=None,
follow_redirects=False,
final_status_code=requests.codes.ok,
):
"""
Define a test of a URL's response.
:param url: The URL in question (absolute or relative).
:param location: If a redirect, either the expected value or a compiled regular expression to match the "Location" header.
:param status_code: Expected status code from the request.
:param req_headers: Extra headers to send with the request.
:param req_kwargs: Extra arguments to pass to requests.get()
:param resp_headers: Dict of headers expected in the response.
:param base_url: Base URL for the site to test.
:param query: Dict of expected query params in `location` URL.
:param follow_redirects: Boolean indicating whether redirects should be followed.
:param final_status_code: Expected status code after following any redirects.
"""
kwargs = {"allow_redirects": follow_redirects}
if req_headers:
kwargs["headers"] = req_headers
if req_kwargs:
kwargs.update(req_kwargs)
abs_url = get_abs_url(url, base_url)
resp = requests.get(abs_url, **kwargs)
# so that the value will appear in locals in test output
resp_location = resp.headers.get("location")
if follow_redirects:
assert (
resp.status_code == final_status_code
), f"got {resp.status_code}, expected {final_status_code}"
else:
assert (
resp.status_code == status_code
), f"got {resp.status_code}, expected {status_code}"
if location and not follow_redirects:
if query:
# all query values must be lists
for k, v in query.items():
if isinstance(v, str):
query[k] = [v]
# parse the QS from resp location header and compare to query arg
# since order doesn't matter.
resp_parsed = urlparse(resp_location)
assert query == parse_qs(resp_parsed.query)
# strip off query for further comparison
resp_location = resp_location.split("?")[0]
assert location == unquote(
resp_location
), f"got {unquote(resp_location)}, expected {location}"
if resp_headers and not follow_redirects:
def convert_to_set(header):
return frozenset(d.strip() for d in header.lower().split(","))
for name, value in resp_headers.items():
assert name in resp.headers
assert convert_to_set(resp.headers[name]) == convert_to_set(value)
# https://github.com/mozilla/bedrock/blob/master/tests/redirects/base.py
def METHOD_NAME(urls_list):
"""Take a list of dicts which may itself contain some lists of dicts, and
return a generator that will return just the dicts in sequence.
Example:
list(flatten([{'dude': 'jeff'}, [{'walter': 'walter'}, {'donny': 'dead'}]]))
> [{'dude': 'jeff'}, {'walter': 'walter'}, {'donny': 'dead'}]
"""
for url in urls_list:
if isinstance(url, dict):
yield url
else:
yield from url
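# Hedged usage sketch combining url_test with the generator above (METHOD_NAME, i.e.
# flatten): brace expansion turns one call into several dicts, and flattening yields a
# flat fixture list ready for parametrization. The URLs are illustrative only.
URL_FIXTURES = list(
    METHOD_NAME(
        [
            url_test("/about/", "/en-US/about/"),
            url_test("/projects/index.{de,fr}.html", "/{de,fr}/firefox/"),
        ]
    )
)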
|
3,317 |
test get all form definitions grouped by
|
import uuid
from django.test import TestCase
from corehq.apps.app_manager.tests.app_factory import AppFactory
from corehq.apps.reports.analytics.couchaccessors import (
SimpleFormInfo,
get_all_form_definitions_grouped_by_app_and_xmlns,
get_all_form_details,
get_form_details_for_app,
get_form_details_for_app_and_module,
get_form_details_for_app_and_xmlns,
get_form_details_for_xmlns,
update_reports_analytics_indexes,
)
class SetupSimpleAppMixin(object):
@classmethod
def class_setup(cls):
cls.domain = uuid.uuid4().hex
cls.f1_xmlns = 'xmlns1'
cls.f2_xmlns = 'xmlns2'
app_factory = AppFactory(domain=cls.domain)
module1, form1 = app_factory.new_basic_module('m1', '_casetype')
module2, form2 = app_factory.new_basic_module('m2', '_casetype2')
form1.xmlns = cls.f1_xmlns
form2.xmlns = cls.f2_xmlns
app_factory.app.save()
cls.app = app_factory.app
deleted_app_factory = AppFactory(domain=cls.domain)
deleted_module1, deleted_form1 = deleted_app_factory.new_basic_module('del-m1', '_casetype3')
cls.deleted_xmlns = 'xmlns3'
deleted_form1.xmlns = cls.deleted_xmlns
deleted_app_factory.app.doc_type = 'Application-Deleted'
# make sure the ID comes after the primary app
deleted_app_factory.app._id = '{}z'.format(cls.app.id)
deleted_app_factory.app.save()
cls.deleted_app = deleted_app_factory.app
cls.xmlnses = [cls.f1_xmlns, cls.f2_xmlns, cls.deleted_xmlns]
update_reports_analytics_indexes()
def _assert_form_details_match(self, index, details):
expected_app = self.app if index < 2 else self.deleted_app
self.assertEqual(expected_app._id, details.app.id)
self.assertEqual(index % 2, details.module.id)
self.assertEqual(0, details.form.id)
self.assertEqual(self.xmlnses[index], details.xmlns)
self.assertFalse(details.is_user_registration)
class ReportAppAnalyticsTest(SetupSimpleAppMixin, TestCase):
@classmethod
def setUpClass(cls):
super(ReportAppAnalyticsTest, cls).setUpClass()
cls.class_setup()
def METHOD_NAME(self):
self.assertEqual([], get_all_form_definitions_grouped_by_app_and_xmlns('missing'))
def test_get_all_form_definitions_grouped_by_app_and_xmlns(self):
self.assertEqual(
[SimpleFormInfo(self.app._id, self.f1_xmlns),
SimpleFormInfo(self.app._id, self.f2_xmlns),
SimpleFormInfo(self.deleted_app._id, self.deleted_xmlns)],
get_all_form_definitions_grouped_by_app_and_xmlns(self.domain)
)
def test_get_all_form_details_no_data(self):
self.assertEqual([], get_all_form_details('missing'))
def test_get_all_form_details(self):
app_structures = get_all_form_details(self.domain)
self.assertEqual(3, len(app_structures))
for i, details in enumerate(app_structures):
self._assert_form_details_match(i, details)
def test_get_all_form_details_active(self):
details = get_all_form_details(self.domain, deleted=False)
self.assertEqual(2, len(details))
for i, detail in enumerate(details):
self._assert_form_details_match(i, detail)
def test_get_all_form_details_deleted(self):
details = get_all_form_details(self.domain, deleted=True)
self.assertEqual(1, len(details))
self._assert_form_details_match(2, details[0])
def test_get_form_details_for_xmlns_no_data(self):
self.assertEqual([], get_form_details_for_xmlns('missing', 'missing'))
self.assertEqual([], get_form_details_for_xmlns(self.domain, 'missing'))
self.assertEqual([], get_form_details_for_xmlns('missing', self.f1_xmlns))
def test_get_form_details_for_xmlns(self):
[details_1] = get_form_details_for_xmlns(self.domain, self.f1_xmlns)
[details_2] = get_form_details_for_xmlns(self.domain, self.f2_xmlns)
for i, details in enumerate([details_1, details_2]):
self._assert_form_details_match(i, details)
def test_get_form_details_for_app_no_data(self):
self.assertEqual([], get_form_details_for_app('missing', 'missing'))
self.assertEqual([], get_form_details_for_app('missing', self.app.id))
self.assertEqual([], get_form_details_for_app(self.domain, 'missing'))
def test_get_form_details_for_app(self):
details = get_form_details_for_app(self.domain, self.app.id)
for i, detail in enumerate(details):
self._assert_form_details_match(i, detail)
def test_get_form_details_for_app_and_module_no_data(self):
self.assertEqual([], get_form_details_for_app_and_module('missing', self.app.id, 0))
self.assertEqual([], get_form_details_for_app_and_module(self.domain, 'missing', 0))
self.assertEqual([], get_form_details_for_app_and_module(self.domain, self.app.id, 3))
def test_get_form_details_for_app_and_module(self):
for i in range(2):
[details] = get_form_details_for_app_and_module(self.domain, self.app.id, i)
self._assert_form_details_match(i, details)
def test_get_form_details_for_app_and_xmlns_no_data(self):
self.assertEqual([], get_form_details_for_app_and_xmlns('missing', self.app.id, self.f1_xmlns))
self.assertEqual([], get_form_details_for_app_and_xmlns(self.domain, 'missing', self.f1_xmlns))
self.assertEqual([], get_form_details_for_app_and_xmlns(self.domain, self.app.id, 'missing'))
self.assertEqual(
[], get_form_details_for_app_and_xmlns(self.domain, self.app.id, self.f1_xmlns, deleted=True)
)
def test_get_form_details_for_app_and_xmlns(self):
for i in range(2):
[details] = get_form_details_for_app_and_xmlns(self.domain, self.app.id, self.xmlnses[i])
self._assert_form_details_match(i, details)
|
3,318 |
mock time
|
"""retry tests."""
import asyncio
import dataclasses
import datetime
import sys
from typing import Iterator
from unittest import mock
import pytest
from wandb.sdk.lib import retry
if sys.version_info >= (3, 10):
asyncio_run = asyncio.run
else:
def asyncio_run(coro):
return asyncio.new_event_loop().run_until_complete(coro)
@dataclasses.dataclass
class MockTime:
now: datetime.datetime
sleep: mock.Mock
sleep_async: mock.Mock
@pytest.fixture(autouse=True)
def METHOD_NAME() -> Iterator[MockTime]:
"""Mock out the now()/sleep() funcs used by the retry logic."""
now = datetime.datetime.now()
def _sleep(seconds):
nonlocal now
now += datetime.timedelta(seconds=seconds)
async def _sleep_async(seconds):
nonlocal now
now += datetime.timedelta(seconds=seconds)
await asyncio.sleep(1e-9) # let the event loop shuffle stuff around
with mock.patch(
"wandb.sdk.lib.retry.NOW_FN",
wraps=lambda: now,
) as mock_now, mock.patch(
"wandb.sdk.lib.retry.SLEEP_FN", side_effect=_sleep
) as mock_sleep, mock.patch(
"wandb.sdk.lib.retry.SLEEP_ASYNC_FN", side_effect=_sleep_async
) as mock_sleep_async:
yield MockTime(now=mock_now, sleep=mock_sleep, sleep_async=mock_sleep_async)
def test_retry_respects_num_retries():
func = mock.Mock()
func.side_effect = ValueError
num_retries = 7
retrier = retry.Retry(
func,
num_retries=num_retries,
retryable_exceptions=(ValueError,),
)
with pytest.raises(ValueError):
retrier()
assert func.call_count == num_retries + 1
def test_retry_call_num_retries_overrides_default_num_retries():
func = mock.Mock()
func.side_effect = ValueError
retrier = retry.Retry(
func,
retryable_exceptions=(ValueError,),
)
num_retries = 4
with pytest.raises(ValueError):
retrier(num_retries=num_retries)
assert func.call_count == num_retries + 1
def test_retry_respects_num_retries_across_multiple_calls():
func = mock.Mock()
func.side_effect = ValueError
num_retries = 7
retrier = retry.Retry(
func,
num_retries=num_retries,
retryable_exceptions=(ValueError,),
)
with pytest.raises(ValueError):
retrier()
with pytest.raises(ValueError):
retrier()
assert func.call_count == 2 * (num_retries + 1)
def test_retry_respects_retryable_exceptions():
func = mock.Mock()
func.side_effect = ValueError
retrier = retry.Retry(
func,
retryable_exceptions=(ValueError,),
num_retries=3,
)
with pytest.raises(ValueError):
retrier()
assert func.call_count > 1
func.reset_mock()
func.side_effect = IndexError
retrier = retry.Retry(
func,
retryable_exceptions=(ValueError,),
)
with pytest.raises(IndexError):
retrier()
assert func.call_count == 1
def test_retry_respects_secondary_timeout(METHOD_NAME: MockTime):
func = mock.Mock()
func.side_effect = ValueError
t0 = METHOD_NAME.now()
def check_retry_timeout(e):
if isinstance(e, ValueError):
return datetime.timedelta(minutes=10)
retry_timedelta = datetime.timedelta(hours=7)
retrier = retry.Retry(
func,
retryable_exceptions=(ValueError,),
check_retry_fn=check_retry_timeout,
retry_timedelta=retry_timedelta,
num_retries=10000,
)
with pytest.raises(ValueError):
retrier()
# add some slop for other timeout calls, should be about 10 minutes of retries
assert 10 <= (METHOD_NAME.now() - t0).total_seconds() / 60 < 20
class MyError(Exception):
pass
SECOND = datetime.timedelta(seconds=1)
class TestFilteredBackoff:
def test_reraises_exc_failing_predicate(self):
wrapped = mock.Mock(spec=retry.Backoff)
filtered = retry.FilteredBackoff(
filter=lambda e: False,
wrapped=wrapped,
)
with pytest.raises(MyError):
filtered.next_sleep_or_reraise(MyError("don't retry me"))
wrapped.next_sleep_or_reraise.assert_not_called()
def test_delegates_exc_passing_predicate(self):
retriable_exc = MyError("retry me")
wrapped = mock.Mock(
spec=retry.Backoff,
next_sleep_or_reraise=mock.Mock(return_value=123 * SECOND),
)
filtered = retry.FilteredBackoff(
filter=lambda e: e == retriable_exc,
wrapped=wrapped,
)
assert filtered.next_sleep_or_reraise(retriable_exc) == 123 * SECOND
wrapped.next_sleep_or_reraise.assert_called_once_with(retriable_exc)
class TestExponentialBackoff:
def test_respects_max_retries(self):
backoff = retry.ExponentialBackoff(
initial_sleep=SECOND, max_sleep=SECOND, max_retries=3
)
for _ in range(3):
backoff.next_sleep_or_reraise(MyError())
with pytest.raises(MyError):
backoff.next_sleep_or_reraise(MyError())
def test_respects_timeout(self, METHOD_NAME: MockTime):
t0 = METHOD_NAME.now()
dt = 300 * SECOND
backoff = retry.ExponentialBackoff(
initial_sleep=SECOND, max_sleep=10 * dt, timeout_at=t0 + dt
)
with pytest.raises(MyError):
for _ in range(9999):
METHOD_NAME.sleep(
backoff.next_sleep_or_reraise(MyError()).total_seconds()
)
assert t0 + dt <= METHOD_NAME.now() <= t0 + 2 * dt
def test_respects_max_sleep_if_smaller_than_initial_sleep(
self, METHOD_NAME: MockTime
):
max_sleep = 10 * SECOND
backoff = retry.ExponentialBackoff(
initial_sleep=2 * max_sleep, max_sleep=max_sleep
)
assert backoff.next_sleep_or_reraise(MyError()) == max_sleep
class TestRetryAsync:
def test_follows_backoff_schedule(self, METHOD_NAME: MockTime):
fn = mock.Mock(side_effect=MyError("oh no"))
with pytest.raises(MyError):
asyncio_run(
retry.retry_async(
mock.Mock(
spec=retry.Backoff,
next_sleep_or_reraise=mock.Mock(
side_effect=[
1 * SECOND,
2 * SECOND,
MyError(),
]
),
),
fn,
"pos1",
"pos2",
kw1="kw1",
kw2="kw2",
)
)
METHOD_NAME.sleep_async.assert_has_calls(
[
mock.call(1.0),
mock.call(2.0),
]
)
fn.assert_has_calls(
[
mock.call("pos1", "pos2", kw1="kw1", kw2="kw2"),
mock.call("pos1", "pos2", kw1="kw1", kw2="kw2"),
mock.call("pos1", "pos2", kw1="kw1", kw2="kw2"),
]
)
def test_calls_on_exc(self, METHOD_NAME: MockTime):
backoff = mock.Mock(
spec=retry.Backoff,
next_sleep_or_reraise=mock.Mock(return_value=1 * SECOND),
)
excs = [MyError("one"), MyError("two")]
fn_sync = mock.Mock(
side_effect=[
*excs,
lambda: None,
],
)
async def fn():
return fn_sync()
on_exc = mock.Mock()
asyncio_run(retry.retry_async(backoff, fn, on_exc=on_exc))
on_exc.assert_has_calls(
[
mock.call(excs[0]),
mock.call(excs[1]),
]
)
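# Hedged illustration (added): with the autouse fixture above, time is fully virtual;
# sleeping only advances the patched clock, so timeout-based retry tests finish instantly.
def _example_virtual_clock(METHOD_NAME: MockTime) -> None:
    t0 = METHOD_NAME.now()
    METHOD_NAME.sleep(3600)  # no real waiting happens
    assert METHOD_NAME.now() - t0 == datetime.timedelta(seconds=3600)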
|
3,319 |
set service properties
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.profiles import get_sdk, ResourceType
from ._client_factory import generic_data_service_factory
class ServiceProperties:
def __init__(self, cli_ctx, name, service, account_name=None, account_key=None, connection_string=None,
sas_token=None):
self.cli_ctx = cli_ctx
self.name = name
self.client = generic_data_service_factory(cli_ctx, service, name=account_name, key=account_key,
connection_string=connection_string, sas_token=sas_token)
if not self.client:
from knack.util import CLIError
raise CLIError('Failed to initialize data client.')
def get_service_properties(self):
return getattr(self.client, 'get_{}_service_properties'.format(self.name))
def METHOD_NAME(self):
return getattr(self.client, 'set_{}_service_properties'.format(self.name))
def get_logging(self, timeout=None):
return self.get_service_properties()(timeout=timeout).__dict__['logging']
def set_logging(self, read, write, delete, retention, timeout=None, version=None):
t_logging, t_retention_policy = get_sdk(self.cli_ctx, ResourceType.DATA_STORAGE, 'Logging', 'RetentionPolicy',
mod='common.models')
retention_policy = t_retention_policy(enabled=retention != 0, days=retention)
logging = t_logging(delete, read, write, retention_policy)
if version:
logging.version = str(version)
return self.METHOD_NAME()(logging=logging, timeout=timeout)
def disable_logging(self, timeout=None):
return self.set_logging(read=False, write=False, delete=False, retention=0, timeout=timeout)
def get_cors(self, timeout=None):
return self.get_service_properties()(timeout=timeout).__dict__['cors']
def add_cors(self, origins, methods, max_age, exposed_headers=None, allowed_headers=None, timeout=None):
from azure.common import AzureHttpError
t_cors_rule = get_sdk(self.cli_ctx, ResourceType.DATA_STORAGE, 'CorsRule', mod='common.models')
cors = self.get_cors(timeout)
new_rule = t_cors_rule(origins, methods, max_age, exposed_headers, allowed_headers)
cors.append(new_rule)
try:
return self.METHOD_NAME()(cors=cors, timeout=timeout)
except AzureHttpError as ex:
# The service issue: https://msazure.visualstudio.com/DefaultCollection/One/_workitems/edit/1247479.
# This workaround can be removed once the service is updated.
if ex.status_code == 400 and len(cors) > 5:
from knack.util import CLIError
raise CLIError('Failed to add CORS rules. No more than 5 CORS rule can be added.')
raise ex
def clear_cors(self, timeout=None):
return self.METHOD_NAME()(cors=[], timeout=timeout)
def get_metrics(self, interval, timeout=None):
props = self.get_service_properties()(timeout=timeout)
metrics = {}
if interval == 'both':
metrics['hour'] = props.__dict__['hour_metrics']
metrics['minute'] = props.__dict__['minute_metrics']
else:
metrics[interval] = props.__dict__['{}_metrics'.format(interval)]
return metrics
def set_metrics(self, retention, hour, minute, api=None, timeout=None):
t_metrics, t_retention_policy = get_sdk(self.cli_ctx, ResourceType.DATA_STORAGE, 'Metrics', 'RetentionPolicy',
mod='common.models')
retention_policy = t_retention_policy(enabled=retention != 0, days=retention)
hour_metrics = t_metrics(hour, api, retention_policy) if hour is not None else None
minute_metrics = t_metrics(minute, api, retention_policy) if minute is not None else None
return self.METHOD_NAME()(
hour_metrics=hour_metrics, minute_metrics=minute_metrics, timeout=timeout)
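# Hedged illustration of the getattr dispatch used throughout this class: for a
# ServiceProperties instance named "blob", the helpers above resolve to the client's
# get_blob_service_properties / set_blob_service_properties methods.
def _example_property_method_names(name: str) -> tuple:
    return (
        'get_{}_service_properties'.format(name),
        'set_{}_service_properties'.format(name),
    )
# _example_property_method_names('blob')
# -> ('get_blob_service_properties', 'set_blob_service_properties')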
|
3,320 |
test flag overrides env var
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
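# Hedged illustration (added): the two equivalent ways of enabling break-on-failure that
# the module docstring describes, expressed as the command/environment pairs this script
# builds further down in RunAndVerify.
def _example_invocations():
    with_env_var = ([EXE_PATH], {BREAK_ON_FAILURE_ENV_VAR: '1'})
    with_flag = ([EXE_PATH, '--%s' % BREAK_ON_FAILURE_FLAG], {})
    return with_env_var, with_flag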
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def METHOD_NAME(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
|
3,321 |
url set anchor
|
# ContentDB
# Copyright (C) 2018-21 rubenwardy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import datetime
import typing
from urllib.parse import urljoin, urlparse, urlunparse
import user_agents
from flask import request, abort, url_for
from flask_babel import LazyString, lazy_gettext
from werkzeug.datastructures import MultiDict
from app import app
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
ref_url.netloc == test_url.netloc
# These are given to Jinja in template_filters.py
def abs_url_for(endpoint: str, **kwargs):
scheme = "https" if app.config["BASE_URL"][:5] == "https" else "http"
return url_for(endpoint, _external=True, _scheme=scheme, **kwargs)
def abs_url(path):
return urljoin(app.config["BASE_URL"], path)
def abs_url_samesite(path):
base = urlparse(app.config["BASE_URL"])
return urlunparse(base._replace(path=path))
def url_current(abs=False):
if request.args is None or request.view_args is None:
return None
args = MultiDict(request.args)
dargs = dict(args.lists())
dargs.update(request.view_args)
if abs:
return abs_url_for(request.endpoint, **dargs)
else:
return url_for(request.endpoint, **dargs)
def url_clear_query():
if request.endpoint is None:
return None
dargs = dict()
if request.view_args:
dargs.update(request.view_args)
return url_for(request.endpoint, **dargs)
def METHOD_NAME(anchor):
args = MultiDict(request.args)
dargs = dict(args.lists())
dargs.update(request.view_args)
return url_for(request.endpoint, **dargs) + "#" + anchor
def url_set_query(**kwargs):
if request.endpoint is None:
return None
args = MultiDict(request.args)
for key, value in kwargs.items():
if key == "_add":
for key2, value_to_add in value.items():
values = set(args.getlist(key2))
values.add(value_to_add)
args.setlist(key2, list(values))
elif key == "_remove":
for key2, value_to_remove in value.items():
values = set(args.getlist(key2))
values.discard(value_to_remove)
args.setlist(key2, list(values))
else:
args.setlist(key, [ value ])
dargs = dict(args.lists())
if request.view_args:
dargs.update(request.view_args)
return url_for(request.endpoint, **dargs)
def get_int_or_abort(v, default=None):
if v is None:
return default
try:
return int(v or default)
except ValueError:
abort(400)
def is_user_bot():
user_agent = request.headers.get('User-Agent')
if user_agent is None:
return True
user_agent = user_agents.parse(user_agent)
return user_agent.is_bot
def get_request_date(key: str) -> typing.Optional[datetime.date]:
val = request.args.get(key)
if val is None:
return None
try:
return datetime.datetime.strptime(val, "%Y-%m-%d").date()
except ValueError:
abort(400)
def get_daterange_options() -> typing.List[typing.Tuple[LazyString, str]]:
now = datetime.datetime.utcnow().date()
days7 = (datetime.datetime.utcnow() - datetime.timedelta(days=7)).date()
days30 = (datetime.datetime.utcnow() - datetime.timedelta(days=30)).date()
days90 = (datetime.datetime.utcnow() - datetime.timedelta(days=90)).date()
year_start = datetime.date(now.year, 1, 1)
last_year_start = datetime.date(now.year - 1, 1, 1)
last_year_end = datetime.date(now.year - 1, 12, 31)
return [
(lazy_gettext("All time"), url_clear_query()),
(lazy_gettext("Last 7 days"), url_set_query(start=days7.isoformat(), end=now.isoformat())),
(lazy_gettext("Last 30 days"), url_set_query(start=days30.isoformat(), end=now.isoformat())),
(lazy_gettext("Last 90 days"), url_set_query(start=days90.isoformat(), end=now.isoformat())),
(lazy_gettext("Year to date"), url_set_query(start=year_start, end=now.isoformat())),
(lazy_gettext("Last year"), url_set_query(start=last_year_start, end=last_year_end)),
]
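# Hedged usage sketch (illustrative): how a view would typically combine the helpers
# above; reads a page number from the query string and builds links back to the current
# page. Requires an active Flask request context with a resolvable endpoint.
def _example_pagination_links():
    page = get_int_or_abort(request.args.get("page"), 1)
    return page, METHOD_NAME("reviews"), url_set_query(page=page + 1)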
|
3,322 |
add dict
|
import os
import shutil
from django.core.files import File
from django.utils import timezone
from django.utils.crypto import get_random_string
from ...core.utils import slugify
FILENAME_MAX_LEN = 50
class DataArchive:
def __init__(self, user, working_dir_path):
self.user = user
self.working_dir_path = working_dir_path
self.tmp_dir_path = None
self.data_dir_path = None
self.file_path = None
self.file = None
def __enter__(self):
self.tmp_dir_path = self.create_tmp_dir()
self.data_dir_path = self.create_data_dir()
return self
def __exit__(self, *args):
self.delete_file()
self.delete_tmp_dir()
def create_tmp_dir(self):
tmp_dir_name = get_tmp_filename(self.user)
tmp_dir_path = os.path.join(self.working_dir_path, tmp_dir_name)
os.mkdir(tmp_dir_path)
return tmp_dir_path
def create_data_dir(self):
data_dir_name = get_tmp_filename(self.user)
data_dir_path = os.path.join(self.tmp_dir_path, data_dir_name)
os.mkdir(data_dir_path)
return data_dir_path
def delete_tmp_dir(self):
if self.tmp_dir_path:
shutil.rmtree(self.tmp_dir_path)
self.tmp_dir_path = None
self.data_dir_path = None
def get_file(self):
file_name = get_tmp_filename(self.user)
file_path = os.path.join(self.working_dir_path, file_name)
self.file_path = shutil.make_archive(file_path, "zip", self.tmp_dir_path)
self.file = open(self.file_path, "rb")
return File(self.file)
def delete_file(self):
if self.file:
self.file.close()
self.file = None
if self.file_path:
os.remove(self.file_path)
self.file_path = None
def add_text(self, name, value, date=None, directory=None):
clean_filename = slugify(str(name))
file_dir_path = self.make_final_path(date=date, directory=directory)
file_path = os.path.join(file_dir_path, "%s.txt" % clean_filename)
with open(file_path, "w") as fp:
fp.write(str(value))
return file_path
def METHOD_NAME(self, name, value, date=None, directory=None):
text_lines = []
for key, item in value.items():
text_lines.append("%s: %s" % (key, item))
text = "\n".join(text_lines)
return self.add_text(name, text, date=date, directory=directory)
def add_model_file(self, model_file, prefix=None, date=None, directory=None):
if not model_file:
return None
target_dir_path = self.make_final_path(date=date, directory=directory)
filename = os.path.basename(model_file.name)
if prefix:
prefixed_filename = "%s-%s" % (prefix, filename)
clean_filename = trim_long_filename(prefixed_filename)
target_path = os.path.join(target_dir_path, clean_filename)
else:
clean_filename = trim_long_filename(filename)
target_path = os.path.join(target_dir_path, clean_filename)
with open(target_path, "wb") as fp:
for chunk in model_file.chunks():
fp.write(chunk)
return target_path
def make_final_path(self, date=None, directory=None):
# fixme: os.path.isdir test can be avoided in py37
if date and directory:
raise ValueError("date and directory arguments are mutually exclusive")
data_dir_path = self.data_dir_path
if date:
final_path = data_dir_path
path_items = [date.strftime("%Y"), date.strftime("%m"), date.strftime("%d")]
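# Create the nested year/month/day directories, making any missing levels along the way.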
for path_item in path_items:
final_path = os.path.join(final_path, str(path_item))
if not os.path.isdir(final_path):
os.mkdir(final_path)
return final_path
if directory:
final_path = os.path.join(data_dir_path, str(directory))
if not os.path.isdir(final_path):
os.mkdir(final_path)
return final_path
return data_dir_path
def get_tmp_filename(user):
filename_bits = [
user.slug,
timezone.now().strftime("%Y%m%d-%H%M%S"),
get_random_string(6),
]
return "-".join(filename_bits)
def trim_long_filename(filename):
# fixme: consider moving this utility to better place?
# eg. to trim too long attachment filenames on upload
if len(filename) < FILENAME_MAX_LEN:
return filename
name, extension = os.path.splitext(filename)
name_len = FILENAME_MAX_LEN - len(extension)
return "%s%s" % (name[:name_len], extension)
| 3,323 | setup |
# monet_theming_group.py
#
# Change the look of Adwaita, with ease
# Copyright (C) 2023, Gradience Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from enum import Enum
from gi.repository import Gtk, Adw
from gradience.backend.theming.monet import Monet
from gradience.backend.constants import rootdir
from gradience.frontend.widgets.palette_shades import GradiencePaletteShades
from gradience.backend.logger import Logger
logging = Logger()
@Gtk.Template(resource_path=f"{rootdir}/ui/monet_theming_group.ui")
class GradienceMonetThemingGroup(Adw.PreferencesGroup):
__gtype_name__ = "GradienceMonetThemingGroup"
monet_theming_expander = Gtk.Template.Child("monet-theming-expander")
monet_file_chooser = Gtk.Template.Child("monet-file-chooser")
monet_file_chooser_button = Gtk.Template.Child("file-chooser-button")
def __init__(self, parent, **kwargs):
super().__init__(**kwargs)
self.parent = parent
self.app = self.parent.get_application()
self.monet_image_file = None
self.setup_signals()
self.METHOD_NAME()
def setup_signals(self):
self.monet_file_chooser.connect(
"response", self.on_monet_file_chooser_response)
def METHOD_NAME(self):
self.monet_file_chooser.set_transient_for(self.parent)
self.setup_palette_shades()
#self.setup_tone_row()
self.setup_theme_row()
def setup_palette_shades(self):
self.monet_palette_shades = GradiencePaletteShades(
"monet", _("Monet Palette"), 6
)
self.app.pref_palette_shades["monet"] = self.monet_palette_shades
self.monet_theming_expander.add_row(self.monet_palette_shades)
# TODO: Rethink how it should be implemented
'''def setup_tone_row(self):
self.tone_row = Adw.ComboRow()
self.tone_row.set_title(_("Tone"))
tone_store = Gtk.StringList()
tone_store_values = []
for i in range(20, 80, 5):
tone_store_values.append(str(i))
for v in tone_store_values:
tone_store.append(v)
self.tone_row.set_model(tone_store)
self.monet_theming_expander.add_row(self.tone_row)'''
def setup_theme_row(self):
self.theme_row = Adw.ComboRow()
self.theme_row.set_title(_("Theme"))
theme_store = Gtk.StringList()
theme_store.append(_("Auto"))
theme_store.append(_("Light"))
theme_store.append(_("Dark"))
self.theme_row.set_model(theme_store)
self.monet_theming_expander.add_row(self.theme_row)
@Gtk.Template.Callback()
def on_apply_button_clicked(self, *_args):
if self.monet_image_file:
try:
monet_theme = Monet().generate_palette_from_image(self.monet_image_file)
#tone = self.tone_row.get_selected_item().get_string() # TODO: Remove tone requirement from Monet Engine
variant_pos = self.theme_row.props.selected
class variantEnum(Enum):
AUTO = 0
LIGHT = 1
DARK = 2
def __get_variant_string():
if variant_pos == variantEnum.AUTO.value:
return "auto"
elif variant_pos == variantEnum.DARK.value:
return "dark"
elif variant_pos == variantEnum.LIGHT.value:
return "light"
variant_str = __get_variant_string()
self.app.custom_css_group.reset_buffer()
self.app.update_theme_from_monet(monet_theme, variant_str)
except (OSError, AttributeError, ValueError) as e:
logging.error("Failed to generate Monet palette", exc=e)
self.parent.toast_overlay.add_toast(
Adw.Toast(title=_("Failed to generate Monet palette"))
)
else:
logging.info("Monet palette generated successfully")
self.parent.toast_overlay.add_toast(
Adw.Toast(title=_("Palette generated"))
)
else:
logging.error("Input image for Monet generation not selected")
self.parent.toast_overlay.add_toast(
Adw.Toast(title=_("Select an image first"))
)
@Gtk.Template.Callback()
def on_file_chooser_button_clicked(self, *_args):
self.monet_file_chooser.show()
def on_monet_file_chooser_response(self, widget, response):
if response == Gtk.ResponseType.ACCEPT:
self.monet_image_file = self.monet_file_chooser.get_file()
image_basename = self.monet_image_file.get_basename()
self.monet_file_chooser_button.set_label(image_basename)
self.monet_file_chooser_button.set_tooltip_text(image_basename)
self.monet_file_chooser.hide()
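# After hiding the chooser, resolve the selected GFile to a filesystem path and generate the palette from it.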
if response == Gtk.ResponseType.ACCEPT:
self.monet_image_file = self.monet_image_file.get_path()
self.on_apply_button_clicked()
| 3,324 | assert sigmoid classification |
#!/usr/bin/env python3
import unittest
import torch
from captum._utils.typing import BaselineType, Tensor
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._core.noise_tunnel import NoiseTunnel
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.classification_models import SigmoidModel, SoftmaxModel
from torch.nn import Module
class Test(BaseTest):
def test_sigmoid_classification_vanilla(self) -> None:
self.METHOD_NAME("vanilla", "riemann_right")
def test_sigmoid_classification_smoothgrad(self) -> None:
self.METHOD_NAME("smoothgrad", "riemann_left")
def test_sigmoid_classification_smoothgrad_sq(self) -> None:
self.METHOD_NAME("smoothgrad_sq", "riemann_middle")
def test_sigmoid_classification_vargrad(self) -> None:
self.METHOD_NAME("vargrad", "riemann_trapezoid")
def test_softmax_classification_vanilla(self) -> None:
self._assert_softmax_classification("vanilla", "gausslegendre")
def test_softmax_classification_smoothgrad(self) -> None:
self._assert_softmax_classification("smoothgrad", "riemann_right")
def test_softmax_classification_smoothgrad_sq(self) -> None:
self._assert_softmax_classification("smoothgrad_sq", "riemann_left")
def test_softmax_classification_vargrad(self) -> None:
self._assert_softmax_classification("vargrad", "riemann_middle")
def test_softmax_classification_vanilla_batch(self) -> None:
self._assert_softmax_classification_batch("vanilla", "riemann_trapezoid")
def test_softmax_classification_smoothgrad_batch(self) -> None:
self._assert_softmax_classification_batch("smoothgrad", "gausslegendre")
def test_softmax_classification_smoothgrad_sq_batch(self) -> None:
self._assert_softmax_classification_batch("smoothgrad_sq", "riemann_right")
def test_softmax_classification_vargrad_batch(self) -> None:
self._assert_softmax_classification_batch("vargrad", "riemann_left")
def METHOD_NAME(
self, type: str = "vanilla", approximation_method: str = "gausslegendre"
) -> None:
num_in = 20
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
target = torch.tensor(0)
# TODO add test cases for multiple different layers
model = SigmoidModel(num_in, 5, 1)
self._validate_completness(model, input, target, type, approximation_method)
def _assert_softmax_classification(
self, type: str = "vanilla", approximation_method: str = "gausslegendre"
) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
target = torch.tensor(5)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
self._validate_completness(model, input, target, type, approximation_method)
def _assert_softmax_classification_batch(
self, type: str = "vanilla", approximation_method: str = "gausslegendre"
) -> None:
num_in = 40
input = torch.arange(0.0, num_in * 3.0, requires_grad=True).reshape(3, num_in)
target = torch.tensor([5, 5, 2])
baseline = torch.zeros(1, num_in)
# 10-class classification model
model = SoftmaxModel(num_in, 20, 10)
self._validate_completness(
model, input, target, type, approximation_method, baseline
)
def _validate_completness(
self,
model: Module,
input: Tensor,
target: Tensor,
type: str = "vanilla",
approximation_method: str = "gausslegendre",
baseline: BaselineType = None,
) -> None:
ig = IntegratedGradients(model.forward)
model.zero_grad()
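# Vanilla IG compares the returned convergence delta against a recomputed one; noise tunnel variants only check the aggregated deltas' shape and magnitude.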
if type == "vanilla":
attributions, delta = ig.attribute(
input,
baselines=baseline,
target=target,
method=approximation_method,
n_steps=200,
return_convergence_delta=True,
)
delta_expected = ig.compute_convergence_delta(
attributions, baseline, input, target
)
assertTensorAlmostEqual(self, delta_expected, delta)
delta_condition = (delta.abs() < 0.005).all()
self.assertTrue(
delta_condition,
"The sum of attribution values {} is not "
"nearly equal to the difference between the endpoint for "
"some samples".format(delta),
)
self.assertEqual([input.shape[0]], list(delta.shape))
else:
nt = NoiseTunnel(ig)
n_samples = 10
attributions, delta = nt.attribute(
input,
baselines=baseline,
nt_type=type,
nt_samples=n_samples,
stdevs=0.0002,
n_steps=100,
target=target,
method=approximation_method,
return_convergence_delta=True,
)
self.assertEqual([input.shape[0] * n_samples], list(delta.shape))
self.assertTrue((delta.abs() < 0.05).all())
self.assertEqual(attributions.shape, input.shape)
if __name__ == "__main__":
unittest.main()
| 3,325 | add minecraft service servicer to server |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
import minecraft_pb2 as minecraft__pb2
class MinecraftServiceStub(object):
"""*
The main service.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.spawnBlocks = channel.unary_unary(
'/dk.itu.real.ooe.MinecraftService/spawnBlocks',
request_serializer=minecraft__pb2.Blocks.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.readCube = channel.unary_unary(
'/dk.itu.real.ooe.MinecraftService/readCube',
request_serializer=minecraft__pb2.Cube.SerializeToString,
response_deserializer=minecraft__pb2.Blocks.FromString,
)
self.fillCube = channel.unary_unary(
'/dk.itu.real.ooe.MinecraftService/fillCube',
request_serializer=minecraft__pb2.FillCubeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class MinecraftServiceServicer(object):
"""*
The main service.
"""
def spawnBlocks(self, request, context):
"""* Spawn multiple blocks.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def readCube(self, request, context):
"""* Return all blocks in a cube
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def fillCube(self, request, context):
"""* Fill a cube with a block type
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def METHOD_NAME(servicer, server):
rpc_method_handlers = {
'spawnBlocks': grpc.unary_unary_rpc_method_handler(
servicer.spawnBlocks,
request_deserializer=minecraft__pb2.Blocks.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'readCube': grpc.unary_unary_rpc_method_handler(
servicer.readCube,
request_deserializer=minecraft__pb2.Cube.FromString,
response_serializer=minecraft__pb2.Blocks.SerializeToString,
),
'fillCube': grpc.unary_unary_rpc_method_handler(
servicer.fillCube,
request_deserializer=minecraft__pb2.FillCubeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'dk.itu.real.ooe.MinecraftService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MinecraftService(object):
"""*
The main service.
"""
@staticmethod
def spawnBlocks(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/dk.itu.real.ooe.MinecraftService/spawnBlocks',
minecraft__pb2.Blocks.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def readCube(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/dk.itu.real.ooe.MinecraftService/readCube',
minecraft__pb2.Cube.SerializeToString,
minecraft__pb2.Blocks.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def fillCube(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/dk.itu.real.ooe.MinecraftService/fillCube',
minecraft__pb2.FillCubeRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 3,326 | test elementwise merge do not erase |
# coding=utf-8
import json
from bzt.engine import Configuration
from bzt.utils import BetterDict, dehumanize_time, temp_file
from tests.unit import BZTestCase, RESOURCES_DIR, BASE_CONFIG, ROOT_LOGGER, EngineEmul
class TestConfiguration(BZTestCase):
def test_load(self):
obj = Configuration()
configs = [
BASE_CONFIG,
RESOURCES_DIR + "json/jmx.json",
RESOURCES_DIR + "json/concurrency.json"
]
obj.load(configs)
ROOT_LOGGER.debug("config:\n%s", obj)
fname = temp_file()
obj.dump(fname, Configuration.JSON)
with open(fname) as fh:
ROOT_LOGGER.debug("JSON:\n%s", fh.read())
fname = temp_file()
obj.dump(fname, Configuration.YAML)
with open(fname) as fh:
ROOT_LOGGER.debug("YAML:\n%s", fh.read())
def test_merge(self):
obj = Configuration()
configs = [
RESOURCES_DIR + "yaml/test.yml",
RESOURCES_DIR + "json/merge1.json",
RESOURCES_DIR + "json/merge2.json",
]
obj.load(configs)
fname = temp_file()
obj.dump(fname, Configuration.JSON)
with open(fname) as fh:
ROOT_LOGGER.debug("JSON:\n%s", fh.read())
jmeter = obj['modules']['jmeter']
classval = jmeter['class']
self.assertEquals("bzt.modules.jmeter.JMeterExecutor", classval)
self.assertEquals("value", obj['key'])
self.assertEquals(6, len(obj["list-append"]))
self.assertEquals(2, len(obj["list-replace"]))
self.assertEquals(2, len(obj["list-replace-notexistent"]))
self.assertIsInstance(obj["list-complex"][1][0], BetterDict)
self.assertIsInstance(obj["list-complex"][1][0], BetterDict)
self.assertIsInstance(obj["list-complex"][1][0], BetterDict)
self.assertFalse("properties" in jmeter)
fname = temp_file()
obj.dump(fname, Configuration.JSON)
checker = Configuration()
checker.load([fname])
token = checker["list-complex"][1][0]['token']
self.assertNotEquals('test', token)
token_orig = obj["list-complex"][1][0]['token']
self.assertEquals('test', token_orig)
def test_unicode(self):
obj = Configuration()
expected = "Юникод"
obj.merge({
"ustr": expected,
})
ustr = obj.get("ustr", "nope")
self.assertEqual(ustr, expected)
def test_masq_sensitive(self):
obj = Configuration()
obj.merge({
"token": "my-precious",
"my_password": "qweasdzxc",
"secret": "secret",
"secret_story": "story",
})
BetterDict.traverse(obj, Configuration.masq_sensitive)
self.assertEquals(obj["token"], "*" * 8)
self.assertEquals(obj["my_password"], "*" * 8)
self.assertEquals(obj["secret"], "*" * 8)
self.assertEquals(obj["secret_story"], "story")
def test_filtering(self):
obj = Configuration()
obj.merge({
"drop": "me",
"also-drop": {"this": "drop"},
"and-also-drop": ["thelist"],
"but-keep": "value",
"and-also-keep": {
"nested": "value",
"while-dropping": "some"
},
"filter-subitems": {
"keep": "value",
"drop": "some"
}
})
rules = {
"but-keep": True,
"and-also-keep": {"nested": True},
"!filter-subitems": {"drop": True},
}
obj.filter(rules)
expected = {
"but-keep": "value",
"and-also-keep": {"nested": "value"},
"filter-subitems": {"keep": "value"},
}
self.assertEquals(expected, obj)
def test_tabs(self):
obj = Configuration()
obj.tab_replacement_spaces = 4
obj.load([RESOURCES_DIR + "yaml/tabs-issue.yml"])
fname = temp_file()
obj.dump(fname, Configuration.YAML)
self.assertFilesEqual(RESOURCES_DIR + "yaml/tabs-issue-spaces.yml", fname)
def test_merge_removal(self):
obj = Configuration()
obj.merge({
"foo": "bar",
})
obj.merge({
"^foo": "baz",
})
self.assertNotIn("foo", obj)
def test_merge_overwrite(self):
obj = Configuration()
obj.merge({
"foo": {"bar": "baz"},
})
obj.merge({
"~foo": "baz",
})
self.assertEqual(obj["foo"], "baz")
def test_elementwise_merge(self):
obj = Configuration()
obj.merge({
"execution": [{
"executor": "jmeter",
"iterations": 10,
}],
})
obj.merge({
"$execution": [{"iterations": 20}],
})
self.assertEqual(obj["execution"][0]["iterations"], 20)
def METHOD_NAME(self):
obj = Configuration()
obj.merge({
"execution": [{
"executor": "jmeter",
"iterations": 10,
}, {
"executor": "selenium",
"iterations": 30,
}],
})
obj.merge({
"$execution": [{"iterations": 20}],
})
self.assertEqual(obj["execution"][0]["iterations"], 20)
self.assertEqual(obj["execution"][1]["iterations"], 30)
def test_elementwise_merge_right_is_bigger(self):
obj = Configuration()
obj.merge({
"execution": [{
"executor": "jmeter",
"iterations": 10,
}],
})
obj.merge({
"$execution": [{"iterations": 20}, {"iterations": 30}],
})
self.assertEqual(obj["execution"][0]["iterations"], 20)
self.assertEqual(obj["execution"][1]["iterations"], 30)
def test_encode_decode_infinities(self):
engine = EngineEmul()
obj = Configuration()
obj.merge({
"foo": float("inf"),
})
cfg = engine.create_artifact("config", ".json")
obj.dump(cfg, Configuration.JSON)
with open(cfg) as fds:
dump = json.loads(fds.read())
self.assertEqual(dump["foo"], "inf")
self.assertEqual(dehumanize_time(dump["foo"]), float("inf"))
def test_overwrite_execution_locations(self):
obj = Configuration()
obj.merge({
"execution": [{"locations": {"us-central1-a": 1}}],
})
obj.merge({
"$execution": [{"~locations": {"harbor-1": 1}}],
})
ROOT_LOGGER.info(obj)
self.assertEqual(obj, {"execution": [{"locations": {"harbor-1": 1}}]})
| 3,327 | poplast |
from _typeshed import Incomplete, SupportsKeysAndGetItem
from collections.abc import Generator, ItemsView, Iterable, KeysView, ValuesView
from typing import NoReturn, TypeVar
from typing_extensions import Self, TypeAlias
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
_T = TypeVar("_T")
class OrderedMultiDict(dict[_KT, _VT]):
def add(self, k: _KT, v: _VT) -> None: ...
def addlist(self, k: _KT, v: Iterable[_VT]) -> None: ...
def clear(self) -> None: ...
def copy(self) -> Self: ...
def counts(self) -> OrderedMultiDict[_KT, _VT]: ...
@classmethod
def fromkeys(cls, keys: _KT, default: _VT | None = None) -> OrderedMultiDict[_KT, _VT]: ... # type: ignore[override]
def get(self, k: _KT, default: _VT | None = None) -> OrderedMultiDict[_KT, _VT]: ... # type: ignore[override]
def getlist(self, k: _KT, default: _VT | None = ...) -> list[object]: ...
def inverted(self) -> OrderedMultiDict[_KT, _VT]: ...
def items(self, multi: bool = False) -> list[tuple[_KT, _VT]]: ... # type: ignore[override]
def iteritems(self, multi: bool = False) -> Generator[tuple[_KT, _VT], None, None]: ...
def iterkeys(self, multi: bool = False) -> Generator[_KT, None, None]: ...
def itervalues(self, multi: bool = False) -> Generator[_VT, None, None]: ...
def keys(self, multi: bool = False) -> list[_KT]: ... # type: ignore[override]
def pop(self, k: _KT, default: _VT | None = ...) -> _VT: ... # type: ignore[override]
def popall(self, k: _KT, default: _VT | None = ...) -> list[_VT]: ...
def METHOD_NAME(self, k: _KT | None = ..., default: _VT | None = ...) -> _VT: ...
def setdefault(self, k: _KT, default: _VT | None = ...) -> _VT: ...
def sorted(self, key: _KT | None = None, reverse: bool = False) -> OrderedMultiDict[_KT, _VT]: ...
def sortedvalues(self, key: _KT | None = None, reverse: bool = False) -> OrderedMultiDict[_KT, _VT]: ...
def todict(self, multi: bool = False) -> dict[_KT, _VT]: ...
def update(self, E: dict[_KT, _VT] | Iterable[object], **F) -> None: ... # type: ignore[override]
def update_extend(self, E: dict[_KT, _VT] | Iterable[object], **F) -> None: ...
def values(self, multi: bool = False) -> list[_VT]: ... # type: ignore[override]
def viewitems(self) -> ItemsView[_KT, _VT]: ...
def viewkeys(self) -> KeysView[_KT]: ...
def viewvalues(self) -> ValuesView[_VT]: ...
OMD: TypeAlias = OrderedMultiDict[_KT, _VT]
MultiDict: TypeAlias = OrderedMultiDict[_KT, _VT]
class FastIterOrderedMultiDict(OrderedMultiDict[_KT, _VT]): # undocumented
def iteritems(self, multi: bool = False) -> Generator[tuple[_KT, _VT], None, None]: ...
def iterkeys(self, multi: bool = False) -> Generator[_KT, None, None]: ...
class OneToOne(dict[_KT, _VT]):
inv: dict[_VT, _KT]
def clear(self) -> None: ...
def copy(self) -> Self: ...
def pop(self, key: _KT, default: _VT | _T = ...) -> _VT | _T: ...
def popitem(self) -> tuple[_KT, _VT]: ...
def setdefault(self, key: _KT, default: _VT | None = None) -> _VT: ...
@classmethod
def unique(cls, *a, **kw) -> Self: ...
def update(self, dict_or_iterable, **kw) -> None: ... # type: ignore[override]
class ManyToMany(dict[_KT, frozenset[_VT]]):
data: dict[_KT, set[_VT]]
inv: dict[_VT, set[_KT]]
# def __contains__(self, key: _KT): ...
def __delitem__(self, key: _KT) -> None: ...
def __eq__(self, other): ...
def __getitem__(self, key: _KT): ...
def __init__(self, items: Iterable[Incomplete] | None = None) -> None: ...
def __iter__(self): ...
def __len__(self): ...
def __setitem__(self, key: _KT, vals: Iterable[_VT]) -> None: ...
def add(self, key: _KT, val: _VT) -> None: ...
def get(self, key: _KT, default: frozenset[_VT] = ...) -> frozenset[_VT]: ... # type: ignore[override]
def iteritems(self) -> Generator[tuple[_KT, _VT], None, None]: ...
def keys(self): ...
def remove(self, key: _KT, val: _VT) -> None: ...
def replace(self, key: _KT, newkey: _KT) -> None: ...
def update(self, iterable: ManyToMany[_KT, _VT] | SupportsKeysAndGetItem[_KT, _VT] | tuple[_KT, _VT]) -> None: ... # type: ignore[override]
def subdict(d: dict[_KT, _VT], keep: Iterable[_KT] | None = None, drop: Iterable[_KT] | None = None) -> dict[_KT, _VT]: ...
class FrozenHashError(TypeError): ... # undocumented
class FrozenDict(dict[_KT, _VT]):
def __copy__(self) -> Self: ...
def clear(self, *a, **kw) -> None: ...
@classmethod
def fromkeys(cls, keys: Iterable[_KT], value: _VT | None = None) -> FrozenDict[_KT, _VT]: ... # type: ignore[override]
def pop(self, *a, **kw) -> NoReturn: ...
def popitem(self, *a, **kw) -> NoReturn: ...
def setdefault(self, *a, **kw) -> NoReturn: ...
def updated(self, *a, **kw) -> Self: ...
| 3,328 | log |
import abc
import colorlog
import contextlib
import inspect
import io
import logging
import sys
import traceback
from magma.backend.util import make_relative
from magma.common import Stack
from magma.config import config, EnvConfig
config._register(
log_stream=EnvConfig("MAGMA_LOG_STREAM", "stderr"),
log_level=EnvConfig("MAGMA_LOG_LEVEL", "INFO"),
include_traceback=EnvConfig("MAGMA_INCLUDE_WIRE_TRACEBACK", False, bool),
traceback_limit=EnvConfig("MAGMA_ERROR_TRACEBACK_LIMIT", 5, int),
)
_staged_logs_stack = Stack()
_log_capturer_stack = Stack()
def _make_bold(string):
return f"\033[1m{string}\033[0m"
def _get_source_line(filename, lineno):
with open(filename, "r") as f:
return f.readlines()[lineno - 1]
def _attach_debug_info(msg, debug_info):
file = debug_info.filename
line = debug_info.lineno
line_info = _make_bold(f"{make_relative(file)}:{line}")
msg = f"{line_info}: {msg}"
try:
source = _get_source_line(file, line).rstrip("\n")
source = f">> {source}"
except FileNotFoundError:
source = f"(Could not file file {file})"
msg = f"{msg}\n{source}"
return msg
def _attach_traceback(msg, frame_selector, limit):
"""
Attaches traceback string to @msg and returns new string.
@frame_selector is a function which takes a list of stack frames and selects
one. For example, it could select the frame based on an index, or based on
the function names.
"""
frame = frame_selector(inspect.stack()).frame
with io.StringIO() as io_:
traceback.print_stack(f=frame, limit=limit, file=io_)
tb = io_.getvalue()
msg = f"{msg}\n{tb}"
return msg
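# The default selector presumably skips the internal logging frames so the reported traceback starts near the caller of the logging method.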
def _frame_selector(frames):
return frames[3]
def _get_additional_kwarg(kwargs, key):
try:
value = kwargs.pop(key)
return value
except KeyError:
return None
def get_staged_logs_stack() -> Stack:
global _staged_logs_stack
return _staged_logs_stack
class _MagmaLogger(logging.Logger):
"""
Derivative of logging.Logger class, with two additional keyword args:
* 'debug_info': Tuple of (file_name, line_no). If 'debug_info' is included,
this source-level information is logged along with the message.
* 'include_traceback': If True, a traceback is printed along with the
message.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._raw = False
@property
def raw(self) -> bool:
return self._raw
@raw.setter
def raw(self, raw: bool):
self._raw = raw
@contextlib.contextmanager
def as_raw(self):
prev_raw = self.raw
self.raw = True
try:
yield self
finally:
self.raw = prev_raw
def METHOD_NAME(self, level, msg, args, **kwargs):
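# Route the record to the active staged-logs stack when not in raw mode; otherwise fall through to immediate emission.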
if not self.raw and self._staged_log(level, msg, args, **kwargs):
return
self._raw_log(level, msg, args, **kwargs)
def _staged_log(self, level, msg, args, **kwargs) -> bool:
staged_logs_stack = get_staged_logs_stack()
try:
staged_logs = staged_logs_stack.peek()
except IndexError:
return False
staged_logs.append((self, level, msg, args, kwargs))
return True
def _capture_log(self, level, msg, args, **kwargs):
try:
log_capturer = get_log_capturer()
except IndexError:
return
log_capturer.add_log((level, msg, args, kwargs))
def _raw_log(self, level, msg, args, **kwargs):
debug_info = _get_additional_kwarg(kwargs, "debug_info")
if debug_info:
msg = _attach_debug_info(msg, debug_info)
include_traceback = _get_additional_kwarg(kwargs, "include_traceback")
if include_traceback or config.include_traceback:
msg = _attach_traceback(
msg, _frame_selector, config.traceback_limit)
self._capture_log(level, msg, args, **kwargs)
super().METHOD_NAME(level, msg, args, **kwargs)
# Set logging class to _MagmaLogger to override logging behavior. Also, setup
# root logger parameters.
logging.setLoggerClass(_MagmaLogger)
_log_stream = getattr(sys, config.log_stream)
_root_logger = logging.getLogger("magma")
_handler = colorlog.StreamHandler(_log_stream)
_handler.setFormatter(colorlog.ColoredFormatter(
'%(log_color)s%(levelname)s%(reset)s:%(name)s:%(message)s'))
_root_logger.addHandler(_handler)
_root_logger.setLevel(config.log_level)
def root_logger():
return logging.getLogger("magma")
# NOTE(rsetaluri): For some reason the following code which uses
# contextlib.contextmanager results in the context manager being entered into
# twice. It may be cached somewhere in the pipeline.
#
# @contextlib.contextmanager
# def logging_level(level):
# root = root_logger()
# prev_level = root.level
# root.setLevel(level)
# try:
# yield
# finally:
# root.setLevel(prev_level)
class logging_level:
def __init__(self, level):
self.level = level
self.root = root_logger()
def __enter__(self):
self.prev_level = self.root.level
self.root.setLevel(self.level)
def __exit__(self, *_):
self.root.setLevel(self.prev_level)
def stage_logger():
get_staged_logs_stack().push([])
def _flush(staged_logs):
for logger, level, obj, args, kwargs in staged_logs:
with logger.as_raw():
logger.log(level, obj, *args, **kwargs)
def flush():
staged_logs = get_staged_logs_stack().pop()
_flush(staged_logs)
return staged_logs
def unstage_logger():
return flush()
def flush_all():
staged_logs_stack = get_staged_logs_stack()
while staged_logs_stack:
staged_logs = staged_logs_stack.pop()
_flush(staged_logs)
@contextlib.contextmanager
def staged_logs():
stage_logger()
staged_logs = get_staged_logs_stack().peek()
try:
yield staged_logs
finally:
unstage_logger()
class StagedLogRecord(abc.ABC):
def __init__(self, tpl: str):
self._tpl = tpl
@abc.abstractmethod
def args(self):
raise NotImplementedError()
def __str__(self):
return self._tpl.format(*self.args())
def _get_log_capturer_stack() -> Stack:
global _log_capturer_stack
return _log_capturer_stack
def push_log_capturer(log_capturer):
_get_log_capturer_stack().push(log_capturer)
def pop_log_capturer():
_get_log_capturer_stack().pop()
def get_log_capturer():
return _get_log_capturer_stack().peek()
@contextlib.contextmanager
def capture_logs(log_capturer):
push_log_capturer(log_capturer)
try:
yield
finally:
pop_log_capturer()
| 3,329 | get mapping data |
"""
Reference-physical domain mappings.
"""
import numpy as nm
from sfepy.base.base import Struct
from sfepy.discrete.common.extmods.cmapping import CMapping
class PyCMapping(Struct):
"""
Class for storing mapping data. Primary data in numpy arrays.
Data for C functions translated to FMFields and embedded in CMapping.
"""
def __init__(self, bf, det, volume, bfg, normal, dim):
self.bf = bf
self.det = det
self.volume = volume
self.bfg = bfg
self.normal = normal
self.cmap = CMapping(bf, det, volume, bfg, normal, dim)
n_el, n_qp = det.shape[:2]
n_ep = bf.shape[3]
self.n_el = n_el
self.n_qp = n_qp
self.dim = dim
self.n_ep = n_ep
def integrate(self, out, field, mode=0):
dim = field.shape[2]
if mode < 3 or dim == 1:
out[:] = nm.sum(field * self.det, axis=1)[:, None, :, :]
if mode == 1:
out /= self.volume
elif dim == (self.tdim + 1) and self.normal is not None:
out[:] = nm.dot(field, self.normal) * self.det / self.volume
return 0
class PhysicalQPs(Struct):
"""
Physical quadrature points in a region.
"""
def __init__(self, num=0):
Struct.__init__(self, num=num, shape=(0, 0, 0))
self.values = nm.empty(self.shape, dtype=nm.float64)
def get_shape(self, rshape):
"""
Get shape from raveled shape.
"""
n_qp = self.shape[1]
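# A raveled first axis of 1 denotes a constant parameter; otherwise it must be an exact multiple of the number of quadrature points.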
if n_qp > 0:
if rshape[0] == 1:
shape = (0, n_qp) + rshape[1:] # Constant parameter.
elif (rshape[0] // n_qp) * n_qp != rshape[0]:
raise ValueError('incompatible shapes! (n_qp: %d, %s)'
% (n_qp, rshape))
else:
shape = (rshape[0] // n_qp, n_qp) + rshape[1:]
else:
shape = (rshape[0], 0, 0, 0)
return shape
class Mapping(Struct):
"""
Base class for mappings.
"""
@staticmethod
def from_args(region, kind='v'):
"""
Create mapping from reference to physical entities in a given
region, given the integration kind ('v' or 's').
This mapping can be used to compute the physical quadrature
points.
Parameters
----------
region : Region instance
The region defining the entities.
kind : 'v' or 's'
The kind of the entities: 'v' - cells, 's' - facets.
Returns
-------
mapping : FEMapping or IGMapping instance
The requested mapping.
"""
from sfepy.discrete.fem.domain import FEDomain
from sfepy.discrete.iga.domain import IGDomain
if isinstance(region.domain, FEDomain):
from sfepy.discrete.fem.mappings import FEMapping
coors = region.domain.get_mesh_coors()
if kind == 's':
coors = coors[region.vertices]
conn, gel = region.domain.get_conn(ret_gel=True)
if kind == 'v':
cells = region.get_cells()
mapping = FEMapping(coors, conn[cells], gel=gel)
elif kind == 's':
from sfepy.discrete.fem.fe_surface import FESurface
aux = FESurface('aux', region, gel.get_surface_entities(),
conn)
mapping = FEMapping(coors, aux.leconn, gel=gel.surface_facet)
elif isinstance(region.domain, IGDomain):
from sfepy.discrete.iga.mappings import IGMapping
mapping = IGMapping(region.domain, region.cells)
else:
raise ValueError('unknown domain class! (%s)' % type(region.domain))
return mapping
def get_physical_qps(region, integral, map_kind=None):
"""
Get physical quadrature points corresponding to the given region
and integral.
"""
phys_qps = PhysicalQPs()
if map_kind is None:
map_kind = 'v' if region.can_cells else 's'
gmap = Mapping.from_args(region, map_kind)
gel = gmap.get_geometry()
qp_coors, _ = integral.get_qp(gel.name)
qps = gmap.get_physical_qps(qp_coors)
n_el, n_qp = qps.shape[0], qps.shape[1]
phys_qps.num = n_el * n_qp
phys_qps.shape = qps.shape
qps.shape = (phys_qps.num, qps.shape[2])
phys_qps.values = qps
return phys_qps
def METHOD_NAME(name, field, integral, region=None, integration='volume'):
"""
General helper function for accessing reference mapping data.
Get data attribute `name` from reference mapping corresponding to
`field` in `region` in quadrature points of the given `integral` and
`integration` type.
Parameters
----------
name : str
The reference mapping attribute name.
field : Field instance
The field defining the reference mapping.
integral : Integral instance
The integral defining quadrature points.
region : Region instance, optional
If given, use the given region instead of `field` region.
integration : one of ('volume', 'surface', 'surface_extra')
The integration type.
Returns
-------
data : array
The required data merged for all element groups.
Notes
-----
Assumes the same element geometry in all element groups of the field!
"""
data = None
if region is None:
region = field.region
geo, _ = field.get_mapping(region, integral, integration)
data = getattr(geo, name)
return data
def get_jacobian(field, integral, region=None, integration='volume'):
"""
Get the jacobian of reference mapping corresponding to `field`.
Parameters
----------
field : Field instance
The field defining the reference mapping.
integral : Integral instance
The integral defining quadrature points.
region : Region instance, optional
If given, use the given region instead of `field` region.
integration : one of ('volume', 'surface', 'surface_extra')
The integration type.
Returns
-------
jac : array
The jacobian merged for all element groups.
See Also
--------
get_mapping_data
Notes
-----
Assumes the same element geometry in all element groups of the field!
"""
jac = METHOD_NAME('det', field, integral, region=region,
integration=integration)
return jac
def get_normals(field, integral, region):
"""
Get the normals of element faces in `region`.
Parameters
----------
field : Field instance
The field defining the reference mapping.
integral : Integral instance
The integral defining quadrature points.
region : Region instance
The region defining the element faces.
Returns
-------
normals : array
The normals merged for all element groups.
See Also
--------
get_mapping_data
Notes
-----
Assumes the same element geometry in all element groups of the field!
"""
normals = METHOD_NAME('normal', field, integral, region=region,
integration='surface')
return normals
| 3,330 | set active severity |
"""Contains the NotifyPanel class."""
__all__ = ['NotifyPanel']
class NotifyPanel:
"""NotifyPanel class: this class contains methods for creating
a panel to control direct/panda notify categories."""
def __init__(self, directNotify, tl = None):
"""
NotifyPanel class pops up a control panel to view/set
notify levels for all available DIRECT and PANDA notify categories
"""
# Make sure TK mainloop is running
from direct.showbase.TkGlobal import Pmw
from tkinter import Toplevel, Frame, Label, Radiobutton, IntVar
from tkinter import HORIZONTAL, X, W, NW, BOTH, LEFT, RIGHT
# To get severity levels
from panda3d.core import NSFatal, NSError, NSWarning, NSInfo, NSDebug, NSSpam
if tl is None:
tl = Toplevel()
tl.title('Notify Controls')
tl.geometry('300x400')
# Init active category
self.activeCategory = None
# Create widgets
mainFrame = Frame(tl)
# Paned widget for dividing two halves
framePane = Pmw.PanedWidget(mainFrame,
orient = HORIZONTAL)
categoryFrame = framePane.add('categories', size = 200)
severityFrame = framePane.add('severities', size = 50)
# Category frame
# Assemble PANDA categories
categories = self.getPandaCategoriesAsList()
self.__categories = {}
categoryNames = []
for category in categories:
name = category.getBasename()
self.__categories[name] = category
categoryNames.append(name)
# Assemble DIRECT categories
for name in directNotify.getCategories():
category = directNotify.getCategory(name)
self.__categories[name] = category
categoryNames.append(name)
# Sort resulting list of names
categoryNames.sort()
# Create a listbox
self.categoryList = Pmw.ScrolledListBox(
categoryFrame,
labelpos = NW, label_text = 'Categories:',
label_font=('MSSansSerif', 10, 'bold'),
listbox_takefocus = 1,
items = categoryNames,
selectioncommand = self.setActivePandaCategory)
self.categoryList.pack(expand = 1, fill = BOTH)
# Severity frame
Label(severityFrame, text = 'Severity:',
font=('MSSansSerif', 10, 'bold'),
justify = RIGHT, anchor = W).pack(fill = X, padx = 5)
self.severity = IntVar()
self.severity.set(0)
self.fatalSeverity = Radiobutton(severityFrame, text = 'Fatal',
justify = LEFT, anchor = W,
value = NSFatal,
variable = self.severity,
command = self.METHOD_NAME)
self.fatalSeverity.pack(fill = X)
self.errorSeverity = Radiobutton(severityFrame, text = 'Error',
justify = LEFT, anchor = W,
value = NSError,
variable = self.severity,
command = self.METHOD_NAME)
self.errorSeverity.pack(fill = X)
self.warningSeverity = Radiobutton(severityFrame, text = 'Warning',
justify = LEFT, anchor = W,
value = NSWarning,
variable = self.severity,
command = self.METHOD_NAME)
self.warningSeverity.pack(fill = X)
self.infoSeverity = Radiobutton(severityFrame, text = 'Info',
justify = LEFT, anchor = W,
value = NSInfo,
variable = self.severity,
command = self.METHOD_NAME)
self.infoSeverity.pack(fill = X)
self.debugSeverity = Radiobutton(severityFrame, text = 'Debug',
justify = LEFT, anchor = W,
value = NSDebug,
variable = self.severity,
command = self.METHOD_NAME)
self.debugSeverity.pack(fill = X)
self.spamSeverity = Radiobutton(severityFrame, text = 'Spam',
justify = LEFT, anchor = W,
value = NSSpam,
variable = self.severity,
command = self.METHOD_NAME)
self.spamSeverity.pack(fill = X)
# Pack frames
framePane.pack(expand = 1, fill = BOTH)
mainFrame.pack(expand = 1, fill = BOTH)
# Get listbox
listbox = self.categoryList.component('listbox')
# Bind updates to arrow buttons
listbox.bind('<KeyRelease-Up>', self.setActivePandaCategory)
listbox.bind('<KeyRelease-Down>', self.setActivePandaCategory)
# And grab focus (to allow keyboard navigation)
listbox.focus_set()
# And set active index (so keypresses will start with index 0)
listbox.activate(0)
# Select first item
self.categoryList.select_set(0)
self.setActivePandaCategory()
def _getPandaCategories(self, category):
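# Build a nested list: the category itself followed by one nested list per child, recursively.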
categories = [category]
for i in range(category.getNumChildren()):
child = category.getChild(i)
categories.append(self._getPandaCategories(child))
return categories
def getPandaCategories(self):
from panda3d.core import Notify
topCategory = Notify.ptr().getTopCategory()
return self._getPandaCategories(topCategory)
def _getPandaCategoriesAsList(self, pc, catList):
for item in pc:
if isinstance(item, list):
self._getPandaCategoriesAsList(item, catList)
else:
catList.append(item)
def getPandaCategoriesAsList(self):
pc = self.getPandaCategories()
pcList = []
self._getPandaCategoriesAsList(pc, pcList)
return pcList[1:]
def setActivePandaCategory(self, event = None):
categoryName = self.categoryList.getcurselection()[0]
self.activeCategory = self.__categories.get(categoryName, None)
if self.activeCategory:
self.severity.set(self.activeCategory.getSeverity())
def METHOD_NAME(self):
if self.activeCategory:
self.activeCategory.setSeverity(self.severity.get())
| 3,331 | enabled |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AliasRoutingConfiguration',
'StateMachineLoggingConfiguration',
'StateMachineTracingConfiguration',
'GetAliasRoutingConfigurationResult',
]
@pulumi.output_type
class AliasRoutingConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "stateMachineVersionArn":
suggest = "state_machine_version_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AliasRoutingConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AliasRoutingConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AliasRoutingConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
state_machine_version_arn: str,
weight: int):
"""
:param str state_machine_version_arn: A version of the state machine.
:param int weight: Percentage of traffic routed to the state machine version.
The following arguments are optional:
"""
pulumi.set(__self__, "state_machine_version_arn", state_machine_version_arn)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="stateMachineVersionArn")
def state_machine_version_arn(self) -> str:
"""
A version of the state machine.
"""
return pulumi.get(self, "state_machine_version_arn")
@property
@pulumi.getter
def weight(self) -> int:
"""
Percentage of traffic routed to the state machine version.
The following arguments are optional:
"""
return pulumi.get(self, "weight")
@pulumi.output_type
class StateMachineLoggingConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "includeExecutionData":
suggest = "include_execution_data"
elif key == "logDestination":
suggest = "log_destination"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StateMachineLoggingConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StateMachineLoggingConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StateMachineLoggingConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
include_execution_data: Optional[bool] = None,
level: Optional[str] = None,
log_destination: Optional[str] = None):
"""
:param bool include_execution_data: Determines whether execution data is included in your log. When set to `false`, data is excluded.
:param str level: Defines which category of execution history events are logged. Valid values: `ALL`, `ERROR`, `FATAL`, `OFF`
:param str log_destination: Amazon Resource Name (ARN) of a CloudWatch log group. Make sure the State Machine has the correct IAM policies for logging. The ARN must end with `:*`
"""
if include_execution_data is not None:
pulumi.set(__self__, "include_execution_data", include_execution_data)
if level is not None:
pulumi.set(__self__, "level", level)
if log_destination is not None:
pulumi.set(__self__, "log_destination", log_destination)
@property
@pulumi.getter(name="includeExecutionData")
def include_execution_data(self) -> Optional[bool]:
"""
Determines whether execution data is included in your log. When set to `false`, data is excluded.
"""
return pulumi.get(self, "include_execution_data")
@property
@pulumi.getter
def level(self) -> Optional[str]:
"""
Defines which category of execution history events are logged. Valid values: `ALL`, `ERROR`, `FATAL`, `OFF`
"""
return pulumi.get(self, "level")
@property
@pulumi.getter(name="logDestination")
def log_destination(self) -> Optional[str]:
"""
Amazon Resource Name (ARN) of a CloudWatch log group. Make sure the State Machine has the correct IAM policies for logging. The ARN must end with `:*`
"""
return pulumi.get(self, "log_destination")
@pulumi.output_type
class StateMachineTracingConfiguration(dict):
def __init__(__self__, *,
METHOD_NAME: Optional[bool] = None):
"""
:param bool enabled: When set to `true`, AWS X-Ray tracing is enabled. Make sure the State Machine has the correct IAM policies for logging. See the [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/xray-iam.html) for details.
"""
if METHOD_NAME is not None:
pulumi.set(__self__, "enabled", METHOD_NAME)
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[bool]:
"""
When set to `true`, AWS X-Ray tracing is enabled. Make sure the State Machine has the correct IAM policies for logging. See the [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/xray-iam.html) for details.
"""
return pulumi.get(self, "enabled")
@pulumi.output_type
class GetAliasRoutingConfigurationResult(dict):
def __init__(__self__, *,
state_machine_version_arn: str,
weight: int):
pulumi.set(__self__, "state_machine_version_arn", state_machine_version_arn)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="stateMachineVersionArn")
def state_machine_version_arn(self) -> str:
return pulumi.get(self, "state_machine_version_arn")
@property
@pulumi.getter
def weight(self) -> int:
return pulumi.get(self, "weight")
| 3,332 | test property decorator baseclass doc |
# Test case for property
# more tests are in test_descr
import sys
import unittest
from test.test_support import run_unittest
class PropertyBase(Exception):
pass
class PropertyGet(PropertyBase):
pass
class PropertySet(PropertyBase):
pass
class PropertyDel(PropertyBase):
pass
class BaseClass(object):
def __init__(self):
self._spam = 5
@property
def spam(self):
"""BaseClass.getter"""
return self._spam
@spam.setter
def spam(self, value):
self._spam = value
@spam.deleter
def spam(self):
del self._spam
class SubClass(BaseClass):
@BaseClass.spam.getter
def spam(self):
"""SubClass.getter"""
raise PropertyGet(self._spam)
@spam.setter
def spam(self, value):
raise PropertySet(self._spam)
@spam.deleter
def spam(self):
raise PropertyDel(self._spam)
class PropertyDocBase(object):
_spam = 1
def _get_spam(self):
return self._spam
spam = property(_get_spam, doc="spam spam spam")
class PropertyDocSub(PropertyDocBase):
@PropertyDocBase.spam.getter
def spam(self):
"""The decorator does not use this doc string"""
return self._spam
class PropertySubNewGetter(BaseClass):
@BaseClass.spam.getter
def spam(self):
"""new docstring"""
return 5
class PropertyNewGetter(object):
@property
def spam(self):
"""original docstring"""
return 1
@spam.getter
def spam(self):
"""new docstring"""
return 8
class PropertyTests(unittest.TestCase):
def test_property_decorator_baseclass(self):
# see #1620
base = BaseClass()
self.assertEqual(base.spam, 5)
self.assertEqual(base._spam, 5)
base.spam = 10
self.assertEqual(base.spam, 10)
self.assertEqual(base._spam, 10)
delattr(base, "spam")
self.assertTrue(not hasattr(base, "spam"))
self.assertTrue(not hasattr(base, "_spam"))
base.spam = 20
self.assertEqual(base.spam, 20)
self.assertEqual(base._spam, 20)
def test_property_decorator_subclass(self):
# see #1620
sub = SubClass()
self.assertRaises(PropertyGet, getattr, sub, "spam")
self.assertRaises(PropertySet, setattr, sub, "spam", None)
self.assertRaises(PropertyDel, delattr, sub, "spam")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_decorator_subclass_doc(self):
sub = SubClass()
self.assertEqual(sub.__class__.spam.__doc__, "SubClass.getter")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def METHOD_NAME(self):
base = BaseClass()
self.assertEqual(base.__class__.spam.__doc__, "BaseClass.getter")
def test_property_decorator_doc(self):
base = PropertyDocBase()
sub = PropertyDocSub()
self.assertEqual(base.__class__.spam.__doc__, "spam spam spam")
self.assertEqual(sub.__class__.spam.__doc__, "spam spam spam")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_getter_doc_override(self):
newgettersub = PropertySubNewGetter()
self.assertEqual(newgettersub.spam, 5)
self.assertEqual(newgettersub.__class__.spam.__doc__, "new docstring")
newgetter = PropertyNewGetter()
self.assertEqual(newgetter.spam, 8)
self.assertEqual(newgetter.__class__.spam.__doc__, "new docstring")
# Issue 5890: subclasses of property do not preserve method __doc__ strings
class PropertySub(property):
"""This is a subclass of property"""
class PropertySubSlots(property):
"""This is a subclass of property that defines __slots__"""
__slots__ = ()
class PropertySubclassTests(unittest.TestCase):
def test_slots_docstring_copy_exception(self):
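# A property subclass with empty __slots__ gives its instances no writable __doc__, so copying the getter docstring fails (AttributeError on CPython, TypeError on Jython).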
try:
class Foo(object):
@PropertySubSlots
def spam(self):
"""Trying to copy this docstring will raise an exception"""
return 1
#This raises a TypeError in Jython.
except (AttributeError, TypeError):
pass
else:
raise Exception("AttributeError not raised")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_docstring_copy(self):
class Foo(object):
@PropertySub
def spam(self):
"""spam wrapped in property subclass"""
return 1
self.assertEqual(
Foo.spam.__doc__,
"spam wrapped in property subclass")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_setter_copies_getter_docstring(self):
class Foo(object):
def __init__(self): self._spam = 1
@PropertySub
def spam(self):
"""spam wrapped in property subclass"""
return self._spam
@spam.setter
def spam(self, value):
"""this docstring is ignored"""
self._spam = value
foo = Foo()
self.assertEqual(foo.spam, 1)
foo.spam = 2
self.assertEqual(foo.spam, 2)
self.assertEqual(
Foo.spam.__doc__,
"spam wrapped in property subclass")
class FooSub(Foo):
@Foo.spam.setter
def spam(self, value):
"""another ignored docstring"""
self._spam = 'eggs'
foosub = FooSub()
self.assertEqual(foosub.spam, 1)
foosub.spam = 7
self.assertEqual(foosub.spam, 'eggs')
self.assertEqual(
FooSub.spam.__doc__,
"spam wrapped in property subclass")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_new_getter_new_docstring(self):
class Foo(object):
@PropertySub
def spam(self):
"""a docstring"""
return 1
@spam.getter
def spam(self):
"""a new docstring"""
return 2
self.assertEqual(Foo.spam.__doc__, "a new docstring")
class FooBase(object):
@PropertySub
def spam(self):
"""a docstring"""
return 1
class Foo2(FooBase):
@FooBase.spam.getter
def spam(self):
"""a new docstring"""
return 2
self.assertEqual(Foo2.spam.__doc__, "a new docstring")
def test_main():
run_unittest(PropertyTests, PropertySubclassTests)
if __name__ == '__main__':
test_main()
| 3,333 | extract formats |
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
merge_dicts,
parse_codecs,
urljoin,
)
class StreamCZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:stream|televizeseznam)\.cz/[^?#]+/(?P<display_id>[^?#]+)-(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://www.televizeseznam.cz/video/lajna/buh-57953890',
'md5': '40c41ade1464a390a0b447e333df4239',
'info_dict': {
'id': '57953890',
'ext': 'mp4',
'title': 'Bůh',
'display_id': 'buh',
'description': 'md5:8f5f09b9b7bc67df910486cdd88f7165',
'duration': 1369.6,
'view_count': int,
}
}, {
'url': 'https://www.stream.cz/kdo-to-mluvi/kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna-64087937',
'md5': '41fd358000086a1ccdb068c77809b158',
'info_dict': {
'id': '64087937',
'ext': 'mp4',
'title': 'Kdo to mluví? Velké odhalení přináší nový pořad už od 25. srpna',
'display_id': 'kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna',
'description': 'md5:97a811000a6460266029d6c1c2ebcd59',
'duration': 50.2,
'view_count': int,
}
}, {
'url': 'https://www.stream.cz/tajemno/znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili-64147267',
'md5': '3ee4d0be040e8f4a543e67e509d55e3f',
'info_dict': {
'id': '64147267',
'ext': 'mp4',
'title': 'Zničehonic jim skrz střechu prolítnul záhadný předmět. Badatelé vše objasnili',
'display_id': 'znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili',
'description': 'md5:4b8ada6718d34bb011c4e04ca4bc19bf',
'duration': 442.84,
'view_count': int,
}
}]
def METHOD_NAME(self, spl_url, video):
for ext, pref, streams in (
('ts', -1, video.get('http_stream', {}).get('qualities', {})),
('mp4', 1, video.get('mp4'))):
for format_id, stream in streams.items():
if not stream.get('url'):
continue
yield merge_dicts({
'format_id': '-'.join((format_id, ext)),
'ext': ext,
'source_preference': pref,
'url': urljoin(spl_url, stream['url']),
'tbr': float_or_none(stream.get('bandwidth'), scale=1000),
'duration': float_or_none(stream.get('duration'), scale=1000),
'width': stream.get('resolution', 2 * [0])[0] or None,
'height': stream.get('resolution', 2 * [0])[1] or int_or_none(format_id.replace('p', '')),
}, parse_codecs(stream.get('codec')))
def _real_extract(self, url):
display_id, video_id = re.match(self._VALID_URL, url).groups()
data = self._download_json(
'https://www.televizeseznam.cz/api/graphql', video_id, 'Downloading GraphQL result',
data=json.dumps({
'variables': {'urlName': video_id},
'query': '''
query LoadEpisode($urlName : String){ episode(urlName: $urlName){ ...VideoDetailFragmentOnEpisode } }
fragment VideoDetailFragmentOnEpisode on Episode {
id
spl
urlName
name
perex
duration
views
}'''
}).encode('utf-8'),
headers={'Content-Type': 'application/json;charset=UTF-8'}
)['data']['episode']
spl_url = data['spl'] + 'spl2,3'
metadata = self._download_json(spl_url, video_id, 'Downloading playlist')
if 'Location' in metadata and 'data' not in metadata:
spl_url = metadata['Location']
metadata = self._download_json(spl_url, video_id, 'Downloading redirected playlist')
video = metadata['data']
subtitles = {}
for subs in video.get('subtitles', {}).values():
if not subs.get('language'):
continue
for ext, sub_url in subs.get('urls').items():
subtitles.setdefault(subs['language'], []).append({
'ext': ext,
'url': urljoin(spl_url, sub_url)
})
formats = list(self.METHOD_NAME(spl_url, video))
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': data.get('name'),
'description': data.get('perex'),
'duration': float_or_none(data.get('duration')),
'view_count': int_or_none(data.get('views')),
'formats': formats,
'subtitles': subtitles,
}
| 3,334 | cwd |
import enum
from _typeshed import Incomplete
from typing import Any, NamedTuple
from psutil._common import (
NIC_DUPLEX_FULL as NIC_DUPLEX_FULL,
NIC_DUPLEX_HALF as NIC_DUPLEX_HALF,
NIC_DUPLEX_UNKNOWN as NIC_DUPLEX_UNKNOWN,
AccessDenied as AccessDenied,
NoSuchProcess as NoSuchProcess,
ZombieProcess as ZombieProcess,
isfile_strict as isfile_strict,
parse_environ_block as parse_environ_block,
path_exists_strict as path_exists_strict,
supports_ipv6 as supports_ipv6,
usage_percent as usage_percent,
)
from psutil._compat import PY3 as PY3
__extra__all__: Any
POWER_SUPPLY_PATH: str
HAS_PROC_SMAPS: bool
HAS_PROC_SMAPS_ROLLUP: bool
HAS_PROC_IO_PRIORITY: Any
HAS_CPU_AFFINITY: Any
CLOCK_TICKS: Any
PAGESIZE: Any
BOOT_TIME: Any
LITTLE_ENDIAN: Any
DISK_SECTOR_SIZE: int
AF_LINK: Any
AddressFamily: Any
IOPRIO_CLASS_NONE: int
IOPRIO_CLASS_RT: int
IOPRIO_CLASS_BE: int
IOPRIO_CLASS_IDLE: int
class IOPriority(enum.IntEnum):
IOPRIO_CLASS_NONE: int
IOPRIO_CLASS_RT: int
IOPRIO_CLASS_BE: int
IOPRIO_CLASS_IDLE: int
PROC_STATUSES: Any
TCP_STATUSES: Any
class svmem(NamedTuple):
total: int
available: int
percent: float
used: int
free: int
active: int
inactive: int
buffers: int
cached: int
shared: int
slab: int
class sdiskio(NamedTuple):
read_count: Any
write_count: Any
read_bytes: Any
write_bytes: Any
read_time: Any
write_time: Any
read_merged_count: Any
write_merged_count: Any
busy_time: Any
class popenfile(NamedTuple):
path: Any
fd: Any
position: Any
mode: Any
flags: Any
class pmem(NamedTuple):
rss: Any
vms: Any
shared: Any
text: Any
lib: Any
data: Any
dirty: Any
class pfullmem(NamedTuple):
rss: Incomplete
vms: Incomplete
shared: Incomplete
text: Incomplete
lib: Incomplete
data: Incomplete
dirty: Incomplete
uss: Incomplete
pss: Incomplete
swap: Incomplete
class pmmap_grouped(NamedTuple):
path: Any
rss: Any
size: Any
pss: Any
shared_clean: Any
shared_dirty: Any
private_clean: Any
private_dirty: Any
referenced: Any
anonymous: Any
swap: Any
pmmap_ext: Any
class pio(NamedTuple):
read_count: Any
write_count: Any
read_bytes: Any
write_bytes: Any
read_chars: Any
write_chars: Any
class pcputimes(NamedTuple):
user: Any
system: Any
children_user: Any
children_system: Any
iowait: Any
def readlink(path): ...
def file_flags_to_mode(flags): ...
def is_storage_device(name): ...
def set_scputimes_ntuple(procfs_path) -> None: ...
scputimes: Any
prlimit: Any
def calculate_avail_vmem(mems): ...
def virtual_memory() -> svmem: ...
def swap_memory(): ...
def cpu_times(): ...
def per_cpu_times(): ...
def cpu_count_logical(): ...
def cpu_count_cores() -> int | None: ...
def cpu_stats(): ...
def cpu_freq(): ...
net_if_addrs: Any
class _Ipv6UnsupportedError(Exception): ...
class Connections:
tmap: Any
def __init__(self) -> None: ...
def get_proc_inodes(self, pid): ...
def get_all_inodes(self): ...
@staticmethod
def decode_address(addr, family): ...
@staticmethod
def process_inet(file, family, type_, inodes, filter_pid: Incomplete | None = ...) -> None: ...
@staticmethod
def process_unix(file, family, inodes, filter_pid: Incomplete | None = ...) -> None: ...
def retrieve(self, kind, pid: Incomplete | None = ...): ...
def net_connections(kind: str = ...): ...
def net_io_counters(): ...
def net_if_stats(): ...
disk_usage: Any
def disk_io_counters(perdisk: bool = ...): ...
class RootFsDeviceFinder:
major: Incomplete
minor: Incomplete
def __init__(self) -> None: ...
def ask_proc_partitions(self): ...
def ask_sys_dev_block(self): ...
def ask_sys_class_block(self): ...
def find(self): ...
def disk_partitions(all: bool = ...): ...
def sensors_temperatures(): ...
def sensors_fans(): ...
def sensors_battery(): ...
def users(): ...
def boot_time(): ...
def pids(): ...
def pid_exists(pid): ...
def ppid_map(): ...
def wrap_exceptions(fun): ...
class Process:
pid: Any
def __init__(self, pid) -> None: ...
def oneshot_enter(self) -> None: ...
def oneshot_exit(self) -> None: ...
def name(self): ...
def exe(self): ...
def cmdline(self): ...
def environ(self): ...
def terminal(self): ...
def io_counters(self) -> pio: ...
def cpu_times(self): ...
def cpu_num(self): ...
def wait(self, timeout: Incomplete | None = ...): ...
def create_time(self): ...
def memory_info(self): ...
def memory_full_info(self): ...
def memory_maps(self): ...
def METHOD_NAME(self): ...
def num_ctx_switches(self, _ctxsw_re=...): ...
def num_threads(self, _num_threads_re=...): ...
def threads(self): ...
def nice_get(self): ...
def nice_set(self, value): ...
def cpu_affinity_get(self): ...
def cpu_affinity_set(self, cpus) -> None: ...
def ionice_get(self): ...
def ionice_set(self, ioclass, value): ...
def rlimit(self, resource_, limits: Incomplete | None = ...): ...
def status(self): ...
def open_files(self): ...
def connections(self, kind: str = ...): ...
def num_fds(self): ...
def ppid(self): ...
def uids(self, _uids_re=...): ...
def gids(self, _gids_re=...): ...
|
3,335 |
toggle
|
# Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from __future__ import annotations
import json
from typing import Callable
import aqt
from anki.cards import Card, CardId
from anki.lang import without_unicode_isolation
from aqt.qt import *
from aqt.utils import (
addCloseShortcut,
disable_help_button,
qconnect,
restoreGeom,
saveGeom,
setWindowIcon,
tr,
)
from aqt.webview import AnkiWebView, AnkiWebViewKind
class CardInfoDialog(QDialog):
TITLE = "browser card info"
GEOMETRY_KEY = "revlog"
silentlyClose = True
def __init__(
self,
parent: QWidget | None,
mw: aqt.AnkiQt,
card: Card | None,
on_close: Callable | None = None,
geometry_key: str | None = None,
window_title: str | None = None,
) -> None:
super().__init__(parent)
self.mw = mw
self._on_close = on_close
self.GEOMETRY_KEY = geometry_key or self.GEOMETRY_KEY
if window_title:
self.setWindowTitle(window_title)
self._setup_ui(card.id if card else None)
self.show()
def _setup_ui(self, card_id: CardId | None) -> None:
self.mw.garbage_collect_on_dialog_finish(self)
disable_help_button(self)
restoreGeom(self, self.GEOMETRY_KEY, default_size=(800, 800))
addCloseShortcut(self)
setWindowIcon(self)
self.web = AnkiWebView(kind=AnkiWebViewKind.BROWSER_CARD_INFO)
self.web.setVisible(False)
self.web.load_ts_page("card-info")
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.web)
buttons = QDialogButtonBox(QDialogButtonBox.StandardButton.Close)
buttons.setContentsMargins(10, 0, 10, 10)
layout.addWidget(buttons)
qconnect(buttons.rejected, self.reject)
self.setLayout(layout)
self.web.eval("anki.cardInfoPromise = anki.setupCardInfo(document.body);")
self.update_card(card_id)
def update_card(self, card_id: CardId | None) -> None:
self.web.eval(
f"anki.cardInfoPromise.then((c) => c.updateStats({json.dumps(card_id)}));"
)
def reject(self) -> None:
if self._on_close:
self._on_close()
self.web.cleanup()
self.web = None
saveGeom(self, self.GEOMETRY_KEY)
return QDialog.reject(self)
class CardInfoManager:
"""Wrapper class to conveniently toggle, update and close a card info dialog."""
def __init__(self, mw: aqt.AnkiQt, geometry_key: str, window_title: str):
self.mw = mw
self.geometry_key = geometry_key
self.window_title = window_title
self._card: Card | None = None
self._dialog: CardInfoDialog | None = None
def METHOD_NAME(self) -> None:
if self._dialog:
self._dialog.reject()
else:
self._dialog = CardInfoDialog(
None,
self.mw,
self._card,
self._on_close,
self.geometry_key,
self.window_title,
)
def set_card(self, card: Card | None) -> None:
self._card = card
if self._dialog:
self._dialog.update_card(card.id if card else None)
def close(self) -> None:
if self._dialog:
self.METHOD_NAME()
def _on_close(self) -> None:
self._dialog = None
class BrowserCardInfo(CardInfoManager):
def __init__(self, mw: aqt.AnkiQt):
super().__init__(
mw,
"revlog",
without_unicode_isolation(
tr.card_stats_current_card(context=tr.qt_misc_browse())
),
)
class ReviewerCardInfo(CardInfoManager):
def __init__(self, mw: aqt.AnkiQt):
super().__init__(
mw,
"reviewerCardInfo",
without_unicode_isolation(
tr.card_stats_current_card(context=tr.decks_study())
),
)
class PreviousReviewerCardInfo(CardInfoManager):
def __init__(self, mw: aqt.AnkiQt):
super().__init__(
mw,
"previousReviewerCardInfo",
without_unicode_isolation(
tr.card_stats_previous_card(context=tr.decks_study())
),
)
|
3,336 |
job retry data
|
import json
import os
import re
from datetime import datetime
from pathlib import Path
from time import sleep
from typing import Any
import gitlab
import psycopg2
import yaml
from kubernetes import client, config
from kubernetes.client.exceptions import ApiException
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.models.v1_pod_status import V1PodStatus
from opensearch_dsl import Date, Document, connections
config.load_config()
v1_client = client.CoreV1Api()
class JobPayload(Document):
timestamp = Date()
class Index:
name = "gitlab-job-failures-*"
def save(self, **kwargs):
# assign now if no timestamp given
if not self.timestamp:
self.timestamp = datetime.now()
# override the index to go to the proper timeslot
kwargs["index"] = self.timestamp.strftime("gitlab-job-failures-%Y%m%d")
return super().save(**kwargs)
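# Illustrative example (not part of the original script): a payload whose
# timestamp falls on 2023-05-01 is routed by the strftime() call above to the
# index "gitlab-job-failures-20230501".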
GITLAB_TOKEN = os.environ["GITLAB_TOKEN"]
GITLAB_POSTGRES_DB = os.environ["GITLAB_POSTGRES_DB"]
GITLAB_POSTGRES_USER = os.environ["GITLAB_POSTGRES_RO_USER"]
GITLAB_POSTGRES_PASSWORD = os.environ["GITLAB_POSTGRES_RO_PASSWORD"]
GITLAB_POSTGRES_HOST = os.environ["GITLAB_POSTGRES_HOST"]
OPENSEARCH_ENDPOINT = os.environ["OPENSEARCH_ENDPOINT"]
OPENSEARCH_USERNAME = os.environ["OPENSEARCH_USERNAME"]
OPENSEARCH_PASSWORD = os.environ["OPENSEARCH_PASSWORD"]
# Instantiate gitlab api wrapper
gl = gitlab.Gitlab("https://gitlab.spack.io", GITLAB_TOKEN)
# Instantiate postgres connection
pg_conn = psycopg2.connect(
host=GITLAB_POSTGRES_HOST,
port="5432",
dbname=GITLAB_POSTGRES_DB,
user=GITLAB_POSTGRES_USER,
password=GITLAB_POSTGRES_PASSWORD,
)
def METHOD_NAME(job_id: str | int, job_name: str) -> tuple[int, bool]:
with pg_conn:
cur = pg_conn.cursor()
cur.execute(
"""
SELECT attempt_number, COALESCE(retried, FALSE) as retried FROM (
SELECT ROW_NUMBER() OVER (ORDER BY id) as attempt_number, retried, id
FROM ci_builds
WHERE
ci_builds.name = %(job_name)s
and ci_builds.stage_id = (
SELECT stage_id from ci_builds WHERE id = %(job_id)s LIMIT 1
)
and ci_builds.status = 'failed'
) as build_attempts
WHERE build_attempts.id = %(job_id)s
;
""",
{"job_id": job_id, "job_name": job_name},
)
result = cur.fetchone()
cur.close()
return result
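# The query above yields at most one row, so this returns a tuple such as
# (2, True), meaning "second failed attempt, already retried", or None when
# the job id cannot be found (illustrative values; actual data depends on the DB).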
def assign_error_taxonomy(job_input_data: dict[str, Any], job_trace: str):
# Read taxonomy file
with open(Path(__file__).parent / "taxonomy.yaml") as f:
taxonomy = yaml.safe_load(f)["taxonomy"]
job_input_data["error_taxonomy_version"] = taxonomy["version"]
# Compile matching patterns from job trace
matching_patterns = set()
for error_class, lookups in taxonomy["error_classes"].items():
if lookups:
for grep_expr in lookups.get("grep_for", []):
if re.compile(grep_expr).search(job_trace):
matching_patterns.add(error_class)
# If the job logs matched any regexes, assign it the taxonomy
# with the highest priority in the "deconflict order".
# Otherwise, assign it a taxonomy of "other".
job_error_class = None
if len(matching_patterns):
for error_class in taxonomy["deconflict_order"]:
if error_class in matching_patterns:
job_error_class = error_class
break
else:
job_error_class = "other"
# If this job timed out or failed to be scheduled by GitLab,
# label it as such.
if job_input_data["build_failure_reason"] in (
"stuck_or_timeout_failure",
"scheduler_failure",
):
job_error_class = job_input_data["build_failure_reason"]
job_input_data["error_taxonomy"] = job_error_class
return
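# Illustrative (assumed) shape of the taxonomy.yaml consumed above; the real
# file ships alongside this script and may differ:
#
#   taxonomy:
#     version: 1
#     error_classes:
#       build_error:
#         grep_for:
#           - "error: command .* failed"
#       other: null
#     deconflict_order:
#       - build_error
#       - other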
def collect_pod_status(job_input_data: dict[str, Any], job_trace: str):
"""Collect k8s info about this job and store it in the OpenSearch record"""
# Record whether this job was run on a kubernetes pod or via some other
# means (a UO runner, for example)
job_input_data["kubernetes_job"] = "Using Kubernetes executor" in job_trace
# If this job wasn't run on kubernetes, there's no pod to fetch so
# we can exit early
if not job_input_data["kubernetes_job"]:
return
# Scan job logs to infer the name of the pod this job was executed on
runner_name_matches = re.findall(
rf"Running on (.+) via {job_input_data['runner']['description']}...",
job_trace,
)
if not len(runner_name_matches):
job_input_data["pod_status"] = None
return
pod_name = runner_name_matches[0]
pod: V1Pod | None = None
while True:
# Try to fetch pod with kube
try:
pod = v1_client.read_namespaced_pod(name=pod_name, namespace="pipeline")
except ApiException:
# If it doesn't work, that means the pod has already been cleaned up.
# In that case, we break out of the loop and return.
break
# Check if the pod is still running. If so, keep re-fetching it until it's complete
status: V1PodStatus = pod.status
if status.phase != "Running":
break
sleep(1)
if pod:
job_input_data["pod_status"] = pod.status.to_dict()
def main():
# Read input data and extract params
job_input_data = json.loads(os.environ["JOB_INPUT_DATA"])
job_id = job_input_data["build_id"]
job_name = job_input_data["build_name"]
# Annotate if job has been retried
attempt_number, retried = METHOD_NAME(job_id=job_id, job_name=job_name)
job_input_data["attempt_number"] = attempt_number
job_input_data["retried"] = retried
# Convert all string timestamps in webhook payload to `datetime` objects
for key, val in job_input_data.items():
try:
if isinstance(val, str):
job_input_data[key] = datetime.strptime(val, "%Y-%m-%d %H:%M:%S %Z")
except ValueError:
continue
# Retrieve project and job from gitlab API
project = gl.projects.get(job_input_data["project_id"])
job = project.jobs.get(job_input_data["build_id"])
job_trace: str = job.trace().decode() # type: ignore
# Get info about the k8s pod this job ran on
collect_pod_status(job_input_data, job_trace)
# Assign any/all relevant errors
assign_error_taxonomy(job_input_data, job_trace)
# Upload to OpenSearch
connections.create_connection(
hosts=[OPENSEARCH_ENDPOINT],
http_auth=(
OPENSEARCH_USERNAME,
OPENSEARCH_PASSWORD,
),
)
doc = JobPayload(**job_input_data)
doc.save()
if __name__ == "__main__":
main()
|
3,337 |
main
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: elasticache_subnet_group
version_added: 1.0.0
short_description: manage ElastiCache subnet groups
description:
- Creates, modifies, and deletes ElastiCache subnet groups.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
choices: [ 'present' , 'absent' ]
default: 'present'
type: str
name:
description:
- Database subnet group identifier.
- This value is automatically converted to lowercase.
required: true
type: str
description:
description:
- ElastiCache subnet group description.
- When not provided defaults to I(name) on subnet group creation.
type: str
subnets:
description:
- List of subnet IDs that make up the ElastiCache subnet group.
- At least one subnet must be provided when creating an ElastiCache subnet group.
type: list
elements: str
author:
- "Tim Mahoney (@timmahoney)"
extends_documentation_fragment:
- amazon.aws.common.modules
- amazon.aws.region.modules
- amazon.aws.boto3
"""
EXAMPLES = r"""
- name: Add or change a subnet group
community.aws.elasticache_subnet_group:
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
- name: Remove a subnet group
community.aws.elasticache_subnet_group:
state: absent
name: norwegian-blue
"""
RETURN = r"""
cache_subnet_group:
description: Description of the Elasticache Subnet Group.
returned: always
type: dict
contains:
arn:
description: The Amazon Resource Name (ARN) of the cache subnet group.
returned: when the subnet group exists
type: str
sample: arn:aws:elasticache:us-east-1:123456789012:subnetgroup:norwegian-blue
description:
description: The description of the cache subnet group.
returned: when the cache subnet group exists
type: str
sample: My Fancy Ex Parrot Subnet Group
name:
description: The name of the cache subnet group.
returned: when the cache subnet group exists
type: str
sample: norwegian-blue
vpc_id:
description: The VPC ID of the cache subnet group.
returned: when the cache subnet group exists
type: str
      sample: vpc-0123456789abcdef0
subnet_ids:
      description: The IDs of the subnets belonging to the cache subnet group.
returned: when the cache subnet group exists
type: list
elements: str
sample:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_subnet_group(name):
try:
groups = client.describe_cache_subnet_groups(
aws_retry=True,
CacheSubnetGroupName=name,
)["CacheSubnetGroups"]
except is_boto3_error_code("CacheSubnetGroupNotFoundFault"):
return None
except (
botocore.exceptions.ClientError,
botocore.exceptions.BotoCoreError,
) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to describe subnet group")
if not groups:
return None
if len(groups) > 1:
        module.fail_json(
msg="Found multiple matches for subnet group",
cache_subnet_groups=camel_dict_to_snake_dict(groups),
)
subnet_group = camel_dict_to_snake_dict(groups[0])
subnet_group["name"] = subnet_group["cache_subnet_group_name"]
subnet_group["description"] = subnet_group["cache_subnet_group_description"]
subnet_ids = list(s["subnet_identifier"] for s in subnet_group["subnets"])
subnet_group["subnet_ids"] = subnet_ids
return subnet_group
def create_subnet_group(name, description, subnets):
if not subnets:
module.fail_json(msg="At least one subnet must be provided when creating a subnet group")
if module.check_mode:
return True
try:
if not description:
description = name
client.create_cache_subnet_group(
aws_retry=True,
CacheSubnetGroupName=name,
CacheSubnetGroupDescription=description,
SubnetIds=subnets,
)
return True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to create subnet group")
def update_subnet_group(subnet_group, name, description, subnets):
update_params = dict()
if description and subnet_group["description"] != description:
update_params["CacheSubnetGroupDescription"] = description
if subnets:
old_subnets = set(subnet_group["subnet_ids"])
new_subnets = set(subnets)
if old_subnets != new_subnets:
update_params["SubnetIds"] = list(subnets)
if not update_params:
return False
if module.check_mode:
return True
try:
client.modify_cache_subnet_group(
aws_retry=True,
CacheSubnetGroupName=name,
**update_params,
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to update subnet group")
return True
def delete_subnet_group(name):
if module.check_mode:
return True
try:
client.delete_cache_subnet_group(
aws_retry=True,
CacheSubnetGroupName=name,
)
return True
except is_boto3_error_code("CacheSubnetGroupNotFoundFault"):
# AWS is "eventually consistent", cope with the race conditions where
# deletion hadn't completed when we ran describe
return False
except (
botocore.exceptions.ClientError,
botocore.exceptions.BotoCoreError,
) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to delete subnet group")
def METHOD_NAME():
argument_spec = dict(
state=dict(default="present", choices=["present", "absent"]),
name=dict(required=True),
description=dict(required=False),
subnets=dict(required=False, type="list", elements="str"),
)
global module
global client
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
state = module.params.get("state")
name = module.params.get("name").lower()
description = module.params.get("description")
subnets = module.params.get("subnets")
client = module.client("elasticache", retry_decorator=AWSRetry.jittered_backoff())
subnet_group = get_subnet_group(name)
changed = False
if state == "present":
if not subnet_group:
result = create_subnet_group(name, description, subnets)
changed |= result
else:
result = update_subnet_group(subnet_group, name, description, subnets)
changed |= result
subnet_group = get_subnet_group(name)
else:
if subnet_group:
result = delete_subnet_group(name)
changed |= result
subnet_group = None
module.exit_json(changed=changed, cache_subnet_group=subnet_group)
if __name__ == "__main__":
METHOD_NAME()
|
3,338 |
actions
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jira issue tracker."""
import datetime
from urllib.parse import urljoin
from dateutil import parser
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.issue_management import issue_tracker
from clusterfuzz._internal.issue_management.jira.issue_tracker_manager import \
IssueTrackerManager
class Issue(issue_tracker.Issue):
"""Represents an issue."""
def __init__(self, itm, jira_issue):
self.itm = itm
self.jira_issue = jira_issue
self._ccs = issue_tracker.LabelStore(self.itm.get_watchers(self.jira_issue))
self._components = issue_tracker.LabelStore(
self.jira_issue.fields.components)
self._labels = issue_tracker.LabelStore(self.jira_issue.fields.labels)
@property
def issue_tracker(self):
"""The IssueTracker for this issue."""
return IssueTracker(self.itm)
@property
def id(self):
"""The issue identifier."""
return int(self.jira_issue.id)
@property
def key(self):
"""The issue key (e.g. FUZZ-123)."""
return self.jira_issue.key
@property
def title(self):
"""The issue title."""
return self.jira_issue.fields.summary
@title.setter
def title(self, new_title):
self.jira_issue.fields.summary = new_title
@property
def reporter(self):
"""The issue reporter."""
return self.jira_issue.fields.reporter
@reporter.setter
def reporter(self, new_reporter):
self.jira_issue.fields.reporter = new_reporter
@property
def is_open(self):
"""Whether the issue is open."""
return self.jira_issue.fields.resolution is None
@property
def closed_time(self):
return datetime.datetime.fromtimestamp(
parser.parse(self.jira_issue.fields.resolutiondate).timestamp())
@property
def status(self):
"""The issue status."""
return self.jira_issue.fields.status
@status.setter
def status(self, new_status):
self.jira_issue.fields.status = new_status
@property
def body(self):
"""The issue body."""
return self.jira_issue.fields.description
@body.setter
def body(self, new_body):
self.jira_issue.fields.description = new_body
@property
def assignee(self):
"""The issue assignee."""
return self.jira_issue.fields.assignee
@assignee.setter
def assignee(self, new_assignee):
self.jira_issue.fields.assignee = new_assignee
@property
def ccs(self):
"""The issue CC list."""
return self._ccs
@property
def labels(self):
"""The issue labels list."""
return self._labels
@property
def components(self):
"""The issue component list."""
return self._components
# FIXME: Add support for notify arguments
def save(self, new_comment=None, notify=True): # pylint: disable=unused-argument
"""Save the issue."""
# add new comment to issue
if new_comment:
self.itm.client.add_comment(self.jira_issue, new_comment)
for added in self._components.added:
self.components.add(added)
for removed in self._components.removed:
self.components.remove(removed)
self._components.reset_tracking()
for added in self._ccs.added:
self.ccs.add(added)
for removed in self._ccs.removed:
self.ccs.remove(removed)
self._ccs.reset_tracking()
for added in self._labels.added:
self.labels.add(added)
for removed in self._labels.removed:
self.labels.remove(removed)
self._labels.reset_tracking()
self.itm.save(self)
@property
def METHOD_NAME(self):
return ()
@property
def merged_into(self):
pass
class IssueTracker(issue_tracker.IssueTracker):
"""Issue tracker interface."""
def __init__(self, itm):
self._itm = itm
@property
def project(self):
return self._itm.project_name
def new_issue(self):
jira_issue = self._itm.create()
return Issue(self._itm, jira_issue)
def get_issue(self, issue_id):
jira_issue = self._itm.get_issue(issue_id)
if not jira_issue:
return None
return Issue(self._itm, jira_issue)
def find_issues(self, keywords=None, only_open=False):
"""Find issues."""
search_text = 'project = {project_name}' + _get_search_text(keywords)
search_text = search_text.format(project_name=self._itm.project_name)
if only_open:
search_text += ' AND resolution = Unresolved'
issues = self._itm.get_issues(search_text)
return [Issue(self._itm, issue) for issue in issues]
def issue_url(self, issue_id):
"""Return the issue URL with the given ID."""
issue = self.get_issue(issue_id)
if not issue:
return None
config = db_config.get()
url = urljoin(config.jira_url, f'/browse/{str(issue.key)}')
return url
def find_issues_url(self, keywords=None, only_open=None):
search_text = 'project = {project_name}' + _get_search_text(keywords)
search_text = search_text.format(project_name=self._itm.project_name)
if only_open:
search_text += ' AND resolution = Unresolved'
config = db_config.get()
return urljoin(config.jira_url, f'/issues/?jql={search_text}')
def _get_issue_tracker_manager_for_project(project_name):
"""Return jira issue tracker manager for the given project."""
# If there is no issue tracker set, bail out.
if not project_name or project_name == 'disabled':
return None
return IssueTrackerManager(project_name=project_name)
def get_issue_tracker(project_name, config): # pylint: disable=unused-argument
"""Get the issue tracker for the project name."""
itm = _get_issue_tracker_manager_for_project(project_name)
if itm is None:
return None
return IssueTracker(itm)
def _get_search_text(keywords):
"""Get search text."""
jira_special_characters = '+-&|!(){}[]^~*?\\:'
search_text = ''
for keyword in keywords:
# Replace special characters with whitespace as they are not allowed and
# can't be searched for.
stripped_keyword = keyword
for special_character in jira_special_characters:
stripped_keyword = stripped_keyword.replace(special_character, ' ')
# coalesce multiple spaces into one.
stripped_keyword = ' '.join(stripped_keyword.split())
search_text += f' AND text ~ "{stripped_keyword}"'
return search_text
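# Hypothetical example: _get_search_text(["foo", "bar+baz"]) returns
# ' AND text ~ "foo" AND text ~ "bar baz"', since "+" is stripped as a Jira
# special character and the resulting spaces are coalesced.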
|
3,339 |
test can launch
|
# pylint: disable=protected-access, unused-argument, no-value-for-parameter
import os
from unittest import mock, TestCase
from .test_common import setUp
from radical.pilot.agent.launch_method.aprun import APRun
# ------------------------------------------------------------------------------
#
class TestAPRun(TestCase):
# --------------------------------------------------------------------------
#
@mock.patch.object(APRun, '__init__', return_value=None)
@mock.patch('radical.utils.which', return_value='/usr/bin/aprun')
def test_init_from_scratch(self, mocked_which, mocked_init):
lm_aprun = APRun('', {}, None, None, None)
lm_info = lm_aprun._init_from_scratch({}, '')
self.assertEqual(lm_info['command'], mocked_which())
# --------------------------------------------------------------------------
#
@mock.patch.object(APRun, '__init__', return_value=None)
def test_init_from_info(self, mocked_init):
lm_aprun = APRun('', {}, None, None, None)
lm_info = {'env' : {'test_env': 'test_value'},
'env_sh' : 'env/lm_aprun.sh',
'command': '/usr/bin/aprun'}
lm_aprun._init_from_info(lm_info)
self.assertEqual(lm_aprun._env, lm_info['env'])
self.assertEqual(lm_aprun._env_sh, lm_info['env_sh'])
self.assertEqual(lm_aprun._command, lm_info['command'])
lm_info['command'] = ''
with self.assertRaises(AssertionError):
lm_aprun._init_from_info(lm_info)
# --------------------------------------------------------------------------
#
@mock.patch.object(APRun, '__init__', return_value=None)
def METHOD_NAME(self, mocked_init):
lm_aprun = APRun('', {}, None, None, None)
self.assertTrue(lm_aprun.can_launch(
task={'description': {'executable': 'script'}})[0])
self.assertFalse(lm_aprun.can_launch(
task={'description': {'executable': None}})[0])
# --------------------------------------------------------------------------
#
@mock.patch.object(APRun, '__init__', return_value=None)
def test_get_launcher_env(self, mocked_init):
lm_aprun = APRun('', {}, None, None, None)
lm_info = {'env' : {'test_env': 'test_value'},
'env_sh' : 'env/lm_aprun.sh',
'command': '/usr/bin/aprun'}
lm_aprun._init_from_info(lm_info)
self.assertIn('. $RP_PILOT_SANDBOX/%s' % lm_info['env_sh'],
lm_aprun.get_launcher_env())
# --------------------------------------------------------------------------
#
@mock.patch.object(APRun, '__init__', return_value=None)
@mock.patch('radical.utils.Logger')
def test_get_launch_rank_cmds(self, mocked_logger, mocked_init):
lm_aprun = APRun('', {}, None, None, None)
lm_aprun._log = mocked_logger
lm_aprun._command = 'aprun'
test_cases = setUp('lm', 'aprun')
for task, result in test_cases:
if 'cores_per_node' in task.get('slots', {}):
os.environ['SAGA_PPN'] = str(task['slots']['cores_per_node'])
command = lm_aprun.get_launch_cmds(task, '')
self.assertEqual(command, result['launch_cmd'], msg=task['uid'])
command = lm_aprun.get_exec(task)
self.assertEqual(command, result['rank_exec'], msg=task['uid'])
os.environ.pop('SAGA_PPN', None)
# --------------------------------------------------------------------------
#
@mock.patch.object(APRun, '__init__', return_value=None)
def test_get_rank_cmd(self, mocked_init):
lm_aprun = APRun('', {}, None, None, None)
command = lm_aprun.get_rank_cmd()
self.assertIn('$MPI_RANK', command)
self.assertIn('$PMIX_RANK', command)
self.assertIn('$ALPS_APP_PE', command)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
tc = TestAPRun()
tc.test_init_from_scratch()
tc.test_init_from_info()
tc.METHOD_NAME()
tc.test_get_launcher_env()
tc.test_get_launch_rank_cmds()
tc.test_get_rank_cmd()
# ------------------------------------------------------------------------------
# pylint: enable=protected-access, unused-argument, no-value-for-parameter
|
3,340 |
detector id
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetFindingIdsResult',
'AwaitableGetFindingIdsResult',
'get_finding_ids',
'get_finding_ids_output',
]
@pulumi.output_type
class GetFindingIdsResult:
"""
A collection of values returned by getFindingIds.
"""
def __init__(__self__, METHOD_NAME=None, finding_ids=None, has_findings=None, id=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'detector_id' to be a str")
pulumi.set(__self__, "detector_id", METHOD_NAME)
if finding_ids and not isinstance(finding_ids, list):
raise TypeError("Expected argument 'finding_ids' to be a list")
pulumi.set(__self__, "finding_ids", finding_ids)
if has_findings and not isinstance(has_findings, bool):
raise TypeError("Expected argument 'has_findings' to be a bool")
pulumi.set(__self__, "has_findings", has_findings)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
@property
@pulumi.getter(name="detectorId")
def METHOD_NAME(self) -> str:
return pulumi.get(self, "detector_id")
@property
@pulumi.getter(name="findingIds")
def finding_ids(self) -> Sequence[str]:
"""
A list of finding IDs for the specified detector.
"""
return pulumi.get(self, "finding_ids")
@property
@pulumi.getter(name="hasFindings")
def has_findings(self) -> bool:
"""
Indicates whether findings are present for the specified detector.
"""
return pulumi.get(self, "has_findings")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
class AwaitableGetFindingIdsResult(GetFindingIdsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFindingIdsResult(
METHOD_NAME=self.METHOD_NAME,
finding_ids=self.finding_ids,
has_findings=self.has_findings,
id=self.id)
def get_finding_ids(METHOD_NAME: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFindingIdsResult:
"""
Data source for managing an AWS GuardDuty Finding Ids.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.guardduty.get_finding_ids(detector_id=aws_guardduty_detector["example"]["id"])
```
:param str detector_id: ID of the GuardDuty detector.
"""
__args__ = dict()
__args__['detectorId'] = METHOD_NAME
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:guardduty/getFindingIds:getFindingIds', __args__, opts=opts, typ=GetFindingIdsResult).value
return AwaitableGetFindingIdsResult(
METHOD_NAME=pulumi.get(__ret__, 'detector_id'),
finding_ids=pulumi.get(__ret__, 'finding_ids'),
has_findings=pulumi.get(__ret__, 'has_findings'),
id=pulumi.get(__ret__, 'id'))
@_utilities.lift_output_func(get_finding_ids)
def get_finding_ids_output(METHOD_NAME: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFindingIdsResult]:
"""
Data source for managing an AWS GuardDuty Finding Ids.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.guardduty.get_finding_ids(detector_id=aws_guardduty_detector["example"]["id"])
```
:param str detector_id: ID of the GuardDuty detector.
"""
...
|
3,341 |
solve mat
|
# This file is part of PyOP2
#
# PyOP2 is Copyright (c) 2012-2014, Imperial College London and
# others. Please see the AUTHORS file in the main source directory for
# a full list of copyright holders. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Imperial College London or that of other
# contributors may not be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTERS
# ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import loopy
from pyop2.codegen.rep2loopy import SolveCallable, INVCallable
import numpy as np
from pyop2 import op2
from pyop2.configuration import target
@pytest.fixture
def s():
return op2.Set(1)
@pytest.fixture
def zero_mat(s):
return op2.Dat(s ** (2, 2), [[0.0, 0.0], [0.0, 0.0]])
@pytest.fixture
def inv_mat(s):
return op2.Dat(s ** (2, 2), [[1.0, 2.0], [3.0, 4.0]])
@pytest.fixture
def zero_vec(s):
return op2.Dat(s ** (2, 1), [0.0, 0.0])
@pytest.fixture
def METHOD_NAME(s):
d = op2.Dat(s ** (2, 2), [[2.0, 1.0], [-3.0, 2.0]])
return d
@pytest.fixture
def solve_vec(s):
return op2.Dat(s ** (2, 1), [1.0, 0.0])
class TestCallables:
def test_inverse_callable(self, zero_mat, inv_mat):
loopy.set_caching_enabled(False)
k = loopy.make_kernel(
["{ : }"],
"""
B[:,:] = inverse(A[:,:])
""",
[loopy.GlobalArg('B', dtype=np.float64, shape=(2, 2)),
loopy.GlobalArg('A', dtype=np.float64, shape=(2, 2))],
target=target,
name="callable_kernel",
lang_version=(2018, 2))
k = loopy.register_callable(k, INVCallable.name, INVCallable())
code = loopy.generate_code_v2(k).device_code()
        code = code.replace('void callable_kernel', 'static void callable_kernel')
loopykernel = op2.Kernel(code, "callable_kernel", ldargs=["-llapack"])
op2.par_loop(loopykernel, zero_mat.dataset.set, zero_mat(op2.WRITE), inv_mat(op2.READ))
expected = np.linalg.inv(inv_mat.data)
assert np.allclose(expected, zero_mat.data)
def test_solve_callable(self, zero_vec, METHOD_NAME, solve_vec):
loopy.set_caching_enabled(False)
k = loopy.make_kernel(
["{ : }"],
"""
x[:] = solve(A[:,:], b[:])
""",
[loopy.GlobalArg('x', dtype=np.float64, shape=(2, )),
loopy.GlobalArg('A', dtype=np.float64, shape=(2, 2)),
loopy.GlobalArg('b', dtype=np.float64, shape=(2, ),)],
target=target,
name="callable_kernel2",
lang_version=(2018, 2))
k = loopy.register_callable(k, SolveCallable.name, SolveCallable())
code = loopy.generate_code_v2(k).device_code()
        code = code.replace('void callable_kernel2', 'static void callable_kernel2')
loopykernel = op2.Kernel(code, "callable_kernel2", ldargs=["-llapack"])
args = [zero_vec(op2.READ), METHOD_NAME(op2.READ), solve_vec(op2.WRITE)]
op2.par_loop(loopykernel, METHOD_NAME.dataset.set, *args)
expected = np.linalg.solve(METHOD_NAME.data, solve_vec.data)
assert np.allclose(expected, zero_vec.data)
|
3,342 |
test content hash differs
|
from collections.abc import (
Callable,
Mapping,
)
from reconcile.saas_auto_promotions_manager.subscriber import (
CONTENT_HASH_LENGTH,
ConfigHash,
Subscriber,
)
from .data_keys import (
DESIRED_REF,
DESIRED_TARGET_HASHES,
NAMESPACE_REF,
TARGET_FILE_PATH,
)
def test_can_compute_content_hash(subscriber_builder: Callable[[Mapping], Subscriber]):
subscribers = [
subscriber_builder(
{
NAMESPACE_REF: "some_namespace",
TARGET_FILE_PATH: "some_saas",
DESIRED_REF: "new",
DESIRED_TARGET_HASHES: [
ConfigHash(channel="a", target_config_hash="b", parent_saas="c"),
ConfigHash(channel="e", target_config_hash="f", parent_saas="g"),
],
}
),
subscriber_builder(
{
NAMESPACE_REF: "other_namespace",
TARGET_FILE_PATH: "other_saas",
DESIRED_REF: "new",
DESIRED_TARGET_HASHES: [
ConfigHash(channel="a", target_config_hash="b", parent_saas="c"),
ConfigHash(channel="e", target_config_hash="f", parent_saas="g"),
],
}
),
]
assert (
len(Subscriber.combined_content_hash(subscribers=subscribers))
== CONTENT_HASH_LENGTH
)
def test_content_hash_is_deterministic(
subscriber_builder: Callable[[Mapping], Subscriber]
):
subscribers = [
subscriber_builder(
{
NAMESPACE_REF: "some_namespace",
TARGET_FILE_PATH: "some_saas",
DESIRED_REF: "new",
DESIRED_TARGET_HASHES: [
ConfigHash(channel="a", target_config_hash="b", parent_saas="c"),
ConfigHash(channel="e", target_config_hash="f", parent_saas="g"),
],
}
),
subscriber_builder(
{
NAMESPACE_REF: "other_namespace",
TARGET_FILE_PATH: "other_saas",
DESIRED_REF: "old",
DESIRED_TARGET_HASHES: [
ConfigHash(channel="h", target_config_hash="i", parent_saas="j"),
ConfigHash(channel="e", target_config_hash="f", parent_saas="g"),
],
}
),
]
hashes = set()
for _ in range(3):
hashes.add(Subscriber.combined_content_hash(subscribers=subscribers))
assert len(hashes) == 1
def METHOD_NAME(subscriber_builder: Callable[[Mapping], Subscriber]):
subscriber_a = subscriber_builder(
{
NAMESPACE_REF: "some_namespace",
TARGET_FILE_PATH: "some_saas",
DESIRED_REF: "new",
DESIRED_TARGET_HASHES: [
ConfigHash(channel="a", target_config_hash="b", parent_saas="c"),
ConfigHash(channel="e", target_config_hash="f", parent_saas="g"),
],
}
)
subscriber_b = subscriber_builder(
{
NAMESPACE_REF: "some_namespace",
TARGET_FILE_PATH: "some_other_saas",
DESIRED_REF: "new",
DESIRED_TARGET_HASHES: [
ConfigHash(channel="a", target_config_hash="b", parent_saas="c"),
],
}
)
assert Subscriber.combined_content_hash(
[subscriber_a]
) != Subscriber.combined_content_hash([subscriber_b])
def test_content_hash_equals(subscriber_builder: Callable[[Mapping], Subscriber]):
subscriber_a = subscriber_builder(
{
NAMESPACE_REF: "some_namespace",
TARGET_FILE_PATH: "some_saas",
DESIRED_REF: "new",
DESIRED_TARGET_HASHES: [
ConfigHash(channel="a", target_config_hash="b", parent_saas="c"),
ConfigHash(channel="e", target_config_hash="f", parent_saas="g"),
ConfigHash(channel="h", target_config_hash="i", parent_saas="j"),
ConfigHash(channel="k", target_config_hash="l", parent_saas="m"),
],
}
)
subscriber_b = subscriber_builder(
{
NAMESPACE_REF: "some_namespace",
TARGET_FILE_PATH: "some_saas",
DESIRED_REF: "new",
DESIRED_TARGET_HASHES: list(
reversed(
[
ConfigHash(
channel="a", target_config_hash="b", parent_saas="c"
),
ConfigHash(
channel="e", target_config_hash="f", parent_saas="g"
),
ConfigHash(
channel="h", target_config_hash="i", parent_saas="j"
),
ConfigHash(
channel="k", target_config_hash="l", parent_saas="m"
),
]
)
),
}
)
assert Subscriber.combined_content_hash(
[subscriber_a, subscriber_b]
) == Subscriber.combined_content_hash([subscriber_b, subscriber_a])
|
3,343 |
is client error
|
from enum import IntEnum
class codes(IntEnum):
"""HTTP status codes and reason phrases
Status codes from the following RFCs are all observed:
* RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
* RFC 6585: Additional HTTP Status Codes
* RFC 3229: Delta encoding in HTTP
* RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
* RFC 5842: Binding Extensions to WebDAV
* RFC 7238: Permanent Redirect
* RFC 2295: Transparent Content Negotiation in HTTP
* RFC 2774: An HTTP Extension Framework
* RFC 7540: Hypertext Transfer Protocol Version 2 (HTTP/2)
* RFC 2324: Hyper Text Coffee Pot Control Protocol (HTCPCP/1.0)
* RFC 7725: An HTTP Status Code to Report Legal Obstacles
* RFC 8297: An HTTP Status Code for Indicating Hints
* RFC 8470: Using Early Data in HTTP
"""
def __new__(cls, value: int, phrase: str = "") -> "codes":
obj = int.__new__(cls, value) # type: ignore
obj._value_ = value
obj.phrase = phrase # type: ignore
return obj
def __str__(self) -> str:
return str(self.value)
@classmethod
def get_reason_phrase(cls, value: int) -> str:
try:
return codes(value).phrase # type: ignore
except ValueError:
return ""
@classmethod
def is_redirect(cls, value: int) -> bool:
return value in (
# 301 (Cacheable redirect. Method may change to GET.)
codes.MOVED_PERMANENTLY,
# 302 (Uncacheable redirect. Method may change to GET.)
codes.FOUND,
# 303 (Client should make a GET or HEAD request.)
codes.SEE_OTHER,
# 307 (Equiv. 302, but retain method)
codes.TEMPORARY_REDIRECT,
# 308 (Equiv. 301, but retain method)
codes.PERMANENT_REDIRECT,
)
@classmethod
def is_error(cls, value: int) -> bool:
return 400 <= value <= 599
@classmethod
def METHOD_NAME(cls, value: int) -> bool:
return 400 <= value <= 499
@classmethod
def is_server_error(cls, value: int) -> bool:
return 500 <= value <= 599
# informational
CONTINUE = 100, "Continue"
SWITCHING_PROTOCOLS = 101, "Switching Protocols"
PROCESSING = 102, "Processing"
EARLY_HINTS = 103, "Early Hints"
# success
OK = 200, "OK"
CREATED = 201, "Created"
ACCEPTED = 202, "Accepted"
NON_AUTHORITATIVE_INFORMATION = 203, "Non-Authoritative Information"
NO_CONTENT = 204, "No Content"
RESET_CONTENT = 205, "Reset Content"
PARTIAL_CONTENT = 206, "Partial Content"
MULTI_STATUS = 207, "Multi-Status"
ALREADY_REPORTED = 208, "Already Reported"
IM_USED = 226, "IM Used"
# redirection
MULTIPLE_CHOICES = 300, "Multiple Choices"
MOVED_PERMANENTLY = 301, "Moved Permanently"
FOUND = 302, "Found"
SEE_OTHER = 303, "See Other"
NOT_MODIFIED = 304, "Not Modified"
USE_PROXY = 305, "Use Proxy"
TEMPORARY_REDIRECT = 307, "Temporary Redirect"
PERMANENT_REDIRECT = 308, "Permanent Redirect"
# client error
BAD_REQUEST = 400, "Bad Request"
UNAUTHORIZED = 401, "Unauthorized"
PAYMENT_REQUIRED = 402, "Payment Required"
FORBIDDEN = 403, "Forbidden"
NOT_FOUND = 404, "Not Found"
METHOD_NOT_ALLOWED = 405, "Method Not Allowed"
NOT_ACCEPTABLE = 406, "Not Acceptable"
PROXY_AUTHENTICATION_REQUIRED = 407, "Proxy Authentication Required"
REQUEST_TIMEOUT = 408, "Request Timeout"
CONFLICT = 409, "Conflict"
GONE = 410, "Gone"
LENGTH_REQUIRED = 411, "Length Required"
PRECONDITION_FAILED = 412, "Precondition Failed"
REQUEST_ENTITY_TOO_LARGE = 413, "Request Entity Too Large"
REQUEST_URI_TOO_LONG = 414, "Request-URI Too Long"
UNSUPPORTED_MEDIA_TYPE = 415, "Unsupported Media Type"
REQUESTED_RANGE_NOT_SATISFIABLE = 416, "Requested Range Not Satisfiable"
EXPECTATION_FAILED = 417, "Expectation Failed"
IM_A_TEAPOT = 418, "I'm a teapot"
MISDIRECTED_REQUEST = 421, "Misdirected Request"
UNPROCESSABLE_ENTITY = 422, "Unprocessable Entity"
LOCKED = 423, "Locked"
FAILED_DEPENDENCY = 424, "Failed Dependency"
TOO_EARLY = 425, "Too Early"
UPGRADE_REQUIRED = 426, "Upgrade Required"
PRECONDITION_REQUIRED = 428, "Precondition Required"
TOO_MANY_REQUESTS = 429, "Too Many Requests"
REQUEST_HEADER_FIELDS_TOO_LARGE = 431, "Request Header Fields Too Large"
UNAVAILABLE_FOR_LEGAL_REASONS = 451, "Unavailable For Legal Reasons"
# server errors
INTERNAL_SERVER_ERROR = 500, "Internal Server Error"
NOT_IMPLEMENTED = 501, "Not Implemented"
BAD_GATEWAY = 502, "Bad Gateway"
SERVICE_UNAVAILABLE = 503, "Service Unavailable"
GATEWAY_TIMEOUT = 504, "Gateway Timeout"
HTTP_VERSION_NOT_SUPPORTED = 505, "HTTP Version Not Supported"
VARIANT_ALSO_NEGOTIATES = 506, "Variant Also Negotiates"
INSUFFICIENT_STORAGE = 507, "Insufficient Storage"
LOOP_DETECTED = 508, "Loop Detected"
NOT_EXTENDED = 510, "Not Extended"
NETWORK_AUTHENTICATION_REQUIRED = 511, "Network Authentication Required"
# Include lower-case styles for `requests` compatibility.
for code in codes:
setattr(codes, code._name_.lower(), int(code))
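# Minimal usage sketch, added for illustration and not part of the original
# module; it only exercises the helpers defined above.
if __name__ == "__main__":
    assert codes.NOT_FOUND == 404
    assert codes.not_found == 404  # lower-case alias added by the loop above
    assert codes.get_reason_phrase(503) == "Service Unavailable"
    assert codes.is_error(404) and not codes.is_server_error(404)
    assert codes.is_redirect(codes.PERMANENT_REDIRECT)
    print("status-code helpers behave as expected")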
|
3,344 |
init models
|
# SPDX-License-Identifier: LGPL-3.0-or-later
import json
import os
import unittest
import numpy as np
from common import (
j_loader,
run_dp,
tests_path,
)
from deepmd.env import (
GLOBAL_NP_FLOAT_PRECISION,
tf,
)
from deepmd.train.run_options import (
RunOptions,
)
from deepmd.train.trainer import (
DPTrainer,
)
from deepmd.utils.argcheck import (
normalize,
)
from deepmd.utils.compat import (
update_deepmd_input,
)
from deepmd.utils.data_system import (
DeepmdDataSystem,
)
if GLOBAL_NP_FLOAT_PRECISION == np.float32:
default_places = 4
else:
default_places = 10
def _file_delete(file):
if os.path.isdir(file):
os.rmdir(file)
elif os.path.isfile(file):
os.remove(file)
def METHOD_NAME():
data_file = str(tests_path / os.path.join("init_frz_model", "data"))
frozen_model = str(tests_path / "init_frz_se_a_spin.pb")
ckpt = str(tests_path / "init_frz_se_a_spin.ckpt")
run_opt_ckpt = RunOptions(init_model=ckpt, log_level=20)
run_opt_frz = RunOptions(init_frz_model=frozen_model, log_level=20)
INPUT = str(tests_path / "input.json")
jdata = j_loader("test_model_spin.json")
jdata["training"]["save_ckpt"] = ckpt
jdata["training"]["training_data"]["systems"] = [str(tests_path / "model_spin/")]
jdata["training"]["validation_data"]["systems"] = [str(tests_path / "model_spin/")]
del jdata["training"]["set_prefix"]
with open(INPUT, "w") as fp:
json.dump(jdata, fp, indent=4)
ret = run_dp("dp train " + INPUT)
np.testing.assert_equal(ret, 0, "DP train failed!")
ret = run_dp("dp freeze -c " + str(tests_path) + " -o " + frozen_model)
np.testing.assert_equal(ret, 0, "DP freeze failed!")
jdata = update_deepmd_input(jdata, warning=True, dump="input_v2_compat.json")
jdata = normalize(jdata)
model_ckpt = DPTrainer(jdata, run_opt=run_opt_ckpt)
jdata = j_loader("test_model_spin.json")
jdata["training"]["save_ckpt"] = ckpt
jdata["training"]["training_data"]["systems"] = [str(tests_path / "model_spin/")]
jdata["training"]["validation_data"]["systems"] = [str(tests_path / "model_spin/")]
del jdata["training"]["set_prefix"]
jdata["loss"]["type"] = "ener_spin"
jdata = update_deepmd_input(jdata, warning=True, dump="input_v2_compat.json")
jdata = normalize(jdata)
model_frz = DPTrainer(jdata, run_opt=run_opt_frz)
rcut = model_ckpt.model.get_rcut()
type_map = model_ckpt.model.get_type_map()
data = DeepmdDataSystem(
systems=jdata["training"]["training_data"]["systems"],
batch_size=1,
test_size=1,
rcut=rcut,
type_map=type_map,
trn_all_set=True,
)
data_requirement = {
"energy": {
"ndof": 1,
"atomic": False,
"must": False,
"high_prec": True,
"type_sel": None,
"repeat": 1,
"default": 0.0,
},
"force": {
"ndof": 3,
"atomic": True,
"must": False,
"high_prec": False,
"type_sel": None,
"repeat": 1,
"default": 0.0,
},
"virial": {
"ndof": 9,
"atomic": False,
"must": False,
"high_prec": False,
"type_sel": None,
"repeat": 1,
"default": 0.0,
},
"atom_ener": {
"ndof": 1,
"atomic": True,
"must": False,
"high_prec": False,
"type_sel": None,
"repeat": 1,
"default": 0.0,
},
"atom_pref": {
"ndof": 1,
"atomic": True,
"must": False,
"high_prec": False,
"type_sel": None,
"repeat": 3,
"default": 0.0,
},
}
data.add_dict(data_requirement)
stop_batch = jdata["training"]["numb_steps"]
return INPUT, ckpt, frozen_model, model_ckpt, model_frz, data, stop_batch
(
INPUT,
CKPT,
FROZEN_MODEL,
CKPT_TRAINER,
FRZ_TRAINER,
VALID_DATA,
STOP_BATCH,
) = METHOD_NAME()
class TestInitFrzModelR(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dp_ckpt = CKPT_TRAINER
cls.dp_frz = FRZ_TRAINER
cls.valid_data = VALID_DATA
cls.stop_batch = STOP_BATCH
@classmethod
def tearDownClass(cls):
_file_delete(INPUT)
_file_delete(FROZEN_MODEL)
_file_delete("out.json")
_file_delete(str(tests_path / "checkpoint"))
_file_delete(CKPT + ".meta")
_file_delete(CKPT + ".index")
_file_delete(CKPT + ".data-00000-of-00001")
_file_delete(CKPT + "-0.meta")
_file_delete(CKPT + "-0.index")
_file_delete(CKPT + "-0.data-00000-of-00001")
_file_delete(CKPT + "-1.meta")
_file_delete(CKPT + "-1.index")
_file_delete(CKPT + "-1.data-00000-of-00001")
_file_delete("input_v2_compat.json")
_file_delete("lcurve.out")
def test_single_frame(self):
valid_batch = self.valid_data.get_batch()
natoms = valid_batch["natoms_vec"]
tf.reset_default_graph()
self.dp_ckpt.build(self.valid_data, self.stop_batch)
self.dp_ckpt._init_session()
feed_dict_ckpt = self.dp_ckpt.get_feed_dict(valid_batch, is_training=False)
ckpt_rmse_ckpt = self.dp_ckpt.loss.eval(
self.dp_ckpt.sess, feed_dict_ckpt, natoms
)
tf.reset_default_graph()
self.dp_frz.build(self.valid_data, self.stop_batch)
self.dp_frz._init_session()
feed_dict_frz = self.dp_frz.get_feed_dict(valid_batch, is_training=False)
ckpt_rmse_frz = self.dp_frz.loss.eval(self.dp_frz.sess, feed_dict_frz, natoms)
tf.reset_default_graph()
# check values
np.testing.assert_almost_equal(
ckpt_rmse_ckpt["rmse_e"], ckpt_rmse_frz["rmse_e"], default_places
)
np.testing.assert_almost_equal(
ckpt_rmse_ckpt["rmse_fr"], ckpt_rmse_frz["rmse_fr"], default_places
)
np.testing.assert_almost_equal(
ckpt_rmse_ckpt["rmse_fm"], ckpt_rmse_frz["rmse_fm"], default_places
)
|
3,345 |
main
|
import os
import argparse
import json
import pandas as pd
from autogluon.multimodal import MultiModalPredictor
from ray import tune
from dataset import (
AdultTabularDataset,
AloiTabularDataset,
CaliforniaHousingTabularDataset,
CovtypeTabularDataset,
EpsilonTabularDataset,
HelenaTabularDataset,
HiggsSmallTabularDataset,
JannisTabularDataset,
MicrosoftTabularDataset,
YahooTabularDataset,
YearTabularDataset,
)
TABULAR_DATASETS = {
"ad": AdultTabularDataset,
"al": AloiTabularDataset,
"ca": CaliforniaHousingTabularDataset,
"co": CovtypeTabularDataset,
"ep": EpsilonTabularDataset,
"he": HelenaTabularDataset,
"hi": HiggsSmallTabularDataset,
"ja": JannisTabularDataset,
"mi": MicrosoftTabularDataset,
"ya": YahooTabularDataset,
"ye": YearTabularDataset,
}
automm_hyperparameters = {
"data.categorical.convert_to_text": False,
"model.names": ["categorical_transformer", "numerical_transformer", "fusion_transformer"],
"model.numerical_transformer.embedding_arch": ["linear"],
"env.batch_size": 128,
"env.per_gpu_batch_size": 128,
"env.eval_batch_size_ratio": 1,
"env.num_workers": 12,
"env.num_workers_evaluation": 12,
"env.num_gpus": 1,
"optimization.max_epochs": 2000, # Specify a large value to train until convergence
"optimization.weight_decay": 1.0e-5,
"optimization.lr_choice": None,
"optimization.lr_schedule": "polynomial_decay",
"optimization.warmup_steps": 0.0,
"optimization.patience": 20,
"optimization.top_k": 3,
}
hyperparameter_tune_kwargs = {
"searcher": "random",
"scheduler": "FIFO",
"num_trials": 50,
}
def METHOD_NAME(args):
if args.gpu_id is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
assert args.dataset_name in TABULAR_DATASETS.keys(), "Unsupported dataset name."
### Dataset loading
train_data = TABULAR_DATASETS[args.dataset_name]("train", args.dataset_dir)
val_data = TABULAR_DATASETS[args.dataset_name]("val", args.dataset_dir)
test_data = TABULAR_DATASETS[args.dataset_name]("test", args.dataset_dir)
automm_hyperparameters["optimization.learning_rate"] = args.lr
automm_hyperparameters["optimization.end_lr"] = args.end_lr
if args.embedding_arch is not None:
automm_hyperparameters["model.numerical_transformer.embedding_arch"] = args.embedding_arch
tabular_hyperparameters = {
"GBM": [
{},
{"extra_trees": True, "ag_args": {"name_suffix": "XT"}},
],
"CAT": {},
"XGB": {},
"AG_AUTOMM": automm_hyperparameters,
}
if args.mode == "single":
### model initialization
predictor = MultiModalPredictor(
label=train_data.label_column,
problem_type=train_data.problem_type,
eval_metric=train_data.metric,
path=args.exp_dir,
verbosity=4,
)
### model training
predictor.fit(
train_data=train_data.data,
tuning_data=val_data.data,
seed=args.seed,
hyperparameters=automm_hyperparameters,
)
### model inference
scores = predictor.evaluate(data=test_data.data, metrics=[test_data.metric])
with open(os.path.join(args.exp_dir, "scores.json"), "w") as f:
json.dump(scores, f)
print(scores)
elif args.mode == "single_hpo":
automm_hyperparameters["model.fusion_transformer.ffn_dropout"] = tune.uniform(0.0, 0.5)
automm_hyperparameters["model.fusion_transformer.attention_dropout"] = tune.uniform(0.0, 0.5)
automm_hyperparameters["model.fusion_transformer.residual_dropout"] = tune.uniform(0.0, 0.2)
automm_hyperparameters["model.fusion_transformer.ffn_d_hidden"] = tune.randint(150, 300)
automm_hyperparameters["model.numerical_transformer.ffn_d_hidden"] = tune.randint(150, 300)
automm_hyperparameters["optimization.learning_rate"] = tune.uniform(0.00001, 0.001)
automm_hyperparameters["optimization.end_lr"] = 1e-5
### model initialization
predictor = MultiModalPredictor(
label=train_data.label_column,
problem_type=train_data.problem_type,
eval_metric=train_data.metric,
path=args.exp_dir,
verbosity=4,
)
### model training
predictor.fit(
train_data=train_data.data,
tuning_data=val_data.data,
seed=args.seed,
hyperparameters=automm_hyperparameters,
hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
)
### model inference
scores = predictor.evaluate(data=test_data.data, metrics=[test_data.metric])
with open(os.path.join(args.exp_dir, "scores.json"), "w") as f:
json.dump(scores, f)
print(scores)
elif args.mode == "weighted" or args.mode == "single_bag5" or args.mode == "stack5":
if args.mode == "single_bag5":
tabular_hyperparameters = {
"AG_AUTOMM": automm_hyperparameters,
}
num_bag_folds, num_stack_levels = 5, 0
elif args.mode == "weighted":
num_bag_folds, num_stack_levels = None, None
elif args.mode == "stack5":
num_bag_folds, num_stack_levels = 5, 1
else:
raise NotImplementedError
from autogluon.tabular import TabularPredictor
predictor = TabularPredictor(eval_metric=train_data.metric, label=train_data.label_column, path=args.exp_dir)
predictor.fit(
train_data=train_data.data,
tuning_data=val_data.data if num_bag_folds is None else None,
hyperparameters=tabular_hyperparameters,
num_bag_folds=num_bag_folds,
num_stack_levels=num_stack_levels,
)
leaderboard = predictor.leaderboard()
leaderboard.to_csv(os.path.join(args.exp_dir, "leaderboard.csv"))
else:
raise NotImplementedError
scores = predictor.evaluate(data=test_data.data)
with open(os.path.join(args.exp_dir, "scores.json"), "w") as f:
json.dump(scores, f)
print(scores)
predictions = predictor.predict(data=test_data.data)
predictions.to_csv(os.path.join(args.exp_dir, "predictions.csv"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_id", default=None, type=str, help="Specify the GPU to use.")
parser.add_argument("--dataset_name", default="ad", type=str, help="Specify the dataset to run the experinments.")
parser.add_argument("--dataset_dir", default="./dataset", type=str, help="Path to the dataset.")
parser.add_argument("--exp_dir", default=None, type=str, help="Path to the outputs.")
parser.add_argument("--lr", default=1e-04, type=float, help="Initial learning rate.")
parser.add_argument("--end_lr", default=1e-04, type=float, help="End learning rate.")
parser.add_argument(
"--mode",
choices=["single", "single_hpo", "weighted", "single_bag5", "stack5"],
default="single",
help="Method to run with.",
)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--embedding_arch",
type=str,
nargs="+",
default=None,
help="Embedding architecture for numerical features in FT_Transformer.",
)
args = parser.parse_args()
if args.exp_dir is None:
args.exp_dir = f"./results/{args.dataset_name}"
METHOD_NAME(args)
|
3,346 |
get object name
|
import functools
import inspect
import re
from typing import Callable, Iterable, List, Optional, Any, Union, TYPE_CHECKING
import interactions.api.events as events
from interactions.client.const import T
from interactions.models.discord.enums import ComponentType
if TYPE_CHECKING:
from interactions.models.discord.components import BaseComponent
__all__ = (
"escape_mentions",
"find",
"find_all",
"get",
"get_all",
"wrap_partial",
"get_parameters",
"get_event_name",
"get_object_name",
"maybe_coroutine",
"nulled_boolean_get",
)
mention_reg = re.compile(r"@(everyone|here|[!&]?[0-9]{17,20})")
camel_to_snake = re.compile(r"([A-Z]+)")
def escape_mentions(content: str) -> str:
"""
Escape mentions that could ping someone in a string.
!!! note
This does not escape channel mentions as they do not ping anybody
Args:
content: The string to escape
Returns:
Processed string
"""
return mention_reg.sub("@\u200b\\1", content)
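# Illustrative behaviour (derived from the regex above): the inserted zero-width space
# breaks the ping, e.g. escape_mentions("hi @everyone") would return "hi @\u200beveryone",
# while channel mentions are left untouched.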
def find(predicate: Callable[[T], bool], sequence: Iterable[T]) -> Optional[T]:
"""
Find the first element in a sequence that matches the predicate.
??? Hint "Example Usage:"
```python
member = find(lambda m: m.name == "UserName", guild.members)
```
Args:
predicate: A callable that returns a boolean value
sequence: A sequence to be searched
Returns:
A match if found, otherwise None
"""
return next((el for el in sequence if predicate(el)), None)
def find_all(predicate: Callable[[T], bool], sequence: Iterable[T]) -> List[T]:
"""
Find all elements in a sequence that match the predicate.
??? Hint "Example Usage:"
```python
members = find_all(lambda m: m.name == "UserName", guild.members)
```
Args:
predicate: A callable that returns a boolean value
sequence: A sequence to be searched
Returns:
A list of matches
"""
return [el for el in sequence if predicate(el)]
def get(sequence: Iterable[T], **kwargs: Any) -> Optional[T]:
"""
Find the first element in a sequence that matches all attrs.
??? Hint "Example Usage:"
```python
channel = get(guild.channels, nsfw=False, category="General")
```
Args:
sequence: A sequence to be searched
**kwargs: Keyword arguments to search the sequence for
Returns:
A match if found, otherwise None
"""
if not kwargs:
return sequence[0]
for el in sequence:
if any(not hasattr(el, attr) for attr in kwargs):
continue
if all(getattr(el, attr) == value for attr, value in kwargs.items()):
return el
return None
def get_all(sequence: Iterable[T], **kwargs: Any) -> List[T]:
"""
Find all elements in a sequence that match all attrs.
??? Hint "Example Usage:"
```python
channels = get_all(guild.channels, nsfw=False, category="General")
```
Args:
sequence: A sequence to be searched
**kwargs: Keyword arguments to search the sequence for
Returns:
A list of matches
"""
if not kwargs:
return sequence
matches = []
for el in sequence:
if any(not hasattr(el, attr) for attr in kwargs):
continue
if all(getattr(el, attr) == value for attr, value in kwargs.items()):
matches.append(el)
return matches
def wrap_partial(obj: Any, cls: Any) -> Callable:
"""
    🎁 Wraps a command's callback objects into partials.
!!! note
This is used internally, you shouldn't need to use this function
Args:
obj: The command object to process
cls: The class to use in partials
Returns:
The original command object with its callback methods wrapped
"""
if obj.callback is None or isinstance(obj.callback, functools.partial):
return obj
if "_no_wrap" not in getattr(obj.callback, "__name__", ""):
obj.callback = functools.partial(obj.callback, cls)
if getattr(obj, "error_callback", None):
obj.error_callback = functools.partial(obj.error_callback, cls)
if getattr(obj, "pre_run_callback", None):
obj.pre_run_callback = functools.partial(obj.pre_run_callback, cls)
if getattr(obj, "post_run_callback", None):
obj.post_run_callback = functools.partial(obj.post_run_callback, cls)
if getattr(obj, "autocomplete_callbacks", None):
obj.autocomplete_callbacks = {k: functools.partial(v, cls) for k, v in obj.autocomplete_callbacks.items()}
if getattr(obj, "subcommands", None):
obj.subcommands = {k: wrap_partial(v, cls) for k, v in obj.subcommands.items()}
return obj
def get_parameters(callback: Callable) -> dict[str, inspect.Parameter]:
"""
Gets all the parameters of a callback.
Args:
callback: The callback to get the parameters of
Returns:
A dictionary of parameters
"""
return {p.name: p for p in inspect.signature(callback).parameters.values()}
@functools.lru_cache(maxsize=50)
def get_event_name(event: Union[str, "events.BaseEvent"]) -> str:
"""
Get the event name smartly from an event class or string name.
Args:
event: The event to parse the name of
Returns:
The event name
"""
name = event
if inspect.isclass(name) and issubclass(name, events.BaseEvent):
name = name.__name__
# convert CamelCase to snake_case
name = camel_to_snake.sub(r"_\1", name).lower()
# remove any leading underscores
name = name.lstrip("_")
# remove any `on_` prefixes
name = name.removeprefix("on_")
return name
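# Sketch of the conversion above (names are illustrative; any BaseEvent subclass or plain
# string follows the same path): both get_event_name(events.MessageCreate) and
# get_event_name("on_message_create") would resolve to "message_create" after the
# CamelCase -> snake_case substitution and prefix stripping.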
def METHOD_NAME(x: Any) -> str:
"""
Gets the name of virtually any object.
Args:
x (Any): The object to get the name of.
Returns:
str: The name of the object.
"""
try:
return x.__name__
except AttributeError:
return repr(x) if hasattr(x, "__origin__") else x.__class__.__name__
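# Quick illustration of the helper above (assumed inputs): a plain function returns its
# __name__, a typing generic that lacks __name__ but exposes __origin__ falls back to
# repr(x), and any other value falls back to its class name, e.g. an int instance -> "int".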
async def maybe_coroutine(func: Callable, *args, **kwargs) -> Any:
"""Allows running either a coroutine or a function."""
if inspect.iscoroutinefunction(func):
return await func(*args, **kwargs)
return func(*args, **kwargs)
def disable_components(*components: "BaseComponent") -> list["BaseComponent"]:
"""Disables all components in a list of components."""
for component in components:
if component.type == ComponentType.ACTION_ROW:
disable_components(*component.components)
else:
component.disabled = True
return list(components)
def nulled_boolean_get(data: dict[str, Any], key: str) -> bool:
"""
Gets a boolean value from a dictionary, but treats None as True.
Args:
data: The dictionary to get the value from
key: The key to get the value from
Returns:
The boolean value of the key
"""
    # Discord tags are odd: a value of None counts as True, True stays True, and False stays False
if key in data:
return True if data[key] is None else bool(data[key])
return False
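# Worked example of the rule above (hypothetical payload): nulled_boolean_get({"premium": None}, "premium")
# returns True, {"premium": False} returns False, and a missing key returns False.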
|
3,347 |
show search
|
# -*- coding: utf-8 -*-
# vStream https://github.com/Kodi-vStream/venom-xbmc-addons
import re
from resources.lib.gui.hoster import cHosterGui
from resources.lib.gui.gui import cGui
from resources.lib.handler.inputParameterHandler import cInputParameterHandler
from resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.util import cUtil
from resources.lib.parser import cParser
from resources.lib.comaddon import progress, siteManager
SITE_IDENTIFIER = 'watchvf'
SITE_NAME = 'WatchVF'
SITE_DESC = 'Films en streaming.'
URL_MAIN = siteManager().getUrlMain(SITE_IDENTIFIER)
URL_SEARCH = (URL_MAIN + '?s=', 'showMovies')
URL_SEARCH_MOVIES = (URL_SEARCH[0], 'showMovies')
FUNCTION_SEARCH = 'showMovies'
MOVIE_MOVIE = (True, 'load')
MOVIE_NEWS = (URL_MAIN + 'movies/', 'showMovies')
MOVIE_GENRES = (True, 'showGenres')
def load():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', 'http://venom/')
oGui.addDir(SITE_IDENTIFIER, 'showSearch', 'Recherche', 'search.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def METHOD_NAME():
oGui = cGui()
sSearchText = oGui.showKeyBoard()
if sSearchText:
sUrl = URL_SEARCH[0] + sSearchText
showMovies(sUrl)
oGui.setEndOfDirectory()
return
def showGenres():
oGui = cGui()
liste = [['Action', 'action'], ['Aventure', 'aventure'], ['Comédie', 'comedie'], ['Drame', 'drame'],
['Epouvante Horreur', 'epouvante-horreur'], ['Policier', 'policier'], ['Romance', 'romance'],
['Thriller', 'thriller']]
oOutputParameterHandler = cOutputParameterHandler()
for sTitle, sUrl in liste:
oOutputParameterHandler.addParameter('siteUrl', URL_MAIN + 'film-genre/' + sUrl + '/')
oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMovies(sSearch=''):
oGui = cGui()
if sSearch:
oUtil = cUtil()
sUrl = sSearch.replace(' ', '+') + '&post_type=movie'
sSearch = oUtil.CleanName(sSearch.replace(URL_SEARCH[0], ''))
else:
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
oParser = cParser()
sHtmlContent = oParser.abParse(sHtmlContent, '', 'class="widget-area sidebar-area movie-sidebar')
sPattern = 'poster"><a href="([^"]+).+?src="([^"]+).+?title">([^<]+)'
aResult = oParser.parse(sHtmlContent, sPattern)
if not aResult[0]:
oGui.addText(SITE_IDENTIFIER)
else:
total = len(aResult[1])
progress_ = progress().VScreate(SITE_NAME)
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
sUrl = aEntry[0]
sThumb = aEntry[1]
sTitle = re.sub('^Voir', '', aEntry[2].replace(' Film en streaming complet', ''))
            # If searching and there are too many results, filter them out
if sSearch and total > 3:
if not oUtil.CheckOccurence(sSearch, sTitle):
continue
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sTitle, '', sThumb, '', oOutputParameterHandler)
progress_.VSclose(progress_)
if not sSearch:
sNextPage, sPaging = __checkForNextPage(sHtmlContent)
if sNextPage:
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sNextPage)
oGui.addNext(SITE_IDENTIFIER, 'showMovies', 'Page ' + sPaging, oOutputParameterHandler)
oGui.setEndOfDirectory()
def __checkForNextPage(sHtmlContent):
oParser = cParser()
    sPattern = r'>(\d+)</a></li>\s*<li><a class="next page-numbers" href="([^"]+)">Next Page'
aResult = oParser.parse(sHtmlContent, sPattern)
if aResult[0]:
sNextPage = aResult[1][0][1]
sNumberMax = aResult[1][0][0]
sNumberNext = re.search('page.([0-9]+)', sNextPage).group(1)
sPaging = sNumberNext + '/' + sNumberMax
return sNextPage, sPaging
return False, 'none'
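# Illustrative parse (hypothetical markup): for HTML containing
# '>8</a></li><li><a class="next page-numbers" href="https://example/page/2/">Next Page',
# __checkForNextPage would return ('https://example/page/2/', '2/8').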
def showHosters():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
oParser = cParser()
sPattern = '<iframe.+?src=["\'](.+?)["\']'
aResult = oParser.parse(sHtmlContent, sPattern)
if not aResult[0]:
oGui.addText(SITE_IDENTIFIER)
if aResult[0]:
for aEntry in aResult[1]:
sHosterUrl = aEntry
oHoster = cHosterGui().checkHoster(sHosterUrl)
if oHoster:
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
oGui.setEndOfDirectory()
|
3,348 |
flatten errors
|
import json
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, Union
from .json import pydantic_encoder
from .utils import Representation
if TYPE_CHECKING:
from typing_extensions import TypedDict
from .config import BaseConfig
from .types import ModelOrDc
from .typing import ReprArgs
Loc = Tuple[Union[int, str], ...]
class _ErrorDictRequired(TypedDict):
loc: Loc
msg: str
type: str
class ErrorDict(_ErrorDictRequired, total=False):
ctx: Dict[str, Any]
__all__ = 'ErrorWrapper', 'ValidationError'
class ErrorWrapper(Representation):
__slots__ = 'exc', '_loc'
def __init__(self, exc: Exception, loc: Union[str, 'Loc']) -> None:
self.exc = exc
self._loc = loc
def loc_tuple(self) -> 'Loc':
if isinstance(self._loc, tuple):
return self._loc
else:
return (self._loc,)
def __repr_args__(self) -> 'ReprArgs':
return [('exc', self.exc), ('loc', self.loc_tuple())]
# ErrorList is something like Union[List[Union[List[ErrorWrapper], ErrorWrapper]], ErrorWrapper]
# but recursive, therefore just use:
ErrorList = Union[Sequence[Any], ErrorWrapper]
class ValidationError(Representation, ValueError):
__slots__ = 'raw_errors', 'model', '_error_cache'
def __init__(self, errors: Sequence[ErrorList], model: 'ModelOrDc') -> None:
self.raw_errors = errors
self.model = model
self._error_cache: Optional[List['ErrorDict']] = None
def errors(self) -> List['ErrorDict']:
if self._error_cache is None:
try:
config = self.model.__config__ # type: ignore
except AttributeError:
config = self.model.__pydantic_model__.__config__ # type: ignore
self._error_cache = list(METHOD_NAME(self.raw_errors, config))
return self._error_cache
def json(self, *, indent: Union[None, int, str] = 2) -> str:
return json.dumps(self.errors(), indent=indent, default=pydantic_encoder)
def __str__(self) -> str:
errors = self.errors()
no_errors = len(errors)
return (
f'{no_errors} validation error{"" if no_errors == 1 else "s"} for {self.model.__name__}\n'
f'{display_errors(errors)}'
)
def __repr_args__(self) -> 'ReprArgs':
return [('model', self.model.__name__), ('errors', self.errors())]
def display_errors(errors: List['ErrorDict']) -> str:
return '\n'.join(f'{_display_error_loc(e)}\n {e["msg"]} ({_display_error_type_and_ctx(e)})' for e in errors)
def _display_error_loc(error: 'ErrorDict') -> str:
return ' -> '.join(str(e) for e in error['loc'])
def _display_error_type_and_ctx(error: 'ErrorDict') -> str:
t = 'type=' + error['type']
ctx = error.get('ctx')
if ctx:
return t + ''.join(f'; {k}={v}' for k, v in ctx.items())
else:
return t
def METHOD_NAME(
errors: Sequence[Any], config: Type['BaseConfig'], loc: Optional['Loc'] = None
) -> Generator['ErrorDict', None, None]:
for error in errors:
if isinstance(error, ErrorWrapper):
if loc:
error_loc = loc + error.loc_tuple()
else:
error_loc = error.loc_tuple()
if isinstance(error.exc, ValidationError):
yield from METHOD_NAME(error.exc.raw_errors, config, error_loc)
else:
yield error_dict(error.exc, config, error_loc)
elif isinstance(error, list):
yield from METHOD_NAME(error, config, loc=loc)
else:
raise RuntimeError(f'Unknown error object: {error}')
def error_dict(exc: Exception, config: Type['BaseConfig'], loc: 'Loc') -> 'ErrorDict':
type_ = get_exc_type(exc.__class__)
msg_template = config.error_msg_templates.get(type_) or getattr(exc, 'msg_template', None)
ctx = exc.__dict__
if msg_template:
msg = msg_template.format(**ctx)
else:
msg = str(exc)
d: 'ErrorDict' = {'loc': loc, 'msg': msg, 'type': type_}
if ctx:
d['ctx'] = ctx
return d
_EXC_TYPE_CACHE: Dict[Type[Exception], str] = {}
def get_exc_type(cls: Type[Exception]) -> str:
# slightly more efficient than using lru_cache since we don't need to worry about the cache filling up
try:
return _EXC_TYPE_CACHE[cls]
except KeyError:
r = _get_exc_type(cls)
_EXC_TYPE_CACHE[cls] = r
return r
def _get_exc_type(cls: Type[Exception]) -> str:
if issubclass(cls, AssertionError):
return 'assertion_error'
base_name = 'type_error' if issubclass(cls, TypeError) else 'value_error'
if cls in (TypeError, ValueError):
# just TypeError or ValueError, no extra code
return base_name
# if it's not a TypeError or ValueError, we just take the lowercase of the exception name
# no chaining or snake case logic, use "code" for more complex error types.
code = getattr(cls, 'code', None) or cls.__name__.replace('Error', '').lower()
return base_name + '.' + code
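# Mapping sketch (example classes are hypothetical): ValueError -> 'value_error',
# TypeError -> 'type_error', AssertionError -> 'assertion_error', and a subclass such as
# class FooError(ValueError) without a 'code' attribute -> 'value_error.foo'.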
|
3,349 |
test static
|
import unittest
from unittest.mock import patch
import numpy as np
import xarray as xr
from data.calculated import CalculatedArray, CalculatedData
from data.variable import Variable
from data.variable_list import VariableList
class TestCalculatedData(unittest.TestCase):
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_nothing(self, mock_query_func):
mock_query_func.return_value = VariableList(
[
Variable(
"votemper",
"Sea water potential temperature",
"Kelvin",
sorted(["time", "depth", "latitude", "longitude"]),
)
]
)
with CalculatedImpl("tests/testdata/mercator_test.nc") as data:
self.assertEqual(len(data.variables), 1)
v = data.get_dataset_variable("votemper")
self.assertEqual(xr.DataArray, type(v))
self.assertAlmostEqual(v[0, 0, 17, 816].values, 271.1796875)
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
@patch("data.sqlite_database.SQLiteDatabase.get_variable_dims")
def test_new_variable(self, mock_get_var_dims, mock_query_func):
mock_get_var_dims.return_value = ["time", "depth", "latitude", "longitude"]
mock_query_func.return_value = VariableList(
[
Variable(
"votemper",
"Sea water potential temperature",
"Kelvin",
sorted(["time", "depth", "latitude", "longitude"]),
)
]
)
calculated = {
"votemper_new": {
"equation": "votemper * 2",
"long_name": "Temperature",
"dims": ("time", "depth", "latitude", "longitude"),
"units": "degree_C",
"valid_min": -273.15,
"valid_max": 999.0,
}
}
with CalculatedImpl(
"tests/testdata/mercator_test.nc", calculated=calculated
) as data:
self.assertEqual(len(data.variables), 2)
v = data.get_dataset_variable("votemper_new")
self.assertAlmostEqual(v[0, 0, 17, 816].values, 2.0 * 271.1796875)
self.assertEqual(v.attrs.long_name, "Temperature")
self.assertEqual(v.shape, (1, 50, 850, 1800))
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_override(self, mock_query_func):
mock_query_func.return_value = VariableList(
[
Variable(
"votemper",
"Sea water potential temperature",
"Kelvin",
sorted(["time", "depth", "latitude", "longitude"]),
)
]
)
calculated = {
"votemper": {
"equation": "votemper -273.15",
"units": "degree_C",
"dims": ("time", "depth", "latitude", "longitude"),
}
}
with CalculatedImpl(
"tests/testdata/mercator_test.nc", calculated=calculated
) as data:
self.assertEqual(len(data.variables), 1)
v = data.get_dataset_variable("votemper")
self.assertAlmostEqual(v[0, 0, 17, 816].values, 271.1796875 - 273.15)
self.assertEqual(v.attrs.long_name, "Sea water potential temperature")
self.assertEqual(v.shape, (1, 50, 850, 1800))
def test_calculated_var_wo_dims_raises(self):
calculated = {
"votemper": {
"equation": "votemper -273.15",
"units": "degree_C",
}
}
with CalculatedImpl(
"tests/testdata/mercator_test.nc", calculated=calculated
) as data:
with self.assertRaises(KeyError):
data.get_dataset_variable("votemper")
class CalculatedImpl(CalculatedData):
def __init__(self, url: str, **kwargs):
super().__init__(url, **kwargs)
def get_point(self):
pass
def get_profile(self):
pass
def get_raw_point(self):
pass
def depths(self):
pass
class TestCalculatedArray(unittest.TestCase):
def test_attrs(self):
attrs = {"my_attr": 420}
dataset = xr.Dataset()
array = CalculatedArray(dataset, "3 * 5", [], attrs)
self.assertEqual(array[:].attrs, attrs)
self.assertEqual(array.attrs, attrs)
def METHOD_NAME(self):
dataset = xr.Dataset()
array = CalculatedArray(dataset, "3 * 5", [])
self.assertEqual(array[0], 15)
def test_passthrough(self):
dataset = xr.Dataset({"var": ("x", [1, 2, 3, 4, 5])})
array = CalculatedArray(dataset, "var", ["x"])
self.assertEqual(array[0], 1)
self.assertEqual(array[2], 3)
self.assertEqual(array[4], 5)
def test_single_expression(self):
dataset = xr.Dataset({"var": ("x", [1, 2, 3, 4, 5])})
array = CalculatedArray(dataset, "var * 5", ["x"])
self.assertEqual(array[0], 5)
self.assertEqual(array[2], 15)
self.assertEqual(array[4], 25)
def test_multiple_expression(self):
dataset = xr.Dataset(
{
"var": ("x", [1, 2, 3, 4, 5]),
"var2": ("x", [5, 4, 3, 2, 1]),
}
)
array = CalculatedArray(dataset, "var + var2", ["x"])
self.assertEqual(array[0], 6)
self.assertEqual(array[2], 6)
self.assertEqual(array[4], 6)
def test_different_dimensions(self):
dataset = xr.Dataset(
{
"var": ("x", [1, 2]),
"var2": ("y", [3, 4]),
"var3": (("x", "y"), [[5, 6], [7, 8]]),
"var4": (("y", "x"), [[9, 10], [11, 12]]),
}
)
array = CalculatedArray(dataset, "var + var2", ["x"])
self.assertIsNan(array[0])
array = CalculatedArray(dataset, "var3 + var4", ["x"])
self.assertIsNan(array[0, 0])
array = CalculatedArray(dataset, "var + var3", ["x", "y"])
self.assertEqual(array[0, 0], 6)
self.assertEqual(array[0, 1], 7)
self.assertEqual(array[1, 0], 9)
self.assertEqual(array[1, 1], 10)
def assertIsNan(self, value):
v = value
return self.assertTrue(np.isnan(v))
|
3,350 |
parse account
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hadoop Utilities Functions
"""
import os
import sys
import json
import time
import math
import collections
import numpy as np
HADOOP_BIN = None
FS_NAME = None
FS_UGI = None
ERR_LOG = "./hadoop_err.log"
Ddfs = " -Ddfs.client.block.write.retries=15 -Ddfs.rpc.timeout=300000 -Ddfs.delete.trash=1"
def set_hadoop_account(hadoop_bin, fs_name, fs_ugi):
"""set hadoop account"""
global HADOOP_BIN
global FS_NAME
global FS_UGI
HADOOP_BIN = hadoop_bin
FS_NAME = fs_name
FS_UGI = fs_ugi
def set_hadoop_err(err_log="./hadoop_err.log"):
"""set hadoop err file"""
global ERR_LOG
ERR_LOG = err_log
def METHOD_NAME(hadoop_bin, fs_name, fs_ugi):
"""parse hadoop account"""
is_local_account = not (hadoop_bin is None or fs_name is None or
fs_ugi is None)
is_global_account = not (HADOOP_BIN is None or FS_NAME is None or
FS_UGI is None)
if not is_local_account and not is_global_account:
msg = "hadoop account should be setted before using hadoop commands." + \
" But got [hadoop_bin = %s], [fs_name = %s] and [fs_ugi = %s]" % \
(hadoop_bin, fs_name, fs_ugi)
raise ValueError(msg)
elif is_global_account:
hadoop_bin = HADOOP_BIN
fs_name = FS_NAME
fs_ugi = FS_UGI
return hadoop_bin, fs_name, fs_ugi
def check_hadoop_path(path, hadoop_bin=None, fs_name=None, fs_ugi=None):
"""check hadoop path"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
if path.startswith("hdfs://") or path.startswith("afs://"):
return path
else:
real_path = fs_name + path
return real_path
def make_base_cmd(hadoop_bin, fs_name, fs_ugi):
"""make base hadoop command"""
cmd = "%s fs" % hadoop_bin
cmd += " -D fs.default.name=%s" % fs_name
cmd += " -D hadoop.job.ugi=%s" % fs_ugi
cmd += Ddfs
return cmd
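# Illustrative output (placeholder account values): make_base_cmd("hadoop", "hdfs://ns1", "user,passwd")
# would yield "hadoop fs -D fs.default.name=hdfs://ns1 -D hadoop.job.ugi=user,passwd"
# followed by the Ddfs retry/timeout flags defined above.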
def ls(path, hadoop_bin=None, fs_name=None, fs_ugi=None):
"""hadoop list"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
path = check_hadoop_path(path, hadoop_bin, fs_name, fs_ugi)
cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -ls %s" % path
cmd += " | awk '{print $8}'"
cmd += " 2>%s" % ERR_LOG
filelist = os.popen(cmd).read().split()
return filelist
def mkdir(path, hadoop_bin=None, fs_name=None, fs_ugi=None):
"""hadoop mkdir directory"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
path = check_hadoop_path(path, hadoop_bin, fs_name, fs_ugi)
cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -mkdir %s" % path
cmd += " 2>%s" % ERR_LOG
ret = os.system(cmd)
return ret
def exists(path, hadoop_bin=None, fs_name=None, fs_ugi=None):
"""hadoop exists"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
path = check_hadoop_path(path, hadoop_bin, fs_name, fs_ugi)
cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -test -e " + path
cmd += " 2>%s ; echo $?" % ERR_LOG
ret = int(os.popen(cmd).read().strip())
ret = True if ret == 0 else False
return ret
def rm(path, hadoop_bin=None, fs_name=None, fs_ugi=None):
"""hadoop remove"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
path = check_hadoop_path(path, hadoop_bin, fs_name, fs_ugi)
cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -rmr %s" % path
cmd += " 2>%s" % ERR_LOG
if exists(path):
ret = os.system(cmd)
return ret
else:
return 0
def open(filename, hadoop_bin=None, fs_name=None, fs_ugi=None):
"""hadoop open file"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
filename = check_hadoop_path(filename, hadoop_bin, fs_name, fs_ugi)
cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -cat %s" % filename
cmd += " 2>%s" % ERR_LOG
p = os.popen(cmd)
return p
def gz_open(filename, hadoop_bin=None, fs_name=None, fs_ugi=None):
"""hadoop open gz file"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
filename = check_hadoop_path(filename, hadoop_bin, fs_name, fs_ugi)
cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -text %s" % filename
cmd += " 2>%s" % ERR_LOG
p = os.popen(cmd)
return p
def mv(src, dest, hadoop_bin=None, fs_name=None, fs_ugi=None):
"""hadoop move"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
src = check_hadoop_path(src, hadoop_bin, fs_name, fs_ugi)
dest = check_hadoop_path(dest, hadoop_bin, fs_name, fs_ugi)
if exists(dest):
rm(dest)
cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -mv %s %s" % (src, dest)
cmd += " 2>%s" % ERR_LOG
ret = os.system(cmd)
return ret
def get(src, dest, hadoop_bin=None, fs_name=None, fs_ugi=None):
""" hadoop download file"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
src = check_hadoop_path(src, hadoop_bin, fs_name, fs_ugi)
cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -get %s %s" % (src, dest)
cmd += " 2>%s" % ERR_LOG
ret = os.system(cmd)
return ret
def put(src, dest, hadoop_bin=None, fs_name=None, fs_ugi=None):
"""hadoop upload file"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
dest = check_hadoop_path(dest, hadoop_bin, fs_name, fs_ugi)
cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -put %s %s" % (src, dest)
cmd += " 2>%s" % ERR_LOG
ret = os.system(cmd)
return ret
def replace(src, dest, hadoop_bin=None, fs_name=None, fs_ugi=None):
"""hadoop replace"""
hadoop_bin, fs_name, fs_ugi = METHOD_NAME(hadoop_bin, fs_name, fs_ugi)
    src = check_hadoop_path(src, hadoop_bin, fs_name, fs_ugi)
    dest = check_hadoop_path(dest, hadoop_bin, fs_name, fs_ugi)
tmp = dest + "_" + str(int(time.time()))
cmd = make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -mv " + dest + " " + tmp + " && "
cmd += make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -put " + src + " " + dest + " && "
cmd += make_base_cmd(hadoop_bin, fs_name, fs_ugi)
cmd += " -rmr " + tmp
ret = os.system(cmd)
return ret
|
3,351 |
test subrequests are logged as subrequest summary
|
import json
import logging
import unittest
from unittest import mock
from pyramid import testing
from kinto.core import DEFAULT_SETTINGS, JsonLogFormatter, initialization
from .support import BaseWebTest
class RequestSummaryTest(BaseWebTest, unittest.TestCase):
def setUp(self):
super().setUp()
config = testing.setUp()
config.registry.settings = DEFAULT_SETTINGS
initialization.setup_logging(config)
patch = mock.patch("kinto.core.initialization.summary_logger")
self.mocked = patch.start()
self.addCleanup(patch.stop)
def logger_context(self):
args, kwargs = self.mocked.info.call_args_list[-1]
return kwargs["extra"]
def test_standard_info_is_bound(self):
headers = {"User-Agent": "Smith", **self.headers}
self.app.get("/", headers=headers)
event_dict = self.logger_context()
self.assertEqual(event_dict["path"], "/v0/")
self.assertEqual(event_dict["method"], "GET")
self.assertEqual(event_dict["code"], 200)
self.assertEqual(event_dict["agent"], "Smith")
self.assertIsNotNone(event_dict["uid"])
self.assertIsNotNone(event_dict["time"])
self.assertIsNotNone(event_dict["t"])
self.assertEqual(event_dict["errno"], 0)
self.assertNotIn("lang", event_dict)
self.assertNotIn("headers", event_dict)
self.assertNotIn("body", event_dict)
def test_userid_is_none_when_anonymous(self):
self.app.get("/")
event_dict = self.logger_context()
self.assertNotIn("uid", event_dict)
def test_lang_is_not_none_when_provided(self):
self.app.get("/", headers={"Accept-Language": "fr-FR"})
event_dict = self.logger_context()
self.assertEqual(event_dict["lang"], "fr-FR")
def test_agent_is_not_none_when_provided(self):
self.app.get("/", headers={"User-Agent": "webtest/x.y.z"})
event_dict = self.logger_context()
self.assertEqual(event_dict["agent"], "webtest/x.y.z")
def test_errno_is_specified_on_error(self):
self.app.get("/unknown", status=404)
event_dict = self.logger_context()
self.assertEqual(event_dict["errno"], 111)
def test_basic_authn_type_is_bound(self):
app = self.make_app({"multiauth.policies": "basicauth"})
app.get("/mushrooms", headers={"Authorization": "Basic bWF0OjE="})
event_dict = self.logger_context()
self.assertEqual(event_dict["authn_type"], "basicauth")
def test_request_id_is_taken_from_headers(self):
self.app.get("/", headers={"X-Request-Id": "foo"})
event_dict = self.logger_context()
self.assertEqual(event_dict["rid"], "foo")
def test_headers_and_body_when_level_is_debug(self):
self.mocked.level = logging.DEBUG
body = b'{"boom": 1}'
self.app.post("/batch", body, headers=self.headers, status=400)
event_dict = self.logger_context()
self.assertEqual(
event_dict["headers"],
{
"Authorization": "Basic bWF0OnNlY3JldA==",
"Content-Length": "11",
"Content-Type": "application/json",
"Host": "localhost:80",
},
)
self.assertEqual(event_dict["body"], body)
self.maxDiff = None
responseBody = event_dict["response"]["body"]
self.assertEqual(json.loads(responseBody.decode("utf-8"))["error"], "Invalid parameters")
responseHeaders = event_dict["response"]["headers"]
self.assertEqual(
sorted(responseHeaders.keys()),
[
"Access-Control-Expose-Headers",
"Content-Length",
"Content-Security-Policy",
"Content-Type",
"X-Content-Type-Options",
],
)
class BatchSubrequestTest(BaseWebTest, unittest.TestCase):
def setUp(self):
super().setUp()
patch = mock.patch("kinto.core.views.batch.subrequest_logger")
self.subrequest_mocked = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch("kinto.core.initialization.summary_logger")
self.summary_mocked = patch.start()
self.addCleanup(patch.stop)
headers = {**self.headers, "User-Agent": "readinglist"}
body = {
"requests": [
{"path": "/unknown", "headers": {"User-Agent": "foo"}},
{"path": "/unknown2"},
]
}
self.app.post_json("/batch", body, headers=headers)
def test_batch_global_request_is_preserved(self):
args, kwargs = self.summary_mocked.info.call_args_list[-1]
extra = kwargs["extra"]
self.assertEqual(extra["code"], 200)
self.assertEqual(extra["path"], "/v0/batch")
self.assertEqual(extra["agent"], "readinglist")
def test_batch_size_is_bound(self):
args, kwargs = self.summary_mocked.info.call_args_list[-1]
extra = kwargs["extra"]
self.assertEqual(extra["batch_size"], 2)
def test_subrequests_are_not_logged_as_request_summary(self):
self.assertEqual(self.summary_mocked.info.call_count, 1)
def METHOD_NAME(self):
self.assertEqual(self.subrequest_mocked.info.call_count, 2)
args, kwargs = self.subrequest_mocked.info.call_args_list[-1]
extra = kwargs["extra"]
self.assertEqual(extra["path"], "/v0/unknown2")
args, kwargs = self.subrequest_mocked.info.call_args_list[-2]
extra = kwargs["extra"]
self.assertEqual(extra["path"], "/v0/unknown")
class JsonFormatterTest(unittest.TestCase):
def test_logger_name(self):
JsonLogFormatter.init_from_settings({"project_name": "kintowe"})
f = JsonLogFormatter()
record = logging.LogRecord("app.log", logging.DEBUG, "", 0, "coucou", (), None)
result = f.format(record)
logged = json.loads(result)
self.assertEqual(logged["Logger"], "kintowe")
self.assertEqual(logged["Type"], "app.log")
# See https://github.com/mozilla/mozilla-cloud-services-logger/issues/2
self.assertEqual(logged["Fields"]["msg"], "coucou")
|
3,352 |
test condition decision
|
# ----------------------------------------------------------------------------
# Copyright (C) 2021-2023 Deepchecks (https://www.deepchecks.com)
#
# This file is part of Deepchecks.
# Deepchecks is distributed under the terms of the GNU Affero General
# Public License (version 3 or later).
# You should have received a copy of the GNU Affero General Public License
# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
#
"""Tests for BaseCheck class."""
import numpy as np
# pylint: disable-all
import pandas as pd
from hamcrest import *
from deepchecks import __version__
from deepchecks.core import BaseCheck, CheckResult, ConditionResult
from deepchecks.core.condition import ConditionCategory
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.tabular import Context, TrainTestCheck
class DummyCheck(TrainTestCheck):
def __init__(self, param1=1, param2=2, n_samples=None):
super().__init__()
self.param1 = param1
self.param2 = param2
self.n_samples = n_samples
def run_logic(self, context):
return CheckResult(context)
def test_add_condition():
# Arrange & Act
check = DummyCheck().add_condition('condition A', lambda r: ConditionCategory.PASS)
# Assert
assert_that(check._conditions.values(), contains_exactly(
has_property('name', 'condition A')
))
def test_add_multiple_conditions():
# Arrange & Act
check = (DummyCheck().add_condition('condition A', lambda r: True)
.add_condition('condition B', lambda r: False)
.add_condition('condition C', lambda r: ConditionResult(ConditionCategory.PASS)))
# Assert
assert_that(check._conditions.values(), contains_exactly(
has_property('name', 'condition A'),
has_property('name', 'condition B'),
has_property('name', 'condition C')
))
def test_add_conditions_wrong_name():
# Arrange
check = DummyCheck()
# Act & Assert
assert_that(calling(check.add_condition).with_args(333, lambda r: True),
raises(DeepchecksValueError, 'Condition name must be of type str but got: int'))
def test_add_conditions_wrong_value():
# Arrange
check = DummyCheck()
# Act & Assert
assert_that(calling(check.add_condition).with_args('cond', 'string not function'),
raises(DeepchecksValueError, 'Condition must be a function'))
def test_clean_conditions():
# Arrange
check = DummyCheck().add_condition('a', lambda r: True).add_condition('b', lambda r: True)
# Act & Assert
assert_that(check, has_property('_conditions', has_length(2)))
check.clean_conditions()
assert_that(check, has_property('_conditions', has_length(0)))
def test_remove_condition():
# Arrange
check = (DummyCheck().add_condition('condition A', lambda r: True)
.add_condition('condition B', lambda r: False)
.add_condition('condition C', lambda r: ConditionResult(ConditionCategory.PASS)))
# Act & Assert
check.remove_condition(1)
assert_that(check._conditions.values(), has_items(
has_property('name', 'condition A'), has_property('name', 'condition C')
))
check.remove_condition(0)
assert_that(check._conditions.values(), has_items(has_property('name', 'condition C')))
def test_remove_condition_index_error():
# Arrange
check = DummyCheck().add_condition('a', lambda r: True).add_condition('b', lambda r: True)
# Act & Assert
assert_that(calling(check.remove_condition).with_args(7),
raises(DeepchecksValueError, 'Index 7 of conditions does not exists'))
def METHOD_NAME():
def raise_(ex): # just to test error in condition
raise ex
# Arrange
check = (DummyCheck().add_condition('condition A', lambda _: True)
.add_condition('condition B', lambda _: ConditionResult(ConditionCategory.FAIL, 'some result'))
.add_condition('condition C', lambda _: ConditionResult(ConditionCategory.WARN, 'my actual'))
.add_condition('condition F', lambda _: raise_(Exception('fail'))))
decisions = check.conditions_decision(CheckResult(1))
# Assert
assert_that(decisions, has_items(
all_of(
has_property('name', 'condition A'),
has_property('category', ConditionCategory.PASS),
has_property('details', '')
),
all_of(
has_property('name', 'condition B'),
has_property('category', ConditionCategory.FAIL),
has_property('details', 'some result')
),
all_of(
has_property('name', 'condition C'),
has_property('category', ConditionCategory.WARN),
has_property('details', 'my actual')
),
all_of(
has_property('name', 'condition F'),
has_property('category', ConditionCategory.ERROR),
has_property('details', 'Exception in condition: Exception: fail')
)
))
def test_params():
# Arrange
default_check = DummyCheck()
parameter_check = DummyCheck(param2=5)
all_param_check = DummyCheck(8, 9, 10)
# Assert
assert_that(default_check.params(), equal_to({}))
assert_that(parameter_check.params(), equal_to({'param2': 5}))
assert_that(all_param_check.params(), equal_to({'param1': 8, 'param2': 9, 'n_samples': 10}))
assert_that(default_check.params(show_defaults=True), equal_to({'param1': 1, 'param2': 2, 'n_samples': None}))
def test_config():
check = DummyCheck(param2=5).config()
assert_that(check, equal_to({
'module_name': f'{DummyCheck.__module__}',
'class_name': 'DummyCheck',
'version': __version__,
'params': {'param1': 1, 'param2': 5, 'n_samples': None},
}))
assert_that(BaseCheck.from_config(check), instance_of(DummyCheck))
def test_pass_feature_importance_incorrect(iris_split_dataset):
# Arrange
check = DummyCheck()
train, test = iris_split_dataset
# Act & Assert
assert_that(calling(check.run).with_args(train, test, feature_importance='wrong type'),
raises(DeepchecksValueError,
'feature_importance must be given as a pandas.Series where the index is feature names and the '
'value is the calculated importance'))
def test_pass_feature_importance_correct(iris_split_dataset):
# Arrange
check = DummyCheck()
train, test = iris_split_dataset
feature_importance = pd.Series(data=np.random.rand(len(train.features)), index=train.features)
# Act
result = check.run(train, test, feature_importance=feature_importance)
context: Context = result.value
# Assert
assert_that(context._calculated_importance, is_(True))
assert_that(context._feature_importance is not None)
|
3,353 |
test xpathbuilder with eval
|
"""
Tests of the XPathBuilder class.
"""
import pytest
from lxml import etree
TEST_INPXML_PATH = 'fleur/Max-R5/FePt_film_SSFT_LO/files/inp2.xml'
def test_xpathbuilder():
"""
Test the basic behaviour of the XPathBuilder class.
"""
from masci_tools.util.xml.xpathbuilder import XPathBuilder
simple_xpath = '/test/xpath/simple'
xpath = XPathBuilder(simple_xpath)
assert xpath.path == simple_xpath
xpath.add_filter('xpath', {'index': -2})
with pytest.raises(ValueError):
xpath.add_filter('not-existing', {'index': -3})
assert xpath.path == '/test/xpath[last() - $xpath_index]/simple'
assert str(xpath) == '/test/xpath[last() - 1]/simple'
assert xpath.path_variables == {'xpath_index': 1}
xpath = XPathBuilder(simple_xpath, strict=True)
with pytest.raises(ValueError):
str(xpath)
xpath = XPathBuilder(etree.XPath(simple_xpath), compile_path=True, smart_strings=True)
xpath.add_filter('xpath', {'index': -2})
assert isinstance(xpath.path, etree.XPath)
assert str(xpath) == '/test/xpath[last() - 1]/simple'
with pytest.raises(ValueError):
xpath = XPathBuilder(etree.XPath(simple_xpath), smart_strings=True)
@pytest.mark.parametrize('simple_xpath,filters,expected', [
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
'index': -1
}
}, 'Pt-1'),
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
'index': {
'==': -1
}
}
}, 'Pt-1'),
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
'index': {
'<': -1
}
}
}, 'Fe-1'),
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
'index': {
'>=': 2
}
}
}, 'Pt-1'),
('/fleurInput/atomSpecies/species/@name', {
'species': {
'/mtSphere/@radius': {
'>=': 2.0
}
}
}, ['Fe-1', 'Pt-1']),
('/fleurInput/atomSpecies/species/mtSphere/@radius', {
'species': {
'name': {
'==': 'Fe-1'
}
}
}, '2.20000000'),
('/fleurInput/atomSpecies/species/@name', {
'species': {
'/mtSphere/@radius': {
'==': '2.20000000'
}
}
}, ['Fe-1', 'Pt-1']),
('/fleurInput/atomSpecies/species/@name', {
'species': {
'has': './lo'
}
}, ['Fe-1', 'Pt-1']),
('/fleurInput/atomSpecies/species/@name', {
'species': {
'has-not': './ldaU'
}
}, ['Fe-1', 'Pt-1']),
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
'filmPos/@label': {
'contains': '22'
}
}
}, 'Fe-1'),
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
'filmPos/@label': {
'not-contains': '22'
}
}
}, 'Pt-1'),
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
'and': [{
'filmPos/@label': {
'not-contains': '22'
}
}, {
'force/@relaxXYZ': 'TTT'
}]
}
}, 'Pt-1'),
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
'or': [{
'filmPos/@label': {
'not-contains': '22'
}
}, {
'filmPos/@label': {
'contains': '22'
}
}]
}
}, ['Fe-1', 'Pt-1']),
('/fleurInput/atomSpecies/species/@name', {
'species': {
'./lo': {
'number-nodes': {
'>': 1
}
}
}
}, 'Fe-1'),
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
('relPos/@label', 'filmPos/@label'): {
'not-contains': '22'
}
}
}, 'Pt-1'),
('/fleurInput/atomSpecies/species/@name', {
'species': {
'name': {
'starts-with': 'P'
}
}
}, 'Pt-1'),
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
('relPos/@label', 'filmPos/@label'): {
'ends-with': '22'
}
}
}, 'Fe-1'),
('/fleurInput/atomSpecies/species/lo/@n', {
'lo': {
'l': {
'in': [0, 1]
}
}
}, ['3', '3', '5']),
('/fleurInput/atomSpecies/species/lo/@n', {
'lo': {
'l': {
'not-in': [0, 2]
}
}
}, ['3', '5']),
('/fleurInput/atomSpecies/species/electronConfig/coreConfig/text()', {
'coreConfig': '[Ne]'
}, '[Ne]'),
('/fleurInput/atomGroups/atomGroup/@species', {
'atomGroup': {
'species': {
'string-length': {
'>=': 3
}
}
}
}, ['Fe-1', 'Pt-1']),
])
def METHOD_NAME(load_inpxml, simple_xpath, filters, expected):
"""
Test the xpathbuilder with a variety of different Xpath expressions and filters
"""
from masci_tools.util.xml.xpathbuilder import XPathBuilder
from masci_tools.util.xml.common_functions import eval_xpath
xmltree, _ = load_inpxml(TEST_INPXML_PATH, absolute=False)
xpath = XPathBuilder(simple_xpath, filters=filters)
print(f'Complex XPath: {str(xpath)}')
res = eval_xpath(xmltree, xpath)
assert res == expected
assert str(xpath) != simple_xpath # make sure the path is not the same as the original
|
3,354 |
implements bool
|
# flake8: noqa
# This whole file is full of lint errors
import codecs
import sys
import operator
import functools
import warnings
try:
import builtins
except ImportError:
import __builtin__ as builtins
PY2 = sys.version_info[0] == 2
WIN = sys.platform.startswith('win')
_identity = lambda x: x
if PY2:
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
int_to_byte = chr
iter_bytes = iter
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def fix_tuple_repr(obj):
def __repr__(self):
cls = self.__class__
return '%s(%s)' % (cls.__name__, ', '.join(
'%s=%r' % (field, self[index])
for index, field in enumerate(cls._fields)
))
obj.__repr__ = __repr__
return obj
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def native_string_result(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs).encode('utf-8')
return functools.update_wrapper(wrapper, func)
def METHOD_NAME(cls):
cls.__nonzero__ = cls.__bool__
del cls.__bool__
return cls
from itertools import imap, izip, ifilter
range_type = xrange
from StringIO import StringIO
from cStringIO import StringIO as BytesIO
NativeStringIO = BytesIO
def make_literal_wrapper(reference):
return _identity
def normalize_string_tuple(tup):
"""Normalizes a string tuple to a common type. Following Python 2
rules, upgrades to unicode are implicit.
"""
if any(isinstance(x, text_type) for x in tup):
return tuple(to_unicode(x) for x in tup)
return tup
def try_coerce_native(s):
"""Try to coerce a unicode string to native if possible. Otherwise,
leave it as unicode.
"""
try:
return to_native(s)
except UnicodeError:
return s
wsgi_get_bytes = _identity
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s
return s.encode(charset, errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.encode(charset, errors)
else:
unichr = chr
text_type = str
string_types = (str, )
integer_types = (int, )
iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
iter_bytes = functools.partial(map, int_to_byte)
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
fix_tuple_repr = _identity
implements_iterator = _identity
implements_to_string = _identity
METHOD_NAME = _identity
native_string_result = _identity
imap = map
izip = zip
ifilter = filter
range_type = range
from io import StringIO, BytesIO
NativeStringIO = StringIO
_latin1_encode = operator.methodcaller('encode', 'latin1')
def make_literal_wrapper(reference):
if isinstance(reference, text_type):
return _identity
return _latin1_encode
def normalize_string_tuple(tup):
"""Ensures that all types in the tuple are either strings
or bytes.
"""
tupiter = iter(tup)
is_text = isinstance(next(tupiter, None), text_type)
for arg in tupiter:
if isinstance(arg, text_type) != is_text:
raise TypeError('Cannot mix str and bytes arguments (got %s)'
% repr(tup))
return tup
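    # Behaviour sketch: normalize_string_tuple(("a", "b")) returns the tuple unchanged,
    # while mixing types such as ("a", b"b") raises the TypeError described above.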
try_coerce_native = _identity
wsgi_get_bytes = _latin1_encode
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.encode('latin1').decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, text_type):
s = s.encode(charset)
return s.decode('latin1', errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)): # noqa
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.decode(charset, errors)
def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
allow_none_charset=False):
if x is None:
return None
if not isinstance(x, bytes):
return text_type(x)
if charset is None and allow_none_charset:
return x
return x.decode(charset, errors)
|
3,355 |
test parse timestamp with timezone invalid timezone
|
from __future__ import annotations
import string
import hypothesis as h
import hypothesis.strategies as st
import parsy
import pytest
import ibis.expr.datatypes as dt
import ibis.tests.strategies as its
from ibis.common.annotations import ValidationError
@pytest.mark.parametrize(
("spec", "expected"),
[
("boolean", dt.boolean),
("int8", dt.int8),
("int16", dt.int16),
("int32", dt.int32),
("int64", dt.int64),
("int", dt.int64),
("uint8", dt.uint8),
("uint16", dt.uint16),
("uint32", dt.uint32),
("uint64", dt.uint64),
("float16", dt.float16),
("float32", dt.float32),
("float64", dt.float64),
("float", dt.float64),
("string", dt.string),
("binary", dt.binary),
("date", dt.date),
("time", dt.time),
("timestamp", dt.timestamp),
("point", dt.point),
("linestring", dt.linestring),
("polygon", dt.polygon),
("multilinestring", dt.multilinestring),
("multipoint", dt.multipoint),
("multipolygon", dt.multipolygon),
],
)
def test_primitive_from_string(spec, expected):
assert dt.dtype(spec) == expected
@pytest.mark.parametrize(
("spec", "expected"),
[
["decimal", dt.Decimal(None, None)],
["decimal(10, 3)", dt.Decimal(10, 3)],
["bignumeric", dt.Decimal(76, 38)],
["bigdecimal", dt.Decimal(76, 38)],
["bignumeric(1, 1)", dt.Decimal(1, 1)],
["bigdecimal(1, 1)", dt.Decimal(1, 1)],
],
)
def test_parse_decimal(spec, expected):
assert dt.dtype(spec) == expected
@pytest.mark.parametrize(
"case",
[
"decimal(",
"decimal()",
"decimal(3)",
"decimal(,)",
"decimal(3,)",
"decimal(3,",
],
)
def test_parse_decimal_failure(case):
with pytest.raises(parsy.ParseError):
dt.dtype(case)
@pytest.mark.parametrize("spec", ["varchar", "varchar(10)", "char", "char(10)"])
def test_parse_char_varchar(spec):
assert dt.dtype(spec) == dt.string
@pytest.mark.parametrize(
"spec", ["varchar(", "varchar)", "varchar()", "char(", "char)", "char()"]
)
def test_parse_char_varchar_invalid(spec):
with pytest.raises(parsy.ParseError):
dt.dtype(spec)
def test_parse_array_token_error():
with pytest.raises(parsy.ParseError):
dt.dtype("array<string>>")
def test_parse_struct():
orders = """array<struct<
oid: int64,
status: string,
totalprice: decimal(12, 2),
order_date: string,
items: array<struct<
iid: int64,
name: string,
price: decimal(12, 2),
discount_perc: decimal(12, 2),
shipdate: string,
: bool
>>
>>"""
expected = dt.Array(
dt.Struct.from_tuples(
[
("oid", dt.int64),
("status", dt.string),
("totalprice", dt.Decimal(12, 2)),
("order_date", dt.string),
(
"items",
dt.Array(
dt.Struct.from_tuples(
[
("iid", dt.int64),
("name", dt.string),
("price", dt.Decimal(12, 2)),
("discount_perc", dt.Decimal(12, 2)),
("shipdate", dt.string),
("", dt.boolean),
]
)
),
),
]
)
)
assert dt.dtype(orders) == expected
def test_struct_with_string_types():
result = dt.Struct.from_tuples(
[
("a", "map<double, string>"),
("b", "array<map<string, array<int32>>>"),
("c", "array<string>"),
("d", "int8"),
]
)
assert result == dt.Struct.from_tuples(
[
("a", dt.Map(dt.double, dt.string)),
("b", dt.Array(dt.Map(dt.string, dt.Array(dt.int32)))),
("c", dt.Array(dt.string)),
("d", dt.int8),
]
)
def test_array_with_string_value_types():
assert dt.Array("int32") == dt.Array(dt.int32)
assert dt.Array(dt.Array("array<map<string, double>>")) == (
dt.Array(dt.Array(dt.Array(dt.Map(dt.string, dt.double))))
)
def test_map_with_string_value_types():
assert dt.Map("int32", "double") == dt.Map(dt.int32, dt.double)
assert dt.Map("int32", "array<double>") == dt.Map(dt.int32, dt.Array(dt.double))
def test_parse_empty_map_failure():
with pytest.raises(parsy.ParseError):
dt.dtype("map<>")
def test_parse_map_allow_non_primitive_keys():
assert dt.dtype("map<array<string>, double>") == dt.Map(
dt.Array(dt.string), dt.double
)
def test_parse_timestamp_with_timezone_single_quote():
t = dt.dtype("timestamp('US/Eastern')")
assert isinstance(t, dt.Timestamp)
assert t.timezone == "US/Eastern"
def test_parse_timestamp_with_timezone_double_quote():
t = dt.dtype("timestamp('US/Eastern')")
assert isinstance(t, dt.Timestamp)
assert t.timezone == "US/Eastern"
def METHOD_NAME():
ts = dt.dtype("timestamp('US/Ea')")
assert str(ts) == "timestamp('US/Ea')"
@pytest.mark.parametrize("scale", range(10))
@pytest.mark.parametrize("tz", ["UTC", "America/New_York"])
def test_parse_timestamp_with_scale(scale, tz):
expected = dt.Timestamp(timezone=tz, scale=scale)
typestring = f"timestamp({tz!r}, {scale:d})"
assert dt.parse(typestring) == expected
assert str(expected) == typestring
@pytest.mark.parametrize("scale", range(10))
def test_parse_timestamp_with_scale_no_tz(scale):
assert dt.parse(f"timestamp({scale:d})") == dt.Timestamp(scale=scale)
@pytest.mark.parametrize(
"unit",
[
"Y",
"Q",
"M",
"W",
"D", # date units
"h",
"m",
"s",
"ms",
"us",
"ns", # time units
],
)
def test_parse_interval(unit):
definition = f"interval('{unit}')"
assert dt.Interval(unit) == dt.dtype(definition)
@pytest.mark.parametrize("unit", ["X", "unsupported"])
def test_parse_interval_with_invalid_unit(unit):
definition = f"interval('{unit}')"
with pytest.raises(ValidationError):
dt.dtype(definition)
@pytest.mark.parametrize(
"case",
[
"timestamp(US/Ea)",
"timestamp('US/Eastern\")",
"timestamp(\"US/Eastern')",
"interval(Y)",
"interval('Y\")",
"interval(\"Y')",
],
)
def test_parse_temporal_with_invalid_string_argument(case):
with pytest.raises(parsy.ParseError):
dt.dtype(case)
def test_parse_time():
assert dt.dtype("time").equals(dt.time)
def test_parse_null():
assert dt.parse("null") == dt.null
# corresponds to its.all_dtypes() but without:
# - geospacial types, the string representation is different from what the parser expects
# - struct types, the generated struct field names contain special characters
field_names = st.text(
alphabet=st.characters(
whitelist_characters=string.ascii_letters + string.digits,
whitelist_categories=(),
)
)
roundtrippable_dtypes = st.deferred(
lambda: (
its.primitive_dtypes()
| its.string_like_dtypes()
| its.temporal_dtypes()
| its.interval_dtype()
| its.variadic_dtypes()
| its.struct_dtypes(names=field_names)
| its.array_dtypes(roundtrippable_dtypes)
| its.map_dtypes(roundtrippable_dtypes, roundtrippable_dtypes)
)
)
@h.given(roundtrippable_dtypes)
def test_parse_dtype_roundtrip(dtype):
assert dt.dtype(str(dtype)) == dtype
|
3,356 |
teardown
|
# -*- coding: utf-8 -*-
"""
jinja2.testsuite
~~~~~~~~~~~~~~~~
All the unittests of Jinja2. These tests can be executed by
either running run-tests.py using multiple Python versions at
the same time.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import unittest
from traceback import format_exception
from jinja2 import loaders
from jinja2._compat import PY2
here = os.path.dirname(os.path.abspath(__file__))
dict_loader = loaders.DictLoader({
'justdict.html': 'FOO'
})
package_loader = loaders.PackageLoader('jinja2.testsuite.res', 'templates')
filesystem_loader = loaders.FileSystemLoader(here + '/res/templates')
function_loader = loaders.FunctionLoader({'justfunction.html': 'FOO'}.get)
choice_loader = loaders.ChoiceLoader([dict_loader, package_loader])
prefix_loader = loaders.PrefixLoader({
'a': filesystem_loader,
'b': dict_loader
})
class JinjaTestCase(unittest.TestCase):
### use only these methods for testing. If you need standard
### unittest method, wrap them!
def setup(self):
pass
def METHOD_NAME(self):
pass
def setUp(self):
self.setup()
def tearDown(self):
self.METHOD_NAME()
def assert_equal(self, a, b):
return self.assertEqual(a, b)
def assert_raises(self, *args, **kwargs):
return self.assertRaises(*args, **kwargs)
def assert_traceback_matches(self, callback, expected_tb):
try:
callback()
except Exception as e:
tb = format_exception(*sys.exc_info())
if re.search(expected_tb.strip(), ''.join(tb)) is None:
raise self.fail('Traceback did not match:\n\n%s\nexpected:\n%s'
% (''.join(tb), expected_tb))
else:
self.fail('Expected exception')
def find_all_tests(suite):
"""Yields all the tests and their names from a given suite."""
suites = [suite]
while suites:
s = suites.pop()
try:
suites.extend(s)
except TypeError:
yield s, '%s.%s.%s' % (
s.__class__.__module__,
s.__class__.__name__,
s._testMethodName
)
class BetterLoader(unittest.TestLoader):
"""A nicer loader that solves two problems. First of all we are setting
up tests from different sources and we're doing this programmatically
which breaks the default loading logic so this is required anyways.
Secondly this loader has a nicer interpolation for test names than the
default one so you can just do ``run-tests.py ViewTestCase`` and it
will work.
"""
def getRootSuite(self):
return suite()
def loadTestsFromName(self, name, module=None):
root = self.getRootSuite()
if name == 'suite':
return root
all_tests = []
for testcase, testname in find_all_tests(root):
if testname == name or \
testname.endswith('.' + name) or \
('.' + name + '.') in testname or \
testname.startswith(name + '.'):
all_tests.append(testcase)
if not all_tests:
raise LookupError('could not find test case for "%s"' % name)
if len(all_tests) == 1:
return all_tests[0]
rv = unittest.TestSuite()
for test in all_tests:
rv.addTest(test)
return rv
def suite():
from jinja2.testsuite import ext, filters, tests, core_tags, \
loader, inheritance, imports, lexnparse, security, api, \
regression, debug, utils, bytecode_cache, doctests
suite = unittest.TestSuite()
suite.addTest(ext.suite())
suite.addTest(filters.suite())
suite.addTest(tests.suite())
suite.addTest(core_tags.suite())
suite.addTest(loader.suite())
suite.addTest(inheritance.suite())
suite.addTest(imports.suite())
suite.addTest(lexnparse.suite())
suite.addTest(security.suite())
suite.addTest(api.suite())
suite.addTest(regression.suite())
suite.addTest(debug.suite())
suite.addTest(utils.suite())
suite.addTest(bytecode_cache.suite())
    # doctests will not run on Python 3 currently.  Too many issues
    # with that, so they are skipped on that platform.
if PY2:
suite.addTest(doctests.suite())
return suite
def main():
"""Runs the testsuite as command line application."""
try:
unittest.main(testLoader=BetterLoader(), defaultTest='suite')
except Exception as e:
print('Error: %s' % e)
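# Illustrative usage note (not part of the original module): run-tests.py is
# assumed to simply import this module and call main(); a bare test-case name
# on the command line (e.g. a hypothetical "LoaderTestCase") is resolved by
# BetterLoader.loadTestsFromName() against suite() above.
if __name__ == '__main__':
    main()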
|
3,357 |
run
|
from __future__ import annotations
import logging
import typing as t
from types import ModuleType
import cloudpickle
import bentoml
from ...exceptions import MissingDependencyException
from ...exceptions import NotFound
from ..models.model import Model
from ..models.model import ModelContext
from ..models.model import ModelOptions
from ..models.model import ModelSignature
from ..tag import Tag
from ..utils.pkg import get_pkg_version
from .common.pytorch import PyTorchTensorContainer # noqa # type: ignore
try:
import easyocr
except ImportError: # pragma: no cover
raise MissingDependencyException(
"'easyocr' is required in order to use module 'bentoml.easyocr'. Install easyocr with 'pip install easyocr'."
)
if t.TYPE_CHECKING:
from ..models.model import ModelSignaturesType
ListStr = list[str]
else:
ListStr = list
__all__ = ["load_model", "save_model", "get_runnable", "get"]
MODULE_NAME = "bentoml.easyocr"
API_VERSION = "v1"
MODEL_FILENAME = "saved_model.pkl"
logger = logging.getLogger(__name__)
def get(tag_like: str | Tag) -> Model:
"""
Get the BentoML model with the given tag.
Args:
tag_like: The tag of the model to retrieve from the model store.
Returns:
:obj:`~bentoml.Model`: A BentoML :obj:`~bentoml.Model` with the matching tag.
Example:
.. code-block:: python
import bentoml
# target model must be from the BentoML model store
model = bentoml.easyocr.get("en_reader:latest")
"""
model = bentoml.models.get(tag_like)
if model.info.module not in (MODULE_NAME, __name__):
raise NotFound(
f"Model {model.tag} was saved with module {model.info.module}, not loading with {MODULE_NAME}."
)
return model
def load_model(bento_model: str | Tag | Model) -> easyocr.Reader:
"""
Load the EasyOCR model from BentoML local model store with given name.
Args:
bento_model: Either the tag of the model to get from the store,
or a BentoML :class:`~bentoml.Model` instance to load the
model from.
Returns:
``easyocr.Reader``: The EasyOCR model from the model store.
Example:
.. code-block:: python
import bentoml
reader = bentoml.easyocr.load_model('en_reader:latest')
"""
if not isinstance(bento_model, Model):
bento_model = get(bento_model)
if bento_model.info.module not in (MODULE_NAME, __name__):
raise NotFound(
f"Model {bento_model.tag} was saved with module {bento_model.info.module}, not loading with {MODULE_NAME}."
)
with open(bento_model.path_of(MODEL_FILENAME), "rb") as f:
return cloudpickle.load(f)
def save_model(
name: Tag | str,
reader: easyocr.Reader,
*,
signatures: ModelSignaturesType | None = None,
labels: dict[str, str] | None = None,
custom_objects: dict[str, t.Any] | None = None,
external_modules: t.List[ModuleType] | None = None,
metadata: dict[str, t.Any] | None = None,
) -> bentoml.Model:
"""
Save a model instance to BentoML modelstore.
Args:
name: Name for given model instance. This should pass Python identifier check.
reader: The EasyOCR model to be saved. Currently only supports pre-trained models from easyocr.
Custom models are not yet supported.
signatures: Methods to expose for running inference on the target model. Signatures are used for creating :obj:`~bentoml.Runner` instances when serving model with :obj:`~bentoml.Service`
labels: User-defined labels for managing models, e.g. ``team=nlp``, ``stage=dev``.
custom_objects: Custom objects to be saved with the model. An example is ``{"my-normalizer": normalizer}``.
Custom objects are currently serialized with cloudpickle, but this implementation is subject to change.
external_modules: user-defined additional python modules to be saved alongside the model or custom objects,
e.g. a tokenizer module, preprocessor module, model configuration module
metadata: Custom metadata for given model.
Returns:
:obj:`~bentoml.Tag`: A :obj:`tag` with a format ``name:version`` where ``name`` is the user-defined model's name, and a generated ``version``.
Examples:
.. code-block:: python
import bentoml
import easyocr
reader = easyocr.Reader(['en'])
bento_model = bentoml.easyocr.save_model('en_reader', reader)
""" # noqa
context = ModelContext(
framework_name="easyocr",
framework_versions={"easyocr": get_pkg_version("easyocr")},
)
if signatures is None:
signatures = {
k: {"batchable": False}
for k in ("detect", "readtext", "readtextlang", "recognize")
}
signatures["readtext_batched"] = {"batchable": True}
logger.info(
            'Using the default model signature for EasyOCR (%s) for model "%s".',
signatures,
name,
)
with bentoml.models.create(
name,
module=MODULE_NAME,
api_version=API_VERSION,
labels=labels,
context=context,
options=ModelOptions(),
signatures=signatures,
custom_objects=custom_objects,
external_modules=external_modules,
metadata=metadata,
) as bento_model:
with open(bento_model.path_of(MODEL_FILENAME), "wb") as f:
cloudpickle.dump(reader, f)
return bento_model
def get_runnable(bento_model: bentoml.Model) -> type[bentoml.Runnable]:
"""
Private API: use :obj:`~bentoml.Model.to_runnable` instead.
"""
class EasyOCRRunnable(bentoml.Runnable):
SUPPORTED_RESOURCES = ("nvidia.com/gpu", "cpu")
SUPPORTS_CPU_MULTI_THREADING = True
def __init__(self):
super().__init__()
self.model = load_model(bento_model)
self.predict_fns: dict[str, t.Callable[..., t.Any]] = {}
for method_name in bento_model.info.signatures:
self.predict_fns[method_name] = getattr(self.model, method_name)
def add_runnable_method(method_name: str, options: ModelSignature):
def METHOD_NAME(self: EasyOCRRunnable, *args: t.Any, **kwargs: t.Any) -> t.Any:
return self.predict_fns[method_name](*args, **kwargs)
EasyOCRRunnable.add_method(
METHOD_NAME,
name=method_name,
batchable=options.batchable,
batch_dim=options.batch_dim,
input_spec=options.input_spec,
output_spec=options.output_spec,
)
for method_name, options in bento_model.info.signatures.items():
add_runnable_method(method_name, options)
return EasyOCRRunnable
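# Illustrative end-to-end sketch (not part of this module): save a pre-trained
# reader, then run inference through the generated runnable class. The tag
# "en_reader" and the image path "sample.png" are placeholder assumptions.
def _easyocr_example():
    reader = easyocr.Reader(["en"])
    bento_model = save_model("en_reader", reader)
    runnable = get_runnable(bento_model)()
    return runnable.readtext("sample.png")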
|
3,358 |
test service names import and v0
|
import pytest
@pytest.mark.subprocess(env=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0"))
def test_service_names_import_default():
from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME
from ddtrace.internal.schema import schematize_cache_operation
from ddtrace.internal.schema import schematize_cloud_api_operation
from ddtrace.internal.schema import schematize_database_operation
from ddtrace.internal.schema import schematize_service_name
from ddtrace.internal.schema import schematize_url_operation
from ddtrace.internal.schema.span_attribute_schema import cache_operation_v0
from ddtrace.internal.schema.span_attribute_schema import cloud_api_operation_v0
from ddtrace.internal.schema.span_attribute_schema import database_operation_v0
from ddtrace.internal.schema.span_attribute_schema import service_name_v0
from ddtrace.internal.schema.span_attribute_schema import url_operation_v0
assert DEFAULT_SPAN_SERVICE_NAME is None
assert schematize_service_name == service_name_v0
assert schematize_database_operation == database_operation_v0
assert schematize_cache_operation == cache_operation_v0
assert schematize_cloud_api_operation == cloud_api_operation_v0
assert schematize_url_operation == url_operation_v0
@pytest.mark.subprocess(env=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0"))
def METHOD_NAME():
from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME
from ddtrace.internal.schema import schematize_cache_operation
from ddtrace.internal.schema import schematize_cloud_api_operation
from ddtrace.internal.schema import schematize_database_operation
from ddtrace.internal.schema import schematize_service_name
from ddtrace.internal.schema import schematize_url_operation
from ddtrace.internal.schema.span_attribute_schema import cache_operation_v0
from ddtrace.internal.schema.span_attribute_schema import cloud_api_operation_v0
from ddtrace.internal.schema.span_attribute_schema import database_operation_v0
from ddtrace.internal.schema.span_attribute_schema import service_name_v0
from ddtrace.internal.schema.span_attribute_schema import url_operation_v0
assert DEFAULT_SPAN_SERVICE_NAME is None
assert schematize_service_name == service_name_v0
assert schematize_database_operation == database_operation_v0
assert schematize_cache_operation == cache_operation_v0
assert schematize_cloud_api_operation == cloud_api_operation_v0
assert schematize_url_operation == url_operation_v0
@pytest.mark.subprocess(
env=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1"),
parametrize={"DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED": ["False", "True"]},
)
def test_service_name_imports_v1():
from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME
from ddtrace.internal.schema import schematize_cache_operation
from ddtrace.internal.schema import schematize_cloud_api_operation
from ddtrace.internal.schema import schematize_database_operation
from ddtrace.internal.schema import schematize_service_name
from ddtrace.internal.schema import schematize_url_operation
from ddtrace.internal.schema.span_attribute_schema import cache_operation_v1
from ddtrace.internal.schema.span_attribute_schema import cloud_api_operation_v1
from ddtrace.internal.schema.span_attribute_schema import database_operation_v1
from ddtrace.internal.schema.span_attribute_schema import service_name_v1
from ddtrace.internal.schema.span_attribute_schema import url_operation_v1
assert DEFAULT_SPAN_SERVICE_NAME == "unnamed-python-service"
assert schematize_service_name == service_name_v1
assert schematize_database_operation == database_operation_v1
assert schematize_cache_operation == cache_operation_v1
assert schematize_cloud_api_operation == cloud_api_operation_v1
assert schematize_url_operation == url_operation_v1
@pytest.mark.subprocess(
env=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0", DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED="True")
)
def test_service_name_import_with_client_service_names_enabled_v0():
"""
Service name parameters are flipped when DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED is True for v0
"""
from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME
from ddtrace.internal.schema import schematize_cache_operation
from ddtrace.internal.schema import schematize_cloud_api_operation
from ddtrace.internal.schema import schematize_database_operation
from ddtrace.internal.schema import schematize_service_name
from ddtrace.internal.schema import schematize_url_operation
from ddtrace.internal.schema.span_attribute_schema import cache_operation_v0
from ddtrace.internal.schema.span_attribute_schema import cloud_api_operation_v0
from ddtrace.internal.schema.span_attribute_schema import database_operation_v0
from ddtrace.internal.schema.span_attribute_schema import service_name_v1
from ddtrace.internal.schema.span_attribute_schema import url_operation_v0
assert DEFAULT_SPAN_SERVICE_NAME == "unnamed-python-service"
assert schematize_service_name == service_name_v1
assert schematize_database_operation == database_operation_v0
assert schematize_cache_operation == cache_operation_v0
assert schematize_cloud_api_operation == cloud_api_operation_v0
assert schematize_url_operation == url_operation_v0
|
3,359 |
reset
|
""" Create aircraft trails on the radar display."""
from math import *
import numpy as np
import bluesky as bs
from bluesky import settings
from bluesky.core import TrafficArrays
class Trails(TrafficArrays):
"""
Traffic trails class definition : Data for trails
Methods:
Trails() : constructor
Members: see create
Created by : Jacco M. Hoekstra
"""
def __init__(self,dttrail=10.):
super().__init__()
        self.active = False  # Whether or not to show trails
self.dt = dttrail # Resolution of trail pieces in time
self.pygame = (bs.gui == 'pygame') # Trails are different for pygame
self.tcol0 = 60. # After how many seconds old colour
# This list contains some standard colors
self.colorList = {'BLUE': np.array([0, 0, 255]),
'CYAN': np.array([0,255,255]),
'RED' : np.array([255, 0, 0]),
'YELLOW': np.array([255, 255, 0])}
        # Set default color to cyan
self.defcolor = self.colorList['CYAN']
# Foreground data on line pieces
self.lat0 = np.array([])
self.lon0 = np.array([])
self.lat1 = np.array([])
self.lon1 = np.array([])
self.time = np.array([])
self.col = []
self.fcol = np.array([])
# background copy of data
self.bglat0 = np.array([])
self.bglon0 = np.array([])
self.bglat1 = np.array([])
self.bglon1 = np.array([])
self.bgtime = np.array([])
        self.bgcol = []
        self.bgacid = []
with self.settrafarrays():
self.accolor = []
self.lastlat = np.array([])
self.lastlon = np.array([])
self.lasttim = np.array([])
self.clearnew()
return
def create(self,n=1):
super().create(n)
self.accolor[-1] = self.defcolor
self.lastlat[-1] = bs.traf.lat[-1]
self.lastlon[-1] = bs.traf.lon[-1]
def update(self):
self.acid = bs.traf.id
if not self.active:
self.lastlat = bs.traf.lat
self.lastlon = bs.traf.lon
self.lasttim[:] = bs.sim.simt
return
"""Add linepieces for trails based on traffic data"""
# Use temporary list/array for fast append
lstlat0 = []
lstlon0 = []
lstlat1 = []
lstlon1 = []
lsttime = []
# Check for update
delta = bs.sim.simt - self.lasttim
idxs = np.where(delta > self.dt)[0]
# Add all a/c which need the update
# if len(idxs)>0:
# print "len(idxs)=",len(idxs)
for i in idxs:
# Add to lists
lstlat0.append(self.lastlat[i])
lstlon0.append(self.lastlon[i])
lstlat1.append(bs.traf.lat[i])
lstlon1.append(bs.traf.lon[i])
lsttime.append(bs.sim.simt)
if isinstance(self.col, np.ndarray):
# print type(trailcol[i])
# print trailcol[i]
# print "col type: ",type(self.col)
self.col = self.col.tolist()
self.col.append(self.accolor[i])
# Update aircraft record
self.lastlat[i] = bs.traf.lat[i]
self.lastlon[i] = bs.traf.lon[i]
self.lasttim[i] = bs.sim.simt
        # When a/c is no longer part of a trail segment,
# it is no longer a/c data => move to the GUI buffer (send or draw)
if self.pygame:
# Pygame: send to drawing buffer
self.lat0 = np.concatenate((self.lat0, np.array(lstlat0)))
self.lon0 = np.concatenate((self.lon0, np.array(lstlon0)))
self.lat1 = np.concatenate((self.lat1, np.array(lstlat1)))
self.lon1 = np.concatenate((self.lon1, np.array(lstlon1)))
self.time = np.concatenate((self.time, np.array(lsttime)))
else:
# QtGL: add to send buffer
self.newlat0.extend(lstlat0)
self.newlon0.extend(lstlon0)
self.newlat1.extend(lstlat1)
self.newlon1.extend(lstlon1)
# Update colours
self.fcol = (1. - np.minimum(self.tcol0, np.abs(bs.sim.simt - self.time)) / self.tcol0)
return
def buffer(self):
"""Buffer trails: Move current stack to background """
self.bglat0 = np.append(self.bglat0, self.lat0)
self.bglon0 = np.append(self.bglon0, self.lon0)
self.bglat1 = np.append(self.bglat1, self.lat1)
self.bglon1 = np.append(self.bglon1, self.lon1)
self.bgtime = np.append(self.bgtime, self.time)
# No color saved: Background: always 'old color' self.col0
if isinstance(self.bgcol, np.ndarray):
self.bgcol = self.bgcol.tolist()
if isinstance(self.col, np.ndarray):
self.col = self.col.tolist()
self.bgcol = self.bgcol + self.col
self.bgacid = self.bgacid + self.acid
self.clearfg() # Clear foreground trails
return
def clearnew(self):
# Clear new lines pipeline used for QtGL
self.newlat0 = []
self.newlon0 = []
self.newlat1 = []
self.newlon1 = []
def clearfg(self): # Foreground
"""Clear trails foreground"""
self.lat0 = np.array([])
self.lon0 = np.array([])
self.lat1 = np.array([])
self.lon1 = np.array([])
self.time = np.array([])
self.col = np.array([])
return
def clearbg(self): # Background
"""Clear trails background"""
self.bglat0 = np.array([])
self.bglon0 = np.array([])
self.bglat1 = np.array([])
self.bglon1 = np.array([])
self.bgtime = np.array([])
self.bgacid = []
return
def clear(self):
"""Clear all data, Foreground and background"""
self.lastlon = np.array([])
self.lastlat = np.array([])
self.clearfg()
self.clearbg()
self.clearnew()
return
def setTrails(self, *args):
""" Set trails on/off, or change trail color of aircraft """
if len(args)==0:
msg = "TRAIL ON/OFF, [dt] / TRAIL acid color\n"
if self.active:
msg = msg + "TRAILS ARE ON"
else:
msg = msg + "TRAILS ARE OFF"
return True,msg
# Switch on/off
elif type(args[0]) == bool:
# Set trails on/off
self.active = args[0]
if len(args) > 1:
self.dt = args[1]
if not self.active:
self.clear()
# Change color per acid (pygame only)
else:
# Change trail color
if len(args) < 2 or args[1] not in ["BLUE", "RED", "YELLOW"]:
return False, "Set aircraft trail color with: TRAIL acid BLUE/RED/YELLOW"
self.changeTrailColor(args[1], args[0])
return True
def changeTrailColor(self, color, idx):
"""Change color of aircraft trail"""
self.accolor[idx] = self.colorList[color]
return
def METHOD_NAME(self):
        # This ensures that the traffic arrays (whose size is dynamic)
        # are reset as well: lat, lon, spd, etc., but also objects such as adsb
super().METHOD_NAME()
self.clear()
self.active = False
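# Illustrative usage sketch (not part of the original class): how the public
# methods compose in a running simulation. Assumes `trails` is the active
# Trails instance (e.g. bs.traf.trails) and that aircraft index 0 exists.
def _trails_usage_example(trails):
    trails.setTrails(True, 5.0)        # switch trails on, 5 s segment resolution
    trails.changeTrailColor('RED', 0)  # recolour the trail of aircraft index 0
    trails.setTrails(False)            # switch off again; clears all trail data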
|
3,360 |
delete
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from api.base_view import DetailView, GenericUIView, PaginatedView
from api.provider.serializers import (
DoProviderUpgradeSerializer,
ProviderDetailSerializer,
ProviderDetailUISerializer,
ProviderSerializer,
ProviderUISerializer,
)
from api.serializers import ProviderUpgradeSerializer
from api.utils import AdcmFilterBackend, AdcmOrderingFilter, check_obj, create
from audit.utils import audit
from cm.api import delete_host_provider
from cm.issue import update_hierarchy_issues
from cm.models import HostProvider, Upgrade
from cm.upgrade import get_upgrade
from guardian.mixins import PermissionListMixin
from rbac.viewsets import DjangoOnlyObjectPermissions
from rest_framework import permissions, status
from rest_framework.response import Response
from adcm.permissions import check_custom_perm, get_object_for_user
class ProviderList(PermissionListMixin, PaginatedView):
"""
get:
List all host providers
post:
Create new host provider
"""
queryset = HostProvider.objects.all()
serializer_class = ProviderSerializer
serializer_class_ui = ProviderUISerializer
serializer_class_post = ProviderDetailSerializer
filterset_fields = ("name", "prototype_id")
ordering_fields = ("id", "name", "state", "prototype__display_name", "prototype__version_order")
permission_required = ["cm.view_hostprovider"]
ordering = ["id"]
@audit
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
serializer = self.get_serializer(data=request.data)
return create(serializer)
class ProviderDetail(PermissionListMixin, DetailView):
"""
get:
Show host provider
"""
queryset = HostProvider.objects.all()
serializer_class = ProviderDetailSerializer
serializer_class_ui = ProviderDetailUISerializer
permission_classes = (DjangoOnlyObjectPermissions,)
permission_required = ["cm.view_hostprovider"]
lookup_field = "id"
lookup_url_kwarg = "provider_id"
error_code = "PROVIDER_NOT_FOUND"
ordering = ["id"]
@audit
def METHOD_NAME(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Remove host provider
"""
provider = self.get_object()
delete_host_provider(provider)
return Response(status=status.HTTP_204_NO_CONTENT)
class ProviderUpgrade(GenericUIView):
queryset = Upgrade.objects.all()
serializer_class = ProviderUpgradeSerializer
permission_classes = (permissions.IsAuthenticated,)
filter_backends = (AdcmFilterBackend, AdcmOrderingFilter)
ordering = ["id"]
def get_ordering(self):
order = AdcmOrderingFilter()
return order.get_ordering(self.request, self.get_queryset(), self)
def get(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
List all available upgrades for specified host provider
"""
provider = get_object_for_user(request.user, "cm.view_hostprovider", HostProvider, id=kwargs["provider_id"])
check_custom_perm(request.user, "view_upgrade_of", "hostprovider", provider)
update_hierarchy_issues(provider)
obj = get_upgrade(provider, self.get_ordering())
serializer = self.serializer_class(obj, many=True, context={"provider_id": provider.id, "request": request})
return Response(serializer.data)
class ProviderUpgradeDetail(GenericUIView):
queryset = Upgrade.objects.all()
serializer_class = ProviderUpgradeSerializer
permission_classes = (permissions.IsAuthenticated,)
ordering = ["id"]
def get(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
        Show the specified upgrade for the specified host provider
"""
provider = get_object_for_user(request.user, "cm.view_hostprovider", HostProvider, id=kwargs["provider_id"])
check_custom_perm(request.user, "view_upgrade_of", "hostprovider", provider)
obj = check_obj(Upgrade, {"id": kwargs["upgrade_id"], "bundle__name": provider.prototype.bundle.name})
serializer = self.serializer_class(obj, context={"provider_id": provider.id, "request": request})
return Response(serializer.data)
class DoProviderUpgrade(GenericUIView):
queryset = Upgrade.objects.all()
serializer_class = DoProviderUpgradeSerializer
permission_classes = (permissions.IsAuthenticated,)
ordering = ["id"]
@audit
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
provider = get_object_for_user(request.user, "cm.view_hostprovider", HostProvider, id=kwargs["provider_id"])
check_custom_perm(request.user, "do_upgrade_of", "hostprovider", provider)
serializer = self.get_serializer(data=request.data)
return create(serializer, upgrade_id=int(kwargs["upgrade_id"]), obj=provider)
|
3,361 |
get unicode value
|
"""
@package dbmgr.vinfo
@brief Support classes for Database Manager
List of classes:
- vinfo::VectorDBInfo
(C) 2007-2013 by the GRASS Development Team
This program is free software under the GNU General Public License
(>=v2). Read the file COPYING that comes with GRASS for details.
@author Martin Landa <landa.martin gmail.com>
"""
import os
import wx
from gui_core.gselect import VectorDBInfo as VectorDBInfoBase
from gui_core.wrap import StaticText
from core.gcmd import RunCommand, GError
from core.settings import UserSettings
import grass.script as grass
def METHOD_NAME(value):
"""Get unicode value
:param value: value to be recoded
:return: unicode value
"""
if isinstance(value, str):
return value
if isinstance(value, bytes):
enc = GetDbEncoding()
return str(value, enc, errors="replace")
else:
return str(value)
def GetDbEncoding():
"""Checks if user set DB encoding (first user settings,
then env variable), if not assumes unicode."""
enc = UserSettings.Get(group="atm", key="encoding", subkey="value")
if not enc and "GRASS_DB_ENCODING" in os.environ:
enc = os.environ["GRASS_DB_ENCODING"]
else:
enc = "utf-8" # assuming UTF-8
return enc
def CreateDbInfoDesc(panel, mapDBInfo, layer):
"""Create database connection information content"""
infoFlexSizer = wx.FlexGridSizer(cols=2, hgap=1, vgap=1)
infoFlexSizer.AddGrowableCol(1)
infoFlexSizer.Add(StaticText(parent=panel, id=wx.ID_ANY, label="Driver:"))
infoFlexSizer.Add(
StaticText(parent=panel, id=wx.ID_ANY, label=mapDBInfo.layers[layer]["driver"])
)
infoFlexSizer.Add(StaticText(parent=panel, id=wx.ID_ANY, label="Database:"))
infoFlexSizer.Add(
StaticText(
parent=panel, id=wx.ID_ANY, label=mapDBInfo.layers[layer]["database"]
)
)
infoFlexSizer.Add(StaticText(parent=panel, id=wx.ID_ANY, label="Table:"))
infoFlexSizer.Add(
StaticText(parent=panel, id=wx.ID_ANY, label=mapDBInfo.layers[layer]["table"])
)
infoFlexSizer.Add(StaticText(parent=panel, id=wx.ID_ANY, label="Key:"))
infoFlexSizer.Add(
StaticText(parent=panel, id=wx.ID_ANY, label=mapDBInfo.layers[layer]["key"])
)
return infoFlexSizer
class VectorDBInfo(VectorDBInfoBase):
"""Class providing information about attribute tables
linked to the vector map"""
def __init__(self, map):
VectorDBInfoBase.__init__(self, map)
def GetColumns(self, table):
"""Return list of columns names (based on their index)"""
try:
names = [""] * len(self.tables[table].keys())
except KeyError:
return []
for name, desc in self.tables[table].items():
names[desc["index"]] = name
return names
def SelectByPoint(self, queryCoords, qdist):
"""Get attributes by coordinates (all available layers)
Return line id or None if no line is found"""
line = None
nselected = 0
try:
data = grass.vector_what(
map=self.map,
coord=(float(queryCoords[0]), float(queryCoords[1])),
distance=float(qdist),
)
except grass.ScriptError:
GError(
parent=None,
message=_(
"Failed to query vector map <{map}>. "
"Check database settings and topology."
).format(map=self.map),
)
if len(data) < 1 or all(("Table" not in record) for record in data):
return None
# process attributes
ret = dict()
for key in ["Category", "Layer", "Table", "Id"]:
ret[key] = list()
for record in data:
if "Table" not in record:
continue
table = record["Table"]
for key, value in record["Attributes"].items():
if len(value) < 1:
value = None
else:
if self.tables[table][key]["ctype"] != str:
value = self.tables[table][key]["ctype"](value)
else:
value = METHOD_NAME(value)
self.tables[table][key]["values"].append(value)
for key, value in record.items():
if key == "Attributes":
continue
if key in ret:
ret[key].append(value)
if "Id" not in record.keys():
ret["Id"].append(None)
return ret
def SelectFromTable(self, layer, cols="*", where=None):
"""Select records from the table
Return number of selected records, -1 on error
"""
if layer <= 0:
return -1
nselected = 0
table = self.layers[layer]["table"] # get table desc
# select values (only one record)
if where is None or where == "":
sql = "SELECT %s FROM %s" % (cols, table)
else:
sql = "SELECT %s FROM %s WHERE %s" % (cols, table, where)
ret = RunCommand(
"db.select",
read=True,
quiet=True,
flags="v",
sql=sql,
database=self.layers[layer]["database"],
driver=self.layers[layer]["driver"],
)
# self.tables[table][key][1] = str(cat)
if ret:
for line in ret.splitlines():
name, value = line.split("|")
# casting ...
if value:
if not isinstance("", self.tables[table][name]["ctype"]):
value = self.tables[table][name]["ctype"](value)
else:
value = METHOD_NAME(value)
else:
value = None
self.tables[table][name]["values"].append(value)
nselected = 1
return nselected
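# Illustrative sketch (not part of the original module): expected behaviour of
# the value-decoding helper above, assuming no user encoding is configured so
# that GetDbEncoding() falls back to UTF-8.
def _unicode_value_example():
    assert METHOD_NAME("café") == "café"          # str values pass through unchanged
    assert METHOD_NAME(b"caf\xc3\xa9") == "café"  # bytes are decoded with the DB encoding
    assert METHOD_NAME(42) == "42"                # other types are stringified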
|
3,362 |
test decimated offset 105
|
"""
Name: decimation_test
Purpose: v.in.lidar decimation test
Author: Vaclav Petras
Copyright: (C) 2015 by Vaclav Petras and the GRASS Development Team
Licence: This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
"""
import os
from grass.gunittest.case import TestCase
from grass.gunittest.main import test
class TestCountBasedDecimation(TestCase):
"""Test case for watershed module
This tests expects v.random and v.out.lidar to work properly.
"""
# Setup variables to be used for outputs
vector_points = "vinlidar_decimation_original"
imported_points = "vinlidar_decimation_imported"
las_file = "vinlidar_decimation_points.las"
    npoints = 300  # this value works well here without rounding issues
@classmethod
def setUpClass(cls):
"""Ensures expected computational region and generated data"""
cls.use_temp_region()
cls.runModule("g.region", n=20, s=10, e=25, w=15, res=1)
cls.runModule(
"v.random",
flags="zb",
output=cls.vector_points,
npoints=cls.npoints,
zmin=200,
zmax=500,
seed=100,
)
cls.runModule("v.out.lidar", input=cls.vector_points, output=cls.las_file)
@classmethod
def tearDownClass(cls):
"""Remove the temporary region and generated data"""
cls.runModule("g.remove", flags="f", type="vector", name=cls.vector_points)
if os.path.isfile(cls.las_file):
os.remove(cls.las_file)
cls.del_temp_region()
def tearDown(self):
"""Remove the outputs created by the import
This is executed after each test run.
"""
self.runModule("g.remove", flags="f", type="vector", name=self.imported_points)
def test_identical(self):
"""Test to see if the standard outputs are created"""
self.assertModule(
"v.in.lidar", input=self.las_file, output=self.imported_points, flags="bt"
)
self.assertVectorExists(self.imported_points)
self.assertVectorFitsTopoInfo(
vector=self.imported_points, reference=dict(points=self.npoints)
)
def skip_number(self, number, expect):
"""Test to see if the outputs are created"""
self.assertModule(
"v.in.lidar",
input=self.las_file,
output=self.imported_points,
flags="bt",
skip=number,
)
self.assertVectorExists(self.imported_points)
self.assertVectorFitsTopoInfo(
vector=self.imported_points, reference=dict(points=expect)
)
def preserve_number(self, number, expect):
"""Test to see if the outputs are created"""
self.assertModule(
"v.in.lidar",
input=self.las_file,
output=self.imported_points,
flags="bt",
preserve=number,
)
self.assertVectorExists(self.imported_points)
self.assertVectorFitsTopoInfo(
vector=self.imported_points, reference=dict(points=expect)
)
def offset_number(self, number, expect):
"""Test to see if the outputs are created"""
self.assertModule(
"v.in.lidar",
input=self.las_file,
output=self.imported_points,
flags="bt",
offset=number,
)
self.assertVectorExists(self.imported_points)
self.assertVectorFitsTopoInfo(
vector=self.imported_points, reference=dict(points=expect)
)
def limit_number(self, number, expect):
"""Test to see if the outputs are created"""
self.assertModule(
"v.in.lidar",
input=self.las_file,
output=self.imported_points,
flags="bt",
limit=number,
)
self.assertVectorExists(self.imported_points)
self.assertVectorFitsTopoInfo(
vector=self.imported_points, reference=dict(points=expect)
)
def test_decimated_skip_2(self):
"""Test to see if the outputs are created"""
self.skip_number(number=2, expect=self.npoints / 2)
def test_decimated_skip_4(self):
"""Test to see if the outputs are created"""
self.skip_number(number=4, expect=0.75 * self.npoints)
def test_decimated_skip_10(self):
"""Test to see if the outputs are created"""
self.skip_number(number=10, expect=0.9 * self.npoints)
def test_decimated_preserve_2(self):
"""Test to see if the outputs are created"""
self.preserve_number(number=2, expect=self.npoints / 2)
def test_decimated_preserve_10(self):
"""Test to see if the outputs are created"""
self.preserve_number(number=10, expect=self.npoints / 10)
def METHOD_NAME(self):
"""Test to see if the outputs are created"""
self.offset_number(number=105, expect=self.npoints - 105)
def test_decimated_limit_105(self):
"""Test to see if the outputs are created"""
self.limit_number(number=105, expect=105)
def test_offset_preserve(self):
"""Test to see if the outputs are created"""
self.assertModule(
"v.in.lidar",
input=self.las_file,
output=self.imported_points,
flags="bt",
offset=105,
preserve=10,
)
self.assertVectorExists(self.imported_points)
self.assertVectorFitsTopoInfo(
vector=self.imported_points,
reference=dict(points=int((self.npoints - 105) / 10)),
)
def test_limit_skip(self):
"""Test to see if the outputs are created"""
self.assertModule(
"v.in.lidar",
input=self.las_file,
output=self.imported_points,
flags="bt",
limit=105,
skip=10,
)
self.assertVectorExists(self.imported_points)
self.assertVectorFitsTopoInfo(
vector=self.imported_points, reference=dict(points=105)
)
def test_offset_limit_skip(self):
"""Test to see if the outputs are created"""
self.assertModule(
"v.in.lidar",
input=self.las_file,
output=self.imported_points,
flags="bt",
offset=50,
skip=5,
limit=self.npoints - 1,
)
self.assertVectorExists(self.imported_points)
self.assertVectorFitsTopoInfo(
vector=self.imported_points,
reference=dict(points=0.8 * (self.npoints - 50)),
)
if __name__ == "__main__":
test()
|
3,363 |
test prediction labels confidence
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
# noinspection PyProtectedMember
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from sklearn.base import clone
from sklearn.metrics import roc_auc_score
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyod.models.rgraph import RGraph
from pyod.utils.data import generate_data
class TestRGraph(unittest.TestCase):
def setUp(self):
self.n_train = 100
self.n_test = 100
self.n_features = 80
self.contamination = 0.1
self.roc_floor = 0.8
# Generate sample data
self.X_train, self.X_test, self.y_train, self.y_test = generate_data(
n_train=self.n_train, n_test=self.n_test,
n_features=self.n_features, contamination=self.contamination,
random_state=42)
self.clf = RGraph(n_nonzero=100, transition_steps=20, gamma=50, blocksize_test_data=20,
tau=1, preprocessing=True, active_support=False, gamma_nz=False,
maxiter_lasso=100, contamination=self.contamination,
algorithm='lasso_lars', verbose=0)
self.clf.fit(self.X_train)
def test_parameters(self):
assert (hasattr(self.clf, 'decision_scores_') and
self.clf.decision_scores_ is not None)
assert (hasattr(self.clf, 'labels_') and
self.clf.labels_ is not None)
assert (hasattr(self.clf, 'threshold_') and
self.clf.threshold_ is not None)
assert (hasattr(self.clf, '_mu') and
self.clf._mu is not None)
assert (hasattr(self.clf, '_sigma') and
self.clf._sigma is not None)
assert (hasattr(self.clf, 'transition_matrix_') and
self.clf.transition_matrix_ is not None)
def test_train_scores(self):
assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
def test_prediction_scores(self):
pred_scores = self.clf.decision_function(self.X_test)
# check score shapes
assert_equal(pred_scores.shape[0], self.X_test.shape[0])
# check performance
assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)
def test_prediction_labels(self):
pred_labels = self.clf.predict(self.X_test)
assert_equal(pred_labels.shape, self.y_test.shape)
def test_prediction_proba(self):
pred_proba = self.clf.predict_proba(self.X_test)
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def test_prediction_proba_linear(self):
pred_proba = self.clf.predict_proba(self.X_test, method='linear')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def test_prediction_proba_unify(self):
pred_proba = self.clf.predict_proba(self.X_test, method='unify')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def test_prediction_proba_parameter(self):
with assert_raises(ValueError):
self.clf.predict_proba(self.X_test, method='something')
def METHOD_NAME(self):
pred_labels, confidence = self.clf.predict(self.X_test,
return_confidence=True)
assert_equal(pred_labels.shape, self.y_test.shape)
assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)
def test_prediction_proba_linear_confidence(self):
pred_proba, confidence = self.clf.predict_proba(self.X_test,
method='linear',
return_confidence=True)
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)
def test_fit_predict(self):
pred_labels = self.clf.fit_predict(self.X_train)
assert_equal(pred_labels.shape, self.y_train.shape)
def test_fit_predict_score(self):
self.clf.fit_predict_score(self.X_test, self.y_test)
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='roc_auc_score')
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='prc_n_score')
with assert_raises(NotImplementedError):
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='something')
def test_model_clone(self):
# for deep models this may not apply
clone_clf = clone(self.clf)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
3,364 |
test periodic value repr
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import sympy
import cirq
def test_periodic_value_equality():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(
cirq.PeriodicValue(1, 2),
cirq.PeriodicValue(1, 2),
cirq.PeriodicValue(3, 2),
cirq.PeriodicValue(3, 2),
cirq.PeriodicValue(5, 2),
cirq.PeriodicValue(-1, 2),
)
eq.add_equality_group(cirq.PeriodicValue(1.5, 2.0), cirq.PeriodicValue(1.5, 2.0))
eq.add_equality_group(cirq.PeriodicValue(0, 2))
eq.add_equality_group(cirq.PeriodicValue(1, 3))
eq.add_equality_group(cirq.PeriodicValue(2, 4))
def test_periodic_value_approx_eq_basic():
assert cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.0, 2.0), atol=0.1)
assert cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.2, 2.0), atol=0.3)
assert not cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.2, 2.0), atol=0.1)
assert not cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.0, 2.2), atol=0.3)
assert not cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.0, 2.2), atol=0.1)
assert not cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.2, 2.2), atol=0.3)
assert not cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.2, 2.2), atol=0.1)
def test_periodic_value_approx_eq_normalized():
assert cirq.approx_eq(cirq.PeriodicValue(1.0, 3.0), cirq.PeriodicValue(4.1, 3.0), atol=0.2)
assert cirq.approx_eq(cirq.PeriodicValue(1.0, 3.0), cirq.PeriodicValue(-2.1, 3.0), atol=0.2)
def test_periodic_value_approx_eq_boundary():
assert cirq.approx_eq(cirq.PeriodicValue(0.0, 2.0), cirq.PeriodicValue(1.9, 2.0), atol=0.2)
assert cirq.approx_eq(cirq.PeriodicValue(0.1, 2.0), cirq.PeriodicValue(1.9, 2.0), atol=0.3)
assert cirq.approx_eq(cirq.PeriodicValue(1.9, 2.0), cirq.PeriodicValue(0.1, 2.0), atol=0.3)
assert not cirq.approx_eq(cirq.PeriodicValue(0.1, 2.0), cirq.PeriodicValue(1.9, 2.0), atol=0.1)
assert cirq.approx_eq(cirq.PeriodicValue(0, 1.0), cirq.PeriodicValue(0.5, 1.0), atol=0.6)
assert not cirq.approx_eq(cirq.PeriodicValue(0, 1.0), cirq.PeriodicValue(0.5, 1.0), atol=0.1)
assert cirq.approx_eq(cirq.PeriodicValue(0.4, 1.0), cirq.PeriodicValue(0.6, 1.0), atol=0.3)
def test_periodic_value_types_mismatch():
assert not cirq.approx_eq(cirq.PeriodicValue(0.0, 2.0), 0.0, atol=0.2)
assert not cirq.approx_eq(0.0, cirq.PeriodicValue(0.0, 2.0), atol=0.2)
@pytest.mark.parametrize(
'value, is_parameterized, parameter_names',
[
(cirq.PeriodicValue(1.0, 3.0), False, set()),
(cirq.PeriodicValue(0.0, sympy.Symbol('p')), True, {'p'}),
(cirq.PeriodicValue(sympy.Symbol('v'), 3.0), True, {'v'}),
(cirq.PeriodicValue(sympy.Symbol('v'), sympy.Symbol('p')), True, {'p', 'v'}),
],
)
@pytest.mark.parametrize('resolve_fn', [cirq.resolve_parameters, cirq.resolve_parameters_once])
def test_periodic_value_is_parameterized(value, is_parameterized, parameter_names, resolve_fn):
assert cirq.is_parameterized(value) == is_parameterized
assert cirq.parameter_names(value) == parameter_names
resolved = resolve_fn(value, {p: 1 for p in parameter_names})
assert not cirq.is_parameterized(resolved)
@pytest.mark.parametrize(
'val',
[
cirq.PeriodicValue(0.4, 1.0),
cirq.PeriodicValue(0.0, 2.0),
cirq.PeriodicValue(1.0, 3),
cirq.PeriodicValue(-2.1, 3.0),
cirq.PeriodicValue(sympy.Symbol('v'), sympy.Symbol('p')),
cirq.PeriodicValue(2.0, sympy.Symbol('p')),
cirq.PeriodicValue(sympy.Symbol('v'), 3),
],
)
def METHOD_NAME(val):
cirq.testing.assert_equivalent_repr(val)
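# Illustrative note (not one of the original tests): PeriodicValue compares
# values modulo the period, which is why 1, 3, 5 and -1 with period 2 fall
# into a single equality group above.
def _periodicity_example():
    assert cirq.PeriodicValue(5, 2) == cirq.PeriodicValue(-1, 2)  # 5 % 2 == -1 % 2 == 1
    assert cirq.PeriodicValue(1, 2) != cirq.PeriodicValue(1, 3)   # different periods never compare equal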
|
3,365 |
wrap
|
from types import CodeType
from types import FunctionType
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import cast
from ddtrace.debugging._function.discovery import FullyNamed
from ddtrace.internal.injection import HookInfoType
from ddtrace.internal.injection import HookType
from ddtrace.internal.injection import eject_hooks
from ddtrace.internal.injection import inject_hooks
from ddtrace.internal.wrapping import WrappedFunction
from ddtrace.internal.wrapping import Wrapper
from ddtrace.internal.wrapping import unwrap
from ddtrace.internal.wrapping import METHOD_NAME
WrapperType = Callable[[FunctionType, Any, Any, Any], Any]
class FullyNamedWrappedFunction(FullyNamed, WrappedFunction):
"""A fully named wrapper function."""
class FunctionStore(object):
"""Function object store.
This class provides a storage layer for patching operations, which allows us
to store the original code object of functions being patched with either
    hook injections or wrapping. This also enforces a single wrapping layer.
Multiple wrapping is implemented as a list of wrappers handled by the single
wrapper function.
If extra attributes are defined during the patching process, they will get
removed when the functions are restored.
"""
def __init__(self, extra_attrs=None):
# type: (Optional[List[str]]) -> None
self._code_map = {} # type: Dict[FunctionType, CodeType]
self._wrapper_map = {} # type: Dict[FunctionType, Wrapper]
self._extra_attrs = ["__dd_wrapped__"]
if extra_attrs:
self._extra_attrs.extend(extra_attrs)
def __enter__(self):
return self
def __exit__(self, *exc):
self.restore_all()
def _store(self, function):
# type: (FunctionType) -> None
if function not in self._code_map:
self._code_map[function] = function.__code__
def inject_hooks(self, function, hooks):
# type: (FullyNamedWrappedFunction, List[HookInfoType]) -> Set[str]
"""Bulk-inject hooks into a function.
Returns the set of probe IDs for those probes that failed to inject.
"""
try:
return self.inject_hooks(cast(FullyNamedWrappedFunction, function.__dd_wrapped__), hooks)
except AttributeError:
f = cast(FunctionType, function)
self._store(f)
return {p.probe_id for _, _, p in inject_hooks(f, hooks)}
def eject_hooks(self, function, hooks):
# type: (FunctionType, List[HookInfoType]) -> Set[str]
"""Bulk-eject hooks from a function.
Returns the set of probe IDs for those probes that failed to eject.
"""
try:
wrapped = cast(FullyNamedWrappedFunction, function).__dd_wrapped__
except AttributeError:
# Not a wrapped function so we can actually eject from it
return {p.probe_id for _, _, p in eject_hooks(function, hooks)}
else:
# Try on the wrapped function.
return self.eject_hooks(cast(FunctionType, wrapped), hooks)
def inject_hook(self, function, hook, line, arg):
# type: (FullyNamedWrappedFunction, HookType, int, Any) -> bool
"""Inject a hook into a function."""
return not not self.inject_hooks(function, [(hook, line, arg)])
def eject_hook(self, function, hook, line, arg):
# type: (FunctionType, HookType, int, Any) -> bool
"""Eject a hook from a function."""
return not not self.eject_hooks(function, [(hook, line, arg)])
def METHOD_NAME(self, function, wrapper):
# type: (FunctionType, Wrapper) -> None
"""Wrap a function with a hook."""
self._store(function)
self._wrapper_map[function] = wrapper
METHOD_NAME(function, wrapper)
def unwrap(self, function):
# type: (FullyNamedWrappedFunction) -> None
"""Unwrap a hook around a wrapped function."""
unwrap(function, self._wrapper_map.pop(cast(FunctionType, function)))
def restore_all(self):
# type: () -> None
"""Restore all the patched functions to their original form."""
for function, code in self._code_map.items():
function.__code__ = code
for attr in self._extra_attrs:
try:
delattr(function, attr)
except AttributeError:
pass
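# Illustrative usage sketch (not part of this module): wrapping a function for
# the lifetime of a `with` block. `my_wrapper` is a hypothetical callable that
# follows the Wrapper calling convention expected by ddtrace.internal.wrapping.
def _function_store_example(target, my_wrapper):
    # type: (FunctionType, Wrapper) -> None
    with FunctionStore() as store:
        store.METHOD_NAME(target, my_wrapper)  # original __code__ is stored for later restore
        target()                               # calls now go through my_wrapper
    # leaving the block calls restore_all(), undoing the patch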
|
3,366 |
private link service connection state
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
A private endpoint connection
"""
def __init__(__self__, group_ids=None, id=None, name=None, private_endpoint=None, METHOD_NAME=None, provisioning_state=None, type=None):
if group_ids and not isinstance(group_ids, list):
raise TypeError("Expected argument 'group_ids' to be a list")
pulumi.set(__self__, "group_ids", group_ids)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", METHOD_NAME)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="groupIds")
def group_ids(self) -> Sequence[str]:
"""
Group IDs.
"""
return pulumi.get(self, "group_ids")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointPropertyResponse']:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def METHOD_NAME(self) -> Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']:
"""
Connection state of the private endpoint connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
State of the private endpoint connection.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
group_ids=self.group_ids,
id=self.id,
name=self.name,
private_endpoint=self.private_endpoint,
METHOD_NAME=self.METHOD_NAME,
provisioning_state=self.provisioning_state,
type=self.type)
def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
Gets a private endpoint connection.
Azure REST API version: 2021-11-01.
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:sql:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
group_ids=pulumi.get(__ret__, 'group_ids'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
METHOD_NAME=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
Gets a private endpoint connection.
Azure REST API version: 2021-11-01.
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
...
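# Illustrative usage sketch (not part of the generated module): resolving a
# connection inside a Pulumi program. The resource names are placeholders.
def _example_lookup() -> str:
    conn = get_private_endpoint_connection(
        private_endpoint_connection_name="example-connection",
        resource_group_name="example-rg",
        server_name="example-sqlserver",
    )
    return conn.provisioning_state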
|
3,367 |
output handler
|
import logging
import os
from redash.query_runner import (
TYPE_DATETIME,
TYPE_FLOAT,
TYPE_INTEGER,
TYPE_STRING,
BaseSQLQueryRunner,
JobTimeoutException,
register,
)
from redash.utils import json_dumps, json_loads
try:
import oracledb
TYPES_MAP = {
oracledb.DATETIME: TYPE_DATETIME,
oracledb.CLOB: TYPE_STRING,
oracledb.LOB: TYPE_STRING,
oracledb.FIXED_CHAR: TYPE_STRING,
oracledb.FIXED_NCHAR: TYPE_STRING,
oracledb.INTERVAL: TYPE_DATETIME,
oracledb.LONG_STRING: TYPE_STRING,
oracledb.NATIVE_FLOAT: TYPE_FLOAT,
oracledb.NCHAR: TYPE_STRING,
oracledb.NUMBER: TYPE_FLOAT,
oracledb.ROWID: TYPE_INTEGER,
oracledb.STRING: TYPE_STRING,
oracledb.TIMESTAMP: TYPE_DATETIME,
}
ENABLED = True
except ImportError:
ENABLED = False
logger = logging.getLogger(__name__)
class Oracle(BaseSQLQueryRunner):
should_annotate_query = False
noop_query = "SELECT 1 FROM dual"
limit_query = " FETCH NEXT 1000 ROWS ONLY"
limit_keywords = ["ROW", "ROWS", "ONLY", "TIES"]
@classmethod
def get_col_type(cls, col_type, scale):
if col_type == oracledb.NUMBER:
if scale is None:
return TYPE_INTEGER
if scale > 0:
return TYPE_FLOAT
return TYPE_INTEGER
else:
return TYPES_MAP.get(col_type, None)
@classmethod
def enabled(cls):
return ENABLED
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"user": {"type": "string"},
"password": {"type": "string"},
"host": {
"type": "string",
"title": "Host: To use a DSN Service Name instead, use the text string `_useservicename` in the host name field.",
},
"port": {"type": "number"},
"servicename": {"type": "string", "title": "DSN Service Name"},
"encoding": {"type": "string"},
},
"required": ["servicename", "user", "password", "host", "port"],
"extra_options": ["encoding"],
"secret": ["password"],
}
@classmethod
def type(cls):
return "oracle"
def _get_tables(self, schema):
query = """
SELECT
all_tab_cols.OWNER,
all_tab_cols.TABLE_NAME,
all_tab_cols.COLUMN_NAME
FROM all_tab_cols
WHERE all_tab_cols.OWNER NOT IN('SYS','SYSTEM','ORDSYS','CTXSYS','WMSYS','MDSYS','ORDDATA','XDB','OUTLN','DMSYS','DSSYS','EXFSYS','LBACSYS','TSMSYS')
"""
results, error = self.run_query(query, None)
if error is not None:
self._handle_run_query_error(error)
results = json_loads(results)
for row in results["rows"]:
if row["OWNER"] is not None:
table_name = "{}.{}".format(row["OWNER"], row["TABLE_NAME"])
else:
table_name = row["TABLE_NAME"]
if table_name not in schema:
schema[table_name] = {"name": table_name, "columns": []}
schema[table_name]["columns"].append(row["COLUMN_NAME"])
return list(schema.values())
@classmethod
def _convert_number(cls, value):
try:
return int(value)
except BaseException:
return value
@classmethod
def METHOD_NAME(cls, cursor, name, default_type, length, precision, scale):
if default_type in (oracledb.CLOB, oracledb.LOB):
return cursor.var(oracledb.LONG_STRING, 80000, cursor.arraysize)
if default_type in (oracledb.STRING, oracledb.FIXED_CHAR):
return cursor.var(str, length, cursor.arraysize)
if default_type == oracledb.NUMBER:
if scale <= 0:
return cursor.var(
oracledb.STRING,
255,
outconverter=Oracle._convert_number,
arraysize=cursor.arraysize,
)
def run_query(self, query, user):
if self.configuration.get("encoding"):
os.environ["NLS_LANG"] = self.configuration["encoding"]
# To use a DSN Service Name instead, use the text string `_useservicename` in the host name field.
if self.configuration["host"].lower() == "_useservicename":
dsn = self.configuration["servicename"]
else:
dsn = oracledb.makedsn(
self.configuration["host"],
self.configuration["port"],
service_name=self.configuration["servicename"],
)
connection = oracledb.connect(
user=self.configuration["user"],
password=self.configuration["password"],
dsn=dsn,
)
connection.outputtypehandler = Oracle.METHOD_NAME
cursor = connection.cursor()
try:
cursor.execute(query)
rows_count = cursor.rowcount
if cursor.description is not None:
columns = self.fetch_columns([(i[0], Oracle.get_col_type(i[1], i[5])) for i in cursor.description])
rows = [dict(zip((c["name"] for c in columns), row)) for row in cursor]
data = {"columns": columns, "rows": rows}
error = None
json_data = json_dumps(data)
else:
columns = [{"name": "Row(s) Affected", "type": "TYPE_INTEGER"}]
rows = [{"Row(s) Affected": rows_count}]
data = {"columns": columns, "rows": rows}
json_data = json_dumps(data)
connection.commit()
except oracledb.DatabaseError as err:
(err_args,) = err.args
line_number = query.count("\n", 0, err_args.offset) + 1
column_number = err_args.offset - query.rfind("\n", 0, err_args.offset) - 1
error = "Query failed at line {}, column {}: {}".format(str(line_number), str(column_number), str(err))
json_data = None
except (KeyboardInterrupt, JobTimeoutException):
connection.cancel()
raise
finally:
os.environ.pop("NLS_LANG", None)
connection.close()
return json_data, error
register(Oracle)
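# Illustrative configuration sketch (not part of this runner): the keys mirror
# configuration_schema() above; host, credentials and service name are placeholders.
def _example_oracle_query():
    runner = Oracle({
        "user": "scott",
        "password": "tiger",
        "host": "db.example.com",
        "port": 1521,
        "servicename": "ORCLPDB1",
    })
    return runner.run_query("SELECT 1 FROM dual", user=None)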
|
3,368 |
process query params
|
import json
from typing import Dict, List, Tuple
from django import forms
from django.conf import settings
__all__ = (
'APISelect',
'APISelectMultiple',
)
class APISelect(forms.Select):
"""
A select widget populated via an API call
:param api_url: API endpoint URL. Required if not set automatically by the parent field.
"""
template_name = 'widgets/apiselect.html'
option_template_name = 'widgets/select_option.html'
dynamic_params: Dict[str, str]
static_params: Dict[str, List[str]]
def __init__(self, api_url=None, full=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attrs['class'] = 'netbox-api-select'
self.dynamic_params: Dict[str, List[str]] = {}
self.static_params: Dict[str, List[str]] = {}
if api_url:
self.attrs['data-url'] = '/{}{}'.format(settings.BASE_PATH, api_url.lstrip('/')) # Inject BASE_PATH
def __deepcopy__(self, memo):
"""Reset `static_params` and `dynamic_params` when APISelect is deepcopied."""
result = super().__deepcopy__(memo)
result.dynamic_params = {}
result.static_params = {}
return result
def _process_query_param(self, key, value) -> None:
"""
Based on query param value's type and value, update instance's dynamic/static params.
"""
if isinstance(value, str):
# Coerce `True` boolean.
if value.lower() == 'true':
value = True
# Coerce `False` boolean.
elif value.lower() == 'false':
value = False
# Query parameters cannot have a `None` (or `null` in JSON) type, convert
# `None` types to `'null'` so that ?key=null is used in the query URL.
elif value is None:
value = 'null'
# Check type of `value` again, since it may have changed.
if isinstance(value, str):
if value.startswith('$'):
# A value starting with `$` indicates a dynamic query param, where the
# initial value is unknown and will be updated at the JavaScript layer
# as the related form field's value changes.
field_name = value.strip('$')
self.dynamic_params[field_name] = key
else:
# A value _not_ starting with `$` indicates a static query param, where
# the value is already known and should not be changed at the JavaScript
# layer.
if key in self.static_params:
current = self.static_params[key]
self.static_params[key] = [v for v in set([*current, value])]
else:
self.static_params[key] = [value]
else:
# Any non-string values are passed through as static query params, since
# dynamic query param values have to be a string (in order to start with
# `$`).
if key in self.static_params:
current = self.static_params[key]
self.static_params[key] = [v for v in set([*current, value])]
else:
self.static_params[key] = [value]
def METHOD_NAME(self, query_params):
"""
Process an entire query_params dictionary, and handle primitive or list values.
"""
for key, value in query_params.items():
if isinstance(value, (List, Tuple)):
# If value is a list/tuple, iterate through each item.
for item in value:
self._process_query_param(key, item)
else:
self._process_query_param(key, value)
def _serialize_params(self, key, params):
"""
Serialize dynamic or static query params to JSON and add the serialized value to
the widget attributes by `key`.
"""
# Deserialize the current serialized value from the widget, using an empty JSON
# array as a fallback in the event one is not defined.
current = json.loads(self.attrs.get(key, '[]'))
# Combine the current values with the updated values and serialize the result as
# JSON. Note: the `separators` kwarg effectively removes extra whitespace from
# the serialized JSON string, which is ideal since these will be passed as
# attributes to HTML elements and parsed on the client.
self.attrs[key] = json.dumps([*current, *params], separators=(',', ':'))
def _add_dynamic_params(self):
"""
Convert post-processed dynamic query params to data structure expected by front-
end, serialize the value to JSON, and add it to the widget attributes.
"""
key = 'data-dynamic-params'
if len(self.dynamic_params) > 0:
try:
update = [{'fieldName': f, 'queryParam': q} for (f, q) in self.dynamic_params.items()]
self._serialize_params(key, update)
except IndexError as error:
raise RuntimeError(f"Missing required value for dynamic query param: '{self.dynamic_params}'") from error
def _add_static_params(self):
"""
Convert post-processed static query params to data structure expected by front-
end, serialize the value to JSON, and add it to the widget attributes.
"""
key = 'data-static-params'
if len(self.static_params) > 0:
try:
update = [{'queryParam': k, 'queryValue': v} for (k, v) in self.static_params.items()]
self._serialize_params(key, update)
except IndexError as error:
raise RuntimeError(f"Missing required value for static query param: '{self.static_params}'") from error
def add_query_params(self, query_params):
"""
        Process & add a key/value pair of URL query parameters to the widget attributes.
"""
# Process query parameters. This populates `self.dynamic_params` and `self.static_params`.
self.METHOD_NAME(query_params)
# Add processed dynamic parameters to widget attributes.
self._add_dynamic_params()
# Add processed static parameters to widget attributes.
self._add_static_params()
def add_query_param(self, key, value) -> None:
"""
Process & add a key/value pair of URL query parameters to the widget attributes.
"""
self.add_query_params({key: value})
class APISelectMultiple(APISelect, forms.SelectMultiple):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attrs['data-multiple'] = 1
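# Hedged usage sketch: the parameter names ("site", "status") are hypothetical.
# Assumes Django is importable; api_url is omitted so settings.BASE_PATH is
# never read, which keeps the snippet runnable without a configured settings
# module in most Django versions.
if __name__ == "__main__":
    widget = APISelect()
    widget.add_query_params({
        "site": "$site",      # dynamic: follows the form field named "site"
        "status": "active",   # static: value is already known
    })
    print(widget.attrs["data-dynamic-params"])  # [{"fieldName":"site","queryParam":"site"}]
    print(widget.attrs["data-static-params"])   # [{"queryParam":"status","queryValue":["active"]}]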
|
3,369 |
ensure context
|
"""
Expose each GPU devices directly.
This module implements an API that is like the "CUDA runtime" context manager
for managing CUDA context stack and clean up. It relies on thread-local globals
to separate the context stack management of each thread. Contexts are also
shareable among threads. Only the main thread can destroy Contexts.
Note:
- This module must be imported by the main-thread.
"""
import functools
import threading
from contextlib import contextmanager
from .driver import driver, USE_NV_BINDING
class _DeviceList(object):
def __getattr__(self, attr):
# First time looking at "lst" attribute.
if attr == "lst":
# Device list is not initialized.
# Query all CUDA devices.
numdev = driver.get_device_count()
gpus = [_DeviceContextManager(driver.get_device(devid))
for devid in range(numdev)]
# Define "lst" to avoid re-initialization
self.lst = gpus
return gpus
# Other attributes
return super(_DeviceList, self).__getattr__(attr)
def __getitem__(self, devnum):
'''
Returns the context manager for device *devnum*.
'''
return self.lst[devnum]
def __str__(self):
return ', '.join([str(d) for d in self.lst])
def __iter__(self):
return iter(self.lst)
def __len__(self):
return len(self.lst)
@property
def current(self):
"""Returns the active device or None if there's no active device
"""
with driver.get_active_context() as ac:
devnum = ac.devnum
if devnum is not None:
return self[devnum]
class _DeviceContextManager(object):
"""
Provides a context manager for executing in the context of the chosen
device. The normal use of instances of this type is from
``numba.cuda.gpus``. For example, to execute on device 2::
with numba.cuda.gpus[2]:
d_a = numba.cuda.to_device(a)
to copy the array *a* onto device 2, referred to by *d_a*.
"""
def __init__(self, device):
self._device = device
def __getattr__(self, item):
return getattr(self._device, item)
def __enter__(self):
_runtime.get_or_create_context(self._device.id)
def __exit__(self, exc_type, exc_val, exc_tb):
# this will verify that we are popping the right device context.
self._device.get_primary_context().pop()
def __str__(self):
return "<Managed Device {self.id}>".format(self=self)
class _Runtime(object):
"""Emulate the CUDA runtime context management.
It owns all Devices and Contexts.
Keeps at most one Context per Device
"""
def __init__(self):
self.gpus = _DeviceList()
# For caching the attached CUDA Context
self._tls = threading.local()
# Remember the main thread
# Only the main thread can *actually* destroy
self._mainthread = threading.current_thread()
# Avoid mutation of runtime state in multithreaded programs
self._lock = threading.RLock()
@contextmanager
def METHOD_NAME(self):
"""Ensure a CUDA context is available inside the context.
On entrance, queries the CUDA driver for an active CUDA context and
attaches it in TLS for subsequent calls so they do not need to query
        the CUDA driver again. On exit, detaches the CUDA context from the TLS.
        This allows us to pick up a third-party activated CUDA context in
        any top-level Numba CUDA API.
"""
with driver.get_active_context():
oldctx = self._get_attached_context()
newctx = self.get_or_create_context(None)
self._set_attached_context(newctx)
try:
yield
finally:
self._set_attached_context(oldctx)
def get_or_create_context(self, devnum):
"""Returns the primary context and push+create it if needed
for *devnum*. If *devnum* is None, use the active CUDA context (must
be primary) or create a new one with ``devnum=0``.
"""
if devnum is None:
attached_ctx = self._get_attached_context()
if attached_ctx is None:
return self._get_or_create_context_uncached(devnum)
else:
return attached_ctx
else:
if USE_NV_BINDING:
devnum = int(devnum)
return self._activate_context_for(devnum)
def _get_or_create_context_uncached(self, devnum):
"""See also ``get_or_create_context(devnum)``.
This version does not read the cache.
"""
with self._lock:
# Try to get the active context in the CUDA stack or
# activate GPU-0 with the primary context
with driver.get_active_context() as ac:
if not ac:
return self._activate_context_for(0)
else:
# Get primary context for the active device
ctx = self.gpus[ac.devnum].get_primary_context()
# Is active context the primary context?
if USE_NV_BINDING:
ctx_handle = int(ctx.handle)
ac_ctx_handle = int(ac.context_handle)
else:
ctx_handle = ctx.handle.value
ac_ctx_handle = ac.context_handle.value
if ctx_handle != ac_ctx_handle:
msg = ('Numba cannot operate on non-primary'
' CUDA context {:x}')
raise RuntimeError(msg.format(ac_ctx_handle))
# Ensure the context is ready
ctx.prepare_for_use()
return ctx
def _activate_context_for(self, devnum):
with self._lock:
gpu = self.gpus[devnum]
newctx = gpu.get_primary_context()
# Detect unexpected context switch
cached_ctx = self._get_attached_context()
if cached_ctx is not None and cached_ctx is not newctx:
raise RuntimeError('Cannot switch CUDA-context.')
newctx.push()
return newctx
def _get_attached_context(self):
return getattr(self._tls, 'attached_context', None)
def _set_attached_context(self, ctx):
self._tls.attached_context = ctx
def reset(self):
"""Clear all contexts in the thread. Destroy the context if and only
if we are in the main thread.
"""
# Pop all active context.
while driver.pop_active_context() is not None:
pass
# If it is the main thread
if threading.current_thread() == self._mainthread:
self._destroy_all_contexts()
def _destroy_all_contexts(self):
# Reset all devices
for gpu in self.gpus:
gpu.reset()
_runtime = _Runtime()
# ================================ PUBLIC API ================================
gpus = _runtime.gpus
def get_context(devnum=None):
"""Get the current device or use a device by device number, and
return the CUDA context.
"""
return _runtime.get_or_create_context(devnum)
def require_context(fn):
"""
A decorator that ensures a CUDA context is available when *fn* is executed.
Note: The function *fn* cannot switch CUDA-context.
"""
@functools.wraps(fn)
def _require_cuda_context(*args, **kws):
with _runtime.METHOD_NAME():
return fn(*args, **kws)
return _require_cuda_context
def reset():
"""Reset the CUDA subsystem for the current thread.
In the main thread:
This removes all CUDA contexts. Only use this at shutdown or for
cleaning up between tests.
In non-main threads:
    This clears the CUDA context stack only.
"""
_runtime.reset()
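# Hedged usage sketch: requires a working CUDA driver and at least one GPU, so
# it is left as comments. The function name and buffer size are made up.
#
#   from numba import cuda
#
#   @require_context
#   def make_buffer(n):
#       return cuda.device_array(n)   # runs inside an ensured CUDA context
#
#   with gpus[0]:                     # push device 0's primary context
#       buf = make_buffer(1024)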
|
3,370 |
quat2 yaw
|
import rospy
import threading
from math import asin, atan2, pi
from nav_msgs.msg import Odometry
def METHOD_NAME(qw, qx, qy, qz):
'''
Translates from Quaternion to Yaw.
@param qw,qx,qy,qz: Quaternion values
@type qw,qx,qy,qz: float
@return Yaw value translated from Quaternion
'''
rotateZa0=2.0*(qx*qy + qw*qz)
rotateZa1=qw*qw + qx*qx - qy*qy - qz*qz
rotateZ=0.0
if(rotateZa0 != 0.0 and rotateZa1 != 0.0):
rotateZ=atan2(rotateZa0,rotateZa1)
return rotateZ
def quat2Pitch(qw, qx, qy, qz):
'''
Translates from Quaternion to Pitch.
@param qw,qx,qy,qz: Quaternion values
@type qw,qx,qy,qz: float
@return Pitch value translated from Quaternion
'''
rotateYa0=-2.0*(qx*qz - qw*qy)
rotateY=0.0
if(rotateYa0 >= 1.0):
rotateY = pi/2.0
elif(rotateYa0 <= -1.0):
rotateY = -pi/2.0
else:
rotateY = asin(rotateYa0)
return rotateY
def quat2Roll (qw, qx, qy, qz):
'''
Translates from Quaternion to Roll.
@param qw,qx,qy,qz: Quaternion values
@type qw,qx,qy,qz: float
@return Roll value translated from Quaternion
'''
rotateXa0=2.0*(qy*qz + qw*qx)
rotateXa1=qw*qw - qx*qx - qy*qy + qz*qz
rotateX=0.0
if(rotateXa0 != 0.0 and rotateXa1 != 0.0):
rotateX=atan2(rotateXa0, rotateXa1)
return rotateX
def odometry2Pose3D(odom):
'''
Translates from ROS Odometry to JderobotTypes Pose3d.
@param odom: ROS Odometry to translate
@type odom: Odometry
@return a Pose3d translated from odom
'''
pose = Pose3d()
ori = odom.pose.pose.orientation
pose.x = odom.pose.pose.position.x
pose.y = odom.pose.pose.position.y
pose.z = odom.pose.pose.position.z
#pose.h = odom.pose.pose.position.h
pose.yaw = METHOD_NAME(ori.w, ori.x, ori.y, ori.z)
pose.pitch = quat2Pitch(ori.w, ori.x, ori.y, ori.z)
pose.roll = quat2Roll(ori.w, ori.x, ori.y, ori.z)
pose.q = [ori.w, ori.x, ori.y, ori.z]
pose.timeStamp = odom.header.stamp.secs + (odom.header.stamp.nsecs *1e-9)
return pose
class Pose3d ():
def __init__(self):
self.x = 0 # X coord [meters]
self.y = 0 # Y coord [meters]
self.z = 0 # Z coord [meters]
self.h = 1 # H param
self.yaw = 0 #Yaw angle[rads]
self.pitch = 0 # Pitch angle[rads]
self.roll = 0 # Roll angle[rads]
self.q = [0,0,0,0] # Quaternion
self.timeStamp = 0 # Time stamp [s]
def __str__(self):
s = "Pose3D: {\n x: " + str(self.x) + "\n Y: " + str(self.y)
s = s + "\n Z: " + str(self.z) + "\n H: " + str(self.h)
s = s + "\n Yaw: " + str(self.yaw) + "\n Pitch: " + str(self.pitch) + "\n Roll: " + str(self.roll)
s = s + "\n quaternion: " + str(self.q) + "\n timeStamp: " + str(self.timeStamp) + "\n}"
return s
class ListenerPose3d:
'''
    ROS Pose3D subscriber. Receives Odometry messages from ROS nodes and converts them to Pose3d.
'''
def __init__(self, topic):
'''
ListenerPose3d Constructor.
@param topic: ROS topic to subscribe
@type topic: String
'''
self.topic = topic
self.data = Pose3d()
self.sub = None
self.lock = threading.Lock()
self.start()
def __callback (self, odom):
'''
Callback function to receive and save Pose3d.
@param odom: ROS Odometry received
@type odom: Odometry
'''
pose = odometry2Pose3D(odom)
self.lock.acquire()
self.data = pose
self.lock.release()
def stop(self):
'''
Stops (Unregisters) the client.
'''
self.sub.unregister()
def start (self):
'''
Starts (Subscribes) the client.
'''
self.sub = rospy.Subscriber(self.topic, Odometry, self.__callback)
def getPose3d(self):
'''
Returns last Pose3d.
@return last JdeRobotTypes Pose3d saved
'''
self.lock.acquire()
pose = self.data
self.lock.release()
return pose
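# Hedged sanity check: assumes the module's ROS imports (rospy, nav_msgs) are
# available. The quaternion is a made-up example, a rotation of pi/3 rad about
# the Z axis, so the yaw returned should be roughly pi/3.
if __name__ == "__main__":
    from math import cos, sin
    qw, qz = cos(pi / 6.0), sin(pi / 6.0)
    print(round(METHOD_NAME(qw, 0.0, 0.0, qz), 5), round(pi / 3.0, 5))  # both ~1.0472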
|
3,371 |
translate bin path
|
"""Rules for running `mro format`, either to reformat files or to check them."""
load(
"//tools:providers.bzl",
"MroInfo",
)
load("//tools:util.bzl", "merge_runfiles")
load("@bazel_skylib//lib:shell.bzl", "shell")
def METHOD_NAME(p):
if p.startswith(".."):
return "external/" + p[len("../"):]
return p
def _mro_tool_common_impl(ctx, script_function):
mros = depset(
ctx.files.srcs,
transitive = [
dep[MroInfo].transitive_mros
for dep in ctx.attr.srcs
if MroInfo in dep
],
).to_list()
script = ctx.actions.declare_file(ctx.attr.script_name or ctx.attr.name)
ctx.actions.write(
output = script,
content = script_function(ctx, mros),
is_executable = True,
)
mros.append(script)
return [
DefaultInfo(
executable = script,
files = depset([script]),
runfiles = merge_runfiles(
ctx,
[ctx.attr._mro],
files = mros,
),
),
]
def _make_mrf_runner_script(ctx, mros):
return """#!/usr/bin/env bash
root=$(dirname $(realpath -sL "${{BASH_SOURCE[0]}}"))
runfiles="${{root}}"
if [ -d "${{BASH_SOURCE[0]}}.runfiles/{workspace}" ]; then
runfiles=$(realpath -sL "${{BASH_SOURCE[0]}}.runfiles/{workspace}")
fi
if [ -d "${{BUILD_WORKING_DIRECTORY}}" ]; then
builtin cd "${{BUILD_WORKING_DIRECTORY}}"
fi
{mropath}
export MARTIAN_BASE=$(dirname "${{runfiles}}/{mro}")
exec -a {basename} "${{runfiles}}/{mro}" {subcommand}{flags} \\
\t{mro_files}
""".format(
basename = shell.quote(ctx.executable._mro.basename),
mropath = "export MROPATH=\"{}\"".format(":".join([
"${runfiles}/" + METHOD_NAME(p)
for p in depset(transitive = [
dep[MroInfo].mropath
for dep in ctx.attr.srcs
if MroInfo in dep
]).to_list()
])) if ctx.attr.srcs else "",
mro = METHOD_NAME(ctx.executable._mro.short_path),
subcommand = shell.quote(ctx.attr.subcommand),
flags = " \\\n\t" + " \\\n\t".join(
[
shell.quote(flag)
for flag in ctx.attr.flags
],
) if ctx.attr.flags else "",
mro_files = " \\\n\t".join([
"\"${runfiles}/\"" + shell.quote(METHOD_NAME(p.short_path))
for p in mros
]) + " \"$@\"",
workspace = ctx.workspace_name or "__main__",
)
def _mro_tool_runner_impl(ctx):
return _mro_tool_common_impl(ctx, _make_mrf_runner_script)
mro_tool_runner = rule(
attrs = {
"srcs": attr.label_list(
doc = "The mro files to give as arguments to the tool.",
allow_files = True,
providers = [
[MroInfo],
[DefaultInfo],
],
),
"subcommand": attr.string(
mandatory = True,
doc = "The subcommand for the `mro` tool, e.g. " +
"`format`, `check`, `graph`, `edit`.",
),
"_mro": attr.label(
executable = True,
default = Label("@martian//:mro"),
cfg = "target",
),
"script_name": attr.string(
doc = "The name for the script file.",
),
"flags": attr.string_list(
default = [],
doc = "Flags to pass to the `mro` subcommand.",
),
},
doc = "Runs the `mro` tool, possibly with a subcommand, with the given " +
"mro files (if any) as arguments.",
executable = True,
implementation = _mro_tool_runner_impl,
)
def _make_mrf_tester_script(ctx, mros):
if not mros:
fail("required: at least one file to check", "mros")
return """#!/usr/bin/env bash
export MROPATH="{mropath}"
export MARTIAN_BASE=$(dirname "{mro}")
rc=0
for mro_file in \\
\t{mro_files}; do
echo "checking ${{mro_file}}..."
(
set -o pipefail
diff "${{mro_file}}" <("{mro}" format "${{mro_file}}")
) || rc=$?
done
exit $rc
""".format(
mropath = ":".join(
depset(transitive = [
dep[MroInfo].mropath
for dep in ctx.attr.srcs
if MroInfo in dep
]).to_list(),
),
mro = ctx.executable._mro.short_path,
mrf_flags = " ".join(ctx.attr.mrf_flags),
mro_files = " \\\n\t".join([
shell.quote(p.short_path)
for p in mros
]),
)
def _mrf_test_impl(ctx):
return _mro_tool_common_impl(ctx, _make_mrf_tester_script)
mrf_test = rule(
attrs = {
"srcs": attr.label_list(
mandatory = True,
doc = "The mro files to format.",
allow_files = True,
providers = [
[MroInfo],
[DefaultInfo],
],
),
"_mro": attr.label(
executable = True,
default = Label("@martian//:mro"),
cfg = "target",
),
"script_name": attr.string(
doc = "The name for the script file.",
),
"mrf_flags": attr.string_list(
default = [
"--includes",
],
doc = "Flags to pass to `mro format`.",
),
},
doc = "Runs `mro format` on the given files, and fails if there are any differences.",
test = True,
implementation = _mrf_test_impl,
)
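# Hedged usage sketch (a hypothetical BUILD file; the load path, target names
# and pipeline.mro are made up for illustration):
#
#   load("//tools:mro_rules.bzl", "mro_tool_runner", "mrf_test")
#
#   mro_tool_runner(
#       name = "mrf",
#       subcommand = "format",
#       srcs = ["pipeline.mro"],
#   )
#
#   mrf_test(
#       name = "pipeline_format_test",
#       srcs = ["pipeline.mro"],
#   )
#
# `bazel run :mrf` runs `mro format` over the sources from the workspace root,
# while `bazel test :pipeline_format_test` fails if formatting would change anything.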
|
3,372 |
test sinh
|
# Owner(s): ["module: dynamo"]
# this file is autogenerated via gen_ufuncs.py
# do not edit manually!
import numpy as np
from torch._numpy._ufuncs import * # noqa: F403
from torch._numpy.testing import assert_allclose
def test_absolute():
assert_allclose(np.absolute(0.5), absolute(0.5), atol=1e-14, check_dtype=False)
def test_arccos():
assert_allclose(np.arccos(0.5), arccos(0.5), atol=1e-14, check_dtype=False)
def test_arccosh():
assert_allclose(np.arccosh(1.5), arccosh(1.5), atol=1e-14, check_dtype=False)
def test_arcsin():
assert_allclose(np.arcsin(0.5), arcsin(0.5), atol=1e-14, check_dtype=False)
def test_arcsinh():
assert_allclose(np.arcsinh(0.5), arcsinh(0.5), atol=1e-14, check_dtype=False)
def test_arctan():
assert_allclose(np.arctan(0.5), arctan(0.5), atol=1e-14, check_dtype=False)
def test_arctanh():
assert_allclose(np.arctanh(0.5), arctanh(0.5), atol=1e-14, check_dtype=False)
def test_cbrt():
assert_allclose(np.cbrt(0.5), cbrt(0.5), atol=1e-14, check_dtype=False)
def test_ceil():
assert_allclose(np.ceil(0.5), ceil(0.5), atol=1e-14, check_dtype=False)
def test_conjugate():
assert_allclose(np.conjugate(0.5), conjugate(0.5), atol=1e-14, check_dtype=False)
def test_cos():
assert_allclose(np.cos(0.5), cos(0.5), atol=1e-14, check_dtype=False)
def test_cosh():
assert_allclose(np.cosh(0.5), cosh(0.5), atol=1e-14, check_dtype=False)
def test_deg2rad():
assert_allclose(np.deg2rad(0.5), deg2rad(0.5), atol=1e-14, check_dtype=False)
def test_degrees():
assert_allclose(np.degrees(0.5), degrees(0.5), atol=1e-14, check_dtype=False)
def test_exp():
assert_allclose(np.exp(0.5), exp(0.5), atol=1e-14, check_dtype=False)
def test_exp2():
assert_allclose(np.exp2(0.5), exp2(0.5), atol=1e-14, check_dtype=False)
def test_expm1():
assert_allclose(np.expm1(0.5), expm1(0.5), atol=1e-14, check_dtype=False)
def test_fabs():
assert_allclose(np.fabs(0.5), fabs(0.5), atol=1e-14, check_dtype=False)
def test_floor():
assert_allclose(np.floor(0.5), floor(0.5), atol=1e-14, check_dtype=False)
def test_isfinite():
assert_allclose(np.isfinite(0.5), isfinite(0.5), atol=1e-14, check_dtype=False)
def test_isinf():
assert_allclose(np.isinf(0.5), isinf(0.5), atol=1e-14, check_dtype=False)
def test_isnan():
assert_allclose(np.isnan(0.5), isnan(0.5), atol=1e-14, check_dtype=False)
def test_log():
assert_allclose(np.log(0.5), log(0.5), atol=1e-14, check_dtype=False)
def test_log10():
assert_allclose(np.log10(0.5), log10(0.5), atol=1e-14, check_dtype=False)
def test_log1p():
assert_allclose(np.log1p(0.5), log1p(0.5), atol=1e-14, check_dtype=False)
def test_log2():
assert_allclose(np.log2(0.5), log2(0.5), atol=1e-14, check_dtype=False)
def test_logical_not():
assert_allclose(
np.logical_not(0.5), logical_not(0.5), atol=1e-14, check_dtype=False
)
def test_negative():
assert_allclose(np.negative(0.5), negative(0.5), atol=1e-14, check_dtype=False)
def test_positive():
assert_allclose(np.positive(0.5), positive(0.5), atol=1e-14, check_dtype=False)
def test_rad2deg():
assert_allclose(np.rad2deg(0.5), rad2deg(0.5), atol=1e-14, check_dtype=False)
def test_radians():
assert_allclose(np.radians(0.5), radians(0.5), atol=1e-14, check_dtype=False)
def test_reciprocal():
assert_allclose(np.reciprocal(0.5), reciprocal(0.5), atol=1e-14, check_dtype=False)
def test_rint():
assert_allclose(np.rint(0.5), rint(0.5), atol=1e-14, check_dtype=False)
def test_sign():
assert_allclose(np.sign(0.5), sign(0.5), atol=1e-14, check_dtype=False)
def test_signbit():
assert_allclose(np.signbit(0.5), signbit(0.5), atol=1e-14, check_dtype=False)
def test_sin():
assert_allclose(np.sin(0.5), sin(0.5), atol=1e-14, check_dtype=False)
def METHOD_NAME():
assert_allclose(np.sinh(0.5), sinh(0.5), atol=1e-14, check_dtype=False)
def test_sqrt():
assert_allclose(np.sqrt(0.5), sqrt(0.5), atol=1e-14, check_dtype=False)
def test_square():
assert_allclose(np.square(0.5), square(0.5), atol=1e-14, check_dtype=False)
def test_tan():
assert_allclose(np.tan(0.5), tan(0.5), atol=1e-14, check_dtype=False)
def test_tanh():
assert_allclose(np.tanh(0.5), tanh(0.5), atol=1e-14, check_dtype=False)
def test_trunc():
assert_allclose(np.trunc(0.5), trunc(0.5), atol=1e-14, check_dtype=False)
|
3,373 |
streamlines to segments
|
# -*- coding: utf-8 -*-
import logging
import numpy as np
from numpy.linalg import norm
from scipy.spatial import cKDTree
from scipy.sparse import bsr_matrix
def _subdivide_streamline(streamline, n_steps):
if n_steps < 2:
return streamline
dirs = streamline[1:] - streamline[:-1]
subdivided = np.zeros((n_steps * (len(streamline) - 1) + 1, 3))
subdivided[::n_steps] = streamline
for s in range(1, n_steps):
subdivided[s::n_steps] = streamline[:-1] + s / n_steps * dirs
return subdivided
def METHOD_NAME(streamlines, n_steps=1):
"""Split streamlines into its segments.
Parameters
----------
streamlines : list of numpy.ndarray
List of streamlines.
Returns
-------
segments : numpy.ndarray (2D)
Segments array representation with the first and last points.
"""
vts_0_list = []
vts_1_list = []
for streamline in streamlines:
streamline = _subdivide_streamline(streamline, n_steps)
vts_0_list.append(streamline[:-1])
vts_1_list.append(streamline[1:])
segments = np.stack((np.vstack(vts_0_list), np.vstack(vts_1_list)), axis=0)
return segments
def streamlines_to_endpoints(streamlines):
"""Equivalent to streamlines resampling to 2 points (first and last).
Parameters
----------
streamlines : list of numpy.ndarray
List of streamlines.
Returns
-------
endpoints : numpy.ndarray (2D)
Endpoint array representation with the first and last points.
"""
endpoints = np.zeros((2, len(streamlines), 3))
for i, streamline in enumerate(streamlines):
endpoints[0, i] = streamline[0]
endpoints[1, i] = streamline[-1]
return endpoints
def streamlines_to_pts_dir_norm(streamlines, n_steps=1, asymmetric=False):
"""Evaluate each segment: mid position, direction, length.
Parameters
----------
streamlines : list of numpy.ndarray
List of streamlines.
Returns
-------
seg_mid : numpy.ndarray (2D)
Mid position (x,y,z) of all streamlines' segments.
seg_dir : numpy.ndarray (2D)
Direction (x,y,z) of all streamlines' segments.
seg_norm : numpy.ndarray (2D)
Length of all streamlines' segments.
"""
segments = METHOD_NAME(streamlines, n_steps)
seg_mid = get_segments_mid_pts_positions(segments)
seg_dir, seg_norm = get_segments_dir_and_norm(segments,
seg_mid,
asymmetric)
mask = seg_norm > 1.0e-20
if ~mask.any():
logging.warning("WARNING : There is at least one streamline with "
"overlapping points in the tractogram.")
return seg_mid[mask], seg_dir[mask], seg_norm[mask]
def get_segments_mid_pts_positions(segments):
return 0.5 * (segments[0] + segments[1])
def get_segments_vectors(segments):
return segments[1] - segments[0]
def get_segments_dir_and_norm(segments, seg_mid=None, asymmetric=False):
if asymmetric:
seg_vecs = get_segments_vectors(segments)
return get_vectors_dir_and_norm_rel_to_center(seg_vecs, seg_mid)
return get_vectors_dir_and_norm(get_segments_vectors(segments))
def get_vectors_dir_and_norm(vectors):
vectors_norm = compute_vectors_norm(vectors)
vectors_dir = vectors / vectors_norm.reshape((-1, 1))
return vectors_dir, vectors_norm
def get_vectors_dir_and_norm_rel_to_center(vectors, seg_mid_pts):
""" Evaluates vectors direction and norm by taking into account the
orientation and position of segments in relation to the center
of voxel
"""
vectors_norm = compute_vectors_norm(vectors)
vectors_dir = vectors / vectors_norm.reshape((-1, 1))
# we create an array of voxel centers for each of our points
vox_centers = seg_mid_pts.astype(int) + 0.5
# directions to center of voxel for each segment
dir_to_center = (vox_centers - seg_mid_pts).flatten()
r, c = (vectors_dir.shape[0], 3 * vectors_dir.shape[0])
rows = np.arange(r).repeat(3)
cols = np.arange(c)
dir_to_center_mat = bsr_matrix((dir_to_center, (rows, cols)), shape=(r, c))
# compute dot product between direction of vectors and direction to center
dots = dir_to_center_mat.dot(vectors_dir.flatten()).reshape((-1, 1))
# when dot is greater that 0, the vector goes toward the center
# of the voxel we flip the direction of such vectors
vectors_dir_rel = np.where(dots > 0, -vectors_dir, vectors_dir)
return vectors_dir_rel, vectors_norm
def psf_from_sphere(sphere_vertices):
return np.abs(np.dot(sphere_vertices, sphere_vertices.T))
# Mask functions
def generate_mask_indices_1d(nb_voxel, indices_1d):
mask_1d = np.zeros(nb_voxel, dtype=bool)
mask_1d[indices_1d] = True
return mask_1d
def get_indices_1d(volume_shape, pts):
return np.ravel_multi_index(pts.T.astype(int), volume_shape)
def get_dir_to_sphere_id(vectors, sphere_vertices):
"""Find the closest vector on the sphere vertices using a cKDT tree
sphere_vertices must be normed (or all with equal norm).
Parameters
----------
vectors : numpy.ndarray (2D)
Vectors representing the direction (x,y,z) of segments.
sphere_vertices : numpy.ndarray (2D)
Vertices of a Dipy sphere object.
Returns
-------
dir_sphere_id : numpy.ndarray (1D)
Sphere indices of the closest sphere direction for each vector
"""
sphere_kdtree = cKDTree(sphere_vertices)
_, dir_sphere_id = sphere_kdtree.query(vectors, k=1, workers=-1)
return dir_sphere_id
# Generic Functions (vector norm)
def compute_vectors_norm(vectors):
return norm(vectors, ord=2, axis=-1)
def normalize_vectors(vectors):
return p_normalize_vectors(vectors, 2)
def p_normalize_vectors(vectors, p):
return vectors / norm(vectors, ord=p, axis=-1, keepdims=True)
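# Hedged usage sketch: two tiny synthetic "streamlines" (made-up coordinates)
# pushed through the segment helpers above; needs only the numpy/scipy imports
# already present in this module.
if __name__ == "__main__":
    streamlines = [
        np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0]]),
        np.array([[0.0, 0.0, 1.0], [0.0, 2.0, 1.0]]),
    ]
    segments = METHOD_NAME(streamlines)               # shape (2, n_segments, 3)
    mid, direction, length = streamlines_to_pts_dir_norm(streamlines)
    print(segments.shape, mid.shape, length)          # (2, 3, 3) (3, 3) [1. 1. 2.]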
|
3,374 |
test software trigger simtel process
|
import json
import numpy as np
import pytest
from numpy.testing import assert_equal
from ctapipe.containers import ArrayEventContainer
from ctapipe.io import EventSource
def assert_all_tel_keys(event, expected, ignore=None):
if ignore is None:
ignore = set()
expected = tuple(expected)
for name, container in event.items():
if hasattr(container, "tel"):
actual = tuple(container.tel.keys())
if name not in ignore and actual != expected:
raise AssertionError(
f"Unexpected tel_ids in container {name}:" f"{actual} != {expected}"
)
@pytest.mark.parametrize("data_type", (list, np.array))
def test_software_trigger(subarray_prod5_paranal, data_type):
from ctapipe.instrument.trigger import SoftwareTrigger
subarray = subarray_prod5_paranal
trigger = SoftwareTrigger(
subarray=subarray,
min_telescopes=2,
min_telescopes_of_type=[
("type", "*", 0),
("type", "LST*", 2),
],
)
# only one telescope, no SWAT
event = ArrayEventContainer()
event.trigger.tels_with_trigger = data_type([5])
assert trigger(event) == False
assert_equal(event.trigger.tels_with_trigger, data_type([]))
# 1 LST + 1 MST, 1 LST would not have triggered LST hardware trigger
# and after LST is removed, we only have 1 telescope, so no SWAT either
event = ArrayEventContainer()
event.trigger.tels_with_trigger = data_type([1, 6])
assert trigger(event) == False
assert_equal(event.trigger.tels_with_trigger, data_type([]))
# two MSTs and 1 LST, -> remove single LST
event = ArrayEventContainer()
event.trigger.tels_with_trigger = data_type([1, 5, 6])
assert trigger(event) == True
assert_equal(event.trigger.tels_with_trigger, data_type([5, 6]))
# two MSTs, nothing to change
event = ArrayEventContainer()
event.trigger.tels_with_trigger = data_type([5, 6])
assert trigger(event) == True
assert_equal(event.trigger.tels_with_trigger, data_type([5, 6]))
# three LSTs, nothing to change
event = ArrayEventContainer()
event.trigger.tels_with_trigger = data_type([1, 2, 3])
assert trigger(event) == True
assert_equal(event.trigger.tels_with_trigger, data_type([1, 2, 3]))
    # three LSTs, plus MSTs, nothing to change
event = ArrayEventContainer()
event.trigger.tels_with_trigger = data_type([1, 2, 3, 5, 6, 7])
assert trigger(event) == True
assert_equal(event.trigger.tels_with_trigger, data_type([1, 2, 3, 5, 6, 7]))
@pytest.mark.parametrize("allowed_tels", (None, list(range(1, 20))))
def test_software_trigger_simtel(allowed_tels):
from ctapipe.instrument.trigger import SoftwareTrigger
path = "dataset://gamma_divergent_LaPalma_baseline_20Zd_180Az_prod3_test.simtel.gz"
expected = [
[12, 16],
[],
[1, 2, 3, 4],
[1, 4],
[],
[1, 3],
[1, 2, 3, 4, 5, 6, 7, 12, 15, 16, 17, 18],
[13, 14],
[],
[2, 3, 7, 12],
[1, 2, 5, 17],
[],
[13, 19],
[],
[],
[1, 2, 4, 5, 11, 18],
[17, 18],
[7, 12],
[],
]
with EventSource(
path, focal_length_choice="EQUIVALENT", allowed_tels=allowed_tels
) as source:
trigger = SoftwareTrigger(
subarray=source.subarray,
min_telescopes=2,
min_telescopes_of_type=[
("type", "*", 0),
("type", "LST*", 2),
],
)
for e, expected_tels in zip(source, expected):
trigger(e)
assert_equal(e.trigger.tels_with_trigger, expected_tels)
assert_all_tel_keys(e, expected_tels, ignore={"dl0", "dl1", "dl2", "muon"})
def test_software_trigger_simtel_single_lsts():
from ctapipe.instrument.trigger import SoftwareTrigger
path = "dataset://gamma_divergent_LaPalma_baseline_20Zd_180Az_prod3_test.simtel.gz"
# remove 3 LSTs, so that we trigger the 1-LST condition
allowed_tels = [1, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
expected = [
[12, 16],
[],
[],
[],
[],
[],
[5, 6, 7, 12, 15, 16, 17, 18],
[13, 14],
[],
[7, 12],
[5, 17],
[13, 19],
[],
[],
[5, 11, 18],
[17, 18],
[7, 12],
[],
]
with EventSource(
path, focal_length_choice="EQUIVALENT", allowed_tels=allowed_tels
) as source:
trigger = SoftwareTrigger(
subarray=source.subarray,
min_telescopes=2,
min_telescopes_of_type=[
("type", "*", 0),
("type", "LST*", 2),
],
)
for e, expected_tels in zip(source, expected):
print(e.trigger.tels_with_trigger)
trigger(e)
print(e.trigger.tels_with_trigger, expected_tels)
assert_equal(e.trigger.tels_with_trigger, expected_tels)
assert_all_tel_keys(e, expected_tels, ignore={"dl0", "dl1", "dl2", "muon"})
def METHOD_NAME(tmp_path):
from ctapipe.core import run_tool
from ctapipe.io import TableLoader
from ctapipe.tools.process import ProcessorTool
path = "dataset://gamma_divergent_LaPalma_baseline_20Zd_180Az_prod3_test.simtel.gz"
config = dict(
ProcessorTool=dict(
EventSource=dict(
focal_length_choice="EQUIVALENT",
# remove 3 LSTs, so that we trigger the 1-LST condition
allowed_tels=(1, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
),
SoftwareTrigger=dict(
min_telescopes=2,
min_telescopes_of_type=[
("type", "*", 0),
("type", "LST*", 2),
],
),
)
)
output_path = tmp_path / "software_trigger.dl1.h5"
config_path = tmp_path / "config.json"
config_path.write_text(json.dumps(config))
run_tool(
ProcessorTool(),
[f"--input={path}", f"--output={output_path}", f"--config={config_path}"],
)
del config["ProcessorTool"]["SoftwareTrigger"]
output_path_no_software_trigger = tmp_path / "no_software_trigger.dl1.h5"
config_path = tmp_path / "config_no_software_trigger.json"
config_path.write_text(json.dumps(config))
run_tool(
ProcessorTool(),
[
f"--input={path}",
f"--output={output_path_no_software_trigger}",
f"--config={config_path}",
],
)
with TableLoader(
output_path,
load_simulated=True,
load_dl1_parameters=True,
focal_length_choice="EQUIVALENT",
) as loader:
events_trigger = loader.read_telescope_events("LST_LST_LSTCam")
with TableLoader(
output_path_no_software_trigger,
load_simulated=True,
load_dl1_parameters=True,
focal_length_choice="EQUIVALENT",
) as loader:
events_no_trigger = loader.read_telescope_events("LST_LST_LSTCam")
assert len(events_no_trigger) > len(events_trigger)
|
3,375 |
should colorize
|
# Stubs for logbook.more (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from logbook.base import RecordDispatcher
from logbook.handlers import (
FingersCrossedHandler as FingersCrossedHandlerBase,
Handler,
StderrHandler,
StringFormatter,
StringFormatterHandlerMixin,
)
from logbook.ticketing import BackendBase
from typing import Any, Optional
TWITTER_FORMAT_STRING: Any
TWITTER_ACCESS_TOKEN_URL: str
NEW_TWEET_URL: str
class CouchDBBackend(BackendBase):
database: Any = ...
def setup_backend(self) -> None: ...
def record_ticket(self, record: Any, data: Any, hash: Any, app_id: Any) -> None: ...
class TwitterFormatter(StringFormatter):
max_length: int = ...
def format_exception(self, record: Any): ...
def __call__(self, record: Any, handler: Any): ...
class TaggingLogger(RecordDispatcher):
def __init__(self, name: Optional[Any] = ..., tags: Optional[Any] = ...) -> None: ...
def log(self, tags: Any, msg: Any, *args: Any, **kwargs: Any): ...
class TaggingHandler(Handler):
def __init__(self, handlers: Any, filter: Optional[Any] = ..., bubble: bool = ...) -> None: ...
def emit(self, record: Any) -> None: ...
class TwitterHandler(Handler, StringFormatterHandlerMixin):
default_format_string: Any = ...
formatter_class: Any = ...
consumer_key: Any = ...
consumer_secret: Any = ...
username: Any = ...
password: Any = ...
def __init__(
self,
consumer_key: Any,
consumer_secret: Any,
username: Any,
password: Any,
level: Any = ...,
format_string: Optional[Any] = ...,
filter: Optional[Any] = ...,
bubble: bool = ...,
) -> None: ...
def get_oauth_token(self): ...
def make_client(self): ...
def tweet(self, status: Any): ...
def emit(self, record: Any) -> None: ...
class SlackHandler(Handler, StringFormatterHandlerMixin):
api_token: Any = ...
channel: Any = ...
slack: Any = ...
def __init__(
self,
api_token: Any,
channel: Any,
level: Any = ...,
format_string: Optional[Any] = ...,
filter: Optional[Any] = ...,
bubble: bool = ...,
) -> None: ...
def emit(self, record: Any) -> None: ...
class JinjaFormatter:
template: Any = ...
def __init__(self, template: Any) -> None: ...
def __call__(self, record: Any, handler: Any): ...
class ExternalApplicationHandler(Handler):
encoding: Any = ...
def __init__(
self,
arguments: Any,
stdin_format: Optional[Any] = ...,
encoding: str = ...,
level: Any = ...,
filter: Optional[Any] = ...,
bubble: bool = ...,
) -> None: ...
def emit(self, record: Any) -> None: ...
class ColorizingStreamHandlerMixin:
def force_color(self) -> None: ...
def forbid_color(self) -> None: ...
def METHOD_NAME(self, record: Any): ...
def get_color(self, record: Any): ...
def format(self, record: Any): ...
class ColorizedStderrHandler(ColorizingStreamHandlerMixin, StderrHandler):
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
class FingersCrossedHandler(FingersCrossedHandlerBase):
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
class ExceptionHandler(Handler, StringFormatterHandlerMixin):
exc_type: Any = ...
def __init__(
self,
exc_type: Any,
level: Any = ...,
format_string: Optional[Any] = ...,
filter: Optional[Any] = ...,
bubble: bool = ...,
) -> None: ...
def handle(self, record: Any): ...
class DedupHandler(Handler):
def __init__(self, format_string: str = ..., *args: Any, **kwargs: Any) -> None: ...
def clear(self) -> None: ...
def pop_application(self) -> None: ...
def pop_thread(self) -> None: ...
def pop_context(self) -> None: ...
def pop_greenlet(self) -> None: ...
def handle(self, record: Any): ...
def flush(self) -> None: ...
class RiemannHandler(Handler):
host: Any = ...
port: Any = ...
ttl: Any = ...
queue: Any = ...
flush_threshold: Any = ...
transport: Any = ...
def __init__(
self,
host: Any,
port: Any,
message_type: str = ...,
ttl: int = ...,
flush_threshold: int = ...,
bubble: bool = ...,
filter: Optional[Any] = ...,
level: Any = ...,
) -> None: ...
def record_to_event(self, record: Any): ...
def emit(self, record: Any) -> None: ...
|
3,376 |
resolve login settings per product
|
import asyncio
import json
import logging
import asyncpg
from aiohttp import web
from pydantic import ValidationError
from servicelib.aiohttp.application_setup import ModuleCategory, app_module_setup
from settings_library.email import SMTPSettings
from settings_library.postgres import PostgresSettings
from .._constants import (
APP_PUBLIC_CONFIG_PER_PRODUCT,
APP_SETTINGS_KEY,
INDEX_RESOURCE_NAME,
)
from ..db.plugin import setup_db
from ..db.settings import get_plugin_settings as get_db_plugin_settings
from ..email.plugin import setup_email
from ..email.settings import get_plugin_settings as get_email_plugin_settings
from ..invitations.plugin import setup_invitations
from ..products.plugin import ProductName, list_products, setup_products
from ..redis import setup_redis
from ..rest.plugin import setup_rest
from . import (
api_keys_handlers,
handlers_2fa,
handlers_auth,
handlers_change,
handlers_confirmation,
handlers_registration,
)
from ._constants import APP_LOGIN_SETTINGS_PER_PRODUCT_KEY
from .settings import (
APP_LOGIN_OPTIONS_KEY,
LoginOptions,
LoginSettings,
LoginSettingsForProduct,
)
from .storage import APP_LOGIN_STORAGE_KEY, AsyncpgStorage
log = logging.getLogger(__name__)
MAX_TIME_TO_CLOSE_POOL_SECS = 5
async def _setup_login_storage_ctx(app: web.Application):
assert APP_LOGIN_STORAGE_KEY not in app # nosec
settings: PostgresSettings = get_db_plugin_settings(app)
pool: asyncpg.pool.Pool = await asyncpg.create_pool(
dsn=settings.dsn_with_query,
min_size=settings.POSTGRES_MINSIZE,
max_size=settings.POSTGRES_MAXSIZE,
loop=asyncio.get_event_loop(),
)
app[APP_LOGIN_STORAGE_KEY] = storage = AsyncpgStorage(pool)
yield # ----------------
if storage.pool is not pool:
log.error("Somebody has changed the db pool")
try:
await asyncio.wait_for(pool.close(), timeout=MAX_TIME_TO_CLOSE_POOL_SECS)
except asyncio.TimeoutError:
log.exception("Failed to close login storage loop")
def setup_login_storage(app: web.Application):
if _setup_login_storage_ctx not in app.cleanup_ctx:
app.cleanup_ctx.append(_setup_login_storage_ctx)
def _setup_login_options(app: web.Application):
settings: SMTPSettings = get_email_plugin_settings(app)
cfg = settings.dict()
if INDEX_RESOURCE_NAME in app.router:
cfg["LOGIN_REDIRECT"] = f"{app.router[INDEX_RESOURCE_NAME].url_for()}"
app[APP_LOGIN_OPTIONS_KEY] = LoginOptions(**cfg)
async def METHOD_NAME(app: web.Application):
"""Resolves login settings by composing app and product configurations
for the login plugin. Note that product settings override app settings.
"""
# app plugin settings
app_login_settings: LoginSettings | None
login_settings_per_product: dict[ProductName, LoginSettingsForProduct] = {}
if app_login_settings := app[APP_SETTINGS_KEY].WEBSERVER_LOGIN:
assert app_login_settings, "setup_settings not called?" # nosec
assert isinstance(app_login_settings, LoginSettings) # nosec
# compose app and product settings
errors = {}
for product in list_products(app):
try:
login_settings_per_product[
product.name
] = LoginSettingsForProduct.create_from_composition(
app_login_settings=app_login_settings,
product_login_settings=product.login_settings,
)
except ValidationError as err: # noqa: PERF203
errors[product.name] = err
if errors:
msg = "\n".join([f"{n}: {e}" for n, e in errors.items()])
error_msg = f"Invalid product.login_settings:\n{msg}"
raise ValueError(error_msg)
# store in app
app[APP_LOGIN_SETTINGS_PER_PRODUCT_KEY] = login_settings_per_product
log.info(
"Captured products login settings:\n%s",
json.dumps(
{
product_name: login_settings.dict()
for product_name, login_settings in login_settings_per_product.items()
},
indent=1,
),
)
# product-based public config: Overrides ApplicationSettings.public_dict
public_data_per_product = {}
for product_name, settings in login_settings_per_product.items():
public_data_per_product[product_name] = {
"invitation_required": settings.LOGIN_REGISTRATION_INVITATION_REQUIRED
}
app.setdefault(APP_PUBLIC_CONFIG_PER_PRODUCT, public_data_per_product)
@app_module_setup(
"simcore_service_webserver.login",
ModuleCategory.ADDON,
settings_name="WEBSERVER_LOGIN",
logger=log,
)
def setup_login(app: web.Application):
"""Setting up login subsystem in application"""
setup_db(app)
setup_redis(app)
setup_products(app)
setup_rest(app)
setup_email(app)
setup_invitations(app)
# routes
app.router.add_routes(handlers_auth.routes)
app.router.add_routes(handlers_confirmation.routes)
app.router.add_routes(handlers_registration.routes)
app.router.add_routes(handlers_change.routes)
app.router.add_routes(handlers_2fa.routes)
app.router.add_routes(api_keys_handlers.routes)
_setup_login_options(app)
setup_login_storage(app)
app.on_startup.append(METHOD_NAME)
return True
|
3,377 |
test write bed graph worker smoothing
|
import os
import pytest
import deeptools.writeBedGraph as wr
from deeptools.writeBedGraph import scaleCoverage
@pytest.mark.parametrize("bc", ["bam", 'cram'])
class TestWriteBedGraph():
def ifiles(self, ext='bam'):
root = os.path.dirname(os.path.abspath(__file__)) + "/test_data/"
bamFile1 = root + "testA." + ext
bamFile2 = root + "testB." + ext
bamFile_PE = root + "test_paired2." + ext
chrom = '3R'
step_size = 50
bin_length = 50
func_args = {'scaleFactor': 1.0}
c = wr.WriteBedGraph(
[bamFile1],
binLength=bin_length,
stepSize=step_size
)
return c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args
def test_writeBedGraph_worker(self, bc):
c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc)
c.zerosToNans = False
c.skipZeros = False
tempFile = c.writeBedGraph_worker(chrom, 0, 200, scaleCoverage, func_args)
_foo = open(tempFile[3], 'r')
res = _foo.readlines()
_foo.close()
expected = ['3R\t0\t100\t0\n', '3R\t100\t200\t1\n']
assert f"{res}" == f"{expected}"
os.remove(tempFile[3])
def test_writeBedGraph_worker_zerotonan(self, bc):
c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc)
# turn on zeroToNan
c.zerosToNans = True
tempFile2 = c.writeBedGraph_worker(chrom, 0, 200, scaleCoverage, func_args)
_foo = open(tempFile2[3], 'r')
res = _foo.readlines()
_foo.close()
expected = ['3R\t100\t200\t1\n']
assert f"{res}" == f"{expected}"
os.remove(tempFile2[3])
def test_writeBedGraph_worker_scaling(self, bc):
c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc)
func_args = {'scaleFactor': 3.0}
tempFile = c.writeBedGraph_worker(chrom, 0, 200, scaleCoverage, func_args)
_foo = open(tempFile[3], 'r')
res = _foo.readlines()
_foo.close()
expected = ['3R\t0\t100\t0\n', '3R\t100\t200\t3\n']
assert f"{res}" == f"{expected}"
os.remove(tempFile[3])
def test_writeBedGraph_worker_ignore_duplicates(self, bc):
c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc)
c = wr.WriteBedGraph(
[bamFile2],
binLength=bin_length,
stepSize=step_size,
ignoreDuplicates=True
)
c.zerosToNans = True
tempFile = c.writeBedGraph_worker(chrom, 0, 200, scaleCoverage, func_args)
_foo = open(tempFile[3], 'r')
res = _foo.readlines()
_foo.close()
expected = ['3R\t50\t200\t1\n']
assert f"{res}" == f"{expected}"
os.remove(tempFile[3])
def METHOD_NAME(self, bc):
c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc)
c.binLength = 20
c.stepSize = 20
c.smoothLength = 60
tempFile = c.writeBedGraph_worker(chrom, 100, 200, scaleCoverage, func_args)
_foo = open(tempFile[3], 'r')
res = _foo.readlines()
_foo.close()
expected = ['3R\t100\t120\t1\n', '3R\t120\t180\t1.33333\n', '3R\t180\t200\t1\n']
assert f"{res}" == f"{expected}"
os.remove(tempFile[3])
def test_writeBedGraph_cigar(self, bc):
"""
The bamFile1 contains a read at position 10
with the following CIGAR: 10S20M10N10M10S
that maps to a chromosome named chr_cigar.
"""
c, bamFile1, bamFile2, bamFile_PE, chrom, step_size, bin_length, func_args = self.ifiles(bc)
        # turn off read extension
c.extendPairedEnds = False
c.binLength = 10
c.stepSize = 10
tempFile = c.writeBedGraph_worker('chr_cigar', 0, 100, scaleCoverage, func_args)
_foo = open(tempFile[3], 'r')
res = _foo.readlines()
_foo.close()
# the single read is split into bin 10-30, and then 40-50
expected = [
'chr_cigar\t0\t10\t0\n',
'chr_cigar\t10\t30\t1\n',
'chr_cigar\t30\t40\t0\n',
'chr_cigar\t40\t50\t1\n',
'chr_cigar\t50\t100\t0\n'
]
assert f"{res}" == f"{expected}"
os.remove(tempFile[3])
|
3,378 |
raw slices
|
"""Defines commonly used segment predicates for rule writers.
For consistency, all the predicates in this module are implemented as functions
returning functions. This avoids rule writers having to remember the
distinction between normal functions and functions returning functions.
This is not necessarily a complete set of predicates covering all possible
requirements. Rule authors can define their own predicates as needed, either
as regular functions, `lambda`, etc.
"""
from typing import Callable, Optional
from sqlfluff.core.parser import BaseSegment
from sqlfluff.utils.functional.raw_file_slices import RawFileSlices
from sqlfluff.utils.functional.templated_file_slices import TemplatedFileSlices
from sqlfluff.core.templaters.base import TemplatedFile
def raw_is(*raws: str) -> Callable[[BaseSegment], bool]: # pragma: no cover
"""Returns a function that determines if segment matches one of the raw inputs."""
def _(segment: BaseSegment) -> bool:
return segment.raw in raws
return _
def raw_upper_is(*raws: str) -> Callable[[BaseSegment], bool]:
"""Returns a function that determines if segment matches one of the raw inputs."""
def _(segment: BaseSegment) -> bool:
return segment.raw_upper in raws
return _
def is_type(*seg_type: str) -> Callable[[BaseSegment], bool]:
"""Returns a function that determines if segment is one of the types."""
def _(segment: BaseSegment) -> bool:
return segment.is_type(*seg_type)
return _
def is_keyword(*keyword_name: str) -> Callable[[BaseSegment], bool]:
"""Returns a function that determines if it's a matching keyword."""
return and_(
is_type("keyword"), raw_upper_is(*[raw.upper() for raw in keyword_name])
)
def is_code() -> Callable[[BaseSegment], bool]:
"""Returns a function that checks if segment is code."""
def _(segment: BaseSegment) -> bool:
return segment.is_code
return _
def is_comment() -> Callable[[BaseSegment], bool]:
"""Returns a function that checks if segment is comment."""
def _(segment: BaseSegment) -> bool:
return segment.is_comment
return _
def is_expandable() -> Callable[[BaseSegment], bool]:
"""Returns a function that checks if segment is expandable."""
def _(segment: BaseSegment) -> bool:
return segment.is_expandable
return _
def is_meta() -> Callable[[BaseSegment], bool]:
"""Returns a function that checks if segment is meta."""
def _(segment: BaseSegment) -> bool:
return segment.is_meta
return _
def is_raw() -> Callable[[BaseSegment], bool]:
"""Returns a function that checks if segment is raw."""
def _(segment: BaseSegment) -> bool:
return segment.is_raw()
return _
def is_whitespace() -> Callable[[BaseSegment], bool]:
"""Returns a function that checks if segment is whitespace."""
def _(segment: BaseSegment) -> bool:
return segment.is_whitespace
return _
def is_templated() -> Callable[[BaseSegment], bool]: # pragma: no cover
"""Returns a function that checks if segment is templated."""
def _(segment: BaseSegment) -> bool:
return segment.is_templated
return _
def get_type() -> Callable[[BaseSegment], str]:
"""Returns a function that gets segment type."""
def _(segment: BaseSegment) -> str:
return segment.get_type()
return _
def and_(*functions: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]:
"""Returns a function that computes the functions and-ed together."""
def _(segment: BaseSegment) -> bool:
return all(function(segment) for function in functions)
return _
def or_(*functions: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]:
"""Returns a function that computes the functions or-ed together."""
def _(segment: BaseSegment) -> bool:
return any(function(segment) for function in functions)
return _
def not_(fn: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]:
"""Returns a function that computes: not fn()."""
def _(segment: BaseSegment) -> bool:
return not fn(segment)
return _
def METHOD_NAME(
segment: BaseSegment,
templated_file: Optional[TemplatedFile],
) -> RawFileSlices: # pragma: no cover
"""Returns raw slices for a segment."""
if not templated_file:
raise ValueError(
'raw_slices: "templated_file" parameter is required.'
) # pragma: no cover
if not segment.pos_marker:
raise ValueError(
'raw_slices: "segment" parameter must have pos_marker set.'
) # pragma: no cover
return RawFileSlices(
*templated_file.raw_slices_spanning_source_slice(
segment.pos_marker.source_slice
),
templated_file=templated_file
)
def templated_slices(
segment: BaseSegment,
templated_file: Optional[TemplatedFile],
) -> TemplatedFileSlices:
"""Returns raw slices for a segment."""
if not templated_file:
raise ValueError(
'templated_slices: "templated_file" parameter is required.'
) # pragma: no cover
if not segment.pos_marker:
raise ValueError(
'templated_slices: "segment" parameter must have pos_marker set.'
) # pragma: no cover
# :TRICKY: We don't use _find_slice_indices_of_templated_pos() here because
# it treats TemplatedFileSlice.templated_slice.stop as inclusive, not
# exclusive. Other parts of SQLFluff rely on this behaviour, but we don't
# want it. It's easy enough to do this ourselves.
start = segment.pos_marker.templated_slice.start
stop = segment.pos_marker.templated_slice.stop
templated_slices = [
slice_
for slice_ in templated_file.sliced_file
if (stop > slice_.templated_slice.start and start < slice_.templated_slice.stop)
]
return TemplatedFileSlices(*templated_slices, templated_file=templated_file)
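# Hedged usage sketch: predicates compose into plain callables. Calling them
# needs a real parsed BaseSegment (in rule code these come from the parsed
# tree), so only the composition itself is exercised here.
if __name__ == "__main__":
    is_select_keyword = and_(is_type("keyword"), raw_upper_is("SELECT"))
    not_noise = not_(or_(is_comment(), is_whitespace()))
    print(callable(is_select_keyword), callable(not_noise))  # True True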
|
3,379 |
construct
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base model of the knowledge graph embedding model.
"""
from paddle import fluid
class Model(object):
"""
Base model.
"""
def __init__(self, **kwargs):
"""
Init model
"""
# Needed parameters
self.model_name = kwargs["model_name"]
self.data_reader = kwargs["data_reader"]
self._hidden_size = kwargs["hidden_size"]
self._learning_rate = kwargs["learning_rate"]
self._optimizer = kwargs["optimizer"]
self.args = kwargs["args"]
# Optional parameters
if "margin" in kwargs:
self._margin = kwargs["margin"]
self._prefix = "%s_%s_dim=%d_" % (
self.model_name, self.data_reader.name, self._hidden_size)
self.ent_name = self._prefix + "entity_embeddings"
self.rel_name = self._prefix + "relation_embeddings"
self._entity_total = self.data_reader.entity_total
self._relation_total = self.data_reader.relation_total
self._ent_shape = [self._entity_total, self._hidden_size]
self._rel_shape = [self._relation_total, self._hidden_size]
def METHOD_NAME(self):
"""
Construct the program
:return: None
"""
self.startup_program = fluid.Program()
self.train_program = fluid.Program()
self.test_program = fluid.Program()
with fluid.program_guard(self.train_program, self.startup_program):
self.train_pos_input = fluid.layers.data(
"pos_triple",
dtype="int64",
shape=[None, 3, 1],
append_batch_size=False)
self.train_neg_input = fluid.layers.data(
"neg_triple",
dtype="int64",
shape=[None, 3, 1],
append_batch_size=False)
self.train_feed_list = ["pos_triple", "neg_triple"]
self.train_feed_vars = [self.train_pos_input, self.train_neg_input]
self.train_fetch_vars = self.construct_train_program()
loss = self.train_fetch_vars[0]
self.apply_optimizer(loss, opt=self._optimizer)
with fluid.program_guard(self.test_program, self.startup_program):
self.test_input = fluid.layers.data(
"test_triple",
dtype="int64",
shape=[3],
append_batch_size=False)
self.test_feed_list = ["test_triple"]
self.test_fetch_vars = self.construct_test_program()
def apply_optimizer(self, loss, opt="sgd"):
"""
Construct the backward of the train program.
:param loss: `type : variable` final loss of the model.
:param opt: `type : string` the optimizer name
:return:
"""
optimizer_available = {
"adam": fluid.optimizer.Adam,
"sgd": fluid.optimizer.SGD,
"momentum": fluid.optimizer.Momentum
}
if opt in optimizer_available:
opt_func = optimizer_available[opt]
else:
opt_func = None
if opt_func is None:
            raise ValueError("You should choose the optimizer in %s" %
optimizer_available.keys())
else:
optimizer = opt_func(learning_rate=self._learning_rate)
return optimizer.minimize(loss)
def construct_train_program(self):
"""
This function should construct the train program with the `self.train_pos_input`
        and `self.train_neg_input`. These inputs are batches of triples.
        :return: List of variables you want to get. Please make sure the loss variable
        is in the first place, e.g. [loss, variable1, variable2, ...].
"""
raise NotImplementedError(
"You should define the construct_train_program"
" function before use it!")
def construct_test_program(self):
"""
        This function should construct the test (or evaluation) program with 'self.test_input'.
        For now, we only support evaluating the ranks of a single triple.
        :return: the distances of all entities to the test triple (for both the head and tail entity).
"""
raise NotImplementedError(
"You should define the construct_test_program"
" function before use it")
|
3,380 |
get segmentation target
|
import cv2
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torch.utils.data.dataset import Subset
from torchvision.datasets.sbd import SBDataset
from torchvision.datasets.voc import VOCSegmentation
import ignite.distributed as idist
from ignite.utils import convert_tensor
class TransformedDataset(Dataset):
def __init__(self, ds, transform_fn):
assert isinstance(ds, Dataset)
assert callable(transform_fn)
self.ds = ds
self.transform_fn = transform_fn
def __len__(self):
return len(self.ds)
def __getitem__(self, index):
dp = self.ds[index]
return self.transform_fn(**dp)
class VOCSegmentationOpencv(VOCSegmentation):
target_names = [
"background",
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"plant",
"sheep",
"sofa",
"train",
"tv/monitor",
]
def __init__(self, *args, return_meta=False, **kwargs):
super(VOCSegmentationOpencv, self).__init__(*args, **kwargs)
self.return_meta = return_meta
def __getitem__(self, index):
img = cv2.imread(self.images[index])
assert img is not None, f"Image at '{self.images[index]}' has a problem"
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
mask = np.asarray(Image.open(self.masks[index]))
if self.return_meta:
return {
"image": img,
"mask": mask,
"meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]},
}
return {"image": img, "mask": mask}
class SBDatasetOpencv(SBDataset):
def __init__(self, *args, return_meta=False, **kwargs):
super(SBDatasetOpencv, self).__init__(*args, **kwargs)
assert self.mode == "segmentation", "SBDatasetOpencv should be in segmentation mode only"
self.return_meta = return_meta
def METHOD_NAME(self, filepath):
mat = self._loadmat(filepath)
return mat["GTcls"][0]["Segmentation"][0]
def __getitem__(self, index):
img = cv2.imread(self.images[index])
assert img is not None, f"Image at '{self.images[index]}' has a problem"
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
mask = self._get_target(self.masks[index])
if self.return_meta:
return {
"image": img,
"mask": mask,
"meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]},
}
return {"image": img, "mask": mask}
def get_train_dataset(root_path, return_meta=False):
return VOCSegmentationOpencv(
root=root_path, year="2012", image_set="train", download=False, return_meta=return_meta
)
def get_val_dataset(root_path, return_meta=False):
return VOCSegmentationOpencv(root=root_path, year="2012", image_set="val", download=False, return_meta=return_meta)
def get_train_noval_sbdataset(root_path, return_meta=False):
return SBDatasetOpencv(root_path, image_set="train_noval", mode="segmentation", return_meta=return_meta)
def get_dataloader(dataset, sampler=None, shuffle=False, limit_num_samples=None, **kwargs):
if limit_num_samples is not None:
g = torch.Generator().manual_seed(limit_num_samples)
indices = torch.randperm(len(dataset), generator=g)[:limit_num_samples]
dataset = Subset(dataset, indices)
return idist.auto_dataloader(dataset, sampler=sampler, shuffle=(sampler is None) and shuffle, **kwargs)
def get_train_val_loaders(
root_path,
train_transforms,
val_transforms,
batch_size=16,
num_workers=8,
train_sampler=None,
val_batch_size=None,
sbd_path=None,
limit_train_num_samples=None,
limit_val_num_samples=None,
):
train_ds = get_train_dataset(root_path)
val_ds = get_val_dataset(root_path)
if sbd_path is not None:
sbd_train_ds = get_train_noval_sbdataset(sbd_path)
train_ds = train_ds + sbd_train_ds
if len(val_ds) < len(train_ds):
g = torch.Generator().manual_seed(len(train_ds))
train_eval_indices = torch.randperm(len(train_ds), generator=g)[: len(val_ds)]
train_eval_ds = Subset(train_ds, train_eval_indices)
else:
train_eval_ds = train_ds
train_ds = TransformedDataset(train_ds, transform_fn=train_transforms)
val_ds = TransformedDataset(val_ds, transform_fn=val_transforms)
train_eval_ds = TransformedDataset(train_eval_ds, transform_fn=val_transforms)
val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size
train_loader = get_dataloader(
train_ds,
shuffle=True,
sampler=train_sampler,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
limit_num_samples=limit_train_num_samples,
)
val_loader = get_dataloader(
val_ds,
shuffle=False,
batch_size=val_batch_size,
num_workers=num_workers,
drop_last=False,
limit_num_samples=limit_val_num_samples,
)
train_eval_loader = get_dataloader(
train_eval_ds,
shuffle=False,
batch_size=val_batch_size,
num_workers=num_workers,
drop_last=False,
limit_num_samples=limit_val_num_samples,
)
return train_loader, val_loader, train_eval_loader
def get_inference_dataloader(
root_path, mode, transforms, batch_size=16, num_workers=8, pin_memory=True, limit_num_samples=None
):
assert mode in ("train", "test"), "Mode should be 'train' or 'test'"
get_dataset_fn = get_train_dataset if mode == "train" else get_val_dataset
dataset = get_dataset_fn(root_path, return_meta=True)
dataset = TransformedDataset(dataset, transform_fn=transforms)
return get_dataloader(
dataset,
limit_num_samples=limit_num_samples,
shuffle=False,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=False,
)
def ignore_mask_boundaries(**kwargs):
assert "mask" in kwargs, "Input should contain 'mask'"
mask = kwargs["mask"]
mask[mask == 255] = 0
kwargs["mask"] = mask
return kwargs
def denormalize(t, mean, std, max_pixel_value=255):
assert isinstance(t, torch.Tensor), f"{type(t)}"
assert t.ndim == 3
d = t.device
mean = torch.tensor(mean, device=d).unsqueeze(-1).unsqueeze(-1)
std = torch.tensor(std, device=d).unsqueeze(-1).unsqueeze(-1)
tensor = std * t + mean
tensor *= max_pixel_value
return tensor
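# Worked example (sketch, values are illustrative): `denormalize` inverts the usual
# normalization pixel / 255 -> (x - mean) / std.  With mean = std = 0.5 per channel,
# a normalized value of 0.0 maps back to 0.5 * 255 = 127.5.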
def prepare_image_mask(batch, device, non_blocking):
x, y = batch["image"], batch["mask"]
x = convert_tensor(x, device, non_blocking=non_blocking)
y = convert_tensor(y, device, non_blocking=non_blocking).long()
return x, y
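# Usage sketch (paths and transform names below are placeholders, not part of this module;
# the transform callables must accept image/mask keyword arguments and return a dict with
# "image" and "mask" keys, as expected by TransformedDataset and prepare_image_mask):
#
#   train_loader, val_loader, train_eval_loader = get_train_val_loaders(
#       root_path="/path/to/voc_root",
#       train_transforms=my_train_transform_fn,
#       val_transforms=my_val_transform_fn,
#       batch_size=8,
#       num_workers=4,
#   )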
|
3,381 |
bbox overlaps
|
import numpy as np
def clip_boxes(boxes, im_shape):
"""
Clip boxes to image boundaries.
:param boxes: [N, 4* num_classes]
:param im_shape: tuple of 2
:return: [N, 4* num_classes]
"""
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
# def bbox_overlaps(boxes, query_boxes):
# return bbox_overlaps_cython(boxes, query_boxes)
def METHOD_NAME(boxes, query_boxes):
return bbox_overlaps_py(boxes, query_boxes)
def bbox_overlaps_py(boxes, query_boxes):
"""
determine overlaps between boxes and query_boxes
:param boxes: n * 4 bounding boxes
:param query_boxes: k * 4 bounding boxes
:return: overlaps: n * k overlaps
"""
n_ = boxes.shape[0]
k_ = query_boxes.shape[0]
    overlaps = np.zeros((n_, k_), dtype=float)  # np.float was removed in NumPy 1.24+
for k in range(k_):
query_box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)
for n in range(n_):
iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1
if iw > 0:
ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1
if ih > 0:
box_area = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
all_area = float(box_area + query_box_area - iw * ih)
overlaps[n, k] = iw * ih / all_area
return overlaps
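# Worked example (illustrative): boxes are [x1, y1, x2, y2] with inclusive pixel coordinates.
#   boxes = np.array([[0, 0, 9, 9]], dtype=float)    # 10 x 10 box, area 100
#   query = np.array([[5, 5, 14, 14]], dtype=float)  # 10 x 10 box, area 100
#   bbox_overlaps_py(boxes, query)
# intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175, so IoU = 25 / 175 ~ 0.14.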
def nonlinear_transform(ex_rois, gt_rois):
"""
compute bounding box regression targets from ex_rois to gt_rois
:param ex_rois: [N, 4]
:param gt_rois: [N, 4]
:return: [N, 4]
"""
assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)
ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * (gt_widths - 1.0)
gt_ctr_y = gt_rois[:, 1] + 0.5 * (gt_heights - 1.0)
targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-14)
targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-14)
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
if gt_rois.shape[1] <= 4:
targets = np.vstack(
(targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
else:
targets = [targets_dx, targets_dy, targets_dw, targets_dh]
# if config.USE_BLUR:
# for i in range(4, gt_rois.shape[1]):
# t = gt_rois[:,i]
# targets.append(t)
targets = np.vstack(targets).transpose()
return targets
def landmark_transform(ex_rois, gt_rois):
assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)
ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)
targets = []
for i in range(gt_rois.shape[1]):
for j in range(gt_rois.shape[2]):
# if not config.USE_OCCLUSION and j==2:
# continue
if j == 2:
continue
if j == 0: # w
target = (gt_rois[:, i, j] - ex_ctr_x) / (ex_widths + 1e-14)
elif j == 1: # h
target = (gt_rois[:, i, j] - ex_ctr_y) / (ex_heights + 1e-14)
            else:  # visibility flag
target = gt_rois[:, i, j]
targets.append(target)
targets = np.vstack(targets).transpose()
return targets
def nonlinear_pred(boxes, box_deltas):
"""
Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
:param boxes: !important [N 4]
:param box_deltas: [N, 4 * num_classes]
:return: [N 4 * num_classes]
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
    boxes = boxes.astype(float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
dx = box_deltas[:, 0::4]
dy = box_deltas[:, 1::4]
dw = box_deltas[:, 2::4]
dh = box_deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * (pred_w - 1.0)
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * (pred_h - 1.0)
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * (pred_w - 1.0)
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * (pred_h - 1.0)
return pred_boxes
def landmark_pred(boxes, landmark_deltas):
if boxes.shape[0] == 0:
return np.zeros((0, landmark_deltas.shape[1]))
    boxes = boxes.astype(float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
preds = []
for i in range(landmark_deltas.shape[1]):
if i % 2 == 0:
pred = (landmark_deltas[:, i] * widths + ctr_x)
else:
pred = (landmark_deltas[:, i] * heights + ctr_y)
preds.append(pred)
preds = np.vstack(preds).transpose()
return preds
def iou_transform(ex_rois, gt_rois):
""" return bbox targets, IoU loss uses gt_rois as gt """
assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
return gt_rois
def iou_pred(boxes, box_deltas):
"""
Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
:param boxes: !important [N 4]
:param box_deltas: [N, 4 * num_classes]
:return: [N 4 * num_classes]
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
    boxes = boxes.astype(float, copy=False)
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
dx1 = box_deltas[:, 0::4]
dy1 = box_deltas[:, 1::4]
dx2 = box_deltas[:, 2::4]
dy2 = box_deltas[:, 3::4]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = dx1 + x1[:, np.newaxis]
# y1
pred_boxes[:, 1::4] = dy1 + y1[:, np.newaxis]
# x2
pred_boxes[:, 2::4] = dx2 + x2[:, np.newaxis]
# y2
pred_boxes[:, 3::4] = dy2 + y2[:, np.newaxis]
return pred_boxes
# define bbox_transform and bbox_pred
bbox_transform = nonlinear_transform
bbox_pred = nonlinear_pred
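# Round-trip sketch (illustrative values, not part of the original module): encoding a
# proposal against its ground-truth box and decoding it back should recover the ground truth.
if __name__ == '__main__':
    ex = np.array([[0.0, 0.0, 10.0, 10.0]])
    gt = np.array([[2.0, 2.0, 12.0, 12.0]])
    deltas = bbox_transform(ex, gt)        # nonlinear_transform: (dx, dy, dw, dh)
    recovered = bbox_pred(ex, deltas)      # nonlinear_pred
    assert np.allclose(recovered, gt)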
|
3,382 |
setup
|
"""A directive to generate a gallery of images from structured data.
Generating a gallery of images that are all the same size is a common
pattern in documentation, and this can be cumbersome if the gallery is
generated programmatically. This directive wraps this particular use-case
in a helper-directive to generate it with a single YAML configuration file.
It currently exists for maintainers of the pydata-sphinx-theme,
but might be abstracted into a standalone package if it proves useful.
"""
from pathlib import Path
from typing import Any, Dict, List
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.application import Sphinx
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from yaml import safe_load
logger = logging.getLogger(__name__)
TEMPLATE_GRID = """
`````{{grid}} {columns}
{options}
{content}
`````
"""
GRID_CARD = """
````{{grid-item-card}} {title}
{options}
{content}
````
"""
class GalleryGridDirective(SphinxDirective):
"""A directive to show a gallery of images and links in a Bootstrap grid.
The grid can be generated from a YAML file that contains a list of items, or
from the content of the directive (also formatted in YAML). Use the parameter
"class-card" to add an additional CSS class to all cards. When specifying the grid
items, you can use all parameters from "grid-item-card" directive to customize
individual cards + ["image", "header", "content", "title"].
Danger:
This directive can only be used in the context of a Myst documentation page as
the templates use Markdown flavored formatting.
"""
name = "gallery-grid"
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {
# A class to be added to the resulting container
"grid-columns": directives.unchanged,
"class-container": directives.unchanged,
"class-card": directives.unchanged,
}
def run(self) -> List[nodes.Node]:
"""Create the gallery grid."""
if self.arguments:
# If an argument is given, assume it's a path to a YAML file
# Parse it and load it into the directive content
path_data_rel = Path(self.arguments[0])
path_doc, _ = self.get_source_info()
path_doc = Path(path_doc).parent
path_data = (path_doc / path_data_rel).resolve()
if not path_data.exists():
logger.warn(f"Could not find grid data at {path_data}.")
nodes.text("No grid data found at {path_data}.")
return
yaml_string = path_data.read_text()
else:
yaml_string = "\n".join(self.content)
        # Use every item in the YAML data as a site to show
        # and generate a card item for each of them
grid_items = []
for item in safe_load(yaml_string):
# remove parameters that are not needed for the card options
title = item.pop("title", "")
# build the content of the card using some extra parameters
header = f"{item.pop('header')} \n^^^ \n" if "header" in item else ""
image = f"}) \n" if "image" in item else ""
content = f"{item.pop('content')} \n" if "content" in item else ""
            # optional parameters that influence all cards
if "class-card" in self.options:
item["class-card"] = self.options["class-card"]
loc_options_str = "\n".join(f":{k}: {v}" for k, v in item.items()) + " \n"
card = GRID_CARD.format(
options=loc_options_str, content=header + image + content, title=title
)
grid_items.append(card)
# Parse the template with Sphinx Design to create an output container
# Prep the options for the template grid
class_ = "gallery-directive" + f' {self.options.get("class-container", "")}'
options = {"gutter": 2, "class-container": class_}
options_str = "\n".join(f":{k}: {v}" for k, v in options.items())
# Create the directive string for the grid
grid_directive = TEMPLATE_GRID.format(
columns=self.options.get("grid-columns", "1 2 3 4"),
options=options_str,
content="\n".join(grid_items),
)
# Parse content as a directive so Sphinx Design processes it
container = nodes.container()
self.state.nested_parse([grid_directive], 0, container)
# Sphinx Design outputs a container too, so just use that
return [container.children[0]]
def METHOD_NAME(app: Sphinx) -> Dict[str, Any]:
"""Add custom configuration to sphinx app.
Args:
app: the Sphinx application
Returns:
        the two parallel-safety flags, both set to ``True``.
"""
app.add_directive("gallery-grid", GalleryGridDirective)
return {
"parallel_read_safe": True,
"parallel_write_safe": True,
}
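# Example grid data (sketch): a YAML file referenced as the directive argument from a MyST
# page, e.g. ```{gallery-grid} path/to/gallery.yaml```. Keys other than title/header/content/
# image are passed through as grid-item-card options; all values below are placeholders.
#
#   - title: Project A
#     header: A short header
#     content: One-line description of the card.
#     image: https://example.org/logo-a.png
#     link: https://example.org/project-a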
|
3,383 |
parse
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .genotypes import PRIMITIVES, Genotype
from .operations import OPS, FactorizedReduce, ReLUConvBN
class MixedOp(nn.Module):
def __init__(self, C, stride):
super(MixedOp, self).__init__()
self._ops = nn.ModuleList()
for primitive in PRIMITIVES:
op = OPS[primitive](C, stride, False)
if "pool" in primitive:
op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))
self._ops.append(op)
def forward(self, x, weights, cpu_weights):
clist = []
for j, cpu_weight in enumerate(cpu_weights):
if abs(cpu_weight) > 1e-10:
clist.append(weights[j] * self._ops[j](x))
if len(clist) == 1:
return clist[0]
else:
return sum(clist)
class Cell(nn.Module):
def __init__(
self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev
):
super(Cell, self).__init__()
self.reduction = reduction
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
self._steps = steps
self._multiplier = multiplier
self._ops = nn.ModuleList()
self._bns = nn.ModuleList()
for i in range(self._steps):
for j in range(2 + i):
stride = 2 if reduction and j < 2 else 1
op = MixedOp(C, stride)
self._ops.append(op)
def forward(self, s0, s1, weights):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
cpu_weights = weights.tolist()
states = [s0, s1]
offset = 0
for i in range(self._steps):
s = sum(
self._ops[offset + j](h, weights[offset + j], cpu_weights[offset + j])
for j, h in enumerate(states)
)
offset += len(states)
states.append(s)
# logging.info(states)
return torch.cat(states[-self._multiplier :], dim=1)
class Network_GumbelSoftmax(nn.Module):
def __init__(
self,
C,
num_classes,
layers,
criterion,
device,
steps=4,
multiplier=4,
stem_multiplier=3,
):
super(Network_GumbelSoftmax, self).__init__()
self._C = C
self._num_classes = num_classes
self._layers = layers
self._criterion = criterion
self._steps = steps
self._multiplier = multiplier
self.device = device
C_curr = stem_multiplier * C # 3*16
self.stem = nn.Sequential(
nn.Conv2d(3, C_curr, 3, padding=1, bias=False), nn.BatchNorm2d(C_curr)
)
C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
self.cells = nn.ModuleList()
reduction_prev = False
        # for layers = 8, the cells at indices 2 and 5 are reduction cells.
for i in range(layers):
if i in [layers // 3, 2 * layers // 3]:
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(
steps,
multiplier,
C_prev_prev,
C_prev,
C_curr,
reduction,
reduction_prev,
)
reduction_prev = reduction
self.cells += [cell]
C_prev_prev, C_prev = C_prev, multiplier * C_curr
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
self.tau = 5
self._initialize_alphas()
def new(self):
model_new = Network_GumbelSoftmax(
self._C, self._num_classes, self._layers, self._criterion, self.device
).to(self.device)
for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data.copy_(y.data)
return model_new
def set_tau(self, tau):
self.tau = tau
def get_tau(self):
return self.tau
def forward(self, input):
batch, C, H, W = input.size()
s0 = s1 = self.stem(input)
for i, cell in enumerate(self.cells):
if cell.reduction:
weights = F.gumbel_softmax(self.alphas_reduce, self.tau, True)
else:
weights = F.gumbel_softmax(self.alphas_normal, self.tau, True)
s0, s1 = s1, cell(s0, s1, weights)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0), -1))
return logits
def _initialize_alphas(self):
k = sum(1 for i in range(self._steps) for n in range(2 + i))
num_ops = len(PRIMITIVES)
self.alphas_normal = nn.Parameter(1e-3 * torch.randn(k, num_ops))
self.alphas_reduce = nn.Parameter(1e-3 * torch.randn(k, num_ops))
self._arch_parameters = [
self.alphas_normal,
self.alphas_reduce,
]
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
def _isCNNStructure(k_best):
return k_best >= 4
def METHOD_NAME(weights):
gene = []
n = 2
start = 0
cnn_structure_count = 0
for i in range(self._steps):
end = start + n
W = weights[start:end].copy()
edges = sorted(
range(i + 2),
key=lambda x: -max(
W[x][k]
for k in range(len(W[x]))
if k != PRIMITIVES.index("none")
),
)[:2]
for j in edges:
k_best = None
for k in range(len(W[j])):
if k != PRIMITIVES.index("none"):
if k_best is None or W[j][k] > W[j][k_best]:
k_best = k
if _isCNNStructure(k_best):
cnn_structure_count += 1
gene.append((PRIMITIVES[k_best], j))
start = end
n += 1
return gene, cnn_structure_count
with torch.no_grad():
gene_normal, cnn_structure_count_normal = METHOD_NAME(
F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy()
)
gene_reduce, cnn_structure_count_reduce = METHOD_NAME(
F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy()
)
concat = range(2 + self._steps - self._multiplier, self._steps + 2)
genotype = Genotype(
normal=gene_normal,
normal_concat=concat,
reduce=gene_reduce,
reduce_concat=concat,
)
return genotype, cnn_structure_count_normal, cnn_structure_count_reduce
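# Usage sketch (CIFAR-like hyper-parameters, illustrative only):
#   criterion = nn.CrossEntropyLoss()
#   model = Network_GumbelSoftmax(C=16, num_classes=10, layers=8,
#                                 criterion=criterion, device="cpu")
#   logits = model(torch.randn(2, 3, 32, 32))        # sampled architecture weights via Gumbel-softmax
#   genotype, n_normal, n_reduce = model.genotype()  # discretized cell description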
|
3,384 |
clean new pw
|
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: AV-room, Jason Estibeiro
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
from django import forms
from django.conf import settings
from django.contrib.auth.hashers import check_password
from django.contrib.auth.password_validation import (
password_validators_help_texts, validate_password,
)
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from pytz import common_timezones
from pretix.base.models import User
from pretix.control.forms import SingleLanguageWidget
class UserSettingsForm(forms.ModelForm):
error_messages = {
'duplicate_identifier': _("There already is an account associated with this e-mail address. "
"Please choose a different one."),
'pw_current': _("Please enter your current password if you want to change your e-mail "
"address or password."),
'pw_current_wrong': _("The current password you entered was not correct."),
'pw_mismatch': _("Please enter the same password twice"),
'rate_limit': _("For security reasons, please wait 5 minutes before you try again."),
'pw_equal': _("Please choose a password different to your current one.")
}
old_pw = forms.CharField(max_length=255,
required=False,
label=_("Your current password"),
widget=forms.PasswordInput())
new_pw = forms.CharField(max_length=255,
required=False,
label=_("New password"),
widget=forms.PasswordInput())
new_pw_repeat = forms.CharField(max_length=255,
required=False,
label=_("Repeat new password"),
widget=forms.PasswordInput())
timezone = forms.ChoiceField(
choices=((a, a) for a in common_timezones),
label=_("Default timezone"),
help_text=_('Only used for views that are not bound to an event. For all '
'event views, the event timezone is used instead.')
)
class Meta:
model = User
fields = [
'fullname',
'locale',
'timezone',
'email'
]
widgets = {
'locale': SingleLanguageWidget
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super().__init__(*args, **kwargs)
self.fields['email'].required = True
if self.user.auth_backend != 'native':
del self.fields['old_pw']
del self.fields['new_pw']
del self.fields['new_pw_repeat']
self.fields['email'].disabled = True
def clean_old_pw(self):
old_pw = self.cleaned_data.get('old_pw')
if old_pw and settings.HAS_REDIS:
from django_redis import get_redis_connection
rc = get_redis_connection("redis")
cnt = rc.incr('pretix_pwchange_%s' % self.user.pk)
rc.expire('pretix_pwchange_%s' % self.user.pk, 300)
if cnt > 10:
raise forms.ValidationError(
self.error_messages['rate_limit'],
code='rate_limit',
)
if old_pw and not check_password(old_pw, self.user.password):
raise forms.ValidationError(
self.error_messages['pw_current_wrong'],
code='pw_current_wrong',
)
return old_pw
def clean_email(self):
email = self.cleaned_data['email']
if User.objects.filter(Q(email__iexact=email) & ~Q(pk=self.instance.pk)).exists():
raise forms.ValidationError(
self.error_messages['duplicate_identifier'],
code='duplicate_identifier',
)
return email
def METHOD_NAME(self):
password1 = self.cleaned_data.get('new_pw', '')
if password1 and validate_password(password1, user=self.user) is not None:
raise forms.ValidationError(
_(password_validators_help_texts()),
code='pw_invalid'
)
return password1
def clean_new_pw_repeat(self):
password1 = self.cleaned_data.get('new_pw')
password2 = self.cleaned_data.get('new_pw_repeat')
if password1 and password1 != password2:
raise forms.ValidationError(
self.error_messages['pw_mismatch'],
code='pw_mismatch'
)
def clean(self):
password1 = self.cleaned_data.get('new_pw')
email = self.cleaned_data.get('email')
old_pw = self.cleaned_data.get('old_pw')
if (password1 or email != self.user.email) and not old_pw:
raise forms.ValidationError(
self.error_messages['pw_current'],
code='pw_current'
)
if password1 and password1 == old_pw:
raise forms.ValidationError(
self.error_messages['pw_equal'],
code='pw_equal'
)
if password1:
self.instance.set_password(password1)
return self.cleaned_data
class User2FADeviceAddForm(forms.Form):
name = forms.CharField(label=_('Device name'), max_length=64)
devicetype = forms.ChoiceField(label=_('Device type'), widget=forms.RadioSelect, choices=(
('totp', _('Smartphone with the Authenticator application')),
('webauthn', _('WebAuthn-compatible hardware token (e.g. Yubikey)')),
))
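# Usage sketch (outside this module; `request` is a placeholder for a Django request object):
#   form = UserSettingsForm(data=request.POST, instance=request.user, user=request.user)
#   if form.is_valid():
#       form.save()   # changing the e-mail address or password requires 'old_pw' to be set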
|
3,385 |
signal change
|
#
# Copyright 2019-2022 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os.path as op
from PySide6 import QtCore, QtGui, QtWidgets
class _ControlWidget(QtWidgets.QWidget):
needSceneReload = QtCore.Signal(str, object)
def __init__(self, name):
super().__init__()
self._name = name
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
def METHOD_NAME(self, value):
self.needSceneReload.emit(self._name, value)
def get_label_text(self, value=None):
return f"<b>{self._name}:</b> {value}" if value is not None else f"<b>{self._name}:</b>"
class Slider(_ControlWidget):
def __init__(self, name, value, range, unit_base):
super().__init__(name)
slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self._unit_base = unit_base
slider.setRange(range[0] * self._unit_base, range[1] * self._unit_base)
slider.setValue(value * self._unit_base)
self._label = QtWidgets.QLabel(self.get_label_text(value))
slider.valueChanged.connect(self._slider_value_changed)
self.layout.addWidget(self._label)
self.layout.addWidget(slider)
@QtCore.Slot(int)
def _slider_value_changed(self, value):
real_value = value if self._unit_base == 1 else value / float(self._unit_base)
self._label.setText(self.get_label_text(real_value))
self.METHOD_NAME(real_value)
class VectorWidget(_ControlWidget):
def __init__(self, name, value, n, minv, maxv):
super().__init__(name)
hlayout = QtWidgets.QHBoxLayout()
self._spinboxes = []
elems_min = minv if minv is not None else [0] * n
elems_max = maxv if maxv is not None else [1] * n
assert 2 <= n <= 4
assert len(elems_min) == len(elems_max) == len(value) == n
for elem_value, elem_min, elem_max in zip(value, elems_min, elems_max):
spin = QtWidgets.QDoubleSpinBox()
spin.setMinimum(elem_min)
spin.setMaximum(elem_max)
spin.setSingleStep(0.01)
spin.setValue(elem_value)
self._spinboxes.append(spin)
spin.valueChanged.connect(self._spin_value_changed)
hlayout.addWidget(spin)
label = QtWidgets.QLabel(self.get_label_text())
self.layout.addWidget(label)
self.layout.addLayout(hlayout)
@QtCore.Slot(float)
def _spin_value_changed(self, value):
self.METHOD_NAME(tuple(spin.value() for spin in self._spinboxes))
class ColorPicker(_ControlWidget):
def __init__(self, name, value):
super().__init__(name)
self._color_btn = QtWidgets.QPushButton()
color = QtGui.QColor()
color.setRgbF(*value)
color_name = color.name()
self._color_btn.setStyleSheet("background-color: %s;" % color_name)
self._label = QtWidgets.QLabel(self.get_label_text(color_name))
self._color_btn.pressed.connect(self._pick_color)
self._qcolor = color
self.layout.addWidget(self._label)
self.layout.addWidget(self._color_btn)
@QtCore.Slot()
def _pick_color(self):
color = QtWidgets.QColorDialog.getColor(initial=self._qcolor)
if not color.isValid():
return
self._color_btn.setStyleSheet("background-color: %s;" % color.name())
self._qcolor = color
self._label.setText(self.get_label_text(color.name()))
self.METHOD_NAME(color.getRgbF()[:3])
class Checkbox(_ControlWidget):
def __init__(self, name, value):
super().__init__(name)
self._chkbox = QtWidgets.QCheckBox(name)
self._chkbox.setChecked(value)
self._chkbox.stateChanged.connect(self._checkbox_toggle)
self.layout.addWidget(self._chkbox)
@QtCore.Slot()
def _checkbox_toggle(self):
self.METHOD_NAME(self._chkbox.isChecked())
class FilePicker(_ControlWidget):
def __init__(self, name, value, filter):
super().__init__(name)
self._filter = filter
dialog_btn = QtWidgets.QPushButton("Open file")
self._label = QtWidgets.QLabel(self.get_label_text(value))
dialog_btn.pressed.connect(self._choose_filename)
self.layout.addWidget(self._label)
self.layout.addWidget(dialog_btn)
@QtCore.Slot()
def _choose_filename(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, "Open file", "", self._filter)
if not filename[0]:
return
self._label.setText(self.get_label_text(op.basename(filename[0])))
self.METHOD_NAME(filename[0])
class ComboBox(_ControlWidget):
def __init__(self, name, value, choices):
super().__init__(name)
combobox = QtWidgets.QComboBox()
label = QtWidgets.QLabel(self.get_label_text())
combobox.addItems(choices)
combobox.setCurrentIndex(choices.index(value))
combobox.currentTextChanged.connect(self._combobox_select)
self.layout.addWidget(label)
self.layout.addWidget(combobox)
@QtCore.Slot(str)
def _combobox_select(self, text):
self.METHOD_NAME(text)
class TextInput(_ControlWidget):
def __init__(self, name, value):
super().__init__(name)
self._text = QtWidgets.QPlainTextEdit()
self._text.setPlainText(value)
label = QtWidgets.QLabel(self.get_label_text())
        submit_btn = QtWidgets.QPushButton("OK")
        submit_btn.pressed.connect(self._submit_text)
        self.layout.addWidget(label)
        self.layout.addWidget(self._text)
self.layout.addWidget(submit_btn)
@QtCore.Slot()
def _submit_text(self):
self.METHOD_NAME(self._text.toPlainText())
control_to_widget = dict(
Range=Slider,
Vector=VectorWidget,
Color=ColorPicker,
Bool=Checkbox,
File=FilePicker,
List=ComboBox,
Text=TextInput,
)
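# Usage sketch (inside an existing Qt application; the slot name is a placeholder):
#   slider = Slider("opacity", value=0.5, range=(0, 1), unit_base=100)
#   slider.needSceneReload.connect(on_control_changed)  # slot receives (name, new_value)
#   some_layout.addWidget(slider)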
|
3,386 |
test instance
|
from ctypes import *
from ctypes.test import need_symbol
import unittest
# IMPORTANT INFO:
#
# Consider this call:
# func.restype = c_char_p
# func(c_char_p("123"))
# It returns
# "123"
#
# WHY IS THIS SO?
#
# argument tuple (c_char_p("123"), ) is destroyed after the function
# func is called, but NOT before the result is actually built.
#
# If the arglist would be destroyed BEFORE the result has been built,
# the c_char_p("123") object would already have a zero refcount,
# and the pointer passed to (and returned by) the function would
# probably point to deallocated space.
#
# In this case, there would have to be an additional reference to the argument...
import _ctypes_test
testdll = CDLL(_ctypes_test.__file__)
# Return machine address `a` as a (possibly long) non-negative integer.
# Starting with Python 2.5, id(anything) is always non-negative, and
# the ctypes addressof() inherits that via PyLong_FromVoidPtr().
def positive_address(a):
if a >= 0:
return a
# View the bits in `a` as unsigned instead.
import struct
num_bits = struct.calcsize("P") * 8 # num bits in native machine address
a += 1 << num_bits
assert a >= 0
return a
def c_wbuffer(init):
n = len(init) + 1
return (c_wchar * n)(*init)
class CharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_long
func.argtypes = None
def test_paramflags(self):
# function returns c_void_p result,
# and has a required parameter named 'input'
prototype = CFUNCTYPE(c_void_p, c_void_p)
func = prototype(("_testfunc_p_p", testdll),
((1, "input"),))
try:
func()
except TypeError as details:
self.assertEqual(str(details), "required argument 'input' missing")
else:
self.fail("TypeError not raised")
self.assertEqual(func(None), None)
self.assertEqual(func(input=None), None)
def test_int_pointer_arg(self):
func = testdll._testfunc_p_p
func.restype = c_long
self.assertEqual(0, func(0))
ci = c_int(0)
func.argtypes = POINTER(c_int),
self.assertEqual(positive_address(addressof(ci)),
positive_address(func(byref(ci))))
func.argtypes = c_char_p,
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_short),
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_double),
self.assertRaises(ArgumentError, func, byref(ci))
def test_POINTER_c_char_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = POINTER(c_char),
self.assertEqual(None, func(None))
self.assertEqual(b"123", func(b"123"))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual(b"123", func(c_char_p(b"123")))
self.assertEqual(b"123", func(c_buffer(b"123")))
ca = c_char(b"a")
self.assertEqual(ord(b"a"), func(pointer(ca))[0])
self.assertEqual(ord(b"a"), func(byref(ca))[0])
def test_c_char_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_char_p,
self.assertEqual(None, func(None))
self.assertEqual(b"123", func(b"123"))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual(b"123", func(c_char_p(b"123")))
self.assertEqual(b"123", func(c_buffer(b"123")))
ca = c_char(b"a")
self.assertEqual(ord(b"a"), func(pointer(ca))[0])
self.assertEqual(ord(b"a"), func(byref(ca))[0])
def test_c_void_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_void_p,
self.assertEqual(None, func(None))
self.assertEqual(b"123", func(b"123"))
self.assertEqual(b"123", func(c_char_p(b"123")))
self.assertEqual(None, func(c_char_p(None)))
self.assertEqual(b"123", func(c_buffer(b"123")))
ca = c_char(b"a")
self.assertEqual(ord(b"a"), func(pointer(ca))[0])
self.assertEqual(ord(b"a"), func(byref(ca))[0])
func(byref(c_int()))
func(pointer(c_int()))
func((c_int * 3)())
@need_symbol('c_wchar_p')
def test_c_void_p_arg_with_c_wchar_p(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = c_void_p,
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual("123", func(c_wchar_p("123")))
def METHOD_NAME(self):
func = testdll._testfunc_p_p
func.restype = c_void_p
class X:
_as_parameter_ = None
func.argtypes = c_void_p,
self.assertEqual(None, func(X()))
func.argtypes = None
self.assertEqual(None, func(X()))
@need_symbol('c_wchar')
class WCharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_int
func.argtypes = None
def test_POINTER_c_wchar_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = POINTER(c_wchar),
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual("123", func(c_wchar_p("123")))
self.assertEqual("123", func(c_wbuffer("123")))
ca = c_wchar("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
def test_c_wchar_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = c_wchar_p,
c_wchar_p.from_param("123")
self.assertEqual(None, func(None))
self.assertEqual("123", func("123"))
self.assertEqual(None, func(c_wchar_p(None)))
self.assertEqual("123", func(c_wchar_p("123")))
# XXX Currently, these raise TypeErrors, although they shouldn't:
self.assertEqual("123", func(c_wbuffer("123")))
ca = c_wchar("a")
self.assertEqual("a", func(pointer(ca))[0])
self.assertEqual("a", func(byref(ca))[0])
class ArrayTest(unittest.TestCase):
def test(self):
func = testdll._testfunc_ai8
func.restype = POINTER(c_int)
func.argtypes = c_int * 8,
func((c_int * 8)(1, 2, 3, 4, 5, 6, 7, 8))
# This did crash before:
def func(): pass
CFUNCTYPE(None, c_int * 3)(func)
################################################################
if __name__ == '__main__':
unittest.main()
|
3,387 |
set up
|
# License: BSD 3-Clause
import unittest
from typing import List
from random import randint, shuffle
from openml.exceptions import OpenMLServerException
from openml.testing import TestBase
from openml.datasets import (
get_dataset,
list_datasets,
)
from openml.tasks import TaskType, create_task, get_task
class OpenMLTaskTest(TestBase):
"""
A helper class. The methods of the test case
are only executed in subclasses of the test case.
"""
__test__ = False
@classmethod
def setUpClass(cls):
if cls is OpenMLTaskTest:
raise unittest.SkipTest("Skip OpenMLTaskTest tests," " it's a base class")
super(OpenMLTaskTest, cls).setUpClass()
def METHOD_NAME(self, n_levels: int = 1):
super(OpenMLTaskTest, self).METHOD_NAME()
def test_download_task(self):
return get_task(self.task_id)
def test_upload_task(self):
# We don't know if the task in question already exists, so we try a few times. Checking
# beforehand would not be an option because a concurrent unit test could potentially
# create the same task and make this unit test fail (i.e. getting a dataset and creating
# a task for it is not atomic).
compatible_datasets = self._get_compatible_rand_dataset()
for i in range(100):
try:
dataset_id = compatible_datasets[i % len(compatible_datasets)]
# TODO consider implementing on the diff task types.
task = create_task(
task_type=self.task_type,
dataset_id=dataset_id,
target_name=self._get_random_feature(dataset_id),
estimation_procedure_id=self.estimation_procedure,
)
task.publish()
TestBase._mark_entity_for_removal("task", task.id)
TestBase.logger.info(
"collected from {}: {}".format(__file__.split("/")[-1], task.id)
)
# success
break
except OpenMLServerException as e:
# Error code for 'task already exists'
# Should be 533 according to the docs
# (# https://www.openml.org/api_docs#!/task/post_task)
if e.code == 614:
continue
else:
raise e
else:
raise ValueError(
"Could not create a valid task for task type ID {}".format(self.task_type)
)
def _get_compatible_rand_dataset(self) -> List:
active_datasets = list_datasets(status="active", output_format="dataframe")
# depending on the task type, find either datasets
# with only symbolic features or datasets with only
# numerical features.
if self.task_type == TaskType.SUPERVISED_REGRESSION:
compatible_datasets = active_datasets[active_datasets["NumberOfSymbolicFeatures"] == 0]
elif self.task_type == TaskType.CLUSTERING:
compatible_datasets = active_datasets
else:
compatible_datasets = active_datasets[active_datasets["NumberOfNumericFeatures"] == 0]
compatible_datasets = list(compatible_datasets["did"])
# in-place shuffling
shuffle(compatible_datasets)
return compatible_datasets
# random_dataset_pos = randint(0, len(compatible_datasets) - 1)
#
# return compatible_datasets[random_dataset_pos]
def _get_random_feature(self, dataset_id: int) -> str:
random_dataset = get_dataset(dataset_id)
        # loop until we find a feature that is not of string or date type
while True:
random_feature_index = randint(0, len(random_dataset.features) - 1)
random_feature = random_dataset.features[random_feature_index]
if self.task_type == TaskType.SUPERVISED_REGRESSION:
if random_feature.data_type == "numeric":
break
else:
if random_feature.data_type == "nominal":
break
return random_feature.name
|
3,388 |
traverse
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The auto tuning strategy."""
from copy import deepcopy
from ..utils import logger
from .strategy import STRATEGIES, TuneStrategy, strategy_registry
@strategy_registry
class AutoTuneStrategy(TuneStrategy):
"""The auto tuning strategy.
    There are three stages executed sequentially by the auto strategy,
    and the tuning process ends once the exit-policy condition is met.
"""
def __init__(
self,
model,
conf,
q_dataloader=None,
q_func=None,
eval_func=None,
eval_dataloader=None,
eval_metric=None,
resume=None,
q_hooks=None,
):
"""Init an auto tuning strategy.
Args:
model: The FP32 model specified for low precision tuning.
conf: The Conf class instance includes all user configurations.
q_dataloader: Data loader for calibration, mandatory for post-training quantization. Defaults to None.
            q_func: Training function for quantization aware training. Defaults to None.
eval_func: The evaluation function provided by user. This function takes model as parameter, and
evaluation dataset and metrics should be encapsulated in this function implementation and
outputs a higher-is-better accuracy scalar value.
eval_dataloader: Data loader for evaluation. Defaults to None.
eval_metric: Metric for evaluation. Defaults to None.
resume: The dict containing resume information. Defaults to None.
q_hooks: The dict of training hooks, supported keys are: on_epoch_begin, on_epoch_end, on_step_begin,
                on_step_end. Their values are functions to be executed in the adaptor layer. Defaults to None.
"""
super().__init__(
model=model,
conf=conf,
q_dataloader=q_dataloader,
q_func=q_func,
eval_func=eval_func,
eval_dataloader=eval_dataloader,
eval_metric=eval_metric,
resume=resume,
q_hooks=q_hooks,
)
logger.info("*** Initialize auto tuning")
self.strategies_sequence = ["conservative", "basic"]
def _transfer_alpha(self, pre_strategy):
sq_alpha = (
pre_strategy.cur_best_tuning_cfg.get("recipe_cfgs", {}).get("smooth_quant_args", {}).get("alpha", None)
)
if sq_alpha and self.conf.quantization.recipes:
logger.warning(
f"[Strategy] Override the user config's smooth quant alpha into best alpha"
f"({sq_alpha: .4f}) found in pre-strategy."
)
self.conf.quantization.recipes.setdefault("smooth_quant_args", {})["alpha"] = sq_alpha
def sequential_traverse(self):
"""Try different strategies sequentially."""
pre_strategy = self
for strategy_name in self.strategies_sequence:
logger.info(f"*** Start {strategy_name} tuning.")
# transfer the best alpha of sq to the next strategy
self._transfer_alpha(pre_strategy)
strategy = STRATEGIES[strategy_name](
model=self.model,
conf=self.conf,
q_dataloader=self.calib_dataloader,
q_func=self.q_func,
eval_func=self.eval_func,
eval_dataloader=self.eval_dataloader,
eval_metric=self.eval_metric,
resume=self._resume,
q_hooks=self.q_hooks,
pre_strategy=pre_strategy,
)
pre_strategy = strategy
strategy.METHOD_NAME()
self.best_qmodel = strategy.best_qmodel
if self.best_qmodel:
return
def next_tune_cfg(self):
"""Generate and yield the default tuning config.
Returns:
tune_config (dict): A dict containing the tuning configuration for quantization.
"""
tuning_space = self.tuning_space
calib_sampling_size_lst = tuning_space.root_item.get_option_by_name("calib_sampling_size").options
_, _, op_tuning_cfg = self.initial_tuning_cfg()
op_tuning_cfg["calib_sampling_size"] = calib_sampling_size_lst[0]
if not self.cur_best_tuning_cfg:
self.cur_best_tuning_cfg = deepcopy(op_tuning_cfg)
# try to tune sq alpha
if self._should_tuning_sq_alpha(self.config.recipes):
for tune_cfg in self.tuning_sq_alpha(tuning_space, deepcopy(self.cur_best_tuning_cfg), self.config.recipes):
yield tune_cfg
logger.info("Quantize the model with default config.")
yield op_tuning_cfg
def METHOD_NAME(self):
"""Traverse the tuning space."""
# Quantize model with default config
super().METHOD_NAME()
if self.best_qmodel:
logger.info("[Strategy] Found the model meets accuracy requirements, ending the tuning process.")
return
elif self.config.tuning_criterion.max_trials == 1:
logger.info(
"[Strategy] Not found the model meets accuracy requirements,\
but the max trial is 1, ending the tuning process."
)
else:
# Start to try different strategies sequentially
self.sequential_traverse()
|
3,389 |
test works with python38
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.python.dependency_inference import parse_python_dependencies
from pants.backend.python.dependency_inference.rules import (
PythonImportDependenciesInferenceFieldSet,
)
from pants.backend.python.dependency_inference.rules import rules as core_rules
from pants.backend.python.framework.django import dependency_inference, detect_apps
from pants.backend.python.target_types import PythonSourceTarget
from pants.backend.python.util_rules import pex
from pants.core.util_rules import stripped_source_files
from pants.engine.addresses import Address
from pants.engine.rules import QueryRule
from pants.engine.target import InferredDependencies
from pants.testutil.python_interpreter_selection import (
skip_unless_python27_present,
skip_unless_python37_present,
skip_unless_python38_present,
skip_unless_python39_present,
)
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*parse_python_dependencies.rules(),
*stripped_source_files.rules(),
*pex.rules(),
*dependency_inference.rules(),
*detect_apps.rules(),
*core_rules(),
QueryRule(InferredDependencies, [dependency_inference.InferDjangoDependencies]),
],
target_types=[PythonSourceTarget],
)
rule_runner.set_options([], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
return rule_runner
def do_test_migration_dependencies(rule_runner: RuleRunner, constraints: str) -> None:
rule_runner.write_files(
{
"BUILD": "python_source(name='t', source='path/to/app0/migrations/0001_initial.py')",
"path/to/app0/migrations/0001_initial.py": dedent(
"""\
class Migration(migrations.Migration):
dependencies = [
("app1", "0012_some_migration"),
("app2_label", "0042_another_migration"),
]
operations = []
"""
),
"path/to/app1/BUILD": dedent(
f"""\
python_source(
source="apps.py",
interpreter_constraints=['{constraints}'],
)
"""
),
"path/to/app1/apps.py": dedent(
"""\
class App1AppConfig(AppConfig):
name = "path.to.app1"
label = "app1"
"""
),
"path/to/app1/migrations/BUILD": "python_source(source='0012_some_migration.py')",
"path/to/app1/migrations/0012_some_migration.py": "",
"another/path/app2/BUILD": dedent(
f"""\
python_source(
source="apps.py",
interpreter_constraints=['{constraints}'],
)
"""
),
"another/path/app2/apps.py": dedent(
"""\
class App2AppConfig(AppConfig):
name = "another.path.app2"
label = "app2_label"
"""
),
"another/path/app2/migrations/BUILD": "python_source(source='0042_another_migration.py')",
"another/path/app2/migrations/0042_another_migration.py": "",
}
)
tgt = rule_runner.get_target(Address("", target_name="t"))
result = rule_runner.request(
InferredDependencies,
[
dependency_inference.InferDjangoDependencies(
PythonImportDependenciesInferenceFieldSet.create(tgt)
)
],
)
assert set(result.include) == {
Address("another/path/app2/migrations", target_name="migrations"),
Address("path/to/app1/migrations", target_name="migrations"),
}
@skip_unless_python27_present
def test_works_with_python2(rule_runner: RuleRunner) -> None:
do_test_migration_dependencies(rule_runner, constraints="CPython==2.7.*")
@skip_unless_python37_present
def test_works_with_python37(rule_runner: RuleRunner) -> None:
do_test_migration_dependencies(rule_runner, constraints="CPython==3.7.*")
@skip_unless_python38_present
def METHOD_NAME(rule_runner: RuleRunner) -> None:
do_test_migration_dependencies(rule_runner, constraints="CPython==3.8.*")
@skip_unless_python39_present
def test_works_with_python39(rule_runner: RuleRunner) -> None:
do_test_migration_dependencies(rule_runner, constraints="CPython==3.9.*")
|
3,390 |
output json
|
import argparse
import errno
import json
import logging
import os
from collections import defaultdict
from copy import deepcopy
try:
from typing import Any
except ImportError:
# Only used for type annotations
pass
from find_apps import find_apps
from find_build_apps import BUILD_SYSTEM_CMAKE, BUILD_SYSTEMS
from idf_py_actions.constants import PREVIEW_TARGETS, SUPPORTED_TARGETS
from ttfw_idf.IDFAssignTest import ExampleAssignTest, TestAppsAssignTest
TEST_LABELS = {
'example_test': 'BOT_LABEL_EXAMPLE_TEST',
'test_apps': 'BOT_LABEL_CUSTOM_TEST',
'component_ut': ['BOT_LABEL_UNIT_TEST',
'BOT_LABEL_UNIT_TEST_32',
'BOT_LABEL_UNIT_TEST_S2',
'BOT_LABEL_UNIT_TEST_C3'],
}
BUILD_ALL_LABELS = [
'BOT_LABEL_BUILD',
'BOT_LABEL_BUILD_ALL_APPS',
'BOT_LABEL_REGULAR_TEST',
'BOT_LABEL_WEEKEND_TEST',
]
def _has_build_all_label(): # type: () -> bool
for label in BUILD_ALL_LABELS:
if os.getenv(label):
return True
return False
def _judge_build_or_not(action, build_all): # type: (str, bool) -> tuple[bool, bool]
"""
:return: (build_or_not_for_test_related_apps, build_or_not_for_non_related_apps)
"""
if build_all or _has_build_all_label() or (not os.getenv('BOT_TRIGGER_WITH_LABEL')):
logging.info('Build all apps')
return True, True
labels = TEST_LABELS[action]
if not isinstance(labels, list):
labels = [labels] # type: ignore
for label in labels:
if os.getenv(label):
logging.info('Build only test cases apps')
return True, False
logging.info('Skip all')
return False, False
def METHOD_NAME(apps_dict_list, target, build_system, output_dir): # type: (list, str, str, str) -> None
output_path = os.path.join(output_dir, 'scan_{}_{}.json'.format(target.lower(), build_system))
with open(output_path, 'w') as fw:
fw.writelines([json.dumps(app) + '\n' for app in apps_dict_list])
# we might need artifacts to run test cases locally,
# so we also preserve artifacts for apps whose test cases are not executed by CI.
class _ExampleAssignTest(ExampleAssignTest):
DEFAULT_FILTER = {} # type: dict[str, Any]
class _TestAppsAssignTest(TestAppsAssignTest):
DEFAULT_FILTER = {} # type: dict[str, Any]
def main(): # type: () -> None
parser = argparse.ArgumentParser(description='Scan the required build tests')
parser.add_argument('test_type',
choices=TEST_LABELS.keys(),
help='Scan test type')
parser.add_argument('paths', nargs='+',
help='One or more app paths')
parser.add_argument('-b', '--build-system',
choices=BUILD_SYSTEMS.keys(),
default=BUILD_SYSTEM_CMAKE)
parser.add_argument('-c', '--ci-config-file',
required=True,
help='gitlab ci config target-test file')
parser.add_argument('-o', '--output-path',
required=True,
help='output path of the scan result')
parser.add_argument('--exclude', nargs='*',
help='Ignore specified directory. Can be used multiple times.')
parser.add_argument('--extra_test_dirs', nargs='*',
help='Additional directories to preserve artifacts for local tests')
parser.add_argument('--preserve_all', action='store_true',
help='add this flag to preserve artifacts for all apps')
parser.add_argument('--build-all', action='store_true',
help='add this flag to build all apps')
args = parser.parse_args()
build_test_case_apps, build_standalone_apps = _judge_build_or_not(args.test_type, args.build_all)
if not os.path.exists(args.output_path):
try:
os.makedirs(args.output_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
SUPPORTED_TARGETS.extend(PREVIEW_TARGETS)
if (not build_standalone_apps) and (not build_test_case_apps):
for target in SUPPORTED_TARGETS:
METHOD_NAME([], target, args.build_system, args.output_path)
        raise SystemExit(0)
idf_path = str(os.getenv('IDF_PATH'))
paths = set([os.path.join(idf_path, path) if not os.path.isabs(path) else path for path in args.paths])
test_cases = []
for path in paths:
if args.test_type == 'example_test':
assign = _ExampleAssignTest(path, args.ci_config_file)
elif args.test_type in ['test_apps', 'component_ut']:
assign = _TestAppsAssignTest(path, args.ci_config_file)
else:
raise SystemExit(1) # which is impossible
test_cases.extend(assign.search_cases())
'''
{
<target>: {
'test_case_apps': [<app_dir>], # which is used in target tests
'standalone_apps': [<app_dir>], # which is not
},
...
}
'''
scan_info_dict = defaultdict(dict) # type: dict[str, dict]
# store the test cases dir, exclude these folders when scan for standalone apps
default_exclude = args.exclude if args.exclude else []
build_system = args.build_system.lower()
build_system_class = BUILD_SYSTEMS[build_system]
for target in SUPPORTED_TARGETS:
exclude_apps = deepcopy(default_exclude)
if build_test_case_apps:
scan_info_dict[target]['test_case_apps'] = set()
test_dirs = args.extra_test_dirs if args.extra_test_dirs else []
for case in test_cases:
if case.case_info['target'].lower() == target.lower():
test_dirs.append(case.case_info['app_dir'])
for app_dir in test_dirs:
app_dir = os.path.join(idf_path, app_dir) if not os.path.isabs(app_dir) else app_dir
_apps = find_apps(build_system_class, app_dir, True, exclude_apps, target.lower())
if _apps:
scan_info_dict[target]['test_case_apps'].update(_apps)
exclude_apps.extend(_apps)
else:
scan_info_dict[target]['test_case_apps'] = set()
if build_standalone_apps:
scan_info_dict[target]['standalone_apps'] = set()
for path in paths:
scan_info_dict[target]['standalone_apps'].update(
find_apps(build_system_class, path, True, exclude_apps, target.lower()))
else:
scan_info_dict[target]['standalone_apps'] = set()
test_case_apps_preserve_default = True if build_system == 'cmake' else False
for target in SUPPORTED_TARGETS:
apps = []
for app_dir in scan_info_dict[target]['test_case_apps']:
apps.append({
'app_dir': app_dir,
'build_system': args.build_system,
'target': target,
'preserve': args.preserve_all or test_case_apps_preserve_default
})
for app_dir in scan_info_dict[target]['standalone_apps']:
apps.append({
'app_dir': app_dir,
'build_system': args.build_system,
'target': target,
'preserve': args.preserve_all
})
output_path = os.path.join(args.output_path, 'scan_{}_{}.json'.format(target.lower(), build_system))
with open(output_path, 'w') as fw:
fw.writelines([json.dumps(app) + '\n' for app in apps])
if __name__ == '__main__':
main()
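# Example invocation (sketch; the script, CI config and output paths are placeholders):
#   python scan_build_test_apps.py example_test examples/wifi \
#       --build-system cmake \
#       --ci-config-file .gitlab/ci/target-test.yml \
#       --output-path scan_output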
|
3,391 |
get translation
|
# Copyright 2017 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions specifically for NMT."""
from __future__ import print_function
import codecs
import time
import numpy as np
import tensorflow as tf
import logging as log
import evaluation_utils
import misc_utils as utils
__all__ = ["decode_and_evaluate", "get_translation"]
def decode_and_evaluate(mode, sess, out_tensor, trans_file, ref_file,
metric='bleu', beam_width=10,
num_translations_per_input=1, iterations=1):
"""Decode a test set and compute a score according to the evaluation task."""
utils.print_out(" Decoding to output %s" % trans_file)
with codecs.getwriter("utf-8")(
tf.io.gfile.GFile(trans_file, mode="wb")) as trans_f:
trans_f.write("") # Write empty string to ensure file is created.
num_translations_per_input = min(num_translations_per_input, beam_width)
print(" Running inference with beam_width %g, num translations per input %d. " \
% (beam_width, num_translations_per_input))
print(" Total iterations count %d." % iterations)
    # Warm up on the first batch so the one-time session startup overhead
    # is excluded from the timing below.
nmt_outputs = sess.run(out_tensor) # time x batch_size x beam_width
nmt_outputs = nmt_outputs.transpose() # beam_width x batch_size x time
batch_size = nmt_outputs.shape[1]
for sent_id in range(batch_size):
translation = METHOD_NAME(nmt_outputs[0], sent_id,
tgt_eos='</s>')
if mode == 'accuracy':
trans_f.write((translation + b"\n").decode("utf-8"))
    # prediction time measures only the model forward passes;
    # overall time additionally includes data pre- and post-processing
prediction_times = list()
overall_start = time.time()
num_sentences = 0
n = 0
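        # Run the whole evaluation set `iterations` times; the inner loop keeps
        # pulling batches until the dataset iterator raises OutOfRangeError.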
while n < iterations:
n += 1
while True:
try:
start = time.time()
nmt_outputs = sess.run(out_tensor) # time x batch_size x beam_width
nmt_outputs = nmt_outputs.transpose() # beam_width x batch_size x time
prediction_times.append(time.time() - start)
batch_size = nmt_outputs.shape[1]
num_sentences += batch_size
for sent_id in range(batch_size):
for beam_id in range(num_translations_per_input):
translation = METHOD_NAME(nmt_outputs[beam_id], sent_id,
tgt_eos='</s>')
if mode == 'accuracy':
trans_f.write((translation + b"\n").decode("utf-8"))
except tf.errors.OutOfRangeError:
utils.print_time(
" Done, num sentences %d, num translations per input %d" %
(num_sentences, num_translations_per_input), overall_start)
break
overall_time = (time.time() - overall_start)
print("\nAverage Prediction Latency: {:.5f} sec per batch.".format(
sum(prediction_times)/float(len(prediction_times))))
print("Overall Latency: {:.5f} sec for the entire test "
"dataset.".format(overall_time/float(iterations)))
print("Overall Throughput : {:.3f} sentences per sec.".format(
num_sentences/float(overall_time)))
# Evaluation
if mode == 'accuracy':
if ref_file and tf.io.gfile.exists(trans_file):
score = evaluation_utils.evaluate(ref_file, trans_file, metric)
utils.print_out(" Accuracy metric %s: %.1f" % (metric, score))
def METHOD_NAME(nmt_outputs, sent_id, tgt_eos):
"""Given batch decoding outputs, select a sentence and turn to text."""
if tgt_eos: tgt_eos = tgt_eos.encode("utf-8")
# Select a sentence
output = nmt_outputs[sent_id, :].tolist()
# If there is an eos symbol in outputs, cut them at that point.
if tgt_eos and tgt_eos in output:
output = output[:output.index(tgt_eos)]
translation = utils.format_text(output)
return translation
|
3,392 |
test migrate instance
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import factory
from django.test import TestCase
from django.db.models import signals
from pipeline.contrib.statistics.models import InstanceInPipeline
from pipeline.models import PipelineInstance, PipelineTemplate, Snapshot
from pipeline.utils.uniqid import uniqid
from pipeline.engine.utils import calculate_elapsed_time
from gcloud.tests.mock import * # noqa
from gcloud.tests.mock_settings import * # noqa
from gcloud.analysis_statistics.data_migrate.tasks import migrate_instance
from gcloud.tasktmpl3.models import TaskTemplate
from gcloud.taskflow3.models import TaskFlowInstance
from gcloud.analysis_statistics.models import TaskflowStatistics
from gcloud.core.models import Project
class TestMigrateInstance(TestCase):
@factory.django.mute_signals(signals.pre_save, signals.post_save)
def setUp(self):
self.test_snapshot = Snapshot.objects.create_snapshot({})
self.test_snapshot.save()
self.test_project = Project.objects.create(
name="proj",
creator="creator",
)
self.test_project.save()
# prepare test data
instance_id = uniqid()
template_id = uniqid()
self.instance_in_pipeline = InstanceInPipeline.objects.create(
instance_id=instance_id, atom_total=0, subprocess_total=0, gateways_total=0
)
self.pipeline_template = PipelineTemplate.objects.create(
template_id=template_id, creator="creator", snapshot=self.test_snapshot
)
self.pipeline_instance = PipelineInstance.objects.create(
instance_id=instance_id, creator="creator", snapshot=self.test_snapshot, template=self.pipeline_template
)
self.task_template = TaskTemplate.objects.create(
project=self.test_project, pipeline_template=self.pipeline_template
)
self.taskflow_instance = TaskFlowInstance.objects.create(
project=self.test_project, pipeline_instance=self.pipeline_instance, template_id=template_id
)
def tearDown(self):
InstanceInPipeline.objects.all().delete()
PipelineTemplate.objects.all().delete()
PipelineInstance.objects.all().delete()
TaskFlowInstance.objects.all().delete()
TaskTemplate.objects.all().delete()
@patch(TASKFLOW_STATISTICS_FILTER, MagicMock())
@patch(TASKFLOW_STATISTICS_CREATE, MagicMock())
def METHOD_NAME(self):
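        # Use an id range that brackets the single InstanceInPipeline row created in setUp.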
test_start = self.instance_in_pipeline.id - 1
test_end = self.instance_in_pipeline.id + 1
result = migrate_instance(test_start, test_end)
kwargs = dict(
instance_id=self.pipeline_instance.id,
task_instance_id=self.taskflow_instance.id,
atom_total=self.instance_in_pipeline.atom_total,
subprocess_total=self.instance_in_pipeline.subprocess_total,
gateways_total=self.instance_in_pipeline.gateways_total,
project_id=self.taskflow_instance.project.id,
category=self.task_template.category,
template_id=self.pipeline_template.id,
task_template_id=self.task_template.id,
creator=self.pipeline_instance.creator,
create_time=self.pipeline_instance.create_time,
start_time=self.pipeline_instance.start_time,
finish_time=self.pipeline_instance.finish_time,
elapsed_time=calculate_elapsed_time(self.pipeline_instance.start_time, self.pipeline_instance.finish_time),
create_method=self.taskflow_instance.create_method,
)
TaskflowStatistics.objects.filter.assert_called_once_with(instance_id=kwargs["instance_id"])
TaskflowStatistics.objects.create.assert_called_once_with(**kwargs)
self.assertTrue(result)
|
3,393 |
prepare request
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.purview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["models.OperationList"]:
"""Lists the available operations.
List of available operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.purview.models.OperationList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
def METHOD_NAME(next_link=None):
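            # Build the GET request for either the first page or a follow-up page
            # identified by `next_link`.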
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OperationList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = METHOD_NAME(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponseModel, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Purview/operations'} # type: ignore
|
3,394 |
main
|
import logging
import optparse
import os
import sys
import textwrap
import time
import transaction
from pyramid.paster import bootstrap
from pyramid.paster import setup_logging
from rdflib import RDF
from rdflib import SKOS
from rdflib.term import URIRef
from skosprovider_rdf import utils
from atramhasis.data.datamanagers import CountsManager
from atramhasis.data.models import ConceptschemeCounts
from atramhasis.errors import SkosRegistryNotFoundException
log = logging.getLogger(__name__)
def METHOD_NAME():
description = """\
Dump all conceptschemes to files. Will serialise as Turtle and RDF/XML format.
"""
usage = "usage: %prog config_uri"
parser = optparse.OptionParser(
usage=usage,
description=textwrap.dedent(description)
)
parser.add_option(
'-l', '--location', dest='dump_location', type='string',
help='Specify where to dump the conceptschemes. If not specified, this \
is set to the atramhasis.dump_location from your ini file.'
)
parser.add_option(
'-r', '--rdf2hdt', dest='rdf2hdt', type='string', default=False,
help='Specify where the rdf2hdt command can be found. If not specified, this \
is set to atramhasis.rdf2hdt from your ini file.'
)
options, args = parser.parse_args(sys.argv[1:])
    if not args:
log.error('You must provide at least one argument.')
return 2
config_uri = args[0]
env = bootstrap(config_uri)
setup_logging(config_uri)
dump_location = options.dump_location
if dump_location is None:
dump_location = env['registry'].settings.get(
'atramhasis.dump_location',
os.path.abspath(os.path.dirname(config_uri))
)
if not os.access(dump_location, os.W_OK | os.X_OK):
log.error('Dump location "' + dump_location + '" is not writable.')
return 2
rdf2hdt = options.rdf2hdt
if not rdf2hdt:
rdf2hdt = env['registry'].settings.get(
'atramhasis.rdf2hdt',
False
)
request = env['request']
if hasattr(request, 'skos_registry') and request.skos_registry is not None:
skos_registry = request.skos_registry
else:
raise SkosRegistryNotFoundException() # pragma: no cover
counts = []
files = []
for p in skos_registry.get_providers():
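        # Skip providers whose subject metadata marks them as 'external'.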
        if 'external' in p.get_metadata()['subject']:
continue
start_time = time.time()
pid = p.get_metadata()['id']
filename = os.path.join(dump_location, '%s-full' % pid)
filename_ttl = '%s.ttl' % filename
filename_rdf = '%s.rdf' % filename
files.append(filename_ttl)
log.info('Generating dump for %s' % pid)
graph = utils.rdf_dumper(p)
triples = len(graph)
log.info('Number of triples in Graph: %d' % triples)
csuri = URIRef(p.concept_scheme.uri)
cs_triples = len(list(graph.predicate_objects(csuri)))
log.info('Number of triples in Conceptscheme: %d' % cs_triples)
count_concepts = len(list(graph.subjects(RDF.type, SKOS.Concept)))
count_collections = len(list(graph.subjects(RDF.type, SKOS.Collection)))
try:
avg_concept_triples = ((triples - cs_triples) /
(count_concepts + count_collections))
except ZeroDivisionError:
avg_concept_triples = 0
log.info('Average number of triples per concept: %d' % avg_concept_triples)
counts.append({
'conceptscheme_id': pid,
'triples': triples,
'conceptscheme_triples': cs_triples,
'avg_concept_triples': avg_concept_triples
})
log.info(f'Dumping {pid} to Turtle: {filename_ttl}')
graph.serialize(destination=filename_ttl, format='turtle')
log.info(f'Dumping {pid} to RDFxml: {filename_rdf}')
graph.serialize(destination=filename_rdf, format='pretty-xml')
del graph
log.info(f'--- {(time.time() - start_time)} seconds ---')
log.info('All files dumped to %s' % dump_location)
if rdf2hdt:
from subprocess import check_call, CalledProcessError
parsing_error = False
for f in files:
log.info(f'Converting {f} to hdt')
hdtf = f.replace('.ttl', '.hdt')
try:
check_call([rdf2hdt, '-f', 'turtle', f, hdtf])
except (FileNotFoundError, CalledProcessError) as e:
# Turtle failed, let's try rdfxml
parsing_error = True
log.warning(f'rdf2hdt for file {f} failed with error {e}. Trying rdfxml...')
rdff = f.replace('.ttl', '.rdf')
try:
check_call([rdf2hdt, '-f', 'rdfxml', rdff, hdtf])
except (FileNotFoundError, CalledProcessError) as e:
# rdfxml failed
log.error(f'rdfxml for file {f} failed with error {e}')
if parsing_error:
log.error('Error during rdf2hdt conversion. Check logs for more information.')
else:
log.info(f'All hdt files dumped to {dump_location}')
with transaction.manager:
dbsession = request.registry.dbmaker()
manager = CountsManager(dbsession)
for c in counts:
cs_count = ConceptschemeCounts(
conceptscheme_id=c['conceptscheme_id'],
triples=c['triples'],
conceptscheme_triples=c['conceptscheme_triples'],
avg_concept_triples=c['avg_concept_triples']
)
manager.save(cs_count)
env['closer']()
|
3,395 |
connect
|
import logging
from typing import Callable, Optional, List, Tuple, Dict
from feeluown.utils.dispatch import Signal
logger = logging.getLogger(__name__)
class SignalConnector:
def __init__(self, symbol: str):
self._signal: Optional[Signal] = None
self.symbol = symbol
self._slot_list: List[Tuple[Callable, Dict]] = []
self._slot_symbol_list: List[Tuple[str, Dict]] = []
def bind_signal(self, signal: Signal):
self._signal = signal
def METHOD_NAME(self):
"""Connect all slots.
"""
if self._signal is None:
raise RuntimeError("no signal is bound")
# Connect slot which are not symbol.
# These slots are connected directly.
for slot, kwargs in self._slot_list:
self._signal.METHOD_NAME(slot, **kwargs)
self._slot_list.clear()
# Connect slots which are symbol currently.
self._signal.METHOD_NAME(self.slot_symbols_delegate, weak=False)
def connect_slot(self, slot: Callable, **kwargs):
if self._signal is not None:
# If signal is already bound, the slot is connected immediately.
self._signal.METHOD_NAME(slot, **kwargs)
else:
self._slot_list.append((slot, kwargs))
def connect_slot_symbol(self, slot_symbol: str, **kwargs):
self._slot_symbol_list.append((slot_symbol, kwargs))
def disconnect_slot_symbol(self, slot_symbol):
for i, (symbol, _) in enumerate(self._slot_symbol_list):
if symbol == slot_symbol:
self._slot_symbol_list.pop(i)
def disconnect_slot(self, slot: Callable):
if self._signal is not None:
self._signal.disconnect(slot)
else:
for i, (s, _) in enumerate(self._slot_list):
if s == slot:
self._slot_list.pop(i)
break
def slot_symbols_delegate(self, *args):
"""
A delegate invoke the slots for the signal.
Signal.emit => self.slot_symbols_delegate => slots
"""
for slot_symbol, kwargs in self._slot_symbol_list:
func = fuoexec_F(slot_symbol)
# FIXME: Duplicate code. The logic has been implemented in Signal.emit.
if kwargs.get('aioqueue'):
if Signal.has_aio_support:
Signal.aioqueue.sync_q.put_nowait((func, args)) # type: ignore
else:
logger.warning(
'No aio support is available, a slot is ignored.')
else:
try:
func(*args)
except: # noqa, pylint: disable=bare-except
                    logger.exception('error during calling slot: %s', func)
class SignalManager:
def __init__(self):
self.initialized = False
self._app = None
self.signal_connectors: List[SignalConnector] = []
def initialize(self, app):
"""
Find each signal by signal_symbol and connect slots for them.
"""
if self.initialized:
raise RuntimeError('signals slots manager already initialized')
self._app = app
for connector in self.signal_connectors:
self._init_sc(connector)
self.initialized = True
def add(self, signal_symbol: str, slot: Callable, use_symbol: bool, **kwargs):
"""Add one slot for the signal.
:param slot: The function or it's symbol.
"""
sc = self._get_or_create_sc(signal_symbol)
if use_symbol is True:
sc.connect_slot_symbol(fuoexec_S(slot), **kwargs)
else:
sc.connect_slot(slot, **kwargs)
def remove(self, signal_symbol: str, slot: Callable, use_symbol: bool):
"""Remove one slot for signal.
If slot is not connected, this does nothing.
"""
signal_connector = self._get_or_create_sc(signal_symbol)
if use_symbol is True:
signal_connector.disconnect_slot_symbol(fuoexec_S(slot))
else:
signal_connector.disconnect_slot(slot)
def _get_or_create_sc(self, signal_symbol) -> SignalConnector:
"""Get or create signal connector."""
for sc in self.signal_connectors:
if sc.symbol == signal_symbol:
signal_connector = sc
break
else:
signal_connector = SignalConnector(signal_symbol)
if self.initialized:
self._init_sc(signal_connector)
self.signal_connectors.append(signal_connector)
return signal_connector
def _init_sc(self, sc):
# pylint: disable=eval-used
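        # The symbol is a Python expression evaluated with the running `app` object
        # bound, which resolves to the actual Signal instance to connect.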
signal = eval(sc.symbol, {'app': self._app})
sc.bind_signal(signal)
sc.METHOD_NAME()
signal_mgr: SignalManager = SignalManager()
# pylint: disable=wrong-import-position, cyclic-import
from .fuoexec import fuoexec_S, fuoexec_F # noqa
|
3,396 |
std string
|
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''
from datetime import datetime
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
def _byte_string(s):
"""Cast a string or byte string to an ASCII byte string."""
return s.encode('ASCII')
_NULL = _byte_string('\0')
def METHOD_NAME(s):
"""Cast a string or byte string to an ASCII string."""
return str(s.decode('ASCII'))
def build_tzinfo(zone, fp):
head_fmt = '>4s c 15x 6l'
head_size = calcsize(head_fmt)
(magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))
# Make sure it is a tzfile(5) file
assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic)
# Read out the transition times, localtime indices and ttinfo structures.
data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
timecnt=timecnt, ttinfo='lBB' * typecnt, charcnt=charcnt)
data_size = calcsize(data_fmt)
data = unpack(data_fmt, fp.read(data_size))
# make sure we unpacked the right number of values
assert len(data) == 2 * timecnt + 3 * typecnt + 1
transitions = [memorized_datetime(trans)
for trans in data[:timecnt]]
lindexes = list(data[timecnt:2 * timecnt])
ttinfo_raw = data[2 * timecnt:-1]
tznames_raw = data[-1]
del data
# Process ttinfo into separate structs
ttinfo = []
tznames = {}
i = 0
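    # Each raw ttinfo record is a (utcoffset, isdst, tzname-offset) triple, so step by 3.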
while i < len(ttinfo_raw):
# have we looked up this timezone name yet?
tzname_offset = ttinfo_raw[i + 2]
if tzname_offset not in tznames:
nul = tznames_raw.find(_NULL, tzname_offset)
if nul < 0:
nul = len(tznames_raw)
tznames[tzname_offset] = METHOD_NAME(
tznames_raw[tzname_offset:nul])
ttinfo.append((ttinfo_raw[i],
bool(ttinfo_raw[i + 1]),
tznames[tzname_offset]))
i += 3
# Now build the timezone object
if len(ttinfo) == 1 or len(transitions) == 0:
ttinfo[0][0], ttinfo[0][2]
cls = type(zone, (StaticTzInfo,), dict(
zone=zone,
_utcoffset=memorized_timedelta(ttinfo[0][0]),
_tzname=ttinfo[0][2]))
else:
# Early dates use the first standard time ttinfo
i = 0
while ttinfo[i][1]:
i += 1
if ttinfo[i] == ttinfo[lindexes[0]]:
transitions[0] = datetime.min
else:
transitions.insert(0, datetime.min)
lindexes.insert(0, i)
# calculate transition info
transition_info = []
for i in range(len(transitions)):
inf = ttinfo[lindexes[i]]
utcoffset = inf[0]
if not inf[1]:
dst = 0
else:
for j in range(i - 1, -1, -1):
prev_inf = ttinfo[lindexes[j]]
if not prev_inf[1]:
break
dst = inf[0] - prev_inf[0] # dst offset
# Bad dst? Look further. DST > 24 hours happens when
                # a timezone has moved across the international dateline.
if dst <= 0 or dst > 3600 * 3:
for j in range(i + 1, len(transitions)):
stdinf = ttinfo[lindexes[j]]
if not stdinf[1]:
dst = inf[0] - stdinf[0]
if dst > 0:
break # Found a useful std time.
tzname = inf[2]
# Round utcoffset and dst to the nearest minute or the
# datetime library will complain. Conversions to these timezones
# might be up to plus or minus 30 seconds out, but it is
# the best we can do.
utcoffset = int((utcoffset + 30) // 60) * 60
dst = int((dst + 30) // 60) * 60
transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))
cls = type(zone, (DstTzInfo,), dict(
zone=zone,
_utc_transition_times=transitions,
_transition_info=transition_info))
return cls()
if __name__ == '__main__':
import os.path
from pprint import pprint
base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
tz = build_tzinfo('Australia/Melbourne',
open(os.path.join(base, 'Australia', 'Melbourne'), 'rb'))
tz = build_tzinfo('US/Eastern',
open(os.path.join(base, 'US', 'Eastern'), 'rb'))
pprint(tz._utc_transition_times)
|
3,397 |
method
|
import json
from six.moves import urllib, xmlrpc_client
from .util import read_body
import logging
log = logging.getLogger(__name__)
def METHOD_NAME(r1, r2):
assert r1.METHOD_NAME == r2.METHOD_NAME, "{} != {}".format(r1.METHOD_NAME, r2.METHOD_NAME)
def uri(r1, r2):
assert r1.uri == r2.uri, "{} != {}".format(r1.uri, r2.uri)
def host(r1, r2):
assert r1.host == r2.host, "{} != {}".format(r1.host, r2.host)
def scheme(r1, r2):
assert r1.scheme == r2.scheme, "{} != {}".format(r1.scheme, r2.scheme)
def port(r1, r2):
assert r1.port == r2.port, "{} != {}".format(r1.port, r2.port)
def path(r1, r2):
assert r1.path == r2.path, "{} != {}".format(r1.path, r2.path)
def query(r1, r2):
assert r1.query == r2.query, "{} != {}".format(r1.query, r2.query)
def raw_body(r1, r2):
assert read_body(r1) == read_body(r2)
def body(r1, r2):
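    # Normalise both bodies with the same content-type based transformer before
    # comparing; fall back to a raw comparison when the content types differ.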
transformer = _get_transformer(r1)
r2_transformer = _get_transformer(r2)
if transformer != r2_transformer:
transformer = _identity
assert transformer(read_body(r1)) == transformer(read_body(r2))
def headers(r1, r2):
assert r1.headers == r2.headers, "{} != {}".format(r1.headers, r2.headers)
def _header_checker(value, header="Content-Type"):
def checker(headers):
_header = headers.get(header, "")
if isinstance(_header, bytes):
_header = _header.decode("utf-8")
return value in _header.lower()
return checker
def _transform_json(body):
# Request body is always a byte string, but json.loads() wants a text
# string. RFC 7159 says the default encoding is UTF-8 (although UTF-16
# and UTF-32 are also allowed: hmmmmm).
if body:
return json.loads(body.decode("utf-8"))
_xml_header_checker = _header_checker("text/xml")
_xmlrpc_header_checker = _header_checker("xmlrpc", header="User-Agent")
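# Each pair maps a header-based checker to the transformer used to normalise
# request bodies of that content type before comparison.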
_checker_transformer_pairs = (
(
_header_checker("application/x-www-form-urlencoded"),
lambda body: urllib.parse.parse_qs(body.decode("ascii")),
),
(_header_checker("application/json"), _transform_json),
    (lambda headers: _xml_header_checker(headers) and _xmlrpc_header_checker(headers), xmlrpc_client.loads),
)
def _identity(x):
return x
def _get_transformer(request):
for checker, transformer in _checker_transformer_pairs:
if checker(request.headers):
return transformer
else:
return _identity
def requests_match(r1, r2, matchers):
successes, failures = get_matchers_results(r1, r2, matchers)
if failures:
log.debug("Requests {} and {} differ.\n" "Failure details:\n" "{}".format(r1, r2, failures))
return len(failures) == 0
def _evaluate_matcher(matcher_function, *args):
"""
Evaluate the result of a given matcher as a boolean with an assertion error message if any.
It handles two types of matcher :
- a matcher returning a boolean value.
- a matcher that only makes an assert, returning None or raises an assertion error.
"""
assertion_message = None
try:
match = matcher_function(*args)
match = True if match is None else match
except AssertionError as e:
match = False
assertion_message = str(e)
return match, assertion_message
def get_matchers_results(r1, r2, matchers):
"""
Get the comparison results of two requests as two list.
The first returned list represents the matchers names that passed.
The second list is the failed matchers as a string with failed assertion details if any.
"""
matches_success, matches_fails = [], []
for m in matchers:
matcher_name = m.__name__
match, assertion_message = _evaluate_matcher(m, r1, r2)
if match:
matches_success.append(matcher_name)
else:
assertion_message = get_assertion_message(assertion_message)
matches_fails.append((matcher_name, assertion_message))
return matches_success, matches_fails
def get_assertion_message(assertion_details):
"""
Get a detailed message about the failing matcher.
"""
return assertion_details
|
3,398 |
parent geoid
|
import logging
from django.db import models
from django.conf import settings
from django.utils.text import slugify
import requests
log = logging.getLogger(__name__)
CATEGORIES = {
'A': 'metro',
'B': 'local',
'C': 'district',
}
class LocationNotFound(Exception):
pass
class GeographyUpdate(models.Model):
datetime = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "scorecard_geography_update"
verbose_name = "Geography Update"
class Geography(models.Model):
#: The level for this geography (eg. `country`) which, together with
#: `geo_code`, makes up the unique geo id.
geo_level = models.CharField(max_length=15, null=False)
#: The code for this geography which must be unique for this level.
#: Together with `geo_level`, this makes up the unique geo id.
geo_code = models.CharField(max_length=10, null=False)
#: Name of this geography.
name = models.CharField(max_length=100, null=False, db_index=True)
#: Long name of this geography, giving it context (such as a city or province)
#: If this is null, it is computed based on the place's ancestors.
long_name = models.CharField(max_length=100, null=True, db_index=True)
#: Area in square kilometers. Optional.
square_kms = models.FloatField(null=True)
# hierarchy
#: The level of this geography's parent, or `None` if this is the root
#: geography that has no parent.
parent_level = models.CharField(max_length=15, null=True)
#: The code of this geography's parent, or `None` if this is the root
#: geography that has no parent.
parent_code = models.CharField(max_length=10, null=True)
province_name = models.CharField(max_length=100, null=False)
province_code = models.CharField(max_length=5, null=False)
category = models.CharField(max_length=2, null=False)
miif_category = models.TextField(null=True)
population = models.IntegerField(null=True)
postal_address_1 = models.TextField(null=True)
postal_address_2 = models.TextField(null=True)
postal_address_3 = models.TextField(null=True)
street_address_1 = models.TextField(null=True)
street_address_2 = models.TextField(null=True)
street_address_3 = models.TextField(null=True)
street_address_4 = models.TextField(null=True)
phone_number = models.TextField(null=True)
fax_number = models.TextField(null=True)
url = models.TextField(null=True)
class Meta:
unique_together = ('geo_level', 'geo_code')
verbose_name = "Municipality"
verbose_name_plural = "Municipalities"
def __str__(self):
return f'{self.name}'
@property
def category_name(self):
return CATEGORIES[self.category] + ' municipality'
@property
def geoid(self):
return '-'.join([self.geo_level, self.geo_code])
@property
def slug(self):
return slugify(self.name)
@property
def METHOD_NAME(self):
if self.parent_level and self.parent_code:
return '%s-%s' % (self.parent_level, self.parent_code)
return None
@property
def parent(self):
""" The parent of this geograhy, or `None` if this is the root of
the hierarchy.
"""
if not hasattr(self, '_parent'):
if self.parent_level and self.parent_code:
self._parent = self.__class__.objects.filter(
geo_level=self.parent_level, geo_code=self.parent_code).first()
else:
self._parent = None
return self._parent
def ancestors(self):
""" A list of the ancestors of this geography, all the way up to the root.
This is an empty list if this geography is the root of the hierarchy.
"""
ancestors = []
g = self.parent
while g:
ancestors.append(g)
g = g.parent
return ancestors
def as_dict(self):
return {
'ancestors': [a.as_dict() for a in self.ancestors()],
'full_geoid': self.geoid,
'full_name': self.long_name,
'name': self.long_name,
'short_name': self.name,
'geo_level': self.geo_level,
'geo_code': self.geo_code,
'parent_geoid': self.METHOD_NAME,
'square_kms': self.square_kms,
'province_name': self.province_name,
'province_code': self.province_code,
'category': self.category,
'category_name': self.category_name,
'miif_category': self.miif_category,
'slug': self.slug,
}
@property
def bbox(self):
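        # Query the MapIt API for this area's geometry and return its bounding
        # box as [min_lon, min_lat, max_lon, max_lat].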
url = settings.MAPIT['url'] + '/area/MDB:%s/geometry?generation=%s' % (
self.geo_code, settings.MAPIT['generation'])
resp = requests.get(url)
js = resp.json()
return [js[x] for x in ["min_lon", "min_lat", "max_lon", "max_lat"]]
def __unicode__(self):
return self.full_name
@classmethod
def find(cls, geo_code, geo_level):
geo = cls.objects.filter(
geo_level=geo_level, geo_code=geo_code).first()
if not geo:
raise LocationNotFound(
"Invalid level, code: %s-%s" % (geo_level, geo_code))
return geo
@classmethod
def get_locations_from_coords(cls, longitude, latitude):
"""
Returns a list of geographies containing this point.
"""
url = settings.MAPIT['url'] + '/point/4326/%s,%s?generation=%s' % (
longitude, latitude, settings.MAPIT['generation'])
resp = requests.get(url)
resp.raise_for_status()
geos = []
for feature in resp.json().values():
try:
geo = cls.find(feature['codes']['MDB'],
feature['type_name'].lower())
if geo.geo_level in ['municipality', 'district']:
geos.append(geo)
except LocationNotFound as e:
log.warn("Couldn't find geo that Mapit gave us: %s" %
feature, exc_info=e)
return geos
|
3,399 |
warnings
|
"""
The standard stream parser interface for VASP.
----------------------------------------------
Contains the parsing interfaces to ``parsevasp`` used to parse standard streams
for VASP related notification, warnings and errors.
"""
# pylint: disable=abstract-method
import re
from parsevasp.stream import Stream
from aiida_vasp.parsers.content_parsers.base import BaseFileParser
class StreamParser(BaseFileParser):
"""Parser used for parsing errors and warnings from VASP."""
DEFAULT_SETTINGS = {'quantities_to_parse': ['notifications']}
PARSABLE_QUANTITIES = {
'notifications': {
'inputs': [],
'name': 'notifications',
'prerequisites': [],
}
}
def _init_from_handler(self, handler):
"""Initialize a ``parsevasp`` object of ``Stream`` using a file like handler.
Parameters
----------
handler : object
A file like object that provides the necessary standard stream content to be parsed.
"""
# First get any special config from the parser settings, else use the default
stream_config = None
history = False
if self._settings is not None:
stream_config = self._settings.get('stream_config', None)
history = self._settings.get('stream_history', False)
try:
self._content_parser = Stream(
file_handler=handler, logger=self._logger, history=history, config=stream_config
)
except SystemExit:
self._logger.warning('Parsevasp exited abnormally.')
@property
def notifications(self):
"""Fetch the notifications that VASP generated.
Returns
-------
notifications : list
A list of all notifications from VASP. Each entry is a dict with the keys ``name``, ``kind``, ``message``
and ``regex`` containing name of the message, what kind it is (``ERROR`` or ``WARNING``), a description
of the notification and the regular expression detected as string values.
"""
# ``parsevasp`` returns ``VaspStream`` objects, which we cannot serialize. We could serialize this, but
# eventually, we would like to move to a dedicated node for the notifications with its own data class.
# This should be fixed in AiiDA core and coordinated across many plugins. For now, we convert the relevant info
# into dict entries explicitly.
notifications = []
for item in self._content_parser.entries:
if isinstance(item.regex, type(re.compile(''))):
regex = item.regex.pattern
else:
regex = item.regex
notifications.append({'name': item.shortname, 'kind': item.kind, 'message': item.message, 'regex': regex})
return notifications
@property
def errors(self):
"""Fetch the errors that VASP generated.
Returns
-------
errors : list
A list of all errors from VASP. Each entry is a dict with the keys ``name``, ``kind``, ``message``
and ``regex`` containing name of the message, what kind it is (``ERROR`` or ``WARNING``), a description
of the error and the regular expression detected as string values.
"""
return [item for item in self._content_parser.entries if item.kind == 'ERROR']
@property
def METHOD_NAME(self):
"""Fetch the warnings that VASP generated.
Returns
-------
warnings : list
A list of all warnings from VASP. Each entry is a dict with the keys ``name``, ``kind``, ``message``
and ``regex`` containing name of the message, what kind it is (``ERROR`` or ``WARNING``), a description
            of the warning and the regular expression detected as string values.
"""
return [item for item in self._content_parser.entries if item.kind == 'WARNING']
@property
def has_entries(self):
"""Check if there are notifications from VASP present according to the config after parsning.
Returns
-------
entries : bool
            ``True`` if notifications were detected, ``False`` otherwise.
"""
entries = self._content_parser.has_entries
return entries
@property
def number_of_entries(self):
"""Find the number of unique notifications from VASP.
Returns
-------
number_of_entries : int
The number of unique notification entries that VASP generated.
"""
number_of_entries = len(self._content_parser)
return number_of_entries
|