id (int64, 0–300k) | label (stringlengths 1–74, ⌀) | text (stringlengths 4k–8k) |
---|---|---|
3,200 |
test nddataarray from nddataarray
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module contains tests of a class equivalent to pre-1.0 NDData.
import numpy as np
import pytest
from astropy import units as u
from astropy.nddata.compat import NDDataArray
from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import StdDevUncertainty
from astropy.wcs import WCS
NDDATA_ATTRIBUTES = [
"mask",
"flags",
"uncertainty",
"unit",
"shape",
"size",
"dtype",
"ndim",
"wcs",
"convert_unit_to",
]
def test_nddataarray_has_attributes_of_old_nddata():
ndd = NDDataArray([1, 2, 3])
for attr in NDDATA_ATTRIBUTES:
assert hasattr(ndd, attr)
def test_nddata_simple():
nd = NDDataArray(np.zeros((10, 10)))
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
def test_nddata_parameters():
# Test for issue 4620
nd = NDDataArray(data=np.zeros((10, 10)))
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
# Change order; `data` has to be given explicitly here
nd = NDDataArray(meta={}, data=np.zeros((10, 10)))
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
# Pass uncertainty as second implicit argument
data = np.zeros((10, 10))
uncertainty = StdDevUncertainty(0.1 + np.zeros_like(data))
nd = NDDataArray(data, uncertainty)
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
assert nd.uncertainty == uncertainty
def test_nddata_conversion():
nd = NDDataArray(np.array([[1, 2, 3], [4, 5, 6]]))
assert nd.size == 6
assert nd.dtype == np.dtype(int)
@pytest.mark.parametrize(
"flags_in",
[
np.array([True, False]),
np.array([1, 0]),
[True, False],
[1, 0],
np.array(["a", "b"]),
["a", "b"],
],
)
def test_nddata_flags_init_without_np_array(flags_in):
ndd = NDDataArray([1, 1], flags=flags_in)
assert (ndd.flags == flags_in).all()
@pytest.mark.parametrize("shape", [(10,), (5, 5), (3, 10, 10)])
def test_nddata_flags_invalid_shape(shape):
with pytest.raises(ValueError) as exc:
NDDataArray(np.zeros((10, 10)), flags=np.ones(shape))
assert exc.value.args[0] == "dimensions of flags do not match data"
def test_convert_unit_to():
# convert_unit_to should return a copy of its input
d = NDDataArray(np.ones((5, 5)))
d.unit = "km"
d.uncertainty = StdDevUncertainty(0.1 + np.zeros_like(d))
# workaround because zeros_like does not support dtype arg until v1.6
# and NDData accepts only bool ndarray as mask
tmp = np.zeros_like(d.data)
d.mask = np.array(tmp, dtype=bool)
d1 = d.convert_unit_to("m")
assert np.all(d1.data == np.array(1000.0))
assert np.all(d1.uncertainty.array == 1000.0 * d.uncertainty.array)
assert d1.unit == u.m
# changing the output mask should not change the original
d1.mask[0, 0] = True
assert d.mask[0, 0] != d1.mask[0, 0]
d.flags = np.zeros_like(d.data)
d1 = d.convert_unit_to("m")
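# Hedged illustration (added, not part of the original test suite): a minimal
# standalone sketch of the copy semantics exercised above; converting units
# should rescale the data in a new object and leave the original untouched.
def _example_convert_unit_copy():
    nd_km = NDDataArray(np.ones(3), unit="km")
    nd_m = nd_km.convert_unit_to("m")
    assert nd_km.unit == u.km  # original keeps its unit
    assert np.all(nd_m.data == 1000.0)  # the copy carries the rescaled values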
# check that subclasses can require wcs and/or unit to be present and use
# _arithmetic and convert_unit_to
class SubNDData(NDDataArray):
"""
Subclass for test initialization of subclasses in NDData._arithmetic and
NDData.convert_unit_to
"""
def __init__(self, *arg, **kwd):
super().__init__(*arg, **kwd)
if self.unit is None:
raise ValueError("Unit for subclass must be specified")
if self.wcs is None:
raise ValueError("WCS for subclass must be specified")
def test_init_of_subclass_in_convert_unit_to():
data = np.ones([10, 10])
arr1 = SubNDData(data, unit="m", wcs=WCS(naxis=2))
result = arr1.convert_unit_to("km")
np.testing.assert_array_equal(arr1.data, 1000 * result.data)
# Test for issue #4129:
def METHOD_NAME():
ndd1 = NDDataArray(
[1.0, 4.0, 9.0], uncertainty=StdDevUncertainty([1.0, 2.0, 3.0]), flags=[0, 1, 0]
)
ndd2 = NDDataArray(ndd1)
# Test that the 2 instances point to the same objects and aren't just
# equal; this is explicitly documented for the main data array and we
# probably want to catch any future change in behavior for the other
# attributes too and ensure they are intentional.
assert ndd2.data is ndd1.data
assert ndd2.uncertainty is ndd1.uncertainty
assert ndd2.flags is ndd1.flags
assert ndd2.meta == ndd1.meta
# Test for issue #4137:
def test_nddataarray_from_nddata():
ndd1 = NDData([1.0, 4.0, 9.0], uncertainty=StdDevUncertainty([1.0, 2.0, 3.0]))
ndd2 = NDDataArray(ndd1)
assert ndd2.data is ndd1.data
assert ndd2.uncertainty is ndd1.uncertainty
assert ndd2.meta == ndd1.meta
|
3,201 |
f 6
|
import itertools
import numpy as np
from numpy import exp
from numpy.testing import assert_, assert_equal
from scipy.optimize import root
def test_performance():
# Compare performance results to those listed in
# [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
# and
# [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
# and those produced by dfsane.f from M. Raydan's website.
#
# Where the results disagree, the largest limits are taken.
e_a = 1e-5
e_r = 1e-4
table_1 = [
dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
# dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors
dict(F=METHOD_NAME, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3
dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6?
dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12
]
# Check also scaling invariance
for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],
['cruz', 'cheng']):
for problem in table_1:
n = problem['n']
func = lambda x, n: yscale*problem['F'](x/xscale, n)
args = (n,)
x0 = problem['x0'](n) * xscale
fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
sigma_0 = xscale/yscale
with np.errstate(over='ignore'):
sol = root(func, x0, args=args,
options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
sigma_0=sigma_0, sigma_eps=sigma_eps,
line_search=line_search),
method='DF-SANE')
err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
fatol, sol.success, sol.nit, sol.nfev])
assert_(sol.success, err_msg)
assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval
assert_(sol.nit <= problem['nit'], err_msg)
assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg)
def test_complex():
def func(z):
return z**2 - 1 + 2j
x0 = 2.0j
ftol = 1e-4
sol = root(func, x0, tol=ftol, method='DF-SANE')
assert_(sol.success)
f0 = np.linalg.norm(func(x0))
fx = np.linalg.norm(func(sol.x))
assert_(fx <= ftol*f0)
def test_linear_definite():
# The DF-SANE paper proves convergence for "strongly isolated"
# solutions.
#
# For linear systems F(x) = A x - b = 0, with A positive or
# negative definite, the solution is strongly isolated.
def check_solvability(A, b, line_search='cruz'):
func = lambda x: A.dot(x) - b
xp = np.linalg.solve(A, b)
eps = np.linalg.norm(func(xp)) * 1e3
sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
method='DF-SANE')
assert_(sol.success)
assert_(np.linalg.norm(func(sol.x)) <= eps)
n = 90
# Test linear pos.def. system
np.random.seed(1234)
A = np.arange(n*n).reshape(n, n)
A = A + n*n * np.diag(1 + np.arange(n))
assert_(np.linalg.eigvals(A).min() > 0)
b = np.arange(n) * 1.0
check_solvability(A, b, 'cruz')
check_solvability(A, b, 'cheng')
# Test linear neg.def. system
check_solvability(-A, b, 'cruz')
check_solvability(-A, b, 'cheng')
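# Hedged sketch (added, illustrative values only): a tiny 3x3 positive definite
# diagonal system showing the "strongly isolated" linear case discussed above;
# DF-SANE should converge to the unique solution without trouble.
def _example_small_posdef():
    A = np.diag([2.0, 3.0, 4.0])
    b = np.array([1.0, 1.0, 1.0])
    sol = root(lambda x: A.dot(x) - b, np.zeros(3), method='DF-SANE')
    assert sol.success
    assert np.allclose(A.dot(sol.x), b, atol=1e-6)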
def test_shape():
def f(x, arg):
return x - arg
for dt in [float, complex]:
x = np.zeros([2,2])
arg = np.ones([2,2], dtype=dt)
sol = root(f, x, args=(arg,), method='DF-SANE')
assert_(sol.success)
assert_equal(sol.x.shape, x.shape)
# Some of the test functions and initial guesses listed in
# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
def F_1(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0] - 1) - 1
g[1:] = i*(exp(x[1:] - 1) - x[1:])
return g
def x0_1(n):
x0 = np.empty([n])
x0.fill(n/(n-1))
return x0
def F_2(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0]) - 1
g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
return g
def x0_2(n):
x0 = np.empty([n])
x0.fill(1/n**2)
return x0
def F_4(x, n):
assert_equal(n % 3, 0)
g = np.zeros([n])
# Note: the first line is typoed in some of the references;
# correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16
g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
return g
def x0_4(n):
assert_equal(n % 3, 0)
x0 = np.array([-1, 1/2, -1] * (n//3))
return x0
def METHOD_NAME(x, n):
c = 0.9
mu = (np.arange(1, n+1) - 0.5)/n
return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
def x0_6(n):
return np.ones([n])
def F_7(x, n):
assert_equal(n % 3, 0)
def phi(t):
v = 0.5*t - 2
v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
v[t >= 2] = (0.5*t + 2)[t >= 2]
return v
g = np.zeros([n])
g[::3] = 1e4 * x[1::3]**2 - 1
g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
g[2::3] = phi(x[2::3])
return g
def x0_7(n):
assert_equal(n % 3, 0)
return np.array([1e-3, 18, 1] * (n//3))
def F_9(x, n):
g = np.zeros([n])
i = np.arange(2, n)
g[0] = x[0]**3/3 + x[1]**2/2
g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
return g
def x0_9(n):
return np.ones([n])
def F_10(x, n):
return np.log(1 + x) - x/n
def x0_10(n):
return np.ones([n])
|
3,202 |
signature
|
"""
Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This module defines the intermediate data structure of inputs.
"""
import inspect
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import paddle
from paddle import Tensor
from ..transformers.model_outputs import MaskedLMOutput, SequenceClassifierOutput
from ..transformers.tokenizer_utils_base import PaddingStrategy, PretrainedTokenizerBase
def METHOD_NAME(function):
"""
Obtain the input arguments of the given function.
"""
sig = inspect.METHOD_NAME(function)
args = [p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
return args
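# Hedged illustration (added): expected behaviour of the helper above on a
# plain function; only positional-or-keyword parameters are reported, so
# *args/**kwargs are excluded.
# def _example_fn(a, b, c=1, *args, **kwargs): ...
# METHOD_NAME(_example_fn)  ->  ["a", "b", "c"]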
@dataclass
class PromptDataCollatorWithPadding:
"""
Data collator that will group inputs by keywords and dynamically
pad the inputs to the longest sequence in the batch.
Args:
tokenizer (`paddlenlp.transformers.PretrainedTokenizer`):
The tokenizer used for encoding the data from PromptTokenizer.
"""
tokenizer: PretrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
return_tensors: str = "pd"
return_attention_mask: Optional[bool] = None
default_model_input_names: List = (
"input_ids",
"token_type_ids",
"special_tokens_mask",
"offset_mapping",
"position_ids",
)
def _convert_to_tensors(self, data):
if self.return_tensors == "np":
return np.array(data)
else:
return paddle.to_tensor(data)
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
batch = {}
for key in features[0]:
if key in self.default_model_input_names:
batch[key] = [b[key] for b in features]
batch = self.tokenizer.pad(
batch,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors,
return_attention_mask=self.return_attention_mask,
)
max_length = batch["input_ids"].shape[1]
for key in features[0]:
if key not in self.default_model_input_names:
values = [b[key] for b in features if key in b]
if len(values) < len(features):
continue
if key == "masked_positions":
new_values = []
for index, value in enumerate(values):
value = np.array(value) + index * max_length
new_values.extend(value.tolist())
values = new_values
elif key == "attention_mask":
new_values = np.ones([len(values), 1, max_length, max_length]) * -1e4
for index, value in enumerate(values):
length = len(value)
new_values[index][0, :length, :length] = value
values = new_values
elif key in ("soft_token_ids", "encoder_ids"):
for index, value in enumerate(values):
values[index] = value + [0] * (max_length - len(value))
elif key in ("omask_positions"):
max_num_option = max([len(x) for x in values])
for index, value in enumerate(values):
values[index] = value + [0] * (max_num_option - len(value))
elif key == "labels":
if isinstance(values[0], list):
max_num_label = max([len(x) for x in values])
for index, value in enumerate(values):
values[index] = value + [-100] * (max_num_label - len(value))
elif key != "cls_positions":
continue
batch[key] = self._convert_to_tensors(values)
return batch
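# Hedged usage sketch (added; `my_tokenizer` is a hypothetical
# paddlenlp.transformers tokenizer instance, not defined in this module):
# collator = PromptDataCollatorWithPadding(tokenizer=my_tokenizer, return_tensors="np")
# batch = collator([{"input_ids": [1, 2, 3], "soft_token_ids": [0, 1, 1]},
#                   {"input_ids": [1, 2], "soft_token_ids": [0, 1]}])
# "soft_token_ids" gets zero-padded to the padded input length, as handled above.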
def sequence_classification_forward_with_past_key_values(
self,
input_ids: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
labels: Optional[Tensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
past_key_values: Optional[Tuple[Tuple[Tensor]]] = None,
):
outputs = self.ernie(
input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
past_key_values=past_key_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
loss_fct = paddle.nn.MSELoss()
loss = loss_fct(logits, labels)
elif labels.dtype == paddle.int64 or labels.dtype == paddle.int32:
loss_fct = paddle.nn.CrossEntropyLoss()
loss = loss_fct(logits.reshape((-1, self.num_labels)), labels.reshape((-1,)))
else:
loss_fct = paddle.nn.BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def masked_lm_forward_with_past_key_values(
self,
input_ids: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
masked_positions: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
labels: Optional[Tensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
past_key_values: Optional[Tuple[Tuple[Tensor]]] = None,
):
outputs = self.ernie(
input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
past_key_values=past_key_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output, masked_positions=masked_positions)
masked_lm_loss = None
if labels is not None:
loss_fct = paddle.nn.CrossEntropyLoss()
masked_lm_loss = loss_fct(
prediction_scores.reshape((-1, paddle.shape(prediction_scores)[-1])), labels.reshape((-1,))
)
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
3,203 |
preprocess
|
# -*- coding: utf-8 -*-
'''
Created on 04 Mar 2017
@author: Guilherme Stiebler
Copyright © 2017 Guilherme Stiebler, Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
'''
import logging
import warnings
import numpy
import cv2
from scipy import ndimage
from odemis import model
from odemis.util import img
if int(cv2.__version__[0]) <= 2:
cv2.ORB_create = cv2.ORB
# Sift is not installed by default, check first if it's available
if hasattr(cv2, 'SIFT'):
cv2.SIFT_create = cv2.SIFT
# The brute-force matcher works in theory a bit better than the Flann-based one,
# but slower. In practice, it doesn't seem to show better results, and if there
# are many keypoints (e.g., 2000) the slow-down can be a couple of seconds.
USE_BF = False # Use BruteForce matcher
USE_KNN = True # Use k-nearest neighbour matching method
# Missing defines from OpenCV
FLANN_INDEX_LINEAR = 0
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_KMEANS = 2
FLANN_INDEX_LSH = 6
def FindTransform(ima, imb, fd_type=None):
"""
ima(DataArray of shape YaXa with uint8): Image to be aligned
imb(DataArray of shape YbXb with uint8): Base image
Note that the shape doesn't need to have any relationship with the shape of the
first image (it doesn't even need to be the same ratio)
fd_type(None or str): Feature detector type. Must be 'SIFT' or 'ORB'. ORB is faster,
but SIFT usually has better results. If None, it will pick the best available.
return (ndarray of shape (3, 3), list, list, list, list): transformation matrix to align the
first image on the base image (right column is translation), followed by the detected
keypoints of both images and the matched keypoints of both images
raises:
ValueError: if no good transformation is found.
"""
# FIXME: modify this method to make it work reliably
# Instantiate the feature detector and the matcher
# TODO: try BRISK, AKAZE and other detectors?
if fd_type is None:
for fd in ("SIFT", "ORB"):
if hasattr(cv2, "%s_create" % fd):
fd_type = fd
break
if fd_type == "ORB":
feature_detector = cv2.ORB_create()
if USE_BF:
matcher = cv2.BFMatcher(normType=cv2.NORM_HAMMING)
else:
index_params = dict(algorithm=FLANN_INDEX_LSH,
table_number=6, # 12
key_size=12, # 20
multi_probe_level=1) # 2
search_params = {}
matcher = cv2.FlannBasedMatcher(index_params, search_params)
elif fd_type == "SIFT":
# Extra arguments for SIFT
# contrastThreshold = 0.04
# edgeThreshold = 10
# sigma = 1.6 # TODO: no need for Gaussian as preprocess already does it?
feature_detector = cv2.SIFT_create(nfeatures=2000) # avoid going crazy on keypoints
if USE_BF:
matcher = cv2.BFMatcher(normType=cv2.NORM_L2)
else:
# Note: with KDTree, every call returns slightly different matches,
# which is quite annoying for reproducibility
# index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
index_params = dict(algorithm=FLANN_INDEX_KMEANS)
search_params = dict(checks=32) # default value
matcher = cv2.FlannBasedMatcher(index_params, search_params)
else:
raise ValueError("Unknown feature detector %s" % (fd_type,))
logging.debug("Using feature detector %s", fd_type)
# find and compute the descriptors
ima_kp, ima_des = feature_detector.detectAndCompute(ima, None)
imb_kp, imb_des = feature_detector.detectAndCompute(imb, None)
logging.debug("Found %d and %d keypoints", len(ima_kp), len(imb_kp))
# run the matcher of the detected features
if USE_KNN:
# For each keypoint, return up to k(=2) best ones in the other image
matches = matcher.knnMatch(ima_des, imb_des, k=2)
# store all the good matches as per Lowe's ratio test
dist_ratio = 0.75
selected_matches = [m[0] for m in matches
if len(m) == 2 and m[0].distance < m[1].distance * dist_ratio]
else:
# For each keypoint, pick the closest one in the other image
matches = matcher.match(ima_des, imb_des)
# Pick up to the best 10 matches
min_dist = 100 # almost random value
selected_matches = [m for m in matches if m.distance < min_dist]
selected_matches.sort(key=lambda m: m.distance)
selected_matches = selected_matches[:10]
logging.debug("Found %d matches and %d good ones", len(matches), len(selected_matches))
if len(selected_matches) < 5:
raise ValueError("Less than 5 common features (%d) detected on the images" %
(len(selected_matches),))
# get keypoints for selected matches
selected_ima_kp = [list(ima_kp[m.queryIdx].pt) for m in selected_matches]
selected_imb_kp = [list(imb_kp[m.trainIdx].pt) for m in selected_matches]
selected_ima_kp = numpy.array([selected_ima_kp])
selected_imb_kp = numpy.array([selected_imb_kp])
ima_mkp = [ima_kp[m.queryIdx] for m in selected_matches]
imb_mkp = [imb_kp[m.trainIdx] for m in selected_matches]
# testing detecting the matching points automatically
try:
mat, mask = cv2.findHomography(selected_ima_kp, selected_imb_kp, cv2.RANSAC)
except Exception:
raise ValueError("The images does not match")
if mat is None:
raise ValueError("The images does not match")
return mat, ima_kp, imb_kp, ima_mkp, imb_mkp
def METHOD_NAME(im, invert, flip, crop, gaussian_sigma, eqhis):
'''
Typical preprocessing steps needed before performing keypoint matching
im (DataArray): Input image
invert (bool): Invert the brightness levels of the image
flip (tuple(bool, bool)): Determine if the image should be flipped on the X and Y axis
crop (tuple(t,b,l,r)): Crop values in pixels
gaussian_sigma (int): Blur intensity
eqhis (bool): If True, a histogram equalisation is performed (and the data
type is set to uint8)
return (DataArray of same shape): Processed image
'''
try:
metadata = im.metadata
except AttributeError:
metadata = {}
flip_x, flip_y = flip
# flip on X axis
if flip_x:
im = im[:, ::-1]
# flip on Y axis
if flip_y:
im = im[::-1, :]
crop_top, crop_bottom, crop_left, crop_right = crop
# remove the bar
im = im[crop_top:im.shape[0] - crop_bottom, crop_left:im.shape[1] - crop_right]
# Invert the image brightness
if invert:
# mn = im.min()
mx = im.max()
im = mx - im
# equalize histogram
if eqhis:
if im.dtype != numpy.uint8:
# OpenCV histogram equalisation only works on uint8 data
rgb_im = img.DataArray2RGB(im)
im = rgb_im[:, :, 0]
im = cv2.equalizeHist(im)
# blur the image using a gaussian filter
if gaussian_sigma:
im = ndimage.gaussian_filter(im, sigma=gaussian_sigma)
# return a new DataArray with the metadata of the original image
return model.DataArray(im, metadata)
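# Hedged usage sketch (added; the input array below is synthetic, not Odemis data):
# im = model.DataArray(numpy.random.randint(0, 4096, (256, 256)).astype(numpy.uint16))
# prepared = METHOD_NAME(im, invert=False, flip=(False, True),
#                        crop=(0, 0, 0, 0), gaussian_sigma=2, eqhis=True)
# `prepared` is uint8 (after equalisation), flipped on the Y axis and lightly blurred.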
|
3,204 |
plot statistic
|
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import pandas as pd
import math
DEFAULT_SEQUENCE_WEIGHTS = {0: 1, 1: 1, 2: 1, 3: 1, 4: 1}
SEQUENCE_CRITERIA = "Sequence"
QUESTION_ID_CRITERIA = "Question"
EQUAL_CRITERIA = "Equal"
class Question_Answer_Scorer:
def __init__(self,name,function,criteria,sequence_weights=DEFAULT_SEQUENCE_WEIGHTS):
self.function = function
self.criteria = criteria
self.name = name
self.seq_w = sequence_weights
def get_criteria(self,answer):
if self.criteria == SEQUENCE_CRITERIA:
return answer.get_sequence_avg_time()
elif self.criteria == QUESTION_ID_CRITERIA:
return answer.get_question_avg_time()
elif self.criteria == EQUAL_CRITERIA:
return 60
def get_relative_criteria(self,answer):
if self.criteria == SEQUENCE_CRITERIA:
return answer.get_sequence_avg_relative_time()
elif self.criteria == QUESTION_ID_CRITERIA:
return answer.get_question_avg_relative_time()
elif self.criteria == EQUAL_CRITERIA:
return 0.2
def get_relative_score(self,answer):
criteria_value = self.get_relative_criteria(answer)
answer_time = answer.get_relative_time_taken()
return self.function(answer_time,criteria_value)
def get_score(self,answer):
criteria_value = self.get_criteria(answer)
answer_time = answer.get_time_taken()
answer_weight = self.seq_w[answer.sequence]
return self.function(answer_time,criteria_value) * answer_weight
def difference(time_1,time_2):
return time_1-time_2
def abs_difference(time_1,time_2):
return abs(time_1 - time_2)
def positive_difference(time_1,time_2):
return max(time_1 - time_2, 0)
def get_quiz_stat_dataframe(quiz_stat):
stat_dataframe = pd.DataFrame.from_dict(quiz_stat.scores, orient="index", columns=["score","partial_scores"])
stat_dataframe["user"] = stat_dataframe.index
stat_dataframe["quiz"] = quiz_stat.quiz.title
return stat_dataframe
def get_quiz_scores_dataframe(quiz_stat):
stat_dataframe = pd.DataFrame.from_dict(quiz_stat.statistic, orient="index",columns= ["score"])
stat_dataframe["user"] = stat_dataframe.index
stat_dataframe["quiz"] = quiz_stat.quiz.title
stat_dataframe["scorer"] = quiz_stat.scorer.name
return stat_dataframe
def calculate_quiz_answer_score(quiz_answer, scorer, use_relative_scores= False, ):
result = {'score': 0,'question_answers_scores': {}}
for question_answer in quiz_answer.answers:
if not use_relative_scores:
score = scorer.get_score(question_answer)
else:
score = scorer.get_relative_score(question_answer)
result['question_answers_scores'][question_answer.sequence] = score
result['score'] += score
return result
def calculate_quiz_scores(quiz,scorer,use_relative_scores=False):
result = {}
for user_answer in quiz.quiz_answers:
result[user_answer.user_id] = calculate_quiz_answer_score(user_answer,scorer,use_relative_scores)
return {k: v for k, v in reversed(sorted(result.items(), key=lambda item: item[1]['score']))}
class Quiz_Statistic():
def __init__(self, quiz, scorer):
self.quiz = quiz
self.scorer = scorer
self.scores = calculate_quiz_scores(quiz,scorer)
self.statistic = {user: value['score'] for user, value in self.scores.items()}
self.shapiro = self.test_normal_statistic()
self.follows_normal_distribution = self.shapiro if not self.shapiro else self.shapiro.pvalue > 0.05
if(self.follows_normal_distribution):
self.normal_dist = self.calc_normal_theoretical_dist()
def calc_normal_theoretical_dist(self):
values = list(self.statistic.values())
self.avg = np.average(values)
self.std = np.std(values)
def get_outliers(self,ppf):
if (self.follows_normal_distribution):
ppf = stats.norm.ppf(ppf,self.avg,self.std)
return list(dict(filter(lambda user: user[1] > ppf , self.statistic.items())).keys())
else:
nr_users = len(self.statistic.keys())
stop_index = math.floor(nr_users - nr_users * ppf)
return list(self.statistic.keys())[:stop_index]
def test_normal_statistic(self):
if (len(self.statistic) < 4):
return False
shapiro_test = stats.shapiro(list(self.statistic.values()))
return shapiro_test
def METHOD_NAME(self):
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(30, 5))
ax1.hist(self.statistic.values(), bins=5,density=True)
ax1.set_title("bin-size: 5")
ax2.hist(self.statistic.values(), bins=10,density=True)
ax2.set_title("bin-size: 10")
ax3.hist(self.statistic.values(), bins=20,density=True)
ax3.set_title("bin-size: 20")
plt.suptitle(
self.quiz.title + f" {'drawn from normal' if self.follows_normal_distribution else 'NOT drawn from normal'}")
if(self.follows_normal_distribution):
norm_color = 'tab:red'
x = np.linspace(self.avg - 3*self.std, self.avg + 3*self.std, 100)
pdf = stats.norm.pdf(x, self.avg, self.std)
ax1.plot(x, pdf,color=norm_color)
ax2.plot(x, pdf,color=norm_color)
ax3.plot(x, pdf,color=norm_color)
fig.tight_layout()
plt.show()
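# Hedged aside (added, illustrative only): the Shapiro-Wilk check in
# test_normal_statistic treats p > 0.05 as "cannot reject normality"; e.g.
# stats.shapiro(np.random.default_rng(0).normal(size=50)).pvalue is typically
# well above 0.05, while the same call on exponential samples is usually far below it.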
QUIZ_SCORERS = {
"Diff. Mean (Question ID)": Question_Answer_Scorer("Diff. Mean (Question ID)",difference,QUESTION_ID_CRITERIA,{0:1, 1:0.6, 2:0.2, 3:-0.2, 4:-0.6}),
"Positive Diff. Mean (Question ID)": Question_Answer_Scorer("Positive Diff. Mean (Question ID)",positive_difference,QUESTION_ID_CRITERIA,{0:1, 1:0.6, 2:0.2, 3:-0.2, 4:-0.6}),
"Absolute Diff. Mean (Question ID)": Question_Answer_Scorer("Absolute Diff. Mean (Question ID)",abs_difference,QUESTION_ID_CRITERIA,{0:1, 1:0.6, 2:0.2, 3:-0.2, 4:-0.6}),
"Diff. Mean (Sequence ID)": Question_Answer_Scorer("Diff. Mean (Sequence ID)",difference,SEQUENCE_CRITERIA,{0:1, 1:0.6, 2:0.2, 3:-0.2, 4:-0.6}),
"Positive Diff. Mean (Sequence ID)": Question_Answer_Scorer("Positive Diff. Mean (Sequence ID)",positive_difference,SEQUENCE_CRITERIA,{0:1, 1:0.6, 2:0.2, 3:-0.2, 4:-0.6}),
"Absolute Diff. Mean (Sequence ID)": Question_Answer_Scorer("Absolute Diff. Mean (Sequence ID)",abs_difference,SEQUENCE_CRITERIA,{0:1, 1:0.6, 2:0.2, 3:-0.2, 4:-0.6}),
}
|
3,205 |
url parameters
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"dynatrace monitor list-linkable-environment"
)
class ListLinkableEnvironment(AAZCommand):
"""Get all the dynatrace environments that a user can link a azure resource to
:example: List-linkable-environment
az dynatrace monitor list-linkable-environment -g rg --monitor-name monitor --user-principal [email protected] --region eastus2euap
"""
_aaz_info = {
"version": "2021-09-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/dynatrace.observability/monitors/{}/listlinkableenvironments", "2021-09-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.monitor_name = AAZStrArg(
options=["--monitor-name"],
help="Monitor resource name",
required=True,
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
# define Arg Group "Request"
_args_schema = cls._args_schema
_args_schema.region = AAZStrArg(
options=["--region"],
arg_group="Request",
help="Azure region in which we want to link the environment",
)
_args_schema.tenant_id = AAZStrArg(
options=["--tenant-id"],
arg_group="Request",
help="Tenant Id of the user in which they want to link the environment",
)
_args_schema.user_principal = AAZStrArg(
options=["--user-principal"],
arg_group="Request",
help="user principal id of the user",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.MonitorsListLinkableEnvironments(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class MonitorsListLinkableEnvironments(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/listLinkableEnvironments",
**self.METHOD_NAME
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"monitorName", self.ctx.args.monitor_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-09-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("region", AAZStrType, ".region")
_builder.set_prop("tenantId", AAZStrType, ".tenant_id")
_builder.set_prop("userPrincipal", AAZStrType, ".user_principal")
return self.serialize_content(_content_value)
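# Hedged illustration (added): with `--region eastus2euap --user-principal
# [email protected]` the body built above serializes roughly to
# {"region": "eastus2euap", "userPrincipal": "[email protected]"}; tenantId is
# omitted when --tenant-id is not passed.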
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.environment_id = AAZStrType(
serialized_name="environmentId",
)
_element.environment_name = AAZStrType(
serialized_name="environmentName",
)
_element.plan_data = AAZObjectType(
serialized_name="planData",
)
plan_data = cls._schema_on_200.value.Element.plan_data
plan_data.billing_cycle = AAZStrType(
serialized_name="billingCycle",
)
plan_data.effective_date = AAZStrType(
serialized_name="effectiveDate",
)
plan_data.plan_details = AAZStrType(
serialized_name="planDetails",
)
plan_data.usage_type = AAZStrType(
serialized_name="usageType",
)
return cls._schema_on_200
__all__ = ["ListLinkableEnvironment"]
|
3,206 |
calculate eer
|
"""
Usage:
This script evaluates the classification accuracy/error rate from the embeddings extracted
by gen_audio_embedding.py
Example (LID classification)
PYTHONPATH='.' python examples/wav2vec/eval_speaker_clf_task.py \
--data /fsx/androstj/exps/lid_voxlingua/infer/atj_xlsr2_100pct_300M_mean_fast_upd_100k_new.npz \
--task cls --merge mean_logit
"""
import numpy as np
import sklearn
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import ipdb
import logging
import argparse
from scipy.special import softmax
log=logging.getLogger(__name__)
log.setLevel(logging.INFO)
def METHOD_NAME(y_label, y_score):
# y_label denotes the ground-truth labels,
# y_score denotes the prediction scores.
from scipy.optimize import brentq
from sklearn.metrics import roc_curve
from scipy.interpolate import interp1d
fpr, tpr, thresholds = roc_curve(y_label, y_score, pos_label=1)
eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
optimal_threshold = interp1d(fpr, thresholds)(eer)
return eer, optimal_threshold
def calculate_minDCF(y_label, y_score, p_target=0.01, c_miss=1, c_fa=1):
# https://github.com/kaldi-asr/kaldi/blob/master/egs/sre08/v1/sid/compute_min_dcf.py
from sklearn.metrics import det_curve
fpr, fnr, thresholds = det_curve(y_label, y_score, pos_label=1)
min_c_det = float("inf")
min_c_det_threshold = thresholds[0]
for i in range(0, len(fpr)):
# See Equation (2). it is a weighted sum of false negative
# and false positive errors.
c_det = c_miss * fnr[i] * p_target + c_fa * fpr[i] * (1 - p_target)
if c_det < min_c_det:
min_c_det = c_det
min_c_det_threshold = thresholds[i]
# See Equations (3) and (4). Now we normalize the cost.
c_def = min(c_miss * p_target, c_fa * (1 - p_target))
min_dcf = min_c_det / c_def
return min_dcf, min_c_det_threshold
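# Hedged usage sketch (added): labels are 0/1 ground truth and scores are
# similarity values where higher means "same class"; both helpers return a
# (metric, threshold) pair.
# _labels = np.array([1, 0, 1, 0]); _scores = np.array([0.9, 0.3, 0.7, 0.4])
# eer, eer_thr = METHOD_NAME(_labels, _scores)
# min_dcf, dcf_thr = calculate_minDCF(_labels, _scores)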
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', help='npz contains name & latent file')
parser.add_argument('--task', choices=['cls', 'veri', 'cls_voxlingua'])
parser.add_argument('--merge', choices=['mean_logit', 'first_logit', 'mean_latent_sim', 'first_latent_sim', 'mean_logit_sim', 'first_logit_sim'])
parser.add_argument('--veri-pair', help='verification file contains 1/0 utt_x utt_y')
parser.add_argument('--scaler', type=str, choices=['mean_var'])
parser.add_argument('--compress-method', choices=['pca'])
parser.add_argument('--compress-dim', type=int)
args = parser.parse_args()
if args.task in ['cls', 'cls_voxlingua']:
print('| run classification evaluation')
data = np.load(args.data)
data_logit = data['logit']
data_target = data['target']
data_src_len = data['src_len']
assert data_logit.shape[0] == data_target.shape[0]
B = data_logit.shape[0]
correct = 0
total = 0
data_prob = softmax(data_logit, axis=2)
correct_vs_len = np.empty((B, 2))
for ii in range(B):
_target = data_target[ii]
if args.merge == 'mean_logit':
_prob = np.mean(data_prob[ii], axis=0)
top_1 = np.argmax(_prob)
elif args.merge == 'first_logit':
_prob = data_prob[ii][0]
top_1 = np.argmax(_prob)
else :
raise ValueError()
is_top_1 = (1 if top_1 == _target else 0)
correct += is_top_1
total += 1
_src_len = data_src_len[ii] / 16000
correct_vs_len[ii] = [is_top_1, _src_len]
acc = correct / total * 100
t_5 = correct_vs_len[:, 1] <= 5
t_20 = correct_vs_len[:, 1] > 5
c_5 = correct_vs_len[t_5, 0].sum()
c_20 = correct_vs_len[t_20, 0].sum()
t_5 = t_5.sum()
t_20 = t_20.sum()
acc_5 = c_5 / t_5 * 100
acc_20 = c_20 / t_20 * 100
print(f'| acc = {acc:.2f}% -- err = {100-acc:.2f}% -- {correct=} {total=}')
print(f'| acc 0to5 = {acc_5:.2f}% -- err = {100-acc_5:.2f}% -- {c_5=} {t_5=}')
print(f'| acc 5to20 = {acc_20:.2f}% -- err = {100-acc_20:.2f}% -- {c_20=} {t_20=}')
if args.task == 'veri':
print('| run verification evaluation')
veri_pairs = []
with open(args.veri_pair) as ff:
for fi in ff:
a,b,c = fi.split()
a = int(a)
veri_pairs.append([a,b,c])
data = np.load(args.data)
if 'logit' in args.merge:
data_latent = data['logit']
elif 'latent' in args.merge:
data_latent = data['latent']
else :
raise ValueError()
data_name = data['name']
assert len(data_name) == len(data_latent)
map_name_latent = {}
from sklearn.pipeline import make_pipeline
pipe = []
if args.scaler == 'mean_var':
print(f'| apply StandardScaler')
pipe.append(StandardScaler())
if args.compress_method == 'pca':
n_comp = args.compress_dim
print(f'| apply PCA with {n_comp=}')
from sklearn.decomposition import PCA
pipe.append(PCA(n_components=n_comp))
if len(pipe) > 0 :
pipe = make_pipeline(*pipe)
data_latent_2d = data_latent.reshape(-1, data_latent.shape[-1])
pipe.fit(data_latent_2d)
data_latent_2d = pipe.transform(data_latent_2d)
data_latent = data_latent_2d.reshape(data_latent.shape[0], data_latent.shape[1], -1)
for ii in range(len(data_name)):
map_name_latent[data_name[ii]] = data_latent[ii]
labels = []
scores = []
for lbl, pair_a, pair_b in tqdm(veri_pairs):
labels.append(lbl)
pair_a = map_name_latent[pair_a]
pair_b = map_name_latent[pair_b]
assert pair_a.ndim == pair_b.ndim == 2
score = cosine_similarity(pair_a, pair_b)
if args.merge.startswith('mean'):
score = np.mean(score)
elif args.merge.startswith('first'):
score = score[0, 0]
else :
raise ValueError()
scores.append(score)
labels = np.array(labels)
scores = np.array(scores)
eer, eer_threshold = METHOD_NAME(labels, scores)
minDCF, minDCF_threshold = calculate_minDCF(labels, scores)
print('='*40)
print(f'| EER = {eer*100:.2f}%\tthreshold = {eer_threshold:.2f}')
print(f'| minDCF = {minDCF:.2f}\tthreshold = {minDCF_threshold:.2f}')
|
3,207 |
output
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network asg show",
)
class Show(AAZCommand):
"""Get details of an application security group.
:example: Get details of an application security group.
az network asg show -g MyResourceGroup -n MyAsg
"""
_aaz_info = {
"version": "2021-08-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/applicationsecuritygroups/{}", "2021-08-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self.METHOD_NAME()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the new application security group resource.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.ApplicationSecurityGroupsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def METHOD_NAME(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class ApplicationSecurityGroupsGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"applicationSecurityGroupName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-08-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.etag = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.id = AAZStrType()
_schema_on_200.location = AAZStrType()
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"]
|
3,208 |
list
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2018-11-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class SkusOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storage.v2018_11_01.StorageManagementClient`'s
:attr:`skus` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = METHOD_NAME(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.Sku"]:
"""Lists the available SKUs supported by Microsoft.Storage for given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Sku or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2018_11_01.models.Sku]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2018-11-01"))
cls: ClsType[_models.StorageSkuListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("StorageSkuListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
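# Hedged usage note (added; `client` is a hypothetical StorageManagementClient
# instance): the returned ItemPaged is evaluated lazily, fetching further pages
# on demand:
# for sku in client.skus.METHOD_NAME():
#     print(sku.name)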
METHOD_NAME.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus"}
|
3,209 |
close
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import ResourceManagementClientConfiguration
from .operations import (
DeploymentOperationsOperations,
DeploymentsOperations,
Operations,
ProvidersOperations,
ResourceGroupsOperations,
ResourcesOperations,
TagsOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ResourceManagementClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
"""Provides operations for working with resources and resource groups.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.resource.resources.v2019_05_10.aio.operations.Operations
:ivar deployments: DeploymentsOperations operations
:vartype deployments:
azure.mgmt.resource.resources.v2019_05_10.aio.operations.DeploymentsOperations
:ivar providers: ProvidersOperations operations
:vartype providers:
azure.mgmt.resource.resources.v2019_05_10.aio.operations.ProvidersOperations
:ivar resources: ResourcesOperations operations
:vartype resources:
azure.mgmt.resource.resources.v2019_05_10.aio.operations.ResourcesOperations
:ivar resource_groups: ResourceGroupsOperations operations
:vartype resource_groups:
azure.mgmt.resource.resources.v2019_05_10.aio.operations.ResourceGroupsOperations
:ivar tags: TagsOperations operations
:vartype tags: azure.mgmt.resource.resources.v2019_05_10.aio.operations.TagsOperations
:ivar deployment_operations: DeploymentOperationsOperations operations
:vartype deployment_operations:
azure.mgmt.resource.resources.v2019_05_10.aio.operations.DeploymentOperationsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2019-05-10". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ResourceManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.deployments = DeploymentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.providers = ProvidersOperations(self._client, self._config, self._serialize, self._deserialize)
self.resources = ResourcesOperations(self._client, self._config, self._serialize, self._deserialize)
self.resource_groups = ResourceGroupsOperations(self._client, self._config, self._serialize, self._deserialize)
self.tags = TagsOperations(self._client, self._config, self._serialize, self._deserialize)
self.deployment_operations = DeploymentOperationsOperations(
self._client, self._config, self._serialize, self._deserialize
)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def METHOD_NAME(self) -> None:
await self._client.METHOD_NAME()
async def __aenter__(self) -> "ResourceManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
|
3,210 |
send crash report
|
import datetime
import plistlib
from pathlib import Path
import json
from resources import network_handler, constants, global_settings
DATE_FORMAT: str = "%Y-%m-%d %H-%M-%S"
ANALYTICS_SERVER: str = ""
SITE_KEY: str = ""
CRASH_URL: str = ANALYTICS_SERVER + "/crash"
VALID_ANALYTICS_ENTRIES: dict = {
'KEY': str, # Prevent abuse (embedded at compile time)
'UNIQUE_IDENTITY': str, # Host's UUID as SHA1 hash
'APPLICATION_NAME': str, # ex. OpenCore Legacy Patcher
'APPLICATION_VERSION': str, # ex. 0.2.0
'OS_VERSION': str, # ex. 10.15.7
'MODEL': str, # ex. MacBookPro11,5
'GPUS': list, # ex. ['Intel Iris Pro', 'AMD Radeon R9 M370X']
'FIRMWARE': str, # ex. APPLE
'LOCATION': str, # ex. 'US' (just broad region, don't need to be specific)
'TIMESTAMP': datetime.datetime, # ex. 2021-09-01-12-00-00
}
VALID_CRASH_ENTRIES: dict = {
'KEY': str, # Prevent abuse (embedded at compile time)
'APPLICATION_VERSION': str, # ex. 0.2.0
'APPLICATION_COMMIT': str, # ex. 0.2.0 or {commit hash if not a release}
'OS_VERSION': str, # ex. 10.15.7
'MODEL': str, # ex. MacBookPro11,5
'TIMESTAMP': datetime.datetime, # ex. 2021-09-01-12-00-00
'CRASH_LOG': str, # ex. "This is a crash log"
}
class Analytics:
def __init__(self, global_constants: constants.Constants) -> None:
self.constants: constants.Constants = global_constants
self.unique_identity = str(self.constants.computer.uuid_sha1)
self.application = str("OpenCore Legacy Patcher")
self.version = str(self.constants.patcher_version)
self.os = str(self.constants.detected_os_version)
self.model = str(self.constants.computer.real_model)
self.date = str(datetime.datetime.now().strftime(DATE_FORMAT))
def send_analytics(self) -> None:
if global_settings.GlobalEnviromentSettings().read_property("DisableCrashAndAnalyticsReporting") is True:
return
self._generate_base_data()
self._post_analytics_data()
def METHOD_NAME(self, log_file: Path) -> None:
if ANALYTICS_SERVER == "":
return
if SITE_KEY == "":
return
if global_settings.GlobalEnviromentSettings().read_property("DisableCrashAndAnalyticsReporting") is True:
return
if not log_file.exists():
return
if self.constants.commit_info[0].startswith("refs/tags"):
# Avoid being overloaded with crash reports
return
commit_info = self.constants.commit_info[0].split("/")[-1] + "_" + self.constants.commit_info[1].split("T")[0] + "_" + self.constants.commit_info[2].split("/")[-1]
        crash_data = {
"KEY": SITE_KEY,
"APPLICATION_VERSION": self.version,
"APPLICATION_COMMIT": commit_info,
"OS_VERSION": self.os,
"MODEL": self.model,
"TIMESTAMP": self.date,
"CRASH_LOG": log_file.read_text()
}
network_handler.NetworkUtilities().post(CRASH_URL, json = crash_data)
def _get_country(self) -> str:
# Get approximate country from .GlobalPreferences.plist
path = "/Library/Preferences/.GlobalPreferences.plist"
if not Path(path).exists():
return "US"
try:
result = plistlib.load(Path(path).open("rb"))
except:
return "US"
if "Country" not in result:
return "US"
return result["Country"]
def _generate_base_data(self) -> None:
self.gpus = []
self.firmware = str(self.constants.computer.firmware_vendor)
self.location = str(self._get_country())
for gpu in self.constants.computer.gpus:
self.gpus.append(str(gpu.arch))
self.data = {
'KEY': SITE_KEY,
'UNIQUE_IDENTITY': self.unique_identity,
'APPLICATION_NAME': self.application,
'APPLICATION_VERSION': self.version,
'OS_VERSION': self.os,
'MODEL': self.model,
'GPUS': self.gpus,
'FIRMWARE': self.firmware,
'LOCATION': self.location,
'TIMESTAMP': self.date,
}
# convert to JSON:
self.data = json.dumps(self.data)
def _post_analytics_data(self) -> None:
# Post data to analytics server
if ANALYTICS_SERVER == "":
return
if SITE_KEY == "":
return
network_handler.NetworkUtilities().post(ANALYTICS_SERVER, json = self.data)
|
3,211 |
test param priority
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import pytest
from flowmachine.core import connect
@pytest.fixture(autouse=True)
def reset_connect(monkeypatch):
logging.getLogger("flowmachine.debug").handlers = []
def test_double_connect_warning():
"""Test that a warning is raised when connecting twice."""
connect()
with pytest.warns(UserWarning):
connect()
# assert 1 == len(logging.getLogger("flowmachine.debug").handlers)
def test_bad_log_level_goes_to_error(monkeypatch):
"""Test that a bad log level is coerced to ERROR."""
monkeypatch.setenv("FLOWMACHINE_LOG_LEVEL", "BAD_LEVEL")
connect()
assert logging.ERROR == logging.getLogger("flowmachine.debug").level
def test_log_level_set_env(monkeypatch):
"""Test that a log level can be set via env."""
monkeypatch.setenv("FLOWMACHINE_LOG_LEVEL", "INFO")
connect()
assert logging.INFO == logging.getLogger("flowmachine.debug").level
def test_log_level_set(monkeypatch):
"""Test that a log level can be set via param."""
connect(log_level="critical")
assert logging.CRITICAL == logging.getLogger("flowmachine.debug").level
def METHOD_NAME(mocked_connections, monkeypatch):
"""Explicit parameters to connect should be respected"""
# Use monkeypatch to set environment variable only for this test
monkeypatch.setenv("FLOWMACHINE_LOG_LEVEL", "DUMMY_ENV_LOG_LEVEL")
monkeypatch.setenv("FLOWDB_PORT", "7777")
monkeypatch.setenv("FLOWMACHINE_FLOWDB_USER", "DUMMY_ENV_FLOWDB_USER")
monkeypatch.setenv("FLOWDB_PASSWORD", "DUMMY_ENV_FLOWDB_PASSWORD")
monkeypatch.setenv("FLOWDB_HOST", "DUMMY_ENV_FLOWDB_HOST")
monkeypatch.setenv("DB_CONNECTION_POOL_SIZE", "7777")
monkeypatch.setenv("DB_CONNECTION_POOL_OVERFLOW", "7777")
monkeypatch.setenv("REDIS_HOST", "DUMMY_ENV_REDIS_HOST")
monkeypatch.setenv("REDIS_PORT", "7777")
monkeypatch.setenv("REDIS_PASSWORD", "DUMMY_ENV_REDIS_PASSWORD")
(
core_set_log_level_mock,
core_init_Connection_mock,
core_init_StrictRedis_mock,
core_init_start_threadpool_mock,
) = mocked_connections
connect(
log_level="dummy_log_level",
flowdb_port=1234,
flowdb_user="dummy_db_user",
flowdb_password="dummy_db_pass",
flowdb_host="dummy_db_host",
flowdb_connection_pool_size=6789,
flowdb_connection_pool_overflow=1011,
redis_host="dummy_redis_host",
redis_port=1213,
redis_password="dummy_redis_password",
)
core_set_log_level_mock.assert_called_with("flowmachine.debug", "dummy_log_level")
core_init_Connection_mock.assert_called_with(
port=1234,
user="dummy_db_user",
password="dummy_db_pass",
host="dummy_db_host",
database="flowdb",
pool_size=6789,
overflow=1011,
)
core_init_StrictRedis_mock.assert_called_with(
host="dummy_redis_host", port=1213, password="dummy_redis_password"
)
core_init_start_threadpool_mock.assert_called_with(
6789
) # for the time being, we should have num_threads = num_db_connections
def test_env_priority(mocked_connections, monkeypatch):
"""Env vars should be used over defaults in connect"""
# Use monkeypatch to set environment variable only for this test
monkeypatch.setenv("FLOWMACHINE_LOG_LEVEL", "DUMMY_ENV_LOG_LEVEL")
monkeypatch.setenv("FLOWDB_PORT", "6969")
monkeypatch.setenv("FLOWMACHINE_FLOWDB_USER", "DUMMY_ENV_FLOWDB_USER")
monkeypatch.setenv("FLOWMACHINE_FLOWDB_PASSWORD", "DUMMY_ENV_FLOWDB_PASSWORD")
monkeypatch.setenv("FLOWDB_HOST", "DUMMY_ENV_FLOWDB_HOST")
monkeypatch.setenv("DB_CONNECTION_POOL_SIZE", "7777")
monkeypatch.setenv("DB_CONNECTION_POOL_OVERFLOW", "2020")
monkeypatch.setenv("REDIS_HOST", "DUMMY_ENV_REDIS_HOST")
monkeypatch.setenv("REDIS_PORT", "5050")
monkeypatch.setenv("REDIS_PASSWORD", "DUMMY_ENV_REDIS_PASSWORD")
(
core_set_log_level_mock,
core_init_Connection_mock,
core_init_StrictRedis_mock,
core_init_start_threadpool_mock,
) = mocked_connections
connect()
core_set_log_level_mock.assert_called_with(
"flowmachine.debug", "DUMMY_ENV_LOG_LEVEL"
)
core_init_Connection_mock.assert_called_with(
port=6969,
user="DUMMY_ENV_FLOWDB_USER",
password="DUMMY_ENV_FLOWDB_PASSWORD",
host="DUMMY_ENV_FLOWDB_HOST",
database="flowdb",
pool_size=7777,
overflow=2020,
)
core_init_StrictRedis_mock.assert_called_with(
host="DUMMY_ENV_REDIS_HOST", port=5050, password="DUMMY_ENV_REDIS_PASSWORD"
)
core_init_start_threadpool_mock.assert_called_with(
7777
) # for the time being, we should have num_threads = num_db_connections
@pytest.mark.usefixtures("clean_env")
def test_connect_defaults(mocked_connections, monkeypatch):
"""Test connect defaults are used with no params and no env vars"""
(
core_set_log_level_mock,
core_init_Connection_mock,
core_init_StrictRedis_mock,
core_init_start_threadpool_mock,
) = mocked_connections
connect(flowdb_password="foo", redis_password="fm_redis")
core_set_log_level_mock.assert_called_with("flowmachine.debug", "error")
core_init_Connection_mock.assert_called_with(
port=9000,
user="flowmachine",
password="foo",
host="localhost",
database="flowdb",
pool_size=5,
overflow=1,
)
core_init_StrictRedis_mock.assert_called_with(
host="localhost", port=6379, password="fm_redis"
)
core_init_start_threadpool_mock.assert_called_with(
5
) # for the time being, we should have num_threads = num_db_connections
@pytest.mark.usefixtures("clean_env")
@pytest.mark.parametrize(
"args", [{}, {"flowdb_password": "foo"}, {"redis_password": "fm_redis"}]
)
def test_connect_passwords_required(args):
"""Test connect raises a valueerror if no password is set for db or redis"""
with pytest.raises(ValueError):
connect(**args)
|
3,212 |
test gatk hc gvcf
|
import os
from pathlib import Path
from resolwe.flow.models import Data
from resolwe.test import tag_process
from resolwe_bio.utils.filter import filter_vcf_variable
from resolwe_bio.utils.test import BioProcessTestCase
class WgsWorkflowTestCase(BioProcessTestCase):
@tag_process("workflow-wgs-paired")
def test_wgs_workflow(self):
def filter_gatkcmd(line):
return line.startswith(b"##GATKCommandLine")
with self.preparation_stage():
ref_seq = self.run_process(
"upload-fasta-nucl",
{
"src": "./bqsr/input/hs_b37_chr17_upto_TP53.fasta.gz",
"species": "Homo sapiens",
"build": "custom_build",
},
)
bwa_index = self.run_process("bwa-index", {"ref_seq": ref_seq.id})
reads = self.prepare_paired_reads(
mate1=["./workflow_wes/input/TP53_1.fastq.gz"],
mate2=["./workflow_wes/input/TP53_2.fastq.gz"],
)
kbase = []
for i in ["./bqsr/input/dbsnp_TP53.vcf.gz"]:
kbase.append(
self.run_process(
"upload-variants-vcf",
{"src": i, "species": "Homo sapiens", "build": "custom_build"},
)
)
adapters = self.prepare_ref_seq()
self.run_process(
"workflow-wgs-paired",
{
"bwa_index": bwa_index.id,
"ref_seq": ref_seq.id,
"reads": reads.id,
"known_sites": [i.id for i in kbase],
"hc_dbsnp": kbase[0].id,
"advanced": {
"trimming": {
"adapters": adapters.id,
"seed_mismatches": 2,
"simple_clip_threshold": 10,
"min_adapter_length": 8,
"palindrome_clip_threshold": 30,
"leading": 20,
"trailing": 3,
"minlen": 40,
},
"align": {
"m": True,
"scoring": {"unpaired_p": 17},
},
"bqsr": {
"read_group": "-LB=DAB;-PL=Illumina;-PU=barcode;-SM=sample1"
},
"hc": {"stand_call_conf": 3, "mbq": 3},
},
},
)
for data in Data.objects.all():
self.assertStatus(data, Data.STATUS_DONE)
variants = Data.objects.filter(process__slug="vc-gatk4-hc").last()
self.assertFile(
variants,
"vcf",
os.path.join("wgs_workflow", "output", "tp53_1fastqgz.gatkHC.vcf.gz"),
compression="gzip",
file_filter=filter_gatkcmd,
)
@tag_process("workflow-wgs-gvcf")
def METHOD_NAME(self):
base = Path("wgs")
inputs = base / "input"
outputs = base / "output"
with self.preparation_stage():
ref_seq = self.run_process(
"upload-fasta-nucl",
{
"src": inputs / "hs_b37_chr17_upto_TP53.fasta.gz",
"species": "Homo sapiens",
"build": "custom_build",
},
)
bwa_index = self.run_process("bwamem2-index", {"ref_seq": ref_seq.id})
reads = self.prepare_paired_reads(
mate1=[inputs / "TP53_1.fastq.gz"],
mate2=[inputs / "TP53_2.fastq.gz"],
)
dbsnp = self.run_process(
"upload-variants-vcf",
{
"src": inputs / "dbsnp_TP53.vcf.gz",
"species": "Homo sapiens",
"build": "custom_build",
},
)
intervals = self.run_process(
"upload-bed",
{
"src": inputs / "hg38.intervals.bed",
"species": "Homo sapiens",
"build": "hg19",
},
)
adapters = self.prepare_ref_seq()
self.run_process(
"workflow-wgs-gvcf",
{
"reads": reads.id,
"ref_seq": ref_seq.id,
"bwa_index": bwa_index.id,
"known_sites": [dbsnp.id],
"gatk_options": {
"intervals": intervals.id,
},
"trimming_options": {
"enable_trimming": True,
"adapters": adapters.id,
"seed_mismatches": 2,
"simple_clip_threshold": 10,
"min_adapter_length": 8,
"palindrome_clip_threshold": 30,
"leading": 20,
"trailing": 3,
"minlen": 40,
},
},
)
for data in Data.objects.all():
self.assertStatus(data, Data.STATUS_DONE)
variants = Data.objects.filter(process__slug="gatk-haplotypecaller-gvcf").last()
self.assertFile(
variants,
"vcf",
outputs / "variants_from_workflow.g.vcf.gz",
file_filter=filter_vcf_variable,
compression="gzip",
)
|
3,213 |
handle switch case
|
# pylint:disable=unused-argument,useless-return
from collections import OrderedDict
import ailment
from ...errors import UnsupportedNodeTypeError
from .structuring.structurer_nodes import (
MultiNode,
CodeNode,
SequenceNode,
ConditionNode,
SwitchCaseNode,
LoopNode,
CascadingConditionNode,
ConditionalBreakNode,
IncompleteSwitchCaseNode,
)
class SequenceWalker:
"""
Walks a SequenceNode and all its nodes, recursively.
"""
def __init__(self, handlers=None, exception_on_unsupported=False, update_seqnode_in_place=True):
self._update_seqnode_in_place = update_seqnode_in_place
self._exception_on_unsupported = exception_on_unsupported
default_handlers = {
# Structurer nodes
CodeNode: self._handle_Code,
SequenceNode: self._handle_Sequence,
ConditionNode: self._handle_Condition,
CascadingConditionNode: self._handle_CascadingCondition,
SwitchCaseNode: self.METHOD_NAME,
IncompleteSwitchCaseNode: self._handle_IncompleteSwitchCase,
LoopNode: self._handle_Loop,
MultiNode: self._handle_MultiNode,
ConditionalBreakNode: self._handle_ConditionalBreak,
ailment.Block: self._handle_Noop,
}
self._handlers = default_handlers
if handlers:
self._handlers.update(handlers)
def walk(self, sequence):
return self._handle(sequence)
#
# Handlers
#
def _handle(self, node, **kwargs):
handler = self._handlers.get(node.__class__, None)
if handler is not None:
return handler(node, **kwargs)
if self._exception_on_unsupported:
raise UnsupportedNodeTypeError("Node type %s is not supported yet." % type(node))
return None
def _handle_Code(self, node: CodeNode, **kwargs):
new_inner_node = self._handle(node.node, parent=node, index=0)
if new_inner_node is None:
return None
return CodeNode(new_inner_node, node.reaching_condition)
def _handle_Sequence(self, node, **kwargs):
nodes_copy = list(node.nodes)
changed = False
        # We iterate backwards because users of this function may invoke insert_node() directly to insert nodes
        # into the parent node, either before or after the current node. Iterating backwards ensures that `i`
        # always points to the right index in node.nodes, even after custom insertions.
i = len(nodes_copy) - 1
while i > -1:
node_ = nodes_copy[i]
new_node = self._handle(node_, parent=node, index=i)
if new_node is not None:
changed = True
if self._update_seqnode_in_place:
node.nodes[i] = new_node
else:
nodes_copy[i] = new_node
i -= 1
if not changed:
return None
if self._update_seqnode_in_place:
return node
return SequenceNode(node.addr, nodes=nodes_copy)
def _handle_MultiNode(self, node, **kwargs):
changed = False
nodes_copy = list(node.nodes)
i = len(nodes_copy) - 1
while i > -1:
node_ = nodes_copy[i]
new_node = self._handle(node_, parent=node, index=i)
if new_node is not None:
changed = True
node.nodes[i] = new_node
i -= 1
return None if not changed else node
def METHOD_NAME(self, node, **kwargs):
self._handle(node.switch_expr, parent=node, label="switch_expr")
changed = False
new_cases = OrderedDict()
for idx in list(node.cases.keys()):
case = node.cases[idx]
new_case = self._handle(case, parent=node, index=idx, label="case")
if new_case is not None:
changed = True
new_cases[idx] = new_case
else:
new_cases[idx] = case
new_default_node = None
if node.default_node is not None:
new_default_node = self._handle(node.default_node, parent=node, index=0, label="default")
if new_default_node is not None:
changed = True
else:
new_default_node = node.default_node
if changed:
return SwitchCaseNode(node.switch_expr, new_cases, new_default_node, addr=node.addr)
return None
def _handle_IncompleteSwitchCase(self, node: IncompleteSwitchCaseNode, **kwargs):
changed = False
new_cases = []
for idx, case in enumerate(node.cases):
new_case = self._handle(case, parent=node, index=idx, label="case")
if new_case is not None:
changed = True
new_cases.append(new_case)
else:
new_cases.append(case)
new_head = None
if node.head is not None:
new_head = self._handle(node.head, parent=node, index=0, label="default")
if new_head is not None:
changed = True
else:
new_head = node.head
if changed:
return IncompleteSwitchCaseNode(node.addr, new_head, new_cases)
return None
def _handle_Loop(self, node: LoopNode, **kwargs):
if node.initializer is not None:
self._handle(node.initializer)
if node.iterator is not None:
self._handle(node.iterator)
if node.condition is not None:
self._handle(node.condition, parent=node, label="condition")
seq_node = self._handle(node.sequence_node, parent=node, label="body", index=0)
if seq_node is not None:
return LoopNode(
node.sort,
node.condition,
seq_node,
addr=node.addr,
continue_addr=node.continue_addr,
initializer=node.initializer,
iterator=node.iterator,
)
return None
def _handle_Condition(self, node, **kwargs):
if node.true_node is not None:
new_true_node = self._handle(node.true_node, parent=node, index=0)
else:
new_true_node = None
if node.false_node is not None:
new_false_node = self._handle(node.false_node, parent=node, index=1)
else:
new_false_node = None
if new_true_node is None and new_false_node is None:
return None
return ConditionNode(
node.addr,
node.reaching_condition,
node.condition,
node.true_node if new_true_node is None else new_true_node,
false_node=node.false_node if new_false_node is None else new_false_node,
)
def _handle_CascadingCondition(self, node: CascadingConditionNode, **kwargs):
for index, (_, child_node) in enumerate(node.condition_and_nodes):
self._handle(child_node, parent=node, index=index)
if node.else_node is not None:
self._handle(node.else_node, parent=node, index=-1)
return None
def _handle_ConditionalBreak(self, node: ConditionalBreakNode, **kwargs): # pylint:disable=no-self-use
return None
def _handle_Noop(self, *args, **kwargs): # pylint:disable=no-self-use
return None
|
3,214 |
create parser
|
# Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
import signal
import sys
import time
from argparse import ArgumentParser, Namespace
from json.decoder import JSONDecodeError
from typing import Any
import requests
_SLEEP_ON_ERROR_SECONDS = 5
_MAX_CONN_RETRIES = math.inf
def METHOD_NAME() -> ArgumentParser:
from hathor.cli.util import METHOD_NAME
parser = METHOD_NAME()
parser.add_argument('url', help='URL to get mining bytes')
parser.add_argument('--address', action='append')
parser.add_argument('--value', action='append')
parser.add_argument('--rate', type=float, help='tx/s')
parser.add_argument('--weight', type=float, help='Weight')
parser.add_argument('--count', type=int, help='Quantity of txs to be generated')
parser.add_argument('--timestamp', action='append', choices=['client', 'server'], help='If the tx timestamp '
'should be set on the client or server. If this parameter is not given, server will set '
'the timestamp as part of regular tx creation')
parser.add_argument('--profiler', action='store_true', default=False, help='Enable profiling')
return parser
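# Hedged example invocation (the command name is hypothetical; only the flags match the
# parser defined above):
#   tx-generator http://localhost:8080/ --rate 2 --count 100 --timestamp client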
def execute(args: Namespace) -> None:
import urllib.parse
from requests.exceptions import ConnectionError
send_tokens_url = urllib.parse.urljoin(args.url, 'wallet/send_tokens/')
print('Hathor TX Sender v1.0.0')
print('URL: {}'.format(args.url))
print('Send tokens URL: {}'.format(send_tokens_url))
print('Rate: {} tx/s'.format(args.rate))
latest_timestamp = 0
latest_weight = 0
conn_retries = 0
if args.rate:
interval = 1. / args.rate
else:
interval = None
if args.address:
addresses = args.address
else:
address_url = urllib.parse.urljoin(args.url, 'wallet/address') + '?new=false'
response = None
while True:
try:
response = requests.get(address_url)
break
except ConnectionError as e:
print('Error connecting to server: {}'.format(address_url))
print(e)
if conn_retries >= _MAX_CONN_RETRIES:
print('Too many connection failures, giving up.')
sys.exit(1)
else:
conn_retries += 1
print('Waiting {} seconds to try again ({} of {})...'.format(_SLEEP_ON_ERROR_SECONDS, conn_retries,
_MAX_CONN_RETRIES))
time.sleep(_SLEEP_ON_ERROR_SECONDS)
continue
else:
conn_retries = 0
assert response is not None
addresses = [response.json()['address']]
print('Addresses: {}'.format(addresses))
def signal_handler(sig, frame):
if args.profiler:
response = requests.post(profiler_url, json={'stop': True})
print(response.text)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
if args.profiler:
profiler_url = urllib.parse.urljoin(args.url, 'profiler/')
response = requests.post(profiler_url, json={'start': True})
print(response.text)
t0 = time.time()
total = 0
count = 0
while True:
address = random.choice(addresses)
if args.value:
value = random.choice(args.value)
else:
value = random.randint(10, 100)
# print('Sending {} tokens to {}...'.format(address, value))
data: dict[str, Any] = {'outputs': [{'address': address, 'value': value}], 'inputs': []}
if args.timestamp:
if args.timestamp == 'server':
data['timestamp'] = 0
elif args.timestamp == 'client':
data['timestamp'] = int(time.time())
if args.weight:
data['weight'] = args.weight
try:
response = requests.post(send_tokens_url, json={'data': data})
except ConnectionError as e:
print('Error connecting to server: {}'.format(send_tokens_url))
print(e)
if conn_retries >= _MAX_CONN_RETRIES:
print('Too many connection failures, giving up.')
sys.exit(1)
else:
conn_retries += 1
print('Waiting {} seconds to try again ({} of {})...'.format(_SLEEP_ON_ERROR_SECONDS, conn_retries,
_MAX_CONN_RETRIES))
time.sleep(_SLEEP_ON_ERROR_SECONDS)
continue
else:
conn_retries = 0
try:
data = response.json()
assert data['success']
total += 1
if args.count and total == args.count:
break
latest_timestamp = data['tx']['timestamp']
latest_weight = data['tx']['weight']
except (AssertionError, JSONDecodeError) as e:
print('Error reading response from server: {}'.format(response))
print(response.text)
print(e)
print('Waiting {} seconds to try again...'.format(_SLEEP_ON_ERROR_SECONDS))
time.sleep(_SLEEP_ON_ERROR_SECONDS)
else:
# print('Response:', data)
if interval:
time.sleep(interval)
count += 1
t1 = time.time()
if t1 - t0 > 5:
measure = count / (t1 - t0)
if interval is not None:
error = 1. / measure - 1. / args.rate
if interval > error:
interval -= error
else:
interval = 0
# print('')
print(' {} tx/s (latest timestamp={}, latest weight={}, sleep interval={})'.format(
measure, latest_timestamp, latest_weight, interval))
# print('')
count = 0
t0 = t1
def main():
parser = METHOD_NAME()
args = parser.parse_args()
execute(args)
|
3,215 |
main
|
# coding: utf-8
from __future__ import print_function, unicode_literals
import argparse
import sys
import pkg_resources
import logging
import os
from lxml import etree
import packtools
from packtools import catalogs
LOGGER = logging.getLogger(__name__)
class XMLError(Exception):
""" Represents errors that would block HTMLGenerator instance from
being created.
"""
def get_htmlgenerator(
xmlpath, no_network, no_checks, css, print_css, js,
math_elem_preference, math_js,
permlink,
url_article_page, url_download_ris,
gs_abstract,
output_style,
xslt,
bootstrap_css,
article_css,
design_system_static_img_path,
):
if xslt == "3.0":
if bootstrap_css and article_css and os.path.isfile(css):
css = os.path.dirname(css)
if not design_system_static_img_path:
design_system_static_img_path = (
os.path.join(
os.path.dirname(os.path.dirname(bootstrap_css)),
"img"
)
)
try:
parsed_xml = packtools.XML(xmlpath, no_network=no_network)
except IOError as e:
raise XMLError('Error reading %s. Make sure it is a valid file-path or URL.' % xmlpath)
except etree.XMLSyntaxError as e:
raise XMLError('Error reading %s. Syntax error: %s' % (xmlpath, e))
try:
valid_only = not no_checks
generator = packtools.HTMLGenerator.parse(
parsed_xml, valid_only=valid_only, css=css,
print_css=print_css, js=js,
math_elem_preference=math_elem_preference, math_js=math_js,
permlink=permlink,
url_article_page=url_article_page,
url_download_ris=url_download_ris,
gs_abstract=gs_abstract,
output_style=output_style,
xslt=xslt,
bootstrap_css=bootstrap_css,
article_css=article_css,
design_system_static_img_path=design_system_static_img_path,
)
except ValueError as e:
raise XMLError('Error reading %s. %s.' % (xmlpath, e))
return generator
@packtools.utils.config_xml_catalog
def METHOD_NAME():
packtools_version = pkg_resources.get_distribution('packtools').version
parser = argparse.ArgumentParser(description='HTML generator cli utility')
parser.add_argument('--nonetwork', action='store_true',
help='prevents the retrieval of the DTD through the network')
parser.add_argument('--nochecks', action='store_true',
help='prevents the validation against SciELO PS spec')
parser.add_argument('--gs_abstract', default=False,
action='store_true',
help='Abstract for Google Scholar')
parser.add_argument('--output_style', default='',
help='Output styles: website or html')
parser.add_argument('--xslt', default=None,
choices=['2.0', '3.0'],
help='XSLT Version',
)
parser.add_argument('--css', default=catalogs.HTML_GEN_DEFAULT_CSS_PATH,
help='URL or full path of the CSS file to use with generated htmls')
parser.add_argument('--print_css', default=catalogs.HTML_GEN_DEFAULT_PRINT_CSS_PATH,
help='URL or full path of the CSS (media: print) file to use with generated htmls')
parser.add_argument('--bootstrap_css', default=catalogs.HTML_GEN_BOOTSTRAP_CSS_PATH,
help='URL or full path of the CSS file to use with generated htmls')
parser.add_argument('--article_css', default=catalogs.HTML_GEN_ARTICLE_CSS_PATH,
help='URL or full path of the CSS file to use with generated htmls')
parser.add_argument('--design_system_static_img_path',
help='URL or full path of the Design System Images')
parser.add_argument('--math_js', default='https://cdn.jsdelivr.net/npm/[email protected]/es5/tex-mml-svg.js',
help='URL Math renderer')
parser.add_argument('--math_elem_preference', default='mml:math',
choices=['text-math', 'mml:math'],
help='Math element preference')
parser.add_argument('--js', default=catalogs.HTML_GEN_DEFAULT_JS_PATH,
help='URL or full path of the JS file to use with generated htmls')
parser.add_argument('--permlink', default='',
help='Permanente URL to access the article')
parser.add_argument('--url_article_page', default='',
help='OPAC URL to access the article')
parser.add_argument('--url_download_ris', default='',
help='URL to download RIS file (how to cite this article)')
parser.add_argument('XML', nargs='+',
help='filesystem path or URL to the XML')
parser.add_argument('--version', action='version', version=packtools_version)
parser.add_argument('--loglevel', default='WARNING')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.loglevel.upper()))
print('Please wait, this may take a while...', file=sys.stderr)
if args.xslt:
xslt_versions = [args.xslt]
else:
xslt_versions = ["2.0", "3.0"]
for xml in packtools.utils.flatten(args.XML):
LOGGER.info('starting generation of %s' % (xml,))
for xslt_version in xslt_versions:
generate_html_files(args, xslt_version, xml)
LOGGER.info('finished generating %s' % (xml,))
def generate_html_files(config, xslt_version, xml):
try:
html_generator = get_htmlgenerator(
xml, config.nonetwork, config.nochecks,
config.css, config.print_css, config.js,
config.math_elem_preference, config.math_js,
config.permlink, config.url_article_page, config.url_download_ris,
config.gs_abstract,
config.output_style,
xslt_version,
config.bootstrap_css,
config.article_css,
config.design_system_static_img_path,
)
LOGGER.debug('HTMLGenerator repr: %s' % repr(html_generator))
except XMLError as e:
LOGGER.debug(e)
LOGGER.warning('Error generating %s. Skipping. Run with DEBUG for more info.', xml)
return
try:
abstract_suffix = config.gs_abstract and '.abstract' or ''
version = xslt_version.replace(".", "_")
for lang, trans_result in html_generator:
            # name of the file to be created
fname, fext = xml.rsplit('.', 1)
if xslt_version == "2.0":
name_parts = [fname, lang + abstract_suffix, 'html']
else:
name_parts = [fname, lang + abstract_suffix, version, 'html']
out_fname = '.'.join(name_parts)
            # create the file
with open(out_fname, 'wb') as fp:
fp.write(etree.tostring(trans_result, pretty_print=True,
encoding='utf-8', method='html',
doctype=u"<!DOCTYPE html>"))
print('Generated HTML file:', out_fname)
except TypeError as e:
LOGGER.debug(e)
LOGGER.warning('Error generating %s. Skipping. Run with DEBUG for more info.', xml)
return
if __name__ == '__main__':
METHOD_NAME()
|
3,216 |
run test
|
#!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2023 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import sys, os
import unittest
from testutils import *
import openram
from openram import debug
from openram.sram_factory import factory
from openram import OPTS
# @unittest.skip("SKIPPING 21_model_delay_test")
class model_delay_test(openram_test):
""" Compare the accuracy of the analytical model with a spice simulation. """
def METHOD_NAME(self):
config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
openram.init_openram(config_file, is_unit_test=True)
OPTS.analytical_delay = False
OPTS.netlist_only = True
OPTS.spice_name = "Xyce"
OPTS.num_sim_threads = 8
# This is a hack to reload the characterizer __init__ with the spice version
from importlib import reload
from openram import characterizer
reload(characterizer)
from openram.characterizer import delay
from openram.characterizer import elmore
from openram import sram
from openram import sram_config
if OPTS.tech_name == "sky130":
num_spare_rows = 1
num_spare_cols = 1
else:
num_spare_rows = 0
num_spare_cols = 0
c = sram_config(word_size=1,
num_words=16,
num_banks=1,
num_spare_cols=num_spare_cols,
num_spare_rows=num_spare_rows)
c.words_per_row=1
c.recompute_sizes()
debug.info(1, "Testing timing for sample 1bit, 16words SRAM with 1 bank")
s = factory.create(module_type="sram", sram_config=c)
tempspice = OPTS.openram_temp + "temp.sp"
s.sp_write(tempspice)
probe_address = "1" * s.s.addr_size
probe_data = s.s.word_size - 1
debug.info(1, "Probe address {0} probe data bit {1}".format(probe_address, probe_data))
corner = (OPTS.process_corners[0], OPTS.supply_voltages[0], OPTS.temperatures[0])
d = delay(s.s, tempspice, corner)
m = elmore(s.s, tempspice, corner)
from openram import tech
loads = [tech.spice["dff_in_cap"]*4]
slews = [tech.spice["rise_time"]*2]
load_slews = []
for slew in slews:
for load in loads:
load_slews.append((load, slew))
# Run a spice characterization
spice_data, port_data = d.analyze(probe_address, probe_data, load_slews)
spice_data.update(port_data[0])
# Run analytical characterization
model_data, port_data = m.get_lib_values(load_slews)
model_data.update(port_data[0])
# Only compare the delays
spice_delays = {key:value for key, value in spice_data.items() if 'delay' in key}
spice_delays['min_period'] = spice_data['min_period']
model_delays = {key:value for key, value in model_data.items() if 'delay' in key}
model_delays['min_period'] = model_data['min_period']
debug.info(1,"Spice Delays={}".format(spice_delays))
debug.info(1,"Model Delays={}".format(model_delays))
if OPTS.tech_name == "freepdk45":
error_tolerance = 0.30
elif OPTS.tech_name == "scn4m_subm":
error_tolerance = 0.30
else:
self.assertTrue(False) # other techs fail
debug.info(3, 'spice_delays {}'.format(spice_delays))
debug.info(3, 'model_delays {}'.format(model_delays))
        # Check that there are not too many or too few results
self.assertTrue(len(spice_delays.keys())==len(model_delays.keys()))
self.assertTrue(self.check_golden_data(spice_delays,model_delays,error_tolerance))
openram.end_openram()
# run the test from the command line
if __name__ == "__main__":
(OPTS, args) = openram.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main(testRunner=debugTestRunner())
|
3,217 |
forward
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from ..utils import is_torch_available
from .base import Pipeline
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING
from ..models.speecht5.modeling_speecht5 import SpeechT5HifiGan
DEFAULT_VOCODER_ID = "microsoft/speecht5_hifigan"
class TextToAudioPipeline(Pipeline):
"""
Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`. This
pipeline generates an audio file from an input text and optional other conditional inputs.
Example:
```python
>>> from transformers import pipeline
>>> pipe = pipeline(model="suno/bark-small")
>>> output = pipe("Hey it's HuggingFace on the phone!")
>>> audio = output["audio"]
>>> sampling_rate = output["sampling_rate"]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `"text-to-speech"` or
`"text-to-audio"`.
See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-to-speech).
"""
def __init__(self, *args, vocoder=None, sampling_rate=None, **kwargs):
super().__init__(*args, **kwargs)
if self.framework == "tf":
raise ValueError("The TextToAudioPipeline is only available in PyTorch.")
self.vocoder = None
if self.model.__class__ in MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING.values():
self.vocoder = (
SpeechT5HifiGan.from_pretrained(DEFAULT_VOCODER_ID).to(self.model.device)
if vocoder is None
else vocoder
)
self.sampling_rate = sampling_rate
if self.vocoder is not None:
self.sampling_rate = self.vocoder.config.sampling_rate
if self.sampling_rate is None:
# get sampling_rate from config and generation config
config = self.model.config.to_dict()
gen_config = self.model.__dict__.get("generation_config", None)
if gen_config is not None:
config.update(gen_config.to_dict())
for sampling_rate_name in ["sample_rate", "sampling_rate"]:
sampling_rate = config.get(sampling_rate_name, None)
if sampling_rate is not None:
self.sampling_rate = sampling_rate
def preprocess(self, text, **kwargs):
if isinstance(text, str):
text = [text]
if self.model.config.model_type == "bark":
# bark Tokenizer is called with BarkProcessor which uses those kwargs
new_kwargs = {
"max_length": self.model.generation_config.semantic_config.get("max_input_semantic_length", 256),
"add_special_tokens": False,
"return_attention_mask": True,
"return_token_type_ids": False,
"padding": "max_length",
}
# priority is given to kwargs
new_kwargs.update(kwargs)
kwargs = new_kwargs
output = self.tokenizer(text, **kwargs, return_tensors="pt")
return output
def METHOD_NAME(self, model_inputs, **kwargs):
# we expect some kwargs to be additional tensors which need to be on the right device
kwargs = self._ensure_tensor_on_device(kwargs, device=self.device)
if self.model.can_generate():
output = self.model.generate(**model_inputs, **kwargs)
else:
output = self.model(**model_inputs, **kwargs)[0]
if self.vocoder is not None:
# in that case, the output is a spectrogram that needs to be converted into a waveform
output = self.vocoder(output)
return output
def __call__(self, text_inputs: Union[str, List[str]], **forward_params):
"""
Generates speech/audio from the inputs. See the [`TextToAudioPipeline`] documentation for more information.
Args:
text_inputs (`str` or `List[str]`):
The text(s) to generate.
forward_params (*optional*):
Parameters passed to the model generation/forward method.
Return:
A `dict` or a list of `dict`: The dictionaries have two keys:
- **audio** (`np.ndarray` of shape `(nb_channels, audio_length)`) -- The generated audio waveform.
- **sampling_rate** (`int`) -- The sampling rate of the generated audio waveform.
"""
return super().__call__(text_inputs, **forward_params)
def _sanitize_parameters(
self,
preprocess_params=None,
forward_params=None,
):
if preprocess_params is None:
preprocess_params = {}
if forward_params is None:
forward_params = {}
postprocess_params = {}
return preprocess_params, forward_params, postprocess_params
def postprocess(self, waveform):
output_dict = {}
output_dict["audio"] = waveform.cpu().float().numpy()
output_dict["sampling_rate"] = self.sampling_rate
return output_dict
|
3,218 |
put
|
# flake8: noqa
from django.core.exceptions import ImproperlyConfigured
from django.db import transaction
from django.forms import models as model_forms
from django.forms.formsets import all_valid
from django.http import HttpResponseRedirect
from django.utils.encoding import force_str
from django.views import generic
class MultiFormMixin(generic.base.ContextMixin):
forms = {}
success_url = None
prefix = None
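    # Illustrative shape of the ``forms`` declaration consumed by the mixins in this module;
    # the form/model names are hypothetical, only the dict keys are read below:
    #   forms = {
    #       "profile": {"form_class": ProfileForm, "initial": {"name": ""}},
    #       "address": {"model": Address, "fields": ["street", "city"]},
    #   }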
def forms_invalid(self, forms):
"""
If any form is invalid, re-render the context data with the
data-filled forms and errors.
"""
return self.render_to_response(self.get_context_data(forms=forms))
def forms_valid(self, forms):
"""
If all forms are valid, redirect to the supplied URL.
"""
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
"""
Insert the forms into the context dict.
"""
if "forms" not in kwargs:
kwargs["forms"] = self.get_forms()
return super().get_context_data(**kwargs)
def get_forms(self):
"""
Returns instances of the forms to be used in this view.
"""
forms = {}
for name in self.forms.keys():
form_class = self.get_form_class(name)
if form_class:
forms[name] = form_class(**self.get_form_kwargs(name))
return forms
def _get_from_name(self, name, key, default=None):
form = self.forms.get(name)
if form:
return form.get(key, default)
def get_form_class(self, name):
"""
Returns the form class to be used with the named form.
"""
return self._get_from_name(name, "form_class")
def get_initial(self, name):
"""
Returns the initial data to use for the named form.
"""
initial = self._get_from_name(name, "initial", {})
return initial.copy()
def get_prefix(self, name):
"""
Returns the prefix to use for the named form.
"""
if self.prefix:
return "{}_{}".format(self.prefix, name)
return name
def get_form_kwargs(self, name):
"""
Returns the keyword arguments for instantiating the named form.
"""
kwargs = {
"initial": self.get_initial(name),
"prefix": self.get_prefix(name),
}
if self.request.method in ("POST", "PUT"):
kwargs.update(
{
"data": self.request.POST,
"files": self.request.FILES,
}
)
return kwargs
def get_success_url(self):
"""
Returns the supplied success URL.
"""
if self.success_url:
# Forcing possible reverse_lazy evaluation
url = force_str(self.success_url)
else:
raise ImproperlyConfigured("No URL to redirect to. Provide a success_url.")
return url
class MultiModelFormMixin(MultiFormMixin):
objects = {}
def forms_valid(self, forms):
"""
If all forms are valid, save the associated models.
"""
self.objects = self.forms_save(forms)
self.forms_save_m2m(forms)
return super().forms_valid(forms)
def forms_save(self, forms, commit=True):
"""
Save all the forms in one transaction.
"""
objects = {}
with transaction.atomic():
for name in self.forms.keys():
if hasattr(forms[name], "save"):
objects[name] = forms[name].save(commit)
return objects
def forms_save_m2m(self, forms):
"""
Calls save_m2m on every form where it is available.
Has to be called after the forms have been saved.
"""
for form in forms.values():
if hasattr(form, "save_m2m"):
form.save_m2m()
def get_form_class(self, name):
"""
Returns the form class to be used with the named form.
"""
fields = self._get_from_name(name, "fields")
form_class = self._get_from_name(name, "form_class")
model = self._get_from_name(name, "model")
if fields is not None and form_class:
raise ImproperlyConfigured(
"Specifying both 'fields' and 'form_class' is not permitted."
)
if form_class:
return form_class
elif model is not None:
if fields is None:
raise ImproperlyConfigured(
"Using MultiModelFormMixin (base class of %s) without "
"the 'fields' attribute is prohibited." % self.__class__.__name__
)
return model_forms.modelform_factory(model, fields=fields)
def get_form_kwargs(self, name):
"""
Returns the keyword arguments for instantiating the named form.
"""
kwargs = super().get_form_kwargs(name)
instance = self.get_instance(name)
if instance:
kwargs.update({"instance": instance})
return kwargs
def get_instance(self, name):
"""
Returns the instance object used for instantiating the named form.
If no instance (None) is returned the django BaseModelForm
creates a default instance of the provided model.
"""
pass
class ProcessMultiFormView(generic.View):
def get(self, request, *args, **kwargs):
"""
Handles GET requests and instantiates a blank version of the form.
"""
return self.render_to_response(self.get_context_data())
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form instance with the passed
POST variables and then checked for validity.
"""
forms = self.get_forms()
if all_valid(forms.values()):
return self.forms_valid(forms)
else:
return self.forms_invalid(forms)
def METHOD_NAME(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseMultiFormView(MultiFormMixin, ProcessMultiFormView):
"""
A base view for displaying multiple forms.
"""
class BaseMultiModelFormView(MultiModelFormMixin, ProcessMultiFormView):
"""
A base view for displaying multiple forms that may contain ModelForms.
"""
|
3,219 |
destroy all cowrie
|
# Copyright (c) 2019 Guilherme Borges <[email protected]>
# See the COPYRIGHT file for more information
from __future__ import annotations
import os
import random
import sys
import uuid
from twisted.python import log
import backend_pool.libvirt.guest_handler
import backend_pool.libvirt.network_handler
import backend_pool.util
from cowrie.core.config import CowrieConfig
LIBVIRT_URI = "qemu:///system"
class LibvirtError(Exception):
pass
class LibvirtBackendService:
def __init__(self):
# lazy import to avoid exception if not using the backend_pool and libvirt not installed (#1185)
import libvirt
# open connection to libvirt
self.conn = libvirt.open(LIBVIRT_URI)
if self.conn is None:
log.msg(
eventid="cowrie.backend_pool.qemu",
format="Failed to open connection to %(uri)s",
uri=LIBVIRT_URI,
)
raise LibvirtError()
self.filter = None
self.network = None
# signals backend is ready to be operated
self.ready: bool = False
# table to associate IPs and MACs
seed: int = random.randint(0, sys.maxsize)
self.network_table = backend_pool.util.generate_network_table(seed)
log.msg(
eventid="cowrie.backend_pool.qemu", format="Connection to QEMU established"
)
def start_backend(self):
"""
Initialises QEMU/libvirt environment needed to run guests. Namely starts networks and network filters.
"""
# create a network filter
self.filter = backend_pool.libvirt.network_handler.create_filter(self.conn)
# create a network for the guests (as a NAT)
self.network = backend_pool.libvirt.network_handler.create_network(
self.conn, self.network_table
)
# service is ready to be used (create guests and use them)
self.ready = True
def stop_backend(self):
log.msg(
eventid="cowrie.backend_pool.qemu", format="Doing QEMU clean shutdown..."
)
self.ready = False
self.METHOD_NAME()
def shutdown_backend(self):
self.conn.close() # close libvirt connection
log.msg(
eventid="cowrie.backend_pool.qemu",
format="Connection to QEMU closed successfully",
)
def get_mac_ip(self, ip_tester):
"""
Get a MAC and IP that are not being used by any guest.
"""
# Try to find a free pair 500 times.
retries = 0
while retries < 500:
mac = random.choice(list(self.network_table.keys()))
ip = self.network_table[mac]
if ip_tester(ip):
return mac, ip
retries += 1
raise LibvirtError()
def create_guest(self, ip_tester):
"""
Returns an unready domain and its snapshot information.
Guarantee that the IP is free with the ip_tester function.
"""
if not self.ready:
return
# create a single guest
guest_unique_id = uuid.uuid4().hex
guest_mac, guest_ip = self.get_mac_ip(ip_tester)
dom, snapshot = backend_pool.libvirt.guest_handler.create_guest(
self.conn, guest_mac, guest_unique_id
)
if dom is None:
log.msg(eventid="cowrie.backend_pool.qemu", format="Failed to create guest")
return None
return dom, snapshot, guest_ip
def destroy_guest(self, domain, snapshot):
if not self.ready:
return
try:
# destroy the domain in qemu
domain.destroy()
# we want to remove the snapshot if either:
            # - explicitly set save_snapshots to False
# - no snapshot dir was defined (using cowrie's root dir) - should not happen but prevent it
if (
(
not CowrieConfig.getboolean(
"backend_pool", "save_snapshots", fallback=True
)
or CowrieConfig.get("backend_pool", "snapshot_path", fallback=None)
is None
)
and os.path.exists(snapshot)
and os.path.isfile(snapshot)
):
os.remove(snapshot) # destroy its disk snapshot
except Exception as error:
log.err(
eventid="cowrie.backend_pool.qemu",
format="Error destroying guest: %(error)s",
error=error,
)
def __destroy_all_guests(self):
domains = self.conn.listDomainsID()
if not domains:
log.msg(
eventid="cowrie.backend_pool.qemu", format="Could not get domain list"
)
for domain_id in domains:
d = self.conn.lookupByID(domain_id)
if d.name().startswith("cowrie"):
try:
d.destroy()
except KeyboardInterrupt:
pass
def __destroy_all_networks(self):
networks = self.conn.listNetworks()
if not networks:
log.msg(
eventid="cowrie.backend_pool.qemu", format="Could not get network list"
)
for network in networks:
if network.startswith("cowrie"):
n = self.conn.networkLookupByName(network)
n.destroy()
def __destroy_all_network_filters(self):
network_filters = self.conn.listNWFilters()
if not network_filters:
log.msg(
eventid="cowrie.backend_pool.qemu",
format="Could not get network filters list",
)
for nw_filter in network_filters:
if nw_filter.startswith("cowrie"):
n = self.conn.nwfilterLookupByName(nw_filter)
n.undefine()
def METHOD_NAME(self):
self.__destroy_all_guests()
self.__destroy_all_networks()
self.__destroy_all_network_filters()
|
3,220 |
clean
|
import json
import os
import multiprocessing
import itertools
import random
import re
from infinibatch import iterators
from functools import partial
from tasks.data.lm_loader import LMLoader
from tasks.data.utils import NativeCheckpointableIterator, WeightIterator, EOL_SYMBOL, BOI_SYMBOL, EOI_SYMBOL, image_code_to_token
from fairseq.data.encoders.gpt2_bpe import GPT2BPE
from spacy.lang.en import English
IMAGE_KEY="Images"
TEXT_KEY="Extracted"
class WildLoader(LMLoader):
def _setup(self):
self.nlp_sentencizer = English()
self.nlp_sentencizer.add_pipe("sentencizer")
self.max_image_num = 5
def _tokenize(self):
multilingual_iters = []
weights = []
for data in self.data:
multilingual_iters.append(
self._tokenize_foreach_lang(data)
)
if 'weight' in data:
weights.append(float(data['weight']))
else:
weights.append(int(data['count']))
if len(multilingual_iters) == 1:
return multilingual_iters[0]
sampling_iterator = WeightIterator(weights, self.seed)
control_iterator = NativeCheckpointableIterator(sampling_iterator)
tokenized_lines = iterators.MultiplexIterator(control_iterator, multilingual_iters)
return tokenized_lines
def _tokenize_foreach_lang(self, data):
dataset = list(zip(data['source']))
if self.shuffle:
chunk_files = iterators.InfinitePermutationSourceIterator(
dataset,
seed=self.seed,
shuffle=self.shuffle,
num_instances=self.num_shards,
instance_rank=self.shard_id,)
else:
chunk_files = iterators.ChunkedSourceIterator(
dataset,
num_instances=self.num_shards,
instance_rank=self.shard_id,)
tokenized_lines = iterators.SelectManyIterator(chunk_files, lambda files: self._read_from_files(*files))
tokenized_lines = iterators.SamplingRandomMapIterator(tokenized_lines, self._prepare, self.seed)
return tokenized_lines
@staticmethod
def fs_encode_line(fs_dict, words, append_eos=True):
ids = []
for i, word in enumerate(words):
idx = fs_dict.index(word)
ids.append(idx)
if append_eos:
ids.append(fs_dict.eos_index)
return ids
def text_transform(self, line):
spm_tokenizer=self.tokenizer
if isinstance(spm_tokenizer, GPT2BPE):
tokens = spm_tokenizer.encode(line).split(' ')
else:
tokens = spm_tokenizer.encode(line, out_type=str)
tokenized_tokens = WildLoader.fs_encode_line(self.dictionary, tokens, append_eos=False)
return tokenized_tokens
def METHOD_NAME(self, text):
# python re, remove html tags
METHOD_NAME = re.compile('<.*?>')
return re.sub(METHOD_NAME, '', text)
def _read_from_files(self, source_file):
"""
<s> <image> image token </image> sentence <image> image token </image> sentence </s>
        1. sample a random subsequence: 3 sentences + the first image ... take up to 5 images + 3 sentences
        2. filter html tags <p>, <br>, <br/>
        3. if a document contains a single image, keep it with probability 0.5
"""
file_path = os.path.join(self.data_dir, source_file)
if not os.path.exists(file_path):
print('| file {} not exists'.format(file_path), flush=True)
return iter([]) # skip bad file
try:
with open(file_path, 'r', encoding='utf8') as f:
lines = f.read().strip().split('\n')
except:
return iter([]) # skip bad file
for doc_jsonstr in lines:
try:
json_obj = json.loads(doc_jsonstr)
doc = [self.dictionary.bos()]
start_idx = 0
image_num = len(json_obj[IMAGE_KEY])
if image_num == 1:
r = random.random()
if r > 0.5:
continue
for image_idx, image_item in enumerate(json_obj[IMAGE_KEY]):
if image_idx >= self.max_image_num:
if len(doc) < self.tokens_per_sample:
yield doc
break
text_snippet = json_obj[TEXT_KEY][start_idx:image_item['Span'][0]-1]
text_snippet = self.METHOD_NAME(text_snippet)
if len(text_snippet) != 0:
if image_idx == 0:
# crop 3 sentences before the first image
sentences = list(self.nlp_sentencizer(text_snippet).sents)
text_snippet = ' '.join([str(sent) for sent in sentences[-3:]])
text_token = self.text_transform(text_snippet)
doc.extend(text_token)
if len(doc) >= self.tokens_per_sample: # drop too long sentence
# data.append(doc[:])
doc = doc[:self.tokens_per_sample - 2]
doc.append(self.dictionary.eos())
yield doc
break
image_tokens = [image_code_to_token(i) for i in image_item['input_ids']]
image_tokens = WildLoader.fs_encode_line(self.dictionary, image_tokens, append_eos=False)
doc.append(self.dictionary.index(BOI_SYMBOL))
doc.extend(image_tokens)
doc.append(self.dictionary.index(EOI_SYMBOL))
start_idx = image_item['Span'][1] + 1
if image_idx == image_num - 1:
# crop 3 sentences after the last image
text_snippet = json_obj[TEXT_KEY][start_idx:]
text_snippet = self.METHOD_NAME(text_snippet)
sentences = list(self.nlp_sentencizer(text_snippet).sents)
text_snippet = ' '.join([str(sent) for sent in sentences[:3]])
text_token = self.text_transform(text_snippet)
doc.extend(text_token)
doc.append(self.dictionary.eos())
if len(doc) < self.tokens_per_sample:
yield doc
break
except:
                continue
|
3,221 |
stail q entry
|
# Generated by h2py from /usr/include/netinet/in.h
IPPROTO_IP = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = IPPROTO_IPV4
IPPROTO_TCP = 6
IPPROTO_ST = 7
IPPROTO_EGP = 8
IPPROTO_PIGP = 9
IPPROTO_RCCMON = 10
IPPROTO_NVPII = 11
IPPROTO_PUP = 12
IPPROTO_ARGUS = 13
IPPROTO_EMCON = 14
IPPROTO_XNET = 15
IPPROTO_CHAOS = 16
IPPROTO_UDP = 17
IPPROTO_MUX = 18
IPPROTO_MEAS = 19
IPPROTO_HMP = 20
IPPROTO_PRM = 21
IPPROTO_IDP = 22
IPPROTO_TRUNK1 = 23
IPPROTO_TRUNK2 = 24
IPPROTO_LEAF1 = 25
IPPROTO_LEAF2 = 26
IPPROTO_RDP = 27
IPPROTO_IRTP = 28
IPPROTO_TP = 29
IPPROTO_BLT = 30
IPPROTO_NSP = 31
IPPROTO_INP = 32
IPPROTO_SEP = 33
IPPROTO_3PC = 34
IPPROTO_IDPR = 35
IPPROTO_XTP = 36
IPPROTO_DDP = 37
IPPROTO_CMTP = 38
IPPROTO_TPXX = 39
IPPROTO_IL = 40
IPPROTO_IPV6 = 41
IPPROTO_SDRP = 42
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_IDRP = 45
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_MHRP = 48
IPPROTO_BHA = 49
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_INLSP = 52
IPPROTO_SWIPE = 53
IPPROTO_NHRP = 54
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_AHIP = 61
IPPROTO_CFTP = 62
IPPROTO_HELLO = 63
IPPROTO_SATEXPAK = 64
IPPROTO_KRYPTOLAN = 65
IPPROTO_RVD = 66
IPPROTO_IPPC = 67
IPPROTO_ADFS = 68
IPPROTO_SATMON = 69
IPPROTO_VISA = 70
IPPROTO_IPCV = 71
IPPROTO_CPNX = 72
IPPROTO_CPHB = 73
IPPROTO_WSN = 74
IPPROTO_PVP = 75
IPPROTO_BRSATMON = 76
IPPROTO_ND = 77
IPPROTO_WBMON = 78
IPPROTO_WBEXPAK = 79
IPPROTO_EON = 80
IPPROTO_VMTP = 81
IPPROTO_SVMTP = 82
IPPROTO_VINES = 83
IPPROTO_TTP = 84
IPPROTO_IGP = 85
IPPROTO_DGP = 86
IPPROTO_TCF = 87
IPPROTO_IGRP = 88
IPPROTO_OSPFIGP = 89
IPPROTO_SRPC = 90
IPPROTO_LARP = 91
IPPROTO_MTP = 92
IPPROTO_AX25 = 93
IPPROTO_IPEIP = 94
IPPROTO_MICP = 95
IPPROTO_SCCSP = 96
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_APES = 99
IPPROTO_GMTP = 100
IPPROTO_IPCOMP = 108
IPPROTO_PIM = 103
IPPROTO_PGM = 113
IPPROTO_DIVERT = 254
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPROTO_DONE = 257
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_HIFIRSTAUTO = 49152
IPPORT_HILASTAUTO = 65535
IPPORT_RESERVEDSTART = 600
def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
INET_ADDRSTRLEN = 16
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_FW_ADD = 50
IP_FW_DEL = 51
IP_FW_FLUSH = 52
IP_FW_ZERO = 53
IP_FW_GET = 54
IP_FW_RESETLOG = 55
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
# Included from netinet6/in6.h
# Included from sys/queue.h
def SLIST_HEAD_INITIALIZER(head): return \
def SLIST_ENTRY(type): return \
def STAILQ_HEAD_INITIALIZER(head): return \
def METHOD_NAME(type): return \
def LIST_HEAD_INITIALIZER(head): return \
def LIST_ENTRY(type): return \
def TAILQ_HEAD_INITIALIZER(head): return \
def TAILQ_ENTRY(type): return \
def CIRCLEQ_ENTRY(type): return \
__KAME_VERSION = "20000701/FreeBSD-current"
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
IPV6_ADDR_INT32_ONE = 1
IPV6_ADDR_INT32_TWO = 2
IPV6_ADDR_INT32_MNL = 0xff010000
IPV6_ADDR_INT32_MLL = 0xff020000
IPV6_ADDR_INT32_SMP = 0x0000ffff
IPV6_ADDR_INT16_ULL = 0xfe80
IPV6_ADDR_INT16_USL = 0xfec0
IPV6_ADDR_INT16_MLL = 0xff02
IPV6_ADDR_INT32_ONE = 0x01000000
IPV6_ADDR_INT32_TWO = 0x02000000
IPV6_ADDR_INT32_MNL = 0x000001ff
IPV6_ADDR_INT32_MLL = 0x000002ff
IPV6_ADDR_INT32_SMP = 0xffff0000
IPV6_ADDR_INT16_ULL = 0x80fe
IPV6_ADDR_INT16_USL = 0xc0fe
IPV6_ADDR_INT16_MLL = 0x02ff
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
IPV6_ADDR_SCOPE_NODELOCAL = 0x01
IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
IPV6_ADDR_SCOPE_SITELOCAL = 0x05
IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
IPV6_ADDR_SCOPE_GLOBAL = 0x0e
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_SCOPE_LINKLOCAL(a): return \
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_PKTINFO = 19
IPV6_HOPLIMIT = 20
IPV6_NEXTHOP = 21
IPV6_HOPOPTS = 22
IPV6_DSTOPTS = 23
IPV6_RTHDR = 24
IPV6_PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_BINDV6ONLY = 27
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_MAPPED_ADDR = 23
IPV6CTL_BINDV6ONLY = 24
IPV6CTL_RTEXPIRE = 25
IPV6CTL_RTMINEXPIRE = 26
IPV6CTL_RTMAXCACHE = 27
IPV6CTL_MAXID = 28
|
3,222 |
get always on top
|
import asyncio
import inspect
import warnings
from dataclasses import dataclass, field
from functools import partial
from multiprocessing import Queue
from typing import Any, Callable, Dict, Optional, Tuple
from .dataclasses import KWONLY_SLOTS
from .globals import log
method_queue: Queue = Queue()
response_queue: Queue = Queue()
try:
with warnings.catch_warnings():
# webview depends on bottle which uses the deprecated CGI function (https://github.com/bottlepy/bottle/issues/1403)
warnings.filterwarnings('ignore', category=DeprecationWarning)
import webview
from webview.window import FixPoint
class WindowProxy(webview.Window):
def __init__(self) -> None: # pylint: disable=super-init-not-called
pass # NOTE we don't call super().__init__ here because this is just a proxy to the actual window
async def METHOD_NAME(self) -> bool:
"""Get whether the window is always on top."""
return await self._request()
def set_always_on_top(self, on_top: bool) -> None:
"""Set whether the window is always on top."""
self._send(on_top)
async def get_size(self) -> Tuple[int, int]:
"""Get the window size as tuple (width, height)."""
return await self._request()
async def get_position(self) -> Tuple[int, int]:
"""Get the window position as tuple (x, y)."""
return await self._request()
def load_url(self, url: str) -> None:
self._send(url)
def load_html(self, content: str, base_uri: str = ...) -> None: # type: ignore
self._send(content, base_uri)
def load_css(self, stylesheet: str) -> None:
self._send(stylesheet)
def set_title(self, title: str) -> None:
self._send(title)
async def get_cookies(self) -> Any: # pylint: disable=invalid-overridden-method
return await self._request()
async def get_current_url(self) -> str: # pylint: disable=invalid-overridden-method
return await self._request()
def destroy(self) -> None:
self._send()
def show(self) -> None:
self._send()
def hide(self) -> None:
self._send()
def set_window_size(self, width: int, height: int) -> None:
self._send(width, height)
def resize(self, width: int, height: int, fix_point: FixPoint = FixPoint.NORTH | FixPoint.WEST) -> None:
self._send(width, height, fix_point)
def minimize(self) -> None:
self._send()
def restore(self) -> None:
self._send()
def toggle_fullscreen(self) -> None:
self._send()
def move(self, x: int, y: int) -> None:
self._send(x, y)
async def evaluate_js(self, script: str) -> str: # pylint: disable=arguments-differ,invalid-overridden-method
return await self._request(script)
async def create_confirmation_dialog(self, title: str, message: str) -> bool: # pylint: disable=invalid-overridden-method
return await self._request(title, message)
async def create_file_dialog( # pylint: disable=invalid-overridden-method
self,
dialog_type: int = webview.OPEN_DIALOG,
directory: str = '',
allow_multiple: bool = False,
save_filename: str = '',
file_types: Tuple[str, ...] = (),
) -> Tuple[str, ...]:
return await self._request(
dialog_type=dialog_type,
directory=directory,
allow_multiple=allow_multiple,
save_filename=save_filename,
file_types=file_types,
)
def expose(self, function: Callable) -> None: # pylint: disable=arguments-differ
raise NotImplementedError(f'exposing "{function}" is not supported')
def _send(self, *args: Any, **kwargs: Any) -> None:
name = inspect.currentframe().f_back.f_code.co_name # type: ignore
method_queue.put((name, args, kwargs))
async def _request(self, *args: Any, **kwargs: Any) -> Any:
def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
method_queue.put((name, args, kwargs))
return response_queue.get() # wait for the method to be called and for its result to be written to the queue
except Exception:
log.exception(f'error in {name}')
return None
name = inspect.currentframe().f_back.f_code.co_name # type: ignore
return await asyncio.get_event_loop().run_in_executor(None, partial(wrapper, *args, **kwargs))
def signal_server_shutdown(self) -> None:
self._send()
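# Illustrative note (an assumption, not part of the original file): each proxy
# method forwards its own name plus its arguments through ``method_queue``;
# a consumer running next to the real pywebview window is expected to pop these
# tuples, invoke the corresponding window method, and push return values onto
# ``response_queue`` for the awaiting ``_request`` calls. For example,
# ``set_title('Demo')`` enqueues ``('set_title', ('Demo',), {})``.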
except ModuleNotFoundError:
class WindowProxy: # type: ignore
pass # just a dummy if webview is not installed
@dataclass(**KWONLY_SLOTS)
class Native:
start_args: Dict[str, Any] = field(default_factory=dict)
window_args: Dict[str, Any] = field(default_factory=dict)
main_window: Optional[WindowProxy] = None
|
3,223 |
call
|
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from elasticapm import async_capture_span
from elasticapm.conf import constants
from elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule
from elasticapm.traces import DroppedSpan, execution_context
from elasticapm.utils import get_host_from_url, sanitize_url
from elasticapm.utils.disttracing import TracingOptions
class AioHttpClientInstrumentation(AsyncAbstractInstrumentedModule):
name = "aiohttp_client"
instrument_list = [("aiohttp.client", "ClientSession._request")]
async def METHOD_NAME(self, module, method, wrapped, instance, args, kwargs):
method = kwargs["method"] if "method" in kwargs else args[0]
url = kwargs["url"] if "url" in kwargs else args[1]
url = str(url)
signature = " ".join([method.upper(), get_host_from_url(url)])
url = sanitize_url(url)
transaction = execution_context.get_transaction()
async with async_capture_span(
signature,
span_type="external",
span_subtype="http",
extra={"http": {"url": url}},
leaf=True,
) as span:
leaf_span = span
while isinstance(leaf_span, DroppedSpan):
leaf_span = leaf_span.parent
parent_id = leaf_span.id if leaf_span else transaction.id
trace_parent = transaction.trace_parent.copy_from(
span_id=parent_id, trace_options=TracingOptions(recorded=True)
)
headers = kwargs.get("headers") or {}
self._set_disttracing_headers(headers, trace_parent, transaction)
kwargs["headers"] = headers
response = await wrapped(*args, **kwargs)
if response:
if span.context:
span.context["http"]["status_code"] = response.status
span.set_success() if response.status < 400 else span.set_failure()
return response
def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):
# since we don't have a span, we set the span id to the transaction id
trace_parent = transaction.trace_parent.copy_from(
span_id=transaction.id, trace_options=TracingOptions(recorded=False)
)
headers = kwargs.get("headers") or {}
self._set_disttracing_headers(headers, trace_parent, transaction)
kwargs["headers"] = headers
return args, kwargs
def _set_disttracing_headers(self, headers, trace_parent, transaction):
trace_parent_str = trace_parent.to_string()
headers[constants.TRACEPARENT_HEADER_NAME] = trace_parent_str
if transaction.tracer.config.use_elastic_traceparent_header:
headers[constants.TRACEPARENT_LEGACY_HEADER_NAME] = trace_parent_str
if trace_parent.tracestate:
headers[constants.TRACESTATE_HEADER_NAME] = trace_parent.tracestate
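# Illustrative note (hedged; the exact formatting depends on the TraceParent
# implementation): the string written into the traceparent header normally
# follows the W3C trace-context layout "<version>-<trace-id>-<parent-span-id>-<flags>",
# e.g. "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01", so the
# downstream service can attach its spans to the same trace.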
|
3,224 |
tuple demo
|
#!/usr/bin/env python2
"""
mycpp/examples/containers.py
"""
from __future__ import print_function
import os
from mycpp.mylib import log, NewDict, iteritems
from typing import List, Tuple, Dict, Optional
gstr = 'foo' # type: str
glist_int = [1, 2] # type: List[int]
glist_str = ['spam', 'eggs'] # type: List[str]
gEmptyDict = {} # type: Dict[str, str]
gdict = {'a': 42, 'b': 43} # type: Dict[str, int]
gdict_is = {5: 'foo', 6: 'bar', 7: 'spam'} # type: Dict[int, str]
gdict_ss = {'foo': 'foo'}
def ListDemo():
# type: () -> None
intlist = [] # type: List[int]
intlist.append(1)
intlist.append(2)
intlist.append(3)
local_list = [1, 2]
log("local_list = %d", len(local_list))
# turned into intlist->set(1, 42)
intlist[1] = 42
log("len(intlist) = %d", len(intlist))
for i in intlist:
log("i = %d", i)
for i in intlist[0:len(intlist):2]:
log("stride i = %d", i)
log('1? %d', 1 in intlist)
log('42? %d', 42 in intlist)
del intlist[:]
log("len() after del = %d", len(intlist))
strlist = [] # type: List[str]
strlist.append('a')
strlist.append('b')
log("len(strlist) = %d", len(strlist))
for s in strlist:
log("s = %s", s)
log('a? %d', 'a' in strlist)
log('foo? %d', 'foo' in strlist)
log("len(strlist) = %d", len(strlist))
x = strlist.pop()
log("x = %s", x)
# repeat string
no_str = None # type: Optional[str]
blank = [no_str] * 3
log("len(blank) = %d", len(blank))
class Point(object):
def __init__(self, x, y):
# type: (int, int) -> None
self.x = x
self.y = y
def METHOD_NAME():
# type: () -> None
t2 = (3, 'hello') # type: Tuple[int, str]
# Destructuring
myint, mystr = t2
log('myint = %d', myint)
log('mystr = %s', mystr)
# Does this ever happen? Or do we always use destructuring?
#log('t2[0] = %d', t2[0])
#log('t2[1] = %s', t2[1])
x = 3
if x in (3, 4, 5):
print('yes')
else:
print('no')
p = Point(3, 4)
if p.x in (3, 4, 5):
print('yes')
else:
print('no')
s = 'foo'
if s in ('foo', 'bar'):
print('yes')
else:
print('no')
log("glist_int = %d", len(glist_int))
log("glist_str = %d", len(glist_str))
def DictDemo():
# type: () -> None
# regression
#nonempty = {'a': 'b'} # type: Dict[str, str]
d = {} # type: Dict[str, int]
d['foo'] = 42
# TODO: implement len(Dict) and Dict::remove() and enable this
if 0:
log('len(d) = %d', len(d))
del d['foo']
log('len(d) = %d', len(d))
# TODO: fix this
# log("gdict = %d", len(gdict))
ordered = NewDict() # type: Dict[str, int]
ordered['a'] = 10
ordered['b'] = 11
ordered['c'] = 12
ordered['a'] = 50
for k, v in iteritems(ordered):
log("%s %d", k, v)
# This is a proper type error
# withargs = NewDict({'s': 42}) # type: Dict[str, int]
log('len gEmptyDict = %d', len(gEmptyDict))
log('len gdict = %d', len(gdict))
log('len gdict_is = %d', len(gdict_is))
log('len gdict_ss = %d', len(gdict_ss))
log('gdict["a"] = %d', gdict['a'])
log('gdict_is[5] = %s', gdict_is[5])
log('gdict_ss["foo"] = %s', gdict_ss['foo'])
lit = {'foo': 42, 'bar': 43}
log('foo = %d', lit['foo'])
if 'bar' in lit:
log('bar is a member')
def ContainsDemo():
# type: () -> None
# List
x = 4
if x in [3,4,5]:
print('345 yes')
else:
print('345 no')
if x in [3,5,7]:
print('357 yes')
else:
print('357 no')
# Tuple is optimized
x = 4
if x in (3,4,5):
print('tu 345 yes')
else:
print('tu 345 no')
if x in (3,5,7):
print('tu 357 yes')
else:
print('tu 357 no')
s = "hi"
if s in ("hi", "bye"):
print('hi yes')
else:
print('hi no')
# BUG FIX: 'not in' had a bug
if s not in ("hi", "bye"):
print('hi yes')
else:
print('hi no')
def run_tests():
# type: () -> None
ListDemo()
log('')
METHOD_NAME()
log('')
DictDemo()
log('')
ContainsDemo()
log('')
def run_benchmarks():
# type: () -> None
n = 1000000
i = 0
intlist = [] # type: List[int]
strlist = [] # type: List[str]
while i < n:
intlist.append(i)
strlist.append("foo")
i += 1
log('Appended %d items to 2 lists', n)
if __name__ == '__main__':
if os.getenv('BENCHMARK'):
log('Benchmarking...')
run_benchmarks()
else:
run_tests()
# vim: sw=2
|
3,225 |
tagged words block reader
|
# Natural Language Toolkit: Switchboard Corpus Reader
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
import re
from nltk.corpus.reader.api import *
from nltk.corpus.reader.util import *
from nltk.tag import map_tag, str2tuple
class SwitchboardTurn(list):
"""
A specialized list object used to encode switchboard utterances.
The elements of the list are the words in the utterance; and two
attributes, ``speaker`` and ``id``, are provided to retrieve the
speaker identifier and utterance id. Note that utterance ids
are only unique within a given discourse.
"""
def __init__(self, words, speaker, id):
list.__init__(self, words)
self.speaker = speaker
self.id = int(id)
def __repr__(self):
if len(self) == 0:
text = ""
elif isinstance(self[0], tuple):
text = " ".join("%s/%s" % w for w in self)
else:
text = " ".join(self)
return f"<{self.speaker}.{self.id}: {text!r}>"
class SwitchboardCorpusReader(CorpusReader):
_FILES = ["tagged"]
# Use the "tagged" file even for non-tagged data methods, since
# it's tokenized.
def __init__(self, root, tagset=None):
CorpusReader.__init__(self, root, self._FILES)
self._tagset = tagset
def words(self):
return StreamBackedCorpusView(self.abspath("tagged"), self._words_block_reader)
def tagged_words(self, tagset=None):
def METHOD_NAME(stream):
return self._tagged_words_block_reader(stream, tagset)
return StreamBackedCorpusView(self.abspath("tagged"), METHOD_NAME)
def turns(self):
return StreamBackedCorpusView(self.abspath("tagged"), self._turns_block_reader)
def tagged_turns(self, tagset=None):
def tagged_turns_block_reader(stream):
return self._tagged_turns_block_reader(stream, tagset)
return StreamBackedCorpusView(self.abspath("tagged"), tagged_turns_block_reader)
def discourses(self):
return StreamBackedCorpusView(
self.abspath("tagged"), self._discourses_block_reader
)
def tagged_discourses(self, tagset=False):
def tagged_discourses_block_reader(stream):
return self._tagged_discourses_block_reader(stream, tagset)
return StreamBackedCorpusView(
self.abspath("tagged"), tagged_discourses_block_reader
)
def _discourses_block_reader(self, stream):
# returns at most 1 discourse. (The other methods depend on this.)
return [
[
self._parse_utterance(u, include_tag=False)
for b in read_blankline_block(stream)
for u in b.split("\n")
if u.strip()
]
]
def _tagged_discourses_block_reader(self, stream, tagset=None):
# returns at most 1 discourse. (The other methods depend on this.)
return [
[
self._parse_utterance(u, include_tag=True, tagset=tagset)
for b in read_blankline_block(stream)
for u in b.split("\n")
if u.strip()
]
]
def _turns_block_reader(self, stream):
return self._discourses_block_reader(stream)[0]
def _tagged_turns_block_reader(self, stream, tagset=None):
return self._tagged_discourses_block_reader(stream, tagset)[0]
def _words_block_reader(self, stream):
return sum(self._discourses_block_reader(stream)[0], [])
def _tagged_words_block_reader(self, stream, tagset=None):
return sum(self._tagged_discourses_block_reader(stream, tagset)[0], [])
_UTTERANCE_RE = re.compile(r"(\w+)\.(\d+)\:\s*(.*)")
_SEP = "/"
def _parse_utterance(self, utterance, include_tag, tagset=None):
m = self._UTTERANCE_RE.match(utterance)
if m is None:
raise ValueError("Bad utterance %r" % utterance)
speaker, id, text = m.groups()
words = [str2tuple(s, self._SEP) for s in text.split()]
if not include_tag:
words = [w for (w, t) in words]
elif tagset and tagset != self._tagset:
words = [(w, map_tag(self._tagset, tagset, t)) for (w, t) in words]
return SwitchboardTurn(words, speaker, id)
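# Illustrative example (hypothetical line from the "tagged" file): an utterance
# such as "A.1: Hello/UH there/RB" is parsed into
# SwitchboardTurn([('Hello', 'UH'), ('there', 'RB')], 'A', '1') when
# include_tag is True, and into SwitchboardTurn(['Hello', 'there'], 'A', '1')
# when it is False.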
|
3,226 |
test get status
|
"""
Unit tests for MicroService.py module
Author: Valentin Kuznetsov <vkuznet [AT] gmail [DOT] com>
"""
from __future__ import division, print_function
import unittest
import cherrypy
import gzip
import json
from WMCore_t.MicroService_t import TestConfig
from WMCore.MicroService.Service.RestApiHub import RestApiHub
from WMCore.MicroService.Tools.Common import cert, ckey
from WMCore.Services.pycurl_manager import RequestHandler
from Utils.Utilities import decodeBytesToUnicode
def gzipDecompress(payload):
"""Util to Gzip decompress a given data object"""
if isinstance(payload, bytes):
payload = gzip.decompress(payload)
payload = decodeBytesToUnicode(payload)
return json.loads(payload)
return payload
class ServiceManager(object):
"""
Initialize ServiceManager class
"""
def __init__(self, config=None):
self.config = config
self.appname = 'test' # keep it since it is used by XMLFormat(self.app.appname)
def status(self, serviceName=None, **kwargs):
"Return current status about our service"
print("### CALL status API with service name %s" % serviceName)
data = {'status': "OK", "api": "status"}
if kwargs:
data.update(kwargs)
return data
def info(self, reqName, **kwargs):
"Return current status about our service"
print("### CALL info API with request name %s" % reqName)
data = {'status': "OK", "api": "info"}
if kwargs:
data.update(kwargs)
return data
class MicroServiceTest(unittest.TestCase):
"Unit test for MicroService module"
def setUp(self):
"Setup MicroService for testing"
self.managerName = "ServiceManager"
config = TestConfig
manager = 'WMCore_t.MicroService_t.MicroService_t.%s' % self.managerName
config.views.data.manager = manager
config.manager = manager
mount = '/microservice/data'
self.mgr = RequestHandler()
self.port = config.main.port
self.url = 'http://localhost:%s%s' % (self.port, mount)
cherrypy.config["server.socket_port"] = self.port
self.app = ServiceManager(config)
self.server = RestApiHub(self.app, config, mount)
cherrypy.tree.mount(self.server, mount)
cherrypy.engine.start()
# implicitly request data compressed with gzip (default in RequestHandler class)
self.noEncHeader = {'Accept': 'application/json'}
# explicitly request data uncompressed
self.identityEncHeader = {'Accept': 'application/json', 'Accept-Encoding': 'identity'}
# explicitly request data compressed with gzip
self.gzipEncHeader = {'Accept': 'application/json', 'Accept-Encoding': 'gzip'}
def tearDown(self):
"Tear down MicroService"
cherrypy.engine.stop()
cherrypy.engine.exit()
def METHOD_NAME(self):
"Test function for getting state of the MicroService"
api = "status"
url = '%s/%s' % (self.url, api)
params = {}
data = self.mgr.getdata(url, params=params, headers=self.noEncHeader, encode=True, decode=True)
data = gzipDecompress(data)
self.assertEqual(data['result'][0]['microservice'], self.managerName)
self.assertEqual(data['result'][0]['api'], api)
params = {"service": "transferor"}
data = self.mgr.getdata(url, params=params, headers=self.noEncHeader, encode=True, decode=True)
data = gzipDecompress(data)
self.assertEqual(data['result'][0]['microservice'], self.managerName)
self.assertEqual(data['result'][0]['api'], api)
def testGetStatusIdentity(self):
"Test function for getting state of the MicroService"
api = "status"
url = '%s/%s' % (self.url, api)
params = {}
data = self.mgr.getdata(url, params=params, headers=self.identityEncHeader, encode=True, decode=True)
self.assertEqual(data['result'][0]['microservice'], self.managerName)
self.assertEqual(data['result'][0]['api'], api)
params = {"service": "transferor"}
data = self.mgr.getdata(url, params=params, headers=self.identityEncHeader, encode=True, decode=True)
self.assertEqual(data['result'][0]['microservice'], self.managerName)
self.assertEqual(data['result'][0]['api'], api)
def testGetInfo(self):
"Test function for getting state of the MicroService"
api = "info"
url = '%s/%s' % (self.url, api)
params = {}
data = self.mgr.getdata(url, params=params, encode=True, decode=True)
data = gzipDecompress(data)
self.assertEqual(data['result'][0]['microservice'], self.managerName)
self.assertEqual(data['result'][0]['api'], api)
params = {"request": "fake_request_name"}
data = self.mgr.getdata(url, params=params, encode=True, decode=True)
data = gzipDecompress(data)
self.assertEqual(data['result'][0]['microservice'], self.managerName)
self.assertEqual(data['result'][0]['api'], api)
def testGetInfoGZipped(self):
"Test function for getting state of the MicroService"
api = "status"
url = '%s/%s' % (self.url, api)
params = {}
# headers = {'Content-Type': 'application/json', 'Accept-Encoding': 'gzip'}
data = self.mgr.getdata(url, params=params, headers=self.gzipEncHeader, encode=True, decode=True)
# data = self.mgr.getdata(url, params=params, encode=True, decode=False)
# data = gzipDecompress(data)
self.assertEqual(data['result'][0]['microservice'], self.managerName)
self.assertEqual(data['result'][0]['api'], api)
params = {"request": "fake_request_name"}
data = self.mgr.getdata(url, params=params, headers=self.gzipEncHeader, encode=True, decode=True)
# data = gzipDecompress(data)
self.assertEqual(data['result'][0]['microservice'], self.managerName)
self.assertEqual(data['result'][0]['api'], api)
def testPostCall(self):
"Test function for getting state of the MicroService"
api = "status"
url = self.url + "/%s" % api
params = {"request": "fake_request_name"}
headers = {'Content-Type': 'application/json'}
data = self.mgr.getdata(url, params=params, headers=headers, verb='POST',
cert=cert(), ckey=ckey(), encode=True, decode=True)
self.assertDictEqual(data['result'][0], {'status': 'OK', 'api': 'info'})
def testPostCallGZipped(self):
"Test function for getting state of the MicroService"
api = "status"
url = self.url + "/%s" % api
params = {"request": "fake_request_name"}
headers = {'Content-Type': 'application/json', 'Accept-Encoding': 'gzip'}
data = self.mgr.getdata(url, params=params, headers=headers, verb='POST',
cert=cert(), ckey=ckey(), encode=True, decode=True)
self.assertDictEqual(data['result'][0], {'status': 'OK', 'api': 'info'})
if __name__ == '__main__':
unittest.main()
|
3,227 |
state vars
|
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as t
from lightning.app.utilities.app_helpers import _LightningAppRef, _set_child_name
T = t.TypeVar("T")
if t.TYPE_CHECKING:
from lightning.app.utilities.types import Component
def _prepare_name(component: "Component") -> str:
return str(component.name.split(".")[-1])
# TODO: add support and tests for list operations (concatenation, deletion, insertion, etc.)
class List(t.List[T]):
def __init__(self, *items: T):
"""The List Object is used to represents list collection of
:class:`~lightning.app.core.work.LightningWork`
or :class:`~lightning.app.core.flow.LightningFlow`.
Example:
>>> from lightning.app import LightningFlow, LightningWork
>>> from lightning.app.structures import List
>>> class CounterWork(LightningWork):
... def __init__(self):
... super().__init__()
... self.counter = 0
... def run(self):
... self.counter += 1
...
>>> class RootFlow(LightningFlow):
... def __init__(self):
... super().__init__()
... self.list = List(*[CounterWork(), CounterWork()])
... def run(self):
... for work in self.list:
... work.run()
...
>>> flow = RootFlow()
>>> flow.run()
>>> assert flow.list[0].counter == 1
Arguments:
items: A sequence of LightningWork or LightningFlow.
"""
super().__init__()
from lightning.app.runners.backends import Backend
self._name: t.Optional[str] = ""
self._last_index = 0
self._backend: t.Optional[Backend] = None
for item in items:
self.append(item)
def append(self, v):
from lightning.app.core import LightningFlow, LightningWork
_set_child_name(self, v, str(self._last_index))
if self._backend:
if isinstance(v, LightningFlow):
LightningFlow._attach_backend(v, self._backend)
elif isinstance(v, LightningWork):
self._backend._wrap_run_method(_LightningAppRef().get_current(), v)
v._name = f"{self.name}.{self._last_index}"
self._last_index += 1
super().append(v)
@property
def name(self):
"""Returns the name of this List object."""
return self._name or "root"
@property
def works(self):
from lightning.app.core import LightningFlow, LightningWork
works = [item for item in self if isinstance(item, LightningWork)]
for flow in [item for item in self if isinstance(item, LightningFlow)]:
for child_work in flow.works(recurse=False):
works.append(child_work)
return works
@property
def flows(self):
from lightning.app.core import LightningFlow
from lightning.app.structures import Dict as _Dict
from lightning.app.structures import List as _List
flows = {}
for item in self:
if isinstance(item, LightningFlow):
flows[item.name] = item
for child_flow in item.flows.values():
flows[child_flow.name] = child_flow
if isinstance(item, (_Dict, _List)):
for child_flow in item.flows.values():
flows[child_flow.name] = child_flow
return flows
@property
def state(self):
"""Returns the state of its flows and works."""
from lightning.app.core import LightningFlow, LightningWork
works = [item for item in self if isinstance(item, LightningWork)]
children = [item for item in self if isinstance(item, LightningFlow)]
return {
"works": {_prepare_name(w): w.state for w in works},
"flows": {_prepare_name(flow): flow.state for flow in children},
}
@property
def METHOD_NAME(self):
from lightning.app.core import LightningFlow, LightningWork
works = [item for item in self if isinstance(item, LightningWork)]
children = [item for item in self if isinstance(item, LightningFlow)]
return {
"works": {_prepare_name(w): w.METHOD_NAME for w in works},
"flows": {_prepare_name(flow): flow.METHOD_NAME for flow in children},
}
@property
def state_with_changes(self):
from lightning.app.core import LightningFlow, LightningWork
works = [item for item in self if isinstance(item, LightningWork)]
children = [item for item in self if isinstance(item, LightningFlow)]
return {
"works": {str(_prepare_name(w)): w.state_with_changes for w in works},
"flows": {_prepare_name(flow): flow.state_with_changes for flow in children},
}
def set_state(self, state):
"""Method to set the state of the list and its children."""
from lightning.app.core import LightningFlow, LightningWork
works = [item for item in self if isinstance(item, LightningWork)]
children = [item for item in self if isinstance(item, LightningFlow)]
current_state_keys = {_prepare_name(w) for w in self}
state_keys = set(list(state["works"].keys()) + list(state["flows"].keys()))
if current_state_keys != state_keys:
key_diff = (current_state_keys - state_keys) | (state_keys - current_state_keys)
raise Exception(
f"The provided state doesn't match the `List` {self.name}. Found `{key_diff}` un-matching keys"
)
for work_key, work_state in state["works"].items():
for work in works:
if _prepare_name(work) == work_key:
work.set_state(work_state)
for child_key, child_state in state["flows"].items():
for child in children:
if _prepare_name(child) == child_key:
child.set_state(child_state)
def __len__(self):
"""Returns the number of elements within this List."""
return sum(1 for _ in self)
|
3,228 |
setup
|
"""
find.py - Sopel Spelling Correction Plugin
This plugin will fix spelling errors if someone corrects them
using the sed notation (s///) commonly found in vi/vim.
Copyright 2011, Michael Yanovich, yanovich.net
Copyright 2013, Elsie Powell, embolalia.com
Copyright 2020, dgw, technobabbl.es
Includes contributions from: Matt Meinwald, and Morgan Goose
Licensed under the Eiffel Forum License 2.
https://sopel.chat
"""
from __future__ import annotations
from collections import deque
import re
from sopel import plugin
from sopel.formatting import bold
from sopel.tools import SopelIdentifierMemory
def METHOD_NAME(bot):
if 'find_lines' not in bot.memory:
bot.memory['find_lines'] = SopelIdentifierMemory(
identifier_factory=bot.make_identifier,
)
def shutdown(bot):
try:
del bot.memory['find_lines']
except KeyError:
pass
@plugin.echo
@plugin.rule('.*')
@plugin.priority('low')
@plugin.require_chanmsg
@plugin.unblockable
def collectlines(bot, trigger):
"""Create a temporary log of what people say"""
line = trigger.group()
if line.startswith('s/') or line.startswith('s|'):
# Don't remember substitutions
return
# Add a log for the channel and nick, if there isn't already one
if trigger.sender not in bot.memory['find_lines']:
bot.memory['find_lines'][trigger.sender] = SopelIdentifierMemory(
identifier_factory=bot.make_identifier,
)
if trigger.nick not in bot.memory['find_lines'][trigger.sender]:
bot.memory['find_lines'][trigger.sender][trigger.nick] = deque(maxlen=10)
# Update in-memory list of the user's lines in the channel
line_list = bot.memory['find_lines'][trigger.sender][trigger.nick]
# Messages are stored in reverse order (most recent first)
if line.startswith('\x01ACTION'):
line_list.appendleft(line[:-1])
else:
line_list.appendleft(line)
def _cleanup_channel(bot, channel):
bot.memory['find_lines'].pop(channel, None)
def _cleanup_nickname(bot, nick, channel=None):
if channel:
bot.memory['find_lines'].get(channel, {}).pop(nick, None)
else:
for channel in bot.memory['find_lines'].keys():
bot.memory['find_lines'][channel].pop(nick, None)
@plugin.echo
@plugin.event('PART')
@plugin.priority('low')
@plugin.unblockable
def part_cleanup(bot, trigger):
"""Clean up cached data when a user leaves a channel."""
if trigger.nick == bot.nick:
# Nuke the whole channel cache, boys, we're outta here!
_cleanup_channel(bot, trigger.sender)
else:
# Someone else left; clean up after them
_cleanup_nickname(bot, trigger.nick, trigger.sender)
@plugin.echo
@plugin.event('QUIT')
@plugin.priority('low')
@plugin.unblockable
def quit_cleanup(bot, trigger):
"""Clean up cached data after a user quits IRC."""
# If Sopel itself quits, shutdown() will handle the cleanup.
_cleanup_nickname(bot, trigger.nick)
@plugin.echo
@plugin.event('KICK')
@plugin.priority('low')
@plugin.unblockable
def kick_cleanup(bot, trigger):
"""Clean up cached data when a user is kicked from a channel."""
nick = bot.make_identifier(trigger.args[1])
if nick == bot.nick:
# We got kicked! Nuke the whole channel.
_cleanup_channel(bot, trigger.sender)
else:
# Clean up after the poor sod (or more likely, spammer) who got the boot
_cleanup_nickname(bot, nick, trigger.sender)
# Match nick, s/find/replace/flags. Flags and nick are optional, nick can be
# followed by comma or colon, anything after the first space after the third
# slash is ignored, and you can use either a slash or a pipe.
# If you want to search for an actual slash AND a pipe in the same message,
# you can escape your separator, in old and/or new.
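# Illustrative usage (hypothetical IRC exchange, not from this file): after
# sending "I like pythn", a user can post "s/pythn/python/" to correct their
# own last line, or "alice: s/pythn/python/" to correct alice's most recent
# matching line; flags combine as in "s/python/PYTHON/gi".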
@plugin.rule(r"""(?:
(?P<nick>\S+) # Catch a nick in group 1
[:,]\s+)? # Followed by optional colon/comma and whitespace
s(?P<sep>/) # The literal s and a separator / as group 2
(?P<old> # Group 3 is the thing to find
(?:\\/|[^/])+ # One or more non-slashes or escaped slashes
)
/ # The separator again
(?P<new> # Group 4 is what to replace with
(?:\\/|[^/])* # Zero or more non-slashes or escaped slashes
)
(?:/ # Optional separator followed by group 5 (flags)
(?P<flags>\S+)
)?
""")
@plugin.rule(r"""(?:
(?P<nick>\S+) # Catch a nick in group 1
[:,]\s+)? # Followed by optional colon/comma and whitespace
s(?P<sep>\|) # The literal s and a separator | as group 2
(?P<old> # Group 3 is the thing to find
(?:\\\||[^|])+ # One or more non-pipe or escaped pipe
)
\| # The separator again
(?P<new> # Group 4 is what to replace with
(?:\\\||[^|])* # Zero or more non-pipes or escaped pipes
)
(?:\| # Optional separator followed by group 5 (flags)
(?P<flags>\S+)
)?
""")
@plugin.priority('high')
def findandreplace(bot, trigger):
# Don't bother in PM
if trigger.is_privmsg:
return
# Correcting other person vs self.
rnick = bot.make_identifier(trigger.group('nick') or trigger.nick)
# only do something if there is conversation to work with
history = bot.memory['find_lines'].get(trigger.sender, {}).get(rnick, None)
if not history:
return
sep = trigger.group('sep')
old = trigger.group('old').replace('\\%s' % sep, sep)
new = trigger.group('new')
me = False # /me command
flags = trigger.group('flags') or ''
# only clean/format the new string if it's non-empty
if new:
new = bold(new.replace('\\%s' % sep, sep))
# If g flag is given, replace all. Otherwise, replace once.
if 'g' in flags:
count = -1
else:
count = 1
# repl is a dynamically defined function which performs the substitution.
# i flag turns off case sensitivity. re.U turns on unicode replacement.
if 'i' in flags:
regex = re.compile(re.escape(old), re.U | re.I)
def repl(s):
return re.sub(regex, new, s, count == 1)
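# (count == 1) is passed as re.sub's positional count argument: it evaluates
# to 1 (replace only the first occurrence) or to 0, which re.sub treats as
# "replace all occurrences".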
else:
def repl(s):
return s.replace(old, new, count)
# Look back through the user's lines in the channel until you find a line
# where the replacement works
new_phrase = None
for line in history:
if line.startswith("\x01ACTION"):
me = True # /me command
line = line[8:]
else:
me = False
replaced = repl(line)
if replaced != line: # we are done
new_phrase = replaced
break
if not new_phrase:
return # Didn't find anything
# Save the new "edited" message.
action = (me and '\x01ACTION ') or '' # If /me message, prepend \x01ACTION
history.appendleft(action + new_phrase) # history is in most-recent-first order
# output
if not me:
new_phrase = 'meant to say: %s' % new_phrase
if trigger.group(1):
phrase = '%s thinks %s %s' % (trigger.nick, rnick, new_phrase)
else:
phrase = '%s %s' % (trigger.nick, new_phrase)
bot.say(phrase)
|
3,229 |
get sql resource sql role assignment
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetSqlResourceSqlRoleAssignmentResult',
'AwaitableGetSqlResourceSqlRoleAssignmentResult',
'get_sql_resource_sql_role_assignment',
'get_sql_resource_sql_role_assignment_output',
]
@pulumi.output_type
class GetSqlResourceSqlRoleAssignmentResult:
"""
An Azure Cosmos DB Role Assignment
"""
def __init__(__self__, id=None, name=None, principal_id=None, role_definition_id=None, scope=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if principal_id and not isinstance(principal_id, str):
raise TypeError("Expected argument 'principal_id' to be a str")
pulumi.set(__self__, "principal_id", principal_id)
if role_definition_id and not isinstance(role_definition_id, str):
raise TypeError("Expected argument 'role_definition_id' to be a str")
pulumi.set(__self__, "role_definition_id", role_definition_id)
if scope and not isinstance(scope, str):
raise TypeError("Expected argument 'scope' to be a str")
pulumi.set(__self__, "scope", scope)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the database account.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the database account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> Optional[str]:
"""
The unique identifier for the associated AAD principal in the AAD graph to which access is being granted through this Role Assignment. Tenant ID for the principal is inferred using the tenant associated with the subscription.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="roleDefinitionId")
def role_definition_id(self) -> Optional[str]:
"""
The unique identifier for the associated Role Definition.
"""
return pulumi.get(self, "role_definition_id")
@property
@pulumi.getter
def scope(self) -> Optional[str]:
"""
The data plane resource path for which access is being granted through this Role Assignment.
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetSqlResourceSqlRoleAssignmentResult(GetSqlResourceSqlRoleAssignmentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSqlResourceSqlRoleAssignmentResult(
id=self.id,
name=self.name,
principal_id=self.principal_id,
role_definition_id=self.role_definition_id,
scope=self.scope,
type=self.type)
def METHOD_NAME(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
role_assignment_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlResourceSqlRoleAssignmentResult:
"""
Retrieves the properties of an existing Azure Cosmos DB SQL Role Assignment with the given Id.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str role_assignment_id: The GUID for the Role Assignment.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
__args__['roleAssignmentId'] = role_assignment_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20230415:getSqlResourceSqlRoleAssignment', __args__, opts=opts, typ=GetSqlResourceSqlRoleAssignmentResult).value
return AwaitableGetSqlResourceSqlRoleAssignmentResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
principal_id=pulumi.get(__ret__, 'principal_id'),
role_definition_id=pulumi.get(__ret__, 'role_definition_id'),
scope=pulumi.get(__ret__, 'scope'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_sql_resource_sql_role_assignment_output(account_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
role_assignment_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSqlResourceSqlRoleAssignmentResult]:
"""
Retrieves the properties of an existing Azure Cosmos DB SQL Role Assignment with the given Id.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str role_assignment_id: The GUID for the Role Assignment.
"""
...
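# Illustrative usage (hypothetical Azure resource names, shown only as a sketch):
# assignment = get_sql_resource_sql_role_assignment_output(
#     account_name="my-cosmos-account",
#     resource_group_name="my-resource-group",
#     role_assignment_id="00000000-0000-0000-0000-000000000000")
# pulumi.export("roleScope", assignment.apply(lambda r: r.scope))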
|
3,230 |
from connection string
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
if sys.version_info >= (3,):
from urllib.parse import urlparse
else:
from urlparse import urlparse
from ._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
DEV_ACCOUNT_NAME,
DEV_ACCOUNT_SECONDARY_NAME,
DEV_ACCOUNT_KEY,
DEV_BLOB_HOST,
DEV_QUEUE_HOST,
)
from ._error import (
_ERROR_STORAGE_MISSING_INFO,
)
_EMULATOR_ENDPOINTS = {
'blob': DEV_BLOB_HOST,
'queue': DEV_QUEUE_HOST,
'file': '',
}
_CONNECTION_ENDPOINTS = {
'blob': 'BlobEndpoint',
'queue': 'QueueEndpoint',
'file': 'FileEndpoint',
}
_CONNECTION_ENDPOINTS_SECONDARY = {
'blob': 'BlobSecondaryEndpoint',
'queue': 'QueueSecondaryEndpoint',
'file': 'FileSecondaryEndpoint',
}
class _ServiceParameters(object):
def __init__(self, service, account_name=None, account_key=None, sas_token=None, token_credential=None,
is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
custom_domain=None, custom_domain_secondary=None):
self.account_name = account_name
self.account_key = account_key
self.sas_token = sas_token
self.token_credential = token_credential
self.protocol = protocol or DEFAULT_PROTOCOL
self.is_emulated = is_emulated
if is_emulated:
self.account_name = DEV_ACCOUNT_NAME
self.protocol = 'http'
# Only set the account key if a sas_token is not present to allow sas to be used with the emulator
self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None
self.primary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_NAME)
self.secondary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_SECONDARY_NAME)
else:
# Strip whitespace from the key
if self.account_key:
self.account_key = self.account_key.strip()
endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE
# Setup the primary endpoint
if custom_domain:
parsed_url = urlparse(custom_domain)
# Trim any trailing slashes from the path
path = parsed_url.path.rstrip('/')
self.primary_endpoint = parsed_url.netloc + path
self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme
else:
if not self.account_name:
raise ValueError(_ERROR_STORAGE_MISSING_INFO)
self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix)
# Setup the secondary endpoint
if custom_domain_secondary:
if not custom_domain:
raise ValueError(_ERROR_STORAGE_MISSING_INFO)
parsed_url = urlparse(custom_domain_secondary)
# Trim any trailing slashes from the path
path = parsed_url.path.rstrip('/')
self.secondary_endpoint = parsed_url.netloc + path
else:
if self.account_name:
self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix)
else:
self.secondary_endpoint = None
@staticmethod
def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, token_credential=None,
is_emulated=None, protocol=None, endpoint_suffix=None, custom_domain=None,
request_session=None, connection_string=None, socket_timeout=None):
if connection_string:
params = _ServiceParameters.METHOD_NAME(connection_string, service)
elif is_emulated:
params = _ServiceParameters(service, is_emulated=True)
elif account_name:
if protocol.lower() != 'https' and token_credential is not None:
raise ValueError("Token credential is only supported with HTTPS.")
params = _ServiceParameters(service,
account_name=account_name,
account_key=account_key,
sas_token=sas_token,
token_credential=token_credential,
is_emulated=is_emulated,
protocol=protocol,
endpoint_suffix=endpoint_suffix,
custom_domain=custom_domain)
else:
raise ValueError(_ERROR_STORAGE_MISSING_INFO)
params.request_session = request_session
params.socket_timeout = socket_timeout
return params
@staticmethod
def METHOD_NAME(connection_string, service):
# Split into key=value pairs removing empties, then split the pairs into a dict
config = dict(s.split('=', 1) for s in connection_string.split(';') if s)
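# For illustration (hypothetical values): a connection string such as
# "DefaultEndpointsProtocol=https;AccountName=myacct;AccountKey=abc==;EndpointSuffix=core.windows.net"
# becomes {'DefaultEndpointsProtocol': 'https', 'AccountName': 'myacct',
# 'AccountKey': 'abc==', 'EndpointSuffix': 'core.windows.net'}.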
# Authentication
account_name = config.get('AccountName')
account_key = config.get('AccountKey')
sas_token = config.get('SharedAccessSignature')
# Emulator
is_emulated = config.get('UseDevelopmentStorage')
# Basic URL Configuration
protocol = config.get('DefaultEndpointsProtocol')
endpoint_suffix = config.get('EndpointSuffix')
# Custom URLs
endpoint = config.get(_CONNECTION_ENDPOINTS[service])
endpoint_secondary = config.get(_CONNECTION_ENDPOINTS_SECONDARY[service])
return _ServiceParameters(service,
account_name=account_name,
account_key=account_key,
sas_token=sas_token,
is_emulated=is_emulated,
protocol=protocol,
endpoint_suffix=endpoint_suffix,
custom_domain=endpoint,
custom_domain_secondary=endpoint_secondary)
|
3,231 |
attach signal2
|
#!/usr/bin/env python
#
# Copyright 2012,2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, filter
from gnuradio import blocks
from gnuradio.fft import window
import sys
try:
from gnuradio import qtgui
from PyQt5 import QtWidgets, Qt
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt5 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from gnuradio import channels
except ImportError:
sys.stderr.write("Error: Program requires gr-channels.\n")
sys.exit(1)
class dialog_box(QtWidgets.QWidget):
def __init__(self, display, control):
QtWidgets.QWidget.__init__(self, None)
self.setWindowTitle('PyQt Test GUI')
self.boxlayout = QtWidgets.QBoxLayout(
QtWidgets.QBoxLayout.LeftToRight, self)
self.boxlayout.addWidget(display, 1)
self.boxlayout.addWidget(control)
self.resize(800, 500)
class control_box(QtWidgets.QWidget):
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.setWindowTitle('Control Panel')
self.setToolTip('Control the signals')
QtWidgets.QToolTip.setFont(Qt.QFont('OldEnglish', 10))
self.layout = QtWidgets.QFormLayout(self)
# Control the first signal
self.freq1Edit = QtWidgets.QLineEdit(self)
self.freq1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
self.freq1Edit.editingFinished.connect(self.freq1EditText)
self.amp1Edit = QtWidgets.QLineEdit(self)
self.amp1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
self.amp1Edit.editingFinished.connect(self.amp1EditText)
# Control the second signal
self.freq2Edit = QtWidgets.QLineEdit(self)
self.freq2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
self.freq2Edit.editingFinished.connect(self.freq2EditText)
self.amp2Edit = QtWidgets.QLineEdit(self)
self.amp2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
self.amp2Edit.editingFinished.connect(self.amp2EditText)
self.quit = QtWidgets.QPushButton('Close', self)
self.quit.setMinimumWidth(100)
self.layout.addWidget(self.quit)
self.quit.clicked.connect(QtWidgets.qApp.quit)
def attach_signal1(self, signal):
self.signal1 = signal
self.freq1Edit.setText(("{0}").format(self.signal1.frequency()))
self.amp1Edit.setText(("{0}").format(self.signal1.amplitude()))
def METHOD_NAME(self, signal):
self.signal2 = signal
self.freq2Edit.setText(("{0}").format(self.signal2.frequency()))
self.amp2Edit.setText(("{0}").format(self.signal2.amplitude()))
def freq1EditText(self):
try:
newfreq = float(self.freq1Edit.text())
self.signal1.set_frequency(newfreq)
except ValueError:
print("Bad frequency value entered")
def amp1EditText(self):
try:
newamp = float(self.amp1Edit.text())
self.signal1.set_amplitude(newamp)
except ValueError:
print("Bad amplitude value entered")
def freq2EditText(self):
try:
newfreq = float(self.freq2Edit.text())
self.signal2.set_frequency(newfreq)
except ValueError:
print("Bad frequency value entered")
def amp2EditText(self):
try:
newamp = float(self.amp2Edit.text())
self.signal2.set_amplitude(newamp)
except ValueError:
print("Bad amplitude value entered")
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
Rs = 8000
f1 = 100
f2 = 200
npts = 2048
self.qapp = QtWidgets.QApplication(sys.argv)
ss = open(gr.prefix() + '/share/gnuradio/themes/dark.qss')
sstext = ss.read()
ss.close()
self.qapp.setStyleSheet(sstext)
src1 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
src2 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
src = blocks.add_cc()
channel = channels.channel_model(0.01)
thr = blocks.throttle(gr.sizeof_gr_complex, 100 * npts)
self.snk1 = qtgui.freq_sink_c(npts, window.WIN_BLACKMAN_hARRIS,
0, Rs,
"Complex Freq Example", 3, None)
self.connect(src1, (src, 0))
self.connect(src2, (src, 1))
self.connect(src, channel, thr, (self.snk1, 0))
self.connect(src1, (self.snk1, 1))
self.connect(src2, (self.snk1, 2))
self.ctrl_win = control_box()
self.ctrl_win.attach_signal1(src1)
self.ctrl_win.METHOD_NAME(src2)
# Get the reference pointer to the SpectrumDisplayForm QWidget
pyQt = self.snk1.qwidget()
# Wrap the pointer as a PyQt SIP object
# This can now be manipulated as a PyQt5.QtWidgets.QWidget
pyWin = sip.wrapinstance(pyQt, QtWidgets.QWidget)
# pyWin.show()
self.main_box = dialog_box(pyWin, self.ctrl_win)
self.main_box.show()
if __name__ == "__main__":
tb = my_top_block()
tb.start()
tb.qapp.exec_()
tb.stop()
|
3,232 |
test nonce manual
|
#!/usr/bin/python3
import pytest
from brownie.exceptions import VirtualMachineError
from brownie.network.transaction import TransactionReceipt
abi = {
"constant": False,
"inputs": [{"name": "_to", "type": "address"}, {"name": "_value", "type": "uint256"}],
"name": "transfer",
"outputs": [{"name": "", "type": "bool"}],
"payable": False,
"stateMutability": "nonpayable",
"type": "function",
}
def test_attributes(tester, accounts):
assert tester.revertStrings._address == tester.address
assert tester.revertStrings._name == "BrownieTester.revertStrings"
assert tester.revertStrings._owner == accounts[0]
assert type(tester.revertStrings.abi) is dict
assert tester.revertStrings.signature == "0xd8046e7d"
def test_encode_input(tester):
inputs = ("hello", "0x66aB6D9362d4F35596279692F0251Db635165871", ("potato", "0x1234"))
calldata = tester.setTuple.encode_input(inputs)
assert calldata == (
"0xad31c804000000000000000000000000000000000000000000000000000000000000002"
"0000000000000000000000000000000000000000000000000000000000000006000000000"
"000000000000000066ab6d9362d4f35596279692f0251db63516587100000000000000000"
"000000000000000000000000000000000000000000000a000000000000000000000000000"
"0000000000000000000000000000000000000568656c6c6f0000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000004000000000000000000000000000000000000000000000000000000"
"0000000123400000000000000000000000000000000000000000000000000000000000000"
"06706f7461746f0000000000000000000000000000000000000000000000000000"
)
def test_call(tester, accounts):
nonce = accounts[0].nonce
result = tester.revertStrings.call(5, {"from": accounts[0]})
assert result is True
assert accounts[0].nonce == nonce
def test_call_revert(tester, accounts):
nonce = accounts[0].nonce
with pytest.raises(VirtualMachineError):
tester.revertStrings.call(31337, {"from": accounts[5]})
assert accounts[0].nonce == nonce
def test_returns_tx_on_success(tester, accounts):
"""returns a TransactionReceipt on success"""
tx = tester.revertStrings(5)
assert type(tx) == TransactionReceipt
def test_raises_on_revert(tester, accounts):
"""raises on revert"""
with pytest.raises(VirtualMachineError):
tester.revertStrings(0)
def test_returns_tx_on_revert_in_console(tester, accounts, console_mode):
"""returns a tx on revert in console"""
tx = tester.revertStrings(0)
assert type(tx) == TransactionReceipt
assert tx.status == 0
def test_nonce(tester, accounts):
"""nonces increment properly"""
nonce = accounts[0].nonce
tester.revertStrings(5, {"from": accounts[0]})
assert accounts[0].nonce == nonce + 1
def test_balance_int(tester, accounts, web3):
"""transfers use the correct balance"""
tester.receiveEth({"from": accounts[0], "amount": 1000000})
assert tester.balance() == 1000000
assert web3.eth.get_balance(tester.address) == 1000000
def test_balance_wei(tester, accounts):
"""transfer balances are converted using wei"""
tester.receiveEth({"from": accounts[0], "amount": "1 ether"})
assert tester.balance() == 1000000000000000000
def test_gas_price_manual(tester, accounts):
"""gas price is set correctly when specified in the call"""
balance = accounts[0].balance()
tx = tester.doNothing({"from": accounts[0], "gas_price": 100})
assert tx.gas_price == 100
assert accounts[0].balance() == balance - (100 * tx.gas_used)
@pytest.mark.parametrize("auto", (True, False, None, "auto"))
def test_gas_price_automatic(tester, accounts, config, web3, auto):
"""gas price is set correctly using web3.eth.gas_price"""
config.active_network["settings"]["gas_price"] = auto
balance = accounts[0].balance()
tx = tester.doNothing({"from": accounts[0]})
assert tx.gas_price == web3.eth.gas_price
assert accounts[0].balance() == balance - (tx.gas_price * tx.gas_used)
def test_gas_price_config(tester, accounts, config):
"""gas price is set correctly from the config"""
config.active_network["settings"]["gas_price"] = 50
balance = accounts[0].balance()
tx = tester.doNothing({"from": accounts[0]})
assert tx.gas_price == 50
assert accounts[0].balance() == balance - (50 * tx.gas_used)
def test_gas_limit_manual(tester, accounts):
"""gas limit is set correctly when specified in the call"""
tx = tester.doNothing({"from": accounts[0], "gas_limit": 100000})
assert tx.gas_limit == 100000
@pytest.mark.parametrize("gas_limit", (True, False, None, "auto"))
@pytest.mark.parametrize("gas_buffer", (1, 1.25))
def test_gas_limit_automatic(tester, accounts, config, gas_limit, gas_buffer):
"""gas limit is set correctly using web3.eth.estimate_gas"""
config.active_network["settings"]["gas_limit"] = gas_limit
config.active_network["settings"]["gas_buffer"] = gas_buffer
tx = tester.doNothing({"from": accounts[0]})
assert int(tx.gas_used * gas_buffer) == tx.gas_limit
def test_gas_limit_config(tester, accounts, config):
"""gas limit is set correctly from the config"""
config.active_network["settings"]["gas_limit"] = 50000
tx = tester.doNothing({"from": accounts[0]})
assert tx.gas_limit == 50000
config.active_network["settings"]["gas_limit"] = False
def METHOD_NAME(tester, accounts):
"""call is successful when correct nonce is specified"""
nonce = accounts[0].nonce
tx = tester.doNothing({"from": accounts[0], "nonce": nonce})
assert tx.nonce == nonce
# @pytest.mark.parametrize("nonce", (-1, 1, 15))
# def test_raises_on_invalid_nonce_manual(tester, accounts, nonce):
# """raises if invalid nonce is specified"""
# nonce += accounts[0].nonce
# with pytest.raises(ValueError):
# tester.doNothing({"from": accounts[0], "nonce": nonce})
def test_repr(tester):
repr(tester.revertStrings)
def test_repr_fixedtype(vypertester):
r = repr(vypertester.fixedType)
assert "decimal" in r
assert "fixed168x10" not in r
def test_tuples(tester, accounts):
value = ["blahblah", accounts[1], ["yesyesyes", "0x1234"]]
tx = tester.setTuple(value)
assert tx.status == 1
tx = tester.getTuple.transact(accounts[1], {"from": accounts[0]})
assert tx.status == 1
assert tx.return_value == value
assert tx.return_value["nested"]["a"] == "yesyesyes"
def test_gas_limit_and_buffer(tester, accounts):
with pytest.raises(ValueError):
tester.doNothing({"from": accounts[0], "gas_limit": 100000, "gas_buffer": 1.2})
|
3,233 |
load data
|
"""Shared definitions in FAOSTAT meadow steps.
Some basic processing is required to create tables from the raw data.
For example, column "Note" (present in some datasets) is skipped to avoid parsing errors.
Other minor changes can be found in the code.
"""
import os
import tempfile
import zipfile
from pathlib import Path
import pandas as pd
import structlog
from owid.catalog import Table
from etl.helpers import PathFinder, create_dataset
# Initialise log.
log = structlog.get_logger()
# Define path to current folder, namespace and version of all datasets in this folder.
CURRENT_DIR = Path(__file__).parent
NAMESPACE = CURRENT_DIR.parent.name
VERSION = CURRENT_DIR.name
def METHOD_NAME(local_path: Path) -> pd.DataFrame:
"""Load snapshot data (as a dataframe) for current dataset.
Parameters
----------
local_path : Path or str
Path to local snapshot file.
Returns
-------
data : pd.DataFrame
Snapshot data.
"""
# Unzip data into a temporary folder.
with tempfile.TemporaryDirectory() as temp_dir:
z = zipfile.ZipFile(local_path)
z.extractall(temp_dir)
(filename,) = list(filter(lambda x: "(Normalized)" in x, os.listdir(temp_dir)))
# Load data from main file.
try:
data = pd.read_csv(os.path.join(temp_dir, filename), encoding="latin-1")
except pd.errors.ParserError:
# Some files are impossible to parse (e.g. faostat_wcad) because column "Note" is poorly formatted.
# Instead of skipping problematic rows, load the file skipping that problematic column.
columns = pd.read_csv(
os.path.join(temp_dir, filename), encoding="latin-1", on_bad_lines="skip", nrows=0
).columns
columns = columns.drop("Note")
data = pd.read_csv(os.path.join(temp_dir, filename), encoding="latin-1", usecols=columns)
return data
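# Illustrative usage sketch (added comment, not part of the original step; the
# snapshot path below is hypothetical -- in practice it comes from the
# PathFinder lookup in run() further down):
#
#     df = METHOD_NAME(Path("/path/to/faostat_qcl.zip"))
#     # df now holds the "(Normalized)" CSV, with the "Note" column dropped
#     # for files where that column breaks the parser.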
def run_sanity_checks(data: pd.DataFrame) -> None:
"""Run basic sanity checks on loaded data (raise assertion errors if any check fails).
Parameters
----------
data : pd.DataFrame
Data to be checked.
"""
df = data.copy()
# Check that column "Year Code" is identical to "Year", and can therefore be dropped.
error = "Column 'Year Code' does not coincide with column 'Year'."
if "Year" not in data.columns:
pass
# Column 'Year' is not in data (this happens at least in faostat_wcad, which requires further processing).
elif df["Year"].dtype == int:
# In most cases, columns "Year Code" and "Year" are simply the year.
assert (df["Year Code"] == df["Year"]).all(), error
else:
# Sometimes (e.g. for dataset fs) there are year ranges (e.g. with "Year Code" 20002002 and "Year" "2000-2002").
assert (df["Year Code"] == df["Year"].str.replace("-", "").astype(int)).all(), error
# Check that there is only one element-unit for each element code.
error = "Multiple element-unit for the same element code."
assert (df.groupby(["Element", "Unit"])["Element Code"].nunique() == 1).all(), error
def prepare_output_data(data: pd.DataFrame) -> pd.DataFrame:
"""Prepare data before saving it to meadow.
Parameters
----------
data : pd.DataFrame
Data.
Returns
-------
df : pd.DataFrame
Data ready to be stored as a table in meadow.
"""
df = data.copy()
# Select columns to keep.
# Note:
# * Ignore column "Year Code" (which is almost identical to "Year", and does not add information).
# * Ignore column "Note" (which is included only in faostat_fa, faostat_fs, faostat_sdgb and faostat_wcad datasets).
# This column may contain double-quoted text within double-quoted text, which becomes impossible to parse.
# E.g. faostat_wcad line 105.
# * Add "Recipient Country Code" and "Recipient Code", which are the names for "Area Code" and "Area", respectively,
# for dataset faostat_fa.
columns_to_keep = [
"Area Code",
"Area",
"Year",
"Item Code",
"Item",
"Element Code",
"Element",
"Unit",
"Value",
"Flag",
"Recipient Country Code",
"Recipient Country",
# Additional columns for faostat_wcad.
"WCA Round",
"Census Year",
]
# Select only columns that are found in the dataframe.
columns_to_keep = list(set(columns_to_keep) & set(df.columns))
df = df[columns_to_keep]
# Set index columns depending on what columns are available in the dataframe.
# Note: "Recipient Country Code" appears only in faostat_fa, and seems to replace "Area Code".
# Note: "WCA Round" and "Census Year" appear only in faostat_wcad.
index_columns = list(
{"Area Code", "Recipient Country Code", "Year", "Item Code", "Element Code", "WCA Round", "Census Year"}
& set(df.columns)
)
if df.duplicated(subset=index_columns).any():
log.warning("Index has duplicated keys.")
df = df.set_index(index_columns)
return df
def run(dest_dir: str) -> None:
#
# Load data.
#
# Fetch the dataset short name from dest_dir.
dataset_short_name = Path(dest_dir).name
# Define path to current step file.
current_step_file = (CURRENT_DIR / dataset_short_name).with_suffix(".py")
# Get paths and naming conventions for current data step.
paths = PathFinder(current_step_file.as_posix())
# Load snapshot.
snapshot = paths.load_dependency(short_name=dataset_short_name + ".zip", channel="snapshot")
df_snapshot = METHOD_NAME(snapshot.path)
#
# Process data.
#
# Run sanity checks.
run_sanity_checks(data=df_snapshot)
# Prepare output meadow table.
tb_meadow = Table(prepare_output_data(data=df_snapshot), short_name=dataset_short_name)
#
# Save outputs.
#
# Create a new meadow dataset.
ds_meadow = create_dataset(dest_dir=dest_dir, tables=[tb_meadow], default_metadata=snapshot.metadata)
ds_meadow.save()
|
3,234 |
try lockf on other process fail
|
"""Test program for the fcntl C module.
"""
import platform
import os
import struct
import sys
import unittest
from multiprocessing import Process
from test.support import verbose, cpython_only
from test.support.import_helper import import_module
from test.support.os_helper import TESTFN, unlink
# Skip test if no fcntl module.
fcntl = import_module('fcntl')
def get_lockdata():
try:
os.O_LARGEFILE
except AttributeError:
start_len = "ll"
else:
start_len = "qq"
if (sys.platform.startswith(('netbsd', 'freebsd', 'openbsd'))
or sys.platform == 'darwin'):
if struct.calcsize('l') == 8:
off_t = 'l'
pid_t = 'i'
else:
off_t = 'lxxxx'
pid_t = 'l'
lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0,
fcntl.F_WRLCK, 0)
elif sys.platform.startswith('gnukfreebsd'):
lockdata = struct.pack('qqihhi', 0, 0, 0, fcntl.F_WRLCK, 0, 0)
elif sys.platform in ['hp-uxB', 'unixware7']:
lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
if lockdata:
if verbose:
print('struct.pack: ', repr(lockdata))
return lockdata
lockdata = get_lockdata()
class BadFile:
def __init__(self, fn):
self.fn = fn
def fileno(self):
return self.fn
def METHOD_NAME(fname, cmd):
f = open(fname, 'wb+')
try:
fcntl.lockf(f, cmd)
except BlockingIOError:
pass
finally:
f.close()
def try_lockf_on_other_process(fname, cmd):
f = open(fname, 'wb+')
fcntl.lockf(f, cmd)
fcntl.lockf(f, fcntl.LOCK_UN)
f.close()
class TestFcntl(unittest.TestCase):
def setUp(self):
self.f = None
def tearDown(self):
if self.f and not self.f.closed:
self.f.close()
unlink(TESTFN)
def test_fcntl_fileno(self):
# the example from the library docs
self.f = open(TESTFN, 'wb')
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
print('Status from fcntl with O_NONBLOCK: ', rv)
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata)
if verbose:
print('String from fcntl with F_SETLKW: ', repr(rv))
self.f.close()
def test_fcntl_file_descriptor(self):
# again, but pass the file rather than numeric descriptor
self.f = open(TESTFN, 'wb')
rv = fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
print('Status from fcntl with O_NONBLOCK: ', rv)
rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata)
if verbose:
print('String from fcntl with F_SETLKW: ', repr(rv))
self.f.close()
def test_fcntl_bad_file(self):
with self.assertRaises(ValueError):
fcntl.fcntl(-1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(ValueError):
fcntl.fcntl(BadFile(-1), fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(TypeError):
fcntl.fcntl('spam', fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(TypeError):
fcntl.fcntl(BadFile('spam'), fcntl.F_SETFL, os.O_NONBLOCK)
@cpython_only
def test_fcntl_bad_file_overflow(self):
from _testcapi import INT_MAX, INT_MIN
# Issue 15989
with self.assertRaises(OverflowError):
fcntl.fcntl(INT_MAX + 1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(OverflowError):
fcntl.fcntl(BadFile(INT_MAX + 1), fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(OverflowError):
fcntl.fcntl(INT_MIN - 1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(OverflowError):
fcntl.fcntl(BadFile(INT_MIN - 1), fcntl.F_SETFL, os.O_NONBLOCK)
@unittest.skipIf(
platform.machine().startswith('arm') and platform.system() == 'Linux',
"ARM Linux returns EINVAL for F_NOTIFY DN_MULTISHOT")
def test_fcntl_64_bit(self):
# Issue #1309352: fcntl shouldn't fail when the third arg fits in a
# C 'long' but not in a C 'int'.
try:
cmd = fcntl.F_NOTIFY
# This flag is larger than 2**31 in 64-bit builds
flags = fcntl.DN_MULTISHOT
except AttributeError:
self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable")
fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY)
try:
fcntl.fcntl(fd, cmd, flags)
finally:
os.close(fd)
def test_flock(self):
# Solaris needs readable file for shared lock
self.f = open(TESTFN, 'wb+')
fileno = self.f.fileno()
fcntl.flock(fileno, fcntl.LOCK_SH)
fcntl.flock(fileno, fcntl.LOCK_UN)
fcntl.flock(self.f, fcntl.LOCK_SH | fcntl.LOCK_NB)
fcntl.flock(self.f, fcntl.LOCK_UN)
fcntl.flock(fileno, fcntl.LOCK_EX)
fcntl.flock(fileno, fcntl.LOCK_UN)
self.assertRaises(ValueError, fcntl.flock, -1, fcntl.LOCK_SH)
self.assertRaises(TypeError, fcntl.flock, 'spam', fcntl.LOCK_SH)
@unittest.skipIf(platform.system() == "AIX", "AIX returns PermissionError")
def test_lockf_exclusive(self):
self.f = open(TESTFN, 'wb+')
cmd = fcntl.LOCK_EX | fcntl.LOCK_NB
fcntl.lockf(self.f, cmd)
p = Process(target=METHOD_NAME, args=(TESTFN, cmd))
p.start()
p.join()
fcntl.lockf(self.f, fcntl.LOCK_UN)
self.assertEqual(p.exitcode, 0)
@unittest.skipIf(platform.system() == "AIX", "AIX returns PermissionError")
def test_lockf_share(self):
self.f = open(TESTFN, 'wb+')
cmd = fcntl.LOCK_SH | fcntl.LOCK_NB
fcntl.lockf(self.f, cmd)
p = Process(target=try_lockf_on_other_process, args=(TESTFN, cmd))
p.start()
p.join()
fcntl.lockf(self.f, fcntl.LOCK_UN)
self.assertEqual(p.exitcode, 0)
@cpython_only
def test_flock_overflow(self):
import _testcapi
self.assertRaises(OverflowError, fcntl.flock, _testcapi.INT_MAX+1,
fcntl.LOCK_SH)
@unittest.skipIf(sys.platform != 'darwin', "F_GETPATH is only available on macos")
def test_fcntl_f_getpath(self):
self.f = open(TESTFN, 'wb')
expected = os.path.abspath(TESTFN).encode('utf-8')
res = fcntl.fcntl(self.f.fileno(), fcntl.F_GETPATH, bytes(len(expected)))
self.assertEqual(expected, res)
@unittest.skipUnless(
hasattr(fcntl, "F_SETPIPE_SZ") and hasattr(fcntl, "F_GETPIPE_SZ"),
"F_SETPIPE_SZ and F_GETPIPE_SZ are not available on all platforms.")
def test_fcntl_f_pipesize(self):
test_pipe_r, test_pipe_w = os.pipe()
try:
# Get the default pipesize with F_GETPIPE_SZ
pipesize_default = fcntl.fcntl(test_pipe_w, fcntl.F_GETPIPE_SZ)
pipesize = pipesize_default // 2 # A new value to detect change.
if pipesize < 512: # the POSIX minimum
                raise unittest.SkipTest(
'default pipesize too small to perform test.')
fcntl.fcntl(test_pipe_w, fcntl.F_SETPIPE_SZ, pipesize)
self.assertEqual(fcntl.fcntl(test_pipe_w, fcntl.F_GETPIPE_SZ),
pipesize)
finally:
os.close(test_pipe_r)
os.close(test_pipe_w)
if __name__ == '__main__':
unittest.main()
|
3,235 |
edit news
|
import urllib
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.utils import timezone
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from comms import models, forms
from core import files, logic as core_logic, models as core_models
from security.decorators import editor_user_required, file_user_required, has_request
@editor_user_required
def news(request):
"""
Allows an editor user to add or delete news items.
:param request: HttpRequest object
:return: HttpResponse object
"""
new_items = models.NewsItem.objects.filter(content_type=request.model_content_type,
object_id=request.site_type.pk).order_by("sequence", "-start_display")
form = forms.NewsItemForm()
new_file = None
if 'delete' in request.POST:
news_item_pk = request.POST.get('delete')
item = get_object_or_404(models.NewsItem,
pk=news_item_pk,
content_type=request.model_content_type,
object_id=request.site_type.pk)
item.delete()
return redirect(reverse('core_manager_news'))
if request.POST:
form = forms.NewsItemForm(request.POST)
if request.FILES:
uploaded_file = request.FILES.get('image_file')
if not files.guess_mime(uploaded_file.name) in files.IMAGE_MIMETYPES:
form.add_error('image_file', 'File must be an image.')
else:
if request.model_content_type.name == 'journal':
new_file = files.save_file_to_journal(request, uploaded_file, 'News Item', 'News Item', public=True)
core_logic.resize_and_crop(new_file.journal_path(request.journal), [750, 324], 'middle')
elif request.model_content_type.name == 'press':
new_file = files.save_file_to_press(request, uploaded_file, 'News Item', 'News Item', public=True)
core_logic.resize_and_crop(new_file.press_path(), [750, 324], 'middle')
if form.is_valid():
new_item = form.save(commit=False)
new_item.content_type = request.model_content_type
new_item.object_id = request.site_type.pk
new_item.posted_by = request.user
new_item.posted = timezone.now()
new_item.large_image_file = new_file
new_item.save()
return redirect(reverse('core_manager_news'))
template = 'core/manager/news/index.html'
context = {
'news_items': new_items,
'action': 'new',
'form': form,
}
return render(request, template, context)
@editor_user_required
def METHOD_NAME(request, news_pk):
"""
Allows an editor to edit an existing news item
:param request: HttpRequest object
    :param news_pk: PK of a NewsItem object
:return: HttpResponse object
"""
new_items = models.NewsItem.objects.filter(content_type=request.model_content_type,
object_id=request.site_type.pk).order_by('-posted')
news_item = get_object_or_404(models.NewsItem, pk=news_pk)
form = forms.NewsItemForm(instance=news_item)
new_file = None
if 'delete_image' in request.POST:
delete_image_id = request.POST.get('delete_image')
file = get_object_or_404(core_models.File, pk=delete_image_id)
if file.owner == request.user or request.user.is_staff:
file.delete()
messages.add_message(request, messages.SUCCESS, 'Image deleted')
else:
messages.add_message(request, messages.WARNING, 'Only the owner or staff can delete this image.')
return redirect(reverse('core_manager_edit_news', kwargs={'news_pk': news_item.pk}))
if request.POST:
form = forms.NewsItemForm(request.POST, instance=news_item)
if request.FILES:
uploaded_file = request.FILES.get('image_file')
if request.model_content_type.name == 'journal':
new_file = files.save_file_to_journal(request, uploaded_file, 'News Item', 'News Item', public=True)
core_logic.resize_and_crop(new_file.journal_path(request.journal), [750, 324], 'middle')
elif request.model_content_type.name == 'press':
new_file = files.save_file_to_press(request, uploaded_file, 'News Item', 'News Item', public=True)
core_logic.resize_and_crop(new_file.press_path(), [750, 324], 'middle')
if form.is_valid():
item = form.save(commit=False)
if new_file:
item.large_image_file = new_file
item.save()
return redirect(reverse('core_manager_news'))
template = 'core/manager/news/index.html'
context = {
'news_item': news_item,
'news_items': new_items,
'action': 'edit',
'form': form,
}
return render(request, template, context)
@has_request
@file_user_required
def serve_news_file(request, identifier_type, identifier, file_id):
""" Serves a news file (designed for use in the carousel).
:param request: the request associated with this call
:param identifier_type: the identifier type for the article
:param identifier: the identifier for the article
:param file_id: the file ID to serve
:return: a streaming response of the requested file or 404
"""
new_item = models.NewsItem.objects.get(
content_type=request.model_content_type,
object_id=request.site_type.pk,
pk=identifier
)
return new_item.serve_news_file()
def news_list(request, tag=None):
"""
    Lists all of a press's or journal's news items, and allows them to be filtered by tag
:param request: HttpRequest object
:param tag: a string matching a Tags.text attribute
:return: HttpResponse object
"""
if not tag:
news_objects = models.NewsItem.objects.filter(
(Q(content_type=request.model_content_type) & Q(object_id=request.site_type.id)) &
(Q(start_display__lte=timezone.now()) | Q(start_display=None)) &
(Q(end_display__gte=timezone.now()) | Q(end_display=None))
).order_by('-posted')
else:
tag = urllib.parse.unquote(tag)
news_objects = models.NewsItem.objects.filter(
(Q(content_type=request.model_content_type) & Q(object_id=request.site_type.id)) &
(Q(start_display__lte=timezone.now()) | Q(start_display=None)) &
(Q(end_display__gte=timezone.now()) | Q(end_display=None)),
tags__text=tag
).order_by('-posted')
paginator = Paginator(news_objects, 15)
page = request.GET.get('page', 1)
try:
news_items = paginator.page(page)
except PageNotAnInteger:
news_items = paginator.page(1)
except EmptyPage:
news_items = paginator.page(paginator.num_pages)
if not request.journal:
template = 'press/core/news/index.html'
else:
template = 'core/news/index.html'
context = {
'news_items': news_items,
'tag': tag,
}
return render(request, template, context)
def news_item(request, news_pk):
"""
Renders a news item for public display.
:param request: HttpRequest object
:param news_pk: PK of a NewsItem object
:return: HttpResponse object
"""
item = get_object_or_404(models.NewsItem.objects.prefetch_related('tags'),
pk=news_pk,
content_type=request.model_content_type)
if request.journal:
template = 'core/news/item.html'
else:
template = 'press/core/news/item.html'
context = {
'news_item': item,
}
return render(request, template, context)
|
3,236 |
byte length
|
#!/usr/bin/env python
"""A module with client action for talking with osquery."""
import json
import os
import subprocess
from typing import Any, Iterator, List, Optional, Text
from grr_response_client import actions
from grr_response_core import config
from grr_response_core.lib.rdfvalues import osquery as rdf_osquery
from grr_response_core.lib.util import precondition
class Error(Exception):
"""A class for all osquery-related exceptions."""
def __init__(self, message: Text, cause: Optional[Exception] = None):
if cause is not None:
message = "{message}: {cause}".format(message=message, cause=cause)
super().__init__(message)
self.cause = cause
# TODO(hanuszczak): Fix the linter error properly.
class TimeoutError(Error): # pylint: disable=redefined-builtin
"""A class of exceptions raised when a call to osquery timeouts."""
def __init__(self, cause: Optional[Exception] = None):
super().__init__("osquery timeout", cause=cause)
class Osquery(actions.ActionPlugin):
"""An action plugin class for talking with osquery."""
in_rdfvalue = rdf_osquery.OsqueryArgs
out_rdfvalues = [rdf_osquery.OsqueryResult]
def Run(self, args):
for result in self.Process(args):
self.SendReply(result)
# TODO(hanuszczak): This does not need to be a class method. It should be
# refactored to a separate function and tested as such.
def Process(self, args) -> Iterator[rdf_osquery.OsqueryResult]:
if not config.CONFIG["Osquery.path"]:
raise RuntimeError("The `Osquery` action invoked on a client without "
"osquery path specified.")
if not os.path.exists(config.CONFIG["Osquery.path"]):
raise RuntimeError("The `Osquery` action invoked on a client where "
"osquery executable is not available.")
if not args.query:
raise ValueError("The `Osquery` was invoked with an empty query.")
output = Query(args)
json_decoder = json.JSONDecoder(object_pairs_hook=dict)
table = ParseTable(json_decoder.decode(output))
table.query = args.query
for chunk in ChunkTable(table, config.CONFIG["Osquery.max_chunk_size"]):
yield rdf_osquery.OsqueryResult(table=chunk)
def ChunkTable(table: rdf_osquery.OsqueryTable,
max_chunk_size: int) -> Iterator[rdf_osquery.OsqueryTable]:
"""Chunks given table into multiple smaller ones.
Tables that osquery yields can be arbitrarily large. Because GRR's messages
cannot be arbitrarily large, it might happen that the table has to be split
into multiple smaller ones.
  Note that the serialized response protos are going to be slightly bigger than
the specified limit. For regular values the additional payload should be
negligible.
Note that chunking a table that is empty results in no chunks at all.
Args:
table: A table to split into multiple smaller ones.
max_chunk_size: A maximum size of the returned table in bytes.
Yields:
Tables with the same query and headers as the input table and a subset of
rows.
"""
def METHOD_NAME(string: Text) -> int:
return len(string.encode("utf-8"))
def Chunk() -> rdf_osquery.OsqueryTable:
result = rdf_osquery.OsqueryTable()
result.query = table.query
result.header = table.header
return result
chunk = Chunk()
chunk_size = 0
for row in table.rows:
row_size = sum(map(METHOD_NAME, row.values))
if chunk_size + row_size > max_chunk_size:
yield chunk
chunk = Chunk()
chunk_size = 0
chunk.rows.append(row)
chunk_size += row_size
# There might be some rows that did not cause the chunk to overflow so it has
# not been yielded as part of the loop.
if chunk.rows:
yield chunk
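# Worked example (illustrative comment only): with max_chunk_size=10 and three
# rows whose UTF-8 encoded values take 6, 6 and 3 bytes, the loop above yields
# a first chunk holding row 1 (adding row 2 would exceed the limit) and a
# second chunk holding rows 2 and 3; an empty input table yields no chunks.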
def ParseTable(table: Any) -> rdf_osquery.OsqueryTable:
"""Parses table of osquery output.
Args:
table: A table in a "parsed JSON" representation.
Returns:
A parsed `rdf_osquery.OsqueryTable` instance.
"""
precondition.AssertIterableType(table, dict)
result = rdf_osquery.OsqueryTable()
result.header = ParseHeader(table)
for row in table:
result.rows.append(ParseRow(result.header, row))
return result
# TODO: Parse type information.
def ParseHeader(table: Any) -> rdf_osquery.OsqueryHeader:
"""Parses header of osquery output.
Args:
table: A table in a "parsed JSON" representation.
Returns:
A parsed `rdf_osquery.OsqueryHeader` instance.
"""
precondition.AssertIterableType(table, dict)
  prototype: Optional[List[Text]] = None
for row in table:
columns = list(row.keys())
if prototype is None:
prototype = columns
elif prototype != columns:
message = "Expected columns '{expected}', got '{actual}' for table {json}"
message = message.format(expected=prototype, actual=columns, json=table)
raise ValueError(message)
result = rdf_osquery.OsqueryHeader()
for name in prototype or []:
result.columns.append(rdf_osquery.OsqueryColumn(name=name))
return result
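# Illustrative sketch (added comment): for a parsed-JSON table such as
#
#     [{"pid": "1", "name": "init"}, {"pid": "2", "name": "kthreadd"}]
#
# ParseHeader returns a header with columns "pid" and "name"; a row whose keys
# come in a different order than the first row raises ValueError above.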
def ParseRow(header: rdf_osquery.OsqueryHeader,
row: Any) -> rdf_osquery.OsqueryRow:
"""Parses a single row of osquery output.
Args:
header: A parsed header describing the row format.
row: A row in a "parsed JSON" representation.
Returns:
A parsed `rdf_osquery.OsqueryRow` instance.
"""
precondition.AssertDictType(row, Text, Text)
result = rdf_osquery.OsqueryRow()
for column in header.columns:
result.values.append(row[column.name])
return result
def Query(args: rdf_osquery.OsqueryArgs) -> str:
"""Calls osquery with given query and returns its output.
Args:
args: A query to call osquery with.
Returns:
A "parsed JSON" representation of the osquery output.
Raises:
TimeoutError: If a call to the osquery executable times out.
Error: If anything goes wrong with the subprocess call, including if the
query is incorrect.
"""
timeout = args.timeout_millis / 1000 # `subprocess.run` uses seconds.
try:
# We use `--S` to enforce shell execution. This is because on Windows there
# is only `osqueryd` and `osqueryi` is not available. However, by passing
# `--S` we can make `osqueryd` behave like `osqueryi`. Since this flag also
# works with `osqueryi`, by passing it we simply expand number of supported
# executable types.
command = [
config.CONFIG["Osquery.path"],
"--S", # Enforce shell execution.
"--logger_stderr=false", # Only allow errors to be written to stderr.
"--logger_min_status=3", # Disable status logs.
"--logger_min_stderr=2", # Only ERROR-level logs to stderr.
"--json", # Set output format to JSON.
]
proc = subprocess.run(
command,
timeout=timeout,
check=True,
input=args.query,
text=True,
encoding="utf-8",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
except subprocess.TimeoutExpired as error:
raise TimeoutError(cause=error)
except subprocess.CalledProcessError as error:
stderr = error.stderr
raise Error(message=f"Osquery error on the client: {stderr}")
stderr = proc.stderr.strip()
if stderr:
# Depending on the version, in case of a syntax error osquery might or might
# not terminate with a non-zero exit code, but it will always print the
# error to stderr.
raise Error(message=f"Osquery error on the client: {stderr}")
return proc.stdout
|
3,237 |
patch make head requests
|
from test.constants import API_URL
from unittest.mock import patch
from uuid import uuid4
from django.conf import settings
import pytest
import requests
from fakeredis import FakeRedis
from api.controllers.search_controller import DEAD_LINK_RATIO
@pytest.fixture(autouse=True)
def redis(monkeypatch) -> FakeRedis:
fake_redis = FakeRedis()
def get_redis_connection(*args, **kwargs):
return fake_redis
monkeypatch.setattr("django_redis.get_redis_connection", get_redis_connection)
yield fake_redis
fake_redis.client().close()
@pytest.fixture(autouse=True)
def turn_off_db_read(monkeypatch):
"""
Prevent DB lookup for ES results because DB is empty.
Since ImageSerializer has set ``needs_db`` to ``True``, all results from ES will be
mapped to DB models. Since the test DB is empty, results array will be empty. By
patching ``needs_db`` to ``False``, we can test the dead link filtering process
without needing to populate the test DB.
"""
monkeypatch.setattr("api.views.image_views.ImageSerializer.needs_db", False)
@pytest.fixture
def unique_query_hash(redis, monkeypatch):
def get_unique_hash(*args, **kwargs):
return str(uuid4())
monkeypatch.setattr(
"api.controllers.search_controller.get_query_hash", get_unique_hash
)
@pytest.fixture
def empty_validation_cache(monkeypatch):
def get_empty_cached_statuses(_, image_urls):
return [None] * len(image_urls)
monkeypatch.setattr(
"api.utils.check_dead_links._get_cached_statuses",
get_empty_cached_statuses,
)
_MAKE_HEAD_REQUESTS_MODULE_PATH = "api.utils.check_dead_links._make_head_requests"
def METHOD_NAME():
def _make_head_requests(urls):
responses = []
for idx, url in enumerate(urls):
status_code = 200 if idx % 10 != 0 else 404
responses.append((url, status_code))
return responses
return patch(_MAKE_HEAD_REQUESTS_MODULE_PATH, side_effect=_make_head_requests)
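# Note (added for clarity): with the patch above every URL at an index that is
# a multiple of 10 reports 404, so a page of 20 head requests yields exactly
# two dead links -- enough to exercise the filter without marking a whole page dead.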
def patch_link_validation_dead_for_count(count):
total_res_count = 0
def _make_head_requests(urls):
nonlocal total_res_count
responses = []
for idx, url in enumerate(urls):
total_res_count += 1
status_code = 404 if total_res_count <= count else 200
responses.append((url, status_code))
return responses
return patch(_MAKE_HEAD_REQUESTS_MODULE_PATH, side_effect=_make_head_requests)
@pytest.mark.django_db
@METHOD_NAME()
def test_dead_link_filtering(mocked_map, client):
path = "/v1/images/"
query_params = {"q": "*", "page_size": 20}
# Make a request that does not filter dead links...
res_with_dead_links = client.get(
path,
query_params | {"filter_dead": False},
)
# ...and ensure that our patched function was not called
mocked_map.assert_not_called()
# Make a request that filters dead links...
res_without_dead_links = client.get(
path,
query_params | {"filter_dead": True},
)
# ...and ensure that our patched function was called
mocked_map.assert_called()
assert res_with_dead_links.status_code == 200
assert res_without_dead_links.status_code == 200
data_with_dead_links = res_with_dead_links.json()
data_without_dead_links = res_without_dead_links.json()
res_1_ids = {result["id"] for result in data_with_dead_links["results"]}
res_2_ids = {result["id"] for result in data_without_dead_links["results"]}
# In this case, both have 20 results as the dead link filter has "back filled" the
# pages of dead links. See the subsequent test for the case when this does not
# occur (i.e., when the entire first page of links is dead).
assert len(res_1_ids) == 20
assert len(res_2_ids) == 20
assert bool(res_1_ids - res_2_ids)
@pytest.mark.django_db
@pytest.mark.parametrize(
("filter_dead", "page_size", "expected_result_count"),
(
(True, 20, 0),
(False, 20, 20),
),
)
def test_dead_link_filtering_all_dead_links(
client,
filter_dead,
page_size,
expected_result_count,
unique_query_hash,
empty_validation_cache,
):
path = "/v1/images/"
query_params = {"q": "*", "page_size": page_size}
with patch_link_validation_dead_for_count(page_size / DEAD_LINK_RATIO):
response = client.get(
path,
query_params | {"filter_dead": filter_dead},
)
assert response.status_code == 200
res_json = response.json()
assert len(res_json["results"]) == expected_result_count
if expected_result_count == 0:
assert res_json["result_count"] == 0
@pytest.fixture
def search_factory(client):
"""Allow passing url parameters along with a search request."""
def _parameterized_search(**kwargs):
response = requests.get(f"{API_URL}/v1/images", params=kwargs, verify=False)
assert response.status_code == 200
parsed = response.json()
return parsed
return _parameterized_search
@pytest.fixture
def search_without_dead_links(search_factory):
"""Test with ``filter_dead`` parameter set to true."""
def _search_without_dead_links(**kwargs):
return search_factory(filter_dead=True, **kwargs)
return _search_without_dead_links
@pytest.mark.django_db
def test_page_size_removing_dead_links(search_without_dead_links):
"""
Test whether the number of results returned is equal to the requested page size.
We have about 500 dead links in the sample data and should have around
8 dead links in the first 100 results on a query composed of a single
wildcard operator.
"""
data = search_without_dead_links(q="*", page_size=20)
assert len(data["results"]) == 20
@pytest.mark.django_db
def test_page_consistency_removing_dead_links(search_without_dead_links):
"""Test that results in consecutive pages don't repeat when filtering dead links."""
total_pages = settings.MAX_PAGINATION_DEPTH
page_size = 5
page_results = []
for page in range(1, total_pages + 1):
page_data = search_without_dead_links(q="*", page_size=page_size, page=page)
page_results += page_data["results"]
def no_duplicates(xs):
s = set()
for x in xs:
if x in s:
return False
s.add(x)
return True
ids = list(map(lambda x: x["id"], page_results))
# No results should be repeated so we should have no duplicate ids
assert no_duplicates(ids)
@pytest.mark.django_db
def test_max_page_count():
response = requests.get(
f"{API_URL}/v1/images",
params={"page": settings.MAX_PAGINATION_DEPTH + 1},
verify=False,
)
assert response.status_code == 400
|
3,238 |
test artifact show artifact name
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os
import pytest
from buildstream.exceptions import ErrorDomain
from buildstream._testing import cli # pylint: disable=unused-import
from tests.testutils import create_artifact_share
# Project directory
DATA_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"project",
)
SIMPLE_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"simple",
)
# Test artifact show
@pytest.mark.datafiles(DATA_DIR)
def test_artifact_show_element_name(cli, tmpdir, datafiles):
project = str(datafiles)
element = "target.bst"
result = cli.run(project=project, args=["artifact", "show", element])
result.assert_success()
assert "not cached {}".format(element) in result.output
result = cli.run(project=project, args=["build", element])
result.assert_success()
result = cli.run(project=project, args=["artifact", "show", element])
result.assert_success()
assert "cached {}".format(element) in result.output
# Test artifact show on a failed element
@pytest.mark.datafiles(DATA_DIR)
def test_artifact_show_failed_element(cli, tmpdir, datafiles):
project = str(datafiles)
element = "manual.bst"
result = cli.run(project=project, args=["artifact", "show", element])
result.assert_success()
assert "not cached {}".format(element) in result.output
result = cli.run(project=project, args=["build", element])
result.assert_task_error(ErrorDomain.SANDBOX, "missing-command")
result = cli.run(project=project, args=["artifact", "show", element])
result.assert_success()
assert "failed {}".format(element) in result.output
# Test artifact show with a deleted dependency
@pytest.mark.datafiles(DATA_DIR)
def test_artifact_show_element_missing_deps(cli, tmpdir, datafiles):
project = str(datafiles)
element = "target.bst"
dependency = "import-bin.bst"
result = cli.run(project=project, args=["build", element])
result.assert_success()
result = cli.run(project=project, args=["artifact", "delete", dependency])
result.assert_success()
result = cli.run(project=project, args=["artifact", "show", "--deps", "all", element])
result.assert_success()
assert "not cached {}".format(dependency) in result.output
assert "cached {}".format(element) in result.output
# Test artifact show with artifact ref
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("with_project", [True, False], ids=["with-project", "without-project"])
def METHOD_NAME(cli, tmpdir, datafiles, with_project):
project = str(datafiles)
element = "target.bst"
result = cli.run(project=project, args=["build", element])
result.assert_success()
cache_key = cli.get_element_key(project, element)
artifact_ref = "test/target/" + cache_key
# Delete the project.conf if we're going to try this without a project
if not with_project:
os.remove(os.path.join(project, "project.conf"))
result = cli.run(project=project, args=["artifact", "show", artifact_ref])
result.assert_success()
assert "cached {}".format(artifact_ref) in result.output
# Test artifact show glob behaviors
@pytest.mark.datafiles(SIMPLE_DIR)
@pytest.mark.parametrize(
"pattern,expected_prefixes",
[
# List only artifact results in the test/project
#
("test/**", ["test/target/", "test/compose-all/", "test/import-bin", "test/import-dev"]),
# List only artifact results by their .bst element names
#
("**.bst", ["import-bin.bst", "import-dev.bst", "compose-all.bst", "target.bst", "subdir/target.bst"]),
# List only the import artifact results
#
("import*.bst", ["import-bin.bst", "import-dev.bst"]),
],
ids=["test/**", "**.bst", "import*.bst"],
)
def test_artifact_show_glob(cli, tmpdir, datafiles, pattern, expected_prefixes):
project = str(datafiles)
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
result = cli.run(project=project, args=["artifact", "show", pattern])
result.assert_success()
output = result.output.strip().splitlines()
# Assert that the number of results match the number of expected results
assert len(output) == len(expected_prefixes)
# Assert that each expected result was found.
for expected_prefix in expected_prefixes:
found = False
for result_line in output:
result_split = result_line.split()
if result_split[-1].startswith(expected_prefix):
found = True
break
assert found, "Expected result {} not found".format(expected_prefix)
# Test artifact show artifact in remote
@pytest.mark.datafiles(DATA_DIR)
def test_artifact_show_element_available_remotely(cli, tmpdir, datafiles):
project = str(datafiles)
element = "target.bst"
# Set up remote and local shares
local_cache = os.path.join(str(tmpdir), "artifacts")
with create_artifact_share(os.path.join(str(tmpdir), "remote")) as remote:
cli.configure(
{
"artifacts": {"servers": [{"url": remote.repo, "push": True}]},
"cachedir": local_cache,
}
)
# Build the element
result = cli.run(project=project, args=["build", element])
result.assert_success()
# Make sure it's in the share
assert remote.get_artifact(cli.get_artifact_name(project, "test", element))
# Delete the artifact from the local cache
result = cli.run(project=project, args=["artifact", "delete", element])
result.assert_success()
assert cli.get_element_state(project, element) != "cached"
result = cli.run(project=project, args=["artifact", "show", element])
result.assert_success()
assert "available {}".format(element) in result.output
|
3,239 |
sci not
|
import argparse
import sys
import numpy as np
def METHOD_NAME(num):
exp = int(np.log10(num))
mant = num/10.0**exp
if mant < 1:
mant *= 10.0
exp -= 1
return r"${:5.3f} \times 10^{{{}}}$".format(round(mant, 3), exp)
class Variable():
def __init__(self, name, lo, o1, med, o2, hi):
self.name = name
self.lo = float(lo)
self.o1 = float(o1)
self.med = float(med)
self.o2 = float(o2)
self.hi = float(hi)
def get_table_line(self, pretty_name=None, simple=False):
if pretty_name is not None:
name = pretty_name
else:
name = self.name
if simple:
_str = r" {:27} {:14.10g} {:5.3f} {:14.10g} {:5.3f} {:14.10g}"
return _str.format(self.name, self.lo, round(self.o1, 3), self.med, round(self.o2, 3), self.hi)
else:
_str = r" {:27} & {:23} & {:5.3f} & {:23} & {:5.3f} & {:23} \\"
return _str.format(name, METHOD_NAME(self.lo), round(self.o1, 3), METHOD_NAME(self.med), round(self.o2, 3), METHOD_NAME(self.hi))
class ConvergenceData():
def __init__(self):
self.data = []
def add_variable(self, name, lo, order1, med, order2, hi):
self.data.append(Variable(name, lo, order1, med, order2, hi))
def read_convergence(file_lo, file_hi):
# we'll wait until we find the L1 data
lines_lo = []
found_l1 = False
with open(file_lo, "r") as flo:
for line in flo:
if "L1 norm" in line:
found_l1 = True
continue
if not found_l1:
continue
# value data lines have 4 columns
if len(line.split()) == 4:
lines_lo.append(line.strip())
lines_hi = []
found_l1 = False
with open(file_hi, "r") as fhi:
for line in fhi:
if "L1 norm" in line:
found_l1 = True
continue
if not found_l1:
continue
# value data lines have 4 columns
if len(line.split()) == 4:
lines_hi.append(line.strip())
cd = ConvergenceData()
for llo, lhi in zip(lines_lo, lines_hi):
vlo, elo, o1, emed1 = llo.split()
vhi, emed2, o2, ehi = lhi.split()
if "---" in o1 or "---" in o2:
print("skipping {}".format(vlo))
continue
if vlo != vhi:
sys.exit("error: variable mismatch")
if emed1.strip() != emed2.strip():
print(emed1, emed2)
sys.exit("error: error mismatch")
cd.add_variable(vlo, elo, o1, emed1, o2, ehi)
return cd
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--simple', action="store_true", help='no latex output')
parser.add_argument("lofile", type=str, nargs=1,
help="name of the low resolution convergence output file")
parser.add_argument("hifile", type=str, nargs=1,
help="name of the high resolution convergence output file")
args = parser.parse_args()
good_vars = {"density": r"$\rho$",
"xmom": r"$\rho u$",
"ymom": r"$\rho v$",
"rho_E": r"$\rho E$",
"rho_e": r"$\rho e$",
"Temp": r"$T$",
"rho_H1": r"$\rho X(\isotm{H}{1})$",
"rho_He4": r"$\rho X(\isotm{He}{4})$",
"rho_C12": r"$\rho X(\isotm{C}{12})$",
"rho_O16": r"$\rho X(\isotm{O}{16})$",
"rho_Fe56": r"$\rho X(\isotm{Fe}{56})$",
"rho_Ye": r"$\rho Y_e$",
"rho_abar": r"$\rho \bar{A}$",
"rho_bea": r"$\rho (B/A)$"
}
# sdc4
file_lo = args.lofile[0]
file_hi = args.hifile[0]
sdc4 = read_convergence(file_lo, file_hi)
for v in sdc4.data:
if v.name in good_vars.keys():
print(v.get_table_line(pretty_name=good_vars[v.name], simple=args.simple))
|
3,240 |
next idx
|
from typing import List, Dict, Literal, Optional, Any
from time import time
import json
import os
class AutoTest():
class HyperParams():
def __init__(self, data: Dict[str, Any]) -> None:
self.__data = data
def __len__(self):
return len(self.__data)
def __getattr__(self, key):
return self.__data.get(key, None)
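    # Note (added comment): HyperParams exposes each settings key as an
    # attribute, e.g. `hp.lr` for a key named "lr"; looking up a key that was
    # never configured returns None instead of raising AttributeError.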
def __init__(self, settings: Dict[str, List]) -> None:
self.keys = list(settings.keys())
self.values = [settings[k] for k in self.keys]
self.reset()
self._lens = tuple((len(vs) for vs in self.values))
self._running: bool = False
self._autosave: Optional[_Saver] = None
def __len__(self):
return len(self.keys)
def reset(self):
self._state = [0] * len(self.keys)
self._step = 0
self._records = []
self._time_global = 0.0
self._time_current = 0.0
def get(self, *idx: int):
"""Output current parameter combination as a list."""
return tuple(self.values[i][k] for i, k in enumerate(idx))
def as_dict(self, *idx: int):
"""Outout current parameter value as a dict."""
vals = self.get(*idx)
return {self.keys[i]: vals[i] for i in range(len(self))}
def record(self, **msg):
"""
Record the information generated during the test.
Example
---
```
for w in autotest.run():
# training codes #
autotest.record(time=..., loss=..., error=...)
```
Raise
---
Raise Exception if call `record` when test is not running.
"""
if not self._running:
raise Exception('Can not record when test is not running.')
rec = {}
rec['params'] = self.as_dict(*self._state)
rec['msg'] = msg
if self._autosave is None:
self._records.append(rec)
else:
self._autosave.save(rec)
def set_autosave(self, filename: Optional[str],
mode: Literal['append', 'replace']='append'):
"""Control the autosave behavior. If `filename` is not None, automatically
save the records to file when calling `record`.
Args
---
filename: str or None.
mode: `'append'` or `'replace'`.
the 'append' mode: always append new records to file.
the 'replace' mode: records will be cleared when new test starts.
Warning
---
        When setting `filename` to `None`, autosave stops and all records are discarded.
"""
if filename is not None:
self._autosave = _Saver(filename=filename, mode=mode)
else:
self._autosave = None
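    # Hedged usage sketch (added comment, not part of the original class):
    #
    #     autotest.set_autosave("records.json", mode="append")   # keep records already in the file
    #     autotest.set_autosave("records.json", mode="replace")  # ignore records already in the file
    #     autotest.set_autosave(None)                            # disable autosave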
def METHOD_NAME(self):
cursor = 0
while self._state[cursor] >= self._lens[cursor] - 1:
self._state[cursor] = 0
cursor += 1
if cursor >= len(self.keys):
return 1
self._state[cursor] += 1
self._step += 1
return 0
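    # Walk-through (added comment): for two parameters with 2 and 3 values the
    # state advances (0,0) -> (1,0) -> (0,1) -> (1,1) -> (0,2) -> (1,2) over
    # successive calls, then the method returns 1 to signal that every
    # combination has been visited; otherwise it returns 0.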
def test_runtime(self):
"""Return time from the start of all the tests to the preesent.
Return `0.0` if the test is not running."""
if self._running:
return time() - self._time_global
return 0.0
def item_runtime(self):
"""Return the time since the start of current test to
the present.
Return `0.0` if the test is not running."""
if self._running:
return time() - self._time_current
return 0.0
def run(self, auto_reset: bool=True):
"""Start the test."""
flag = 0
self._running = True
print(f"自动测试启动...")
if auto_reset:
print("重置测试器状态")
self.reset()
if self._autosave is not None:
print(f"已启用记录自动保存:{self._autosave.filename}")
self._autosave.start()
self._time_global = time()
while flag == 0:
print(f"\n===== 进行第 {self._step} 项测试 =====")
self.print_current_info()
self._time_current = time()
yield AutoTest.HyperParams(self.as_dict(*self._state))
print(f"该项结束,用时 {self.item_runtime():.3f} 秒")
flag = self.METHOD_NAME()
print(f"\n所有测试项结束")
self._running = False
def print_current_info(self):
for k, v in self.as_dict(*self._state).items():
print(f"{k} = {v}")
def save_record(self, filename: str):
"""Save records as a `.json` file.
When using autosave, this can not save anything."""
with open(filename, 'w', encoding='utf-8') as a:
a.write(json.dumps(self._records, indent=4))
class _Saver():
def __init__(self, filename: str, mode: Literal['append', 'replace']) -> None:
self._filename = filename
if mode in ('append', 'replace'):
self._mode = mode
else:
raise ValueError(f'Unknown mode {mode}')
self._data: List[Dict[str, Any]] = []
@property
def filename(self):
return self._filename
def start(self):
"""Start: Try to read old data from the file."""
if self._mode == 'append':
if os.path.exists(self._filename):
with open(self._filename, 'r', encoding='utf-8') as a:
data = json.loads(a.read())
if not isinstance(data, list):
raise TypeError(f'Object in the existed file must be a list, but found {type(data)}.')
self._data = data
def save(self, obj: Dict):
self._data.append(obj)
with open(self._filename, 'w', encoding='utf-8') as a:
a.write(json.dumps(self._data, indent=4))
|
3,241 |
get past ipo
|
import logging
from datetime import datetime, timedelta
from typing import Optional
import pandas as pd
from openbb_terminal.core.session.current_user import get_current_user
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import request
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_FINNHUB_KEY"])
def get_ipo_calendar(
start_date: Optional[str] = None,
end_date: Optional[str] = None,
) -> pd.DataFrame:
"""Get IPO calendar
Parameters
----------
start_date : Optional[str]
Initial date, format YYYY-MM-DD
end_date : Optional[str]
Final date, format YYYY-MM-DD
Returns
-------
pd.DataFrame
Get dataframe with IPO calendar events
"""
if start_date is None:
start_date = (datetime.now() - timedelta(days=5)).strftime("%Y-%m-%d")
if end_date is None:
end_date = datetime.now().strftime("%Y-%m-%d")
response = request(
f"https://finnhub.io/api/v1/calendar/ipo?from={start_date}&to={end_date}&token={get_current_user().credentials.API_FINNHUB_KEY}"
)
df = pd.DataFrame()
if response.status_code == 200:
d_data = response.json()
if "ipoCalendar" in d_data:
d_refactor_columns = {
"numberOfShares": "Number of Shares",
"totalSharesValue": "Total Shares Value",
"date": "Date",
"exchange": "Exchange",
"name": "Name",
"price": "Price",
"status": "Status",
}
df = pd.DataFrame(d_data["ipoCalendar"]).rename(columns=d_refactor_columns)
else:
console.print("Response is empty")
elif response.status_code == 401:
console.print("[red]Invalid API Key[/red]\n")
elif response.status_code == 403:
console.print("[red]API Key not authorized for Premium Feature[/red]\n")
else:
console.print(f"Error in request: {response.json()['error']}", "\n")
return df
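# Hedged usage sketch (added comment; requires a valid API_FINNHUB_KEY in the
# current user's credentials):
#
#     df = get_ipo_calendar("2023-01-01", "2023-01-31")
#     # On success the columns renamed above (Date, Exchange, Name, Price,
#     # Status, ...) are present; on any error an empty DataFrame is returned.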
@log_start_end(log=logger)
@check_api_key(["API_FINNHUB_KEY"])
def METHOD_NAME(
num_days_behind: int = 5,
start_date: Optional[str] = None,
) -> pd.DataFrame:
"""Past IPOs dates. [Source: Finnhub]
Parameters
----------
num_days_behind: int
Number of days to look behind for IPOs dates
start_date: str
The starting date (format YYYY-MM-DD) to look for IPOs
Returns
-------
pd.DataFrame
Get dataframe with past IPOs
"""
today = datetime.now()
start = (
(today - timedelta(days=num_days_behind)).strftime("%Y-%m-%d")
if start_date is None
else start_date
)
df_past_ipo = (
get_ipo_calendar(start, today.strftime("%Y-%m-%d"))
.rename(columns={"Date": "Past"})
.fillna("")
)
if df_past_ipo.empty:
console.print(f"No IPOs found since the last {num_days_behind} days")
else:
df_past_ipo = df_past_ipo.sort_values("Past", ascending=False)
return df_past_ipo
@log_start_end(log=logger)
def get_future_ipo(
num_days_ahead: int = 5,
end_date: Optional[str] = None,
) -> pd.DataFrame:
"""Future IPOs dates. [Source: Finnhub]
Parameters
----------
num_days_ahead: int
Number of days to look ahead for IPOs dates
end_date: datetime
The end date (format YYYY-MM-DD) to look for IPOs from today onwards
Returns
-------
pd.DataFrame
Get dataframe with future IPOs
"""
today = datetime.now()
end = (
(today + timedelta(days=num_days_ahead)).strftime("%Y-%m-%d")
if end_date is None
else end_date
)
df_future_ipo = (
get_ipo_calendar(today.strftime("%Y-%m-%d"), end)
.rename(columns={"Date": "Future"})
.fillna("")
)
if df_future_ipo.empty:
console.print(f"No IPOs found for the next {num_days_ahead} days")
else:
df_future_ipo = df_future_ipo.sort_values("Future", ascending=False)
return df_future_ipo
|
3,242 |
prepare request
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailableEndpointServicesOperations:
"""AvailableEndpointServicesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["_models.EndpointServicesListResult"]:
"""List what values of endpoint services are available for use.
:param location: The location to check available endpoint services.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EndpointServicesListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.EndpointServicesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EndpointServicesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def METHOD_NAME(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('EndpointServicesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = METHOD_NAME(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/virtualNetworkAvailableEndpointServices'} # type: ignore
|
3,243 |
proc6
|
#! /usr/bin/env python3
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000
from time import perf_counter as clock  # time.clock() was removed in Python 3.8
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
IntComp = 0, StringComp = 0):
self.PtrComp = PtrComp
self.Discr = Discr
self.EnumComp = EnumComp
self.IntComp = IntComp
self.StringComp = StringComp
def copy(self):
return Record(self.PtrComp, self.Discr, self.EnumComp,
self.IntComp, self.StringComp)
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
benchtime, stones = pystones(loops)
print("Pystone(%s) time for %d passes = %g" % \
(__version__, loops, benchtime))
print("This machine benchmarks at %g pystones/second" % stones)
def pystones(loops=LOOPS):
return Proc0(loops)
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = [x[:] for x in [Array1Glob]*51]
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
global IntGlob
global BoolGlob
global Char1Glob
global Char2Glob
global Array1Glob
global Array2Glob
global PtrGlb
global PtrGlbNext
starttime = clock()
for i in range(loops):
pass
nulltime = clock() - starttime
PtrGlbNext = Record()
PtrGlb = Record()
PtrGlb.PtrComp = PtrGlbNext
PtrGlb.Discr = Ident1
PtrGlb.EnumComp = Ident3
PtrGlb.IntComp = 40
PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
Array2Glob[8][7] = 10
starttime = clock()
for i in range(loops):
Proc5()
Proc4()
IntLoc1 = 2
IntLoc2 = 3
String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
EnumLoc = Ident2
BoolGlob = not Func2(String1Loc, String2Loc)
while IntLoc1 < IntLoc2:
IntLoc3 = 5 * IntLoc1 - IntLoc2
IntLoc3 = Proc7(IntLoc1, IntLoc2)
IntLoc1 = IntLoc1 + 1
Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
PtrGlb = Proc1(PtrGlb)
CharIndex = 'A'
while CharIndex <= Char2Glob:
if EnumLoc == Func1(CharIndex, 'C'):
EnumLoc = METHOD_NAME(Ident1)
CharIndex = chr(ord(CharIndex)+1)
IntLoc3 = IntLoc2 * IntLoc1
IntLoc2 = IntLoc3 / IntLoc1
IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
IntLoc1 = Proc2(IntLoc1)
benchtime = clock() - starttime - nulltime
if benchtime == 0.0:
loopsPerBenchtime = 0.0
else:
loopsPerBenchtime = (loops / benchtime)
return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
PtrParIn.IntComp = 5
NextRecord.IntComp = PtrParIn.IntComp
NextRecord.PtrComp = PtrParIn.PtrComp
NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
if NextRecord.Discr == Ident1:
NextRecord.IntComp = 6
NextRecord.EnumComp = METHOD_NAME(PtrParIn.EnumComp)
NextRecord.PtrComp = PtrGlb.PtrComp
NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
else:
PtrParIn = NextRecord.copy()
NextRecord.PtrComp = None
return PtrParIn
def Proc2(IntParIO):
IntLoc = IntParIO + 10
while 1:
if Char1Glob == 'A':
IntLoc = IntLoc - 1
IntParIO = IntLoc - IntGlob
EnumLoc = Ident1
if EnumLoc == Ident1:
break
return IntParIO
def Proc3(PtrParOut):
global IntGlob
if PtrGlb is not None:
PtrParOut = PtrGlb.PtrComp
else:
IntGlob = 100
PtrGlb.IntComp = Proc7(10, IntGlob)
return PtrParOut
def Proc4():
global Char2Glob
BoolLoc = Char1Glob == 'A'
BoolLoc = BoolLoc or BoolGlob
Char2Glob = 'B'
def Proc5():
global Char1Glob
global BoolGlob
Char1Glob = 'A'
BoolGlob = FALSE
def METHOD_NAME(EnumParIn):
EnumParOut = EnumParIn
if not Func3(EnumParIn):
EnumParOut = Ident4
if EnumParIn == Ident1:
EnumParOut = Ident1
elif EnumParIn == Ident2:
if IntGlob > 100:
EnumParOut = Ident1
else:
EnumParOut = Ident4
elif EnumParIn == Ident3:
EnumParOut = Ident2
elif EnumParIn == Ident4:
pass
elif EnumParIn == Ident5:
EnumParOut = Ident3
return EnumParOut
def Proc7(IntParI1, IntParI2):
IntLoc = IntParI1 + 2
IntParOut = IntParI2 + IntLoc
return IntParOut
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
global IntGlob
IntLoc = IntParI1 + 5
Array1Par[IntLoc] = IntParI2
Array1Par[IntLoc+1] = Array1Par[IntLoc]
Array1Par[IntLoc+30] = IntLoc
for IntIndex in range(IntLoc, IntLoc+2):
Array2Par[IntLoc][IntIndex] = IntLoc
Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
IntGlob = 5
def Func1(CharPar1, CharPar2):
CharLoc1 = CharPar1
CharLoc2 = CharLoc1
if CharLoc2 != CharPar2:
return Ident1
else:
return Ident2
def Func2(StrParI1, StrParI2):
IntLoc = 1
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE
def Func3(EnumParIn):
EnumLoc = EnumParIn
if EnumLoc == Ident3: return TRUE
return FALSE
if __name__ == '__main__':
import sys
def error(msg):
print(msg, end=' ', file=sys.stderr)
print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr)
sys.exit(100)
nargs = len(sys.argv) - 1
if nargs > 1:
error("%d arguments are too many;" % nargs)
elif nargs == 1:
try: loops = int(sys.argv[1])
except ValueError:
error("Invalid argument %r;" % sys.argv[1])
else:
loops = LOOPS
main(loops)
|
3,244 |
test unlock migration
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) DeFi Blockchain Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test token split migrate lock"""
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal
from decimal import Decimal
import time
class TokenSplitMigrateLockTest(DefiTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [
[
"-vaultindex=1",
"-txnotokens=0",
"-amkheight=1",
"-bayfrontheight=1",
"-eunosheight=1",
"-fortcanningheight=1",
"-fortcanningmuseumheight=1",
"-fortcanninghillheight=1",
"-fortcanningroadheight=1",
"-fortcanningcrunchheight=200",
"-greatworldheight=200",
"-grandcentralheight=200",
"-subsidytest=1",
]
]
def run_test(self):
self.setup_test_tokens()
self.METHOD_NAME()
def setup_test_tokens(self):
self.nodes[0].generate(201)
# Symbols
self.symbolDFI = "DFI"
self.symbolGOOGL = "GOOGL"
# Store address
self.address = self.nodes[0].get_genesis_keys().ownerAuthAddress
# Price feeds
price_feed = [
{"currency": "USD", "token": self.symbolGOOGL},
]
# Appoint oracle
oracle_address = self.nodes[0].getnewaddress("", "legacy")
oracle = self.nodes[0].appointoracle(oracle_address, price_feed, 10)
self.nodes[0].generate(1)
# Set Oracle prices
oracle_prices = [
{"currency": "USD", "tokenAmount": f"1@{self.symbolGOOGL}"},
]
self.nodes[0].setoracledata(oracle, int(time.time()), oracle_prices)
self.nodes[0].generate(10)
# Set loan tokens
self.nodes[0].setloantoken(
{
"symbol": self.symbolGOOGL,
"name": self.symbolGOOGL,
"fixedIntervalPriceId": f"{self.symbolGOOGL}/USD",
"isDAT": True,
"interest": 0,
}
)
self.nodes[0].generate(1)
# Store token IDs
self.idGOOGL = list(self.nodes[0].gettoken(self.symbolGOOGL).keys())[0]
# Mint some loan tokens
self.nodes[0].minttokens(
[
f"1000000@{self.symbolGOOGL}",
]
)
self.nodes[0].generate(1)
def METHOD_NAME(self):
# Move to FCC / GW
self.nodes[0].generate(200 - self.nodes[0].getblockcount())
# Define split height
split_height = self.nodes[0].getblockcount() + 11
# Lock token
self.nodes[0].setgovheight(
{"ATTRIBUTES": {f"v0/locks/token/{self.idGOOGL}": "true"}}, split_height - 2
)
self.nodes[0].generate(1)
# Token split
self.nodes[0].setgovheight(
{"ATTRIBUTES": {f"v0/oracles/splits/{split_height}": f"{self.idGOOGL}/2"}},
split_height - 1,
)
self.nodes[0].generate(1)
# Token unlock
self.nodes[0].setgovheight(
{"ATTRIBUTES": {f"v0/locks/token/{self.idGOOGL}": "false"}},
split_height + 10,
)
self.nodes[0].generate(1)
# Move to split height
self.nodes[0].generate(split_height - self.nodes[0].getblockcount())
        # Update token ID
self.idGOOGL = list(self.nodes[0].gettoken(self.symbolGOOGL).keys())[0]
# Check split successful
result = self.nodes[0].gettoken(self.symbolGOOGL)[f"{self.idGOOGL}"]
assert_equal(result["minted"], Decimal("2000000.00000000"))
# Check stored token unlock has migrated
result = self.nodes[0].listgovs()[8][1]
assert_equal(
result[f"{split_height + 10}"], {f"v0/locks/token/{self.idGOOGL}": "false"}
)
if __name__ == "__main__":
TokenSplitMigrateLockTest().main()
|
3,245 |
get user dicts
|
from collections import Counter
from typing import List, Optional
from sqlalchemy.orm.session import Session
from src.challenges.challenge import (
ChallengeManager,
ChallengeUpdater,
FullEventMetadata,
)
from src.challenges.challenge_event import ChallengeEvent
from src.models.rewards.profile_completion_challenge import ChallengeProfileCompletion
from src.models.rewards.user_challenge import UserChallenge
from src.models.social.follow import Follow
from src.models.social.repost import Repost
from src.models.social.save import Save
from src.models.users.user import User
REPOST_THRESHOLD = 1
FOLLOW_THRESHOLD = 5
FAVORITES_THRESHOLD = 1
class ProfileChallengeUpdater(ChallengeUpdater):
"""Updates a profile completion challenge. Requires 7 steps to complete:
- name (always exists)
- description
- cover photo
- profile photo
- follows > threshold
- reposts > threshold
- favorites > threshold
"""
def update_user_challenges(
self,
session: Session,
event: str,
user_challenges: List[UserChallenge],
step_count: Optional[int],
event_metadatas: List[FullEventMetadata],
starting_block: Optional[int],
):
user_ids = [user_challenge.user_id for user_challenge in user_challenges]
partial_completions = get_profile_completion_challenges(session, user_ids)
completion_map = {
completion.user_id: completion for completion in partial_completions
}
if event == ChallengeEvent.profile_update:
users = METHOD_NAME(session, user_ids)
self._handle_profile_updates(completion_map, users)
elif event == ChallengeEvent.repost:
self._handle_reposts(session, partial_completions)
elif event == ChallengeEvent.follow:
self._handle_follow(session, partial_completions)
elif event == ChallengeEvent.favorite:
self._handle_favorite(session, partial_completions)
# Update the user_challenges
for user_challenge in user_challenges:
matching_partial_challenge = completion_map[user_challenge.user_id]
# Update step count
user_challenge.current_step_count = self._get_steps_complete(
matching_partial_challenge
)
# Update completion
user_challenge.is_complete = user_challenge.current_step_count == step_count
def on_after_challenge_creation(self, session, metadatas: List[FullEventMetadata]):
profile_completion_challenges = [
ChallengeProfileCompletion(
user_id=metadata["user_id"],
profile_description=False,
profile_name=False,
profile_picture=False,
profile_cover_photo=False,
follows=False,
reposts=False,
favorites=False,
)
for metadata in metadatas
]
session.add_all(profile_completion_challenges)
# Helpers
def _handle_profile_updates(self, completion_map, user_dicts):
for user in user_dicts:
completion = completion_map[user["user_id"]]
completion.profile_description = user["bio"] is not None
completion.profile_name = user["name"] is not None
completion.profile_picture = (
user["profile_picture"] is not None
or user["profile_picture_sizes"] is not None
)
completion.profile_cover_photo = (
user["cover_photo"] is not None or user["cover_photo_sizes"] is not None
)
def _handle_reposts(self, session, partial_completions):
user_ids = list(map(lambda x: x.user_id, partial_completions))
reposts = (
session.query(Repost)
.filter(
Repost.is_current == True,
Repost.user_id.in_(user_ids),
Repost.is_delete == False,
)
.all()
)
reposts_counter = Counter(map(lambda x: x.user_id, reposts))
for completion in partial_completions:
completion.reposts = reposts_counter[completion.user_id] >= REPOST_THRESHOLD
def _handle_follow(self, session, partial_completions):
user_ids = list(map(lambda x: x.user_id, partial_completions))
follows = (
session.query(Follow)
.filter(
Follow.is_current == True,
Follow.follower_user_id.in_(user_ids),
Follow.is_delete == False,
)
.all()
)
follows_counter = Counter(map(lambda x: x.follower_user_id, follows))
for completion in partial_completions:
completion.follows = follows_counter[completion.user_id] >= FOLLOW_THRESHOLD
def _handle_favorite(self, session, partial_completions):
user_ids = list(map(lambda x: x.user_id, partial_completions))
favorites = (
session.query(Save)
.filter(
Save.is_current == True,
Save.user_id.in_(user_ids),
Save.is_delete == False,
)
.all()
)
        favorites_counter = Counter(map(lambda x: x.user_id, favorites))
        for completion in partial_completions:
            completion.favorites = (
                favorites_counter[completion.user_id] >= FAVORITES_THRESHOLD
            )
def _get_steps_complete(self, partial_challenge):
return (
partial_challenge.profile_description
+ partial_challenge.profile_name
+ partial_challenge.profile_picture
+ partial_challenge.profile_cover_photo
+ partial_challenge.follows
+ partial_challenge.favorites
+ partial_challenge.reposts
)
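    # Note: Python bools are ints, so the sum above is simply the number of
    # completed steps (0-7), which update_user_challenges then compares against
    # the challenge's configured step_count to set is_complete.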
profile_challenge_manager = ChallengeManager(
"profile-completion", ProfileChallengeUpdater()
)
# Accessors
def get_profile_completion_challenges(session, user_ids):
return (
session.query(ChallengeProfileCompletion)
.filter(ChallengeProfileCompletion.user_id.in_(user_ids))
.all()
)
def METHOD_NAME(session, user_ids):
res = (
session.query(
User.bio,
User.name,
User.profile_picture,
User.profile_picture_sizes,
User.cover_photo,
User.cover_photo_sizes,
User.user_id,
).filter(User.user_id.in_(user_ids), User.is_current == True)
).all()
return [
{
"bio": attr[0],
"name": attr[1],
"profile_picture": attr[2],
"profile_picture_sizes": attr[3],
"cover_photo": attr[4],
"cover_photo_sizes": attr[5],
"user_id": attr[6],
}
for attr in res
]
|
3,246 |
check game mode
|
import os
import importlib
from twisted.logger import Logger
log = Logger()
def check_scripts(script_names):
'''Validation for a list of regular extension scripts.
    Check there are no duplicate scripts in the list of script names that is passed in.
Args:
script_names: The list of scripts names to be checked
Return:
bool: True if there are no duplicate scripts, False otherwise
'''
seen = set()
dups = []
for script in script_names:
if script in seen:
dups.append(script)
else:
seen.add(script)
if dups:
log.warn("Scripts included multiple times: {}".format(dups))
return False
return True
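# Illustrative behaviour (hypothetical script names):
#   check_scripts(['medkit', 'votekick'])           -> True
#   check_scripts(['medkit', 'medkit', 'votekick']) -> logs a duplicate warning for ['medkit'], returns False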
def METHOD_NAME(game_mode_name):
'''Validation for a game mode script.
Check if the game_mode is not a default one ('ctf' or 'tc').
Args:
game_mode_name: The game mode name to be checked
Return:
bool: True if the game mode is not a default one, False otherwise
'''
return game_mode_name not in ('ctf', 'tc')
def load_scripts(script_names, script_dir, script_type):
'''Load script as module.
Loads all scripts, matching the script_names, from the script_dir folder. The function also requires the
specification of which type of scripts are passed in (e.g. "script", "gamemode", "testscript"). This is
necessary for naming the namespace and error handling.
Args:
script_names: The list of script names to be loaded
script_dir: The path to the corresponding scripts directory
script_type: The script type ("script" for regular scripts and "gamemode" for game_mode script)
Return:
[module]: The list of module objects containing the loaded scripts
'''
script_objects = []
finder = importlib.machinery.PathFinder()
for script in script_names:
spec_scripts = finder.find_spec(script, [script_dir])
spec_global = importlib.util.find_spec(script)
spec = spec_scripts or spec_global
if not spec:
log.error(
"{} '{}' not found in either {} directory or global scope".format(
script_type, script, script_dir))
continue
# namespace module name to avoid shadowing global modules
# TODO: figure out if there are any right or better ways.
spec.name = 'piqueserver._{}_namespace.{}'.format(script_type, script)
spec.loader.name = spec.name
# load module
try:
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
script_objects.append(module)
continue
except Exception as e: # needs to be broad since we exec the module
log.failure("Error while loading {} {}: {!r}".format(
script_type, script, e))
return script_objects
def load_scripts_regular_extension(script_names, script_dir):
''' Wrapper for load function
It loads scripts in the case of regular extension scripts.
Args:
script_names: The list of script names to be loaded
script_dir: The path to the corresponding scripts directory
Return:
[module]: The list of module objects containing the loaded scripts
'''
return load_scripts(script_names, script_dir, 'script')
def load_script_game_mode(script_name, script_dir):
''' Wrapper for load function
    It loads the script in the case of a game mode script. Prior to loading, it checks that the game mode is
    not a default one (if it is a default, no script should be loaded).
    Args:
      script_name: The game mode script name to be loaded
      script_dir: The path to the corresponding scripts directory
    Return:
      [module]: The list of module objects containing the loaded scripts
'''
if METHOD_NAME(script_name):
return load_scripts([script_name], script_dir, 'gamemode')
return []
def apply_scripts(scripts, config, protocol_class, connection_class):
''' Application of scripts modules
It applies the script modules to the specified protocol and connection class instances, in order to build
the resulting classes with all the additional features that the scripts implement (more information
about how scripts are implemented and used can be found in the project documentation
https://piqueserver.readthedocs.io/en/latest/architecture.html#extension-scripts).
Args:
scripts: List of scripts modules to apply
config: It holds the dict of sections and options; this is required by the scripts
logic
protocol_class: The protocol class instance to update
connection_class: The connection class instance to update
Return:
(FeatureProtocol, FeatureConnection): The updated protocol and connection class instances
'''
for script in scripts:
protocol_class, connection_class = script.apply_script(
protocol_class, connection_class, config.get_dict())
return (protocol_class, connection_class)
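# A minimal usage sketch (names are illustrative; `config` must provide get_dict()):
#   scripts = load_scripts_regular_extension(['medkit'], '/path/to/game_scripts')
#   protocol_class, connection_class = apply_scripts(
#       scripts, config, FeatureProtocol, FeatureConnection)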
|
3,247 |
parametrize external json codegen checks
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for testing external code generation"""
import os
import sys
import pytest
import tvm
from tvm import relay, runtime, testing
from tvm.contrib import utils
skip_windows = pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
skip_micro = pytest.mark.skipif(
tvm.support.libinfo().get("USE_MICRO", "OFF") != "ON",
reason="MicroTVM support not enabled. Set USE_MICRO=ON in config.cmake to enable.",
)
def parametrize_external_codegen_checks(test):
"""Parametrize over the various check_result functions which are available"""
return pytest.mark.parametrize(
"check_result",
[
pytest.param(check_aot_executor_result, marks=[skip_windows, skip_micro]),
pytest.param(check_graph_executor_result, marks=[skip_windows]),
pytest.param(check_vm_result, marks=[skip_windows]),
],
)(test)
def METHOD_NAME(test):
"""Parametrize over the various check_result functions which are available for JSON"""
return pytest.mark.parametrize(
"check_result",
[
pytest.param(check_graph_executor_result, marks=[skip_windows]),
pytest.param(check_vm_result, marks=[skip_windows]),
],
)(test)
def update_lib(lib):
test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
source_dir = os.path.join(test_dir, "..", "..", "..", "..")
contrib_path = os.path.join(source_dir, "src", "runtime", "contrib")
kwargs = {}
kwargs["options"] = ["-O2", "-std=c++17", "-I" + contrib_path]
tmp_path = utils.tempdir()
lib_name = "lib.so"
lib_path = tmp_path.relpath(lib_name)
lib.export_library(lib_path, fcompile=False, **kwargs)
lib = tvm.runtime.load_module(lib_path)
return lib
def check_vm_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()):
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
exe = relay.vm.compile(mod, target=target)
code, lib = exe.save()
lib = update_lib(lib)
exe = runtime.vm.Executable.load_exec(code, lib)
vm = runtime.vm.VirtualMachine(exe, device)
out = vm.run(**map_inputs)
tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)
def check_graph_executor_result(
mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()
):
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
executor_factory = relay.build(mod, target=target)
lib = update_lib(executor_factory.lib)
rt_mod = tvm.contrib.graph_executor.create(executor_factory.graph_json, lib, device)
for name, data in map_inputs.items():
rt_mod.set_input(name, data)
rt_mod.run()
out = tvm.nd.empty(out_shape, device=device)
out = rt_mod.get_output(0, out)
tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)
def check_aot_executor_result(
mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()
):
# Late import to avoid breaking test with USE_MICRO=OFF.
from tvm.testing.aot import AOTTestModel, compile_and_run
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER
interface_api = "packed"
use_unpacked_api = False
test_runner = AOT_DEFAULT_RUNNER
compile_and_run(
AOTTestModel(module=mod, inputs=map_inputs, outputs={"output": result}),
test_runner,
interface_api,
use_unpacked_api,
)
def set_external_func_attr(func, compiler, ext_symbol):
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compiler)
func = func.with_attr("global_symbol", ext_symbol)
return func
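# Illustrative usage (assumed names): mark a Relay function so it is offloaded to
# an external codegen registered as "ccompiler" under the global symbol "ccompiler_0":
#   func = set_external_func_attr(func, "ccompiler", "ccompiler_0")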
|
3,248 |
chrome options
|
"""Pytest setup."""
import os
from typing import Generator, List, Optional
import pytest
from automation.menus.left_menu import LeftMenu
from automation.pages.app_settings import AppSettings
from automation.resources.robot_data import ROBOT_MAPPING, RobotDataType
from dotenv import find_dotenv, load_dotenv
from rich import pretty, traceback
from rich.console import Console
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.webdriver import WebDriver
collect_ignore_glob = ["files/**/*.py"]
_console = Console(color_system="auto")
pretty.install(console=_console)
traceback.install(console=_console)
# Check to see if we have a dotenv file and use it
if find_dotenv():
load_dotenv(find_dotenv())
def pytest_collection_modifyitems(items): # type: ignore # noqa: ANN201,ANN001
"""Order tests."""
# When running all tests calibrate the robot first.
# Most other tests require this.
MODULE_ORDER = ["tests.calibrate_test"]
module_mapping = {item: item.module.__name__ for item in items}
sorted_items = items.copy()
    # Iteratively move tests of each module to the front of the test queue
for module in MODULE_ORDER:
sorted_items = [it for it in sorted_items if module_mapping[it] == module] + [
it for it in sorted_items if module_mapping[it] != module
]
items[:] = sorted_items
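    # Net effect: items from modules listed in MODULE_ORDER are moved ahead of the
    # rest (here tests.calibrate_test runs first), with relative order otherwise kept.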
def METHOD_NAME() -> Options:
"""Chrome options for setup."""
options = Options()
executable_path = os.getenv("EXECUTABLE_PATH")
assert executable_path is not None, "EXECUTABLE_PATH environment variable must be set"
_console.print(f"EXECUTABLE_PATH is {executable_path}", style="white on blue")
options.binary_location = executable_path
options.add_argument("whitelisted-ips=''") # type: ignore
options.add_argument("disable-xss-auditor") # type: ignore
options.add_argument("disable-web-security") # type: ignore
options.add_argument("allow-running-insecure-content") # type: ignore
options.add_argument("no-sandbox") # type: ignore
options.add_argument("disable-setuid-sandbox") # type: ignore
options.add_argument("disable-popup-blocking") # type: ignore
options.add_argument("allow-elevated-browser") # type: ignore
return options
def add_localhost(driver: WebDriver, request: pytest.FixtureRequest) -> None:
"""Add localhost using the app UI."""
    # This was necessary because
    # os.environ["OT_APP_DISCOVERY__CANDIDATES"] = "localhost" was broken;
    # it is preserved here in case we want to use it in the future.
    # How to call this method:
    # use .env to set LOCALHOST
# localhost: Optional[str] = os.getenv("LOCALHOST")
# if localhost:
# if localhost.lower() == "true":
# add_localhost(driver=driver, request=request)
app_settings: AppSettings = AppSettings(driver, _console, request.node.nodeid)
left_menu: LeftMenu = LeftMenu(driver, _console, request.node.nodeid)
left_menu.navigate("app-settings")
assert app_settings.get_app_settings_header().text == "App Settings"
assert app_settings.get_connect_robot_via_IP_header().is_displayed()
assert app_settings.get_connect_to_robot_via_IP_address_button().is_displayed()
app_settings.click_connect_to_robot_via_IP_address_button()
assert app_settings.get_textbox_to_enter_the_ip().is_displayed()
app_settings.click_add_ip_or_hostname()
app_settings.enter_hostname("localhost")
assert app_settings.get_add_button().is_displayed()
app_settings.click_add_button()
assert app_settings.get_done_button().is_displayed()
app_settings.click_done_button()
@pytest.fixture(scope="session")
def driver(request: pytest.FixtureRequest) -> Generator[WebDriver, None, None]:
"""Pass standard Chrome options to a test."""
update_channel = os.getenv("UPDATE_CHANNEL")
assert update_channel is not None, "UPDATE_CHANNEL environment variable must be set"
options = METHOD_NAME()
os.environ["OT_APP_ANALYTICS__SEEN_OPT_IN"] = "true"
os.environ["OT_APP_ANALYTICS__OPTED_IN"] = "true"
os.environ["OT_APP_ANALYTICS__APP_ID"] = "6dcc8733-c3e6-4ac4-b14f-638ede114ac5"
os.environ["OT_APP_ANALYTICS__USER_ID"] = "b806c211-3b21-4c5e-8b06-aedc58887cce"
os.environ["OT_APP_UPDATE__CHANNEL"] = update_channel
os.environ["OT_APP_LOG__LEVEL__CONSOLE"] = "error"
os.environ["OT_APP_DISCOVERY__CANDIDATES"] = "localhost" # fixed in 6.2
with webdriver.Chrome(options=options) as driver:
_console.print("Driver Capabilities.", style="bright_yellow on blue")
_console.print(driver.capabilities)
localhost: Optional[str] = os.getenv("LOCALHOST")
if localhost:
if localhost.lower() == "true":
add_localhost(driver=driver, request=request)
yield driver
@pytest.fixture(scope="session")
def console() -> Console:
"""Rich console for output."""
return _console
@pytest.fixture(scope="session")
def robots() -> List[RobotDataType]:
"""Robot data."""
# provide all robot data to the tests
robots = ["dev", "kansas", "emulated_alpha"]
result = []
for robot in robots:
robot_type = ROBOT_MAPPING[robot]
result.append(robot_type)
return result
|
3,249 |
custom formatting
|
from nicegui import ui
from ..documentation_tools import text_demo
def main_demo() -> None:
columns = [
{'name': 'name', 'label': 'Name', 'field': 'name', 'required': True, 'align': 'left'},
{'name': 'age', 'label': 'Age', 'field': 'age', 'sortable': True},
]
rows = [
{'name': 'Alice', 'age': 18},
{'name': 'Bob', 'age': 21},
{'name': 'Carol'},
]
ui.table(columns=columns, rows=rows, row_key='name')
def more() -> None:
@text_demo('Table with expandable rows', '''
Scoped slots can be used to insert buttons that toggle the expand state of a table row.
See the [Quasar documentation](https://quasar.dev/vue-components/table#expanding-rows) for more information.
''')
def table_with_expandable_rows():
columns = [
{'name': 'name', 'label': 'Name', 'field': 'name'},
{'name': 'age', 'label': 'Age', 'field': 'age'},
]
rows = [
{'name': 'Alice', 'age': 18},
{'name': 'Bob', 'age': 21},
{'name': 'Carol'},
]
table = ui.table(columns=columns, rows=rows, row_key='name').classes('w-72')
table.add_slot('header', r'''
<q-tr :props="props">
<q-th auto-width />
<q-th v-for="col in props.cols" :key="col.name" :props="props">
{{ col.label }}
</q-th>
</q-tr>
''')
table.add_slot('body', r'''
<q-tr :props="props">
<q-td auto-width>
<q-btn size="sm" color="accent" round dense
@click="props.expand = !props.expand"
:icon="props.expand ? 'remove' : 'add'" />
</q-td>
<q-td v-for="col in props.cols" :key="col.name" :props="props">
{{ col.value }}
</q-td>
</q-tr>
<q-tr v-show="props.expand" :props="props">
<q-td colspan="100%">
<div class="text-left">This is {{ props.row.name }}.</div>
</q-td>
</q-tr>
''')
@text_demo('Show and hide columns', '''
Here is an example of how to show and hide columns in a table.
''')
def show_and_hide_columns():
from typing import Dict
columns = [
{'name': 'name', 'label': 'Name', 'field': 'name', 'required': True, 'align': 'left'},
{'name': 'age', 'label': 'Age', 'field': 'age', 'sortable': True},
]
rows = [
{'name': 'Alice', 'age': 18},
{'name': 'Bob', 'age': 21},
{'name': 'Carol'},
]
table = ui.table(columns=columns, rows=rows, row_key='name')
def toggle(column: Dict, visible: bool) -> None:
column['classes'] = '' if visible else 'hidden'
column['headerClasses'] = '' if visible else 'hidden'
table.update()
with ui.button(icon='menu'):
with ui.menu(), ui.column().classes('gap-0 p-2'):
for column in columns:
ui.switch(column['label'], value=True, on_change=lambda e, column=column: toggle(column, e.value))
@text_demo('Table with drop down selection', '''
Here is an example of how to use a drop down selection in a table.
After emitting a `rename` event from the scoped slot, the `rename` function updates the table rows.
''')
def table_with_drop_down_selection():
from nicegui import events
columns = [
{'name': 'name', 'label': 'Name', 'field': 'name'},
{'name': 'age', 'label': 'Age', 'field': 'age'},
]
rows = [
{'id': 0, 'name': 'Alice', 'age': 18},
{'id': 1, 'name': 'Bob', 'age': 21},
{'id': 2, 'name': 'Carol'},
]
name_options = ['Alice', 'Bob', 'Carol']
def rename(e: events.GenericEventArguments) -> None:
for row in rows:
if row['id'] == e.args['id']:
row['name'] = e.args['name']
ui.notify(f'Table.rows is now: {table.rows}')
table = ui.table(columns=columns, rows=rows, row_key='name').classes('w-full')
table.add_slot('body', r'''
<q-tr :props="props">
<q-td key="name" :props="props">
<q-select
v-model="props.row.name"
:options="''' + str(name_options) + r'''"
@update:model-value="() => $parent.$emit('rename', props.row)"
/>
</q-td>
<q-td key="age" :props="props">
{{ props.row.age }}
</q-td>
</q-tr>
''')
table.on('rename', rename)
@text_demo('Table from pandas dataframe', '''
Here is a demo of how to create a table from a pandas dataframe.
''')
def table_from_pandas_demo():
import pandas as pd
df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
ui.table(
columns=[{'name': col, 'label': col, 'field': col} for col in df.columns],
rows=df.to_dict('records'),
)
@text_demo('Adding rows', '''
It's simple to add new rows with the `add_rows(dict)` method.
''')
def adding_rows():
import os
import random
def add():
item = os.urandom(10 // 2).hex()
table.add_rows({'id': item, 'count': random.randint(0, 100)})
ui.button('add', on_click=add)
columns = [
{'name': 'id', 'label': 'ID', 'field': 'id'},
{'name': 'count', 'label': 'Count', 'field': 'count'},
]
table = ui.table(columns=columns, rows=[], row_key='id').classes('w-full')
@text_demo('Custom sorting and formatting', '''
You can define dynamic column attributes using a `:` prefix.
This way you can define custom sorting and formatting functions.
The following example allows sorting the `name` column by length.
The `age` column is formatted to show the age in years.
''')
def METHOD_NAME():
columns = [
{
'name': 'name',
'label': 'Name',
'field': 'name',
'sortable': True,
':sort': '(a, b, rowA, rowB) => b.length - a.length',
},
{
'name': 'age',
'label': 'Age',
'field': 'age',
':format': 'value => value + " years"',
},
]
rows = [
{'name': 'Alice', 'age': 18},
{'name': 'Bob', 'age': 21},
{'name': 'Carl', 'age': 42},
]
ui.table(columns=columns, rows=rows, row_key='name')
@text_demo('Toggle fullscreen', '''
You can toggle the fullscreen mode of a table using the `toggle_fullscreen()` method.
''')
def toggle_fullscreen():
table = ui.table(
columns=[{'name': 'name', 'label': 'Name', 'field': 'name'}],
rows=[{'name': 'Alice'}, {'name': 'Bob'}, {'name': 'Carol'}],
).classes('w-full')
with table.add_slot('top-left'):
def toggle() -> None:
table.toggle_fullscreen()
button.props('icon=fullscreen_exit' if table.is_fullscreen else 'icon=fullscreen')
button = ui.button('Toggle fullscreen', icon='fullscreen', on_click=toggle).props('flat')
|
3,250 |
test gotocomputer proxyjump
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Test the `SshTransport` plugin on localhost."""
import logging
import unittest
import paramiko
from aiida.transports.plugins.ssh import SshTransport
from aiida.transports.transport import TransportInternalError
# This will be used by test_all_plugins
plugin_transport = SshTransport(machine='localhost', timeout=30, load_system_host_keys=True, key_policy='AutoAddPolicy')
class TestBasicConnection(unittest.TestCase):
"""
Test basic connections.
"""
def test_closed_connection_ssh(self):
"""Test calling command on a closed connection."""
with self.assertRaises(TransportInternalError):
transport = SshTransport(machine='localhost')
transport._exec_command_internal('ls') # pylint: disable=protected-access
def test_closed_connection_sftp(self):
"""Test calling sftp command on a closed connection."""
with self.assertRaises(TransportInternalError):
transport = SshTransport(machine='localhost')
transport.listdir()
@staticmethod
def test_auto_add_policy():
"""Test the auto add policy."""
with SshTransport(machine='localhost', timeout=30, load_system_host_keys=True, key_policy='AutoAddPolicy'):
pass
@staticmethod
def test_proxy_jump():
"""Test the connection with a proxy jump or several"""
with SshTransport(
machine='localhost',
proxy_jump='localhost',
timeout=30,
load_system_host_keys=True,
key_policy='AutoAddPolicy'
):
pass
# kind of pointless, but should work and to check that proxy chaining works
with SshTransport(
machine='localhost',
proxy_jump='localhost,localhost,localhost',
timeout=30,
load_system_host_keys=True,
key_policy='AutoAddPolicy'
):
pass
def test_proxy_jump_invalid(self):
"""Test proper error reporting when invalid host as a proxy"""
        # It is also important that, when Python is running with debug warnings `-Wd`,
        # no unclosed files are reported.
with self.assertRaises(paramiko.SSHException):
with SshTransport(
machine='localhost',
proxy_jump='localhost,nohost',
timeout=30,
load_system_host_keys=True,
key_policy='AutoAddPolicy'
):
pass
@staticmethod
def test_proxy_command():
"""Test the connection with a proxy command"""
with SshTransport(
machine='localhost',
proxy_command='ssh -W localhost:22 localhost',
timeout=30,
load_system_host_keys=True,
key_policy='AutoAddPolicy'
):
pass
def test_no_host_key(self):
"""Test if there is no host key."""
# Disable logging to avoid output during test
logging.disable(logging.ERROR)
with self.assertRaises(paramiko.SSHException):
with SshTransport(machine='localhost', timeout=30, load_system_host_keys=False):
pass
# Reset logging level
logging.disable(logging.NOTSET)
def test_gotocomputer():
"""Test gotocomputer"""
with SshTransport(
machine='localhost',
timeout=30,
use_login_shell=False,
key_policy='AutoAddPolicy',
proxy_command='ssh -W localhost:22 localhost',
) as transport:
cmd_str = transport.gotocomputer_command('/remote_dir/')
expected_str = (
"""ssh -t localhost -o ProxyCommand='ssh -W localhost:22 localhost' "if [ -d '/remote_dir/' ] ;"""
""" then cd '/remote_dir/' ; bash ; else echo ' ** The directory' ; """
"""echo ' ** /remote_dir/' ; echo ' ** seems to have been deleted, I logout...' ; fi" """
)
assert cmd_str == expected_str
def METHOD_NAME():
"""Test gotocomputer"""
with SshTransport(
machine='localhost',
timeout=30,
use_login_shell=False,
key_policy='AutoAddPolicy',
proxy_jump='localhost',
) as transport:
cmd_str = transport.gotocomputer_command('/remote_dir/')
expected_str = (
"""ssh -t localhost -o ProxyJump='localhost' "if [ -d '/remote_dir/' ] ;"""
""" then cd '/remote_dir/' ; bash ; else echo ' ** The directory' ; """
"""echo ' ** /remote_dir/' ; echo ' ** seems to have been deleted, I logout...' ; fi" """
)
assert cmd_str == expected_str
|
3,251 |
get observed date
|
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <[email protected]> (c) 2017-2022
# ryanss <[email protected]> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from datetime import timedelta as td
from typing import Dict, Optional, Tuple, Set
from holidays.calendars.gregorian import MON, TUE, WED, THU, FRI, SAT, SUN
from holidays.holiday_base import DateArg, HolidayBase
class ObservedRule(Dict[int, int]):
__slots__ = ()
def __add__(self, other):
return ObservedRule({**self, **other})
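    # Adding two rules merges their weekday -> delta mappings; on a conflicting
    # weekday the right-hand rule's delta wins (dict unpacking order).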
# Observance calculation rules: +7 - next workday, -7 - previous workday.
# Single days.
MON_TO_NEXT_TUE = ObservedRule({MON: +1})
TUE_TO_PREV_MON = ObservedRule({TUE: -1})
TUE_TO_PREV_FRI = ObservedRule({TUE: -4})
WED_TO_PREV_MON = ObservedRule({WED: -2})
WED_TO_NEXT_FRI = ObservedRule({WED: +2})
THU_TO_PREV_MON = ObservedRule({THU: -3})
THU_TO_PREV_WED = ObservedRule({THU: -1})
THU_TO_NEXT_MON = ObservedRule({THU: +4})
THU_TO_NEXT_FRI = ObservedRule({THU: +1})
FRI_TO_PREV_THU = ObservedRule({FRI: -1})
FRI_TO_NEXT_MON = ObservedRule({FRI: +3})
FRI_TO_NEXT_SAT = ObservedRule({FRI: +1})
FRI_TO_NEXT_WORKDAY = ObservedRule({FRI: +7})
SAT_TO_PREV_FRI = ObservedRule({SAT: -1})
SAT_TO_PREV_WORKDAY = ObservedRule({SAT: -7})
SAT_TO_NEXT_MON = ObservedRule({SAT: +2})
SAT_TO_NEXT_TUE = ObservedRule({SAT: +3})
SAT_TO_NEXT_SUN = ObservedRule({SAT: +1})
SAT_TO_NEXT_WORKDAY = ObservedRule({SAT: +7})
SUN_TO_NEXT_MON = ObservedRule({SUN: +1})
SUN_TO_NEXT_TUE = ObservedRule({SUN: +2})
SUN_TO_NEXT_WED = ObservedRule({SUN: +3})
SUN_TO_NEXT_WORKDAY = ObservedRule({SUN: +7})
# Multiple days.
ALL_TO_NEAREST_MON = ObservedRule({TUE: -1, WED: -2, THU: -3, FRI: +3, SAT: +2, SUN: +1})
ALL_TO_NEAREST_MON_LATAM = ObservedRule({TUE: -1, WED: -2, THU: +4, FRI: +3, SAT: +2, SUN: +1})
ALL_TO_NEXT_MON = ObservedRule({TUE: +6, WED: +5, THU: +4, FRI: +3, SAT: +2, SUN: +1})
ALL_TO_NEXT_SUN = ObservedRule({MON: +6, TUE: +5, WED: +4, THU: +3, FRI: +2, SAT: +1})
WORKDAY_TO_NEAREST_MON = ObservedRule({TUE: -1, WED: -2, THU: -3, FRI: +3})
WORKDAY_TO_NEXT_MON = ObservedRule({TUE: +6, WED: +5, THU: +4, FRI: +3})
WORKDAY_TO_NEXT_WORKDAY = ObservedRule({MON: +7, TUE: +7, WED: +7, THU: +7, FRI: +7})
TUE_WED_TO_PREV_MON = ObservedRule({TUE: -1, WED: -2})
TUE_WED_THU_TO_PREV_MON = ObservedRule({TUE: -1, WED: -2, THU: -3})
WED_THU_TO_NEXT_FRI = ObservedRule({WED: +2, THU: +1})
THU_FRI_TO_NEXT_MON = ObservedRule({THU: +4, FRI: +3})
THU_FRI_TO_NEXT_WORKDAY = ObservedRule({THU: +7, FRI: +7})
THU_FRI_SUN_TO_NEXT_MON = ObservedRule({THU: +4, FRI: +3, SUN: +1})
FRI_SAT_TO_NEXT_WORKDAY = ObservedRule({FRI: +7, SAT: +7})
FRI_SUN_TO_NEXT_MON = ObservedRule({FRI: +3, SUN: +1})
FRI_SUN_TO_NEXT_SAT_MON = ObservedRule({FRI: +1, SUN: +1})
SAT_SUN_TO_PREV_FRI = ObservedRule({SAT: -1, SUN: -2})
SAT_SUN_TO_NEXT_MON = ObservedRule({SAT: +2, SUN: +1})
SAT_SUN_TO_NEXT_TUE = ObservedRule({SAT: +3, SUN: +2})
SAT_SUN_TO_NEXT_MON_TUE = ObservedRule({SAT: +2, SUN: +2})
SAT_SUN_TO_NEXT_WORKDAY = ObservedRule({SAT: +7, SUN: +7})
class ObservedHolidayBase(HolidayBase):
"""Observed holidays implementation."""
observed_label = "%s"
def __init__(self, observed_rule: ObservedRule, observed_since: int = None, *args, **kwargs):
self._observed_rule = observed_rule
self._observed_since = observed_since
super().__init__(*args, **kwargs)
def _is_observed(self, *args, **kwargs) -> bool:
return self._observed_since is None or self._year >= self._observed_since
def METHOD_NAME(self, dt: date, rule: ObservedRule) -> date:
delta = rule.get(dt.weekday(), 0)
if delta != 0:
if abs(delta) == 7:
delta //= 7
dt += td(days=delta)
while dt.year == self._year and (
dt in self or self._is_weekend(dt) # type: ignore[operator]
):
dt += td(days=delta)
else:
dt += td(days=delta)
return dt
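    # In short: a delta of +/-7 means "next/previous workday": the date is shifted
    # one day at a time while (within the same year) it is a weekend or an existing
    # holiday; any other delta is a fixed shift applied exactly once.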
def _add_observed(
self, dt: DateArg, name: Optional[str] = None, rule: Optional[ObservedRule] = None
) -> Tuple[bool, date]:
dt = dt if isinstance(dt, date) else date(self._year, *dt)
if not self.observed or not self._is_observed(dt):
return False, dt
dt_observed = self.METHOD_NAME(dt, rule or self._observed_rule)
if dt_observed == dt:
return False, dt
observed_label = getattr(
self,
"observed_label_before" if dt_observed < dt else "observed_label",
self.observed_label,
)
for name in (name,) if name else self.get_list(dt):
super()._add_holiday(self.tr(observed_label) % self.tr(name), dt_observed)
return True, dt_observed
def _move_holiday(self, dt: date, rule: Optional[ObservedRule] = None) -> Tuple[bool, date]:
is_observed, dt_observed = self._add_observed(dt, rule=rule)
if is_observed:
self.pop(dt)
return is_observed, dt_observed if is_observed else dt
def _populate_observed(self, dts: Set[date], multiple: bool = False) -> None:
"""
When multiple is True, each holiday from a given date has its own observed date.
"""
for dt in sorted(dts):
if not self._is_observed(dt):
continue
if multiple:
for name in self.get_list(dt):
self._add_observed(dt, name)
else:
self._add_observed(dt)
|
3,252 |
setup
|
import re
from virttest import utils_disk
from virttest import virsh
from virttest.libvirt_xml import vm_xml
from provider.backingchain import blockcommand_base
def run(test, params, env):
"""
Test domblkthreshold for domain which has backing chain
"""
def METHOD_NAME():
"""
Prepare active domain and backingchain
"""
test.log.info("Setup env.")
test_obj.backingchain_common_setup(create_snap=True,
snap_num=4)
def test_backing_target():
"""
Do domblkthreshold for the backing file device target
"""
test.log.info("TEST_STEP1:Set domblkthreshold for backing file device")
virsh.domblkthreshold(vm_name, '%s[%s]' % (target_disk, domblk_index),
domblk_threshold, debug=True,
ignore_status=False)
check_domstats_threshold(domstats_option, domblk_threshold)
test.log.info("TEST_STEP2:Do blockcommit and check event")
event_session = virsh.EventTracker.start_get_event(vm_name)
write_file()
virsh.blockcommit(vm.name, target_disk,
commit_options % test_obj.snap_path_list[1],
ignore_status=False, debug=True)
expected_event = event % (vm_name, target_disk, domblk_index,
domblk_threshold)
check_event(event_session, expected_event)
check_domstats_threshold(domstats_option)
def test_entire_disk():
"""
Do domblkthreshold for the entire disk device target
"""
test.log.info("TEST_STEP1:Set domblkthreshold for the entire disk")
virsh.domblkthreshold(vm_name, '%s' % target_disk,
domblk_threshold, debug=True,
ignore_status=False)
check_domstats_threshold(domstats_option, domblk_threshold)
test.log.info("TEST_STEP2:Write file in guest and check event")
event_session = virsh.EventTracker.start_get_event(vm_name)
write_file()
expected_event = event % (vm_name, target_disk, domblk_threshold)
check_event(event_session, expected_event)
check_domstats_threshold(domstats_option)
def teardown():
"""
Clean env
"""
test_obj.backingchain_common_teardown()
session = vm.wait_for_login()
test_obj.clean_file(large_file, session)
session.close()
bkxml.sync()
def check_domstats_threshold(options, threshold_value=None):
"""
Check domstats threshold
:param options: extra options for virsh domstats command
        :param threshold_value: domstats threshold value; if it is None,
        the result should contain no threshold entries
"""
result = virsh.domstats(vm_name, options, debug=True,
ignore_status=False).stdout_text.strip()
if not threshold_value:
pattern = "block.*.threshold"
if re.search(pattern, result):
test.fail("Threshold: %s should not be in %s" % (pattern, result))
else:
pattern = r"block.*.threshold=%s" % threshold_value
if not re.search(pattern, result):
test.fail("Not get correct threshold: %s should be in %s" % (
pattern, result))
def write_file():
"""
Write file in guest
"""
session = vm.wait_for_login()
utils_disk.dd_data_to_vm_disk(session, large_file, bs='4M', count='200')
session.close()
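        # (4M blocks x 200 is roughly 800MiB written, intended to push disk usage
        # past the domblkthreshold set by the test so the block-threshold event fires.)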
def check_event(event_session, expected_event):
"""
Check event correct
:param event_session: virsh session
:param expected_event: expected event pattern
"""
test.log.debug('Checking event pattern is -> %s', expected_event)
event_output = virsh.EventTracker.finish_get_event(event_session)
if not re.search(expected_event, event_output):
test.fail('Not find: %s from event output:%s' % (
expected_event, event_output))
# Process cartesian parameters
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
event = params.get('event')
case_name = params.get('case_name', '')
target_disk = params.get('target_disk')
domblk_threshold = params.get('domblk_threshold')
domblk_index = params.get('domblk_index')
domstats_option = params.get('domstats_option')
commit_options = params.get('commit_options')
test_obj = blockcommand_base.BlockCommand(test, vm, params)
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
bkxml = vmxml.copy()
large_file = '/tmp/file'
run_test = eval("test_%s" % case_name)
try:
METHOD_NAME()
run_test()
finally:
teardown()
|
3,253 |
get project
|
from rest_framework import viewsets
from .models import ListModel
from . import serializers
from utils.page import MyPageNumberPagination
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from .filter import Filter
from rest_framework.exceptions import APIException
class APIViewSet(viewsets.ModelViewSet):
"""
retrieve:
Response a data list(get)
list:
Response a data list(all)
create:
Create a data line(post)
delete:
Delete a data line(delete)
partial_update:
Partial_update a data(patch:partial_update)
update:
Update a data(put:update)
"""
pagination_class = MyPageNumberPagination
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
def METHOD_NAME(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.METHOD_NAME()
if self.request.user:
if id is None:
return ListModel.objects.filter(openid=self.request.auth.openid, is_delete=False)
else:
return ListModel.objects.filter(openid=self.request.auth.openid, id=id, is_delete=False)
else:
return ListModel.objects.none()
def get_serializer_class(self):
if self.action in ['list', 'retrieve', 'destroy']:
return serializers.GoodsbrandGetSerializer
elif self.action in ['create']:
return serializers.GoodsbrandPostSerializer
elif self.action in ['update']:
return serializers.GoodsbrandUpdateSerializer
elif self.action in ['partial_update']:
return serializers.GoodsbrandPartialUpdateSerializer
else:
return self.http_method_not_allowed(request=self.request)
def create(self, request, *args, **kwargs):
data = self.request.data
data['openid'] = self.request.auth.openid
if ListModel.objects.filter(openid=data['openid'], goods_brand=data['goods_brand'], is_delete=False).exists():
raise APIException({"detail": "Data exists"})
else:
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def update(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot update data which not yours"})
else:
data = self.request.data
serializer = self.get_serializer(qs, data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def partial_update(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot partial_update data which not yours"})
else:
data = self.request.data
serializer = self.get_serializer(qs, data=data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def destroy(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot delete data which not yours"})
else:
qs.is_delete = True
qs.save()
serializer = self.get_serializer(qs, many=False)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
|
3,254 |
create network
|
# SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
from __future__ import absolute_import
from __future__ import division
from collections import defaultdict
import logging
import threading
import six
import xml.etree.ElementTree as etree
from xml.sax.saxutils import escape
from libvirt import libvirtError, VIR_ERR_NO_NETWORK
from vdsm.common import libvirtconnection
from vdsm.common import xmlutils
LIBVIRT_NET_PREFIX = 'vdsm-'
_libvirt_net_lock = threading.Lock()
def createNetworkDef(network, bridged=True, iface=None):
"""
Creates Network Xml e.g.:
<network>
<name>vdsm-awesome_net</name>
<forward mode='bridge'/><bridge name='awesome_net'/> ||
<forward mode='passthrough'><interface dev='incredible'/></forward>
</network>
Forward mode can be either bridge or passthrough,
according to net if bridged or bridgeless this
determines respectively the presence of bridge element
or interface subelement.
"""
netName = LIBVIRT_NET_PREFIX + network
def EtreeElement(tagName, text=None, **attrs):
elem = etree.Element(tagName)
if text:
elem.text = escape(text)
if attrs:
for attr, value in six.iteritems(attrs):
elem.set(attr, escape(str(value)))
return elem
root = etree.Element('network')
nameElem = EtreeElement('name', netName)
forwardElem = EtreeElement(
'forward', mode='bridge' if bridged else 'passthrough'
)
root.append(nameElem)
root.append(forwardElem)
if bridged:
root.append(EtreeElement('bridge', name=network))
else:
forwardElem.append(EtreeElement('interface', dev=iface))
return xmlutils.tostring(root)
def METHOD_NAME(netname, iface, user_reference=None):
"""
Create a libvirt network if it does not yet exist.
The user_reference argument is a unique identifier of the caller,
used to track the network users.
"""
with _libvirt_net_lock:
if not is_libvirt_network(netname):
bridged = netname == iface
iface = None if bridged else iface
_createNetwork(createNetworkDef(netname, bridged, iface))
NetworksUsersCache.add(netname, user_reference)
def delete_network(netname, user_reference=None):
"""
Remove a libvirt network when all its users have asked to remove it.
"""
with _libvirt_net_lock:
NetworksUsersCache.remove(netname, user_reference)
if not NetworksUsersCache.has_users(netname):
removeNetwork(netname)
def _createNetwork(netXml):
conn = libvirtconnection.get()
net = conn.networkDefineXML(netXml)
net.create()
net.setAutostart(1)
def removeNetwork(network):
netName = LIBVIRT_NET_PREFIX + network
conn = libvirtconnection.get()
net = _netlookup_by_name(conn, netName)
if net:
if net.isActive():
net.destroy()
if net.isPersistent():
net.undefine()
def networks():
"""
Get dict of networks from libvirt
:returns: dict of networkname={properties}
:rtype: dict of dict
{ 'ovirtmgmt': { 'bridge': 'ovirtmgmt', 'bridged': True}
'red': { 'iface': 'red', 'bridged': False}}
"""
nets = {}
conn = libvirtconnection.get()
allNets = ((net, net.name()) for net in conn.listAllNetworks(0))
for net, netname in allNets:
if netname.startswith(LIBVIRT_NET_PREFIX):
netname = netname[len(LIBVIRT_NET_PREFIX):]
nets[netname] = {}
xml = etree.fromstring(net.XMLDesc())
interface = xml.find('.//interface')
if interface is not None:
nets[netname]['iface'] = interface.get('dev')
nets[netname]['bridged'] = False
else:
nets[netname]['bridge'] = xml.find('.//bridge').get('name')
nets[netname]['bridged'] = True
return nets
def is_libvirt_network(netname):
conn = libvirtconnection.get()
libvirt_nets = conn.listAllNetworks()
netname = LIBVIRT_NET_PREFIX + netname
return any(n.name() == netname for n in libvirt_nets)
def netname_o2l(ovirt_name):
"""Translate ovirt network name to the name used by libvirt database"""
return LIBVIRT_NET_PREFIX + ovirt_name
def netname_l2o(libvirt_name):
"""Translate the name used by libvirt database to the ovirt network name"""
return libvirt_name[len(LIBVIRT_NET_PREFIX):]
def _netlookup_by_name(conn, netname):
try:
return conn.networkLookupByName(netname)
except libvirtError as e:
if e.get_error_code() == VIR_ERR_NO_NETWORK:
return None
raise
class NetworksUsersCache(object):
"""
    Manages network user references.
Note: The implementation is NOT thread safe.
"""
_nets_users = defaultdict(set)
@staticmethod
def add(net, user_ref):
if (net in NetworksUsersCache._nets_users and
user_ref in NetworksUsersCache._nets_users[net]):
logging.warning('Attempting to add an existing net user: %s/%s',
net, user_ref)
NetworksUsersCache._nets_users[net].add(user_ref)
@staticmethod
def remove(net, user_ref):
if net not in NetworksUsersCache._nets_users:
logging.warning('Attempting to remove a non existing network: '
'%s/%s', net, user_ref)
net_users = NetworksUsersCache._nets_users[net]
try:
net_users.remove(user_ref)
except KeyError:
logging.warning('Attempting to remove a non existing net user: '
'%s/%s', net, user_ref)
if len(net_users) == 0:
del NetworksUsersCache._nets_users[net]
@staticmethod
def has_users(net):
if net not in NetworksUsersCache._nets_users:
return False
return len(NetworksUsersCache._nets_users[net]) > 0
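    # In short: add()/remove() keep a per-network set of user references, so
    # delete_network() only removes the libvirt network once its last registered
    # user has asked for removal.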
|
3,255 |
test abspath or url
|
from enum import auto
from importlib.metadata import version as package_version
from os.path import abspath, expanduser, sep
from pathlib import Path
import pytest
from packaging.version import parse as parse_version
from napari.utils.misc import (
StringEnum,
_is_array_type,
_quiet_array_equal,
abspath_or_url,
ensure_iterable,
ensure_list_of_layer_data_tuple,
ensure_sequence_of_iterables,
pick_equality_operator,
)
ITERABLE = (0, 1, 2)
NESTED_ITERABLE = [ITERABLE, ITERABLE, ITERABLE]
DICT = {'a': 1, 'b': 3, 'c': 5}
LIST_OF_DICTS = [DICT, DICT, DICT]
PARTLY_NESTED_ITERABLE = [ITERABLE, None, None]
REPEATED_PARTLY_NESTED_ITERABLE = [PARTLY_NESTED_ITERABLE] * 3
@pytest.mark.parametrize(
'input_data, expected',
[
[ITERABLE, NESTED_ITERABLE],
[NESTED_ITERABLE, NESTED_ITERABLE],
[(ITERABLE, (2,), (3, 1, 6)), (ITERABLE, (2,), (3, 1, 6))],
[DICT, LIST_OF_DICTS],
[LIST_OF_DICTS, LIST_OF_DICTS],
[(ITERABLE, (2,), (3, 1, 6)), (ITERABLE, (2,), (3, 1, 6))],
[None, (None, None, None)],
[PARTLY_NESTED_ITERABLE, REPEATED_PARTLY_NESTED_ITERABLE],
[[], ([], [], [])],
],
)
def test_sequence_of_iterables(input_data, expected):
"""Test ensure_sequence_of_iterables returns a sequence of iterables."""
zipped = zip(
range(3),
ensure_sequence_of_iterables(input_data, repeat_empty=True),
expected,
)
for _i, result, expectation in zipped:
assert result == expectation
def test_sequence_of_iterables_allow_none():
input_data = [(1, 2), None]
assert (
ensure_sequence_of_iterables(input_data, allow_none=True) == input_data
)
def test_sequence_of_iterables_no_repeat_empty():
assert ensure_sequence_of_iterables([], repeat_empty=False) == []
with pytest.raises(ValueError):
ensure_sequence_of_iterables([], repeat_empty=False, length=3)
def test_sequence_of_iterables_raises():
with pytest.raises(ValueError):
# the length argument asserts a specific length
ensure_sequence_of_iterables(((0, 1),), length=4)
# BEWARE: only the first element of a nested sequence is checked.
with pytest.raises(AssertionError):
iterable = (None, (0, 1), (0, 2))
result = iter(ensure_sequence_of_iterables(iterable))
assert next(result) is None
@pytest.mark.parametrize(
'input_data, expected',
[
[ITERABLE, ITERABLE],
[DICT, DICT],
[1, [1, 1, 1]],
['foo', ['foo', 'foo', 'foo']],
[None, [None, None, None]],
],
)
def test_ensure_iterable(input_data, expected):
"""Test test_ensure_iterable returns an iterable."""
zipped = zip(range(3), ensure_iterable(input_data), expected)
for _i, result, expectation in zipped:
assert result == expectation
def test_string_enum():
# Make a test StringEnum
class TestEnum(StringEnum):
THING = auto()
OTHERTHING = auto()
# test setting by value, correct case
assert TestEnum('thing') == TestEnum.THING
# test setting by value mixed case
assert TestEnum('thInG') == TestEnum.THING
# test setting by instance of self
assert TestEnum(TestEnum.THING) == TestEnum.THING
# test setting by name correct case
assert TestEnum['THING'] == TestEnum.THING
# test setting by name mixed case
assert TestEnum['tHiNg'] == TestEnum.THING
# test setting by value with incorrect value
with pytest.raises(ValueError):
TestEnum('NotAThing')
# test setting by name with incorrect name
with pytest.raises(KeyError):
TestEnum['NotAThing']
# test creating a StringEnum with the functional API
animals = StringEnum('Animal', 'AARDVARK BUFFALO CAT DOG')
assert str(animals.AARDVARK) == 'aardvark'
assert animals('BUffALO') == animals.BUFFALO
assert animals['BUffALO'] == animals.BUFFALO
# test setting by instance of self
class OtherEnum(StringEnum):
SOMETHING = auto()
# test setting by instance of a different StringEnum is an error
with pytest.raises(ValueError):
TestEnum(OtherEnum.SOMETHING)
# test string conversion
assert str(TestEnum.THING) == 'thing'
# test direct comparison with a string
assert TestEnum.THING == 'thing'
assert 'thing' == TestEnum.THING
assert TestEnum.THING != 'notathing'
assert 'notathing' != TestEnum.THING
# test comparison with another enum with same value names
class AnotherTestEnum(StringEnum):
THING = auto()
ANOTHERTHING = auto()
assert TestEnum.THING != AnotherTestEnum.THING
# test lookup in a set
assert TestEnum.THING in {TestEnum.THING, TestEnum.OTHERTHING}
assert TestEnum.THING not in {TestEnum.OTHERTHING}
assert TestEnum.THING in {'thing', TestEnum.OTHERTHING}
assert TestEnum.THING not in {
AnotherTestEnum.THING,
AnotherTestEnum.ANOTHERTHING,
}
def METHOD_NAME():
relpath = "~" + sep + "something"
assert abspath_or_url(relpath) == expanduser(relpath)
assert abspath_or_url('something') == abspath('something')
assert abspath_or_url(sep + 'something') == abspath(sep + 'something')
assert abspath_or_url('https://something') == 'https://something'
assert abspath_or_url('http://something') == 'http://something'
assert abspath_or_url('ftp://something') == 'ftp://something'
assert abspath_or_url('s3://something') == 's3://something'
assert abspath_or_url('file://something') == 'file://something'
with pytest.raises(TypeError):
abspath_or_url({'a', '~'})
def test_type_stable():
assert isinstance(abspath_or_url('~'), str)
assert isinstance(abspath_or_url(Path('~')), Path)
def test_equality_operator():
import operator
import dask.array as da
import numpy as np
import xarray as xr
import zarr
class MyNPArray(np.ndarray):
pass
assert pick_equality_operator(np.ones((1, 1))) == _quiet_array_equal
assert pick_equality_operator(MyNPArray([1, 1])) == _quiet_array_equal
assert pick_equality_operator(da.ones((1, 1))) == operator.is_
assert pick_equality_operator(zarr.ones((1, 1))) == operator.is_
assert (
pick_equality_operator(xr.DataArray(np.ones((1, 1))))
== _quiet_array_equal
)
@pytest.mark.skipif(
parse_version(package_version("numpy")) >= parse_version("1.25.0"),
reason="Numpy 1.25.0 return true for below comparison",
)
def test_equality_operator_silence():
import numpy as np
eq = pick_equality_operator(np.asarray([]))
# make sure this doesn't warn
assert not eq(np.asarray([]), np.asarray([], '<U32'))
def test_is_array_type_with_xarray():
import numpy as np
import xarray as xr
assert _is_array_type(xr.DataArray(), 'xarray.DataArray')
assert not _is_array_type(xr.DataArray(), 'xr.DataArray')
assert not _is_array_type(
xr.DataArray(), 'xarray.core.dataarray.DataArray'
)
assert not _is_array_type([], 'xarray.DataArray')
assert not _is_array_type(np.array([]), 'xarray.DataArray')
@pytest.mark.parametrize(
'input_data, expected',
[
([([1, 10],)], [([1, 10],)]),
([([1, 10], {'name': 'hi'})], [([1, 10], {'name': 'hi'})]),
(
[([1, 10], {'name': 'hi'}, "image")],
[([1, 10], {'name': 'hi'}, "image")],
),
([], []),
],
)
def test_ensure_list_of_layer_data_tuple(input_data, expected):
"""Ensure that when given layer data that a tuple can be generated.
When data with a name is supplied a layer should be created and named.
When an empty dataset is supplied no layer is created and no errors are produced.
"""
assert ensure_list_of_layer_data_tuple(input_data) == expected
|
3,256 |
test logout
|
from scsession import SaltcornSession
email = "[email protected]"
password="AhGGr6rhu45"
class Test:
def setup_class(self):
SaltcornSession.reset_to_fixtures()
self.sess = SaltcornSession(3001)
def teardown_class(self):
self.sess.close()
# helpers
def cannot_access_admin(self):
self.sess.get('/table')
assert self.sess.status == 302
assert "Your tables" not in self.sess.content
def is_incorrect_user_or_password(self):
assert self.sess.redirect_url.startswith('/auth/login')
self.sess.follow_redirect()
assert "Incorrect user or password" in self.sess.content
def test_public_cannot_access_admin(self):
self.sess.reset()
self.cannot_access_admin()
def test_can_login_as_admin(self):
self.sess.reset()
self.sess.get('/auth/login')
assert "Login" in self.sess.content
assert self.sess.status == 200
self.sess.postForm('/auth/login',
{'email': email,
'password': password,
'_csrf': self.sess.csrf()
})
assert self.sess.redirect_url == '/'
self.sess.get('/table')
assert self.sess.status == 200
assert "Your tables" in self.sess.content
def METHOD_NAME(self):
self.sess.reset()
self.sess.get('/auth/login')
assert "Login" in self.sess.content
assert self.sess.status == 200
self.sess.postForm('/auth/login',
{'email': email,
'password': password,
'_csrf': self.sess.csrf()
})
assert self.sess.redirect_url == '/'
self.sess.get('/table')
assert self.sess.status == 200
assert "Your tables" in self.sess.content
self.sess.get('/auth/logout')
assert self.sess.redirect_url.startswith('/auth/login')
self.cannot_access_admin()
def test_login_without_csrf(self):
self.sess.reset()
self.sess.get('/auth/login')
self.sess.postForm('/auth/login',
{'email': email,
'password': password,
})
assert self.sess.redirect_url.startswith('/auth/login')
self.cannot_access_admin()
def test_login_with_wrong_csrf(self):
self.sess.reset()
self.sess.get('/auth/login')
self.sess.postForm('/auth/login',
{'email': email,
'password': password,
'_csrf': 'ytjutydetjk'
})
assert self.sess.redirect_url.startswith('/auth/login')
self.cannot_access_admin()
def test_login_with_blank_csrf(self):
self.sess.reset()
self.sess.get('/auth/login')
self.sess.postForm('/auth/login',
{'email': email,
'password': password,
'_csrf': ''
})
assert self.sess.redirect_url.startswith('/auth/login')
self.cannot_access_admin()
def test_login_with_wrong_password(self):
self.sess.reset()
self.sess.get('/auth/login')
self.sess.postForm('/auth/login',
{'email': email,
'password': 'fidelio',
'_csrf': self.sess.csrf()
})
self.is_incorrect_user_or_password()
self.cannot_access_admin()
def test_login_with_no_password(self):
self.sess.reset()
self.sess.get('/auth/login')
self.sess.postForm('/auth/login', {'email': email , '_csrf': self.sess.csrf()})
self.is_incorrect_user_or_password()
self.cannot_access_admin()
def test_login_with_no_email(self):
self.sess.reset()
self.sess.get('/auth/login')
self.sess.postForm('/auth/login', {'password': password, '_csrf': self.sess.csrf()})
self.is_incorrect_user_or_password()
self.cannot_access_admin()
def test_login_with_blank_email(self):
self.sess.reset()
self.sess.get('/auth/login')
self.sess.postForm('/auth/login', {'email':'', 'password': password, '_csrf': self.sess.csrf()})
self.is_incorrect_user_or_password()
self.cannot_access_admin()
def test_login_with_nothing(self):
self.sess.reset()
self.sess.get('/auth/login')
self.sess.postForm('/auth/login', {'_csrf': self.sess.csrf()})
self.is_incorrect_user_or_password()
self.cannot_access_admin()
def test_login_with_blank_password(self):
self.sess.reset()
self.sess.get('/auth/login')
self.sess.postForm('/auth/login', {'email': email,'password': '', '_csrf': self.sess.csrf()})
self.is_incorrect_user_or_password()
self.cannot_access_admin()
|
3,257 |
mv dir
|
import os
from django.conf import settings
from google.cloud.exceptions import NotFound
from google.cloud.storage import Client
from project.utility import DirectoryInfo, FileInfo, readable_size
from storages.backends.gcloud import GoogleCloudStorage
def get_client():
return GoogleCloudStorage().client
class ObjectPath(object):
def __init__(self, path):
self._client = None
self._bucket = None
try:
normalized_path = os.path.normpath(path)
self._bucket_name, self._key = normalized_path.split('/', 1)
except ValueError:
raise ValueError('path should specify the bucket and an object key/prefix')
def __repr__(self):
return f"ObjectPath('{self.bucket_name()}', '{self.key()}')"
def bucket_name(self):
return self._bucket_name
def key(self):
if self._key == '':
raise ValueError('object key cannot be empty')
return self._key
def dir_key(self):
if self._key == '':
return self._key
return self._key + '/'
def client(self):
if self._client is None:
self._client = get_client()
return self._client
def bucket(self):
if self._bucket is None:
self._bucket = self.client().get_bucket(self.bucket_name())
return self._bucket
def blob(self):
return self.bucket().blob(self.key())
def dir_blob(self):
return self.bucket().blob(self.dir_key())
def put(self, data):
bucket = self.bucket()
blob = bucket.blob(self.key())
blob.upload_from_string(data)
def put_fileobj(self, file):
bucket = self.bucket()
blob = bucket.blob(self.key())
blob.upload_from_file(file)
def mkdir(self, **kwargs):
bucket = self.bucket()
blob = bucket.blob(self.dir_key())
blob.upload_from_string('')
def exists(self):
return self.file_exists() or self.dir_exists()
def file_exists(self):
bucket = self.bucket()
blob = bucket.blob(self.key())
return blob.exists()
def dir_exists(self):
iterator = self.client().list_blobs(self.bucket_name(), prefix=self.dir_key(), max_results=1)
return len(list(iterator)) > 0
def dir_size(self):
iterator = self.client().list_blobs(self.bucket_name(), prefix=self.dir_key())
return sum([obj.size for obj in iterator])
def open(self, mode='rb'):
storage = GoogleCloudStorage(bucket_name=self.bucket_name())
return storage.open(self.key(), mode=mode)
def list_dir(self):
iterator = self.client().list_blobs(self.bucket_name(), prefix=self.dir_key(), delimiter='/')
blobs = list(iterator)
prefixes = iterator.prefixes
files = []
dirs = []
for blob in blobs:
name = blob.name.replace(self.dir_key(), '', 1)
if name != '':
size = readable_size(blob.size)
modified = blob.updated.strftime("%Y-%m-%d")
files.append(FileInfo(name, size, modified))
for prefix in prefixes:
dirs.append(DirectoryInfo(prefix.replace(self.dir_key(), '', 1)[:-1]))
files.sort()
dirs.sort()
return files, dirs
def url(self):
storage = GoogleCloudStorage(bucket_name=self.bucket_name())
return storage.url(self.key())
def rm(self):
try:
self.rm_file()
except NotFound:
pass
self.rm_dir()
def rm_file(self):
self.blob().delete()
def rm_dir(self):
blobs = list(self.client().list_blobs(self.bucket_name(), prefix=self.dir_key()))
self.bucket().delete_blobs(blobs=blobs)
def cp(self, other):
try:
self.cp_file(other)
except NotFound:
pass
self.cp_directory(other)
def cp_file(self, other):
self.bucket().copy_blob(self.blob(), other.bucket(), new_name=other.key())
def cp_directory(self, other):
iterator = self.client().list_blobs(self.bucket_name(), prefix=self.dir_key())
for blob in iterator:
new_name = blob.name.replace(self.dir_key(), other.dir_key(), 1)
self.bucket().copy_blob(blob, other.bucket(), new_name=new_name)
def mv(self, other):
self.cp(other)
self.rm()
def mv_file(self, other):
self.cp_file(other)
self.rm_file()
def METHOD_NAME(self, other):
self.cp_directory(other)
self.rm_dir()
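# A minimal usage sketch (added comment, not part of the original module; the
# bucket and key names are invented) of how this class chains its copy and
# delete primitives to move objects between locations:
#
#   src = ObjectPath('my-bucket/projects/demo/data.csv')
#   dst = ObjectPath('my-bucket/archive/demo/data.csv')
#   src.mv_file(dst)              # cp_file + rm_file on a single object
#
#   src_dir = ObjectPath('my-bucket/projects/demo')
#   dst_dir = ObjectPath('my-bucket/archive/demo')
#   src_dir.METHOD_NAME(dst_dir)  # cp_directory over the prefix, then rm_dir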
|
3,258 |
unknown exception class
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Result Codes."""
from typing import Tuple
import dns.enum
import dns.exception
class Rcode(dns.enum.IntEnum):
#: No error
NOERROR = 0
#: Format error
FORMERR = 1
#: Server failure
SERVFAIL = 2
#: Name does not exist ("Name Error" in RFC 1035 terminology).
NXDOMAIN = 3
#: Not implemented
NOTIMP = 4
#: Refused
REFUSED = 5
#: Name exists.
YXDOMAIN = 6
#: RRset exists.
YXRRSET = 7
#: RRset does not exist.
NXRRSET = 8
#: Not authoritative.
NOTAUTH = 9
#: Name not in zone.
NOTZONE = 10
#: DSO-TYPE Not Implemented
DSOTYPENI = 11
#: Bad EDNS version.
BADVERS = 16
#: TSIG Signature Failure
BADSIG = 16
#: Key not recognized.
BADKEY = 17
#: Signature out of time window.
BADTIME = 18
#: Bad TKEY Mode.
BADMODE = 19
#: Duplicate key name.
BADNAME = 20
#: Algorithm not supported.
BADALG = 21
#: Bad Truncation
BADTRUNC = 22
#: Bad/missing Server Cookie
BADCOOKIE = 23
@classmethod
def _maximum(cls):
return 4095
@classmethod
def METHOD_NAME(cls):
return UnknownRcode
class UnknownRcode(dns.exception.DNSException):
"""A DNS rcode is unknown."""
def from_text(text: str) -> Rcode:
"""Convert text into an rcode.
*text*, a ``str``, the textual rcode or an integer in textual form.
Raises ``dns.rcode.UnknownRcode`` if the rcode mnemonic is unknown.
Returns a ``dns.rcode.Rcode``.
"""
return Rcode.from_text(text)
def from_flags(flags: int, ednsflags: int) -> Rcode:
"""Return the rcode value encoded by flags and ednsflags.
*flags*, an ``int``, the DNS flags field.
*ednsflags*, an ``int``, the EDNS flags field.
Raises ``ValueError`` if rcode is < 0 or > 4095
Returns a ``dns.rcode.Rcode``.
"""
value = (flags & 0x000F) | ((ednsflags >> 20) & 0xFF0)
return Rcode.make(value)
def to_flags(value: Rcode) -> Tuple[int, int]:
"""Return a (flags, ednsflags) tuple which encodes the rcode.
*value*, a ``dns.rcode.Rcode``, the rcode.
Raises ``ValueError`` if rcode is < 0 or > 4095.
Returns an ``(int, int)`` tuple.
"""
if value < 0 or value > 4095:
raise ValueError("rcode must be >= 0 and <= 4095")
v = value & 0xF
ev = (value & 0xFF0) << 20
return (v, ev)
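# A short worked example (added comment, not from the original dnspython
# source): an extended rcode such as BADVERS (16) does not fit in the 4-bit
# header field, so its high bits travel in the EDNS flags:
#
#   to_flags(Rcode.NXDOMAIN)                    # (3, 0): fits in the header
#   flags, ednsflags = to_flags(Rcode.BADVERS)  # (0, 0x10 << 20)
#   from_flags(flags, ednsflags)                # Rcode.BADVERS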
def to_text(value: Rcode, tsig: bool = False) -> str:
"""Convert rcode into text.
*value*, a ``dns.rcode.Rcode``, the rcode.
Raises ``ValueError`` if rcode is < 0 or > 4095.
Returns a ``str``.
"""
if tsig and value == Rcode.BADVERS:
return "BADSIG"
return Rcode.to_text(value)
### BEGIN generated Rcode constants
NOERROR = Rcode.NOERROR
FORMERR = Rcode.FORMERR
SERVFAIL = Rcode.SERVFAIL
NXDOMAIN = Rcode.NXDOMAIN
NOTIMP = Rcode.NOTIMP
REFUSED = Rcode.REFUSED
YXDOMAIN = Rcode.YXDOMAIN
YXRRSET = Rcode.YXRRSET
NXRRSET = Rcode.NXRRSET
NOTAUTH = Rcode.NOTAUTH
NOTZONE = Rcode.NOTZONE
DSOTYPENI = Rcode.DSOTYPENI
BADVERS = Rcode.BADVERS
BADSIG = Rcode.BADSIG
BADKEY = Rcode.BADKEY
BADTIME = Rcode.BADTIME
BADMODE = Rcode.BADMODE
BADNAME = Rcode.BADNAME
BADALG = Rcode.BADALG
BADTRUNC = Rcode.BADTRUNC
BADCOOKIE = Rcode.BADCOOKIE
### END generated Rcode constants
|
3,259 |
query parameters
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"monitor private-link-scope scoped-resource delete",
is_preview=True,
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete a scoped resource of a private link scope resource.
:example: Delete a scoped resource of a private link scope resource.
az monitor private-link-scope scoped-resource delete -g MyResourceGroup -n MyName --scope-name MyScope
"""
_aaz_info = {
"version": "2019-10-17-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.insights/privatelinkscopes/{}/scopedresources/{}", "2019-10-17-preview"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the assigned resource.",
required=True,
id_part="child_name_1",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.scope_name = AAZStrArg(
options=["--scope-name"],
help="Name of the Azure Monitor Private Link Scope.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.PrivateLinkScopedResourcesDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class PrivateLinkScopedResourcesDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/scopedResources/{name}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"name", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"scopeName", self.ctx.args.scope_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_query_param(
"api-version", "2019-10-17-preview",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
|
3,260 |
bigcouch quorum count
|
from time import sleep
from django.conf import settings
from couchdbkit import ResourceConflict
from couchdbkit.client import Database
from memoized import memoized
from requests.models import Response
from requests.exceptions import RequestException
from dimagi.ext.couchdbkit import Document
from dimagi.utils.chunked import chunked
from dimagi.utils.couch.bulk import BulkFetchException, get_docs
from ..retry import retry_on
class DocTypeMismatchException(Exception):
pass
class DesignDoc(object):
"""Data structure representing a design doc"""
def __init__(self, database, id):
self.id = id
self._doc = database.get(id)
self.name = id.replace("_design/", "")
@property
def views(self):
views = []
if "views" in self._doc:
for view_name, _ in self._doc["views"].items():
views.append(view_name)
return views
def get_db(postfix=None):
"""
Get the couch database.
"""
# this is a bit of a hack, since it assumes all the models talk to the same
# db. that said a lot of our code relies on that assumption.
# this import is here because of annoying dependencies
db_url = settings.COUCH_DATABASE
if postfix:
db_url = settings.EXTRA_COUCHDB_DATABASES[postfix]
return Database(db_url, create=True)
def get_design_docs(database):
design_doc_rows = database.view("_all_docs", startkey="_design/",
endkey="_design/zzzz")
ret = []
for row in design_doc_rows:
ret.append(DesignDoc(database, row["id"]))
return ret
def iter_docs(database, ids, chunksize=100, **query_params):
for doc_ids in chunked(ids, chunksize):
for doc in get_docs(database, keys=doc_ids, **query_params):
yield doc
def iter_bulk_delete(database, ids, chunksize=100, doc_callback=None, wait_time=None,
max_fetch_attempts=1):
total_count = 0
for doc_ids in chunked(ids, chunksize):
for i in range(max_fetch_attempts):
try:
doc_dicts = get_docs(database, keys=doc_ids)
break
except RequestException:
if i == (max_fetch_attempts - 1):
raise
sleep(30)
if doc_callback:
for doc in doc_dicts:
doc_callback(doc)
total_count += len(doc_dicts)
database.bulk_delete(doc_dicts)
if wait_time:
sleep(wait_time)
return total_count
def iter_bulk_delete_with_doc_type_verification(database, ids, doc_type, chunksize=100, wait_time=None,
max_fetch_attempts=1):
def verify_doc_type(doc):
actual_doc_type = doc.get('doc_type')
if actual_doc_type != doc_type:
raise DocTypeMismatchException("Expected %s, got %s" % (doc_type, actual_doc_type))
return iter_bulk_delete(database, ids, chunksize=chunksize, doc_callback=verify_doc_type, wait_time=wait_time,
max_fetch_attempts=max_fetch_attempts)
def is_bigcouch():
# this is a bit of a hack but we'll use it for now
return 'cloudant' in settings.COUCH_DATABASE or getattr(settings, 'BIGCOUCH', False)
def METHOD_NAME():
"""
The number of nodes to force an update/read in bigcouch to make sure
we have a quorum. Should typically be the number of copies of a doc
that end up in the cluster.
"""
return (3 if not hasattr(settings, 'BIGCOUCH_QUORUM_COUNT')
else settings.BIGCOUCH_QUORUM_COUNT)
def get_safe_write_kwargs():
return {'w': METHOD_NAME()} if is_bigcouch() else {}
def get_safe_read_kwargs():
return {'r': METHOD_NAME()} if is_bigcouch() else {}
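# A minimal sketch (assumed Django settings values; not part of the original
# module) of how the quorum count is configured and consumed:
#
#   # settings.py
#   BIGCOUCH = True
#   BIGCOUCH_QUORUM_COUNT = 2
#
#   # callers then pass the kwargs straight through to couchdbkit, e.g.
#   db.save_doc(doc, **get_safe_write_kwargs())    # w=2 writes on bigcouch
#   db.open_doc(doc_id, **get_safe_read_kwargs())  # r=2 reads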
class SafeSaveDocument(Document):
"""
A document class that overrides save such that any time it's called in bigcouch
mode it saves with the maximum quorum count (unless explicitly overridden).
"""
def save(self, **params):
if is_bigcouch() and 'w' not in params:
params['w'] = METHOD_NAME()
return super(SafeSaveDocument, self).save(**params)
def safe_delete(db, doc_or_id):
if not isinstance(doc_or_id, str):
doc_or_id = doc_or_id._id
db.delete_doc(doc_or_id, **get_safe_write_kwargs())
def apply_update(doc, update_fn, max_tries=5):
"""
A function for safely applying a change to a couch doc, retrying to get around
ResourceConflict errors that stem from the distributed Cloudant nodes.
"""
tries = 0
while tries < max_tries:
try:
update_fn(doc)
doc.save()
return doc
except ResourceConflict:
doc = doc.__class__.get(doc._id)
tries += 1
raise ResourceConflict("Document update conflict. -- Max Retries Reached")
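# A minimal usage sketch for apply_update (hypothetical document class and
# field; not part of the original module). The update function mutates the doc
# in place; on a ResourceConflict the doc is re-fetched and the update retried:
#
#   def bump_counter(doc):
#       doc.counter = (doc.counter or 0) + 1
#
#   doc = MyDocument.get(doc_id)
#   doc = apply_update(doc, bump_counter)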
def _is_couch_error(err):
if isinstance(err, BulkFetchException):
return True
request = err.request
if request is None:
request = _get_request_from_traceback(err.__traceback__)
return request and request.url.startswith(_get_couch_base_urls())
# Decorator to retry function call on Couch error
#
# Retry up to 5 times with exponential backoff. Raise the last
# received error from Couch if all calls fail.
retry_on_couch_error = retry_on(
BulkFetchException,
RequestException,
should_retry=_is_couch_error,
)
def _get_request_from_traceback(tb):
# Response.iter_content() raises errors without request context.
# Maybe https://github.com/psf/requests/pull/5323 will get merged?
while tb.tb_next is not None:
tb = tb.tb_next
if "self" in tb.tb_frame.f_locals:
obj = tb.tb_frame.f_locals["self"]
if isinstance(obj, Response) and obj.request:
return obj.request
return None
@memoized
def _get_couch_base_urls():
urls = set()
for config in settings.COUCH_DATABASES.values():
protocol = 'https' if config['COUCH_HTTPS'] else 'http'
urls.add(f"{protocol}://{config['COUCH_SERVER_ROOT']}")
return tuple(urls)
|
3,261 |
config options
|
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, load, rm, save
from conan.tools.microsoft import is_msvc
from conan.tools.scm import Version
import os
import re
required_conan_version = ">=1.53.0"
class RestbedConan(ConanFile):
name = "restbed"
homepage = "https://github.com/Corvusoft/restbed"
description = "Corvusoft's Restbed framework brings asynchronous RESTful functionality to C++14 applications."
topics = ("restful", "server", "client", "json", "http", "ssl", "tls")
url = "https://github.com/conan-io/conan-center-index"
license = "AGPL-3.0-or-later", "LicenseRef-CPL" # Corvusoft Permissive License (CPL)
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"ipc": [True, False],
"with_openssl": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"ipc": False,
"with_openssl": True,
}
@property
def _minimum_cpp_standard(self):
return 14
@property
def _compilers_minimum_version(self):
return {
"gcc": "5",
"clang": "7",
"apple-clang": "10",
}
def export_sources(self):
export_conandata_patches(self)
def METHOD_NAME(self):
if self.settings.os == "Windows":
del self.options.fPIC
del self.options.ipc
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("asio/1.27.0")
if self.options.with_openssl:
self.requires("openssl/[>=1.1 <4]")
def validate(self):
if getattr(self.info.settings.compiler, "cppstd"):
check_min_cppstd(self, self._minimum_cpp_standard)
if not is_msvc(self):
minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)
if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support."
)
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["BUILD_TESTS"] = False
tc.variables["BUILD_SSL"] = self.options.with_openssl
tc.variables["BUILD_IPC"] = self.options.get_safe("ipc", False)
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def _patch_sources(self):
apply_conandata_patches(self)
if not self.options.shared:
# Remove __declspec(dllexport) and __declspec(dllimport)
for root, _, files in os.walk(self.source_folder):
for file in files:
if os.path.splitext(file)[1] in (".hpp", ".h"):
full_path = os.path.join(root, file)
data = load(self, full_path)
data, _ = re.subn(r"__declspec\((dllexport|dllimport)\)", "", data)
save(self, full_path, data)
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "LICENSE*", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
def package_info(self):
libname = "restbed"
if self.settings.os in ("Windows", ) and self.options.shared:
libname += "-shared"
self.cpp_info.libs = [libname]
if self.settings.os in ("FreeBSD", "Linux", ):
self.cpp_info.system_libs.extend(["dl", "m"])
elif self.settings.os in ("Windows", ):
self.cpp_info.system_libs.append("mswsock")
|
3,262 |
test user object type error
|
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from nautobot.core.celery import app
from nautobot.core.testing import TransactionTestCase
from nautobot.core.utils.lookup import get_changes_for_model
from nautobot.dcim.models import Location, LocationType
from nautobot.extras.choices import ObjectChangeActionChoices, ObjectChangeEventContextChoices
from nautobot.extras.context_managers import web_request_context
from nautobot.extras.models import Status, Webhook
# Use the proper swappable User model
User = get_user_model()
class WebRequestContextTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="jacob", email="[email protected]", password="top_secret")
location_ct = ContentType.objects.get_for_model(Location)
MOCK_URL = "http://localhost/"
MOCK_SECRET = "LOOKATMEIMASECRETSTRING"
webhooks = Webhook.objects.bulk_create(
(
Webhook(
name="Location Create Webhook",
type_create=True,
payload_url=MOCK_URL,
secret=MOCK_SECRET,
),
)
)
for webhook in webhooks:
webhook.content_types.set([location_ct])
app.control.purge() # Begin each test with an empty queue
def METHOD_NAME(self):
with self.assertRaises(TypeError):
with web_request_context("a string is not a user object"):
pass
def test_change_log_created(self):
with web_request_context(self.user):
location_type = LocationType.objects.get(name="Campus")
location_status = Status.objects.get_for_model(Location).first()
location = Location(name="Test Location 1", location_type=location_type, status=location_status)
location.save()
location = Location.objects.get(name="Test Location 1")
oc_list = get_changes_for_model(location).order_by("pk")
self.assertEqual(len(oc_list), 1)
self.assertEqual(oc_list[0].changed_object, location)
self.assertEqual(oc_list[0].action, ObjectChangeActionChoices.ACTION_CREATE)
def test_change_log_context(self):
with web_request_context(self.user, context_detail="test_change_log_context"):
location_type = LocationType.objects.get(name="Campus")
location_status = Status.objects.get_for_model(Location).first()
location = Location(name="Test Location 1", location_type=location_type, status=location_status)
location.save()
location = Location.objects.get(name="Test Location 1")
oc_list = get_changes_for_model(location)
with self.subTest():
self.assertEqual(oc_list[0].change_context, ObjectChangeEventContextChoices.CONTEXT_ORM)
with self.subTest():
self.assertEqual(oc_list[0].change_context_detail, "test_change_log_context")
def test_change_webhook_enqueued(self):
"""Test that the webhook resides on the queue"""
# TODO(john): come back to this with a way to actually do it without a running worker
# The celery inspection API expects to be able to communicate with at least 1 running
# worker and there does not appear to be an easy way to look into the queues directly.
# with web_request_context(self.user):
# site = Site(name="Test Site 2")
# site.save()
# Verify that a job was queued for the object creation webhook
# site = Site.objects.get(name="Test Site 2")
# self.assertEqual(job.args[0], Webhook.objects.get(type_create=True))
# self.assertEqual(job.args[1]["id"], str(site.pk))
# self.assertEqual(job.args[2], "site")
class WebRequestContextTransactionTestCase(TransactionTestCase):
def test_change_log_thread_safe(self):
"""
Emulate a race condition where the change log signal handler
is disconnected while there is a pending object change.
"""
user = User.objects.create(username="test-user123")
with web_request_context(user, context_detail="test_change_log_context"):
with web_request_context(user, context_detail="test_change_log_context"):
Status.objects.create(name="Test Status 1")
Status.objects.create(name="Test Status 2")
self.assertEqual(get_changes_for_model(Status).count(), 2)
|
3,263 |
centers
|
#!/usr/bin/env python
from math import pi
import numpy as np
from numpy.linalg import norm
import openmc
import openmc.model
import pytest
import scipy.spatial
_RADIUS = 0.1
_PACKING_FRACTION = 0.35
_PARAMS = [
{'shape': 'rectangular_prism', 'volume': 1**3},
{'shape': 'x_cylinder', 'volume': 1*pi*1**2},
{'shape': 'y_cylinder', 'volume': 1*pi*1**2},
{'shape': 'z_cylinder', 'volume': 1*pi*1**2},
{'shape': 'sphere', 'volume': 4/3*pi*1**3},
{'shape': 'spherical_shell', 'volume': 4/3*pi*(1**3 - 0.5**3)}
]
@pytest.fixture(scope='module', params=_PARAMS)
def container(request):
return request.param
@pytest.fixture(scope='module')
def METHOD_NAME(request, container):
return request.getfixturevalue('centers_' + container['shape'])
@pytest.fixture(scope='module')
def centers_rectangular_prism():
min_x = openmc.XPlane(0)
max_x = openmc.XPlane(1)
min_y = openmc.YPlane(0)
max_y = openmc.YPlane(1)
min_z = openmc.ZPlane(0)
max_z = openmc.ZPlane(1)
region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z
return openmc.model.pack_spheres(radius=_RADIUS, region=region,
pf=_PACKING_FRACTION, initial_pf=0.2)
@pytest.fixture(scope='module')
def centers_x_cylinder():
cylinder = openmc.XCylinder(r=1, y0=1, z0=2)
min_x = openmc.XPlane(0)
max_x = openmc.XPlane(1)
region = +min_x & -max_x & -cylinder
return openmc.model.pack_spheres(radius=_RADIUS, region=region,
pf=_PACKING_FRACTION, initial_pf=0.2)
@pytest.fixture(scope='module')
def centers_y_cylinder():
cylinder = openmc.YCylinder(r=1, x0=1, z0=2)
min_y = openmc.YPlane(0)
max_y = openmc.YPlane(1)
region = +min_y & -max_y & -cylinder
return openmc.model.pack_spheres(radius=_RADIUS, region=region,
pf=_PACKING_FRACTION, initial_pf=0.2)
@pytest.fixture(scope='module')
def centers_z_cylinder():
cylinder = openmc.ZCylinder(r=1, x0=1, y0=2)
min_z = openmc.ZPlane(0)
max_z = openmc.ZPlane(1)
region = +min_z & -max_z & -cylinder
return openmc.model.pack_spheres(radius=_RADIUS, region=region,
pf=_PACKING_FRACTION, initial_pf=0.2)
@pytest.fixture(scope='module')
def centers_sphere():
sphere = openmc.Sphere(r=1, x0=1, y0=2, z0=3)
region = -sphere
return openmc.model.pack_spheres(radius=_RADIUS, region=region,
pf=_PACKING_FRACTION, initial_pf=0.2)
@pytest.fixture(scope='module')
def centers_spherical_shell():
sphere = openmc.Sphere(r=1, x0=1, y0=2, z0=3)
inner_sphere = openmc.Sphere(r=0.5, x0=1, y0=2, z0=3)
region = -sphere & +inner_sphere
return openmc.model.pack_spheres(radius=_RADIUS, region=region,
pf=_PACKING_FRACTION, initial_pf=0.2)
@pytest.fixture(scope='module')
def triso_universe():
sphere = openmc.Sphere(r=_RADIUS)
cell = openmc.Cell(region=-sphere)
univ = openmc.Universe(cells=[cell])
return univ
def test_overlap(METHOD_NAME):
"""Check that none of the spheres in the packed configuration overlap."""
# Create KD tree for quick nearest neighbor search
tree = scipy.spatial.cKDTree(METHOD_NAME)
# Find distance to nearest neighbor for all spheres
d = tree.query(METHOD_NAME, k=2)[0]
# Get the smallest distance between any two spheres
d_min = min(d[:, 1])
assert d_min > 2*_RADIUS or d_min == pytest.approx(2*_RADIUS)
def test_contained_rectangular_prism(centers_rectangular_prism):
"""Make sure all spheres are entirely contained within the domain."""
d_max = np.amax(centers_rectangular_prism) + _RADIUS
d_min = np.amin(centers_rectangular_prism) - _RADIUS
assert d_max < 1 or d_max == pytest.approx(1)
assert d_min > 0 or d_min == pytest.approx(0)
def test_contained_x_cylinder(centers_x_cylinder):
"""Make sure all spheres are entirely contained within the domain."""
d = np.linalg.norm(centers_x_cylinder[:,[1,2]] - [1, 2], axis=1)
r_max = max(d) + _RADIUS
x_max = max(centers_x_cylinder[:,0]) + _RADIUS
x_min = min(centers_x_cylinder[:,0]) - _RADIUS
assert r_max < 1 or r_max == pytest.approx(1)
assert x_max < 1 or x_max == pytest.approx(1)
assert x_min > 0 or x_min == pytest.approx(0)
def test_contained_y_cylinder(centers_y_cylinder):
"""Make sure all spheres are entirely contained within the domain."""
d = np.linalg.norm(centers_y_cylinder[:,[0,2]] - [1, 2], axis=1)
r_max = max(d) + _RADIUS
y_max = max(centers_y_cylinder[:,1]) + _RADIUS
y_min = min(centers_y_cylinder[:,1]) - _RADIUS
assert r_max < 1 or r_max == pytest.approx(1)
assert y_max < 1 or y_max == pytest.approx(1)
assert y_min > 0 or y_min == pytest.approx(0)
def test_contained_z_cylinder(centers_z_cylinder):
"""Make sure all spheres are entirely contained within the domain."""
d = np.linalg.norm(centers_z_cylinder[:,[0,1]] - [1, 2], axis=1)
r_max = max(d) + _RADIUS
z_max = max(centers_z_cylinder[:,2]) + _RADIUS
z_min = min(centers_z_cylinder[:,2]) - _RADIUS
assert r_max < 1 or r_max == pytest.approx(1)
assert z_max < 1 or z_max == pytest.approx(1)
assert z_min > 0 or z_min == pytest.approx(0)
def test_contained_sphere(centers_sphere):
"""Make sure all spheres are entirely contained within the domain."""
d = np.linalg.norm(centers_sphere - [1, 2, 3], axis=1)
r_max = max(d) + _RADIUS
assert r_max < 1 or r_max == pytest.approx(1)
def test_contained_spherical_shell(centers_spherical_shell):
"""Make sure all spheres are entirely contained within the domain."""
d = np.linalg.norm(centers_spherical_shell - [1, 2, 3], axis=1)
r_max = max(d) + _RADIUS
r_min = min(d) - _RADIUS
assert r_max < 1 or r_max == pytest.approx(1)
assert r_min > 0.5 or r_min == pytest.approx(0.5)
def test_packing_fraction(container, METHOD_NAME):
"""Check that the actual PF is close to the requested PF."""
pf = len(METHOD_NAME) * 4/3 * pi *_RADIUS**3 / container['volume']
assert pf == pytest.approx(_PACKING_FRACTION, rel=1e-2)
def test_num_spheres():
"""Check that the function returns the correct number of spheres"""
METHOD_NAME = openmc.model.pack_spheres(
radius=_RADIUS, region=-openmc.Sphere(r=1), num_spheres=50
)
assert len(METHOD_NAME) == 50
def test_triso_lattice(triso_universe, centers_rectangular_prism):
trisos = [openmc.model.TRISO(_RADIUS, triso_universe, c)
for c in centers_rectangular_prism]
lower_left = np.array((0, 0, 0))
upper_right = np.array((1, 1, 1))
shape = (3, 3, 3)
pitch = (upper_right - lower_left)/shape
background = openmc.Material()
lattice = openmc.model.create_triso_lattice(
trisos, lower_left, pitch, shape, background
)
def test_container_input(triso_universe):
# Invalid container shape
with pytest.raises(ValueError):
METHOD_NAME = openmc.model.pack_spheres(
radius=_RADIUS, region=+openmc.Sphere(r=1), num_spheres=100
)
def test_packing_fraction_input():
# Provide neither packing fraction nor number of spheres
with pytest.raises(ValueError):
METHOD_NAME = openmc.model.pack_spheres(
radius=_RADIUS, region=-openmc.Sphere(r=1)
)
# Specify a packing fraction that is too high for CRP
with pytest.raises(ValueError):
METHOD_NAME = openmc.model.pack_spheres(
radius=_RADIUS, region=-openmc.Sphere(r=1), pf=1
)
# Specify a packing fraction that is too high for RSP
with pytest.raises(ValueError):
METHOD_NAME = openmc.model.pack_spheres(
radius=_RADIUS, region=-openmc.Sphere(r=1), pf=0.5, initial_pf=0.4
)
|
3,264 |
continuous training pipeline
|
# This sample demonstrates a common training scenario.
# New models are being trained starting from the production model (if it exists).
# This sample produces two runs:
# 1. The trainer will train the model from scratch and set as prod after testing it
# 2. Exact same configuration, but the pipeline will discover the existing prod model (published by the 1st run) and warm-start the training from it.
# GCS URI of a directory where the models and the model pointers should be stored.
model_dir_uri='gs://<bucket>/<path>'
kfp_endpoint=None
import kfp.deprecated as kfp
from kfp.deprecated import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
xgboost_train_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml')
xgboost_predict_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
drop_header_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml')
calculate_regression_metrics_from_csv_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml')
download_from_gcs_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/5c7593f18f347f1c03f5ae6778a1ff305abc315c/components/google-cloud/storage/download/component.yaml')
upload_to_gcs_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/google-cloud/storage/upload_to_explicit_uri/component.yaml')
upload_to_gcs_unique_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/google-cloud/storage/upload_to_unique_uri/component.yaml')
def METHOD_NAME(
model_dir_uri,
training_start_date: str = '2019-02-01',
training_end_date: str = '2019-03-01',
testing_start_date: str = '2019-01-01',
testing_end_date: str = '2019-02-01',
):
# Preparing the training and testing data
training_data = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "{}" AND trip_start_timestamp < "{}"'.format(str(training_start_date), str(training_end_date)),
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
).set_display_name('Training data').output
testing_data = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "{}" AND trip_start_timestamp < "{}"'.format(str(testing_start_date), str(testing_end_date)),
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
).set_display_name('Testing data').output
# Preparing the true values for the testing data
true_values_table = pandas_transform_csv_op(
table=testing_data,
transform_code='''df = df[["tips"]]''',
).set_display_name('True values').output
true_values = drop_header_op(true_values_table).output
# Getting the active prod model
prod_model_pointer_uri = str(model_dir_uri) + 'prod'
get_prod_model_uri_task = download_from_gcs_op(
gcs_path=prod_model_pointer_uri,
default_data='',
).set_display_name('Get prod model')
# Disabling cache reuse to always get new data
get_prod_model_uri_task.execution_options.caching_strategy.max_cache_staleness = 'P0D'
prod_model_uri = get_prod_model_uri_task.output
# Training new model from scratch
with kfp.dsl.Condition(prod_model_uri == ""):
# Training
model = xgboost_train_on_csv_op(
training_data=training_data,
label_column=0,
objective='reg:squarederror',
num_iterations=400,
).outputs['model']
# Predicting
predictions = xgboost_predict_on_csv_op(
data=testing_data,
model=model,
label_column=0,
).output
# Calculating the regression metrics
metrics_task = calculate_regression_metrics_from_csv_op(
true_values=true_values,
predicted_values=predictions,
)
# Checking the metrics
with kfp.dsl.Condition(metrics_task.outputs['mean_squared_error'] < 2.0):
# Uploading the model
model_uri = upload_to_gcs_unique_op(
data=model,
gcs_path_prefix=model_dir_uri,
).set_display_name('Upload model').output
# Setting the model as prod
upload_to_gcs_op(
data=model_uri,
gcs_path=prod_model_pointer_uri,
).set_display_name('Set prod model')
# Training new model starting from the prod model
with kfp.dsl.Condition(prod_model_uri != ""):
# Downloading the model
prod_model = download_from_gcs_op(prod_model_uri).output
# Training
model = xgboost_train_on_csv_op(
training_data=training_data,
starting_model=prod_model,
label_column=0,
objective='reg:squarederror',
num_iterations=100,
).outputs['model']
# Predicting
predictions = xgboost_predict_on_csv_op(
data=testing_data,
model=model,
label_column=0,
).output
# Calculating the regression metrics
metrics_task = calculate_regression_metrics_from_csv_op(
true_values=true_values,
predicted_values=predictions,
)
# Checking the metrics
with kfp.dsl.Condition(metrics_task.outputs['mean_squared_error'] < 2.0):
# Uploading the model
model_uri = upload_to_gcs_unique_op(
data=model,
gcs_path_prefix=model_dir_uri,
).set_display_name('Upload model').output
# Setting the model as prod
upload_to_gcs_op(
data=model_uri,
gcs_path=prod_model_pointer_uri,
).set_display_name('Set prod model')
if __name__ == '__main__':
# Running the first time. The trainer will train the model from scratch and set as prod after testing it
pipeline_run = kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(
METHOD_NAME,
arguments=dict(
model_dir_uri=model_dir_uri,
training_start_date='2019-02-01',
training_end_date='2019-03-01',
),
)
pipeline_run.wait_for_run_completion()
# Running the second time. The trainer should warm-start the training from the prod model and set the new model as prod after testing it
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(
METHOD_NAME,
arguments=dict(
model_dir_uri=model_dir_uri,
training_start_date='2019-02-01',
training_end_date='2019-03-01',
),
)
|
3,265 |
landmarks names ordered
|
# Copyright (c) 2020 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import math
from collections import namedtuple
from typing import List
import numpy as np
import tensorflow.compat.v1 as tf1
from tensorflow.python.platform import gfile
from cached_property import cached_property
import sys
sys.path.append('srcext')
from mtcnn import MTCNN
from src.constants import ENV
from src.services.dto.bounding_box import BoundingBoxDTO
from src.services.facescan.plugins import mixins
from src.services.facescan.imgscaler.imgscaler import ImgScaler
from src.services.imgtools.proc_img import crop_img, squish_img
from src.services.imgtools.types import Array3D
from src.services.utils.pyutils import get_current_dir
from src.services.facescan.plugins import base
from src._endpoints import FaceDetection
CURRENT_DIR = get_current_dir(__file__)
logger = logging.getLogger(__name__)
_EmbeddingCalculator = namedtuple('_EmbeddingCalculator', 'graph sess')
_FaceDetectionNets = namedtuple('_FaceDetectionNets', 'pnet rnet onet')
def prewhiten(img):
""" Normalize image."""
mean = np.mean(img)
std = np.std(img)
std_adj = np.maximum(std, 1.0 / np.sqrt(img.size))
y = np.multiply(np.subtract(img, mean), 1 / std_adj)
return y
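# In effect prewhiten() standardizes the image to zero mean and unit variance,
# y = (img - mean(img)) / max(std(img), 1 / sqrt(img.size)); clamping the
# standard deviation keeps near-constant images from blowing up the division.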
class FaceDetector(mixins.FaceDetectorMixin, base.BasePlugin):
FACE_MIN_SIZE = 20
SCALE_FACTOR = 0.709
IMAGE_SIZE = 160
IMG_LENGTH_LIMIT = ENV.IMG_LENGTH_LIMIT
KEYPOINTS_ORDER = ['left_eye', 'right_eye', 'nose', 'mouth_left', 'mouth_right']
# detection settings
det_prob_threshold = 0.85
det_threshold_a = 0.9436513301
det_threshold_b = 0.7059968943
det_threshold_c = 0.5506904359
# face alignment settings (were calculated for current detector)
left_margin = 0.2125984251968504
right_margin = 0.2230769230769231
top_margin = 0.10526315789473684
bottom_margin = 0.09868421052631579
@cached_property
def _face_detection_net(self):
return MTCNN(
min_face_size=self.FACE_MIN_SIZE,
scale_factor=self.SCALE_FACTOR,
steps_threshold=[self.det_threshold_a, self.det_threshold_b, self.det_threshold_c]
)
def crop_face(self, img: Array3D, box: BoundingBoxDTO) -> Array3D:
return squish_img(crop_img(img, box), (self.IMAGE_SIZE, self.IMAGE_SIZE))
def find_faces(self, img: Array3D, det_prob_threshold: float = None) -> List[BoundingBoxDTO]:
if det_prob_threshold is None:
det_prob_threshold = self.det_prob_threshold
assert 0 <= det_prob_threshold <= 1
scaler = ImgScaler(self.IMG_LENGTH_LIMIT)
img = scaler.downscale_img(img)
if FaceDetection.SKIPPING_FACE_DETECTION:
bounding_boxes = []
bounding_boxes.append({
'box': [0, 0, img.shape[0], img.shape[1]],
'confidence': 1.0,
'keypoints': {
'left_eye': (),
'right_eye': (),
'nose': (),
'mouth_left': (),
'mouth_right': (),
}
})
det_prob_threshold = self.det_prob_threshold
detect_face_result = bounding_boxes
else:
fdn = self._face_detection_net
detect_face_result = fdn.detect_faces(img)
img_size = np.asarray(img.shape)[0:2]
bounding_boxes = []
for face in detect_face_result:
x, y, w, h = face['box']
box = BoundingBoxDTO(
x_min=int(np.maximum(x - (self.left_margin * w), 0)),
y_min=int(np.maximum(y - (self.top_margin * h), 0)),
x_max=int(np.minimum(x + w + (self.right_margin * w), img_size[1])),
y_max=int(np.minimum(y + h + (self.bottom_margin * h), img_size[0])),
np_landmarks=np.array([list(face['keypoints'][point_name]) for point_name in self.KEYPOINTS_ORDER]),
probability=face['confidence']
)
logger.debug(f"Found: {box}")
bounding_boxes.append(box)
filtered_bounding_boxes = []
for box in bounding_boxes:
box = box.scaled(scaler.upscale_coefficient)
if box.probability <= det_prob_threshold:
logger.debug(f'Box filtered out because below threshold ({det_prob_threshold}): {box}')
continue
filtered_bounding_boxes.append(box)
return filtered_bounding_boxes
class Calculator(mixins.CalculatorMixin, base.BasePlugin):
ml_models = (
# VGGFace2 training set, 0.9965 LFW accuracy
('20180402-114759', '1im5Qq006ZEV_tViKh3cgia_Q4jJ13bRK', (1.1817961, 5.291995557), 0.4),
# CASIA-WebFace training set, 0.9905 LFW accuracy
('20180408-102900', '100w4JIUz44Tkwte9F-wEH0DOFsY-bPaw', (1.1362496, 5.803152427), 0.4),
# CASIA-WebFace-Masked, 0.9873 LFW, 0.9667 LFW-Masked (orig model has 0.9350 on LFW-Masked)
('inception_resnetv1_casia_masked', '1FddVjS3JbtUOjgO0kWs43CAh0nJH2RrG', (1.1145709, 4.554903071), 0.6)
)
BATCH_SIZE = 25
@property
def ml_model_file(self):
return str(self.ml_model.path / f'{self.ml_model.name}.pb')
def calc_embedding(self, face_img: Array3D) -> Array3D:
return self._calculate_embeddings([face_img])[0]
@cached_property
def _embedding_calculator(self):
with tf1.Graph().as_default() as graph:
graph_def = tf1.GraphDef()
with gfile.FastGFile(self.ml_model_file, 'rb') as f:
model = f.read()
graph_def.ParseFromString(model)
tf1.import_graph_def(graph_def, name='')
return _EmbeddingCalculator(graph=graph, sess=tf1.Session(graph=graph))
def _calculate_embeddings(self, cropped_images):
"""Run forward pass to calculate embeddings"""
prewhitened_images = [prewhiten(img) for img in cropped_images]
calc_model = self._embedding_calculator
graph_images_placeholder = calc_model.graph.get_tensor_by_name("input:0")
graph_embeddings = calc_model.graph.get_tensor_by_name("embeddings:0")
graph_phase_train_placeholder = calc_model.graph.get_tensor_by_name("phase_train:0")
embedding_size = graph_embeddings.get_shape()[1]
image_count = len(prewhitened_images)
batches_per_epoch = int(math.ceil(1.0 * image_count / self.BATCH_SIZE))
embeddings = np.zeros((image_count, embedding_size))
for i in range(batches_per_epoch):
start_index = i * self.BATCH_SIZE
end_index = min((i + 1) * self.BATCH_SIZE, image_count)
feed_dict = {graph_images_placeholder: prewhitened_images[start_index:end_index], graph_phase_train_placeholder: False}
embeddings[start_index:end_index, :] = calc_model.sess.run(
graph_embeddings, feed_dict=feed_dict)
return embeddings
class LandmarksDetector(mixins.LandmarksDetectorMixin, base.BasePlugin):
""" Extract landmarks from FaceDetector results."""
class PoseEstimator(mixins.PoseEstimatorMixin, base.BasePlugin):
""" Estimate head rotation regarding the camera """
@staticmethod
def METHOD_NAME():
""" List of lanmarks names orderred as in detector """
return FaceDetector.KEYPOINTS_ORDER
|
3,266 |
verify key
|
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import uuid4
from alerta.app import db, key_helper
from alerta.database.base import Query
from alerta.models.enums import Scope
from alerta.utils.format import DateTime
from alerta.utils.response import absolute_url
JSON = Dict[str, Any]
class ApiKeyStatus(str, Enum):
Active = 'active'
Expired = 'expired'
class ApiKey:
def __init__(self, user: str, scopes: List[str], text: str = '', expire_time: datetime = None, customer: str = None, **kwargs) -> None:
self.id = kwargs.get('id') or str(uuid4())
self.key = kwargs.get('key', None) or key_helper.generate()
self.user = user
self.scopes = scopes or key_helper.user_default_scopes
self.text = text
self.expire_time = expire_time or datetime.utcnow() + timedelta(days=key_helper.api_key_expire_days)
self.count = kwargs.get('count', 0) # type: ignore
self.last_used_time = kwargs.get('last_used_time', None)
self.customer = customer
@property
def type(self) -> str:
return key_helper.scopes_to_type(self.scopes)
@property
def status(self) -> ApiKeyStatus:
return ApiKeyStatus.Expired if datetime.utcnow() > self.expire_time else ApiKeyStatus.Active
@classmethod
def parse(cls, json: JSON) -> 'ApiKey':
if not isinstance(json.get('scopes', []), list):
raise ValueError('scopes must be a list')
api_key = ApiKey(
id=json.get('id', None),
user=json.get('user', None),
scopes=[Scope(s) for s in json.get('scopes', [])],
text=json.get('text', None),
expire_time=DateTime.parse(json['expireTime']) if 'expireTime' in json else None,
customer=json.get('customer', None),
key=json.get('key')
)
if 'type' in json:
api_key.scopes = key_helper.type_to_scopes(api_key.user, json['type'])
return api_key
@property
def serialize(self) -> Dict[str, Any]:
return {
'id': self.id,
'key': self.key,
'status': self.status,
'href': absolute_url('/key/' + self.key),
'user': self.user,
'scopes': self.scopes,
'type': self.type,
'text': self.text,
'expireTime': self.expire_time,
'count': self.count,
'lastUsedTime': self.last_used_time,
'customer': self.customer
}
def __repr__(self) -> str:
return 'ApiKey(key={!r}, status={!r}, user={!r}, scopes={!r}, expireTime={!r}, customer={!r})'.format(
self.key, self.status, self.user, self.scopes, self.expire_time, self.customer)
@classmethod
def from_document(cls, doc: Dict[str, Any]) -> 'ApiKey':
return ApiKey(
id=doc.get('id', None) or doc.get('_id'),
key=doc.get('key', None) or doc.get('_id'),
user=doc.get('user', None),
scopes=[Scope(s) for s in doc.get('scopes', list())] or key_helper.type_to_scopes(
doc.get('user', None), doc.get('type', None)) or list(),
text=doc.get('text', None),
expire_time=doc.get('expireTime', None),
count=doc.get('count', None),
last_used_time=doc.get('lastUsedTime', None),
customer=doc.get('customer', None)
)
@classmethod
def from_record(cls, rec) -> 'ApiKey':
return ApiKey(
id=rec.id,
key=rec.key,
user=rec.user,
scopes=[Scope(s) for s in rec.scopes], # legacy type => scopes conversion only required for mongo documents
text=rec.text,
expire_time=rec.expire_time,
count=rec.count,
last_used_time=rec.last_used_time,
customer=rec.customer
)
@classmethod
def from_db(cls, r: Union[Dict, Tuple]) -> 'ApiKey':
if isinstance(r, dict):
return cls.from_document(r)
elif isinstance(r, tuple):
return cls.from_record(r)
def create(self) -> 'ApiKey':
"""
Create a new API key.
"""
return ApiKey.from_db(db.create_key(self))
@staticmethod
def find_by_id(key: str, user: str = None) -> Optional['ApiKey']:
"""
Get API key details.
"""
return ApiKey.from_db(db.get_key(key, user))
@staticmethod
def find_all(query: Query = None, page: int = 1, page_size: int = 1000) -> List['ApiKey']:
"""
List all API keys.
"""
return [ApiKey.from_db(key) for key in db.get_keys(query, page, page_size)]
@staticmethod
def count(query: Query = None) -> int:
return db.get_keys_count(query)
@staticmethod
def find_by_user(user: str) -> List['ApiKey']:
"""
List API keys for a user.
"""
return [ApiKey.from_db(key) for key in db.get_keys_by_user(user)]
def update(self, **kwargs) -> 'ApiKey':
kwargs['expireTime'] = DateTime.parse(kwargs['expireTime']) if 'expireTime' in kwargs else None
return ApiKey.from_db(db.update_key(self.key, **kwargs))
def delete(self) -> bool:
"""
Delete an API key.
"""
return db.delete_key(self.key)
@staticmethod
def METHOD_NAME(key: str) -> Optional['ApiKey']:
key_info = ApiKey.from_db(db.get_key(key))
if key_info and key_info.expire_time > datetime.utcnow():
db.update_key_last_used(key)
return key_info
return None
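# A minimal lifecycle sketch (illustrative values only; not part of the original
# module): create a key, fetch it, then verify it on each request. METHOD_NAME
# above returns the key only while it is unexpired and also bumps its last-used
# timestamp via the db layer.
#
#   key = ApiKey(user='[email protected]', scopes=['read'], text='ci key').create()
#   ApiKey.find_by_id(key.key)    # full details, including status and expire time
#   ApiKey.METHOD_NAME(key.key)   # the ApiKey if still active, otherwise None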
|
3,267 |
url parameters
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"servicebus topic subscription delete",
)
class Delete(AAZCommand):
"""Delete a subscription from the specified topic.
"""
_aaz_info = {
"version": "2022-01-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicebus/namespaces/{}/topics/{}/subscriptions/{}", "2022-01-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return None
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.namespace_name = AAZStrArg(
options=["--namespace-name"],
help="The namespace name",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.subscription_name = AAZStrArg(
options=["-n", "--name", "--subscription-name"],
help="The subscription name.",
required=True,
id_part="child_name_2",
fmt=AAZStrArgFormat(
max_length=50,
min_length=1,
),
)
_args_schema.topic_name = AAZStrArg(
options=["--topic-name"],
help="The topic name.",
required=True,
id_part="child_name_1",
fmt=AAZStrArgFormat(
min_length=1,
),
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.SubscriptionsDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class SubscriptionsDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
if session.http_response.status_code in [204]:
return self.on_204(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}",
**self.METHOD_NAME
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"subscriptionName", self.ctx.args.subscription_name,
required=True,
),
**self.serialize_url_param(
"topicName", self.ctx.args.topic_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-01-01-preview",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
|
3,268 |
matrix rank
|
from __future__ import annotations
import functools
import math
from typing import Sequence
import torch
from . import _dtypes_impl, _util
from ._normalizations import ArrayLike, KeepDims, normalizer
class LinAlgError(Exception):
pass
def _atleast_float_1(a):
if not (a.dtype.is_floating_point or a.dtype.is_complex):
a = a.to(_dtypes_impl.default_dtypes().float_dtype)
return a
def _atleast_float_2(a, b):
dtyp = _dtypes_impl.result_type_impl(a, b)
if not (dtyp.is_floating_point or dtyp.is_complex):
dtyp = _dtypes_impl.default_dtypes().float_dtype
a = _util.cast_if_needed(a, dtyp)
b = _util.cast_if_needed(b, dtyp)
return a, b
def linalg_errors(func):
@functools.wraps(func)
def wrapped(*args, **kwds):
try:
return func(*args, **kwds)
except torch._C._LinAlgError as e:
raise LinAlgError(*e.args)
return wrapped
# ### Matrix and vector products ###
@normalizer
@linalg_errors
def matrix_power(a: ArrayLike, n):
a = _atleast_float_1(a)
return torch.linalg.matrix_power(a, n)
@normalizer
@linalg_errors
def multi_dot(inputs: Sequence[ArrayLike], *, out=None):
return torch.linalg.multi_dot(inputs)
# ### Solving equations and inverting matrices ###
@normalizer
@linalg_errors
def solve(a: ArrayLike, b: ArrayLike):
a, b = _atleast_float_2(a, b)
return torch.linalg.solve(a, b)
@normalizer
@linalg_errors
def lstsq(a: ArrayLike, b: ArrayLike, rcond=None):
a, b = _atleast_float_2(a, b)
# NumPy is using gelsd: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/umath_linalg.cpp#L3991
# on CUDA, only `gels` is available though, so use it instead
driver = "gels" if a.is_cuda or b.is_cuda else "gelsd"
return torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
@normalizer
@linalg_errors
def inv(a: ArrayLike):
a = _atleast_float_1(a)
result = torch.linalg.inv(a)
return result
@normalizer
@linalg_errors
def pinv(a: ArrayLike, rcond=1e-15, hermitian=False):
a = _atleast_float_1(a)
return torch.linalg.pinv(a, rtol=rcond, hermitian=hermitian)
@normalizer
@linalg_errors
def tensorsolve(a: ArrayLike, b: ArrayLike, axes=None):
a, b = _atleast_float_2(a, b)
return torch.linalg.tensorsolve(a, b, dims=axes)
@normalizer
@linalg_errors
def tensorinv(a: ArrayLike, ind=2):
a = _atleast_float_1(a)
return torch.linalg.tensorinv(a, ind=ind)
# ### Norms and other numbers ###
@normalizer
@linalg_errors
def det(a: ArrayLike):
a = _atleast_float_1(a)
return torch.linalg.det(a)
@normalizer
@linalg_errors
def slogdet(a: ArrayLike):
a = _atleast_float_1(a)
return torch.linalg.slogdet(a)
@normalizer
@linalg_errors
def cond(x: ArrayLike, p=None):
x = _atleast_float_1(x)
# check if empty
# cf: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
if x.numel() == 0 and math.prod(x.shape[-2:]) == 0:
raise LinAlgError("cond is not defined on empty arrays")
result = torch.linalg.cond(x, p=p)
# Convert nans to infs (numpy does it in a data-dependent way, depending on
# whether the input array has nans or not)
# XXX: NumPy does this: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
return torch.where(torch.isnan(result), float("inf"), result)
@normalizer
@linalg_errors
def METHOD_NAME(a: ArrayLike, tol=None, hermitian=False):
a = _atleast_float_1(a)
if a.ndim < 2:
return int((a != 0).any())
if tol is None:
# follow https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1885
atol = 0
rtol = max(a.shape[-2:]) * torch.finfo(a.dtype).eps
else:
atol, rtol = tol, 0
return torch.linalg.METHOD_NAME(a, atol=atol, rtol=rtol, hermitian=hermitian)
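# Illustrative usage sketch (not part of the original module), assuming the
# masked function wraps torch.linalg.matrix_rank: with tol=None the NumPy-style
# default tolerance max(m, n) * eps is forwarded as rtol together with atol=0.
def _example_default_tolerance():
    a = torch.eye(4, dtype=torch.float64)
    a[3, 3] = 0.0
    rtol = max(a.shape[-2:]) * torch.finfo(a.dtype).eps
    return torch.linalg.matrix_rank(a, atol=0, rtol=rtol, hermitian=False)  # tensor(3)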
@normalizer
@linalg_errors
def norm(x: ArrayLike, ord=None, axis=None, keepdims: KeepDims = False):
x = _atleast_float_1(x)
return torch.linalg.norm(x, ord=ord, dim=axis)
# ### Decompositions ###
@normalizer
@linalg_errors
def cholesky(a: ArrayLike):
a = _atleast_float_1(a)
return torch.linalg.cholesky(a)
@normalizer
@linalg_errors
def qr(a: ArrayLike, mode="reduced"):
a = _atleast_float_1(a)
result = torch.linalg.qr(a, mode=mode)
if mode == "r":
# match NumPy
result = result.R
return result
@normalizer
@linalg_errors
def svd(a: ArrayLike, full_matrices=True, compute_uv=True, hermitian=False):
a = _atleast_float_1(a)
if not compute_uv:
return torch.linalg.svdvals(a)
# NB: ignore the hermitian= argument (no pytorch equivalent)
result = torch.linalg.svd(a, full_matrices=full_matrices)
return result
# ### Eigenvalues and eigenvectors ###
@normalizer
@linalg_errors
def eig(a: ArrayLike):
a = _atleast_float_1(a)
w, vt = torch.linalg.eig(a)
if not a.is_complex() and w.is_complex() and (w.imag == 0).all():
w = w.real
vt = vt.real
return w, vt
@normalizer
@linalg_errors
def eigh(a: ArrayLike, UPLO="L"):
a = _atleast_float_1(a)
return torch.linalg.eigh(a, UPLO=UPLO)
@normalizer
@linalg_errors
def eigvals(a: ArrayLike):
a = _atleast_float_1(a)
result = torch.linalg.eigvals(a)
if not a.is_complex() and result.is_complex() and (result.imag == 0).all():
result = result.real
return result
@normalizer
@linalg_errors
def eigvalsh(a: ArrayLike, UPLO="L"):
a = _atleast_float_1(a)
return torch.linalg.eigvalsh(a, UPLO=UPLO)
|
3,269 |
discover
|
from mage_integrations.connections.redshift import Redshift as RedshiftConnection
from mage_integrations.sources.redshift.constants import (
DATA_TYPES_BOOLEAN,
DATA_TYPES_INTEGER,
DATA_TYPES_NUMBER,
DATA_TYPES_DATE,
)
from mage_integrations.sources.base import main
from mage_integrations.sources.catalog import Catalog, CatalogEntry
from mage_integrations.sources.constants import (
COLUMN_FORMAT_DATETIME,
COLUMN_TYPE_BOOLEAN,
COLUMN_TYPE_INTEGER,
COLUMN_TYPE_NULL,
COLUMN_TYPE_NUMBER,
COLUMN_TYPE_STRING,
REPLICATION_METHOD_FULL_TABLE,
UNIQUE_CONFLICT_METHOD_UPDATE,
)
from mage_integrations.sources.sql.base import Source
from mage_integrations.sources.utils import get_standard_metadata
from mage_integrations.utils.dictionary import group_by
from singer.schema import Schema
from typing import List
class Redshift(Source):
@property
def table_prefix(self):
database_name = self.config['database']
schema_name = self.config['schema']
return f'{database_name}.{schema_name}.'
def build_connection(self) -> RedshiftConnection:
return RedshiftConnection(
access_key_id=self.config.get('access_key_id'),
cluster_identifier=self.config.get('cluster_identifier'),
database=self.config.get('database'),
db_user=self.config.get('db_user'),
host=self.config.get('host'),
password=self.config.get('password'),
port=self.config.get('port'),
region=self.config.get('region'),
secret_access_key=self.config.get('secret_access_key'),
user=self.config.get('user'),
verbose=0 if self.discover_mode or self.discover_streams_mode else 1,
)
def METHOD_NAME(self, streams: List[str] = None) -> Catalog:
schema = self.config['schema']
query = f"""
SELECT
schemaname
, tablename
, "column" AS column_name
, type
, encoding
, "distkey"
, "sortkey"
, "notnull"
FROM PG_TABLE_DEF
WHERE schemaname = '{schema}'
"""
if streams:
table_names = ', '.join([f"'{n}'" for n in streams])
query = f'{query}\nAND tablename IN ({table_names})'
rows = self.build_connection().execute([
f'SET search_path TO {schema}',
query,
])
groups = group_by(lambda t: t[1], rows[len(rows) - 1])
streams = []
for stream_id, columns_data in groups.items():
properties = dict()
unique_constraints = []
valid_replication_keys = []
# https://docs.aws.amazon.com/redshift/latest/dg/r_PG_TABLE_DEF.html
for column_data in columns_data:
column_name = column_data[2]
column_type = column_data[3]
is_nullable = column_data[7]
column_format = None
column_properties = None
column_types = []
valid_replication_keys.append(column_name)
if is_nullable:
column_types.append(COLUMN_TYPE_NULL)
column_type = column_type.split('(')[0]
if column_type in DATA_TYPES_BOOLEAN:
column_types.append(COLUMN_TYPE_BOOLEAN)
elif column_type in DATA_TYPES_INTEGER:
column_types.append(COLUMN_TYPE_INTEGER)
elif column_type in DATA_TYPES_NUMBER:
column_types.append(COLUMN_TYPE_NUMBER)
elif column_type in DATA_TYPES_DATE:
column_format = COLUMN_FORMAT_DATETIME
column_types.append(COLUMN_TYPE_STRING)
else:
column_types.append(COLUMN_TYPE_STRING)
properties[column_name] = dict(
properties=column_properties,
format=column_format,
type=column_types,
)
schema = Schema.from_dict(dict(
properties=properties,
type='object',
))
metadata = get_standard_metadata(
key_properties=unique_constraints,
replication_method=REPLICATION_METHOD_FULL_TABLE,
schema=schema.to_dict(),
stream_id=stream_id,
valid_replication_keys=unique_constraints + valid_replication_keys,
)
catalog_entry = CatalogEntry(
key_properties=unique_constraints,
metadata=metadata,
replication_method=REPLICATION_METHOD_FULL_TABLE,
schema=schema,
stream=stream_id,
tap_stream_id=stream_id,
unique_conflict_method=UNIQUE_CONFLICT_METHOD_UPDATE,
unique_constraints=unique_constraints,
)
streams.append(catalog_entry)
return Catalog(streams)
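# Illustrative note (not part of the original module): each row returned by the
# PG_TABLE_DEF query above is a tuple in the column order of the SELECT, i.e.
# (schemaname, tablename, column_name, type, encoding, distkey, sortkey, notnull),
# and the bare type name (before any "(precision)") is what drives the
# column-type mapping into the catalog schema.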
if __name__ == '__main__':
main(Redshift)
|
3,270 |
update homophones
|
import os
from talon import Context, Module, actions, app, clip, fs, imgui, ui
########################################################################
# global settings
########################################################################
# a list of homophones where each line is a comma separated list
# e.g. where,wear,ware
# a suitable one can be found here:
# https://github.com/pimentel/homophones
cwd = os.path.dirname(os.path.realpath(__file__))
homophones_file = os.path.join(cwd, "homophones.csv")
# if quick_replace, then when a word is selected and only one homophone exists,
# replace it without bringing up the options
quick_replace = True
show_help = False
########################################################################
ctx = Context()
mod = Module()
mod.list("homophones_canonicals", desc="list of words ")
mod.tag(
"homophones_open",
desc="Tag for enabling homophones commands when the associated gui is open",
)
main_screen = ui.main_screen()
def METHOD_NAME(name, flags):
if name != homophones_file:
return
phones = {}
canonical_list = []
with open(homophones_file) as f:
for line in f:
words = line.rstrip().split(",")
canonical_list.append(words[0])
merged_words = set(words)
for word in words:
old_words = phones.get(word.lower(), [])
merged_words.update(old_words)
merged_words = sorted(merged_words)
for word in merged_words:
phones[word.lower()] = merged_words
global all_homophones
all_homophones = phones
ctx.lists["self.homophones_canonicals"] = canonical_list
METHOD_NAME(homophones_file, None)
fs.watch(cwd, METHOD_NAME)
active_word_list = None
is_selection = False
def close_homophones():
gui.hide()
ctx.tags = []
PHONES_FORMATTERS = [
lambda word: word.capitalize(),
lambda word: word.upper(),
]
def find_matching_format_function(word_with_formatting, format_functions):
"""Finds the formatter function from a list of formatter functions which transforms a word into itself.
Returns an identity function if none exists"""
for formatter in format_functions:
formatted_word = formatter(word_with_formatting)
if word_with_formatting == formatted_word:
return formatter
return lambda word: word
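# Illustrative sketch (not part of the original file): a capitalized selection
# such as "Where" matches the capitalize formatter, so the homophones offered
# back keep the original casing.
def _example_matching_formatter():
    formatter = find_matching_format_function("Where", PHONES_FORMATTERS)
    return [formatter(word) for word in ("wear", "ware")]  # ["Wear", "Ware"]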
def raise_homophones(word_to_find_homophones_for, forced=False, selection=False):
global quick_replace
global active_word_list
global show_help
global force_raise
global is_selection
force_raise = forced
is_selection = selection
if is_selection:
word_to_find_homophones_for = word_to_find_homophones_for.strip()
formatter = find_matching_format_function(
word_to_find_homophones_for, PHONES_FORMATTERS
)
word_to_find_homophones_for = word_to_find_homophones_for.lower()
    # We support plurals, but very naively: if we can't find your word but it ends in an "s", presume it's plural
    # and attempt to find the singular, then present the presumed plurals back. This could be improved!
if word_to_find_homophones_for in all_homophones:
valid_homophones = all_homophones[word_to_find_homophones_for]
elif (
word_to_find_homophones_for.endswith("s")
and word_to_find_homophones_for[:-1] in all_homophones
):
valid_homophones = map(
lambda w: w + "s", all_homophones[word_to_find_homophones_for[:-1]]
)
else:
app.notify(
"homophones.py", f'"{word_to_find_homophones_for}" not in homophones list'
)
return
# Move current word to end of list to reduce searcher's cognitive load
valid_homophones_reordered = list(
filter(
lambda word_from_list: word_from_list.lower()
!= word_to_find_homophones_for,
valid_homophones,
)
) + [word_to_find_homophones_for]
active_word_list = list(map(formatter, valid_homophones_reordered))
if (
is_selection
and len(active_word_list) == 2
and quick_replace
and not force_raise
):
if word_to_find_homophones_for == active_word_list[0].lower():
new = active_word_list[1]
else:
new = active_word_list[0]
clip.set(new)
actions.edit.paste()
return
ctx.tags = ["user.homophones_open"]
show_help = False
gui.show()
@imgui.open(x=main_screen.x + main_screen.width / 2.6, y=main_screen.y)
def gui(gui: imgui.GUI):
global active_word_list
if show_help:
gui.text("Homophone help - todo")
else:
gui.text("Select a homophone")
gui.line()
index = 1
for word in active_word_list:
if gui.button(f"Choose {index}: {word}"):
actions.insert(actions.user.homophones_select(index))
actions.user.homophones_hide()
index = index + 1
if gui.button("Phones hide"):
actions.user.homophones_hide()
def show_help_gui():
global show_help
show_help = True
gui.show()
@mod.capture(rule="{self.homophones_canonicals}")
def homophones_canonical(m) -> str:
"Returns a single string"
return m.homophones_canonicals
@mod.action_class
class Actions:
def homophones_hide():
"""Hides the homophones display"""
close_homophones()
def homophones_show(m: str):
"""Show the homophones display"""
raise_homophones(m, False, False)
def homophones_show_auto():
"""Show homophones for selection, or current word if selection is empty."""
text = actions.edit.selected_text()
if text:
raise_homophones(text, False, True)
else:
actions.edit.select_word()
actions.user.homophones_show_selection()
def homophones_show_selection():
"""Show the homophones display for the selected text"""
raise_homophones(actions.edit.selected_text(), False, True)
def homophones_force_show(m: str):
"""Show the homophones display forcibly"""
raise_homophones(m, True, False)
def homophones_force_show_selection():
"""Show the homophones display for the selected text forcibly"""
raise_homophones(actions.edit.selected_text(), True, True)
def homophones_select(number: int) -> str:
"""selects the homophone by number"""
if number <= len(active_word_list) and number > 0:
return active_word_list[number - 1]
error = "homophones.py index {} is out of range (1-{})".format(
number, len(active_word_list)
)
app.notify(error)
        raise ValueError(error)
def homophones_get(word: str) -> [str] or None:
"""Get homophones for the given word"""
word = word.lower()
if word in all_homophones:
return all_homophones[word]
return None
|
3,271 |
test domain changes with identical domain
|
from __future__ import annotations
import unittest
from unittest.mock import Mock, patch
from boto.exception import SWFResponseError
from boto.swf.exceptions import SWFDomainAlreadyExistsError
from boto.swf.layer1 import Layer1
import simpleflow.swf.mapper.settings
from simpleflow.swf.mapper.constants import DEPRECATED
from simpleflow.swf.mapper.exceptions import AlreadyExistsError, ResponseError
from simpleflow.swf.mapper.models.domain import Domain, DomainDoesNotExist
from simpleflow.swf.mapper.querysets.domain import DomainQuerySet
from simpleflow.swf.mapper.querysets.workflow import WorkflowTypeQuerySet
from tests.test_simpleflow.swf.mapper.mocks.base import MiniMock
from tests.test_simpleflow.swf.mapper.mocks.domain import mock_describe_domain
simpleflow.swf.mapper.settings.set(aws_access_key_id="fakeaccesskey", aws_secret_access_key="fakesecret")
class TestDomain(unittest.TestCase):
def setUp(self):
self.domain = Domain("testdomain")
self.qs = DomainQuerySet(self)
self.mocked_workflow_type_qs = Mock(spec=WorkflowTypeQuerySet)
self.mocked_workflow_type_qs.all.return_value = []
def tearDown(self):
pass
@patch.object(
Layer1,
"__init__",
MiniMock(aws_access_key_id="test", aws_secret_access_key="test"),
)
def test_domain_inits_connection(self):
self.assertTrue(hasattr(self.domain, "connection"))
self.assertTrue(hasattr(self.domain.connection, "aws_access_key_id"))
self.assertTrue(hasattr(self.domain.connection, "aws_secret_access_key"))
def test_domain__diff_with_different_domain(self):
with patch.object(
Layer1,
"describe_domain",
mock_describe_domain,
):
domain = Domain("different-domain", status=DEPRECATED, description="blabla")
diffs = domain._diff()
self.assertIsNotNone(diffs)
self.assertEqual(len(diffs), 4)
self.assertTrue(hasattr(diffs[0], "attr"))
self.assertTrue(hasattr(diffs[0], "local"))
self.assertTrue(hasattr(diffs[0], "upstream"))
def test_domain__diff_with_identical_domain(self):
with patch.object(
Layer1,
"describe_domain",
mock_describe_domain,
):
mocked = mock_describe_domain()
domain = Domain(
mocked["domainInfo"]["name"],
status=mocked["domainInfo"]["status"],
description=mocked["domainInfo"]["description"],
retention_period=mocked["configuration"]["workflowExecutionRetentionPeriodInDays"],
)
diffs = domain._diff()
self.assertEqual(len(diffs), 0)
def test_domain_exists_with_existing_domain(self):
with patch.object(self.domain.connection, "describe_domain"):
self.assertTrue(self.domain.exists)
def test_domain_exists_with_non_existent_domain(self):
with patch.object(self.domain.connection, "describe_domain") as mock:
mock.side_effect = SWFResponseError(
400,
"Bad Request",
{
"message": "Unknown domain: does not exist",
"__type": "com.amazonaws.swf.base.model#UnknownResourceFault",
},
"UnknownResourceFault",
)
self.assertFalse(self.domain.exists)
def test_domain_exists_with_whatever_error(self):
with patch.object(self.domain.connection, "describe_domain") as mock:
with self.assertRaises(ResponseError):
mock.side_effect = SWFResponseError(
400,
"mocking exception",
{"__type": "WhateverError", "message": "Whatever"},
)
_ = self.domain.exists
def test_domain_is_synced_with_unsynced_domain(self):
pass
def test_domain_is_synced_with_synced_domain(self):
pass
def test_domain_is_synced_over_non_existent_domain(self):
with patch.object(Layer1, "describe_domain", mock_describe_domain):
domain = Domain("non-existent-domain")
self.assertFalse(domain.is_synced)
def test_domain_changes_with_different_domain(self):
with patch.object(
Layer1,
"describe_domain",
mock_describe_domain,
):
domain = Domain("different-domain", status=DEPRECATED, description="blabla")
diffs = domain.changes
self.assertIsNotNone(diffs)
self.assertEqual(len(diffs), 4)
self.assertTrue(hasattr(diffs[0], "attr"))
self.assertTrue(hasattr(diffs[0], "local"))
self.assertTrue(hasattr(diffs[0], "upstream"))
def METHOD_NAME(self):
with patch.object(
Layer1,
"describe_domain",
mock_describe_domain,
):
mocked = mock_describe_domain()
domain = Domain(
mocked["domainInfo"]["name"],
status=mocked["domainInfo"]["status"],
description=mocked["domainInfo"]["description"],
retention_period=mocked["configuration"]["workflowExecutionRetentionPeriodInDays"],
)
diffs = domain.changes
self.assertEqual(len(diffs), 0)
def test_domain_save_valid_domain(self):
with patch.object(self.domain.connection, "register_domain"):
self.domain.save()
def test_domain_save_already_existing_domain(self):
with patch.object(self.domain.connection, "register_domain") as mock:
with self.assertRaises(AlreadyExistsError):
mock.side_effect = SWFDomainAlreadyExistsError(400, "mocking exception")
self.domain.save()
def test_domain_delete_existing_domain(self):
with patch.object(self.domain.connection, "deprecate_domain"):
self.domain.delete()
def test_domain_delete_non_existent_domain(self):
with patch.object(self.domain.connection, "deprecate_domain") as mock:
with self.assertRaises(DomainDoesNotExist):
mock.side_effect = SWFResponseError(
400,
"Bad Request",
{
"message": "Unknown domain: does not exist",
"__type": "com.amazonaws.swf.base.model#UnknownResourceFault",
},
"UnknownResourceFault",
)
self.domain.delete()
def test_domain_workflows_without_existent_workflows(self):
with patch.object(WorkflowTypeQuerySet, "all") as all_method:
all_method.return_value = []
self.assertEqual(self.domain.workflows(), [])
|
3,272 |
debug flags
|
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
import sys
import llnl.util.lang
from spack.compiler import Compiler, UnsupportedCompilerFlag
from spack.version import Version
#: compiler symlink mappings for mixed f77 compilers
f77_mapping = [
("gfortran", os.path.join("clang", "gfortran")),
("xlf_r", os.path.join("xl_r", "xlf_r")),
("xlf", os.path.join("xl", "xlf")),
("pgfortran", os.path.join("pgi", "pgfortran")),
("ifort", os.path.join("intel", "ifort")),
]
#: compiler symlink mappings for mixed f90/fc compilers
fc_mapping = [
("gfortran", os.path.join("clang", "gfortran")),
("xlf90_r", os.path.join("xl_r", "xlf90_r")),
("xlf90", os.path.join("xl", "xlf90")),
("pgfortran", os.path.join("pgi", "pgfortran")),
("ifort", os.path.join("intel", "ifort")),
]
class Clang(Compiler):
# Subclasses use possible names of C compiler
cc_names = ["clang"]
# Subclasses use possible names of C++ compiler
cxx_names = ["clang++"]
# Subclasses use possible names of Fortran 77 compiler
f77_names = ["flang", "gfortran", "xlf_r"]
# Subclasses use possible names of Fortran 90 compiler
fc_names = ["flang", "gfortran", "xlf90_r"]
version_argument = "--version"
@property
def METHOD_NAME(self):
return [
"-gcodeview",
"-gdwarf-2",
"-gdwarf-3",
"-gdwarf-4",
"-gdwarf-5",
"-gline-tables-only",
"-gmodules",
"-gz",
"-g",
]
@property
def opt_flags(self):
return ["-O0", "-O1", "-O2", "-O3", "-Ofast", "-Os", "-Oz", "-Og", "-O", "-O4"]
# Clang has support for using different fortran compilers with the
# clang executable.
@property
def link_paths(self):
# clang links are always the same
link_paths = {
"cc": os.path.join("clang", "clang"),
"cxx": os.path.join("clang", "clang++"),
}
# fortran links need to look at the actual compiler names from
# compilers.yaml to figure out which named symlink to use
for compiler_name, link_path in f77_mapping:
if self.f77 and compiler_name in self.f77:
link_paths["f77"] = link_path
break
else:
link_paths["f77"] = os.path.join("clang", "flang")
for compiler_name, link_path in fc_mapping:
if self.fc and compiler_name in self.fc:
link_paths["fc"] = link_path
break
else:
link_paths["fc"] = os.path.join("clang", "flang")
return link_paths
@property
def verbose_flag(self):
return "-v"
openmp_flag = "-fopenmp"
@property
def cxx11_flag(self):
if self.real_version < Version("3.3"):
raise UnsupportedCompilerFlag(self, "the C++11 standard", "cxx11_flag", "< 3.3")
return "-std=c++11"
@property
def cxx14_flag(self):
if self.real_version < Version("3.4"):
raise UnsupportedCompilerFlag(self, "the C++14 standard", "cxx14_flag", "< 3.5")
elif self.real_version < Version("3.5"):
return "-std=c++1y"
return "-std=c++14"
@property
def cxx17_flag(self):
if self.real_version < Version("3.5"):
raise UnsupportedCompilerFlag(self, "the C++17 standard", "cxx17_flag", "< 3.5")
elif self.real_version < Version("5.0"):
return "-std=c++1z"
return "-std=c++17"
@property
def c99_flag(self):
return "-std=c99"
@property
def c11_flag(self):
if self.real_version < Version("3.0"):
raise UnsupportedCompilerFlag(self, "the C11 standard", "c11_flag", "< 3.0")
if self.real_version < Version("3.1"):
return "-std=c1x"
return "-std=c11"
@property
def c17_flag(self):
if self.real_version < Version("6.0"):
raise UnsupportedCompilerFlag(self, "the C17 standard", "c17_flag", "< 6.0")
return "-std=c17"
@property
def c23_flag(self):
if self.real_version < Version("9.0"):
raise UnsupportedCompilerFlag(self, "the C23 standard", "c23_flag", "< 9.0")
return "-std=c2x"
@property
def cc_pic_flag(self):
return "-fPIC"
@property
def cxx_pic_flag(self):
return "-fPIC"
@property
def f77_pic_flag(self):
return "-fPIC"
@property
def fc_pic_flag(self):
return "-fPIC"
required_libs = ["libclang"]
@classmethod
@llnl.util.lang.memoized
def extract_version_from_output(cls, output):
ver = "unknown"
if ("Apple" in output) or ("AMD" in output):
return ver
match = re.search(
# Normal clang compiler versions are left as-is
r"clang version ([^ )\n]+)-svn[~.\w\d-]*|"
# Don't include hyphenated patch numbers in the version
# (see https://github.com/spack/spack/pull/14365 for details)
r"clang version ([^ )\n]+?)-[~.\w\d-]*|" r"clang version ([^ )\n]+)",
output,
)
if match:
ver = match.group(match.lastindex)
return ver
@classmethod
def fc_version(cls, fc):
# We could map from gcc/gfortran version to clang version, but on macOS
# we normally mix any version of gfortran with any version of clang.
if sys.platform == "darwin":
return cls.default_version("clang")
else:
return cls.default_version(fc)
@classmethod
def f77_version(cls, f77):
return cls.fc_version(f77)
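# Illustrative sketch (not part of the original module): sample `clang --version`
# banners and what the regex above extracts from them; vendor builds that report
# "Apple" or "AMD" are deliberately reported as "unknown".
def _example_version_extraction():
    llvm_banner = "clang version 11.0.0 (https://github.com/llvm/llvm-project)"
    apple_banner = "Apple clang version 12.0.5 (clang-1205.0.22.9)"
    return (
        Clang.extract_version_from_output(llvm_banner),   # "11.0.0"
        Clang.extract_version_from_output(apple_banner),  # "unknown"
    )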
|
3,273 |
get urlpatterns
|
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
from django.utils.functional import cached_property
from wagtail.admin.menu import WagtailMenuRegisterable, WagtailMenuRegisterableGroup
class ViewSet(WagtailMenuRegisterable):
"""
Defines a viewset to be registered with the Wagtail admin.
All properties of the viewset can be defined as class-level attributes, or passed as
keyword arguments to the constructor (in which case they will override any class-level
attributes). Additionally, the `name` property can be passed as the first positional
argument to the constructor.
"""
#: A name for this viewset, used as the default URL prefix and namespace.
name = None
#: The icon to use across the views.
icon = ""
def __init__(self, name=None, **kwargs):
if name:
self.__dict__["name"] = name
for key, value in kwargs.items():
self.__dict__[key] = value
def get_common_view_kwargs(self, **kwargs):
"""
Returns a dictionary of keyword arguments to be passed to all views within this viewset.
"""
return kwargs
def construct_view(self, view_class, **kwargs):
"""
Wrapper for view_class.as_view() which passes the kwargs returned from get_common_view_kwargs
in addition to any kwargs passed to this method. Items from get_common_view_kwargs will be
filtered to only include those that are valid for the given view_class.
"""
filtered_kwargs = {
key: value
for key, value in self.get_common_view_kwargs().items()
if hasattr(view_class, key)
}
filtered_kwargs.update(kwargs)
return view_class.as_view(**filtered_kwargs)
def inject_view_methods(self, view_class, method_names):
"""
Check for the presence of any of the named methods on this viewset. If any are found,
create a subclass of view_class that overrides those methods to call the implementation
on this viewset instead. Otherwise, return view_class unmodified.
"""
viewset = self
overrides = {}
for method_name in method_names:
viewset_method = getattr(viewset, method_name, None)
if viewset_method:
def view_method(self, *args, **kwargs):
return viewset_method(*args, **kwargs)
view_method.__name__ = method_name
overrides[method_name] = view_method
if overrides:
return type(view_class.__name__, (view_class,), overrides)
else:
return view_class
@cached_property
def url_prefix(self):
"""
The preferred URL prefix for views within this viewset. When registered through
Wagtail's ``register_admin_viewset`` hook, this will be used as the URL path component
following ``/admin/``. Other URL registration mechanisms (e.g. editing urls.py manually)
may disregard this and use a prefix of their own choosing.
Defaults to the viewset's name.
"""
if not self.name:
raise ImproperlyConfigured(
"ViewSet %r must provide a `name` property" % self
)
return self.name
@cached_property
def url_namespace(self):
"""
The URL namespace for views within this viewset. Will be used internally as the
application namespace for the viewset's URLs, and generally be the instance namespace
too.
Defaults to the viewset's name.
"""
if not self.name:
raise ImproperlyConfigured(
"ViewSet %r must provide a `name` property" % self
)
return self.name
def on_register(self):
"""
Called when the viewset is registered; subclasses can override this to perform additional setup.
"""
self.register_menu_item()
def METHOD_NAME(self):
"""
Returns a set of URL routes to be registered with the Wagtail admin.
"""
return []
def get_url_name(self, view_name):
"""
Returns the namespaced URL name for the given view.
"""
return self.url_namespace + ":" + view_name
@cached_property
def menu_icon(self):
return self.icon
@cached_property
def menu_url(self):
return reverse(self.get_url_name(self.METHOD_NAME()[0].name))
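# Minimal sketch (not part of Wagtail itself) of a concrete subclass; the view
# class and route are hypothetical. The masked method returns the URL routes
# mounted under the viewset's url_prefix and reversed via its url_namespace.
#
#   from django.urls import path
#
#   class ReportViewSet(ViewSet):
#       name = "reports"
#       icon = "doc-full"
#
#       def METHOD_NAME(self):
#           return [path("", ReportIndexView.as_view(), name="index")]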
class ViewSetGroup(WagtailMenuRegisterableGroup):
"""
A container for grouping together multiple ViewSet instances.
    Creates a menu item with a submenu for accessing the main URL of each instance.
"""
def on_register(self):
self.register_menu_item()
|
3,274 |
auto
|
import configparser
import logging
import os
from typing import TYPE_CHECKING, Any, Optional
from urllib.parse import urlparse, urlunparse
import wandb
try:
from git import ( # type: ignore
GitCommandError,
InvalidGitRepositoryError,
NoSuchPathError,
Repo,
)
except ImportError:
Repo = None
if TYPE_CHECKING:
from git import Repo
logger = logging.getLogger(__name__)
class GitRepo:
def __init__(
self,
root: Optional[str] = None,
remote: str = "origin",
lazy: bool = True,
remote_url: Optional[str] = None,
commit: Optional[str] = None,
) -> None:
self.remote_name = remote if remote_url is None else None
self._root = root
self._remote_url = remote_url
self._commit = commit
self._repo = None
self._repo_initialized = False
if not lazy:
self._repo = self._init_repo()
def _init_repo(self) -> Optional[Repo]:
self._repo_initialized = True
if Repo is None:
return None
if self.remote_name is None:
return None
try:
return Repo(self._root or os.getcwd(), search_parent_directories=True)
except InvalidGitRepositoryError:
logger.debug("git repository is invalid")
except NoSuchPathError:
wandb.termwarn(f"git root {self._root} does not exist")
logger.warn(f"git root {self._root} does not exist")
return None
@property
def repo(self) -> Optional[Repo]:
if not self._repo_initialized:
self._repo = self._init_repo()
return self._repo
@property
def METHOD_NAME(self) -> bool:
return self._remote_url is None
def is_untracked(self, file_name: str) -> Optional[bool]:
if not self.repo:
return True
try:
return file_name in self.repo.untracked_files
except GitCommandError:
return None
@property
def enabled(self) -> bool:
return bool(self.repo)
@property
def root(self) -> Any:
if not self.repo:
return None
try:
return self.repo.git.rev_parse("--show-toplevel")
except GitCommandError as e:
# todo: collect telemetry on this
logger.error(f"git root error: {e}")
return None
@property
def dirty(self) -> Any:
if not self.repo:
return False
try:
return self.repo.is_dirty()
except GitCommandError:
return False
@property
def email(self) -> Optional[str]:
if not self.repo:
return None
try:
return self.repo.config_reader().get_value("user", "email") # type: ignore
except configparser.Error:
return None
@property
def last_commit(self) -> Any:
if self._commit:
return self._commit
if not self.repo:
return None
if not self.repo.head or not self.repo.head.is_valid():
return None
# TODO: Saw a user getting a Unicode decode error when parsing refs,
# more details on implementing a real fix in [WB-4064]
try:
if len(self.repo.refs) > 0:
return self.repo.head.commit.hexsha
else:
return self.repo.git.show_ref("--head").split(" ")[0]
except Exception:
logger.exception("Unable to find most recent commit in git")
return None
@property
def branch(self) -> Any:
if not self.repo:
return None
return self.repo.head.ref.name
@property
def remote(self) -> Any:
if not self.repo:
return None
try:
return self.repo.remotes[self.remote_name]
except IndexError:
return None
# the --submodule=diff option doesn't exist in pre-2.11 versions of git (november 2016)
# https://stackoverflow.com/questions/10757091/git-list-of-all-changed-files-including-those-in-submodules
@property
def has_submodule_diff(self) -> bool:
if not self.repo:
return False
return bool(self.repo.git.version_info >= (2, 11, 0))
@property
def remote_url(self) -> Any:
if self._remote_url:
return self._remote_url
if not self.remote:
return None
parsed = urlparse(self.remote.url)
hostname = parsed.hostname
if parsed.port is not None:
hostname = f"{hostname}:{parsed.port}"
if parsed.password is not None:
return urlunparse(parsed._replace(netloc=f"{parsed.username}:@{hostname}"))
return urlunparse(parsed._replace(netloc=hostname))
@property
def root_dir(self) -> Any:
if not self.repo:
return None
try:
return self.repo.git.rev_parse("--show-toplevel")
except GitCommandError:
return None
def get_upstream_fork_point(self) -> Any:
"""Get the most recent ancestor of HEAD that occurs on an upstream branch.
First looks at the current branch's tracking branch, if applicable. If
that doesn't work, looks at every other branch to find the most recent
ancestor of HEAD that occurs on a tracking branch.
Returns:
git.Commit object or None
"""
possible_relatives = []
try:
if not self.repo:
return None
try:
active_branch = self.repo.active_branch
except (TypeError, ValueError):
logger.debug("git is in a detached head state")
return None # detached head
else:
tracking_branch = active_branch.tracking_branch()
if tracking_branch:
possible_relatives.append(tracking_branch.commit)
if not possible_relatives:
for branch in self.repo.branches:
tracking_branch = branch.tracking_branch()
if tracking_branch is not None:
possible_relatives.append(tracking_branch.commit)
head = self.repo.head
most_recent_ancestor = None
for possible_relative in possible_relatives:
# at most one:
for ancestor in self.repo.merge_base(head, possible_relative):
if most_recent_ancestor is None:
most_recent_ancestor = ancestor
elif self.repo.is_ancestor(most_recent_ancestor, ancestor): # type: ignore
most_recent_ancestor = ancestor
return most_recent_ancestor
except GitCommandError as e:
logger.debug("git remote upstream fork point could not be found")
logger.debug(str(e))
return None
def tag(self, name: str, message: Optional[str]) -> Any:
if not self.repo:
return None
try:
return self.repo.create_tag(f"wandb/{name}", message=message, force=True)
except GitCommandError:
print("Failed to tag repository.")
return None
def push(self, name: str) -> Any:
if not self.remote:
return None
try:
return self.remote.push(f"wandb/{name}", force=True)
except GitCommandError:
logger.debug("failed to push git")
return None
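# Illustrative usage sketch (not part of the original module): construction is
# lazy, so no git commands run until a property such as `dirty` or `last_commit`
# is first accessed.
def _example_gitrepo_usage():
    repo = GitRepo(root=".", lazy=True)
    if not repo.enabled:
        return None
    return {"last_commit": repo.last_commit, "dirty": repo.dirty, "remote_url": repo.remote_url}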
|
3,275 |
test missing courses list
|
"""
Unittest for generate a test course in an given modulestore
"""
import json
from unittest import mock
import ddt
from django.core.management import CommandError, call_command
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
@ddt.ddt
class TestGenerateCourses(ModuleStoreTestCase):
"""
Unit tests for creating a course in split store via command line
"""
@mock.patch('cms.djangoapps.contentstore.management.commands.generate_courses.logger')
def test_generate_course_in_stores(self, mock_logger):
"""
Test that a course is created successfully
"""
settings = {"courses": [{
"organization": "test-course-generator",
"number": "1",
"run": "1",
"user": str(self.user.email),
"fields": {"display_name": "test-course", "announcement": "2010-04-20T20:08:21.634121"}
}]}
arg = json.dumps(settings)
call_command("generate_courses", arg)
key = modulestore().make_course_key("test-course-generator", "1", "1")
self.assertTrue(modulestore().has_course(key))
mock_logger.info.assert_any_call("Created course-v1:test-course-generator+1+1")
mock_logger.info.assert_any_call("announcement has been set to 2010-04-20T20:08:21.634121")
mock_logger.info.assert_any_call("display_name has been set to test-course")
def test_invalid_json(self):
"""
Test that providing an invalid JSON object will result in the appropriate command error
"""
with self.assertRaisesRegex(CommandError, "Invalid JSON object"):
arg = "invalid_json"
call_command("generate_courses", arg)
def METHOD_NAME(self):
"""
Test that a missing list of courses in json will result in the appropriate command error
"""
with self.assertRaisesRegex(CommandError, "JSON object is missing courses list"):
settings = {}
arg = json.dumps(settings)
call_command("generate_courses", arg)
@mock.patch('cms.djangoapps.contentstore.management.commands.generate_courses.logger')
@ddt.data("organization", "number", "run", "fields")
def test_missing_course_settings(self, setting, mock_logger):
"""
Test that missing required settings in JSON object will result in the appropriate error message
"""
settings = {"courses": [{
"organization": "test-course-generator",
"number": "1",
"run": "1",
"user": str(self.user.email),
"fields": {"display_name": "test-course"}
}]}
del settings["courses"][0][setting]
arg = json.dumps(settings)
call_command("generate_courses", arg)
mock_logger.warning.assert_any_call("Course json is missing " + setting)
@mock.patch('cms.djangoapps.contentstore.management.commands.generate_courses.logger')
def test_invalid_user(self, mock_logger):
"""
Test that providing an invalid user in the course JSON will result in the appropriate error message
"""
settings = {"courses": [{
"organization": "test-course-generator",
"number": "1",
"run": "1",
"user": "invalid_user",
"fields": {"display_name": "test-course"}
}]}
arg = json.dumps(settings)
call_command("generate_courses", arg)
mock_logger.warning.assert_any_call("invalid_user user does not exist")
@mock.patch('cms.djangoapps.contentstore.management.commands.generate_courses.logger')
def test_missing_display_name(self, mock_logger):
"""
Test that missing required display_name in JSON object will result in the appropriate error message
"""
settings = {"courses": [{
"organization": "test-course-generator",
"number": "1",
"run": "1",
"user": str(self.user.email),
"fields": {}
}]}
arg = json.dumps(settings)
call_command("generate_courses", arg)
mock_logger.warning.assert_any_call("Fields json is missing display_name")
@mock.patch('cms.djangoapps.contentstore.management.commands.generate_courses.logger')
def test_invalid_course_field(self, mock_logger):
"""
Test that an invalid course field will result in the appropriate message
"""
settings = {"courses": [{
"organization": "test-course-generator",
"number": "1",
"run": "1",
"user": str(self.user.email),
"fields": {"display_name": "test-course", "invalid_field": "invalid_value"}
}]}
arg = json.dumps(settings)
call_command("generate_courses", arg)
mock_logger.info.assert_any_call(('invalid_field') + "is not a valid CourseField")
@mock.patch('cms.djangoapps.contentstore.management.commands.generate_courses.logger')
def test_invalid_date_setting(self, mock_logger):
"""
Test that an invalid date json will result in the appropriate message
"""
settings = {"courses": [{
"organization": "test-course-generator",
"number": "1",
"run": "1",
"user": str(self.user.email),
"fields": {"display_name": "test-course", "announcement": "invalid_date"}
}]}
arg = json.dumps(settings)
call_command("generate_courses", arg)
mock_logger.info.assert_any_call("The date string could not be parsed for announcement")
@mock.patch('cms.djangoapps.contentstore.management.commands.generate_courses.logger')
def test_invalid_course_tab_list_setting(self, mock_logger):
"""
Test that an invalid course tab list json will result in the appropriate message
"""
settings = {"courses": [{
"organization": "test-course-generator",
"number": "1",
"run": "1",
"user": str(self.user.email),
"fields": {"display_name": "test-course", "tabs": "invalid_tabs"}
}]}
arg = json.dumps(settings)
call_command("generate_courses", arg)
mock_logger.info.assert_any_call("The course tab list string could not be parsed for tabs")
@mock.patch('cms.djangoapps.contentstore.management.commands.generate_courses.logger')
@ddt.data("mobile_available", "enable_proctored_exams")
def test_missing_course_fields(self, field, mock_logger):
"""
Test that missing course fields in fields json will result in the appropriate message
"""
settings = {"courses": [{
"organization": "test-course-generator",
"number": "1",
"run": "1",
"user": str(self.user.email),
"fields": {"display_name": "test-course"}
}]}
arg = json.dumps(settings)
call_command("generate_courses", arg)
mock_logger.info.assert_any_call(field + " has not been set")
|
3,276 |
close
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import CustomLocationsConfiguration
from .operations import CustomLocationsOperations, ResourceSyncRulesOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class CustomLocations: # pylint: disable=client-accepts-api-version-keyword
"""The customLocations Rest API spec.
:ivar custom_locations: CustomLocationsOperations operations
:vartype custom_locations:
azure.mgmt.extendedlocation.v2021_08_31_preview.aio.operations.CustomLocationsOperations
:ivar resource_sync_rules: ResourceSyncRulesOperations operations
:vartype resource_sync_rules:
azure.mgmt.extendedlocation.v2021_08_31_preview.aio.operations.ResourceSyncRulesOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2021-08-31-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = CustomLocationsConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.custom_locations = CustomLocationsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.resource_sync_rules = ResourceSyncRulesOperations(
self._client, self._config, self._serialize, self._deserialize
)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def METHOD_NAME(self) -> None:
await self._client.METHOD_NAME()
async def __aenter__(self) -> "CustomLocations":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
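# Minimal usage sketch (not part of the generated client), showing the async
# context-manager form; the credential type and subscription id are placeholders
# supplied by the caller.
async def _example_usage() -> None:
    from azure.identity.aio import DefaultAzureCredential
    credential = DefaultAzureCredential()
    async with CustomLocations(credential, "<subscription-id>") as client:
        pass  # call client.custom_locations / client.resource_sync_rules operations here
    await credential.close()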
|
3,277 |
scheduleonboarding
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
import json
from firebase_admin import auth, firestore, initialize_app
from firebase_functions import https_fn, identity_fn, tasks_fn, options, params
import google.auth
import google.auth.transport.requests
import google.cloud.firestore
import google.cloud.tasks_v2
import google.oauth2.credentials
import googleapiclient.discovery
initialize_app()
# [START savegoogletoken]
@identity_fn.before_user_created()
def savegoogletoken(
event: identity_fn.AuthBlockingEvent,
) -> identity_fn.BeforeCreateResponse | None:
"""During sign-up, save the Google OAuth2 access token and queue up a task
to schedule an onboarding session on the user's Google Calendar.
You will only get an access token if you enabled it in your project's blocking
functions settings in the Firebase console:
https://console.firebase.google.com/project/_/authentication/settings
"""
if (
event.credential is not None
and event.credential.provider_id == "google.com"
):
print(
f"Signed in with {event.credential.provider_id}. Saving access token."
)
firestore_client: google.cloud.firestore.Client = firestore.client()
doc_ref = firestore_client.collection("user_info").document(
event.data.uid
)
doc_ref.set(
{"calendar_access_token": event.credential.access_token}, merge=True
)
tasks_client = google.cloud.tasks_v2.CloudTasksClient()
task_queue = tasks_client.queue_path(
params.PROJECT_ID.value,
options.SupportedRegion.US_CENTRAL1,
"scheduleonboarding",
)
target_uri = get_function_url("scheduleonboarding")
calendar_task = google.cloud.tasks_v2.Task(
http_request={
"http_method": google.cloud.tasks_v2.HttpMethod.POST,
"url": target_uri,
"headers": {"Content-type": "application/json"},
"body": json.dumps({"data": {"uid": event.data.uid}}).encode(),
},
schedule_time=datetime.now() + timedelta(minutes=1),
)
tasks_client.create_task(parent=task_queue, task=calendar_task)
# [END savegoogletoken]
# [START scheduleonboarding]
@tasks_fn.on_task_dispatched()
def METHOD_NAME(request: tasks_fn.CallableRequest) -> https_fn.Response:
"""Add an onboarding event to a user's Google Calendar.
Retrieves and deletes the access token that was saved to Cloud Firestore.
"""
if "uid" not in request.data:
return https_fn.Response(
status=https_fn.FunctionsErrorCode.INVALID_ARGUMENT,
response="No user specified.",
)
uid = request.data["uid"]
user_record: auth.UserRecord = auth.get_user(uid)
if user_record.email is None:
return https_fn.Response(
status=https_fn.FunctionsErrorCode.INVALID_ARGUMENT,
response="No email address on record.",
)
firestore_client: google.cloud.firestore.Client = firestore.client()
user_info = (
firestore_client.collection("user_info").document(uid).get().to_dict()
)
if (
not isinstance(user_info, dict)
or "calendar_access_token" not in user_info
):
return https_fn.Response(
status=https_fn.FunctionsErrorCode.PERMISSION_DENIED,
response="No Google OAuth token found.",
)
calendar_access_token = user_info["calendar_access_token"]
firestore_client.collection("user_info").document(uid).update(
{"calendar_access_token": google.cloud.firestore.DELETE_FIELD}
)
google_credentials = google.oauth2.credentials.Credentials(
token=calendar_access_token
)
calendar_client = googleapiclient.discovery.build(
"calendar", "v3", credentials=google_credentials
)
calendar_event = {
"summary": "Onboarding with ExampleCo",
"location": "Video call",
"description": "Walk through onboarding tasks with an ExampleCo engineer.",
"start": {
"dateTime": (datetime.now() + timedelta(days=3)).isoformat(),
"timeZone": "America/Los_Angeles",
},
"end": {
"dateTime": (
datetime.now() + timedelta(days=3, hours=1)
).isoformat(),
"timeZone": "America/Los_Angeles",
},
"attendees": [
{"email": user_record.email},
{"email": "[email protected]"},
],
}
calendar_client.events().insert(
calendarId="primary", body=calendar_event
).execute()
return https_fn.Response("Success")
# [END scheduleonboarding]
def get_function_url(
name: str, location: str = options.SupportedRegion.US_CENTRAL1
) -> str:
"""Get the URL of a given v2 cloud function.
Params:
name: the function's name
location: the function's location
Returns:
The URL of the function
"""
credentials, project_id = google.auth.default(
scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
authed_session = google.auth.transport.requests.AuthorizedSession(
credentials
)
url = (
"https://cloudfunctions.googleapis.com/v2beta/"
+ f"projects/{project_id}/locations/{location}/functions/{name}"
)
response = authed_session.get(url)
data = response.json()
function_url = data["serviceConfig"]["uri"]
return function_url
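# Illustrative note (not part of the sample): for 2nd-gen functions the value of
# serviceConfig.uri is the URL of the underlying Cloud Run service, so a call
# such as get_function_url("scheduleonboarding") resolves to something like
# "https://scheduleonboarding-<hash>-uc.a.run.app" for a us-central1 deployment.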
|
3,278 |
test get form submissions grouped by field
|
from datetime import datetime, timedelta
from django.utils.timezone import utc
import os
from mock import patch
from onadata.apps.logger.models.instance import Instance
from onadata.apps.main.tests.test_base import TestBase
from onadata.libs.data.query import (
get_form_submissions_grouped_by_field,
get_date_fields,
get_field_records,
)
class TestTools(TestBase):
def setUp(self):
super().setUp()
self._create_user_and_login()
self._publish_transportation_form()
@patch("django.utils.timezone.now")
def test_get_form_submissions_grouped_by_field(self, mock_time):
mock_time.return_value = datetime.utcnow().replace(tzinfo=utc)
self._make_submissions()
count_key = "count"
fields = ["_submission_time", "_xform_id_string"]
count = len(self.xform.instances.all())
for field in fields:
result = get_form_submissions_grouped_by_field(self.xform, field)[0]
self.assertEqual([field, count_key], sorted(list(result)))
self.assertEqual(result[count_key], count)
@patch("onadata.apps.logger.models.instance.submission_time")
def test_get_form_submissions_grouped_by_field_datetime_to_date(self, mock_time):
now = datetime(2014, 1, 1, tzinfo=utc)
times = [
now,
now + timedelta(seconds=1),
now + timedelta(seconds=2),
now + timedelta(seconds=3),
]
mock_time.side_effect = times
self._make_submissions()
for i in self.xform.instances.all().order_by("-pk"):
i.date_created = times.pop()
i.save()
count_key = "count"
fields = ["_submission_time"]
count = len(self.xform.instances.all())
for field in fields:
result = get_form_submissions_grouped_by_field(self.xform, field)[0]
self.assertEqual([field, count_key], sorted(list(result)))
self.assertEqual(result[field], str(now.date()))
self.assertEqual(result[count_key], count)
@patch("django.utils.timezone.now")
def test_get_form_submissions_two_xforms(self, mock_time):
mock_time.return_value = datetime.utcnow().replace(tzinfo=utc)
self._make_submissions()
self._publish_xls_file(os.path.join("fixtures", "gps", "gps.xlsx"))
first_xform = self.xform
xform = self.user.xforms.all().order_by("-pk")[0]
self._make_submission(
os.path.join(
"onadata",
"apps",
"main",
"tests",
"fixtures",
"gps",
"instances",
"gps_1980-01-23_20-52-08.xml",
)
)
count_key = "count"
fields = ["_submission_time", "_xform_id_string"]
count = len(xform.instances.all())
for field in fields:
result = get_form_submissions_grouped_by_field(xform, field)[0]
self.assertEqual([field, count_key], sorted(list(result)))
self.assertEqual(result[count_key], count)
count = len(first_xform.instances.all())
for field in fields:
result = get_form_submissions_grouped_by_field(first_xform, field)[0]
self.assertEqual([field, count_key], sorted(list(result)))
self.assertEqual(result[count_key], count)
@patch("django.utils.timezone.now")
def test_get_form_submissions_xform_no_submissions(self, mock_time):
mock_time.return_value = datetime.utcnow().replace(tzinfo=utc)
self._make_submissions()
self._publish_xls_file(os.path.join("fixtures", "gps", "gps.xlsx"))
xform = self.user.xforms.all().order_by("-pk")[0]
fields = ["_submission_time", "_xform_id_string"]
count = len(xform.instances.all())
self.assertEqual(count, 0)
for field in fields:
result = get_form_submissions_grouped_by_field(xform, field)
self.assertEqual(result, [])
@patch("django.utils.timezone.now")
def METHOD_NAME(self, mock_time):
mock_time.return_value = datetime.utcnow().replace(tzinfo=utc)
self._make_submissions()
count_key = "count"
fields = ["_submission_time", "_xform_id_string"]
name = "_my_name"
xform = self.user.xforms.all()[0]
count = len(xform.instances.all())
for field in fields:
result = get_form_submissions_grouped_by_field(xform, field, name)[0]
self.assertEqual([name, count_key], sorted(list(result)))
self.assertEqual(result[count_key], count)
def test_get_form_submissions_when_response_not_provided(self):
"""
        Test that a None value is returned as its own group when one of the
        submissions doesn't have a response for the specified field
"""
self._make_submissions()
count = Instance.objects.count()
        # make a submission that doesn't have a response for
# `available_transportation_types_to_referral_facility`
path = os.path.join(
self.this_directory,
"fixtures",
"transportation",
"instances",
"transport_no_response",
"transport_no_response.xml",
)
self._make_submission(path, self.user.username)
self.assertEqual(Instance.objects.count(), count + 1)
field = "transport/available_transportation_types_to_referral_facility"
xform = self.user.xforms.all()[0]
results = get_form_submissions_grouped_by_field(
xform, field, "available_transportation_types_to_referral_facility"
)
# we should have a similar number of aggregates as submissions as each
# submission has a unique value for the field
self.assertEqual(len(results), count + 1)
# the count where the value is None should have a count of 1
result = [
r
for r in results
if r["available_transportation_types_to_referral_facility"] is None
][0]
self.assertEqual(result["count"], 1)
def test_get_date_fields_includes_start_end(self):
path = os.path.join(
os.path.dirname(__file__), "fixtures", "tutorial", "tutorial.xlsx"
)
self._publish_xls_file_and_set_xform(path)
fields = get_date_fields(self.xform)
expected_fields = sorted(
["_submission_time", "date", "start_time", "end_time", "today", "exactly"]
)
self.assertEqual(sorted(fields), expected_fields)
def test_get_field_records_when_some_responses_are_empty(self):
submissions = ["1", "2", "3", "no_age"]
path = os.path.join(
os.path.dirname(__file__), "fixtures", "tutorial", "tutorial.xlsx"
)
self._publish_xls_file_and_set_xform(path)
for i in submissions:
self._make_submission(
os.path.join(
"onadata",
"apps",
"api",
"tests",
"fixtures",
"forms",
"tutorial",
"instances",
f"{i}.xml",
)
)
field = "age"
records = get_field_records(field, self.xform)
self.assertEqual(sorted(records), sorted([23, 23, 35]))
|
3,279 |
resource exists
|
# -*- coding: utf-8 -*-
from __future__ import annotations
from ckan.types import Context
import re
import logging
from typing import Any, Container
import ckan.plugins as plugins
from ckan.common import CKANConfig, config
from ckanext.datastore.interfaces import IDatastoreBackend
log = logging.getLogger(__name__)
def get_all_resources_ids_in_datastore() -> list[str]:
"""
    Helper for getting the ids of all resources in the datastore.
    Uses `get_all_ids` of the active datastore backend.
"""
DatastoreBackend.register_backends()
DatastoreBackend.set_active_backend(config)
backend = DatastoreBackend.get_active_backend()
backend.configure(config)
return backend.get_all_ids()
def _parse_sort_clause( # type: ignore
clause: str, fields_types: Container[str]):
clause_match = re.match(
u'^(.+?)( +(asc|desc))?( nulls +(first|last) *)?$', clause, re.I
)
if not clause_match:
return False
field = clause_match.group(1)
if field[0] == field[-1] == u'"':
field = field[1:-1]
sort = (clause_match.group(3) or u'asc').lower()
if clause_match.group(4):
sort += (clause_match.group(4)).lower()
if field not in fields_types:
return False
return field, sort
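# Illustrative examples of the behaviour assumed for the parser above (not part
# of the original module):
#
#     _parse_sort_clause(u'"price" desc', {'price', 'name'})    # -> ('price', 'desc')
#     _parse_sort_clause(u'price asc nulls last', {'price'})    # -> ('price', 'asc nulls last')
#     _parse_sort_clause(u'missing asc', {'price'})             # -> False (unknown field)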
class DatastoreException(Exception):
pass
class DatastoreBackend:
"""Base class for all datastore backends.
Very simple example of implementation based on SQLite can be found in
`ckanext.example_idatastorebackend`. In order to use it, set
datastore.write_url to
'example-sqlite:////tmp/database-name-on-your-choice'
:prop _backends: mapping(schema, class) of all registered backends
:type _backends: dictionary
:prop _active_backend: current active backend
:type _active_backend: DatastoreBackend
"""
_backends = {}
_active_backend: "DatastoreBackend"
@classmethod
def register_backends(cls):
"""Register all backend implementations inside extensions.
"""
for plugin in plugins.PluginImplementations(IDatastoreBackend):
cls._backends.update(plugin.register_backends())
@classmethod
def set_active_backend(cls, config: CKANConfig):
"""Choose most suitable backend depending on configuration
:param config: configuration object
:rtype: ckan.common.CKANConfig
"""
schema = config.get(u'ckan.datastore.write_url').split(u':')[0]
read_schema = config.get(
u'ckan.datastore.read_url').split(u':')[0]
assert read_schema == schema, u'Read and write engines are different'
cls._active_backend = cls._backends[schema]()
@classmethod
def get_active_backend(cls):
"""Return currently used backend
"""
return cls._active_backend
def configure(self, config: CKANConfig):
"""Configure backend, set inner variables, make some initial setup.
:param config: configuration object
:returns: config
:rtype: CKANConfig
"""
return config
def create(self, context: Context, data_dict: dict[str, Any]) -> Any:
"""Create new resourct inside datastore.
Called by `datastore_create`.
:param data_dict: See `ckanext.datastore.logic.action.datastore_create`
:returns: The newly created data object
:rtype: dictionary
"""
raise NotImplementedError()
def upsert(self, context: Context, data_dict: dict[str, Any]) -> Any:
"""Update or create resource depending on data_dict param.
Called by `datastore_upsert`.
:param data_dict: See `ckanext.datastore.logic.action.datastore_upsert`
:returns: The modified data object
:rtype: dictionary
"""
raise NotImplementedError()
def delete(self, context: Context, data_dict: dict[str, Any]) -> Any:
"""Remove resource from datastore.
Called by `datastore_delete`.
:param data_dict: See `ckanext.datastore.logic.action.datastore_delete`
:returns: Original filters sent.
:rtype: dictionary
"""
raise NotImplementedError()
def search(self, context: Context, data_dict: dict[str, Any]) -> Any:
"""Base search.
Called by `datastore_search`.
:param data_dict: See `ckanext.datastore.logic.action.datastore_search`
:rtype: dictionary with the following keys
:param fields: fields/columns and their extra metadata
:type fields: list of dictionaries
:param offset: query offset value
:type offset: int
:param limit: query limit value
:type limit: int
:param filters: query filters
:type filters: list of dictionaries
:param total: number of total matching records
:type total: int
:param records: list of matching results
:type records: list of dictionaries
"""
raise NotImplementedError()
def search_sql(self, context: Context, data_dict: dict[str, Any]) -> Any:
"""Advanced search.
Called by `datastore_search_sql`.
:param sql: a single search statement
:type sql: string
:rtype: dictionary
:param fields: fields/columns and their extra metadata
:type fields: list of dictionaries
:param records: list of matching results
:type records: list of dictionaries
"""
raise NotImplementedError()
def METHOD_NAME(self, id: str) -> bool:
"""Define whether resource exists in datastore.
"""
raise NotImplementedError()
def resource_fields(self, id: str) -> Any:
"""Return dictonary with resource description.
Called by `datastore_info`.
:returns: A dictionary describing the columns and their types.
"""
raise NotImplementedError()
def resource_info(self, id: str) -> Any:
"""Return DataDictonary with resource's info - #3414
"""
raise NotImplementedError()
def resource_id_from_alias(self, alias: str) -> Any:
"""Convert resource's alias to real id.
:param alias: resource's alias or id
:type alias: string
:returns: real id of resource
:rtype: string
"""
raise NotImplementedError()
def get_all_ids(self) -> list[str]:
"""Return id of all resource registered in datastore.
:returns: all resources ids
:rtype: list of strings
"""
raise NotImplementedError()
def create_function(self, *args: Any, **kwargs: Any) -> Any:
"""Called by `datastore_function_create` action.
"""
raise NotImplementedError()
def drop_function(self, *args: Any, **kwargs: Any) -> Any:
"""Called by `datastore_function_delete` action.
"""
raise NotImplementedError()
|
3,280 |
reader factory
|
import json
import logging
import os
import typing
from typing import Optional, Text, Callable, Dict, Any, List
import rasa.shared.utils.io
from rasa.shared.nlu.training_data.formats.dialogflow import (
DIALOGFLOW_AGENT,
DIALOGFLOW_ENTITIES,
DIALOGFLOW_ENTITY_ENTRIES,
DIALOGFLOW_INTENT,
DIALOGFLOW_INTENT_EXAMPLES,
DIALOGFLOW_PACKAGE,
)
from rasa.shared.nlu.training_data.training_data import TrainingData
if typing.TYPE_CHECKING:
from rasa.shared.nlu.training_data.formats.readerwriter import TrainingDataReader
logger = logging.getLogger(__name__)
# Different supported file formats and their identifier
WIT = "wit"
LUIS = "luis"
RASA = "rasa_nlu"
RASA_YAML = "rasa_yml"
UNK = "unk"
DIALOGFLOW_RELEVANT = {DIALOGFLOW_ENTITIES, DIALOGFLOW_INTENT}
_json_format_heuristics: Dict[Text, Callable[[Any, Text], bool]] = {
WIT: lambda js, fn: "utterances" in js and "luis_schema_version" not in js,
LUIS: lambda js, fn: "luis_schema_version" in js,
RASA: lambda js, fn: "rasa_nlu_data" in js,
DIALOGFLOW_AGENT: lambda js, fn: "supportedLanguages" in js,
DIALOGFLOW_PACKAGE: lambda js, fn: "version" in js and len(js) == 1,
DIALOGFLOW_INTENT: lambda js, fn: "responses" in js,
DIALOGFLOW_ENTITIES: lambda js, fn: "isEnum" in js,
DIALOGFLOW_INTENT_EXAMPLES: lambda js, fn: "_usersays_" in fn,
DIALOGFLOW_ENTITY_ENTRIES: lambda js, fn: "_entries_" in fn,
}
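# Illustrative sketch (not part of the original module) of how the heuristics
# above can classify a parsed JSON payload; the payload and filename are made up:
#
#     js = {"luis_schema_version": "7.0", "intents": []}
#     matches = [fmt for fmt, check in _json_format_heuristics.items() if check(js, "export.json")]
#     # matches == ["luis"]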
def load_data(resource_name: Text, language: Optional[Text] = "en") -> "TrainingData":
"""Load training data from disk.
Merges them if loaded from disk and multiple files are found."""
if not os.path.exists(resource_name):
raise ValueError(f"File '{resource_name}' does not exist.")
if os.path.isfile(resource_name):
files = [resource_name]
else:
files = rasa.shared.utils.io.list_files(resource_name)
data_sets = [_load(f, language) for f in files]
training_data_sets: List[TrainingData] = [ds for ds in data_sets if ds]
if len(training_data_sets) == 0:
training_data = TrainingData()
elif len(training_data_sets) == 1:
training_data = training_data_sets[0]
else:
training_data = training_data_sets[0].merge(*training_data_sets[1:])
return training_data
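# Illustrative usage (assumed file path, not part of the original module):
#
#     training_data = load_data("data/nlu.yml", language="en")
#     print(len(training_data.training_examples))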
def METHOD_NAME(fformat: Text) -> Optional["TrainingDataReader"]:
"""Generates the appropriate reader class based on the file format."""
from rasa.shared.nlu.training_data.formats import (
RasaYAMLReader,
WitReader,
LuisReader,
RasaReader,
DialogflowReader,
)
reader: Optional["TrainingDataReader"] = None
if fformat == LUIS:
reader = LuisReader()
elif fformat == WIT:
reader = WitReader()
elif fformat in DIALOGFLOW_RELEVANT:
reader = DialogflowReader()
elif fformat == RASA:
reader = RasaReader()
elif fformat == RASA_YAML:
reader = RasaYAMLReader()
return reader
def _load(filename: Text, language: Optional[Text] = "en") -> Optional["TrainingData"]:
"""Loads a single training data file from disk."""
fformat = guess_format(filename)
if fformat == UNK:
raise ValueError(f"Unknown data format for file '{filename}'.")
reader = METHOD_NAME(fformat)
if reader:
return reader.read(filename, language=language, fformat=fformat)
else:
return None
def guess_format(filename: Text) -> Text:
"""Applies heuristics to guess the data format of a file.
Args:
filename: file whose type should be guessed
Returns:
Guessed file format.
"""
from rasa.shared.nlu.training_data.formats import RasaYAMLReader
guess = UNK
if not os.path.isfile(filename):
return guess
try:
content = rasa.shared.utils.io.read_file(filename)
js = json.loads(content)
except ValueError:
if RasaYAMLReader.is_yaml_nlu_file(filename):
guess = RASA_YAML
else:
for file_format, format_heuristic in _json_format_heuristics.items():
if format_heuristic(js, filename):
guess = file_format
break
logger.debug(f"Training data format of '{filename}' is '{guess}'.")
return guess
|
3,281 |
common entry
|
# Copyright 2014 ETH Zurich
# Copyright 2018 ETH Zurich, Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`supervisor` --- SCION topology supervisor generator
=============================================
"""
# Stdlib
import configparser
import os
import shlex
from io import StringIO
# SCION
from topology.util import write_file
from topology.common import (
ArgsTopoDicts,
DISP_CONFIG_NAME,
SD_CONFIG_NAME,
)
SUPERVISOR_CONF = 'supervisord.conf'
class SupervisorGenArgs(ArgsTopoDicts):
pass
class SupervisorGenerator(object):
def __init__(self, args):
"""
:param SupervisorGenArgs args: Contains the passed command line arguments and topo dicts.
"""
self.args = args
def generate(self):
config = configparser.ConfigParser(interpolation=None)
for topo_id, topo in self.args.topo_dicts.items():
self._add_as_config(config, topo_id, topo)
self._add_dispatcher(config)
self._write_config(config,
os.path.join(self.args.output_dir, SUPERVISOR_CONF))
def _add_as_config(self, config, topo_id, topo):
entries = self._as_entries(topo_id, topo)
for elem, entry in sorted(entries):
self._add_prog(config, elem, entry)
config["group:as%s" % topo_id.file_fmt()] = {
"programs": ",".join(name for name, _ in sorted(entries))
}
def _as_entries(self, topo_id, topo):
base = topo_id.base_dir(self.args.output_dir)
entries = []
entries.extend(self._br_entries(topo, "bin/router", base))
entries.extend(self._control_service_entries(topo, base))
entries.append(self._sciond_entry(topo_id, base))
return entries
def _br_entries(self, topo, cmd, base):
entries = []
for k, v in topo.get("border_routers", {}).items():
conf = os.path.join(base, "%s.toml" % k)
prog = self.METHOD_NAME(k, [cmd, "--config", conf])
prog['environment'] += ',GODEBUG="cgocheck=0"'
entries.append((k, prog))
return entries
def _control_service_entries(self, topo, base):
entries = []
for k, v in topo.get("control_service", {}).items():
# only a single control service instance per AS is currently supported
if k.endswith("-1"):
conf = os.path.join(base, "%s.toml" % k)
prog = self.METHOD_NAME(k, ["bin/control", "--config", conf])
entries.append((k, prog))
return entries
def _sciond_entry(self, topo_id, conf_dir):
sd_name = "sd%s" % topo_id.file_fmt()
cmd_args = [
"bin/daemon", "--config",
os.path.join(conf_dir, SD_CONFIG_NAME)
]
return (sd_name, self.METHOD_NAME(sd_name, cmd_args))
def _add_dispatcher(self, config):
name, entry = self._dispatcher_entry()
self._add_prog(config, name, entry)
def _dispatcher_entry(self):
name = "dispatcher"
conf_dir = os.path.join(self.args.output_dir, name)
cmd_args = [
"bin/dispatcher", "--config",
os.path.join(conf_dir, DISP_CONFIG_NAME)
]
return (name, self.METHOD_NAME(name, cmd_args))
def _add_prog(self, config, name, entry):
config["program:%s" % name] = entry
def METHOD_NAME(self, name, cmd_args):
entry = {
'autostart': 'false',
'autorestart': 'false',
'environment': 'TZ=UTC',
'stdout_logfile': "logs/%s.log" % name,
'redirect_stderr': True,
'startretries': 0,
'startsecs': 5,
'priority': 100,
'command': ' '.join(shlex.quote(a) for a in cmd_args),
}
if name == "dispatcher":
entry['startsecs'] = 1
entry['priority'] = 50
return entry
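# Illustrative sketch (not part of the original code): for an example program name
# "sd1-ff00_0_110", the entry above would render in supervisord.conf roughly as
#
#     [program:sd1-ff00_0_110]
#     autostart = false
#     autorestart = false
#     command = bin/daemon --config <conf_dir>/<SD_CONFIG_NAME>
#     ...
#
# with the exact command and paths depending on the generator arguments.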
def _write_config(self, config, path):
text = StringIO()
config.write(text)
write_file(path, text.getvalue())
|
3,282 |
config mock
|
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
import unittest
import unittest.mock
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import contextlib
import itertools
from pathlib import Path
from yt_dlp.compat import compat_expanduser
from yt_dlp.options import create_parser, parseOpts
from yt_dlp.utils import Config, get_executable_path
ENVIRON_DEFAULTS = {
'HOME': None,
'XDG_CONFIG_HOME': '/_xdg_config_home/',
'USERPROFILE': 'C:/Users/testing/',
'APPDATA': 'C:/Users/testing/AppData/Roaming/',
'HOMEDRIVE': 'C:/',
'HOMEPATH': 'Users/testing/',
}
@contextlib.contextmanager
def set_environ(**kwargs):
saved_environ = os.environ.copy()
for name, value in {**ENVIRON_DEFAULTS, **kwargs}.items():
if value is None:
os.environ.pop(name, None)
else:
os.environ[name] = value
yield
os.environ.clear()
os.environ.update(saved_environ)
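# Illustrative usage of the context manager above (not part of the original tests):
#
#     with set_environ(HOME='/home/testing/'):
#         ...  # sees HOME plus the ENVIRON_DEFAULTS; the original environment is restored on exit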
def _generate_expected_groups():
xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
appdata_dir = os.getenv('appdata')
home_dir = compat_expanduser('~')
return {
'Portable': [
Path(get_executable_path(), 'yt-dlp.conf'),
],
'Home': [
Path('yt-dlp.conf'),
],
'User': [
Path(xdg_config_home, 'yt-dlp.conf'),
Path(xdg_config_home, 'yt-dlp', 'config'),
Path(xdg_config_home, 'yt-dlp', 'config.txt'),
*((
Path(appdata_dir, 'yt-dlp.conf'),
Path(appdata_dir, 'yt-dlp', 'config'),
Path(appdata_dir, 'yt-dlp', 'config.txt'),
) if appdata_dir else ()),
Path(home_dir, 'yt-dlp.conf'),
Path(home_dir, 'yt-dlp.conf.txt'),
Path(home_dir, '.yt-dlp', 'config'),
Path(home_dir, '.yt-dlp', 'config.txt'),
],
'System': [
Path('/etc/yt-dlp.conf'),
Path('/etc/yt-dlp/config'),
Path('/etc/yt-dlp/config.txt'),
]
}
class TestConfig(unittest.TestCase):
maxDiff = None
@set_environ()
def test_config__ENVIRON_DEFAULTS_sanity(self):
expected = make_expected()
self.assertCountEqual(
set(expected), expected,
'ENVIRON_DEFAULTS produces non-unique names')
def test_config_all_environ_values(self):
for name, value in ENVIRON_DEFAULTS.items():
for new_value in (None, '', '.', value or '/some/dir'):
with set_environ(**{name: new_value}):
self._simple_grouping_test()
def test_config_default_expected_locations(self):
files, _ = self._simple_config_test()
self.assertEqual(
files, make_expected(),
'Not all expected locations have been checked')
def test_config_default_grouping(self):
self._simple_grouping_test()
def _simple_grouping_test(self):
expected_groups = make_expected_groups()
for name, group in expected_groups.items():
for index, existing_path in enumerate(group):
result, opts = self._simple_config_test(existing_path)
expected = expected_from_expected_groups(expected_groups, existing_path)
self.assertEqual(
result, expected,
f'The checked locations do not match the expected ({name}, {index})')
self.assertEqual(
opts.outtmpl['default'], '1',
f'The used result value was incorrect ({name}, {index})')
def _simple_config_test(self, *stop_paths):
encountered = 0
paths = []
def read_file(filename, default=[]):
nonlocal encountered
path = Path(filename)
paths.append(path)
if path in stop_paths:
encountered += 1
return ['-o', f'{encountered}']
with METHOD_NAME(read_file):
_, opts, _ = parseOpts([], False)
return paths, opts
@set_environ()
def test_config_early_exit_commandline(self):
self._early_exit_test(0, '--ignore-config')
@set_environ()
def test_config_early_exit_files(self):
for index, _ in enumerate(make_expected(), 1):
self._early_exit_test(index)
def _early_exit_test(self, allowed_reads, *args):
reads = 0
def read_file(filename, default=[]):
nonlocal reads
reads += 1
if reads > allowed_reads:
self.fail('The remaining config was not ignored')
elif reads == allowed_reads:
return ['--ignore-config']
with METHOD_NAME(read_file):
parseOpts(args, False)
@set_environ()
def test_config_override_commandline(self):
self._override_test(0, '-o', 'pass')
@set_environ()
def test_config_override_files(self):
for index, _ in enumerate(make_expected(), 1):
self._override_test(index)
def _override_test(self, start_index, *args):
index = 0
def read_file(filename, default=[]):
nonlocal index
index += 1
if index > start_index:
return ['-o', 'fail']
elif index == start_index:
return ['-o', 'pass']
with METHOD_NAME(read_file):
_, opts, _ = parseOpts(args, False)
self.assertEqual(
opts.outtmpl['default'], 'pass',
'The earlier group did not override the later ones')
@contextlib.contextmanager
def METHOD_NAME(read_file=None):
with unittest.mock.patch('yt_dlp.options.Config') as mock:
mock.return_value = Config(create_parser())
if read_file is not None:
mock.read_file = read_file
yield mock
def make_expected(*filepaths):
return expected_from_expected_groups(_generate_expected_groups(), *filepaths)
def make_expected_groups(*filepaths):
return _filter_expected_groups(_generate_expected_groups(), filepaths)
def expected_from_expected_groups(expected_groups, *filepaths):
return list(itertools.chain.from_iterable(
_filter_expected_groups(expected_groups, filepaths).values()))
def _filter_expected_groups(expected, filepaths):
if not filepaths:
return expected
result = {}
for group, paths in expected.items():
new_paths = []
for path in paths:
new_paths.append(path)
if path in filepaths:
break
result[group] = new_paths
return result
if __name__ == '__main__':
unittest.main()
|
3,283 |
default factory str
|
from typing import Any, ClassVar, Generic, List, Optional, TypeVar, Union
from typing_extensions import Self
from pydantic import BaseModel, ConfigDict, Field, create_model, field_validator, model_validator, validator
from pydantic.dataclasses import dataclass
class Model(BaseModel):
x: float
y: str
model_config = ConfigDict(from_attributes=True)
class SelfReferencingModel(BaseModel):
submodel: Optional['SelfReferencingModel']
@property
def prop(self) -> None:
...
SelfReferencingModel.model_rebuild()
model = Model(x=1, y='y')
Model(x=1, y='y', z='z')
# MYPY: error: Unexpected keyword argument "z" for "Model" [call-arg]
model.x = 2
model.model_validate(model)
self_referencing_model = SelfReferencingModel(submodel=SelfReferencingModel(submodel=None))
class KwargsModel(BaseModel, from_attributes=True):
x: float
y: str
kwargs_model = KwargsModel(x=1, y='y')
KwargsModel(x=1, y='y', z='z')
# MYPY: error: Unexpected keyword argument "z" for "KwargsModel" [call-arg]
kwargs_model.x = 2
kwargs_model.model_validate(kwargs_model.__dict__)
class InheritingModel(Model):
z: int = 1
InheritingModel.model_validate(model.__dict__)
class ForwardReferencingModel(Model):
future: 'FutureModel'
class FutureModel(Model):
pass
ForwardReferencingModel.model_rebuild()
future_model = FutureModel(x=1, y='a')
forward_model = ForwardReferencingModel(x=1, y='a', future=future_model)
class NoMutationModel(BaseModel):
x: int
model_config = ConfigDict(frozen=True)
class MutationModel(NoMutationModel):
a: int = 1
model_config = ConfigDict(frozen=False, from_attributes=True)
MutationModel(x=1).x = 2
MutationModel.model_validate(model.__dict__)
class KwargsNoMutationModel(BaseModel, frozen=True):
# MYPY: error: Cannot inherit frozen dataclass from a non-frozen one [misc]
x: int
class KwargsMutationModel(KwargsNoMutationModel, frozen=False, from_attributes=True):
# MYPY: error: Cannot inherit non-frozen dataclass from a frozen one [misc]
a: int = 1
KwargsMutationModel(x=1).x = 2
# MYPY: error: Property "x" defined in "KwargsNoMutationModel" is read-only [misc]
KwargsMutationModel.model_validate(model.__dict__)
class OverrideModel(Model):
x: int
OverrideModel(x=1, y='b')
class Mixin:
def f(self) -> None:
pass
class MultiInheritanceModel(BaseModel, Mixin):
pass
MultiInheritanceModel().f()
class AliasModel(BaseModel):
x: str = Field(..., alias='y')
alias_model = AliasModel(y='hello')
assert alias_model.x == 'hello'
class ClassVarModel(BaseModel):
x: int
y: ClassVar[int] = 1
ClassVarModel(x=1)
@dataclass(config={'validate_assignment': True})
class AddProject:
name: str
slug: Optional[str]
description: Optional[str]
p = AddProject(name='x', slug='y', description='z')
class TypeAliasAsAttribute(BaseModel):
__type_alias_attribute__ = Union[str, bytes]
class NestedModel(BaseModel):
class Model(BaseModel):
id: str
model: Model
_ = NestedModel.Model
DynamicModel = create_model('DynamicModel', __base__=Model)
dynamic_model = DynamicModel(x=1, y='y')
dynamic_model.x = 2
class FrozenModel(BaseModel):
x: int
model_config = ConfigDict(frozen=True)
class NotFrozenModel(FrozenModel):
a: int = 1
model_config = ConfigDict(frozen=False, from_attributes=True)
NotFrozenModel(x=1).x = 2
NotFrozenModel.model_validate(model.__dict__)
class KwargsFrozenModel(BaseModel, frozen=True):
# MYPY: error: Cannot inherit frozen dataclass from a non-frozen one [misc]
x: int
class KwargsNotFrozenModel(FrozenModel, frozen=False, from_attributes=True):
a: int = 1
KwargsNotFrozenModel(x=1).x = 2
KwargsNotFrozenModel.model_validate(model.__dict__)
class ModelWithSelfField(BaseModel):
self: str
def f(name: str) -> str:
return name
class ModelWithAllowReuseValidator(BaseModel):
name: str
normalize_name = field_validator('name')(f)
model_with_allow_reuse_validator = ModelWithAllowReuseValidator(name='xyz')
T = TypeVar('T')
class Response(BaseModel, Generic[T]):
data: T
error: Optional[str]
response = Response[Model](data=model, error=None)
class ModelWithAnnotatedValidator(BaseModel):
name: str
@field_validator('name')
def noop_validator_with_annotations(cls, name: str) -> str:
return name
def METHOD_NAME() -> str:
return 'x'
def _default_factory_list() -> List[int]:
return [1, 2, 3]
class FieldDefaultTestingModel(BaseModel):
# Required
a: int
b: int = Field()
c: int = Field(...)
# Default
d: int = Field(1)
# Default factory
g: List[int] = Field(default_factory=_default_factory_list)
h: str = Field(default_factory=METHOD_NAME)
i: str = Field(default_factory=lambda: 'test')
_TModel = TypeVar('_TModel')
_TType = TypeVar('_TType')
class OrmMixin(Generic[_TModel, _TType]):
@classmethod
def from_orm(cls, model: _TModel) -> _TType:
raise NotImplementedError
@classmethod
def from_orm_optional(cls, model: Optional[_TModel]) -> Optional[_TType]:
if model is None:
return None
return cls.from_orm(model)
import sys # noqa E402
if sys.version_info >= (3, 8):
from dataclasses import InitVar # E402
InitVarStr = InitVar[str]
else:
# InitVar is not supported in 3.7 due to loss of type information
InitVarStr = str
@dataclass
class MyDataClass:
foo: InitVarStr
bar: str
MyDataClass(foo='foo', bar='bar')
def get_my_custom_validator(field_name: str) -> Any:
@validator(field_name, allow_reuse=True)
def my_custom_validator(cls: Any, v: int) -> int:
return v
return my_custom_validator
def foo() -> None:
class MyModel(BaseModel):
number: int
custom_validator = get_my_custom_validator('number') # type: ignore[pydantic-field]
# MYPY: error: Unused "type: ignore" comment
@model_validator(mode='before')
@classmethod
def validate_before(cls, values: Any) -> Any:
return values
@model_validator(mode='after')
def validate_after(self) -> Self:
return self
MyModel(number=2)
|
3,284 |
test manual main port and address
|
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
import pytest
from lightning.fabric.plugins.environments import LSFEnvironment
def _make_rankfile(tmp_path):
hosts = "batch\n10.10.10.0\n10.10.10.1\n10.10.10.2\n10.10.10.3"
p = tmp_path / "lsb_djob_rankfile"
p.write_text(hosts)
return str(p)
@mock.patch.dict(os.environ, {"LSB_JOBID": "1234"})
def test_missing_lsb_djob_rankfile():
"""Test an error when the LSB_DJOB_RANKFILE cannot be found."""
with pytest.raises(ValueError, match="Did not find the environment variable `LSB_DJOB_RANKFILE`"):
LSFEnvironment()
@mock.patch.dict(os.environ, {"LSB_DJOB_RANKFILE": "", "LSB_JOBID": "1234"})
def test_empty_lsb_djob_rankfile():
"""Test an error when the LSB_DJOB_RANKFILE is not populated."""
with pytest.raises(ValueError, match="The environment variable `LSB_DJOB_RANKFILE` is empty"):
LSFEnvironment()
def test_missing_lsb_job_id(tmp_path):
"""Test an error when the job id cannot be found."""
with mock.patch.dict(os.environ, {"LSB_DJOB_RANKFILE": _make_rankfile(tmp_path)}), pytest.raises(
ValueError, match="Could not find job id in environment variable LSB_JOBID"
):
LSFEnvironment()
def METHOD_NAME(tmp_path):
"""Test a user can set the port manually through the MASTER_PORT env variable."""
environ = {
"LSB_DJOB_RANKFILE": _make_rankfile(tmp_path),
"LSB_JOBID": "1234",
"JSM_NAMESPACE_SIZE": "4",
"JSM_NAMESPACE_RANK": "3",
"JSM_NAMESPACE_LOCAL_RANK": "1",
}
with mock.patch.dict(os.environ, environ), mock.patch("socket.gethostname", return_value="10.10.10.2"):
env = LSFEnvironment()
assert env.main_port == 10234
def test_attributes_from_environment_variables(tmp_path):
"""Test that the LSF environment takes the attributes from the environment variables."""
environ = {
"LSB_DJOB_RANKFILE": _make_rankfile(tmp_path),
"LSB_JOBID": "1234",
"JSM_NAMESPACE_SIZE": "4",
"JSM_NAMESPACE_RANK": "3",
"JSM_NAMESPACE_LOCAL_RANK": "1",
}
with mock.patch.dict(os.environ, environ), mock.patch("socket.gethostname", return_value="10.10.10.2"):
env = LSFEnvironment()
assert env.creates_processes_externally
assert env.main_address == "10.10.10.0"
assert env.main_port == 10234
assert env.world_size() == 4
assert env.global_rank() == 3
assert env.local_rank() == 1
env.set_global_rank(100)
assert env.global_rank() == 3
env.set_world_size(100)
assert env.world_size() == 4
assert LSFEnvironment.detect()
def test_node_rank(tmp_path):
environ = {
"LSB_DJOB_RANKFILE": _make_rankfile(tmp_path),
"LSB_JOBID": "1234",
"JSM_NAMESPACE_SIZE": "4",
"JSM_NAMESPACE_RANK": "3",
"JSM_NAMESPACE_LOCAL_RANK": "1",
}
with mock.patch.dict(os.environ, environ), mock.patch("socket.gethostname", return_value="10.10.10.2"):
env = LSFEnvironment()
assert env.node_rank() == 2
def test_detect():
"""Test the detection of a LSF environment configuration."""
with mock.patch.dict(os.environ, {}, clear=True):
assert not LSFEnvironment.detect()
with mock.patch.dict(
os.environ,
{
"LSB_DJOB_RANKFILE": "",
"LSB_JOBID": "",
"JSM_NAMESPACE_SIZE": "",
"JSM_NAMESPACE_LOCAL_RANK": "",
},
):
assert LSFEnvironment.detect()
|
3,285 |
scrcpy server start
|
import socket
import struct
import threading
import time
import typing as t
from time import sleep
import numpy as np
from adbutils import AdbError, Network
from module.base.decorator import cached_property
from module.base.timer import Timer
from module.device.connection import Connection
from module.device.method.scrcpy.control import ControlSender
from module.device.method.scrcpy.options import ScrcpyOptions
from module.device.method.utils import AdbConnection, recv_all
from module.exception import RequestHumanTakeover
from module.logger import logger
class ScrcpyError(Exception):
pass
class ScrcpyCore(Connection):
"""
Scrcpy: https://github.com/Genymobile/scrcpy
Module from https://github.com/leng-yue/py-scrcpy-client
"""
_scrcpy_last_frame: t.Optional[np.ndarray] = None
_scrcpy_last_frame_time: float = 0.
_scrcpy_alive = False
_scrcpy_server_stream: t.Optional[AdbConnection] = None
_scrcpy_video_socket: t.Optional[socket.socket] = None
_scrcpy_control_socket: t.Optional[socket.socket] = None
_scrcpy_control_socket_lock = threading.Lock()
_scrcpy_stream_loop_thread = None
_scrcpy_resolution: t.Tuple[int, int] = (1280, 720)
@cached_property
def _scrcpy_control(self) -> ControlSender:
return ControlSender(self)
def scrcpy_init(self):
self._scrcpy_server_stop()
logger.hr('Scrcpy init')
logger.info(f'pushing {self.config.SCRCPY_FILEPATH_LOCAL}')
self.adb_push(self.config.SCRCPY_FILEPATH_LOCAL, self.config.SCRCPY_FILEPATH_REMOTE)
self._scrcpy_alive = False
self.scrcpy_ensure_running()
def scrcpy_ensure_running(self):
if not self._scrcpy_alive:
with self._scrcpy_control_socket_lock:
self.METHOD_NAME()
def METHOD_NAME(self):
"""
Connect to scrcpy server, there will be two sockets, video and control socket.
Raises:
ScrcpyError:
"""
logger.hr('Scrcpy server start')
commands = ScrcpyOptions.command_v120(jar_path=self.config.SCRCPY_FILEPATH_REMOTE)
self._scrcpy_server_stream: AdbConnection = self.adb.shell(
commands,
stream=True,
)
logger.info('Create server stream')
ret = self._scrcpy_server_stream.read(10)
# b'Aborted \r\n'
# Probably because file not exists
if b'Aborted' in ret:
raise ScrcpyError('Aborted')
if ret == b'[server] E':
# [server] ERROR: ...
ret += recv_all(self._scrcpy_server_stream)
logger.error(ret)
# java.lang.IllegalArgumentException: The server version (1.25) does not match the client (...)
if b'does not match the client' in ret:
raise ScrcpyError('Server version does not match the client')
else:
raise ScrcpyError('Unknown scrcpy error')
else:
# [server] INFO: Device: ...
ret += self._scrcpy_receive_from_server_stream()
logger.info(ret)
pass
logger.info('Create video socket')
timeout = Timer(3).start()
while 1:
if timeout.reached():
raise ScrcpyError('Connect scrcpy-server timeout')
try:
self._scrcpy_video_socket = self.adb.create_connection(
Network.LOCAL_ABSTRACT, "scrcpy"
)
break
except AdbError:
sleep(0.1)
dummy_byte = self._scrcpy_video_socket.recv(1)
if not len(dummy_byte) or dummy_byte != b"\x00":
raise ScrcpyError('Did not receive Dummy Byte from video stream')
logger.info('Create control socket')
self._scrcpy_control_socket = self.adb.create_connection(
Network.LOCAL_ABSTRACT, "scrcpy"
)
logger.info('Fetch device info')
device_name = self._scrcpy_video_socket.recv(64).decode("utf-8").rstrip("\x00")
if len(device_name):
logger.attr('Scrcpy Device', device_name)
else:
raise ScrcpyError('Did not receive Device Name')
ret = self._scrcpy_video_socket.recv(4)
self._scrcpy_resolution = struct.unpack(">HH", ret)
logger.attr('Scrcpy Resolution', self._scrcpy_resolution)
self._scrcpy_video_socket.setblocking(False)
self._scrcpy_alive = True
logger.info('Start video stream loop thread')
self._scrcpy_stream_loop_thread = threading.Thread(
target=self._scrcpy_stream_loop, daemon=True
)
self._scrcpy_stream_loop_thread.start()
while 1:
if self._scrcpy_stream_loop_thread is not None and self._scrcpy_stream_loop_thread.is_alive():
break
self.sleep(0.001)
logger.info('Scrcpy server is up')
def _scrcpy_server_stop(self):
"""
Stop listening (both threaded and blocked)
"""
logger.hr('Scrcpy server stop')
# err = self._scrcpy_receive_from_server_stream()
# if err:
# logger.error(err)
self._scrcpy_alive = False
if self._scrcpy_server_stream is not None:
try:
self._scrcpy_server_stream.close()
except Exception:
pass
if self._scrcpy_control_socket is not None:
try:
self._scrcpy_control_socket.close()
except Exception:
pass
if self._scrcpy_video_socket is not None:
try:
self._scrcpy_video_socket.close()
except Exception:
pass
logger.info('Scrcpy server stopped')
def _scrcpy_receive_from_server_stream(self):
if self._scrcpy_server_stream is not None:
try:
return self._scrcpy_server_stream.conn.recv(4096)
except Exception:
pass
def _scrcpy_stream_loop(self) -> None:
"""
Core loop for video parsing
"""
try:
from av.codec import CodecContext
from av.error import InvalidDataError
except ImportError as e:
logger.error(e)
logger.error('You must have `av` installed to use scrcpy screenshot, please update dependencies')
raise RequestHumanTakeover
codec = CodecContext.create("h264", "r")
while self._scrcpy_alive:
try:
raw_h264 = self._scrcpy_video_socket.recv(0x10000)
if raw_h264 == b"":
raise ScrcpyError("Video stream is disconnected")
packets = codec.parse(raw_h264)
for packet in packets:
frames = codec.decode(packet)
for frame in frames:
# logger.info('frame received')
frame = frame.to_ndarray(format="rgb24")
self._scrcpy_last_frame = frame
self._scrcpy_last_frame_time = time.time()
self._scrcpy_resolution = (frame.shape[1], frame.shape[0])
except (BlockingIOError, InvalidDataError):
# only return nonempty frames, may block cv2 render thread
time.sleep(0.001)
except (ConnectionError, OSError) as e: # Socket Closed
if self._scrcpy_alive:
logger.error(f'_scrcpy_stream_loop_thread: {repr(e)}')
raise
raise ScrcpyError('_scrcpy_stream_loop stopped')
|
3,286 |
loader
|
"""Views for managing sponsors of a project."""
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from flask import abort, flash, render_template, request
from baseframe import _
from baseframe.forms import Form
from baseframe.forms.auto import ConfirmDeleteForm
from coaster.auth import current_auth
from coaster.utils import getbool
from coaster.views import ModelView, UrlChangeCheck, UrlForView, requestform, route
from .. import app
from ..forms import ProjectSponsorForm
from ..models import Account, Project, ProjectSponsorMembership, db, sa
from ..typing import ReturnView
from .helpers import render_redirect
from .login_session import requires_login, requires_site_editor
from .mixins import ProjectViewMixin
def edit_sponsor_form(obj):
"""Customise ProjectSponsorForm to remove member field."""
form = ProjectSponsorForm(obj=obj)
del form.member
return form
@Project.views('sponsors')
@route('/<account>/<project>/sponsors/')
class ProjectSponsorLandingView(
ProjectViewMixin, UrlChangeCheck, UrlForView, ModelView
):
__decorators__ = [requires_login, requires_site_editor]
@route('add', methods=['POST', 'GET'])
def add_sponsor(self) -> ReturnView:
form = ProjectSponsorForm()
if request.method == 'POST':
if form.validate_on_submit():
if TYPE_CHECKING:
assert isinstance(form.member.data, Account) # nosec
existing_sponsorship = ProjectSponsorMembership.query.filter(
ProjectSponsorMembership.is_active,
ProjectSponsorMembership.project == self.obj,
ProjectSponsorMembership.member == form.member.data,
).one_or_none()
if existing_sponsorship is not None:
return (
{
'status': 'error',
'error_description': _(
"{sponsor} is already a sponsor"
).format(sponsor=form.member.data.pickername),
'errors': form.errors,
'form_nonce': form.form_nonce.data,
},
400,
)
sponsor_membership = ProjectSponsorMembership(
project=self.obj,
granted_by=current_auth.user,
)
form.populate_obj(sponsor_membership)
db.session.add(sponsor_membership)
db.session.commit()
flash(_("Sponsor has been added"), 'info')
return render_redirect(self.obj.url_for())
return (
{
'status': 'error',
'error_description': _("Sponsor could not be added"),
'errors': form.errors,
'form_nonce': form.form_nonce.data,
},
400,
)
return render_template(
'project_sponsor_popup.html.jinja2',
project=self.obj,
form=form,
action=self.obj.url_for('add_sponsor'),
ref_id='add_sponsor',
)
@route('reorder', methods=['POST'])
@requestform('target', 'other', ('before', getbool))
def reorder_sponsors(self, target: str, other: str, before: bool) -> ReturnView:
if not (current_auth.user and current_auth.user.is_site_editor):
abort(403)
if Form().validate_on_submit():
sponsor: ProjectSponsorMembership = (
ProjectSponsorMembership.query.filter_by(uuid_b58=target)
.options(
sa.orm.load_only(
ProjectSponsorMembership.id, ProjectSponsorMembership.seq
)
)
.one_or_404()
)
other_sponsor: ProjectSponsorMembership = (
ProjectSponsorMembership.query.filter_by(uuid_b58=other)
.options(
sa.orm.load_only(
ProjectSponsorMembership.id, ProjectSponsorMembership.seq
)
)
.one_or_404()
)
sponsor.reorder_item(other_sponsor, before)
db.session.commit()
return {'status': 'ok'}
return {'status': 'error', 'error': 'csrf'}, 422
ProjectSponsorLandingView.init_app(app)
@ProjectSponsorMembership.views('main')
@route('/<account>/<project>/sponsors/<sponsorship>')
class ProjectSponsorView(UrlChangeCheck, UrlForView, ModelView):
__decorators__ = [requires_login, requires_site_editor]
model = ProjectSponsorMembership
route_model_map = {
'account': 'project.account.urlname',
'project': 'project.name',
'sponsorship': 'uuid_b58',
}
def METHOD_NAME(
self,
account: str, # skipcq: PYL-W0613
project: str, # skipcq: PYL-W0613
sponsorship: Optional[str] = None,
) -> ProjectSponsorMembership:
obj = (
self.model.query.join(Project)
.join(Account, Project.account)
.filter(self.model.uuid_b58 == sponsorship)
.one_or_404()
)
if not obj.is_active:
abort(410)
return obj
@route('edit', methods=['GET', "POST"])
def edit(self) -> ReturnView:
form = edit_sponsor_form(self.obj)
if request.method == 'POST':
if form.validate_on_submit():
with db.session.no_autoflush:
with self.obj.amend_by(current_auth.user) as amendment:
form.populate_obj(amendment)
db.session.commit()
flash(_("Sponsor has been edited"), 'info')
return render_redirect(self.obj.project.url_for())
else:
return (
{
'status': 'error',
'error_description': _("Sponsor could not be edited"),
'errors': form.errors,
'form_nonce': form.form_nonce.data,
},
400,
)
return render_template(
'project_sponsor_popup.html.jinja2',
project=self.obj.project,
form=form,
action=self.obj.url_for('edit'),
ref_id='edit_sponsor',
sponsorship=self.obj,
)
@route('remove', methods=['GET', "POST"])
def remove(self) -> ReturnView:
form = ConfirmDeleteForm()
if request.method == 'POST':
if form.validate_on_submit():
self.obj.revoke(actor=current_auth.user)
db.session.commit()
flash(_("Sponsor has been removed"), 'info')
return render_redirect(self.obj.project.url_for())
return (
{
'status': 'error',
'error_description': _("Sponsor could not be removed"),
'errors': form.errors,
'form_nonce': form.form_nonce.data,
},
400,
)
return render_template(
'project_sponsor_popup.html.jinja2',
form=form,
title=_("Remove sponsor?"),
message=_("Remove ‘{sponsor}’ as a sponsor?").format(
sponsor=self.obj.title
),
action=self.obj.url_for('remove'),
ref_id='remove_sponsor',
remove=True,
)
ProjectSponsorView.init_app(app)
|
3,287 |
register
|
# This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import bpy
from bpy.props import FloatProperty, EnumProperty, IntProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, ensure_nesting_level, zip_long_repeat, get_data_nesting_level
from sverchok.utils.field.scalar import SvScalarField
from sverchok.utils.voronoi3d import lloyd_in_solid, lloyd_on_solid_surface
from sverchok.dependencies import FreeCAD
if FreeCAD is not None:
import Part
class SvLloydSolidNode(SverchCustomTreeNode, bpy.types.Node):
"""
Triggers: Lloyd Solid
Tooltip: Redistribute 3D points in the volume of a Solid body uniformly by use of Lloyd's algorithm
"""
bl_idname = 'SvLloydSolidNode'
bl_label = 'Lloyd in Solid'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_VORONOI'
sv_dependencies = {'scipy', 'FreeCAD'}
iterations : IntProperty(
name = "Iterations",
description = "Number of Lloyd algorithm iterations",
min = 0,
default = 3,
update = updateNode)
thickness : FloatProperty(
name = "Thickness",
default = 1.0,
min = 0.0,
description="Thickness of region where Voronoi diagram is generated",
update=updateNode)
accuracy: IntProperty(
name="Accuracy",
default=5,
min=1,
description="The accuracy of defining whether the point lies on the surface of the body",
update=updateNode)
def update_sockets(self, context):
self.inputs['Thickness'].hide_safe = self.mode != 'SURFACE'
updateNode(self, context)
modes = [
('VOLUME', "Volume", "Distribute points inside the volume of a Solid body", 0),
('SURFACE', "Surface", "Distribute points on the surface of a Solid body", 1)
]
mode : EnumProperty(
name = "Mode",
description = "Where to distribute points",
items = modes,
default = 'VOLUME',
update = update_sockets)
def draw_buttons(self, context, layout):
layout.prop(self, "mode", text='')
def draw_buttons_ext(self, context, layout):
self.draw_buttons(context, layout)
layout.prop(self, "accuracy")
def sv_init(self, context):
self.inputs.new('SvSolidSocket', "Solid")
self.inputs.new('SvVerticesSocket', "Sites").enable_input_link_menu = False
self.inputs.new('SvStringsSocket', 'Thickness').prop_name = 'thickness'
self.inputs.new('SvStringsSocket', 'Iterations').prop_name = 'iterations'
self.inputs.new('SvScalarFieldSocket', 'Weights').enable_input_link_menu = False
self.outputs.new('SvVerticesSocket', "Sites")
self.update_sockets(context)
def process(self):
if not any(socket.is_linked for socket in self.outputs):
return
solid_in = self.inputs['Solid'].sv_get()
sites_in = self.inputs['Sites'].sv_get()
iterations_in = self.inputs['Iterations'].sv_get()
thickness_in = self.inputs['Thickness'].sv_get()
weights_in = self.inputs['Weights'].sv_get(default=[[None]])
solid_in = ensure_nesting_level(solid_in, 2, data_types=(Part.Shape,))
input_level = get_data_nesting_level(sites_in)
sites_in = ensure_nesting_level(sites_in, 4)
iterations_in = ensure_nesting_level(iterations_in, 2)
thickness_in = ensure_nesting_level(thickness_in, 2)
if self.inputs['Weights'].is_linked:
weights_in = ensure_nesting_level(weights_in, 2, data_types=(SvScalarField,))
nested_output = input_level > 3
tolerance = 10**(-self.accuracy)
verts_out = []
for params in zip_long_repeat(solid_in, sites_in, iterations_in, thickness_in, weights_in):
new_verts = []
for solid, sites, iterations, thickness, weights in zip_long_repeat(*params):
if self.mode == 'VOLUME':
sites = lloyd_in_solid(solid, sites, iterations,
weight_field = weights, tolerance = tolerance)
else:
sites = lloyd_on_solid_surface(solid, sites, thickness, iterations,
weight_field = weights, tolerance = tolerance)
new_verts.append(sites)
if nested_output:
verts_out.append(new_verts)
else:
verts_out.extend(new_verts)
self.outputs['Sites'].sv_set(verts_out)
def METHOD_NAME():
bpy.utils.register_class(SvLloydSolidNode)
def unregister():
bpy.utils.unregister_class(SvLloydSolidNode)
|
3,288 |
no duplicate jars
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=unused-argument
import re
from typing import Any, Dict, List
from marshmallow import ValidationError, fields, post_dump, post_load, pre_dump, pre_load, validates
from azure.ai.ml._schema.core.fields import CodeField, EnvironmentField, NestedField
from azure.ai.ml._schema.core.schema import PathAwareSchema
from azure.ai.ml._schema.core.schema_meta import PatchedSchemaMeta
from ..core.fields import UnionField
re_memory_pattern = re.compile("^\\d+[kKmMgGtTpP]$")
class SparkEntryFileSchema(metaclass=PatchedSchemaMeta):
file = fields.Str(required=True)
# add spark_job_entry_type and make it dump only to align with the model definition;
# this ensures we get the expected value when calling spark._from_rest_object()
spark_job_entry_type = fields.Str(dump_only=True)
@pre_dump
def to_dict(self, data, **kwargs):
return {"file": data.entry}
class SparkEntryClassSchema(metaclass=PatchedSchemaMeta):
class_name = fields.Str(required=True)
# add spark_job_entry_type and make it dump only to align with the model definition;
# this ensures we get the expected value when calling spark._from_rest_object()
spark_job_entry_type = fields.Str(dump_only=True)
@pre_dump
def to_dict(self, data, **kwargs):
return {"class_name": data.entry}
CONF_KEY_MAP = {
"driver_cores": "spark.driver.cores",
"driver_memory": "spark.driver.memory",
"executor_cores": "spark.executor.cores",
"executor_memory": "spark.executor.memory",
"executor_instances": "spark.executor.instances",
"dynamic_allocation_enabled": "spark.dynamicAllocation.enabled",
"dynamic_allocation_min_executors": "spark.dynamicAllocation.minExecutors",
"dynamic_allocation_max_executors": "spark.dynamicAllocation.maxExecutors",
}
def no_duplicates(name: str, value: List):
if len(value) != len(set(value)):
raise ValidationError(f"{name} must not contain duplicate entries.")
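# Illustrative behaviour of the helper above (not part of the original module):
#
#     no_duplicates("jars", ["a.jar", "b.jar"])   # passes silently
#     no_duplicates("jars", ["a.jar", "a.jar"])   # raises ValidationError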
class ParameterizedSparkSchema(PathAwareSchema):
code = CodeField()
entry = UnionField(
[NestedField(SparkEntryFileSchema), NestedField(SparkEntryClassSchema)],
required=True,
metadata={"description": "Entry."},
)
py_files = fields.List(fields.Str(required=True))
jars = fields.List(fields.Str(required=True))
files = fields.List(fields.Str(required=True))
archives = fields.List(fields.Str(required=True))
conf = fields.Dict(keys=fields.Str(), values=fields.Raw())
properties = fields.Dict(keys=fields.Str(), values=fields.Raw())
environment = EnvironmentField(allow_none=True)
args = fields.Str(metadata={"description": "Command Line arguments."})
@validates("py_files")
def no_duplicate_py_files(self, value):
no_duplicates("py_files", value)
@validates("jars")
def METHOD_NAME(self, value):
no_duplicates("jars", value)
@validates("files")
def no_duplicate_files(self, value):
no_duplicates("files", value)
@validates("archives")
def no_duplicate_archives(self, value):
no_duplicates("archives", value)
@pre_load
# pylint: disable-next=docstring-missing-param,docstring-missing-return,docstring-missing-rtype
def map_conf_field_names(self, data, **kwargs):
"""Map the field names in the conf dictionary.
This function must be called after YamlFileSchema.load_from_file.
Given that marshmallow executes the pre_load functions in alphabetical order (marshmallow\\schema.py:L166;
functions are checked in alphabetical order when injected into cls._hooks), we must make sure the function
name is alphabetically after "load_from_file".
"""
# TODO: it's dangerous to depend on an alphabetical order, we'd better move related logic out of Schema.
conf = data["conf"] if "conf" in data else None
if conf is not None:
for field_key, dict_key in CONF_KEY_MAP.items():
value = conf.get(dict_key, None)
if dict_key in conf and value is not None:
del conf[dict_key]
conf[field_key] = value
data["conf"] = conf
return data
@post_dump(pass_original=True)
def serialize_field_names(self, data: Dict[str, Any], original_data: Dict[str, Any], **kwargs):
conf = data["conf"] if "conf" in data else {}
if original_data.conf is not None and conf is not None:
for field_name, value in original_data.conf.items():
if field_name not in conf:
if isinstance(value, str) and value.isdigit():
value = int(value)
conf[field_name] = value
if conf is not None:
for field_name, dict_name in CONF_KEY_MAP.items():
val = conf.get(field_name, None)
if field_name in conf and val is not None:
if isinstance(val, str) and val.isdigit():
val = int(val)
del conf[field_name]
conf[dict_name] = val
data["conf"] = conf
return data
@post_load
def demote_conf_fields(self, data, **kwargs):
conf = data["conf"] if "conf" in data else None
if conf is not None:
for field_name, _ in CONF_KEY_MAP.items():
value = conf.get(field_name, None)
if field_name in conf and value is not None:
del conf[field_name]
data[field_name] = value
return data
@pre_dump
def promote_conf_fields(self, data: object, **kwargs):
# copy fields from root object into the 'conf'
conf = data.conf or {}
for field_name, _ in CONF_KEY_MAP.items():
value = data.__getattribute__(field_name)
if value is not None:
conf[field_name] = value
data.__setattr__("conf", conf)
return data
|
3,289 |
from zsl
|
"""This module provides classes to identify optimal substrates for film growth."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from pymatgen.analysis.elasticity.strain import Deformation, Strain
from pymatgen.analysis.interfaces.zsl import ZSLGenerator, ZSLMatch, reduce_vectors
from pymatgen.core.surface import SlabGenerator, get_symmetrically_distinct_miller_indices
if TYPE_CHECKING:
from pymatgen.core import Structure
@dataclass
class SubstrateMatch(ZSLMatch):
"""
A substrate match building on the Zur and McGill algorithm. This match class includes the Miller
planes of the film and substrate, the full strain tensor, the von Mises strain, the ground state
energy if provided, and the elastic energy.
"""
film_miller: tuple[int, int, int]
substrate_miller: tuple[int, int, int]
strain: Strain
von_mises_strain: float
ground_state_energy: float
elastic_energy: float
@classmethod
def METHOD_NAME(
cls,
match: ZSLMatch,
film: Structure,
film_miller,
substrate_miller,
elasticity_tensor=None,
ground_state_energy=0,
):
"""Generate a substrate match from a ZSL match plus metadata."""
# Get the appropriate surface structure
struct = SlabGenerator(film, film_miller, 20, 15, primitive=False).get_slab().oriented_unit_cell
dfm = Deformation(match.match_transformation)
strain = dfm.green_lagrange_strain.convert_to_ieee(struct, initial_fit=False)
von_mises_strain = strain.von_mises_strain
if elasticity_tensor is not None:
energy_density = elasticity_tensor.energy_density(strain)
elastic_energy = film.volume * energy_density / len(film)
else:
elastic_energy = 0
return cls(
film_miller=film_miller,
substrate_miller=substrate_miller,
strain=strain,
von_mises_strain=von_mises_strain,
elastic_energy=elastic_energy,
ground_state_energy=ground_state_energy,
**{
k: getattr(match, k)
for k in [
"film_sl_vectors",
"substrate_sl_vectors",
"film_vectors",
"substrate_vectors",
"film_transformation",
"substrate_transformation",
]
},
)
@property
def total_energy(self):
"""Total energy of this match."""
return self.ground_state_energy + self.elastic_energy
class SubstrateAnalyzer(ZSLGenerator):
"""
This class applies a set of search criteria to identify suitable
substrates for film growth. It first uses a topological search by Zur
and McGill to identify matching super-lattices on various faces of the
two materials. Additional criteria can then be used to identify the most
suitable substrate. Currently, the only additional criterion is the
elastic strain energy of the super-lattices.
"""
def __init__(self, film_max_miller=1, substrate_max_miller=1, **kwargs):
"""
Initializes the substrate analyzer
Args:
zslgen(ZSLGenerator): Defaults to a ZSLGenerator with standard
tolerances, but can be fed one with custom tolerances
film_max_miller(int): maximum miller index to generate for film
surfaces
substrate_max_miller(int): maximum miller index to generate for
substrate surfaces.
"""
self.film_max_miller = film_max_miller
self.substrate_max_miller = substrate_max_miller
self.kwargs = kwargs
super().__init__(**kwargs)
def generate_surface_vectors(self, film_millers, substrate_millers):
"""
Generates the film/substrate slab combinations for a set of given
miller indices.
Args:
film_millers(array): all miller indices to generate slabs for
film
substrate_millers(array): all miller indices to generate slabs
for substrate
"""
vector_sets = []
for f in film_millers:
film_slab = SlabGenerator(self.film, f, 20, 15, primitive=False).get_slab()
film_vectors = reduce_vectors(film_slab.lattice.matrix[0], film_slab.lattice.matrix[1])
for s in substrate_millers:
substrate_slab = SlabGenerator(self.substrate, s, 20, 15, primitive=False).get_slab()
substrate_vectors = reduce_vectors(substrate_slab.lattice.matrix[0], substrate_slab.lattice.matrix[1])
vector_sets.append((film_vectors, substrate_vectors, f, s))
return vector_sets
def calculate(
self,
film,
substrate,
elasticity_tensor=None,
film_millers=None,
substrate_millers=None,
ground_state_energy=0,
lowest=False,
):
"""
Finds all topological matches for the substrate and calculates elastic
strain energy and total energy for the film if elasticity tensor and
ground state energy are provided:
Args:
film(Structure): conventional standard structure for the film
substrate(Structure): conventional standard structure for the
substrate
elasticity_tensor(ElasticTensor): elasticity tensor for the film
in the IEEE orientation
film_millers(array): film facets to consider in search as defined by
miller indices
substrate_millers(array): substrate facets to consider in search as
defined by miller indices
ground_state_energy(float): ground state energy for the film
lowest(bool): only consider lowest matching area for each surface
"""
self.film = film
self.substrate = substrate
# Generate miller indices if none specified for film
if film_millers is None:
film_millers = sorted(get_symmetrically_distinct_miller_indices(self.film, self.film_max_miller))
# Generate miller indices if none specified for substrate
if substrate_millers is None:
substrate_millers = sorted(
get_symmetrically_distinct_miller_indices(self.substrate, self.substrate_max_miller)
)
# Check each miller index combination
surface_vector_sets = self.generate_surface_vectors(film_millers, substrate_millers)
for [
film_vectors,
substrate_vectors,
film_miller,
substrate_miller,
] in surface_vector_sets:
for match in self(film_vectors, substrate_vectors, lowest):
sub_match = SubstrateMatch.METHOD_NAME(
match=match,
film=film,
film_miller=film_miller,
substrate_miller=substrate_miller,
elasticity_tensor=elasticity_tensor,
ground_state_energy=ground_state_energy,
)
yield sub_match
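# Illustrative usage sketch (not part of the original module); the structures and
# elasticity tensor are placeholders the caller must supply:
#
#     analyzer = SubstrateAnalyzer(film_max_miller=1, substrate_max_miller=1)
#     for match in analyzer.calculate(film=film_structure, substrate=substrate_structure):
#         print(match.film_miller, match.substrate_miller, match.von_mises_strain)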
|
3,290 |
construct manifest filename
|
""" This module tests full pipeline SVM processing for an ACS WFC, full-frame, one filter dataset.
"""
import datetime
import glob
import os
import pytest
from drizzlepac.haputils import astroquery_utils as aqutils
from drizzlepac import runsinglehap
from astropy.io import fits, ascii
from pathlib import Path
"""
test_svm_je281u.py
This test file can be executed in the following manner:
$ pytest -s --basetemp=/internal/hladata/yourUniqueDirectoryHere test_svm_je281u.py >& je281u.log &
$ tail -f je281u.log
* Note: When running this test, the `--basetemp` directory should be set to a unique
existing directory to avoid deleting previous test output.
* The POLLER_FILE exists in the tests/hap directory.
* If running manually with `--basetemp`, the je281u.log file will still be written to the
originating directory.
"""
WCS_SUB_NAME = "FIT_SVM_GAIA"
POLLER_FILE = "acs_e28_1u_input.out"
@pytest.fixture(scope="module")
def read_csv_for_filenames():
# Read the CSV poller file residing in the tests directory to extract the individual visit FLT/FLC filenames
path = os.path.join(os.path.dirname(__file__), POLLER_FILE)
table = ascii.read(path, format="no_header")
filename_column = table.colnames[0]
filenames = list(table[filename_column])
print("\nread_csv_for_filenames. Filesnames from poller: {}".format(filenames))
return filenames
@pytest.fixture(scope="module")
def gather_data_for_processing(read_csv_for_filenames, tmp_path_factory):
# Create working directory specified for the test
curdir = tmp_path_factory.mktemp(os.path.basename(__file__))
os.chdir(curdir)
# Establish FLC/FLT lists and obtain the requested data
flc_flag = ""
flt_flag = ""
# In order to obtain individual FLC or FLT images from MAST (if the files do not reside on disk) which
# may be part of an ASN, use only IPPPSS with a wildcard. The unwanted images have to be removed
# after-the-fact.
for fn in read_csv_for_filenames:
if fn.lower().endswith("flc.fits") and flc_flag == "":
flc_flag = fn[0:6] + "*"
elif fn.lower().endswith("flt.fits") and flt_flag == "":
flt_flag = fn[0:6] + "*"
# If both flags have been set, then break out the loop early. It may be
# that all files have to be checked which means the for loop continues
# until its natural completion.
if flc_flag and flt_flag:
break
# Get test data through astroquery - only retrieve the pipeline processed FLC and/or FLT files
# (e.g., j*_flc.fits) as necessary. The logic here and the above for loop is an attempt to
# avoid downloading too many images which are not needed for processing.
flcfiles = []
fltfiles = []
if flc_flag:
flcfiles = aqutils.retrieve_observation(flc_flag, suffix=["FLC"], product_type="pipeline")
if flt_flag:
fltfiles = aqutils.retrieve_observation(flt_flag, suffix=["FLT"], product_type="pipeline")
flcfiles.extend(fltfiles)
# Keep only the files which exist in BOTH lists for processing
files_to_process = set(read_csv_for_filenames).intersection(set(flcfiles))
# Identify unwanted files from the download list and remove from disk
files_to_remove = set(read_csv_for_filenames).symmetric_difference(set(flcfiles))
try:
for ftr in files_to_remove:
os.remove(ftr)
except Exception as x_cept:
print("")
print("Exception encountered: {}.".format(x_cept))
print("The file {} could not be deleted from disk. ".format(ftr))
print("Remove files which are not used for processing from disk manually.")
print("\ngather_data_for_processing. Gathered data: {}".format(files_to_process))
return files_to_process
@pytest.fixture(scope="module")
def gather_output_data(METHOD_NAME):
# Determine the filenames of all the output files from the manifest
files = []
with open(METHOD_NAME, 'r') as fout:
for line in fout.readlines():
files.append(line.rstrip("\n"))
print("\ngather_output_data. Output data files: {}".format(files))
return files
@pytest.fixture(scope="module")
def METHOD_NAME(read_csv_for_filenames):
# Construct the output manifest filename from input file keywords
inst = fits.getval(read_csv_for_filenames[0], "INSTRUME", ext=0).lower()
root = fits.getval(read_csv_for_filenames[0], "ROOTNAME", ext=0).lower()
tokens_tuple = (inst, root[1:4], root[4:6], "manifest.txt")
manifest_filename = "_".join(tokens_tuple)
print("\nconstruct_manifest_filename. Manifest filename: {}".format(manifest_filename))
return manifest_filename
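# Illustrative sketch (hypothetical header values, not read from real data): with
# INSTRUME="ACS" and ROOTNAME="je281uabq", the tokens are ("acs", "e28", "1u",
# "manifest.txt"), giving the manifest filename "acs_e28_1u_manifest.txt".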
@pytest.fixture(scope="module", autouse=True)
def svm_setup(gather_data_for_processing):
# Act: Process the input data by executing runsinglehap - time consuming activity
current_dt = datetime.datetime.now()
print(str(current_dt))
print("\nsvm_setup fixture")
# Read the "poller file" and download the input files, as necessary
input_names = gather_data_for_processing
# Run the SVM processing
path = os.path.join(os.path.dirname(__file__), POLLER_FILE)
try:
status = runsinglehap.perform(path)
# Catch anything that happens and report it. This is meant to catch unexpected errors and
# generate sufficient output exception information so algorithmic problems can be addressed.
except Exception as except_details:
print(except_details)
pytest.fail("\nsvm_setup. Exception Visit: {}\n", path)
current_dt = datetime.datetime.now()
print(str(current_dt))
# TESTS
def test_svm_manifest_name(METHOD_NAME):
# Construct the manifest filename from the header of an input file in the list and check it exists.
path = Path(METHOD_NAME)
print("\ntest_svm_manifest. Filename: {}".format(path))
# Ensure the manifest file uses the proper naming convention
assert(path.is_file())
def test_svm_wcs(gather_output_data):
    # Check that the output primary WCSNAME includes FIT_SVM_GAIA as part of the string value
tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith(".fits")]
for tdp in tdp_files:
wcsname = fits.getval(tdp, "WCSNAME", ext=1).upper()
print("\ntest_svm_wcs. WCSNAME: {} Output file: {}".format(wcsname, tdp))
assert WCS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp}."
def test_svm_cat_sources(gather_output_data):
    # Check that the output catalogs contain > 0 measured sources
cat_files = [files for files in gather_output_data if files.lower().endswith("-cat.ecsv")]
for cat in cat_files:
table_length = len(ascii.read(cat, format="ecsv"))
print("\ntest_svm_cat_sources. Number of sources in catalog {} is {}.".format(cat, table_length))
assert table_length > 0, f"Catalog file {cat} is unexpectedly empty"
|
3,291 |
resolve rent stab info
|
from onboarding.scaffolding import (
OnboardingScaffolding,
OnboardingScaffoldingMutation,
get_scaffolding,
)
from . import models, forms, email_dhcr
from typing import Dict, Any, Optional
from django.utils import translation
from django.db import connections
from django.conf import settings
import graphene
from graphql import ResolveInfo
from project import slack
from project.util.session_mutation import SessionFormMutation
from project.util.streaming_json import generate_json_rows
from project.util.site_util import absolute_reverse, SITE_CHOICES
from project import schema_registry
import project.locales
from frontend.static_content import react_render_email
from rapidpro.followup_campaigns import trigger_followup_campaign_async
from loc.landlord_lookup import lookup_bbl_and_bin_and_full_address
RENT_STAB_INFO_SESSION_KEY = "rh_rent_stab_v1"
BLANK_RENT_STAB_INFO = {"latest_year": None, "latest_unit_count": None}
def get_slack_notify_text(rhr: models.RentalHistoryRequest) -> str:
rh_link = slack.hyperlink(
text="rent history",
href=absolute_reverse("admin:rh_rentalhistoryrequest_change", args=[rhr.pk]),
)
if rhr.user:
user_text = slack.hyperlink(text=rhr.user.best_first_name, href=rhr.user.admin_url)
else:
user_text = slack.escape(rhr.first_name)
return f"{user_text} has requested {rh_link}!"
def run_rent_stab_sql_query(bbl: str) -> Optional[Dict[str, Any]]:
sql_query = """
select uc2007, uc2008, uc2009, uc2010, uc2011, uc2012, uc2013,
uc2014, uc2015, uc2016, uc2017, uc2018, uc2019, uc2020
from rentstab
full join rentstab_v2 using(ucbbl)
where ucbbl = %(bbl)s
"""
with connections[settings.NYCDB_DATABASE].cursor() as cursor:
cursor.execute(sql_query, {"bbl": bbl})
json_result = list(generate_json_rows(cursor))
if not json_result:
return None
return json_result[0]
def process_rent_stab_data(raw_data: Dict[str, Any]) -> Dict[str, Any]:
for item in sorted(raw_data.items(), reverse=True):
if item[1] and item[1] > 0:
year = item[0].replace("uc", "")
assert year
return {"latest_year": year, "latest_unit_count": item[1]}
return BLANK_RENT_STAB_INFO
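# Rough illustration (hypothetical row, not real NYCDB data): given
# {"uc2018": 12, "uc2019": 10, "uc2020": None}, the reverse-sorted scan skips the
# falsy "uc2020" value and stops at "uc2019", returning
# {"latest_year": "2019", "latest_unit_count": 10}.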
def get_rent_stab_info_for_bbl(bbl: str) -> Optional[Dict[str, Any]]:
# Case 1: No connection to GeoSearch or the nycdb database
if not (bbl and settings.NYCDB_DATABASE):
return None
raw_data = run_rent_stab_sql_query(bbl)
# Case 2: We connected to the database, but no RS data was found
if not raw_data:
return BLANK_RENT_STAB_INFO
# Case 3: We connected to the database, and RS data was found
else:
return process_rent_stab_data(raw_data)
@schema_registry.register_mutation
class RhForm(OnboardingScaffoldingMutation):
class Meta:
form_class = forms.RhForm
@classmethod
def perform_mutate(cls, form, info: ResolveInfo):
request = info.context
result = super().perform_mutate(form, info)
scf = get_scaffolding(request)
assert scf.street and scf.borough
full_address = scf.street + ", " + scf.borough
bbl, _, _ = lookup_bbl_and_bin_and_full_address(full_address)
request.session[RENT_STAB_INFO_SESSION_KEY] = get_rent_stab_info_for_bbl(bbl)
return result
def scaffolding_has_rental_history_request_info(scf: OnboardingScaffolding) -> bool:
return bool(
scf.first_name
and scf.last_name
and scf.street
and scf.borough
and scf.phone_number
and scf.apt_number
)
@schema_registry.register_mutation
class RhSendEmail(SessionFormMutation):
class Meta:
form_class = forms.RhSendEmail
@classmethod
def perform_mutate(cls, form, info):
request = info.context
scf = get_scaffolding(request)
if not scaffolding_has_rental_history_request_info(scf):
cls.log(info, "User has not completed the rental history form, aborting mutation.")
return cls.make_error("You haven't completed all the previous steps yet.")
rhr = models.RentalHistoryRequest(
first_name=scf.first_name,
last_name=scf.last_name,
apartment_number=scf.apt_number,
phone_number=scf.phone_number,
address=scf.street,
address_verified=scf.address_verified,
borough=scf.borough,
zipcode=scf.zip_code,
)
rhr.set_user(request.user)
rhr.full_clean()
rhr.save()
slack.sendmsg_async(get_slack_notify_text(rhr), is_safe=True)
email = react_render_email(
SITE_CHOICES.JUSTFIX,
project.locales.DEFAULT,
"rh/email-to-dhcr.txt",
session=request.session,
)
email_dhcr.send_email_to_dhcr(email.subject, email.body)
trigger_followup_campaign_async(
f"{scf.first_name} {scf.last_name}",
scf.phone_number,
"RH",
locale=translation.get_language_from_request(request, check_path=True),
)
# Note that we used to purge the scaffolding information here, but lots
# of users go on to create an account after this, and we don't want them
# to have to re-enter all their information, so we'll keep it around.
return cls.mutation_success()
class RhRentStabData(graphene.ObjectType):
latest_year = graphene.String(
description=(
"The last year that the user's building had rent stabilized units. "
"If null, no units were found since 2007. "
"Note: this will never be the empty string. "
)
)
latest_unit_count = graphene.Int(
description=(
"The most recent count of rent stabilized units in user's building. "
"If null, no units were found since 2007."
)
)
@schema_registry.register_session_info
class RhSessionInfo(object):
rent_stab_info = graphene.Field(RhRentStabData)
def METHOD_NAME(self, info: ResolveInfo):
request = info.context
kwargs = request.session.get(RENT_STAB_INFO_SESSION_KEY, {})
if kwargs:
return RhRentStabData(**kwargs)
return None
|
3,292 |
removedata
|
#coding=utf8
import urllib
import urllib2
import urlparse
import cookielib
import re
import StringIO
try:
import json
except ImportError:
import simplejson as json
from upload import MultiPartForm
class UTorrentClient(object):
def __init__(self, base_url, username, password):
self.base_url = base_url
self.username = username
self.password = password
self.opener = self._make_opener('uTorrent', base_url, username, password)
self.token = self._get_token()
#TODO refresh token, when necessary
def _make_opener(self, realm, base_url, username, password):
        '''uTorrent API needs HTTP Basic Auth and cookie support for token verification.'''
auth_handler = urllib2.HTTPBasicAuthHandler()
auth_handler.add_password(realm=realm,
uri=base_url,
user=username,
passwd=password)
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
cookie_jar = cookielib.CookieJar()
cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
handlers = [auth_handler, cookie_handler]
opener = urllib2.build_opener(*handlers)
return opener
def _get_token(self):
url = urlparse.urljoin(self.base_url, 'token.html')
response = self.opener.open(url)
token_re = "<div id='token' style='display:none;'>([^<>]+)</div>"
match = re.search(token_re, response.read())
return match.group(1)
def list(self, **kwargs):
params = [('list', '1')]
params += kwargs.items()
return self._action(params)
def start(self, *hashes):
params = [('action', 'start'),]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def stop(self, *hashes):
params = [('action', 'stop'),]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def pause(self, *hashes):
params = [('action', 'pause'),]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def forcestart(self, *hashes):
params = [('action', 'forcestart'),]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def remove(self, *hashes):
params = [('action', 'remove'),]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def METHOD_NAME(self, *hashes):
params = [('action', 'removedata'),]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def recheck(self, *hashes):
params = [('action', 'recheck'),]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def getfiles(self, hash):
params = [('action', 'getfiles'), ('hash', hash)]
return self._action(params)
def getprops(self, hash):
params = [('action', 'getprops'), ('hash', hash)]
return self._action(params)
def setprio(self, hash, priority, *files):
params = [('action', 'setprio'), ('hash', hash), ('p', str(priority))]
for file_index in files:
params.append(('f', str(file_index)))
return self._action(params)
def addfile(self, filename, filepath=None, bytes=None):
params = [('action', 'add-file')]
form = MultiPartForm()
if filepath is not None:
            file_handler = open(filepath, 'rb')  # torrent files are binary
else:
file_handler = StringIO.StringIO(bytes)
form.add_file('torrent_file', filename.encode('utf-8'), file_handler)
return self._action(params, str(form), form.get_content_type())
def _action(self, params, body=None, content_type=None):
#about token, see https://github.com/bittorrent/webui/wiki/TokenSystem
url = self.base_url + '?token=' + self.token + '&' + urllib.urlencode(params)
request = urllib2.Request(url)
if body:
request.add_data(body)
request.add_header('Content-length', len(body))
if content_type:
request.add_header('Content-type', content_type)
try:
response = self.opener.open(request)
return response.code, json.loads(response.read())
        except urllib2.HTTPError, e:
raise
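# Minimal usage sketch (hypothetical Web UI URL and credentials, not part of the
# original module):
#   client = UTorrentClient('http://localhost:8080/gui/', 'admin', 'secret')
#   code, listing = client.list()
#   client.METHOD_NAME('0123456789abcdef0123456789abcdef01234567')  # remove torrent and its data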
|
3,293 |
attach combobox
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from Qt import QtCore, QtWidgets
from functools import partial
class Config(QtCore.QSettings):
"""Persistent application settings.
Methods are also provided for easily attaching widgets to settings.
"""
def __init__(self, default_settings, organization=None, application=None,
parent=None):
super(Config, self).__init__(organization, application, parent)
self.default_settings = default_settings
def value(self, key, type_=None):
"""Get the value of a setting.
        If `type_` is not provided, the key must be for a known setting,
        present in `self.default_settings`. Conversely, if `type_` IS provided,
        the key must be for an unknown setting.
"""
if type_ is None:
default = self._default_value(key)
val = self._value(key, default)
if type(val) == type(default):
return val
else:
return self._convert_value(val, type(default))
else:
val = self._value(key, None)
if val is None:
return None
return self._convert_value(val, type_)
def get(self, key, type_=None):
return self.value(key, type_)
def get_string_list(self, key):
"""Get a list of strings."""
strings = []
size = self.beginReadArray(key)
for i in range(size):
self.setArrayIndex(i)
entry = str(self._value("entry"))
strings.append(entry)
self.endArray()
return strings
def prepend_string_list(self, key, value, max_length_key):
"""Prepend a fixed-length string list with a new string.
The oldest string will be removed from the list. If the string is
already in the list, it is shuffled to the top. Use this to implement
things like a 'most recent files' entry.
"""
max_len = self.get(max_length_key)
strings = self.get_string_list(key)
strings = [value] + [x for x in strings if x != value]
strings = strings[:max_len]
self.beginWriteArray(key)
for i in range(len(strings)):
self.setArrayIndex(i)
self.setValue("entry", strings[i])
self.endArray()
def attach(self, widget, key):
if isinstance(widget, QtWidgets.QComboBox):
self.METHOD_NAME(widget, key)
elif isinstance(widget, QtWidgets.QCheckBox):
self._attach_checkbox(widget, key)
else:
raise NotImplementedError
def _value(self, key, defaultValue=None):
val = super(Config, self).value(key, defaultValue)
if hasattr(val, "toPyObject"):
val = val.toPyObject()
return val
@classmethod
def _convert_value(cls, value, type_):
if type_ is bool:
return (str(value).lower() == "true")
else:
return type_(value)
def _attach_checkbox(self, widget, key):
if widget.isTristate():
raise NotImplementedError
value = self.value(key)
widget.setCheckState(QtCore.Qt.Checked if value else QtCore.Qt.Unchecked)
widget.stateChanged.connect(
partial(self._checkbox_stateChanged, widget, key))
def _checkbox_stateChanged(self, widget, key):
value = widget.isChecked()
self.setValue(key, value)
def METHOD_NAME(self, widget, key):
value = str(self.value(key))
index = widget.findText(value)
if index == -1:
widget.setEditText(value)
else:
widget.setCurrentIndex(index)
widget.currentIndexChanged.connect(
partial(self._combobox_currentIndexChanged, widget, key))
widget.editTextChanged.connect(
partial(self._combobox_editTextChanged, widget, key))
def _combobox_currentIndexChanged(self, widget, key, index):
value = widget.itemText(index)
self.setValue(key, value)
def _combobox_editTextChanged(self, widget, key, txt):
self.setValue(key, txt)
def _default_value(self, key):
keys = key.lstrip('/').split('/')
value = self.default_settings
for k in keys:
try:
value = value[k]
except KeyError:
raise ValueError("No such application setting: %r" % key)
return value
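# Minimal usage sketch (assumed defaults layout and widget variables, not part of the
# original module):
#   defaults = {"ui": {"verbose": False, "profile": "default"}}
#   config = Config(defaults, organization="MyOrg", application="MyApp")
#   config.attach(verbose_checkbox, "ui/verbose")   # QCheckBox -> _attach_checkbox
#   config.attach(profile_combobox, "ui/profile")   # QComboBox -> METHOD_NAME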
|
3,294 |
properties
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetProvisionedClusterResult',
'AwaitableGetProvisionedClusterResult',
'get_provisioned_cluster',
'get_provisioned_cluster_output',
]
@pulumi.output_type
class GetProvisionedClusterResult:
"""
The provisionedClusters resource definition.
"""
def __init__(__self__, extended_location=None, id=None, identity=None, location=None, name=None, METHOD_NAME=None, system_data=None, tags=None, type=None):
if extended_location and not isinstance(extended_location, dict):
raise TypeError("Expected argument 'extended_location' to be a dict")
pulumi.set(__self__, "extended_location", extended_location)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", METHOD_NAME)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional['outputs.ProvisionedClustersResponseResponseExtendedLocation']:
return pulumi.get(self, "extended_location")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ProvisionedClusterIdentityResponse']:
"""
Identity for the Provisioned cluster.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def METHOD_NAME(self) -> 'outputs.ProvisionedClustersResponsePropertiesResponse':
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetProvisionedClusterResult(GetProvisionedClusterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetProvisionedClusterResult(
extended_location=self.extended_location,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
METHOD_NAME=self.METHOD_NAME,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_provisioned_cluster(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProvisionedClusterResult:
"""
Gets the Hybrid AKS provisioned cluster
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: Parameter for the name of the provisioned cluster
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:hybridcontainerservice/v20220901preview:getProvisionedCluster', __args__, opts=opts, typ=GetProvisionedClusterResult).value
return AwaitableGetProvisionedClusterResult(
extended_location=pulumi.get(__ret__, 'extended_location'),
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
METHOD_NAME=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_provisioned_cluster)
def get_provisioned_cluster_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProvisionedClusterResult]:
"""
Gets the Hybrid AKS provisioned cluster
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: Parameter for the name of the provisioned cluster
"""
...
|
3,295 |
get individual name
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2021 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <[email protected]>
# Miguel Ángel Fernández <[email protected]>
# Eva Millán <[email protected]>
#
import logging
import re
from functools import lru_cache
import requests
import urllib3.util
from ..db import find_individual_by_uuid, find_identity
from ..errors import NotFoundError, InvalidValueError
from .exclusion import fetch_recommender_exclusion_list
logger = logging.getLogger(__name__)
strict_name_pattern = re.compile(r"(^\w{2,})\s+\w+")
loose_name_pattern = re.compile(r"(^\w{2,})")
def recommend_gender(uuids, exclude=True, no_strict_matching=False):
"""Recommend possible genders for a list of individuals.
Returns a generator of gender recommendations based on the
    individuals' first names, using the genderize.io API. The
genders returned by the API are 'male' and 'female'.
Each recommendation contains the uuid of the individual, the
suggested gender and the accuracy of the prediction.
When the individual does not have a name set, or the individual
is not found, it will not be included in the result. By default,
the name will also need to follow a 'Name LastName' pattern, but
this validation can be disabled with the 'no_strict_matching' flag.
:param uuids: list of individual identifiers
:param exclude: if set to `True`, the results list will ignore individual identities
if any value from the `email`, `name`, or `username` fields are found in the
RecommenderExclusionTerm table. Otherwise, results will not ignore them.
:param no_strict_matching: disable name validation
:returns: a generator of recommendations
"""
logger.debug(
f"Generating genders recommendations; "
f"uuids={uuids}; ..."
)
if exclude:
excluded_terms = set(fetch_recommender_exclusion_list())
strict = not no_strict_matching
for uuid in uuids:
try:
if exclude and _exclude_uuid(uuid, excluded_terms):
continue
individual = find_individual_by_uuid(uuid)
name = METHOD_NAME(individual, strict)
gender, accuracy = _genderize(name)
except NotFoundError:
message = f"Skipping {uuid}: Individual not found"
logger.warning(message)
continue
except InvalidValueError:
message = f"Skipping {uuid}: No valid name"
logger.warning(message)
continue
except requests.exceptions.RequestException as e:
message = f"Skipping {uuid} due to a connection error: {str(e)}"
logger.warning(message)
continue
else:
yield uuid, (gender, accuracy)
logger.info(f"Gender recommendations generated; uuids='{uuids}'")
def _exclude_uuid(uuid, excluded_terms):
"""If one of username, email, or name are in excluded_terms
it will return True and False if not.
:param uuid: Individual UUID
:excluded_terms: Set of terms (RecommenderExclusionTerm)
:returns: True | False
"""
identity = find_identity(uuid)
identity_set = {identity.username, identity.name, identity.email}
identity_set.discard(None)
return not identity_set.isdisjoint(excluded_terms)
def METHOD_NAME(individual, strict):
"""Get the first name of an individual from their profile"""
name_pattern = loose_name_pattern
if strict:
name_pattern = strict_name_pattern
try:
name_match = name_pattern.match(individual.profile.name)
first_name = name_match.group(1).lower()
except Exception as e:
raise InvalidValueError(msg=str(e))
else:
return first_name
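# Rough illustration (hypothetical profile names): with strict matching, a profile
# name such as "Jane Doe" matches and yields "jane", while a single token like
# "jdoe" raises InvalidValueError; with strict matching disabled, "jdoe" yields "jdoe".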
@lru_cache(maxsize=128)
def _genderize(name):
"""Fetch gender from genderize.io"""
from django.conf import settings
api_key = settings.SORTINGHAT_GENDERIZE_API_KEY
genderize_api_url = "https://api.genderize.io/"
total_retries = 10
max_retries = 5
sleep_time = 0.25
status_forcelist = [502]
params = {
'name': name
}
if api_key:
params['apikey'] = api_key
session = requests.Session()
retries = urllib3.util.Retry(total=total_retries,
connect=max_retries,
status=max_retries,
status_forcelist=status_forcelist,
backoff_factor=sleep_time,
raise_on_status=True)
session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))
r = session.get(genderize_api_url, params=params)
result = r.json()
r.raise_for_status()
gender = result.get('gender', None)
prob = result.get('probability', None)
acc = int(prob * 100) if prob else None
return gender, acc
|
3,296 |
has tables
|
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from django.utils.translation import gettext_noop, gettext_lazy
from couchdbkit import ResourceNotFound
from memoized import memoized
from corehq.apps.fixtures.dispatcher import FixtureInterfaceDispatcher
from corehq.apps.fixtures.models import LookupTable
from corehq.apps.fixtures.views import FixtureViewMixIn, fixtures_home, table_json
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
class FixtureInterface(FixtureViewMixIn, GenericReportView):
base_template = 'fixtures/fixtures_base.html'
asynchronous = False
dispatcher = FixtureInterfaceDispatcher
exportable = False
needs_filters = False
class FixtureSelectFilter(BaseSingleOptionFilter):
slug = "table_id"
label = ""
placeholder = "place"
default_text = gettext_lazy("Select a Table")
@property
def selected(self):
# ko won't display default selected-value as it should, display default_text instead
return ""
def _fixture_options(self):
return sorted(
LookupTable.objects.by_domain(self.domain).values("id", "tag"),
key=lambda t: t["tag"].lower()
)
@property
@memoized
def options(self):
return [(f["id"].hex, f["tag"]) for f in self._fixture_options()]
class FixtureViewInterface(GenericTabularReport, FixtureInterface):
name = gettext_noop("View Tables")
slug = "view_lookup_tables"
report_template_path = 'fixtures/view_table.html'
fields = ['corehq.apps.fixtures.interface.FixtureSelectFilter']
@property
def view_response(self):
if not self.METHOD_NAME():
messages.info(self.request, _("You don't have any tables defined yet - create tables to view them."))
return HttpResponseRedirect(fixtures_home(self.domain))
else:
return super(FixtureViewInterface, self).view_response
@property
def report_context(self):
assert self.METHOD_NAME()
if not self.request.GET.get("table_id", None):
return {"table_not_selected": True}
try:
context = super(FixtureViewInterface, self).report_context
except ResourceNotFound:
return {"table_not_selected": True}
# Build javascript options for DataTables
report_table = context['report_table']
headers = report_table.get('headers')
data_tables_options = {
'slug': self.context['report']['slug'],
'defaultRows': report_table.get('default_rows', 10),
'startAtRowNum': report_table.get('start_at_row', 0),
'showAllRowsOption': report_table.get('show_all_rows'),
'autoWidth': headers.auto_width,
}
if headers.render_aoColumns:
data_tables_options.update({
'aoColumns': headers.render_aoColumns,
})
if headers.custom_sort:
data_tables_options.update({
'customSort': headers.custom_sort,
})
pagination = context['report_table'].get('pagination', {})
if pagination.get('is_on'):
data_tables_options.update({
'ajaxSource': pagination.get('source'),
'ajaxParams': pagination.get('params'),
})
left_col = context['report_table'].get('left_col', {})
if left_col.get('is_fixed'):
data_tables_options.update({
'fixColumns': True,
'fixColsNumLeft': left_col['fixed'].get('num'),
'fixColsWidth': left_col['fixed'].get('width'),
})
context.update({
"selected_table": self.table.get("table_id", ""),
'data_tables_options': data_tables_options,
})
if self.lookup_table:
context.update({
"table_description": self.lookup_table.description,
})
return context
@memoized
def METHOD_NAME(self):
return LookupTable.objects.filter(domain=self.domain).exists()
@property
@memoized
def table(self):
from corehq.apps.fixtures.views import data_table
if self.METHOD_NAME() and self.request.GET.get("table_id", None):
return data_table(self.request, self.domain)
else:
return {"headers": None, "rows": None}
@cached_property
def lookup_table(self):
try:
return LookupTable.objects.get(id=self.request.GET['table_id'])
except LookupTable.DoesNotExist:
return None
@property
def headers(self):
return self.table["headers"]
@property
def rows(self):
return self.table["rows"]
class FixtureEditInterface(FixtureInterface):
name = gettext_noop("Manage Tables")
slug = "edit_lookup_tables"
report_template_path = 'fixtures/manage_tables.html'
@property
def report_context(self):
context = super(FixtureEditInterface, self).report_context
is_managed_by_upstream_domain = any(data_type['is_synced'] for data_type in self.data_types)
context.update(
types=self.data_types,
is_managed_by_upstream_domain=is_managed_by_upstream_domain,
can_edit_linked_data=self.can_edit_linked_data(),
)
return context
@property
@memoized
def data_types(self):
return [table_json(t) for t in LookupTable.objects.by_domain(self.domain)]
def can_edit_linked_data(self):
return self.request.couch_user.can_edit_linked_data(self.domain)
|
3,297 |
deserialize if empty
|
"""DualDict is a dict with lazily synchronized string representation."""
import collections.abc
import copy
class DualDict(collections.abc.MutableMapping):
"""DualDict class serves as dict with lazily synchronized string representation.
>>> ddict = DualDict('Number=Sing|Person=1')
>>> ddict['Case'] = 'Nom'
>>> str(ddict)
'Case=Nom|Number=Sing|Person=1'
>>> ddict['NonExistent']
''
This class provides access to both
* a structured (dict-based, deserialized) representation,
e.g. {'Number': 'Sing', 'Person': '1'}, and
* a string (serialized) representation of the mapping, e.g. `Number=Sing|Person=1`.
There is a clever mechanism that makes sure that users can read and write
both of the representations which are always kept synchronized.
Moreover, the synchronization is lazy, so the serialization and deserialization
is done only when needed. This speeds up scenarios where access to dict is not needed.
    A value can be deleted in any of the following three ways:
>>> del ddict['Case']
>>> ddict['Case'] = None
>>> ddict['Case'] = ''
and it works even if the value was already missing.
"""
__slots__ = ['_string', '_dict']
def __init__(self, value=None, **kwargs):
if value is not None and kwargs:
raise ValueError('If value is specified, no other kwarg is allowed ' + str(kwargs))
self._dict = dict(**kwargs)
self._string = None
if value is not None:
self.set_mapping(value)
def __str__(self):
if self._string is None:
serialized = []
for name, value in sorted(self._dict.items(), key=lambda s: s[0].lower()):
if value is True:
serialized.append(name)
else:
serialized.append(f"{name}={value}")
self._string = '|'.join(serialized) if serialized else '_'
return self._string
def METHOD_NAME(self):
if not self._dict and self._string is not None and self._string != '_':
for raw_feature in self._string.split('|'):
namevalue = raw_feature.split('=', 1)
if len(namevalue) == 2:
name, value = namevalue
else:
name, value = namevalue[0], True
self._dict[name] = value
def __getitem__(self, key):
self.METHOD_NAME()
return self._dict.get(key, '')
def __setitem__(self, key, value):
self.METHOD_NAME()
self._string = None
if value is not None and value != '':
self._dict[key] = value
else:
self.__delitem__(key)
def __delitem__(self, key):
self.METHOD_NAME()
try:
del self._dict[key]
self._string = None
except KeyError:
pass
def __iter__(self):
self.METHOD_NAME()
return self._dict.__iter__()
def __len__(self):
self.METHOD_NAME()
return len(self._dict)
def __contains__(self, key):
self.METHOD_NAME()
return self._dict.__contains__(key)
def clear(self):
self._string = '_'
self._dict.clear()
def copy(self):
"""Return a deep copy of this instance."""
return copy.deepcopy(self)
def set_mapping(self, value):
"""Set the mapping from a dict or string.
If the `value` is None or an empty string, it is converted to storing string `_`
(which is the CoNLL-U way of representing an empty value).
If the `value` is a string, it is stored as is.
If the `value` is a dict (or any instance of `collections.abc.Mapping`),
its copy is stored.
        Other types of `value` raise a `ValueError` exception.
"""
if value is None:
self.clear()
elif isinstance(value, str):
self._dict.clear()
self._string = value if value != '' else '_'
elif isinstance(value, collections.abc.Mapping):
self._string = None
self._dict = dict(value)
else:
raise ValueError("Unsupported value type " + str(value))
|
3,298 |
enable ports
|
"""
This module contains the workflows for installing and configuring COSBench - Cloud
Object Storage Benchmark tool on the provided nodes. It supports
- deploying COSBench on the provided systems
- Configuring COSBench based on the given nodes
Sample test script
- test:
abort-on-fail: true
config:
controllers:
- node6
drivers:
count: 1
hosts:
- node6
- node7
desc: Start COS Bench controller and driver
module: cosbench.py
name: deploy cosbench
"""
from json import loads
from typing import Dict, List
from jinja2 import Template
from ceph.ceph import Ceph, CephNode, CommandFailed
from ceph.utils import get_nodes_by_ids
from utility.log import Log
LOG = Log(__name__)
RPMS = ["java-1.8.0-openjdk", "unzip", "nmap-ncat"]
CB_VER = "0.4.2.c4"
CB_FILE = f"{CB_VER}.zip"
CB_URL = (
f"https://github.com/intel-cloud/cosbench/releases/download/v{CB_VER}/{CB_FILE}"
)
CTRL_CONF = """[controller]
name = CephCI COS
log_level = INFO
log_file = log/controller.log
archive_dir = archive
drivers = {{ data|length }}
{% for item in data %}
[driver{{ loop.index }}]
name = {{ item.name }}-{{ loop.index }}
url = http://{{ item.ip_address }}:{{ item.port }}/driver
{%- endfor %}
"""
def install(nodes: List[CephNode]) -> None:
"""
Installs COS Bench along with its pre-requisites.
Args:
nodes (list): The list of nodes on which the packages are installed.
Returns:
None
Raises:
CommandFailed
"""
pre_req_pkgs = " ".join(RPMS)
for node in nodes:
try:
node.exec_command(cmd="ls -l /opt/cosbench")
continue
except CommandFailed:
pass
node.exec_command(sudo=True, cmd="yum remove -y java-*")
node.exec_command(sudo=True, cmd=f"yum install -y {pre_req_pkgs}")
node.exec_command(cmd=f"curl -L {CB_URL} -O")
node.exec_command(cmd=f"unzip {CB_FILE}")
node.exec_command(cmd=f"sudo mv {CB_VER} /opt/cosbench")
node.exec_command(cmd="chmod +x /opt/cosbench/*.sh")
LOG.info("Successfully install COSBench!!!")
def METHOD_NAME(node: CephNode, port: int = 18088) -> None:
"""
Opens the required firewall ports on the COSBench role type nodes.
Args:
        node (CephNode): The node for which the port has to be opened.
port (int): The network port that needs to be opened
Returns:
None
Raises:
CommandFailed
"""
LOG.debug("Opening the required network ports if firewall is configured.")
try:
out, err = node.exec_command(sudo=True, cmd="firewall-cmd --state")
if out.lower() != "running":
return
except CommandFailed:
LOG.debug(f"{node.shortname} has no firewall configuration.")
return
node.exec_command(
sudo=True, cmd=f"firewall-cmd --zone public --permanent --port {port}/tcp"
)
def config(node: CephNode, data: List) -> None:
"""
Writes the COS Bench controller configuration file.
Args:
node: The node that is designated to be a COS controller
data: A list of dictionaries having driver details (name & ip_address)
Returns:
None
Raises:
CommandFailed
"""
LOG.info("Generating the COS Bench controller file.")
templ = Template(CTRL_CONF)
conf = templ.render(data=data)
conf_file = node.remote_file(
file_name="/opt/cosbench/conf/controller.conf", file_mode="w"
)
conf_file.write(conf)
conf_file.flush()
def execute_cosbench_script(nodes: List[CephNode], script: str) -> None:
"""Executes the given script on all provided nodes.
Args:
nodes (list): The list of nodes on which the script needs to be executed.
script (str): The script file that needs to be executed.
Returns:
None
Raises:
CommandFailed
"""
LOG.debug(f"Executing COS Bench script: {script}")
for node in nodes:
node.exec_command(cmd=f"cd /opt/cosbench && sudo ./{script}")
def get_or_create_user(node: CephNode) -> Dict:
"""Creates or retrieves a RADOS user.
Returns:
Dictionary holding the keys user, access_key & secret_key
"""
LOG.debug("Get or Create cosbench01 user using radosgw-admin.")
user = "cosbench01"
try:
out, err = node.exec_command(cmd=f"sudo radosgw-admin user info --uid {user}")
out = loads(out)
return out["keys"][0]
except CommandFailed:
out, err = node.exec_command(
cmd=f"sudo radosgw-admin user create --uid {user} --display-name {user}"
f" --email {user}@noreply.com"
)
out = loads(out)
return out["keys"][0]
def run(ceph_cluster: Ceph, **kwargs) -> int:
"""
Entry point to this module that executes the set of workflows.
Here, Cloud Object Store Benchmark tool (COSBench) is installed on the nodes in the
cluster having the following roles
- cosbench-controller
Args:
ceph_cluster: Cluster participating in the test.
Returns:
0 on Success and 1 on Failure.
"""
LOG.info("Being COSBench deploy and configuration workflow.")
client = ceph_cluster.get_nodes(role="installer")[0]
controllers = get_nodes_by_ids(ceph_cluster, kwargs["config"]["controllers"])
drivers = get_nodes_by_ids(ceph_cluster, kwargs["config"]["drivers"]["hosts"])
try:
client.exec_command(cmd="sudo yum install -y --nogpgcheck ceph-common")
install(controllers)
for ctrl in controllers:
METHOD_NAME(ctrl, port=19088)
install(drivers)
data = list()
driver_count = kwargs["config"]["drivers"].get("count", 1)
for driver in drivers:
for i in range(driver_count):
port = 18088 + 100 * i
METHOD_NAME(driver, port)
data.append(
{
"name": driver.shortname,
"ip_address": driver.ip_address,
"port": port,
}
)
config(controllers[0], data=data)
execute_cosbench_script(drivers, script=f"start-driver.sh {driver_count}")
execute_cosbench_script(controllers, script="start-controller.sh")
get_or_create_user(client)
except BaseException as be: # noqa
LOG.error(be)
return 1
LOG.info("Successfully deployed COSBench!!!")
return 0
|
3,299 |
instance to type environment
|
from __future__ import annotations
from mypy.expandtype import expand_type
from mypy.nodes import TypeInfo
from mypy.types import AnyType, Instance, TupleType, Type, TypeOfAny, TypeVarId, has_type_vars
def map_instance_to_supertype(instance: Instance, superclass: TypeInfo) -> Instance:
"""Produce a supertype of `instance` that is an Instance
of `superclass`, mapping type arguments up the chain of bases.
If `superclass` is not a nominal superclass of `instance.type`,
then all type arguments are mapped to 'Any'.
"""
if instance.type == superclass:
# Fast path: `instance` already belongs to `superclass`.
return instance
if superclass.fullname == "builtins.tuple" and instance.type.tuple_type:
if has_type_vars(instance.type.tuple_type):
# We special case mapping generic tuple types to tuple base, because for
# such tuples fallback can't be calculated before applying type arguments.
alias = instance.type.special_alias
assert alias is not None
if not alias._is_recursive:
# Unfortunately we can't support this for generic recursive tuples.
# If we skip this special casing we will fall back to tuple[Any, ...].
env = METHOD_NAME(instance)
tuple_type = expand_type(instance.type.tuple_type, env)
if isinstance(tuple_type, TupleType):
# Make the import here to avoid cyclic imports.
import mypy.typeops
return mypy.typeops.tuple_fallback(tuple_type)
if not superclass.type_vars:
# Fast path: `superclass` has no type variables to map to.
return Instance(superclass, [])
return map_instance_to_supertypes(instance, superclass)[0]
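# Informal illustration (simplified, not an executable doctest): mapping an Instance
# of `class A(List[int])` to the TypeInfo for Sequence yields Sequence[int]; mapping
# it to a class that is not a nominal supertype falls back to Any type arguments.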
def map_instance_to_supertypes(instance: Instance, supertype: TypeInfo) -> list[Instance]:
# FIX: Currently we should only have one supertype per interface, so no
# need to return an array
result: list[Instance] = []
for path in class_derivation_paths(instance.type, supertype):
types = [instance]
for sup in path:
a: list[Instance] = []
for t in types:
a.extend(map_instance_to_direct_supertypes(t, sup))
types = a
result.extend(types)
if result:
return result
else:
# Nothing. Presumably due to an error. Construct a dummy using Any.
any_type = AnyType(TypeOfAny.from_error)
return [Instance(supertype, [any_type] * len(supertype.type_vars))]
def class_derivation_paths(typ: TypeInfo, supertype: TypeInfo) -> list[list[TypeInfo]]:
"""Return an array of non-empty paths of direct base classes from
type to supertype. Return [] if no such path could be found.
InterfaceImplementationPaths(A, B) == [[B]] if A inherits B
InterfaceImplementationPaths(A, C) == [[B, C]] if A inherits B and
B inherits C
"""
# FIX: Currently we might only ever have a single path, so this could be
# simplified
result: list[list[TypeInfo]] = []
for base in typ.bases:
btype = base.type
if btype == supertype:
result.append([btype])
else:
# Try constructing a longer path via the base class.
for path in class_derivation_paths(btype, supertype):
result.append([btype] + path)
return result
def map_instance_to_direct_supertypes(instance: Instance, supertype: TypeInfo) -> list[Instance]:
    # FIX: There should only be one supertype, always.
typ = instance.type
result: list[Instance] = []
for b in typ.bases:
if b.type == supertype:
env = METHOD_NAME(instance)
t = expand_type(b, env)
assert isinstance(t, Instance)
result.append(t)
if result:
return result
else:
# Relationship with the supertype not specified explicitly. Use dynamic
# type arguments implicitly.
any_type = AnyType(TypeOfAny.unannotated)
return [Instance(supertype, [any_type] * len(supertype.type_vars))]
def METHOD_NAME(instance: Instance) -> dict[TypeVarId, Type]:
"""Given an Instance, produce the resulting type environment for type
variables bound by the Instance's class definition.
An Instance is a type application of a class (a TypeInfo) to its
required number of type arguments. So this environment consists
of the class's type variables mapped to the Instance's actual
arguments. The type variables are mapped by their `id`.
"""
return {binder.id: arg for binder, arg in zip(instance.type.defn.type_vars, instance.args)}
|