id (int64, 0–300k) | label (string, 1–74 chars, ⌀ = null) | text (string, 4k–8k chars) |
---|---|---|
2,600 |
set mkl envs
|
#!/usr/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import subprocess as sp
DEFAULT_SEASTAR_PORT="3333"
JEMALLOC_244 = "libjemalloc.so.2.4.4"
JEMALLOC_251 = "libjemalloc.so.2.5.1"
def gen_cluster_info(workspace):
tf_config_json = os.environ.get("TF_CONFIG", "{}")
print("TF_CONFIG=", tf_config_json)
tf_config = json.loads(tf_config_json)
cluster = tf_config.get("cluster", {})
if not cluster:
print("TF_CONFIG cluster is empty")
return
ps_hosts = []
worker_hosts = []
chief_hosts = []
node_list = []
for key, value in cluster.items():
if "ps" == key:
ps_hosts = value
elif "worker" == key:
worker_hosts = value
elif "chief" == key:
chief_hosts = value
node_list.extend(value)
os.environ['TF_SEASTAR_ENDPOINT_MAP_PATH'] = '/tmp/'
print("Start to gen endpoint_map file.")
#endpoint_map_path = os.path.join(workspace, ".endpoint_map")
endpoint_map_path = "/tmp/.endpoint_map"
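# Each endpoint_map line maps the original "host:port" node address to the same
# host on the seastar port, e.g. "worker0:2222=worker0:3333" (hostnames illustrative only).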
with open(endpoint_map_path, 'w') as fout:
for node in node_list:
host = node[0:node.index(':')]
fout.write(node + "=" + host + ":" + DEFAULT_SEASTAR_PORT + "\n")
os.system("ls -ltr /tmp/.endpoint_map")
task = tf_config.get("task", {})
if not task:
print("TF_CONFIG task is empty")
return
task_index = task['index']
job_name = task['type']
return ps_hosts, worker_hosts, chief_hosts, job_name, task_index
def copy_python_binary(local_dir):
cmd_str = "cp /usr/bin/python " + os.path.join(local_dir, "python_bin")
return sp.call(cmd_str, shell=True)
def set_jemalloc_version(workspace):
strategy = os.environ.get("MEM_USAGE_STRATEGY", "")
cmd_str = ""
if "xmin" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
cmd_str += "export MALLOC_CONF=decay_time:0;"
elif "xmid" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
elif "min" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=dirty_decay_ms:0,muzzy_decay_ms:0;"
elif "mid" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,dirty_decay_ms:10000,muzzy_decay_ms:10000;"
elif "max" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:240000,muzzy_decay_ms:240000;"
elif "244" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
elif "251" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:60000,muzzy_decay_ms:60000;"
elif "close" == strategy:
pass
else:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:240000,muzzy_decay_ms:240000;"
return cmd_str
def pip_install_requirements(workspace):
requirements_path = os.path.join(workspace, "requirements.txt")
if not os.path.exists(requirements_path):
return 0
cmd_str = "$(which pip) install -r " + requirements_path
print("try to install requirements.txt from " + requirements_path)
return sp.call(cmd_str, shell=True)
def run_tensorflow_job(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd):
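# Assembles a single shell command, roughly:
#   cd <workspace>; [export JEMALLOC_VERSION=...; LD_PRELOAD=${JEMALLOC_VERSION}] ENV1=... $(which python) -u <tf_script> <tf_args>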
cmd_str = "cd " + workspace + ";"
if set_jemalloc_version_cmd:
cmd_str += set_jemalloc_version_cmd
cmd_str += "LD_PRELOAD=${JEMALLOC_VERSION} "
cmd_str += " ".join(tf_envs) + " $(which python) -u "
cmd_str += tf_script + " " + " ".join(tf_args)
print("run tensorflow command:", cmd_str)
return sp.call(cmd_str, shell=True)
def METHOD_NAME(job_name):
envs = []
if "ps" == job_name:
envs.append("OMP_NUM_THREADS=1")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
elif "worker" == job_name:
envs.append("OMP_NUM_THREADS=6")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
elif "evaluator" == job_name or "chief" == job_name:
envs.append("OMP_NUM_THREADS=1")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
else:
envs.append("OMP_NUM_THREADS=1")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
return envs
def set_network_threads(job_name):
envs = []
if "ps" == job_name:
envs.append("WORKER_DEFAULT_CORE_NUM=24")
elif "worker" == job_name:
envs.append("PS_DEFAULT_CORE_NUM=24")
return envs
if __name__ == "__main__":
print("start launching tensorflow job")
if "TF_WORKSPACE" not in os.environ:
print("TF_WORKSPACE env should be set.")
exit(1)
workspace = os.environ.get("TF_WORKSPACE", "")
if "TF_SCRIPT" not in os.environ:
print("TF_SCRIPT env should be set.")
exit(1)
tf_script = os.environ.get("TF_SCRIPT", "")
if "JEMALLOC_PATH" not in os.environ:
jemalloc_path = workspace
else:
jemalloc_path = os.environ.get("JEMALLOC_PATH", "")
#ret_code = copy_python_binary(workspace)
#if (ret_code != 0):
# exit(ret_code)
tf_args = sys.argv[1:]
tf_envs = []
#tf_envs.append("TF_SEASTAR_ENDPOINT_MAP_PATH=/tmp/")
if "TF_CONFIG" in os.environ:
ps_hosts, worker_hosts, chief_hosts, job_name, task_index = gen_cluster_info(workspace)
os.environ["TASK_INDEX"] = str(task_index)
os.environ["JOB_NAME"] = str(job_name)
#tf_envs.extend(set_mkl_envs(job_name))
set_jemalloc_version_cmd = set_jemalloc_version(jemalloc_path)
ret_code = pip_install_requirements(workspace)
if (ret_code != 0):
exit(ret_code)
ret_code = run_tensorflow_job(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd)
if (ret_code != 0):
exit(ret_code)
|
2,601 |
forward
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from fairseq import utils
from fairseq.logging import metrics
from fairseq.criterions import register_criterion
from .label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
LabelSmoothedCrossEntropyCriterionConfig,
)
from dataclasses import dataclass, field
@dataclass
class LabelSmoothedCrossEntropyCriterionWithAlignmentConfig(
LabelSmoothedCrossEntropyCriterionConfig
):
alignment_lambda: float = field(
default=0.05, metadata={"help": "weight for the alignment loss"}
)
@register_criterion(
"label_smoothed_cross_entropy_with_alignment",
dataclass=LabelSmoothedCrossEntropyCriterionWithAlignmentConfig,
)
class LabelSmoothedCrossEntropyCriterionWithAlignment(
LabelSmoothedCrossEntropyCriterion
):
def __init__(self, task, sentence_avg, label_smoothing, alignment_lambda):
super().__init__(task, sentence_avg, label_smoothing)
self.alignment_lambda = alignment_lambda
def METHOD_NAME(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"nll_loss": utils.item(nll_loss.data) if reduce else nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
alignment_loss = None
# Compute alignment loss only for training set and non dummy batches.
if "alignments" in sample and sample["alignments"] is not None:
alignment_loss = self.compute_alignment_loss(sample, net_output)
if alignment_loss is not None:
logging_output["alignment_loss"] = utils.item(alignment_loss.data)
loss += self.alignment_lambda * alignment_loss
return loss, sample_size, logging_output
def compute_alignment_loss(self, sample, net_output):
attn_prob = net_output[1]["attn"][0]
bsz, tgt_sz, src_sz = attn_prob.shape
attn = attn_prob.view(bsz * tgt_sz, src_sz)
align = sample["alignments"]
align_weights = sample["align_weights"].float()
if len(align) > 0:
# Alignment loss computation. align (shape [:, 2]) contains the src-tgt index pairs corresponding to
# the alignments. align_weights (shape [:]) contains the 1 / frequency of a tgt index for normalizing.
loss = -(
(attn[align[:, 1][:, None], align[:, 0][:, None]]).log()
* align_weights[:, None]
).sum()
else:
return None
return loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
nll_loss_sum = utils.item(
sum(log.get("nll_loss", 0) for log in logging_outputs)
)
alignment_loss_sum = utils.item(
sum(log.get("alignment_loss", 0) for log in logging_outputs)
)
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_scalar(
"alignment_loss",
alignment_loss_sum / sample_size / math.log(2),
sample_size,
round=3,
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
return True
|
2,602 |
list experiment
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import json
import time
import click
from rich.console import Console
from rich.json import JSON as richJSON
from rich.panel import Panel
from rich.table import Table
from submarine.cli.config.config import loadConfig
from submarine.client.api.experiment_client import ExperimentClient
from submarine.client.exceptions import ApiException
submarineCliConfig = loadConfig()
experimentClient = ExperimentClient(
host=f"http://{submarineCliConfig.connection.hostname}:{submarineCliConfig.connection.port}"
)
POLLING_INTERVAL = 1 # sec
TIMEOUT = 30 # sec
@click.command("experiment")
def METHOD_NAME():
"""List experiments"""
COLS_TO_SHOW = ["Name", "Id", "Tags", "Finished Time", "Created Time", "Running Time", "Status"]
console = Console()
try:
thread = experimentClient.list_experiments_async()
timeout = time.time() + TIMEOUT
with console.status("[bold green] Fetching Experiments..."):
while not thread.ready():
time.sleep(POLLING_INTERVAL)
if time.time() > timeout:
console.print("[bold red] Timeout!")
return
result = thread.get()
results = result.result
results = list(
map(
lambda r: [
r["spec"]["meta"]["name"],
r["experimentId"],
",".join(r["spec"]["meta"]["tags"]),
r["finishedTime"],
r["createdTime"],
r["runningTime"],
r["status"],
],
results,
)
)
table = Table(title="List of Experiments")
for col in COLS_TO_SHOW:
table.add_column(col, overflow="fold")
for res in results:
table.add_row(*res)
console.print(table)
except ApiException as err:
if err.body is not None:
errbody = json.loads(err.body)
click.echo(f"[Api Error] {errbody['message']}")
else:
click.echo(f"[Api Error] {err}")
@click.command("experiment")
@click.argument("id")
def get_experiment(id):
"""Get experiments"""
console = Console()
try:
thread = experimentClient.get_experiment_async(id)
timeout = time.time() + TIMEOUT
with console.status(f"[bold green] Fetching Experiment(id = {id} )..."):
while not thread.ready():
time.sleep(POLLING_INTERVAL)
if time.time() > timeout:
console.print("[bold red] Timeout!")
return
result = thread.get()
result = result.result
json_data = richJSON.from_data(result)
console.print(Panel(json_data, title=f"Experiment(id = {id} )"))
except ApiException as err:
if err.body is not None:
errbody = json.loads(err.body)
click.echo(f"[Api Error] {errbody['message']}")
else:
click.echo(f"[Api Error] {err}")
@click.command("experiment")
@click.argument("id")
@click.option("--wait/--no-wait", is_flag=True, default=False)
def delete_experiment(id, wait):
"""Delete experiment"""
console = Console()
try:
thread = experimentClient.delete_experiment_async(id)
timeout = time.time() + TIMEOUT
with console.status(f"[bold green] Deleting Experiment(id = {id} )..."):
while not thread.ready():
time.sleep(POLLING_INTERVAL)
if time.time() > timeout:
console.print("[bold red] Timeout!")
return
result = thread.get()
result = result.result
if wait:
if result["status"] == "Deleted":
console.print(f"[bold green] Experiment(id = {id} ) deleted")
else:
console.print("[bold red] Failed")
json_data = richJSON.from_data(result)
console.print(Panel(json_data, title=f"Experiment(id = {id} )"))
except ApiException as err:
if err.body is not None:
errbody = json.loads(err.body)
click.echo(f"[Api Error] {errbody['message']}")
else:
click.echo(f"[Api Error] {err}")
|
2,603 |
gen kjt
|
from collections import namedtuple
from functools import partial
import torch
from torchrec.models import deepfm, dlrm
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
from ..registry import ModelAttribute, model_zoo
BATCH = 2
SHAPE = 10
def gen_kt():
KT = KeyedTensor(keys=["f1", "f2"], length_per_key=[SHAPE, SHAPE], values=torch.rand((BATCH, 2 * SHAPE)))
return KT
# KeyedJaggedTensor
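# offsets [0, 2, 4, 6, 8] split the 8 values into 4 bags of length 2, i.e. for a
# batch of 2: f1 -> [[1, 2], [3, 4]] and f2 -> [[5, 6], [7, 8]].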
def METHOD_NAME():
KJT = KeyedJaggedTensor.from_offsets_sync(keys=["f1", "f2"],
values=torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]),
offsets=torch.tensor([0, 2, 4, 6, 8]))
return KJT
data_gen_fn = lambda: dict(features=torch.rand((BATCH, SHAPE)))
def interaction_arch_data_gen_fn():
KT = gen_kt()
return dict(dense_features=torch.rand((BATCH, SHAPE)), sparse_features=KT)
def simple_dfm_data_gen_fn():
KJT = METHOD_NAME()
return dict(dense_features=torch.rand((BATCH, SHAPE)), sparse_features=KJT)
def sparse_arch_data_gen_fn():
KJT = METHOD_NAME()
return dict(features=KJT)
def output_transform_fn(x):
if isinstance(x, KeyedTensor):
output = dict()
for key in x.keys():
output[key] = x[key]
return output
else:
return dict(output=x)
def get_ebc():
# EmbeddingBagCollection
eb1_config = EmbeddingBagConfig(name="t1", embedding_dim=SHAPE, num_embeddings=SHAPE, feature_names=["f1"])
eb2_config = EmbeddingBagConfig(name="t2", embedding_dim=SHAPE, num_embeddings=SHAPE, feature_names=["f2"])
return EmbeddingBagCollection(tables=[eb1_config, eb2_config], device=torch.device('cpu'))
def sparse_arch_model_fn():
ebc = get_ebc()
return deepfm.SparseArch(ebc)
def simple_deep_fmnn_model_fn():
ebc = get_ebc()
return deepfm.SimpleDeepFMNN(SHAPE, ebc, SHAPE, SHAPE)
def dlrm_model_fn():
ebc = get_ebc()
return dlrm.DLRM(ebc, SHAPE, [SHAPE, SHAPE], [5, 1])
def dlrm_sparsearch_model_fn():
ebc = get_ebc()
return dlrm.SparseArch(ebc)
model_zoo.register(name='deepfm_densearch',
model_fn=partial(deepfm.DenseArch, SHAPE, SHAPE, SHAPE),
data_gen_fn=data_gen_fn,
output_transform_fn=output_transform_fn)
model_zoo.register(name='deepfm_interactionarch',
model_fn=partial(deepfm.FMInteractionArch, SHAPE * 3, ["f1", "f2"], SHAPE),
data_gen_fn=interaction_arch_data_gen_fn,
output_transform_fn=output_transform_fn)
model_zoo.register(name='deepfm_overarch',
model_fn=partial(deepfm.OverArch, SHAPE),
data_gen_fn=data_gen_fn,
output_transform_fn=output_transform_fn)
model_zoo.register(name='deepfm_simpledeepfmnn',
model_fn=simple_deep_fmnn_model_fn,
data_gen_fn=simple_dfm_data_gen_fn,
output_transform_fn=output_transform_fn)
model_zoo.register(name='deepfm_sparsearch',
model_fn=sparse_arch_model_fn,
data_gen_fn=sparse_arch_data_gen_fn,
output_transform_fn=output_transform_fn)
model_zoo.register(name='dlrm',
model_fn=dlrm_model_fn,
data_gen_fn=simple_dfm_data_gen_fn,
output_transform_fn=output_transform_fn)
model_zoo.register(name='dlrm_densearch',
model_fn=partial(dlrm.DenseArch, SHAPE, [SHAPE, SHAPE]),
data_gen_fn=data_gen_fn,
output_transform_fn=output_transform_fn)
model_zoo.register(name='dlrm_interactionarch',
model_fn=partial(dlrm.InteractionArch, 2),
data_gen_fn=interaction_arch_data_gen_fn,
output_transform_fn=output_transform_fn)
model_zoo.register(name='dlrm_overarch',
model_fn=partial(dlrm.OverArch, SHAPE, [5, 1]),
data_gen_fn=data_gen_fn,
output_transform_fn=output_transform_fn)
model_zoo.register(name='dlrm_sparsearch',
model_fn=dlrm_sparsearch_model_fn,
data_gen_fn=sparse_arch_data_gen_fn,
output_transform_fn=output_transform_fn)
|
2,604 |
test api no valid parameters
|
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2022, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import pytest
from wetterdienst.exceptions import NoParametersFound
from wetterdienst.provider.dwd.observation import (
DwdObservationDataset,
DwdObservationRequest,
DwdObservationResolution,
)
@pytest.mark.remote
@pytest.mark.parametrize(
"ts_skip_criteria,expected_stations",
[("min", ["05906", "04928"]), ("mean", ["05426", "04177"]), ("max", ["00377", "05426"])],
)
def test_api_skip_empty_stations(settings_skip_empty_true, ts_skip_criteria, expected_stations):
# override skip criteria
settings_skip_empty_true.ts_skip_criteria = ts_skip_criteria
settings_skip_empty_true.ts_skip_threshold = 0.6
request = DwdObservationRequest(
parameter=["kl", "solar"],
resolution="daily",
start_date="2021-01-01",
end_date="2021-12-31",
settings=settings_skip_empty_true,
).filter_by_rank(latlon=(49.19780976647141, 8.135207205143768), rank=2)
values = request.values.all()
assert (
values.df.get_column("station_id").take(0).to_list() != request.df.get_column("station_id").take(0).to_list()
) # not supposed to be the first station of the list
assert values.df.get_column("station_id").unique(maintain_order=True).to_list() == expected_stations
assert values.df_stations.get_column("station_id").to_list() == expected_stations
@pytest.mark.remote
def test_api_skip_empty_stations_equal_on_any_skip_criteria_with_one_parameter(settings_skip_empty_true):
"""If there is only one parameter any skip criteria (min, mean, max) should return the same station"""
def _get_values(settings):
return (
DwdObservationRequest(
parameter=["sunshine_duration"],
resolution="daily",
start_date="1990-01-01",
end_date="2021-12-31",
settings=settings,
)
.filter_by_rank(latlon=(49.19780976647141, 8.135207205143768), rank=1)
.values.all()
)
settings_skip_empty_true.ts_skip_threshold = 0.9
expected_station = ["05426"]
settings_skip_empty_true.ts_skip_criteria = "min"
values = _get_values(settings_skip_empty_true)
assert values.df.get_column("station_id").unique().to_list() == expected_station
assert values.df_stations.get_column("station_id").to_list() == expected_station
settings_skip_empty_true.ts_skip_criteria = "mean"
values = _get_values(settings_skip_empty_true)
assert values.df.get_column("station_id").unique(maintain_order=True).to_list() == expected_station
assert values.df_stations.get_column("station_id").to_list() == expected_station
settings_skip_empty_true.ts_skip_criteria = "max"
values = _get_values(settings_skip_empty_true)
assert values.df.get_column("station_id").unique(maintain_order=True).to_list() == expected_station
assert values.df_stations.get_column("station_id").to_list() == expected_station
@pytest.mark.remote
def test_api_dropna(settings_dropna_true):
request = DwdObservationRequest(
parameter=[
"temperature_air",
"precipitation",
],
resolution="minute_10",
start_date="2021-01-01",
end_date="2021-12-31",
settings=settings_dropna_true,
).filter_by_rank(latlon=(49.19780976647141, 8.135207205143768), rank=20)
values = next(request.values.query())
assert values.df.shape[0] == 51971
def METHOD_NAME(default_settings):
with pytest.raises(NoParametersFound):
DwdObservationRequest(
parameter=[
DwdObservationDataset.TEMPERATURE_AIR,
],
resolution=DwdObservationResolution.DAILY,
settings=default_settings,
)
def test_api_partly_valid_parameters(default_settings, caplog):
request = DwdObservationRequest(
parameter=[
DwdObservationDataset.TEMPERATURE_AIR,
DwdObservationDataset.WIND,
DwdObservationDataset.PRECIPITATION,
DwdObservationDataset.SOLAR,
],
resolution=DwdObservationResolution.DAILY,
settings=default_settings,
)
assert "dataset WIND is not a valid dataset for resolution DAILY" in caplog.text
assert "dataset PRECIPITATION is not a valid dataset for resolution DAILY" in caplog.text
assert request.parameter == [
(
DwdObservationDataset.SOLAR,
DwdObservationDataset.SOLAR,
)
]
|
2,605 |
get standard fx prices
|
"""
Spot fx prices
"""
import numpy as np
import pandas as pd
import datetime
from sysdata.base_data import baseData
from syscore.pandas.merge_data_keeping_past_data import SPIKE_IN_DATA
from syslogging.logger import *
from sysobjects.spot_fx_prices import fxPrices, get_fx_tuple_from_code, DEFAULT_CURRENCY
DEFAULT_DATES = pd.date_range(
start=datetime.datetime(1970, 1, 1), freq="B", end=datetime.datetime.now()
)
DEFAULT_RATE_SERIES = pd.Series(np.full(len(DEFAULT_DATES), 1.0), index=DEFAULT_DATES)
USE_CHILD_CLASS_ERROR = "You need to use a child class of fxPricesData"
class fxPricesData(baseData):
"""
Read and write data class to get fx prices
We'd inherit from this class for a specific implementation
"""
def __repr__(self):
return USE_CHILD_CLASS_ERROR
def keys(self):
return self.get_list_of_fxcodes()
def __getitem__(self, code):
return self.get_fx_prices(code)
def get_fx_prices(self, fx_code: str) -> fxPrices:
"""
Get a historical series of FX prices
:param fx_code: currency code, in the form EURUSD
:return: fxData object
"""
currency1, currency2 = get_fx_tuple_from_code(fx_code)
if currency1 == currency2:
# Trivial, just a bunch of 1's
fx_data = DEFAULT_RATE_SERIES
elif currency2 == DEFAULT_CURRENCY:
# We ought to have data
fx_data = self.METHOD_NAME(fx_code)
elif currency1 == DEFAULT_CURRENCY:
# inversion
fx_data = self._get_fx_prices_for_inversion(fx_code)
else:
# Try a cross rate
fx_data = self._get_fx_cross(fx_code)
return fx_data
def METHOD_NAME(self, fx_code: str) -> fxPrices:
currency1, currency2 = get_fx_tuple_from_code(fx_code)
assert currency2 == DEFAULT_CURRENCY
fx_data = self._get_fx_prices_vs_default(currency1)
return fx_data
def _get_fx_prices_for_inversion(self, fx_code: str) -> fxPrices:
"""
Get a historical series of FX prices, must be USDXXX
:param currency2
:return: fxData
"""
currency1, currency2 = get_fx_tuple_from_code(fx_code)
assert currency1 == DEFAULT_CURRENCY
raw_fx_data = self._get_fx_prices_vs_default(currency2)
if raw_fx_data.empty:
log = self.log.setup(**{CURRENCY_CODE_LOG_LABEL: fx_code})
log.warning(
"Data for %s is missing, needed to calculate %s"
% (currency2 + DEFAULT_CURRENCY, DEFAULT_CURRENCY + currency2)
)
return raw_fx_data
inverted_fx_data = 1.0 / raw_fx_data
return inverted_fx_data
def _get_fx_cross(self, fx_code: str) -> fxPrices:
"""
Get a currency cross rate XXXYYY, eg not XXXUSD or USDXXX or XXXXXX
:return: fxPrices
"""
currency1, currency2 = get_fx_tuple_from_code(fx_code)
currency1_vs_default = self._get_fx_prices_vs_default(currency1)
currency2_vs_default = self._get_fx_prices_vs_default(currency2)
if currency1_vs_default.empty or currency2_vs_default.empty:
return fxPrices.create_empty()
(aligned_c1, aligned_c2) = currency1_vs_default.align(
currency2_vs_default, join="outer"
)
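# Cross rate via the default currency, e.g. EURGBP = EURUSD / GBPUSD, using
# forward-filled series aligned on the union of both date indexes.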
fx_rate_series = aligned_c1.ffill() / aligned_c2.ffill()
return fx_rate_series
def _get_fx_prices_vs_default(self, currency1: str) -> fxPrices:
"""
Get a historical series of FX prices, must be XXXUSD
:param code: currency code, in the form EUR
:return: fxData object
"""
code = currency1 + DEFAULT_CURRENCY
fx_data = self._get_fx_prices(code)
return fx_data
def _get_fx_prices(self, code: str) -> fxPrices:
if not self.is_code_in_data(code):
log = self.log.setup(**{CURRENCY_CODE_LOG_LABEL: code})
log.warning("Currency %s is missing from list of FX data" % code)
return fxPrices.create_empty()
data = self._get_fx_prices_without_checking(code)
return data
def delete_fx_prices(self, code: str, are_you_sure=False):
log = self.log.setup(**{CURRENCY_CODE_LOG_LABEL: code})
if are_you_sure:
if self.is_code_in_data(code):
self._delete_fx_prices_without_any_warning_be_careful(code)
log.info("Deleted fx price data for %s" % code)
else:
# doesn't exist anyway
log.warning("Tried to delete non existent fx prices for %s" % code)
else:
log.warning("You need to call delete_fx_prices with a flag to be sure")
def is_code_in_data(self, code: str) -> bool:
if code in self.get_list_of_fxcodes():
return True
else:
return False
def add_fx_prices(
self, code: str, fx_price_data: fxPrices, ignore_duplication: bool = False
):
log = self.log.setup(**{CURRENCY_CODE_LOG_LABEL: code})
if self.is_code_in_data(code):
if ignore_duplication:
pass
else:
log.warning(
"There is already %s in the data, you have to delete it first, or set ignore_duplication=True, or use update_fx_prices"
% code
)
return None
self._add_fx_prices_without_checking_for_existing_entry(code, fx_price_data)
log.info("Added fx data for code %s" % code)
def update_fx_prices(
self, code: str, new_fx_prices: fxPrices, check_for_spike=True
) -> int:
"""
Checks existing data, adds any new data with a timestamp greater than the existing data
:param code: FX code
:param new_fx_prices: fxPrices object
:return: int, number of rows added
"""
log = self.log.setup(**{CURRENCY_CODE_LOG_LABEL: code})
old_fx_prices = self.get_fx_prices(code)
merged_fx_prices = old_fx_prices.add_rows_to_existing_data(
new_fx_prices, check_for_spike=check_for_spike
)
if merged_fx_prices is SPIKE_IN_DATA:
return SPIKE_IN_DATA
rows_added = len(merged_fx_prices) - len(old_fx_prices)
if rows_added == 0:
if len(old_fx_prices) == 0:
log.debug("No new or old prices for %s" % code)
else:
log.debug(
"No additional data since %s for %s"
% (str(old_fx_prices.index[-1]), code)
)
return 0
self.add_fx_prices(code, merged_fx_prices, ignore_duplication=True)
log.debug("Added %d additional rows for %s" % (rows_added, code))
return rows_added
def get_list_of_fxcodes(self):
raise NotImplementedError(USE_CHILD_CLASS_ERROR)
def _add_fx_prices_without_checking_for_existing_entry(self, code, fx_price_data):
raise NotImplementedError(USE_CHILD_CLASS_ERROR)
def _delete_fx_prices_without_any_warning_be_careful(self, code):
raise NotImplementedError(USE_CHILD_CLASS_ERROR)
def _get_fx_prices_without_checking(self, code):
raise NotImplementedError(USE_CHILD_CLASS_ERROR)
|
2,606 |
test gen export chunk filename
|
import builtins
import gzip
import itertools
import glob
import os
from unittest import TestCase
from pipelinewise.fastsync.commons import split_gzip
DATA_WITH_100_BYTES = b"""0,12345678
1,12345678
2,12345678
3,12345678
4,12345678
5,12345678
6,12345678
7,12345678
8,12345678
9,12345678
"""
def unlink(filename):
"""Helper function to delete file silently"""
try:
os.unlink(filename)
except FileNotFoundError:
pass
class TestSplitGzipFile(TestCase):
"""
Unit tests for SplitGzipFile
"""
filename = '{}_{}_tmp'.format('@test', os.getpid())
def setUp(self):
unlink(self.filename)
def tearDown(self):
for temp_file in glob.glob('@test_*_tmp*'):
unlink(temp_file)
# pylint: disable=W0212
def test_bytes_to_megabytes(self):
"""
Test bytes to megabytes transformer
"""
gzip_splitter = split_gzip.SplitGzipFile('foo')
# Using binary unit
self.assertEqual(gzip_splitter._bytes_to_megabytes(1024 ** 2), 1)
self.assertEqual(gzip_splitter._bytes_to_megabytes(1024 ** 2 * 10), 10)
self.assertEqual(gzip_splitter._bytes_to_megabytes(1024 ** 3), 1024)
# Using SI kilo unit
self.assertEqual(round(gzip_splitter._bytes_to_megabytes(1000), 5), 0.00095)
self.assertEqual(
round(gzip_splitter._bytes_to_megabytes(1000 ** 2 * 10), 5), 9.53674
)
self.assertEqual(
round(gzip_splitter._bytes_to_megabytes(1000 ** 3), 5), 953.67432
)
def test_parameter_validation(self):
"""
Test if passing invalid parameters raising exceptions
"""
with self.assertRaises(ValueError):
split_gzip.open('basefile', mode='invalidmode')
with self.assertRaises(ValueError):
split_gzip.open('basefile', mode='wt', chunk_size_mb=0)
with self.assertRaises(ValueError):
split_gzip.open('basefile', max_chunks=-1)
# pylint: disable=W0212
def METHOD_NAME(self):
"""
Test generating chunked filenames
"""
# split_large_files should be disabled when max_chunks is zero
gzip_splitter = split_gzip.SplitGzipFile(
'basefile', chunk_size_mb=1000, max_chunks=0
)
self.assertEqual(gzip_splitter._gen_chunk_filename(), 'basefile')
# first chunk should be part nr 1
gzip_splitter = split_gzip.SplitGzipFile(
'basefile', chunk_size_mb=1000, max_chunks=20
)
self.assertEqual(gzip_splitter._gen_chunk_filename(), 'basefile.part00001')
# generated file part should be in sync with chunk_seq
gzip_splitter.chunk_seq = 5
self.assertEqual(gzip_splitter._gen_chunk_filename(), 'basefile.part00005')
# chunk seq should not increase if chunk_size is lower than split_file_chunk_size_mb
gzip_splitter.current_chunk_size_mb = 500
self.assertEqual(gzip_splitter._gen_chunk_filename(), 'basefile.part00005')
# chunk seq should increase and size should reset if current_chunk_size_mb greater than chunk_size_mb
gzip_splitter.current_chunk_size_mb = 1050
self.assertEqual(gzip_splitter._gen_chunk_filename(), 'basefile.part00006')
self.assertEqual(gzip_splitter.current_chunk_size_mb, 0)
# chunk seq should not increase further if chunk_seq equals to split_file_max_chunks
gzip_splitter.chunk_seq = 20
self.assertEqual(gzip_splitter._gen_chunk_filename(), 'basefile.part00020')
# chunk seq should not increase further if chunk_seq reached max_chunks, even if the chunk size is greater
# than chunk_size_mb
gzip_splitter.current_chunk_size_mb = 1050
self.assertEqual(gzip_splitter._gen_chunk_filename(), 'basefile.part00020')
def test_write_with_no_split(self):
"""
Write gzip without splitting it and reading it
"""
# max_chunk = 0 should create a file with no splitting and binary mode
with split_gzip.SplitGzipFile(self.filename, 'wb', max_chunks=0) as f_write:
f_write.write(DATA_WITH_100_BYTES * 50)
with gzip.open(self.filename, 'rb') as f_read:
file_content = f_read.read()
self.assertEqual(file_content, DATA_WITH_100_BYTES * 50)
# max_chunk = 0 should create a file with no splitting and text mode
with split_gzip.SplitGzipFile(self.filename, 'wt', max_chunks=0) as f_write:
f_write.write(str(DATA_WITH_100_BYTES * 50))
with gzip.open(self.filename, 'rt') as f_read:
file_content = f_read.read()
self.assertEqual(file_content, str(DATA_WITH_100_BYTES * 50))
def test_write_with_no_split_no_compress(self):
"""
Write gzip without splitting it and reading it
"""
# max_chunk = 0 should create a file with no splitting and binary mode
with split_gzip.SplitGzipFile(
self.filename, 'wb', max_chunks=0, compress=False) as f_write:
f_write.write(DATA_WITH_100_BYTES * 50)
with builtins.open(self.filename, 'rb') as f_read:
file_content = f_read.read()
self.assertEqual(file_content, DATA_WITH_100_BYTES * 50)
# max_chunk = 0 should create a file with no splitting and text mode
with split_gzip.SplitGzipFile(self.filename, 'wt', max_chunks=0) as f_write:
f_write.write(str(DATA_WITH_100_BYTES * 50))
with gzip.open(self.filename, 'rt') as f_read:
file_content = f_read.read()
self.assertEqual(file_content, str(DATA_WITH_100_BYTES * 50))
def test_write_with_single_chunk(self):
"""
Write all data into one chunk
"""
# test data fits into one chunk
with split_gzip.SplitGzipFile(
self.filename, 'wb', chunk_size_mb=1000, max_chunks=20
) as f_write:
f_write.write(DATA_WITH_100_BYTES * 50)
with gzip.open(f'{self.filename}.part00001', 'rb') as f_read:
file_content = f_read.read()
self.assertEqual(file_content, DATA_WITH_100_BYTES * 50)
def test_write_with_multiple_chunks(self):
"""
Write data into multiple gzip files
"""
# test data is split across multiple chunks
with split_gzip.SplitGzipFile(
self.filename,
'wb',
chunk_size_mb=split_gzip.SplitGzipFile._bytes_to_megabytes(200),
max_chunks=20,
est_compr_rate=1,
) as f_write:
# Write 1100 bytes of test data
for _ in itertools.repeat(None, 11):
f_write.write(DATA_WITH_100_BYTES)
# Result should be in 6 gzip files
with gzip.open(f'{self.filename}.part00001', 'rb') as f_read:
self.assertEqual(f_read.read(), DATA_WITH_100_BYTES * 2)
with gzip.open(f'{self.filename}.part00002', 'rb') as f_read:
self.assertEqual(f_read.read(), DATA_WITH_100_BYTES * 2)
with gzip.open(f'{self.filename}.part00003', 'rb') as f_read:
self.assertEqual(f_read.read(), DATA_WITH_100_BYTES * 2)
with gzip.open(f'{self.filename}.part00004', 'rb') as f_read:
self.assertEqual(f_read.read(), DATA_WITH_100_BYTES * 2)
with gzip.open(f'{self.filename}.part00005', 'rb') as f_read:
self.assertEqual(f_read.read(), DATA_WITH_100_BYTES * 2)
# Last chunk should be smaller
with gzip.open(f'{self.filename}.part00006', 'rb') as f_read:
self.assertEqual(f_read.read(), DATA_WITH_100_BYTES)
|
2,607 |
perform create
|
from django.db import transaction
from django.utils.translation import gettext_lazy as _
import django_filters
import reversion
from rest_framework import exceptions, serializers, viewsets
from resources.api.base import NullableDateTimeField, TranslatedModelSerializer, register_view
from .models import CateringProduct, CateringProductCategory, CateringOrder, CateringOrderLine, CateringProvider
class CateringProviderSerializer(TranslatedModelSerializer):
class Meta:
model = CateringProvider
fields = ('id', 'name', 'price_list_url', 'units')
class CateringProviderFilter(django_filters.rest_framework.FilterSet):
unit = django_filters.CharFilter(field_name='units')
class Meta:
model = CateringProvider
fields = ('unit',)
class CateringProviderViewSet(viewsets.ReadOnlyModelViewSet):
queryset = CateringProvider.objects.prefetch_related('units')
serializer_class = CateringProviderSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
filterset_class = CateringProviderFilter
register_view(CateringProviderViewSet, 'catering_provider')
class CateringProductCategorySerializer(TranslatedModelSerializer):
class Meta:
model = CateringProductCategory
fields = ('id', 'name', 'products', 'provider')
class CateringProductCategoryFilter(django_filters.rest_framework.FilterSet):
class Meta:
model = CateringProductCategory
fields = ('provider',)
class CateringProductCategoryViewSet(viewsets.ReadOnlyModelViewSet):
queryset = CateringProductCategory.objects.prefetch_related('products')
serializer_class = CateringProductCategorySerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
filterset_class = CateringProductCategoryFilter
register_view(CateringProductCategoryViewSet, 'catering_product_category')
class CateringProductSerializer(TranslatedModelSerializer):
class Meta:
model = CateringProduct
fields = ('id', 'name', 'category', 'description')
class CateringProductFilter(django_filters.rest_framework.FilterSet):
provider = django_filters.NumberFilter(field_name='category__provider')
class Meta:
model = CateringProduct
fields = ('provider', 'category')
class CateringProductViewSet(viewsets.ReadOnlyModelViewSet):
queryset = CateringProduct.objects.all()
serializer_class = CateringProductSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
filterset_class = CateringProductFilter
register_view(CateringProductViewSet, 'catering_product')
# taken from https://github.com/encode/django-rest-framework/issues/3847
# needed because the product field must always be required, even with PATCH requests
class MonkeyPatchPartial:
"""
Work around bug #3847 in djangorestframework by monkey-patching the partial
attribute of the root serializer during the call to validate_empty_values.
"""
def __init__(self, root):
self._root = root
def __enter__(self):
self._old = getattr(self._root, 'partial')
setattr(self._root, 'partial', False)
def __exit__(self, *args):
setattr(self._root, 'partial', self._old)
class CateringOrderLineSerializer(serializers.ModelSerializer):
class Meta:
model = CateringOrderLine
fields = ('product', 'quantity')
def run_validation(self, *args, **kwargs):
with MonkeyPatchPartial(self.root):
return super().run_validation(*args, **kwargs)
class CateringOrderSerializer(serializers.ModelSerializer):
created_at = NullableDateTimeField(read_only=True)
modified_at = NullableDateTimeField(read_only=True)
order_lines = CateringOrderLineSerializer(many=True, required=True, allow_empty=False)
class Meta:
model = CateringOrder
fields = (
'id', 'created_at', 'modified_at', 'reservation', 'order_lines', 'invoicing_data', 'message',
'serving_time',
)
def _handle_order_lines(self, order, order_line_data):
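# Replace-all semantics: delete any existing order lines, then recreate them from the payload.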
order.order_lines.all().delete()
for order_line_datum in order_line_data:
CateringOrderLine.objects.create(order=order, **order_line_datum)
@transaction.atomic
def create(self, validated_data):
order_line_data = validated_data.pop('order_lines', [])
new_order = super().create(validated_data)
self._handle_order_lines(new_order, order_line_data)
return new_order
@transaction.atomic
def update(self, instance, validated_data):
order_line_data = validated_data.pop('order_lines', [])
updated_order = super().update(instance, validated_data)
self._handle_order_lines(updated_order, order_line_data)
return updated_order
def to_internal_value(self, data):
# Remove order lines with quantity == 0
if 'order_lines' in data and isinstance(data['order_lines'], list):
order_lines = data['order_lines']
data['order_lines'] = [x for x in order_lines if x.get('quantity') != 0]
return super().to_internal_value(data)
def validate(self, validated_data):
reservation = validated_data.get('reservation') or self.instance.reservation
if reservation:
resource = reservation.resource
user = self.context['request'].user
if reservation.user != user and not resource.can_modify_reservation_catering_orders(user):
raise exceptions.PermissionDenied(_("No permission to modify this reservation's catering orders."))
provider = validated_data['order_lines'][0]['product'].category.provider
validated_data['provider'] = provider
for order_line in validated_data['order_lines'][1:]:
if order_line['product'].category.provider != provider:
raise exceptions.ValidationError(_('The order contains products from several providers.'))
if reservation.resource.unit not in provider.units.all():
raise exceptions.ValidationError(
"The provider isn't available in the reservation's unit."
)
return validated_data
class CateringOrderFilter(django_filters.rest_framework.FilterSet):
class Meta:
model = CateringOrder
fields = ('reservation',)
class CateringOrderViewSet(viewsets.ModelViewSet):
queryset = CateringOrder.objects.prefetch_related('order_lines')
serializer_class = CateringOrderSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
filterset_class = CateringOrderFilter
def get_queryset(self):
return super().get_queryset().can_view(self.request.user)
def METHOD_NAME(self, serializer):
with reversion.create_revision():
instance = serializer.save()
reversion.set_user(self.request.user)
reversion.set_comment('Created using the API.')
instance.send_created_notification(request=self.request)
def perform_update(self, serializer):
with reversion.create_revision():
instance = serializer.save()
reversion.set_user(self.request.user)
reversion.set_comment('Updated using the API.')
# TODO somehow check that the order is actually modified before sending the notification?
instance.send_modified_notification(request=self.request)
def perform_destroy(self, instance):
instance.send_deleted_notification(request=self.request)
super().perform_destroy(instance)
register_view(CateringOrderViewSet, 'catering_order')
|
2,608 |
test update by
|
#
# Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
#
import time
import unittest
from pyarrow import csv
from pydeephaven import DHError, agg
from pydeephaven.updateby import ema_tick, cum_prod
from tests.testbase import BaseTestCase
class QueryTestCase(BaseTestCase):
def test_tail_update_static(self):
table = self.session.empty_table(10)
query = self.session.query(table).update(formulas=["Col1 = i + 1"]) \
.tail(5).update(formulas=["Col2=i*i"])
result_table = query.exec()
self.assertEqual(5, result_table.size)
time.sleep(1)
result_table2 = query.exec()
self.assertEqual(result_table.size, result_table2.size)
def test_tail_update_join_fail(self):
pa_table = csv.read_csv(self.csv_file)
test_table = self.session.import_table(pa_table)
right_table = self.session.empty_table(1000).update(["a = i"])
query = self.session.query(test_table)
(query.drop_columns(cols=['c'])
.where(["a > 10"])
.tail(10)
.join(right_table, on=["a"]))
with self.assertRaises(DHError):
query.exec()
def test_tail_update_join(self):
pa_table = csv.read_csv(self.csv_file)
test_table = self.session.import_table(pa_table)
right_table = self.session.empty_table(1000).update(["a = ii"])
query = self.session.query(test_table)
(query.drop_columns(cols=['c'])
.where(["a > 10"]).tail(10)
.join(right_table, on=["a"]))
result_table = query.exec()
self.assertTrue(result_table.size > 0)
def METHOD_NAME(self):
pa_table = csv.read_csv(self.csv_file)
test_table = self.session.import_table(pa_table)
ub_ops = [ema_tick(decay_ticks=100, cols=["ema_a = a"]),
cum_prod(cols=["cc = c", "cb = b"]),
]
query = self.session.query(test_table)
(query.drop_columns(cols=['e'])
.where(["a > 10"])
.update_by(ops=ub_ops, by=["b"])
.tail(10))
result_table = query.exec()
self.assertTrue(result_table.size == 10)
def test_snapshot(self):
test_table = self.session.time_table(period=1000000)
while test_table.snapshot().size < 100:
time.sleep(0.001)
query = self.session.query(test_table)
(query.update(formulas=["Col1 = i", "Col2 = i * 2"])
.where(["Col1 > 10"])
.snapshot()
.head(10))
result_table = query.exec()
self.assertEqual(result_table.to_arrow().num_rows, 10)
def test_snapshot_when(self):
source_table = (self.session.time_table(period=10_000_000)
.update(formulas=["Col1= i", "Col2 = i * 2"]).drop_columns(["Timestamp"]))
trigger_table = self.session.time_table(period=1_000_000_000)
query = self.session.query(source_table).snapshot_when(trigger_table=trigger_table, stamp_cols=["Timestamp"],
initial=True, incremental=True, history=False)
result_table = query.exec()
self.assertEqual(len(result_table.schema), len(source_table.schema) + 1)
def test_agg_by(self):
pa_table = csv.read_csv(self.csv_file)
table = self.session.import_table(pa_table)
query = self.session.query(table).agg_by(aggs=[agg.avg(cols=["a"])], by=["b"]) \
.update(formulas=["Col1 = i + 1"]) \
.tail(5).update(formulas=["Col2=i*i"])
result_table = query.exec()
self.assertEqual(5, result_table.size)
def test_agg_all_by(self):
pa_table = csv.read_csv(self.csv_file)
table = self.session.import_table(pa_table)
query = self.session.query(table).agg_all_by(agg=agg.avg(), by=["b"]) \
.update(formulas=["Col1 = i + 1"]) \
.tail(5).update(formulas=["Col2=i*i"])
result_table = query.exec()
self.assertEqual(5, result_table.size)
def test_where_in(self):
pa_table = csv.read_csv(self.csv_file)
test_table = self.session.import_table(pa_table)
unique_table = test_table.head(num_rows=50).select_distinct(
cols=["a", "c"]
)
with self.subTest("where_in"):
query = self.session.query(test_table)
(query.drop_columns(cols=['b'])
.update(["f = a * 10"])
.where_in(unique_table, cols=["c"])
.where(["f == a * 10"]))
result_table = query.exec()
self.assertLessEqual(unique_table.size, result_table.size)
with self.subTest("where_not_in"):
query = self.session.query(test_table)
(query.drop_columns(cols=['b'])
.update(["f = a * 10"])
.where_not_in(unique_table, cols=["c"])
.where(["f == a * 10"]))
result_table = query.exec()
self.assertGreaterEqual(test_table.size - unique_table.size, result_table.size)
if __name__ == '__main__':
unittest.main()
|
2,609 |
start
|
"""Websockets Transport classes and functions."""
import asyncio
import logging
from typing import Optional
from aiohttp import WSMessage, WSMsgType, web
from ...messaging.error import MessageParseError
from ..error import WireFormatParseError
from .base import BaseInboundTransport, InboundTransportSetupError
LOGGER = logging.getLogger(__name__)
class WsTransport(BaseInboundTransport):
"""Websockets Transport class."""
def __init__(self, host: str, port: int, create_session, **kwargs) -> None:
"""Initialize an inbound WebSocket transport instance.
Args:
host: Host to listen on
port: Port to listen on
create_session: Method to create a new inbound session
"""
super().__init__("ws", create_session, **kwargs)
self.host = host
self.port = port
self.site: web.BaseSite = None
self.heartbeat_interval: Optional[int] = self.root_profile.settings.get_int(
"transport.ws.heartbeat_interval"
)
self.timout_interval: Optional[int] = self.root_profile.settings.get_int(
"transport.ws.timout_interval"
)
# TODO: set scheme dynamically based on SSL settings (ws/wss)
@property
def scheme(self):
"""Accessor for this transport's scheme."""
return self._scheme
async def make_application(self) -> web.Application:
"""Construct the aiohttp application."""
app = web.Application()
app.add_routes([web.get("/", self.inbound_message_handler)])
return app
async def METHOD_NAME(self) -> None:
"""Start this transport.
Raises:
InboundTransportSetupError: If there was an error starting the webserver
"""
app = await self.make_application()
runner = web.AppRunner(app)
await runner.setup()
self.site = web.TCPSite(runner, host=self.host, port=self.port)
try:
await self.site.METHOD_NAME()
except OSError:
raise InboundTransportSetupError(
"Unable to start websocket server with host "
+ f"'{self.host}' and port '{self.port}'\n"
)
async def stop(self) -> None:
"""Stop this transport."""
if self.site:
await self.site.stop()
self.site = None
async def inbound_message_handler(self, request):
"""Message handler for inbound messages.
Args:
request: aiohttp request object
Returns:
The web response
"""
ws = web.WebSocketResponse(
autoping=True,
heartbeat=self.heartbeat_interval,
receive_timeout=self.timout_interval,
)
await ws.prepare(request)
loop = asyncio.get_event_loop()
client_info = {"host": request.host, "remote": request.remote}
session = await self.create_session(
accept_undelivered=True, can_respond=True, client_info=client_info
)
async with session:
inbound = loop.create_task(ws.receive())
outbound = loop.create_task(session.wait_response())
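# Select-style loop: wait on both the websocket receive and the session's
# outbound response; handle whichever finishes first, then re-arm that task.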
while not ws.closed:
await asyncio.wait(
(inbound, outbound), return_when=asyncio.FIRST_COMPLETED
)
if inbound.done():
msg: WSMessage = inbound.result()
LOGGER.info("Websocket received message: %s", msg.data)
if msg.type in (WSMsgType.TEXT, WSMsgType.BINARY):
try:
await session.receive(msg.data)
except (MessageParseError, WireFormatParseError):
await ws.close(1003) # unsupported data error
elif msg.type == WSMsgType.ERROR:
LOGGER.error(
"Websocket connection closed with exception: %s",
ws.exception(),
)
else:
LOGGER.error(
"Unexpected Websocket message type received: %s: %s, %s",
msg.type,
msg.data,
msg.extra,
)
if not ws.closed:
inbound = loop.create_task(ws.receive())
if outbound.done() and not ws.closed:
# response would be None if session was closed
response = outbound.result()
if isinstance(response, bytes):
await ws.send_bytes(response)
else:
await ws.send_str(response)
session.clear_response()
outbound = loop.create_task(session.wait_response())
if inbound and not inbound.done():
inbound.cancel()
if outbound and not outbound.done():
outbound.cancel()
if not ws.closed:
await ws.close()
LOGGER.info("Websocket connection closed")
return ws
|
2,610 |
test system importer file csv upload get
|
import urllib.parse
from django.contrib.auth.models import User
from django.contrib.messages import get_messages
from django.test import TestCase
from dfirtrack_main.tests.system_importer.config_functions import (
set_csv_import_username,
set_csv_skip_existing_system,
)
class SystemImporterFileCsvUploadGetMetaViewTestCase(TestCase):
"""system importer file CSV view tests"""
@classmethod
def setUpTestData(cls):
"""one-time setup"""
# create user
test_user = User.objects.create_user(
username='testuser_system_importer_file_csv_upload_get_meta',
password='39gE1C0nA1hmlcoxZjAd',
)
# change config
set_csv_import_username(test_user)
def test_system_importer_file_csv_upload_get_meta_not_logged_in(self):
"""test importer view"""
# create url
destination = '/login/?next=' + urllib.parse.quote(
'/system/importer/file/csv/upload/', safe=''
)
# get response
response = self.client.get('/system/importer/file/csv/upload/', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_system_importer_file_csv_upload_get_meta_logged_in(self):
"""test importer view"""
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_upload_get_meta',
password='39gE1C0nA1hmlcoxZjAd',
)
# get response
response = self.client.get('/system/importer/file/csv/upload/')
# compare
self.assertEqual(response.status_code, 200)
def METHOD_NAME(self):
"""test importer view"""
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_upload_get_meta',
password='39gE1C0nA1hmlcoxZjAd',
)
# get response
response = self.client.get('/system/importer/file/csv/upload/')
# compare
self.assertTemplateUsed(
response, 'dfirtrack_main/system/system_importer_file_csv.html'
)
def test_system_importer_file_csv_upload_get_meta_get_user_context(self):
"""test importer view"""
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_upload_get_meta',
password='39gE1C0nA1hmlcoxZjAd',
)
# get response
response = self.client.get('/system/importer/file/csv/upload/')
# compare
self.assertEqual(
str(response.context['user']),
'testuser_system_importer_file_csv_upload_get_meta',
)
def test_system_importer_file_csv_upload_get_meta_redirect(self):
"""test importer view"""
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_upload_get_meta',
password='39gE1C0nA1hmlcoxZjAd',
)
# create url
destination = urllib.parse.quote('/system/importer/file/csv/upload/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/upload', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_system_importer_file_csv_upload_get_meta_skip_warning(self):
"""test importer view"""
# change config
set_csv_skip_existing_system(False)
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_upload_get_meta',
password='39gE1C0nA1hmlcoxZjAd',
)
# get response
response = self.client.get('/system/importer/file/csv/upload/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(response.status_code, 200)
self.assertEqual(
messages[0].message, 'WARNING: Existing systems will be updated!'
)
self.assertEqual(messages[0].level_tag, 'warning')
|
2,611 |
port to eeprom mapping
|
try:
import time
from sonic_sfp.sfputilbase import SfpUtilBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
SFP_STATUS_INSERTED = '1'
SFP_STATUS_REMOVED = '0'
class SfpUtil(SfpUtilBase):
"""Platform specific SfpUtill class"""
_port_start = 0
_port_end = 53
_qsfp_port_start = 48
_ports_in_block = 54
_port_to_eeprom_mapping = {}
_port_to_i2c_mapping = {
0: 37,
1: 38,
2: 39,
3: 40,
4: 41,
5: 42,
6: 43,
7: 44,
8: 45,
9: 46,
10: 47,
11: 48,
12: 49,
13: 50,
14: 51,
15: 52,
16: 53,
17: 54,
18: 55,
19: 56,
20: 57,
21: 58,
22: 59,
23: 60,
24: 61,
25: 62,
26: 63,
27: 64,
28: 65,
29: 66,
30: 67,
31: 68,
32: 69,
33: 70,
34: 71,
35: 72,
36: 73,
37: 74,
38: 75,
39: 76,
40: 77,
41: 78,
42: 79,
43: 80,
44: 81,
45: 82,
46: 83,
47: 84,
48: 21,
49: 22,
50: 23,
51: 24,
52: 25,
53: 26,
}
_qsfp_ports = list(range(_qsfp_port_start, _ports_in_block + 1))
_present_status = dict()
def __init__(self):
eeprom_path = '/sys/bus/i2c/devices/{0}-0050/sfp_eeprom'
for x in range(self._port_start, self._port_end + 1):
port_eeprom_path = eeprom_path.format(self._port_to_i2c_mapping[x])
self._port_to_eeprom_mapping[x] = port_eeprom_path
self._present_status[x] = SFP_STATUS_REMOVED
SfpUtilBase.__init__(self)
def reset(self, port_num):
# Check for invalid port_num
if port_num < self._qsfp_port_start or port_num > self._port_end:
print("Error: port %d is not qsfp port" % port_num)
return False
path = "/sys/bus/i2c/devices/{0}-0050/sfp_port_reset"
port_ps = path.format(self._port_to_i2c_mapping[port_num])
try:
reg_file = open(port_ps, 'w')
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
# toggle reset
reg_file.seek(0)
reg_file.write('1')
time.sleep(1)
reg_file.seek(0)
reg_file.write('0')
reg_file.close()
return True
def set_low_power_mode(self, port_num, lpmode):
raise NotImplementedError
def get_low_power_mode(self, port_num):
raise NotImplementedError
def get_presence(self, port_num):
# Check for invalid port_num
if port_num < self._port_start or port_num > self._port_end:
return False
path = "/sys/bus/i2c/devices/{0}-0050/sfp_is_present"
port_ps = path.format(self._port_to_i2c_mapping[port_num])
reg_value = '0'
try:
reg_file = open(port_ps)
reg_value = reg_file.readline().rstrip()
reg_file.close()
except IOError as e:
print("Error: unable to access file: %s" % str(e))
return False
if reg_value == '1':
return True
return False
@property
def port_start(self):
return self._port_start
@property
def port_end(self):
return self._port_end
@property
def qsfp_ports(self):
return self._qsfp_ports
@property
def METHOD_NAME(self):
return self._port_to_eeprom_mapping
def get_transceiver_change_event(self, timeout=0):
ret_present = dict()
for phy_port in range(self._port_start, self._port_end + 1):
last_present_status = SFP_STATUS_INSERTED if self.get_presence(phy_port) else SFP_STATUS_REMOVED
if self._present_status[phy_port] != last_present_status:
ret_present[phy_port] = last_present_status
self._present_status[phy_port] = last_present_status
time.sleep(2)
return True, ret_present
|
2,612 |
from text
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import base64
import calendar
import struct
import time
import dns.dnssectypes
import dns.exception
import dns.immutable
import dns.rdata
import dns.rdatatype
class BadSigTime(dns.exception.DNSException):
"""Time in DNS SIG or RRSIG resource record cannot be parsed."""
def sigtime_to_posixtime(what):
if len(what) <= 10 and what.isdigit():
return int(what)
if len(what) != 14:
raise BadSigTime
year = int(what[0:4])
month = int(what[4:6])
day = int(what[6:8])
hour = int(what[8:10])
minute = int(what[10:12])
second = int(what[12:14])
return calendar.timegm((year, month, day, hour, minute, second, 0, 0, 0))
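# e.g. sigtime_to_posixtime("20230101000000") == 1672531200 (2023-01-01T00:00:00Z),
# while an epoch string such as "1672531200" is returned unchanged.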
def posixtime_to_sigtime(what):
return time.strftime("%Y%m%d%H%M%S", time.gmtime(what))
@dns.immutable.immutable
class RRSIG(dns.rdata.Rdata):
"""RRSIG record"""
__slots__ = [
"type_covered",
"algorithm",
"labels",
"original_ttl",
"expiration",
"inception",
"key_tag",
"signer",
"signature",
]
def __init__(
self,
rdclass,
rdtype,
type_covered,
algorithm,
labels,
original_ttl,
expiration,
inception,
key_tag,
signer,
signature,
):
super().__init__(rdclass, rdtype)
self.type_covered = self._as_rdatatype(type_covered)
self.algorithm = dns.dnssectypes.Algorithm.make(algorithm)
self.labels = self._as_uint8(labels)
self.original_ttl = self._as_ttl(original_ttl)
self.expiration = self._as_uint32(expiration)
self.inception = self._as_uint32(inception)
self.key_tag = self._as_uint16(key_tag)
self.signer = self._as_name(signer)
self.signature = self._as_bytes(signature)
def covers(self):
return self.type_covered
def to_text(self, origin=None, relativize=True, **kw):
return "%s %d %d %d %s %s %d %s %s" % (
dns.rdatatype.to_text(self.type_covered),
self.algorithm,
self.labels,
self.original_ttl,
posixtime_to_sigtime(self.expiration),
posixtime_to_sigtime(self.inception),
self.key_tag,
self.signer.choose_relativity(origin, relativize),
dns.rdata._base64ify(self.signature, **kw),
)
@classmethod
def METHOD_NAME(
cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
):
type_covered = dns.rdatatype.METHOD_NAME(tok.get_string())
algorithm = dns.dnssectypes.Algorithm.METHOD_NAME(tok.get_string())
labels = tok.get_int()
original_ttl = tok.get_ttl()
expiration = sigtime_to_posixtime(tok.get_string())
inception = sigtime_to_posixtime(tok.get_string())
key_tag = tok.get_int()
signer = tok.get_name(origin, relativize, relativize_to)
b64 = tok.concatenate_remaining_identifiers().encode()
signature = base64.b64decode(b64)
return cls(
rdclass,
rdtype,
type_covered,
algorithm,
labels,
original_ttl,
expiration,
inception,
key_tag,
signer,
signature,
)
def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
header = struct.pack(
"!HBBIIIH",
self.type_covered,
self.algorithm,
self.labels,
self.original_ttl,
self.expiration,
self.inception,
self.key_tag,
)
file.write(header)
self.signer.to_wire(file, None, origin, canonicalize)
file.write(self.signature)
@classmethod
def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
header = parser.get_struct("!HBBIIIH")
signer = parser.get_name(origin)
signature = parser.get_remaining()
return cls(rdclass, rdtype, *header, signer, signature)
|
2,613 |
ref maximum2
|
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import (
function_tester,
list_ctx_and_func_name)
# -----------------------------------------------------------------------------
# Reference functions
# -----------------------------------------------------------------------------
def ref_add2(x, y):
return x + y
def ref_sub2(x, y):
return x - y
def ref_mul2(x, y):
return x * y
def ref_div2(x, y):
return x / y
def ref_pow2(x, y):
return x ** y
def METHOD_NAME(x, y):
return np.maximum(x, y)
def ref_minimum2(x, y):
return np.minimum(x, y)
# -----------------------------------------------------------------------------
# Initializer
# -----------------------------------------------------------------------------
def get_inputs(fname, shapes, rng):
if fname == 'div2':
denom = rng.randn(*shapes[1]).astype(np.float32)
denom[np.abs(denom) < 0.5] = 0.5
return [rng.randn(*shapes[0]).astype(np.float32), denom]
if fname == 'pow2':
return [rng.rand(*shapes[0]).astype(np.float32) + 0.5,
rng.randn(*shapes[1]).astype(np.float32)]
return [rng.randn(*shapes[i]).astype(np.float32) * 2 for i in range(2)]
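# Note (added for clarity): the inputs above are shaped for numerical stability --
# div2 denominators are kept at |x| >= 0.5 and pow2 bases are shifted to be
# positive, which avoids blow-ups in the finite-difference gradient checks.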
# -----------------------------------------------------------------------------
# Test body
# -----------------------------------------------------------------------------
@pytest.mark.parametrize("fname, ctx, func_name",
list_ctx_and_func_name(['sub2',
'mul2',
'div2',
'pow2']))
@pytest.mark.parametrize("seed", [314])
def test_transform_binary_inplace(seed, fname, ctx, func_name):
from nbla_test_utils import inplace_function_test_helper
x0 = nn.Variable([2, 3, 4], need_grad=True)
x1 = nn.Variable([2, 3, 4], need_grad=True)
func = getattr(F, fname)
inplace_function_test_helper(
[x0, x1], func, ctx=ctx, rng=np.random.RandomState(seed))
atol_list = {
'add2': (1e-6, 4e-3),
'sub2': (1e-6, 3e-3),
'mul2': (1e-6, 2e-2),
'div2': (1e-4, 1e-1),
'pow2': (1e-4, 1e-1),
'maximum2': (1e-6, 3e-3),
'minimum2': (1e-6, 4e-3),
}
@pytest.mark.parametrize("fname, ctx, func_name",
list_ctx_and_func_name(['add2',
'sub2',
'mul2',
'div2',
'pow2',
'maximum2',
'minimum2']))
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("broadcast_dims", [
(None, None),
(None, (0,)),
((1,), None),
(None, (2,)),
((0, 2), None),
((0,), (2,))])
def test_transform_binary_forward_backward(fname, ctx, func_name, broadcast_dims, seed):
from nbla_test_utils import function_tester
atol_f, atol_b = atol_list[fname]
func = getattr(F, fname)
ref_func = eval('ref_' + fname)
rng = np.random.RandomState(seed)
shape = [2, 3, 4]
shapes = []
for i in range(2):
if broadcast_dims[i] is None:
shapes.append(shape)
continue
s = np.array(shape).copy()
s[np.array(broadcast_dims[i])] = 1
shapes.append(s.tolist())
inputs = get_inputs(fname, shapes, rng)
function_tester(rng, func, ref_func, inputs,
ctx=ctx, func_name=func_name,
atol_f=atol_f, atol_b=atol_b)
# This is a test of grid-strided loop of CUDA kernels used in transform binary.
# This test only cover a few cases to reduce test time. Therefore this does not
# test some CUDA kernels which are called in specific conditions. Keep in mind
# the risk of small test coverage.
@pytest.mark.parametrize("fname, ctx, func_name",
list_ctx_and_func_name(['mul2']))
def test_large_transform_binary(fname, ctx, func_name):
if not func_name.endswith('Cuda'):
pytest.skip('Grid-strided loop is tested only for CUDA backend')
with nn.context_scope(ctx), nn.auto_forward(True):
a = nn.Variable.from_numpy_array(
np.random.randn(1024, 64, 1)).apply(need_grad=True)
b = nn.Variable.from_numpy_array(
np.random.randn(1024, 64, 3)).apply(need_grad=True)
c = F.mul2(a, b)
c.backward()
|
2,614 |
quaternion canonize
|
"""
This module contains functions that operate on and/or return quaternions.
Notes
-----
The default convention to represent a quaternion :math:`q` in this module is by four real values **w**, **x**, **y**, **z**.
The first value **w** is the scalar (real) part, and **x**, **y**, **z** form the vector (complex, imaginary) part [1]_, so that:
:math:`q = w + xi + yj + zk`
where :math:`i, j, k` are basis components with following multiplication rules [2]_:
:math:`ii = jj = kk = ijk = -1`
:math:`ij = k,\\qquad ji = -k`
:math:`jk = i,\\qquad kj = -i`
:math:`ki = j,\\qquad ik = -j`
Quaternions are associative but not commutative.
**Quaternion as rotation.**
A rotation through an angle :math:`\\theta` around an axis defined by a euclidean unit vector :math:`u = u_{x}i + u_{y}j + u_{z}k`
can be represented as a quaternion:
:math:`q = cos(\\frac{\\theta}{2}) + sin(\\frac{\\theta}{2}) [u_{x}i + u_{y}j + u_{z}k]`
i.e.:
:math:`w = cos(\\frac{\\theta}{2})`
:math:`x = sin(\\frac{\\theta}{2}) u_{x}`
:math:`y = sin(\\frac{\\theta}{2}) u_{y}`
:math:`z = sin(\\frac{\\theta}{2}) u_{z}`
For a quaternion to represent a rotation or orientation, it must be unit-length.
A quaternion representing a rotation :math:`p` resulting from applying a rotation :math:`r` to a rotation :math:`q`, i.e.:
:math:`p = rq`,
is also unit-length.
References
----------
.. [1] http://mathworld.wolfram.com/Quaternion.html
.. [2] http://mathworld.wolfram.com/HamiltonsRules.html
.. [3] https://github.com/matthew-brett/transforms3d/blob/master/transforms3d/quaternions.py
"""
import math
from ._algebra import allclose
ATOL = 1e-6 # absolute tolerance
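# Worked example (added, not part of the original module): a rotation of 90 degrees
# about the z-axis (u = [0, 0, 1]) corresponds to the unit quaternion
# [w, x, y, z] = [cos(45 deg), 0, 0, sin(45 deg)] ~= [0.7071, 0.0, 0.0, 0.7071].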
def quaternion_norm(q):
"""Calculates the length (euclidean norm) of a quaternion.
Parameters
----------
q : [float, float, float, float] | :class:`~compas.geometry.Quaternion`
Quaternion or sequence of four floats ``[w, x, y, z]``.
Returns
-------
float
The length (euclidean norm) of a quaternion.
References
----------
* Quaternion Norm: http://mathworld.wolfram.com/QuaternionNorm.html
"""
return math.sqrt(sum([x * x for x in q]))
def quaternion_unitize(q):
"""Makes a quaternion unit-length.
Parameters
----------
q : [float, float, float, float] | :class:`~compas.geometry.Quaternion`
Quaternion or sequence of four floats ``[w, x, y, z]``.
Returns
-------
[float, float, float, float]
Quaternion of length 1 as a list of four real values ``[nw, nx, ny, nz]``.
"""
n = quaternion_norm(q)
if allclose([n], [0.0], ATOL):
raise ValueError("The given quaternion has zero length.")
else:
return [x / n for x in q]
def quaternion_is_unit(q, tol=ATOL):
"""Checks if a quaternion is unit-length.
Parameters
----------
q : [float, float, float, float] | :class:`~compas.geometry.Quaternion`
Quaternion or sequence of four floats ``[w, x, y, z]``.
tol : float, optional
Requested decimal precision.
Returns
-------
bool
True if the quaternion is unit-length,
and False if otherwise.
"""
n = quaternion_norm(q)
return allclose([n], [1.0], tol)
def quaternion_multiply(r, q):
"""Multiplies two quaternions.
Parameters
----------
r : [float, float, float, float] | :class:`~compas.geometry.Quaternion`
Quaternion or sequence of four floats ``[w, x, y, z]``.
q : [float, float, float, float] | :class:`~compas.geometry.Quaternion`
Quaternion or sequence of four floats ``[w, x, y, z]``.
Returns
-------
[float, float, float, float]
Quaternion :math:`p = rq` as a list of four real values ``[pw, px, py, pz]``.
Notes
-----
Multiplication of two quaternions :math:`p = rq` can be interpreted as applying rotation :math:`r` to an orientation :math:`q`,
provided that both :math:`r` and :math:`q` are unit-length.
The result is also unit-length.
Multiplication of quaternions is not commutative!
References
----------
* Quaternion: http://mathworld.wolfram.com/Quaternion.html
"""
rw, rx, ry, rz = r
qw, qx, qy, qz = q
pw = rw * qw - rx * qx - ry * qy - rz * qz
px = rw * qx + rx * qw + ry * qz - rz * qy
py = rw * qy - rx * qz + ry * qw + rz * qx
pz = rw * qz + rx * qy - ry * qx + rz * qw
return [pw, px, py, pz]
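# Example (added for illustration): multiplying the pure quaternions i and j,
# quaternion_multiply([0, 1, 0, 0], [0, 0, 1, 0]) returns [0, 0, 0, 1], i.e. k,
# which matches the Hamilton rule ij = k quoted in the module docstring.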
def METHOD_NAME(q):
"""Converts a quaternion into a canonic form if needed.
Parameters
----------
q : [float, float, float, float] | :class:`~compas.geometry.Quaternion`
Quaternion or sequence of four floats ``[w, x, y, z]``.
Returns
-------
[float, float, float, float]
Quaternion in a canonic form as a list of four real values ``[cw, cx, cy, cz]``.
Notes
-----
Canonic form means the scalar component is a non-negative number.
"""
if q[0] < 0.0:
return [-x for x in q]
return q[:]
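# Example (added for illustration): a quaternion with a negative scalar part such as
# [-0.5, 0.1, 0.2, 0.3] is returned negated as [0.5, -0.1, -0.2, -0.3]; both encode
# the same rotation, and inputs with w >= 0 are returned unchanged (as a copy).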
def quaternion_conjugate(q):
"""Conjugate of a quaternion.
Parameters
----------
q : [float, float, float, float] | :class:`~compas.geometry.Quaternion`
Quaternion or sequence of four floats ``[w, x, y, z]``.
Returns
-------
[float, float, float, float]
Conjugate quaternion as a list of four real values ``[cw, cx, cy, cz]``.
References
----------
* Quaternion Conjugate: http://mathworld.wolfram.com/QuaternionConjugate.html
"""
return [q[0], -q[1], -q[2], -q[3]]
|
2,615 |
make undirected
|
import os
import pickle
import random
import time
import dgl
import numpy as np
import scipy.sparse as sp
import torch
from dgl.data.utils import (
_get_dgl_url,
download,
extract_archive,
get_download_dir,
)
from torch.utils.data import DataLoader
def ReadTxtNet(file_path="", undirected=True):
"""Read the txt network file.
Notations: The network is unweighted.
Parameters
----------
file_path str : path of network file
undirected bool : whether the edges are undirected
Return
------
net dict : a dict recording the connections in the graph
node2id dict : a dict mapping the nodes to their embedding indices
id2node dict : a dict mapping nodes embedding indices to the nodes
"""
if file_path == "youtube" or file_path == "blog":
name = file_path
dir = get_download_dir()
zip_file_path = "{}/{}.zip".format(dir, name)
download(
_get_dgl_url(
os.path.join("dataset/DeepWalk/", "{}.zip".format(file_path))
),
path=zip_file_path,
)
extract_archive(zip_file_path, "{}/{}".format(dir, name))
file_path = "{}/{}/{}-net.txt".format(dir, name, name)
node2id = {}
id2node = {}
cid = 0
src = []
dst = []
weight = []
net = {}
with open(file_path, "r") as f:
for line in f.readlines():
tup = list(map(int, line.strip().split(" ")))
assert len(tup) in [
2,
3,
], "The format of network file is unrecognizable."
if len(tup) == 3:
n1, n2, w = tup
elif len(tup) == 2:
n1, n2 = tup
w = 1
if n1 not in node2id:
node2id[n1] = cid
id2node[cid] = n1
cid += 1
if n2 not in node2id:
node2id[n2] = cid
id2node[cid] = n2
cid += 1
n1 = node2id[n1]
n2 = node2id[n2]
if n1 not in net:
net[n1] = {n2: w}
src.append(n1)
dst.append(n2)
weight.append(w)
elif n2 not in net[n1]:
net[n1][n2] = w
src.append(n1)
dst.append(n2)
weight.append(w)
if undirected:
if n2 not in net:
net[n2] = {n1: w}
src.append(n2)
dst.append(n1)
weight.append(w)
elif n1 not in net[n2]:
net[n2][n1] = w
src.append(n2)
dst.append(n1)
weight.append(w)
print("node num: %d" % len(net))
print("edge num: %d" % len(src))
assert max(net.keys()) == len(net) - 1, "error reading net, quit"
sm = sp.coo_matrix((np.array(weight), (src, dst)), dtype=np.float32)
return net, node2id, id2node, sm
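# Example (added for clarity): a line "3 7" in the network file becomes the edge
# 3 -> 7 with weight 1 (and also 7 -> 3 when undirected=True); original node ids are
# remapped to consecutive embedding indices through node2id/id2node.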
def net2graph(net_sm):
"""Transform the network to DGL graph
Return
------
G DGLGraph : graph by DGL
"""
start = time.time()
G = dgl.DGLGraph(net_sm)
end = time.time()
t = end - start
print("Building DGLGraph in %.2fs" % t)
return G
def METHOD_NAME(G):
G.add_edges(G.edges()[1], G.edges()[0])
return G
def find_connected_nodes(G):
nodes = torch.nonzero(G.out_degrees(), as_tuple=False).squeeze(-1)
return nodes
class LineDataset:
def __init__(
self,
net_file,
batch_size,
num_samples,
negative=5,
gpus=[0],
fast_neg=True,
ogbl_name="",
load_from_ogbl=False,
ogbn_name="",
load_from_ogbn=False,
):
"""This class has the following functions:
1. Transform the txt network file into DGL graph;
        2. Generate edge samples for the trainer;
        3. Provide the negative table if the user hopes to sample negative
        nodes according to nodes' degrees;
        Parameter
        ---------
        net_file str : path of the dgl network file
        batch_size int : number of edge samples in each batch
        num_samples int : total number of edge samples to draw
        negative int : number of negative samples for each positive node pair
        gpus list of int : GPU ids used for training
        fast_neg bool : whether to do negative sampling inside a batch
        ogbl_name / ogbn_name str : name of the ogb dataset, when loading from ogb
        load_from_ogbl / load_from_ogbn bool : whether to load the graph from ogb
"""
self.batch_size = batch_size
self.negative = negative
self.num_samples = num_samples
self.num_procs = len(gpus)
self.fast_neg = fast_neg
if load_from_ogbl:
assert (
len(gpus) == 1
), "ogb.linkproppred is not compatible with multi-gpu training."
from load_dataset import load_from_ogbl_with_name
self.G = load_from_ogbl_with_name(ogbl_name)
elif load_from_ogbn:
assert (
len(gpus) == 1
), "ogb.linkproppred is not compatible with multi-gpu training."
from load_dataset import load_from_ogbn_with_name
self.G = load_from_ogbn_with_name(ogbn_name)
else:
self.G = dgl.load_graphs(net_file)[0][0]
self.G = METHOD_NAME(self.G)
print("Finish reading graph")
self.num_nodes = self.G.num_nodes()
start = time.time()
seeds = np.random.choice(
np.arange(self.G.num_edges()), self.num_samples, replace=True
) # edge index
self.seeds = torch.split(
torch.LongTensor(seeds),
int(np.ceil(self.num_samples / self.num_procs)),
0,
)
end = time.time()
t = end - start
print("generate %d samples in %.2fs" % (len(seeds), t))
# negative table for true negative sampling
self.valid_nodes = find_connected_nodes(self.G)
if not fast_neg:
node_degree = self.G.out_degrees(self.valid_nodes).numpy()
node_degree = np.power(node_degree, 0.75)
node_degree /= np.sum(node_degree)
            node_degree = np.array(node_degree * 1e8, dtype=np.int64)
self.neg_table = []
for idx, node in enumerate(self.valid_nodes):
self.neg_table += [node] * node_degree[idx]
self.neg_table_size = len(self.neg_table)
            self.neg_table = np.array(self.neg_table, dtype=np.int64)
del node_degree
def create_sampler(self, i):
"""create random walk sampler"""
return EdgeSampler(self.G, self.seeds[i])
def save_mapping(self, map_file):
with open(map_file, "wb") as f:
pickle.dump(self.node2id, f)
class EdgeSampler(object):
def __init__(self, G, seeds):
self.G = G
self.seeds = seeds
self.edges = torch.cat(
(self.G.edges()[0].unsqueeze(0), self.G.edges()[1].unsqueeze(0)), 0
).t()
def sample(self, seeds):
"""seeds torch.LongTensor : a batch of indices of edges"""
return self.edges[torch.LongTensor(seeds)]
|
2,616 |
connect
|
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Provides high-level common email utilities."""
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from smtplib import SMTP, SMTPRecipientsRefused
from celery import shared_task
from flask import current_app
from flask_celeryext import create_celery_app
def create_send_email_task(destination, subject, message, reply_to_address=None):
"""
Schedules a task to send an email.
:param destination:
:param subject:
:param message:
:param reply_to_address:
:return: send_email
"""
# this is required for some unknown reason due to an initialisation problem with celery.
if not current_app.config.get('TESTING', False):
create_celery_app(current_app)
print('Sending email to {0}'.format(destination))
send_email.delay(destination, subject, message, reply_to_address)
else:
print('Not sending email as TESTING=True; would have sent email to {0}:'.format(destination))
import re
clean = re.compile('(?s)<.*?>')
newlines = re.compile(r'(?ms)(\n(\s*)){2,}')
print(re.sub(newlines, '\n', re.sub(clean, '', message)))
@shared_task
def send_email(destination, subject, message, reply_to_address=None):
try:
connection = METHOD_NAME()
mmp_msg = MIMEMultipart('alternative')
mmp_msg['Subject'] = subject
mmp_msg['From'] = current_app.config['MAIL_DEFAULT_SENDER']
mmp_msg['To'] = destination
if reply_to_address:
mmp_msg['Reply-To'] = reply_to_address
part1 = MIMEText(message, 'html', 'utf-8')
mmp_msg.attach(part1)
recipients = destination.split(',')
recipients.append(current_app.config['ADMIN_EMAIL'])
connection.send_message(mmp_msg, current_app.config['MAIL_DEFAULT_SENDER'], recipients)
connection.quit()
except SMTPRecipientsRefused as smtp_error:
send_error_mail(smtp_error)
except Exception as e:
print('Exception occurred.')
raise e
def send_flask_message_email(message):
"""
Creates a task to send an email from a flask_mail.Message instance
"""
create_send_email_task(
','.join(message.recipients),
message.subject,
message.html,
reply_to_address=current_app.config['SECURITY_EMAIL_SENDER']
)
def send_error_mail(exception):
"""
Sends an error email to the default system email (which should always be valid!).
:param exception: SMTPRecipientsRefused exception
"""
# get default
destination_email = current_app.config['SECURITY_EMAIL_SENDER']
create_send_email_task(destination_email, '[HEPData Error] Error sending email', str(exception))
def METHOD_NAME():
if current_app.config['SMTP_ENCRYPTION']:
smtp = SMTP(current_app.config['MAIL_SERVER'])
else:
smtp = SMTP()
smtp.METHOD_NAME(current_app.config['MAIL_SERVER'], current_app.config['MAIL_PORT'])
if not current_app.config['SMTP_NO_PASSWORD']:
if current_app.config['SMTP_ENCRYPTION']:
smtp.starttls()
smtp.login(current_app.config['MAIL_USERNAME'], current_app.config['MAIL_PASSWORD'])
return smtp
|
2,617 |
test ensure rgb from provider
|
import pytest
import numpy as np
import tensorflow as tf
from sleap.nn.system import use_cpu_only
use_cpu_only() # hide GPUs for test
from sleap.nn.data import normalization
from sleap.nn.data import providers
def test_ensure_min_image_rank():
assert normalization.ensure_min_image_rank(tf.zeros([2, 2])).shape == (2, 2, 1)
assert normalization.ensure_min_image_rank(tf.zeros([2, 2, 1])).shape == (2, 2, 1)
def test_ensure_float():
assert normalization.ensure_float(tf.zeros([2, 2], tf.uint8)).dtype == tf.float32
assert normalization.ensure_float(tf.zeros([2, 2], tf.float32)).dtype == tf.float32
def test_ensure_int():
np.testing.assert_array_equal(
normalization.ensure_int(tf.constant([0.0, 0.5, 1.0])), np.array([0, 127, 255])
)
np.testing.assert_array_equal(
normalization.ensure_int(tf.constant([0.0, 127.0, 255.0])),
np.array([0, 127, 255]),
)
np.testing.assert_array_equal(
normalization.ensure_int(tf.constant([0, 127, 255])), np.array([0, 127, 255])
)
def test_ensure_grayscale():
np.testing.assert_array_equal(
normalization.ensure_grayscale(tf.ones([2, 2, 3], tf.uint8) * 255),
tf.ones([2, 2, 1], tf.uint8) * 255,
)
np.testing.assert_array_equal(
normalization.ensure_grayscale(tf.ones([2, 2, 1], tf.uint8) * 255),
tf.ones([2, 2, 1], tf.uint8) * 255,
)
np.testing.assert_allclose(
normalization.ensure_grayscale(tf.ones([2, 2, 3], tf.float32)),
tf.ones([2, 2, 1], tf.float32),
atol=1e-4,
)
def test_ensure_rgb():
np.testing.assert_array_equal(
normalization.ensure_rgb(tf.ones([2, 2, 3], tf.uint8) * 255),
tf.ones([2, 2, 3], tf.uint8) * 255,
)
np.testing.assert_array_equal(
normalization.ensure_rgb(tf.ones([2, 2, 1], tf.uint8) * 255),
tf.ones([2, 2, 3], tf.uint8) * 255,
)
def test_convert_rgb_to_bgr():
img_rgb = tf.stack(
[
tf.ones([2, 2], dtype=tf.uint8) * 1,
tf.ones([2, 2], dtype=tf.uint8) * 2,
tf.ones([2, 2], dtype=tf.uint8) * 3,
],
axis=-1,
)
img_bgr = tf.stack(
[
tf.ones([2, 2], dtype=tf.uint8) * 3,
tf.ones([2, 2], dtype=tf.uint8) * 2,
tf.ones([2, 2], dtype=tf.uint8) * 1,
],
axis=-1,
)
np.testing.assert_array_equal(normalization.convert_rgb_to_bgr(img_rgb), img_bgr)
def test_scale_image_range():
np.testing.assert_array_equal(
normalization.scale_image_range(
tf.cast([0, 0.5, 1.0], tf.float32), min_val=-1.0, max_val=1.0
),
[-1, 0, 1],
)
def test_normalizer(min_labels):
# tf.executing_eagerly()
labels_reader = providers.LabelsReader(min_labels)
ds_img = labels_reader.make_dataset()
normalizer = normalization.Normalizer(ensure_grayscale=True)
ds = normalizer.transform_dataset(ds_img)
example = next(iter(ds))
assert example["image"].shape[-1] == 1
normalizer = normalization.Normalizer(ensure_float=True, ensure_grayscale=True)
ds = normalizer.transform_dataset(ds_img)
example = next(iter(ds))
assert example["image"].dtype == tf.float32
assert example["image"].shape[-1] == 1
normalizer = normalization.Normalizer(ensure_float=True, ensure_rgb=True)
ds = normalizer.transform_dataset(ds_img)
example = next(iter(ds))
assert example["image"].dtype == tf.float32
assert example["image"].shape[-1] == 3
normalizer = normalization.Normalizer(ensure_grayscale=True, ensure_rgb=True)
ds = normalizer.transform_dataset(ds_img)
example = next(iter(ds))
assert example["image"].shape[-1] == 1
def test_normalizer_from_config():
normalizer = normalization.Normalizer.from_config(
config=normalization.PreprocessingConfig(
ensure_rgb=False, ensure_grayscale=False, imagenet_mode=None
)
)
assert normalizer.image_key == "image"
assert normalizer.ensure_float == True
assert normalizer.ensure_rgb == False
assert normalizer.ensure_grayscale == False
assert normalizer.imagenet_mode is None
normalizer = normalization.Normalizer.from_config(
config=normalization.PreprocessingConfig(
ensure_rgb=False, ensure_grayscale=False, imagenet_mode="tf"
)
)
assert normalizer.image_key == "image"
assert normalizer.ensure_float == True
assert normalizer.ensure_rgb == False
assert normalizer.ensure_grayscale == False
    assert normalizer.imagenet_mode == "tf"
def test_ensure_grayscale_from_provider(small_robot_mp4_vid):
video = providers.VideoReader(
video=small_robot_mp4_vid,
example_indices=[0],
)
normalizer = normalization.Normalizer(image_key="image", ensure_grayscale=True)
ds = video.make_dataset()
ds = normalizer.transform_dataset(ds)
example = next(iter(ds))
assert example["image"].shape[-1] == 1
def METHOD_NAME(centered_pair_vid):
video = providers.VideoReader(
video=centered_pair_vid,
example_indices=[0],
)
normalizer = normalization.Normalizer(image_key="image", ensure_rgb=True)
ds = video.make_dataset()
ds = normalizer.transform_dataset(ds)
example = next(iter(ds))
assert example["image"].shape[-1] == 3
|
2,618 |
forward
|
# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from packaging import version
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.attention_0_0 = nn.MultiheadAttention(embed_dim=64, num_heads=4)
self.attention_0_1 = nn.MultiheadAttention(embed_dim=64, num_heads=8, bias=False, add_bias_kv=False, add_zero_attn=False)
self.attention_0_2 = nn.MultiheadAttention(embed_dim=64, num_heads=16, bias=True, add_bias_kv=True, add_zero_attn=True)
self.attention_0_3 = nn.MultiheadAttention(embed_dim=32, num_heads=8, bias=True)
self.attention_0_4 = nn.MultiheadAttention(embed_dim=40, num_heads=4, kdim=30, vdim=20)
self.attention_0_5 = nn.MultiheadAttention(embed_dim=40, num_heads=8, kdim=30, vdim=20, bias=False, add_bias_kv=False, add_zero_attn=False)
self.attention_0_6 = nn.MultiheadAttention(embed_dim=40, num_heads=10, kdim=30, vdim=20, bias=True, add_bias_kv=True, add_zero_attn=True)
if version.parse(torch.__version__) >= version.parse('1.9'):
self.attention_1_0 = nn.MultiheadAttention(embed_dim=64, num_heads=4, batch_first=True)
self.attention_1_1 = nn.MultiheadAttention(embed_dim=64, num_heads=8, bias=False, add_bias_kv=False, add_zero_attn=False, batch_first=True)
self.attention_1_2 = nn.MultiheadAttention(embed_dim=64, num_heads=16, bias=True, add_bias_kv=True, add_zero_attn=True, batch_first=True)
self.attention_1_3 = nn.MultiheadAttention(embed_dim=32, num_heads=8, bias=True, batch_first=True)
self.attention_1_4 = nn.MultiheadAttention(embed_dim=40, num_heads=4, kdim=30, vdim=20, batch_first=True)
self.attention_1_5 = nn.MultiheadAttention(embed_dim=40, num_heads=8, kdim=30, vdim=20, bias=False, add_bias_kv=False, add_zero_attn=False, batch_first=True)
self.attention_1_6 = nn.MultiheadAttention(embed_dim=40, num_heads=10, kdim=30, vdim=20, bias=True, add_bias_kv=True, add_zero_attn=True, batch_first=True)
def METHOD_NAME(self, xq, xk, xv, z, zmask, yq, yk, yv, ymask, ymask2):
x0, x0w = self.attention_0_0(xq, xk, xv)
x1, x1w = self.attention_0_1(xq, xk, xv)
x2, x2w = self.attention_0_2(xq, xk, xk)
x3, _ = self.attention_0_3(z, z, z, need_weights=False)
x33, _ = self.attention_0_3(z, z, z, attn_mask=zmask)
x4, x4w = self.attention_0_4(yq, yk, yv)
x5, x5w = self.attention_0_5(yq, yk, yv, attn_mask=ymask)
x6, x6w = self.attention_0_6(yq, yk, yv, attn_mask=ymask2)
if version.parse(torch.__version__) < version.parse('1.9'):
return x0, x0w, x1, x1w, x2, x2w, x3, x33, x4, x4w, x5, x5w, x6, x6w
xq = xq.transpose(0, 1)
xk = xk.transpose(0, 1)
xv = xv.transpose(0, 1)
z = z.transpose(0, 1)
yq = yq.transpose(0, 1)
yk = yk.transpose(0, 1)
yv = yv.transpose(0, 1)
y0, y0w = self.attention_1_0(xq, xk, xv)
y1, y1w = self.attention_1_1(xq, xk, xv)
y2, y2w = self.attention_1_2(xq, xk, xk)
y3, _ = self.attention_1_3(z, z, z)
if version.parse(torch.__version__) >= version.parse('1.12') and version.parse(torch.__version__) < version.parse('1.13'):
# HACK pytorch 1.12 breaks 2-dim zmask
# https://github.com/pytorch/pytorch/issues/97409
# zmask2 = zmask.reshape(1, 1, 30, 30).expand(1, 8, 30, 30)
# y33, _ = self.attention_1_3(z, z, z, attn_mask=zmask2)
# but it produce all nan then, skip test :(
y33 = y3
elif version.parse(torch.__version__) >= version.parse('2.0') and version.parse(torch.__version__) < version.parse('2.1'):
# HACK pytorch 2.0 produce all nan, skip test :(
y33 = y3
else:
y33, _ = self.attention_1_3(z, z, z, attn_mask=zmask)
y4, y4w = self.attention_1_4(yq, yk, yv)
y5, y5w = self.attention_1_5(yq, yk, yv, attn_mask=ymask)
y6, y6w = self.attention_1_6(yq, yk, yv, attn_mask=ymask2)
return x0, x0w, x1, x1w, x2, x2w, x3, x33, x4, x4w, x5, x5w, x6, x6w, y0, y0w, y1, y1w, y2, y2w, y3, y33, y4, y4w, y5, y5w, y6, y6w
def test():
torch.set_grad_enabled(False)
net = Model()
net.eval()
torch.manual_seed(0)
xq = torch.rand(20, 1, 64)
xk = torch.rand(20, 1, 64)
xv = torch.rand(20, 1, 64)
z = torch.rand(30, 1, 32)
zmask = torch.rand(30, 30)
yq = torch.rand(15, 1, 40)
yk = torch.rand(24, 1, 30)
yv = torch.rand(24, 1, 20)
ymask = torch.rand(15, 24)
ymask2 = torch.rand(10, 15, 24)
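    # Note (added for clarity): ymask2 is the 3-D attention-mask variant shaped
    # (batch * num_heads, target_len, source_len) = (1 * 10, 15, 24) for the
    # 10-head layers, while ymask is the plain 2-D (target_len, source_len) mask.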
a = net(xq, xk, xv, z, zmask, yq, yk, yv, ymask, ymask2)
# export torchscript
print(torch.__version__)
if version.parse(torch.__version__) >= version.parse('1.12.0'):
mod = torch.jit.trace(net, (xq, xk, xv, z, zmask, yq, yk, yv, ymask, ymask2), check_trace=False)
else:
mod = torch.jit.trace(net, (xq, xk, xv, z, zmask, yq, yk, yv, ymask, ymask2))
mod.save("test_nn_MultiheadAttention.pt")
# torchscript to pnnx
import os
os.system("../src/pnnx test_nn_MultiheadAttention.pt inputshape=[20,1,64],[20,1,64],[20,1,64],[30,1,32],[30,30],[15,1,40],[24,1,30],[24,1,20],[15,24],[10,15,24]")
# pnnx inference
import test_nn_MultiheadAttention_pnnx
b = test_nn_MultiheadAttention_pnnx.test_inference()
for a0, b0 in zip(a, b):
if not torch.allclose(a0, b0, 1e-4, 1e-4):
return False
return True
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
2,619 |
test read
|
import os
import unittest
from lammps.formats import LogFile, AvgChunkFile
has_yaml = False
try:
import yaml
has_yaml = True
try:
from yaml import CSafeLoader as Loader, CSafeDumper as Dumper
except ImportError:
        from yaml import SafeLoader as Loader, SafeDumper as Dumper
except:
pass
EXAMPLES_DIR=os.path.abspath(os.path.join(__file__, '..', '..', '..', 'examples'))
DEFAULT_STYLE_EXAMPLE_LOG="melt/log.8Apr21.melt.g++.1"
MULTI_STYLE_EXAMPLE_LOG="peptide/log.27Nov18.peptide.g++.1"
AVG_CHUNK_FILE="VISCOSITY/profile.13Oct16.nemd.2d.g++.1"
YAML_STYLE_EXAMPLE_LOG="yaml/log.7Apr22.yaml.g++.1"
class Logfiles(unittest.TestCase):
def testLogFileNotFound(self):
with self.assertRaises(FileNotFoundError):
LogFile('test.log')
def testDefaultLogFile(self):
log = LogFile(os.path.join(EXAMPLES_DIR, DEFAULT_STYLE_EXAMPLE_LOG))
self.assertEqual(len(log.runs), 1)
run = log.runs[0]
self.assertEqual(len(run.keys()), 6)
self.assertIn("Step", run)
self.assertIn("Temp", run)
self.assertIn("E_pair", run)
self.assertIn("E_mol", run)
self.assertIn("TotEng", run)
self.assertIn("Press", run)
self.assertEqual(len(run["Step"]), 6)
self.assertEqual(len(run["Temp"]), 6)
self.assertEqual(len(run["E_pair"]), 6)
self.assertEqual(len(run["E_mol"]), 6)
self.assertEqual(len(run["TotEng"]), 6)
self.assertEqual(len(run["Press"]), 6)
self.assertEqual(log.runs[0]["Step"], [0, 50, 100, 150, 200, 250])
def testMultiLogFile(self):
log = LogFile(os.path.join(EXAMPLES_DIR, MULTI_STYLE_EXAMPLE_LOG))
self.assertEqual(len(log.runs), 1)
run0 = log.runs[0]
self.assertEqual(len(run0.keys()), 14)
self.assertIn("Step", run0)
self.assertIn("CPU", run0)
self.assertIn("TotEng", run0)
self.assertIn("KinEng", run0)
self.assertIn("Temp", run0)
self.assertIn("PotEng", run0)
self.assertIn("E_bond", run0)
self.assertIn("E_angle", run0)
self.assertIn("E_dihed", run0)
self.assertIn("E_impro", run0)
self.assertIn("E_vdwl", run0)
self.assertIn("E_coul", run0)
self.assertIn("E_long", run0)
self.assertIn("Press", run0)
for k in run0:
self.assertEqual(len(run0[k]), 7)
self.assertEqual(run0["Step"], list(range(0,350, 50)))
@unittest.skipIf(not has_yaml,"Missing the PyYAML python module")
def testYamlLogFile(self):
log = LogFile(os.path.join(EXAMPLES_DIR, YAML_STYLE_EXAMPLE_LOG))
self.assertEqual(len(log.runs), 2)
run = log.runs[0]
self.assertEqual(len(run.keys()), 12)
self.assertIn("Step", run)
self.assertIn("Temp", run)
self.assertIn("E_vdwl", run)
self.assertIn("E_coul", run)
self.assertIn("E_bond", run)
self.assertIn("E_angle", run)
self.assertIn("Press", run)
self.assertEqual(len(run["Step"]), 11)
self.assertEqual(len(run["Temp"]), 11)
self.assertEqual(len(run["E_vdwl"]), 11)
self.assertEqual(len(run["E_coul"]), 11)
self.assertEqual(len(run["E_bond"]), 11)
self.assertEqual(len(run["E_angle"]), 11)
self.assertEqual(len(run["Press"]), 11)
self.assertEqual(log.runs[0]["Step"], [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
class AvgChunkFiles(unittest.TestCase):
def testAvgChunkFileNotFound(self):
with self.assertRaises(FileNotFoundError):
AvgChunkFile('test.log')
def METHOD_NAME(self):
cfile = AvgChunkFile(os.path.join(EXAMPLES_DIR, AVG_CHUNK_FILE))
self.assertEqual(cfile.fix_name, "4")
self.assertEqual(cfile.group_name, "all")
self.assertEqual(cfile.timesteps, list(range(10000, 110000, 5000)))
ntimesteps = len(cfile.timesteps)
ntotal_count = len(cfile.total_count)
nchunks = len(cfile.chunks)
self.assertEqual(ntimesteps, ntotal_count)
self.assertEqual(nchunks, 20)
for i in range(1, nchunks+1):
            chunk = cfile.chunks[i - 1]
self.assertEqual(chunk['id'], i)
self.assertEqual(len(chunk['coord']), ntimesteps)
self.assertEqual(len(chunk['ncount']), ntimesteps)
self.assertIn("vx", chunk)
self.assertEqual(len(chunk['vx']), ntimesteps)
self.assertEqual(len(chunk['coord'][0]), 1)
from lammps import lammps
has_dump_yaml = False
try:
machine=None
if 'LAMMPS_MACHINE_NAME' in os.environ:
machine=os.environ['LAMMPS_MACHINE_NAME']
lmp=lammps(name=machine)
has_dump_yaml = lmp.has_style("atom","full") and lmp.has_style("dump", "yaml")
lmp.close()
except:
pass
@unittest.skipIf(not (has_dump_yaml and has_yaml), "Either atom_style full, dump_style yaml, or the python PyYAML module are not available")
class PythonDump(unittest.TestCase):
def setUp(self):
machine = None
if 'LAMMPS_MACHINE_NAME' in os.environ:
machine=os.environ['LAMMPS_MACHINE_NAME']
self.lmp = lammps(name=machine, cmdargs=['-nocite', '-log','none', '-echo','screen'])
def tearDown(self):
del self.lmp
def testDumpYaml(self):
dumpfile = os.path.join(os.path.abspath('.'), 'dump.yaml')
self.lmp.command('shell cd ' + os.environ['TEST_INPUT_DIR'])
self.lmp.command("newton on on")
self.lmp.file("in.fourmol")
self.lmp.command("dump 1 all yaml 2 " + dumpfile + " id type mol q x y z vx vy vz")
self.lmp.command("dump_modify 1 time yes sort id units yes")
self.lmp.command("run 4 post no")
with open(dumpfile) as d:
traj = tuple(yaml.load_all(d, Loader=Loader))
self.assertEqual(len(traj), 3)
self.assertEqual(traj[0]['timestep'], 0)
self.assertEqual(traj[0]['time'], 0)
self.assertEqual(traj[0]['natoms'], 29)
self.assertEqual(traj[0]['units'], 'real')
self.assertEqual(len(traj[0]['boundary']), 6)
self.assertEqual(traj[0]['boundary'][0], 'p')
self.assertEqual(traj[1]['timestep'], 2)
self.assertEqual(traj[1]['time'], 0.2)
self.assertEqual(traj[2]['timestep'], 4)
self.assertEqual(traj[2]['time'], 0.4)
self.assertEqual(traj[0]['keywords'],['id', 'type', 'mol', 'q', 'x', 'y', 'z',
'vx', 'vy', 'vz'])
self.assertEqual(traj[0]['data'][0],[1, 3, 1, -0.47, -0.279937, 2.47266, -0.172009,
0.000778678, 0.000589703, -0.000221795])
if __name__ == "__main__":
unittest.main()
|
2,620 |
correct shares
|
""" Priority corrector for the group and in-group shares
"""
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.private.correctors.BaseCorrector import BaseCorrector
class SharesCorrector:
def __init__(self, opsHelper):
if not opsHelper:
opsHelper = Operations()
self.__opsHelper = opsHelper
self.__log = gLogger.getSubLogger(self.__class__.__name__)
self.__shareCorrectors = {}
self.__correctorsOrder = []
self.__baseCS = "JobScheduling/ShareCorrections"
def __getCSValue(self, path, defaultValue=""):
return self.__opsHelper.getValue(f"{self.__baseCS}/{path}", defaultValue)
def __getCorrectorClass(self, correctorName):
baseImport = "WorkloadManagementSystem.private.correctors"
fullCN = f"{baseImport}.{correctorName}Corrector"
result = ObjectLoader().getObjects(baseImport, ".*Corrector", parentClass=BaseCorrector)
if not result["OK"]:
return result
data = result["Value"]
if fullCN not in data:
return S_ERROR(f"Can't find corrector {fullCN}")
return S_OK(data[fullCN])
def instantiateRequiredCorrectors(self):
correctorsToStart = self.__getCSValue("ShareCorrectorsToStart", [])
self.__correctorsOrder = correctorsToStart
self.__log.info(f"Correctors requested: {', '.join(correctorsToStart)}")
for corrector in self.__shareCorrectors:
if corrector not in correctorsToStart:
self.__log.info(f"Stopping corrector {corrector}")
del self.__shareCorrectors[corrector]
for corrector in correctorsToStart:
if corrector not in self.__shareCorrectors:
self.__log.info(f"Starting corrector {corrector}")
result = self.__opsHelper.getSections(f"{self.__baseCS}/{corrector}")
if not result["OK"]:
self.__log.error(
"Cannot get list of correctors to instantiate",
f" for corrector type {corrector}: {result['Message']}",
)
continue
groupCorrectors = result["Value"]
self.__shareCorrectors[corrector] = {}
result = self.__getCorrectorClass(corrector)
if not result["OK"]:
self.__log.error("Cannot instantiate corrector", f"{corrector} {result['Message']}")
continue
correctorClass = result["Value"]
for groupCor in groupCorrectors:
groupPath = f"{corrector}/{groupCor}/Group"
groupToCorrect = self.__getCSValue(groupPath, "")
if groupToCorrect:
groupKey = f"gr:{groupToCorrect}"
else:
groupKey = "global"
self.__log.info(f"Instantiating group corrector {groupCor} ({groupToCorrect}) of type {corrector}")
if groupKey in self.__shareCorrectors[corrector]:
self.__log.error(
"There are two group correctors defined",
f" for {corrector} type (group {groupToCorrect})",
)
else:
groupCorPath = f"{self.__baseCS}/{corrector}/{groupCor}"
correctorObj = correctorClass(self.__opsHelper, groupCorPath, groupToCorrect)
result = correctorObj.initialize()
if not result["OK"]:
self.__log.error(
"Could not initialize corrector %s for %s: %s"
% (corrector, groupKey, result["Message"])
)
else:
self.__shareCorrectors[corrector][groupKey] = correctorObj
return S_OK()
def updateCorrectorsKnowledge(self):
for corrector in self.__shareCorrectors:
for groupTC in self.__shareCorrectors[corrector]:
self.__shareCorrectors[corrector][groupTC].updateHistoryKnowledge()
def update(self):
self.instantiateRequiredCorrectors()
self.updateCorrectorsKnowledge()
def METHOD_NAME(self, shareDict, group=""):
if group:
groupKey = f"gr:{group}"
else:
groupKey = "global"
for corrector in self.__shareCorrectors:
if groupKey in self.__shareCorrectors[corrector]:
shareDict = self.__shareCorrectors[corrector][groupKey].applyCorrection(shareDict)
return shareDict
|
2,621 |
test digester
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the dandi package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
from pathlib import Path
from pytest_mock import MockerFixture
from .. import digests
from ..digests import Digester, get_zarr_checksum
def METHOD_NAME(tmp_path):
digester = Digester()
f = tmp_path / "sample.txt"
f.write_bytes(b"123")
assert digester(f) == {
"md5": "202cb962ac59075b964b07152d234b70",
"sha1": "40bd001563085fc35165329ea1ff5c5ecbdbbeef",
"sha256": "a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3",
"sha512": "3c9909afec25354d551dae21590bb26e38d53f2173b8d3dc3eee4c047e7a"
"b1c1eb8b85103e3be7ba613b31bb5c9c36214dc9f14a42fd7a2fdb84856b"
"ca5c44c2",
}
f = tmp_path / "0"
f.write_bytes(chr(0).encode())
assert digester(f) == {
"md5": "93b885adfe0da089cdf634904fd59f71",
"sha1": "5ba93c9db0cff93f52b521d7420e43f6eda2784f",
"sha256": "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
"sha512": "b8244d028981d693af7b456af8efa4cad63d282e19ff14942c246e50d935"
"1d22704a802a71c3580b6370de4ceb293c324a8423342557d4e5c38438f0"
"e36910ee",
}
f = tmp_path / "long.txt"
f.write_bytes(b"123abz\n" * 1000000)
assert digester(f) == {
"md5": "81b196e3d8a1db4dd2e89faa39614396",
"sha1": "5273ac6247322c3c7b4735a6d19fd4a5366e812f",
"sha256": "80028815b3557e30d7cbef1d8dbc30af0ec0858eff34b960d2839fd88ad08871",
"sha512": "684d23393eee455f44c13ab00d062980937a5d040259d69c6b291c983bf6"
"35e1d405ff1dc2763e433d69b8f299b3f4da500663b813ce176a43e29ffc"
"c31b0159",
}
def test_get_zarr_checksum(mocker: MockerFixture, tmp_path: Path) -> None:
# Use write_bytes() so that the line endings are the same on POSIX and
# Windows.
(tmp_path / "file1.txt").write_bytes(b"This is the first file.\n")
(tmp_path / "file2.txt").write_bytes(b"This is the second file.\n")
sub1 = tmp_path / "sub1"
sub1.mkdir()
(sub1 / "file3.txt").write_bytes(b"This is the third file.\n")
(sub1 / "file4.txt").write_bytes(b"This is the fourth file.\n")
(sub1 / "file5.txt").write_bytes(b"This is the fifth file.\n")
subsub = sub1 / "subsub"
subsub.mkdir()
(subsub / "file6.txt").write_bytes(b"This is the sixth file.\n")
sub2 = tmp_path / "sub2"
sub2.mkdir()
(sub2 / "file7.txt").write_bytes(b"This is the seventh file.\n")
(sub2 / "file8.txt").write_bytes(b"This is the eighth file.\n")
empty = tmp_path / "empty"
empty.mkdir()
assert (
get_zarr_checksum(tmp_path / "file1.txt") == "d0aa42f003e36c1ecaf9aa8f20b6f1ad"
)
assert get_zarr_checksum(tmp_path) == "25627e0fc7c609d10100d020f7782a25-8--197"
assert get_zarr_checksum(sub1) == "64af93ad7f8d471c00044d1ddbd4c0ba-4--97"
assert get_zarr_checksum(empty) == "481a2f77ab786a0f45aafd5db0971caa-0--0"
spy = mocker.spy(digests, "md5file_nocache")
assert (
get_zarr_checksum(
tmp_path,
known={
"file1.txt": "9ee7a152c5adb60803c928733acc1533",
# ^^ This one is different!
"file2.txt": "340c108ee69bf4626e7995a7048f52b8",
"sub1/file3.txt": "7351dc767bfad322ddce50401badc359",
"sub1/file4.txt": "bbede70f39fa8fc34f2dc4eda8b6bdea",
"sub1/file5.txt": "c4e828c509f90b84e5b72d9d5612d676",
"sub1/subsub/file6.txt": "6a7fe3b9e2c69a54216b7d5dcb4fe61d",
# "sub2/file7.txt": Absent!
"sub2/file8.txt": "7aadbff2b21f438baccded18b2e81ae3",
"nonexistent-file.txt": "123456789012345678901234567890ab",
# ^^ Not used in calculation!
},
)
== "f77f4c5b277575f781c19ba91422f0c5-8--197"
)
spy.assert_called_once_with(sub2 / "file7.txt")
|
2,622 |
send mynt from multisig to fee sharing
|
from brownie import *
import json
from scripts.utils import *
import scripts.contractInteraction.config as conf
def getBalance(contractAddress, acct):
contract = Contract.from_abi("Token", address=contractAddress, abi=LoanToken.abi, owner=conf.acct)
balance = contract.balanceOf(acct)
print(balance/1e18)
return balance
def getContractBTCBalance(contractAddress):
contract = Contract.from_abi("Token", address=contractAddress, abi=LoanToken.abi, owner=conf.acct)
return contract.balance()
def buyWRBTC(amount):
contract = Contract.from_abi("WRBTC", address=conf.contracts["WRBTC"], abi=WRBTC.abi, owner=conf.acct)
tx = contract.deposit({'value':amount})
tx.info()
print("New balance: ", contract.balanceOf(conf.acct))
def buyWRBTCWithMS(amount):
contract = Contract.from_abi("WRBTC", address=conf.contracts["WRBTC"], abi=WRBTC.abi, owner=conf.acct)
data = contract.deposit.encode_input()
sendWithMultisig(conf.contracts['multisig'], contract, data, conf.acct, amount)
def hasApproval(tokenContractAddr, sender, receiver):
tokenContract = Contract.from_abi("Token", address=tokenContractAddr, abi=TestToken.abi, owner=sender)
allowance = tokenContract.allowance(sender, receiver)
print("allowance: ", allowance/1e18)
return allowance
def mintNFT(contractAddress, receiver):
    with open('./scripts/contractInteraction/ABIs/SovrynNft.json') as abiFile:
        abi = json.load(abiFile)
nft = Contract.from_abi("NFT", address=contractAddress, abi=abi, owner=conf.acct)
nft.mint(receiver)
def transferTokensFromWallet(tokenContract, receiver, amount):
token = Contract.from_abi("Token", address=tokenContract, abi = TestToken.abi, owner=conf.acct)
token.transfer(receiver, amount)
def sendToWatcher(tokenAddress, amount):
if(tokenAddress == conf.contracts['WRBTC']):
buyWRBTC(amount)
transferTokensFromWallet(tokenAddress, conf.contracts['WatcherContract'], amount)
def tokenApproveFromMS(tokenContract, receiver, amount):
token = Contract.from_abi("Token", address= tokenContract, abi = TestToken.abi, owner=conf.acct)
data = token.approve.encode_input(receiver, amount)
sendWithMultisig(conf.contracts['multisig'], tokenContract, data, conf.acct)
def increaseAllowanceFromMS(tokenContractAddress, receiver, amount):
token = Contract.from_abi("Token", address= tokenContractAddress, abi = TestToken.abi, owner=conf.acct)
data = token.increaseAllowance.encode_input(receiver, amount)
sendWithMultisig(conf.contracts['multisig'], tokenContractAddress, data, conf.acct)
def METHOD_NAME(amount):
feeSharingCollectorProxy = Contract.from_abi("FeeSharingCollector", address=conf.contracts['FeeSharingCollectorProxy'], abi=FeeSharingCollector.abi, owner=conf.acct)
multisig = Contract.from_abi("MultiSig", address=conf.contracts['multisig'], abi=MultiSigWallet.abi, owner=conf.acct)
token = Contract.from_abi("MYNT", address=conf.contracts['MYNT'], abi=ERC20.abi, owner=conf.acct)
if(token.allowance(multisig.address, feeSharingCollectorProxy.address) < amount):
myntBalance = getBalanceOf(conf.contracts['MYNT'], conf.contracts['multisig'])
if(myntBalance < amount):
print('⚠️ ALERT! Multisig does not have enough MYNT balance to transfer to FeeSharingCollectorProxy: need ', amount - myntBalance)
print('Approving MYNT for FeeSharingCollectorProxy: ', amount)
tokenApproveFromMS(conf.contracts["MYNT"], feeSharingCollectorProxy, amount)
data = feeSharingCollectorProxy.transferTokens.encode_input(conf.contracts['MYNT'], amount)
print('Calling feeSharingCollectorProxy.transferTokens(multisig, mynt, amount): ', conf.contracts['multisig'], conf.contracts['MYNT'], amount)
print(data)
sendWithMultisig(conf.contracts['multisig'], conf.contracts['FeeSharingCollectorProxy'], data, conf.acct)
def getBalanceOf(contractAddress, acct):
balance = getBalanceNoPrintOf(contractAddress, acct)
print(balance)
return balance
def getBalanceNoPrintOf(contractAddress, acct):
contract = Contract.from_abi("Token", address=contractAddress, abi=TestToken.abi, owner=conf.acct)
balance = contract.balanceOf(acct)
return balance
def getTotalSupply(contractAddress):
contract = Contract.from_abi("Token", address=contractAddress, abi=TestToken.abi, owner=conf.acct)
balance = contract.totalSupply()
print(balance)
return balance
def deployTestTokenLimited(name, symbol):
token = conf.acct.deploy(TestTokenLimited, name, symbol, 18, 100000e18)
def printLendingPoolData(iTokenName, tokenName):
loanToken = Contract.from_abi("loanToken", address=conf.contracts[iTokenName], abi=LoanTokenLogicStandard.abi, owner=conf.acct)
print(iTokenName)
print(" - totalSupply():"," ", loanToken.totalSupply()/1e18)
print(" - marketLiquidity():", loanToken.marketLiquidity()/1e18)
print(" - tokenPrice():"," ", loanToken.tokenPrice()/1e18)
print(" - balance:"," ", getBalanceNoPrintOf(conf.contracts[tokenName], loanToken.address)/1e18, tokenName)
def printLendingPoolsData():
printLendingPoolData("iRBTC", "WRBTC")
printLendingPoolData("iUSDT", "USDT")
printLendingPoolData("iXUSD", "XUSD")
printLendingPoolData("iBPro", "BPro")
printLendingPoolData("iDOC", "DoC")
printLendingPoolData("iDLLR", "DLLR")
|
2,623 |
emulator
|
#!/usr/bin/env python
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import copy
import os
import collections.abc
import jsonschema
from gns3.utils.get_resource import get_resource
class ApplianceError(Exception):
pass
class Appliance(collections.abc.Mapping):
def __init__(self, registry, path):
"""
:params registry: Instance of the registry where images are located
:params path: Path of the appliance file on disk or file content
"""
self._registry = registry
if os.path.isabs(path):
try:
with open(path, encoding="utf-8") as f:
self._appliance = json.load(f)
except (OSError, ValueError) as e:
raise ApplianceError("Could not read appliance {}: {}".format(os.path.abspath(path), str(e)))
else:
try:
self._appliance = json.loads(path)
except ValueError as e:
raise ApplianceError("Could not read appliance {}: {}".format(os.path.abspath(path), str(e)))
self._check_config()
self._resolve_version()
def _check_config(self):
"""
:param appliance: Sanity check on the appliance configuration
"""
if "registry_version" not in self._appliance:
raise ApplianceError("Invalid appliance configuration please report the issue on https://github.com/GNS3/gns3-registry")
if self._appliance["registry_version"] > 7:
raise ApplianceError("Please update GNS3 in order to install this appliance")
with open(get_resource(os.path.join("schemas", "appliance.json"))) as f:
schema = json.load(f)
v = jsonschema.Draft4Validator(schema)
try:
v.validate(self._appliance)
except jsonschema.ValidationError as e:
error = jsonschema.exceptions.best_match(v.iter_errors(self._appliance)).message
raise ApplianceError("Invalid appliance file: {}".format(error))
def __getitem__(self, key):
return self._appliance.__getitem__(key)
def __iter__(self):
return self._appliance.__iter__()
def __len__(self):
return self._appliance.__len__()
def _resolve_version(self):
"""
Replace image field in versions by their the complete information from images
"""
if "versions" not in self._appliance:
return
for version in self._appliance["versions"]:
for image_type, filename in version["images"].items():
found = False
for file in self._appliance["images"]:
file = copy.copy(file)
if "idlepc" in version:
file["idlepc"] = version["idlepc"]
if "/" in filename:
parent = filename.split("/")[0]
name = filename.split("/")[-1:][0]
filename = os.path.join(parent, name)
else:
parent = filename
if file["filename"] == parent:
file["filename"] = filename
version["images"][image_type] = file
found = True
break
if not found:
raise ApplianceError("Broken appliance missing file {} for version {}".format(filename, version["name"]))
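    # Note on _resolve_version (added for clarity): an image reference such as
    # "some_dir/disk.qcow2" is matched against the top-level "images" list by its
    # first path component, and the matching record is copied into the version with
    # the full relative filename (plus the version's idlepc, if one is defined).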
def create_new_version(self, new_version):
"""
Duplicate a version in order to create a new version
"""
if "versions" not in self._appliance.keys() or not self._appliance["versions"]:
raise ApplianceError("Your appliance file doesn't contain any versions")
self._appliance["versions"].append(new_version)
def search_images_for_version(self, version_name):
"""
Search on disk the images required by this version.
And keep only the require images in the images fields. Add to the images
their disk type and path.
:param version_name: Version name
:returns: Appliance with only require images
"""
found = False
appliance = copy.deepcopy(self._appliance)
for version in appliance["versions"]:
if version["name"] == version_name:
appliance["images"] = []
for image_type, image in version["images"].items():
image["type"] = image_type
img = self._registry.search_image_file(self.METHOD_NAME(), image["filename"], image.get("md5sum"), image.get("filesize"))
if img is None:
if "md5sum" in image:
raise ApplianceError("File {} with checksum {} not found for {}".format(image["filename"], image["md5sum"], appliance["name"]))
else:
raise ApplianceError("File {} not found for {}".format(image["filename"], appliance["name"]))
image["path"] = img.path
image["location"] = img.location
if "md5sum" not in image:
image["md5sum"] = img.md5sum
image["filesize"] = img.filesize
appliance["images"].append(image)
found = True
appliance["name"] = "{} {}".format(appliance["name"], version_name)
break
if not found:
raise ApplianceError("Version {} not found for {}".format(version_name, appliance["name"]))
return appliance
def copy(self):
"""
Get a copy of the appliance
"""
return copy.deepcopy(self._appliance)
def is_version_installable(self, version):
"""
Search on disk if a version is available for this appliance
:params version: Version name
:returns: Boolean true if installable
"""
try:
self.search_images_for_version(version)
return True
except ApplianceError:
return False
def METHOD_NAME(self):
if "qemu" in self._appliance:
return "qemu"
if "iou" in self._appliance:
return "iou"
return "dynamips"
|
2,624 |
get shard range
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
import logging
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch import Tensor
from torch.nn import Module
from .common_utils import _get_feat_lens_paths
_LG = logging.getLogger(__name__)
_DEFAULT_DEVICE = torch.device("cpu")
def METHOD_NAME(num_lines: int, num_rank: int, rank: int) -> Tuple[int, int]:
r"""Get the range of indices for the current rank in multi-processing.
Args:
num_lines (int): The number of lines to process.
num_rank (int): The number of ranks for multi-processing in feature extraction.
rank (int): The rank in the multi-processing.
Returns:
(int, int):
int: The start index for the current rank.
int: The end index for the current rank.
"""
assert 1 <= rank <= num_rank, f"invalid rank/num_rank {rank}/{num_rank}"
assert num_lines > 0, f"Found {num_lines} files, make sure you specify the correct root directory"
start = round(num_lines / num_rank * (rank - 1))
end = round(num_lines / num_rank * rank)
_LG.info(f"rank {rank} of {num_rank}, process {end-start} " f"({start}-{end}) out of {num_lines}")
return start, end
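# Example (added for illustration): with num_lines=10 and num_rank=3 the ranks are
# assigned the half-open index ranges (0, 3), (3, 7) and (7, 10), which together
# cover every line exactly once.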
def extract_feature_mfcc(
path: str,
device: torch.device,
sample_rate: int,
) -> Tensor:
r"""Extract MFCC features for KMeans clustering and pseudo label prediction.
Args:
path (str): The file path of the audio.
device (torch.device): The location to allocate for PyTorch Tensors.
Options: [``torch.device('cpu')``, torch.device('cuda')``].
sample_rate (int): The sample rate of the audio.
Returns:
Tensor: The desired feature tensor of the given audio file.
"""
waveform, sr = torchaudio.load(path)
assert sr == sample_rate
feature_extractor = torchaudio.transforms.MFCC(
sample_rate=sample_rate, n_mfcc=13, melkwargs={"n_fft": 400, "hop_length": 160, "center": False}
).to(device)
waveform = waveform[0].to(device)
mfccs = feature_extractor(waveform) # (freq, time)
deltas = torchaudio.functional.compute_deltas(mfccs)
ddeltas = torchaudio.functional.compute_deltas(deltas)
concat = torch.cat([mfccs, deltas, ddeltas], dim=0)
feat = concat.transpose(0, 1) # (time, freq)
return feat
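# Note (added for clarity): with n_mfcc=13 the returned tensor has shape (time, 39),
# i.e. the 13 MFCCs stacked with their first- and second-order deltas along the
# feature dimension.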
def extract_feature_hubert(
path: str,
device: torch.device,
sample_rate: int,
model: Module,
layer_index: int,
) -> Tensor:
r"""Extract HuBERT features for KMeans clustering and pseudo label prediction.
Args:
path (str): The file path of the audio.
device (torch.device): The location to allocate for PyTorch Tensors.
Options: [``torch.device('cpu')``, torch.device('cuda')``].
sample_rate (int): The sample rate of the audio.
model (Module): The loaded ``HuBERTPretrainModel`` model.
layer_index (int): The index of transformer layers in
``torchaudio.models.HuBERTPretrainModel`` for extracting features.
(``1`` means the first layer output).
Returns:
Tensor: The desired feature tensor of the given audio file.
"""
waveform, sr = torchaudio.load(path)
assert sr == sample_rate
waveform = waveform.to(device)
with torch.inference_mode():
feat = model.wav2vec2.extract_features(waveform, num_layers=layer_index)[0][-1][0] # (time, feat_dim)
return feat
def _load_state(model: Module, checkpoint_path: Path, device=_DEFAULT_DEVICE) -> Module:
"""Load weights from HuBERTPretrainModel checkpoint into hubert_pretrain_base model.
Args:
model (Module): The hubert_pretrain_base model.
checkpoint_path (Path): The model checkpoint.
device (torch.device, optional): The device of the model. (Default: ``torch.device("cpu")``)
Returns:
(Module): The pretrained model.
"""
state_dict = torch.load(checkpoint_path, map_location=device)
state_dict = {k.replace("model.", ""): v for k, v in state_dict["state_dict"].items()}
model.load_state_dict(state_dict)
return model
def dump_features(
tsv_file: Union[str, Path],
out_dir: Union[str, Path],
split: str,
rank: int,
num_rank: int,
device: torch.device,
feature_type: str = "mfcc",
layer_index: Optional[int] = None,
checkpoint_path: Optional[Path] = None,
sample_rate: int = 16_000,
) -> None:
r"""Dump the feature tensors given a ``.tsv`` file list. The feature and lengths tensors
will be stored under ``out_dir`` directory.
Args:
tsv_file (str or Path): The path of the tsv file.
out_dir (str or Path): The directory to store the feature tensors.
split (str): The split of data. Options: [``train``, ``valid``].
rank (int): The rank in the multi-processing.
num_rank (int): The number of ranks for multi-processing in feature extraction.
device (torch.device): The location to allocate for PyTorch Tensors.
Options: [``torch.device('cpu')``, torch.device('cuda')``].
feature_type (str, optional): The type of the desired feature. Options: [``mfcc``, ``hubert``].
(Default: ``mfcc``)
layer_index (int or None, optional): The index of transformer layers in
``torchaudio.models.HuBERTPretrainModel`` for extracting features.
(``1`` means the first layer output). Only active when ``feature_type``
is set to ``hubert``. (Default: ``None``)
checkpoint_path(Path or None, optional): The checkpoint path of ``torchaudio.models.HuBERTPretrainModel``.
Only active when ``feature_type`` is set to ``hubert``. (Default: ``None``)
sample_rate (int, optional): The sample rate of the audio. (Default: ``16000``)
Returns:
None
"""
if feature_type not in ["mfcc", "hubert"]:
raise ValueError(f"Expected feature type to be 'mfcc' or 'hubert'. Found {feature_type}.")
if feature_type == "hubert" and layer_index is None:
        raise ValueError("Please set the layer_index for HuBERT feature.")
features = []
lens = []
out_dir = Path(out_dir)
feat_path, len_path = _get_feat_lens_paths(out_dir, split, rank, num_rank)
if feature_type == "hubert":
from torchaudio.models import hubert_pretrain_base
model = hubert_pretrain_base()
model.to(device)
model = _load_state(model, checkpoint_path, device)
with open(tsv_file, "r") as f:
root = f.readline().rstrip()
lines = [line.rstrip() for line in f]
start, end = METHOD_NAME(len(lines), num_rank, rank)
lines = lines[start:end]
for line in lines:
path, nsample = line.split("\t")
path = f"{root}/{path}"
nsample = int(nsample)
if feature_type == "mfcc":
feature = extract_feature_mfcc(path, device, sample_rate)
else:
feature = extract_feature_hubert(path, device, sample_rate, model, layer_index)
features.append(feature.cpu())
lens.append(feature.shape[0])
features = torch.cat(features)
lens = torch.Tensor(lens)
torch.save(features, feat_path)
torch.save(lens, len_path)
_LG.info(f"Finished dumping features for rank {rank} of {num_rank} successfully")
|
2,625 |
metadatas
|
import types
import typing
from _typeshed import Incomplete
from collections.abc import Generator
from dataclasses import Field
from mashumaro.config import (
ADD_DIALECT_SUPPORT as ADD_DIALECT_SUPPORT,
ADD_SERIALIZATION_CONTEXT as ADD_SERIALIZATION_CONTEXT,
BaseConfig as BaseConfig,
SerializationStrategyValueType as SerializationStrategyValueType,
TO_DICT_ADD_BY_ALIAS_FLAG as TO_DICT_ADD_BY_ALIAS_FLAG,
TO_DICT_ADD_OMIT_NONE_FLAG as TO_DICT_ADD_OMIT_NONE_FLAG,
)
from mashumaro.core.const import Sentinel as Sentinel
from mashumaro.core.helpers import ConfigValue as ConfigValue
from mashumaro.core.meta.code.lines import CodeLines as CodeLines
from mashumaro.core.meta.helpers import (
get_args as get_args,
get_class_that_defines_field as get_class_that_defines_field,
get_class_that_defines_method as get_class_that_defines_method,
get_literal_values as get_literal_values,
get_name_error_name as get_name_error_name,
hash_type_args as hash_type_args,
is_class_var as is_class_var,
is_dataclass_dict_mixin as is_dataclass_dict_mixin,
is_dialect_subclass as is_dialect_subclass,
is_init_var as is_init_var,
is_literal as is_literal,
is_optional as is_optional,
is_type_var_any as is_type_var_any,
resolve_type_params as resolve_type_params,
substitute_type_params as substitute_type_params,
type_name as type_name,
)
from mashumaro.core.meta.types.common import FieldContext as FieldContext, ValueSpec as ValueSpec
from mashumaro.core.meta.types.pack import PackerRegistry as PackerRegistry
from mashumaro.core.meta.types.unpack import (
SubtypeUnpackerBuilder as SubtypeUnpackerBuilder,
UnpackerRegistry as UnpackerRegistry,
)
from mashumaro.dialect import Dialect as Dialect
from mashumaro.exceptions import (
BadDialect as BadDialect,
BadHookSignature as BadHookSignature,
InvalidFieldValue as InvalidFieldValue,
MissingDiscriminatorError as MissingDiscriminatorError,
MissingField as MissingField,
SuitableVariantNotFoundError as SuitableVariantNotFoundError,
ThirdPartyModuleNotFoundError as ThirdPartyModuleNotFoundError,
UnresolvedTypeReferenceError as UnresolvedTypeReferenceError,
UnserializableDataError as UnserializableDataError,
UnserializableField as UnserializableField,
UnsupportedDeserializationEngine as UnsupportedDeserializationEngine,
UnsupportedSerializationEngine as UnsupportedSerializationEngine,
)
from mashumaro.types import Discriminator as Discriminator
__PRE_SERIALIZE__: str
__PRE_DESERIALIZE__: str
__POST_SERIALIZE__: str
__POST_DESERIALIZE__: str
class CodeBuilder:
cls: Incomplete
lines: Incomplete
globals: Incomplete
resolved_type_params: Incomplete
field_classes: Incomplete
initial_type_args: Incomplete
dialect: Incomplete
default_dialect: Incomplete
allow_postponed_evaluation: Incomplete
format_name: Incomplete
decoder: Incomplete
encoder: Incomplete
encoder_kwargs: Incomplete
def __init__(
self,
cls: typing.Type,
type_args: typing.Tuple[typing.Type, ...] = ...,
dialect: typing.Optional[typing.Type[Dialect]] = ...,
first_method: str = ...,
allow_postponed_evaluation: bool = ...,
format_name: str = ...,
decoder: typing.Optional[typing.Any] = ...,
encoder: typing.Optional[typing.Any] = ...,
encoder_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = ...,
default_dialect: typing.Optional[typing.Type[Dialect]] = ...,
) -> None: ...
def reset(self) -> None: ...
@property
def namespace(self) -> typing.Mapping[typing.Any, typing.Any]: ...
@property
def annotations(self) -> typing.Dict[str, typing.Any]: ...
def get_field_resolved_type_params(
self, field_name: str
) -> typing.Dict[typing.Type, typing.Type]: ...
def get_field_types(self, include_extras: bool = ...) -> typing.Dict[str, typing.Any]: ...
@property
def dataclass_fields(self) -> typing.Dict[str, Field]: ...
@property
def METHOD_NAME(self) -> typing.Dict[str, typing.Mapping[str, typing.Any]]: ...
def get_field_default(self, name: str) -> typing.Any: ...
def add_type_modules(self, *types_: typing.Type) -> None: ...
def ensure_module_imported(self, module: types.ModuleType) -> None: ...
def ensure_object_imported(
self, obj: typing.Any, name: typing.Optional[str] = ...
) -> None: ...
def add_line(self, line: str) -> None: ...
def indent(self, expr: typing.Optional[str] = ...) -> typing.Generator[None, None, None]: ...
def compile(self) -> None: ...
def get_declared_hook(self, method_name: str) -> typing.Any: ...
def add_unpack_method(self) -> None: ...
def get_config(self, cls: Incomplete | None = ..., look_in_parents: bool = ...): ...
def get_discriminator(self) -> typing.Optional[Discriminator]: ...
def get_pack_method_flags(
self, cls: typing.Optional[typing.Type] = ..., pass_encoder: bool = ...
) -> str: ...
def get_unpack_method_flags(
self, cls: typing.Optional[typing.Type] = ..., pass_decoder: bool = ...
) -> str: ...
def get_pack_method_default_flag_values(
self, cls: typing.Optional[typing.Type] = ..., pass_encoder: bool = ...
) -> str: ...
def get_unpack_method_default_flag_values(self, pass_decoder: bool = ...) -> str: ...
def is_code_generation_option_enabled(
self, option: str, cls: typing.Optional[typing.Type] = ...
) -> bool: ...
@classmethod
def get_unpack_method_name(
cls,
type_args: typing.Iterable = ...,
format_name: str = ...,
decoder: typing.Optional[typing.Any] = ...,
) -> str: ...
@classmethod
def get_pack_method_name(
cls,
type_args: typing.Tuple[typing.Type, ...] = ...,
format_name: str = ...,
encoder: typing.Optional[typing.Any] = ...,
) -> str: ...
def add_pack_method(self) -> None: ...
def iter_serialization_strategies(
self, metadata, ftype
) -> Generator[Incomplete, None, None]: ...
|
2,626 |
test hop 2 2 forwards
|
import pandas as pd
from common import NoAuthTestCase
from functools import lru_cache
from graphistry.tests.test_compute import CGFull
@lru_cache(maxsize=1)
def hops_graph():
nodes_df = pd.DataFrame([
{'node': 'a'},
{'node': 'b'},
{'node': 'c'},
{'node': 'd'},
{'node': 'e'},
{'node': 'f'},
{'node': 'g'},
{'node': 'h'},
{'node': 'i'},
{'node': 'j'},
{'node': 'k'},
{'node': 'l'},
{'node': 'm'},
{'node': 'n'},
{'node': 'o'},
{'node': 'p'}
]).assign(type='n')
edges_df = pd.DataFrame([
{'s': 'e', 'd': 'l'},
{'s': 'l', 'd': 'b'},
{'s': 'k', 'd': 'a'},
{'s': 'e', 'd': 'g'},
{'s': 'g', 'd': 'a'},
{'s': 'd', 'd': 'f'},
{'s': 'd', 'd': 'c'},
{'s': 'd', 'd': 'j'},
{'s': 'd', 'd': 'i'},
{'s': 'd', 'd': 'h'},
{'s': 'j', 'd': 'p'},
{'s': 'i', 'd': 'n'},
{'s': 'h', 'd': 'm'},
{'s': 'j', 'd': 'o'},
{'s': 'o', 'd': 'b'},
{'s': 'm', 'd': 'a'},
{'s': 'n', 'd': 'a'},
{'s': 'p', 'd': 'b'},
]).assign(type='e')
return CGFull().nodes(nodes_df, 'node').edges(edges_df, 's', 'd')
class TestComputeHopMixin(NoAuthTestCase):
def test_hop_0(self):
g = hops_graph()
g2 = g.hop(pd.DataFrame({g._node: []}), 0)
assert g2._nodes.shape == (0, 2)
assert g2._edges.shape == (0, 3)
def test_hop_0b(self):
g = hops_graph()
g2 = g.hop(pd.DataFrame({g._node: ['d']}), 0)
assert g2._nodes.shape == (0, 2)
assert g2._edges.shape == (0, 3)
def test_hop_1_1_forwards(self):
g = hops_graph()
g2 = g.hop(pd.DataFrame({g._node: ['d']}), 1)
assert g2._nodes.shape == (6, 2)
assert (g2._nodes[g2._node].sort_values().to_list() == # noqa: W504
sorted(['f', 'j', 'd','i', 'c', 'h']))
assert g2._edges.shape == (5, 3)
def test_hop_2_1_forwards(self):
g = hops_graph()
g2 = g.hop(pd.DataFrame({g._node: ['k', 'd']}), 1)
assert g2._nodes.shape == (8, 2)
assert g2._edges.shape == (6, 3)
def METHOD_NAME(self):
g = hops_graph()
g2 = g.hop(pd.DataFrame({g._node: ['k', 'd']}), 2)
assert g2._nodes.shape == (12, 2)
assert g2._edges.shape == (10, 3)
def test_hop_2_all_forwards(self):
g = hops_graph()
g2 = g.hop(pd.DataFrame({g._node: ['k', 'd']}), to_fixed_point=True)
assert g2._nodes.shape == (13, 2)
assert g2._edges.shape == (14, 3)
def test_hop_1_2_undirected(self):
g = hops_graph()
g2 = g.hop(pd.DataFrame({g._node: ['j']}), 2, direction='undirected')
assert g2._nodes.shape == (9, 2)
assert g2._edges.shape == (9, 3)
def test_hop_1_all_reverse(self):
g = hops_graph()
g2 = g.hop(pd.DataFrame({g._node: ['b']}), direction='reverse', to_fixed_point=True)
assert g2._nodes.shape == (7, 2)
assert g2._edges.shape == (7, 3)
#edge filter
def test_hop_1_1_forwards_edge(self):
g = hops_graph()
g2 = g.hop(pd.DataFrame({g._node: ['d']}), 1, edge_match={'d': 'f'})
assert g2._nodes.shape == (2, 2)
assert (g2._nodes[g2._node].sort_values().to_list() == # noqa: W504
sorted(['f', 'd']))
assert g2._edges.shape == (1, 3)
def test_hop_post_match(self):
g = hops_graph()
g2 = g.hop(destination_node_match={'node': 'b'})
assert g2._nodes.shape == (4, 2)
assert (g2._nodes[g2._node].sort_values().to_list() == # noqa: W504
sorted(['b', 'l', 'o', 'p']))
assert g2._edges.shape == (3, 3)
def test_hop_pre_match(self):
g = hops_graph()
g2 = g.hop(source_node_match={'node': 'e'})
assert g2._nodes.shape == (3, 2)
assert (g2._nodes[g2._node].sort_values().to_list() == # noqa: W504
sorted(['e', 'l', 'g']))
assert g2._edges.shape == (2, 3)
def test_hop_pre_post_match_1(self):
g = hops_graph()
g2 = g.hop(source_node_match={'node': 'e'}, destination_node_match={'node': 'l'})
assert g2._nodes.shape == (2, 2)
assert (g2._nodes[g2._node].sort_values().to_list() == # noqa: W504
sorted(['e', 'l']))
assert g2._edges.shape == (1, 3)
|
2,627 |
test approve testing
|
# Copyright © 2016-2019 Red Hat, Inc. and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module contains tests for the compatibility scripts."""
from unittest.mock import patch, Mock
from click.testing import CliRunner
from bodhi.server.scripts import compat
from ..base import BasePyTestCase
# Don't muck around with global log level.
@patch('bodhi.server.scripts.compat.logging.basicConfig',
new=lambda *p, **k: None)
class TestCompat(BasePyTestCase):
"""This class contains tests for the compatibility scripts."""
def setup_method(self, method):
super().setup_method(method)
self.task_result = Mock(name="task_result")
self.task_mock = Mock(name="task")
self.task_mock.delay.return_value = self.task_result
def METHOD_NAME(self):
"""Ensure the approve_testing task is called."""
cli = CliRunner()
with patch('bodhi.server.tasks.approve_testing_task', self.task_mock):
result = cli.invoke(compat.approve_testing)
assert result.exit_code == 0
self.task_mock.delay.assert_called_with()
self.task_result.get.assert_called_with(propagate=True)
def test_check_policies(self):
"""Ensure the check_policies task is called."""
cli = CliRunner()
with patch('bodhi.server.tasks.check_policies_task', self.task_mock):
result = cli.invoke(compat.check_policies)
assert result.exit_code == 0
self.task_mock.delay.assert_called_with()
self.task_result.get.assert_called_with(propagate=True)
def test_clean_old_composes(self):
"""Ensure the clean_old_composes task is called."""
cli = CliRunner()
with patch('bodhi.server.tasks.clean_old_composes_task', self.task_mock):
result = cli.invoke(compat.clean_old_composes)
assert result.exit_code == 0
self.task_mock.delay.assert_called_with(num_to_keep=10)
self.task_result.get.assert_called_with(propagate=True)
def test_expire_overrides(self):
"""Ensure the expire_overrides task is called."""
cli = CliRunner()
with patch('bodhi.server.tasks.expire_overrides_task', self.task_mock):
result = cli.invoke(compat.expire_overrides)
assert result.exit_code == 0
self.task_mock.delay.assert_called_with()
self.task_result.get.assert_called_with(propagate=True)
def test_propagate_exceptions(self):
"""Ensure the exceptions cause the script to exit with a non-zero status."""
self.task_result.get.side_effect = RuntimeError("Kaboom!")
cli = CliRunner()
with patch('bodhi.server.tasks.expire_overrides_task', self.task_mock):
result = cli.invoke(compat.expire_overrides)
assert result.exit_code == 1
assert "Kaboom!" in result.output
@patch("bodhi.server.scripts.compat.get_appsettings")
@patch("bodhi.server.scripts.compat.config")
def test_arg_config_uri(self, config, get_appsettings):
"""Ensure the path to the configuration file can be passed."""
get_appsettings.return_value = {"foo": "bar"}
cli = CliRunner()
with patch('bodhi.server.tasks.expire_overrides_task', self.task_mock):
result = cli.invoke(compat.expire_overrides, ["test-config.ini"])
assert result.exit_code == 0
get_appsettings.assert_called_with("test-config.ini")
config.load_config.assert_called_with({"foo": "bar"})
def test_no_result_backend(self):
"""Ensure we don't crash when no result backend has been set."""
cli = CliRunner()
# mock.patch.dict() fails because the conf object is too complex, mock manually:
import bodhi.server.tasks
app = bodhi.server.tasks.app
old_result_backend = app.conf.result_backend
app.conf.result_backend = None
try:
with patch('bodhi.server.tasks.expire_overrides_task', self.task_mock):
result = cli.invoke(compat.expire_overrides)
finally:
app.conf.result_backend = old_result_backend
assert result.exit_code == 0
self.task_mock.delay.assert_called_with()
self.task_result.get.assert_not_called()
assert result.output.strip() == (
"No result backend have been configured in Celery, "
"I cannot wait for the task to complete."
)
|
2,628 |
test equality same object
|
# (C) Copyright 2005-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import unittest
from traits.api import (
Any,
cached_property,
ComparisonMode,
HasTraits,
Property,
Str,
)
class NoneCompare(HasTraits):
bar = Any(comparison_mode=ComparisonMode.none)
class IdentityCompare(HasTraits):
bar = Any(comparison_mode=ComparisonMode.identity)
class EqualityCompare(HasTraits):
bar = Any(comparison_mode=ComparisonMode.equality)
class Foo(HasTraits):
"""
Class implementing custom equality.
"""
name = Str
def __eq__(self, other):
return self.name == other.name
class TestComparisonMode(unittest.TestCase):
def setUp(self):
self.a = Foo(name="a")
self.same_as_a = Foo(name="a")
self.different_from_a = Foo(name="not a")
def bar_changed(self, object, trait, old, new):
self.changed_object = object
self.changed_trait = trait
self.changed_old = old
self.changed_new = new
self.changed_count += 1
def reset_change_tracker(self):
self.changed_object = None
self.changed_trait = None
self.changed_old = None
self.changed_new = None
self.changed_count = 0
def check_tracker(self, object, trait, old, new, count):
self.assertEqual(count, self.changed_count)
self.assertIs(object, self.changed_object)
self.assertEqual(trait, self.changed_trait)
self.assertIs(old, self.changed_old)
self.assertIs(new, self.changed_new)
def test_none_first_assignment(self):
nc = NoneCompare()
nc.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = nc.bar
nc.bar = self.a
self.check_tracker(nc, "bar", default_value, self.a, 1)
def test_identity_first_assignment(self):
ic = IdentityCompare()
ic.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = ic.bar
ic.bar = self.a
self.check_tracker(ic, "bar", default_value, self.a, 1)
def test_equality_first_assignment(self):
ec = EqualityCompare()
ec.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = ec.bar
ec.bar = self.a
self.check_tracker(ec, "bar", default_value, self.a, 1)
def test_none_same_object(self):
nc = NoneCompare()
nc.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = nc.bar
nc.bar = self.a
self.check_tracker(nc, "bar", default_value, self.a, 1)
nc.bar = self.a
self.check_tracker(nc, "bar", self.a, self.a, 2)
def test_identity_same_object(self):
ic = IdentityCompare()
ic.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = ic.bar
ic.bar = self.a
self.check_tracker(ic, "bar", default_value, self.a, 1)
ic.bar = self.a
self.check_tracker(ic, "bar", default_value, self.a, 1)
def METHOD_NAME(self):
ec = EqualityCompare()
ec.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = ec.bar
ec.bar = self.a
self.check_tracker(ec, "bar", default_value, self.a, 1)
ec.bar = self.a
self.check_tracker(ec, "bar", default_value, self.a, 1)
def test_none_different_object(self):
nc = NoneCompare()
nc.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = nc.bar
nc.bar = self.a
self.check_tracker(nc, "bar", default_value, self.a, 1)
nc.bar = self.different_from_a
self.check_tracker(nc, "bar", self.a, self.different_from_a, 2)
def test_identity_different_object(self):
ic = IdentityCompare()
ic.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = ic.bar
ic.bar = self.a
self.check_tracker(ic, "bar", default_value, self.a, 1)
ic.bar = self.different_from_a
self.check_tracker(ic, "bar", self.a, self.different_from_a, 2)
def test_equality_different_object(self):
ec = EqualityCompare()
ec.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = ec.bar
ec.bar = self.a
self.check_tracker(ec, "bar", default_value, self.a, 1)
ec.bar = self.different_from_a
self.check_tracker(ec, "bar", self.a, self.different_from_a, 2)
def test_none_different_object_same_as(self):
nc = NoneCompare()
nc.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = nc.bar
nc.bar = self.a
self.check_tracker(nc, "bar", default_value, self.a, 1)
nc.bar = self.same_as_a
self.check_tracker(nc, "bar", self.a, self.same_as_a, 2)
def test_identity_different_object_same_as(self):
ic = IdentityCompare()
ic.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = ic.bar
ic.bar = self.a
self.check_tracker(ic, "bar", default_value, self.a, 1)
ic.bar = self.same_as_a
self.check_tracker(ic, "bar", self.a, self.same_as_a, 2)
def test_equality_different_object_same_as(self):
ec = EqualityCompare()
ec.on_trait_change(self.bar_changed, "bar")
self.reset_change_tracker()
default_value = ec.bar
ec.bar = self.a
self.check_tracker(ec, "bar", default_value, self.a, 1)
# Values of a and same_as_a are the same and should therefore not
# be considered a change.
ec.bar = self.same_as_a
self.check_tracker(ec, "bar", default_value, self.a, 1)
def test_comparison_mode_none_with_cached_property(self):
# Even though the property is cached such that old value equals new
# value, its change event is tied to the dependent.
class Model(HasTraits):
value = Property(depends_on="name")
name = Str(comparison_mode=ComparisonMode.none)
@cached_property
def _get_value(self):
return self.trait_names
instance = Model()
events = []
instance.on_trait_change(lambda: events.append(None), "value")
instance.name = "A"
events.clear()
# when
instance.name = "A"
# then
self.assertEqual(len(events), 1)
|
2,629 |
setup
|
"""
Move with a Sprite Animation
Simple program to show basic sprite usage.
Artwork from https://kenney.nl
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sprite_move_animation
"""
from __future__ import annotations
import arcade
import random
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Move with a Sprite Animation Example"
COIN_SCALE = 0.5
COIN_COUNT = 50
CHARACTER_SCALING = 1
# How fast to move, and how fast to run the animation
MOVEMENT_SPEED = 5
UPDATES_PER_FRAME = 5
# Constants used to track if the player is facing left or right
RIGHT_FACING = 0
LEFT_FACING = 1
class PlayerCharacter(arcade.Sprite):
def __init__(self):
# Default to face-right
self.character_face_direction = RIGHT_FACING
# Used for flipping between image sequences
self.cur_texture = 0
# Adjust the collision box. Default includes too much empty space
# side-to-side. Box is centered at sprite center, (0, 0)
self.points = [[-22, -64], [22, -64], [22, 28], [-22, 28]]
# --- Load Textures ---
# Images from Kenney.nl's Asset Pack 3
main_path = ":resources:images/animated_characters/female_adventurer/femaleAdventurer"
# main_path = ":resources:images/animated_characters/female_person/femalePerson"
# main_path = ":resources:images/animated_characters/male_person/malePerson"
# main_path = ":resources:images/animated_characters/male_adventurer/maleAdventurer"
# main_path = ":resources:images/animated_characters/zombie/zombie"
# main_path = ":resources:images/animated_characters/robot/robot"
# Load textures for idle standing
self.idle_texture_pair = arcade.load_texture_pair(f"{main_path}_idle.png")
# Set up parent class
super().__init__(self.idle_texture_pair[0], scale=CHARACTER_SCALING)
# Load textures for walking
self.walk_textures = []
for i in range(8):
texture = arcade.load_texture_pair(f"{main_path}_walk{i}.png")
self.walk_textures.append(texture)
def update_animation(self, delta_time: float = 1 / 60):
# Figure out if we need to flip face left or right
if self.change_x < 0 and self.character_face_direction == RIGHT_FACING:
self.character_face_direction = LEFT_FACING
elif self.change_x > 0 and self.character_face_direction == LEFT_FACING:
self.character_face_direction = RIGHT_FACING
# Idle animation
if self.change_x == 0 and self.change_y == 0:
self.texture = self.idle_texture_pair[self.character_face_direction]
return
# Walking animation
self.cur_texture += 1
if self.cur_texture > 7 * UPDATES_PER_FRAME:
self.cur_texture = 0
frame = self.cur_texture // UPDATES_PER_FRAME
direction = self.character_face_direction
self.texture = self.walk_textures[frame][direction]
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
""" Set up the game and initialize the variables. """
super().__init__(width, height, title)
# Sprite lists
self.player_list = None
self.coin_list = None
# Set up the player
self.score = 0
self.player = None
def METHOD_NAME(self):
self.player_list = arcade.SpriteList()
self.coin_list = arcade.SpriteList()
# Set up the player
self.score = 0
self.player = PlayerCharacter()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
self.player.scale = 0.8
self.player_list.append(self.player)
for i in range(COIN_COUNT):
coin = arcade.Sprite(":resources:images/items/gold_1.png",
scale=0.5)
coin.center_x = random.randrange(SCREEN_WIDTH)
coin.center_y = random.randrange(SCREEN_HEIGHT)
self.coin_list.append(coin)
# Set the background color
self.background_color = arcade.color.AMAZON
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
self.clear()
# Draw all the sprites.
self.coin_list.draw()
self.player_list.draw()
# Put the text on the screen.
output = f"Score: {self.score}"
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
# Move the player
self.player_list.update()
# Update the players animation
self.player_list.update_animation()
# Generate a list of all sprites that collided with the player.
hit_list = arcade.check_for_collision_with_list(self.player, self.coin_list)
# Loop through each colliding sprite, remove it, and add to the score.
for coin in hit_list:
coin.remove_from_sprite_lists()
self.score += 1
def main():
""" Main function """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.METHOD_NAME()
arcade.run()
if __name__ == "__main__":
main()
|
2,630 |
decode state
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import binascii
from base64 import b64decode, b64encode
from typing import Optional, Tuple, cast
import urllib3.exceptions # type: ignore[import]
from etcd import Client as EtcdClient # type: ignore[import]
from etcd import (
EtcdAlreadyExist,
EtcdCompareFailed,
EtcdException,
EtcdKeyNotFound,
EtcdResult,
)
from torch.distributed import Store
from .api import RendezvousConnectionError, RendezvousParameters, RendezvousStateError
from .dynamic_rendezvous import RendezvousBackend, Token
from .etcd_store import EtcdStore
from .utils import parse_rendezvous_endpoint
class EtcdRendezvousBackend(RendezvousBackend):
"""Represents an etcd-based rendezvous backend.
Args:
client:
The ``etcd.Client`` instance to use to communicate with etcd.
run_id:
The run id of the rendezvous.
key_prefix:
The path under which to store the rendezvous state in etcd.
ttl:
The TTL of the rendezvous state. If not specified, defaults to two hours.
"""
_DEFAULT_TTL = 7200 # 2 hours
_client: EtcdClient
_key: str
_ttl: int
def __init__(
self,
client: EtcdClient,
run_id: str,
key_prefix: Optional[str] = None,
ttl: Optional[int] = None,
) -> None:
if not run_id:
raise ValueError("The run id must be a non-empty string.")
self._client = client
if key_prefix:
self._key = key_prefix + "/" + run_id
else:
self._key = run_id
if ttl and ttl > 0:
self._ttl = ttl
else:
self._ttl = self._DEFAULT_TTL
@property
def name(self) -> str:
"""See base class."""
return "etcd-v2"
def get_state(self) -> Optional[Tuple[bytes, Token]]:
"""See base class."""
try:
result = self._client.read(self._key)
except EtcdKeyNotFound:
return None
except (EtcdException, urllib3.exceptions.TimeoutError) as exc:
raise RendezvousConnectionError(
"The connection to etcd has failed. See inner exception for details."
) from exc
return self.METHOD_NAME(result)
def set_state(
self, state: bytes, token: Optional[Token] = None
) -> Optional[Tuple[bytes, Token, bool]]:
"""See base class."""
base64_state = b64encode(state).decode()
kwargs = {}
def get_state():
result = self.get_state()
if result is not None:
tmp = *result, False
# Python 3.6 does not support tuple unpacking in return
# statements.
return tmp
return None
if token:
try:
token = int(token)
except ValueError:
return get_state()
if token:
kwargs["prevIndex"] = token
else:
kwargs["prevExist"] = False
try:
result = self._client.write(self._key, base64_state, self._ttl, **kwargs)
except (EtcdAlreadyExist, EtcdCompareFailed):
result = None
except (EtcdException, urllib3.exceptions.TimeoutError) as exc:
raise RendezvousConnectionError(
"The connection to etcd has failed. See inner exception for details."
) from exc
if result is None:
return get_state()
tmp = *self.METHOD_NAME(result), True
return tmp
def METHOD_NAME(self, result: EtcdResult) -> Tuple[bytes, Token]:
base64_state = result.value.encode()
try:
state = b64decode(base64_state)
except binascii.Error as exc:
raise RendezvousStateError(
"The state object is corrupt. See inner exception for details."
) from exc
return state, result.modifiedIndex
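# Minimal illustrative sketch: the rendezvous state is stored in etcd as a
# base64 string, so encoding in ``set_state`` and decoding here are inverses.
def _demo_state_roundtrip() -> None:
    state = b"\x00rendezvous-state\xff"  # hypothetical opaque state blob
    encoded = b64encode(state).decode()    # what set_state writes to etcd
    decoded = b64decode(encoded.encode())  # what the decode step recovers
    assert decoded == state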
def _create_etcd_client(params: RendezvousParameters) -> EtcdClient:
host, port = parse_rendezvous_endpoint(params.endpoint, default_port=2379)
# The timeout
read_timeout = cast(int, params.get_as_int("read_timeout", 60))
if read_timeout <= 0:
raise ValueError("The read timeout must be a positive integer.")
# The communication protocol
protocol = params.get("protocol", "http").strip().lower()
if protocol != "http" and protocol != "https":
raise ValueError("The protocol must be HTTP or HTTPS.")
# The SSL client certificate
ssl_cert = params.get("ssl_cert")
if ssl_cert:
ssl_cert_key = params.get("ssl_cert_key")
if ssl_cert_key:
# The etcd client expects the certificate key as the second element
# of the `cert` tuple.
ssl_cert = (ssl_cert, ssl_cert_key)
# The root certificate
ca_cert = params.get("ca_cert")
try:
return EtcdClient(
host,
port,
read_timeout=read_timeout,
protocol=protocol,
cert=ssl_cert,
ca_cert=ca_cert,
allow_reconnect=True,
)
except (EtcdException, urllib3.exceptions.TimeoutError) as exc:
raise RendezvousConnectionError(
"The connection to etcd has failed. See inner exception for details."
) from exc
def create_backend(params: RendezvousParameters) -> Tuple[EtcdRendezvousBackend, Store]:
"""Creates a new :py:class:`EtcdRendezvousBackend` from the specified
parameters.
+--------------+-----------------------------------------------------------+
| Parameter | Description |
+==============+===========================================================+
| read_timeout | The read timeout, in seconds, for etcd operations. |
| | Defaults to 60 seconds. |
+--------------+-----------------------------------------------------------+
| protocol | The protocol to use to communicate with etcd. Valid |
| | values are "http" and "https". Defaults to "http". |
+--------------+-----------------------------------------------------------+
| ssl_cert | The path to the SSL client certificate to use along with |
| | HTTPS. Defaults to ``None``. |
+--------------+-----------------------------------------------------------+
| ssl_cert_key | The path to the private key of the SSL client certificate |
| | to use along with HTTPS. Defaults to ``None``. |
+--------------+-----------------------------------------------------------+
    | ca_cert      | The path to the root SSL authority certificate. Defaults |
| | to ``None``. |
+--------------+-----------------------------------------------------------+
"""
client = _create_etcd_client(params)
backend = EtcdRendezvousBackend(client, params.run_id, key_prefix="/torch/elastic/rendezvous")
store = EtcdStore(client, "/torch/elastic/store")
return backend, store
|
2,631 |
handle group change
|
"""
core node services
"""
import tkinter as tk
from tkinter import messagebox, ttk
from typing import TYPE_CHECKING, Optional
from core.api.grpc.wrappers import Node
from core.gui.dialogs.dialog import Dialog
from core.gui.dialogs.serviceconfig import ServiceConfigDialog
from core.gui.themes import FRAME_PAD, PADX, PADY
from core.gui.widgets import CheckboxList, ListboxScroll
if TYPE_CHECKING:
from core.gui.app import Application
class NodeServiceDialog(Dialog):
def __init__(self, app: "Application", node: Node) -> None:
title = f"{node.name} Services (Deprecated)"
super().__init__(app, title)
self.node: Node = node
self.groups: Optional[ListboxScroll] = None
self.services: Optional[CheckboxList] = None
self.current: Optional[ListboxScroll] = None
services = set(node.services)
self.current_services: set[str] = services
self.protocol("WM_DELETE_WINDOW", self.click_cancel)
self.draw()
def draw(self) -> None:
self.top.columnconfigure(0, weight=1)
self.top.rowconfigure(0, weight=1)
frame = ttk.Frame(self.top)
frame.grid(stick="nsew", pady=PADY)
frame.rowconfigure(0, weight=1)
for i in range(3):
frame.columnconfigure(i, weight=1)
label_frame = ttk.LabelFrame(frame, text="Groups", padding=FRAME_PAD)
label_frame.grid(row=0, column=0, sticky=tk.NSEW)
label_frame.rowconfigure(0, weight=1)
label_frame.columnconfigure(0, weight=1)
self.groups = ListboxScroll(label_frame)
self.groups.grid(sticky=tk.NSEW)
for group in sorted(self.app.core.services):
self.groups.listbox.insert(tk.END, group)
self.groups.listbox.bind("<<ListboxSelect>>", self.METHOD_NAME)
self.groups.listbox.selection_set(0)
label_frame = ttk.LabelFrame(frame, text="Services")
label_frame.grid(row=0, column=1, sticky=tk.NSEW)
label_frame.columnconfigure(0, weight=1)
label_frame.rowconfigure(0, weight=1)
self.services = CheckboxList(
label_frame, self.app, clicked=self.service_clicked, padding=FRAME_PAD
)
self.services.grid(sticky=tk.NSEW)
label_frame = ttk.LabelFrame(frame, text="Selected", padding=FRAME_PAD)
label_frame.grid(row=0, column=2, sticky=tk.NSEW)
label_frame.rowconfigure(0, weight=1)
label_frame.columnconfigure(0, weight=1)
self.current = ListboxScroll(label_frame)
self.current.grid(sticky=tk.NSEW)
for service in sorted(self.current_services):
self.current.listbox.insert(tk.END, service)
if self.is_custom_service(service):
self.current.listbox.itemconfig(tk.END, bg="green")
frame = ttk.Frame(self.top)
frame.grid(stick="ew")
for i in range(4):
frame.columnconfigure(i, weight=1)
button = ttk.Button(frame, text="Configure", command=self.click_configure)
button.grid(row=0, column=0, sticky=tk.EW, padx=PADX)
button = ttk.Button(frame, text="Save", command=self.click_save)
button.grid(row=0, column=1, sticky=tk.EW, padx=PADX)
button = ttk.Button(frame, text="Remove", command=self.click_remove)
button.grid(row=0, column=2, sticky=tk.EW, padx=PADX)
button = ttk.Button(frame, text="Cancel", command=self.click_cancel)
button.grid(row=0, column=3, sticky=tk.EW)
# trigger group change
self.METHOD_NAME()
def METHOD_NAME(self, event: tk.Event = None) -> None:
selection = self.groups.listbox.curselection()
if selection:
index = selection[0]
group = self.groups.listbox.get(index)
self.services.clear()
for name in sorted(self.app.core.services[group]):
checked = name in self.current_services
self.services.add(name, checked)
def service_clicked(self, name: str, var: tk.IntVar) -> None:
if var.get() and name not in self.current_services:
self.current_services.add(name)
elif not var.get() and name in self.current_services:
self.current_services.remove(name)
self.node.service_configs.pop(name, None)
self.node.service_file_configs.pop(name, None)
self.current.listbox.delete(0, tk.END)
for name in sorted(self.current_services):
self.current.listbox.insert(tk.END, name)
if self.is_custom_service(name):
self.current.listbox.itemconfig(tk.END, bg="green")
self.node.services = self.current_services.copy()
def click_configure(self) -> None:
current_selection = self.current.listbox.curselection()
if len(current_selection):
dialog = ServiceConfigDialog(
self,
self.app,
self.current.listbox.get(current_selection[0]),
self.node,
)
# if error occurred when creating ServiceConfigDialog, don't show the dialog
if not dialog.has_error:
dialog.show()
else:
dialog.destroy()
else:
messagebox.showinfo(
"Service Configuration", "Select a service to configure", parent=self
)
def click_cancel(self) -> None:
self.destroy()
def click_save(self) -> None:
self.node.services = self.current_services.copy()
self.destroy()
def click_remove(self) -> None:
cur = self.current.listbox.curselection()
if cur:
service = self.current.listbox.get(cur[0])
self.current.listbox.delete(cur[0])
self.current_services.remove(service)
self.node.service_configs.pop(service, None)
self.node.service_file_configs.pop(service, None)
for checkbutton in self.services.frame.winfo_children():
if checkbutton["text"] == service:
checkbutton.invoke()
return
def is_custom_service(self, service: str) -> bool:
has_service_config = service in self.node.service_configs
has_file_config = service in self.node.service_file_configs
return has_service_config or has_file_config
|
2,632 |
append path
|
from typing import Any, Optional
class Context:
def __init__(self, target: Any) -> None: ...
def get_target(self): ...
def save(self) -> None: ...
def restore(self) -> None: ...
def __enter__(self): ...
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: ...
def push_group(self) -> None: ...
def push_group_with_content(self, content: Any) -> None: ...
def pop_group(self): ...
def pop_group_to_source(self) -> None: ...
def get_group_target(self): ...
def set_source_rgba(
self, red: float, green: float, blue: float, alpha: float = ...
) -> None: ...
def set_source_rgb(self, red: float, green: float, blue: float) -> None: ...
def set_source_surface(self, surface: Any, x: int = ..., y: int = ...) -> None: ...
def set_source(self, source: Any) -> None: ...
def get_source(self): ...
def set_antialias(self, antialias: Any) -> None: ...
def get_antialias(self): ...
def set_dash(self, dashes: Any, offset: int = ...) -> None: ...
def get_dash(self): ...
def get_dash_count(self): ...
def set_fill_rule(self, fill_rule: Any) -> None: ...
def get_fill_rule(self): ...
def set_line_cap(self, line_cap: Any) -> None: ...
def get_line_cap(self): ...
def set_line_join(self, line_join: Any) -> None: ...
def get_line_join(self): ...
def set_line_width(self, width: Any) -> None: ...
def get_line_width(self): ...
def set_miter_limit(self, limit: Any) -> None: ...
def get_miter_limit(self): ...
def set_operator(self, operator: Any) -> None: ...
def get_operator(self): ...
def set_tolerance(self, tolerance: Any) -> None: ...
def get_tolerance(self): ...
def translate(self, tx: Any, ty: Any) -> None: ...
def scale(self, sx: Any, sy: Optional[Any] = ...) -> None: ...
def rotate(self, radians: Any) -> None: ...
def transform(self, matrix: Any) -> None: ...
def set_matrix(self, matrix: Any) -> None: ...
def get_matrix(self): ...
def identity_matrix(self) -> None: ...
def user_to_device(self, x: Any, y: Any): ...
def user_to_device_distance(self, dx: Any, dy: Any): ...
def device_to_user(self, x: Any, y: Any): ...
def device_to_user_distance(self, dx: Any, dy: Any): ...
def has_current_point(self): ...
def get_current_point(self): ...
def new_path(self) -> None: ...
def new_sub_path(self) -> None: ...
def move_to(self, x: Any, y: Any) -> None: ...
def rel_move_to(self, dx: Any, dy: Any) -> None: ...
def line_to(self, x: Any, y: Any) -> None: ...
def rel_line_to(self, dx: Any, dy: Any) -> None: ...
def rectangle(self, x: Any, y: Any, width: Any, height: Any) -> None: ...
def arc(self, xc: Any, yc: Any, radius: Any, angle1: Any, angle2: Any) -> None: ...
def arc_negative(self, xc: Any, yc: Any, radius: Any, angle1: Any, angle2: Any) -> None: ...
def curve_to(self, x1: Any, y1: Any, x2: Any, y2: Any, x3: Any, y3: Any) -> None: ...
def rel_curve_to(
self, dx1: Any, dy1: Any, dx2: Any, dy2: Any, dx3: Any, dy3: Any
) -> None: ...
def text_path(self, text: Any) -> None: ...
def glyph_path(self, glyphs: Any) -> None: ...
def close_path(self) -> None: ...
def copy_path(self): ...
def copy_path_flat(self): ...
def METHOD_NAME(self, path: Any) -> None: ...
def path_extents(self): ...
def paint(self) -> None: ...
def paint_with_alpha(self, alpha: Any) -> None: ...
def mask(self, pattern: Any) -> None: ...
def mask_surface(self, surface: Any, surface_x: int = ..., surface_y: int = ...) -> None: ...
def fill(self) -> None: ...
def fill_preserve(self) -> None: ...
def fill_extents(self): ...
def in_fill(self, x: Any, y: Any): ...
def stroke(self) -> None: ...
def stroke_preserve(self) -> None: ...
def stroke_extents(self): ...
def in_stroke(self, x: Any, y: Any): ...
def clip(self) -> None: ...
def clip_preserve(self) -> None: ...
def clip_extents(self): ...
def copy_clip_rectangle_list(self): ...
def in_clip(self, x: Any, y: Any): ...
def reset_clip(self) -> None: ...
def select_font_face(
self, family: str = ..., slant: Any = ..., weight: Any = ...
) -> None: ...
def set_font_face(self, font_face: Any) -> None: ...
def get_font_face(self): ...
def set_font_size(self, size: Any) -> None: ...
def set_font_matrix(self, matrix: Any) -> None: ...
def get_font_matrix(self): ...
def set_font_options(self, font_options: Any) -> None: ...
def get_font_options(self): ...
def set_scaled_font(self, scaled_font: Any) -> None: ...
def get_scaled_font(self): ...
def font_extents(self): ...
def text_extents(self, text: Any): ...
def glyph_extents(self, glyphs: Any): ...
def show_text(self, text: Any) -> None: ...
def show_glyphs(self, glyphs: Any) -> None: ...
def show_text_glyphs(
self, text: Any, glyphs: Any, clusters: Any, cluster_flags: int = ...
) -> None: ...
def show_page(self) -> None: ...
def copy_page(self) -> None: ...
def tag_begin(self, tag_name: Any, attributes: Optional[Any] = ...) -> None: ...
def tag_end(self, tag_name: Any) -> None: ...
|
2,633 |
disassemble to gtirb
|
import contextlib
from pathlib import Path
import os
import subprocess
import tempfile
import typing
import gtirb
class SnippetTestException(Exception):
"""
Custom exceptions raised by snippet tests
"""
@contextlib.contextmanager
def assemble_snippet(
snippet: str, arch=gtirb.Module.ISA
) -> typing.Generator[Path, None, None]:
"""
Assemble an assembly snippet and return a path to the binary.
The snippet becomes embedded in the function `main`, and the symbol
`main_end` is placed at the end of the snippet.
"""
with tempfile.TemporaryDirectory() as tmpdir:
if arch == gtirb.Module.ISA.ARM:
compiler = "arm-linux-gnueabihf-gcc"
ret = "bx lr"
type_prefix = "%"
elif arch == gtirb.Module.ISA.X64:
compiler = "gcc"
ret = "retq"
type_prefix = "@"
else:
raise SnippetTestException(f"Unimplemented snippet arch: {arch}")
src_path = os.path.join(tmpdir, "test.s")
with open(src_path, "w") as f:
f.write(
f"""
.globl main
.type main, {type_prefix}function
main:
{snippet}
{ret}
.globl main_end
main_end:
"""
)
binary_path = os.path.join(tmpdir, "testtmp")
cmd = [compiler, "-o", binary_path, src_path]
subprocess.run(cmd, check=True)
yield binary_path
def METHOD_NAME(target: str) -> gtirb.Module:
"""
Disassemble a binary and return the loaded GTIRB module
"""
with tempfile.TemporaryDirectory() as tmpdir:
gtirb_path = os.path.join(tmpdir, "tmp.gtirb")
cmd = [
"ddisasm",
target,
"--ir",
gtirb_path,
"-j",
"1",
            # Needed so stack_def_use.def_used is available.
"--with-souffle-relations",
]
subprocess.run(cmd, timeout=60, check=True)
loaded_gtirb = gtirb.IR.load_protobuf(gtirb_path)
return loaded_gtirb.modules[0]
def asm_to_gtirb(
snippet: str, arch: gtirb.Module.ISA = gtirb.Module.ISA.X64
) -> gtirb.Module:
"""
Build and load a gtirb module for an assembly snippet
"""
with assemble_snippet(snippet, arch=arch) as binary:
return METHOD_NAME(binary)
def snippet_bounds(module: gtirb.Module) -> typing.Tuple[int, int]:
"""
Get a tuple representing a snippet's address range
Works for snippets assembled with assemble_snippet
"""
# snippets built with assemble_snippet bound the snippet with the symbols
# `main` and `main_end`
bounds = []
for sym_name in ("main", "main_end"):
for sym in module.symbols:
if sym.name == sym_name:
break
else:
raise SnippetTestException(f"No symbol: '{sym_name}'")
if sym.referent is None:
raise SnippetTestException(f"No referent: '{sym_name}'")
if sym.referent.address is None:
raise SnippetTestException(f"No address: '{sym_name}'")
bounds.append(sym.referent.address)
return tuple(bounds)
def parse_field(field: str, type_spec: str) -> typing.Any:
"""
Parse a field in a tuple
"""
base_type = type_spec.split(":")[1]
if base_type in ("i", "u"):
# base=0 supports both prefixed hexadecimal and decimal
value = int(field, base=0)
elif base_type == "s":
value = field
elif base_type == "r":
value = parse_record(field, type_spec)
else:
raise SnippetTestException("Cannot parse type: " + str(type_spec))
return value
def parse_record(record_str: str, type_spec: str) -> typing.Tuple[typing.Any]:
"""
Parse a record entry using a type spec generator
"""
record_types = {"stack_var": "BaseReg:s:register,StackPos:i:number"}
type_name = type_spec.split(":")[2]
type_spec = record_types[type_name]
# strip brackets
record_str = record_str.strip("[]")
field_types = type_spec.split(",")
parsed_fields = []
# we can't just split the fields by ", " since there might be nested
# records.
for i, t in enumerate(field_types):
if i == len(field_types) - 1:
field = record_str
record_str = ""
else:
field, record_str = record_str.split(", ", 1)
parsed_fields.append(parse_field(field, t))
return tuple(parsed_fields)
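# Minimal illustrative sketch: parsing a nested "stack_var" record for a field
# typed e.g. as "Var:r:stack_var"; the register name and offset are hypothetical.
def _demo_parse_record() -> None:
    parsed = parse_record("[RBP, -8]", "Var:r:stack_var")
    assert parsed == ("RBP", -8)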
def parse_souffle_output(module: gtirb.Module, relation_name: str):
"""
Parse a relation from the souffleOutputs auxdata
"""
type_spec, data = module.aux_data["souffleOutputs"].data[
"disassembly." + relation_name
]
type_spec = type_spec.strip("<>")
lines = data.strip().split("\n")
if lines[0] == "":
# empty relation
return
for line in lines:
fields = line.split("\t")
parsed_fields = []
for field, t in zip(fields, type_spec.split(",")):
parsed_fields.append(parse_field(field, t))
yield tuple(parsed_fields)
|
2,634 |
test fake lvcreate non zeroed
|
import unittest
import unittest.mock as mock
import lvmlib
class ExecResultMixIn(object):
def assertExecutionSucceeded(self, exec_result):
returncode, stdout, stderr = exec_result
self.assertEqual(0, returncode)
def assertExecutionFailed(self, exec_result):
returncode, stdout, stderr = exec_result
self.assertEqual(1, returncode)
class TestLVSubSystem(unittest.TestCase, ExecResultMixIn):
def test_lvcreate_is_mocked(self):
executable_injector = mock.Mock()
lvsubsystem = lvmlib.LVSubsystem(None, executable_injector)
self.assertTrue(
mock.call('/usr/sbin/lvcreate', lvsubsystem.fake_lvcreate)
in executable_injector.mock_calls
)
def test_lvremove_is_mocked(self):
executable_injector = mock.Mock()
lvsubsystem = lvmlib.LVSubsystem(None, executable_injector)
self.assertTrue(
mock.call('/usr/sbin/lvremove', lvsubsystem.fake_lvremove)
in executable_injector.mock_calls
)
def test_dmsetup_is_mocked(self):
executable_injector = mock.Mock()
lvsubsystem = lvmlib.LVSubsystem(None, executable_injector)
self.assertTrue(
mock.call('/sbin/dmsetup', lvsubsystem.fake_dmsetup)
in executable_injector.mock_calls
)
def test_add_volume_group(self):
lvsubsystem = lvmlib.LVSubsystem(None, mock.Mock())
lvsubsystem.add_volume_group('vg')
vg = lvsubsystem.get_volume_group('vg')
self.assertEqual('vg', vg.name)
def test_add_multiple_volume_groups(self):
lvsubsystem = lvmlib.LVSubsystem(None, mock.Mock())
lvsubsystem.add_volume_group('vg1')
lvsubsystem.add_volume_group('vg2')
lvsubsystem.add_volume_group('vg3')
vg1 = lvsubsystem.get_volume_group('vg1')
vg2 = lvsubsystem.get_volume_group('vg2')
vg3 = lvsubsystem.get_volume_group('vg3')
self.assertEqual('vg1', vg1.name)
self.assertEqual('vg2', vg2.name)
self.assertEqual('vg3', vg3.name)
def test_fake_lvcreate_creates_volume(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
vg = lvsubsystem.add_volume_group('vg')
exec_result = lvsubsystem.fake_lvcreate(
"someprog -n name -L 100 vg".split(), '')
lv, = lvsubsystem.get_logical_volumes_with_name('name')
self.assertEqual('name', lv.name)
self.assertEqual(lvsubsystem.get_volume_group('vg'), lv.volume_group)
self.assertTrue(lv.active)
self.assertTrue(lv.zeroed)
self.assertEqual(None, lv.tag)
self.assertEqual(100, lv.size_mb)
def test_fake_lvcreate_with_tags(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
lvsubsystem.add_volume_group('vg')
exec_result = lvsubsystem.fake_lvcreate(
"someprog -n name --addtag tagg -L 100 vg".split(), '')
lv, = lvsubsystem.get_logical_volumes_with_name('name')
self.assertEqual('tagg', lv.tag)
def test_fake_lvcreate_inactive(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
lvsubsystem.add_volume_group('vg')
exec_result = lvsubsystem.fake_lvcreate(
"someprog -n name --inactive -L 100 vg".split(), '')
lv, = lvsubsystem.get_logical_volumes_with_name('name')
self.assertFalse(lv.active)
def METHOD_NAME(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
lvsubsystem.add_volume_group('vg')
exec_result = lvsubsystem.fake_lvcreate(
"someprog -n name --zero n -L 100 vg".split(), '')
lv, = lvsubsystem.get_logical_volumes_with_name('name')
self.assertFalse(lv.zeroed)
self.assertExecutionSucceeded(exec_result)
def test_get_the_correct_volume(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
lvsubsystem.add_volume_group('vg')
result1 = lvsubsystem.fake_lvcreate(
"someprog -n name1 --zero n -L 100 vg".split(), '')
result2 = lvsubsystem.fake_lvcreate(
"someprog -n name2 --zero n -L 200 vg".split(), '')
lv, = lvsubsystem.get_logical_volumes_with_name('name1')
self.assertEqual(100, lv.size_mb)
lv, = lvsubsystem.get_logical_volumes_with_name('name2')
self.assertEqual(200, lv.size_mb)
# Now remove them
lvsubsystem.fake_lvremove('someprog vg/name2'.split(), '')
def test_fake_lvcreate_called_with_wrong_params(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
lvsubsystem.add_volume_group('vg')
exec_result = lvsubsystem.fake_lvcreate(
"someprog --something-stupid -n name n -L 100 vg".split(), '')
self.assertExecutionFailed(exec_result)
def test_fake_lvcreate_fails_if_no_volume_group_found(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
exec_result = lvsubsystem.fake_lvcreate(
"someprog -n name -L 100 nonexisting".split(), '')
self.assertExecutionFailed(exec_result)
def test_fake_lvremove(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
lvsubsystem.add_volume_group('vg')
lvsubsystem.get_volume_group('vg').add_volume('lv', 100)
exec_result = lvsubsystem.fake_lvremove(
"someprog vg/lv".split(), '')
self.assertExecutionSucceeded(exec_result)
def test_fake_lvremove_with_force(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
lvsubsystem.add_volume_group('vg')
lvsubsystem.get_volume_group('vg').add_volume('lv', 100)
exec_result = lvsubsystem.fake_lvremove(
"someprog -f vg/lv".split(), '')
self.assertExecutionSucceeded(exec_result)
def test_fake_lvremove_with_bad_params(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
lvsubsystem.add_volume_group('vg')
lvsubsystem.get_volume_group('vg').add_volume('lv', 100)
exec_result = lvsubsystem.fake_lvremove(
"someprog -f vg/lv --stupid-parameter".split(), '')
self.assertExecutionFailed(exec_result)
def test_fake_dmsetup_status_returns_zero(self):
lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
exec_result = lvsubsystem.fake_dmsetup(
"someprog status".split(), '')
self.assertExecutionSucceeded(exec_result)
|
2,635 |
test lf source lf patch
|
# Copyright (C) 2014 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from pathlib import Path
from subprocess import CalledProcessError
from textwrap import dedent
from types import SimpleNamespace
import pytest
from conda_build.source import (
_ensure_CRLF,
_ensure_LF,
_guess_patch_strip_level,
apply_patch,
)
@pytest.mark.parametrize(
"patches,results",
[
pytest.param(
[
Path("one.txt"),
Path("some", "common", "prefix", "two.txt"),
Path("some", "common", "prefix", "three.txt"),
],
[(0, False), (0, False), (0, False), (0, False)],
id="strip level 0",
),
pytest.param(
[
Path("some", "one.txt"),
Path("some", "common", "prefix", "two.txt"),
Path("some", "common", "prefix", "three.txt"),
],
[(0, False), (1, False), (0, True), (0, True)],
id="strip level 1",
),
pytest.param(
[
Path("some", "common", "one.txt"),
Path("some", "common", "prefix", "two.txt"),
Path("some", "common", "prefix", "three.txt"),
],
[(0, False), (1, False), (2, False), (0, True)],
id="strip level 2",
),
pytest.param(
[
Path("some", "common", "prefix", "one.txt"),
Path("some", "common", "prefix", "two.txt"),
Path("some", "common", "prefix", "three.txt"),
],
[(0, False), (1, False), (2, False), (3, False)],
id="strip level 3",
),
],
)
def test_patch_strip_level(
patches: Path, results: list[tuple[int, bool]], tmp_path: Path
):
# generate dummy files
for patch in patches:
(tmp_path / patch).parent.mkdir(parents=True, exist_ok=True)
(tmp_path / patch).touch()
src_dir = tmp_path
assert _guess_patch_strip_level(patches, src_dir) == results[0]
src_dir = src_dir / "some"
assert _guess_patch_strip_level(patches, src_dir) == results[1]
src_dir = src_dir / "common"
assert _guess_patch_strip_level(patches, src_dir) == results[2]
src_dir = src_dir / "prefix"
assert _guess_patch_strip_level(patches, src_dir) == results[3]
@pytest.fixture
def patch_paths(tmp_path):
paths = SimpleNamespace(
deletion=tmp_path / "file-deletion.txt",
modification=tmp_path / "file-modification.txt",
creation=tmp_path / "file-creation.txt",
diff=tmp_path / "patch.diff",
)
paths.deletion.write_text("hello\n")
paths.modification.write_text("hello\n")
paths.diff.write_text(
dedent(
"""
diff file-deletion.txt file-deletion.txt
--- file-deletion.txt 2016-06-07 21:55:59.549798700 +0100
+++ file-deletion.txt 1970-01-01 01:00:00.000000000 +0100
@@ -1 +0,0 @@
-hello
diff file-creation.txt file-creation.txt
--- file-creation.txt 1970-01-01 01:00:00.000000000 +0100
+++ file-creation.txt 2016-06-07 21:55:59.549798700 +0100
@@ -0,0 +1 @@
+hello
diff file-modification.txt file-modification.txt
--- file-modification.txt 2016-06-08 18:23:08.384136600 +0100
+++ file-modification.txt 2016-06-08 18:23:37.565136200 +0100
@@ -1 +1 @@
-hello
+43770
"""
).lstrip()
)
return paths
def test_patch_paths(tmp_path, patch_paths, testing_config):
assert patch_paths.deletion.exists()
assert not patch_paths.creation.exists()
assert patch_paths.modification.exists()
assert patch_paths.modification.read_text() == "hello\n"
apply_patch(str(tmp_path), patch_paths.diff, testing_config)
assert not patch_paths.deletion.exists()
assert patch_paths.creation.exists()
assert patch_paths.modification.exists()
assert patch_paths.modification.read_text() == "43770\n"
def test_ensure_unix_line_endings_with_nonutf8_characters(tmp_path):
win_path = tmp_path / "win_le"
win_path.write_bytes(b"\xf1\r\n") # tilde-n encoded in latin1
unix_path = tmp_path / "unix_le"
_ensure_LF(win_path, unix_path)
    assert unix_path.read_bytes() == b"\xf1\n"
def METHOD_NAME(tmp_path, patch_paths, testing_config):
_ensure_LF(patch_paths.modification)
_ensure_LF(patch_paths.deletion)
_ensure_LF(patch_paths.diff)
apply_patch(str(tmp_path), patch_paths.diff, testing_config)
assert patch_paths.modification.read_text() == "43770\n"
def test_lf_source_crlf_patch(tmp_path, patch_paths, testing_config):
_ensure_LF(patch_paths.modification)
_ensure_LF(patch_paths.deletion)
_ensure_CRLF(patch_paths.diff)
with pytest.raises(CalledProcessError):
apply_patch(str(tmp_path), patch_paths.diff, testing_config)
def test_crlf_source_lf_patch(tmp_path, patch_paths, testing_config):
_ensure_CRLF(patch_paths.modification)
_ensure_CRLF(patch_paths.deletion)
_ensure_LF(patch_paths.diff)
with pytest.raises(CalledProcessError):
apply_patch(str(tmp_path), patch_paths.diff, testing_config)
def test_crlf_source_crlf_patch(tmp_path, patch_paths, testing_config):
_ensure_CRLF(patch_paths.modification)
_ensure_CRLF(patch_paths.deletion)
_ensure_CRLF(patch_paths.diff)
apply_patch(str(tmp_path), patch_paths.diff, testing_config)
assert patch_paths.modification.read_bytes() == b"43770\r\n"
|
2,636 |
schedule
|
import os
import argparse
from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import callbacks as cb
from paz.models import ProtoEmbedding, ProtoNet
from paz.utils import build_directory, write_dictionary, write_weights
from paz.datasets.omniglot import (load, remove_classes, split_data,
sample_between_alphabet,
sample_within_alphabet, Generator)
# TODO move to optimization and add tests
def METHOD_NAME(period=20, rate=0.5):
def apply(epoch, learning_rate):
if ((epoch % period) == 0) and (epoch != 0):
learning_rate = rate * learning_rate
return learning_rate
return apply
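# Illustrative usage of the step-decay factory above (hypothetical values, not
# the script's defaults):
#   step_decay = METHOD_NAME(period=20, rate=0.5)
#   step_decay(20, 1e-3)  # -> 5e-4, epoch is a non-zero multiple of period
#   step_decay(21, 5e-4)  # -> 5e-4, unchanged between decay steps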
description = 'Train and evaluation of prototypical networks'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--seed', default=777, type=int)
parser.add_argument('--root', default='experiments', type=str)
parser.add_argument('--label', default='PROTONET', type=str)
parser.add_argument('--image_H', default=28, type=int)
parser.add_argument('--image_W', default=28, type=int)
parser.add_argument('--num_blocks', default=4, type=int)
parser.add_argument('--steps_per_epoch', default=100, type=int)
parser.add_argument('--epochs', default=200, type=int)
parser.add_argument('--period', default=10, type=int)
parser.add_argument('--rate', default=0.5, type=float)
parser.add_argument('--train_classes', default=964, type=int)
parser.add_argument('--validation_split', default=0.20, type=float)
parser.add_argument('--train_path', default='omniglot/images_background/')
parser.add_argument('--tests_path', default='omniglot/images_evaluation/')
parser.add_argument('--learning_rate', default=0.001, type=float)
parser.add_argument('--loss', default='sparse_categorical_crossentropy')
parser.add_argument('--metric', default='sparse_categorical_accuracy')
parser.add_argument('--train_ways', default=60, type=int)
parser.add_argument('--train_shots', default=5, type=int)
parser.add_argument('--train_queries', default=5, type=int)
parser.add_argument('--test_steps', default=1000, type=int)
parser.add_argument('--test_ways', nargs='+', default=[5, 20], type=int)
parser.add_argument('--test_shots', nargs='+', default=[1, 5], type=int)
parser.add_argument('--test_queries', default=1, type=int)
parser.add_argument('--stop_patience', default=100, type=int)
parser.add_argument('--stop_delta', default=1e-3, type=float)
args = parser.parse_args()
RNG = np.random.default_rng(args.seed)
tf.random.set_seed(args.seed)
directory = build_directory(args.root, args.label)
write_dictionary(args.__dict__, directory, 'parameters.json')
image_shape = (args.image_H, args.image_W, 1)
train_args = (args.train_ways, args.train_shots, args.train_queries)
embed = ProtoEmbedding(image_shape, args.num_blocks)
model = ProtoNet(embed, *train_args, image_shape)
optimizer = Adam(args.learning_rate)
metrics = [args.metric]
model.compile(Adam(args.learning_rate), loss=args.loss, metrics=metrics)
callbacks = [
cb.LearningRateScheduler(METHOD_NAME(args.period, args.rate), verbose=1),
cb.CSVLogger(os.path.join(directory, 'log.csv')),
cb.EarlyStopping('val_loss', args.stop_delta, args.stop_patience, 1)
]
train_data = load('train', image_shape[:2], True)
train_data = remove_classes(RNG, train_data, args.train_classes)
train_data, validation_data = split_data(train_data, args.validation_split)
sampler = partial(sample_between_alphabet, RNG, train_data, *train_args)
sequence = Generator(sampler, *train_args, image_shape, args.steps_per_epoch)
sampler = partial(sample_between_alphabet, RNG, validation_data, *train_args)
validation_data = Generator(sampler, *train_args, image_shape, 100)
model.fit(sequence,
epochs=args.epochs,
callbacks=callbacks,
validation_data=validation_data)
results = {}
for way in args.test_ways:
for shot in args.test_shots:
test_model = ProtoNet(embed, way, shot, args.test_queries, image_shape)
test_model.compile(optimizer, loss=args.loss, metrics=metrics)
test_args = (way, shot, args.test_queries)
data = load('test', image_shape[:2], flat=False)
sampler = partial(sample_within_alphabet, RNG, data, *test_args)
sequence = Generator(sampler, *test_args, image_shape, args.test_steps)
losses, accuracy = test_model.evaluate(sequence)
accuracy = round(100 * accuracy, 2)
results[f'{way}-way_{shot}-shot_within_alphabet'] = accuracy
print(f'Within alphabet {way}-way {shot}-shot accuracy {accuracy} %')
data = load('test', image_shape[:2], flat=True)
sampler = partial(sample_between_alphabet, RNG, data, *test_args)
sequence = Generator(sampler, *test_args, image_shape, args.test_steps)
losses, accuracy = test_model.evaluate(sequence)
accuracy = round(100 * accuracy, 2)
results[f'{way}-way_{shot}-shot_between_alphabet'] = accuracy
print(f'Between alphabet {way}-way {shot}-shot accuracy {accuracy} %')
write_weights(embed, directory)
write_dictionary(results, directory, 'test_accuracies.json')
|
2,637 |
system data
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetGuestAgentResult',
'AwaitableGetGuestAgentResult',
'get_guest_agent',
'get_guest_agent_output',
]
@pulumi.output_type
class GetGuestAgentResult:
"""
Defines the GuestAgent.
"""
def __init__(__self__, credentials=None, http_proxy_config=None, id=None, name=None, provisioning_action=None, provisioning_state=None, status=None, METHOD_NAME=None, type=None):
if credentials and not isinstance(credentials, dict):
raise TypeError("Expected argument 'credentials' to be a dict")
pulumi.set(__self__, "credentials", credentials)
if http_proxy_config and not isinstance(http_proxy_config, dict):
raise TypeError("Expected argument 'http_proxy_config' to be a dict")
pulumi.set(__self__, "http_proxy_config", http_proxy_config)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_action and not isinstance(provisioning_action, str):
raise TypeError("Expected argument 'provisioning_action' to be a str")
pulumi.set(__self__, "provisioning_action", provisioning_action)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", METHOD_NAME)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def credentials(self) -> Optional['outputs.GuestCredentialResponse']:
"""
Username / Password Credentials to provision guest agent.
"""
return pulumi.get(self, "credentials")
@property
@pulumi.getter(name="httpProxyConfig")
def http_proxy_config(self) -> Optional['outputs.HttpProxyConfigurationResponse']:
"""
HTTP Proxy configuration for the VM.
"""
return pulumi.get(self, "http_proxy_config")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningAction")
def provisioning_action(self) -> Optional[str]:
"""
The guest agent provisioning action.
"""
return pulumi.get(self, "provisioning_action")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> str:
"""
The guest agent status.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="systemData")
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetGuestAgentResult(GetGuestAgentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGuestAgentResult(
credentials=self.credentials,
http_proxy_config=self.http_proxy_config,
id=self.id,
name=self.name,
provisioning_action=self.provisioning_action,
provisioning_state=self.provisioning_state,
status=self.status,
METHOD_NAME=self.METHOD_NAME,
type=self.type)
def get_guest_agent(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_machine_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGuestAgentResult:
"""
Implements GuestAgent GET method.
:param str name: Name of the GuestAgent.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str virtual_machine_name: Name of the vm.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['virtualMachineName'] = virtual_machine_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:azurestackhci/v20221215preview:getGuestAgent', __args__, opts=opts, typ=GetGuestAgentResult).value
return AwaitableGetGuestAgentResult(
credentials=pulumi.get(__ret__, 'credentials'),
http_proxy_config=pulumi.get(__ret__, 'http_proxy_config'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_action=pulumi.get(__ret__, 'provisioning_action'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
status=pulumi.get(__ret__, 'status'),
METHOD_NAME=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_guest_agent)
def get_guest_agent_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
virtual_machine_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGuestAgentResult]:
"""
Implements GuestAgent GET method.
:param str name: Name of the GuestAgent.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str virtual_machine_name: Name of the vm.
"""
...
|
2,638 |
redraw view
|
from PyQt5.QtCore import QPoint, QTimer, pyqtSlot, Qt, pyqtSignal
from PyQt5.QtGui import QIcon, QKeySequence, QWheelEvent, QCursor, QContextMenuEvent
from PyQt5.QtWidgets import QAction, QMenu
from urh.ui.painting.SceneManager import SceneManager
from urh.ui.views.SelectableGraphicView import SelectableGraphicView
from urh.util.Logger import logger
class ZoomableGraphicView(SelectableGraphicView):
MINIMUM_VIEW_WIDTH = 300
# argument is x zoom factor
# if argument is -1, then show_full_scene was triggered during zoom
zoomed = pyqtSignal(float)
def __init__(self, parent=None):
super().__init__(parent)
self.context_menu_position = None # type: QPoint
self.scene_type = 0
self.auto_fit_on_resize_is_blocked = False
self.zoom_in_action = QAction(self.tr("Zoom in"), self)
self.zoom_in_action.setShortcut(QKeySequence.ZoomIn)
self.zoom_in_action.triggered.connect(self.on_zoom_in_action_triggered)
self.zoom_in_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
self.zoom_in_action.setIcon(QIcon.fromTheme("zoom-in"))
self.addAction(self.zoom_in_action)
self.zoom_out_action = QAction(self.tr("Zoom out"), self)
self.zoom_out_action.setShortcut(QKeySequence.ZoomOut)
self.zoom_out_action.triggered.connect(self.on_zoom_out_action_triggered)
self.zoom_out_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
self.zoom_out_action.setIcon(QIcon.fromTheme("zoom-out"))
self.addAction(self.zoom_out_action)
self.zoom_original_action = QAction(self.tr("Zoom original"), self)
self.zoom_original_action.setShortcut(QKeySequence(Qt.CTRL + Qt.Key_0))
self.zoom_original_action.triggered.connect(self.on_zoom_original_action_triggered)
self.zoom_original_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
self.zoom_original_action.setIcon(QIcon.fromTheme("zoom-original"))
self.addAction(self.zoom_original_action)
self.redraw_timer = QTimer(self)
self.redraw_timer.setSingleShot(True)
self.redraw_timer.timeout.connect(self.METHOD_NAME)
self.zoomed.connect(self.on_signal_zoomed)
self.scene_x_zoom_stretch = 1
@property
def y_center(self):
try:
if self.scene_type == 0:
# Normal scene
return 0
else:
return -self.signal.center
except Exception as e:
logger.error("Could not access y_center property: {0}. Falling back to 0".format(e))
return 0
def create_context_menu(self):
menu = QMenu()
self._add_zoom_actions_to_menu(menu)
return menu
def contextMenuEvent(self, event: QContextMenuEvent):
self.context_menu_position = event.pos()
menu = self.create_context_menu()
menu.exec_(self.mapToGlobal(event.pos()))
self.context_menu_position = None
def _add_zoom_actions_to_menu(self, menu: QMenu):
menu.addAction(self.zoom_in_action)
menu.addAction(self.zoom_out_action)
if self.something_is_selected:
zoom_action = menu.addAction(self.tr("Zoom selection"))
zoom_action.setIcon(QIcon.fromTheme("zoom-fit-best"))
zoom_action.triggered.connect(self.on_zoom_action_triggered)
menu.addSeparator()
def scrollContentsBy(self, dx: int, dy: int):
try:
super().scrollContentsBy(dx, dy)
self.redraw_timer.start(0)
except RuntimeError as e:
logger.warning("Graphic View already closed: " + str(e))
def zoom(self, factor, zoom_to_mouse_cursor=True, cursor_pos=None):
if factor > 1 and self.view_rect().width() / factor < self.MINIMUM_VIEW_WIDTH:
factor = self.view_rect().width() / self.MINIMUM_VIEW_WIDTH
if zoom_to_mouse_cursor:
pos = self.mapFromGlobal(QCursor.pos()) if cursor_pos is None else cursor_pos
else:
pos = None
old_pos = self.mapToScene(pos) if pos is not None else None
show_full = False
if self.view_rect().width() / factor > self.sceneRect().width():
self.show_full_scene()
factor = 1
show_full = True
self.scale(factor, 1)
if show_full:
self.zoomed.emit(-1)
else:
self.zoomed.emit(factor)
if pos is not None:
move = self.mapToScene(pos) - old_pos
self.translate(move.x(), 0)
def wheelEvent(self, event: QWheelEvent):
zoom_factor = 1.001 ** event.angleDelta().y()
self.zoom(zoom_factor, cursor_pos=event.pos())
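    # Worked example for the exponent above (illustrative, not from the source):
    # a typical mouse-wheel notch reports angleDelta().y() == 120, giving a zoom
    # factor of 1.001 ** 120 ~= 1.13 (about 13 % in), while -120 gives
    # 1.001 ** -120 ~= 0.89, so zooming stays smooth and symmetric around 1.0.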
def resizeEvent(self, event):
if self.sceneRect().width() == 0 or self.auto_fit_on_resize_is_blocked:
return
self.auto_fit_view()
def auto_fit_view(self):
h_tar = self.sceneRect().height()
h_view = self.view_rect().height()
if abs(h_tar) > 0:
self.scale(1, h_view / h_tar)
self.centerOn(self.view_rect().x() + self.view_rect().width() / 2, self.y_center)
def show_full_scene(self, reinitialize=False):
y_factor = self.transform().m22()
self.resetTransform()
# Use full self.width() here to enable show_full_scene when view_rect not yet set e.g. in Record Signal Dialog
x_factor = self.width() / (
self.sceneRect().width() * self.scene_x_zoom_stretch) if self.sceneRect().width() else 1
self.scale(x_factor, y_factor)
self.centerOn(0, self.y_center)
self.METHOD_NAME(reinitialize)
def zoom_to_selection(self, start: int, end: int):
if start == end:
return
x_factor = self.view_rect().width() / (end - start)
self.zoom(x_factor, zoom_to_mouse_cursor=False)
self.centerOn(start + (end - start) / 2, self.y_center)
def plot_data(self, data):
if self.scene_manager is None:
self.scene_manager = SceneManager(self)
self.scene_manager.plot_data = data
self.scene_manager.init_scene()
self.setScene(self.scene_manager.scene)
self.scene_manager.show_full_scene()
def METHOD_NAME(self, reinitialize=False):
if hasattr(self, "scene_manager") and self.scene_manager is not None:
self.scene_manager.scene_type = self.scene_type
if reinitialize:
self.scene_manager.init_scene()
vr = self.view_rect()
start, end = vr.x(), vr.x() + vr.width()
self.scene_manager.show_scene_section(start, end, *self._get_sub_path_ranges_and_colors(start, end))
if self.scene_type == 1:
self.scene().redraw_legend()
else:
self.scene().hide_legend()
def _get_sub_path_ranges_and_colors(self, start: float, end: float):
# Overwritten in Epic Graphic View
return None, None
def eliminate(self):
self.redraw_timer.stop()
super().eliminate()
@pyqtSlot()
def on_signal_zoomed(self):
self.redraw_timer.start(30)
@pyqtSlot()
def on_zoom_in_action_triggered(self):
self.zoom(1.1)
@pyqtSlot()
def on_zoom_out_action_triggered(self):
self.zoom(0.9)
@pyqtSlot()
def on_zoom_original_action_triggered(self):
self.show_full_scene(reinitialize=False)
self.zoomed.emit(-1)
@pyqtSlot()
def on_zoom_action_triggered(self):
self.zoom_to_selection(self.selection_area.start, self.selection_area.end)
|
2,639 |
test script
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
import torch
from parameterized import parameterized
from monai.networks import eval_mode
from monai.networks.nets.vit import ViT
from tests.utils import SkipIfBeforePyTorchVersion, skip_if_quick, test_script_save
TEST_CASE_Vit = []
for dropout_rate in [0.6]:
for in_channels in [4]:
for hidden_size in [768]:
for img_size in [96, 128]:
for patch_size in [16]:
for num_heads in [12]:
for mlp_dim in [3072]:
for num_layers in [4]:
for num_classes in [8]:
for pos_embed in ["conv", "perceptron"]:
for classification in [False, True]:
for nd in (2, 3):
test_case = [
{
"in_channels": in_channels,
"img_size": (img_size,) * nd,
"patch_size": (patch_size,) * nd,
"hidden_size": hidden_size,
"mlp_dim": mlp_dim,
"num_layers": num_layers,
"num_heads": num_heads,
"pos_embed": pos_embed,
"classification": classification,
"num_classes": num_classes,
"dropout_rate": dropout_rate,
},
(2, in_channels, *([img_size] * nd)),
(2, (img_size // patch_size) ** nd, hidden_size),
]
if nd == 2:
test_case[0]["spatial_dims"] = 2 # type: ignore
if classification:
test_case[0]["post_activation"] = False # type: ignore
if test_case[0]["classification"]: # type: ignore
test_case[2] = (2, test_case[0]["num_classes"]) # type: ignore
TEST_CASE_Vit.append(test_case)
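# Shape bookkeeping for the cases above (illustrative): with img_size=96,
# patch_size=16 and nd=3, the non-classification output shape is
# (2, (96 // 16) ** 3, hidden_size) == (2, 216, 768), i.e. one hidden vector per
# patch token; classification cases instead expect (2, num_classes) == (2, 8).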
@skip_if_quick
class TestViT(unittest.TestCase):
@parameterized.expand(TEST_CASE_Vit)
def test_shape(self, input_param, input_shape, expected_shape):
net = ViT(**input_param)
with eval_mode(net):
result, _ = net(torch.randn(input_shape))
self.assertEqual(result.shape, expected_shape)
def test_ill_arg(self):
with self.assertRaises(ValueError):
ViT(
in_channels=1,
img_size=(128, 128, 128),
patch_size=(16, 16, 16),
hidden_size=128,
mlp_dim=3072,
num_layers=12,
num_heads=12,
pos_embed="conv",
classification=False,
dropout_rate=5.0,
)
with self.assertRaises(ValueError):
ViT(
in_channels=1,
img_size=(32, 32, 32),
patch_size=(64, 64, 64),
hidden_size=512,
mlp_dim=3072,
num_layers=12,
num_heads=8,
pos_embed="perceptron",
classification=False,
dropout_rate=0.3,
)
with self.assertRaises(ValueError):
ViT(
in_channels=1,
img_size=(96, 96, 96),
patch_size=(8, 8, 8),
hidden_size=512,
mlp_dim=3072,
num_layers=12,
num_heads=14,
pos_embed="conv",
classification=False,
dropout_rate=0.3,
)
with self.assertRaises(ValueError):
ViT(
in_channels=1,
img_size=(97, 97, 97),
patch_size=(4, 4, 4),
hidden_size=768,
mlp_dim=3072,
num_layers=12,
num_heads=8,
pos_embed="perceptron",
classification=True,
dropout_rate=0.3,
)
with self.assertRaises(ValueError):
ViT(
in_channels=4,
img_size=(96, 96, 96),
patch_size=(16, 16, 16),
hidden_size=768,
mlp_dim=3072,
num_layers=12,
num_heads=12,
pos_embed="perc",
classification=False,
dropout_rate=0.3,
)
@parameterized.expand(TEST_CASE_Vit)
@SkipIfBeforePyTorchVersion((1, 9))
def METHOD_NAME(self, input_param, input_shape, _):
net = ViT(**(input_param))
net.eval()
with torch.no_grad():
torch.jit.script(net)
test_data = torch.randn(input_shape)
test_script_save(net, test_data)
def test_access_attn_matrix(self):
# input format
in_channels = 1
img_size = (96, 96, 96)
patch_size = (16, 16, 16)
in_shape = (1, in_channels, img_size[0], img_size[1], img_size[2])
        # no data in the matrix
        no_matrix_access_blk = ViT(in_channels=in_channels, img_size=img_size, patch_size=patch_size)
        no_matrix_access_blk(torch.randn(in_shape))
        assert isinstance(no_matrix_access_blk.blocks[0].attn.att_mat, torch.Tensor)
        # number of elements is zero
        assert no_matrix_access_blk.blocks[0].attn.att_mat.nelement() == 0
        # be able to access the attention matrix
        matrix_access_blk = ViT(in_channels=in_channels, img_size=img_size, patch_size=patch_size, save_attn=True)
        matrix_access_blk(torch.randn(in_shape))
        assert matrix_access_blk.blocks[0].attn.att_mat.shape == (in_shape[0], 12, 216, 216)
if __name__ == "__main__":
unittest.main()
|
2,640 |
get group count
|
# rhndb.py -- Database functions
# Copyright (C) 2007 NC State University
# Written by Jack Neely <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
class RHNStore(object):
def __init__(self, sdb):
self.sdb = sdb
self.conn = self.sdb.getConnection()
self.c = self.sdb.getCursor()
def commit(self):
self.conn.commit()
def addSystem(self, system):
q1 = """select clientid from CLIENTS where rhnsid = %s"""
q2 = """insert into CLIENTS (rhnsid, name, lastcheckin) values
(%s, %s, %s)"""
q3 = """update CLIENTS set lastcheckin = %s where clientid = %s"""
self.c.execute(q1, (system["id"],))
ret = self.c.fetchone()
        if ret is None:
self.c.execute(q2, (system["id"], system["name"],
system["last_checkin"]))
else:
self.c.execute(q3, (system["last_checkin"], ret[0]))
return ret[0]
self.c.execute(q1, (system["id"],))
return self.c.fetchone()[0]
def addGroup(self, grp):
q1 = """select groupid from GROUPINFO where rhnsid = %s"""
q2 = """insert into GROUPINFO (rhnsid, name) values (%s, %s)"""
self.c.execute(q1, (grp["sgid"],))
ret = self.c.fetchone()
        if ret is None:
self.c.execute(q2, (grp["sgid"], grp["system_group_name"]))
else:
return ret[0]
self.c.execute(q1, (grp["sgid"],))
return self.c.fetchone()[0]
def subscribeGroup(self, clientid, groupids):
q1 = """delete from GROUPS where clientid = %s"""
self.c.execute(q1, (clientid,))
if len(groupids) == 0:
return
q2 = """insert into GROUPS (clientid, groupid) values (%s, %s)"""
for id in groupids:
self.c.execute(q2, (clientid, id))
def markRL(self, clients):
q = ""
for id in clients:
if q == "":
q = "clientid = %s"
else:
q = q + " or clientid = %s"
q1 = """update CLIENTS set rl = 0"""
q2 = """update CLIENTS set rl = 1 where """ + q
self.c.execute(q1, ())
self.c.execute(q2, clients)
def markActive(self, clients):
q = ""
for id in clients:
if q == "":
q = "clientid = %s"
else:
q = q + " or clientid = %s"
q1 = """update CLIENTS set active = 0"""
q2 = """update CLIENTS set active = 1 where """ + q
self.c.execute(q1, ())
self.c.execute(q2, clients)
def getGroups(self):
q = "select groupid from GROUPINFO"
self.c.execute(q)
ret = self.c.fetchone()
list = []
while ret != None:
list.append(ret[0])
ret = self.c.fetchone()
return list
def getGroupName(self, gid):
q = "select name from GROUPINFO where groupid = %s"
self.c.execute(q, (gid,))
ret = self.c.fetchone()
        if ret is None:
return None
else:
return ret[0]
def getTotalRLCount(self):
q = "select count(*) from CLIENTS where rl = 1 and active = 1"
self.c.execute(q)
ret = self.c.fetchone()
return ret[0]
def getTotalCount(self):
q = "select count(*) from CLIENTS where active = 1"
self.c.execute(q)
ret = self.c.fetchone()
return ret[0]
def getGroupRLCount(self, gid):
q = """select count(*) from GROUPS, CLIENTS where
GROUPS.clientid = CLIENTS.clientid and
CLIENTS.active = 1 and
CLIENTS.rl = 1 and
GROUPS.groupid = %s"""
self.c.execute(q, (gid,))
ret = self.c.fetchone()
return ret[0]
def METHOD_NAME(self, gid):
q = """select count(*) from GROUPS, CLIENTS where
GROUPS.clientid = CLIENTS.clientid and
CLIENTS.active = 1 and
GROUPS.groupid = %s"""
self.c.execute(q, (gid,))
ret = self.c.fetchone()
return ret[0]
|
2,641 |
test dict order parallel
|
import pytest
import numpy as np
from firedrake import *
@pytest.fixture(scope="module")
def m():
return UnitIntervalMesh(2)
@pytest.fixture
def f(m):
cg = FunctionSpace(m, "CG", 1)
dg = FunctionSpace(m, "DG", 0)
c = Function(cg)
d = Function(dg)
return c, d
@pytest.fixture
def f_mixed(m):
cg = FunctionSpace(m, "CG", 1)
dg = FunctionSpace(m, "DG", 0)
return Function(cg*dg)
@pytest.fixture
def const(m):
return Constant(1.0)
@pytest.fixture
def f_extruded():
i = UnitIntervalMesh(2)
m = ExtrudedMesh(i, 2, layer_height=0.1)
cg = FunctionSpace(m, "CG", 1)
dg = FunctionSpace(m, "DG", 0)
c = Function(cg)
d = Function(dg)
return c, d
def test_direct_par_loop(f):
c, _ = f
domain = ""
instructions = """
c[0, 0] = 1
"""
par_loop((domain, instructions), direct, {'c': (c, WRITE)})
assert np.allclose(c.dat.data, 1.0)
def test_mixed_direct_par_loop(f_mixed):
with pytest.raises(NotImplementedError):
domain = ""
instructions = """
c[0, 0] = 1
"""
par_loop((domain, instructions), direct, {'c': (f_mixed, WRITE)})
assert all(np.allclose(f.dat.data, 1.0) for f in f_mixed.subfunctions)
@pytest.mark.parametrize('idx', [0, 1])
def test_mixed_direct_par_loop_components(f_mixed, idx):
domain = ""
instructions = """
c[0, 0] = 1
"""
par_loop((domain, instructions), direct, {'c': (f_mixed[idx], WRITE)})
assert np.allclose(f_mixed.dat[idx].data, 1.0)
def test_direct_par_loop_read_const(f, const):
c, _ = f
const.assign(10.0)
domain = ""
instructions = """
c[0, 0] = constant[0]
"""
par_loop((domain, instructions), direct, {'c': (c, WRITE), 'constant': (const, READ)})
assert np.allclose(c.dat.data, const.dat.data)
def test_indirect_par_loop_read_const(f, const):
_, d = f
const.assign(10.0)
domain = "{[i]: 0 <= i < d.dofs}"
instructions = """
for i
d[i, 0] = constant[0]
end
"""
par_loop((domain, instructions), dx, {'d': (d, WRITE), 'constant': (const, READ)})
assert np.allclose(d.dat.data, const.dat.data)
def test_indirect_par_loop_read_const_mixed(f_mixed, const):
const.assign(10.0)
with pytest.raises(NotImplementedError):
domain = "{[i]: 0 <= i < d.dofs}"
instructions = """
for i
d[i, 0] = constant[0]
end
"""
par_loop((domain, instructions), dx, {'d': (f_mixed, WRITE), 'constant': (const, READ)})
assert all(np.allclose(f.dat.data, const.dat.data) for f in f_mixed.subfunctions)
@pytest.mark.parallel(nprocs=2)
def METHOD_NAME():
mesh = UnitIntervalMesh(10)
d = Function(FunctionSpace(mesh, "DG", 0))
consts = []
for i in range(20):
consts.append(Constant(i, domain=mesh))
arg = {}
if mesh.comm.rank == 0:
arg['d'] = (d, WRITE)
for i, c in enumerate(consts):
arg["c%d" % i] = (c, READ)
else:
arg['d'] = (d, WRITE)
for i, c in enumerate(reversed(consts)):
arg["c%d" % (len(consts) - i - 1)] = (c, READ)
domain = "{[i]: 0 <= i < d.dofs}"
instructions = """
for i
d[i, 0] = c10[0]
end
"""
par_loop((domain, instructions), dx, arg)
assert np.allclose(d.dat.data, consts[10].dat.data)
@pytest.mark.parametrize('idx', [0, 1])
def test_indirect_par_loop_read_const_mixed_component(f_mixed, const, idx):
const.assign(10.0)
domain = "{[i]: 0 <= i < d.dofs}"
instructions = """
for i
d[i, 0] = constant[0]
end
"""
par_loop((domain, instructions), dx, {'d': (f_mixed[idx], WRITE), 'constant': (const, READ)})
assert np.allclose(f_mixed.dat[idx].data, const.dat.data)
def test_par_loop_const_write_error(f, const):
_, d = f
with pytest.raises(RuntimeError):
domain = ""
instructions = """
c[0] = d[0, 0]
"""
par_loop((domain, instructions), direct, {'c': (const, WRITE), 'd': (d, READ)})
def test_cg_max_field(f):
c, d = f
x = SpatialCoordinate(d.function_space().mesh())
d.interpolate(x[0])
domain = "{[i]: 0 <= i < c.dofs}"
instructions = """
for i
<float64> real_c = real(c[i, 0])
<float64> real_d = real(d[0, 0])
c[i, 0] = fmax(real_c, real_d)
end
"""
par_loop((domain, instructions), dx, {'c': (c, RW), 'd': (d, READ)})
assert (c.dat.data == [1./4, 3./4, 3./4]).all()
def test_cg_max_field_extruded(f_extruded):
c, d = f_extruded
x = SpatialCoordinate(d.function_space().mesh())
d.interpolate(x[0])
domain = "{[i]: 0 <= i < c.dofs}"
instructions = """
for i
<float64> real_c = real(c[i, 0])
<float64> real_d = real(d[0, 0])
c[i, 0] = fmax(real_c, real_d)
end
"""
par_loop((domain, instructions), dx, {'c': (c, RW), 'd': (d, READ)})
assert (c.dat.data == [1./4, 1./4, 1./4,
3./4, 3./4, 3./4,
3./4, 3./4, 3./4]).all()
@pytest.mark.parametrize("subdomain", [1, 2])
def test_cell_subdomain(subdomain):
from os.path import abspath, dirname, join
mesh = Mesh(join(abspath(dirname(__file__)), "..",
"meshes", "cell-sets.msh"))
V = FunctionSpace(mesh, "DG", 0)
expect = interpolate(as_ufl(1), V, subset=mesh.cell_subset(subdomain))
f = Function(V)
domain = "{[i]: 0 <= i < f.dofs}"
instructions = """
for i
f[i, 0] = 1.0
end
"""
par_loop((domain, instructions), dx(subdomain), {'f': (f, WRITE)})
assert np.allclose(f.dat.data, expect.dat.data)
def test_walk_facets_rt():
m = UnitSquareMesh(3, 3)
x = SpatialCoordinate(m)
V = FunctionSpace(m, 'RT', 1)
f1 = Function(V)
f2 = Function(V)
project(as_vector((x[0], x[1])), f1)
domain = "{[i]: 0 <= i < f1.dofs}"
instructions = """
for i
f2[i, 0] = f1[i, 0]
end
"""
par_loop((domain, instructions), dS, {'f1': (f1, READ), 'f2': (f2, WRITE)})
par_loop((domain, instructions), ds, {'f1': (f1, READ), 'f2': (f2, WRITE)})
assert errornorm(f1, f2, degree_rise=0) < 1e-10
def test_par_loop_respects_shape():
m = UnitSquareMesh(2, 2)
f_scalar = Function(FunctionSpace(m, "CG", 1))
f_vector = Function(VectorFunctionSpace(m, "CG", 1))
domain = "{[i] : 0 <= i < A.dofs}"
instructions = "A[i, 0] = 1"
par_loop((domain, instructions), dx, {'A': (f_vector, WRITE)})
assert np.allclose(f_vector.dat.data[:, 0], 1.0)
par_loop((domain, instructions), dx, {'A': (f_scalar, WRITE)})
assert np.allclose(f_scalar.dat.data, 1.0)
|
2,642 |
test can mix fixture and positional strategy
|
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from unittest.mock import Mock, create_autospec
import pytest
from hypothesis import example, given
from hypothesis.strategies import integers
from tests.common.utils import fails
pytest_plugins = "pytester"
@pytest.fixture(scope="session")
def infinity():
return float("inf")
@pytest.fixture(scope="module")
def mock_fixture():
return Mock()
@pytest.fixture(scope="module")
def spec_fixture():
class Foo:
def __init__(self):
pass
def bar(self):
return "baz"
return create_autospec(Foo)
@given(integers())
def METHOD_NAME(infinity, xs):
# Hypothesis fills arguments from the right, so if @given() uses
# positional arguments then any strategies need to be on the right.
assert xs <= infinity
@given(xs=integers())
def test_can_mix_fixture_and_keyword_strategy(xs, infinity):
assert xs <= infinity
@example(xs=0)
@given(xs=integers())
def test_can_mix_fixture_example_and_keyword_strategy(xs, infinity):
assert xs <= infinity
@fails
@given(integers())
def test_can_inject_mock_via_fixture(mock_fixture, xs):
"""A negative test is better for this one - this condition uncovers a bug
whereby the mock fixture is executed instead of the test body and always
succeeds. If this test fails, then we know we've run the test body instead
of the mock.
"""
raise AssertionError
@given(integers())
def test_can_inject_autospecced_mock_via_fixture(spec_fixture, xs):
spec_fixture.bar.return_value = float("inf")
assert xs <= spec_fixture.bar()
TESTSUITE = """
import pytest
from hypothesis import given, strategies as st
@pytest.fixture(scope="function", autouse=True)
def autofix(request):
pass
@given(x=st.integers())
def test_requests_function_scoped_fixture(capsys, x):
pass
@pytest.mark.parametrize("percent", ["%", "%s"])
@given(x=st.integers())
def test_requests_function_scoped_fixture_percent_parametrized(capsys, x, percent):
# See https://github.com/HypothesisWorks/hypothesis/issues/2469
pass
class TestClass:
@given(x=st.integers())
def test_requests_method_scoped_fixture(capsys, x):
pass
@given(x=st.integers())
def test_autouse_function_scoped_fixture(x):
pass
"""
def test_given_plus_function_scoped_non_autouse_fixtures_are_deprecated(testdir):
script = testdir.makepyfile(TESTSUITE)
testdir.runpytest(script).assert_outcomes(passed=1, failed=4)
CONFTEST_SUPPRESS = """
from hypothesis import HealthCheck, settings
settings.register_profile(
"suppress",
suppress_health_check=[HealthCheck.function_scoped_fixture],
)
"""
def test_suppress_fixture_health_check_via_profile(testdir):
script = testdir.makepyfile(TESTSUITE)
testdir.makeconftest(CONFTEST_SUPPRESS)
testdir.runpytest(script).assert_outcomes(passed=1, failed=4)
testdir.runpytest(script, "--hypothesis-profile=suppress").assert_outcomes(passed=5)
TESTSCRIPT_SUPPRESS_FIXTURE = """
import pytest
from hypothesis import HealthCheck, given, settings, strategies as st
@given(x=st.integers())
def test_fails_health_check(capsys, x):
pass
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(x=st.integers())
def test_suppresses_health_check(capsys, x):
pass
@given(x=st.integers())
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
def test_suppresses_health_check_2(capsys, x):
pass
"""
def test_suppress_health_check_function_scoped_fixture(testdir):
script = testdir.makepyfile(TESTSCRIPT_SUPPRESS_FIXTURE)
testdir.runpytest(script).assert_outcomes(passed=2, failed=1)
TESTSCRIPT_OVERRIDE_FIXTURE = """
import pytest
from hypothesis import given, strategies as st
@pytest.fixture(scope="function", name="event_loop")
def event_loop_1():
return
@pytest.fixture(scope="module", name="event_loop")
def event_loop_2():
return
@given(x=st.integers())
def test_override_fixture(event_loop, x):
pass
"""
def test_given_plus_overridden_fixture(testdir):
script = testdir.makepyfile(TESTSCRIPT_OVERRIDE_FIXTURE)
testdir.runpytest(script, "-Werror").assert_outcomes(passed=1, failed=0)
TESTSCRIPT_FIXTURE_THEN_GIVEN = """
import pytest
from hypothesis import given, strategies as st
@given(x=st.integers())
@pytest.fixture()
def test(x):
pass
"""
def test_given_fails_if_already_decorated_with_fixture(testdir):
script = testdir.makepyfile(TESTSCRIPT_FIXTURE_THEN_GIVEN)
testdir.runpytest(script).assert_outcomes(failed=1)
TESTSCRIPT_GIVEN_THEN_FIXTURE = """
import pytest
from hypothesis import given, strategies as st
@pytest.fixture()
@given(x=st.integers())
def test(x):
pass
"""
def test_fixture_errors_if_already_decorated_with_given(testdir):
script = testdir.makepyfile(TESTSCRIPT_GIVEN_THEN_FIXTURE)
if int(pytest.__version__.split(".")[0]) > 5:
testdir.runpytest(script).assert_outcomes(errors=1)
else:
testdir.runpytest(script).assert_outcomes(error=1)
|
2,643 |
set up
|
"""Tests for certbot.plugins.storage.PluginStorage"""
import json
import sys
from typing import Iterable
from typing import List
from typing import Optional
import unittest
from unittest import mock
import pytest
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import os
from certbot.tests import util as test_util
class PluginStorageTest(test_util.ConfigTestCase):
"""Test for certbot.plugins.storage.PluginStorage"""
def METHOD_NAME(self):
super().METHOD_NAME()
self.plugin_cls = test_util.DummyInstaller
filesystem.mkdir(self.config.config_dir)
with mock.patch("certbot.reverter.util"):
self.plugin = self.plugin_cls(config=self.config, name="mockplugin")
def test_load_errors_cant_read(self):
with open(os.path.join(self.config.config_dir,
".pluginstorage.json"), "w") as fh:
fh.write("dummy")
# When unable to read file that exists
mock_open = mock.mock_open()
mock_open.side_effect = IOError
self.plugin.storage._storagepath = os.path.join(self.config.config_dir,
".pluginstorage.json")
with mock.patch("builtins.open", mock_open):
with mock.patch('certbot.compat.os.path.isfile', return_value=True):
with mock.patch("certbot.reverter.util"):
with pytest.raises(errors.PluginStorageError):
self.plugin.storage._load() # pylint: disable=protected-access
def test_load_errors_empty(self):
with open(os.path.join(self.config.config_dir, ".pluginstorage.json"), "w") as fh:
fh.write('')
with mock.patch("certbot.plugins.storage.logger.debug") as mock_log:
# Should not error out but write a debug log line instead
with mock.patch("certbot.reverter.util"):
nocontent = self.plugin_cls(self.config, "mockplugin")
with pytest.raises(KeyError):
nocontent.storage.fetch("value")
assert mock_log.called
assert "no values loaded" in mock_log.call_args[0][0]
def test_load_errors_corrupted(self):
with open(os.path.join(self.config.config_dir,
".pluginstorage.json"), "w") as fh:
fh.write('invalid json')
with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
with mock.patch("certbot.reverter.util"):
corrupted = self.plugin_cls(self.config, "mockplugin")
with pytest.raises(errors.PluginError):
corrupted.storage.fetch("value")
assert "is corrupted" in mock_log.call_args[0][0]
def test_save_errors_cant_serialize(self):
with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
# Set data as something that can't be serialized
self.plugin.storage._initialized = True # pylint: disable=protected-access
self.plugin.storage._storagepath = "/tmp/whatever"
self.plugin.storage._data = self.plugin_cls # pylint: disable=protected-access
with pytest.raises(errors.PluginStorageError):
self.plugin.storage.save()
assert "Could not serialize" in mock_log.call_args[0][0]
def test_save_errors_unable_to_write_file(self):
mock_open = mock.mock_open()
mock_open.side_effect = IOError
with mock.patch("certbot.compat.filesystem.open", mock_open):
with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
self.plugin.storage._data = {"valid": "data"} # pylint: disable=protected-access
self.plugin.storage._initialized = True # pylint: disable=protected-access
self.plugin.storage._storagepath = "/tmp/whatever"
with pytest.raises(errors.PluginStorageError):
self.plugin.storage.save()
assert "Could not write" in mock_log.call_args[0][0]
def test_save_uninitialized(self):
with mock.patch("certbot.reverter.util"):
with pytest.raises(errors.PluginStorageError):
self.plugin_cls(self.config, "x").storage.save()
def test_namespace_isolation(self):
with mock.patch("certbot.reverter.util"):
plugin1 = self.plugin_cls(self.config, "first")
plugin2 = self.plugin_cls(self.config, "second")
plugin1.storage.put("first_key", "first_value")
with pytest.raises(KeyError):
plugin2.storage.fetch("first_key")
with pytest.raises(KeyError):
plugin2.storage.fetch("first")
assert plugin1.storage.fetch("first_key") == "first_value"
def test_saved_state(self):
self.plugin.storage.put("testkey", "testvalue")
# Write to disk
self.plugin.storage.save()
with mock.patch("certbot.reverter.util"):
another = self.plugin_cls(self.config, "mockplugin")
assert another.storage.fetch("testkey") == "testvalue"
with open(os.path.join(self.config.config_dir,
".pluginstorage.json"), 'r') as fh:
psdata = fh.read()
psjson = json.loads(psdata)
assert "mockplugin" in psjson.keys()
assert len(psjson) == 1
assert psjson["mockplugin"]["testkey"] == "testvalue"
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv[1:] + [__file__])) # pragma: no cover
|
2,644 |
start
|
import asyncio
import json
import socket
import time
from typing import Tuple
from app.utility.base_world import BaseWorld
from plugins.manx.app.c_session import Session
class Contact(BaseWorld):
def __init__(self, services):
self.name = 'tcp'
self.description = 'Accept beacons through a raw TCP socket'
self.log = self.create_logger('contact_tcp')
self.contact_svc = services.get('contact_svc')
self.tcp_handler = TcpSessionHandler(services, self.log)
async def METHOD_NAME(self):
loop = asyncio.get_event_loop()
tcp = self.get_config('app.contact.tcp')
loop.create_task(asyncio.start_server(self.tcp_handler.accept, *tcp.split(':')))
loop.create_task(self.operation_loop())
async def operation_loop(self):
while True:
await self.tcp_handler.refresh()
for session in self.tcp_handler.sessions:
_, instructions = await self.contact_svc.handle_heartbeat(paw=session.paw)
for instruction in instructions:
try:
self.log.debug('TCP instruction: %s' % instruction.id)
status, _, response, agent_reported_time = await self.tcp_handler.send(
session.id,
self.decode_bytes(instruction.command),
timeout=instruction.timeout
)
beacon = dict(paw=session.paw,
results=[dict(id=instruction.id, output=self.encode_string(response), status=status, agent_reported_time=agent_reported_time)])
await self.contact_svc.handle_heartbeat(**beacon)
await asyncio.sleep(instruction.sleep)
except Exception as e:
self.log.debug('[-] operation exception: %s' % e)
await asyncio.sleep(20)
class TcpSessionHandler(BaseWorld):
def __init__(self, services, log):
self.services = services
self.log = log
self.sessions = []
async def refresh(self):
index = 0
while index < len(self.sessions):
session = self.sessions[index]
try:
session.connection.send(str.encode(' '))
except socket.error:
self.log.debug('Error occurred when refreshing session %s. Removing from session pool.', session.id)
del self.sessions[index]
else:
index += 1
async def accept(self, reader, writer):
try:
profile = await self._handshake(reader)
except Exception as e:
self.log.debug('Handshake failed: %s' % e)
return
connection = writer.get_extra_info('socket')
profile['executors'] = [e for e in profile['executors'].split(',') if e]
profile['contact'] = 'tcp'
agent, _ = await self.services.get('contact_svc').handle_heartbeat(**profile)
new_session = Session(id=self.generate_number(size=6), paw=agent.paw, connection=connection)
self.sessions.append(new_session)
await self.send(new_session.id, agent.paw, timeout=5)
async def send(self, session_id: int, cmd: str, timeout: int = 60) -> Tuple[int, str, str, str]:
try:
conn = next(i.connection for i in self.sessions if i.id == int(session_id))
conn.send(str.encode(' '))
conn.send(str.encode('%s\n' % cmd))
response = await self._attempt_connection(session_id, conn, timeout=timeout)
response = json.loads(response)
return response['status'], response['pwd'], response['response'], response.get('agent_reported_time', '')
except Exception as e:
self.log.exception(e)
return 1, '~$ ', str(e), ''
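    # Illustrative reply parsed above (field values are made up, not from the
    # source): the agent is expected to answer with JSON such as
    #   {"status": 0, "pwd": "/root", "response": "<command output>",
    #    "agent_reported_time": "2020-01-01T00:00:00Z"}
    # which send() unpacks into (status, pwd, response, agent_reported_time).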
@staticmethod
async def _handshake(reader):
profile_bites = (await reader.readline()).strip()
return json.loads(profile_bites)
async def _attempt_connection(self, session_id, connection, timeout):
buffer = 4096
data = b''
waited_seconds = 0
time.sleep(0.1) # initial wait for fast operations.
while True:
try:
part = connection.recv(buffer)
data += part
if len(part) < buffer:
break
except BlockingIOError as err:
if waited_seconds < timeout:
time.sleep(1)
waited_seconds += 1
else:
self.log.error("Timeout reached for session %s", session_id)
return json.dumps(dict(status=1, pwd='~$ ', response=str(err)))
return str(data, 'utf-8')
|
2,645 |
test gen poll url text
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Widget specific tests
import sys
from importlib import reload
from types import ModuleType
import pytest
from libqtile.widget import generic_poll_text
class Mockxml(ModuleType):
@classmethod
def parse(cls, value):
return {"test": value}
class MockRequest:
return_value = None
def __init__(self, *args, **kwargs):
pass
class Mockurlopen:
def __init__(self, request):
self.request = request
class headers: # noqa: N801
@classmethod
def get_content_charset(cls):
return "utf-8"
def read(self):
return self.request.return_value
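# The two mocks above stand in for urllib: the widget is expected to build a
# Request, hand it to urlopen, then decode read() using the charset from
# headers.get_content_charset(); setting MockRequest.return_value therefore
# controls the raw bytes a test "fetches" (intent inferred from the tests below,
# not restated from the widget implementation).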
def test_gen_poll_text():
gpt_no_func = generic_poll_text.GenPollText()
assert gpt_no_func.poll() == "You need a poll function"
gpt_with_func = generic_poll_text.GenPollText(func=lambda: "Has function")
assert gpt_with_func.poll() == "Has function"
def test_gen_poll_url_not_configured():
gpurl = generic_poll_text.GenPollUrl()
assert gpurl.poll() == "Invalid config"
def test_gen_poll_url_no_json():
gpurl = generic_poll_text.GenPollUrl(json=False)
assert "Content-Type" not in gpurl.headers
def test_gen_poll_url_headers_and_json():
gpurl = generic_poll_text.GenPollUrl(
headers={"fake-header": "fake-value"},
data={"argument": "data value"},
user_agent="qtile test",
)
assert gpurl.headers["User-agent"] == "qtile test"
assert gpurl.headers["fake-header"] == "fake-value"
assert gpurl.headers["Content-Type"] == "application/json"
assert gpurl.data.decode() == '{"argument": "data value"}'
def METHOD_NAME(monkeypatch):
gpurl = generic_poll_text.GenPollUrl(json=False, parse=lambda x: x, url="testing")
monkeypatch.setattr(generic_poll_text, "Request", MockRequest)
monkeypatch.setattr(generic_poll_text, "urlopen", Mockurlopen)
generic_poll_text.Request.return_value = b"OK"
assert gpurl.poll() == "OK"
def test_gen_poll_url_json(monkeypatch):
gpurl = generic_poll_text.GenPollUrl(parse=lambda x: x, data=[1, 2, 3], url="testing")
monkeypatch.setattr(generic_poll_text, "Request", MockRequest)
monkeypatch.setattr(generic_poll_text, "urlopen", Mockurlopen)
generic_poll_text.Request.return_value = b'{"test": "OK"}'
assert gpurl.poll()["test"] == "OK"
def test_gen_poll_url_xml_no_xmltodict(monkeypatch):
gpurl = generic_poll_text.GenPollUrl(json=False, xml=True, parse=lambda x: x, url="testing")
monkeypatch.setattr(generic_poll_text, "Request", MockRequest)
monkeypatch.setattr(generic_poll_text, "urlopen", Mockurlopen)
generic_poll_text.Request.return_value = b"OK"
with pytest.raises(Exception):
gpurl.poll()
def test_gen_poll_url_xml_has_xmltodict(monkeypatch):
    # inject a fake xmltodict module, but we have to reload the widget module
    # as the ImportError check only runs once, when the module is loaded.
monkeypatch.setitem(sys.modules, "xmltodict", Mockxml("xmltodict"))
reload(generic_poll_text)
gpurl = generic_poll_text.GenPollUrl(json=False, xml=True, parse=lambda x: x, url="testing")
monkeypatch.setattr(generic_poll_text, "Request", MockRequest)
monkeypatch.setattr(generic_poll_text, "urlopen", Mockurlopen)
generic_poll_text.Request.return_value = b"OK"
assert gpurl.poll()["test"] == "OK"
def test_gen_poll_url_broken_parse(monkeypatch):
gpurl = generic_poll_text.GenPollUrl(json=False, parse=lambda x: x.foo, url="testing")
monkeypatch.setattr(generic_poll_text, "Request", MockRequest)
monkeypatch.setattr(generic_poll_text, "urlopen", Mockurlopen)
generic_poll_text.Request.return_value = b"OK"
assert gpurl.poll() == "Can't parse"
|
2,646 |
test bad data4
|
from unittest import mock # noqa
import pytest
from awx.api.versioning import reverse
"""
def run_test_ad_hoc_command(self, **kwargs):
# Post to list to start a new ad hoc command.
expect = kwargs.pop('expect', 201)
url = kwargs.pop('url', reverse('api:ad_hoc_command_list'))
data = {
'inventory': self.inventory.pk,
'credential': self.credential.pk,
'module_name': 'command',
'module_args': 'uptime',
}
data.update(kwargs)
for k,v in data.items():
if v is None:
del data[k]
return self.post(url, data, expect=expect)
"""
@pytest.fixture
def post_adhoc(post, inventory, machine_credential):
def f(url, data, user, expect=201):
if not url:
url = reverse('api:ad_hoc_command_list')
if 'module_name' not in data:
data['module_name'] = 'command'
if 'module_args' not in data:
data['module_args'] = 'uptime'
if 'inventory' not in data:
data['inventory'] = inventory.id
if 'credential' not in data:
data['credential'] = machine_credential.id
for k, v in list(data.items()):
if v is None:
del data[k]
return post(url, data, user, expect=expect)
return f
@pytest.mark.django_db
def test_admin_post_ad_hoc_command_list(admin, post_adhoc, inventory, machine_credential):
res = post_adhoc(reverse('api:ad_hoc_command_list'), {}, admin, expect=201)
assert res.data['job_type'] == 'run'
assert res.data['inventory'], inventory.id
assert res.data['credential'] == machine_credential.id
assert res.data['module_name'] == 'command'
assert res.data['module_args'] == 'uptime'
assert res.data['limit'] == ''
assert res.data['forks'] == 0
assert res.data['verbosity'] == 0
assert res.data['become_enabled'] is False
@pytest.mark.django_db
def test_empty_post_403(admin, post):
post(reverse('api:ad_hoc_command_list'), {}, admin, expect=400)
@pytest.mark.django_db
def test_empty_put_405(admin, put):
put(reverse('api:ad_hoc_command_list'), {}, admin, expect=405)
@pytest.mark.django_db
def test_empty_patch_405(admin, patch):
patch(reverse('api:ad_hoc_command_list'), {}, admin, expect=405)
@pytest.mark.django_db
def test_empty_delete_405(admin, delete):
delete(reverse('api:ad_hoc_command_list'), admin, expect=405)
@pytest.mark.django_db
def test_user_post_ad_hoc_command_list(alice, post_adhoc, inventory, machine_credential):
inventory.adhoc_role.members.add(alice)
machine_credential.use_role.members.add(alice)
post_adhoc(reverse('api:ad_hoc_command_list'), {}, alice, expect=201)
@pytest.mark.django_db
def test_user_post_ad_hoc_command_list_xfail(alice, post_adhoc, inventory, machine_credential):
inventory.read_role.members.add(alice) # just read access? no dice.
machine_credential.use_role.members.add(alice)
post_adhoc(reverse('api:ad_hoc_command_list'), {}, alice, expect=403)
@pytest.mark.django_db
def test_user_post_ad_hoc_command_list_without_creds(alice, post_adhoc, inventory, machine_credential):
inventory.adhoc_role.members.add(alice)
post_adhoc(reverse('api:ad_hoc_command_list'), {}, alice, expect=403)
@pytest.mark.django_db
def test_user_post_ad_hoc_command_list_without_inventory(alice, post_adhoc, inventory, machine_credential):
machine_credential.use_role.members.add(alice)
post_adhoc(reverse('api:ad_hoc_command_list'), {}, alice, expect=403)
@pytest.mark.django_db
def test_admin_post_inventory_ad_hoc_command_list(admin, post_adhoc, inventory):
post_adhoc(reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': inventory.id}), {'inventory': None}, admin, expect=201)
post_adhoc(reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': inventory.id}), {}, admin, expect=201)
@pytest.mark.django_db
def test_get_inventory_ad_hoc_command_list(admin, alice, post_adhoc, get, inventory_factory, machine_credential):
inv1 = inventory_factory('inv1')
inv2 = inventory_factory('inv2')
post_adhoc(reverse('api:ad_hoc_command_list'), {'inventory': inv1.id}, admin, expect=201)
post_adhoc(reverse('api:ad_hoc_command_list'), {'inventory': inv2.id}, admin, expect=201)
res = get(reverse('api:ad_hoc_command_list'), admin, expect=200)
assert res.data['count'] == 2
res = get(reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': inv1.id}), admin, expect=200)
assert res.data['count'] == 1
res = get(reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': inv2.id}), admin, expect=200)
assert res.data['count'] == 1
inv1.adhoc_role.members.add(alice)
res = get(reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': inv1.id}), alice, expect=200)
assert res.data['count'] == 1
machine_credential.use_role.members.add(alice)
res = get(reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': inv1.id}), alice, expect=200)
assert res.data['count'] == 1
res = get(reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': inv2.id}), alice, expect=403)
@pytest.mark.django_db
def test_bad_data1(admin, post_adhoc):
post_adhoc(reverse('api:ad_hoc_command_list'), {'module_name': 'command', 'module_args': None}, admin, expect=400)
@pytest.mark.django_db
def test_bad_data2(admin, post_adhoc):
post_adhoc(reverse('api:ad_hoc_command_list'), {'job_type': 'baddata'}, admin, expect=400)
@pytest.mark.django_db
def test_bad_data3(admin, post_adhoc):
post_adhoc(reverse('api:ad_hoc_command_list'), {'verbosity': -1}, admin, expect=400)
@pytest.mark.django_db
def METHOD_NAME(admin, post_adhoc):
post_adhoc(reverse('api:ad_hoc_command_list'), {'forks': -1}, admin, expect=400)
|
2,647 |
build allocate ip data
|
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import ipaddress
import json
from typing import List
from google.protobuf import json_format
from load_tests.common import (
PROTO_DIR,
benchmark_grpc_request,
make_full_request_type,
make_output_file_path,
)
from lte.protos.apn_pb2 import APNConfiguration
from lte.protos.mobilityd_pb2 import (
AllocateIPRequest,
IPBlock,
ReleaseIPRequest,
RemoveIPBlockRequest,
)
from lte.protos.mobilityd_pb2_grpc import MobilityServiceStub
from lte.protos.subscriberdb_pb2 import (
Non3GPPUserProfile,
SubscriberData,
SubscriberID,
)
from lte.protos.subscriberdb_pb2_grpc import SubscriberDBStub
from magma.common.service_registry import ServiceRegistry
from magma.subscriberdb.sid import SIDUtils
from orc8r.protos.common_pb2 import Void
TEST_APN = 'magma.ipv4'
MOBILITYD_SERVICE_NAME = 'mobilityd'
MOBILITYD_SERVICE_RPC_PATH = 'magma.lte.MobilityService'
SUBSCRIBERDB_SERVICE_NAME = 'subscriberdb'
MOBILITYD_PORT = '0.0.0.0:60051'
PROTO_PATH = PROTO_DIR + '/mobilityd.proto'
# Helper functions to build input data for gRPC functions
def _load_subs(num_subs: int) -> List[SubscriberID]:
client = SubscriberDBStub(
ServiceRegistry.get_rpc_channel(
SUBSCRIBERDB_SERVICE_NAME, ServiceRegistry.LOCAL,
),
)
sids = []
for i in range(1, num_subs):
sid = SubscriberID(id=str(i).zfill(15))
config = Non3GPPUserProfile(
apn_config=[APNConfiguration(service_selection=TEST_APN)],
)
data = SubscriberData(sid=sid, non_3gpp=config)
client.AddSubscriber(data)
sids.append(sid)
return sids
def _cleanup_subs():
client = SubscriberDBStub(
ServiceRegistry.get_rpc_channel(
SUBSCRIBERDB_SERVICE_NAME, ServiceRegistry.LOCAL,
),
)
for sid in client.ListSubscribers(Void()).sids:
client.DeleteSubscriber(SIDUtils.to_pb('IMSI%s' % sid.id))
def METHOD_NAME(num_subs: int, input_file: str):
active_sids = _load_subs(num_subs)
allocate_ip_reqs = []
for sid in active_sids:
ip_req = AllocateIPRequest(
sid=sid, version=AllocateIPRequest.IPV4,
apn=TEST_APN,
) # hardcoding APN
ip_req_dict = json_format.MessageToDict(ip_req)
# Dumping AllocateIP request into json
allocate_ip_reqs.append(ip_req_dict)
with open(input_file, 'w') as file:
json.dump(allocate_ip_reqs, file, separators=(',', ':'))
def _setup_ip_block(client):
ip_blocks_rsp = client.ListAddedIPv4Blocks(Void())
remove_blocks_req = RemoveIPBlockRequest(force=True)
for block in ip_blocks_rsp.ip_block_list:
remove_blocks_req.ip_blocks.append(block)
client.RemoveIPBlock(remove_blocks_req)
test_ip_block = '192.168.128.0/20'
ip_block = ipaddress.ip_network(test_ip_block)
client.AddIPBlock(
IPBlock(
version=IPBlock.IPV4,
net_address=ip_block.network_address.packed,
prefix_len=ip_block.prefixlen,
),
)
def _build_release_ip_data(client: MobilityServiceStub, input_file: str):
release_ip_reqs = []
table = client.GetSubscriberIPTable(Void())
if not table.entries:
print('No IPs allocated to be freed, please run allocate test first')
exit(1)
for entry in table.entries:
release_ip_req = ReleaseIPRequest(
sid=entry.sid, ip=entry.ip,
apn=entry.apn,
)
release_ip_dict = json_format.MessageToDict(release_ip_req)
# Dumping ReleaseIP request into json
release_ip_reqs.append(release_ip_dict)
with open(input_file, 'w') as file:
json.dump(release_ip_reqs, file)
def create_parser():
"""
Creates the argparse parser with all the arguments.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Add subcommands
subparsers = parser.add_subparsers(title="subcommands", dest="cmd")
parser_allocate = subparsers.add_parser(
"allocate",
help="Allocate IP load test",
)
parser_release = subparsers.add_parser(
"release",
help="Release IP load test",
)
# Add arguments
for cmd in [
parser_allocate,
parser_release,
]:
cmd.add_argument("--num", default=2000, help="Number of requests")
cmd.add_argument("--import_path", help="Protobuf dir import path")
# Add function callbacks
parser_allocate.set_defaults(func=parser_allocate)
parser_release.set_defaults(func=parser_release)
return parser
def main():
parser = create_parser()
# Parse the args
args = parser.parse_args()
if not args.cmd:
parser.print_usage()
exit(1)
client = MobilityServiceStub(
ServiceRegistry.get_rpc_channel(
MOBILITYD_SERVICE_NAME,
ServiceRegistry.LOCAL,
),
)
if args.cmd == 'allocate':
_cleanup_subs()
_setup_ip_block(client)
input_file = 'allocate_data.json'
request_type = 'AllocateIPAddress'
METHOD_NAME(args.num, input_file)
benchmark_grpc_request(
proto_path=PROTO_PATH,
full_request_type=make_full_request_type(
MOBILITYD_SERVICE_RPC_PATH, request_type,
),
input_file=input_file,
output_file=make_output_file_path(request_type),
num_reqs=args.num,
address=MOBILITYD_PORT,
import_path=args.import_path,
)
_cleanup_subs()
elif args.cmd == 'release':
input_file = 'release_data.json'
request_type = 'ReleaseIPAddress'
_build_release_ip_data(client, input_file)
benchmark_grpc_request(
proto_path=PROTO_PATH,
full_request_type=make_full_request_type(
MOBILITYD_SERVICE_RPC_PATH, request_type,
),
input_file=input_file,
output_file=make_output_file_path(request_type),
num_reqs=args.num,
address=MOBILITYD_PORT,
import_path=args.import_path,
)
if __name__ == "__main__":
main()
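# Usage sketch (hedged): the script filename below is a hypothetical placeholder. The
# subcommands and the --num flag come from create_parser() above, and the mobilityd and
# subscriberdb gRPC services must be reachable through the local ServiceRegistry.
#
#   python3 allocate_ip_load_test.py allocate --num 500
#   python3 allocate_ip_load_test.py release --num 500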
|
2,648 |
report
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Teachers that wrap around other teachers, for instance, to modify message fields while
keeping the examples/episodes the same.
This is useful when working with agents that expect examples to be in a certain format,
for instance a classifier that classifies the "text" field of a message. The meta-
teachers in this module can be used to avoid writing several different nearly identical
variants of different teachers: for instance, if you want to flatten examples and strip
away all but the previous utterance in the 'text' field for several different teachers,
it would be much easier to do so with one teacher in this module than with a brand new
teacher for each of the original teachers.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
import copy
from abc import ABC
from parlai.core.agents import create_agent_from_shared
from parlai.core.message import Message
from parlai.core.opt import Opt
from parlai.core.teachers import (
create_task_agent_from_taskname,
FixedDialogTeacher,
Teacher,
)
from parlai.utils.misc import warn_once
class AbstractWrapperTeacher(Teacher, ABC):
"""
Abstract teacher that wraps around another teacher.
This teacher allows for manipulating the fields returned by the inner teacher, in
the abstract self._edit_action() method that is called during self.act(). The inner
teacher must subclass FixedDialogTeacher in order to make use of that teacher's
.get_orig_action() and .process_action() methods.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
agent = parser.add_argument_group('AbstractWrapper args')
agent.add_argument(
'-wt',
'--wrapper-task',
type=str,
help='The task whose fields will be manipulated.',
)
try:
parser.add_task_args(partial_opt['wrapper_task'], partial_opt)
except KeyError:
warn_once(
'The task name cannot be parsed from command-line arguments! '
'Task-specific flags will not be added.'
)
return parser
def __init__(self, opt: Opt, shared=None):
if ',' in opt['task']:
raise ValueError(
'AbstractWrapperTeacher cannot be used with multiple tasks!'
)
self.id = opt['task']
self.opt = opt
if shared:
self.task = create_agent_from_shared(shared['task'])
else:
opt_singletask = copy.deepcopy(opt)
opt_singletask['task'] = opt['wrapper_task']
self.task = create_task_agent_from_taskname(opt_singletask)[0]
assert isinstance(self.task, FixedDialogTeacher)
def act(self):
"""
Act on the previous observation.
Normally, the inner teacher would call .get_orig_action() and .process_action();
here, we insert an ._edit_action() method in between these two methods in order
to allow for arbitrary manipulation of the action before it is registered and
processed further by the inner teacher.
"""
orig_action = self.task.get_orig_action()
edited_action = self._edit_action(orig_action)
processed_action = self.task.process_action(edited_action)
return processed_action
def _edit_action(self, act: Message) -> Message:
"""
Edit and return the input action.
The input action typically comes from the inner teacher's .get_orig_action()
method.
"""
raise NotImplementedError(
'Abstract class: user must implement the _edit_action() method'
)
def num_examples(self):
"""
Return the number of examples.
"""
return self.task.num_examples()
def num_episodes(self):
"""
Return the number of episodes.
Because the dataset is flattened, there will be one episode per example.
"""
return self.task.num_examples()
def observe(self, observation):
"""
Make an observation.
"""
return self.task.observe(observation)
def epoch_done(self):
"""
Return whether the subtask is completed.
"""
return self.task.epoch_done()
def METHOD_NAME(self):
"""
Report metrics for the subtask.
"""
return self.task.METHOD_NAME()
def reset(self):
"""
Reset the subtask.
"""
self.task.reset()
def reset_metrics(self):
"""
Reset metrics for the subtask.
"""
self.task.reset_metrics()
def save(self):
"""
Save the subtask.
"""
self.task.save()
def share(self):
"""
Share the subtask.
"""
shared = {}
shared['class'] = type(self)
shared['opt'] = self.opt
shared['task'] = self.task.share()
return shared
class LabelToTextTeacher(AbstractWrapperTeacher):
"""
Teacher that will shift message['labels'][0] into message['text'] for whatever task
is specified with --wrapper-task.
Because the dialogue history is effectively overwritten by this action, all episodes
will be flattened into one example each.
"""
def __init__(self, opt: Opt, shared=None):
super().__init__(opt, shared)
def _edit_action(self, act: Message) -> Message:
"""
Edit the fields of the action manually.
"""
if 'labels' in act:
labels = act['labels']
if len(labels) != 1:
raise ValueError(
f'{type(self).__name__} can only be used with one label!'
)
act.force_set('text', labels[0])
act.force_set('labels', [''])
else:
assert 'text' not in act and act['episode_done'] is True
act.force_set('episode_done', True) # Clear the dialogue history
return act
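# Usage sketch (hedged): the task alias below is purely illustrative -- the exact string
# depends on how and where this module is registered in ParlAI. The --wrapper-task flag is
# the one defined in AbstractWrapperTeacher.add_cmdline_args above.
#
#   parlai display_data --task <alias_for_this_module>:LabelToTextTeacher --wrapper-task <inner_task>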
|
2,649 |
user data
|
import jwt
from social_core.backends.oauth import BaseOAuth2
class KeycloakOAuth2(BaseOAuth2): # pylint: disable=abstract-method
"""Keycloak OAuth2 authentication backend
    This backend has been tested to work with a standard Keycloak installation,
but you might have to specialize it and tune the parameters per your
configuration.
This setup specializes the OAuth2 backend which, strictly speaking, offers
authorization without authentication capabilities.
    Keycloak does offer a full OpenID Connect implementation, but integrating
    it is rather labor intensive.
This backend is configured to get an access token instead, and assume that
the access token contains the necessary user details for authentication.
    The integrity of the authentication process is ensured by public key
    verification of the `access_token`, along with checking of the OpenID
    Connect `aud` field.
To set up, please take the following steps:
1. Create a new Keycloak client in the Clients section:
a. Choose the `Client ID` in the `General Settings` pane.
b. Select `Client authentication` and `Authorization` in the
`Capability config` pane.
2. Configure the following parameters in the Client setup:
Settings >
Client ID (copy to settings as `KEY` value)
Credentials >
Client Authenticator >
Use `Client Id and Secret` and copy the `Client secret` value
to settings as `SECRET` value
3. For the tokens to work with the JWT setup the following configuration has
to be made in Keycloak:
Advanced >
Fine grain OpenID Connect configuration >
User Info Signed Response Algorithm >
RS256
Advanced >
Fine grain OpenID Connect configuration >
Request Object Signature Algorithm >
RS256
4. Re-enable the audience (see https://issues.redhat.com/browse/KEYCLOAK-6638
for context):
Go to Client scopes > YOUR-CLIENT-ID-dedicated > Add mapper > Audience, pick
a name for the mapper and select the Client ID corresponding to your client
in `Included Client Audience`.
5. Get the public key (copy to settings as `PUBLIC_KEY` value) to be used
with the backend:
Realm Settings > Keys > Public key
    6. Access token fields are configured via the Keycloak Client mappers:
Clients > Client ID > Mappers
They have to include at least the `ID_KEY` value and the dictionary keys
defined in the `get_user_details` method.
7. Configure your web backend. Example setting values for Django settings
could be:
SOCIAL_AUTH_KEYCLOAK_KEY = 'example'
SOCIAL_AUTH_KEYCLOAK_SECRET = '1234abcd-1234-abcd-1234-abcd1234adcd'
SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = \
'pempublickeythatis2048bitsinbase64andhaseg392characters'
SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = \
'https://sso.com/auth/realms/example/protocol/openid-connect/auth'
SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = \
'https://sso.com/auth/realms/example/protocol/openid-connect/token'
    8. The default behaviour is to associate users via the username field, but you
can change the key with e.g.
SOCIAL_AUTH_KEYCLOAK_ID_KEY = 'email'
Please make sure your Keycloak user database and Django user database do not
conflict and that there is no risk of user account hijacking by false
account association.
"""
name = "keycloak"
ACCESS_TOKEN_METHOD = "POST"
REDIRECT_STATE = False
def authorization_url(self):
return self.setting("AUTHORIZATION_URL")
def access_token_url(self):
return self.setting("ACCESS_TOKEN_URL")
def audience(self):
return self.setting("KEY")
def algorithm(self):
return self.setting("ALGORITHM", default="RS256")
def public_key(self):
return "\n".join(
[
"-----BEGIN PUBLIC KEY-----",
self.setting("PUBLIC_KEY"),
"-----END PUBLIC KEY-----",
]
)
def id_key(self):
return self.setting("ID_KEY", default="username")
def METHOD_NAME(
self, access_token, *args, **kwargs
): # pylint: disable=unused-argument
"""Decode user data from the access_token
You can specialize this method to e.g. get information
from the Keycloak backend if you do not want to include
the user information in the access_token.
"""
return jwt.decode(
access_token,
key=self.public_key(),
algorithms=self.algorithm(),
audience=self.audience(),
)
def get_user_details(self, response):
"""Map fields in user_data into Django User fields"""
return {
"username": response.get("preferred_username"),
"email": response.get("email"),
"fullname": response.get("name"),
"first_name": response.get("given_name"),
"last_name": response.get("family_name"),
}
def get_user_id(self, details, response):
"""Get and associate Django User by the field indicated by ID_KEY
The ID_KEY can be any field in the user details or the access token.
"""
id_key = self.id_key()
if id_key in details:
return details[id_key]
return response.get(id_key)
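# Usage sketch (hedged): the dotted path below is an assumption -- point it at wherever this
# module lives in your project. KeycloakOAuth2 is registered like any other
# python-social-auth backend through Django's AUTHENTICATION_BACKENDS setting, alongside the
# SOCIAL_AUTH_KEYCLOAK_* values shown in the class docstring.
#
#   AUTHENTICATION_BACKENDS = (
#       "myproject.auth.keycloak.KeycloakOAuth2",  # hypothetical module path
#       "django.contrib.auth.backends.ModelBackend",
#   )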
|
2,650 |
is buffer empty
|
import logging
from typing import Union, Dict
import numpy as np
from .abstract_recorder import *
class VideoRecorder(Recorder):
"""
Writes video streams (.mjpeg/.h264/.hevc) or directly to mp4/avi container.
"""
def __init__(self, lossless: bool = False):
self.path = None
self._stream_type = dict()
self._writers = dict()
self._closed = False
self._lossless = lossless
def __getitem__(self, item):
return self._writers[item]
# TODO device is not used
def update(self, path: Path, device: dai.Device, xouts: List['XoutFrames']):
"""
Update the recorder with new streams.
Args:
path: Path to save the output. Either a folder or a file.
device: Device to get the streams from.
xouts: List of output streams.
"""
if path is None:
return
self.path = path
if path.suffix == '' and path != Path('.'): # If no extension, create a folder
self.path.mkdir(parents=True, exist_ok=True)
for xout in xouts:
# for example, 'color_bitstream' (encoded) or 'color_video' (unencoded),
# if component was created with name='color'
xout_name = xout.name # for example, 'color' --> file is color.mp4 (encoded) or color.avi (unencoded)
file_name = xout_name
if file_name.startswith('CameraBoardSocket.'):
file_name = file_name[len('CameraBoardSocket.'):]
stream = OakStream(xout)
fourcc = stream.fourcc() # TODO add default fourcc? stream.fourcc() can be None.
print(fourcc, xout_name, stream.type)
if stream.is_raw() or stream.is_depth():
from .video_writers.video_writer import VideoWriter
self._writers[xout_name] = VideoWriter(self.path, file_name, self._lossless)
else:
try:
from .video_writers.av_writer import AvWriter
self._writers[xout_name] = AvWriter(self.path, file_name, fourcc)
except Exception as e:
                    # TODO: errors other than ImportError can also end up here
logging.warning(f'Exception while creating AvWriter: {e}.'
'\nFalling back to FileWriter, saving uncontainerized encoded streams.')
from .video_writers.file_writer import FileWriter
self._writers[xout_name] = FileWriter(self.path, file_name, fourcc)
def create_files_for_buffer(self, subfolder: str, buf_name: str):
for _, writer in self._writers.items():
writer.create_file_for_buffer(subfolder, buf_name)
def create_file_for_buffer(self, wr_name: str, subfolder: str, buf_name: str):
self._writers[wr_name].create_file_for_buffer(subfolder, buf_name)
def create_file(self, wr_name: str, subfolder: str, frame: Union[np.ndarray, dai.ImgFrame]):
self._writers[wr_name].create_file(subfolder, frame)
def init_buffers(self, buffers: Dict[str, int]):
for _, writer in self._writers.items():
for name, max_seconds in buffers.items():
writer.init_buffer(name, max_seconds)
def add_to_buffers(self, buf_name: str, frames: Dict[str, Union[np.ndarray, dai.ImgFrame]]):
for name, writer in self._writers.items():
writer.add_to_buffer(buf_name, frames[name])
def add_to_buffer(self, wr_name: str, buf_name: str, frame: Union[np.ndarray, dai.ImgFrame]):
self._writers[wr_name].add_to_buffer(buf_name, frame)
def is_buffer_full(self, wr_name: str, buf_name: str):
return self._writers[wr_name].is_buffer_full(buf_name)
def METHOD_NAME(self, wr_name: str, buf_name: str):
return self._writers[wr_name].METHOD_NAME(buf_name)
def write_from_buffer(self, wr_name: str, buf_name: str, n_elems: int):
self._writers[wr_name].write_from_buffer(buf_name, n_elems)
def write(self, name: str, frame: Union[np.ndarray, dai.ImgFrame]):
self._writers[name].write(frame)
def close_files(self):
for _, writer in self._writers.items():
writer.close()
def close(self):
if self._closed:
return
self._closed = True
logging.info(f'Video Recorder saved stream(s) to folder: {str(self.path)}')
# Close opened files
for name, writer in self._writers.items():
writer.close()
|
2,651 |
load state dict
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections import defaultdict
from collections.abc import Callable, Mapping, Sequence
from enum import Enum
from threading import RLock
from typing import TYPE_CHECKING, Any
from monai.config import IgniteInfo
from monai.utils import min_version, optional_import
from monai.utils.enums import CommonKeys
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import(
"ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine", as_type="decorator"
)
def _get_loss_from_output(output: Sequence[Mapping[str, Any]], loss_key: str = CommonKeys.LOSS) -> Any:
return output[0][loss_key]
class MetricLoggerKeys(Enum):
METRICS = "Metrics"
LOSS = "Loss"
class MetricLogger:
"""
Collect per-iteration metrics and loss value from the attached trainer. This will also collect metric values from
a given evaluator object which is expected to perform evaluation at the end of training epochs. This class is
useful for collecting loss and metric values in one place for storage with checkpoint savers (`state_dict` and
`load_state_dict` methods provided as expected by Pytorch and Ignite) and for graphing during training.
Example::
# construct an evaluator saving mean dice metric values in the key "val_mean_dice"
evaluator = SupervisedEvaluator(..., key_val_metric={"val_mean_dice": MeanDice(...)})
# construct the logger and associate with evaluator to extract metric values from
logger = MetricLogger(evaluator=evaluator)
# construct the trainer with the logger passed in as a handler so that it logs loss values
trainer = SupervisedTrainer(..., train_handlers=[logger, ValidationHandler(1, evaluator)])
# run training, logger.loss will be a list of (iteration, loss) values, logger.metrics a dict with key
# "val_mean_dice" storing a list of (iteration, metric) values
trainer.run()
Args:
        loss_transform: Converts the `output` value from the trainer's state into a loss value.
`engine.state` and `loss_transform` inherit from the ignite concept:
https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
metric_transform: Converts the metric value coming from the trainer/evaluator's state into a storable value
evaluator: Optional evaluator to consume metric results from at the end of its evaluation run
"""
def __init__(
self,
loss_transform: Callable = _get_loss_from_output,
metric_transform: Callable = lambda x: x,
evaluator: Engine | None = None,
) -> None:
self.loss_transform = loss_transform
self.metric_transform = metric_transform
self.loss: list = []
self.metrics: defaultdict = defaultdict(list)
self.iteration = 0
self.lock = RLock()
if evaluator is not None:
self.attach_evaluator(evaluator)
def attach(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
engine.add_event_handler(Events.ITERATION_COMPLETED, self)
def attach_evaluator(self, evaluator: Engine) -> None:
"""
Attach event handlers to the given evaluator to log metric values from it.
Args:
evaluator: Ignite Engine implementing network evaluation
"""
evaluator.add_event_handler(Events.COMPLETED, self.log_metrics)
def __call__(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
with self.lock:
self.iteration = engine.state.iteration
lossval = self.loss_transform(engine.state.output)
self.loss.append((self.iteration, lossval))
self.log_metrics(engine)
def log_metrics(self, engine: Engine) -> None:
"""
Log metrics from the given Engine's state member.
Args:
engine: Ignite Engine to log from
"""
with self.lock:
for m, v in engine.state.metrics.items():
v = self.metric_transform(v)
self.metrics[m].append((self.iteration, v))
def state_dict(self):
return {MetricLoggerKeys.LOSS: self.loss, MetricLoggerKeys.METRICS: self.metrics}
def METHOD_NAME(self, state_dict):
self.loss[:] = state_dict[MetricLoggerKeys.LOSS]
self.metrics.clear()
self.metrics.update(state_dict[MetricLoggerKeys.METRICS])
metriclogger = MetricLogger
|
2,652 |
test configure crc ranges
|
import array
from tests.QtTestCase import QtTestCase
from urh.controller.widgets.ChecksumWidget import ChecksumWidget
from urh.controller.dialogs.ProtocolLabelDialog import ProtocolLabelDialog
from urh.signalprocessing.ChecksumLabel import ChecksumLabel
from urh.signalprocessing.FieldType import FieldType
from urh.signalprocessing.Message import Message
from urh.signalprocessing.MessageType import MessageType
from urh.util import util
from urh.util.GenericCRC import GenericCRC
from urh.util.WSPChecksum import WSPChecksum
class TestChecksumWidget(QtTestCase):
def METHOD_NAME(self):
checksum_label = ChecksumLabel("checksum_label", 50, 100, 0, FieldType("crc", FieldType.Function.CHECKSUM))
crc_widget_controller = ChecksumWidget(checksum_label, Message([0] * 100, 0, MessageType("test")), 0)
model = crc_widget_controller.data_range_table_model
self.assertEqual(model.data(model.index(0, 0)), 1)
self.assertEqual(model.data(model.index(0, 1)), 50)
self.assertEqual(model.rowCount(), 1)
crc_widget_controller.ui.btnAddRange.click()
self.assertEqual(model.rowCount(), 2)
crc_widget_controller.ui.btnAddRange.click()
self.assertEqual(model.rowCount(), 3)
crc_widget_controller.ui.btnRemoveRange.click()
self.assertEqual(model.rowCount(), 2)
crc_widget_controller.ui.btnRemoveRange.click()
self.assertEqual(model.rowCount(), 1)
crc_widget_controller.ui.btnRemoveRange.click()
self.assertEqual(model.rowCount(), 1)
def test_configure_crc_parameters(self):
crc_label = ChecksumLabel("crc_label", 25, 120, 0, FieldType("crc", FieldType.Function.CHECKSUM))
crc_widget_controller = ChecksumWidget(crc_label, Message([0] * 150, 0, MessageType("test")), 0)
crc = GenericCRC(polynomial=list(GenericCRC.DEFAULT_POLYNOMIALS.keys())[0])
self.assertEqual(crc_widget_controller.ui.lineEditCRCPolynomial.text(), crc.polynomial_as_hex_str)
self.assertEqual(crc_widget_controller.ui.lineEditStartValue.text(), util.bit2hex(crc.start_value))
self.assertEqual(crc_widget_controller.ui.lineEditFinalXOR.text(), util.bit2hex(crc.final_xor))
crc_widget_controller.ui.comboBoxCRCFunction.setCurrentIndex(2)
crc.polynomial = crc.choose_polynomial(2)
self.assertEqual(crc_widget_controller.ui.lineEditCRCPolynomial.text(), crc.polynomial_as_hex_str)
crc_widget_controller.ui.lineEditCRCPolynomial.setText("abcde")
crc_widget_controller.ui.lineEditCRCPolynomial.editingFinished.emit()
self.assertEqual(crc_label.checksum.polynomial, array.array("B", [1]) + util.hex2bit("abcde"))
crc_widget_controller.ui.lineEditStartValue.setText("12345")
crc_widget_controller.ui.lineEditStartValue.editingFinished.emit()
self.assertEqual(util.bit2hex(crc_label.checksum.start_value), "12345")
crc_widget_controller.ui.lineEditFinalXOR.setText("cccaa")
crc_widget_controller.ui.lineEditFinalXOR.editingFinished.emit()
self.assertEqual(util.bit2hex(crc_label.checksum.final_xor), "cccaa")
def test_default_crcs(self):
crc_label = ChecksumLabel("crc_label", 25, 120, 0, FieldType("crc", FieldType.Function.CHECKSUM))
crc_widget_controller = ChecksumWidget(crc_label, Message([0] * 150, 0, MessageType("test")), 0)
default_crc_polynomials = GenericCRC.DEFAULT_POLYNOMIALS
special_crcs = ChecksumWidget.SPECIAL_CRCS
self.assertEqual(len(default_crc_polynomials) + len(special_crcs),
crc_widget_controller.ui.comboBoxCRCFunction.count())
for i, default_polynomial_name in enumerate(default_crc_polynomials):
self.assertEqual(default_polynomial_name, crc_widget_controller.ui.comboBoxCRCFunction.itemText(i))
for i, special_crc in enumerate(special_crcs):
self.assertEqual(special_crc, crc_widget_controller.ui.comboBoxCRCFunction.itemText(i+len(default_crc_polynomials)))
crc_widget_controller.ui.comboBoxCRCFunction.setCurrentIndex(1)
self.assertNotEqual(crc_widget_controller.ui.comboBoxCRCFunction.currentText(), "8_standard")
crc_widget_controller.ui.comboBoxCRCFunction.setCurrentText("8_standard")
self.assertEqual(crc_widget_controller.ui.comboBoxCRCFunction.currentText(), "8_standard")
self.assertEqual(crc_widget_controller.ui.lineEditCRCPolynomial.text(), "d5")
def test_crc_widget_in_protocol_label_dialog(self):
mt = MessageType("test")
mt.append(ChecksumLabel("test_crc", 8, 16, 0, FieldType("test_crc", FieldType.Function.CHECKSUM)))
self.dialog = ProtocolLabelDialog(Message([0] * 100, 0, mt), 0)
self.assertEqual(self.dialog.ui.tabWidgetAdvancedSettings.count(), 1)
self.assertEqual(self.dialog.ui.tabWidgetAdvancedSettings.tabText(0), "test_crc")
def test_enocean_checksum(self):
checksum_label = ChecksumLabel("checksum_label", 50, 100, 0, FieldType("crc", FieldType.Function.CHECKSUM))
crc_widget_controller = ChecksumWidget(checksum_label, Message([0] * 100, 0, MessageType("test")), 0)
crc_widget_controller.ui.comboBoxCategory.setCurrentIndex(1)
self.assertEqual(crc_widget_controller.ui.stackedWidget.currentWidget(), crc_widget_controller.ui.page_wsp)
self.assertTrue(crc_widget_controller.ui.radioButtonWSPAuto.isChecked())
crc_widget_controller.ui.radioButtonWSPChecksum8.click()
self.assertEqual(checksum_label.checksum.mode, WSPChecksum.ChecksumMode.checksum8)
|
2,653 |
data arrays to quantities
|
from typing import Hashable, Mapping, MutableMapping, Set
import cftime
import pace.util
import fv3gfs.wrapper
import fv3gfs.wrapper._properties
import numpy as np
import xarray as xr
from runtime.names import DELP, PHYSICS_PRECIP_RATE, TIME_KEYS
from runtime.types import State
from toolz import dissoc
from vcm import DerivedMapping, round_time
class FV3StateMapper(Mapping):
""" A mapping interface for the FV3GFS getter.
Maps variables to the common names used in shared functions.
By default adds mapping {"lon": "longitude", "lat": "latitude"}
"""
def __init__(self, getter, alternate_keys: Mapping[str, str] = None):
self._getter = getter
self._alternate_keys = alternate_keys or {
"lon": "longitude",
"lat": "latitude",
"physics_precip": PHYSICS_PRECIP_RATE,
}
# Expose a subset of the physics diagnostics to the getter. When adding
# fields to this dictionary it is important to check that they are
# instantaneous (as opposed to interval-averaged) diagnostics in FV3GFS;
# otherwise their meaning will change depending on the value of the
# gfs_physics_nml.fhzero namelist parameter.
self._diagnostics = {
"latent_heat_flux": "lhtfl",
"eastward_wind_at_10m": "u10m",
"northward_wind_at_10m": "v10m",
}
def __getitem__(self, key: str) -> xr.DataArray:
if key in TIME_KEYS:
time = self._getter.get_state([key])[key]
return xr.DataArray(time, name=key)
elif key in self._diagnostics:
return self._getter.get_diagnostic_by_name(
self._diagnostics[key]
).data_array
elif key == "total_water":
return self._total_water()
else:
if key in self._alternate_keys:
key = self._alternate_keys[key]
try:
return self._getter.get_state([key])[key].data_array
except pace.util.InvalidQuantityError as e:
raise KeyError(e)
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.keys())
def keys(self):
dynamics_names = set(
v["name"] for v in fv3gfs.wrapper._properties.DYNAMICS_PROPERTIES
)
physics_names = set(
v["name"] for v in fv3gfs.wrapper._properties.PHYSICS_PROPERTIES
)
tracer_names = set(v for v in self._getter.get_tracer_metadata())
# see __getitem__
local_names = {"latent_heat_flux", "total_water"}
return dynamics_names | physics_names | tracer_names | local_names
def _total_water(self):
a = self._getter.get_tracer_metadata()
water_species = [name for name in a if a[name]["is_water"]]
return sum(self[name] for name in water_species)
class DerivedFV3State(MutableMapping):
"""A uniform mapping-like interface to the FV3GFS model state
    This class wraps the fv3gfs getters with the FV3StateMapper, which always
    returns a DataArray, and exposes time as an attribute (since time isn't a
    DataArray). This encapsulates the details of Quantity.
"""
def __init__(self, getter):
"""
Args:
getter: the fv3gfs object or a mock of it.
"""
self._getter = getter
self._mapper = DerivedMapping(FV3StateMapper(getter, alternate_keys=None))
@property
def time(self) -> cftime.DatetimeJulian:
state_time = self._getter.get_state(["time"])["time"]
return round_time(cftime.DatetimeJulian(*state_time.timetuple()))
def __getitem__(self, key: Hashable) -> xr.DataArray:
return self._mapper[key]
def __setitem__(self, key: str, value: xr.DataArray):
state_update = _cast_single_to_double({key: value})
try:
self._getter.set_state_mass_conserving(
METHOD_NAME(state_update)
)
except ValueError as e:
raise KeyError(e)
def keys(self):
return self._mapper.keys()
def update_mass_conserving(
self, items: State,
):
"""Update state from another mapping
This may be faster than setting each item individually. Same as dict.update.
All states except for pressure thicknesses are set in a mass-conserving fashion.
"""
items_with_attrs = _cast_single_to_double(self._assign_attrs_from_mapper(items))
if DELP in items_with_attrs:
self._getter.set_state(
METHOD_NAME({DELP: items_with_attrs[DELP]})
)
not_pressure = dissoc(items_with_attrs, DELP)
try:
self._getter.set_state_mass_conserving(
METHOD_NAME(not_pressure)
)
except ValueError as e:
raise KeyError(e)
def _assign_attrs_from_mapper(self, dst: State) -> State:
updated = {}
for name in dst:
updated[name] = dst[name].assign_attrs(self._mapper[name].attrs)
return updated
def __delitem__(self, key: str):
raise NotImplementedError()
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.keys())
class MergedState(DerivedFV3State):
"""Merge a State object with another mutable mapping
Commonly used to e.g. blend python and fv3gfs-fortran state into a single
object.
Attributes:
        left: a derived fv3 state object representing e.g. Fortran state.
right: a mutable mapping representing python based state.
Used for all keys not in ``left``.
"""
def __init__(self, left: DerivedFV3State, right: State):
self.left = left
self.right = right
def update_mass_conserving(
self, items: State,
):
self.left.update_mass_conserving(items)
@property
def time(self) -> cftime.DatetimeJulian:
return self.left.time
def __setitem__(self, key: str, value: xr.DataArray):
try:
self.left[key] = value
except KeyError:
self.right[key] = value
def __getitem__(self, key: Hashable) -> xr.DataArray:
try:
return self.left[key]
except KeyError:
return self.right[key]
def keys(self) -> Set[str]:
all_keys = set(self.left.keys()) | set(self.right.keys())
return set(map(str, all_keys))
def __delitem__(self, key: str):
try:
del self.left[key]
except (KeyError, NotImplementedError):
del self.right[key]
def _cast_single_to_double(state: State) -> State:
# wrapper state variables must be in double precision
cast_state = {}
for name in state:
if state[name].values.dtype == np.float32:
cast_state[name] = (
state[name]
.astype(np.float64, casting="same_kind")
.assign_attrs(state[name].attrs)
)
else:
cast_state[name] = state[name]
return cast_state
def METHOD_NAME(state: State) -> Mapping[Hashable, pace.util.Quantity]:
return {
key: pace.util.Quantity.from_data_array(value) for key, value in state.items()
}
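# Usage sketch (hedged): assumes a running fv3gfs wrapper acting as the getter, and the
# variable names below are illustrative only. DerivedFV3State exposes Fortran state as
# DataArrays, while MergedState lets python-only keys live beside it.
#
#   fortran_state = DerivedFV3State(fv3gfs.wrapper)
#   state = MergedState(fortran_state, {})
#   sphum = state["specific_humidity"]           # hypothetical variable name
#   state["python_only_field"] = sphum * 0.0     # lands in the python-side mapping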
|
2,654 |
make d bconnection
|
# If Hecke eigenfield is in the LMFDB, expresses eigenvalues in terms of listed integral basis
# TODO: Integrate with LLL-reduced basis? (see NotImplementedError below)
import pymongo, hashlib
from sage.all import PolynomialRing, QQ, preparse, gp, NumberField, matrix, vector
#from sage.all import *
#P = subprocess.Popen(["ssh","mongo","-N"])
_C = None
def METHOD_NAME():
global _C
_C = pymongo.MongoClient("localhost:37010")
# _C = pymongo.MongoClient("m0.lmfdb.xyz:27017")
# _C = pymongo.MongoClient("readonly.lmfdb.xyz:27017")
_C.admin.authenticate("lmfdb", "lmfdb")
def getDBconnection():
if _C is None:
METHOD_NAME()
return _C
def get_hmfs_hecke_field_and_eigenvals(label):
"""Get the Hecke field and eigenvalues for the Hilbert modular form with given label.
INPUT:
label -- string, the label of the Hilbert modular form
OUTPUT:
K_old -- number field, the field containing the Hecke eigenvalues
e -- number field element, a generator for K_old over QQ
eigenvals -- list, a list of the Hecke eigenvalues
"""
C = getDBconnection()
# Should I use find_one, or something else?
R = PolynomialRing(QQ,names=('x'))
form = C.hmfs.forms.find_one({'label':label})
poly = R(str(form['hecke_polynomial']))
K_old = NumberField(poly, names=('e',))
(e,) = K_old._first_ngens(1)
eigenvals_str = form['hecke_eigenvalues']
eigenvals = [K_old(eval(preparse(el))) for el in eigenvals_str]
return K_old, e, eigenvals
def polredabs_coeffs(poly):
"""Apply gp.polredabs to the given polynomial and return the coefficients as a comma-separated string.
INPUT:
poly -- polynomial, a polynomial with coefficients in QQ
OUTPUT:
cs_string -- string, the coefficients of the normalized polynomial (the output of gp.polredabs(poly)), given as a comma-separated string with no spaces
"""
R = poly.parent()
(x,) = R._first_ngens(1)
poly_new = R(str(gp.polredabs(poly)))
cs = poly_new.coefficients(sparse=False)
cs_string = ",".join([str(el) for el in cs])
return cs_string
def coeffs_to_poly(c_string):
"""Given a string of coefficients, returns the polynomial with those coefficients
INPUT:
        c_string -- string, a comma-separated string (with no spaces) of rational numbers
OUTPUT:
The polynomial with these coefficients
"""
R = PolynomialRing(QQ, names=('x',))
(x,) = R._first_ngens(1)
tup = eval(c_string)
return sum([tup[i]*x**i for i in range(0,len(tup))])
def field_coeffs_string_to_hash(c_string):
"""Given a string of coefficients, returns their hash
INPUT:
c_string -- string, a comma-separated string (with no spaces) of rational numbers
OUTPUT:
c_hash -- string, the hash of the string of coefficients
"""
c_hash = hashlib.md5(c_string).hexdigest()
return c_hash
def get_number_field_integral_basis(c_string):
r"""Get the integral basis for the field specified by the string.
INPUT:
c_string -- string, a string of comma-separated coefficients with no spaces: the coefficients of the normalized (using gp.polredabs) defining polynomial
OUTPUT:
fld_bool -- bool, True if the number field has a page in the LMFDB, False otherwise
K_new -- number field, the number field with defining polynomial that is the normalized version (given by gp.polredabs) of the one with coefficients specified by c_string
a -- number field element, generator for K_new
the integral basis for K_new recorded on its LMFDB page
"""
C = getDBconnection()
c_hash = field_coeffs_string_to_hash(c_string)
field = C.numberfields.fields.find_one({'coeffhash':c_hash})
fld_bool = True
try:
field['degree']
except TypeError:
fld_bool = False
if fld_bool:
field_str = field['coeffs']
int_basis_str = field['zk']
poly = coeffs_to_poly(field_str)
K_new = NumberField(poly, names=('a',))
(a,) = K_new._first_ngens(1)
return fld_bool, K_new, a, [K_new(eval(preparse(el))) for el in int_basis_str]
else:
# could add polynomial to list of number fields missing from LMFDB here
return fld_bool, None, None, None
def vector_to_string(vec):
"""Convert vector of integers to string
INPUT:
vec -- vector, a vector of integers
OUTPUT:
A comma-separated string with no spaces containing the integers in vec
"""
vec_string = ""
for i in range(0,len(vec)-1):
vec_string += str(vec[i]) + ","
vec_string += str(vec[len(vec)-1]) # don't forget to append last entry!
return vec_string
def convert_hecke_eigenvalues(K_old, eigenvals, K_new, int_basis):
"""Re-express the Hecke eigenvalues in terms of the integral basis given in the LMFDB
INPUT:
K_old -- the field containing the Hecke eigenvalues
eigenvals -- the Hecke eigenvalues, in terms of a field generator (usually the eigenvalue for T_2) for the field K_old
K_new -- a "nicer" field isomorphic to K_old (often one whose polynomial has been polredabs'd)
int_basis -- an integral basis for the ring of integers of K_new
OUTPUT:
eigenvals_new -- list, a list of strings of the coefficients of the Hecke eigenvalues with respect to the integral basis recorded in the LMFDB
K_new -- number field, the (normalized) number field containing the Hecke eigenvalues, as given in the LMFDB
a -- number field element, the generator for the field K_new
int_basis -- list, a list containing the integral basis for K_new
"""
if not K_old.is_isomorphic(K_new):
raise RuntimeError("Fields not isomorphic!")
iota = K_old.embeddings(K_new)[0]
(a,) = K_new._first_ngens(1)
# make change of basis matrix
chg_basis_entries = []
for el in int_basis:
chg_basis_entries.append(el.list())
chg_basis_mat = matrix(chg_basis_entries) # changes from int_basis to 1, a, a^2, ..., a^(n-1)
chg_basis_mat = chg_basis_mat.inverse() # changes from 1, a, a^2, ..., a^(n-1) to int_basis
# convert entries
eigenvals_new = []
for el in eigenvals:
v = vector(iota(el).list())
eigenvals_new.append(v*chg_basis_mat)
# verify correctness of new expression for eigenvalues
eigenvals_old = [iota(el) for el in eigenvals]
for j in range(0,len(eigenvals)):
new_val = 0
for i in range(0,len(int_basis)):
new_val += eigenvals_new[j][i]*int_basis[i]
assert new_val == eigenvals_old[j]
eigen_strings = []
for c in eigenvals_new:
eigen_strings.append(vector_to_string(c))
return eigen_strings, K_new, a, int_basis
# Wrapper for above functions
def convert_hmfs_hecke_eigenvalues_from_label(label):
"""Given the label of a Hilbert modular form, look for the entry of its Hecke eigenfield in the LMFDB and re-express the Hecke eigenvalues in terms of the integral basis given there
INPUT:
label -- string, the label of a Hilbert modular form
OUTPUT:
eigenvals_new -- list, a list of strings of the coefficients of the Hecke eigenvalues with respect to the integral basis recorded in the LMFDB
K_new -- number field, the (normalized) number field containing the Hecke eigenvalues, as given in the LMFDB
a -- number field element, the generator for the field K_new
int_basis -- list, a list containing the integral basis for K_new
"""
K_old, e, eigenvals = get_hmfs_hecke_field_and_eigenvals(label)
old_poly = K_old.defining_polynomial()
c_string = polredabs_coeffs(old_poly)
fld_bool, K_new, a, int_basis = get_number_field_integral_basis(c_string)
if not fld_bool:
raise NotImplementedError("No number field entry found in the LMFDB.")
else:
return convert_hecke_eigenvalues(K_old, eigenvals, K_new, int_basis)
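# Usage sketch (hedged): the label below is a hypothetical placeholder, not a verified LMFDB
# label, and a MongoDB connection to the LMFDB collections (see METHOD_NAME above) must be
# available for the lookup to succeed.
#
#   eigen_strings, K_new, a, int_basis = convert_hmfs_hecke_eigenvalues_from_label(
#       '2.2.5.1-31.1-a')  # hypothetical Hilbert modular form label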
|
2,655 |
test falls back to fallback if termios
|
import getpass
import os
import unittest
from io import BytesIO, StringIO, TextIOWrapper
from unittest import mock
from test import support
try:
import termios
except ImportError:
termios = None
try:
import pwd
except ImportError:
pwd = None
@mock.patch('os.environ')
class GetpassGetuserTest(unittest.TestCase):
def test_username_takes_username_from_env(self, environ):
expected_name = 'some_name'
environ.get.return_value = expected_name
self.assertEqual(expected_name, getpass.getuser())
def test_username_priorities_of_env_values(self, environ):
environ.get.return_value = None
try:
getpass.getuser()
except ImportError: # in case there's no pwd module
pass
self.assertEqual(
environ.get.call_args_list,
[mock.call(x) for x in ('LOGNAME', 'USER', 'LNAME', 'USERNAME')])
def test_username_falls_back_to_pwd(self, environ):
expected_name = 'some_name'
environ.get.return_value = None
if pwd:
with mock.patch('os.getuid') as uid, \
mock.patch('pwd.getpwuid') as getpw:
uid.return_value = 42
getpw.return_value = [expected_name]
self.assertEqual(expected_name,
getpass.getuser())
getpw.assert_called_once_with(42)
else:
self.assertRaises(ImportError, getpass.getuser)
class GetpassRawinputTest(unittest.TestCase):
def test_flushes_stream_after_prompt(self):
# see issue 1703
stream = mock.Mock(spec=StringIO)
input = StringIO('input_string')
getpass._raw_input('some_prompt', stream, input=input)
stream.flush.assert_called_once_with()
def test_uses_stderr_as_default(self):
input = StringIO('input_string')
prompt = 'some_prompt'
with mock.patch('sys.stderr') as stderr:
getpass._raw_input(prompt, input=input)
stderr.write.assert_called_once_with(prompt)
@mock.patch('sys.stdin')
def test_uses_stdin_as_default_input(self, mock_input):
mock_input.readline.return_value = 'input_string'
getpass._raw_input(stream=StringIO())
mock_input.readline.assert_called_once_with()
@mock.patch('sys.stdin')
def test_uses_stdin_as_different_locale(self, mock_input):
stream = TextIOWrapper(BytesIO(), encoding="ascii")
mock_input.readline.return_value = "Hasło: "
        getpass._raw_input(prompt="Hasło: ", stream=stream)
mock_input.readline.assert_called_once_with()
def test_raises_on_empty_input(self):
input = StringIO('')
self.assertRaises(EOFError, getpass._raw_input, input=input)
def test_trims_trailing_newline(self):
input = StringIO('test\n')
self.assertEqual('test', getpass._raw_input(input=input))
# Some of these tests are a bit white-box. The functional requirement is that
# the password input be taken directly from the tty, and that it not be echoed
# on the screen, unless we are falling back to stderr/stdin.
# Some of these might run on platforms without termios, but play it safe.
@unittest.skipUnless(termios, 'tests require system with termios')
class UnixGetpassTest(unittest.TestCase):
def test_uses_tty_directly(self):
with mock.patch('os.open') as open, \
mock.patch('io.FileIO') as fileio, \
mock.patch('io.TextIOWrapper') as textio:
# By setting open's return value to None the implementation will
# skip code we don't care about in this test. We can mock this out
# fully if an alternate implementation works differently.
open.return_value = None
getpass.unix_getpass()
open.assert_called_once_with('/dev/tty',
os.O_RDWR | os.O_NOCTTY)
fileio.assert_called_once_with(open.return_value, 'w+')
textio.assert_called_once_with(fileio.return_value)
def test_resets_termios(self):
with mock.patch('os.open') as open, \
mock.patch('io.FileIO'), \
mock.patch('io.TextIOWrapper'), \
mock.patch('termios.tcgetattr') as tcgetattr, \
mock.patch('termios.tcsetattr') as tcsetattr:
open.return_value = 3
fake_attrs = [255, 255, 255, 255, 255]
tcgetattr.return_value = list(fake_attrs)
getpass.unix_getpass()
tcsetattr.assert_called_with(3, mock.ANY, fake_attrs)
def METHOD_NAME(self):
with mock.patch('os.open') as open, \
mock.patch('io.FileIO') as fileio, \
mock.patch('io.TextIOWrapper') as textio, \
mock.patch('termios.tcgetattr'), \
mock.patch('termios.tcsetattr') as tcsetattr, \
mock.patch('getpass.fallback_getpass') as fallback:
open.return_value = 3
fileio.return_value = BytesIO()
tcsetattr.side_effect = termios.error
getpass.unix_getpass()
fallback.assert_called_once_with('Password: ',
textio.return_value)
def test_flushes_stream_after_input(self):
# issue 7208
with mock.patch('os.open') as open, \
mock.patch('io.FileIO'), \
mock.patch('io.TextIOWrapper'), \
mock.patch('termios.tcgetattr'), \
mock.patch('termios.tcsetattr'):
open.return_value = 3
mock_stream = mock.Mock(spec=StringIO)
getpass.unix_getpass(stream=mock_stream)
mock_stream.flush.assert_called_with()
def test_falls_back_to_stdin(self):
with mock.patch('os.open') as os_open, \
mock.patch('sys.stdin', spec=StringIO) as stdin:
os_open.side_effect = IOError
stdin.fileno.side_effect = AttributeError
with support.captured_stderr() as stderr:
with self.assertWarns(getpass.GetPassWarning):
getpass.unix_getpass()
stdin.readline.assert_called_once_with()
self.assertIn('Warning', stderr.getvalue())
self.assertIn('Password:', stderr.getvalue())
if __name__ == "__main__":
unittest.main()
|
2,656 |
panic
|
# SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
import errno
import threading
import sanlock
import pytest
from vdsm.common.units import MiB
from vdsm.storage import clusterlock
from vdsm.storage import exception as se
from vdsm.storage import spwd
class WatchdogCallback:
def __init__(self):
self.ready = threading.Event()
self.done = threading.Event()
def __call__(self):
"""
Called from watchdog loop to wake up the test.
"""
self.done.set()
self.ready.wait()
def wait(self):
"""
Wait until the watchdog invoke __call__.
"""
self.ready.clear()
if not self.done.wait(2):
raise RuntimeError("Timeout waiting for watchdog")
def resume(self):
"""
Make __call__ return, resuming the watchdog.
"""
self.done.clear()
self.ready.set()
class FakeMaster:
sdUUID = "master-domain-uuid"
def __init__(self):
self.resources = [
# External lease in another domain.
{
"lockspace": "other-domain-uuid",
"resource": "vm-id",
"version": 1,
"disks": [("/other/xleases", 100 * MiB)],
},
# A volume lease in the master domain.
{
"lockspace": "master-domain-uuid",
"resource": "volume-uuid-2",
"version": 1,
"disks": [("/master/leases", 100 * MiB)],
},
# The cluster lease in the master domain.
{
"lockspace": "master-domain-uuid",
"resource": "SDM",
"version": 1,
"disks": [("/master/leases", MiB)],
},
]
self.error = None
def inquireClusterLock(self):
if self.error:
raise self.error
return self.resources
def getClusterLease(self):
return clusterlock.Lease("SDM", "/master/leases", MiB)
class Panic(Exception):
"""
Raised instead of terminating the process group.
"""
class FakePanic:
def __init__(self):
self.was_called = False
def __call__(self, msg):
self.was_called = True
raise Panic(msg)
@pytest.fixture
def METHOD_NAME(monkeypatch):
METHOD_NAME = FakePanic()
monkeypatch.setattr(spwd, "panic", METHOD_NAME)
return METHOD_NAME
def test_normal_flow(METHOD_NAME):
master = FakeMaster()
cb = WatchdogCallback()
watchdog = spwd.Watchdog(master, 0.01, callback=cb)
watchdog.start()
try:
for i in range(10):
cb.wait()
assert not METHOD_NAME.was_called
cb.resume()
finally:
watchdog.stop()
def test_panic_on_lost_lease(METHOD_NAME):
master = FakeMaster()
cb = WatchdogCallback()
watchdog = spwd.Watchdog(master, 0.01, callback=cb)
watchdog.start()
try:
# Let first check succeed.
cb.wait()
assert not METHOD_NAME.was_called
# Simulate lost lease.
del master.resources[-1]
# Wait for the next check.
cb.resume()
cb.wait()
assert METHOD_NAME.was_called
finally:
cb.resume()
watchdog.stop()
def test_panic_on_wrong_disk(METHOD_NAME):
master = FakeMaster()
cb = WatchdogCallback()
watchdog = spwd.Watchdog(master, 0.01, callback=cb)
watchdog.start()
try:
# Let first check succeed.
cb.wait()
assert not METHOD_NAME.was_called
# Simulate bad disk.
master.resources[-1]["disks"] = [("/master/leases", 100 * MiB)]
# Wait for the next check.
cb.resume()
cb.wait()
assert METHOD_NAME.was_called
finally:
cb.resume()
watchdog.stop()
def test_panic_on_error(METHOD_NAME):
master = FakeMaster()
cb = WatchdogCallback()
watchdog = spwd.Watchdog(master, 0.01, callback=cb)
watchdog.start()
try:
# Let first check succeed.
cb.wait()
assert not METHOD_NAME.was_called
# Simulate error checking lease.
master.error = Exception("Inquire error")
# Wait for the next check.
cb.resume()
cb.wait()
assert METHOD_NAME.was_called
finally:
cb.resume()
watchdog.stop()
def test_temporary_error(METHOD_NAME):
master = FakeMaster()
cb = WatchdogCallback()
watchdog = spwd.Watchdog(master, 0.01, callback=cb)
watchdog.start()
try:
# Let first check succeed.
cb.wait()
assert not METHOD_NAME.was_called
# Simulate a temporary error checking lease.
e = sanlock.SanlockException(
errno.EBUSY, "Inquire error", "Device or resource busy")
master.error = se.SanlockInquireError(e.errno, str(e))
# Wait for next 3 checks
for i in range(3):
cb.resume()
cb.wait()
assert not METHOD_NAME.was_called
# Next error should trigger a panic.
cb.resume()
cb.wait()
assert METHOD_NAME.was_called
finally:
cb.resume()
watchdog.stop()
def test_max_errors(METHOD_NAME):
master = FakeMaster()
cb = WatchdogCallback()
watchdog = spwd.Watchdog(master, 0.01, max_errors=0, callback=cb)
watchdog.start()
try:
# Simulate a temporary error checking lease.
e = sanlock.SanlockException(
errno.EBUSY, "Inquire error", "Device or resource busy")
master.error = se.SanlockInquireError(e.errno, str(e))
# Next check should trigger a panic.
cb.wait()
assert METHOD_NAME.was_called
finally:
cb.resume()
watchdog.stop()
|
2,657 |
test version metadata
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import copy
from typing import Callable # noqa: F401
import pytest
import requests
from datadog_checks.azure_iot_edge import AzureIoTEdgeCheck
from datadog_checks.azure_iot_edge.types import Instance # noqa: F401
from datadog_checks.base.stubs.aggregator import AggregatorStub # noqa: F401
from datadog_checks.base.stubs.datadog_agent import DatadogAgentStub # noqa: F401
from datadog_checks.dev.utils import get_metadata_metrics
from . import common
@pytest.mark.usefixtures("mock_server")
def test_check(aggregator, mock_instance, dd_run_check):
# type: (AggregatorStub, Instance, Callable) -> None
"""
Under normal conditions, metrics and service checks are collected as expected.
"""
check = AzureIoTEdgeCheck('azure_iot_edge', {}, [mock_instance])
dd_run_check(check)
for metric, metric_type in common.HUB_METRICS:
# Don't assert exact tags since they're very complex (many cross products).
aggregator.assert_metric(metric, metric_type=metric_type)
m = aggregator._metrics[metric][0]
assert set(m.tags) >= set(common.TAGS)
for metric, metric_type, metric_tags in common.AGENT_METRICS:
tags = common.TAGS + metric_tags
aggregator.assert_metric(metric, metric_type=metric_type, count=1, tags=tags)
for metric, metric_type in common.MODULE_METRICS:
for module_name in common.MODULES:
tags = common.TAGS + ['module_name:{}'.format(module_name)]
aggregator.assert_metric(metric, metric_type=metric_type, count=1, tags=tags)
aggregator.assert_service_check(
'azure.iot_edge.edge_hub.prometheus.health',
AzureIoTEdgeCheck.OK,
count=1,
tags=common.CUSTOM_TAGS + ['endpoint:{}'.format(common.MOCK_EDGE_HUB_PROMETHEUS_URL)],
)
aggregator.assert_service_check(
'azure.iot_edge.edge_agent.prometheus.health',
AzureIoTEdgeCheck.OK,
count=1,
tags=common.CUSTOM_TAGS + ['endpoint:{}'.format(common.MOCK_EDGE_AGENT_PROMETHEUS_URL)],
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
@pytest.mark.usefixtures("mock_server")
def METHOD_NAME(datadog_agent, dd_run_check, mock_instance):
# type: (DatadogAgentStub, Callable, Instance) -> None
check = AzureIoTEdgeCheck('azure_iot_edge', {}, [mock_instance])
check.check_id = 'test:123'
dd_run_check(check)
major, minor, patch, raw = common.MOCK_EDGE_AGENT_VERSION
version_metadata = {
'version.scheme': 'semver',
'version.major': major,
'version.minor': minor,
'version.patch': patch,
'version.raw': raw,
}
datadog_agent.assert_metadata('test:123', version_metadata)
@pytest.mark.usefixtures("mock_server")
@pytest.mark.parametrize(
"option, url, service_check",
[
pytest.param(
"edge_agent_prometheus_url",
common.MOCK_EDGE_AGENT_PROMETHEUS_URL,
"azure.iot_edge.edge_agent.prometheus.health",
id="edge-agent",
),
pytest.param(
"edge_hub_prometheus_url",
common.MOCK_EDGE_HUB_PROMETHEUS_URL,
"azure.iot_edge.edge_hub.prometheus.health",
id="edge-hub",
),
],
)
def test_prometheus_endpoint_down(aggregator, mock_instance, option, url, service_check):
# type: (AggregatorStub, dict, str, str, str) -> None
"""
When a Prometheus endpoint is unreachable, service check reports as CRITICAL.
"""
instance = copy.deepcopy(mock_instance)
wrong_port = common.MOCK_SERVER_PORT + 1 # Will trigger exception.
instance[option] = url.replace(str(common.MOCK_SERVER_PORT), str(wrong_port))
check = AzureIoTEdgeCheck('azure_iot_edge', {}, [instance])
with pytest.raises(requests.ConnectionError):
check.check(instance)
aggregator.assert_service_check(service_check, AzureIoTEdgeCheck.CRITICAL)
|
2,658 |
gaussian1d moments
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The module contains tools for centroiding sources using Gaussians.
"""
import warnings
import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import Const1D, Const2D, Gaussian1D, Gaussian2D
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['centroid_1dg', 'centroid_2dg']
def centroid_1dg(data, error=None, mask=None):
"""
Calculate the centroid of a 2D array by fitting 1D Gaussians to the
marginal ``x`` and ``y`` distributions of the array.
Non-finite values (e.g., NaN or inf) in the ``data`` or ``error``
arrays are automatically masked. These masks are combined.
Parameters
----------
data : 2D `~numpy.ndarray`
The 2D image data. The image should be a background-subtracted
cutout image containing a single source.
error : 2D `~numpy.ndarray`, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : 2D bool `~numpy.ndarray`, optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
"""
data = np.ma.asanyarray(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
warnings.warn('Input data contains non-finite values (e.g., NaN or '
'inf) that were automatically masked.',
AstropyUserWarning)
if error is not None:
error = np.ma.masked_invalid(error)
if data.shape != error.shape:
raise ValueError('data and error must have the same shape.')
data.mask |= error.mask
error.mask = data.mask
xy_error = [np.sqrt(np.ma.sum(error**2, axis=i)) for i in (0, 1)]
xy_weights = [(1.0 / xy_error[i].clip(min=1.0e-30)) for i in (0, 1)]
else:
xy_weights = [np.ones(data.shape[i]) for i in (1, 0)]
# assign zero weight where an entire row or column is masked
if np.any(data.mask):
bad_idx = [np.all(data.mask, axis=i) for i in (0, 1)]
for i in (0, 1):
xy_weights[i][bad_idx[i]] = 0.0
xy_data = [np.ma.sum(data, axis=i).data for i in (0, 1)]
constant_init = np.ma.min(data)
centroid = []
for (data_i, weights_i) in zip(xy_data, xy_weights):
params_init = METHOD_NAME(data_i)
g_init = Const1D(constant_init) + Gaussian1D(*params_init)
fitter = LevMarLSQFitter()
x = np.arange(data_i.size)
g_fit = fitter(g_init, x, data_i, weights=weights_i)
centroid.append(g_fit.mean_1.value)
return np.array(centroid)
def METHOD_NAME(data, mask=None):
"""
Estimate 1D Gaussian parameters from the moments of 1D data.
This function can be useful for providing initial parameter values
when fitting a 1D Gaussian to the ``data``.
Parameters
----------
data : 1D `~numpy.ndarray`
The 1D data array.
mask : 1D bool `~numpy.ndarray`, optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
amplitude, mean, stddev : float
The estimated parameters of a 1D Gaussian.
"""
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
warnings.warn('Input data contains non-finite values (e.g., NaN or '
'inf) that were automatically masked.',
AstropyUserWarning)
else:
data = np.ma.array(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
data.fill_value = 0.0
data = data.filled()
x = np.arange(data.size)
x_mean = np.sum(x * data) / np.sum(data)
x_stddev = np.sqrt(abs(np.sum(data * (x - x_mean) ** 2) / np.sum(data)))
amplitude = np.ptp(data)
return amplitude, x_mean, x_stddev
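# Illustrative check of the moment estimates above (not part of the original
# module). For data = [0, 1, 4, 1, 0] and x = [0, 1, 2, 3, 4]:
#   x_mean    = (1*1 + 4*2 + 1*3) / 6 = 12 / 6 = 2.0
#   x_stddev  = sqrt((1*(1-2)**2 + 4*(2-2)**2 + 1*(3-2)**2) / 6) ~= 0.577
#   amplitude = ptp(data) = 4 - 0 = 4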
def centroid_2dg(data, error=None, mask=None):
"""
Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
a constant) to the array.
Non-finite values (e.g., NaN or inf) in the ``data`` or ``error``
arrays are automatically masked. These masks are combined.
Parameters
----------
data : 2D `~numpy.ndarray`
The 2D image data. The image should be a background-subtracted
cutout image containing a single source.
error : 2D `~numpy.ndarray`, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : 2D bool `~numpy.ndarray`, optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
"""
# prevent circular import
from photutils.morphology import data_properties
data = np.ma.asanyarray(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
warnings.warn('Input data contains non-finite values (e.g., NaN or '
'inf) that were automatically masked.',
AstropyUserWarning)
if error is not None:
error = np.ma.masked_invalid(error)
if data.shape != error.shape:
raise ValueError('data and error must have the same shape.')
data.mask |= error.mask
weights = 1.0 / error.clip(min=1.0e-30)
else:
weights = np.ones(data.shape)
if np.ma.count(data) < 7:
        raise ValueError('Input data must have at least 7 unmasked values to '
                         'fit a 2D Gaussian plus a constant.')
# assign zero weight to masked pixels
if data.mask is not np.ma.nomask:
weights[data.mask] = 0.0
mask = data.mask
data.fill_value = 0.0
data = data.filled()
# Subtract the minimum of the data as a rough background estimate.
# This will also make the data values positive, preventing issues with
# the moment estimation in data_properties. Moments from negative data
# values can yield undefined Gaussian parameters, e.g., x/y_stddev.
props = data_properties(data - np.min(data), mask=mask)
constant_init = 0.0 # subtracted data minimum above
g_init = (Const2D(constant_init)
+ Gaussian2D(amplitude=np.ptp(data),
x_mean=props.xcentroid,
y_mean=props.ycentroid,
x_stddev=props.semimajor_sigma.value,
y_stddev=props.semiminor_sigma.value,
theta=props.orientation.value))
fitter = LevMarLSQFitter()
y, x = np.indices(data.shape)
gfit = fitter(g_init, x, y, data, weights=weights)
return np.array([gfit.x_mean_1.value, gfit.y_mean_1.value])
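# Minimal usage sketch (illustrative, not part of the original module; the
# synthetic Gaussian source below is an assumption, not photutils test data):
#
#     import numpy as np
#     from astropy.modeling.models import Gaussian2D
#
#     yy, xx = np.mgrid[0:25, 0:25]
#     data = Gaussian2D(amplitude=10, x_mean=12.3, y_mean=11.7,
#                       x_stddev=2.0, y_stddev=3.0)(xx, yy)
#     print(centroid_1dg(data))   # approximately [12.3, 11.7]
#     print(centroid_2dg(data))   # approximately [12.3, 11.7]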
|
2,659 |
test alter column
|
from unittest import TestCase
from piccolo.apps.migrations.auto import MigrationManager, SchemaSnapshot
class TestSchemaSnaphot(TestCase):
def test_add_table(self):
"""
Test adding tables.
"""
manager_1 = MigrationManager()
manager_1.add_table(class_name="Manager", tablename="manager")
manager_2 = MigrationManager()
manager_2.add_table(class_name="Band", tablename="band")
schema_snapshot = SchemaSnapshot(managers=[manager_1, manager_2])
snapshot = schema_snapshot.get_snapshot()
self.assertTrue(len(snapshot) == 2)
class_names = [i.class_name for i in snapshot]
self.assertTrue("Band" in class_names)
self.assertTrue("Manager" in class_names)
def test_drop_table(self):
"""
Test dropping tables.
"""
manager_1 = MigrationManager()
manager_1.add_table(class_name="Manager", tablename="manager")
manager_1.add_table(class_name="Band", tablename="band")
manager_2 = MigrationManager()
manager_2.drop_table(class_name="Band", tablename="band")
schema_snapshot = SchemaSnapshot(managers=[manager_1, manager_2])
snapshot = schema_snapshot.get_snapshot()
self.assertTrue(len(snapshot) == 1)
class_names = [i.class_name for i in snapshot]
self.assertTrue("Manager" in class_names)
def test_rename_table(self):
"""
Test renaming tables.
"""
manager_1 = MigrationManager()
manager_1.add_table(class_name="Band", tablename="band")
manager_2 = MigrationManager()
manager_2.rename_table(
old_class_name="Band",
old_tablename="band",
new_class_name="Performer",
new_tablename="performer",
)
schema_snapshot = SchemaSnapshot(managers=[manager_1, manager_2])
snapshot = schema_snapshot.get_snapshot()
self.assertTrue(snapshot[0].class_name == "Performer")
self.assertTrue(snapshot[0].tablename == "performer")
def test_add_column(self):
"""
Test adding columns.
"""
manager = MigrationManager()
manager.add_table(class_name="Manager", tablename="manager")
manager.add_column(
table_class_name="Manager",
tablename="manager",
column_name="name",
column_class_name="Varchar",
params={"length": 100},
)
schema_snapshot = SchemaSnapshot(managers=[manager])
snapshot = schema_snapshot.get_snapshot()
self.assertTrue(len(snapshot) == 1)
self.assertTrue(len(snapshot[0].columns) == 1)
def test_rename_column(self):
"""
Test renaming columns.
"""
manager_1 = MigrationManager()
manager_1.add_table(class_name="Manager", tablename="manager")
manager_1.add_column(
table_class_name="Manager",
tablename="manager",
column_name="name",
column_class_name="Varchar",
params={"length": 100},
)
manager_2 = MigrationManager()
manager_2.rename_column(
table_class_name="Manager",
tablename="manager",
old_column_name="name",
new_column_name="title",
)
schema_snapshot = SchemaSnapshot(managers=[manager_1, manager_2])
snapshot = schema_snapshot.get_snapshot()
self.assertTrue(snapshot[0].columns[0]._meta.name == "title")
# Make sure double renames still work
manager_3 = MigrationManager()
manager_3.rename_column(
table_class_name="Manager",
tablename="manager",
old_column_name="title",
new_column_name="label",
)
schema_snapshot = SchemaSnapshot(
managers=[manager_1, manager_2, manager_3]
)
snapshot = schema_snapshot.get_snapshot()
self.assertTrue(snapshot[0].columns[0]._meta.name == "label")
def test_drop_column(self):
"""
Test dropping columns.
"""
manager_1 = MigrationManager()
manager_1.add_table(class_name="Manager", tablename="manager")
manager_1.add_column(
table_class_name="Manager",
tablename="manager",
column_name="name",
column_class_name="Varchar",
params={"length": 100},
)
manager_2 = MigrationManager()
manager_2.drop_column(
table_class_name="Manager", tablename="manager", column_name="name"
)
schema_snapshot = SchemaSnapshot(managers=[manager_1, manager_2])
snapshot = schema_snapshot.get_snapshot()
self.assertEqual(len(snapshot[0].columns), 0)
def METHOD_NAME(self):
"""
Test altering columns.
"""
manager_1 = MigrationManager()
manager_1.add_table(class_name="Manager", tablename="manager")
manager_1.add_column(
table_class_name="Manager",
tablename="manager",
column_name="name",
column_class_name="Varchar",
params={"length": 100},
)
manager_2 = MigrationManager()
manager_2.alter_column(
table_class_name="Manager",
tablename="manager",
column_name="name",
params={"unique": True},
old_params={"unique": False},
)
schema_snapshot = SchemaSnapshot(managers=[manager_1, manager_2])
snapshot = schema_snapshot.get_snapshot()
self.assertTrue(snapshot[0].columns[0]._meta.unique)
def test_get_table_from_snapshot(self):
manager_1 = MigrationManager()
manager_1.add_table(class_name="Manager", tablename="manager")
manager_1.add_table(class_name="Band", tablename="band")
schema_snapshot = SchemaSnapshot(managers=[manager_1])
table = schema_snapshot.get_table_from_snapshot("Manager")
self.assertTrue(table.class_name == "Manager")
with self.assertRaises(ValueError):
schema_snapshot.get_table_from_snapshot("Foo")
|
2,660 |
remotes
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Manage a Git repository
"""
import sys
import os
import subprocess
from .abstractrepo import AbstractVCSRepo
class GitError(Exception):
pass
class Repo(AbstractVCSRepo):
"""
Manage a git repository, be it
the running Frescobaldi application
or a document's project.
"""
_git_available = None
def __init__(self, root):
if not os.path.isdir(os.path.join(root, '.git')):
            raise GitError(_("The given directory '{rootdir}' "
                             "doesn't seem to be a Git repository.".format(rootdir=root)))
self.rootDir = root
@classmethod
def run_command(cls, cmd, args=[], dir=None):
"""
run a git command and return its output
as a string list.
Raise an exception if it returns an error.
- cmd is the git command (without 'git')
- args is a string or a list of strings
        If no dir is passed, the running directory of
        Frescobaldi is used as the default.
"""
dir = os.path.normpath(os.path.join(sys.path[0], '..')) if dir is None else dir
from PyQt5.QtCore import QSettings
s = QSettings()
s.beginGroup("helper_applications")
git_cmd = s.value("git", "git", str)
git_cmd = git_cmd if git_cmd else "git"
cmd = [git_cmd, cmd]
cmd.extend(args)
pr = subprocess.Popen(cmd, cwd=dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
(out, error) = pr.communicate()
if error:
raise GitError(error)
result = out.split('\n')
if result[-1] == '':
result.pop()
return result
@classmethod
def vcs_available(cls):
"""Return True if Git is installed on the system"""
if cls._git_available is None:
try:
cls.run_command('--version')
cls._git_available = True
            except GitError:
cls._git_available = False
return cls._git_available
# #########################
# Internal helper functions
def _run_command(self, cmd, args=[]):
"""
run a git command and return its output
as a string list.
Raise an exception if it returns an error.
- cmd is the git command (without 'git')
- args is a string or a list of strings
"""
return Repo.run_command(cmd, args, self.rootDir)
def _branches(self, local=True):
"""
Returns a tuple.
The first element is the list of branch names.
The second element is the name of the current branch (may be None).
If local is False also return 'remote' branches.
"""
args = ['--color=never']
if not local:
args.append('-a')
branches = []
current_branch = None
for line in self._run_command('branch', args):
branch = line.strip()
if branch.startswith('* '):
branch = branch.lstrip('* ')
current_branch = branch
if branch.endswith('.stgit'):
continue
branches.append(branch)
return (branches, current_branch)
# ####################
# Public API functions
def branches(self, local=True):
"""
Returns a string list of branch names.
If local is False also return 'remote' branches.
"""
return self._branches(local)[0]
def checkout(self, branch):
"""
Tries to checkout a branch.
Add '-q' option because git checkout will
return its confirmation message on stderr.
May raise a GitError exception"""
self._run_command('checkout', ['-q', branch])
def current_branch(self):
"""
Returns the name of the current branch.
"""
current_branch = self._branches(local=True)[1]
if not current_branch:
raise GitError('current_branch: No branch found')
return current_branch
def HEAD(self, shortened=True):
"""
Returns the (shortened) committish of the current commit.
"""
head = self._run_command('log', ['-n', '1',
'--format=format:%{}'.format('h' if shortened else 'H')])
return head[0]
def has_branch(self, branch):
"""
Returns True if the given local branch exists.
"""
return (branch in self.branches(local=True))
def has_remote(self, remote):
"""Returns True if the given remote name is registered."""
return remote in self.METHOD_NAME()
def has_remote_branch(self, branch):
"""
Return True if the given branch is tracking a remote branch.
"""
remote, remote_branch = self.tracked_remote(branch)
return (remote != "local" or remote_branch != "local")
def METHOD_NAME(self):
"""Return a string list with registered remote names"""
return self._run_command('remote', ['show'])
def tracked_remote(self, branch):
"""
Return a tuple with the remote and branch tracked by
the given branch.
In most cases both values will be identical, but a branch
can also track a differently named remote branch.
Return ('local', 'local') if it doesn't track any branch.
"""
if not self.has_branch(branch):
raise GitError('Branch not found: ' + branch)
remote_name = self._run_command("config",
["branch." + branch + ".remote"])
remote_merge = self._run_command("config",
["branch." + branch + ".merge"])
if not remote_name or not remote_merge:
return ('local', 'local')
remote_name = remote_name[0]
remote_merge = remote_merge[0]
remote_branch = remote_merge[remote_merge.rfind('/')+1:]
return (remote_name, remote_branch)
def tracked_remote_label(self, branch):
"""
Returns a label for the tracked branch to be used in the GUI.
Consists of one of
- 'local'
- the remote name
- remote name concatenated with the remote branch
(if it should differ from the local branch name).
"""
remote, remote_branch = self.tracked_remote(branch)
if remote == 'local':
return 'local'
if branch == remote_branch:
return remote
else:
return remote + '/' + remote_branch
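# Minimal usage sketch (illustrative, not part of Frescobaldi; the path below
# is hypothetical):
#
#     repo = Repo('/path/to/some/project')   # raises GitError if not a git repo
#     branch = repo.current_branch()
#     print(repo.HEAD(), repo.tracked_remote_label(branch))
#     if repo.has_branch('master'):
#         repo.checkout('master')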
|
2,661 |
stop test run
|
"""Test result object"""
import os
import sys
import traceback
from StringIO import StringIO
from . import util
from functools import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_testRunEntered = False
_moduleSetUpFailed = False
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def printErrors(self):
"Called by TestRunner after test run"
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
self._setupStdout()
def _setupStdout(self):
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = StringIO()
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
self._restoreStdout()
self._mirrorOutput = False
def _restoreStdout(self):
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
def METHOD_NAME(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return ("<%s run=%i errors=%i failures=%i>" %
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures)))
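# Minimal driver sketch (illustrative, not part of the original module; `test`
# stands for a TestCase instance supplied by a runner):
#
#     result = TestResult()
#     result.startTestRun()
#     result.startTest(test)
#     result.addSuccess(test)
#     result.stopTest(test)
#     result.METHOD_NAME()
#     print(result.wasSuccessful())   # True when no errors or failures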
|
2,662 |
queryset
|
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import ObjectDoesNotExist
from django.urls import NoReverseMatch
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
from geotrek.common.mixins.actions import MergeActionMixin
from . import models as common_models
if 'modeltranslation' in settings.INSTALLED_APPS:
from modeltranslation.admin import TabbedTranslationAdmin
else:
from django.contrib.admin import ModelAdmin as TabbedTranslationAdmin
class OrganismAdmin(MergeActionMixin, admin.ModelAdmin):
list_display = ('organism', 'structure')
search_fields = ('organism', 'structure')
list_filter = ('structure',)
merge_field = 'organism'
class FileTypeAdmin(MergeActionMixin, admin.ModelAdmin):
list_display = ('type', 'structure')
search_fields = ('type', 'structure__name')
list_filter = ('structure',)
merge_field = 'type'
class LicenseAdmin(admin.ModelAdmin):
list_display = ["label"]
search_fields = ["label"]
class MapEntityContentTypeFilter(admin.SimpleListFilter):
title = _('content type')
parameter_name = 'content_type'
def lookups(self, request, model_admin):
from mapentity.registry import registry
values = []
for model, entity in registry.registry.items():
content_type = model.get_content_type_id()
values.append((content_type, entity.label))
return tuple(values)
def METHOD_NAME(self, request, METHOD_NAME):
if self.value():
return METHOD_NAME.filter(content_type=self.value())
class AttachmentAdmin(admin.ModelAdmin):
date_hierarchy = 'date_update'
search_fields = ('title', 'legend', 'author', 'object_id')
list_display = ('filename', 'legend', 'author', 'content_link', 'content_type')
list_filter = ('filetype', MapEntityContentTypeFilter)
exclude = ('object_id',)
readonly_fields = ('content_type', 'content_link', 'creator', 'title')
def has_add_permission(self, request):
""" Do not add from Adminsite. """
return False
def content_link(self, obj):
"""Returns content object link"""
try:
assert hasattr(obj.content_object, '_entity'), f'Unregistered model {obj.content_type}'
content_url = obj.content_object.get_detail_url()
except (ObjectDoesNotExist, NoReverseMatch, AssertionError):
return f'{obj.object_id}'
else:
return format_html('<a data-pk="{}" href="{}" >{}</a>',
obj.object_id, content_url, obj.object_id)
content_link.short_description = _('Linked content')
class ThemeAdmin(MergeActionMixin, TabbedTranslationAdmin):
list_display = ('label', 'cirkwi', 'pictogram_img')
search_fields = ('label',)
merge_field = 'label'
class RecordSourceAdmin(admin.ModelAdmin):
list_display = ('name', 'pictogram_img')
search_fields = ('name', )
class TargetPortalAdmin(admin.ModelAdmin):
list_display = ('name', 'website', 'title')
search_fields = ('name', 'website')
class ReservationSystemAdmin(MergeActionMixin, admin.ModelAdmin):
list_display = ('name',)
search_fields = ('name',)
merge_field = 'name'
class LabelAdmin(TabbedTranslationAdmin):
list_display = ('pictogram_img', 'name', 'filter', 'published')
list_display_links = ('name',)
search_fields = ('name', )
class HDViewPointAdmin(admin.ModelAdmin):
date_hierarchy = 'date_update'
search_fields = ('title', 'legend', 'author', 'object_id')
list_display = ('update_link', 'legend', 'author', 'related_object_link', 'content_type', 'license')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.list_display_links = None
def has_add_permission(self, request):
""" Do not add from Adminsite. """
return False
def update_link(self, obj):
"""Returns link to HD View"""
return format_html(
'<a data-pk="{}" href="{}" >{}</a>',
obj.pk, obj.full_url, obj.title
)
def related_object_link(self, obj):
"""Returns content object link"""
content_url = obj.content_object.get_detail_url()
return format_html(
'<a data-pk="{}" href="{}" >{}</a>',
obj.object_id, content_url, str(obj.content_object)
)
related_object_link.short_description = _('Related to')
update_link.short_description = _('Title')
admin.site.register(common_models.Organism, OrganismAdmin)
admin.site.register(common_models.Attachment, AttachmentAdmin)
admin.site.register(common_models.FileType, FileTypeAdmin)
admin.site.register(common_models.Theme, ThemeAdmin)
admin.site.register(common_models.RecordSource, RecordSourceAdmin)
admin.site.register(common_models.TargetPortal, TargetPortalAdmin)
admin.site.register(common_models.ReservationSystem, ReservationSystemAdmin)
admin.site.register(common_models.Label, LabelAdmin)
admin.site.register(common_models.License, LicenseAdmin)
admin.site.register(common_models.HDViewPoint, HDViewPointAdmin)
|
2,663 |
action
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetResolverFirewallRulesResult',
'AwaitableGetResolverFirewallRulesResult',
'get_resolver_firewall_rules',
'get_resolver_firewall_rules_output',
]
@pulumi.output_type
class GetResolverFirewallRulesResult:
"""
A collection of values returned by getResolverFirewallRules.
"""
def __init__(__self__, METHOD_NAME=None, firewall_rule_group_id=None, firewall_rules=None, id=None, priority=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'action' to be a str")
pulumi.set(__self__, "action", METHOD_NAME)
if firewall_rule_group_id and not isinstance(firewall_rule_group_id, str):
raise TypeError("Expected argument 'firewall_rule_group_id' to be a str")
pulumi.set(__self__, "firewall_rule_group_id", firewall_rule_group_id)
if firewall_rules and not isinstance(firewall_rules, list):
raise TypeError("Expected argument 'firewall_rules' to be a list")
pulumi.set(__self__, "firewall_rules", firewall_rules)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if priority and not isinstance(priority, int):
raise TypeError("Expected argument 'priority' to be a int")
pulumi.set(__self__, "priority", priority)
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[str]:
return pulumi.get(self, "action")
@property
@pulumi.getter(name="firewallRuleGroupId")
def firewall_rule_group_id(self) -> str:
return pulumi.get(self, "firewall_rule_group_id")
@property
@pulumi.getter(name="firewallRules")
def firewall_rules(self) -> Sequence['outputs.GetResolverFirewallRulesFirewallRuleResult']:
"""
List with information about the firewall rules. See details below.
"""
return pulumi.get(self, "firewall_rules")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def priority(self) -> Optional[int]:
return pulumi.get(self, "priority")
class AwaitableGetResolverFirewallRulesResult(GetResolverFirewallRulesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetResolverFirewallRulesResult(
METHOD_NAME=self.METHOD_NAME,
firewall_rule_group_id=self.firewall_rule_group_id,
firewall_rules=self.firewall_rules,
id=self.id,
priority=self.priority)
def get_resolver_firewall_rules(METHOD_NAME: Optional[str] = None,
firewall_rule_group_id: Optional[str] = None,
priority: Optional[int] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResolverFirewallRulesResult:
"""
`route53_get_resolver_firewall_rules` Provides details about rules in a specific Route53 Resolver Firewall rule group.
## Example Usage
The following example shows how to get Route53 Resolver Firewall rules based on its associated firewall group id.
```python
import pulumi
import pulumi_aws as aws
example = aws.route53.get_resolver_firewall_rules(firewall_rule_group_id=aws_route53_resolver_firewall_rule_group["example"]["id"])
```
:param str action: The action that DNS Firewall should take on a DNS query when it matches one of the domains in the rule's domain list.
:param str firewall_rule_group_id: The unique identifier of the firewall rule group that you want to retrieve the rules for.
:param int priority: The setting that determines the processing order of the rules in a rule group.
"""
__args__ = dict()
__args__['action'] = METHOD_NAME
__args__['firewallRuleGroupId'] = firewall_rule_group_id
__args__['priority'] = priority
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:route53/getResolverFirewallRules:getResolverFirewallRules', __args__, opts=opts, typ=GetResolverFirewallRulesResult).value
return AwaitableGetResolverFirewallRulesResult(
METHOD_NAME=pulumi.get(__ret__, 'action'),
firewall_rule_group_id=pulumi.get(__ret__, 'firewall_rule_group_id'),
firewall_rules=pulumi.get(__ret__, 'firewall_rules'),
id=pulumi.get(__ret__, 'id'),
priority=pulumi.get(__ret__, 'priority'))
@_utilities.lift_output_func(get_resolver_firewall_rules)
def get_resolver_firewall_rules_output(METHOD_NAME: Optional[pulumi.Input[Optional[str]]] = None,
firewall_rule_group_id: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[Optional[int]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetResolverFirewallRulesResult]:
"""
`route53_get_resolver_firewall_rules` Provides details about rules in a specific Route53 Resolver Firewall rule group.
## Example Usage
The following example shows how to get Route53 Resolver Firewall rules based on its associated firewall group id.
```python
import pulumi
import pulumi_aws as aws
example = aws.route53.get_resolver_firewall_rules(firewall_rule_group_id=aws_route53_resolver_firewall_rule_group["example"]["id"])
```
:param str action: The action that DNS Firewall should take on a DNS query when it matches one of the domains in the rule's domain list.
:param str firewall_rule_group_id: The unique identifier of the firewall rule group that you want to retrieve the rules for.
:param int priority: The setting that determines the processing order of the rules in a rule group.
"""
...
|
2,664 |
test mask cube 2d in place
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Test function :func:`iris.util.mask_cube"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
import pathlib
import dask.array as da
import numpy as np
import numpy.ma as ma
from iris.tests.stock import (
make_bounds_discontiguous_at_point,
sample_2d_latlons,
simple_1d,
simple_2d,
)
import iris.util
from iris.util import mask_cube
def full2d_global():
return sample_2d_latlons(transformed=True)
class MaskCubeMixin:
def assertOriginalMetadata(self, cube, func):
"""
Check metadata matches that of input cube. func is a string indicating
which function created the original cube.
"""
reference_dir = pathlib.Path("unit/util/mask_cube")
reference_fname = reference_dir / f"original_cube_{func}.cml"
self.assertCML(
cube,
reference_filename=str(reference_fname),
checksum=False,
)
class TestArrayMask(tests.IrisTest, MaskCubeMixin):
"""Tests with mask specified as numpy array."""
def setUp(self):
# Set up a 2d cube with a masked discontiguity to test masking
# of 2-dimensional cubes
self.cube_2d = full2d_global()
make_bounds_discontiguous_at_point(self.cube_2d, 3, 3)
def METHOD_NAME(self):
# This tests the masking of a 2d data array
cube = self.cube_2d
discontiguity_array = ma.getmaskarray(cube.data).copy()
expected = cube.copy()
# Remove mask so that we can pass an unmasked data set to
        # mask_cube, and check that it masks the correct point by
# comparing with masked data
cube.data = cube.data.data
returned = mask_cube(cube, discontiguity_array, in_place=True)
np.testing.assert_array_equal(expected.data.mask, cube.data.mask)
self.assertOriginalMetadata(cube, "full2d_global")
self.assertIs(returned, None)
def test_mask_cube_2d_not_in_place(self):
# This tests the masking of a 2d data array
cube = self.cube_2d
discontiguity_array = ma.getmaskarray(cube.data).copy()
expected = cube.copy()
# Remove mask so that we can pass an unmasked data set to
        # mask_cube, and check that it masks the correct point by
# comparing with masked data
cube.data = cube.data.data
returned = mask_cube(cube, discontiguity_array, in_place=False)
np.testing.assert_array_equal(expected.data.mask, returned.data.mask)
self.assertOriginalMetadata(returned, "full2d_global")
self.assertFalse(ma.is_masked(cube.data))
def test_mask_cube_lazy_in_place_broadcast(self):
cube = simple_2d()
cube.data = cube.lazy_data()
mask = [0, 1, 1, 0]
returned = mask_cube(cube, mask, in_place=True)
self.assertTrue(cube.has_lazy_data())
        # Touch the data so laziness status doesn't affect CML check.
cube.data
self.assertOriginalMetadata(cube, "simple_2d")
for subcube in cube.slices("foo"):
# Mask should have been broadcast across "bar" dimension.
np.testing.assert_array_equal(subcube.data.mask, mask)
self.assertIs(returned, None)
class TestCoordMask(tests.IrisTest, MaskCubeMixin):
"""Tests with mask specified as a Coord."""
def setUp(self):
self.cube = simple_2d()
def test_mask_cube_2d_first_dim(self):
mask_coord = iris.coords.AuxCoord([0, 1, 0], long_name="mask", units=1)
self.cube.add_aux_coord(mask_coord, 0)
returned = mask_cube(self.cube, mask_coord, in_place=False)
# Remove extra coord so we can check against original metadata.
returned.remove_coord(mask_coord)
self.assertOriginalMetadata(returned, "simple_2d")
for subcube in returned.slices("bar"):
# Mask should have been broadcast across "foo" dimension.
np.testing.assert_array_equal(subcube.data.mask, mask_coord.points)
def test_mask_cube_2d_second_dim(self):
mask_coord = iris.coords.AuxCoord(
[0, 0, 1, 1], long_name="mask", units=1
)
returned = mask_cube(self.cube, mask_coord, in_place=False, dim=1)
self.assertOriginalMetadata(returned, "simple_2d")
for subcube in returned.slices("foo"):
# Mask should have been broadcast across "bar" dimension.
np.testing.assert_array_equal(subcube.data.mask, mask_coord.points)
class TestCubeMask(tests.IrisTest, MaskCubeMixin):
"""Tests with mask specified as a Cube."""
def setUp(self):
self.cube = simple_2d()
def test_mask_cube_2d_first_dim_not_in_place(self):
mask = iris.cube.Cube([0, 1, 0], long_name="mask", units=1)
mask.add_dim_coord(self.cube.coord("bar"), 0)
returned = mask_cube(self.cube, mask, in_place=False)
self.assertOriginalMetadata(returned, "simple_2d")
for subcube in returned.slices("bar"):
# Mask should have been broadcast across 'foo' dimension.
np.testing.assert_array_equal(subcube.data.mask, mask.data)
def test_mask_cube_2d_first_dim_in_place(self):
mask = iris.cube.Cube([0, 1, 0], long_name="mask", units=1)
mask.add_dim_coord(self.cube.coord("bar"), 0)
returned = mask_cube(self.cube, mask, in_place=True)
self.assertOriginalMetadata(self.cube, "simple_2d")
for subcube in self.cube.slices("bar"):
# Mask should have been broadcast across 'foo' dimension.
np.testing.assert_array_equal(subcube.data.mask, mask.data)
self.assertIs(returned, None)
def test_mask_cube_2d_create_new_dim(self):
mask = iris.cube.Cube(
[[0, 1, 0], [0, 0, 1]], long_name="mask", units=1
)
broadcast_coord = iris.coords.DimCoord([1, 2], long_name="baz")
mask.add_dim_coord(broadcast_coord, 0)
mask.add_dim_coord(self.cube.coord("bar"), 1)
# Create length-1 dimension to enable broadcasting.
self.cube.add_aux_coord(broadcast_coord[0])
cube = iris.util.new_axis(self.cube, "baz")
returned = mask_cube(cube, mask, in_place=False)
self.assertCML(cube, checksum=False)
for subcube in returned.slices_over("baz"):
# Underlying data should have been broadcast across 'baz' dimension.
np.testing.assert_array_equal(subcube.data, self.cube.data)
for subcube in returned.slices_over("foo"):
# Mask should have been broadcast across 'foo' dimension.
np.testing.assert_array_equal(subcube.data.mask, mask.data)
def test_mask_cube_1d_lazy_mask_in_place(self):
cube = simple_1d()
mask = cube.copy(da.from_array([0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1]))
returned = mask_cube(cube, mask, in_place=True)
self.assertIs(returned, None)
self.assertTrue(cube.has_lazy_data())
        # Touch the data so laziness status doesn't interfere with CML check.
cube.data
self.assertOriginalMetadata(cube, "simple_1d")
np.testing.assert_array_equal(cube.data.mask, mask.data)
if __name__ == "__main__":
tests.main()
|
2,665 |
get watchers
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2021-present Kaleidos Ventures SL
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from taiga.base.api import serializers
from taiga.base.fields import Field, MethodField, DateTimeField
from taiga.projects.history import models as history_models
from taiga.projects.attachments import models as attachments_models
from taiga.projects.history import services as history_service
from .cache import cached_get_user_by_email, cached_get_user_by_pk
from .fields import (UserRelatedField, HistoryUserField,
HistoryDiffField, HistoryValuesField, FileField)
class HistoryExportSerializer(serializers.LightSerializer):
user = HistoryUserField()
diff = HistoryDiffField()
snapshot = MethodField()
values = HistoryValuesField()
comment = Field()
delete_comment_date = DateTimeField()
delete_comment_user = HistoryUserField()
comment_versions = Field()
created_at = DateTimeField()
edit_comment_date = DateTimeField()
is_hidden = Field()
is_snapshot = Field()
type = Field()
def __init__(self, *args, **kwargs):
# Don't pass the extra ids args up to the superclass
self.statuses_queryset = kwargs.pop("statuses_queryset", {})
# Instantiate the superclass normally
super().__init__(*args, **kwargs)
def get_snapshot(self, obj):
user_model_cls = get_user_model()
snapshot = obj.snapshot
if snapshot is None:
return None
try:
owner_field = snapshot.get("owner", None)
if isinstance(owner_field, int):
owner = cached_get_user_by_pk(owner_field)
else:
owner = cached_get_user_by_email(owner_field)
snapshot["owner"] = owner.email
except user_model_cls.DoesNotExist:
pass
try:
assigned_to_field = snapshot.get("assigned_to", None)
if isinstance(assigned_to_field, int):
assigned_to = cached_get_user_by_pk(assigned_to_field)
else:
assigned_to = cached_get_user_by_email(assigned_to_field)
snapshot["assigned_to"] = assigned_to.email
except user_model_cls.DoesNotExist:
pass
if "status" in snapshot:
snapshot["status"] = self.statuses_queryset.get(snapshot["status"])
return snapshot
class HistoryExportSerializerMixin(serializers.LightSerializer):
history = MethodField("get_history")
def statuses_queryset(self, project):
raise NotImplementedError()
def get_history(self, obj):
history_qs = history_service.get_history_queryset_by_model_instance(
obj,
types=(history_models.HistoryType.change, history_models.HistoryType.create,)
)
return HistoryExportSerializer(history_qs, many=True,
statuses_queryset=self.statuses_queryset(obj.project)).data
class AttachmentExportSerializer(serializers.LightSerializer):
owner = UserRelatedField()
attached_file = FileField()
created_date = DateTimeField()
modified_date = DateTimeField()
description = Field()
is_deprecated = Field()
name = Field()
order = Field()
sha1 = Field()
size = Field()
class AttachmentExportSerializerMixin(serializers.LightSerializer):
attachments = MethodField()
def get_attachments(self, obj):
content_type = ContentType.objects.get_for_model(obj.__class__)
attachments_qs = attachments_models.Attachment.objects.filter(object_id=obj.pk,
content_type=content_type)
return AttachmentExportSerializer(attachments_qs, many=True).data
class CustomAttributesValuesExportSerializerMixin(serializers.LightSerializer):
custom_attributes_values = MethodField("get_custom_attributes_values")
def custom_attributes_queryset(self, project):
raise NotImplementedError()
def get_custom_attributes_values(self, obj):
def _use_name_instead_id_as_key_in_custom_attributes_values(custom_attributes, values):
ret = {}
for attr in custom_attributes:
value = values.get(str(attr["id"]), None)
if value is not None:
ret[attr["name"]] = value
return ret
try:
values = obj.custom_attributes_values.attributes_values
custom_attributes = self.custom_attributes_queryset(obj.project)
return _use_name_instead_id_as_key_in_custom_attributes_values(custom_attributes, values)
except ObjectDoesNotExist:
return None
class WatcheableObjectLightSerializerMixin(serializers.LightSerializer):
watchers = MethodField()
def METHOD_NAME(self, obj):
return [user.email for user in obj.METHOD_NAME()]
|
2,666 |
validate schema
|
from datasets.arrow_dataset import Batch
from transformers import BatchEncoding
from datasets import Dataset
from typing import Tuple, List
import torch
from primeqa.mrc.processors.preprocessors.abstract import AbstractPreProcessor
class ELI5FiDPreprocessor(AbstractPreProcessor):
_ignore_pad_token_for_loss = True
_question_column = "input"
_answer_column = "output"
_context_column = "passages"
def adapt_dataset(self, dataset: Dataset, is_train: bool) -> Dataset:
pass
def label_features_for_subsampling(self, tokenized_examples: BatchEncoding, examples: Batch) -> BatchEncoding:
pass
def subsample_features(self, dataset: Dataset) -> Dataset:
pass
def METHOD_NAME(self, dataset: Dataset, is_train: bool, pre_adaptation: bool = True) -> None:
pass
def process_train(self, examples: Dataset) -> Tuple[Dataset, Dataset]:
return self._process(examples, is_train=True)
def process_eval(self, examples: Dataset) -> Tuple[Dataset, Dataset]:
return self._process(examples, is_train=False)
def _process(self, examples: Dataset, is_train: bool) -> Tuple[Dataset, Dataset]:
mode = "train" if is_train else "eval"
dataset = examples.map(
self.preprocess_eli5_function_fid,
fn_kwargs=dict(mode=mode),
batched=True,
num_proc=self._num_workers,
remove_columns=examples.column_names,
load_from_cache_file=self._load_from_cache_file,
desc=f"Running tokenizer on {mode} dataset",
)
return examples, dataset
def preprocess_eli5_function_fid(self, examples: Dataset, mode: str) -> Dataset:
indexes, inputs, targets = self.preprocess_eli5_batch_fid(examples, mode=mode)
passage_ids, passage_masks = self.encode_passages(inputs)
        # TODO: padding is hard-coded to "max_length" here; it should come from the input args.
padding = "max_length"
if targets:
with self._tokenizer.as_target_tokenizer():
labels = self._tokenizer(targets, max_length=self._max_answer_len, padding=padding, truncation=True)
if padding == "max_length" and self._ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != self._tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs = {}
model_inputs["input_ids"] = passage_ids
model_inputs["attention_mask"] = passage_masks
if targets:
model_inputs["labels"] = labels["input_ids"]
model_inputs["example_id"] = indexes
return model_inputs
    def encode_passages(self, batch_text_passages):
        '''
        Param:
            batch_text_passages: (bsz, n_doc, )
        All passages are encoded and padded to max_length. Not using
        max-length padding would complicate the FID data collator; with it,
        the input to the FID system does not need to be padded again.
        '''
passage_ids, passage_masks = [], []
for text_passages in batch_text_passages:
p = self._tokenizer(
text_passages,
padding='max_length',
max_length=self._max_seq_len,
return_tensors='pt',
truncation=True
)
passage_ids.append(p['input_ids'][None])
passage_masks.append(p['attention_mask'][None])
passage_ids = torch.cat(passage_ids, dim=0)
passage_masks = torch.cat(passage_masks, dim=0)
return passage_ids.tolist(), passage_masks.tolist()
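    # Shape sketch (illustrative, not part of the original code): for a batch
    # of 2 questions with n_doc = 3 passages each and _max_seq_len = 8, the
    # tensors concatenated above have shape (2, 3, 8), so the returned
    # passage_ids / passage_masks are nested lists with those dimensions.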
def preprocess_eli5_batch_fid(self, examples, mode="train") -> Tuple[List[str], List[str]]:
indices = []
questions = examples[self._question_column]
contexts = examples[self._context_column]
n_doc = self._max_contexts
def top_passages(ctx):
assert n_doc <= len(ctx)
return [ctx[i]["text"] for i in range(n_doc)]
def append_question(passages, question):
return [f"question: {question} passage: {t}" for t in passages]
# multiple answers for training
if mode == "train":
answers = examples[self._answer_column]
inputs = []
targets = []
for idx,q in enumerate(questions):
if len(q) == 0:
# Skip empty questions
continue
passages = top_passages(contexts[idx])
question_passages = append_question(passages, q)
answer_list = answers[idx]
if len(answer_list) == 0:
inputs.append(question_passages)
targets.append("")
indices.append(examples["id"][idx])
else: # multiple answers
for answer_data in answer_list:
a = answer_data["answer"]
answer_score = answer_data["meta"]["score"]
                        if answer_score >= 3:  # only take answers whose score >= 3
inputs.append(question_passages)
targets.append(a)
indices.append(examples["id"][idx])
elif mode == "eval": # for evaluation only take each question once
inputs = []
if self._answer_column in examples:
answers = examples[self._answer_column]
else:
answers = []
for idx,q in enumerate(questions):
passages = top_passages(contexts[idx])
question_passages = append_question(passages, q)
inputs.append(question_passages)
indices.append(examples["id"][idx])
targets = [answer[0]["answer"] if len(answer) > 0 else "" for answer in answers]
else:
raise ValueError("mode requires eval or train")
return indices, inputs, targets # inputs is a list of a list of question+passage, targets is a list of answers
def set_max_contexts(self, new_max_contexts):
        self._max_contexts = new_max_contexts
|
2,667 |
test source serializer needs a file module
|
import importlib
import sys
from pathlib import Path
from types import ModuleType
from typing import Generator
import pytest
from prefect.packaging.serializers import (
ImportSerializer,
PickleSerializer,
SourceSerializer,
)
def foo(return_val="foo"):
return return_val
@pytest.mark.parametrize(
"serializer", [SourceSerializer(), ImportSerializer(), PickleSerializer()]
)
def test_serialize_function(serializer):
blob = serializer.dumps(foo)
result = serializer.loads(blob)
assert type(result) == type(foo)
assert result.__kwdefaults__ == foo.__kwdefaults__
assert result.__name__ == foo.__name__
# The source serializer updates the module to __prefect_loader__
if not isinstance(serializer, SourceSerializer):
        assert result.__module__ == foo.__module__
assert result() == foo(), "Result should be callable"
@pytest.fixture
def busted_pickler() -> Generator[ModuleType, None, None]:
spec = importlib.machinery.ModuleSpec("busted_pickle", None)
busted_pickler = importlib.util.module_from_spec(spec)
sys.modules["busted_pickler"] = busted_pickler
try:
yield busted_pickler
finally:
del sys.modules["busted_pickler"]
def test_pickle_serializer_needs_a_sane_pickler(busted_pickler: ModuleType):
with pytest.raises(ValueError, match="Failed to import requested pickle library"):
PickleSerializer(picklelib="not-even-valid-identifier")
with pytest.raises(ValueError, match="does not have a 'dumps'"):
PickleSerializer(picklelib="busted_pickler")
setattr(busted_pickler, "dumps", lambda: "wat")
with pytest.raises(ValueError, match="does not have a 'loads'"):
PickleSerializer(picklelib="busted_pickler")
setattr(busted_pickler, "loads", lambda: "wat")
serializer = PickleSerializer(picklelib="busted_pickler")
assert serializer.picklelib == "busted_pickler"
def test_pickle_serializer_warns_about_mismatched_versions():
import cloudpickle
assert cloudpickle.__version__ != "0.0.0.0.0.0"
with pytest.warns(RuntimeWarning, match="Mismatched 'cloudpickle' versions"):
PickleSerializer(picklelib="cloudpickle", picklelib_version="0.0.0.0.0.0")
PickleSerializer(picklelib="cloudpickle", picklelib_version=cloudpickle.__version__)
def test_source_serializer_must_find_module():
with pytest.raises(ValueError, match="Cannot determine source module for object"):
        # object() is a C-implemented instance that doesn't have a __module__
SourceSerializer().dumps(object())
def METHOD_NAME():
with pytest.raises(ValueError, match="Found module <module 'builtins'"):
# object comes from the module `builtins`, a C module without Python source
SourceSerializer().dumps(object)
@pytest.mark.parametrize(
"garbage",
[
b"{}",
b"[]",
b"null",
b'{"source": "import antigravity\\n"}',
],
)
def test_source_serializer_cannot_decode_just_any_old_thing(garbage: bytes):
with pytest.raises(ValueError, match="Invalid serialized data"):
SourceSerializer().loads(garbage)
def test_pickle_serializer_does_not_allow_pickle_modules_without_cloudpickle():
with pytest.raises(ValueError, match="cloudpickle"):
PickleSerializer(pickle_modules=["test"], picklelib="pickle")
def test_pickle_serializer_supports_module_serialization(monkeypatch):
monkeypatch.syspath_prepend(str(Path(__file__).parent / "examples"))
from my_module.flow import test_flow
serializer = PickleSerializer(pickle_modules=["my_module"])
content = serializer.dumps(test_flow)
monkeypatch.undo()
sys.modules.pop("my_module")
flow = serializer.loads(content)
assert flow() == "test!"
def test_pickle_serializer_fails_on_relative_import_without_module_serialization(
monkeypatch,
):
monkeypatch.syspath_prepend(str(Path(__file__).parent / "examples"))
from my_module.flow import test_flow
serializer = PickleSerializer()
content = serializer.dumps(test_flow)
monkeypatch.undo()
sys.modules.pop("my_module")
with pytest.raises(ModuleNotFoundError, match="No module named 'my_module'"):
serializer.loads(content)
|
2,668 |
create programmer
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 Florent Kermarrec <[email protected]>
# Copyright (c) 2023 Charles-Henri Mousset <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
# The Colorlight 5A-75B PCB and IOs have been documented by @miek and @smunaut:
# https://github.com/q3k/chubby75/tree/master/5a-75b
# The Colorlight 5A-907 PCB, which is heavily based on the 5A-75B, has been documented by @chmouss:
# https://github.com/chmousset/colorlight_reverse
from litex.build.generic_platform import *
from litex.build.lattice import LatticeECP5Platform
from litex.build.lattice.programmer import OpenOCDJTAGProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io_v7_0 = [ # Documented by @miek and @chmouss
# Clk
("clk25", 0, Pins("P6"), IOStandard("LVCMOS33")),
# Led
("user_led_n", 0, Pins("P11"), IOStandard("LVCMOS33")),
# Button
("user_btn_n", 0, Pins("M13"), IOStandard("LVCMOS33")),
# Serial
("serial", 0,
Subsignal("tx", Pins("P15")), # FAN pin 1
Subsignal("rx", Pins("L14")), # FAN pin 2
IOStandard("LVCMOS33")
),
("uartbone", 0,
Subsignal("tx", Pins("F15")), # EXT_VOL pin 1
Subsignal("rx", Pins("E16")), # EXT_VOL pin 2
IOStandard("LVCMOS33")
),
# SPIFlash (W25Q32JV)
("spiflash", 0,
# clk
Subsignal("cs_n", Pins("N8")),
#Subsignal("clk", Pins("")), driven through USRMCLK
Subsignal("mosi", Pins("T8")),
Subsignal("miso", Pins("T7")),
IOStandard("LVCMOS33"),
),
# SDR SDRAM (M126L6161A)
("sdram_clock", 0, Pins("C6"), IOStandard("LVCMOS33")),
("sdram", 0,
Subsignal("a", Pins(
"A9 E10 B12 D13 C12 D11 D10 E9",
"D9 B7 C8")),
Subsignal("dq", Pins(
"B13 C11 C10 A11 C9 E8 B6 B9",
"A6 B5 A5 B4 B3 C3 A2 B2",
"E2 D3 A4 E4 D4 C4 E5 D5",
"E6 D6 D8 A8 B8 B10 B11 E11")),
Subsignal("we_n", Pins("C7")),
Subsignal("ras_n", Pins("D7")),
Subsignal("cas_n", Pins("E7")),
#Subsignal("cs_n", Pins("")), # gnd
#Subsignal("cke", Pins("")), # 3v3
Subsignal("ba", Pins("A7")),
#Subsignal("dm", Pins("")), # gnd
IOStandard("LVCMOS33"),
Misc("SLEWRATE=FAST")
),
# RGMII Ethernet (B50612D)
("eth_clocks", 0,
Subsignal("tx", Pins("M2")),
Subsignal("rx", Pins("M1")),
IOStandard("LVCMOS33")
),
("eth", 0,
#Subsignal("rst_n", Pins("P5")),
Subsignal("mdio", Pins("T2")),
Subsignal("mdc", Pins("P3")),
Subsignal("rx_ctl", Pins("N6")),
Subsignal("rx_data", Pins("N1 M5 N5 M6")),
Subsignal("tx_ctl", Pins("M3")),
Subsignal("tx_data", Pins("L1 L3 P2 L4")),
IOStandard("LVCMOS33")
),
("eth_clocks", 1,
Subsignal("tx", Pins("M12")),
Subsignal("rx", Pins("M16")),
IOStandard("LVCMOS33")
),
("eth", 1,
#Subsignal("rst_n", Pins("P5")),
Subsignal("mdio", Pins("T2")),
Subsignal("mdc", Pins("P3")),
Subsignal("rx_ctl", Pins("L15")),
Subsignal("rx_data", Pins("P13 N13 P14 M15")),
Subsignal("tx_ctl", Pins("R15")),
Subsignal("tx_data", Pins("T14 R12 R13 R14")),
IOStandard("LVCMOS33")
),
# USB
# To use the USB:
# shunt R124 and R134
# remove R107
# connect R107's pad towards FPGA to R124 shunt through a 1.5k resistor
# note: it conflicts with uartbone
("usb", 0,
Subsignal("d_p", Pins("F15")), # EXT_VOL pin 1
Subsignal("d_n", Pins("E16")), # EXT_VOL pin 2
Subsignal("pullup", Pins("A12")), # R107's pad towards FPGA
IOStandard("LVCMOS33")
),
]
# Documented by @chmouss
_connectors_v7_0 = [
("door", "- - P16"),
("smoke", "- - M14 -"),
("fan", "- P15 L14"),
("ext_vol", "- F15 E16"),
# pinout: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
("j1", "- L2 K1 F12 J14 B16 - J5 K2 F3 F1 T4 G3 - G2 H3 R5 H5 J4 K3 - R8 G1 K4 C2 P8 E3"),
("j2", "- L2 K1 F12 J14 B16 - J2 J1 H4 K5 R7 P1 - R1 L5 P7 F2 P4 R2 - N7 M8 M9 T6 M7 R6"),
("j3", "- L2 K1 F12 J14 B16 - G4 G5 M11 N11 L13 P12 - K15 N12 G13 L16 K16 J15 - G12 J16 J12 H15 F13 G16"),
("j4", "- L2 K1 F12 J14 B16 - F5 F4 H13 J13 E15 H12 - G14 H14 D16 G15 A15 F16 - F14 A14 E13 B14 E14 A13"),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(LatticeECP5Platform):
default_clk_name = "clk25"
default_clk_period = 1e9/25e6
def __init__(self, revision="7.0", toolchain="trellis"):
assert revision in ["7.0"]
self.revision = revision
device = {"7.0": "LFE5U-25F-6BG256C"}[revision]
io = {"7.0": _io_v7_0 }[revision]
connectors = {"7.0": _connectors_v7_0 }[revision]
LatticeECP5Platform.__init__(self, device, io, connectors=connectors, toolchain=toolchain)
def METHOD_NAME(self):
return OpenOCDJTAGProgrammer("openocd_colorlight_5a_75b.cfg")
def do_finalize(self, fragment):
LatticeECP5Platform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk25", loose=True), 1e9/25e6)
self.add_period_constraint(self.lookup_request("eth_clocks:rx", 0, loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth_clocks:rx", 1, loose=True), 1e9/125e6)
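# Minimal usage sketch (illustrative, not part of this file; the bitstream path
# is hypothetical):
#
#     platform = Platform(revision="7.0", toolchain="trellis")
#     prog = platform.METHOD_NAME()              # OpenOCD JTAG programmer
#     # prog.load_bitstream("build/colorlight_5a_75b/gateware/top.svf")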
|
2,669 |
create loss
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
import net
class DygraphModel():
# define model
def create_model(self, config):
emb_path = config.get("hyper_parameters.emb_path")
vocab_size = config.get("hyper_parameters.vocab_size")
emb_size = config.get("hyper_parameters.emb_size")
kernel_num = config.get("hyper_parameters.kernel_num")
conv_filter = config.get("hyper_parameters.conv_filter")
conv_act = config.get("hyper_parameters.conv_act")
hidden_size = config.get("hyper_parameters.hidden_size")
out_size = config.get("hyper_parameters.out_size")
pool_size = config.get("hyper_parameters.pool_size")
pool_stride = config.get("hyper_parameters.pool_stride")
pool_padding = config.get("hyper_parameters.pool_padding")
pool_type = config.get("hyper_parameters.pool_type")
hidden_act = config.get("hyper_parameters.hidden_act")
pyramid_model = net.MatchPyramidLayer(
emb_path, vocab_size, emb_size, kernel_num, conv_filter, conv_act,
hidden_size, out_size, pool_size, pool_stride, pool_padding,
pool_type, hidden_act)
return pyramid_model
# define feeds which convert numpy of batch data to paddle.tensor
def create_feeds(self, batch_data, sentence_left_size,
sentence_right_size):
sentence_left = paddle.to_tensor(batch_data[0].numpy().astype('int64')
.reshape(-1, sentence_left_size))
sentence_right = paddle.to_tensor(batch_data[1].numpy().astype('int64')
.reshape(-1, sentence_right_size))
return [sentence_left, sentence_right]
# define loss function by predicts and label
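# The method below implements a pairwise hinge loss, mean(max(0, 1 - pos + neg)),
# assuming each batch holds 64 positive pairs followed by 64 negative pairs
# (hence the hard-coded slice bounds 0:64 and 64:128).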
def METHOD_NAME(self, prediction):
pos = paddle.slice(
prediction, axes=[0, 1], starts=[0, 0], ends=[64, 1])
neg = paddle.slice(
prediction, axes=[0, 1], starts=[64, 0], ends=[128, 1])
loss_part1 = paddle.subtract(
paddle.full(
shape=[64, 1], fill_value=1.0, dtype='float32'), pos)
loss_part2 = paddle.add(loss_part1, neg)
loss_part3 = paddle.maximum(
paddle.full(
shape=[64, 1], fill_value=0.0, dtype='float32'),
loss_part2)
avg_cost = paddle.mean(loss_part3)
return avg_cost
# define optimizer
def create_optimizer(self, dy_model, config):
lr = config.get("hyper_parameters.optimizer.learning_rate", 0.001)
optimizer = paddle.optimizer.Adam(
learning_rate=lr, parameters=dy_model.parameters())
return optimizer
# define metrics such as auc/acc
# multi-task models need to define multiple metrics
def create_metrics(self):
metrics_list_name = []
metrics_list = []
return metrics_list, metrics_list_name
# construct train forward phase
def train_forward(self, dy_model, metrics_list, batch_data, config):
sentence_left_size = config.get("hyper_parameters.sentence_left_size")
sentence_right_size = config.get(
"hyper_parameters.sentence_right_size")
batch_size = config.get("runner.train_batch_size", 128)
inputs = self.create_feeds(batch_data, sentence_left_size,
sentence_right_size)
prediction = dy_model.forward(inputs)
loss = self.METHOD_NAME(prediction)
# update metrics
print_dict = {"loss": loss}
return loss, metrics_list, print_dict
def infer_forward(self, dy_model, metrics_list, batch_data, config):
sentence_left_size = config.get("hyper_parameters.sentence_left_size")
sentence_right_size = config.get(
"hyper_parameters.sentence_right_size")
batch_size = config.get("runner.infer_batch_size", 128)
inputs = self.create_feeds(batch_data, sentence_left_size,
sentence_right_size)
prediction = dy_model.forward(inputs)
# update metrics
print_dict = {"prediction": prediction}
return metrics_list, print_dict
|
2,670 |
disarm
|
#!/usr/bin/env python
#############################################################################
#
# Watchdog contains an implementation of SONiC Platform Base Watchdog API
#
#############################################################################
import fcntl
import os
import array
try:
from sonic_platform_base.watchdog_base import WatchdogBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
""" ioctl constants """
IO_WRITE = 0x40000000
IO_READ = 0x80000000
IO_READ_WRITE = 0xC0000000
IO_SIZE_INT = 0x00040000
IO_SIZE_40 = 0x00280000
IO_TYPE_WATCHDOG = ord('W') << 8
WDR_INT = IO_READ | IO_SIZE_INT | IO_TYPE_WATCHDOG
WDR_40 = IO_READ | IO_SIZE_40 | IO_TYPE_WATCHDOG
WDWR_INT = IO_READ_WRITE | IO_SIZE_INT | IO_TYPE_WATCHDOG
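# These request numbers mirror the Linux _IOR/_IOWR macros: the direction bits,
# the argument size and the watchdog magic (ord('W') << 8) are OR'ed together
# here, and the per-command number is OR'ed in below.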
""" Watchdog ioctl commands """
WDIOC_GETSUPPORT = 0 | WDR_40
WDIOC_GETSTATUS = 1 | WDR_INT
WDIOC_GETBOOTSTATUS = 2 | WDR_INT
WDIOC_GETTEMP = 3 | WDR_INT
WDIOC_SETOPTIONS = 4 | WDR_INT
WDIOC_KEEPALIVE = 5 | WDR_INT
WDIOC_SETTIMEOUT = 6 | WDWR_INT
WDIOC_GETTIMEOUT = 7 | WDR_INT
WDIOC_SETPRETIMEOUT = 8 | WDWR_INT
WDIOC_GETPRETIMEOUT = 9 | WDR_INT
WDIOC_GETTIMELEFT = 10 | WDR_INT
""" Watchdog status constants """
WDIOS_DISABLECARD = 0x0001
WDIOS_ENABLECARD = 0x0002
WDT_COMMON_ERROR = -1
WD_MAIN_IDENTITY = "iTCO_wdt"
WDT_SYSFS_PATH = "/sys/class/watchdog/"
DEFAULT_TIMEOUT = 180
watchdog = 0
class Watchdog(WatchdogBase):
def __init__(self):
self.watchdog, self.wdt_main_dev_name = self._get_wdt()
if self.wdt_main_dev_name is None:
raise Exception("Watchdog device is not instantiated")
self.status_path = "/sys/class/watchdog/%s/status" % self.wdt_main_dev_name
self.state_path = "/sys/class/watchdog/%s/state" % self.wdt_main_dev_name
self.timeout_path = "/sys/class/watchdog/%s/timeout" % self.wdt_main_dev_name
# Set default value
self._disable()
self.armed = False
self.timeout = DEFAULT_TIMEOUT
def _is_wd_main(self, dev):
"""
Checks watchdog identity
"""
identity = self._read_file(
"{}/{}/identity".format(WDT_SYSFS_PATH, dev))
return identity == WD_MAIN_IDENTITY
def _get_wdt(self):
"""
Retrieves watchdog device
"""
global watchdog
wdt_main_dev_list = [dev for dev in os.listdir(
"/dev/") if dev.startswith("watchdog") and self._is_wd_main(dev)]
if not wdt_main_dev_list:
return (None, None)
wdt_main_dev_name = wdt_main_dev_list[0]
watchdog_device_path = "/dev/{}".format(wdt_main_dev_name)
if not watchdog:
watchdog = os.open(watchdog_device_path, os.O_RDWR)
return watchdog, wdt_main_dev_name
def _read_file(self, file_path):
"""
Read text file
"""
try:
with open(file_path, "r") as fd:
txt = fd.read()
except IOError:
return WDT_COMMON_ERROR
return txt.strip()
def _enable(self):
"""
Turn on the watchdog timer
"""
req = array.array('h', [WDIOS_ENABLECARD])
fcntl.ioctl(self.watchdog, WDIOC_SETOPTIONS, req, False)
def _disable(self):
"""
Turn off the watchdog timer
"""
req = array.array('h', [WDIOS_DISABLECARD])
fcntl.ioctl(self.watchdog, WDIOC_SETOPTIONS, req, False)
def _keepalive(self):
"""
Send a keepalive to the watchdog timer
"""
fcntl.ioctl(self.watchdog, WDIOC_KEEPALIVE)
def _settimeout(self, seconds):
"""
Set watchdog timer timeout
@param seconds - timeout in seconds
@return the actual timeout that was set
"""
req = array.array('I', [seconds])
fcntl.ioctl(self.watchdog, WDIOC_SETTIMEOUT, req, True)
return int(req[0])
def _gettimeout(self, timeout_path):
"""
Get watchdog timeout
@return watchdog timeout
"""
req = array.array('I', [0])
fcntl.ioctl(self.watchdog, WDIOC_GETTIMEOUT, req, True)
return int(req[0])
def _gettimeleft(self):
"""
Get time left before watchdog timer expires
@return time left in seconds
"""
req = array.array('I', [0])
fcntl.ioctl(self.watchdog, WDIOC_GETTIMELEFT, req, True)
return int(req[0])
#################################################################
def arm(self, seconds):
"""
Arm the hardware watchdog with a timeout of <seconds> seconds.
If the watchdog is currently armed, calling this function will
simply reset the timer to the provided value. If the underlying
hardware does not support the value provided in <seconds>, this
method should arm the watchdog with the *next greater* available
value.
Returns:
An integer specifying the *actual* number of seconds the watchdog
was armed with. On failure returns -1.
"""
ret = WDT_COMMON_ERROR
if seconds < 0:
return ret
try:
if self.timeout != seconds:
self.timeout = self._settimeout(seconds)
if self.armed:
self._keepalive()
else:
self._settimeout(seconds)
self._enable()
self.armed = True
ret = self.timeout
except IOError as e:
pass
return ret
def METHOD_NAME(self):
"""
Disarm the hardware watchdog
Returns:
A boolean, True if watchdog is disarmed successfully, False if not
"""
disarmed = False
if self.is_armed():
try:
self._disable()
self.armed = False
disarmed = True
except IOError:
pass
return disarmed
def is_armed(self):
"""
Retrieves the armed state of the hardware watchdog.
Returns:
A boolean, True if watchdog is armed, False if not
"""
return self.armed
def get_remaining_time(self):
"""
If the watchdog is armed, retrieve the number of seconds remaining on
the watchdog timer
Returns:
An integer specifying the number of seconds remaining on the
watchdog timer. If the watchdog is not armed, returns -1.
"""
timeleft = WDT_COMMON_ERROR
if self.armed:
try:
timeleft = self._gettimeleft()
except IOError:
pass
return timeleft
def __del__(self):
"""
Close watchdog
"""
if self.watchdog is not None:
os.close(self.watchdog)
|
2,671 |
add sort
|
import logging
from django.utils.safestring import mark_safe
from elasticsearch_dsl import A
from elasticsearch_dsl.query import Q
logger = logging.getLogger(__name__)
def _make_values_lists(kwargs):
return {k: v if isinstance(v, (list, tuple)) else [v] for k, v in kwargs.items()}
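# e.g. _make_values_lists({"status": "done", "tags": ["a", "b"]})
# -> {"status": ["done"], "tags": ["a", "b"]}, so "terms" queries always get lists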
class EmptyResponse(list):
class hits:
total = 0
class SearchQuerySetWrapper(object):
"""
Decorates a SearchQuerySet object using a generator for efficient iteration
"""
def __init__(self, sqs, model):
self.sqs = sqs
self.sqs.model = model
self.model = model
self.filters = []
self.post_filters = []
self.aggregations = []
self.applied_post_filters = {}
self.query = None
self.aggs = []
self.broken_query = False
def count(self):
total = self.response.hits.total
if isinstance(total, int):
return total
return total.value
def has_more(self):
total = self.response.hits.total
if isinstance(total, int):
return False
return total.relation == "gte"
def to_queryset(self):
if self.broken_query:
return self.model.objects.none()
return self.sqs.to_queryset()
def wrap_queryset(self, qs):
return ESQuerySetWrapper(qs, self.response)
def update_query(self):
if self.post_filters:
self.sqs = self.sqs.post_filter(Q("bool", must=self.post_filters))
if self.filters:
self.sqs = self.sqs.query("bool", filter=self.filters)
for field in self.aggregations:
agg_filters = self.applied_post_filters.copy()
agg_filters.pop(field, None)
filter_agg = A("filter", bool={"must": list(agg_filters.values())})
filter_agg.bucket(field, "terms", field=field)
self.sqs.aggs.bucket(field, filter_agg)
def all(self):
return self
def none(self):
self.broken_query = True
return self
@property
def response(self):
return self.get_response()
def get_response(self):
if self.broken_query:
return EmptyResponse()
if not hasattr(self.sqs, "_response"):
self.sqs = self.sqs.source(excludes=["*"])
else:
return self.sqs._response
self.update_query()
try:
return self.sqs.execute()
except Exception as e:
logger.error("Elasticsearch error: %s", e)
self.broken_query = True
return EmptyResponse()
def add_aggregation(self, aggs):
self.aggregations.extend(aggs)
return self
def add_date_histogram(self, date_field, interval="1y", format="yyyy"):
a = A(
"date_histogram",
field=date_field,
calendar_interval=interval,
format=format,
)
self.sqs.aggs.bucket(date_field, a)
return self
def get_facet_data(self):
if "aggregations" in self.response:
return self.response["aggregations"]
return {}
def get_aggregations(self):
if self.broken_query or "aggregations" not in self.response:
return {"fields": {}}
return {
"fields": {
k: [
[i["key"], self.response["aggregations"][k]["doc_count"]]
for i in self.response["aggregations"][k][k]["buckets"]
]
for k in self.response["aggregations"]
}
}
def _make_query(self, *args, **kwargs):
if kwargs:
return Q("terms", **_make_values_lists(kwargs))
return args[0]
def filter(self, *args, **kwargs):
query = self._make_query(*args, **kwargs)
self.filters.append(query)
return self
def post_filter(self, name, *args, **kwargs):
query = self._make_query(*args, **kwargs)
self.post_filters.append(query)
self.applied_post_filters[name] = query
return self
def set_query(self, q):
self.query = q
self.sqs = self.sqs.query(self.query)
return self
def METHOD_NAME(self, *sorts):
self.sqs = self.sqs.sort(*sorts)
return self
def __getitem__(self, key):
self.sqs = self.sqs[key]
return self
def __iter__(self):
return iter(self.sqs)
class ESQuerySetWrapper(object):
def __init__(self, qs, es_response):
self.__class__ = type(qs.__class__.__name__, (self.__class__, qs.__class__), {})
self.__dict__ = qs.__dict__
self._qs = qs
self._es_response = es_response
self._es_map = {int(hit.meta.id): hit for hit in es_response}
def __iter__(self):
for obj in self._qs:
hit = self._es_map[obj.pk]
# mark_safe should work because highlight_options
# has been set with encoder="html"
obj.query_highlight = mark_safe(" ".join(self._get_highlight(hit)))
yield obj
def _get_highlight(self, hit):
if hasattr(hit.meta, "highlight"):
for key in hit.meta.highlight:
yield from hit.meta.highlight[key]
|
2,672 |
sort ies
|
#!/usr/bin/env python3
# Allow direct execution
import os
import shutil
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from inspect import getsource
from devscripts.utils import get_filename_args, read_file, write_file
NO_ATTR = object()
STATIC_CLASS_PROPERTIES = [
'IE_NAME', '_ENABLED', '_VALID_URL', # Used for URL matching
'_WORKING', 'IE_DESC', '_NETRC_MACHINE', 'SEARCH_KEY', # Used for --extractor-descriptions
'age_limit', # Used for --age-limit (evaluated)
'_RETURN_TYPE', # Accessed in CLI only with instance (evaluated)
]
CLASS_METHODS = [
'ie_key', 'suitable', '_match_valid_url', # Used for URL matching
'working', 'get_temp_id', '_match_id', # Accessed just before instance creation
'description', # Used for --extractor-descriptions
'is_suitable', # Used for --age-limit
'supports_login', 'is_single_video', # Accessed in CLI only with instance
]
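# The properties and methods listed above are copied onto the generated lazy
# classes so URL matching and --extractor-descriptions work without importing
# the real extractor modules; everything else stays lazily loaded.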
IE_TEMPLATE = '''
class {name}({bases}):
_module = {module!r}
'''
MODULE_TEMPLATE = read_file('devscripts/lazy_load_template.py')
def main():
lazy_extractors_filename = get_filename_args(default_outfile='yt_dlp/extractor/lazy_extractors.py')
if os.path.exists(lazy_extractors_filename):
os.remove(lazy_extractors_filename)
_ALL_CLASSES = get_all_ies() # Must be before import
import yt_dlp.plugins
from yt_dlp.extractor.common import InfoExtractor, SearchInfoExtractor
# Filter out plugins
_ALL_CLASSES = [cls for cls in _ALL_CLASSES if not cls.__module__.startswith(f'{yt_dlp.plugins.PACKAGE_NAME}.')]
DummyInfoExtractor = type('InfoExtractor', (InfoExtractor,), {'IE_NAME': NO_ATTR})
module_src = '\n'.join((
MODULE_TEMPLATE,
' _module = None',
*extra_ie_code(DummyInfoExtractor),
'\nclass LazyLoadSearchExtractor(LazyLoadExtractor):\n pass\n',
*build_ies(_ALL_CLASSES, (InfoExtractor, SearchInfoExtractor), DummyInfoExtractor),
))
write_file(lazy_extractors_filename, f'{module_src}\n')
def get_all_ies():
PLUGINS_DIRNAME = 'ytdlp_plugins'
BLOCKED_DIRNAME = f'{PLUGINS_DIRNAME}_blocked'
if os.path.exists(PLUGINS_DIRNAME):
# os.rename cannot be used, e.g. in Docker. See https://github.com/yt-dlp/yt-dlp/pull/4958
shutil.move(PLUGINS_DIRNAME, BLOCKED_DIRNAME)
try:
from yt_dlp.extractor.extractors import _ALL_CLASSES
finally:
if os.path.exists(BLOCKED_DIRNAME):
shutil.move(BLOCKED_DIRNAME, PLUGINS_DIRNAME)
return _ALL_CLASSES
def extra_ie_code(ie, base=None):
for var in STATIC_CLASS_PROPERTIES:
val = getattr(ie, var)
if val != (getattr(base, var) if base else NO_ATTR):
yield f' {var} = {val!r}'
yield ''
for name in CLASS_METHODS:
f = getattr(ie, name)
if not base or f.__func__ != getattr(base, name).__func__:
yield getsource(f)
def build_ies(ies, bases, attr_base):
names = []
for ie in METHOD_NAME(ies, bases):
yield build_lazy_ie(ie, ie.__name__, attr_base)
if ie in ies:
names.append(ie.__name__)
yield f'\n_ALL_CLASSES = [{", ".join(names)}]'
def METHOD_NAME(ies, ignored_bases):
"""find the correct sorting and add the required base classes so that subclasses can be correctly created"""
classes, returned_classes = ies[:-1], set()
assert ies[-1].__name__ == 'GenericIE', 'Last IE must be GenericIE'
while classes:
for c in classes[:]:
bases = set(c.__bases__) - {object, *ignored_bases}
restart = False
for b in sorted(bases, key=lambda x: x.__name__):
if b not in classes and b not in returned_classes:
assert b.__name__ != 'GenericIE', 'Cannot inherit from GenericIE'
classes.insert(0, b)
restart = True
if restart:
break
if bases <= returned_classes:
yield c
returned_classes.add(c)
classes.remove(c)
break
yield ies[-1]
def build_lazy_ie(ie, name, attr_base):
bases = ', '.join({
'InfoExtractor': 'LazyLoadExtractor',
'SearchInfoExtractor': 'LazyLoadSearchExtractor',
}.get(base.__name__, base.__name__) for base in ie.__bases__)
s = IE_TEMPLATE.format(name=name, module=ie.__module__, bases=bases)
return s + '\n'.join(extra_ie_code(ie, attr_base))
if __name__ == '__main__':
main()
|
2,673 |
test zeroconf custom addresses
|
"""Test suit for pairing process with Apple TV."""
import ipaddress
from unittest.mock import MagicMock
import pytest
import pytest_asyncio
from pyatv.conf import AppleTV, ManualService
from pyatv.const import Protocol
from pyatv.core import create_core
from pyatv.protocols.dmap import pairing, parser, tag_definitions
from pyatv.storage.memory_storage import MemoryStorage
from tests import utils, zeroconf_stub
REMOTE_NAME = "pyatv remote"
# This is a valid config for default pairing guid
PIN_CODE = 1234
PAIRING_GUID = "0x0000000000000001"
PAIRING_CODE = "690E6FF61E0D7C747654A42AED17047D"
# This is valid for some other (non-default) config
PIN_CODE2 = 5555
PAIRING_GUID2 = "0x1234ABCDE56789FF"
PAIRING_CODE2 = "58AD1D195B6DAA58AA2EA29DC25B81C3"
# Code is padded with zeros
PIN_CODE3 = 1
PAIRING_GUID3 = "0x7D1324235F535AE7"
PAIRING_CODE3 = "A34C3361C7D57D61CA41F62A8042F069"
# Pairing guid is 8 bytes, which is 64 bits
RANDOM_128_BITS = 6558272190156386627
RANDOM_PAIRING_GUID = "0x5B03A9CF4A983143"
RANDOM_PAIRING_CODE = "7AF2D0B8629DE3C704D40A14C9E8CB93"
pytestmark = pytest.mark.asyncio
def pairing_url(zeroconf, pairing_code):
service = zeroconf.registered_services[0]
return (
f"http://127.0.0.1:{service.port}/"
+ f"pair?pairingcode={pairing_code}&servicename=test"
)
@pytest.fixture
def mock_random():
pairing.random.getrandbits = lambda x: RANDOM_128_BITS
@pytest.fixture(name="storage")
def storage_fixture() -> MemoryStorage:
yield MemoryStorage()
@pytest_asyncio.fixture
async def mock_pairing(event_loop, storage):
obj = MagicMock()
service = ManualService("id", Protocol.DMAP, 0, {})
config = AppleTV("Apple TV", "127.0.0.1")
config.add_service(service)
zeroconf = zeroconf_stub.stub(pairing)
async def _start(
pin_code=PIN_CODE, pairing_guid=PAIRING_GUID, name=REMOTE_NAME, addresses=None
):
options = {"zeroconf": zeroconf}
if pairing_guid:
options["pairing_guid"] = pairing_guid
if name:
options["name"] = name
if addresses:
options["addresses"] = addresses
settings = await storage.get_settings(config)
core = await create_core(config, service, settings=settings, loop=event_loop)
obj.pairing = pairing.DmapPairingHandler(core, **options)
await obj.pairing.begin()
obj.pairing.pin(pin_code)
return obj.pairing, zeroconf, service
yield _start
await obj.pairing.finish()
await obj.pairing.close()
async def test_zeroconf_service_published(mock_pairing):
_, zeroconf, _ = await mock_pairing()
assert len(zeroconf.registered_services) == 1, "no zeroconf service registered"
service = zeroconf.registered_services[0]
assert service.properties["DvNm"] == REMOTE_NAME, "remote name does not match"
assert [ipaddress.ip_address("10.0.10.1").packed] == service.addresses
@pytest.mark.parametrize("addresses", [(["1.2.3.4"])])
async def METHOD_NAME(mock_pairing, addresses):
_, zeroconf, _ = await mock_pairing(addresses=addresses)
assert len(zeroconf.registered_services) == len(addresses)
service = zeroconf.registered_services[0]
for address in addresses:
assert ipaddress.ip_address(address).packed in service.addresses
async def test_successful_pairing(mock_pairing, storage):
pairing, zeroconf, service = await mock_pairing()
url = pairing_url(zeroconf, PAIRING_CODE)
data, _ = await utils.simple_get(url)
await pairing.finish()
# Verify content returned in pairingresponse
parsed = parser.parse(data, tag_definitions.lookup_tag)
assert parser.first(parsed, "cmpa", "cmpg") == 1
assert parser.first(parsed, "cmpa", "cmnm") == REMOTE_NAME
assert parser.first(parsed, "cmpa", "cmty") == "iPhone"
assert service.credentials == PAIRING_GUID
assert storage.settings[0].protocols.dmap.credentials == PAIRING_GUID
async def test_successful_pairing_random_pairing_guid_generated(
mock_random, mock_pairing, storage
):
pairing, zeroconf, service = await mock_pairing(pairing_guid=None)
url = pairing_url(zeroconf, RANDOM_PAIRING_CODE)
await utils.simple_get(url)
await pairing.finish()
assert service.credentials == RANDOM_PAIRING_GUID
assert storage.settings[0].protocols.dmap.credentials == RANDOM_PAIRING_GUID
async def test_successful_pairing_with_any_pin(mock_pairing):
_, zeroconf, _ = await mock_pairing(pin_code=None)
url = pairing_url(zeroconf, "invalid_pairing_code")
_, status = await utils.simple_get(url)
assert status == 200
async def test_successful_pairing_with_pin_leading_zeros(mock_pairing):
_, zeroconf, _ = await mock_pairing(pin_code=PIN_CODE3, pairing_guid=PAIRING_GUID3)
url = pairing_url(zeroconf, PAIRING_CODE3)
_, status = await utils.simple_get(url)
assert status == 200
async def test_pair_custom_pairing_guid(mock_pairing, storage):
pairing, zeroconf, service = await mock_pairing(
pin_code=PIN_CODE2, pairing_guid=PAIRING_GUID2
)
url = pairing_url(zeroconf, PAIRING_CODE2)
data, _ = await utils.simple_get(url)
await pairing.finish()
# Verify content returned in pairingresponse
parsed = parser.parse(data, tag_definitions.lookup_tag)
assert parser.first(parsed, "cmpa", "cmpg") == int(PAIRING_GUID2, 16)
assert service.credentials == PAIRING_GUID2
assert storage.settings[0].protocols.dmap.credentials == PAIRING_GUID2
async def test_failed_pairing(mock_pairing):
_, zeroconf, _ = await mock_pairing()
url = pairing_url(zeroconf, "wrong")
data, status = await utils.simple_get(url)
assert status == 500
|
2,674 |
test require group
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Tests `requires_provides.py`.
"""
import unittest
from antlir.compiler.requires_provides import (
_normalize_path,
ProvidesDirectory,
ProvidesDoNotAccess,
ProvidesFile,
ProvidesGroup,
ProvidesKey,
ProvidesSymlink,
ProvidesUser,
RequireDirectory,
RequireFile,
RequireGroup,
RequireKey,
RequireSymlink,
RequireUser,
)
from antlir.fs_utils import Path
class RequiresProvidesTestCase(unittest.TestCase):
def test_normalize_path(self) -> None:
self.assertEqual(Path("/a"), _normalize_path(Path("a//.")))
self.assertEqual(Path("/b/d"), _normalize_path(Path("/b/c//../d")))
self.assertEqual(Path("/x/y"), _normalize_path(Path("///x/./y/")))
def test_path_normalization(self) -> None:
self.assertEqual(Path("/a"), RequireDirectory(path=Path("a//.")).path)
self.assertEqual(
Path("/b/d"), ProvidesDirectory(path=Path("/b/c//../d")).req.path
)
self.assertEqual(Path("/x/y"), ProvidesFile(path=Path("///x/./y/")).req.path)
def test_provides_requires(self) -> None:
pf1 = ProvidesFile(path=Path("f"))
pf2 = ProvidesFile(path=Path("f/b"))
pf3 = ProvidesFile(path=Path("f/b/c"))
pd1 = ProvidesDirectory(path=Path("a"))
pd2 = ProvidesDirectory(path=Path("a/b"))
pd3 = ProvidesDirectory(path=Path("a/b/c"))
provides = [pf1, pf2, pf3, pd1, pd2, pd3]
rf1 = RequireFile(path=Path("f"))
rf2 = RequireFile(path=Path("f/b"))
rf3 = RequireFile(path=Path("f/b/c"))
rd1 = RequireDirectory(path=Path("a"))
rd2 = RequireDirectory(path=Path("a/b"))
rd3 = RequireDirectory(path=Path("a/b/c"))
requires = [rf1, rf2, rf3, rd1, rd2, rd3]
for p in provides:
for r in requires:
self.assertEqual(
p.req.path == r.path,
p.provides(r),
f"{p}.provides({r})",
)
def test_provides_do_not_access(self) -> None:
self.assertFalse(
ProvidesDoNotAccess(path=Path("//a/b")).provides(
RequireFile(path=Path("/a/b"))
)
)
def test_with_new_path(self) -> None:
for new_path in ["b", "b/", "/b", "/../a/../b/c/.."]:
self.assertEqual(
ProvidesDirectory(path=Path("unused")).with_new_path(Path(new_path)),
ProvidesDirectory(path=Path("b")),
)
def test_provides_path_object_path(self) -> None:
p = Path("/a/b/c")
self.assertEqual(p, ProvidesDirectory(p).path())
self.assertEqual(p, ProvidesDirectory(p).path())
def METHOD_NAME(self) -> None:
groupname = "foo"
g = RequireGroup(groupname)
self.assertEqual(g.name, groupname)
def test_provides_group(self) -> None:
groupname = "foo"
pg = ProvidesGroup(groupname)
# pyre-fixme[16]: `Requirement` has no attribute `name`.
self.assertEqual(pg.req.name, groupname)
self.assertTrue(pg.provides(RequireGroup(groupname)))
def test_require_user(self) -> None:
username = "user"
ru = RequireUser(username)
self.assertEqual(ru.name, username)
ru2 = RequireUser(username)
self.assertEqual(ru, ru2)
def test_provides_user(self) -> None:
username = "user"
pu = ProvidesUser(username)
# pyre-fixme[16]: `Requirement` has no attribute `name`.
self.assertEqual(pu.req.name, username)
self.assertTrue(pu.provides(RequireUser(username)))
self.assertFalse(pu.provides(RequireUser("user2")))
def test_require_symlink(self) -> None:
path = Path("/foo")
target = Path("/bar")
rs = RequireSymlink(path=path, target=target)
self.assertEqual(rs.path, path)
self.assertEqual(rs.target, target)
def test_provides_symlink(self) -> None:
path = Path("/foo")
target = Path("/bar")
ps = ProvidesSymlink(path=path, target=target)
rs = RequireSymlink(path=path, target=target)
self.assertEqual(ps.req, rs)
self.assertTrue(ps.provides(rs))
# Symlinks and files/dirs are different now
self.assertFalse(ps.provides(RequireFile(path)))
self.assertFalse(ps.provides(RequireDirectory(path)))
new_path = Path("/baz")
ps2 = ps.with_new_path(new_path)
rs2 = RequireSymlink(path=new_path, target=target)
self.assertEqual(ps2.req, rs2)
self.assertFalse(ps2.provides(rs))
self.assertTrue(ps2.provides(rs2))
def test_require_key(self) -> None:
rk = RequireKey(key="key")
self.assertEqual(rk.key, "key")
def test_provides_key(self) -> None:
pk = ProvidesKey(key="key")
rk = RequireKey(key="key")
self.assertTrue(pk.provides(rk))
|
2,675 |
handle connect
|
# this code based on Daniel Krech's RDFLib HTTP client code (see rdflib.dev)
import sys
import socket
from supervisor.compat import as_bytes
from supervisor.compat import as_string
from supervisor.compat import encodestring
from supervisor.compat import PY2
from supervisor.compat import urlparse
from supervisor.medusa import asynchat_25 as asynchat
CR = b'\x0d'
LF = b'\x0a'
CRLF = CR+LF
class Listener(object):
def status(self, url, status):
pass
def error(self, url, error):
sys.stderr.write("%s %s\n" % (url, error))
def response_header(self, url, name, value):
pass
def done(self, url):
pass
def feed(self, url, data):
try:
sdata = as_string(data)
except UnicodeDecodeError:
sdata = 'Undecodable: %r' % data
# We've got Unicode data in sdata now, but writing to stdout sometimes
# fails - see issue #1231.
try:
sys.stdout.write(sdata)
except UnicodeEncodeError:
if PY2:
# This might seem like The Wrong Thing To Do (writing bytes
# rather than text to an output stream), but it seems to work
# OK for Python 2.7.
sys.stdout.write(data)
else:
s = ('Unable to write Unicode to stdout because it has '
'encoding %s' % sys.stdout.encoding)
raise ValueError(s)
sys.stdout.flush()
def close(self, url):
pass
class HTTPHandler(asynchat.async_chat):
def __init__(
self,
listener,
username='',
password=None,
conn=None,
map=None
):
asynchat.async_chat.__init__(self, conn, map)
self.listener = listener
self.user_agent = 'Supervisor HTTP Client'
self.buffer = b''
self.set_terminator(CRLF)
self.connected = 0
self.part = self.status_line
self.chunk_size = 0
self.chunk_read = 0
self.length_read = 0
self.length = 0
self.encoding = None
self.username = username
self.password = password
self.url = None
self.error_handled = False
def get(self, serverurl, path=''):
if self.url is not None:
raise AssertionError('Already doing a get')
self.url = serverurl + path
scheme, host, path_ignored, params, query, fragment = urlparse.urlparse(
self.url)
if not scheme in ("http", "unix"):
raise NotImplementedError
self.host = host
if ":" in host:
hostname, port = host.split(":", 1)
port = int(port)
else:
hostname = host
port = 80
self.path = path
self.port = port
if scheme == "http":
ip = hostname
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((ip, self.port))
elif scheme == "unix":
socketname = serverurl[7:]
self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.connect(socketname)
def close(self):
self.listener.close(self.url)
self.connected = 0
self.del_channel()
self.socket.close()
self.url = "CLOSED"
def header(self, name, value):
self.push('%s: %s' % (name, value))
self.push(CRLF)
def handle_error(self):
if self.error_handled:
return
if 1 or self.connected:
t,v,tb = sys.exc_info()
msg = 'Cannot connect, error: %s (%s)' % (t, v)
self.listener.error(self.url, msg)
self.part = self.ignore
self.close()
self.error_handled = True
del t
del v
del tb
def METHOD_NAME(self):
self.connected = 1
method = "GET"
version = "HTTP/1.1"
self.push("%s %s %s" % (method, self.path, version))
self.push(CRLF)
self.header("Host", self.host)
self.header('Accept-Encoding', 'chunked')
self.header('Accept', '*/*')
self.header('User-agent', self.user_agent)
if self.password:
auth = '%s:%s' % (self.username, self.password)
auth = as_string(encodestring(as_bytes(auth))).strip()
self.header('Authorization', 'Basic %s' % auth)
self.push(CRLF)
self.push(CRLF)
def feed(self, data):
self.listener.feed(self.url, data)
def collect_incoming_data(self, bytes):
self.buffer = self.buffer + bytes
if self.part==self.body:
self.feed(self.buffer)
self.buffer = b''
def found_terminator(self):
self.part()
self.buffer = b''
def ignore(self):
self.buffer = b''
def status_line(self):
line = self.buffer
version, status, reason = line.split(None, 2)
status = int(status)
if not version.startswith(b'HTTP/'):
raise ValueError(line)
self.listener.status(self.url, status)
if status == 200:
self.part = self.headers
else:
self.part = self.ignore
msg = 'Cannot read, status code %s' % status
self.listener.error(self.url, msg)
self.close()
return version, status, reason
def headers(self):
line = self.buffer
if not line:
if self.encoding == b'chunked':
self.part = self.chunked_size
else:
self.part = self.body
self.set_terminator(self.length)
else:
name, value = line.split(b':', 1)
if name and value:
name = name.lower()
value = value.strip()
if name == b'transfer-encoding':
self.encoding = value
elif name == b'content-length':
self.length = int(value)
self.response_header(name, value)
def response_header(self, name, value):
self.listener.response_header(self.url, name, value)
def body(self):
self.done()
self.close()
def done(self):
self.listener.done(self.url)
def chunked_size(self):
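# HTTP/1.1 chunked transfer encoding: each chunk is preceded by its size as a
# hexadecimal line; a size of zero marks the final chunk and is followed by an
# optional trailer.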
line = self.buffer
if not line:
return
chunk_size = int(line.split()[0], 16)
if chunk_size==0:
self.part = self.trailer
else:
self.set_terminator(chunk_size)
self.part = self.chunked_body
self.length += chunk_size
def chunked_body(self):
line = self.buffer
self.set_terminator(CRLF)
self.part = self.chunked_size
self.feed(line)
def trailer(self):
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1
# trailer = *(entity-header CRLF)
line = self.buffer
if line == CRLF:
self.done()
self.close()
|
2,676 |
assert correct yesno call
|
"""Tests for certbot._internal.eff."""
import datetime
import sys
import unittest
from unittest import mock
import josepy
import pytest
import pytz
import requests
from acme import messages
from certbot._internal import account
from certbot._internal import constants
import certbot.tests.util as test_util
_KEY = josepy.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))
class SubscriptionTest(test_util.ConfigTestCase):
"""Abstract class for subscription tests."""
def setUp(self):
super().setUp()
self.account = account.Account(
regr=messages.RegistrationResource(
uri=None, body=messages.Registration(),
new_authzr_uri='hi'),
key=_KEY,
meta=account.Account.Meta(
creation_host='test.certbot.org',
creation_dt=datetime.datetime(
2015, 7, 4, 14, 4, 10, tzinfo=pytz.UTC)))
self.config.email = '[email protected]'
self.config.eff_email = None
class PrepareSubscriptionTest(SubscriptionTest):
"""Tests for certbot._internal.eff.prepare_subscription."""
def _call(self):
from certbot._internal.eff import prepare_subscription
prepare_subscription(self.config, self.account)
@test_util.patch_display_util()
@mock.patch("certbot._internal.eff.display_util.notify")
def test_failure(self, mock_notify, mock_get_utility):
self.config.email = None
self.config.eff_email = True
self._call()
actual = mock_notify.call_args[0][0]
expected_part = "because you didn't provide an e-mail address"
assert expected_part in actual
assert self.account.meta.register_to_eff is None
@test_util.patch_display_util()
def test_will_not_subscribe_with_no_prompt(self, mock_get_utility):
self.config.eff_email = False
self._call()
self._assert_no_get_utility_calls(mock_get_utility)
assert self.account.meta.register_to_eff is None
@test_util.patch_display_util()
def test_will_subscribe_with_no_prompt(self, mock_get_utility):
self.config.eff_email = True
self._call()
self._assert_no_get_utility_calls(mock_get_utility)
assert self.account.meta.register_to_eff == self.config.email
@test_util.patch_display_util()
def test_will_not_subscribe_with_prompt(self, mock_get_utility):
mock_get_utility().yesno.return_value = False
self._call()
assert not mock_get_utility().add_message.called
self.METHOD_NAME(mock_get_utility)
assert self.account.meta.register_to_eff is None
@test_util.patch_display_util()
def test_will_subscribe_with_prompt(self, mock_get_utility):
mock_get_utility().yesno.return_value = True
self._call()
assert not mock_get_utility().add_message.called
self.METHOD_NAME(mock_get_utility)
assert self.account.meta.register_to_eff == self.config.email
def _assert_no_get_utility_calls(self, mock_get_utility):
assert not mock_get_utility().yesno.called
assert not mock_get_utility().add_message.called
def METHOD_NAME(self, mock_get_utility):
assert mock_get_utility().yesno.called
call_args, call_kwargs = mock_get_utility().yesno.call_args
actual = call_args[0]
expected_part = 'Electronic Frontier Foundation'
assert expected_part in actual
assert not call_kwargs.get('default', True)
class HandleSubscriptionTest(SubscriptionTest):
"""Tests for certbot._internal.eff.handle_subscription."""
def _call(self):
from certbot._internal.eff import handle_subscription
handle_subscription(self.config, self.account)
@mock.patch('certbot._internal.eff.subscribe')
def test_no_subscribe(self, mock_subscribe):
self._call()
assert mock_subscribe.called is False
@mock.patch('certbot._internal.eff.subscribe')
def test_subscribe(self, mock_subscribe):
self.account.meta = self.account.meta.update(register_to_eff=self.config.email)
self._call()
assert mock_subscribe.called
assert mock_subscribe.call_args[0][0] == self.config.email
class SubscribeTest(unittest.TestCase):
"""Tests for certbot._internal.eff.subscribe."""
def setUp(self):
self.email = '[email protected]'
self.json = {'status': True}
self.response = mock.Mock(ok=True)
self.response.json.return_value = self.json
patcher = mock.patch("certbot._internal.eff.display_util.notify")
self.mock_notify = patcher.start()
self.addCleanup(patcher.stop)
@mock.patch('certbot._internal.eff.requests.post')
def _call(self, mock_post):
mock_post.return_value = self.response
from certbot._internal.eff import subscribe
subscribe(self.email)
self._check_post_call(mock_post)
def _check_post_call(self, mock_post):
assert mock_post.call_count == 1
call_args, call_kwargs = mock_post.call_args
assert call_args[0] == constants.EFF_SUBSCRIBE_URI
data = call_kwargs.get('data')
assert data is not None
assert data.get('email') == self.email
def test_bad_status(self):
self.json['status'] = False
self._call()
actual = self._get_reported_message()
expected_part = 'because your e-mail address appears to be invalid.'
assert expected_part in actual
def test_not_ok(self):
self.response.ok = False
self.response.raise_for_status.side_effect = requests.exceptions.HTTPError
self._call()
actual = self._get_reported_message()
unexpected_part = 'because'
assert unexpected_part not in actual
def test_response_not_json(self):
self.response.json.side_effect = ValueError()
self._call()
actual = self._get_reported_message()
expected_part = 'problem'
assert expected_part in actual
def test_response_json_missing_status_element(self):
self.json.clear()
self._call()
actual = self._get_reported_message()
expected_part = 'problem'
assert expected_part in actual
def _get_reported_message(self):
assert self.mock_notify.called
return self.mock_notify.call_args[0][0]
@test_util.patch_display_util()
def test_subscribe(self, mock_get_utility):
self._call()
assert mock_get_utility.called is False
if __name__ == '__main__':
sys.exit(pytest.main(sys.argv[1:] + [__file__])) # pragma: no cover
|
2,677 |
test arithmetic drops references
|
import sys
import os
import mmap
import pytest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryFile
from numpy import (
memmap, sum, average, prod, ndarray, isscalar, add, subtract, multiply)
from numpy import arange, allclose, asarray
from numpy.testing import (
assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY,
break_cycles
)
class TestMemmap:
def setup_method(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.shape = (3, 4)
self.dtype = 'float32'
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
def teardown_method(self):
self.tmpfp.close()
self.data = None
if IS_PYPY:
break_cycles()
break_cycles()
def test_roundtrip(self):
# Write data to file
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp # Test __del__ machinery, which handles cleanup
# Read data back from file
newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
assert_equal(newfp.flags.writeable, False)
def test_open_with_filename(self, tmp_path):
tmpname = tmp_path / 'mmap'
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp
def test_unnamed_file(self):
with TemporaryFile() as f:
fp = memmap(f, dtype=self.dtype, shape=self.shape)
del fp
def test_attributes(self):
offset = 1
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
assert_equal(offset, fp.offset)
assert_equal(mode, fp.mode)
del fp
def test_filename(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
abspath = Path(os.path.abspath(tmpname))
fp[:] = self.data[:]
assert_equal(abspath, fp.filename)
b = fp[:1]
assert_equal(abspath, b.filename)
del b
del fp
def test_path(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
shape=self.shape)
# os.path.realpath does not resolve symlinks on Windows
# see: https://bugs.python.org/issue9949
# use Path.resolve, just as memmap class does internally
abspath = str(Path(tmpname).resolve())
fp[:] = self.data[:]
assert_equal(abspath, str(fp.filename.resolve()))
b = fp[:1]
assert_equal(abspath, str(b.filename.resolve()))
del b
del fp
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
assert_equal(fp.filename, self.tmpfp.name)
@pytest.mark.skipif(sys.platform == 'gnu0',
reason="Known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
assert_equal(fp[0], self.data[0])
fp.flush()
def test_del(self):
# Make sure a view does not delete the underlying mmap
fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp_base[0] = 5
fp_view = fp_base[0:1]
assert_equal(fp_view[0], 5)
del fp_view
# Should still be able to access and assign values after
# deleting the view
assert_equal(fp_base[0], 5)
fp_base[0] = 6
assert_equal(fp_base[0], 6)
def METHOD_NAME(self):
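# The intent here: an arithmetic result, even if it happens to come back as a
# memmap instance, must not share the operand's underlying mmap object, so
# temporary results do not keep the mapped file pinned.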
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = (fp + 10)
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[(1, 2), (2, 3)]
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_slicing_keeps_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
assert_(fp[:2, :2]._mmap is fp._mmap)
def test_view(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
new1 = fp.view()
new2 = new1.view()
assert_(new1.base is fp)
assert_(new2.base is fp)
new_array = asarray(fp)
assert_(new_array.base is fp)
def test_ufunc_return_ndarray(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
with suppress_warnings() as sup:
sup.filter(FutureWarning, "np.average currently does not preserve")
for unary_op in [sum, average, prod]:
result = unary_op(fp)
assert_(isscalar(result))
assert_(result.__class__ is self.data[0, 0].__class__)
assert_(unary_op(fp, axis=0).__class__ is ndarray)
assert_(unary_op(fp, axis=1).__class__ is ndarray)
for binary_op in [add, subtract, multiply]:
assert_(binary_op(fp, self.data).__class__ is ndarray)
assert_(binary_op(self.data, fp).__class__ is ndarray)
assert_(binary_op(fp, fp).__class__ is ndarray)
fp += 1
assert(fp.__class__ is memmap)
add(fp, 1, out=fp)
assert(fp.__class__ is memmap)
def test_getitem(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
assert_(fp[1:, :-1].__class__ is memmap)
# Fancy indexing returns a copy that is not memmapped
assert_(fp[[0, 1]].__class__ is ndarray)
def test_memmap_subclass(self):
class MemmapSubClass(memmap):
pass
fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
# We keep previous behavior for subclasses of memmap, i.e. the
# ufunc and __getitem__ output is never turned into a ndarray
assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
assert_(sum(fp).__class__ is MemmapSubClass)
assert_(fp[1:, :-1].__class__ is MemmapSubClass)
assert(fp[[0, 1]].__class__ is MemmapSubClass)
def test_mmap_offset_greater_than_allocation_granularity(self):
size = 5 * mmap.ALLOCATIONGRANULARITY
offset = mmap.ALLOCATIONGRANULARITY + 1
fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
assert_(fp.offset == offset)
def test_no_shape(self):
self.tmpfp.write(b'a'*16)
mm = memmap(self.tmpfp, dtype='float64')
assert_equal(mm.shape, (2,))
def test_empty_array(self):
# gh-12653
with pytest.raises(ValueError, match='empty file'):
memmap(self.tmpfp, shape=(0,4), mode='w+')
self.tmpfp.write(b'\0')
# ok now the file is not empty
memmap(self.tmpfp, shape=(0,4), mode='w+')
def test_shape_type(self):
memmap(self.tmpfp, shape=3, mode='w+')
memmap(self.tmpfp, shape=self.shape, mode='w+')
memmap(self.tmpfp, shape=list(self.shape), mode='w+')
memmap(self.tmpfp, shape=asarray(self.shape), mode='w+')
|
2,678 |
test reschedule public from other user
|
# -*- coding: utf-8 -*-
"""
Akvo RSR is covered by the GNU Affero General Public License.
See more details in the license.txt file located at the root folder of the Akvo RSR module.
For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
"""
from unittest.mock import ANY
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN
from akvo.rsr.models import IndicatorPeriodAggregationJob
from akvo.rsr.permissions import GROUP_NAME_ME_MANAGERS
from akvo.rsr.tests.base import BaseTestCase
from akvo.rsr.tests.usecases.jobs.test_aggregation import AggregationJobBaseTests
from akvo.rsr.usecases.jobs.aggregation import schedule_aggregation_jobs
class AnonymousUserTestCase(BaseTestCase):
def test_anonymous_user(self):
"""Shouldn't be able to access any resources even if the project is public"""
response = self.c.get("/rest/v1/jobs/indicator_period_aggregation/?format=json")
self.assertEqual(response.status_code, HTTP_403_FORBIDDEN)
class EndpointTestCase(AggregationJobBaseTests):
"""Tests accessing indicator period aggregation job REST endpoints"""
def setUp(self):
super().setUp()
# Create private child project in the default org
self.private_user = self.create_user("[email protected]", "password", is_superuser=False)
self.private_project = self.create_project("Super private project", public=False)
self.make_parent(self.project, self.private_project)
self.private_project.set_reporting_org(self.org)
self.private_project.import_results()
self.make_employment(self.private_user, self.org, GROUP_NAME_ME_MANAGERS)
self.private_result = self.result.child_results.first()
self.private_indicator = self.indicator.child_indicators.first()
self.private_period = self.period.child_periods.first()
self.private_job = schedule_aggregation_jobs(self.private_period)[0]
# Create private project in another org
self.other_private_user = self.create_user("[email protected]", "password", is_superuser=False)
self.other_private_project, self.other_private_org = self._make_project("Private", public=False)
self.make_employment(self.other_private_user, self.other_private_org, GROUP_NAME_ME_MANAGERS)
self.other_private_result, self.other_private_indicator, self.other_private_period = \
self._make_results_framework(self.other_private_project)
self.other_private_job = schedule_aggregation_jobs(self.other_private_period)[0]
def test_super_user(self):
"""Super users should be able to access all jobs"""
self.c.login(username=self.user.username, password="password")
response = self.c.get("/rest/v1/jobs/indicator_period_aggregation/?format=json")
self.assertEqual(response.status_code, HTTP_200_OK)
data = response.json()
self.assertEqual(data.get("count"), 3)
results = data.get("results")
self.assertEqual(
{result["id"] for result in results},
{self.job.id, self.private_job.id, self.other_private_job.id},
)
def test_private_user(self):
"""Test a private user accessing jobs from the default org"""
self.c.login(username=self.private_user.username, password="password")
self._test_private_user_access({self.job.id, self.private_job.id})
def test_other_private_user(self):
"""Test a private user accessing jobs from the private org"""
self.c.login(username=self.other_private_user.username, password="password")
self._test_private_user_access({self.job.id, self.other_private_job.id})
def _test_private_user_access(self, expected_job_id_set):
"""
Private users should only be able to access jobs of their private projects and those of public ones
"""
response = self.c.get("/rest/v1/jobs/indicator_period_aggregation/?format=json")
self.assertEqual(response.status_code, HTTP_200_OK)
data = response.json()
self.assertEqual(data.get("count"), len(expected_job_id_set))
self.assertEqual({result["id"] for result in data["results"]}, expected_job_id_set)
def test_filter_by_root_period(self):
"""Ensure that the jobs of the child periods are returned"""
self.c.login(username=self.user.username, password="password")
response = self.c.get("/rest/v1/jobs/indicator_period_aggregation/?format=json&filter={'root_period_id':%s}" % (
self.period.id
))
self.assertEqual(response.status_code, HTTP_200_OK)
data = response.json()
self.assertEqual(data["count"], 2)
self.assertEqual({result["id"] for result in data["results"]}, {self.job.id, self.private_job.id})
def test_filter_by_status(self):
"""Ensure filtering by status works"""
self.job.status = IndicatorPeriodAggregationJob.Status.FINISHED
self.job.save()
self.c.login(username=self.user.username, password="password")
response = self.c.get("/rest/v1/jobs/indicator_period_aggregation/?format=json&filter={'status':'%s'}" % (
self.job.status.FINISHED
))
self.assertEqual((response.status_code, response.json()), (HTTP_200_OK, ANY))
data = response.json()
self.assertEqual(data["count"], 1)
self.assertEqual(data["results"][0]["id"], self.job.id)
def test_get_by_id(self):
"""Ensure the detail view works as expected"""
self.c.login(username=self.user.username, password="password")
response = self.c.get(f"/rest/v1/jobs/indicator_period_aggregation/{self.private_job.id}/?format=json")
self.assertEqual(response.status_code, HTTP_200_OK)
data = response.json()
self.assertEqual(data["id"], self.private_job.id)
def test_reschedule(self):
"""Ensure rescheduling creates a new job and leaves the old one intact"""
self.c.login(username=self.user.username, password="password")
self.private_job.mark_maxxed()
response = self.c.post(
f"/rest/v1/jobs/indicator_period_aggregation/{self.private_job.id}/reschedule/?format=json"
)
self.assertEqual(response.status_code, HTTP_200_OK)
data = response.json()
self.assertNotEqual(data[0]["id"], self.private_job.id)
self.assertEqual(
IndicatorPeriodAggregationJob.objects.filter(period=self.private_period).count(),
2
)
def test_reschedule_unmaxxed_job(self):
"""Attempting to reschedule a job in the wrong status shouldn't be allowed"""
self.c.login(username=self.user.username, password="password")
response = self.c.post(
f"/rest/v1/jobs/indicator_period_aggregation/{self.private_job.id}/reschedule/?format=json"
)
self.assertEqual((response.status_code, response.content), (HTTP_400_BAD_REQUEST, ANY))
def test_reschedule_private_from_other_user(self):
"""Attempting a reschedule of a private job from a user of another org should fail"""
self.c.login(username=self.other_private_user.username, password="password")
self.private_job.mark_maxxed()
response = self.c.post(
f"/rest/v1/jobs/indicator_period_aggregation/{self.private_job.id}/reschedule/?format=json"
)
self.assertEqual(response.status_code, HTTP_403_FORBIDDEN)
def METHOD_NAME(self):
"""Attempting a reschedule of a public job from a user of another org should fail"""
self.c.login(username=self.other_private_user.username, password="password")
self.job.mark_maxxed()
response = self.c.post(
f"/rest/v1/jobs/indicator_period_aggregation/{self.job.id}/reschedule/?format=json"
)
self.assertEqual(response.status_code, HTTP_403_FORBIDDEN)
|
2,679 |
with is active
|
"""
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
from typing import Any, List
from uuid import uuid4
from dp.protos.cbsd_pb2 import (
CBSDStateResult,
EnodebdUpdateCbsdRequest,
InstallationParam,
LteChannel,
)
from google.protobuf.wrappers_pb2 import BoolValue, DoubleValue, StringValue
SOME_FCC_ID = "some_fcc_id"
USER_ID = "some_user_id"
UNREGISTERED = "unregistered"
class CbsdAPIDataBuilder:
def __init__(self):
self.payload = {
'fcc_id': SOME_FCC_ID,
'serial_number': str(uuid4()),
'user_id': USER_ID,
'cbsd_category': 'a',
'desired_state': 'registered',
'single_step_enabled': False,
'carrier_aggregation_enabled': False,
'grant_redundancy': False,
'grants': [],
'capabilities': {
'max_power': 20,
'min_power': 0,
'number_of_antennas': 2,
'max_ibw_mhz': 150,
},
'frequency_preferences': {
'bandwidth_mhz': 10,
'frequencies_mhz': [3625],
},
'installation_param': {
'antenna_gain': 15,
},
}
def with_serial_number(self, serial_number: str) -> CbsdAPIDataBuilder:
self.payload['serial_number'] = serial_number
return self
def with_fcc_id(self, fcc_id: str = SOME_FCC_ID) -> CbsdAPIDataBuilder:
self.payload['fcc_id'] = fcc_id
return self
def with_cbsd_category(self, cbsd_category: str) -> CbsdAPIDataBuilder:
self.payload['cbsd_category'] = cbsd_category
return self
def with_latitude_deg(self, latitude_deg: float = 10.5) -> CbsdAPIDataBuilder:
self.payload['installation_param']['latitude_deg'] = latitude_deg
return self
def with_longitude_deg(self, longitude_deg: float = 11.5) -> CbsdAPIDataBuilder:
self.payload['installation_param']['longitude_deg'] = longitude_deg
return self
def with_indoor_deployment(self, indoor_deployment: bool = False) -> CbsdAPIDataBuilder:
self.payload['installation_param']["indoor_deployment"] = indoor_deployment
return self
def with_full_installation_param(
self,
latitude_deg: float = 10.5,
longitude_deg: float = 11.5,
antenna_gain: int = 15,
indoor_deployment: bool = True,
height_m: float = 12.5,
height_type: str = "agl",
) -> CbsdAPIDataBuilder:
self.payload["installation_param"] = {
"latitude_deg": latitude_deg,
"longitude_deg": longitude_deg,
"antenna_gain": antenna_gain,
"indoor_deployment": indoor_deployment,
"height_m": height_m,
"height_type": height_type,
}
return self
def with_frequency_preferences(self, bandwidth_mhz: int, frequencies_mhz: List[int]) -> CbsdAPIDataBuilder:
self.payload["frequency_preferences"] = {
"bandwidth_mhz": bandwidth_mhz,
"frequencies_mhz": frequencies_mhz,
}
return self
def with_carrier_aggregation(self) -> CbsdAPIDataBuilder:
self.payload['grant_redundancy'] = True
self.payload['carrier_aggregation_enabled'] = True
return self
def with_desired_state(self, desired_state: str = "registered") -> CbsdAPIDataBuilder:
self.payload["desired_state"] = desired_state
return self
def without_grants(self) -> CbsdAPIDataBuilder:
self.payload['grants'] = []
return self
def with_grant(
self, bandwidth_mhz: int = 10, frequency_mhz: int = 3625, max_eirp: int = 28,
) -> CbsdAPIDataBuilder:
self.payload['grants'].append({
'bandwidth_mhz': bandwidth_mhz,
'frequency_mhz': frequency_mhz,
'max_eirp': max_eirp,
'state': 'authorized',
})
return self
def with_state(self, state: str = UNREGISTERED) -> CbsdAPIDataBuilder:
self.payload['state'] = state
return self
def with_cbsd_id(self, cbsd_id: str) -> CbsdAPIDataBuilder:
self.payload['cbsd_id'] = cbsd_id
return self
def METHOD_NAME(self, is_active: bool) -> CbsdAPIDataBuilder:
self.payload['is_active'] = is_active
return self
def with_single_step_enabled(self, enabled: bool) -> CbsdAPIDataBuilder:
self.payload['single_step_enabled'] = enabled
return self
def build_grant_state_data(self) -> CBSDStateResult:
# TODO rewrite builders to dataclasses
grants = [_api_to_proto_grant(g) for g in self.payload['grants']]
return CBSDStateResult(
radio_enabled=True,
carrier_aggregation_enabled=self.payload['carrier_aggregation_enabled'],
channel=grants[0],
channels=grants,
)
def build_enodebd_update_request(self, indoor_deployment=False, cbsd_category="a") -> EnodebdUpdateCbsdRequest:
return EnodebdUpdateCbsdRequest(
serial_number=self.payload["serial_number"],
installation_param=InstallationParam(
latitude_deg=DoubleValue(value=10.5),
longitude_deg=DoubleValue(value=11.5),
indoor_deployment=BoolValue(value=indoor_deployment),
height_type=StringValue(value="agl"),
height_m=DoubleValue(value=12.5),
),
cbsd_category=cbsd_category,
)
def _api_to_proto_grant(grant: dict[str, Any]) -> LteChannel:
frequency_mhz = grant['frequency_mhz']
bandwidth_mhz = grant['bandwidth_mhz']
max_eirp_dbm_mhz = grant['max_eirp']
frequency_hz = 10**6 * frequency_mhz
bandwidth_hz = 10**6 * bandwidth_mhz
return LteChannel(
low_frequency_hz=frequency_hz - bandwidth_hz // 2,
high_frequency_hz=frequency_hz + bandwidth_hz // 2,
max_eirp_dbm_mhz=max_eirp_dbm_mhz,
)
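# Example: a 10 MHz grant centred on 3625 MHz becomes the 3620-3630 MHz
# LteChannel (low/high are the centre frequency -/+ half the bandwidth, in Hz).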
|
2,680 |
test on stop
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from typing import List, Any
import time
import pytest
from devtools_testutils import recorded_by_proxy
from testcase import WebpubsubClientTest, WebpubsubClientPowerShellPreparer, on_group_message, TEST_RESULT
from azure.messaging.webpubsubclient.models import OnGroupDataMessageArgs, StartNotStoppedClientError
@pytest.mark.live_test_only
class TestWebpubsubClientSmoke(WebpubsubClientTest):
@WebpubsubClientPowerShellPreparer()
@recorded_by_proxy
def test_call_back_deadlock(self, webpubsubclient_connection_string):
client = self.create_client(connection_string=webpubsubclient_connection_string)
group_name = "test"
def on_group_message(msg: OnGroupDataMessageArgs):
client.send_to_group(group_name, msg.data, "text", no_echo=True)
with client:
client.join_group(group_name)
client.on("group-message", on_group_message)
client.send_to_group(group_name, "hello test_call_back_deadlock1", "text")
client.send_to_group(group_name, "hello test_call_back_deadlock2", "text")
client.send_to_group(group_name, "hello test_call_back_deadlock3", "text")
# sleep to make sure the callback has enough time to execute before stop
time.sleep(0.001)
@WebpubsubClientPowerShellPreparer()
@recorded_by_proxy
def test_context_manager(self, webpubsubclient_connection_string):
client = self.create_client(connection_string=webpubsubclient_connection_string)
with client:
group_name = "test"
client.join_group(group_name)
client.send_to_group(group_name, "test_context_manager", "text")
time.sleep(2.0)
assert client._sequence_id.sequence_id > 0
# test on_stop
@WebpubsubClientPowerShellPreparer()
@recorded_by_proxy
def METHOD_NAME(self, webpubsubclient_connection_string):
client = self.create_client(connection_string=webpubsubclient_connection_string)
def on_stop():
client._start()
with client:
# start client again after stop
client.on("stopped", on_stop)
assert client._is_connected()
client._stop()
time.sleep(1.0)
assert client._is_connected()
# remove stopped event and stop again
client.off("stopped", on_stop)
client._stop()
time.sleep(1.0)
assert not client._is_connected()
@WebpubsubClientPowerShellPreparer()
@recorded_by_proxy
def test_duplicated_start(self, webpubsubclient_connection_string):
client = self.create_client(connection_string=webpubsubclient_connection_string)
with pytest.raises(StartNotStoppedClientError):
with client:
client._start()
assert not client._is_connected()
@WebpubsubClientPowerShellPreparer()
@recorded_by_proxy
def test_duplicated_stop(self, webpubsubclient_connection_string):
client = self.create_client(connection_string=webpubsubclient_connection_string)
with client:
client._stop()
assert not client._is_connected()
@WebpubsubClientPowerShellPreparer()
@recorded_by_proxy
def test_send_event(self, webpubsubclient_connection_string):
client = self.create_client(connection_string=webpubsubclient_connection_string, message_retry_total=0)
with client:
            # please register the event handler in the Azure portal before running this test
client.send_event("event", "test_send_event", "text")
@WebpubsubClientPowerShellPreparer()
@recorded_by_proxy
def test_rejoin_group(self, webpubsubclient_connection_string):
def _test(enable_auto_rejoin, test_group_name, assert_func):
client = self.create_client(
connection_string=webpubsubclient_connection_string, auto_rejoin_groups=enable_auto_rejoin
)
group_name = test_group_name
client.on("group-message", on_group_message)
with client:
client.join_group(group_name)
with client:
time.sleep(1) # make sure rejoin group is called
client.send_to_group(group_name, "test_rejoin_group", "text")
time.sleep(1) # wait for on_group_message to be called
assert assert_func(test_group_name)
_test(enable_auto_rejoin=True, test_group_name="test_rejoin_group", assert_func=lambda x: x in TEST_RESULT)
_test(
enable_auto_rejoin=False,
test_group_name="test_disable_rejoin_group",
assert_func=lambda x: x not in TEST_RESULT,
)
|
2,681 |
test str repr returns expected string
|
# -------------------------------------------------------------------------------------------------
# Copyright (C) 2015-2023 Nautech Systems Pty Ltd. All rights reserved.
# https://nautechsystems.io
#
# Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at https://www.gnu.org/licenses/lgpl-3.0.en.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------------------------
from nautilus_trader.indicators.donchian_channel import DonchianChannel
from nautilus_trader.test_kit.providers import TestInstrumentProvider
from nautilus_trader.test_kit.stubs.data import TestDataStubs
AUDUSD_SIM = TestInstrumentProvider.default_fx_ccy("AUD/USD")
class TestDonchianChannel:
def setup(self):
# Fixture Setup
self.dc = DonchianChannel(10)
def test_name_returns_expected_name(self):
# Arrange, Act, Assert
assert self.dc.name == "DonchianChannel"
def METHOD_NAME(self):
# Arrange, Act, Assert
assert str(self.dc) == "DonchianChannel(10)"
assert repr(self.dc) == "DonchianChannel(10)"
def test_period_returns_expected_value(self):
# Arrange, Act, Assert
assert self.dc.period == 10
def test_initialized_without_inputs_returns_false(self):
# Arrange, Act, Assert
assert self.dc.initialized is False
def test_initialized_with_required_inputs_returns_true(self):
# Arrange
self.dc.update_raw(1.00000, 1.00000)
self.dc.update_raw(1.00000, 1.00000)
self.dc.update_raw(1.00000, 1.00000)
self.dc.update_raw(1.00000, 1.00000)
self.dc.update_raw(1.00000, 1.00000)
self.dc.update_raw(1.00000, 1.00000)
self.dc.update_raw(1.00000, 1.00000)
self.dc.update_raw(1.00000, 1.00000)
self.dc.update_raw(1.00000, 1.00000)
self.dc.update_raw(1.00000, 1.00000)
# Act, Assert
assert self.dc.initialized is True
def test_handle_quote_tick_updates_indicator(self):
# Arrange
indicator = DonchianChannel(10)
tick = TestDataStubs.quote_tick()
# Act
indicator.handle_quote_tick(tick)
# Assert
assert indicator.has_inputs
assert indicator.middle == 1.0
def test_handle_trade_tick_updates_indicator(self):
# Arrange
indicator = DonchianChannel(10)
tick = TestDataStubs.trade_tick()
# Act
indicator.handle_trade_tick(tick)
# Assert
assert indicator.has_inputs
assert indicator.middle == 1.0
def test_handle_bar_updates_indicator(self):
# Arrange
indicator = DonchianChannel(10)
bar = TestDataStubs.bar_5decimal()
# Act
indicator.handle_bar(bar)
# Assert
assert indicator.has_inputs
assert indicator.middle == 1.000025
def test_value_with_one_input_returns_expected_value(self):
# Arrange
self.dc.update_raw(1.00020, 1.00000)
# Act, Assert
assert self.dc.upper == 1.00020
assert self.dc.middle == 1.00010
assert self.dc.lower == 1.00000
def test_value_with_three_inputs_returns_expected_value(self):
# Arrange
self.dc.update_raw(1.00020, 1.00000)
self.dc.update_raw(1.00030, 1.00010)
self.dc.update_raw(1.00040, 1.00020)
# Act, Assert
assert self.dc.upper == 1.00040
assert self.dc.middle == 1.00020
assert self.dc.lower == 1.00000
def test_reset_successfully_returns_indicator_to_fresh_state(self):
# Arrange
self.dc.update_raw(1.00020, 1.00000)
self.dc.update_raw(1.00030, 1.00010)
self.dc.update_raw(1.00040, 1.00020)
# Act
self.dc.reset()
# Assert
assert not self.dc.initialized
assert self.dc.upper == 0
assert self.dc.middle == 0
assert self.dc.lower == 0
|
2,682 |
test add season membership
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test the coordinate categorisation functions.
"""
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests # isort:skip
import warnings
import cf_units
import numpy as np
import iris
import iris.coord_categorisation as ccat
CATEGORISATION_FUNCS = (
ccat.add_day_of_month,
ccat.add_day_of_year,
ccat.add_weekday,
ccat.add_weekday_fullname,
ccat.add_weekday_number,
ccat.add_month,
ccat.add_month_fullname,
ccat.add_month_number,
ccat.add_year,
ccat.add_season,
ccat.add_season_number,
ccat.add_season_year,
ccat.add_season_membership,
)
class TestCategorisations(tests.IrisTest):
def setUp(self):
# make a series of 'day numbers' for the time, that slide across month
# boundaries
day_numbers = np.arange(0, 600, 27, dtype=np.int32)
cube = iris.cube.Cube(
day_numbers, long_name="test cube", units="metres"
)
# use day numbers as data values also (don't actually use this for
# anything)
cube.data = day_numbers
time_coord = iris.coords.DimCoord(
day_numbers,
standard_name="time",
units=cf_units.Unit("days since epoch", "standard"),
)
cube.add_dim_coord(time_coord, 0)
self.cube = cube
self.time_coord = time_coord
def test_bad_coord(self):
for func in CATEGORISATION_FUNCS:
kwargs = {"name": "my_category"}
if func is ccat.add_season_membership:
kwargs["season"] = "djf"
with self.assertRaises(iris.exceptions.CoordinateNotFoundError):
func(self.cube, "DOES NOT EXIST", **kwargs)
def test_explicit_result_names(self):
result_name = "my_category"
fmt = "Missing/incorrectly named result for {0!r}"
for func in CATEGORISATION_FUNCS:
# Specify source coordinate by name
cube = self.cube.copy()
kwargs = {"name": result_name}
if func is ccat.add_season_membership:
kwargs["season"] = "djf"
with warnings.catch_warnings(record=True):
func(cube, "time", **kwargs)
result_coords = cube.coords(result_name)
self.assertEqual(len(result_coords), 1, fmt.format(func.__name__))
# Specify source coordinate by coordinate reference
cube = self.cube.copy()
time = cube.coord("time")
with warnings.catch_warnings(record=True):
func(cube, time, **kwargs)
result_coords = cube.coords(result_name)
self.assertEqual(len(result_coords), 1, fmt.format(func.__name__))
def test_basic(self):
cube = self.cube
time_coord = self.time_coord
ccat.add_year(cube, time_coord, "my_year")
ccat.add_day_of_month(cube, time_coord, "my_day_of_month")
ccat.add_day_of_year(cube, time_coord, "my_day_of_year")
ccat.add_month(cube, time_coord, "my_month")
ccat.add_month_fullname(cube, time_coord, "my_month_fullname")
ccat.add_month_number(cube, time_coord, "my_month_number")
ccat.add_weekday(cube, time_coord, "my_weekday")
ccat.add_weekday_number(cube, time_coord, "my_weekday_number")
ccat.add_weekday_fullname(cube, time_coord, "my_weekday_fullname")
ccat.add_season(cube, time_coord, "my_season")
ccat.add_season_number(cube, time_coord, "my_season_number")
ccat.add_season_year(cube, time_coord, "my_season_year")
# also test 'generic' categorisation interface
def _month_in_quarter(coord, pt_value):
date = coord.units.num2date(pt_value)
return (date.month - 1) % 3
ccat.add_categorised_coord(
cube, "my_month_in_quarter", time_coord, _month_in_quarter
)
# To ensure consistent results between 32-bit and 64-bit
# platforms, ensure all the numeric categorisation coordinates
# are always stored as int64.
for coord in cube.coords():
if coord.long_name is not None and coord.points.dtype.kind == "i":
coord.points = coord.points.astype(np.int64)
# check values
self.assertCML(cube, ("categorisation", "quickcheck.cml"))
def test_add_season_nonstandard(self):
# season categorisations work for non-standard seasons?
cube = self.cube
time_coord = self.time_coord
seasons = ["djfm", "amjj", "ason"]
ccat.add_season(cube, time_coord, name="seasons", seasons=seasons)
ccat.add_season_number(
cube, time_coord, name="season_numbers", seasons=seasons
)
ccat.add_season_year(
cube, time_coord, name="season_years", seasons=seasons
)
self.assertCML(cube, ("categorisation", "customcheck.cml"))
def METHOD_NAME(self):
# season membership identifies correct seasons?
season = "djf"
ccat.add_season_membership(self.cube, "time", season, name="in_season")
ccat.add_season(self.cube, "time")
coord_season = self.cube.coord("season")
coord_membership = self.cube.coord("in_season")
season_locations = np.where(coord_season.points == season)[0]
membership_locations = np.where(coord_membership.points)[0]
self.assertArrayEqual(membership_locations, season_locations)
def test_add_season_invalid_spec(self):
# custom seasons with an invalid season raises an error?
seasons = ("djf", "maj", "jja", "son") # MAJ not a season!
for func in (
ccat.add_season,
ccat.add_season_year,
ccat.add_season_number,
):
with self.assertRaises(ValueError):
func(self.cube, "time", name="my_category", seasons=seasons)
def test_add_season_repeated_months(self):
# custom seasons with repeated months raises an error?
seasons = ("djfm", "mam", "jja", "son")
for func in (
ccat.add_season,
ccat.add_season_year,
ccat.add_season_number,
):
with self.assertRaises(ValueError):
func(self.cube, "time", name="my_category", seasons=seasons)
def test_add_season_missing_months(self):
# custom seasons with missing months raises an error?
seasons = ("djfm", "amjj")
for func in (
ccat.add_season,
ccat.add_season_year,
ccat.add_season_number,
):
with self.assertRaises(ValueError):
func(self.cube, "time", name="my_category", seasons=seasons)
def test_add_season_membership_invalid_spec(self):
season = "maj" # not a season!
with self.assertRaises(ValueError):
ccat.add_season_membership(
self.cube, "time", season, name="maj_season"
)
if __name__ == "__main__":
tests.main()
|
2,683 |
test python recall
|
# Copyright (c) Recommenders contributors.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
import pytest
from sklearn.preprocessing import minmax_scale
from recommenders.utils.constants import (
DEFAULT_USER_COL,
DEFAULT_ITEM_COL,
DEFAULT_RATING_COL,
DEFAULT_PREDICTION_COL,
SEED,
)
from recommenders.evaluation.python_evaluation import (
merge_rating_true_pred,
merge_ranking_true_pred,
rmse,
mae,
rsquared,
exp_var,
get_top_k_items,
precision_at_k,
recall_at_k,
ndcg_at_k,
map_at_k,
auc,
logloss,
)
import random
from recommenders.utils.timer import Timer
random.seed(SEED)
DATA_USER_NUM = 5000
DATA_ITEM_NUM = DATA_USER_NUM * 2
DATA_SAMPLE_NUM = DATA_USER_NUM * 1000
DATA_RATING_MAX = 5
TOL = 0.1
# fmt: off
@pytest.fixture
def rating_true():
return pd.DataFrame(
{
DEFAULT_USER_COL: np.random.choice(range(0, DATA_USER_NUM), DATA_SAMPLE_NUM),
DEFAULT_ITEM_COL: np.random.choice(range(0, DATA_ITEM_NUM), DATA_SAMPLE_NUM),
DEFAULT_RATING_COL: np.random.choice(range(1, DATA_RATING_MAX+1), DATA_SAMPLE_NUM)
}
)
@pytest.fixture
def rating_pred():
return pd.DataFrame(
{
DEFAULT_USER_COL: np.random.choice(range(0, DATA_USER_NUM), DATA_SAMPLE_NUM),
DEFAULT_ITEM_COL: np.random.choice(range(0, DATA_ITEM_NUM), DATA_SAMPLE_NUM),
DEFAULT_PREDICTION_COL: np.random.choice(range(1, DATA_RATING_MAX+1), DATA_SAMPLE_NUM)
}
)
# fmt: on
@pytest.fixture
def rating_true_binary(rating_true):
# Convert true ratings to binary
rating_true[DEFAULT_RATING_COL] = rating_true[DEFAULT_RATING_COL].apply(
lambda x: 1.0 if x >= 3 else 0.0
)
return rating_true
@pytest.fixture
def rating_pred_binary(rating_pred):
# Normalize the predictions
rating_pred[DEFAULT_PREDICTION_COL] = minmax_scale(
rating_pred[DEFAULT_PREDICTION_COL].astype(float)
)
return rating_pred
# The following time thresholds are benchmarked on Azure
# Standard_A8m_v2 with 8 vCPUs and 64 GiB memory.
def test_merge_rating(rating_true, rating_pred):
with Timer() as t:
merge_rating_true_pred(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
)
assert t.interval < 40 * (1 + TOL)
def test_merge_ranking(rating_true, rating_pred):
with Timer() as t:
merge_ranking_true_pred(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
relevancy_method="top_k",
)
assert t.interval < 45 * (1 + TOL)
def test_python_rmse(rating_true, rating_pred):
with Timer() as t:
rmse(
rating_true=rating_true,
rating_pred=rating_pred,
col_prediction=DEFAULT_PREDICTION_COL,
)
assert t.interval < 40 * (1 + TOL)
def test_python_mae(rating_true, rating_pred):
with Timer() as t:
mae(
rating_true=rating_true,
rating_pred=rating_pred,
col_prediction=DEFAULT_PREDICTION_COL,
)
assert t.interval < 40 * (1 + TOL)
def test_python_rsquared(rating_true, rating_pred):
with Timer() as t:
rsquared(
rating_true=rating_true,
rating_pred=rating_pred,
col_prediction=DEFAULT_PREDICTION_COL,
)
assert t.interval < 40 * (1 + TOL)
def test_python_exp_var(rating_true, rating_pred):
with Timer() as t:
exp_var(
rating_true=rating_true,
rating_pred=rating_pred,
col_prediction=DEFAULT_PREDICTION_COL,
)
assert t.interval < 40 * (1 + TOL)
def test_get_top_k_items(rating_true):
with Timer() as t:
get_top_k_items(
dataframe=rating_true,
col_user=DEFAULT_USER_COL,
col_rating=DEFAULT_RATING_COL,
k=10,
)
assert t.interval < 10 * (1 + TOL)
def test_get_top_k_items_largek(rating_true):
with Timer() as t:
get_top_k_items(
dataframe=rating_true,
col_user=DEFAULT_USER_COL,
col_rating=DEFAULT_RATING_COL,
k=1000,
)
assert t.interval < 10 * (1 + TOL)
def test_python_ndcg_at_k(rating_true, rating_pred):
with Timer() as t:
ndcg_at_k(
rating_true=rating_true,
rating_pred=rating_pred,
col_prediction=DEFAULT_PREDICTION_COL,
k=10,
)
assert t.interval < 80 * (1 + TOL)
def test_python_map_at_k(rating_true, rating_pred):
with Timer() as t:
map_at_k(
rating_true=rating_true,
rating_pred=rating_pred,
col_prediction=DEFAULT_PREDICTION_COL,
k=10,
)
assert t.interval < 50 * (1 + TOL)
def test_python_precision(rating_true, rating_pred):
with Timer() as t:
precision_at_k(rating_true, rating_pred, k=10)
assert t.interval < 50 * (1 + TOL)
def METHOD_NAME(rating_true, rating_pred):
with Timer() as t:
recall_at_k(
rating_true=rating_true,
rating_pred=rating_pred,
col_prediction=DEFAULT_PREDICTION_COL,
k=10,
)
assert t.interval < 50 * (1 + TOL)
def test_python_auc(rating_true_binary, rating_pred_binary):
with Timer() as t:
auc(
rating_true=rating_true_binary,
rating_pred=rating_pred_binary,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
)
assert t.interval < 45 * (1 + TOL)
def test_python_logloss(rating_true_binary, rating_pred_binary):
with Timer() as t:
logloss(
rating_true=rating_true_binary,
rating_pred=rating_pred_binary,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
)
assert t.interval < 45 * (1 + TOL)
|
2,684 |
get unicode category
|
'''
unicode_data.py
---------------
Python's unicodedata module uses an outdated spec (Unicode 5.2) and since
e.g. unicode categories are used in tokenization, we'd like to keep this
as up-to-date as possible with the latest standard.
'''
import csv
import os
import sys
from collections import defaultdict, namedtuple
this_dir = os.path.realpath(os.path.dirname(__file__))
sys.path.append(os.path.realpath(os.path.join(os.pardir, os.pardir)))
from geodata.file_utils import download_file
from geodata.string_utils import wide_unichr, wide_ord
from unicode_properties import *
from unicode_paths import UNICODE_DATA_DIR
UNIDATA_URL = 'http://unicode.org/Public/UNIDATA/UnicodeData.txt'
UNIDATA_DIR = os.path.join(UNICODE_DATA_DIR, 'unidata')
LOCAL_UNIDATA_FILE = os.path.join(UNIDATA_DIR, 'UnicodeData.txt')
unicode_categories = defaultdict(list)
unicode_blocks = defaultdict(list)
unicode_combining_classes = defaultdict(list)
unicode_general_categories = defaultdict(list)
unicode_scripts = defaultdict(list)
unicode_properties = {}
unicode_script_ids = {}
unicode_blocks = {}
unicode_category_aliases = {}
unicode_property_aliases = {}
unicode_property_value_aliases = {}
unicode_word_breaks = {}
# Ref: ftp://ftp.unicode.org/Public/3.0-Update/UnicodeData-3.0.0.html
UNIDATA_FIELDS = [
'code',
'name',
'category',
'combining',
'bidi_category',
'decomp_mapping',
'decimal_value',
'digit_value',
'numeric_value',
'mirrored',
'unicode_1_name',
'comment',
'upper_mapping',
'lower_mapping',
'title_mapping',
]
UnicodeDataRow = namedtuple('UnicodeDataRow', ','.join(UNIDATA_FIELDS))
def parse_unicode_data():
'''
Parse UnicodeData.txt into namedtuples using UNIDATA_FIELDS
'''
if not os.path.exists(LOCAL_UNIDATA_FILE):
download_file(UNIDATA_URL, LOCAL_UNIDATA_FILE)
unidata_file = open(LOCAL_UNIDATA_FILE)
for line in csv.reader(unidata_file, delimiter=';'):
yield UnicodeDataRow(*line)
def iter_unicode_combining_classes():
return unicode_combining_classes.iteritems()
def iter_unicode_categories():
return unicode_categories.iteritems()
def METHOD_NAME(cat):
return unicode_categories[cat]
def get_unicode_combining_class(c):
return unicode_combining_classes[c]
def get_unicode_categories():
'''
Build dict of unicode categories e.g.
{
'Lu': ['A', 'B', 'C', ...]
'Ll': ['a', 'b', 'c', ...]
}
'''
categories = defaultdict(list)
for row in parse_unicode_data():
categories[row.category].append(wide_unichr(unicode_to_integer(row.code)))
return dict(categories)
def get_unicode_combining_classes():
'''
Build dict of unicode combining classes e.g.
{
'0': ['\x00', '\x01', \x02', ...]
}
'''
combining_classes = defaultdict(list)
for row in parse_unicode_data():
combining_classes[int(row.combining)].append(wide_unichr(unicode_to_integer(row.code)))
return dict(combining_classes)
unicode_category_aliases = {
'letter': 'L',
'lower': 'Ll',
'lowercase': 'Ll',
'lowercaseletter': 'Ll',
'upper': 'Lu',
'uppercase': 'Lu',
'uppercaseletter': 'Lu',
'title': 'Lt',
'nonspacing mark': 'Mn',
'mark': 'M',
}
COMBINING_CLASS_PROP = 'canonical_combining_class'
BLOCK_PROP = 'block'
GENERAL_CATEGORY_PROP = 'general_category'
SCRIPT_PROP = 'script'
WORD_BREAK_PROP = 'word_break'
def init_unicode_categories():
'''
Initialize module-level dictionaries
'''
global unicode_categories, unicode_general_categories, unicode_scripts, unicode_category_aliases
global unicode_blocks, unicode_combining_classes, unicode_properties, unicode_property_aliases
global unicode_property_value_aliases, unicode_scripts, unicode_script_ids, unicode_word_breaks
unicode_categories.update(get_unicode_categories())
unicode_combining_classes.update(get_unicode_combining_classes())
for key in unicode_categories.keys():
unicode_general_categories[key[0]].extend(unicode_categories[key])
script_chars = get_chars_by_script()
for i, script in enumerate(script_chars):
if script:
unicode_scripts[script.lower()].append(wide_unichr(i))
unicode_scripts = dict(unicode_scripts)
unicode_script_ids.update(build_master_scripts_list(script_chars))
unicode_blocks.update(get_unicode_blocks())
unicode_properties.update(get_unicode_properties())
unicode_property_aliases.update(get_property_aliases())
unicode_word_breaks.update(get_word_break_properties())
for key, value in get_property_value_aliases().iteritems():
key = unicode_property_aliases.get(key, key)
if key == GENERAL_CATEGORY_PROP:
for k, v in value.iteritems():
k = k.lower()
unicode_category_aliases[k] = v
if '_' in k:
unicode_category_aliases[k.replace('_', '')] = v
unicode_property_value_aliases[key] = value
regex_chars = re.compile(r'([\[\]\{\}\-\^])')
def replace_regex_chars(s):
return regex_chars.sub(r'\\\1', s)
def format_regex_char(i):
c = wide_unichr(i)
return replace_regex_chars(c.encode('unicode-escape'))
def make_char_set_regex(chars):
'''
Build a regex character set from a list of characters
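    Consecutive code points are collapsed into ranges; illustratively, the
    characters 'a', 'b', 'c', 'x' would produce u'[a-cx]'.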
'''
group_start = None
group_end = None
last_ord = -2
ords = map(wide_ord, chars)
ords.sort()
ords.append(None)
groups = []
for i, o in enumerate(ords):
if o is not None and o == last_ord + 1:
group_end = o
elif group_start is not None and group_end is not None:
groups.append('-'.join((format_regex_char(group_start), format_regex_char(group_end))))
group_end = None
group_start = o
elif group_start is not None and group_end is None:
groups.append(format_regex_char(group_start))
group_start = o
else:
group_start = o
last_ord = o
return u'[{}]'.format(u''.join(groups))
name_category = [
('control_chars', 'Cc'),
('other_format_chars', 'Cf'),
('other_not_assigned_chars', 'Cn'),
('other_private_use_chars', 'Co'),
('other_surrogate_chars', 'Cs'),
('letter_lower_chars', 'Ll'),
('letter_modifier_chars', 'Lm'),
('letter_other_chars', 'Lo'),
('letter_title_chars', 'Lt'),
('letter_upper_chars', 'Lu'),
('mark_spacing_combining_chars', 'Mc'),
('mark_enclosing_chars', 'Me'),
('mark_nonspacing_chars', 'Mn'),
('number_or_digit_chars', 'Nd'),
('number_letter_chars', 'Nl'),
('number_other_chars', 'No'),
('punct_connector_chars', 'Pc'),
('punct_dash_chars', 'Pd'),
('punct_close_chars', 'Pe'),
('punct_final_quote_chars', 'Pf'),
('punct_initial_quote_chars', 'Pi'),
('punct_other_chars', 'Po'),
('punct_open_chars', 'Ps'),
('currency_symbol_chars', 'Sc'),
('symbol_modifier_chars', 'Sk'),
('symbol_math_chars', 'Sm'),
('symbol_other_chars', 'So'),
('separator_line_chars', 'Zl'),
('separator_paragraph_chars', 'Zp'),
('space', 'Zs'),
]
def main():
init_unicode_categories()
for name, cat in name_category:
if cat not in unicode_categories:
continue
chars = unicode_categories[cat]
print u'{} = {};'.format(name, make_char_set_regex(chars))
if __name__ == '__main__':
main()
|
2,685 |
name
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetCassandraDataCenterResult',
'AwaitableGetCassandraDataCenterResult',
'get_cassandra_data_center',
'get_cassandra_data_center_output',
]
@pulumi.output_type
class GetCassandraDataCenterResult:
"""
A managed Cassandra data center.
"""
def __init__(__self__, id=None, METHOD_NAME=None, properties=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", METHOD_NAME)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the database account.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The name of the database account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.DataCenterResourceResponseProperties':
"""
Properties of a managed Cassandra data center.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetCassandraDataCenterResult(GetCassandraDataCenterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCassandraDataCenterResult(
id=self.id,
METHOD_NAME=self.METHOD_NAME,
properties=self.properties,
type=self.type)
def get_cassandra_data_center(cluster_name: Optional[str] = None,
data_center_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCassandraDataCenterResult:
"""
Get the properties of a managed Cassandra data center.
Azure REST API version: 2023-04-15.
:param str cluster_name: Managed Cassandra cluster name.
:param str data_center_name: Data center name in a managed Cassandra cluster.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['dataCenterName'] = data_center_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:documentdb:getCassandraDataCenter', __args__, opts=opts, typ=GetCassandraDataCenterResult).value
return AwaitableGetCassandraDataCenterResult(
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_cassandra_data_center)
def get_cassandra_data_center_output(cluster_name: Optional[pulumi.Input[str]] = None,
data_center_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCassandraDataCenterResult]:
"""
Get the properties of a managed Cassandra data center.
Azure REST API version: 2023-04-15.
:param str cluster_name: Managed Cassandra cluster name.
:param str data_center_name: Data center name in a managed Cassandra cluster.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
|
2,686 |
generate bias
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
def sample_program_configs(draw):
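    """Draw a randomized conv2d + batch_norm ProgramConfig; shapes, strides,
    paddings and batch-norm statistics are sampled via hypothesis and numpy."""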
#conv param
in_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=64), min_size=4, max_size=4))
kw = np.random.randint(1, 9)
kh = np.random.randint(1, 9)
cout = np.random.randint(1, 128)
cin = np.random.randint(1, 128)
scale_in = draw(st.floats(min_value=0.001, max_value=0.1))
scale_out = draw(st.floats(min_value=0.001, max_value=0.1))
weight_shape = [cout, cin, kh, kw]
groups = draw(st.sampled_from([1, 2, cin]))
val = in_shape[1] * groups
assume(val == cin)
assume(in_shape[1] == weight_shape[1])
assume(in_shape[2] >= weight_shape[2])
assume(in_shape[3] >= weight_shape[3])
paddings = draw(
st.lists(
st.integers(
min_value=0, max_value=2), min_size=2, max_size=2))
dilations = draw(st.sampled_from([[1, 1]]))
padding_algorithm = draw(st.sampled_from(["VALID", "SAME"]))
strides = draw(st.sampled_from([[1, 1], [2, 2]]))
data_format = "NCHW"
def generate_input(*args, **kwargs):
return np.random.random(in_shape).astype(np.float32)
def generate_filter(*args, **kwargs):
return np.random.random(weight_shape).astype(np.float32)
def generate_conv_bias(*args, **kwargs):
return np.random.random([cout]).astype(np.float32)
#bn param
is_test_val = draw(st.sampled_from([True, False]))
epsilon = draw(st.floats(min_value=0.00001, max_value=0.001))
momentum = draw(st.floats(min_value=0.1, max_value=0.9))
def generate_scale(*args, **kwargs):
return np.random.random([cout]).astype(np.float32) + 0.5
def METHOD_NAME(*args, **kwargs):
return np.random.random([cout]).astype(np.float32)
def generate_mean(*args, **kwargs):
return np.random.random([cout]).astype(np.float32)
def generate_variance(*args, **kwargs):
return np.random.random([cout]).astype(np.float32)
conv_op = OpConfig(
type="conv2d",
inputs={
"Input": ["input_data"],
"Filter": ["filter_data"],
"Bias": ["conv_bias_data"]
},
outputs={"Output": ["conv_output_data"]},
attrs={
"strides": strides,
"paddings": paddings,
"use_mkldnn": True,
"padding_algorithm": padding_algorithm,
"groups": groups,
"dilations": dilations,
"Scale_in": scale_in,
"Scale_out": scale_out,
"data_format": data_format
})
bn_op = OpConfig(
type="batch_norm",
inputs={
"X": ["conv_output_data"],
"Scale": ["scale_data"],
"Bias": ["bias_data"],
"Mean": ["mean_data"],
"Variance": ["variance_data"]
},
outputs={
"Y": ["output_data"],
"MeanOut": ["mean_data"],
"VarianceOut": ["variance_data"],
"SavedMean": ["saved_mean"],
"SavedVariance": ["saved_variance"]
},
attrs={
"is_test": False,
"trainable_statistics": False,
"data_layout": "NCHW",
"use_global_stats": False,
"epsilon": epsilon,
"momentum": momentum
})
ops = [conv_op, bn_op]
program_config = ProgramConfig(
ops=ops,
weights={
"filter_data": TensorConfig(data_gen=partial(generate_filter)),
"conv_bias_data":
TensorConfig(data_gen=partial(generate_conv_bias))
},
inputs={
"input_data": TensorConfig(data_gen=partial(generate_input)),
"scale_data": TensorConfig(data_gen=partial(generate_scale)),
"bias_data": TensorConfig(data_gen=partial(METHOD_NAME)),
"mean_data": TensorConfig(data_gen=partial(generate_mean)),
"variance_data": TensorConfig(data_gen=partial(generate_variance)),
},
outputs=[
"output_data", "mean_data", "variance_data", "saved_mean",
"saved_variance"
])
return program_config
|
2,687 |
copy
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Tuple, Type
from ..serialization.serializables import Serializable, StringField
from ..serialization.serializables.core import SerializableSerializer
from ..utils import tokenize
class Base(Serializable):
_no_copy_attrs_ = {"_id"}
_init_update_key_ = True
_key = StringField("key", default=None)
_id = StringField("id")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._init_update_key_ and (not hasattr(self, "_key") or not self._key):
self._update_key()
if not hasattr(self, "_id") or not self._id:
self._id = str(id(self))
@property
def _keys_(self):
cls = type(self)
member = "__keys_" + cls.__name__
try:
return getattr(cls, member)
except AttributeError:
slots = sorted(self._FIELDS)
setattr(cls, member, slots)
return slots
@property
def _copy_tags_(self):
cls = type(self)
member = f"__copy_tags_{cls.__name__}"
try:
return getattr(cls, member)
except AttributeError:
slots = sorted(
f.name for k, f in self._FIELDS.items() if k not in self._no_copy_attrs_
)
setattr(cls, member, slots)
return slots
@property
def _values_(self):
values = []
fields = self._FIELDS
for k in self._copy_tags_:
try:
values.append(fields[k].get(self))
except AttributeError:
values.append(None)
return values
def __mars_tokenize__(self):
try:
return self._key
except AttributeError: # pragma: no cover
self._update_key()
return self._key
def _obj_set(self, k, v):
object.__setattr__(self, k, v)
def _update_key(self):
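        # The key is a content-based token computed from the type name and all
        # copyable field values, so objects with equal field values share a key.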
self._obj_set("_key", tokenize(type(self).__name__, *self._values_))
return self
def reset_key(self):
self._obj_set("_key", None)
return self
def __copy__(self):
return self.METHOD_NAME()
def METHOD_NAME(self):
return self.copy_to(type(self)(_key=self.key))
def copy_to(self, target: "Base"):
target_fields = target._FIELDS
no_copy_attrs = self._no_copy_attrs_
for k, field in self._FIELDS.items():
if k in no_copy_attrs:
continue
try:
# Slightly faster than getattr.
value = field.__get__(self, k)
target_fields[k].set(target, value)
except AttributeError:
continue
return target
def copy_from(self, obj):
obj.copy_to(self)
@property
def key(self):
return self._key
@property
def id(self):
return self._id
def to_kv(self, exclude_fields: Tuple[str], accept_value_types: Tuple[Type]):
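        """Collect a mapping of field tag -> value for fields whose values match
        the accepted types, skipping the excluded field names."""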
fields = self._FIELDS
kv = {}
no_value = object()
for name, field in fields.items():
if name not in exclude_fields:
value = getattr(self, name, no_value)
if value is not no_value and isinstance(value, accept_value_types):
kv[field.tag] = value
return kv
class BaseSerializer(SerializableSerializer):
def serial(self, obj: Base, context: Dict):
return super().serial(obj, context)
BaseSerializer.register(Base)
class MarsError(Exception):
pass
class ExecutionError(MarsError):
def __init__(self, nested_error: BaseException):
super().__init__(nested_error)
self.nested_error = nested_error
|
2,688 |
set iio attr
|
# Copyright (C) 2019-2023 Analog Devices, Inc.
#
# SPDX short identifier: ADIBSD
import re
def get_numbers(s):
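    """Extract the numeric values embedded in a string.

    Returns a list of numbers, or a single number when exactly one is found
    (coerced to int when it has no fractional part); e.g. "gain 2.5 dB" would
    yield 2.5.
    """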
v = re.findall(r"[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", s)
v = [float(i) for i in v]
if len(v) == 1:
v = v[0]
if int(v) == v:
v = int(v)
return v
class attribute:
def _get_iio_attr_str_multi_dev(self, channel_names, attr_name, output, ctrls):
""" Get the same channel attribute across multiple devices
which are assumed to be strings
"""
if not isinstance(channel_names, list):
channel_names = [channel_names]
return {
ctrl.name: [
self._get_iio_attr_str(chan_name, attr_name, output, ctrl)
for chan_name in channel_names
]
for ctrl in ctrls
}
def _set_iio_attr_multi_dev(self, channel_names, attr_name, output, values, ctrls):
""" Set the same channel attribute across multiple devices
Unique parameters:
values: type=list
Must be of length <= len(ctrls)*len(channel_names)
"""
if len(values) > len(ctrls) * len(channel_names):
raise Exception("Too many values to write")
i = 0
for ctrl in ctrls:
for chan_name in channel_names:
self.METHOD_NAME(chan_name, attr_name, output, values[i], ctrl)
i += 1
def _set_iio_attr_float_multi_dev(
self, channel_names, attr_name, output, values, ctrls
):
""" Set the same float channel attribute(s) across multiple devices
Unique parameters:
values: type=list
Must be of length <= len(ctrls)*len(channel_names)
"""
for i, value in enumerate(values):
if isinstance(value, int):
values[i] = float(value)
if not isinstance(values[i], float):
raise Exception("Values must be floats")
self._set_iio_attr_multi_dev(channel_names, attr_name, output, values, ctrls)
def METHOD_NAME(self, channel_name, attr_name, output, value, _ctrl=None):
""" Set channel attribute """
if _ctrl:
channel = _ctrl.find_channel(channel_name, output)
else:
channel = self._ctrl.find_channel(channel_name, output)
try:
channel.attrs[attr_name].value = str(value)
except Exception as ex:
raise ex
def _set_iio_attr_float(self, channel_name, attr_name, output, value, _ctrl=None):
""" Set channel attribute with float """
if isinstance(value, int):
value = float(value)
if not isinstance(value, float):
raise Exception("Value must be a float")
self.METHOD_NAME(channel_name, attr_name, output, value, _ctrl)
def _set_iio_attr_float_vec(
self, channel_names, attr_name, output, values, _ctrl=None
):
""" Set channel attribute with list of floats """
if not isinstance(values, list):
raise Exception("Value must be a list")
for i, v in enumerate(values):
self._set_iio_attr_float(channel_names[i], attr_name, output, v, _ctrl)
def _set_iio_attr_int(self, channel_name, attr_name, output, value, _ctrl=None):
""" Set channel attribute with int """
if not isinstance(value, int):
raise Exception("Value must be an int")
self.METHOD_NAME(channel_name, attr_name, output, value, _ctrl)
def _set_iio_attr_int_vec(
self, channel_names, attr_name, output, values, _ctrl=None
):
""" Set channel attribute with list of ints """
if not isinstance(values, list):
raise Exception("Value must be a list")
for i, v in enumerate(values):
self._set_iio_attr_int(channel_names[i], attr_name, output, v, _ctrl)
def _set_iio_attr_str_vec(
self, channel_names, attr_name, output, values, _ctrl=None
):
""" Set channel attribute with list of strings """
if not isinstance(values, list):
raise Exception("Value must be a list")
for i, v in enumerate(list(values)):
self.METHOD_NAME(channel_names[i], attr_name, output, v, _ctrl)
def _get_iio_attr_str(self, channel_name, attr_name, output, _ctrl=None):
""" Get channel attribute as string """
if _ctrl:
channel = _ctrl.find_channel(channel_name, output)
else:
channel = self._ctrl.find_channel(channel_name, output)
if not channel:
raise Exception("No channel found with name: " + channel_name)
return channel.attrs[attr_name].value
def _get_iio_attr(self, channel_name, attr_name, output, _ctrl=None):
""" Get channel attribute as number """
return get_numbers(
self._get_iio_attr_str(channel_name, attr_name, output, _ctrl)
)
def _get_iio_attr_vec(self, channel_names, attr_name, output, _ctrl=None):
""" Get channel attributes as list of numbers """
vals = []
for chn in channel_names:
v = self._get_iio_attr(chn, attr_name, output, _ctrl)
vals.append(v)
return vals
def _get_iio_attr_str_vec(self, channel_names, attr_name, output, _ctrl=None):
""" Get channel attributes as list of numbers """
vals = []
for chn in channel_names:
v = self._get_iio_attr_str(chn, attr_name, output, _ctrl)
vals.append(v)
return vals
def _set_iio_dev_attr_str(self, attr_name, value, _ctrl=None):
""" Set device attribute with string """
try:
if _ctrl:
_ctrl.attrs[attr_name].value = str(value)
else:
self._ctrl.attrs[attr_name].value = str(value)
except Exception as ex:
raise ex
def _get_iio_dev_attr_str(self, attr_name, _ctrl=None):
""" Get device attribute as string """
if _ctrl:
return _ctrl.attrs[attr_name].value
else:
return self._ctrl.attrs[attr_name].value
def _set_iio_dev_attr(self, attr_name, value, _ctrl=None):
""" Set device attribute """
_dev = _ctrl or self._ctrl
try:
_dev.attrs[attr_name].value = str(value)
except Exception as ex:
raise ex
def _get_iio_dev_attr(self, attr_name, _ctrl=None):
""" Set device attribute as number """
return get_numbers(self._get_iio_dev_attr_str(attr_name, _ctrl))
def _set_iio_debug_attr_str(self, attr_name, value, _ctrl=None):
""" Set debug attribute with string """
try:
if _ctrl:
_ctrl.debug_attrs[attr_name].value = str(value)
else:
self._ctrl.debug_attrs[attr_name].value = str(value)
except Exception as ex:
raise ex
def _get_iio_debug_attr_str(self, attr_name, _ctrl=None):
""" Get debug attribute as string """
if _ctrl:
return _ctrl.debug_attrs[attr_name].value
else:
return self._ctrl.debug_attrs[attr_name].value
def _get_iio_debug_attr(self, attr_name, _ctrl=None):
""" Set debug attribute as number """
return get_numbers(self._get_iio_debug_attr_str(attr_name, _ctrl))
|
2,689 |
with encoder
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Union
import torch
from mmengine.model.base_model import BaseModel
from mmocr.utils import (OptConfigType, OptMultiConfig, OptRecSampleList,
RecForwardResults, RecSampleList)
class BaseTextSpotter(BaseModel, metaclass=ABCMeta):
"""Base class for text spotter.
TODO: Refine docstr & typehint
Args:
data_preprocessor (dict or ConfigDict, optional): The pre-process
config of :class:`BaseDataPreprocessor`. it usually includes,
``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.
init_cfg (dict or ConfigDict or List[dict], optional): the config
to control the initialization. Defaults to None.
"""
def __init__(self,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
@property
def with_backbone(self):
"""bool: whether the recognizer has a backbone"""
return hasattr(self, 'backbone')
@property
def METHOD_NAME(self):
"""bool: whether the recognizer has an encoder"""
return hasattr(self, 'encoder')
@property
def with_preprocessor(self):
"""bool: whether the recognizer has a preprocessor"""
return hasattr(self, 'preprocessor')
@property
def with_decoder(self):
"""bool: whether the recognizer has a decoder"""
return hasattr(self, 'decoder')
@abstractmethod
def extract_feat(self, inputs: torch.Tensor) -> torch.Tensor:
"""Extract features from images."""
pass
def forward(self,
inputs: torch.Tensor,
data_samples: OptRecSampleList = None,
mode: str = 'tensor',
**kwargs) -> RecForwardResults:
"""The unified entry for a forward process in both training and test.
The method should accept three modes: "tensor", "predict" and "loss":
- "tensor": Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- "predict": Forward and return the predictions, which are fully
processed to a list of :obj:`DetDataSample`.
- "loss": Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method doesn't handle neither back propagation nor
optimizer updating, which are done in the :meth:`train_step`.
Args:
inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (list[:obj:`DetDataSample`], optional): The
annotation data of every samples. Defaults to None.
mode (str): Return what kind of value. Defaults to 'tensor'.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of :obj:`DetDataSample`.
- If ``mode="loss"``, return a dict of tensor.
"""
if mode == 'loss':
return self.loss(inputs, data_samples, **kwargs)
elif mode == 'predict':
return self.predict(inputs, data_samples, **kwargs)
elif mode == 'tensor':
return self._forward(inputs, data_samples, **kwargs)
else:
raise RuntimeError(f'Invalid mode "{mode}". '
'Only supports loss, predict and tensor mode')
@abstractmethod
def loss(self, inputs: torch.Tensor, data_samples: RecSampleList,
**kwargs) -> Union[dict, tuple]:
"""Calculate losses from a batch of inputs and data samples."""
pass
@abstractmethod
def predict(self, inputs: torch.Tensor, data_samples: RecSampleList,
**kwargs) -> RecSampleList:
"""Predict results from a batch of inputs and data samples with post-
processing."""
pass
@abstractmethod
def _forward(self,
inputs: torch.Tensor,
data_samples: OptRecSampleList = None,
**kwargs):
"""Network forward process.
Usually includes backbone, neck and head forward without any post-
processing.
"""
pass
|
2,690 |
optional arg decorator
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: common/decorators.py
# Purpose: Decorators for functions
#
# Authors: Michael Scott Asato Cuthbert
# Christopher Ariza
#
# Copyright: Copyright © 2009-2015 Michael Scott Asato Cuthbert
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
from __future__ import annotations
from functools import wraps
import warnings
from music21 import exceptions21
__all__ = ['optional_arg_decorator', 'deprecated', 'cacheMethod']
# from Ryne Everett
# http://stackoverflow.com/questions/3888158/python-making-decorators-with-optional-arguments
def METHOD_NAME(fn):
'''
a decorator for decorators. Allows them to either have or not have arguments.
'''
@wraps(fn)
def wrapped_decorator(*arguments, **keywords):
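        # Heuristic: if the first positional argument already exposes an attribute
        # named after the wrapped decorator factory, assume the factory is being
        # used as a bound method on a class/instance and peel that object off.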
is_bound_method = hasattr(arguments[0], fn.__name__) if arguments else False
klass = None
if is_bound_method:
klass = arguments[0]
arguments = arguments[1:]
# If no arguments were passed...
if len(arguments) == 1 and not keywords and callable(arguments[0]):
if is_bound_method:
return fn(klass, arguments[0])
else:
return fn(arguments[0])
else:
def real_decorator(toBeDecorated):
if is_bound_method:
return fn(klass, toBeDecorated, *arguments, **keywords)
else:
return fn(toBeDecorated, *arguments, **keywords)
return real_decorator
return wrapped_decorator
@METHOD_NAME
def deprecated(method, startDate=None, removeDate=None, message=None):
'''
Decorator that marks a function as deprecated and should not be called.
Because we're all developers, it does not use DeprecationWarning, which no
one would ever see, but UserWarning.
Warns once per session and never again.
Use without arguments for a simple case:
For demonstrating I need to screw with stderr...
>>> import sys
>>> saveStdErr = sys.stderr
>>> sys.stderr = sys.stdout
>>> @common.deprecated
... def hi(msg):
... print(msg)
(I'm printing "/" at the beginning because message begins with the filename and that is
different on each system, but you can't use ellipses at the beginning of a doctest)
>>> print('/'); hi('myke')
/...Music21DeprecationWarning: hi was deprecated
and will disappear soon. Find alternative methods.
...
myke
A second call raises no warning:
>>> hi('myke')
myke
Now a new function demonstrating the argument form.
>>> @common.deprecated('February 1972', 'September 2099', 'You should be okay...')
... def bye(msg):
... print(msg)
>>> print('/'); bye('world')
/...Music21DeprecationWarning: bye was deprecated on February 1972
and will disappear at or after September 2099. You should be okay...
...
world
Restore stderr at the end.
>>> sys.stderr = saveStdErr
'''
if hasattr(method, '__qualname__'):
funcName = method.__qualname__
else:
funcName = method.__name__
method._isDeprecated = True
if startDate is not None:
startDate = ' on ' + startDate
else:
startDate = ''
if removeDate is not None:
removeDate = 'at or after ' + removeDate
else:
removeDate = 'soon'
if message is None:
message = 'Find alternative methods.'
m = f'{funcName} was deprecated{startDate} and will disappear {removeDate}. {message}'
callInfo = {'calledAlready': False,
'message': m}
@wraps(method)
def func_wrapper(*arguments, **keywords):
if len(arguments) > 1 and arguments[1] in (
'_ipython_canary_method_should_not_exist_',
'_repr_mimebundle_',
'_is_coroutine'
):
# false positive from IPython for StreamIterator.__getattr__
# can remove after v9.
falsePositive = True
else:
falsePositive = False
# TODO: look at sys.warnstatus.
if callInfo['calledAlready'] is False and not falsePositive:
warnings.warn(callInfo['message'],
exceptions21.Music21DeprecationWarning,
stacklevel=2)
callInfo['calledAlready'] = True
return method(*arguments, **keywords)
return func_wrapper
def cacheMethod(method):
'''
    A decorator for music21Objects or other objects that
    assumes there is a ._cache dictionary on the instance;
    it returns the cached value if one exists, otherwise it calls the method
    and stores the result.
    To be used ONLY with zero-arg calls, like properties. It can be
    used by other methods, but it will not store per-value caches.
    Not a generic memoize, because by storing everything in one ._cache place,
    a .clearCache() method can eliminate them all.
Uses the name of the function as the cache key.
* New in v6: helps to make all the caches easier to work with.
'''
if hasattr(method, '__qualname__'):
funcName = method.__qualname__
else:
funcName = method.__name__
@wraps(method)
def inner(instance, *arguments, **keywords):
if funcName in instance._cache:
return instance._cache[funcName]
instance._cache[funcName] = method(instance, *arguments, **keywords)
return instance._cache[funcName]
return inner
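# A minimal usage sketch (hypothetical class, not from music21) of the intended
# pattern: the instance provides a ._cache dict, and the decorated zero-arg
# method is computed once per cache key and reused afterwards.
#
#     class _Cached:
#         def __init__(self):
#             self._cache = {}
#
#         @cacheMethod
#         def slowValue(self):
#             return 2 ** 20  # stand-in for an expensive computation
#
#     obj = _Cached()
#     obj.slowValue()  # computed and stored in obj._cache
#     obj.slowValue()  # returned from obj._cache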
if __name__ == '__main__':
import music21
music21.mainTest()
|
2,691 |
before case
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Any
from assertpy import assert_that
from lisa import (
Logger,
Node,
TestCaseMetadata,
TestSuite,
TestSuiteMetadata,
simple_requirement,
)
from lisa.base_tools import Cat
from lisa.operating_system import BSD, Windows
from lisa.sut_orchestrator import AZURE, READY
from lisa.tools import Dmesg, Echo, KernelConfig, Lsmod, Reboot
from lisa.util import SkippedException
from microsoft.testsuites.display.modetest import Modetest
@TestSuiteMetadata(
area="drm",
category="functional",
description="""
This test suite uses to verify drm driver sanity.
""",
requirement=simple_requirement(supported_platform_type=[AZURE, READY]),
)
class Drm(TestSuite):
@TestCaseMetadata(
description="""
        This case is to check whether the hyperv_drm driver is registered successfully.
        Once the driver is registered successfully, it should appear in the `lsmod` output.
        Steps:
        1. Run lsmod.
        2. Check if hyperv_drm exists in the list.
""",
priority=2,
)
def verify_drm_driver(self, node: Node, log: Logger) -> None:
if node.tools[KernelConfig].is_built_in("CONFIG_DRM_HYPERV"):
raise SkippedException(
"DRM hyperv driver is built-in in current distro"
f" {node.os.name} {node.os.information.version}"
)
else:
lsmod = node.tools[Lsmod]
assert_that(lsmod.module_exists("hyperv_drm")).described_as(
"hyperv_drm module is absent"
).is_equal_to(True)
@TestCaseMetadata(
description="""
This case is to check whether the dri node is populated correctly.
        If the hyperv_drm driver is bound correctly, it should populate a dri node.
        This dri node can be found at the following sysfs entry: /sys/kernel/debug/dri/0.
        The dri node name (/sys/kernel/debug/dri/0/name) should contain `hyperv_drm`.
        Steps:
        1. Cat /sys/kernel/debug/dri/0/name.
        2. Verify it contains the hyperv_drm string.
""",
priority=2,
)
def verify_dri_node(self, node: Node, log: Logger) -> None:
cat = node.tools[Cat]
dri_path = "/sys/kernel/debug/dri/0/name"
dri_name = cat.read(dri_path, sudo=True, force_run=True)
assert_that(dri_name).described_as(
"dri node not populated for hyperv_drm"
).matches("hyperv_drm")
@TestCaseMetadata(
description="""
This case is to check this patch
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commit/?id=19b5e6659eaf537ebeac90ae30c7df0296fe5ab9 # noqa: E501
        Steps:
        1. Get the dmesg output.
        2. Check that no 'Unable to send packet via vmbus' message shows up in the dmesg output.
""",
priority=2,
)
def verify_no_error_output(self, node: Node, log: Logger) -> None:
assert_that(node.tools[Dmesg].get_output(force_run=True)).described_as(
"this error message is not expected to be seen "
"if dirt_needed default value is set as false"
).does_not_contain("Unable to send packet via vmbus")
@TestCaseMetadata(
description="""
        This case is to check the connector status using the modetest utility for drm.
        Steps:
        1. Install the modetest tool.
        2. Verify the status returned from modetest is connected.
""",
priority=2,
)
def verify_connection_status(self, node: Node, log: Logger) -> None:
is_status_connected = node.tools[Modetest].is_status_connected("hyperv_drm")
assert_that(is_status_connected).described_as(
"dri connector status should be 'connected'"
).is_true()
def METHOD_NAME(self, log: Logger, **kwargs: Any) -> None:
node: Node = kwargs["node"]
if isinstance(node.os, BSD) or isinstance(node.os, Windows):
raise SkippedException(f"{node.os} is not supported.")
if node.tools[KernelConfig].is_enabled("CONFIG_DRM_HYPERV"):
log.debug(
f"Current os {node.os.name} {node.os.information.version} "
"supports DRM hyperv driver"
)
lsmod = node.tools[Lsmod]
# hyperv_fb takes priority over hyperv_drm, so blacklist it
if lsmod.module_exists("hyperv_fb"):
echo = node.tools[Echo]
echo.write_to_file(
"blacklist hyperv_fb",
node.get_pure_path("/etc/modprobe.d/blacklist-fb.conf"),
sudo=True,
)
node.tools[Reboot].reboot()
else:
raise SkippedException(
"DRM hyperv driver is not enabled in current distro"
f" {node.os.name} {node.os.information.version}"
)
|
2,692 |
find source files
|
import os
from enum import Enum
from pathlib import Path
from typing import List
from strictdoc.core.file_tree import File, FileFinder
from strictdoc.core.project_config import ProjectConfig
from strictdoc.core.source_tree import SourceTree
from strictdoc.helpers.auto_described import auto_described
class SourceFileType(Enum):
PYTHON = [".py"]
C = [".c"]
CPP = [".cpp", ".cc"]
TEX = [".tex"]
# Is there an idiomatic file extension for Jinja templates?
# https://stackoverflow.com/questions/29590931/is-there-an-idiomatic-file-extension-for-jinja-templates # noqa: #501
JINJA = [".jinja", ".jinja2", ".j2", ".html.jinja"]
@classmethod
def create_from_path(cls, path_to_file: str) -> "SourceFileType":
assert os.path.isfile(path_to_file), path_to_file
if path_to_file.endswith(".py"):
return cls.PYTHON
if path_to_file.endswith(".c"):
return cls.C
for enum_value in SourceFileType.CPP.value:
if path_to_file.endswith(enum_value):
return cls.CPP
if path_to_file.endswith(".tex"):
return cls.TEX
for enum_value in SourceFileType.JINJA.value:
if path_to_file.endswith(enum_value):
return cls.JINJA
raise NotImplementedError(path_to_file)
@staticmethod
def all() -> List[str]:
all_extensions = []
for enum_value in SourceFileType:
all_extensions += enum_value.value
return all_extensions
@auto_described
class SourceFile: # pylint: disable=too-many-instance-attributes
def __init__( # pylint: disable=too-many-arguments
self,
level,
full_path,
in_doctree_source_file_rel_path,
output_dir_full_path,
output_file_full_path,
):
assert isinstance(level, int)
assert os.path.exists(full_path)
self.level = level
self.full_path = full_path
self.in_doctree_source_file_rel_path = in_doctree_source_file_rel_path
self.in_doctree_source_file_rel_path_posix: str = (
in_doctree_source_file_rel_path.replace("\\", "/")
)
self.output_dir_full_path = output_dir_full_path
self.output_file_full_path = output_file_full_path
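        # Relative prefix used to climb from the generated page back to the HTML
        # output root, e.g. ".." for level 0 and "../.." for level 1.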
self.path_depth_prefix = ("../" * (level + 1))[:-1]
self.file_type: SourceFileType = SourceFileType.create_from_path(
in_doctree_source_file_rel_path
)
self.traceability_info = None
self.is_referenced = False
def is_python_file(self):
return self.file_type == SourceFileType.PYTHON
def is_c_file(self):
return self.file_type == SourceFileType.C
def is_cpp_file(self):
return self.file_type == SourceFileType.CPP
def is_tex_file(self):
return self.file_type == SourceFileType.TEX
def is_jinja_file(self):
return self.file_type == SourceFileType.JINJA
class SourceFilesFinder:
@staticmethod
def METHOD_NAME(
project_config: ProjectConfig,
) -> SourceTree:
map_file_to_source = {}
found_source_files: List[SourceFile] = []
# TODO: Unify this on the FileTree class level.
# Introduce #mount_directory method?
doctree_root_abs_path = os.getcwd()
doctree_root_abs_path = (
os.path.dirname(doctree_root_abs_path)
if os.path.isfile(doctree_root_abs_path)
else doctree_root_abs_path
)
assert isinstance(project_config.export_output_dir, str)
file_tree = FileFinder.find_files_with_extensions(
root_path=doctree_root_abs_path,
ignored_dirs=[project_config.export_output_dir],
extensions=SourceFileType.all(),
include_paths=project_config.include_source_paths,
exclude_paths=project_config.exclude_source_paths,
)
root_level = doctree_root_abs_path.count(os.sep)
file: File
for _, file, _ in file_tree.iterate():
in_doctree_source_file_rel_path = os.path.relpath(
file.root_path, doctree_root_abs_path
)
last_folder_in_path: str = os.path.relpath(
file.get_folder_path(), doctree_root_abs_path
)
output_dir_full_path: str = os.path.join(
project_config.export_output_html_root,
"_source_files",
last_folder_in_path,
)
Path(output_dir_full_path).mkdir(parents=True, exist_ok=True)
output_file_name = f"{file.get_file_name()}.html"
output_file_full_path = os.path.join(
output_dir_full_path, output_file_name
)
level = file.get_folder_path().count(os.sep) - root_level
source_file = SourceFile(
level,
file.root_path,
in_doctree_source_file_rel_path,
output_dir_full_path,
output_file_full_path,
)
found_source_files.append(source_file)
map_file_to_source[file] = source_file
source_tree = SourceTree(
file_tree, found_source_files, map_file_to_source
)
return source_tree
|
2,693 |
test scale global to worker tpu
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lingvo.core.batch_utils."""
import itertools
from absl.testing import flagsaver
from absl.testing import parameterized
from lingvo import compat as tf
from lingvo.core import batch_utils
from lingvo.core import cluster_factory
class BatchUtilsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters({'use_per_host_infeed': False},
{'use_per_host_infeed': True})
def testScaleInfeedToGlobalCPU(self, use_per_host_infeed):
with cluster_factory.ForTestingWorker(cpus=128):
self.assertEqual(
batch_utils.scale_infeed_to_global(1024, use_per_host_infeed), 1024)
@parameterized.parameters({'use_per_host_infeed': False},
{'use_per_host_infeed': True})
def testScaleInfeedToGlobalGPU(self, use_per_host_infeed):
with cluster_factory.ForTestingWorker(gpus=128):
self.assertEqual(
batch_utils.scale_infeed_to_global(1024, use_per_host_infeed), 1024)
@parameterized.parameters(
itertools.product(
(False, True), # use_per_host_infeed
(1, 4))) # num_tpu_hosts
def testScaleInfeedToGlobalTPU(self, use_per_host_infeed, num_tpu_hosts):
with flagsaver.flagsaver(xla_device='tpu', enable_asserts=False):
with cluster_factory.ForTestingWorker(
tpus=128, num_tpu_hosts=num_tpu_hosts):
num_infeeds = num_tpu_hosts if use_per_host_infeed else 1
self.assertEqual(
batch_utils.scale_infeed_to_global(1024, use_per_host_infeed),
1024 * num_infeeds)
@parameterized.parameters(
itertools.product(
(False, True), # use_per_host_infeed
(1, 8))) # split_size
def testScaleSplitToInfeedCPU(self, use_per_host_infeed, split_size):
with cluster_factory.ForTestingWorker(
cpus=128, split_size=split_size) as cluster:
num_splits = 128 // split_size
self.assertEqual(cluster.num_splits_per_client, num_splits)
self.assertEqual(
batch_utils.scale_split_to_infeed(1024, use_per_host_infeed),
1024 * num_splits)
@parameterized.parameters(
itertools.product(
(False, True), # use_per_host_infeed
(1, 8))) # split_size
def testScaleSplitToInfeedGPU(self, use_per_host_infeed, split_size):
with cluster_factory.ForTestingWorker(
gpus=128, split_size=split_size) as cluster:
num_splits = 128 // split_size
self.assertEqual(cluster.num_splits_per_client, num_splits)
self.assertEqual(
batch_utils.scale_split_to_infeed(1024, use_per_host_infeed),
1024 * num_splits)
@parameterized.parameters(
itertools.product(
(False, True), # use_per_host_infeed
(1, 8), # split_size
(1, 4))) # num_tpu_hosts
def testScaleSplitToInfeedTPU(self, use_per_host_infeed, split_size,
num_tpu_hosts):
with cluster_factory.ForTestingWorker(
tpus=128, split_size=split_size,
num_tpu_hosts=num_tpu_hosts) as cluster:
num_splits = 128 // split_size
num_infeeds = num_tpu_hosts if use_per_host_infeed else 1
self.assertEqual(cluster.num_splits_per_client, num_splits)
self.assertEqual(
batch_utils.scale_split_to_infeed(1024, use_per_host_infeed),
1024 * num_splits // num_infeeds)
@parameterized.product(tpus=[64, 128])
def METHOD_NAME(self, tpus):
with cluster_factory.ForTestingWorker(tpus=tpus) as cluster:
self.assertEqual(cluster.total_worker_devices, tpus)
self.assertEqual(batch_utils.scale_global_to_worker(1024), 1024 // tpus)
if __name__ == '__main__':
tf.test.main()
|
2,694 |
new timer
|
import ctypes
import signal
import threading
class BaseTimeoutException(Exception):
"""Base exception for timeouts."""
pass
class JobTimeoutException(BaseTimeoutException):
"""Raised when a job takes longer to complete than the allowed maximum
timeout value.
"""
pass
class HorseMonitorTimeoutException(BaseTimeoutException):
"""Raised when waiting for a horse exiting takes longer than the maximum
timeout value.
"""
pass
class BaseDeathPenalty:
"""Base class to setup job timeouts."""
def __init__(self, timeout, exception=BaseTimeoutException, **kwargs):
self._timeout = timeout
self._exception = exception
def __enter__(self):
self.setup_death_penalty()
def __exit__(self, type, value, traceback):
# Always cancel immediately, since we're done
try:
self.cancel_death_penalty()
except BaseTimeoutException:
# Weird case: we're done with the with body, but now the alarm is
# fired. We may safely ignore this situation and consider the
# body done.
pass
# __exit__ may return True to suppress further exception handling. We
# don't want to suppress any exceptions here, since all errors should
# just pass through, with BaseTimeoutException being handled normally by
# the invoking context.
return False
def setup_death_penalty(self):
raise NotImplementedError()
def cancel_death_penalty(self):
raise NotImplementedError()
class UnixSignalDeathPenalty(BaseDeathPenalty):
def handle_death_penalty(self, signum, frame):
raise self._exception('Task exceeded maximum timeout value ({0} seconds)'.format(self._timeout))
def setup_death_penalty(self):
"""Sets up an alarm signal and a signal handler that raises
an exception after the timeout amount (expressed in seconds).
"""
signal.signal(signal.SIGALRM, self.handle_death_penalty)
signal.alarm(self._timeout)
def cancel_death_penalty(self):
"""Removes the death penalty alarm and puts back the system into
default signal handling.
"""
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
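# A hypothetical usage sketch (not part of the original module): both death
# penalty classes are context managers, so callers typically wrap the guarded
# call in a with-block and catch the configured exception.
#
#   try:
#       with UnixSignalDeathPenalty(5, JobTimeoutException):
#           run_job()  # hypothetical long-running call
#   except JobTimeoutException:
#       handle_timeout()  # hypothetical cleanup
#
# Because UnixSignalDeathPenalty relies on SIGALRM, it only works in the main
# thread of the main interpreter; TimerDeathPenalty below avoids that
# limitation by using a threading.Timer instead.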
class TimerDeathPenalty(BaseDeathPenalty):
def __init__(self, timeout, exception=JobTimeoutException, **kwargs):
super().__init__(timeout, exception, **kwargs)
self._target_thread_id = threading.current_thread().ident
self._timer = None
# Monkey-patch exception with the message ahead of time
# since PyThreadState_SetAsyncExc can only take a class
def init_with_message(self, *args, **kwargs): # noqa
super(exception, self).__init__("Task exceeded maximum timeout value ({0} seconds)".format(timeout))
self._exception.__init__ = init_with_message
def METHOD_NAME(self):
"""Returns a new timer since timers can only be used once."""
return threading.Timer(self._timeout, self.handle_death_penalty)
def handle_death_penalty(self):
"""Raises an asynchronous exception in another thread.
Reference http://docs.python.org/c-api/init.html#PyThreadState_SetAsyncExc for more info.
"""
ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(self._target_thread_id), ctypes.py_object(self._exception)
)
if ret == 0:
raise ValueError("Invalid thread ID {}".format(self._target_thread_id))
elif ret > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self._target_thread_id), 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
def setup_death_penalty(self):
"""Starts the timer."""
if self._timeout <= 0:
return
self._timer = self.METHOD_NAME()
self._timer.start()
def cancel_death_penalty(self):
"""Cancels the timer."""
if self._timeout <= 0:
return
self._timer.cancel()
self._timer = None
|
2,695 |
class builder
|
"""Test for the invalid-name warning."""
# pylint: disable=unnecessary-pass, unnecessary-comprehension, unused-private-member
# pylint: disable=unnecessary-lambda-assignment
import abc
import collections
import typing
from enum import Enum
from typing import ClassVar
GOOD_CONST_NAME = ''
bad_const_name = 0 # [invalid-name]
def BADFUNCTION_name(): # [invalid-name]
"""Bad function name."""
BAD_LOCAL_VAR = 1 # [invalid-name]
print(BAD_LOCAL_VAR)
def func_bad_argname(NOT_GOOD): # [invalid-name]
"""Function with a badly named argument."""
return NOT_GOOD
def no_nested_args(arg1, arg21, arg22):
"""Well-formed function."""
print(arg1, arg21, arg22)
class bad_class_name: # [invalid-name]
"""Class with a bad name."""
class CorrectClassName:
"""Class with a good name."""
def __init__(self):
self._good_private_name = 10
self.__good_real_private_name = 11
self.good_attribute_name = 12
self._Bad_AtTR_name = None # [invalid-name]
self.Bad_PUBLIC_name = None # [invalid-name]
zz = 'Why Was It Bad Class Attribute?'
GOOD_CLASS_ATTR = 'Good Class Attribute'
def BadMethodName(self): # [invalid-name]
"""A Method with a bad name."""
def good_method_name(self):
"""A method with a good name."""
def __DunDER_IS_not_free_for_all__(self): # [invalid-name]
"""Another badly named method."""
class DerivedFromCorrect(CorrectClassName):
"""A derived class with an invalid inherited members.
Derived attributes and methods with invalid names do not trigger warnings.
"""
zz = 'Now a good class attribute'
def __init__(self):
super().__init__()
self._Bad_AtTR_name = None # Ignored
def BadMethodName(self):
"""Ignored since the method is in the interface."""
V = [WHAT_Ever_inListComp for WHAT_Ever_inListComp in GOOD_CONST_NAME]
def METHOD_NAME():
"""Function returning a class object."""
class EmbeddedClass:
"""Useless class."""
return EmbeddedClass
# +1:[invalid-name]
BAD_NAME_FOR_CLASS = collections.namedtuple('Named', ['tuple'])
NEXT_BAD_NAME_FOR_CLASS = METHOD_NAME() # [invalid-name]
GoodName = collections.namedtuple('Named', ['tuple'])
ToplevelClass = METHOD_NAME()
# Aliases for classes have the same name constraints.
AlsoCorrect = CorrectClassName
NOT_CORRECT = CorrectClassName # [invalid-name]
def test_globals():
"""Names in global statements are also checked."""
global NOT_CORRECT
global AlsoCorrect
NOT_CORRECT = 1
AlsoCorrect = 2
class FooClass:
"""A test case for property names.
Since by default, the regex for attributes is the same as the one
for method names, we check that the warning messages contain the
string 'attribute'.
"""
@property
def PROPERTY_NAME(self): # [invalid-name]
"""Ignored."""
pass
@property
@abc.abstractmethod
def ABSTRACT_PROPERTY_NAME(self): # [invalid-name]
"""Ignored."""
pass
@PROPERTY_NAME.setter
def PROPERTY_NAME_SETTER(self): # [invalid-name]
"""Ignored."""
pass
def _nice_and_long_descriptive_private_method_name(self):
"""private method with long name"""
pass
def good_public_function_name(good_arg_name):
"""This is a perfect public function"""
good_variable_name = 1
return good_variable_name + good_arg_name
def _private_scope_function_with_long_descriptive_name():
"""Private scope function are cool with long descriptive names"""
return 12
LONG_CONSTANT_NAME_IN_PUBLIC_SCOPE_ARE_OKAY = True
# We don't emit for non-const nodes
good_name_for_funcs = lambda: None
good_name_for_lists = [1, 2, 3]
class _AnExceptionalExceptionThatOccursVeryVeryRarely(Exception):
"""A very exceptional exception with a nice descriptive name"""
pass
class FooEnum(Enum):
"""A test case for enum names."""
GOOD_ENUM_NAME = 1
bad_enum_name = 2 # [invalid-name]
class Bar:
"""Class with class variables annotated with ClassVar."""
CLASS_CONST: ClassVar[int] = 42
CLASS_CONST2: ClassVar = "const"
variable: ClassVar[str] = "invalid name"
CLASS_CONST3: typing.ClassVar
variable2: typing.ClassVar[int]
|
2,696 |
test copy batch kms
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration tests for gcsio module.
Runs tests against Google Cloud Storage service.
Instantiates a TestPipeline to get options such as GCP project name, but
doesn't actually start a Beam pipeline or test any specific runner.
Options:
--kms_key_name=projects/<project-name>/locations/<region>/keyRings/\
<key-ring-name>/cryptoKeys/<key-name>/cryptoKeyVersions/<version>
Pass a Cloud KMS key name to test GCS operations using customer managed
encryption keys (CMEK).
Cloud KMS permissions:
The project's Cloud Storage service account requires Encrypter/Decrypter
permissions for the key specified in --kms_key_name.
To run these tests manually:
./gradlew :sdks:python:test-suites:dataflow:integrationTest \
-Dtests=apache_beam.io.gcp.gcsio_integration_test:GcsIOIntegrationTest \
-DkmsKeyName=KMS_KEY_NAME
"""
# pytype: skip-file
import logging
import unittest
import uuid
import pytest
from apache_beam.io.filesystems import FileSystems
from apache_beam.testing.test_pipeline import TestPipeline
try:
from apache_beam.io.gcp import gcsio
except ImportError:
gcsio = None # type: ignore
@unittest.skipIf(gcsio is None, 'GCP dependencies are not installed')
class GcsIOIntegrationTest(unittest.TestCase):
INPUT_FILE = 'gs://dataflow-samples/shakespeare/kinglear.txt'
# Larger than 1MB to test maxBytesRewrittenPerCall.
# Also needs to be in a different region than the dest to take effect.
INPUT_FILE_LARGE = 'gs://apache-beam-samples-us-east1/wikipedia_edits/wiki_data-000000000000.json' # pylint: disable=line-too-long
def setUp(self):
self.test_pipeline = TestPipeline(is_integration_test=True)
self.runner_name = type(self.test_pipeline.runner).__name__
if self.runner_name != 'TestDataflowRunner':
# This test doesn't run a pipeline, so it doesn't make sense to try it on
# different runners. Running with TestDataflowRunner makes sense since
# it uses GoogleCloudOptions such as 'project'.
raise unittest.SkipTest('This test only runs with TestDataflowRunner.')
self.project = self.test_pipeline.get_option('project')
self.gcs_tempdir = (
self.test_pipeline.get_option('temp_location') + '/gcs_it-' +
str(uuid.uuid4()))
self.kms_key_name = self.test_pipeline.get_option('kms_key_name')
self.gcsio = gcsio.GcsIO()
def tearDown(self):
FileSystems.delete([self.gcs_tempdir + '/'])
def _verify_copy(self, src, dst, dst_kms_key_name=None):
self.assertTrue(FileSystems.exists(src), 'src does not exist: %s' % src)
self.assertTrue(FileSystems.exists(dst), 'dst does not exist: %s' % dst)
src_checksum = self.gcsio.checksum(src)
dst_checksum = self.gcsio.checksum(dst)
self.assertEqual(src_checksum, dst_checksum)
actual_dst_kms_key = self.gcsio.kms_key(dst)
if actual_dst_kms_key is None:
self.assertEqual(actual_dst_kms_key, dst_kms_key_name)
else:
self.assertTrue(
actual_dst_kms_key.startswith(dst_kms_key_name),
"got: %s, wanted startswith: %s" %
(actual_dst_kms_key, dst_kms_key_name))
def _test_copy(
self,
name,
kms_key_name=None,
max_bytes_rewritten_per_call=None,
src=None):
src = src or self.INPUT_FILE
dst = self.gcs_tempdir + '/%s' % name
extra_kwargs = {}
if max_bytes_rewritten_per_call is not None:
extra_kwargs['max_bytes_rewritten_per_call'] = (
max_bytes_rewritten_per_call)
self.gcsio.copy(src, dst, kms_key_name, **extra_kwargs)
self._verify_copy(src, dst, kms_key_name)
@pytest.mark.it_postcommit
def test_copy(self):
self._test_copy("test_copy")
@pytest.mark.it_postcommit
def test_copy_kms(self):
if self.kms_key_name is None:
raise unittest.SkipTest('--kms_key_name not specified')
self._test_copy("test_copy_kms", self.kms_key_name)
@pytest.mark.it_postcommit
def test_copy_rewrite_token(self):
# Tests a multi-part copy (rewrite) operation. This is triggered by a
# combination of 3 conditions:
# - a large enough src
# - setting max_bytes_rewritten_per_call
# - setting kms_key_name
if self.kms_key_name is None:
raise unittest.SkipTest('--kms_key_name not specified')
rewrite_responses = []
self.gcsio._set_rewrite_response_callback(
lambda response: rewrite_responses.append(response))
self._test_copy(
"test_copy_rewrite_token",
kms_key_name=self.kms_key_name,
max_bytes_rewritten_per_call=50 * 1024 * 1024,
src=self.INPUT_FILE_LARGE)
# Verify that there was a multi-part rewrite.
self.assertTrue(any(not r.done for r in rewrite_responses))
def _test_copy_batch(
self,
name,
kms_key_name=None,
max_bytes_rewritten_per_call=None,
src=None):
num_copies = 10
srcs = [src or self.INPUT_FILE] * num_copies
dsts = [self.gcs_tempdir + '/%s_%d' % (name, i) for i in range(num_copies)]
src_dst_pairs = list(zip(srcs, dsts))
extra_kwargs = {}
if max_bytes_rewritten_per_call is not None:
extra_kwargs['max_bytes_rewritten_per_call'] = (
max_bytes_rewritten_per_call)
result_statuses = self.gcsio.copy_batch(
src_dst_pairs, kms_key_name, **extra_kwargs)
for status in result_statuses:
self.assertIsNone(status[2], status)
for _src, _dst in src_dst_pairs:
self._verify_copy(_src, _dst, kms_key_name)
@pytest.mark.it_postcommit
def test_copy_batch(self):
self._test_copy_batch("test_copy_batch")
@pytest.mark.it_postcommit
def METHOD_NAME(self):
if self.kms_key_name is None:
raise unittest.SkipTest('--kms_key_name not specified')
self._test_copy_batch("test_copy_batch_kms", self.kms_key_name)
@pytest.mark.it_postcommit
def test_copy_batch_rewrite_token(self):
# Tests a multi-part copy (rewrite) operation. This is triggered by a
# combination of 3 conditions:
# - a large enough src
# - setting max_bytes_rewritten_per_call
# - setting kms_key_name
if self.kms_key_name is None:
raise unittest.SkipTest('--kms_key_name not specified')
rewrite_responses = []
self.gcsio._set_rewrite_response_callback(
lambda response: rewrite_responses.append(response))
self._test_copy_batch(
"test_copy_batch_rewrite_token",
kms_key_name=self.kms_key_name,
max_bytes_rewritten_per_call=50 * 1024 * 1024,
src=self.INPUT_FILE_LARGE)
# Verify that there was a multi-part rewrite.
self.assertTrue(any(not r.done for r in rewrite_responses))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
2,697 |
generic 3d check
|
import numpy as np
from .crop_zero_planes import CropExternalZeroplanes
from .non_zero_normalize import NonZeroNormalizeOnMaskedRegion
from .threshold_and_clip import (
threshold_transform,
clip_transform,
)
from .normalize_rgb import (
normalize_by_val_transform,
normalize_imagenet_transform,
normalize_standardize_transform,
normalize_div_by_255_transform,
)
from .template_matching import histogram_matching, stain_normalizer
from .resample_minimum import Resample_Minimum
from .rgb_conversion import rgba2rgb_transform, rgb2rgba_transform
from torchio.transforms import (
ZNormalization,
ToCanonical,
Crop,
CropOrPad,
Resize,
Resample,
Compose,
RescaleIntensity,
)
def METHOD_NAME(patch_size):
"""
This function takes a 2D or 3D patch size and returns a shape that torchio can ingest, appending a singleton third dimension when a 2D size is given.
Args:
patch_size (Union[list, tuple, array]): The generic list/tuple/array to check.
Returns:
tuple: The tuple to be ingested by torchio.
"""
patch_size_new = np.array(patch_size)
if len(patch_size) == 2:
patch_size_new = tuple(
np.append(
np.array(patch_size),
1,
)
)
return patch_size_new
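# Illustrative examples (assumed, not part of the original module): a 2D patch
# size gets a singleton third dimension appended so that torchio's 3D
# transforms can consume it, while a 3D size passes through unchanged.
#
#   METHOD_NAME([128, 128])    # -> (128, 128, 1)
#   METHOD_NAME([64, 64, 32])  # -> array([64, 64, 32])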
def positive_voxel_mask(image):
return image > 0
def nonzero_voxel_mask(image):
return image != 0
def to_canonical_transform(parameters=None):
return ToCanonical()
def crop_transform(patch_size):
return Crop(METHOD_NAME(patch_size))
def centercrop_transform(patch_size):
return CropOrPad(target_shape=METHOD_NAME(patch_size))
def rescale_transform(parameters=None):
if parameters is None:
parameters = {}
# get defaults from torchio
rescaler = RescaleIntensity()
rescaler.out_min_max = parameters.get("out_min_max", rescaler.out_min_max)
rescaler.percentiles = parameters.get("percentiles", rescaler.percentiles)
rescaler.in_min_max = parameters.get("in_min_max", None)
return rescaler
# defining dict for pre-processing - key is the string and the value is the transform object
global_preprocessing_dict = {
"to_canonical": to_canonical_transform,
"threshold": threshold_transform,
"clip": clip_transform,
"clamp": clip_transform,
"crop_external_zero_planes": CropExternalZeroplanes,
"crop": crop_transform,
"centercrop": centercrop_transform,
"normalize_by_val": normalize_by_val_transform,
"normalize_imagenet": normalize_imagenet_transform(),
"normalize_standardize": normalize_standardize_transform(),
"normalize_div_by_255": normalize_div_by_255_transform(),
"normalize": ZNormalization(),
"normalize_positive": ZNormalization(masking_method=positive_voxel_mask),
"normalize_nonZero": ZNormalization(masking_method=nonzero_voxel_mask),
"normalize_nonzero": ZNormalization(masking_method=nonzero_voxel_mask),
"normalize_nonZero_masked": NonZeroNormalizeOnMaskedRegion(),
"normalize_nonzero_masked": NonZeroNormalizeOnMaskedRegion(),
"rescale": rescale_transform,
"rgba2rgb": rgba2rgb_transform,
"rgbatorgb": rgba2rgb_transform,
"rgba_to_rgb": rgba2rgb_transform,
"rgb2rgba": rgb2rgba_transform,
"rgbtorgba": rgb2rgba_transform,
"rgb_to_rgba": rgb2rgba_transform,
"histogram_matching": histogram_matching,
"stain_normalizer": stain_normalizer,
}
def get_transforms_for_preprocessing(
parameters, current_transformations, train_mode, apply_zero_crop
):
"""
This function gets the pre-processing transformations from the parameters.
Args:
parameters (dict): The parameters dictionary.
current_transformations (list): The current transformations list.
train_mode (bool): Whether the data is in train mode or not.
apply_zero_crop (bool): Whether to apply zero crop or not.
Returns:
list: The list of pre-processing transformations.
"""
preprocessing_params_dict = parameters["data_preprocessing"]
# first, we want to do thresholding, followed by clipping, if it is present - required for inference as well
normalize_to_apply = None
if not (preprocessing_params_dict is None):
# go through preprocessing in the order they are specified
for preprocess in preprocessing_params_dict:
preprocess_lower = preprocess.lower()
# special check for resize and resample
if preprocess_lower == "resize_patch":
resize_values = METHOD_NAME(preprocessing_params_dict[preprocess])
current_transformations.append(Resize(resize_values))
elif preprocess_lower == "resample":
if "resolution" in preprocessing_params_dict[preprocess]:
# Need to take a look here
resample_values = METHOD_NAME(
preprocessing_params_dict[preprocess]["resolution"]
)
current_transformations.append(Resample(resample_values))
elif preprocess_lower in ["resample_minimum", "resample_min"]:
if "resolution" in preprocessing_params_dict[preprocess]:
resample_values = METHOD_NAME(
preprocessing_params_dict[preprocess]["resolution"]
)
current_transformations.append(Resample_Minimum(resample_values))
# special check for histogram_matching
elif preprocess_lower == "histogram_matching":
if preprocessing_params_dict[preprocess] is not False:
current_transformations.append(
global_preprocessing_dict[preprocess_lower](
preprocessing_params_dict[preprocess]
)
)
# special check for stain_normalizer
elif preprocess_lower == "stain_normalizer":
if normalize_to_apply is None:
normalize_to_apply = global_preprocessing_dict[preprocess_lower](
preprocessing_params_dict[preprocess]
)
# normalize should be applied at the end
elif "normalize" in preprocess_lower:
if normalize_to_apply is None:
normalize_to_apply = global_preprocessing_dict[preprocess_lower]
# preprocessing routines that we only want for training
elif preprocess_lower in ["crop_external_zero_planes"]:
if train_mode or apply_zero_crop:
current_transformations.append(
global_preprocessing_dict["crop_external_zero_planes"](
patch_size=parameters["patch_size"]
)
)
# everything else is taken in the order passed by user
elif preprocess_lower in global_preprocessing_dict:
current_transformations.append(
global_preprocessing_dict[preprocess_lower](
preprocessing_params_dict[preprocess]
)
)
# normalization type is applied at the end
if normalize_to_apply is not None:
current_transformations.append(normalize_to_apply)
# compose the transformations
transforms_to_apply = None
if current_transformations:
transforms_to_apply = Compose(current_transformations)
return transforms_to_apply
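# A hedged usage sketch with hypothetical configuration values (the inner
# option shapes below are assumptions, not taken from the original module;
# "patch_size" and "data_preprocessing" are the keys the function reads):
#
#   example_parameters = {
#       "patch_size": [128, 128, 32],
#       "data_preprocessing": {
#           "threshold": {"min": 0, "max": 1},
#           "resample": {"resolution": [1.0, 1.0, 1.0]},
#           "normalize_nonzero": None,
#       },
#   }
#   transforms = get_transforms_for_preprocessing(
#       example_parameters, [], train_mode=True, apply_zero_crop=False
#   )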
|
2,698 |
test attack comparator with preproc
|
#!/usr/bin/env python3
import collections
from typing import List
import torch
from captum.robust import AttackComparator, FGSM
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel, BasicModel_MultiLayer
from torch import Tensor
def float_metric(model_out: Tensor, target: int):
return model_out[:, target]
ModelResult = collections.namedtuple("ModelResult", "accuracy output")
def tuple_metric(model_out: Tensor, target: int, named_tuple=False):
_, pred = torch.max(model_out, dim=1)
acc = (pred == target).float()
output = model_out[:, target]
if named_tuple:
return ModelResult(
accuracy=acc.item() if acc.numel() == 1 else acc,
output=output.item() if output.numel() == 1 else output,
)
return (acc, output)
def drop_column_perturb(inp: Tensor, column: int) -> Tensor:
mask = torch.ones_like(inp)
mask[:, column] = 0.0
return mask * inp
def text_preproc_fn(inp: List[str]) -> Tensor:
return torch.tensor([float(ord(elem[0])) for elem in inp]).unsqueeze(0)
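# Descriptive note (added, not in the original file): text_preproc_fn maps a
# list of strings to a 1 x N tensor of each string's first-character ordinal,
# e.g. ["abc", "zyd", "ghi"] -> tensor([[97., 122., 103.]]).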
def batch_text_preproc_fn(inp: List[List[str]]) -> Tensor:
return torch.cat([text_preproc_fn(elem) for elem in inp])
def string_perturb(inp: List[str]) -> List[str]:
return ["a" + elem for elem in inp]
def string_batch_perturb(inp: List[List[str]]) -> List[List[str]]:
return [string_perturb(elem) for elem in inp]
class SamplePerturb:
def __init__(self) -> None:
self.count = 0
def perturb(self, inp: Tensor) -> Tensor:
mask = torch.ones_like(inp)
mask[:, self.count % mask.shape[1]] = 0.0
self.count += 1
return mask * inp
class Test(BaseTest):
def test_attack_comparator_basic(self) -> None:
model = BasicModel()
inp = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
attack_comp = AttackComparator(
forward_func=lambda x: model(x)
+ torch.tensor([[0.000001, 0.0, 0.0, 0.0, 0.0]]),
metric=tuple_metric,
)
attack_comp.add_attack(
drop_column_perturb,
name="first_column_perturb",
attack_kwargs={"column": 0},
)
attack_comp.add_attack(
drop_column_perturb,
name="last_column_perturb",
attack_kwargs={"column": -1},
)
attack_comp.add_attack(
FGSM(model),
attack_kwargs={"epsilon": 0.5},
additional_attack_arg_names=["target"],
)
batch_results = attack_comp.evaluate(inp, target=0, named_tuple=True)
expected_first_results = {
"Original": (1.0, 1.0),
"first_column_perturb": {"mean": (0.0, 0.0)},
"last_column_perturb": {"mean": (1.0, 1.0)},
"FGSM": {"mean": (1.0, 1.0)},
}
self._compare_results(batch_results, expected_first_results)
alt_inp = torch.tensor([[1.0, 2.0, -3.0, 4.0, -5.0]])
second_batch_results = attack_comp.evaluate(alt_inp, target=4, named_tuple=True)
expected_second_results = {
"Original": (0.0, -5.0),
"first_column_perturb": {"mean": (0.0, -5.0)},
"last_column_perturb": {"mean": (0.0, 0.0)},
"FGSM": {"mean": (0.0, -4.5)},
}
self._compare_results(second_batch_results, expected_second_results)
expected_summary_results = {
"Original": {"mean": (0.5, -2.0)},
"first_column_perturb": {"mean": (0.0, -2.5)},
"last_column_perturb": {"mean": (0.5, 0.5)},
"FGSM": {"mean": (0.5, -1.75)},
}
self._compare_results(attack_comp.summary(), expected_summary_results)
def METHOD_NAME(self) -> None:
model = BasicModel_MultiLayer()
text_inp = ["abc", "zyd", "ghi"]
attack_comp = AttackComparator(
forward_func=model, metric=tuple_metric, preproc_fn=text_preproc_fn
)
attack_comp.add_attack(
SamplePerturb().perturb,
name="Sequence Column Perturb",
num_attempts=5,
apply_before_preproc=False,
)
attack_comp.add_attack(
string_perturb,
name="StringPerturb",
apply_before_preproc=True,
)
batch_results = attack_comp.evaluate(
text_inp, target=0, named_tuple=True, perturbations_per_eval=3
)
expected_first_results = {
"Original": (0.0, 1280.0),
"Sequence Column Perturb": {
"mean": (0.0, 847.2),
"max": (0.0, 892.0),
"min": (0.0, 792.0),
},
"StringPerturb": {"mean": (0.0, 1156.0)},
}
self._compare_results(batch_results, expected_first_results)
expected_summary_results = {
"Original": {"mean": (0.0, 1280.0)},
"Sequence Column Perturb Mean Attempt": {"mean": (0.0, 847.2)},
"Sequence Column Perturb Min Attempt": {"mean": (0.0, 792.0)},
"Sequence Column Perturb Max Attempt": {"mean": (0.0, 892.0)},
"StringPerturb": {"mean": (0.0, 1156.0)},
}
self._compare_results(attack_comp.summary(), expected_summary_results)
def test_attack_comparator_with_additional_args(self) -> None:
model = BasicModel_MultiLayer()
text_inp = [["abc", "zyd", "ghi"], ["mnop", "qrs", "Tuv"]]
additional_forward_args = torch.ones((2, 3)) * -97
attack_comp = AttackComparator(
forward_func=model, metric=tuple_metric, preproc_fn=batch_text_preproc_fn
)
attack_comp.add_attack(
SamplePerturb().perturb,
name="Sequence Column Perturb",
num_attempts=5,
apply_before_preproc=False,
)
attack_comp.add_attack(
string_batch_perturb,
name="StringPerturb",
apply_before_preproc=True,
)
batch_results = attack_comp.evaluate(
text_inp,
additional_forward_args=additional_forward_args,
target=0,
named_tuple=True,
perturbations_per_eval=2,
)
expected_first_results = {
"Original": ([0.0, 0.0], [116.0, 52.0]),
"Sequence Column Perturb": {
"mean": ([0.0, 0.0], [-1.0, -1.0]),
"max": ([0.0, 0.0], [-1.0, -1.0]),
"min": ([0.0, 0.0], [-1.0, -1.0]),
},
"StringPerturb": {"mean": ([0.0, 0.0], [2.0, 2.0])},
}
self._compare_results(batch_results, expected_first_results)
expected_summary_results = {
"Original": {
"mean": (0.0, 84.0),
},
"Sequence Column Perturb Mean Attempt": {"mean": (0.0, -1.0)},
"Sequence Column Perturb Min Attempt": {"mean": (0.0, -1.0)},
"Sequence Column Perturb Max Attempt": {"mean": (0.0, -1.0)},
"StringPerturb": {"mean": (0.0, 2.0)},
}
self._compare_results(attack_comp.summary(), expected_summary_results)
attack_comp.reset()
self.assertEqual(len(attack_comp.summary()), 0)
def _compare_results(self, obtained, expected) -> None:
if isinstance(expected, dict):
self.assertIsInstance(obtained, dict)
for key in expected:
self._compare_results(obtained[key], expected[key])
elif isinstance(expected, tuple):
self.assertIsInstance(obtained, tuple)
for i in range(len(expected)):
self._compare_results(obtained[i], expected[i])
else:
assertTensorAlmostEqual(self, obtained, expected)
|
2,699 |
type
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetTokenResult',
'AwaitableGetTokenResult',
'get_token',
'get_token_output',
]
@pulumi.output_type
class GetTokenResult:
"""
An object that represents a token for a container registry.
"""
def __init__(__self__, creation_date=None, credentials=None, id=None, name=None, provisioning_state=None, scope_map_id=None, status=None, system_data=None, METHOD_NAME=None):
if creation_date and not isinstance(creation_date, str):
raise TypeError("Expected argument 'creation_date' to be a str")
pulumi.set(__self__, "creation_date", creation_date)
if credentials and not isinstance(credentials, dict):
raise TypeError("Expected argument 'credentials' to be a dict")
pulumi.set(__self__, "credentials", credentials)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if scope_map_id and not isinstance(scope_map_id, str):
raise TypeError("Expected argument 'scope_map_id' to be a str")
pulumi.set(__self__, "scope_map_id", scope_map_id)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> str:
"""
The creation date of the scope map.
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter
def credentials(self) -> Optional['outputs.TokenCredentialsPropertiesResponse']:
"""
The credentials that can be used for authenticating the token.
"""
return pulumi.get(self, "credentials")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="scopeMapId")
def scope_map_id(self) -> Optional[str]:
"""
The resource ID of the scope map to which the token will be associated with.
"""
return pulumi.get(self, "scope_map_id")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
The status of the token, for example enabled or disabled.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetTokenResult(GetTokenResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTokenResult(
creation_date=self.creation_date,
credentials=self.credentials,
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
scope_map_id=self.scope_map_id,
status=self.status,
system_data=self.system_data,
METHOD_NAME=self.METHOD_NAME)
def get_token(registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
token_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTokenResult:
"""
Gets the properties of the specified token.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str token_name: The name of the token.
"""
__args__ = dict()
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
__args__['tokenName'] = token_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry/v20230701:getToken', __args__, opts=opts, typ=GetTokenResult).value
return AwaitableGetTokenResult(
creation_date=pulumi.get(__ret__, 'creation_date'),
credentials=pulumi.get(__ret__, 'credentials'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
scope_map_id=pulumi.get(__ret__, 'scope_map_id'),
status=pulumi.get(__ret__, 'status'),
system_data=pulumi.get(__ret__, 'system_data'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
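# A hypothetical invocation sketch (resource names below are placeholders and
# not taken from the original file):
#
#   token = get_token(
#       registry_name="myregistry",
#       resource_group_name="my-resource-group",
#       token_name="my-token",
#   )
#   pulumi.export("tokenStatus", token.status)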
@_utilities.lift_output_func(get_token)
def get_token_output(registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
token_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTokenResult]:
"""
Gets the properties of the specified token.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str token_name: The name of the token.
"""
...
|